[pypy-svn] r68382 - in pypy/branch/inline-fastpath-malloc/pypy: config doc/config interpreter interpreter/test jit/backend/cli jit/backend/cli/test jit/backend/llsupport jit/backend/llsupport/test jit/backend/test jit/backend/x86 jit/backend/x86/test jit/metainterp jit/metainterp/test lib lib/test2 module/posix module/posix/test module/unicodedata module/unicodedata/test objspace/std rlib rlib/rcairo rlib/test rpython rpython/lltypesystem rpython/lltypesystem/test rpython/memory rpython/memory/gc rpython/memory/gc/test rpython/memory/gctransform rpython/memory/test rpython/test rpython/tool/test translator/backendopt translator/backendopt/test translator/c translator/c/src translator/c/test translator/cli translator/cli/src translator/oosupport

fijal at codespeak.net fijal at codespeak.net
Tue Oct 13 16:57:37 CEST 2009


Author: fijal
Date: Tue Oct 13 16:57:33 2009
New Revision: 68382

Added:
   pypy/branch/inline-fastpath-malloc/pypy/doc/config/translation.gcconfig.removetypeptr.txt
      - copied unchanged from r68381, pypy/trunk/pypy/doc/config/translation.gcconfig.removetypeptr.txt
   pypy/branch/inline-fastpath-malloc/pypy/lib/test2/test_grp_extra.py
      - copied unchanged from r68381, pypy/trunk/pypy/lib/test2/test_grp_extra.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/llgroup.py
      - copied unchanged from r68381, pypy/trunk/pypy/rpython/lltypesystem/llgroup.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/test/__init__.py
      - copied unchanged from r68381, pypy/trunk/pypy/rpython/lltypesystem/test/__init__.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/test/test_llgroup.py
      - copied unchanged from r68381, pypy/trunk/pypy/rpython/lltypesystem/test/test_llgroup.py
   pypy/branch/inline-fastpath-malloc/pypy/translator/c/src/llgroup.h
      - copied unchanged from r68381, pypy/trunk/pypy/translator/c/src/llgroup.h
Removed:
   pypy/branch/inline-fastpath-malloc/pypy/rlib/rcairo/
Modified:
   pypy/branch/inline-fastpath-malloc/pypy/config/translationoption.py
   pypy/branch/inline-fastpath-malloc/pypy/interpreter/argument.py
   pypy/branch/inline-fastpath-malloc/pypy/interpreter/baseobjspace.py
   pypy/branch/inline-fastpath-malloc/pypy/interpreter/eval.py
   pypy/branch/inline-fastpath-malloc/pypy/interpreter/function.py
   pypy/branch/inline-fastpath-malloc/pypy/interpreter/gateway.py
   pypy/branch/inline-fastpath-malloc/pypy/interpreter/pycode.py
   pypy/branch/inline-fastpath-malloc/pypy/interpreter/pyframe.py
   pypy/branch/inline-fastpath-malloc/pypy/interpreter/test/test_function.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/method.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/methodfactory.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/runner.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/test/test_basic.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/test/test_runner.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/backend/llsupport/gc.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/backend/llsupport/test/test_gc.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/backend/test/support.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/runner.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_gc.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_loop.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_recursive.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_slist.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_virtualizable.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/compile.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/executor.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/jitprof.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/optimize.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/optimizeopt.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/policy.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/pyjitpl.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/simple_optimize.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/test/test_basic.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/test/test_jitprof.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/test/test_optimizeopt.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/test/test_tl.py
   pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/warmspot.py
   pypy/branch/inline-fastpath-malloc/pypy/lib/grp.py
   pypy/branch/inline-fastpath-malloc/pypy/module/posix/interp_posix.py
   pypy/branch/inline-fastpath-malloc/pypy/module/posix/test/test_posix2.py
   pypy/branch/inline-fastpath-malloc/pypy/module/unicodedata/interp_ucd.py
   pypy/branch/inline-fastpath-malloc/pypy/module/unicodedata/test/test_unicodedata.py
   pypy/branch/inline-fastpath-malloc/pypy/objspace/std/objspace.py
   pypy/branch/inline-fastpath-malloc/pypy/rlib/nonconst.py
   pypy/branch/inline-fastpath-malloc/pypy/rlib/test/test_nonconst.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/llinterp.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/llarena.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/lloperation.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/opimpl.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/rclass.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/base.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/generation.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/hybrid.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/markcompact.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/marksweep.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/semispace.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/test/test_direct.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gctransform/framework.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gctransform/transform.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gctypelayout.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gcwrapper.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/lltypelayout.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/test/test_gctypelayout.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/test/test_transformed_gc.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/rfloat.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/rtyper.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/test/test_rdict.py
   pypy/branch/inline-fastpath-malloc/pypy/rpython/tool/test/test_mkrffi.py
   pypy/branch/inline-fastpath-malloc/pypy/translator/backendopt/inline.py
   pypy/branch/inline-fastpath-malloc/pypy/translator/backendopt/removenoops.py
   pypy/branch/inline-fastpath-malloc/pypy/translator/backendopt/test/test_removenoops.py
   pypy/branch/inline-fastpath-malloc/pypy/translator/c/database.py
   pypy/branch/inline-fastpath-malloc/pypy/translator/c/funcgen.py
   pypy/branch/inline-fastpath-malloc/pypy/translator/c/gc.py
   pypy/branch/inline-fastpath-malloc/pypy/translator/c/genc.py
   pypy/branch/inline-fastpath-malloc/pypy/translator/c/node.py
   pypy/branch/inline-fastpath-malloc/pypy/translator/c/primitive.py
   pypy/branch/inline-fastpath-malloc/pypy/translator/c/src/g_include.h
   pypy/branch/inline-fastpath-malloc/pypy/translator/c/src/mem.h
   pypy/branch/inline-fastpath-malloc/pypy/translator/c/test/test_lltyped.py
   pypy/branch/inline-fastpath-malloc/pypy/translator/c/test/test_newgc.py
   pypy/branch/inline-fastpath-malloc/pypy/translator/cli/constant.py
   pypy/branch/inline-fastpath-malloc/pypy/translator/cli/cts.py
   pypy/branch/inline-fastpath-malloc/pypy/translator/cli/src/pypylib.cs
   pypy/branch/inline-fastpath-malloc/pypy/translator/oosupport/constant.py
   pypy/branch/inline-fastpath-malloc/pypy/translator/oosupport/metavm.py
Log:
Merge trunk -> branch: svn merge -r 68256:HEAD svn+ssh://codespeak.net/svn/pypy/trunk/pypy .


Modified: pypy/branch/inline-fastpath-malloc/pypy/config/translationoption.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/config/translationoption.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/config/translationoption.py	Tue Oct 13 16:57:33 2009
@@ -69,7 +69,9 @@
                  }),
     OptionDescription("gcconfig", "Configure garbage collectors", [
         BoolOption("debugprint", "Turn on debug printing for the GC",
-                   default=False)
+                   default=False),
+        BoolOption("removetypeptr", "Remove the typeptr from every object",
+                   default=False, cmdline="--gcremovetypeptr"),
         ]),
     ChoiceOption("gcrootfinder",
                  "Strategy for finding GC Roots (framework GCs only)",
@@ -95,7 +97,8 @@
     # JIT generation: use -Ojit to enable it
     BoolOption("jit", "generate a JIT",
                default=False,
-               requires=[("translation.thread", False)],
+               requires=[("translation.thread", False),
+                         ("translation.gcconfig.removetypeptr", False)],
                suggests=[("translation.gc", "hybrid"),     # or "boehm"
                          ("translation.gcrootfinder", "asmgcc"),
                          ("translation.list_comprehension_operations", True)]),
@@ -315,7 +318,7 @@
     '0':    'boehm       nobackendopt',
     '1':    'boehm       lowinline',
     'size': 'boehm       lowinline     remove_asserts',
-    'mem':  'markcompact lowinline     remove_asserts',
+    'mem':  'markcompact lowinline     remove_asserts    removetypeptr',
     '2':    'hybrid      extraopts',
     '3':    'hybrid      extraopts     remove_asserts',
     'jit':  'hybrid      extraopts     jit',
@@ -355,6 +358,8 @@
             config.translation.suggest(withsmallfuncsets=5)
         elif word == 'jit':
             config.translation.suggest(jit=True)
+        elif word == 'removetypeptr':
+            config.translation.gcconfig.suggest(removetypeptr=True)
         else:
             raise ValueError(word)
 

Modified: pypy/branch/inline-fastpath-malloc/pypy/interpreter/argument.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/interpreter/argument.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/interpreter/argument.py	Tue Oct 13 16:57:33 2009
@@ -186,127 +186,6 @@
         raise NotImplementedError()
 
 
-class ArgumentsFromValuestack(AbstractArguments):
-    """
-    Collects the arguments of a function call as stored on a PyFrame
-    valuestack.
-
-    Only for the case of purely positional arguments, for now.
-    """
-
-    def __init__(self, space, frame, nargs=0):
-        self.space = space
-        self.frame = frame
-        self.nargs = nargs
-
-    def firstarg(self):
-        if self.nargs <= 0:
-            return None
-        return self.frame.peekvalue(self.nargs - 1)
-
-    def prepend(self, w_firstarg):
-        "Return a new Arguments with a new argument inserted first."
-        args_w = self.frame.peekvalues(self.nargs)
-        return Arguments(self.space, [w_firstarg] + args_w)
-        
-    def __repr__(self):
-        return 'ArgumentsFromValuestack(%r, %r)' % (self.frame, self.nargs)
-
-    def has_keywords(self):
-        return False
-
-    def unpack(self):
-        args_w = [None] * self.nargs
-        for i in range(self.nargs):
-            args_w[i] = self.frame.peekvalue(self.nargs - 1 - i)
-        return args_w, {}
-
-    def fixedunpack(self, argcount):
-        if self.nargs > argcount:
-            raise ValueError, "too many arguments (%d expected)" % argcount
-        elif self.nargs < argcount:
-            raise ValueError, "not enough arguments (%d expected)" % argcount
-        data_w = [None] * self.nargs
-        nargs = self.nargs
-        for i in range(nargs):
-            data_w[i] = self.frame.peekvalue(nargs - 1 - i)
-        return data_w
-
-    def _rawshape(self, nextra=0):
-        return nextra + self.nargs, (), False, False
-
-    def _match_signature(self, w_firstarg, scope_w, argnames, has_vararg=False, has_kwarg=False, defaults_w=[], blindargs=0):
-        """Parse args and kwargs according to the signature of a code object,
-        or raise an ArgErr in case of failure.
-        Return the number of arguments filled in.
-        """
-        co_argcount = len(argnames)
-        extravarargs = None
-        input_argcount =  0
-
-        if w_firstarg is not None:
-            upfront = 1
-            if co_argcount > 0:
-                scope_w[0] = w_firstarg
-                input_argcount = 1
-            else:
-                extravarargs = [ w_firstarg ]
-        else:
-            upfront = 0
-
-        avail = upfront + self.nargs
-        
-        if avail + len(defaults_w) < co_argcount:
-            raise ArgErrCount(self.nargs , 0,
-                              (co_argcount, has_vararg, has_kwarg),
-                              defaults_w, co_argcount - avail - len(defaults_w))
-        if avail > co_argcount and not has_vararg:
-            raise ArgErrCount(self.nargs, 0,
-                              (co_argcount, has_vararg, has_kwarg),
-                              defaults_w, 0)
-
-        if avail >= co_argcount:
-            for i in range(co_argcount - input_argcount):
-                scope_w[i + input_argcount] = self.frame.peekvalue(self.nargs - 1 - i)
-            if has_vararg:
-                if upfront > co_argcount:
-                    assert extravarargs is not None                    
-                    stararg_w = extravarargs + [None] * self.nargs
-                    for i in range(self.nargs):
-                        stararg_w[i + len(extravarargs)] = self.frame.peekvalue(self.nargs - 1 - i)
-                else:
-                    args_left = co_argcount - upfront                
-                    stararg_w = [None] * (avail - co_argcount)
-                    for i in range(args_left, self.nargs):
-                        stararg_w[i - args_left] = self.frame.peekvalue(self.nargs - 1 - i)
-                scope_w[co_argcount] = self.space.newtuple(stararg_w)
-        else:
-            for i in range(self.nargs):
-                scope_w[i + input_argcount] = self.frame.peekvalue(self.nargs - 1 - i)
-            ndefaults = len(defaults_w)
-            missing = co_argcount - avail
-            first_default = ndefaults - missing
-            for i in range(missing):
-                scope_w[avail + i] = defaults_w[first_default + i]
-            if has_vararg:
-                scope_w[co_argcount] = self.space.newtuple([])
-
-        if has_kwarg:
-            scope_w[co_argcount + has_vararg] = self.space.newdict()
-        return co_argcount + has_vararg + has_kwarg
-    
-    def flatten(self):
-        data_w = [None] * self.nargs
-        for i in range(self.nargs):
-            data_w[i] = self.frame.peekvalue(self.nargs - 1 - i)
-        return nextra + self.nargs, (), False, False, data_w
-
-    def num_args(self):
-        return self.nargs
-
-    def num_kwds(self):
-        return 0
-
 class Arguments(AbstractArguments):
     """
     Collects the arguments of a function call.
@@ -503,7 +382,7 @@
         # which were put into place by prepend().  This way, keywords do
         # not conflict with the hidden extra argument bound by methods.
         if kwds_w and input_argcount > blindargs:
-            for name in argnames[blindargs:input_argcount]:
+            for name in argnames[blindargs:input_argcount]: # XXX
                 if name in kwds_w:
                     raise ArgErrMultipleValues(name)
 

Modified: pypy/branch/inline-fastpath-malloc/pypy/interpreter/baseobjspace.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/interpreter/baseobjspace.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/interpreter/baseobjspace.py	Tue Oct 13 16:57:33 2009
@@ -1,7 +1,7 @@
 from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag
 from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction
 from pypy.interpreter.error import OperationError
-from pypy.interpreter.argument import Arguments, ArgumentsFromValuestack
+from pypy.interpreter.argument import Arguments
 from pypy.interpreter.pycompiler import CPythonCompiler, PythonAstCompiler
 from pypy.interpreter.miscutils import ThreadLocals
 from pypy.tool.cache import Cache
@@ -734,11 +734,7 @@
             # XXX: this code is copied&pasted :-( from the slow path below
             # call_valuestack().
             args = frame.make_arguments(nargs)
-            try:
-                return self.call_args_and_c_profile(frame, w_func, args)
-            finally:
-                if isinstance(args, ArgumentsFromValuestack):
-                    args.frame = None
+            return self.call_args_and_c_profile(frame, w_func, args)
 
         if not self.config.objspace.disable_call_speedhacks:
             # XXX start of hack for performance
@@ -759,11 +755,7 @@
             # XXX end of hack for performance
 
         args = frame.make_arguments(nargs)
-        try:
-            return self.call_args(w_func, args)
-        finally:
-            if isinstance(args, ArgumentsFromValuestack):
-                args.frame = None
+        return self.call_args(w_func, args)
 
     @dont_look_inside 
     def call_args_and_c_profile(self, frame, w_func, args):

Modified: pypy/branch/inline-fastpath-malloc/pypy/interpreter/eval.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/interpreter/eval.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/interpreter/eval.py	Tue Oct 13 16:57:33 2009
@@ -13,9 +13,14 @@
     hidden_applevel = False
 
     # n >= 0 : arity
-    # -n: special cases
-    # -99: hopeless    
-    fast_natural_arity = -99
+    # FLATPYCALL = 0x100
+    # n|FLATPYCALL: pycode flat case
+    # FLATPYCALL<<x (x>=1): special cases
+    # HOPELESS: hopeless
+    FLATPYCALL = 0x100
+    PASSTHROUGHARGS1 = 0x200
+    HOPELESS = 0x400
+    fast_natural_arity = HOPELESS
 
     def __init__(self, co_name):
         self.co_name = co_name

Modified: pypy/branch/inline-fastpath-malloc/pypy/interpreter/function.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/interpreter/function.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/interpreter/function.py	Tue Oct 13 16:57:33 2009
@@ -10,7 +10,7 @@
 from pypy.interpreter.error import OperationError
 from pypy.interpreter.baseobjspace import Wrappable
 from pypy.interpreter.eval import Code
-from pypy.interpreter.argument import Arguments, ArgumentsFromValuestack
+from pypy.interpreter.argument import Arguments
 from pypy.rlib.jit import hint
 
 funccallunrolling = unrolling_iterable(range(4))
@@ -82,7 +82,7 @@
                     if i < nargs:
                         new_frame.fastlocals_w[i] = args_w[i]
                 return new_frame.run()                                    
-        elif nargs >= 1 and fast_natural_arity == -1:
+        elif nargs >= 1 and fast_natural_arity == Code.PASSTHROUGHARGS1:
             assert isinstance(code, gateway.BuiltinCodePassThroughArguments1)
             return code.funcrun_obj(self, args_w[0],
                                     Arguments(self.space,
@@ -115,25 +115,23 @@
                 return code.fastcall_4(self.space, self, frame.peekvalue(3),
                                        frame.peekvalue(2), frame.peekvalue(1),
                                         frame.peekvalue(0))
-        elif (nargs|PyCode.FLATPYCALL) == fast_natural_arity:
+        elif (nargs|Code.FLATPYCALL) == fast_natural_arity:
             assert isinstance(code, PyCode)
             return self._flat_pycall(code, nargs, frame)
-        elif fast_natural_arity == -1 and nargs >= 1:
+        elif fast_natural_arity&Code.FLATPYCALL:
+            natural_arity = fast_natural_arity&0xff
+            if natural_arity > nargs >= natural_arity-len(self.defs_w):
+                assert isinstance(code, PyCode)
+                return self._flat_pycall_defaults(code, nargs, frame,
+                                                  natural_arity-nargs)
+        elif fast_natural_arity == Code.PASSTHROUGHARGS1 and nargs >= 1:
             assert isinstance(code, gateway.BuiltinCodePassThroughArguments1)
             w_obj = frame.peekvalue(nargs-1)
             args = frame.make_arguments(nargs-1)
-            try:
-                return code.funcrun_obj(self, w_obj, args)
-            finally:
-                if isinstance(args, ArgumentsFromValuestack):
-                    args.frame = None
+            return code.funcrun_obj(self, w_obj, args)
                     
         args = frame.make_arguments(nargs)
-        try:
-            return self.call_args(args)
-        finally:
-            if isinstance(args, ArgumentsFromValuestack):
-                args.frame = None
+        return self.call_args(args)
 
     def _flat_pycall(self, code, nargs, frame):
         # code is a PyCode
@@ -142,6 +140,24 @@
         for i in xrange(nargs):
             w_arg = frame.peekvalue(nargs-1-i)
             new_frame.fastlocals_w[i] = w_arg
+            
+        return new_frame.run()                        
+
+    def _flat_pycall_defaults(self, code, nargs, frame, defs_to_load):
+        # code is a PyCode
+        new_frame = self.space.createframe(code, self.w_func_globals,
+                                                   self.closure)
+        for i in xrange(nargs):
+            w_arg = frame.peekvalue(nargs-1-i)
+            new_frame.fastlocals_w[i] = w_arg
+            
+        defs_w = self.defs_w
+        ndefs = len(defs_w)
+        start = ndefs-defs_to_load
+        i = nargs
+        for j in xrange(start, ndefs):
+            new_frame.fastlocals_w[i] = defs_w[j]
+            i += 1
         return new_frame.run()                        
 
     def getdict(self):

Modified: pypy/branch/inline-fastpath-malloc/pypy/interpreter/gateway.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/interpreter/gateway.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/interpreter/gateway.py	Tue Oct 13 16:57:33 2009
@@ -559,7 +559,7 @@
 
 class BuiltinCodePassThroughArguments1(BuiltinCode):
     _immutable_ = True
-    fast_natural_arity = -1
+    fast_natural_arity = eval.Code.PASSTHROUGHARGS1
 
     def funcrun_obj(self, func, w_obj, args):
         space = func.space

Modified: pypy/branch/inline-fastpath-malloc/pypy/interpreter/pycode.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/interpreter/pycode.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/interpreter/pycode.py	Tue Oct 13 16:57:33 2009
@@ -170,11 +170,10 @@
 
     _code_new_w = staticmethod(_code_new_w)
 
-    FLATPYCALL = 0x100
     
     def _compute_flatcall(self):
         # Speed hack!
-        self.fast_natural_arity = -99
+        self.fast_natural_arity = eval.Code.HOPELESS
         if self.co_flags & (CO_VARARGS | CO_VARKEYWORDS):
             return
         if len(self._args_as_cellvars) > 0:
@@ -182,7 +181,7 @@
         if self.co_argcount > 0xff:
             return
         
-        self.fast_natural_arity = PyCode.FLATPYCALL | self.co_argcount
+        self.fast_natural_arity = eval.Code.FLATPYCALL | self.co_argcount
 
     def funcrun(self, func, args):
         frame = self.space.createframe(self, func.w_func_globals,

Modified: pypy/branch/inline-fastpath-malloc/pypy/interpreter/pyframe.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/interpreter/pyframe.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/interpreter/pyframe.py	Tue Oct 13 16:57:33 2009
@@ -3,7 +3,7 @@
 
 from pypy.tool.pairtype import extendabletype
 from pypy.interpreter import eval, baseobjspace, pycode
-from pypy.interpreter.argument import Arguments, ArgumentsFromValuestack
+from pypy.interpreter.argument import Arguments
 from pypy.interpreter.error import OperationError
 from pypy.interpreter.executioncontext import ExecutionContext
 from pypy.interpreter import pytraceback
@@ -280,10 +280,7 @@
         self.dropvaluesuntil(len(items_w))
 
     def make_arguments(self, nargs):
-        if we_are_jitted():
-            return Arguments(self.space, self.peekvalues(nargs))
-        else:
-            return ArgumentsFromValuestack(self.space, self, nargs)
+        return Arguments(self.space, self.peekvalues(nargs))
 
     @jit.dont_look_inside
     def descr__reduce__(self, space):

Modified: pypy/branch/inline-fastpath-malloc/pypy/interpreter/test/test_function.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/interpreter/test/test_function.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/interpreter/test/test_function.py	Tue Oct 13 16:57:33 2009
@@ -1,5 +1,6 @@
 
 import unittest
+from pypy.interpreter import eval
 from pypy.interpreter.function import Function, Method, descr_function_get
 from pypy.interpreter.pycode import PyCode
 from pypy.interpreter.argument import Arguments
@@ -77,6 +78,25 @@
         assert res[0] == 23
         assert res[1] == 42
 
+    def test_simple_call_default(self):
+        def func(arg1, arg2=11, arg3=111):
+            return arg1, arg2, arg3
+        res = func(1)
+        assert res[0] == 1
+        assert res[1] == 11
+        assert res[2] == 111
+        res = func(1, 22)
+        assert res[0] == 1
+        assert res[1] == 22
+        assert res[2] == 111
+        res = func(1, 22, 333)
+        assert res[0] == 1
+        assert res[1] == 22
+        assert res[2] == 333
+
+        raises(TypeError, func)
+        raises(TypeError, func, 1, 2, 3, 4)        
+
     def test_simple_varargs(self):
         def func(arg1, *args):
             return arg1, args
@@ -564,3 +584,63 @@
         """)
 
         assert space.is_true(w_res)
+
+    def test_flatcall_default_arg(self):
+        space = self.space
+        
+        def f(a, b):
+            return a+b
+        code = PyCode._from_code(self.space, f.func_code)
+        fn = Function(self.space, code, self.space.newdict(),
+                      defs_w=[space.newint(1)])
+
+        assert fn.code.fast_natural_arity == 2|eval.Code.FLATPYCALL
+
+        def bomb(*args):
+            assert False, "shortcutting should have avoided this"
+
+        code.funcrun = bomb
+        code.funcrun_obj = bomb
+
+        w_3 = space.newint(3)
+        w_4 = space.newint(4)
+        # ignore this for now
+        #w_res = space.call_function(fn, w_3)
+        # assert space.eq_w(w_res, w_4)
+
+        w_res = space.appexec([fn, w_3], """(f, x):
+        return f(x)
+        """)
+
+        assert space.eq_w(w_res, w_4)
+
+    def test_flatcall_default_arg_method(self):
+        space = self.space
+        
+        def f(self, a, b):
+            return a+b
+        code = PyCode._from_code(self.space, f.func_code)
+        fn = Function(self.space, code, self.space.newdict(),
+                      defs_w=[space.newint(1)])
+
+        assert fn.code.fast_natural_arity == 3|eval.Code.FLATPYCALL
+
+        def bomb(*args):
+            assert False, "shortcutting should have avoided this"
+
+        code.funcrun = bomb
+        code.funcrun_obj = bomb
+
+        w_3 = space.newint(3)
+
+        w_res = space.appexec([fn, w_3], """(f, x):
+        class A(object):
+           m = f
+        y = A().m(x)
+        b = A().m
+        z = b(x)
+        return y+10*z 
+        """)
+
+        assert space.eq_w(w_res, space.wrap(44))
+

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/method.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/method.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/method.py	Tue Oct 13 16:57:33 2009
@@ -6,7 +6,7 @@
 from pypy.translator.cli.dotnet import CLR
 from pypy.translator.cli import opcodes
 from pypy.jit.metainterp import history
-from pypy.jit.metainterp.history import (AbstractValue, Const, ConstInt,
+from pypy.jit.metainterp.history import (AbstractValue, Const, ConstInt, ConstFloat,
                                          ConstObj, BoxInt, LoopToken)
 from pypy.jit.metainterp.resoperation import rop, opname
 from pypy.jit.metainterp.typesystem import oohelper
@@ -32,6 +32,8 @@
         
         if self.type == history.INT:
             return dotnet.typeof(System.Int32)
+        elif self.type == history.FLOAT:
+            return dotnet.typeof(System.Double)
         elif self.type == history.REF:
             return dotnet.typeof(System.Object)
         else:
@@ -68,6 +70,16 @@
         meth.il.Emit(OpCodes.Ldc_I4, self.value)
 
 
+class __extend__(ConstFloat):
+    __metaclass__ = extendabletype
+
+    def load(self, meth):
+        # we cannot invoke il.Emit(Ldc_R8, self.value) directly because
+        # pythonnet would select the wrong overload. The C# version works
+        # around it
+        Utils.Emit_Ldc_R8(meth.il, self.value);
+
+
 class ConstFunction(Const):
 
     def __init__(self, name):
@@ -274,6 +286,8 @@
         t = dotnet.typeof(InputArgs)
         if type == history.INT:
             fieldname = 'ints'
+        elif type == history.FLOAT:
+            fieldname = 'floats'
         elif type == history.REF:
             fieldname = 'objs'
         else:
@@ -739,6 +753,8 @@
             lines.append('self.store_result(op)')
         elif isinstance(instr, opcodes.PushArg):
             lines.append('self.push_arg(op, %d)' % instr.n)
+        elif instr == 'ldc.r8 0':
+            lines.append('Utils.Emit_Ldc_R8(self.il, 0.0)')
         else:
             assert isinstance(instr, str), 'unknown instruction %s' % instr
             if instr.startswith('call '):
@@ -751,6 +767,7 @@
     src = body.putaround('def %s(self, op):' % methname)
     dic = {'OpCodes': OpCodes,
            'System': System,
+           'Utils': Utils,
            'dotnet': dotnet}
     exec src.compile() in dic
     return dic[methname]

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/methodfactory.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/methodfactory.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/methodfactory.py	Tue Oct 13 16:57:33 2009
@@ -71,7 +71,7 @@
     def create_delegate(self, delegatetype, consts):
         t = self.typeBuilder.CreateType()
         methinfo = t.GetMethod("invoke")
-##         if self.name == 'Loop #0(r1)_2':
+##         if self.name == 'Loop1(r0)_1':
 ##             assemblyData.auto_save_assembly.Save()
         return System.Delegate.CreateDelegate(delegatetype,
                                               consts,

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/runner.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/runner.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/runner.py	Tue Oct 13 16:57:33 2009
@@ -39,7 +39,8 @@
 
 
 class CliCPU(model.AbstractCPU):
-    
+
+    supports_floats = True
     ts = oohelper
 
     def __init__(self, rtyper, stats, translate_support_code=False,
@@ -139,6 +140,9 @@
     def set_future_value_int(self, index, intvalue):
         self.get_inputargs().set_int(index, intvalue)
 
+    def set_future_value_float(self, index, intvalue):
+        self.get_inputargs().set_float(index, intvalue)
+
     def set_future_value_ref(self, index, objvalue):
         obj = dotnet.cast_to_native_object(objvalue)
         self.get_inputargs().set_obj(index, obj)
@@ -146,6 +150,9 @@
     def get_latest_value_int(self, index):
         return self.get_inputargs().get_int(index)
 
+    def get_latest_value_float(self, index):
+        return self.get_inputargs().get_float(index)
+
     def get_latest_value_ref(self, index):
         obj = self.get_inputargs().get_obj(index)
         return dotnet.cast_from_native_object(obj)
@@ -346,11 +353,16 @@
         self.ooclass = get_class_for_type(TYPE)
         self.typename = TYPE._short_name()
         self._is_array_of_pointers = (history.getkind(TYPE) == 'ref')
+        self._is_array_of_floats = (history.getkind(TYPE) == 'float')
 
     def is_array_of_pointers(self):
         # for arrays, TYPE is the type of the array item.
         return self._is_array_of_pointers
 
+    def is_array_of_floats(self):
+        # for arrays, TYPE is the type of the array item.
+        return self._is_array_of_floats
+
     def get_clitype(self):
         return dotnet.class2type(self.ooclass)
 
@@ -463,6 +475,7 @@
     selfclass = ootype.nullruntimeclass
     fieldname = ''
     _is_pointer_field = False
+    _is_float_field = False
 
     def __init__(self, TYPE, fieldname):
         DescrWithKey.__init__(self, (TYPE, fieldname))
@@ -484,10 +497,14 @@
         self.fieldname = fieldname
         self.key = key_manager.getkey((TYPE, fieldname))
         self._is_pointer_field = (history.getkind(T) == 'ref')
+        self._is_float_field = (history.getkind(T) == 'float')
 
     def is_pointer_field(self):
         return self._is_pointer_field
 
+    def is_float_field(self):
+        return self._is_float_field
+
     def equals(self, other):
         assert isinstance(other, FieldDescr)
         return self.key == other.key

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/test/test_basic.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/test/test_basic.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/test/test_basic.py	Tue Oct 13 16:57:33 2009
@@ -15,9 +15,6 @@
     def skip(self):
         py.test.skip("works only after translation")
 
-    def _skip(self):
-        py.test.skip("in-progress")
-
     test_string = skip
     test_chr2str = skip
     test_unicode = skip

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/test/test_runner.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/test/test_runner.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/backend/cli/test/test_runner.py	Tue Oct 13 16:57:33 2009
@@ -30,6 +30,9 @@
     test_field_basic = skip
     test_ooops = skip
 
+    def test_unused_result_float(self):
+        py.test.skip('fixme! max 32 inputargs so far')
+
     def test_ovf_operations(self, reversed=False):
         self.skip()
 

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/backend/llsupport/gc.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/backend/llsupport/gc.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/backend/llsupport/gc.py	Tue Oct 13 16:57:33 2009
@@ -297,7 +297,7 @@
 class GcLLDescr_framework(GcLLDescription):
 
     def __init__(self, gcdescr, translator, llop1=llop):
-        from pypy.rpython.memory.gc.base import choose_gc_from_config
+        from pypy.rpython.memory.gctypelayout import _check_typeid
         from pypy.rpython.memory.gcheader import GCHeaderBuilder
         from pypy.rpython.memory.gctransform import framework
         GcLLDescription.__init__(self, gcdescr, translator)
@@ -324,14 +324,15 @@
 
         # make a TransformerLayoutBuilder and save it on the translator
         # where it can be fished and reused by the FrameworkGCTransformer
-        self.layoutbuilder = framework.TransformerLayoutBuilder()
+        self.layoutbuilder = framework.JITTransformerLayoutBuilder(
+            gcdescr.config)
         self.layoutbuilder.delay_encoding()
         self.translator._jit2gc = {
             'layoutbuilder': self.layoutbuilder,
             'gcmapstart': lambda: gcrootmap.gcmapstart(),
             'gcmapend': lambda: gcrootmap.gcmapend(),
             }
-        self.GCClass, _ = choose_gc_from_config(gcdescr.config)
+        self.GCClass = self.layoutbuilder.GCClass
         self.moving_gc = self.GCClass.moving_gc
         self.HDRPTR = lltype.Ptr(self.GCClass.HDR)
         self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO)
@@ -344,7 +345,10 @@
              symbolic.get_array_token(lltype.GcArray(lltype.Signed), True)
 
         # make a malloc function, with three arguments
-        def malloc_basic(size, type_id, has_finalizer):
+        def malloc_basic(size, tid):
+            type_id = llop.extract_ushort(rffi.USHORT, tid)
+            has_finalizer = bool(tid & (1<<16))
+            _check_typeid(type_id)
             res = llop1.do_malloc_fixedsize_clear(llmemory.GCREF,
                                                   type_id, size, True,
                                                   has_finalizer, False)
@@ -353,11 +357,13 @@
             return res
         self.malloc_basic = malloc_basic
         self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType(
-            [lltype.Signed, lltype.Signed, lltype.Bool], llmemory.GCREF))
+            [lltype.Signed, lltype.Signed], llmemory.GCREF))
         self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType(
             [llmemory.Address, llmemory.Address], lltype.Void))
         #
-        def malloc_array(itemsize, type_id, num_elem):
+        def malloc_array(itemsize, tid, num_elem):
+            type_id = llop.extract_ushort(rffi.USHORT, tid)
+            _check_typeid(type_id)
             return llop1.do_malloc_varsize_clear(
                 llmemory.GCREF,
                 type_id, num_elem, self.array_basesize, itemsize,
@@ -393,31 +399,24 @@
         self.gcrootmap.initialize()
 
     def init_size_descr(self, S, descr):
-        from pypy.rpython.memory.gctypelayout import weakpointer_offset
         type_id = self.layoutbuilder.get_type_id(S)
+        assert not self.layoutbuilder.is_weakref(type_id)
         has_finalizer = bool(self.layoutbuilder.has_finalizer(S))
-        assert weakpointer_offset(S) == -1     # XXX
-        descr.type_id = type_id
-        descr.has_finalizer = has_finalizer
+        flags = int(has_finalizer) << 16
+        descr.tid = llop.combine_ushort(lltype.Signed, type_id, flags)
 
     def init_array_descr(self, A, descr):
         type_id = self.layoutbuilder.get_type_id(A)
-        descr.type_id = type_id
+        descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0)
 
     def gc_malloc(self, sizedescr):
         assert isinstance(sizedescr, BaseSizeDescr)
-        size = sizedescr.size
-        type_id = sizedescr.type_id
-        has_finalizer = sizedescr.has_finalizer
-        assert type_id > 0
-        return self.malloc_basic(size, type_id, has_finalizer)
+        return self.malloc_basic(sizedescr.size, sizedescr.tid)
 
     def gc_malloc_array(self, arraydescr, num_elem):
         assert isinstance(arraydescr, BaseArrayDescr)
         itemsize = arraydescr.get_item_size(self.translate_support_code)
-        type_id = arraydescr.type_id
-        assert type_id > 0
-        return self.malloc_array(itemsize, type_id, num_elem)
+        return self.malloc_array(itemsize, arraydescr.tid, num_elem)
 
     def gc_malloc_str(self, num_elem):
         return self.malloc_str(num_elem)
@@ -427,16 +426,12 @@
 
     def args_for_new(self, sizedescr):
         assert isinstance(sizedescr, BaseSizeDescr)
-        size = sizedescr.size
-        type_id = sizedescr.type_id
-        has_finalizer = sizedescr.has_finalizer
-        return [size, type_id, has_finalizer]
+        return [sizedescr.size, sizedescr.tid]
 
     def args_for_new_array(self, arraydescr):
         assert isinstance(arraydescr, BaseArrayDescr)
         itemsize = arraydescr.get_item_size(self.translate_support_code)
-        type_id = arraydescr.type_id
-        return [itemsize, type_id]
+        return [itemsize, arraydescr.tid]
 
     def get_funcptr_for_new(self):
         return llhelper(self.GC_MALLOC_BASIC, self.malloc_basic)

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/backend/llsupport/test/test_gc.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/backend/llsupport/test/test_gc.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/backend/llsupport/test/test_gc.py	Tue Oct 13 16:57:33 2009
@@ -1,5 +1,6 @@
 import random
 from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr
+from pypy.rpython.lltypesystem.lloperation import llop
 from pypy.rpython.annlowlevel import llhelper
 from pypy.jit.backend.llsupport.descr import *
 from pypy.jit.backend.llsupport.gc import *
@@ -119,8 +120,9 @@
         assert not contains_weakptr
         p = llmemory.raw_malloc(size)
         p = llmemory.cast_adr_to_ptr(p, RESTYPE)
-        self.record.append(("fixedsize", type_id, repr(size),
-                            has_finalizer, p))
+        flags = int(has_finalizer) << 16
+        tid = llop.combine_ushort(lltype.Signed, type_id, flags)
+        self.record.append(("fixedsize", repr(size), tid, p))
         return p
 
     def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size,
@@ -129,7 +131,8 @@
         p = llmemory.raw_malloc(size + itemsize * length)
         (p + offset_to_length).signed[0] = length
         p = llmemory.cast_adr_to_ptr(p, RESTYPE)
-        self.record.append(("varsize", type_id, length,
+        tid = llop.combine_ushort(lltype.Signed, type_id, 0)
+        self.record.append(("varsize", tid, length,
                             repr(size), repr(itemsize),
                             repr(offset_to_length), p))
         return p
@@ -165,42 +168,57 @@
         self.gc_ll_descr = gc_ll_descr
         self.fake_cpu = FakeCPU()
 
+    def test_args_for_new(self):
+        S = lltype.GcStruct('S', ('x', lltype.Signed))
+        sizedescr = get_size_descr(self.gc_ll_descr, S)
+        args = self.gc_ll_descr.args_for_new(sizedescr)
+        for x in args:
+            assert lltype.typeOf(x) == lltype.Signed
+        A = lltype.GcArray(lltype.Signed)
+        arraydescr = get_array_descr(self.gc_ll_descr, A)
+        args = self.gc_ll_descr.args_for_new_array(arraydescr)
+        for x in args:
+            assert lltype.typeOf(x) == lltype.Signed
+
     def test_gc_malloc(self):
         S = lltype.GcStruct('S', ('x', lltype.Signed))
         sizedescr = get_size_descr(self.gc_ll_descr, S)
         p = self.gc_ll_descr.gc_malloc(sizedescr)
-        assert self.llop1.record == [("fixedsize", sizedescr.type_id,
-                                      repr(sizedescr.size), False, p)]
+        assert self.llop1.record == [("fixedsize",
+                                      repr(sizedescr.size),
+                                      sizedescr.tid, p)]
         assert repr(self.gc_ll_descr.args_for_new(sizedescr)) == repr(
-            [sizedescr.size, sizedescr.type_id, False])
+            [sizedescr.size, sizedescr.tid])
 
     def test_gc_malloc_array(self):
         A = lltype.GcArray(lltype.Signed)
         arraydescr = get_array_descr(self.gc_ll_descr, A)
         p = self.gc_ll_descr.gc_malloc_array(arraydescr, 10)
-        assert self.llop1.record == [("varsize", arraydescr.type_id, 10,
+        assert self.llop1.record == [("varsize", arraydescr.tid, 10,
                                       repr(arraydescr.get_base_size(True)),
                                       repr(arraydescr.get_item_size(True)),
                                       repr(arraydescr.get_ofs_length(True)),
                                       p)]
         assert repr(self.gc_ll_descr.args_for_new_array(arraydescr)) == repr(
-            [arraydescr.get_item_size(True), arraydescr.type_id])
+            [arraydescr.get_item_size(True), arraydescr.tid])
 
     def test_gc_malloc_str(self):
         p = self.gc_ll_descr.gc_malloc_str(10)
         type_id = self.gc_ll_descr.layoutbuilder.get_type_id(rstr.STR)
+        tid = llop.combine_ushort(lltype.Signed, type_id, 0)
         basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR,
                                                                   True)
-        assert self.llop1.record == [("varsize", type_id, 10,
+        assert self.llop1.record == [("varsize", tid, 10,
                                       repr(basesize), repr(itemsize),
                                       repr(ofs_length), p)]
 
     def test_gc_malloc_unicode(self):
         p = self.gc_ll_descr.gc_malloc_unicode(10)
         type_id = self.gc_ll_descr.layoutbuilder.get_type_id(rstr.UNICODE)
+        tid = llop.combine_ushort(lltype.Signed, type_id, 0)
         basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE,
                                                                   True)
-        assert self.llop1.record == [("varsize", type_id, 10,
+        assert self.llop1.record == [("varsize", tid, 10,
                                       repr(basesize), repr(itemsize),
                                       repr(ofs_length), p)]
 

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/backend/test/support.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/backend/test/support.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/backend/test/support.py	Tue Oct 13 16:57:33 2009
@@ -92,6 +92,13 @@
 
 class CCompiledMixin(BaseCompiledMixin):
     type_system = 'lltype'
+    slow = False
+
+    def setup_class(cls):
+        if cls.slow:
+            from pypy.jit.conftest import option
+            if not option.run_slow_tests:
+                py.test.skip("use --slow to execute this long-running test")
 
     def _get_TranslationContext(self):
         t = TranslationContext()

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/runner.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/runner.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/runner.py	Tue Oct 13 16:57:33 2009
@@ -22,8 +22,6 @@
                                gcdescr)
         self._bootstrap_cache = {}
         self._faildescr_list = []
-        if rtyper is not None: # for tests
-            self.lltype2vtable = rtyper.lltype_to_vtable_mapping()
 
     def setup(self):
         self.assembler = Assembler386(self, self.translate_support_code)

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_gc.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_gc.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_gc.py	Tue Oct 13 16:57:33 2009
@@ -4,8 +4,10 @@
 however, is the correct handling of GC, i.e. if objects are freed as
 soon as possible (at least in a simple case).
 """
+
 import weakref, random
 import py
+from pypy.annotation import policy as annpolicy
 from pypy.rlib import rgc
 from pypy.rpython.lltypesystem import lltype, llmemory, rffi
 from pypy.rpython.lltypesystem.lloperation import llop
@@ -17,6 +19,9 @@
 stack_pos = X86StackManager.stack_pos
 
 class X(object):
+    def __init__(self, x=0):
+        self.x = x
+
     next = None
 
 class CheckError(Exception):
@@ -26,22 +31,27 @@
     if not flag:
         raise CheckError
 
-
-def get_test(main):
+def get_g(main):
     main._dont_inline_ = True
-
-    def g(n):
+    def g(num, n):
         x = X()
         x.foo = 2
         main(n, x)
         x.foo = 5
         return weakref.ref(x)
     g._dont_inline_ = True
+    return g
+
+
+def get_entry(g):
 
     def entrypoint(args):
+        num = 0
+        if len(args) == 2:
+            num = int(args[1])
         r_list = []
         for i in range(20):
-            r = g(2000)
+            r = g(num, 2000)
             r_list.append(r)
             rgc.collect()
         rgc.collect(); rgc.collect()
@@ -55,7 +65,7 @@
     return entrypoint
 
 
-def compile_and_run(f, gc, CPUClass=CPU386, **kwds):
+def compile(f, gc, **kwds):
     from pypy.annotation.listdef import s_list_of_strings
     from pypy.translator.translator import TranslationContext
     from pypy.jit.metainterp.warmspot import apply_jit
@@ -66,17 +76,26 @@
     t.config.translation.gcconfig.debugprint = True
     for name, value in kwds.items():
         setattr(t.config.translation, name, value)
-    t.buildannotator().build_types(f, [s_list_of_strings])
+    ann = t.buildannotator(policy=annpolicy.StrictAnnotatorPolicy())
+    ann.build_types(f, [s_list_of_strings])
     t.buildrtyper().specialize()
     if kwds['jit']:
-        apply_jit(t, CPUClass=CPUClass, optimizer=OPTIMIZER_SIMPLE)
+        apply_jit(t, optimizer=OPTIMIZER_SIMPLE)
     cbuilder = genc.CStandaloneBuilder(t, f, t.config)
     cbuilder.generate_source()
     cbuilder.compile()
+    return cbuilder
+
+def run(cbuilder, args=''):
     #
-    data = cbuilder.cmdexec('')
+    data = cbuilder.cmdexec(args)
     return data.strip()
 
+def compile_and_run(f, gc, **kwds):
+    cbuilder = compile(f, gc, **kwds)
+    return run(cbuilder)
+
+
 
 def test_compile_boehm():
     myjitdriver = JitDriver(greens = [], reds = ['n', 'x'])
@@ -87,51 +106,100 @@
             y = X()
             y.foo = x.foo
             n -= y.foo
-    res = compile_and_run(get_test(main), "boehm", jit=True)
+    res = compile_and_run(get_entry(get_g(main)), "boehm", jit=True)
     assert int(res) >= 16
 
-def test_compile_hybrid_1():
-    # a moving GC.  Supports malloc_varsize_nonmovable.  Simple test, works
-    # without write_barriers and root stack enumeration.
-    myjitdriver = JitDriver(greens = [], reds = ['n', 'x'])
-    def main(n, x):
-        while n > 0:
-            myjitdriver.can_enter_jit(n=n, x=x)
-            myjitdriver.jit_merge_point(n=n, x=x)
+# ______________________________________________________________________
+
+class TestCompileHybrid(object):
+    def setup_class(cls):
+        funcs = []
+        name_to_func = {}
+        for fullname in dir(cls):
+            if not fullname.startswith('define'):
+                continue
+            definefunc = getattr(cls, fullname)
+            _, name = fullname.split('_', 1)
+            beforefunc, loopfunc, afterfunc = definefunc.im_func(cls)
+            if beforefunc is None:
+                def beforefunc(n, x):
+                    return n, x, None, None, None, None, None, None, None, None, None, ''
+            if afterfunc is None:
+                def afterfunc(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s):
+                    pass
+            beforefunc.func_name = 'before_'+name
+            loopfunc.func_name = 'loop_'+name
+            afterfunc.func_name = 'after_'+name
+            funcs.append((beforefunc, loopfunc, afterfunc))
+            assert name not in name_to_func
+            name_to_func[name] = len(name_to_func)
+        def allfuncs(num, n):
+            x = X()
+            x.foo = 2
+            main_allfuncs(num, n, x)
+            x.foo = 5
+            return weakref.ref(x)
+        def main_allfuncs(num, n, x):
+            n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s = funcs[num][0](n, x)
+            while n > 0:
+                myjitdriver.can_enter_jit(num=num, n=n, x=x, x0=x0, x1=x1,
+                        x2=x2, x3=x3, x4=x4, x5=x5, x6=x6, x7=x7, l=l, s=s)
+                myjitdriver.jit_merge_point(num=num, n=n, x=x, x0=x0, x1=x1,
+                        x2=x2, x3=x3, x4=x4, x5=x5, x6=x6, x7=x7, l=l, s=s)
+
+                n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s = funcs[num][1](
+                        n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s)
+            funcs[num][2](n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s)
+        myjitdriver = JitDriver(greens = ['num'],
+                                reds = ['n', 'x', 'x0', 'x1', 'x2', 'x3', 'x4',
+                                        'x5', 'x6', 'x7', 'l', 's'])
+        cls.main_allfuncs = staticmethod(main_allfuncs)
+        cls.name_to_func = name_to_func
+        cls.cbuilder = compile(get_entry(allfuncs), "hybrid", gcrootfinder="asmgcc", jit=True)
+
+    def run(self, name):
+        num = self.name_to_func[name]
+        res = self.cbuilder.cmdexec(str(num))
+        assert int(res) == 20
+
+    def run_orig(self, name, n, x):
+        num = self.name_to_func[name]
+        self.main_allfuncs(num, n, x)
+
+    def define_compile_hybrid_1(cls):
+        # a moving GC.  Supports malloc_varsize_nonmovable.  Simple test, works
+        # without write_barriers and root stack enumeration.
+        def f(n, x, *args):
             y = X()
             y.foo = x.foo
             n -= y.foo
-    res = compile_and_run(get_test(main), "hybrid", gcrootfinder="asmgcc",
-                          jit=True)
-    assert int(res) == 20
-
-def test_compile_hybrid_2():
-    # More complex test, requires root stack enumeration but
-    # not write_barriers.
-    myjitdriver = JitDriver(greens = [], reds = ['n', 'x'])
-    def main(n, x):
-        while n > 0:
-            myjitdriver.can_enter_jit(n=n, x=x)
-            myjitdriver.jit_merge_point(n=n, x=x)
+            return (n, x) + args
+        return None, f, None
+
+    def test_compile_hybrid_1(self):
+        self.run('compile_hybrid_1')
+
+    def define_compile_hybrid_2(cls):
+        # More complex test, requires root stack enumeration but
+        # not write_barriers.
+        def f(n, x, *args):
             prev = x
-            for j in range(101):    # main() runs 20'000 times, thus allocates
+            for j in range(101):    # f() runs 20'000 times, thus allocates
                 y = X()             # a total of 2'020'000 objects
                 y.foo = prev.foo
                 prev = y
             n -= prev.foo
-    res = compile_and_run(get_test(main), "hybrid", gcrootfinder="asmgcc",
-                          jit=True)
-    assert int(res) == 20
+            return (n, x) + args
+        return None, f, None
 
-def test_compile_hybrid_3():
-    # Third version of the test.  Really requires write_barriers.
-    myjitdriver = JitDriver(greens = [], reds = ['n', 'x'])
-    def main(n, x):
-        while n > 0:
-            myjitdriver.can_enter_jit(n=n, x=x)
-            myjitdriver.jit_merge_point(n=n, x=x)
+    def test_compile_hybrid_2(self):
+        self.run('compile_hybrid_2')
+
+    def define_compile_hybrid_3(cls):
+        # Third version of the test.  Really requires write_barriers.
+        def f(n, x, *args):
             x.next = None
-            for j in range(101):    # main() runs 20'000 times, thus allocates
+            for j in range(101):    # f() runs 20'000 times, thus allocates
                 y = X()             # a total of 2'020'000 objects
                 y.foo = j+1
                 y.next = x.next
@@ -145,41 +213,40 @@
             check(not y.next)
             check(total == 101*102/2)
             n -= x.foo
-    x_test = X()
-    x_test.foo = 5
-    main(6, x_test)     # check that it does not raise CheckError
-    res = compile_and_run(get_test(main), "hybrid", gcrootfinder="asmgcc",
-                          jit=True)
-    assert int(res) == 20
-
-def test_compile_hybrid_3_extra():
-    # Extra version of the test, with tons of live vars around the residual
-    # call that all contain a GC pointer.
-    myjitdriver = JitDriver(greens = [], reds = ['n', 'x0', 'x1', 'x2', 'x3',
-                                                      'x4', 'x5', 'x6', 'x7'])
-    def residual(n=26):
-        x = X()
-        x.next = X()
-        x.next.foo = n
-        return x
-    residual._look_inside_me_ = False
-    #
-    def main(n, x):
-        residual(5)
-        x0 = residual()
-        x1 = residual()
-        x2 = residual()
-        x3 = residual()
-        x4 = residual()
-        x5 = residual()
-        x6 = residual()
-        x7 = residual()
-        n *= 19
-        while n > 0:
-            myjitdriver.can_enter_jit(n=n, x0=x0, x1=x1, x2=x2, x3=x3,
-                                           x4=x4, x5=x5, x6=x6, x7=x7)
-            myjitdriver.jit_merge_point(n=n, x0=x0, x1=x1, x2=x2, x3=x3,
-                                             x4=x4, x5=x5, x6=x6, x7=x7)
+            return (n, x) + args
+        return None, f, None
+
+
+
+    def test_compile_hybrid_3(self):
+        x_test = X()
+        x_test.foo = 5
+        self.run_orig('compile_hybrid_3', 6, x_test)     # check that it does not raise CheckError
+        self.run('compile_hybrid_3')
+
+    def define_compile_hybrid_3_extra(cls):
+        # Extra version of the test, with tons of live vars around the residual
+        # call that all contain a GC pointer.
+        def residual(n=26):
+            x = X()
+            x.next = X()
+            x.next.foo = n
+            return x
+        residual._look_inside_me_ = False
+        #
+        def before(n, x):
+            residual(5)
+            x0 = residual()
+            x1 = residual()
+            x2 = residual()
+            x3 = residual()
+            x4 = residual()
+            x5 = residual()
+            x6 = residual()
+            x7 = residual()
+            n *= 19
+            return n, None, x0, x1, x2, x3, x4, x5, x6, x7, None, None
+        def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s):
             x8 = residual()
             x9 = residual()
             check(x0.next.foo == 26)
@@ -194,84 +261,54 @@
             check(x9.next.foo == 26)
             x0, x1, x2, x3, x4, x5, x6, x7 = x7, x4, x6, x5, x3, x2, x9, x8
             n -= 1
-    main(6, None)     # check that it does not raise AssertionError
-    res = compile_and_run(get_test(main), "hybrid", gcrootfinder="asmgcc",
-                          jit=True)
-    assert int(res) == 20
-
-def test_compile_hybrid_4():
-    # Fourth version of the test, with __del__.
-    from pypy.rlib.debug import debug_print
-    class Counter:
-        cnt = 0
-    counter = Counter()
-    class Z:
-        def __del__(self):
-            counter.cnt -= 1
-    myjitdriver = JitDriver(greens = [], reds = ['n', 'x'])
-    def main(n, x):
-        debug_print('counter.cnt =', counter.cnt)
-        check(counter.cnt < 5)
-        counter.cnt = n // x.foo
-        while n > 0:
-            myjitdriver.can_enter_jit(n=n, x=x)
-            myjitdriver.jit_merge_point(n=n, x=x)
+            return n, None, x0, x1, x2, x3, x4, x5, x6, x7, None, None
+        return before, f, None
+
+    def test_compile_hybrid_3_extra(self):
+        self.run_orig('compile_hybrid_3_extra', 6, None)     # check that it does not raise CheckError
+        self.run('compile_hybrid_3_extra')
+
+    def define_compile_hybrid_4(cls):
+        # Fourth version of the test, with __del__.
+        from pypy.rlib.debug import debug_print
+        class Counter:
+            cnt = 0
+        counter = Counter()
+        class Z:
+            def __del__(self):
+                counter.cnt -= 1
+        def before(n, x):
+            debug_print('counter.cnt =', counter.cnt)
+            check(counter.cnt < 5)
+            counter.cnt = n // x.foo
+            return n, x, None, None, None, None, None, None, None, None, None, None
+        def f(n, x, *args):
             Z()
             n -= x.foo
-    res = compile_and_run(get_test(main), "hybrid", gcrootfinder="asmgcc",
-                          jit=True)
-    assert int(res) == 20
-
-def test_compile_hybrid_5():
-    # Test string manipulation.
-    myjitdriver = JitDriver(greens = [], reds = ['n', 'x', 's'])
-    def main(n, x):
-        s = ''
-        while n > 0:
-            myjitdriver.can_enter_jit(n=n, x=x, s=s)
-            myjitdriver.jit_merge_point(n=n, x=x, s=s)
+            return (n, x) + args
+        return before, f, None
+
+    def test_compile_hybrid_4(self):
+        self.run('compile_hybrid_4')
+
+    def define_compile_hybrid_5(cls):
+        # Test string manipulation.
+        def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s):
             n -= x.foo
             s += str(n)
-        check(len(s) == 1*5 + 2*45 + 3*450 + 4*500)
-    res = compile_and_run(get_test(main), "hybrid", gcrootfinder="asmgcc",
-                          jit=True)
-    assert int(res) == 20
-
-def test_compile_hybrid_6():
-    # Array manipulation (i.e. fixed-sized list).
-    myjitdriver = JitDriver(greens = [], reds = ['n', 'x', 'l'])
-    def main(n, x):
-        l = []
-        while n > 0:
-            myjitdriver.can_enter_jit(n=n, x=x, l=l)
-            myjitdriver.jit_merge_point(n=n, x=x, l=l)
-            if n < 200:
-                l = [n, n, n]
-            if n < 100:
-                check(len(l) == 3)
-                check(l[0] == n)
-                check(l[1] == n)
-                check(l[2] == n)
-            n -= x.foo
-        check(len(l) == 3)
-        check(l[0] == 2)
-        check(l[1] == 2)
-        check(l[2] == 2)
-    res = compile_and_run(get_test(main), "hybrid", gcrootfinder="asmgcc",
-                          jit=True)
-    assert int(res) == 20
-
-def test_compile_hybrid_7():
-    # Array of pointers (test the write barrier for setarrayitem_gc)
-    class X:
-        def __init__(self, x):
-            self.x = x
-    myjitdriver = JitDriver(greens = [], reds = ['n', 'x', 'l'])
-    def main(n, x):
-        l = [X(123)]
-        while n > 0:
-            myjitdriver.can_enter_jit(n=n, x=x, l=l)
-            myjitdriver.jit_merge_point(n=n, x=x, l=l)
+            return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s
+        def after(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s):
+            check(len(s) == 1*5 + 2*45 + 3*450 + 4*500)
+        return None, f, after
+
+    def test_compile_hybrid_5(self):
+        self.run('compile_hybrid_5')
+
+    def define_compile_hybrid_7(cls):
+        # Array of pointers (test the write barrier for setarrayitem_gc)
+        def before(n, x):
+            return n, x, None, None, None, None, None, None, None, None, [X(123)], None
+        def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s):
             if n < 1900:
                 check(l[0].x == 123)
                 l = [None] * 16
@@ -310,31 +347,26 @@
                 check(l[14].x == n+130)
                 check(l[15].x == n+140)
             n -= x.foo
-        check(len(l) == 16)
-        check(l[0].x == 123)
-        check(l[1].x == 2)
-        check(l[2].x == 12)
-        check(l[3].x == 22)
-        check(l[4].x == 32)
-        check(l[5].x == 42)
-        check(l[6].x == 52)
-        check(l[7].x == 62)
-        check(l[8].x == 72)
-        check(l[9].x == 82)
-        check(l[10].x == 92)
-        check(l[11].x == 102)
-        check(l[12].x == 112)
-        check(l[13].x == 122)
-        check(l[14].x == 132)
-        check(l[15].x == 142)
-
-    class CPU386CollectOnLeave(CPU386):
-
-        def execute_operations(self, loop, verbose=False):
-            op = CPU386.execute_operations(self, loop, verbose)
-            rgc.collect(0)
-            return op            
-        
-    res = compile_and_run(get_test(main), "hybrid", gcrootfinder="asmgcc",
-                          CPUClass=CPU386CollectOnLeave, jit=True)
-    assert int(res) == 20
+            return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s
+        def after(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s):
+            check(len(l) == 16)
+            check(l[0].x == 123)
+            check(l[1].x == 2)
+            check(l[2].x == 12)
+            check(l[3].x == 22)
+            check(l[4].x == 32)
+            check(l[5].x == 42)
+            check(l[6].x == 52)
+            check(l[7].x == 62)
+            check(l[8].x == 72)
+            check(l[9].x == 82)
+            check(l[10].x == 92)
+            check(l[11].x == 102)
+            check(l[12].x == 112)
+            check(l[13].x == 122)
+            check(l[14].x == 132)
+            check(l[15].x == 142)
+        return before, f, after
+
+    def test_compile_hybrid_7(self):
+        self.run('compile_hybrid_7')

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_loop.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_loop.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_loop.py	Tue Oct 13 16:57:33 2009
@@ -5,6 +5,7 @@
 class TestLoop(Jit386Mixin, LoopTest):
     # for the individual tests see
     # ====> ../../../metainterp/test/test_loop.py
+    slow = True
 
     def test_interp_many_paths(self):
         py.test.skip('not supported: pointer as argument')

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_recursive.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_recursive.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_recursive.py	Tue Oct 13 16:57:33 2009
@@ -4,6 +4,7 @@
 from pypy.jit.backend.x86.test.test_zrpy_slist import Jit386Mixin
 
 class TestRecursive(Jit386Mixin, RecursiveTests):
+    slow = True
 
     def test_inline_faulty_can_inline(self):
         py.test.skip("this test is not supposed to be translated")

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_slist.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_slist.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_slist.py	Tue Oct 13 16:57:33 2009
@@ -10,5 +10,4 @@
 class TestSList(Jit386Mixin, ListTests):
     # for the individual tests see
     # ====> ../../../test/test_slist.py
-    pass
-
+    slow = True

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_virtualizable.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_virtualizable.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/backend/x86/test/test_zrpy_virtualizable.py	Tue Oct 13 16:57:33 2009
@@ -6,4 +6,4 @@
 
 class TestLLImplicitVirtualizable(Jit386Mixin,
                        test_virtualizable.ImplicitVirtualizableTests):
-    pass
+    slow = True

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/compile.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/compile.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/compile.py	Tue Oct 13 16:57:33 2009
@@ -59,7 +59,7 @@
     metainterp_sd = metainterp.staticdata
     try:
         old_loop_token = metainterp_sd.state.optimize_loop(
-            metainterp_sd, old_loop_tokens, loop, metainterp.cpu)
+            metainterp_sd, old_loop_tokens, loop)
     except InvalidLoop:
         return None
     if old_loop_token is not None:
@@ -249,8 +249,7 @@
     try:
         target_loop_token = metainterp_sd.state.optimize_bridge(metainterp_sd,
                                                                 old_loop_tokens,
-                                                                new_loop,
-                                                                metainterp.cpu)
+                                                                new_loop)
     except InvalidLoop:
         assert 0, "InvalidLoop in optimize_bridge?"
         return None

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/executor.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/executor.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/executor.py	Tue Oct 13 16:57:33 2009
@@ -235,7 +235,9 @@
     return ConstInt(box1.getfloat() >= box2.getfloat())
 
 def do_cast_float_to_int(cpu, box1):
-    return ConstInt(int(box1.getfloat()))
+    # note: we need to call int() twice to account for the fact that
+    # int(-2147483648.0) returns a long :-(
+    return ConstInt(int(int(box1.getfloat())))
 
 def do_cast_int_to_float(cpu, box1):
     return ConstFloat(float(box1.getint()))

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/jitprof.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/jitprof.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/jitprof.py	Tue Oct 13 16:57:33 2009
@@ -2,6 +2,7 @@
 """ A small helper module for profiling JIT
 """
 
+import os
 import time
 from pypy.rlib.debug import debug_print
 
@@ -14,6 +15,11 @@
 RECORDED_OPS
 BLACKHOLED_OPS
 GUARDS
+OPT_OPS
+OPT_GUARDS
+OPT_FORCINGS
+ABORT_TOO_LONG
+ABORT_BRIDGE
 """
 
 def _setup():
@@ -143,25 +149,37 @@
         cnt = self.counters
         tim = self.times
         calls = self.calls
-        lines = ("Tracing:    \t%d\t%f\n" % (cnt[TRACING],   tim[TRACING]) +
-                 "Backend:    \t%d\t%f\n" % (cnt[BACKEND],   tim[BACKEND]) +
-                 "Running asm:\t%d\t%f\n" % (cnt[RUNNING],   tim[RUNNING]) +
-                 "Blackhole:  \t%d\t%f\n" % (cnt[BLACKHOLE], tim[BLACKHOLE]) +
-                 "TOTAL:      \t\t%f\n" % (self.tk - self.starttime) + 
-                 "ops:           \t%d\n" % cnt[OPS] +
-                 "  calls:       \t%d\n" % calls[0][0] +
-                 "  pure calls:  \t%d\n" % calls[0][1] +                 
-                 "recorded ops:  \t%d\n" % cnt[RECORDED_OPS] +
-                 "  calls:       \t%d\n" % calls[1][0] +
-                 "  pure calls:  \t%d\n" % calls[1][1] +                 
-                 "guards:        \t%d\n" % cnt[GUARDS] +                  
-                 "blackholed ops:\t%d\n" % cnt[BLACKHOLED_OPS] +
-                 "  calls:       \t%d\n" % calls[2][0] +
-                 "  pure calls:  \t%d\n" % calls[2][1]
-                 )
-        import os
-        os.write(2, lines)
-
+        self._print_line_time("Tracing", cnt[TRACING],   tim[TRACING])
+        self._print_line_time("Backend", cnt[BACKEND],   tim[BACKEND])
+        self._print_line_time("Running asm", cnt[RUNNING],   tim[RUNNING])
+        self._print_line_time("Blackhole", cnt[BLACKHOLE], tim[BLACKHOLE])
+        line = "TOTAL:      \t\t%f\n" % (self.tk - self.starttime, )
+        os.write(2, line)
+        self._print_intline("ops", cnt[OPS])
+        self._print_intline("  calls", calls[0][0])
+        self._print_intline("  pure calls", calls[0][1])
+        self._print_intline("recorded ops", cnt[RECORDED_OPS])
+        self._print_intline("  calls", calls[1][0])
+        self._print_intline("  pure calls", calls[1][1])
+        self._print_intline("guards", cnt[GUARDS])
+        self._print_intline("blackholed ops", calls[2][0])
+        self._print_intline("  pure calls", calls[2][1])
+        self._print_intline("opt ops", cnt[OPT_OPS])
+        self._print_intline("opt guards", cnt[OPT_GUARDS])
+        self._print_intline("forcings", cnt[OPT_FORCINGS])
+        self._print_intline("trace too long", cnt[ABORT_TOO_LONG])
+        self._print_intline("bridge abort", cnt[ABORT_BRIDGE])
+
+    def _print_line_time(self, string, i, tim):
+        final = "%s:%s\t%d\t%f\n" % (string, " " * max(0, 13-len(string)), i, tim)
+        os.write(2, final)
+
+    def _print_intline(self, string, i):
+        final = string + ':' + " " * max(0, 16-len(string))
+        final += '\t' + str(i) + '\n'
+        os.write(2, final)
+        
+        
 
 class BrokenProfilerData(Exception):
     pass

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/optimize.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/optimize.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/optimize.py	Tue Oct 13 16:57:33 2009
@@ -4,14 +4,15 @@
 from pypy.jit.metainterp.optimizeopt import optimize_loop_1
 from pypy.jit.metainterp.specnode import equals_specnodes
 
-def optimize_loop(metainterp_sd, old_loop_tokens, loop, cpu):
+def optimize_loop(metainterp_sd, old_loop_tokens, loop):
+    cpu = metainterp_sd.cpu
     metainterp_sd.logger_noopt.log_loop(loop.inputargs, loop.operations)
     finder = PerfectSpecializationFinder(cpu)
     finder.find_nodes_loop(loop)
     for old_loop_token in old_loop_tokens:
         if equals_specnodes(old_loop_token.specnodes, loop.token.specnodes):
             return old_loop_token
-    optimize_loop_1(cpu, loop)
+    optimize_loop_1(metainterp_sd, loop)
     return None
 
 # ____________________________________________________________
@@ -19,14 +20,15 @@
 from pypy.jit.metainterp.optimizefindnode import BridgeSpecializationFinder
 from pypy.jit.metainterp.optimizeopt import optimize_bridge_1
 
-def optimize_bridge(metainterp_sd, old_loop_tokens, bridge, cpu):
+def optimize_bridge(metainterp_sd, old_loop_tokens, bridge):
+    cpu = metainterp_sd.cpu    
     metainterp_sd.logger_noopt.log_loop(bridge.inputargs, bridge.operations)
     finder = BridgeSpecializationFinder(cpu)
     finder.find_nodes_bridge(bridge)
     for old_loop_token in old_loop_tokens:
         if finder.bridge_matches(old_loop_token.specnodes):
             bridge.operations[-1].descr = old_loop_token   # patch jump target
-            optimize_bridge_1(cpu, bridge)
+            optimize_bridge_1(metainterp_sd, bridge)
             return old_loop_token
     return None
 

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/optimizeopt.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/optimizeopt.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/optimizeopt.py	Tue Oct 13 16:57:33 2009
@@ -2,6 +2,7 @@
      ConstFloat
 from pypy.jit.metainterp.history import Const, ConstInt, ConstPtr, ConstObj, REF
 from pypy.jit.metainterp.resoperation import rop, ResOperation
+from pypy.jit.metainterp.jitprof import OPT_OPS, OPT_GUARDS, OPT_FORCINGS
 from pypy.jit.metainterp.executor import execute_nonspec
 from pypy.jit.metainterp.specnode import SpecNode, NotSpecNode, ConstantSpecNode
 from pypy.jit.metainterp.specnode import AbstractVirtualStructSpecNode
@@ -15,21 +16,21 @@
 from pypy.rlib.objectmodel import we_are_translated
 from pypy.rpython.lltypesystem import lltype
 
-def optimize_loop_1(cpu, loop):
+def optimize_loop_1(metainterp_sd, loop):
     """Optimize loop.operations to make it match the input of loop.specnodes
     and to remove internal overheadish operations.  Note that loop.specnodes
     must be applicable to the loop; you will probably get an AssertionError
     if not.
     """
-    optimizer = Optimizer(cpu, loop)
+    optimizer = Optimizer(metainterp_sd, loop)
     optimizer.setup_virtuals_and_constants()
     optimizer.propagate_forward()
 
-def optimize_bridge_1(cpu, bridge):
+def optimize_bridge_1(metainterp_sd, bridge):
     """The same, but for a bridge.  The only difference is that we don't
     expect 'specnodes' on the bridge.
     """
-    optimizer = Optimizer(cpu, bridge)
+    optimizer = Optimizer(metainterp_sd, bridge)
     optimizer.propagate_forward()
 
 # ____________________________________________________________
@@ -362,15 +363,17 @@
 
 class Optimizer(object):
 
-    def __init__(self, cpu, loop):
-        self.cpu = cpu
+    def __init__(self, metainterp_sd, loop):
+        self.metainterp_sd = metainterp_sd
+        self.cpu = metainterp_sd.cpu
         self.loop = loop
         self.values = {}
         self.interned_refs = {}
-        self.resumedata_memo = resume.ResumeDataLoopMemo(cpu)
+        self.resumedata_memo = resume.ResumeDataLoopMemo(self.cpu)
         self.heap_op_optimizer = HeapOpOptimizer(self)
 
     def forget_numberings(self, virtualbox):
+        self.metainterp_sd.profiler.count(OPT_FORCINGS)
         self.resumedata_memo.forget_numberings(virtualbox)
 
     def getinterned(self, box):
@@ -513,7 +516,9 @@
                         op = op.clone()
                         must_clone = False
                     op.args[i] = box
+        self.metainterp_sd.profiler.count(OPT_OPS)
         if op.is_guard():
+            self.metainterp_sd.profiler.count(OPT_GUARDS)
             self.store_final_boxes_in_guard(op)
         elif op.can_raise():
             self.exception_might_have_happened = True

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/policy.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/policy.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/policy.py	Tue Oct 13 16:57:33 2009
@@ -63,7 +63,7 @@
         return None
 
     def _graphs_of_all_instantiate(self, rtyper):
-        for vtable in rtyper.lltype_to_vtable_mapping().itervalues():
+        for vtable in rtyper.lltype2vtable.values():
             if vtable.instantiate:
                 yield vtable.instantiate._obj.graph
 

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/pyjitpl.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/pyjitpl.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/pyjitpl.py	Tue Oct 13 16:57:33 2009
@@ -12,6 +12,7 @@
 from pypy.jit.metainterp.logger import Logger
 from pypy.jit.metainterp.jitprof import BLACKHOLED_OPS, EmptyProfiler
 from pypy.jit.metainterp.jitprof import GUARDS, RECORDED_OPS
+from pypy.jit.metainterp.jitprof import ABORT_TOO_LONG, ABORT_BRIDGE
 from pypy.rlib.rarithmetic import intmask
 from pypy.rlib.objectmodel import specialize
 from pypy.rlib.jit import DEBUG_OFF, DEBUG_PROFILE, DEBUG_STEPS, DEBUG_DETAILED
@@ -803,6 +804,7 @@
                 try:
                     self.metainterp.reached_can_enter_jit(self.env)
                 except GiveUp:
+                    self.metainterp.staticdata.profiler.count(ABORT_BRIDGE)
                     self.metainterp.switch_to_blackhole()
         if self.metainterp.is_blackholing():
             self.blackhole_reached_merge_point(self.env)
@@ -1315,6 +1317,7 @@
         if not self.is_blackholing():
             warmrunnerstate = self.staticdata.state
             if len(self.history.operations) > warmrunnerstate.trace_limit:
+                self.staticdata.profiler.count(ABORT_TOO_LONG)
                 self.switch_to_blackhole()
 
     def _interpret(self):

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/simple_optimize.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/simple_optimize.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/simple_optimize.py	Tue Oct 13 16:57:33 2009
@@ -7,7 +7,7 @@
 
 EMPTY_VALUES = {}
 
-def optimize_loop(metainterp_sd, old_loops, loop, cpu=None):
+def optimize_loop(metainterp_sd, old_loops, loop):
     if old_loops:
         assert len(old_loops) == 1
         return old_loops[0]
@@ -16,7 +16,7 @@
         # we need it since the backend can modify those lists, which make
         # get_guard_op in compile.py invalid
         # in fact, x86 modifies this list for moving GCs
-        memo = resume.ResumeDataLoopMemo(cpu)
+        memo = resume.ResumeDataLoopMemo(metainterp_sd.cpu)
         newoperations = []
         for op in loop.operations:
             if op.is_guard():
@@ -29,6 +29,6 @@
         loop.operations = newoperations
         return None
 
-def optimize_bridge(metainterp_sd, old_loops, loop, cpu=None):
-    optimize_loop(metainterp_sd, [], loop, cpu)
+def optimize_bridge(metainterp_sd, old_loops, loop):
+    optimize_loop(metainterp_sd, [], loop)
     return old_loops[0]

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/test/test_basic.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/test/test_basic.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/test/test_basic.py	Tue Oct 13 16:57:33 2009
@@ -493,6 +493,25 @@
         res = self.meta_interp(f, [-5])
         assert res == 5+4+3+2+1+0+1+2+3+4+5+6+7+8+9
 
+    def test_float(self):
+        myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res'])
+        def f(x, y):
+            x = float(x)
+            y = float(y)
+            res = 0.0
+            while y > 0.0:
+                myjitdriver.can_enter_jit(x=x, y=y, res=res)
+                myjitdriver.jit_merge_point(x=x, y=y, res=res)
+                res += x
+                y -= 1.0
+            return res
+        res = self.meta_interp(f, [6, 7])
+        assert res == 42.0
+        self.check_loop_count(1)
+        self.check_loops({'guard_true': 1,
+                          'float_add': 1, 'float_sub': 1, 'float_gt': 1,
+                          'jump': 1})
+
     def test_print(self):
         myjitdriver = JitDriver(greens = [], reds = ['n'])
         def f(n):
@@ -926,6 +945,32 @@
         res = self.interp_operations(f, [-10])
         assert res == 456 * 2
 
+    def test_residual_external_call(self):
+        class CustomPolicy(JitPolicy):
+            def look_inside_function(self, func):
+                mod = func.__module__ or '?'
+                if mod == 'pypy.rpython.lltypesystem.module.ll_math':
+                    # XXX temporary, contains force_cast
+                    return False
+                return super(CustomPolicy, self).look_inside_function(func)
+
+        import math
+        myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res'])
+        def f(x, y):
+            x = float(x)
+            res = 0
+            while y > 0:
+                myjitdriver.can_enter_jit(x=x, y=y, res=res)
+                myjitdriver.jit_merge_point(x=x, y=y, res=res)
+                rpart, ipart = math.modf(x)
+                res += ipart
+                y -= 1
+            return res
+        res = self.meta_interp(f, [6, 7], policy=CustomPolicy())
+        assert res == 42
+        self.check_loop_count(1)
+
+
 class TestOOtype(BasicTests, OOJitMixin):
 
     def test_oohash(self):
@@ -1012,6 +1057,7 @@
 
 
 
+
 class BaseLLtypeTests(BasicTests):
 
     def test_oops_on_nongc(self):

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/test/test_jitprof.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/test/test_jitprof.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/test/test_jitprof.py	Tue Oct 13 16:57:33 2009
@@ -55,7 +55,7 @@
             ]
         assert profiler.events == expected
         assert profiler.times == [2, 1, 1, 1]
-        assert profiler.counters == [1, 1, 1, 1, 4, 3, 1, 1]
+        assert profiler.counters == [1, 1, 1, 1, 4, 3, 1, 1, 7, 1, 0, 0, 0]
 
     def test_simple_loop_with_call(self):
         @dont_look_inside

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/test/test_optimizeopt.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/test/test_optimizeopt.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/test/test_optimizeopt.py	Tue Oct 13 16:57:33 2009
@@ -10,6 +10,7 @@
 from pypy.jit.metainterp.optimizeopt import optimize_loop_1
 from pypy.jit.metainterp.optimizeutil import InvalidLoop
 from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt
+from pypy.jit.metainterp.jitprof import EmptyProfiler
 from pypy.jit.metainterp import executor, compile, resume
 from pypy.jit.metainterp.resoperation import rop, opname, ResOperation
 from pypy.jit.metainterp.test.oparser import pure_parse
@@ -22,13 +23,19 @@
         self.jitcode = code
         self.pc = pc
         self.exception_target = exc_target
+
+class FakeMetaInterpStaticData(object):
+
+    def __init__(self, cpu):
+        self.cpu = cpu
+        self.profiler = EmptyProfiler()
     
 def test_store_final_boxes_in_guard():
     from pypy.jit.metainterp.compile import ResumeGuardDescr
     from pypy.jit.metainterp.resume import tag, TAGBOX
     b0 = BoxInt()
     b1 = BoxInt()
-    opt = optimizeopt.Optimizer(None, None)
+    opt = optimizeopt.Optimizer(FakeMetaInterpStaticData(None), None)
     fdescr = ResumeGuardDescr(None)
     op = ResOperation(rop.GUARD_TRUE, [], None, descr=fdescr)
     # setup rd data
@@ -179,7 +186,7 @@
             loop.token.specnodes = self.unpack_specnodes(spectext)
         #
         self.loop = loop
-        optimize_loop_1(self.cpu, loop)
+        optimize_loop_1(FakeMetaInterpStaticData(self.cpu), loop)
         #
         expected = self.parse(optops)
         self.assert_equal(loop, expected)

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/test/test_tl.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/test/test_tl.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/test/test_tl.py	Tue Oct 13 16:57:33 2009
@@ -17,7 +17,7 @@
         res = self.meta_interp(main, [1, 10])
         assert res == 100
 
-    def setup_class(cls):
+    def _get_main(self):
         from pypy.jit.tl.tl import interp_without_call
         from pypy.jit.tl.tlopcode import compile
 
@@ -64,12 +64,13 @@
         def main(n, inputarg):
             code = codes[n]
             return interp_without_call(code, inputarg=inputarg)
-        cls.main = main
+        return main
 
     def test_tl_base(self):
         # 'backendopt=True' is used on lltype to kill unneeded access
         # to the class, which generates spurious 'guard_class'
-        res = self.meta_interp(self.main.im_func, [0, 6], listops=True,
+        main = self._get_main()
+        res = self.meta_interp(main, [0, 6], listops=True,
                                backendopt=True)
         assert res == 5040
         self.check_loops({'int_mul':1, 'jump':1,
@@ -77,9 +78,10 @@
                           'guard_false':1})
 
     def test_tl_2(self):
-        res = self.meta_interp(self.main.im_func, [1, 10], listops=True,
+        main = self._get_main()
+        res = self.meta_interp(main, [1, 10], listops=True,
                                backendopt=True)
-        assert res == self.main.im_func(1, 10)
+        assert res == main(1, 10)
         self.check_loops({'int_sub':1, 'int_le':1,
                          'int_is_true':1, 'guard_false':1, 'jump':1})
 

Modified: pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/warmspot.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/warmspot.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/jit/metainterp/warmspot.py	Tue Oct 13 16:57:33 2009
@@ -23,6 +23,7 @@
 from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper
 from pypy.jit.metainterp.jitprof import Profiler, EmptyProfiler
 from pypy.rlib.jit import DEBUG_STEPS, DEBUG_DETAILED, DEBUG_OFF, DEBUG_PROFILE
+from pypy.rlib.nonconst import NonConstant
 
 # ____________________________________________________________
 # Bootstrapping
@@ -795,7 +796,7 @@
             elif optimizer == OPTIMIZER_FULL:
                 from pypy.jit.metainterp import optimize
                 self.optimize_loop = optimize.optimize_loop
-                self.optimize_bridge = optimize.optimize_bridge                
+                self.optimize_bridge = optimize.optimize_bridge
             else:
                 raise ValueError("unknown optimizer")
 
@@ -815,6 +816,11 @@
         # not too bad.
 
         def maybe_compile_and_run(self, *args):
+            if NonConstant(False):
+                # make sure we always see the saner optimizer from an annotation
+                # point of view, otherwise we get lots of blocked ops
+                self.set_param_optimizer(OPTIMIZER_FULL)
+                
             # get the greenargs and look for the cell corresponding to the hash
             greenargs = args[:num_green_args]
             argshash = self.getkeyhash(*greenargs) & self.hashtablemask

Modified: pypy/branch/inline-fastpath-malloc/pypy/lib/grp.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/lib/grp.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/lib/grp.py	Tue Oct 13 16:57:33 2009
@@ -71,10 +71,12 @@
         raise KeyError(gid)
     return _group_from_gstruct(res)
 
-def getgrnam(gid):
-    res = libc.getgrnam(gid)
+def getgrnam(name):
+    if not isinstance(name, str):
+        raise TypeError("expected string")
+    res = libc.getgrnam(name)
     if not res:
-        raise KeyError(gid)
+        raise KeyError(name)
     return _group_from_gstruct(res)
 
 def getgrall():

Modified: pypy/branch/inline-fastpath-malloc/pypy/module/posix/interp_posix.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/module/posix/interp_posix.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/module/posix/interp_posix.py	Tue Oct 13 16:57:33 2009
@@ -737,7 +737,11 @@
 
     Call the system call getpgid().
     """
-    return space.wrap(os.getpgid(pid))
+    try:
+        pgid = os.getpgid(pid)
+    except OSError, e:
+        raise wrap_oserror(space, e)
+    return space.wrap(pgid)
 getpgid.unwrap_spec = [ObjSpace, int]
 
 def setpgid(space, pid, pgrp):

Modified: pypy/branch/inline-fastpath-malloc/pypy/module/posix/test/test_posix2.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/module/posix/test/test_posix2.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/module/posix/test/test_posix2.py	Tue Oct 13 16:57:33 2009
@@ -47,6 +47,8 @@
             cls.w_geteuid = space.wrap(os.geteuid())
         if hasattr(os, 'getgid'):
             cls.w_getgid = space.wrap(os.getgid())
+        if hasattr(os, 'getpgid'):
+            cls.w_getpgid = space.wrap(os.getpgid(os.getpid()))
         if hasattr(os, 'getsid'):
             cls.w_getsid0 = space.wrap(os.getsid(0))
         if hasattr(os, 'sysconf'):
@@ -364,6 +366,12 @@
             os = self.posix
             assert os.getgid() == self.getgid
 
+    if hasattr(os, 'getpgid'):
+        def test_os_getpgid(self):
+            os = self.posix
+            assert os.getpgid(os.getpid()) == self.getpgid
+            raises(OSError, os.getpgid, 1234567)
+
     if hasattr(os, 'setgid'):
         def test_os_setgid_error(self):
             os = self.posix

Modified: pypy/branch/inline-fastpath-malloc/pypy/module/unicodedata/interp_ucd.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/module/unicodedata/interp_ucd.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/module/unicodedata/interp_ucd.py	Tue Oct 13 16:57:33 2009
@@ -147,7 +147,7 @@
 
     def normalize(self, space, form, w_unistr):
         if not space.is_true(space.isinstance(w_unistr, space.w_unicode)):
-            raise TypeError, 'argument 2 must be unicode'
+            raise OperationError(space.w_TypeError, space.wrap('argument 2 must be unicode'))
         if form == 'NFC':
             composed = True
             decomposition = self._canon_decomposition

Modified: pypy/branch/inline-fastpath-malloc/pypy/module/unicodedata/test/test_unicodedata.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/module/unicodedata/test/test_unicodedata.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/module/unicodedata/test/test_unicodedata.py	Tue Oct 13 16:57:33 2009
@@ -78,6 +78,10 @@
         if sys.maxunicode == 65535:
             raises(KeyError, unicodedata.lookup, "GOTHIC LETTER FAIHU")
 
+    def test_normalize(self):
+        import unicodedata
+        raises(TypeError, unicodedata.normalize, 'x')
+
 class TestUnicodeData(object):
     def setup_class(cls):
         import random, unicodedata
@@ -169,3 +173,5 @@
         raises(KeyError, unicodedb_4_1_0.lookup, 'BENZENE RING WITH CIRCLE')
         raises(KeyError, unicodedb_3_2_0.name, 9187)
         raises(KeyError, unicodedb_4_1_0.name, 9187)
+
+

Modified: pypy/branch/inline-fastpath-malloc/pypy/objspace/std/objspace.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/objspace/std/objspace.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/objspace/std/objspace.py	Tue Oct 13 16:57:33 2009
@@ -2,7 +2,6 @@
 from pypy.interpreter.baseobjspace import ObjSpace, Wrappable, UnpackValueError
 from pypy.interpreter.error import OperationError, debug_print
 from pypy.interpreter.typedef import get_unique_interplevel_subclass
-from pypy.interpreter import argument
 from pypy.interpreter import pyframe
 from pypy.interpreter import function
 from pypy.interpreter.pyopcode import unrolling_compare_dispatch_table, \
@@ -173,11 +172,7 @@
                         executioncontext.c_return_trace(f, w_function)
                         return res
                     args = f.make_arguments(nargs)
-                    try:
-                        return f.space.call_args(w_function, args)
-                    finally:
-                        if isinstance(args, argument.ArgumentsFromValuestack):
-                            args.frame = None
+                    return f.space.call_args(w_function, args)
 
             if self.config.objspace.opcodes.CALL_METHOD:
                 # def LOOKUP_METHOD(...):

Modified: pypy/branch/inline-fastpath-malloc/pypy/rlib/nonconst.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rlib/nonconst.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rlib/nonconst.py	Tue Oct 13 16:57:33 2009
@@ -17,6 +17,9 @@
     def __setattr__(self, attr, value):
         setattr(self.__dict__['constant'], attr, value)
 
+    def __nonzero__(self):
+        return bool(self.__dict__['constant'])
+
 class EntryNonConstant(ExtRegistryEntry):
     _about_ = NonConstant
     

Modified: pypy/branch/inline-fastpath-malloc/pypy/rlib/test/test_nonconst.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rlib/test/test_nonconst.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rlib/test/test_nonconst.py	Tue Oct 13 16:57:33 2009
@@ -47,3 +47,15 @@
     if option.view:
         a.translator.view()
     assert isinstance(s, SomeInstance)
+
+def test_bool_nonconst():
+    def fn():
+        return bool(NonConstant(False))
+    
+    assert not fn()
+    
+    a = RPythonAnnotator()
+    s = a.build_types(fn, [])
+    assert s.knowntype is bool
+    assert not hasattr(s, 'const')
+

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/llinterp.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/llinterp.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/llinterp.py	Tue Oct 13 16:57:33 2009
@@ -878,7 +878,10 @@
         assert v_ptr.concretetype.TO._gckind == 'gc'
         newaddr = self.getval(v_newaddr)
         p = llmemory.cast_adr_to_ptr(newaddr, v_ptr.concretetype)
-        self.setvar(v_ptr, p)
+        if isinstance(v_ptr, Constant):
+            assert v_ptr.value == p
+        else:
+            self.setvar(v_ptr, p)
     op_gc_reload_possibly_moved.specialform = True
 
     def op_gc_id(self, v_ptr):

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/llarena.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/llarena.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/llarena.py	Tue Oct 13 16:57:33 2009
@@ -240,12 +240,15 @@
     """A size that is rounded up in order to preserve alignment of objects
     following it.  For arenas containing heterogenous objects.
     """
-    def __init__(self, basesize):
+    def __init__(self, basesize, minsize):
         assert isinstance(basesize, llmemory.AddressOffset)
+        assert isinstance(minsize, llmemory.AddressOffset) or minsize == 0
         self.basesize = basesize
+        self.minsize = minsize
 
     def __repr__(self):
-        return '< RoundedUpForAllocation %r >' % (self.basesize,)
+        return '< RoundedUpForAllocation %r %r >' % (self.basesize,
+                                                     self.minsize)
 
     def known_nonneg(self):
         return self.basesize.known_nonneg()
@@ -282,8 +285,12 @@
 
 def arena_reset(arena_addr, size, zero):
     """Free all objects in the arena, which can then be reused.
-    The arena is filled with zeroes if 'zero' is True.  This can also
-    be used on a subrange of the arena."""
+    This can also be used on a subrange of the arena.
+    The value of 'zero' is:
+      * 0: don't fill the area with zeroes
+      * 1: clear, optimized for a very large area of memory
+      * 2: clear, optimized for a small or medium area of memory
+    """
     arena_addr = _getfakearenaaddress(arena_addr)
     arena_addr.arena.reset(zero, arena_addr.offset, size)
 
@@ -299,10 +306,14 @@
                          % (addr.offset,))
     addr.arena.allocate_object(addr.offset, size)
 
-def round_up_for_allocation(size):
+def round_up_for_allocation(size, minsize=0):
     """Round up the size in order to preserve alignment of objects
-    following an object.  For arenas containing heterogenous objects."""
-    return RoundedUpForAllocation(size)
+    following an object.  For arenas containing heterogenous objects.
+    If minsize is specified, it gives a minimum on the resulting size."""
+    return _round_up_for_allocation(size, minsize)
+
+def _round_up_for_allocation(size, minsize):    # internal
+    return RoundedUpForAllocation(size, minsize)
 
 def arena_new_view(ptr):
     """Return a fresh memory view on an arena
@@ -385,8 +396,11 @@
 
 def llimpl_arena_reset(arena_addr, size, zero):
     if zero:
-        clear_large_memory_chunk(arena_addr, size)
-register_external(arena_reset, [llmemory.Address, int, bool], None,
+        if zero == 1:
+            clear_large_memory_chunk(arena_addr, size)
+        else:
+            llmemory.raw_memclear(arena_addr, size)
+register_external(arena_reset, [llmemory.Address, int, int], None,
                   'll_arena.arena_reset',
                   llimpl=llimpl_arena_reset,
                   llfakeimpl=arena_reset,
@@ -401,10 +415,11 @@
                   sandboxsafe=True)
 
 llimpl_round_up_for_allocation = rffi.llexternal('ROUND_UP_FOR_ALLOCATION',
-                                                 [lltype.Signed], lltype.Signed,
+                                                [lltype.Signed, lltype.Signed],
+                                                 lltype.Signed,
                                                  sandboxsafe=True,
                                                  _nowrapper=True)
-register_external(round_up_for_allocation, [int], int,
+register_external(_round_up_for_allocation, [int, int], int,
                   'll_arena.round_up_for_allocation',
                   llimpl=llimpl_round_up_for_allocation,
                   llfakeimpl=round_up_for_allocation,

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/lloperation.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/lloperation.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/lloperation.py	Tue Oct 13 16:57:33 2009
@@ -411,6 +411,13 @@
     'cast_adr_to_int':      LLOp(sideeffects=False),
     'cast_int_to_adr':      LLOp(canfold=True),   # not implemented in llinterp
 
+    'get_group_member':     LLOp(canfold=True),
+    'get_next_group_member':LLOp(canfold=True),
+    'is_group_member_nonzero':LLOp(canfold=True),
+    'extract_ushort':       LLOp(canfold=True),
+    'combine_ushort':       LLOp(canfold=True),
+    'gc_gettypeptr_group':  LLOp(canfold=True),
+
     # __________ used by the JIT ________
 
     'jit_marker':           LLOp(),

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/opimpl.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/opimpl.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/opimpl.py	Tue Oct 13 16:57:33 2009
@@ -171,10 +171,33 @@
     return not b
 
 def op_int_add(x, y):
-    assert isinstance(x, (int, llmemory.AddressOffset))
+    if not isinstance(x, (int, llmemory.AddressOffset)):
+        from pypy.rpython.lltypesystem import llgroup
+        assert isinstance(x, llgroup.CombinedSymbolic)
     assert isinstance(y, (int, llmemory.AddressOffset))
     return intmask(x + y)
 
+def op_int_sub(x, y):
+    if not isinstance(x, int):
+        from pypy.rpython.lltypesystem import llgroup
+        assert isinstance(x, llgroup.CombinedSymbolic)
+    assert isinstance(y, int)
+    return intmask(x - y)
+
+def op_int_and(x, y):
+    if not isinstance(x, int):
+        from pypy.rpython.lltypesystem import llgroup
+        assert isinstance(x, llgroup.CombinedSymbolic)
+    assert isinstance(y, int)
+    return x & y
+
+def op_int_or(x, y):
+    if not isinstance(x, int):
+        from pypy.rpython.lltypesystem import llgroup
+        assert isinstance(x, llgroup.CombinedSymbolic)
+    assert isinstance(y, int)
+    return x | y
+
 def op_int_mul(x, y):
     assert isinstance(x, (int, llmemory.AddressOffset))
     assert isinstance(y, (int, llmemory.AddressOffset))
@@ -388,6 +411,50 @@
 def op_promote_virtualizable(object, fieldname, flags):
     pass # XXX should do something
 
+def op_get_group_member(TYPE, grpptr, memberoffset):
+    from pypy.rpython.lltypesystem import llgroup
+    assert isinstance(memberoffset, llgroup.GroupMemberOffset)
+    member = memberoffset._get_group_member(grpptr)
+    return lltype.cast_pointer(TYPE, member)
+op_get_group_member.need_result_type = True
+
+def op_get_next_group_member(TYPE, grpptr, memberoffset, skipoffset):
+    from pypy.rpython.lltypesystem import llgroup
+    assert isinstance(memberoffset, llgroup.GroupMemberOffset)
+    member = memberoffset._get_next_group_member(grpptr, skipoffset)
+    return lltype.cast_pointer(TYPE, member)
+op_get_next_group_member.need_result_type = True
+
+def op_is_group_member_nonzero(memberoffset):
+    from pypy.rpython.lltypesystem import llgroup
+    if isinstance(memberoffset, llgroup.GroupMemberOffset):
+        return memberoffset.index != 0
+    else:
+        assert isinstance(memberoffset, int)
+        return memberoffset != 0
+
+def op_extract_ushort(combinedoffset):
+    from pypy.rpython.lltypesystem import llgroup
+    assert isinstance(combinedoffset, llgroup.CombinedSymbolic)
+    return combinedoffset.lowpart
+
+def op_combine_ushort(ushort, rest):
+    from pypy.rpython.lltypesystem import llgroup
+    return llgroup.CombinedSymbolic(ushort, rest)
+
+def op_gc_gettypeptr_group(TYPE, obj, grpptr, skipoffset, vtableinfo):
+    HDR            = vtableinfo[0]
+    size_gc_header = vtableinfo[1]
+    fieldname      = vtableinfo[2]
+    objaddr = llmemory.cast_ptr_to_adr(obj)
+    hdraddr = objaddr - size_gc_header
+    hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
+    typeid = getattr(hdr, fieldname)
+    if lltype.typeOf(typeid) == lltype.Signed:
+        typeid = op_extract_ushort(typeid)
+    return op_get_next_group_member(TYPE, grpptr, typeid, skipoffset)
+op_gc_gettypeptr_group.need_result_type = True
+
 # ____________________________________________________________
 
 def get_op_impl(opname):

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/rclass.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/rclass.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/lltypesystem/rclass.py	Tue Oct 13 16:57:33 2009
@@ -389,6 +389,7 @@
                                                   OBJECT, destrptr)
             vtable = self.rclass.getvtable()
             self.rtyper.type_for_typeptr[vtable._obj] = self.lowleveltype.TO
+            self.rtyper.lltype2vtable[self.lowleveltype.TO] = vtable
 
     def common_repr(self): # -> object or nongcobject reprs
         return getinstancerepr(self.rtyper, None, self.gcflavor)

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/base.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/base.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/base.py	Tue Oct 13 16:57:33 2009
@@ -13,6 +13,7 @@
     malloc_zero_filled = False
     prebuilt_gc_objects_are_static_roots = True
     can_realloc = False
+    object_minimal_size = 0
 
     def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE):
         self.gcheaderbuilder = GCHeaderBuilder(self.HDR)

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/generation.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/generation.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/generation.py	Tue Oct 13 16:57:33 2009
@@ -334,7 +334,7 @@
             if self.young_objects_with_id.length() > 0:
                 self.update_young_objects_with_id()
             # mark the nursery as free and fill it with zeroes again
-            llarena.arena_reset(self.nursery, self.nursery_size, True)
+            llarena.arena_reset(self.nursery, self.nursery_size, 2)
             if self.config.gcconfig.debugprint:
                 llop.debug_print(lltype.Void,
                                  "survived (fraction of the size):",

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/hybrid.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/hybrid.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/hybrid.py	Tue Oct 13 16:57:33 2009
@@ -222,7 +222,6 @@
     def realloc(self, ptr, newlength, fixedsize, itemsize, lengthofs, grow):
         size_gc_header = self.size_gc_header()
         addr = llmemory.cast_ptr_to_adr(ptr)
-        tid = self.get_type_id(addr)
         nonvarsize = size_gc_header + fixedsize
         try:
             varsize = ovfcheck(itemsize * newlength)
@@ -375,7 +374,7 @@
         hdr = self.header(obj)
         if hdr.tid & GCFLAG_UNVISITED:
             # This is a not-visited-yet raw_malloced object.
-            hdr.tid -= GCFLAG_UNVISITED
+            hdr.tid &= ~GCFLAG_UNVISITED
             self.rawmalloced_objects_to_trace.append(obj)
 
     def make_a_copy(self, obj, objsize):

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/markcompact.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/markcompact.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/markcompact.py	Tue Oct 13 16:57:33 2009
@@ -1,7 +1,7 @@
 
 import time
 
-from pypy.rpython.lltypesystem import lltype, llmemory, llarena
+from pypy.rpython.lltypesystem import lltype, llmemory, llarena, llgroup
 from pypy.rpython.memory.gc.base import MovingGCBase
 from pypy.rlib.debug import ll_assert
 from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
@@ -12,11 +12,10 @@
 from pypy.rpython.lltypesystem.lloperation import llop
 from pypy.rlib.objectmodel import we_are_translated
 from pypy.rpython.lltypesystem import rffi
+from pypy.rpython.memory.gcheader import GCHeaderBuilder
 
-TYPEID_MASK = 0xffff0000
-first_gcflag = 2
+first_gcflag = 1 << 16
 GCFLAG_MARKBIT = first_gcflag << 0
-GCFLAG_EXTERNAL = first_gcflag << 1
 
 memoryError = MemoryError()
 
@@ -68,8 +67,10 @@
 TID_TYPE = rffi.USHORT
 BYTES_PER_TID = rffi.sizeof(TID_TYPE)
 
+
 class MarkCompactGC(MovingGCBase):
-    HDR = lltype.Struct('header', ('forward_ptr', llmemory.Address))
+    HDR = lltype.Struct('header', ('tid', lltype.Signed))
+    typeid_is_in_field = 'tid'
     TID_BACKUP = lltype.Array(TID_TYPE, hints={'nolength':True})
     WEAKREF_OFFSETS = lltype.Array(lltype.Signed)
 
@@ -79,7 +80,7 @@
     malloc_zero_filled = True
     inline_simple_malloc = True
     inline_simple_malloc_varsize = True
-    first_unused_gcflag = first_gcflag << 2
+    first_unused_gcflag = first_gcflag << 1
     total_collection_time = 0.0
     total_collection_count = 0
 
@@ -100,21 +101,18 @@
         self.objects_with_weakrefs = self.AddressStack()
         self.tid_backup = lltype.nullptr(self.TID_BACKUP)
 
-    # flags = 1 to make lltype & llmemory happy about odd/even pointers
-
-    def init_gc_object(self, addr, typeid, flags=1):
+    def init_gc_object(self, addr, typeid16, flags=0):
         hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR))
-        hdr.forward_ptr = llmemory.cast_int_to_adr((typeid << 16) | flags)
+        hdr.tid = self.combine(typeid16, flags)
 
-    def init_gc_object_immortal(self, addr, typeid, flags=1):
+    def init_gc_object_immortal(self, addr, typeid16, flags=0):
         hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR))
-        flags |= GCFLAG_EXTERNAL
-        hdr.forward_ptr = llmemory.cast_int_to_adr((typeid << 16) | flags)
+        hdr.tid = self.combine(typeid16, flags)
         # XXX we can store forward_ptr to itself, if we fix C backend
         # so that get_forwarding_address(obj) returns
         # obj itself if obj is a prebuilt object
 
-    def malloc_fixedsize_clear(self, typeid, size, can_collect,
+    def malloc_fixedsize_clear(self, typeid16, size, can_collect,
                                has_finalizer=False, contains_weakptr=False):
         size_gc_header = self.gcheaderbuilder.size_gc_header
         totalsize = size_gc_header + size
@@ -122,7 +120,7 @@
         if raw_malloc_usage(totalsize) > self.top_of_space - result:
             result = self.obtain_free_space(totalsize)
         llarena.arena_reserve(result, totalsize)
-        self.init_gc_object(result, typeid)
+        self.init_gc_object(result, typeid16)
         self.free += totalsize
         if has_finalizer:
             self.objects_with_finalizers.append(result + size_gc_header)
@@ -130,7 +128,7 @@
             self.objects_with_weakrefs.append(result + size_gc_header)
         return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
     
-    def malloc_varsize_clear(self, typeid, length, size, itemsize,
+    def malloc_varsize_clear(self, typeid16, length, size, itemsize,
                              offset_to_length, can_collect):
         size_gc_header = self.gcheaderbuilder.size_gc_header
         nonvarsize = size_gc_header + size
@@ -143,7 +141,7 @@
         if raw_malloc_usage(totalsize) > self.top_of_space - result:
             result = self.obtain_free_space(totalsize)
         llarena.arena_reserve(result, totalsize)
-        self.init_gc_object(result, typeid)
+        self.init_gc_object(result, typeid16)
         (result + size_gc_header + offset_to_length).signed[0] = length
         self.free = result + llarena.round_up_for_allocation(totalsize)
         return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
@@ -319,11 +317,26 @@
         self.objects_with_finalizers.delete()
         self.objects_with_finalizers = objects_with_finalizers
 
-    def get_tid(self, addr):
-        return llmemory.cast_adr_to_int(self.header(addr).forward_ptr)
+    def header(self, addr):
+        # like header(), but asserts that we have a normal header
+        hdr = MovingGCBase.header(self, addr)
+        if not we_are_translated():
+            assert isinstance(hdr.tid, llgroup.CombinedSymbolic)
+        return hdr
+
+    def header_forwarded(self, addr):
+        # like header(), but asserts that we have a forwarding header
+        hdr = MovingGCBase.header(self, addr)
+        if not we_are_translated():
+            assert isinstance(hdr.tid, int)
+        return hdr
+
+    def combine(self, typeid16, flags):
+        return llop.combine_ushort(lltype.Signed, typeid16, flags)
 
     def get_type_id(self, addr):
-        return self.get_tid(addr) >> 16
+        tid = self.header(addr).tid
+        return llop.extract_ushort(rffi.USHORT, tid)
 
     def mark_roots_recursively(self):
         self.root_walker.walk_roots(
@@ -350,13 +363,13 @@
         self.to_see.append(root.address[0])
 
     def mark(self, obj):
-        previous = self.get_tid(obj)
-        self.header(obj).forward_ptr = llmemory.cast_int_to_adr(previous | GCFLAG_MARKBIT)
+        self.header(obj).tid |= GCFLAG_MARKBIT
 
     def marked(self, obj):
-        return self.get_tid(obj) & GCFLAG_MARKBIT
+        return self.header(obj).tid & GCFLAG_MARKBIT
 
     def update_forward_pointers(self, toaddr, num_of_alive_objs):
+        self.base_forwarding_addr = toaddr
         fromaddr = self.space
         size_gc_header = self.gcheaderbuilder.size_gc_header
         i = 0
@@ -366,8 +379,7 @@
             objsize = self.get_size(obj)
             totalsize = size_gc_header + objsize
             if not self.marked(obj):
-                self.set_forwarding_address(obj, NULL, i)
-                hdr.forward_ptr = NULL
+                self.set_null_forwarding_address(obj, i)
             else:
                 llarena.arena_reserve(toaddr, totalsize)
                 self.set_forwarding_address(obj, toaddr, i)
@@ -438,30 +450,44 @@
         if pointer.address[0] != NULL:
             pointer.address[0] = self.get_forwarding_address(pointer.address[0])
 
-    def is_forwarded(self, addr):
-        return self.header(addr).forward_ptr != NULL
-
     def _is_external(self, obj):
-        tid = self.get_tid(obj)
-        return (tid & 1) and (tid & GCFLAG_EXTERNAL)
+        return not (self.space <= obj < self.top_of_space)
 
     def get_forwarding_address(self, obj):
         if self._is_external(obj):
             return obj
-        return self.header(obj).forward_ptr + self.size_gc_header()
+        return self.get_header_forwarded_addr(obj)
 
-    def set_forwarding_address(self, obj, newaddr, num):
+    def set_null_forwarding_address(self, obj, num):
         self.backup_typeid(num, obj)
-        self.header(obj).forward_ptr = newaddr
+        hdr = self.header(obj)
+        hdr.tid = -1          # make the object forwarded to NULL
+
+    def set_forwarding_address(self, obj, newobjhdr, num):
+        self.backup_typeid(num, obj)
+        forward_offset = newobjhdr - self.base_forwarding_addr
+        hdr = self.header(obj)
+        hdr.tid = forward_offset     # make the object forwarded to newobj
+
+    def restore_normal_header(self, obj, num):
+        # Reverse of set_forwarding_address().
+        typeid16 = self.get_typeid_from_backup(num)
+        hdr = self.header_forwarded(obj)
+        hdr.tid = self.combine(typeid16, 0)      # restore the normal header
+
+    def get_header_forwarded_addr(self, obj):
+        return (self.base_forwarding_addr +
+                self.header_forwarded(obj).tid +
+                self.gcheaderbuilder.size_gc_header)
 
     def surviving(self, obj):
-        return self._is_external(obj) or self.header(obj).forward_ptr != NULL
+        return self._is_external(obj) or self.header_forwarded(obj).tid != -1
 
     def backup_typeid(self, num, obj):
-        self.tid_backup[num] = rffi.cast(rffi.USHORT, self.get_type_id(obj))
+        self.tid_backup[num] = self.get_type_id(obj)
 
     def get_typeid_from_backup(self, num):
-        return rffi.cast(lltype.Signed, self.tid_backup[num])
+        return self.tid_backup[num]
 
     def get_size_from_backup(self, obj, num):
         typeid = self.get_typeid_from_backup(num)
@@ -484,7 +510,6 @@
         num = 0
         while fromaddr < self.free:
             obj = fromaddr + size_gc_header
-            hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
             objsize = self.get_size_from_backup(obj, num)
             totalsize = size_gc_header + objsize
             if not self.surviving(obj): 
@@ -492,16 +517,16 @@
                 # we clear it to make debugging easier
                 llarena.arena_reset(fromaddr, totalsize, False)
             else:
-                ll_assert(self.is_forwarded(obj), "not forwarded, surviving obj")
-                forward_ptr = hdr.forward_ptr
                 if resizing:
                     end = fromaddr
-                val = (self.get_typeid_from_backup(num) << 16) + 1
-                hdr.forward_ptr = llmemory.cast_int_to_adr(val)
-                if fromaddr != forward_ptr:
+                forward_obj = self.get_header_forwarded_addr(obj)
+                self.restore_normal_header(obj, num)
+                if obj != forward_obj:
                     #llop.debug_print(lltype.Void, "Copying from to",
                     #                 fromaddr, forward_ptr, totalsize)
-                    llmemory.raw_memmove(fromaddr, forward_ptr, totalsize)
+                    llmemory.raw_memmove(fromaddr,
+                                         forward_obj - size_gc_header,
+                                         totalsize)
                 if resizing and end - start > GC_CLEARANCE:
                     diff = end - start
                     #llop.debug_print(lltype.Void, "Cleaning", start, diff)

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/marksweep.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/marksweep.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/marksweep.py	Tue Oct 13 16:57:33 2009
@@ -4,7 +4,7 @@
 from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
 from pypy.rpython.memory.support import get_address_stack
 from pypy.rpython.memory.gcheader import GCHeaderBuilder
-from pypy.rpython.lltypesystem import lltype, llmemory
+from pypy.rpython.lltypesystem import lltype, llmemory, rffi
 from pypy.rlib.objectmodel import free_non_gc_object
 from pypy.rpython.lltypesystem.lloperation import llop
 from pypy.rlib.rarithmetic import ovfcheck
@@ -25,8 +25,11 @@
     HDRPTR = lltype.Ptr(HDR)
     # need to maintain a linked list of malloced objects, since we used the
     # systems allocator and can't walk the heap
-    HDR.become(lltype.Struct('header', ('typeid', lltype.Signed),
+    HDR.become(lltype.Struct('header', ('typeid16', rffi.USHORT),
+                                       ('mark', lltype.Bool),
+                                       ('curpool_flag', lltype.Bool),
                                        ('next', HDRPTR)))
+    typeid_is_in_field = 'typeid16'
 
     POOL = lltype.GcStruct('gc_pool')
     POOLPTR = lltype.Ptr(POOL)
@@ -75,14 +78,14 @@
         if self.bytes_malloced > self.bytes_malloced_threshold:
             self.collect()
 
-    def write_malloc_statistics(self, typeid, size, result, varsize):
+    def write_malloc_statistics(self, typeid16, size, result, varsize):
         pass
 
-    def write_free_statistics(self, typeid, result):
+    def write_free_statistics(self, typeid16, result):
         pass
 
-    def malloc_fixedsize(self, typeid, size, can_collect, has_finalizer=False,
-                         contains_weakptr=False):
+    def malloc_fixedsize(self, typeid16, size, can_collect,
+                         has_finalizer=False, contains_weakptr=False):
         if can_collect:
             self.maybe_collect()
         size_gc_header = self.gcheaderbuilder.size_gc_header
@@ -97,7 +100,9 @@
         if not result:
             raise memoryError
         hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
-        hdr.typeid = typeid << 1
+        hdr.typeid16 = typeid16
+        hdr.mark = False
+        hdr.curpool_flag = False
         if has_finalizer:
             hdr.next = self.malloced_objects_with_finalizer
             self.malloced_objects_with_finalizer = hdr
@@ -109,13 +114,13 @@
             self.malloced_objects = hdr
         self.bytes_malloced = bytes_malloced
         result += size_gc_header
-        #llop.debug_print(lltype.Void, 'malloc typeid', typeid,
+        #llop.debug_print(lltype.Void, 'malloc typeid', typeid16,
         #                 '->', llmemory.cast_adr_to_int(result))
-        self.write_malloc_statistics(typeid, tot_size, result, False)
+        self.write_malloc_statistics(typeid16, tot_size, result, False)
         return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
     malloc_fixedsize._dont_inline_ = True
 
-    def malloc_fixedsize_clear(self, typeid, size, can_collect,
+    def malloc_fixedsize_clear(self, typeid16, size, can_collect,
                                has_finalizer=False, contains_weakptr=False):
         if can_collect:
             self.maybe_collect()
@@ -132,7 +137,9 @@
             raise memoryError
         raw_memclear(result, tot_size)
         hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
-        hdr.typeid = typeid << 1
+        hdr.typeid16 = typeid16
+        hdr.mark = False
+        hdr.curpool_flag = False
         if has_finalizer:
             hdr.next = self.malloced_objects_with_finalizer
             self.malloced_objects_with_finalizer = hdr
@@ -144,14 +151,14 @@
             self.malloced_objects = hdr
         self.bytes_malloced = bytes_malloced
         result += size_gc_header
-        #llop.debug_print(lltype.Void, 'malloc typeid', typeid,
+        #llop.debug_print(lltype.Void, 'malloc typeid', typeid16,
         #                 '->', llmemory.cast_adr_to_int(result))
-        self.write_malloc_statistics(typeid, tot_size, result, False)
+        self.write_malloc_statistics(typeid16, tot_size, result, False)
         return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
     malloc_fixedsize_clear._dont_inline_ = True
 
-    def malloc_varsize(self, typeid, length, size, itemsize, offset_to_length,
-                       can_collect):
+    def malloc_varsize(self, typeid16, length, size, itemsize,
+                       offset_to_length, can_collect):
         if can_collect:
             self.maybe_collect()
         size_gc_header = self.gcheaderbuilder.size_gc_header
@@ -169,20 +176,22 @@
             raise memoryError
         (result + size_gc_header + offset_to_length).signed[0] = length
         hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
-        hdr.typeid = typeid << 1
+        hdr.typeid16 = typeid16
+        hdr.mark = False
+        hdr.curpool_flag = False
         hdr.next = self.malloced_objects
         self.malloced_objects = hdr
         self.bytes_malloced = bytes_malloced
             
         result += size_gc_header
         #llop.debug_print(lltype.Void, 'malloc_varsize length', length,
-        #                 'typeid', typeid,
+        #                 'typeid', typeid16,
         #                 '->', llmemory.cast_adr_to_int(result))
-        self.write_malloc_statistics(typeid, tot_size, result, True)
+        self.write_malloc_statistics(typeid16, tot_size, result, True)
         return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
     malloc_varsize._dont_inline_ = True
 
-    def malloc_varsize_clear(self, typeid, length, size, itemsize,
+    def malloc_varsize_clear(self, typeid16, length, size, itemsize,
                              offset_to_length, can_collect):
         if can_collect:
             self.maybe_collect()
@@ -202,16 +211,18 @@
         raw_memclear(result, tot_size)        
         (result + size_gc_header + offset_to_length).signed[0] = length
         hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
-        hdr.typeid = typeid << 1
+        hdr.typeid16 = typeid16
+        hdr.mark = False
+        hdr.curpool_flag = False
         hdr.next = self.malloced_objects
         self.malloced_objects = hdr
         self.bytes_malloced = bytes_malloced
             
         result += size_gc_header
         #llop.debug_print(lltype.Void, 'malloc_varsize length', length,
-        #                 'typeid', typeid,
+        #                 'typeid', typeid16,
         #                 '->', llmemory.cast_adr_to_int(result))
-        self.write_malloc_statistics(typeid, tot_size, result, True)
+        self.write_malloc_statistics(typeid16, tot_size, result, True)
         return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
     malloc_varsize_clear._dont_inline_ = True
 
@@ -251,10 +262,10 @@
         hdr = self.malloced_objects_with_finalizer
         while hdr:
             next = hdr.next
-            typeid = hdr.typeid >> 1
+            typeid = hdr.typeid16
             gc_info = llmemory.cast_ptr_to_adr(hdr)
             obj = gc_info + size_gc_header
-            if not hdr.typeid & 1:
+            if not hdr.mark:
                 self.add_reachable_to_stack(obj, objects)
             addr = llmemory.cast_ptr_to_adr(hdr)
             size = self.fixed_size(typeid)
@@ -271,31 +282,30 @@
             curr = objects.pop()
             gc_info = curr - size_gc_header
             hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
-            if hdr.typeid & 1:
+            if hdr.mark:
                 continue
             self.add_reachable_to_stack(curr, objects)
-            hdr.typeid = hdr.typeid | 1
+            hdr.mark = True
         objects.delete()
         # also mark self.curpool
         if self.curpool:
             gc_info = llmemory.cast_ptr_to_adr(self.curpool) - size_gc_header
             hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
-            hdr.typeid = hdr.typeid | 1
+            hdr.mark = True
         # go through the list of objects containing weak pointers
         # and kill the links if they go to dead objects
         # if the object itself is not marked, free it
         hdr = self.objects_with_weak_pointers
         surviving = lltype.nullptr(self.HDR)
         while hdr:
-            typeid = hdr.typeid >> 1
+            typeid = hdr.typeid16
             next = hdr.next
             addr = llmemory.cast_ptr_to_adr(hdr)
             size = self.fixed_size(typeid)
             estimate = raw_malloc_usage(size_gc_header + size)
-            if hdr.typeid & 1:
-                typeid = hdr.typeid >> 1
+            if hdr.mark:
                 offset = self.weakpointer_offset(typeid)
-                hdr.typeid = hdr.typeid & (~1)
+                hdr.mark = False
                 gc_info = llmemory.cast_ptr_to_adr(hdr)
                 weakref_obj = gc_info + size_gc_header
                 pointing_to = (weakref_obj + offset).address[0]
@@ -306,7 +316,7 @@
                     # pointed to object will die
                     # XXX what to do if the object has a finalizer which resurrects
                     # the object?
-                    if not hdr_pointing_to.typeid & 1:
+                    if not hdr_pointing_to.mark:
                         (weakref_obj + offset).address[0] = NULL
                 hdr.next = surviving
                 surviving = hdr
@@ -331,7 +341,7 @@
             ppnext += llmemory.offsetof(self.POOLNODE, 'linkedlist')
             hdr = poolnode.linkedlist
             while hdr:  #sweep
-                typeid = hdr.typeid >> 1
+                typeid = hdr.typeid16
                 next = hdr.next
                 addr = llmemory.cast_ptr_to_adr(hdr)
                 size = self.fixed_size(typeid)
@@ -339,8 +349,8 @@
                     length = (addr + size_gc_header + self.varsize_offset_to_length(typeid)).signed[0]
                     size += self.varsize_item_sizes(typeid) * length
                 estimate = raw_malloc_usage(size_gc_header + size)
-                if hdr.typeid & 1:
-                    hdr.typeid = hdr.typeid & (~1)
+                if hdr.mark:
+                    hdr.mark = False
                     ppnext.address[0] = addr
                     ppnext = llmemory.cast_ptr_to_adr(hdr)
                     ppnext += llmemory.offsetof(self.HDR, 'next')
@@ -423,17 +433,17 @@
         last = lltype.nullptr(self.HDR)
         while hdr:
             next = hdr.next
-            if hdr.typeid & 1:
+            if hdr.mark:
                 hdr.next = lltype.nullptr(self.HDR)
                 if not self.malloced_objects_with_finalizer:
                     self.malloced_objects_with_finalizer = hdr
                 else:
                     last.next = hdr
-                hdr.typeid = hdr.typeid & (~1)
+                hdr.mark = False
                 last = hdr
             else:
                 obj = llmemory.cast_ptr_to_adr(hdr) + size_gc_header
-                finalizer = self.getfinalizer(hdr.typeid >> 1)
+                finalizer = self.getfinalizer(hdr.typeid16)
                 # make malloced_objects_with_finalizer consistent
                 # for the sake of a possible collection caused by finalizer
                 if not self.malloced_objects_with_finalizer:
@@ -473,7 +483,7 @@
         size_gc_header = self.gcheaderbuilder.size_gc_header
         gc_info = gcobjectaddr - size_gc_header
         hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
-        hdr.typeid = hdr.typeid & (~1)
+        hdr.mark = False
 
     STAT_HEAP_USAGE     = 0
     STAT_BYTES_MALLOCED = 1
@@ -483,7 +493,7 @@
         size_gc_header = self.gcheaderbuilder.size_gc_header
         gc_info = obj - size_gc_header
         hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
-        return hdr.typeid >> 1
+        return hdr.typeid16
 
     def add_reachable_to_stack(self, obj, objects):
         self.trace(obj, self._add_reachable, objects)
@@ -504,13 +514,17 @@
 
     def init_gc_object(self, addr, typeid):
         hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
-        hdr.typeid = typeid << 1
+        hdr.typeid16 = typeid
+        hdr.mark = False
+        hdr.curpool_flag = False
 
     def init_gc_object_immortal(self, addr, typeid, flags=0):
         # prebuilt gc structures always have the mark bit set
         # ignore flags
         hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
-        hdr.typeid = (typeid << 1) | 1
+        hdr.typeid16 = typeid
+        hdr.mark = True
+        hdr.curpool_flag = False
 
     # experimental support for thread cloning
     def x_swap_pool(self, newpool):
@@ -566,7 +580,6 @@
         # in the specified pool.  A new pool is built to contain the
         # copies, and the 'gcobjectptr' and 'pool' fields of clonedata
         # are adjusted to refer to the result.
-        CURPOOL_FLAG = sys.maxint // 2 + 1
 
         # install a new pool into which all the mallocs go
         curpool = self.x_swap_pool(lltype.nullptr(X_POOL))
@@ -583,7 +596,7 @@
         hdr = hdr.next   # skip the POOL object itself
         while hdr:
             next = hdr.next
-            hdr.typeid |= CURPOOL_FLAG   # mark all objects from malloced_list
+            hdr.curpool_flag = True   # mark all objects from malloced_list
             hdr.next = lltype.nullptr(self.HDR)  # abused to point to the copy
             oldobjects.append(llmemory.cast_ptr_to_adr(hdr))
             hdr = next
@@ -600,12 +613,11 @@
                 continue   # pointer is NULL
             oldhdr = llmemory.cast_adr_to_ptr(oldobj_addr - size_gc_header,
                                               self.HDRPTR)
-            typeid = oldhdr.typeid
-            if not (typeid & CURPOOL_FLAG):
+            if not oldhdr.curpool_flag:
                 continue   # ignore objects that were not in the malloced_list
             newhdr = oldhdr.next      # abused to point to the copy
             if not newhdr:
-                typeid = (typeid & ~CURPOOL_FLAG) >> 1
+                typeid = oldhdr.typeid16
                 size = self.fixed_size(typeid)
                 # XXX! collect() at the beginning if the free heap is low
                 if self.is_varsize(typeid):
@@ -631,11 +643,15 @@
                 newhdr_addr = newobj_addr - size_gc_header
                 newhdr = llmemory.cast_adr_to_ptr(newhdr_addr, self.HDRPTR)
 
-                saved_id   = newhdr.typeid    # XXX hack needed for genc
+                saved_id   = newhdr.typeid16  # XXX hack needed for genc
+                saved_flg1 = newhdr.mark
+                saved_flg2 = newhdr.curpool_flag
                 saved_next = newhdr.next      # where size_gc_header == 0
                 raw_memcopy(oldobj_addr, newobj_addr, size)
-                newhdr.typeid = saved_id
-                newhdr.next   = saved_next
+                newhdr.typeid16     = saved_id
+                newhdr.mark         = saved_flg1
+                newhdr.curpool_flag = saved_flg2
+                newhdr.next         = saved_next
 
                 offsets = self.offsets_to_gc_pointers(typeid)
                 i = 0
@@ -669,7 +685,7 @@
         next = lltype.nullptr(self.HDR)
         while oldobjects.non_empty():
             hdr = llmemory.cast_adr_to_ptr(oldobjects.pop(), self.HDRPTR)
-            hdr.typeid &= ~CURPOOL_FLAG   # reset the flag
+            hdr.curpool_flag = False   # reset the flag
             hdr.next = next
             next = hdr
         oldobjects.delete()

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/semispace.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/semispace.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/semispace.py	Tue Oct 13 16:57:33 2009
@@ -4,7 +4,7 @@
 from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
 from pypy.rpython.memory.support import get_address_stack, get_address_deque
 from pypy.rpython.memory.support import AddressDict
-from pypy.rpython.lltypesystem import lltype, llmemory, llarena
+from pypy.rpython.lltypesystem import lltype, llmemory, llarena, rffi
 from pypy.rlib.objectmodel import free_non_gc_object
 from pypy.rlib.debug import ll_assert
 from pypy.rpython.lltypesystem.lloperation import llop
@@ -13,7 +13,6 @@
 
 import sys, os, time
 
-TYPEID_MASK = 0xffff
 first_gcflag = 1 << 16
 GCFLAG_FORWARDED = first_gcflag
 # GCFLAG_EXTERNAL is set on objects not living in the semispace:
@@ -23,6 +22,7 @@
 
 memoryError = MemoryError()
 
+
 class SemiSpaceGC(MovingGCBase):
     _alloc_flavor_ = "raw"
     inline_simple_malloc = True
@@ -32,11 +32,14 @@
     total_collection_time = 0.0
     total_collection_count = 0
 
-    HDR = lltype.Struct('header', ('tid', lltype.Signed))
+    HDR = lltype.Struct('header', ('tid', lltype.Signed))   # XXX or rffi.INT?
+    typeid_is_in_field = 'tid'
     FORWARDSTUB = lltype.GcStruct('forwarding_stub',
                                   ('forw', llmemory.Address))
     FORWARDSTUBPTR = lltype.Ptr(FORWARDSTUB)
 
+    object_minimal_size = llmemory.sizeof(FORWARDSTUB)
+
     # the following values override the default arguments of __init__ when
     # translating to a real backend.
     TRANSLATION_PARAMS = {'space_size': 8*1024*1024} # XXX adjust
@@ -64,7 +67,7 @@
     # This class only defines the malloc_{fixed,var}size_clear() methods
     # because the spaces are filled with zeroes in advance.
 
-    def malloc_fixedsize_clear(self, typeid, size, can_collect,
+    def malloc_fixedsize_clear(self, typeid16, size, can_collect,
                                has_finalizer=False, contains_weakptr=False):
         size_gc_header = self.gcheaderbuilder.size_gc_header
         totalsize = size_gc_header + size
@@ -74,7 +77,7 @@
                 raise memoryError
             result = self.obtain_free_space(totalsize)
         llarena.arena_reserve(result, totalsize)
-        self.init_gc_object(result, typeid)
+        self.init_gc_object(result, typeid16)
         self.free = result + totalsize
         if has_finalizer:
             self.objects_with_finalizers.append(result + size_gc_header)
@@ -82,7 +85,7 @@
             self.objects_with_weakrefs.append(result + size_gc_header)
         return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
 
-    def malloc_varsize_clear(self, typeid, length, size, itemsize,
+    def malloc_varsize_clear(self, typeid16, length, size, itemsize,
                              offset_to_length, can_collect):
         size_gc_header = self.gcheaderbuilder.size_gc_header
         nonvarsize = size_gc_header + size
@@ -97,7 +100,7 @@
                 raise memoryError
             result = self.obtain_free_space(totalsize)
         llarena.arena_reserve(result, totalsize)
-        self.init_gc_object(result, typeid)
+        self.init_gc_object(result, typeid16)
         (result + size_gc_header + offset_to_length).signed[0] = length
         self.free = result + llarena.round_up_for_allocation(totalsize)
         return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
@@ -385,6 +388,9 @@
         stub = llmemory.cast_adr_to_ptr(obj, self.FORWARDSTUBPTR)
         stub.forw = newobj
 
+    def combine(self, typeid16, flags):
+        return llop.combine_ushort(lltype.Signed, typeid16, flags)
+
     def get_type_id(self, addr):
         tid = self.header(addr).tid
         ll_assert(tid & (GCFLAG_FORWARDED|GCFLAG_EXTERNAL) != GCFLAG_FORWARDED,
@@ -393,15 +399,16 @@
         # Although calling get_type_id() on a forwarded object works by itself,
         # we catch it as an error because it's likely that what is then
         # done with the typeid is bogus.
-        return tid & TYPEID_MASK
+        return llop.extract_ushort(rffi.USHORT, tid)
 
-    def init_gc_object(self, addr, typeid, flags=0):
+    def init_gc_object(self, addr, typeid16, flags=0):
         hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR))
-        hdr.tid = typeid | flags
+        hdr.tid = self.combine(typeid16, flags)
 
-    def init_gc_object_immortal(self, addr, typeid, flags=0):
+    def init_gc_object_immortal(self, addr, typeid16, flags=0):
         hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR))
-        hdr.tid = typeid | flags | GCFLAG_EXTERNAL | GCFLAG_FORWARDED
+        flags |= GCFLAG_EXTERNAL | GCFLAG_FORWARDED
+        hdr.tid = self.combine(typeid16, flags)
         # immortal objects always have GCFLAG_FORWARDED set;
         # see get_forwarding_address().
 

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/test/test_direct.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/test/test_direct.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gc/test/test_direct.py	Tue Oct 13 16:57:33 2009
@@ -68,7 +68,7 @@
         self.gc.DEBUG = True
         self.rootwalker = DirectRootWalker(self)
         self.gc.set_root_walker(self.rootwalker)
-        self.layoutbuilder = TypeLayoutBuilder()
+        self.layoutbuilder = TypeLayoutBuilder(self.GCClass, {})
         self.get_type_id = self.layoutbuilder.get_type_id
         self.layoutbuilder.initialize_gc_query_function(self.gc)
         self.gc.setup()

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gctransform/framework.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gctransform/framework.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gctransform/framework.py	Tue Oct 13 16:57:33 2009
@@ -1,7 +1,7 @@
 from pypy.rpython.memory.gctransform.transform import GCTransformer
 from pypy.rpython.memory.gctransform.support import find_gc_ptrs_in_type, \
      get_rtti, ll_call_destructor, type_contains_pyobjs, var_ispyobj
-from pypy.rpython.lltypesystem import lltype, llmemory
+from pypy.rpython.lltypesystem import lltype, llmemory, rffi
 from pypy.rpython import rmodel
 from pypy.rpython.memory import gctypelayout
 from pypy.rpython.memory.gc import marksweep
@@ -22,6 +22,9 @@
 import sys, types
 
 
+TYPE_ID = rffi.USHORT
+
+
 class CollectAnalyzer(graphanalyze.BoolGraphAnalyzer):
 
     def analyze_direct_call(self, graph, seen=None):
@@ -127,15 +130,18 @@
         if hasattr(translator, '_jit2gc'):
             self.layoutbuilder = translator._jit2gc['layoutbuilder']
         else:
-            self.layoutbuilder = TransformerLayoutBuilder()
+            if translator.config.translation.gcconfig.removetypeptr:
+                lltype2vtable = translator.rtyper.lltype2vtable
+            else:
+                lltype2vtable = {}
+            self.layoutbuilder = TransformerLayoutBuilder(GCClass,
+                                                          lltype2vtable)
         self.layoutbuilder.transformer = self
         self.get_type_id = self.layoutbuilder.get_type_id
 
-        # set up dummy a table, to be overwritten with the real one in finish()
-        type_info_table = lltype._ptr(
-            lltype.Ptr(gctypelayout.GCData.TYPE_INFO_TABLE),
-            "delayed!type_info_table", solid=True)
-        gcdata = gctypelayout.GCData(type_info_table)
+        # set up GCData with the llgroup from the layoutbuilder, which
+        # will grow as more TYPE_INFO members are added to it
+        gcdata = gctypelayout.GCData(self.layoutbuilder.type_info_group)
 
         # initialize the following two fields with a random non-NULL address,
         # to make the annotator happy.  The fields are patched in finish()
@@ -163,6 +169,8 @@
             gcdata.gc.setup()
 
         bk = self.translator.annotator.bookkeeper
+        r_typeid16 = rffi.platform.numbertype_to_rclass[TYPE_ID]
+        s_typeid16 = annmodel.SomeInteger(knowntype=r_typeid16)
 
         # the point of this little dance is to not annotate
         # self.gcdata.static_root_xyz as constants. XXX is it still needed??
@@ -212,7 +220,7 @@
         malloc_fixedsize_clear_meth = GCClass.malloc_fixedsize_clear.im_func
         self.malloc_fixedsize_clear_ptr = getfn(
             malloc_fixedsize_clear_meth,
-            [s_gc, annmodel.SomeInteger(nonneg=True),
+            [s_gc, s_typeid16,
              annmodel.SomeInteger(nonneg=True),
              annmodel.SomeBool(), annmodel.SomeBool(),
              annmodel.SomeBool()], s_gcref,
@@ -221,7 +229,7 @@
             malloc_fixedsize_meth = GCClass.malloc_fixedsize.im_func
             self.malloc_fixedsize_ptr = getfn(
                 malloc_fixedsize_meth,
-                [s_gc, annmodel.SomeInteger(nonneg=True),
+                [s_gc, s_typeid16,
                  annmodel.SomeInteger(nonneg=True),
                  annmodel.SomeBool(), annmodel.SomeBool(),
                  annmodel.SomeBool()], s_gcref,
@@ -235,7 +243,8 @@
 ##             + [annmodel.SomeBool(), annmodel.SomeBool()], s_gcref)
         self.malloc_varsize_clear_ptr = getfn(
             GCClass.malloc_varsize_clear.im_func,
-            [s_gc] + [annmodel.SomeInteger(nonneg=True) for i in range(5)]
+            [s_gc, s_typeid16]
+            + [annmodel.SomeInteger(nonneg=True) for i in range(4)]
             + [annmodel.SomeBool()], s_gcref)
         self.collect_ptr = getfn(GCClass.collect.im_func,
             [s_gc, annmodel.SomeInteger()], annmodel.s_None)
@@ -268,7 +277,7 @@
             s_True  = annmodel.SomeBool(); s_True .const = True
             self.malloc_fast_ptr = getfn(
                 malloc_fast,
-                [s_gc, annmodel.SomeInteger(nonneg=True),
+                [s_gc, s_typeid16,
                  annmodel.SomeInteger(nonneg=True),
                  s_True, s_False,
                  s_False], s_gcref,
@@ -288,7 +297,7 @@
             s_True  = annmodel.SomeBool(); s_True .const = True
             self.malloc_varsize_clear_fast_ptr = getfn(
                 malloc_varsize_clear_fast,
-                [s_gc, annmodel.SomeInteger(nonneg=True),
+                [s_gc, s_typeid16,
                  annmodel.SomeInteger(nonneg=True),
                  annmodel.SomeInteger(nonneg=True),
                  annmodel.SomeInteger(nonneg=True),
@@ -304,7 +313,7 @@
                 "malloc_varsize_nonmovable")
             self.malloc_varsize_nonmovable_ptr = getfn(
                 malloc_nonmovable,
-                [s_gc, annmodel.SomeInteger(nonneg=True),
+                [s_gc, s_typeid16,
                  annmodel.SomeInteger(nonneg=True)], s_gcref)
         else:
             self.malloc_varsize_nonmovable_ptr = None
@@ -315,7 +324,7 @@
                 "malloc_varsize_resizable")
             self.malloc_varsize_resizable_ptr = getfn(
                 malloc_resizable,
-                [s_gc, annmodel.SomeInteger(nonneg=True),
+                [s_gc, s_typeid16,
                  annmodel.SomeInteger(nonneg=True)], s_gcref)
         else:
             self.malloc_varsize_resizable_ptr = None
@@ -399,6 +408,14 @@
             FLDTYPE = getattr(HDR, fldname)
             fields.append(('_' + fldname, FLDTYPE))
 
+        size_gc_header = self.gcdata.gc.gcheaderbuilder.size_gc_header
+        vtableinfo = (HDR, size_gc_header, self.gcdata.gc.typeid_is_in_field)
+        self.c_vtableinfo = rmodel.inputconst(lltype.Void, vtableinfo)
+        tig = self.layoutbuilder.type_info_group._as_ptr()
+        self.c_type_info_group = rmodel.inputconst(lltype.typeOf(tig), tig)
+        sko = llmemory.sizeof(gcdata.TYPE_INFO)
+        self.c_vtinfo_skip_offset = rmodel.inputconst(lltype.typeOf(sko), sko)
+
     def build_root_walker(self):
         return ShadowStackRootWalker(self)
 
@@ -421,21 +438,14 @@
         return [getattr(hdr, fldname) for fldname in HDR._names]
 
     def finish_tables(self):
-        table = self.layoutbuilder.flatten_table()
-        log.info("assigned %s typeids" % (len(table), ))
+        group = self.layoutbuilder.close_table()
+        log.info("assigned %s typeids" % (len(group.members), ))
         log.info("added %s push/pop stack root instructions" % (
                      self.num_pushs, ))
         if self.write_barrier_ptr:
             log.info("inserted %s write barrier calls" % (
                          self.write_barrier_calls, ))
 
-        # replace the type_info_table pointer in gcdata -- at this point,
-        # the database is in principle complete, so it has already seen
-        # the delayed pointer.  We need to force it to consider the new
-        # array now.
-
-        self.gcdata.type_info_table._become(table)
-
         # XXX because we call inputconst already in replace_malloc, we can't
         # modify the instance, we have to modify the 'rtyped instance'
         # instead.  horrors.  is there a better way?
@@ -463,16 +473,24 @@
         self.write_typeid_list()
         return newgcdependencies
 
+    def get_final_dependencies(self):
+        # returns an iterator enumerating the type_info_group's members,
+        # to make sure that they are all followed (only a part of them
+        # might have been followed by a previous enum_dependencies()).
+        return iter(self.layoutbuilder.type_info_group.members)
+
     def write_typeid_list(self):
         """write out the list of type ids together with some info"""
         from pypy.tool.udir import udir
         # XXX not ideal since it is not per compilation, but per run
+        # XXX argh argh, this only gives the member index but not the
+        #     real typeid, which is a complete mess to obtain now...
+        all_ids = self.layoutbuilder.id_of_type.items()
+        all_ids = [(typeinfo.index, TYPE) for (TYPE, typeinfo) in all_ids]
+        all_ids = dict(all_ids)
         f = udir.join("typeids.txt").open("w")
-        all = [(typeid, TYPE)
-               for TYPE, typeid in self.layoutbuilder.id_of_type.iteritems()]
-        all.sort()
-        for typeid, TYPE in all:
-            f.write("%s %s\n" % (typeid, TYPE))
+        for index in range(len(self.layoutbuilder.type_info_group.members)):
+            f.write("member%-4d %s\n" % (index, all_ids.get(index, '?')))
         f.close()
 
     def transform_graph(self, graph):
@@ -502,8 +520,8 @@
         assert PTRTYPE.TO == TYPE
         type_id = self.get_type_id(TYPE)
 
-        c_type_id = rmodel.inputconst(lltype.Signed, type_id)
-        info = self.layoutbuilder.type_info_list[type_id]
+        c_type_id = rmodel.inputconst(TYPE_ID, type_id)
+        info = self.layoutbuilder.get_info(type_id)
         c_size = rmodel.inputconst(lltype.Signed, info.fixedsize)
         has_finalizer = bool(self.finalizer_funcptr_for_type(TYPE))
         c_has_finalizer = rmodel.inputconst(lltype.Bool, has_finalizer)
@@ -523,9 +541,12 @@
                     c_has_finalizer, rmodel.inputconst(lltype.Bool, False)]
         else:
             assert not c_has_finalizer.value
+            info_varsize = self.layoutbuilder.get_info_varsize(type_id)
             v_length = op.args[-1]
-            c_ofstolength = rmodel.inputconst(lltype.Signed, info.ofstolength)
-            c_varitemsize = rmodel.inputconst(lltype.Signed, info.varitemsize)
+            c_ofstolength = rmodel.inputconst(lltype.Signed,
+                                              info_varsize.ofstolength)
+            c_varitemsize = rmodel.inputconst(lltype.Signed,
+                                              info_varsize.varitemsize)
             if flags.get('resizable') and self.malloc_varsize_resizable_ptr:
                 assert c_can_collect.value
                 malloc_ptr = self.malloc_varsize_resizable_ptr
@@ -656,8 +677,8 @@
 
         type_id = self.get_type_id(WEAKREF)
 
-        c_type_id = rmodel.inputconst(lltype.Signed, type_id)
-        info = self.layoutbuilder.type_info_list[type_id]
+        c_type_id = rmodel.inputconst(TYPE_ID, type_id)
+        info = self.layoutbuilder.get_info(type_id)
         c_size = rmodel.inputconst(lltype.Signed, info.fixedsize)
         malloc_ptr = self.malloc_fixedsize_ptr
         c_has_finalizer = rmodel.inputconst(lltype.Bool, False)
@@ -763,6 +784,48 @@
                                       v_structaddr])
         hop.rename('bare_' + opname)
 
+    def transform_getfield_typeptr(self, hop):
+        # this would become quite a lot of operations, even if it compiles
+        # to C code that is just as efficient as "obj->typeptr".  To avoid
+        # that, we just generate a single custom operation instead.
+        hop.genop('gc_gettypeptr_group', [hop.spaceop.args[0],
+                                          self.c_type_info_group,
+                                          self.c_vtinfo_skip_offset,
+                                          self.c_vtableinfo],
+                  resultvar = hop.spaceop.result)
+
+    def transform_setfield_typeptr(self, hop):
+        # replace such a setfield with an assertion that the typeptr is right
+        # (xxx not very useful right now, so disabled)
+        if 0:
+            v_new = hop.spaceop.args[2]
+            v_old = hop.genop('gc_gettypeptr_group', [hop.spaceop.args[0],
+                                                      self.c_type_info_group,
+                                                      self.c_vtinfo_skip_offset,
+                                                      self.c_vtableinfo],
+                              resulttype = v_new.concretetype)
+            v_eq = hop.genop("ptr_eq", [v_old, v_new],
+                             resulttype = lltype.Bool)
+            c_errmsg = rmodel.inputconst(lltype.Void,
+                                         "setfield_typeptr: wrong type")
+            hop.genop('debug_assert', [v_eq, c_errmsg])
+
+    def gct_getfield(self, hop):
+        if (hop.spaceop.args[1].value == 'typeptr' and
+            hop.spaceop.args[0].concretetype.TO._hints.get('typeptr') and
+            self.translator.config.translation.gcconfig.removetypeptr):
+            self.transform_getfield_typeptr(hop)
+        else:
+            GCTransformer.gct_getfield(self, hop)
+
+    def gct_setfield(self, hop):
+        if (hop.spaceop.args[1].value == 'typeptr' and
+            hop.spaceop.args[0].concretetype.TO._hints.get('typeptr') and
+            self.translator.config.translation.gcconfig.removetypeptr):
+            self.transform_setfield_typeptr(hop)
+        else:
+            GCTransformer.gct_setfield(self, hop)
+
     def var_needs_set_transform(self, var):
         return var_needsgc(var)
 
@@ -852,6 +915,18 @@
         return fptr
 
 
+class JITTransformerLayoutBuilder(TransformerLayoutBuilder):
+    # for the JIT: currently does not support removetypeptr
+    def __init__(self, config):
+        from pypy.rpython.memory.gc.base import choose_gc_from_config
+        try:
+            assert not config.translation.gcconfig.removetypeptr
+        except AttributeError:    # for some tests
+            pass
+        GCClass, _ = choose_gc_from_config(config)
+        TransformerLayoutBuilder.__init__(self, GCClass, {})
+
+
 def gen_zero_gc_pointers(TYPE, v, llops, previous_steps=None):
     if previous_steps is None:
         previous_steps = []

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gctransform/transform.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gctransform/transform.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gctransform/transform.py	Tue Oct 13 16:57:33 2009
@@ -10,6 +10,7 @@
 from pypy.translator.backendopt import graphanalyze
 from pypy.translator.backendopt.canraise import RaiseAnalyzer
 from pypy.translator.backendopt.ssa import DataFlowFamilyBuilder
+from pypy.translator.backendopt.constfold import constant_fold_graph
 from pypy.annotation import model as annmodel
 from pypy.rpython import rmodel, annlowlevel
 from pypy.rpython.memory import gc
@@ -144,16 +145,20 @@
         if self.inline:
             raise_analyzer = RaiseAnalyzer(self.translator)
             to_enum = self.graph_dependencies.get(graph, self.graphs_to_inline)
+            must_constfold = False
             for inline_graph in to_enum:
                 try:
                     inline.inline_function(self.translator, inline_graph, graph,
                                            self.lltype_to_classdef,
                                            raise_analyzer,
                                            cleanup=False)
+                    must_constfold = True
                 except inline.CannotInline, e:
                     print 'CANNOT INLINE:', e
                     print '\t%s into %s' % (inline_graph, graph)
             cleanup_graph(graph)
+            if must_constfold:
+                constant_fold_graph(graph)
 
     def compute_borrowed_vars(self, graph):
         # the input args are borrowed, and stay borrowed for as long as they
@@ -307,6 +312,9 @@
         newgcdependencies = self.ll_finalizers_ptrs
         return newgcdependencies
 
+    def get_final_dependencies(self):
+        pass
+
     def finish_tables(self):
         pass
 
@@ -367,6 +375,8 @@
     gct_setarrayitem = gct_setfield
     gct_setinteriorfield = gct_setfield
 
+    gct_getfield = default
+
     def gct_zero_gc_pointers_inside(self, hop):
         pass
 

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gctypelayout.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gctypelayout.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gctypelayout.py	Tue Oct 13 16:57:33 2009
@@ -1,4 +1,6 @@
-from pypy.rpython.lltypesystem import lltype, llmemory, llarena
+from pypy.rpython.lltypesystem import lltype, llmemory, llarena, llgroup
+from pypy.rpython.lltypesystem.lloperation import llop
+from pypy.rlib.objectmodel import we_are_translated
 from pypy.rlib.debug import ll_assert
 
 
@@ -17,67 +19,78 @@
 
     # structure describing the layout of a typeid
     TYPE_INFO = lltype.Struct("type_info",
+        ("infobits",       lltype.Signed),    # combination of the T_xxx consts
         ("finalizer",      FINALIZERTYPE),
         ("fixedsize",      lltype.Signed),
         ("ofstoptrs",      lltype.Ptr(OFFSETS_TO_GC_PTR)),
+        hints={'immutable': True},
+        )
+    VARSIZE_TYPE_INFO = lltype.Struct("varsize_type_info",
+        ("header",         TYPE_INFO),
         ("varitemsize",    lltype.Signed),
         ("ofstovar",       lltype.Signed),
         ("ofstolength",    lltype.Signed),
         ("varofstoptrs",   lltype.Ptr(OFFSETS_TO_GC_PTR)),
-        ("weakptrofs",     lltype.Signed),
+        hints={'immutable': True},
         )
-    TYPE_INFO_TABLE = lltype.Array(TYPE_INFO)
+    TYPE_INFO_PTR = lltype.Ptr(TYPE_INFO)
+    VARSIZE_TYPE_INFO_PTR = lltype.Ptr(VARSIZE_TYPE_INFO)
 
-    def __init__(self, type_info_table):
-        self.type_info_table = type_info_table
-        # 'type_info_table' is a list of TYPE_INFO structures when
-        # running with gcwrapper, or a real TYPE_INFO_TABLE after
-        # the gctransformer.
+    def __init__(self, type_info_group):
+        assert isinstance(type_info_group, llgroup.group)
+        self.type_info_group = type_info_group
+        self.type_info_group_ptr = type_info_group._as_ptr()
+
+    def get(self, typeid):
+        _check_typeid(typeid)
+        return llop.get_group_member(GCData.TYPE_INFO_PTR,
+                                     self.type_info_group_ptr,
+                                     typeid)
+
+    def get_varsize(self, typeid):
+        _check_typeid(typeid)
+        return llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR,
+                                     self.type_info_group_ptr,
+                                     typeid)
 
     def q_is_varsize(self, typeid):
-        ll_assert(typeid > 0, "invalid type_id")
-        return (typeid & T_IS_FIXSIZE) == 0
+        infobits = self.get(typeid).infobits
+        return (infobits & T_IS_VARSIZE) != 0
 
     def q_has_gcptr_in_varsize(self, typeid):
-        ll_assert(typeid > 0, "invalid type_id")
-        return (typeid & (T_IS_FIXSIZE|T_NO_GCPTR_IN_VARSIZE)) == 0
+        infobits = self.get(typeid).infobits
+        return (infobits & T_HAS_GCPTR_IN_VARSIZE) != 0
 
     def q_is_gcarrayofgcptr(self, typeid):
-        ll_assert(typeid > 0, "invalid type_id")
-        return (typeid &
-                (T_IS_FIXSIZE|T_NO_GCPTR_IN_VARSIZE|T_NOT_SIMPLE_GCARRAY)) == 0
+        infobits = self.get(typeid).infobits
+        return (infobits & T_IS_GCARRAY_OF_GCPTR) != 0
 
     def q_finalizer(self, typeid):
-        ll_assert(typeid > 0, "invalid type_id")
-        return self.type_info_table[typeid].finalizer
+        return self.get(typeid).finalizer
 
     def q_offsets_to_gc_pointers(self, typeid):
-        ll_assert(typeid > 0, "invalid type_id")
-        return self.type_info_table[typeid].ofstoptrs
+        return self.get(typeid).ofstoptrs
 
     def q_fixed_size(self, typeid):
-        ll_assert(typeid > 0, "invalid type_id")
-        return self.type_info_table[typeid].fixedsize
+        return self.get(typeid).fixedsize
 
     def q_varsize_item_sizes(self, typeid):
-        ll_assert(typeid > 0, "invalid type_id")
-        return self.type_info_table[typeid].varitemsize
+        return self.get_varsize(typeid).varitemsize
 
     def q_varsize_offset_to_variable_part(self, typeid):
-        ll_assert(typeid > 0, "invalid type_id")
-        return self.type_info_table[typeid].ofstovar
+        return self.get_varsize(typeid).ofstovar
 
     def q_varsize_offset_to_length(self, typeid):
-        ll_assert(typeid > 0, "invalid type_id")
-        return self.type_info_table[typeid].ofstolength
+        return self.get_varsize(typeid).ofstolength
 
     def q_varsize_offsets_to_gcpointers_in_var_part(self, typeid):
-        ll_assert(typeid > 0, "invalid type_id")
-        return self.type_info_table[typeid].varofstoptrs
+        return self.get_varsize(typeid).varofstoptrs
 
     def q_weakpointer_offset(self, typeid):
-        ll_assert(typeid > 0, "invalid type_id")
-        return self.type_info_table[typeid].weakptrofs
+        infobits = self.get(typeid).infobits
+        if infobits & T_IS_WEAKREF:
+            return weakptr_offset
+        return -1
 
     def set_query_functions(self, gc):
         gc.set_query_functions(
@@ -93,88 +106,59 @@
             self.q_varsize_offsets_to_gcpointers_in_var_part,
             self.q_weakpointer_offset)
 
-# For the q_xxx functions that return flags, we use bit patterns
-# in the typeid instead of entries in the type_info_table.  The
-# following flag combinations are used (the idea being that it's
-# very fast on CPUs to check if all flags in a set are all zero):
-
-#   * if T_IS_FIXSIZE is set, the gc object is not var-sized
-#   * if T_IS_FIXSIZE and T_NO_GCPTR_IN_VARSIZE are both cleared,
-#           there are gc ptrs in the var-sized part
-#   * if T_IS_FIXSIZE, T_NO_GCPTR_IN_VARSIZE and T_NOT_SIMPLE_GCARRAY
-#           are all cleared, the shape is just like GcArray(gcptr)
-
-T_IS_FIXSIZE          = 0x4
-T_NO_GCPTR_IN_VARSIZE = 0x2
-T_NOT_SIMPLE_GCARRAY  = 0x1
-
-def get_typeid_bitmask(TYPE):
-    """Return the bits that we would like to be set or cleared in the type_id
-    corresponding to TYPE.  This returns (mask, expected_value), where
-    the condition is that 'type_id & mask == expected_value'.
-    """
-    if not TYPE._is_varsize():
-        return (T_IS_FIXSIZE, T_IS_FIXSIZE)     # not var-sized
 
-    if (isinstance(TYPE, lltype.GcArray)
-        and isinstance(TYPE.OF, lltype.Ptr)
-        and TYPE.OF.TO._gckind == 'gc'):
-        # a simple GcArray(gcptr)
-        return (T_IS_FIXSIZE|T_NO_GCPTR_IN_VARSIZE|T_NOT_SIMPLE_GCARRAY, 0)
-
-    if isinstance(TYPE, lltype.Struct):
-        ARRAY = TYPE._flds[TYPE._arrayfld]
-    else:
-        ARRAY = TYPE
-    assert isinstance(ARRAY, lltype.Array)
-    if ARRAY.OF != lltype.Void and len(offsets_to_gc_pointers(ARRAY.OF)) > 0:
-        # var-sized, with gc pointers in the variable part
-        return (T_IS_FIXSIZE|T_NO_GCPTR_IN_VARSIZE|T_NOT_SIMPLE_GCARRAY,
-                T_NOT_SIMPLE_GCARRAY)
-    else:
-        # var-sized, but no gc pointer in the variable part
-        return (T_IS_FIXSIZE|T_NO_GCPTR_IN_VARSIZE, T_NO_GCPTR_IN_VARSIZE)
+T_IS_VARSIZE           = 0x01
+T_HAS_GCPTR_IN_VARSIZE = 0x02
+T_IS_GCARRAY_OF_GCPTR  = 0x04
+T_IS_WEAKREF           = 0x08
+
+def _check_typeid(typeid):
+    ll_assert(llop.is_group_member_nonzero(lltype.Bool, typeid),
+              "invalid type_id")
 
 
 def encode_type_shape(builder, info, TYPE):
     """Encode the shape of the TYPE into the TYPE_INFO structure 'info'."""
     offsets = offsets_to_gc_pointers(TYPE)
+    infobits = 0
     info.ofstoptrs = builder.offsets2table(offsets, TYPE)
     info.finalizer = builder.make_finalizer_funcptr_for_type(TYPE)
-    info.weakptrofs = weakpointer_offset(TYPE)
     if not TYPE._is_varsize():
-        #info.isvarsize = False
-        #info.gcptrinvarsize = False
         info.fixedsize = llarena.round_up_for_allocation(
-            llmemory.sizeof(TYPE))
-        info.ofstolength = -1
+            llmemory.sizeof(TYPE), builder.GCClass.object_minimal_size)
         # note about round_up_for_allocation(): in the 'info' table
         # we put a rounded-up size only for fixed-size objects.  For
         # varsize ones, the GC must anyway compute the size at run-time
         # and round up that result.
     else:
-        #info.isvarsize = True
+        infobits |= T_IS_VARSIZE
+        varinfo = lltype.cast_pointer(GCData.VARSIZE_TYPE_INFO_PTR, info)
         info.fixedsize = llmemory.sizeof(TYPE, 0)
         if isinstance(TYPE, lltype.Struct):
             ARRAY = TYPE._flds[TYPE._arrayfld]
             ofs1 = llmemory.offsetof(TYPE, TYPE._arrayfld)
-            info.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
-            info.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
+            varinfo.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
+            varinfo.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
         else:
+            assert isinstance(TYPE, lltype.GcArray)
             ARRAY = TYPE
-            info.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
-            info.ofstovar = llmemory.itemoffsetof(TYPE, 0)
+            if (isinstance(ARRAY.OF, lltype.Ptr)
+                and ARRAY.OF.TO._gckind == 'gc'):
+                infobits |= T_IS_GCARRAY_OF_GCPTR
+            varinfo.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
+            varinfo.ofstovar = llmemory.itemoffsetof(TYPE, 0)
         assert isinstance(ARRAY, lltype.Array)
         if ARRAY.OF != lltype.Void:
             offsets = offsets_to_gc_pointers(ARRAY.OF)
         else:
             offsets = ()
-        info.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
-        info.varitemsize = llmemory.sizeof(ARRAY.OF)
-        #info.gcptrinvarsize = len(offsets) > 0
-    #info.gcarrayofgcptr = (isinstance(TYPE, lltype.GcArray)
-    #                       and isinstance(TYPE.OF, lltype.Ptr)
-    #                       and TYPE.OF.TO._gckind == 'gc')
+        if len(offsets) > 0:
+            infobits |= T_HAS_GCPTR_IN_VARSIZE
+        varinfo.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
+        varinfo.varitemsize = llmemory.sizeof(ARRAY.OF)
+    if TYPE == WEAKREF:
+        infobits |= T_IS_WEAKREF
+    info.infobits = infobits
 
 # ____________________________________________________________
 
@@ -183,8 +167,12 @@
     can_add_new_types = True
     can_encode_type_shape = True    # set to False initially by the JIT
 
-    def __init__(self):
-        self.type_info_list = [None]   # don't use typeid 0, helps debugging
+    size_of_fixed_type_info = llmemory.sizeof(GCData.TYPE_INFO)
+
+    def __init__(self, GCClass, lltype2vtable):
+        self.GCClass = GCClass
+        self.lltype2vtable = lltype2vtable
+        self.make_type_info_group()
         self.id_of_type = {}      # {LLTYPE: type_id}
         self.seen_roots = {}
         # the following are lists of addresses of gc pointers living inside the
@@ -199,7 +187,13 @@
         self.all_prebuilt_gc = []
         self.finalizer_funcptrs = {}
         self.offsettable_cache = {}
-        self.next_typeid_cache = {}
+
+    def make_type_info_group(self):
+        self.type_info_group = llgroup.group("typeinfo")
+        # don't use typeid 0, may help debugging
+        DUMMY = lltype.Struct("dummy", ('x', lltype.Signed))
+        dummy = lltype.malloc(DUMMY, immortal=True, zero=True)
+        self.type_info_group.add_member(dummy)
 
     def get_type_id(self, TYPE):
         try:
@@ -208,33 +202,46 @@
             assert self.can_add_new_types
             assert isinstance(TYPE, (lltype.GcStruct, lltype.GcArray))
             # Record the new type_id description as a TYPE_INFO structure.
-            # It goes into a list for now, which will be turned into a
-            # TYPE_INFO_TABLE in flatten_table() by the gc transformer.
-
-            # pick the next type_id with the correct bits set or cleared
-            mask, expected = get_typeid_bitmask(TYPE)
-            type_id = self.next_typeid_cache.get((mask, expected), 1)
-            while True:
-                if type_id == len(self.type_info_list):
-                    self.type_info_list.append(None)
-                if (self.type_info_list[type_id] is None and
-                    (type_id & mask) == expected):
-                    break         # can use this type_id
-                else:
-                    type_id += 1  # continue searching
-            self.next_typeid_cache[mask, expected] = type_id + 1
-            assert type_id & 0xffff == type_id # make sure it fits into 2 bytes
-
             # build the TYPE_INFO structure
-            info = lltype.malloc(GCData.TYPE_INFO, immortal=True, zero=True)
+            if not TYPE._is_varsize():
+                fullinfo = lltype.malloc(GCData.TYPE_INFO,
+                                         immortal=True, zero=True)
+                info = fullinfo
+            else:
+                fullinfo = lltype.malloc(GCData.VARSIZE_TYPE_INFO,
+                                         immortal=True, zero=True)
+                info = fullinfo.header
             if self.can_encode_type_shape:
                 encode_type_shape(self, info, TYPE)
             else:
                 self._pending_type_shapes.append((info, TYPE))
-            self.type_info_list[type_id] = info
+            # store it
+            type_id = self.type_info_group.add_member(fullinfo)
             self.id_of_type[TYPE] = type_id
+            # store the vtable of the type (if any) immediately thereafter
+            # (note that if gcconfig.removetypeptr is False, lltype2vtable
+            # is empty)
+            vtable = self.lltype2vtable.get(TYPE, None)
+            if vtable is not None:
+                # check that if we have a vtable, we are not varsize
+                assert lltype.typeOf(fullinfo) == GCData.TYPE_INFO_PTR
+                vtable = lltype.normalizeptr(vtable)
+                self.type_info_group.add_member(vtable)
             return type_id
 
+    def get_info(self, type_id):
+        return llop.get_group_member(GCData.TYPE_INFO_PTR,
+                                     self.type_info_group._as_ptr(),
+                                     type_id)
+
+    def get_info_varsize(self, type_id):
+        return llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR,
+                                     self.type_info_group._as_ptr(),
+                                     type_id)
+
+    def is_weakref(self, type_id):
+        return self.get_info(type_id).infobits & T_IS_WEAKREF
+
     def encode_type_shapes_now(self):
         if not self.can_encode_type_shape:
             self.can_encode_type_shape = True
@@ -260,20 +267,11 @@
             self.offsettable_cache[TYPE] = cachedarray
             return cachedarray
 
-    def flatten_table(self):
+    def close_table(self):
+        # make sure we no longer add members to the type_info_group.
         self.can_add_new_types = False
         self.offsettable_cache = None
-        table = lltype.malloc(GCData.TYPE_INFO_TABLE, len(self.type_info_list),
-                              immortal=True)
-        fieldnames = GCData.TYPE_INFO._names
-        for tableentry, newcontent in zip(table, self.type_info_list):
-            if newcontent is None:    # empty entry
-                tableentry.weakptrofs = -1
-                tableentry.ofstolength = -1
-            else:
-                for name in fieldnames:
-                    setattr(tableentry, name, getattr(newcontent, name))
-        return table
+        return self.type_info_group
 
     def finalizer_funcptr_for_type(self, TYPE):
         if TYPE in self.finalizer_funcptrs:
@@ -287,7 +285,7 @@
         return lltype.nullptr(GCData.ADDRESS_VOID_FUNC)
 
     def initialize_gc_query_function(self, gc):
-        return GCData(self.type_info_list).set_query_functions(gc)
+        return GCData(self.type_info_group).set_query_functions(gc)
 
     def consider_constant(self, TYPE, value, gc):
         if value is not lltype.top_container(value):
@@ -349,11 +347,6 @@
         offsets.append(0)
     return offsets
 
-def weakpointer_offset(TYPE):
-    if TYPE == WEAKREF:
-        return llmemory.offsetof(WEAKREF, "weakptr")
-    return -1
-
 def gc_pointers_inside(v, adr, mutable_only=False):
     t = lltype.typeOf(v)
     if isinstance(t, lltype.Struct):
@@ -409,6 +402,7 @@
 sizeof_weakref= llmemory.sizeof(WEAKREF)
 empty_weakref = lltype.malloc(WEAKREF, immortal=True)
 empty_weakref.weakptr = llmemory.NULL
+weakptr_offset = llmemory.offsetof(WEAKREF, "weakptr")
 
 def ll_weakref_deref(wref):
     wref = llmemory.cast_weakrefptr_to_ptr(WEAKREFPTR, wref)

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gcwrapper.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gcwrapper.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/gcwrapper.py	Tue Oct 13 16:57:33 2009
@@ -18,7 +18,10 @@
         self.gc.setup()
 
     def prepare_graphs(self, flowgraphs):
-        layoutbuilder = DirectRunLayoutBuilder(self.llinterp)
+        lltype2vtable = self.llinterp.typer.lltype2vtable
+        layoutbuilder = DirectRunLayoutBuilder(self.gc.__class__,
+                                               lltype2vtable,
+                                               self.llinterp)
         self.get_type_id = layoutbuilder.get_type_id
         layoutbuilder.initialize_gc_query_function(self.gc)
 
@@ -159,9 +162,9 @@
 
 class DirectRunLayoutBuilder(gctypelayout.TypeLayoutBuilder):
 
-    def __init__(self, llinterp):
+    def __init__(self, GCClass, lltype2vtable, llinterp):
         self.llinterp = llinterp
-        super(DirectRunLayoutBuilder, self).__init__()
+        super(DirectRunLayoutBuilder, self).__init__(GCClass, lltype2vtable)
 
     def make_finalizer_funcptr_for_type(self, TYPE):
         from pypy.rpython.memory.gctransform.support import get_rtti, \

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/lltypelayout.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/lltypelayout.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/lltypelayout.py	Tue Oct 13 16:57:33 2009
@@ -118,6 +118,10 @@
         return 0
     elif isinstance(offset, llarena.RoundedUpForAllocation):
         basesize = convert_offset_to_int(offset.basesize)
+        if isinstance(offset.minsize, llmemory.AddressOffset):
+            minsize = convert_offset_to_int(offset.minsize)
+            if minsize > basesize:
+                basesize = minsize
         mask = memory_alignment - 1
         return (basesize + mask) & ~ mask
     else:

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/test/test_gctypelayout.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/test/test_gctypelayout.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/test/test_gctypelayout.py	Tue Oct 13 16:57:33 2009
@@ -1,6 +1,12 @@
+import py
 from pypy.rpython.memory.gctypelayout import TypeLayoutBuilder, GCData
 from pypy.rpython.memory.gctypelayout import offsets_to_gc_pointers
-from pypy.rpython.lltypesystem import lltype
+from pypy.rpython.lltypesystem import lltype, rclass
+from pypy.rpython.test.test_llinterp import get_interpreter
+from pypy.objspace.flow.model import Constant
+
+class FakeGC:
+    object_minimal_size = 0
 
 def getname(T):
     try:
@@ -30,13 +36,57 @@
     for T, c in [(GC_S, 0), (GC_S2, 2), (GC_A, 0), (GC_A2, 0), (GC_S3, 2)]:
         assert len(offsets_to_gc_pointers(T)) == c
 
-def test_layout_builder():
+def test_layout_builder(lltype2vtable={}):
     # XXX a very minimal test
-    layoutbuilder = TypeLayoutBuilder()
+    layoutbuilder = TypeLayoutBuilder(FakeGC, lltype2vtable)
     for T1, T2 in [(GC_A, GC_S), (GC_A2, GC_S2), (GC_S3, GC_S2)]:
         tid1 = layoutbuilder.get_type_id(T1)
         tid2 = layoutbuilder.get_type_id(T2)
-        gcdata = GCData(layoutbuilder.type_info_list)
+        gcdata = GCData(layoutbuilder.type_info_group)
         lst1 = gcdata.q_varsize_offsets_to_gcpointers_in_var_part(tid1)
         lst2 = gcdata.q_offsets_to_gc_pointers(tid2)
         assert len(lst1) == len(lst2)
+    return layoutbuilder
+
+def test_layout_builder_with_vtable():
+    from pypy.rpython.lltypesystem.lloperation import llop
+    vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True)
+    layoutbuilder = test_layout_builder({GC_S: vtable})
+    tid1 = layoutbuilder.get_type_id(GC_S)
+    tid2 = layoutbuilder.get_type_id(GC_S2)
+    tid3 = layoutbuilder.get_type_id(GC_S3)
+    group = layoutbuilder.type_info_group
+    vt = llop.get_next_group_member(rclass.CLASSTYPE, group._as_ptr(), tid1,
+                                    layoutbuilder.size_of_fixed_type_info)
+    assert vt == vtable
+    for tid in [tid2, tid3]:
+        py.test.raises((lltype.InvalidCast, AssertionError),
+                       llop.get_next_group_member,
+                       rclass.CLASSTYPE, group._as_ptr(), tid,
+                       layoutbuilder.size_of_fixed_type_info)
+
+def test_constfold():
+    layoutbuilder = TypeLayoutBuilder(FakeGC, {})
+    tid1 = layoutbuilder.get_type_id(GC_A)
+    tid2 = layoutbuilder.get_type_id(GC_S3)
+    class MockGC:
+        def set_query_functions(self, is_varsize,
+                                has_gcptr_in_varsize,
+                                is_gcarrayofgcptr,
+                                *rest):
+            self.is_varsize = is_varsize
+            self.has_gcptr_in_varsize = has_gcptr_in_varsize
+            self.is_gcarrayofgcptr = is_gcarrayofgcptr
+    gc = MockGC()
+    layoutbuilder.initialize_gc_query_function(gc)
+    #
+    def f():
+        return (1 * gc.is_varsize(tid1) +
+               10 * gc.has_gcptr_in_varsize(tid1) +
+              100 * gc.is_gcarrayofgcptr(tid1) +
+             1000 * gc.is_varsize(tid2) +
+            10000 * gc.has_gcptr_in_varsize(tid2) +
+           100000 * gc.is_gcarrayofgcptr(tid2))
+    interp, graph = get_interpreter(f, [], backendopt=True)
+    assert interp.eval_graph(graph, []) == 11001
+    assert graph.startblock.exits[0].args == [Constant(11001, lltype.Signed)]

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/test/test_transformed_gc.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/test/test_transformed_gc.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/memory/test/test_transformed_gc.py	Tue Oct 13 16:57:33 2009
@@ -21,6 +21,7 @@
     t = TranslationContext()
     # XXX XXX XXX mess
     t.config.translation.gc = gcname
+    t.config.translation.gcconfig.removetypeptr = True
     if stacklessgc:
         t.config.translation.gcrootfinder = "stackless"
     t.config.set(**extraconfigopts)
@@ -195,7 +196,6 @@
         assert heap_size < 16000 * INT_SIZE / 4 # xxx
 
     def test_nongc_static_root(self):
-        from pypy.rpython.lltypesystem import lltype
         T1 = lltype.GcStruct("C", ('x', lltype.Signed))
         T2 = lltype.Struct("C", ('p', lltype.Ptr(T1)))
         static = lltype.malloc(T2, immortal=True)
@@ -552,7 +552,7 @@
         class A(object):
             pass
         def f():
-            from pypy.rpython.lltypesystem import lltype, rffi
+            from pypy.rpython.lltypesystem import rffi
             alist = [A() for i in range(50)]
             idarray = lltype.malloc(rffi.INTP.TO, len(alist), flavor='raw')
             # Compute the id of all the elements of the list.  The goal is
@@ -592,7 +592,11 @@
         def fix_graph_of_g(translator):
             from pypy.translator.translator import graphof
             from pypy.objspace.flow.model import Constant
-            layoutbuilder = framework.TransformerLayoutBuilder()
+            from pypy.rpython.lltypesystem import rffi
+            GCClass = self.gcpolicy.transformerclass.GCClass
+            lltype2vtable = translator.rtyper.lltype2vtable
+            layoutbuilder = framework.TransformerLayoutBuilder(GCClass,
+                                                               lltype2vtable)
             layoutbuilder.delay_encoding()
             translator._jit2gc = {
                 'layoutbuilder': layoutbuilder,
@@ -603,7 +607,7 @@
             graph = graphof(translator, g)
             for op in graph.startblock.operations:
                 if op.opname == 'do_malloc_fixedsize_clear':
-                    op.args = [Constant(type_id, lltype.Signed),
+                    op.args = [Constant(type_id, rffi.USHORT),
                                Constant(llmemory.sizeof(P), lltype.Signed),
                                Constant(True, lltype.Bool),  # can_collect
                                Constant(False, lltype.Bool), # has_finalizer
@@ -628,7 +632,11 @@
         def fix_graph_of_g(translator):
             from pypy.translator.translator import graphof
             from pypy.objspace.flow.model import Constant
-            layoutbuilder = framework.TransformerLayoutBuilder()
+            from pypy.rpython.lltypesystem import rffi
+            GCClass = self.gcpolicy.transformerclass.GCClass
+            lltype2vtable = translator.rtyper.lltype2vtable
+            layoutbuilder = framework.TransformerLayoutBuilder(GCClass,
+                                                               lltype2vtable)
             layoutbuilder.delay_encoding()
             translator._jit2gc = {
                 'layoutbuilder': layoutbuilder,
@@ -639,7 +647,7 @@
             graph = graphof(translator, g)
             for op in graph.startblock.operations:
                 if op.opname == 'do_malloc_fixedsize_clear':
-                    op.args = [Constant(type_id, lltype.Signed),
+                    op.args = [Constant(type_id, rffi.USHORT),
                                Constant(llmemory.sizeof(P), lltype.Signed),
                                Constant(True, lltype.Bool),  # can_collect
                                Constant(False, lltype.Bool), # has_finalizer
@@ -934,7 +942,6 @@
         assert res == 20 + 20
 
     def test_nongc_static_root_minor_collect(self):
-        from pypy.rpython.lltypesystem import lltype
         T1 = lltype.GcStruct("C", ('x', lltype.Signed))
         T2 = lltype.Struct("C", ('p', lltype.Ptr(T1)))
         static = lltype.malloc(T2, immortal=True)
@@ -957,7 +964,6 @@
 
 
     def test_static_root_minor_collect(self):
-        from pypy.rpython.lltypesystem import lltype
         class A:
             pass
         class B:

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/rfloat.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/rfloat.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/rfloat.py	Tue Oct 13 16:57:33 2009
@@ -152,12 +152,13 @@
     This should be special-cased in W_FloatObject.
     In the low-level case, floats cannot be used with ints in dicts, anyway.
     """
+    from pypy.rlib.rarithmetic import intmask
     v, expo = math.frexp(f)
     v *= TAKE_NEXT
     hipart = int(v)
     v = (v - float(hipart)) * TAKE_NEXT
     x = hipart + int(v) + (expo << 15)
-    return x
+    return intmask(x)
 #
 # _________________________ Conversions _________________________
 

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/rtyper.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/rtyper.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/rtyper.py	Tue Oct 13 16:57:33 2009
@@ -65,6 +65,7 @@
         self.class_pbc_attributes = {}
         self.oo_meth_impls = {}
         self.cache_dummy_values = {}
+        self.lltype2vtable = {}
         self.typererrors = []
         self.typererror_count = 0
         # make the primitive_to_repr constant mapping
@@ -131,12 +132,6 @@
             result[repr.lowleveltype] = classdef
         return result
 
-    def lltype_to_vtable_mapping(self):
-        result = {}
-        for repr in self.instance_reprs.itervalues():
-            result[repr.lowleveltype.TO] = repr.rclass.getvtable()
-        return result
-
     def get_type_for_typeptr(self, typeptr):
         try:
             return self.type_for_typeptr[typeptr._obj]

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/test/test_rdict.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/test/test_rdict.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/test/test_rdict.py	Tue Oct 13 16:57:33 2009
@@ -451,9 +451,9 @@
     def test_tuple_dict(self):
         def f(i):
             d = {}
-            d[(1, 2)] = 4
-            d[(1, 3)] = 6
-            return d[(1, i)]
+            d[(1, 4.5, (str(i), 2), 2)] = 4
+            d[(1, 4.5, (str(i), 2), 3)] = 6
+            return d[(1, 4.5, (str(i), 2), i)]
 
         res = self.interpret(f, [2])
         assert res == f(2)
@@ -556,6 +556,14 @@
             return d['a']
         assert self.interpret(func, []) == 42
 
+    def test_dict_of_floats(self):
+        d = {3.0: 42, 3.1: 43, 3.2: 44, 3.3: 45, 3.4: 46}
+        def fn(f):
+            return d[f]
+
+        res = self.interpret(fn, [3.0])
+        assert res == 42
+
 
 class TestLLtype(BaseTestRdict, LLRtypeMixin):
     def test_dict_but_not_with_char_keys(self):

Modified: pypy/branch/inline-fastpath-malloc/pypy/rpython/tool/test/test_mkrffi.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/rpython/tool/test/test_mkrffi.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/rpython/tool/test/test_mkrffi.py	Tue Oct 13 16:57:33 2009
@@ -1,7 +1,6 @@
 
 import ctypes
 from pypy.rpython.tool.mkrffi import *
-from pypy.rpython.tool.test.test_c import TestBasic
 import py
 
 class random_structure(ctypes.Structure):
@@ -54,7 +53,30 @@
     """)
     assert rffi_source.source.strip() == _src.strip()
 
-class TestMkrffi(TestBasic):
+class TestMkrffi(object):
+    def setup_class(cls):
+        import ctypes
+        from pypy.tool.udir import udir
+        from pypy.translator.platform import platform
+        from pypy.translator.tool.cbuild import ExternalCompilationInfo
+        
+        c_source = """
+        void *int_to_void_p(int arg) {}
+
+        struct random_structure {
+          int one;
+          int *two;
+        };
+
+        struct random_structure* int_int_to_struct_p(int one, int two) {}
+        """
+
+        c_file = udir.join('rffilib.c')
+        c_file.write(c_source)
+        libname = platform.compile([c_file], ExternalCompilationInfo(),
+                                   standalone=False)
+        cls.lib = ctypes.CDLL(str(libname))
+    
     def test_single_func(self):
         func = self.lib.int_to_void_p
         func.argtypes = [ctypes.c_int]

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/backendopt/inline.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/backendopt/inline.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/backendopt/inline.py	Tue Oct 13 16:57:33 2009
@@ -753,7 +753,7 @@
                 subcount = inline_function(translator, graph, parentgraph,
                                            lltype_to_classdef, raise_analyzer,
                                            call_count_pred, cleanup=False)
-                to_cleanup[graph] = True
+                to_cleanup[parentgraph] = True
                 res = bool(subcount)
             except CannotInline:
                 try_again[graph] = True

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/backendopt/removenoops.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/backendopt/removenoops.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/backendopt/removenoops.py	Tue Oct 13 16:57:33 2009
@@ -8,6 +8,7 @@
     """Removes unary low-level ops with a name appearing in the opnames list.
     """
     positions = []
+    touched_blocks = set()
     for block in graph.iterblocks():
         for i, op in enumerate(block.operations):
             if op.opname in opnames:
@@ -37,9 +38,10 @@
             else:
                 simplify.replace_exitswitch_by_constant(block, op_arg)
         block.operations[index] = None
-       
+        touched_blocks.add(block)
+        
     # remove all operations
-    for block in graph.iterblocks():
+    for block in touched_blocks:
         if block.operations:
             block.operations[:] = filter(None, block.operations)
 

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/backendopt/test/test_removenoops.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/backendopt/test/test_removenoops.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/backendopt/test/test_removenoops.py	Tue Oct 13 16:57:33 2009
@@ -65,6 +65,37 @@
     result = interp.eval_graph(f_graph, [])
     assert result == 42
 
+
+def test_remove_same_as_nonconst():
+    from pypy.rlib.nonconst import NonConstant
+    from pypy.rpython.lltypesystem.lloperation import llop
+    from pypy.rpython.lltypesystem import lltype
+
+    def f():
+        if NonConstant(False):
+            x = llop.same_as(lltype.Signed, 666)
+        return 42
+
+    t = TranslationContext()
+    t.buildannotator().build_types(f, [])
+    t.buildrtyper().specialize()
+    f_graph = graphof(t, f)
+    #simple_inline_function(t, nothing, f_graph)
+    # here, the graph looks like  v21=same_as(True);  exitswitch: v21
+    remove_same_as(f_graph)
+    t.checkgraphs()
+    # only one path should be left
+    for block in f_graph.iterblocks():
+        assert len(block.exits) <= 1
+
+    for block in t.annotator.annotated:
+        assert None not in block.operations
+
+    interp = LLInterpreter(t.rtyper)
+    result = interp.eval_graph(f_graph, [])
+    assert result == 42
+
+
 def test_remove_unaryops():
     # We really want to use remove_unaryops for things like ooupcast and
     # oodowncast in dynamically typed languages, but it's easier to test

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/c/database.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/c/database.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/c/database.py	Tue Oct 13 16:57:33 2009
@@ -5,6 +5,7 @@
 from pypy.rpython.lltypesystem import lltype
 from pypy.rpython.lltypesystem.llmemory import WeakRef, _WeakRefType, GCREF
 from pypy.rpython.lltypesystem.rffi import CConstant
+from pypy.rpython.lltypesystem import llgroup
 from pypy.tool.sourcetools import valid_identifier
 from pypy.translator.c.primitive import PrimitiveName, PrimitiveType
 from pypy.translator.c.node import StructDefNode, ArrayDefNode
@@ -141,6 +142,8 @@
                 #raise Exception("don't know about opaque type %r" % (T,))
                 return 'struct %s @' % (
                     valid_identifier('pypy_opaque_' + T.tag),)
+        elif isinstance(T, llgroup.GroupType):
+            return "/*don't use me*/ void @"
         else:
             raise Exception("don't know about type %r" % (T,))
 
@@ -285,6 +288,8 @@
             finish_callbacks.append(('Stackless transformer: finished',
                                      self.stacklesstransformer.finish))
         if self.gctransformer:
+            finish_callbacks.append(('GC transformer: tracking vtables',
+                                    self.gctransformer.get_final_dependencies))
             finish_callbacks.append(('GC transformer: finished tables',
                                      self.gctransformer.finish_tables))
 

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/c/funcgen.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/c/funcgen.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/c/funcgen.py	Tue Oct 13 16:57:33 2009
@@ -781,5 +781,22 @@
     def OP_PROMOTE_VIRTUALIZABLE(self, op):
         return '/* PROMOTE_VIRTUALIZABLE %s */' % op
 
+    def OP_GET_GROUP_MEMBER(self, op):
+        typename = self.db.gettype(op.result.concretetype)
+        return '%s = (%s)_OP_GET_GROUP_MEMBER(%s, %s);' % (
+            self.expr(op.result),
+            cdecl(typename, ''),
+            self.expr(op.args[0]),
+            self.expr(op.args[1]))
+
+    def OP_GET_NEXT_GROUP_MEMBER(self, op):
+        typename = self.db.gettype(op.result.concretetype)
+        return '%s = (%s)_OP_GET_NEXT_GROUP_MEMBER(%s, %s, %s);' % (
+            self.expr(op.result),
+            cdecl(typename, ''),
+            self.expr(op.args[0]),
+            self.expr(op.args[1]),
+            self.expr(op.args[2]))
+
 
 assert not USESLOTS or '__dict__' not in dir(FunctionCodeGenerator)

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/c/gc.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/c/gc.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/c/gc.py	Tue Oct 13 16:57:33 2009
@@ -1,4 +1,5 @@
 import sys
+from pypy.objspace.flow.model import Constant
 from pypy.translator.c.support import cdecl
 from pypy.translator.c.node import ContainerNode
 from pypy.rpython.lltypesystem.lltype import \
@@ -11,7 +12,7 @@
 
 class BasicGcPolicy(object):
     requires_stackless = False
-    
+
     def __init__(self, db, thread_enabled=False):
         self.db = db
         self.thread_enabled = thread_enabled
@@ -49,6 +50,9 @@
             post_include_bits=['typedef void *GC_hidden_pointer;']
             )
 
+    def need_no_typeptr(self):
+        return False
+
     def gc_startup_code(self):
         return []
 
@@ -312,8 +316,11 @@
         return framework.convert_weakref_to(ptarget)
 
     def OP_GC_RELOAD_POSSIBLY_MOVED(self, funcgen, op):
-        args = [funcgen.expr(v) for v in op.args]
-        return '%s = %s; /* for moving GCs */' % (args[1], args[0])
+        if isinstance(op.args[1], Constant):
+            return '/* %s */' % (op,)
+        else:
+            args = [funcgen.expr(v) for v in op.args]
+            return '%s = %s; /* for moving GCs */' % (args[1], args[0])
 
     def common_gcheader_definition(self, defnode):
         return defnode.db.gctransformer.gc_fields()
@@ -322,6 +329,25 @@
         o = top_container(defnode.obj)
         return defnode.db.gctransformer.gc_field_values_for(o)
 
+    def need_no_typeptr(self):
+        config = self.db.translator.config
+        return config.translation.gcconfig.removetypeptr
+
+    def OP_GC_GETTYPEPTR_GROUP(self, funcgen, op):
+        # expands to a number of steps, as per rpython/lltypesystem/opimpl.py,
+        # all implemented by a single call to a C macro.
+        [v_obj, c_grpptr, c_skipoffset, c_vtableinfo] = op.args
+        typename = funcgen.db.gettype(op.result.concretetype)
+        fieldname = c_vtableinfo.value[2]
+        return (
+        '%s = (%s)_OP_GET_NEXT_GROUP_MEMBER(%s, (unsigned short)%s->_%s, %s);'
+            % (funcgen.expr(op.result),
+               cdecl(typename, ''),
+               funcgen.expr(c_grpptr),
+               funcgen.expr(v_obj),
+               fieldname,
+               funcgen.expr(c_skipoffset)))
+
 class AsmGcRootFrameworkGcPolicy(FrameworkGcPolicy):
     transformerclass = asmgcroot.AsmGcRootFrameworkGCTransformer
 

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/c/genc.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/c/genc.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/c/genc.py	Tue Oct 13 16:57:33 2009
@@ -463,8 +463,8 @@
             mk.definition('PROFOPT', profopt)
 
         rules = [
-            ('clean', '', 'rm -f $(OBJECTS) $(TARGET) $(GCMAPFILES) *.gc?? ../module_cache/*.gc??'),
-            ('clean_noprof', '', 'rm -f $(OBJECTS) $(TARGET) $(GCMAPFILES)'),
+            ('clean', '', 'rm -f $(OBJECTS) $(TARGET) $(GCMAPFILES) $(ASMFILES) *.gc?? ../module_cache/*.gc??'),
+            ('clean_noprof', '', 'rm -f $(OBJECTS) $(TARGET) $(GCMAPFILES) $(ASMFILES)'),
             ('debug', '', '$(MAKE) CFLAGS="-g -O1 -DRPY_ASSERT" $(TARGET)'),
             ('debug_exc', '', '$(MAKE) CFLAGS="-g -O1 -DRPY_ASSERT -DDO_LOG_EXC" $(TARGET)'),
             ('debug_mem', '', '$(MAKE) CFLAGS="-g -O1 -DRPY_ASSERT -DTRIVIAL_MALLOC_DEBUG" $(TARGET)'),
@@ -486,8 +486,10 @@
             mk.rule(*rule)
 
         if self.config.translation.gcrootfinder == 'asmgcc':
+            sfiles = ['%s.s' % (cfile[:-2],) for cfile in mk.cfiles]
             lblsfiles = ['%s.lbl.s' % (cfile[:-2],) for cfile in mk.cfiles]
             gcmapfiles = ['%s.gcmap' % (cfile[:-2],) for cfile in mk.cfiles]
+            mk.definition('ASMFILES', sfiles)
             mk.definition('ASMLBLFILES', lblsfiles)
             mk.definition('GCMAPFILES', gcmapfiles)
             mk.definition('OBJECTS', '$(ASMLBLFILES) gcmaptable.s')

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/c/node.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/c/node.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/c/node.py	Tue Oct 13 16:57:33 2009
@@ -3,7 +3,7 @@
      GcStruct, GcArray, RttiStruct, ContainerType, \
      parentlink, Ptr, PyObject, Void, OpaqueType, Float, \
      RuntimeTypeInfo, getRuntimeTypeInfo, Char, _subarray
-from pypy.rpython.lltypesystem import llmemory
+from pypy.rpython.lltypesystem import llmemory, llgroup
 from pypy.translator.c.funcgen import FunctionCodeGenerator
 from pypy.translator.c.external import CExternalFunctionCodeGenerator
 from pypy.translator.c.support import USESLOTS # set to False if necessary while refactoring
@@ -67,6 +67,12 @@
                                                   bare=True)
             self.prefix = somelettersfrom(STRUCT._name) + '_'
         self.dependencies = {}
+        #
+        self.fieldnames = STRUCT._names
+        if STRUCT._hints.get('typeptr', False):
+            if db.gcpolicy.need_no_typeptr():
+                assert self.fieldnames == ('typeptr',)
+                self.fieldnames = ()
 
     def setup(self):
         # this computes self.fields
@@ -80,7 +86,7 @@
         if needs_gcheader(self.STRUCT):
             for fname, T in db.gcpolicy.struct_gcheader_definition(self):
                 self.fields.append((fname, db.gettype(T, who_asks=self)))
-        for name in STRUCT._names:
+        for name in self.fieldnames:
             T = self.c_struct_field_type(name)
             if name == STRUCT._arrayfld:
                 typename = db.gettype(T, varlength=self.varlength,
@@ -147,8 +153,7 @@
             yield line
 
     def visitor_lines(self, prefix, on_field):
-        STRUCT = self.STRUCT
-        for name in STRUCT._names:
+        for name in self.fieldnames:
             FIELD_T = self.c_struct_field_type(name)
             cname = self.c_struct_field_name(name)
             for line in on_field('%s.%s' % (prefix, cname),
@@ -157,8 +162,7 @@
 
     def debug_offsets(self):
         # generate number exprs giving the offset of the elements in the struct
-        STRUCT = self.STRUCT
-        for name in STRUCT._names:
+        for name in self.fieldnames:
             FIELD_T = self.c_struct_field_type(name)
             if FIELD_T is Void:
                 yield '-1'
@@ -464,11 +468,15 @@
         return hasattr(self.T, "_hints") and self.T._hints.get('thread_local')
 
     def forward_declaration(self):
+        if llgroup.member_of_group(self.obj):
+            return
         yield '%s;' % (
             forward_cdecl(self.implementationtypename,
                 self.name, self.db.standalone, self.is_thread_local()))
 
     def implementation(self):
+        if llgroup.member_of_group(self.obj):
+            return []
         lines = list(self.initializationexpr())
         lines[0] = '%s = %s' % (
             cdecl(self.implementationtypename, self.name, self.is_thread_local()),
@@ -514,7 +522,7 @@
             for i, thing in enumerate(self.db.gcpolicy.struct_gcheader_initdata(self)):
                 data.append(('gcheader%d'%i, thing))
         
-        for name in self.T._names:
+        for name in defnode.fieldnames:
             data.append((name, getattr(self.obj, name)))
         
         # Reasonably, you should only initialise one of the fields of a union
@@ -898,6 +906,67 @@
     #obj._converted_weakref = container     # hack for genllvm :-/
     return db.getcontainernode(container, _dont_write_c_code=False)
 
+class GroupNode(ContainerNode):
+    nodekind = 'group'
+    count_members = None
+
+    def __init__(self, *args):
+        ContainerNode.__init__(self, *args)
+        self.implementationtypename = 'struct group_%s_s @' % self.name
+
+    def basename(self):
+        return self.obj.name
+
+    def enum_dependencies(self):
+        # note: for the group used by the GC, it can grow during this phase,
+        # which means that we might not return all members yet.  This is
+        # fixed by finish_tables() in rpython/memory/gctransform/framework.py
+        for member in self.obj.members:
+            yield member._as_ptr()
+
+    def _fix_members(self):
+        if self.obj.outdated:
+            raise Exception(self.obj.outdated)
+        if self.count_members is None:
+            self.count_members = len(self.obj.members)
+        else:
+            # make sure no new member showed up, because it's too late
+            assert len(self.obj.members) == self.count_members
+
+    def forward_declaration(self):
+        self._fix_members()
+        yield ''
+        ctype = ['%s {' % cdecl(self.implementationtypename, '')]
+        for i, member in enumerate(self.obj.members):
+            structtypename = self.db.gettype(typeOf(member))
+            ctype.append('\t%s;' % cdecl(structtypename, 'member%d' % i))
+        ctype.append('} @')
+        ctype = '\n'.join(ctype)
+        yield '%s;' % (
+            forward_cdecl(ctype, self.name, self.db.standalone,
+                          self.is_thread_local()))
+        yield '#include "src/llgroup.h"'
+        yield 'PYPY_GROUP_CHECK_SIZE(%s);' % self.name
+        for i, member in enumerate(self.obj.members):
+            structnode = self.db.getcontainernode(member)
+            yield '#define %s %s.member%d' % (structnode.name,
+                                              self.name, i)
+        yield ''
+
+    def initializationexpr(self):
+        self._fix_members()
+        lines = ['{']
+        lasti = len(self.obj.members) - 1
+        for i, member in enumerate(self.obj.members):
+            structnode = self.db.getcontainernode(member)
+            lines1 = list(structnode.initializationexpr())
+            lines1[0] += '\t/* member%d: %s */' % (i, structnode.name)
+            if i != lasti:
+                lines1[-1] += ','
+            lines.extend(lines1)
+        lines.append('}')
+        return lines
+
 
 ContainerNodeFactory = {
     Struct:       StructNode,
@@ -909,4 +978,5 @@
     OpaqueType:   opaquenode_factory,
     PyObjectType: PyObjectNode,
     llmemory._WeakRefType: weakrefnode_factory,
+    llgroup.GroupType: GroupNode,
     }

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/c/primitive.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/c/primitive.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/c/primitive.py	Tue Oct 13 16:57:33 2009
@@ -3,7 +3,7 @@
 from pypy.rlib.objectmodel import CDefinedIntSymbolic
 from pypy.rlib.rarithmetic import r_longlong, isinf, isnan
 from pypy.rpython.lltypesystem.lltype import *
-from pypy.rpython.lltypesystem import rffi
+from pypy.rpython.lltypesystem import rffi, llgroup
 from pypy.rpython.lltypesystem.llmemory import Address, \
      AddressOffset, ItemOffset, ArrayItemsOffset, FieldOffset, \
      CompositeOffset, ArrayLengthOffset, \
@@ -50,12 +50,15 @@
         elif type(value) == GCHeaderOffset:
             return '0'
         elif type(value) == RoundedUpForAllocation:
-            return 'ROUND_UP_FOR_ALLOCATION(%s)' % (
-                name_signed(value.basesize, db))
+            return 'ROUND_UP_FOR_ALLOCATION(%s, %s)' % (
+                name_signed(value.basesize, db),
+                name_signed(value.minsize, db))
         elif isinstance(value, CDefinedIntSymbolic):
             return str(value.expr)
         elif isinstance(value, ComputedIntSymbolic):
             value = value.compute_fn()
+        elif isinstance(value, llgroup.CombinedSymbolic):
+            return '(%s|%dL)' % (name_ushort(value.lowpart, db), value.rest)
         else:
             raise Exception("unimplemented symbolic %r"%value)
     if value is None:
@@ -136,6 +139,19 @@
     else:
         return 'NULL'
 
+def name_ushort(value, db):
+    if isinstance(value, Symbolic):
+        if isinstance(value, llgroup.GroupMemberOffset):
+            groupnode = db.getcontainernode(value.grpptr._as_obj())
+            structnode = db.getcontainernode(value.member._as_obj())
+            return 'GROUP_MEMBER_OFFSET(%s, %s)' % (
+                groupnode.name,
+                structnode.name,
+                )
+        else:
+            raise Exception("unimplemented symbolic %r" % value)
+    return str(value)
+
 # On 64 bit machines, SignedLongLong and Signed are the same, so the
 # order matters, because we want the Signed implementation.
 PrimitiveName = {
@@ -151,6 +167,7 @@
     Void:     name_void,
     Address:  name_address,
     GCREF:    name_gcref,
+    rffi.USHORT: name_ushort,
     }
 
 PrimitiveType = {
@@ -166,6 +183,7 @@
     Void:     'void @',
     Address:  'void* @',
     GCREF:    'void* @',
+    rffi.USHORT: 'unsigned short @',
     }
 
 def define_c_primitive(ll_type, c_name):
@@ -181,7 +199,7 @@
 for ll_type, c_name in [(rffi.SIGNEDCHAR, 'signed char'),
                         (rffi.UCHAR, 'unsigned char'),
                         (rffi.SHORT, 'short'),
-                        (rffi.USHORT, 'unsigned short'),
+                        #(rffi.USHORT, 'unsigned short'),
                         (rffi.INT, 'int'),
                         (rffi.UINT, 'unsigned int'),
                         (rffi.LONG, 'long'),

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/c/src/g_include.h
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/c/src/g_include.h	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/c/src/g_include.h	Tue Oct 13 16:57:33 2009
@@ -34,6 +34,7 @@
 #ifndef AVR
 #include "src/unichar.h"
 #endif
+#include "src/llgroup.h"
 
 #include "src/instrument.h"
 

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/c/src/mem.h
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/c/src/mem.h	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/c/src/mem.h	Tue Oct 13 16:57:33 2009
@@ -14,8 +14,9 @@
   struct rpy_memory_alignment_test1 s;
 };
 #define MEMORY_ALIGNMENT	offsetof(struct rpy_memory_alignment_test2, s)
-#define ROUND_UP_FOR_ALLOCATION(x)	\
-		(((x) + (MEMORY_ALIGNMENT-1)) & ~(MEMORY_ALIGNMENT-1))
+#define ROUND_UP_FOR_ALLOCATION(x, minsize)  \
+  ((((x)>=(minsize)?(x):(minsize))           \
+               + (MEMORY_ALIGNMENT-1)) & ~(MEMORY_ALIGNMENT-1))
 
 extern char __gcmapstart;
 extern char __gcmapend;

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/c/test/test_lltyped.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/c/test/test_lltyped.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/c/test/test_lltyped.py	Tue Oct 13 16:57:33 2009
@@ -701,5 +701,74 @@
         fn = self.getcompiled(llf)
         fn()
 
+    def test_llgroup(self):
+        from pypy.rpython.lltypesystem.test import test_llgroup
+        f = test_llgroup.build_test()
+        fn = self.getcompiled(f)
+        res = fn()
+        assert res == 42
 
-        
+    def test_llgroup_size_limit(self):
+        yield self._test_size_limit, True
+        yield self._test_size_limit, False
+
+    def _test_size_limit(self, toobig):
+        from pypy.rpython.lltypesystem import llgroup
+        from pypy.rpython.lltypesystem.lloperation import llop
+        from pypy.translator.platform import CompilationError
+        grp = llgroup.group("big")
+        S1 = Struct('S1', ('x', Signed), ('y', Signed),
+                          ('z', Signed), ('u', Signed),
+                          ('x2', Signed), ('y2', Signed),
+                          ('z2', Signed), ('u2', Signed),
+                          ('x3', Signed), ('y3', Signed),
+                          ('z3', Signed), ('u3', Signed),
+                          ('x4', Signed), ('y4', Signed),
+                          ('z4', Signed), ('u4', Signed))
+        goffsets = []
+        for i in range(4096 + toobig):
+            goffsets.append(grp.add_member(malloc(S1, immortal=True)))
+        grpptr = grp._as_ptr()
+        def f(n):
+            p = llop.get_group_member(Ptr(S1), grpptr, goffsets[n])
+            q = llop.get_group_member(Ptr(S1), grpptr, goffsets[0])
+            p.x = 5
+            q.x = 666
+            return p.x
+        if toobig:
+            py.test.raises(CompilationError, self.getcompiled, f, [int])
+        else:
+            fn = self.getcompiled(f, [int])
+            res = fn(-1)
+            assert res == 5
+
+    def test_round_up_for_allocation(self):
+        from pypy.rpython.lltypesystem import llmemory, llarena
+        S = Struct('S', ('x', Char), ('y', Char))
+        M = Struct('M', ('x', Char), ('y', Signed))
+        #
+        def g():
+            ssize = llarena.round_up_for_allocation(llmemory.sizeof(S))
+            msize = llarena.round_up_for_allocation(llmemory.sizeof(M))
+            smsize = llarena.round_up_for_allocation(llmemory.sizeof(S),
+                                                     llmemory.sizeof(M))
+            mssize = llarena.round_up_for_allocation(llmemory.sizeof(M),
+                                                     llmemory.sizeof(S))
+            return ssize, msize, smsize, mssize
+        #
+        glob_sizes = g()
+        #
+        def check((ssize, msize, smsize, mssize)):
+            assert ssize == llmemory.sizeof(Signed)
+            assert msize == llmemory.sizeof(Signed) * 2
+            assert smsize == msize
+            assert mssize == msize
+        #
+        def f():
+            check(glob_sizes)
+            check(g())
+            return 42
+        #
+        fn = self.getcompiled(f, [])
+        res = fn()
+        assert res == 42

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/c/test/test_newgc.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/c/test/test_newgc.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/c/test/test_newgc.py	Tue Oct 13 16:57:33 2009
@@ -16,6 +16,7 @@
 class TestUsingFramework(object):
     gcpolicy = "marksweep"
     should_be_moving = False
+    removetypeptr = False
     GC_CAN_MOVE = False
     GC_CANNOT_MALLOC_NONMOVABLE = False
 
@@ -25,6 +26,7 @@
     def _makefunc2(cls, f):
         t = Translation(f, [int, int], gc=cls.gcpolicy,
                         policy=annpolicy.StrictAnnotatorPolicy())
+        t.config.translation.gcconfig.removetypeptr = cls.removetypeptr
         t.disable(['backendopt'])
         t.set_backend_extra_options(c_isolated=True, c_debug_defines=True)
         t.rtype()
@@ -796,6 +798,9 @@
     def test_gc_set_max_heap_size(self):
         py.test.skip("not implemented")
 
+class TestHybridGCRemoveTypePtr(TestHybridGC):
+    removetypeptr = True
+
 class TestMarkCompactGC(TestSemiSpaceGC):
     gcpolicy = "markcompact"
     should_be_moving = True

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/cli/constant.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/cli/constant.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/cli/constant.py	Tue Oct 13 16:57:33 2009
@@ -288,7 +288,7 @@
 class CLIStaticMethodConst(CLIBaseConstMixin, StaticMethodConst):
     def create_pointer(self, gen):
         assert not self.is_null()
-        signature = self.cts.graph_to_signature(self.value.graph)
+        signature = self.cts.static_meth_to_signature(self.value)
         gen.ilasm.opcode('ldnull')
         gen.ilasm.opcode('ldftn', signature)
         gen.ilasm.new('instance void class %s::.ctor(object, native int)' % self.delegate_type)

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/cli/cts.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/cli/cts.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/cli/cts.py	Tue Oct 13 16:57:33 2009
@@ -305,20 +305,33 @@
     def ctor_name(self, t):
         return 'instance void %s::.ctor()' % self.lltype_to_cts(t)
 
+    def static_meth_to_signature(self, sm):
+        from pypy.translator.oosupport import metavm
+        graph = getattr(sm, 'graph', None)
+        if graph:
+            return self.graph_to_signature(graph)
+        module, name = metavm.get_primitive_name(sm)
+        func_name = '[pypylib]pypy.builtin.%s::%s' % (module, name)
+        T = ootype.typeOf(sm)
+        return self.format_signature(func_name, T.ARGS, T.RESULT)
+
     def graph_to_signature(self, graph, is_method = False, func_name = None):
-        ret_type, ret_var = self.llvar_to_cts(graph.getreturnvar())
         func_name = func_name or graph.name
         func_name = self.escape_name(func_name)
         namespace = getattr(graph.func, '_namespace_', None)
         if namespace:
             func_name = '%s::%s' % (namespace, func_name)
 
-        args = [arg for arg in graph.getargs() if arg.concretetype is not ootype.Void]
+        ARGS = [arg.concretetype for arg in graph.getargs() if arg.concretetype is not ootype.Void]
         if is_method:
-            args = args[1:]
+            ARGS = ARGS[1:]
+        RESULT = graph.getreturnvar().concretetype
+        return self.format_signature(func_name, ARGS, RESULT)
 
-        arg_types = [self.lltype_to_cts(arg.concretetype).typename() for arg in args]
+    def format_signature(self, func_name, ARGS, RESULT):
+        arg_types = [self.lltype_to_cts(ARG).typename() for ARG in ARGS]
         arg_list = ', '.join(arg_types)
+        ret_type = self.lltype_to_cts(RESULT)
 
         return '%s %s(%s)' % (ret_type, func_name, arg_list)
 

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/cli/src/pypylib.cs
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/cli/src/pypylib.cs	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/cli/src/pypylib.cs	Tue Oct 13 16:57:33 2009
@@ -114,7 +114,7 @@
 
     public class InputArgs {
       public int[] ints = new int[32];
-      public float[] floats = new float[32];
+      public double[] floats = new double[32];
       public object[] objs = new object[32];
       public object exc_value = null;
       public int failed_op = -1;
@@ -129,6 +129,16 @@
         ints[i] = n;
       }
 
+      public double get_float(int i)
+      {
+        return floats[i];
+      }
+
+      public void set_float(int i, double n)
+      {
+        floats[i] = n;
+      }
+
       public object get_obj(int i)
       {
         return objs[i];
@@ -163,7 +173,7 @@
       public void ensure_floats(int n)
       {
         if (floats.Length < n)
-          floats = new float[n];
+          floats = new double[n];
       }
 
       public void ensure_objs(int n)
@@ -402,6 +412,14 @@
             return new DynamicMethod(name, res, args, typeof(Utils).Module);
         }
 
+        // if you call il.Emit(OpCodes.Ldc_R8, mydouble) from pythonnet, it
+        // selects the wrong overload. To work around it, we call it from C# and
+        // live happily
+        public static void Emit_Ldc_R8(ILGenerator il, double val) 
+        {
+            il.Emit(OpCodes.Ldc_R8, val);
+        }
+
         public static object RuntimeNew(Type t)
         {
             return t.GetConstructor(new Type[0]).Invoke(new object[0]);

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/oosupport/constant.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/oosupport/constant.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/oosupport/constant.py	Tue Oct 13 16:57:33 2009
@@ -748,7 +748,8 @@
     def record_dependencies(self):
         if self.value is ootype.null(self.value._TYPE):
             return
-        self.db.pending_function(self.value.graph)
+        if hasattr(self.value, 'graph'):
+            self.db.pending_function(self.value.graph)
         self.delegate_type = self.db.record_delegate(self.value._TYPE)
 
     def initialize_data(self, constgen, gen):

Modified: pypy/branch/inline-fastpath-malloc/pypy/translator/oosupport/metavm.py
==============================================================================
--- pypy/branch/inline-fastpath-malloc/pypy/translator/oosupport/metavm.py	(original)
+++ pypy/branch/inline-fastpath-malloc/pypy/translator/oosupport/metavm.py	Tue Oct 13 16:57:33 2009
@@ -437,23 +437,23 @@
         generator.branch_conditionally(False, self.label)
 
 
-class _Call(MicroInstruction):
+def get_primitive_name(sm):
+    try:
+        sm.graph
+        return None
+    except AttributeError:
+        pass
+    try:
+        return 'rffi', sm._obj.oo_primitive
+    except AttributeError:
+        pass
+    return sm._name.rsplit('.', 1)
 
-    def _get_primitive_name(self, callee):
-        try:
-            callee.graph
-            return None
-        except AttributeError:
-            pass
-        try:
-            return 'rffi', callee._obj.oo_primitive
-        except AttributeError:
-            pass
-        return callee._name.rsplit('.', 1)
+class _Call(MicroInstruction):
         
     def render(self, generator, op):
         callee = op.args[0].value
-        is_primitive = self._get_primitive_name(callee)
+        is_primitive = get_primitive_name(callee)
 
         if is_primitive:
             module, name = is_primitive



More information about the Pypy-commit mailing list