[pypy-svn] r68314 - in pypy/trunk/pypy: config doc/config jit/backend/llsupport jit/backend/llsupport/test jit/backend/x86 jit/metainterp rpython rpython/lltypesystem rpython/lltypesystem/test rpython/memory rpython/memory/gc rpython/memory/gc/test rpython/memory/gctransform rpython/memory/test translator/backendopt translator/c translator/c/src translator/c/test
arigo at codespeak.net
arigo at codespeak.net
Sun Oct 11 16:32:29 CEST 2009
Author: arigo
Date: Sun Oct 11 16:32:27 2009
New Revision: 68314
Added:
pypy/trunk/pypy/doc/config/translation.gcconfig.removetypeptr.txt
- copied unchanged from r68313, pypy/branch/gc-compress/pypy/doc/config/translation.gcconfig.removetypeptr.txt
pypy/trunk/pypy/rpython/lltypesystem/llgroup.py
- copied unchanged from r68313, pypy/branch/gc-compress/pypy/rpython/lltypesystem/llgroup.py
pypy/trunk/pypy/rpython/lltypesystem/test/__init__.py
- copied unchanged from r68313, pypy/branch/gc-compress/pypy/rpython/lltypesystem/test/__init__.py
pypy/trunk/pypy/rpython/lltypesystem/test/test_llgroup.py
- copied unchanged from r68313, pypy/branch/gc-compress/pypy/rpython/lltypesystem/test/test_llgroup.py
pypy/trunk/pypy/translator/c/src/llgroup.h
- copied unchanged from r68313, pypy/branch/gc-compress/pypy/translator/c/src/llgroup.h
Modified:
pypy/trunk/pypy/config/translationoption.py
pypy/trunk/pypy/jit/backend/llsupport/gc.py
pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py
pypy/trunk/pypy/jit/backend/x86/runner.py
pypy/trunk/pypy/jit/metainterp/policy.py
pypy/trunk/pypy/rpython/llinterp.py
pypy/trunk/pypy/rpython/lltypesystem/llarena.py
pypy/trunk/pypy/rpython/lltypesystem/lloperation.py
pypy/trunk/pypy/rpython/lltypesystem/opimpl.py
pypy/trunk/pypy/rpython/lltypesystem/rclass.py
pypy/trunk/pypy/rpython/memory/gc/base.py
pypy/trunk/pypy/rpython/memory/gc/hybrid.py
pypy/trunk/pypy/rpython/memory/gc/markcompact.py
pypy/trunk/pypy/rpython/memory/gc/marksweep.py
pypy/trunk/pypy/rpython/memory/gc/semispace.py
pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py
pypy/trunk/pypy/rpython/memory/gctransform/framework.py
pypy/trunk/pypy/rpython/memory/gctransform/transform.py
pypy/trunk/pypy/rpython/memory/gctypelayout.py
pypy/trunk/pypy/rpython/memory/gcwrapper.py
pypy/trunk/pypy/rpython/memory/lltypelayout.py
pypy/trunk/pypy/rpython/memory/test/test_gctypelayout.py
pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py
pypy/trunk/pypy/rpython/rtyper.py
pypy/trunk/pypy/translator/backendopt/inline.py
pypy/trunk/pypy/translator/c/database.py
pypy/trunk/pypy/translator/c/funcgen.py
pypy/trunk/pypy/translator/c/gc.py
pypy/trunk/pypy/translator/c/node.py
pypy/trunk/pypy/translator/c/primitive.py
pypy/trunk/pypy/translator/c/src/g_include.h
pypy/trunk/pypy/translator/c/src/mem.h
pypy/trunk/pypy/translator/c/test/test_lltyped.py
pypy/trunk/pypy/translator/c/test/test_newgc.py
Log:
Merge the gc-compress branch. This adds a new option,
"--gcremovetypeptr", which compiles a framework GC in such a way that
only one word is needed to store both the GC header (including the
GC-specific type data pointer) and the 'typeptr' field of all regular
RPython instances.
The option is False by default for now, but with a bit more testing it
should become True. It seems to give no noticeable speed-up, but no
slow-down either, and it saves memory.
Implemented with a new concept in rpython/lltypesystem, a "group", which
regroups otherwise-independent prebuilt structures in a lazy manner, and
offers unsigned 16-bit symbolics representing offsets of one structure
in the group. Used for the type info table, and if "--gcremovetypeptr",
for all the vtables of the program as well.
The JIT does not yet support "--gcremovetypeptr", but that could be fixed.
Modified: pypy/trunk/pypy/config/translationoption.py
==============================================================================
--- pypy/trunk/pypy/config/translationoption.py (original)
+++ pypy/trunk/pypy/config/translationoption.py Sun Oct 11 16:32:27 2009
@@ -69,7 +69,9 @@
}),
OptionDescription("gcconfig", "Configure garbage collectors", [
BoolOption("debugprint", "Turn on debug printing for the GC",
- default=False)
+ default=False),
+ BoolOption("removetypeptr", "Remove the typeptr from every object",
+ default=False, cmdline="--gcremovetypeptr"),
]),
ChoiceOption("gcrootfinder",
"Strategy for finding GC Roots (framework GCs only)",
@@ -95,7 +97,8 @@
# JIT generation: use -Ojit to enable it
BoolOption("jit", "generate a JIT",
default=False,
- requires=[("translation.thread", False)],
+ requires=[("translation.thread", False),
+ ("translation.gcconfig.removetypeptr", False)],
suggests=[("translation.gc", "hybrid"), # or "boehm"
("translation.gcrootfinder", "asmgcc"),
("translation.list_comprehension_operations", True)]),
@@ -315,7 +318,7 @@
'0': 'boehm nobackendopt',
'1': 'boehm lowinline',
'size': 'boehm lowinline remove_asserts',
- 'mem': 'markcompact lowinline remove_asserts',
+ 'mem': 'markcompact lowinline remove_asserts removetypeptr',
'2': 'hybrid extraopts',
'3': 'hybrid extraopts remove_asserts',
'jit': 'hybrid extraopts jit',
@@ -355,6 +358,8 @@
config.translation.suggest(withsmallfuncsets=5)
elif word == 'jit':
config.translation.suggest(jit=True)
+ elif word == 'removetypeptr':
+ config.translation.gcconfig.suggest(removetypeptr=True)
else:
raise ValueError(word)
Modified: pypy/trunk/pypy/jit/backend/llsupport/gc.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/llsupport/gc.py (original)
+++ pypy/trunk/pypy/jit/backend/llsupport/gc.py Sun Oct 11 16:32:27 2009
@@ -295,7 +295,7 @@
class GcLLDescr_framework(GcLLDescription):
def __init__(self, gcdescr, translator, llop1=llop):
- from pypy.rpython.memory.gc.base import choose_gc_from_config
+ from pypy.rpython.memory.gctypelayout import _check_typeid
from pypy.rpython.memory.gcheader import GCHeaderBuilder
from pypy.rpython.memory.gctransform import framework
GcLLDescription.__init__(self, gcdescr, translator)
@@ -322,14 +322,15 @@
# make a TransformerLayoutBuilder and save it on the translator
# where it can be fished and reused by the FrameworkGCTransformer
- self.layoutbuilder = framework.TransformerLayoutBuilder()
+ self.layoutbuilder = framework.JITTransformerLayoutBuilder(
+ gcdescr.config)
self.layoutbuilder.delay_encoding()
self.translator._jit2gc = {
'layoutbuilder': self.layoutbuilder,
'gcmapstart': lambda: gcrootmap.gcmapstart(),
'gcmapend': lambda: gcrootmap.gcmapend(),
}
- self.GCClass, _ = choose_gc_from_config(gcdescr.config)
+ self.GCClass = self.layoutbuilder.GCClass
self.moving_gc = self.GCClass.moving_gc
self.HDRPTR = lltype.Ptr(self.GCClass.HDR)
self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO)
@@ -342,7 +343,10 @@
symbolic.get_array_token(lltype.GcArray(lltype.Signed), True)
# make a malloc function, with three arguments
- def malloc_basic(size, type_id, has_finalizer):
+ def malloc_basic(size, tid):
+ type_id = llop.extract_ushort(rffi.USHORT, tid)
+ has_finalizer = bool(tid & (1<<16))
+ _check_typeid(type_id)
res = llop1.do_malloc_fixedsize_clear(llmemory.GCREF,
type_id, size, True,
has_finalizer, False)
@@ -351,11 +355,13 @@
return res
self.malloc_basic = malloc_basic
self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType(
- [lltype.Signed, lltype.Signed, lltype.Bool], llmemory.GCREF))
+ [lltype.Signed, lltype.Signed], llmemory.GCREF))
self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType(
[llmemory.Address, llmemory.Address], lltype.Void))
#
- def malloc_array(itemsize, type_id, num_elem):
+ def malloc_array(itemsize, tid, num_elem):
+ type_id = llop.extract_ushort(rffi.USHORT, tid)
+ _check_typeid(type_id)
return llop1.do_malloc_varsize_clear(
llmemory.GCREF,
type_id, num_elem, self.array_basesize, itemsize,
@@ -391,31 +397,24 @@
self.gcrootmap.initialize()
def init_size_descr(self, S, descr):
- from pypy.rpython.memory.gctypelayout import weakpointer_offset
type_id = self.layoutbuilder.get_type_id(S)
+ assert not self.layoutbuilder.is_weakref(type_id)
has_finalizer = bool(self.layoutbuilder.has_finalizer(S))
- assert weakpointer_offset(S) == -1 # XXX
- descr.type_id = type_id
- descr.has_finalizer = has_finalizer
+ flags = int(has_finalizer) << 16
+ descr.tid = llop.combine_ushort(lltype.Signed, type_id, flags)
def init_array_descr(self, A, descr):
type_id = self.layoutbuilder.get_type_id(A)
- descr.type_id = type_id
+ descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0)
def gc_malloc(self, sizedescr):
assert isinstance(sizedescr, BaseSizeDescr)
- size = sizedescr.size
- type_id = sizedescr.type_id
- has_finalizer = sizedescr.has_finalizer
- assert type_id > 0
- return self.malloc_basic(size, type_id, has_finalizer)
+ return self.malloc_basic(sizedescr.size, sizedescr.tid)
def gc_malloc_array(self, arraydescr, num_elem):
assert isinstance(arraydescr, BaseArrayDescr)
itemsize = arraydescr.get_item_size(self.translate_support_code)
- type_id = arraydescr.type_id
- assert type_id > 0
- return self.malloc_array(itemsize, type_id, num_elem)
+ return self.malloc_array(itemsize, arraydescr.tid, num_elem)
def gc_malloc_str(self, num_elem):
return self.malloc_str(num_elem)
@@ -425,16 +424,12 @@
def args_for_new(self, sizedescr):
assert isinstance(sizedescr, BaseSizeDescr)
- size = sizedescr.size
- type_id = sizedescr.type_id
- has_finalizer = sizedescr.has_finalizer
- return [size, type_id, has_finalizer]
+ return [sizedescr.size, sizedescr.tid]
def args_for_new_array(self, arraydescr):
assert isinstance(arraydescr, BaseArrayDescr)
itemsize = arraydescr.get_item_size(self.translate_support_code)
- type_id = arraydescr.type_id
- return [itemsize, type_id]
+ return [itemsize, arraydescr.tid]
def get_funcptr_for_new(self):
return llhelper(self.GC_MALLOC_BASIC, self.malloc_basic)
Modified: pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py (original)
+++ pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py Sun Oct 11 16:32:27 2009
@@ -1,5 +1,6 @@
import random
from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr
+from pypy.rpython.lltypesystem.lloperation import llop
from pypy.rpython.annlowlevel import llhelper
from pypy.jit.backend.llsupport.descr import *
from pypy.jit.backend.llsupport.gc import *
@@ -119,8 +120,9 @@
assert not contains_weakptr
p = llmemory.raw_malloc(size)
p = llmemory.cast_adr_to_ptr(p, RESTYPE)
- self.record.append(("fixedsize", type_id, repr(size),
- has_finalizer, p))
+ flags = int(has_finalizer) << 16
+ tid = llop.combine_ushort(lltype.Signed, type_id, flags)
+ self.record.append(("fixedsize", repr(size), tid, p))
return p
def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size,
@@ -129,7 +131,8 @@
p = llmemory.raw_malloc(size + itemsize * length)
(p + offset_to_length).signed[0] = length
p = llmemory.cast_adr_to_ptr(p, RESTYPE)
- self.record.append(("varsize", type_id, length,
+ tid = llop.combine_ushort(lltype.Signed, type_id, 0)
+ self.record.append(("varsize", tid, length,
repr(size), repr(itemsize),
repr(offset_to_length), p))
return p
@@ -165,42 +168,57 @@
self.gc_ll_descr = gc_ll_descr
self.fake_cpu = FakeCPU()
+ def test_args_for_new(self):
+ S = lltype.GcStruct('S', ('x', lltype.Signed))
+ sizedescr = get_size_descr(self.gc_ll_descr, S)
+ args = self.gc_ll_descr.args_for_new(sizedescr)
+ for x in args:
+ assert lltype.typeOf(x) == lltype.Signed
+ A = lltype.GcArray(lltype.Signed)
+ arraydescr = get_array_descr(self.gc_ll_descr, A)
+ args = self.gc_ll_descr.args_for_new(sizedescr)
+ for x in args:
+ assert lltype.typeOf(x) == lltype.Signed
+
def test_gc_malloc(self):
S = lltype.GcStruct('S', ('x', lltype.Signed))
sizedescr = get_size_descr(self.gc_ll_descr, S)
p = self.gc_ll_descr.gc_malloc(sizedescr)
- assert self.llop1.record == [("fixedsize", sizedescr.type_id,
- repr(sizedescr.size), False, p)]
+ assert self.llop1.record == [("fixedsize",
+ repr(sizedescr.size),
+ sizedescr.tid, p)]
assert repr(self.gc_ll_descr.args_for_new(sizedescr)) == repr(
- [sizedescr.size, sizedescr.type_id, False])
+ [sizedescr.size, sizedescr.tid])
def test_gc_malloc_array(self):
A = lltype.GcArray(lltype.Signed)
arraydescr = get_array_descr(self.gc_ll_descr, A)
p = self.gc_ll_descr.gc_malloc_array(arraydescr, 10)
- assert self.llop1.record == [("varsize", arraydescr.type_id, 10,
+ assert self.llop1.record == [("varsize", arraydescr.tid, 10,
repr(arraydescr.get_base_size(True)),
repr(arraydescr.get_item_size(True)),
repr(arraydescr.get_ofs_length(True)),
p)]
assert repr(self.gc_ll_descr.args_for_new_array(arraydescr)) == repr(
- [arraydescr.get_item_size(True), arraydescr.type_id])
+ [arraydescr.get_item_size(True), arraydescr.tid])
def test_gc_malloc_str(self):
p = self.gc_ll_descr.gc_malloc_str(10)
type_id = self.gc_ll_descr.layoutbuilder.get_type_id(rstr.STR)
+ tid = llop.combine_ushort(lltype.Signed, type_id, 0)
basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR,
True)
- assert self.llop1.record == [("varsize", type_id, 10,
+ assert self.llop1.record == [("varsize", tid, 10,
repr(basesize), repr(itemsize),
repr(ofs_length), p)]
def test_gc_malloc_unicode(self):
p = self.gc_ll_descr.gc_malloc_unicode(10)
type_id = self.gc_ll_descr.layoutbuilder.get_type_id(rstr.UNICODE)
+ tid = llop.combine_ushort(lltype.Signed, type_id, 0)
basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE,
True)
- assert self.llop1.record == [("varsize", type_id, 10,
+ assert self.llop1.record == [("varsize", tid, 10,
repr(basesize), repr(itemsize),
repr(ofs_length), p)]
Modified: pypy/trunk/pypy/jit/backend/x86/runner.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/x86/runner.py (original)
+++ pypy/trunk/pypy/jit/backend/x86/runner.py Sun Oct 11 16:32:27 2009
@@ -22,8 +22,6 @@
gcdescr)
self._bootstrap_cache = {}
self._faildescr_list = []
- if rtyper is not None: # for tests
- self.lltype2vtable = rtyper.lltype_to_vtable_mapping()
def setup(self):
self.assembler = Assembler386(self, self.translate_support_code)
Modified: pypy/trunk/pypy/jit/metainterp/policy.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/policy.py (original)
+++ pypy/trunk/pypy/jit/metainterp/policy.py Sun Oct 11 16:32:27 2009
@@ -63,7 +63,7 @@
return None
def _graphs_of_all_instantiate(self, rtyper):
- for vtable in rtyper.lltype_to_vtable_mapping().itervalues():
+ for vtable in rtyper.lltype2vtable.values():
if vtable.instantiate:
yield vtable.instantiate._obj.graph
Modified: pypy/trunk/pypy/rpython/llinterp.py
==============================================================================
--- pypy/trunk/pypy/rpython/llinterp.py (original)
+++ pypy/trunk/pypy/rpython/llinterp.py Sun Oct 11 16:32:27 2009
@@ -869,7 +869,10 @@
assert v_ptr.concretetype.TO._gckind == 'gc'
newaddr = self.getval(v_newaddr)
p = llmemory.cast_adr_to_ptr(newaddr, v_ptr.concretetype)
- self.setvar(v_ptr, p)
+ if isinstance(v_ptr, Constant):
+ assert v_ptr.value == p
+ else:
+ self.setvar(v_ptr, p)
op_gc_reload_possibly_moved.specialform = True
def op_gc_id(self, v_ptr):
Modified: pypy/trunk/pypy/rpython/lltypesystem/llarena.py
==============================================================================
--- pypy/trunk/pypy/rpython/lltypesystem/llarena.py (original)
+++ pypy/trunk/pypy/rpython/lltypesystem/llarena.py Sun Oct 11 16:32:27 2009
@@ -240,12 +240,15 @@
"""A size that is rounded up in order to preserve alignment of objects
following it. For arenas containing heterogenous objects.
"""
- def __init__(self, basesize):
+ def __init__(self, basesize, minsize):
assert isinstance(basesize, llmemory.AddressOffset)
+ assert isinstance(minsize, llmemory.AddressOffset) or minsize == 0
self.basesize = basesize
+ self.minsize = minsize
def __repr__(self):
- return '< RoundedUpForAllocation %r >' % (self.basesize,)
+ return '< RoundedUpForAllocation %r %r >' % (self.basesize,
+ self.minsize)
def known_nonneg(self):
return self.basesize.known_nonneg()
@@ -303,10 +306,14 @@
% (addr.offset,))
addr.arena.allocate_object(addr.offset, size)
-def round_up_for_allocation(size):
+def round_up_for_allocation(size, minsize=0):
"""Round up the size in order to preserve alignment of objects
- following an object. For arenas containing heterogenous objects."""
- return RoundedUpForAllocation(size)
+ following an object. For arenas containing heterogenous objects.
+ If minsize is specified, it gives a minimum on the resulting size."""
+ return _round_up_for_allocation(size, minsize)
+
+def _round_up_for_allocation(size, minsize): # internal
+ return RoundedUpForAllocation(size, minsize)
def arena_new_view(ptr):
"""Return a fresh memory view on an arena
@@ -408,10 +415,11 @@
sandboxsafe=True)
llimpl_round_up_for_allocation = rffi.llexternal('ROUND_UP_FOR_ALLOCATION',
- [lltype.Signed], lltype.Signed,
+ [lltype.Signed, lltype.Signed],
+ lltype.Signed,
sandboxsafe=True,
_nowrapper=True)
-register_external(round_up_for_allocation, [int], int,
+register_external(_round_up_for_allocation, [int, int], int,
'll_arena.round_up_for_allocation',
llimpl=llimpl_round_up_for_allocation,
llfakeimpl=round_up_for_allocation,
Modified: pypy/trunk/pypy/rpython/lltypesystem/lloperation.py
==============================================================================
--- pypy/trunk/pypy/rpython/lltypesystem/lloperation.py (original)
+++ pypy/trunk/pypy/rpython/lltypesystem/lloperation.py Sun Oct 11 16:32:27 2009
@@ -411,6 +411,13 @@
'cast_adr_to_int': LLOp(sideeffects=False),
'cast_int_to_adr': LLOp(canfold=True), # not implemented in llinterp
+ 'get_group_member': LLOp(canfold=True),
+ 'get_next_group_member':LLOp(canfold=True),
+ 'is_group_member_nonzero':LLOp(canfold=True),
+ 'extract_ushort': LLOp(canfold=True),
+ 'combine_ushort': LLOp(canfold=True),
+ 'gc_gettypeptr_group': LLOp(canfold=True),
+
# __________ used by the JIT ________
'jit_marker': LLOp(),
Modified: pypy/trunk/pypy/rpython/lltypesystem/opimpl.py
==============================================================================
--- pypy/trunk/pypy/rpython/lltypesystem/opimpl.py (original)
+++ pypy/trunk/pypy/rpython/lltypesystem/opimpl.py Sun Oct 11 16:32:27 2009
@@ -171,10 +171,33 @@
return not b
def op_int_add(x, y):
- assert isinstance(x, (int, llmemory.AddressOffset))
+ if not isinstance(x, (int, llmemory.AddressOffset)):
+ from pypy.rpython.lltypesystem import llgroup
+ assert isinstance(x, llgroup.CombinedSymbolic)
assert isinstance(y, (int, llmemory.AddressOffset))
return intmask(x + y)
+def op_int_sub(x, y):
+ if not isinstance(x, int):
+ from pypy.rpython.lltypesystem import llgroup
+ assert isinstance(x, llgroup.CombinedSymbolic)
+ assert isinstance(y, int)
+ return intmask(x - y)
+
+def op_int_and(x, y):
+ if not isinstance(x, int):
+ from pypy.rpython.lltypesystem import llgroup
+ assert isinstance(x, llgroup.CombinedSymbolic)
+ assert isinstance(y, int)
+ return x & y
+
+def op_int_or(x, y):
+ if not isinstance(x, int):
+ from pypy.rpython.lltypesystem import llgroup
+ assert isinstance(x, llgroup.CombinedSymbolic)
+ assert isinstance(y, int)
+ return x | y
+
def op_int_mul(x, y):
assert isinstance(x, (int, llmemory.AddressOffset))
assert isinstance(y, (int, llmemory.AddressOffset))
@@ -388,6 +411,50 @@
def op_promote_virtualizable(object, fieldname, flags):
pass # XXX should do something
+def op_get_group_member(TYPE, grpptr, memberoffset):
+ from pypy.rpython.lltypesystem import llgroup
+ assert isinstance(memberoffset, llgroup.GroupMemberOffset)
+ member = memberoffset._get_group_member(grpptr)
+ return lltype.cast_pointer(TYPE, member)
+op_get_group_member.need_result_type = True
+
+def op_get_next_group_member(TYPE, grpptr, memberoffset, skipoffset):
+ from pypy.rpython.lltypesystem import llgroup
+ assert isinstance(memberoffset, llgroup.GroupMemberOffset)
+ member = memberoffset._get_next_group_member(grpptr, skipoffset)
+ return lltype.cast_pointer(TYPE, member)
+op_get_next_group_member.need_result_type = True
+
+def op_is_group_member_nonzero(memberoffset):
+ from pypy.rpython.lltypesystem import llgroup
+ if isinstance(memberoffset, llgroup.GroupMemberOffset):
+ return memberoffset.index != 0
+ else:
+ assert isinstance(memberoffset, int)
+ return memberoffset != 0
+
+def op_extract_ushort(combinedoffset):
+ from pypy.rpython.lltypesystem import llgroup
+ assert isinstance(combinedoffset, llgroup.CombinedSymbolic)
+ return combinedoffset.lowpart
+
+def op_combine_ushort(ushort, rest):
+ from pypy.rpython.lltypesystem import llgroup
+ return llgroup.CombinedSymbolic(ushort, rest)
+
+def op_gc_gettypeptr_group(TYPE, obj, grpptr, skipoffset, vtableinfo):
+ HDR = vtableinfo[0]
+ size_gc_header = vtableinfo[1]
+ fieldname = vtableinfo[2]
+ objaddr = llmemory.cast_ptr_to_adr(obj)
+ hdraddr = objaddr - size_gc_header
+ hdr = llmemory.cast_adr_to_ptr(hdraddr, lltype.Ptr(HDR))
+ typeid = getattr(hdr, fieldname)
+ if lltype.typeOf(typeid) == lltype.Signed:
+ typeid = op_extract_ushort(typeid)
+ return op_get_next_group_member(TYPE, grpptr, typeid, skipoffset)
+op_gc_gettypeptr_group.need_result_type = True
+
# ____________________________________________________________
def get_op_impl(opname):
Modified: pypy/trunk/pypy/rpython/lltypesystem/rclass.py
==============================================================================
--- pypy/trunk/pypy/rpython/lltypesystem/rclass.py (original)
+++ pypy/trunk/pypy/rpython/lltypesystem/rclass.py Sun Oct 11 16:32:27 2009
@@ -389,6 +389,7 @@
OBJECT, destrptr)
vtable = self.rclass.getvtable()
self.rtyper.type_for_typeptr[vtable._obj] = self.lowleveltype.TO
+ self.rtyper.lltype2vtable[self.lowleveltype.TO] = vtable
def common_repr(self): # -> object or nongcobject reprs
return getinstancerepr(self.rtyper, None, self.gcflavor)
Modified: pypy/trunk/pypy/rpython/memory/gc/base.py
==============================================================================
--- pypy/trunk/pypy/rpython/memory/gc/base.py (original)
+++ pypy/trunk/pypy/rpython/memory/gc/base.py Sun Oct 11 16:32:27 2009
@@ -13,6 +13,7 @@
malloc_zero_filled = False
prebuilt_gc_objects_are_static_roots = True
can_realloc = False
+ object_minimal_size = 0
def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE):
self.gcheaderbuilder = GCHeaderBuilder(self.HDR)
Modified: pypy/trunk/pypy/rpython/memory/gc/hybrid.py
==============================================================================
--- pypy/trunk/pypy/rpython/memory/gc/hybrid.py (original)
+++ pypy/trunk/pypy/rpython/memory/gc/hybrid.py Sun Oct 11 16:32:27 2009
@@ -222,7 +222,6 @@
def realloc(self, ptr, newlength, fixedsize, itemsize, lengthofs, grow):
size_gc_header = self.size_gc_header()
addr = llmemory.cast_ptr_to_adr(ptr)
- tid = self.get_type_id(addr)
nonvarsize = size_gc_header + fixedsize
try:
varsize = ovfcheck(itemsize * newlength)
@@ -375,7 +374,7 @@
hdr = self.header(obj)
if hdr.tid & GCFLAG_UNVISITED:
# This is a not-visited-yet raw_malloced object.
- hdr.tid -= GCFLAG_UNVISITED
+ hdr.tid &= ~GCFLAG_UNVISITED
self.rawmalloced_objects_to_trace.append(obj)
def make_a_copy(self, obj, objsize):
Modified: pypy/trunk/pypy/rpython/memory/gc/markcompact.py
==============================================================================
--- pypy/trunk/pypy/rpython/memory/gc/markcompact.py (original)
+++ pypy/trunk/pypy/rpython/memory/gc/markcompact.py Sun Oct 11 16:32:27 2009
@@ -1,7 +1,7 @@
import time
-from pypy.rpython.lltypesystem import lltype, llmemory, llarena
+from pypy.rpython.lltypesystem import lltype, llmemory, llarena, llgroup
from pypy.rpython.memory.gc.base import MovingGCBase
from pypy.rlib.debug import ll_assert
from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
@@ -12,11 +12,10 @@
from pypy.rpython.lltypesystem.lloperation import llop
from pypy.rlib.objectmodel import we_are_translated
from pypy.rpython.lltypesystem import rffi
+from pypy.rpython.memory.gcheader import GCHeaderBuilder
-TYPEID_MASK = 0xffff0000
-first_gcflag = 2
+first_gcflag = 1 << 16
GCFLAG_MARKBIT = first_gcflag << 0
-GCFLAG_EXTERNAL = first_gcflag << 1
memoryError = MemoryError()
@@ -68,8 +67,10 @@
TID_TYPE = rffi.USHORT
BYTES_PER_TID = rffi.sizeof(TID_TYPE)
+
class MarkCompactGC(MovingGCBase):
- HDR = lltype.Struct('header', ('forward_ptr', llmemory.Address))
+ HDR = lltype.Struct('header', ('tid', lltype.Signed))
+ typeid_is_in_field = 'tid'
TID_BACKUP = lltype.Array(TID_TYPE, hints={'nolength':True})
WEAKREF_OFFSETS = lltype.Array(lltype.Signed)
@@ -79,7 +80,7 @@
malloc_zero_filled = True
inline_simple_malloc = True
inline_simple_malloc_varsize = True
- first_unused_gcflag = first_gcflag << 2
+ first_unused_gcflag = first_gcflag << 1
total_collection_time = 0.0
total_collection_count = 0
@@ -100,21 +101,18 @@
self.objects_with_weakrefs = self.AddressStack()
self.tid_backup = lltype.nullptr(self.TID_BACKUP)
- # flags = 1 to make lltype & llmemory happy about odd/even pointers
-
- def init_gc_object(self, addr, typeid, flags=1):
+ def init_gc_object(self, addr, typeid16, flags=0):
hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR))
- hdr.forward_ptr = llmemory.cast_int_to_adr((typeid << 16) | flags)
+ hdr.tid = self.combine(typeid16, flags)
- def init_gc_object_immortal(self, addr, typeid, flags=1):
+ def init_gc_object_immortal(self, addr, typeid16, flags=0):
hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR))
- flags |= GCFLAG_EXTERNAL
- hdr.forward_ptr = llmemory.cast_int_to_adr((typeid << 16) | flags)
+ hdr.tid = self.combine(typeid16, flags)
# XXX we can store forward_ptr to itself, if we fix C backend
# so that get_forwarding_address(obj) returns
# obj itself if obj is a prebuilt object
- def malloc_fixedsize_clear(self, typeid, size, can_collect,
+ def malloc_fixedsize_clear(self, typeid16, size, can_collect,
has_finalizer=False, contains_weakptr=False):
size_gc_header = self.gcheaderbuilder.size_gc_header
totalsize = size_gc_header + size
@@ -122,7 +120,7 @@
if raw_malloc_usage(totalsize) > self.top_of_space - result:
result = self.obtain_free_space(totalsize)
llarena.arena_reserve(result, totalsize)
- self.init_gc_object(result, typeid)
+ self.init_gc_object(result, typeid16)
self.free += totalsize
if has_finalizer:
self.objects_with_finalizers.append(result + size_gc_header)
@@ -130,7 +128,7 @@
self.objects_with_weakrefs.append(result + size_gc_header)
return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
- def malloc_varsize_clear(self, typeid, length, size, itemsize,
+ def malloc_varsize_clear(self, typeid16, length, size, itemsize,
offset_to_length, can_collect):
size_gc_header = self.gcheaderbuilder.size_gc_header
nonvarsize = size_gc_header + size
@@ -143,7 +141,7 @@
if raw_malloc_usage(totalsize) > self.top_of_space - result:
result = self.obtain_free_space(totalsize)
llarena.arena_reserve(result, totalsize)
- self.init_gc_object(result, typeid)
+ self.init_gc_object(result, typeid16)
(result + size_gc_header + offset_to_length).signed[0] = length
self.free = result + llarena.round_up_for_allocation(totalsize)
return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
@@ -319,11 +317,26 @@
self.objects_with_finalizers.delete()
self.objects_with_finalizers = objects_with_finalizers
- def get_tid(self, addr):
- return llmemory.cast_adr_to_int(self.header(addr).forward_ptr)
+ def header(self, addr):
+ # like header(), but asserts that we have a normal header
+ hdr = MovingGCBase.header(self, addr)
+ if not we_are_translated():
+ assert isinstance(hdr.tid, llgroup.CombinedSymbolic)
+ return hdr
+
+ def header_forwarded(self, addr):
+ # like header(), but asserts that we have a forwarding header
+ hdr = MovingGCBase.header(self, addr)
+ if not we_are_translated():
+ assert isinstance(hdr.tid, int)
+ return hdr
+
+ def combine(self, typeid16, flags):
+ return llop.combine_ushort(lltype.Signed, typeid16, flags)
def get_type_id(self, addr):
- return self.get_tid(addr) >> 16
+ tid = self.header(addr).tid
+ return llop.extract_ushort(rffi.USHORT, tid)
def mark_roots_recursively(self):
self.root_walker.walk_roots(
@@ -350,13 +363,13 @@
self.to_see.append(root.address[0])
def mark(self, obj):
- previous = self.get_tid(obj)
- self.header(obj).forward_ptr = llmemory.cast_int_to_adr(previous | GCFLAG_MARKBIT)
+ self.header(obj).tid |= GCFLAG_MARKBIT
def marked(self, obj):
- return self.get_tid(obj) & GCFLAG_MARKBIT
+ return self.header(obj).tid & GCFLAG_MARKBIT
def update_forward_pointers(self, toaddr, num_of_alive_objs):
+ self.base_forwarding_addr = toaddr
fromaddr = self.space
size_gc_header = self.gcheaderbuilder.size_gc_header
i = 0
@@ -366,8 +379,7 @@
objsize = self.get_size(obj)
totalsize = size_gc_header + objsize
if not self.marked(obj):
- self.set_forwarding_address(obj, NULL, i)
- hdr.forward_ptr = NULL
+ self.set_null_forwarding_address(obj, i)
else:
llarena.arena_reserve(toaddr, totalsize)
self.set_forwarding_address(obj, toaddr, i)
@@ -438,30 +450,44 @@
if pointer.address[0] != NULL:
pointer.address[0] = self.get_forwarding_address(pointer.address[0])
- def is_forwarded(self, addr):
- return self.header(addr).forward_ptr != NULL
-
def _is_external(self, obj):
- tid = self.get_tid(obj)
- return (tid & 1) and (tid & GCFLAG_EXTERNAL)
+ return not (self.space <= obj < self.top_of_space)
def get_forwarding_address(self, obj):
if self._is_external(obj):
return obj
- return self.header(obj).forward_ptr + self.size_gc_header()
+ return self.get_header_forwarded_addr(obj)
- def set_forwarding_address(self, obj, newaddr, num):
+ def set_null_forwarding_address(self, obj, num):
self.backup_typeid(num, obj)
- self.header(obj).forward_ptr = newaddr
+ hdr = self.header(obj)
+ hdr.tid = -1 # make the object forwarded to NULL
+
+ def set_forwarding_address(self, obj, newobjhdr, num):
+ self.backup_typeid(num, obj)
+ forward_offset = newobjhdr - self.base_forwarding_addr
+ hdr = self.header(obj)
+ hdr.tid = forward_offset # make the object forwarded to newobj
+
+ def restore_normal_header(self, obj, num):
+ # Reverse of set_forwarding_address().
+ typeid16 = self.get_typeid_from_backup(num)
+ hdr = self.header_forwarded(obj)
+ hdr.tid = self.combine(typeid16, 0) # restore the normal header
+
+ def get_header_forwarded_addr(self, obj):
+ return (self.base_forwarding_addr +
+ self.header_forwarded(obj).tid +
+ self.gcheaderbuilder.size_gc_header)
def surviving(self, obj):
- return self._is_external(obj) or self.header(obj).forward_ptr != NULL
+ return self._is_external(obj) or self.header_forwarded(obj).tid != -1
def backup_typeid(self, num, obj):
- self.tid_backup[num] = rffi.cast(rffi.USHORT, self.get_type_id(obj))
+ self.tid_backup[num] = self.get_type_id(obj)
def get_typeid_from_backup(self, num):
- return rffi.cast(lltype.Signed, self.tid_backup[num])
+ return self.tid_backup[num]
def get_size_from_backup(self, obj, num):
typeid = self.get_typeid_from_backup(num)
@@ -484,7 +510,6 @@
num = 0
while fromaddr < self.free:
obj = fromaddr + size_gc_header
- hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
objsize = self.get_size_from_backup(obj, num)
totalsize = size_gc_header + objsize
if not self.surviving(obj):
@@ -492,16 +517,16 @@
# we clear it to make debugging easier
llarena.arena_reset(fromaddr, totalsize, False)
else:
- ll_assert(self.is_forwarded(obj), "not forwarded, surviving obj")
- forward_ptr = hdr.forward_ptr
if resizing:
end = fromaddr
- val = (self.get_typeid_from_backup(num) << 16) + 1
- hdr.forward_ptr = llmemory.cast_int_to_adr(val)
- if fromaddr != forward_ptr:
+ forward_obj = self.get_header_forwarded_addr(obj)
+ self.restore_normal_header(obj, num)
+ if obj != forward_obj:
#llop.debug_print(lltype.Void, "Copying from to",
# fromaddr, forward_ptr, totalsize)
- llmemory.raw_memmove(fromaddr, forward_ptr, totalsize)
+ llmemory.raw_memmove(fromaddr,
+ forward_obj - size_gc_header,
+ totalsize)
if resizing and end - start > GC_CLEARANCE:
diff = end - start
#llop.debug_print(lltype.Void, "Cleaning", start, diff)
Modified: pypy/trunk/pypy/rpython/memory/gc/marksweep.py
==============================================================================
--- pypy/trunk/pypy/rpython/memory/gc/marksweep.py (original)
+++ pypy/trunk/pypy/rpython/memory/gc/marksweep.py Sun Oct 11 16:32:27 2009
@@ -4,7 +4,7 @@
from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
from pypy.rpython.memory.support import get_address_stack
from pypy.rpython.memory.gcheader import GCHeaderBuilder
-from pypy.rpython.lltypesystem import lltype, llmemory
+from pypy.rpython.lltypesystem import lltype, llmemory, rffi
from pypy.rlib.objectmodel import free_non_gc_object
from pypy.rpython.lltypesystem.lloperation import llop
from pypy.rlib.rarithmetic import ovfcheck
@@ -25,8 +25,11 @@
HDRPTR = lltype.Ptr(HDR)
# need to maintain a linked list of malloced objects, since we used the
# systems allocator and can't walk the heap
- HDR.become(lltype.Struct('header', ('typeid', lltype.Signed),
+ HDR.become(lltype.Struct('header', ('typeid16', rffi.USHORT),
+ ('mark', lltype.Bool),
+ ('curpool_flag', lltype.Bool),
('next', HDRPTR)))
+ typeid_is_in_field = 'typeid16'
POOL = lltype.GcStruct('gc_pool')
POOLPTR = lltype.Ptr(POOL)
@@ -75,14 +78,14 @@
if self.bytes_malloced > self.bytes_malloced_threshold:
self.collect()
- def write_malloc_statistics(self, typeid, size, result, varsize):
+ def write_malloc_statistics(self, typeid16, size, result, varsize):
pass
- def write_free_statistics(self, typeid, result):
+ def write_free_statistics(self, typeid16, result):
pass
- def malloc_fixedsize(self, typeid, size, can_collect, has_finalizer=False,
- contains_weakptr=False):
+ def malloc_fixedsize(self, typeid16, size, can_collect,
+ has_finalizer=False, contains_weakptr=False):
if can_collect:
self.maybe_collect()
size_gc_header = self.gcheaderbuilder.size_gc_header
@@ -97,7 +100,9 @@
if not result:
raise memoryError
hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
- hdr.typeid = typeid << 1
+ hdr.typeid16 = typeid16
+ hdr.mark = False
+ hdr.curpool_flag = False
if has_finalizer:
hdr.next = self.malloced_objects_with_finalizer
self.malloced_objects_with_finalizer = hdr
@@ -109,13 +114,13 @@
self.malloced_objects = hdr
self.bytes_malloced = bytes_malloced
result += size_gc_header
- #llop.debug_print(lltype.Void, 'malloc typeid', typeid,
+ #llop.debug_print(lltype.Void, 'malloc typeid', typeid16,
# '->', llmemory.cast_adr_to_int(result))
- self.write_malloc_statistics(typeid, tot_size, result, False)
+ self.write_malloc_statistics(typeid16, tot_size, result, False)
return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
malloc_fixedsize._dont_inline_ = True
- def malloc_fixedsize_clear(self, typeid, size, can_collect,
+ def malloc_fixedsize_clear(self, typeid16, size, can_collect,
has_finalizer=False, contains_weakptr=False):
if can_collect:
self.maybe_collect()
@@ -132,7 +137,9 @@
raise memoryError
raw_memclear(result, tot_size)
hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
- hdr.typeid = typeid << 1
+ hdr.typeid16 = typeid16
+ hdr.mark = False
+ hdr.curpool_flag = False
if has_finalizer:
hdr.next = self.malloced_objects_with_finalizer
self.malloced_objects_with_finalizer = hdr
@@ -144,14 +151,14 @@
self.malloced_objects = hdr
self.bytes_malloced = bytes_malloced
result += size_gc_header
- #llop.debug_print(lltype.Void, 'malloc typeid', typeid,
+ #llop.debug_print(lltype.Void, 'malloc typeid', typeid16,
# '->', llmemory.cast_adr_to_int(result))
- self.write_malloc_statistics(typeid, tot_size, result, False)
+ self.write_malloc_statistics(typeid16, tot_size, result, False)
return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
malloc_fixedsize_clear._dont_inline_ = True
- def malloc_varsize(self, typeid, length, size, itemsize, offset_to_length,
- can_collect):
+ def malloc_varsize(self, typeid16, length, size, itemsize,
+ offset_to_length, can_collect):
if can_collect:
self.maybe_collect()
size_gc_header = self.gcheaderbuilder.size_gc_header
@@ -169,20 +176,22 @@
raise memoryError
(result + size_gc_header + offset_to_length).signed[0] = length
hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
- hdr.typeid = typeid << 1
+ hdr.typeid16 = typeid16
+ hdr.mark = False
+ hdr.curpool_flag = False
hdr.next = self.malloced_objects
self.malloced_objects = hdr
self.bytes_malloced = bytes_malloced
result += size_gc_header
#llop.debug_print(lltype.Void, 'malloc_varsize length', length,
- # 'typeid', typeid,
+ # 'typeid', typeid16,
# '->', llmemory.cast_adr_to_int(result))
- self.write_malloc_statistics(typeid, tot_size, result, True)
+ self.write_malloc_statistics(typeid16, tot_size, result, True)
return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
malloc_varsize._dont_inline_ = True
- def malloc_varsize_clear(self, typeid, length, size, itemsize,
+ def malloc_varsize_clear(self, typeid16, length, size, itemsize,
offset_to_length, can_collect):
if can_collect:
self.maybe_collect()
@@ -202,16 +211,18 @@
raw_memclear(result, tot_size)
(result + size_gc_header + offset_to_length).signed[0] = length
hdr = llmemory.cast_adr_to_ptr(result, self.HDRPTR)
- hdr.typeid = typeid << 1
+ hdr.typeid16 = typeid16
+ hdr.mark = False
+ hdr.curpool_flag = False
hdr.next = self.malloced_objects
self.malloced_objects = hdr
self.bytes_malloced = bytes_malloced
result += size_gc_header
#llop.debug_print(lltype.Void, 'malloc_varsize length', length,
- # 'typeid', typeid,
+ # 'typeid', typeid16,
# '->', llmemory.cast_adr_to_int(result))
- self.write_malloc_statistics(typeid, tot_size, result, True)
+ self.write_malloc_statistics(typeid16, tot_size, result, True)
return llmemory.cast_adr_to_ptr(result, llmemory.GCREF)
malloc_varsize_clear._dont_inline_ = True
@@ -251,10 +262,10 @@
hdr = self.malloced_objects_with_finalizer
while hdr:
next = hdr.next
- typeid = hdr.typeid >> 1
+ typeid = hdr.typeid16
gc_info = llmemory.cast_ptr_to_adr(hdr)
obj = gc_info + size_gc_header
- if not hdr.typeid & 1:
+ if not hdr.mark:
self.add_reachable_to_stack(obj, objects)
addr = llmemory.cast_ptr_to_adr(hdr)
size = self.fixed_size(typeid)
@@ -271,31 +282,30 @@
curr = objects.pop()
gc_info = curr - size_gc_header
hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
- if hdr.typeid & 1:
+ if hdr.mark:
continue
self.add_reachable_to_stack(curr, objects)
- hdr.typeid = hdr.typeid | 1
+ hdr.mark = True
objects.delete()
# also mark self.curpool
if self.curpool:
gc_info = llmemory.cast_ptr_to_adr(self.curpool) - size_gc_header
hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
- hdr.typeid = hdr.typeid | 1
+ hdr.mark = True
# go through the list of objects containing weak pointers
# and kill the links if they go to dead objects
# if the object itself is not marked, free it
hdr = self.objects_with_weak_pointers
surviving = lltype.nullptr(self.HDR)
while hdr:
- typeid = hdr.typeid >> 1
+ typeid = hdr.typeid16
next = hdr.next
addr = llmemory.cast_ptr_to_adr(hdr)
size = self.fixed_size(typeid)
estimate = raw_malloc_usage(size_gc_header + size)
- if hdr.typeid & 1:
- typeid = hdr.typeid >> 1
+ if hdr.mark:
offset = self.weakpointer_offset(typeid)
- hdr.typeid = hdr.typeid & (~1)
+ hdr.mark = False
gc_info = llmemory.cast_ptr_to_adr(hdr)
weakref_obj = gc_info + size_gc_header
pointing_to = (weakref_obj + offset).address[0]
@@ -306,7 +316,7 @@
# pointed to object will die
# XXX what to do if the object has a finalizer which resurrects
# the object?
- if not hdr_pointing_to.typeid & 1:
+ if not hdr_pointing_to.mark:
(weakref_obj + offset).address[0] = NULL
hdr.next = surviving
surviving = hdr
@@ -331,7 +341,7 @@
ppnext += llmemory.offsetof(self.POOLNODE, 'linkedlist')
hdr = poolnode.linkedlist
while hdr: #sweep
- typeid = hdr.typeid >> 1
+ typeid = hdr.typeid16
next = hdr.next
addr = llmemory.cast_ptr_to_adr(hdr)
size = self.fixed_size(typeid)
@@ -339,8 +349,8 @@
length = (addr + size_gc_header + self.varsize_offset_to_length(typeid)).signed[0]
size += self.varsize_item_sizes(typeid) * length
estimate = raw_malloc_usage(size_gc_header + size)
- if hdr.typeid & 1:
- hdr.typeid = hdr.typeid & (~1)
+ if hdr.mark:
+ hdr.mark = False
ppnext.address[0] = addr
ppnext = llmemory.cast_ptr_to_adr(hdr)
ppnext += llmemory.offsetof(self.HDR, 'next')
@@ -423,17 +433,17 @@
last = lltype.nullptr(self.HDR)
while hdr:
next = hdr.next
- if hdr.typeid & 1:
+ if hdr.mark:
hdr.next = lltype.nullptr(self.HDR)
if not self.malloced_objects_with_finalizer:
self.malloced_objects_with_finalizer = hdr
else:
last.next = hdr
- hdr.typeid = hdr.typeid & (~1)
+ hdr.mark = False
last = hdr
else:
obj = llmemory.cast_ptr_to_adr(hdr) + size_gc_header
- finalizer = self.getfinalizer(hdr.typeid >> 1)
+ finalizer = self.getfinalizer(hdr.typeid16)
# make malloced_objects_with_finalizer consistent
# for the sake of a possible collection caused by finalizer
if not self.malloced_objects_with_finalizer:
@@ -473,7 +483,7 @@
size_gc_header = self.gcheaderbuilder.size_gc_header
gc_info = gcobjectaddr - size_gc_header
hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
- hdr.typeid = hdr.typeid & (~1)
+ hdr.mark = False
STAT_HEAP_USAGE = 0
STAT_BYTES_MALLOCED = 1
@@ -483,7 +493,7 @@
size_gc_header = self.gcheaderbuilder.size_gc_header
gc_info = obj - size_gc_header
hdr = llmemory.cast_adr_to_ptr(gc_info, self.HDRPTR)
- return hdr.typeid >> 1
+ return hdr.typeid16
def add_reachable_to_stack(self, obj, objects):
self.trace(obj, self._add_reachable, objects)
@@ -504,13 +514,17 @@
def init_gc_object(self, addr, typeid):
hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
- hdr.typeid = typeid << 1
+ hdr.typeid16 = typeid
+ hdr.mark = False
+ hdr.curpool_flag = False
def init_gc_object_immortal(self, addr, typeid, flags=0):
# prebuilt gc structures always have the mark bit set
# ignore flags
hdr = llmemory.cast_adr_to_ptr(addr, self.HDRPTR)
- hdr.typeid = (typeid << 1) | 1
+ hdr.typeid16 = typeid
+ hdr.mark = True
+ hdr.curpool_flag = False
# experimental support for thread cloning
def x_swap_pool(self, newpool):
@@ -566,7 +580,6 @@
# in the specified pool. A new pool is built to contain the
# copies, and the 'gcobjectptr' and 'pool' fields of clonedata
# are adjusted to refer to the result.
- CURPOOL_FLAG = sys.maxint // 2 + 1
# install a new pool into which all the mallocs go
curpool = self.x_swap_pool(lltype.nullptr(X_POOL))
@@ -583,7 +596,7 @@
hdr = hdr.next # skip the POOL object itself
while hdr:
next = hdr.next
- hdr.typeid |= CURPOOL_FLAG # mark all objects from malloced_list
+ hdr.curpool_flag = True # mark all objects from malloced_list
hdr.next = lltype.nullptr(self.HDR) # abused to point to the copy
oldobjects.append(llmemory.cast_ptr_to_adr(hdr))
hdr = next
@@ -600,12 +613,11 @@
continue # pointer is NULL
oldhdr = llmemory.cast_adr_to_ptr(oldobj_addr - size_gc_header,
self.HDRPTR)
- typeid = oldhdr.typeid
- if not (typeid & CURPOOL_FLAG):
+ if not oldhdr.curpool_flag:
continue # ignore objects that were not in the malloced_list
newhdr = oldhdr.next # abused to point to the copy
if not newhdr:
- typeid = (typeid & ~CURPOOL_FLAG) >> 1
+ typeid = oldhdr.typeid16
size = self.fixed_size(typeid)
# XXX! collect() at the beginning if the free heap is low
if self.is_varsize(typeid):
@@ -631,11 +643,15 @@
newhdr_addr = newobj_addr - size_gc_header
newhdr = llmemory.cast_adr_to_ptr(newhdr_addr, self.HDRPTR)
- saved_id = newhdr.typeid # XXX hack needed for genc
+ saved_id = newhdr.typeid16 # XXX hack needed for genc
+ saved_flg1 = newhdr.mark
+ saved_flg2 = newhdr.curpool_flag
saved_next = newhdr.next # where size_gc_header == 0
raw_memcopy(oldobj_addr, newobj_addr, size)
- newhdr.typeid = saved_id
- newhdr.next = saved_next
+ newhdr.typeid16 = saved_id
+ newhdr.mark = saved_flg1
+ newhdr.curpool_flag = saved_flg2
+ newhdr.next = saved_next
offsets = self.offsets_to_gc_pointers(typeid)
i = 0
@@ -669,7 +685,7 @@
next = lltype.nullptr(self.HDR)
while oldobjects.non_empty():
hdr = llmemory.cast_adr_to_ptr(oldobjects.pop(), self.HDRPTR)
- hdr.typeid &= ~CURPOOL_FLAG # reset the flag
+ hdr.curpool_flag = False # reset the flag
hdr.next = next
next = hdr
oldobjects.delete()
Modified: pypy/trunk/pypy/rpython/memory/gc/semispace.py
==============================================================================
--- pypy/trunk/pypy/rpython/memory/gc/semispace.py (original)
+++ pypy/trunk/pypy/rpython/memory/gc/semispace.py Sun Oct 11 16:32:27 2009
@@ -4,7 +4,7 @@
from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
from pypy.rpython.memory.support import get_address_stack, get_address_deque
from pypy.rpython.memory.support import AddressDict
-from pypy.rpython.lltypesystem import lltype, llmemory, llarena
+from pypy.rpython.lltypesystem import lltype, llmemory, llarena, rffi
from pypy.rlib.objectmodel import free_non_gc_object
from pypy.rlib.debug import ll_assert
from pypy.rpython.lltypesystem.lloperation import llop
@@ -13,7 +13,6 @@
import sys, os, time
-TYPEID_MASK = 0xffff
first_gcflag = 1 << 16
GCFLAG_FORWARDED = first_gcflag
# GCFLAG_EXTERNAL is set on objects not living in the semispace:
@@ -23,6 +22,7 @@
memoryError = MemoryError()
+
class SemiSpaceGC(MovingGCBase):
_alloc_flavor_ = "raw"
inline_simple_malloc = True
@@ -32,11 +32,14 @@
total_collection_time = 0.0
total_collection_count = 0
- HDR = lltype.Struct('header', ('tid', lltype.Signed))
+ HDR = lltype.Struct('header', ('tid', lltype.Signed)) # XXX or rffi.INT?
+ typeid_is_in_field = 'tid'
FORWARDSTUB = lltype.GcStruct('forwarding_stub',
('forw', llmemory.Address))
FORWARDSTUBPTR = lltype.Ptr(FORWARDSTUB)
+ object_minimal_size = llmemory.sizeof(FORWARDSTUB)
+
# the following values override the default arguments of __init__ when
# translating to a real backend.
TRANSLATION_PARAMS = {'space_size': 8*1024*1024} # XXX adjust
@@ -64,7 +67,7 @@
# This class only defines the malloc_{fixed,var}size_clear() methods
# because the spaces are filled with zeroes in advance.
- def malloc_fixedsize_clear(self, typeid, size, can_collect,
+ def malloc_fixedsize_clear(self, typeid16, size, can_collect,
has_finalizer=False, contains_weakptr=False):
size_gc_header = self.gcheaderbuilder.size_gc_header
totalsize = size_gc_header + size
@@ -74,7 +77,7 @@
raise memoryError
result = self.obtain_free_space(totalsize)
llarena.arena_reserve(result, totalsize)
- self.init_gc_object(result, typeid)
+ self.init_gc_object(result, typeid16)
self.free = result + totalsize
if has_finalizer:
self.objects_with_finalizers.append(result + size_gc_header)
@@ -82,7 +85,7 @@
self.objects_with_weakrefs.append(result + size_gc_header)
return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
- def malloc_varsize_clear(self, typeid, length, size, itemsize,
+ def malloc_varsize_clear(self, typeid16, length, size, itemsize,
offset_to_length, can_collect):
size_gc_header = self.gcheaderbuilder.size_gc_header
nonvarsize = size_gc_header + size
@@ -97,7 +100,7 @@
raise memoryError
result = self.obtain_free_space(totalsize)
llarena.arena_reserve(result, totalsize)
- self.init_gc_object(result, typeid)
+ self.init_gc_object(result, typeid16)
(result + size_gc_header + offset_to_length).signed[0] = length
self.free = result + llarena.round_up_for_allocation(totalsize)
return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
@@ -385,6 +388,9 @@
stub = llmemory.cast_adr_to_ptr(obj, self.FORWARDSTUBPTR)
stub.forw = newobj
+ def combine(self, typeid16, flags):
+ return llop.combine_ushort(lltype.Signed, typeid16, flags)
+
def get_type_id(self, addr):
tid = self.header(addr).tid
ll_assert(tid & (GCFLAG_FORWARDED|GCFLAG_EXTERNAL) != GCFLAG_FORWARDED,
@@ -393,15 +399,16 @@
# Although calling get_type_id() on a forwarded object works by itself,
# we catch it as an error because it's likely that what is then
# done with the typeid is bogus.
- return tid & TYPEID_MASK
+ return llop.extract_ushort(rffi.USHORT, tid)
- def init_gc_object(self, addr, typeid, flags=0):
+ def init_gc_object(self, addr, typeid16, flags=0):
hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR))
- hdr.tid = typeid | flags
+ hdr.tid = self.combine(typeid16, flags)
- def init_gc_object_immortal(self, addr, typeid, flags=0):
+ def init_gc_object_immortal(self, addr, typeid16, flags=0):
hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR))
- hdr.tid = typeid | flags | GCFLAG_EXTERNAL | GCFLAG_FORWARDED
+ flags |= GCFLAG_EXTERNAL | GCFLAG_FORWARDED
+ hdr.tid = self.combine(typeid16, flags)
# immortal objects always have GCFLAG_FORWARDED set;
# see get_forwarding_address().
Modified: pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py
==============================================================================
--- pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py (original)
+++ pypy/trunk/pypy/rpython/memory/gc/test/test_direct.py Sun Oct 11 16:32:27 2009
@@ -68,7 +68,7 @@
self.gc.DEBUG = True
self.rootwalker = DirectRootWalker(self)
self.gc.set_root_walker(self.rootwalker)
- self.layoutbuilder = TypeLayoutBuilder()
+ self.layoutbuilder = TypeLayoutBuilder(self.GCClass, {})
self.get_type_id = self.layoutbuilder.get_type_id
self.layoutbuilder.initialize_gc_query_function(self.gc)
self.gc.setup()
Modified: pypy/trunk/pypy/rpython/memory/gctransform/framework.py
==============================================================================
--- pypy/trunk/pypy/rpython/memory/gctransform/framework.py (original)
+++ pypy/trunk/pypy/rpython/memory/gctransform/framework.py Sun Oct 11 16:32:27 2009
@@ -1,7 +1,7 @@
from pypy.rpython.memory.gctransform.transform import GCTransformer
from pypy.rpython.memory.gctransform.support import find_gc_ptrs_in_type, \
get_rtti, ll_call_destructor, type_contains_pyobjs, var_ispyobj
-from pypy.rpython.lltypesystem import lltype, llmemory
+from pypy.rpython.lltypesystem import lltype, llmemory, rffi
from pypy.rpython import rmodel
from pypy.rpython.memory import gctypelayout
from pypy.rpython.memory.gc import marksweep
@@ -22,6 +22,9 @@
import sys, types
+TYPE_ID = rffi.USHORT
+
+
class CollectAnalyzer(graphanalyze.BoolGraphAnalyzer):
def analyze_direct_call(self, graph, seen=None):
@@ -127,15 +130,18 @@
if hasattr(translator, '_jit2gc'):
self.layoutbuilder = translator._jit2gc['layoutbuilder']
else:
- self.layoutbuilder = TransformerLayoutBuilder()
+ if translator.config.translation.gcconfig.removetypeptr:
+ lltype2vtable = translator.rtyper.lltype2vtable
+ else:
+ lltype2vtable = {}
+ self.layoutbuilder = TransformerLayoutBuilder(GCClass,
+ lltype2vtable)
self.layoutbuilder.transformer = self
self.get_type_id = self.layoutbuilder.get_type_id
- # set up dummy a table, to be overwritten with the real one in finish()
- type_info_table = lltype._ptr(
- lltype.Ptr(gctypelayout.GCData.TYPE_INFO_TABLE),
- "delayed!type_info_table", solid=True)
- gcdata = gctypelayout.GCData(type_info_table)
+ # set up GCData with the llgroup from the layoutbuilder, which
+ # will grow as more TYPE_INFO members are added to it
+ gcdata = gctypelayout.GCData(self.layoutbuilder.type_info_group)
# initialize the following two fields with a random non-NULL address,
# to make the annotator happy. The fields are patched in finish()
@@ -163,6 +169,8 @@
gcdata.gc.setup()
bk = self.translator.annotator.bookkeeper
+ r_typeid16 = rffi.platform.numbertype_to_rclass[TYPE_ID]
+ s_typeid16 = annmodel.SomeInteger(knowntype=r_typeid16)
# the point of this little dance is to not annotate
# self.gcdata.static_root_xyz as constants. XXX is it still needed??
@@ -212,7 +220,7 @@
malloc_fixedsize_clear_meth = GCClass.malloc_fixedsize_clear.im_func
self.malloc_fixedsize_clear_ptr = getfn(
malloc_fixedsize_clear_meth,
- [s_gc, annmodel.SomeInteger(nonneg=True),
+ [s_gc, s_typeid16,
annmodel.SomeInteger(nonneg=True),
annmodel.SomeBool(), annmodel.SomeBool(),
annmodel.SomeBool()], s_gcref,
@@ -221,7 +229,7 @@
malloc_fixedsize_meth = GCClass.malloc_fixedsize.im_func
self.malloc_fixedsize_ptr = getfn(
malloc_fixedsize_meth,
- [s_gc, annmodel.SomeInteger(nonneg=True),
+ [s_gc, s_typeid16,
annmodel.SomeInteger(nonneg=True),
annmodel.SomeBool(), annmodel.SomeBool(),
annmodel.SomeBool()], s_gcref,
@@ -235,7 +243,8 @@
## + [annmodel.SomeBool(), annmodel.SomeBool()], s_gcref)
self.malloc_varsize_clear_ptr = getfn(
GCClass.malloc_varsize_clear.im_func,
- [s_gc] + [annmodel.SomeInteger(nonneg=True) for i in range(5)]
+ [s_gc, s_typeid16]
+ + [annmodel.SomeInteger(nonneg=True) for i in range(4)]
+ [annmodel.SomeBool()], s_gcref)
self.collect_ptr = getfn(GCClass.collect.im_func,
[s_gc, annmodel.SomeInteger()], annmodel.s_None)
@@ -268,7 +277,7 @@
s_True = annmodel.SomeBool(); s_True .const = True
self.malloc_fast_ptr = getfn(
malloc_fast,
- [s_gc, annmodel.SomeInteger(nonneg=True),
+ [s_gc, s_typeid16,
annmodel.SomeInteger(nonneg=True),
s_True, s_False,
s_False], s_gcref,
@@ -288,7 +297,7 @@
s_True = annmodel.SomeBool(); s_True .const = True
self.malloc_varsize_clear_fast_ptr = getfn(
malloc_varsize_clear_fast,
- [s_gc, annmodel.SomeInteger(nonneg=True),
+ [s_gc, s_typeid16,
annmodel.SomeInteger(nonneg=True),
annmodel.SomeInteger(nonneg=True),
annmodel.SomeInteger(nonneg=True),
@@ -304,7 +313,7 @@
"malloc_varsize_nonmovable")
self.malloc_varsize_nonmovable_ptr = getfn(
malloc_nonmovable,
- [s_gc, annmodel.SomeInteger(nonneg=True),
+ [s_gc, s_typeid16,
annmodel.SomeInteger(nonneg=True)], s_gcref)
else:
self.malloc_varsize_nonmovable_ptr = None
@@ -315,7 +324,7 @@
"malloc_varsize_resizable")
self.malloc_varsize_resizable_ptr = getfn(
malloc_resizable,
- [s_gc, annmodel.SomeInteger(nonneg=True),
+ [s_gc, s_typeid16,
annmodel.SomeInteger(nonneg=True)], s_gcref)
else:
self.malloc_varsize_resizable_ptr = None
@@ -399,6 +408,14 @@
FLDTYPE = getattr(HDR, fldname)
fields.append(('_' + fldname, FLDTYPE))
+ size_gc_header = self.gcdata.gc.gcheaderbuilder.size_gc_header
+ vtableinfo = (HDR, size_gc_header, self.gcdata.gc.typeid_is_in_field)
+ self.c_vtableinfo = rmodel.inputconst(lltype.Void, vtableinfo)
+ tig = self.layoutbuilder.type_info_group._as_ptr()
+ self.c_type_info_group = rmodel.inputconst(lltype.typeOf(tig), tig)
+ sko = llmemory.sizeof(gcdata.TYPE_INFO)
+ self.c_vtinfo_skip_offset = rmodel.inputconst(lltype.typeOf(sko), sko)
+
def build_root_walker(self):
return ShadowStackRootWalker(self)
@@ -421,21 +438,14 @@
return [getattr(hdr, fldname) for fldname in HDR._names]
def finish_tables(self):
- table = self.layoutbuilder.flatten_table()
- log.info("assigned %s typeids" % (len(table), ))
+ group = self.layoutbuilder.close_table()
+ log.info("assigned %s typeids" % (len(group.members), ))
log.info("added %s push/pop stack root instructions" % (
self.num_pushs, ))
if self.write_barrier_ptr:
log.info("inserted %s write barrier calls" % (
self.write_barrier_calls, ))
- # replace the type_info_table pointer in gcdata -- at this point,
- # the database is in principle complete, so it has already seen
- # the delayed pointer. We need to force it to consider the new
- # array now.
-
- self.gcdata.type_info_table._become(table)
-
# XXX because we call inputconst already in replace_malloc, we can't
# modify the instance, we have to modify the 'rtyped instance'
# instead. horrors. is there a better way?
@@ -463,16 +473,24 @@
self.write_typeid_list()
return newgcdependencies
+ def get_final_dependencies(self):
+ # returns an iterator enumerating the type_info_group's members,
+ # to make sure that they are all followed (only a part of them
+ # might have been followed by a previous enum_dependencies()).
+ return iter(self.layoutbuilder.type_info_group.members)
+
def write_typeid_list(self):
"""write out the list of type ids together with some info"""
from pypy.tool.udir import udir
# XXX not ideal since it is not per compilation, but per run
+ # XXX argh argh, this only gives the member index but not the
+ # real typeid, which is a complete mess to obtain now...
+ all_ids = self.layoutbuilder.id_of_type.items()
+ all_ids = [(typeinfo.index, TYPE) for (TYPE, typeinfo) in all_ids]
+ all_ids = dict(all_ids)
f = udir.join("typeids.txt").open("w")
- all = [(typeid, TYPE)
- for TYPE, typeid in self.layoutbuilder.id_of_type.iteritems()]
- all.sort()
- for typeid, TYPE in all:
- f.write("%s %s\n" % (typeid, TYPE))
+ for index in range(len(self.layoutbuilder.type_info_group.members)):
+ f.write("member%-4d %s\n" % (index, all_ids.get(index, '?')))
f.close()
def transform_graph(self, graph):
@@ -502,8 +520,8 @@
assert PTRTYPE.TO == TYPE
type_id = self.get_type_id(TYPE)
- c_type_id = rmodel.inputconst(lltype.Signed, type_id)
- info = self.layoutbuilder.type_info_list[type_id]
+ c_type_id = rmodel.inputconst(TYPE_ID, type_id)
+ info = self.layoutbuilder.get_info(type_id)
c_size = rmodel.inputconst(lltype.Signed, info.fixedsize)
has_finalizer = bool(self.finalizer_funcptr_for_type(TYPE))
c_has_finalizer = rmodel.inputconst(lltype.Bool, has_finalizer)
@@ -523,9 +541,12 @@
c_has_finalizer, rmodel.inputconst(lltype.Bool, False)]
else:
assert not c_has_finalizer.value
+ info_varsize = self.layoutbuilder.get_info_varsize(type_id)
v_length = op.args[-1]
- c_ofstolength = rmodel.inputconst(lltype.Signed, info.ofstolength)
- c_varitemsize = rmodel.inputconst(lltype.Signed, info.varitemsize)
+ c_ofstolength = rmodel.inputconst(lltype.Signed,
+ info_varsize.ofstolength)
+ c_varitemsize = rmodel.inputconst(lltype.Signed,
+ info_varsize.varitemsize)
if flags.get('resizable') and self.malloc_varsize_resizable_ptr:
assert c_can_collect.value
malloc_ptr = self.malloc_varsize_resizable_ptr
@@ -656,8 +677,8 @@
type_id = self.get_type_id(WEAKREF)
- c_type_id = rmodel.inputconst(lltype.Signed, type_id)
- info = self.layoutbuilder.type_info_list[type_id]
+ c_type_id = rmodel.inputconst(TYPE_ID, type_id)
+ info = self.layoutbuilder.get_info(type_id)
c_size = rmodel.inputconst(lltype.Signed, info.fixedsize)
malloc_ptr = self.malloc_fixedsize_ptr
c_has_finalizer = rmodel.inputconst(lltype.Bool, False)
@@ -763,6 +784,48 @@
v_structaddr])
hop.rename('bare_' + opname)
+ def transform_getfield_typeptr(self, hop):
+ # this would become quite a lot of operations, even if it compiles
+ # to C code that is just as efficient as "obj->typeptr". To avoid
+ # that, we just generate a single custom operation instead.
+ hop.genop('gc_gettypeptr_group', [hop.spaceop.args[0],
+ self.c_type_info_group,
+ self.c_vtinfo_skip_offset,
+ self.c_vtableinfo],
+ resultvar = hop.spaceop.result)
+
+ def transform_setfield_typeptr(self, hop):
+ # replace such a setfield with an assertion that the typeptr is right
+ # (xxx not very useful right now, so disabled)
+ if 0:
+ v_new = hop.spaceop.args[2]
+ v_old = hop.genop('gc_gettypeptr_group', [hop.spaceop.args[0],
+ self.c_type_info_group,
+ self.c_vtinfo_skip_offset,
+ self.c_vtableinfo],
+ resulttype = v_new.concretetype)
+ v_eq = hop.genop("ptr_eq", [v_old, v_new],
+ resulttype = lltype.Bool)
+ c_errmsg = rmodel.inputconst(lltype.Void,
+ "setfield_typeptr: wrong type")
+ hop.genop('debug_assert', [v_eq, c_errmsg])
+
+ def gct_getfield(self, hop):
+ if (hop.spaceop.args[1].value == 'typeptr' and
+ hop.spaceop.args[0].concretetype.TO._hints.get('typeptr') and
+ self.translator.config.translation.gcconfig.removetypeptr):
+ self.transform_getfield_typeptr(hop)
+ else:
+ GCTransformer.gct_getfield(self, hop)
+
+ def gct_setfield(self, hop):
+ if (hop.spaceop.args[1].value == 'typeptr' and
+ hop.spaceop.args[0].concretetype.TO._hints.get('typeptr') and
+ self.translator.config.translation.gcconfig.removetypeptr):
+ self.transform_setfield_typeptr(hop)
+ else:
+ GCTransformer.gct_setfield(self, hop)
+
def var_needs_set_transform(self, var):
return var_needsgc(var)
@@ -852,6 +915,15 @@
return fptr
+class JITTransformerLayoutBuilder(TransformerLayoutBuilder):
+ # for the JIT: currently does not support removetypeptr
+ def __init__(self, config):
+ from pypy.rpython.memory.gc.base import choose_gc_from_config
+ assert not config.translation.gcconfig.removetypeptr
+ GCClass, _ = choose_gc_from_config(config)
+ TransformerLayoutBuilder.__init__(self, GCClass, {})
+
+
def gen_zero_gc_pointers(TYPE, v, llops, previous_steps=None):
if previous_steps is None:
previous_steps = []
Modified: pypy/trunk/pypy/rpython/memory/gctransform/transform.py
==============================================================================
--- pypy/trunk/pypy/rpython/memory/gctransform/transform.py (original)
+++ pypy/trunk/pypy/rpython/memory/gctransform/transform.py Sun Oct 11 16:32:27 2009
@@ -10,6 +10,7 @@
from pypy.translator.backendopt import graphanalyze
from pypy.translator.backendopt.canraise import RaiseAnalyzer
from pypy.translator.backendopt.ssa import DataFlowFamilyBuilder
+from pypy.translator.backendopt.constfold import constant_fold_graph
from pypy.annotation import model as annmodel
from pypy.rpython import rmodel, annlowlevel
from pypy.rpython.memory import gc
@@ -144,16 +145,20 @@
if self.inline:
raise_analyzer = RaiseAnalyzer(self.translator)
to_enum = self.graph_dependencies.get(graph, self.graphs_to_inline)
+ must_constfold = False
for inline_graph in to_enum:
try:
inline.inline_function(self.translator, inline_graph, graph,
self.lltype_to_classdef,
raise_analyzer,
cleanup=False)
+ must_constfold = True
except inline.CannotInline, e:
print 'CANNOT INLINE:', e
print '\t%s into %s' % (inline_graph, graph)
cleanup_graph(graph)
+ if must_constfold:
+ constant_fold_graph(graph)
def compute_borrowed_vars(self, graph):
# the input args are borrowed, and stay borrowed for as long as they
@@ -307,6 +312,9 @@
newgcdependencies = self.ll_finalizers_ptrs
return newgcdependencies
+ def get_final_dependencies(self):
+ pass
+
def finish_tables(self):
pass
@@ -367,6 +375,8 @@
gct_setarrayitem = gct_setfield
gct_setinteriorfield = gct_setfield
+ gct_getfield = default
+
def gct_zero_gc_pointers_inside(self, hop):
pass
Modified: pypy/trunk/pypy/rpython/memory/gctypelayout.py
==============================================================================
--- pypy/trunk/pypy/rpython/memory/gctypelayout.py (original)
+++ pypy/trunk/pypy/rpython/memory/gctypelayout.py Sun Oct 11 16:32:27 2009
@@ -1,4 +1,6 @@
-from pypy.rpython.lltypesystem import lltype, llmemory, llarena
+from pypy.rpython.lltypesystem import lltype, llmemory, llarena, llgroup
+from pypy.rpython.lltypesystem.lloperation import llop
+from pypy.rlib.objectmodel import we_are_translated
from pypy.rlib.debug import ll_assert
@@ -17,67 +19,78 @@
# structure describing the layout of a typeid
TYPE_INFO = lltype.Struct("type_info",
+ ("infobits", lltype.Signed), # combination of the T_xxx consts
("finalizer", FINALIZERTYPE),
("fixedsize", lltype.Signed),
("ofstoptrs", lltype.Ptr(OFFSETS_TO_GC_PTR)),
+ hints={'immutable': True},
+ )
+ VARSIZE_TYPE_INFO = lltype.Struct("varsize_type_info",
+ ("header", TYPE_INFO),
("varitemsize", lltype.Signed),
("ofstovar", lltype.Signed),
("ofstolength", lltype.Signed),
("varofstoptrs", lltype.Ptr(OFFSETS_TO_GC_PTR)),
- ("weakptrofs", lltype.Signed),
+ hints={'immutable': True},
)
- TYPE_INFO_TABLE = lltype.Array(TYPE_INFO)
+ TYPE_INFO_PTR = lltype.Ptr(TYPE_INFO)
+ VARSIZE_TYPE_INFO_PTR = lltype.Ptr(VARSIZE_TYPE_INFO)
- def __init__(self, type_info_table):
- self.type_info_table = type_info_table
- # 'type_info_table' is a list of TYPE_INFO structures when
- # running with gcwrapper, or a real TYPE_INFO_TABLE after
- # the gctransformer.
+ def __init__(self, type_info_group):
+ assert isinstance(type_info_group, llgroup.group)
+ self.type_info_group = type_info_group
+ self.type_info_group_ptr = type_info_group._as_ptr()
+
+ def get(self, typeid):
+ _check_typeid(typeid)
+ return llop.get_group_member(GCData.TYPE_INFO_PTR,
+ self.type_info_group_ptr,
+ typeid)
+
+ def get_varsize(self, typeid):
+ _check_typeid(typeid)
+ return llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR,
+ self.type_info_group_ptr,
+ typeid)
def q_is_varsize(self, typeid):
- ll_assert(typeid > 0, "invalid type_id")
- return (typeid & T_IS_FIXSIZE) == 0
+ infobits = self.get(typeid).infobits
+ return (infobits & T_IS_VARSIZE) != 0
def q_has_gcptr_in_varsize(self, typeid):
- ll_assert(typeid > 0, "invalid type_id")
- return (typeid & (T_IS_FIXSIZE|T_NO_GCPTR_IN_VARSIZE)) == 0
+ infobits = self.get(typeid).infobits
+ return (infobits & T_HAS_GCPTR_IN_VARSIZE) != 0
def q_is_gcarrayofgcptr(self, typeid):
- ll_assert(typeid > 0, "invalid type_id")
- return (typeid &
- (T_IS_FIXSIZE|T_NO_GCPTR_IN_VARSIZE|T_NOT_SIMPLE_GCARRAY)) == 0
+ infobits = self.get(typeid).infobits
+ return (infobits & T_IS_GCARRAY_OF_GCPTR) != 0
def q_finalizer(self, typeid):
- ll_assert(typeid > 0, "invalid type_id")
- return self.type_info_table[typeid].finalizer
+ return self.get(typeid).finalizer
def q_offsets_to_gc_pointers(self, typeid):
- ll_assert(typeid > 0, "invalid type_id")
- return self.type_info_table[typeid].ofstoptrs
+ return self.get(typeid).ofstoptrs
def q_fixed_size(self, typeid):
- ll_assert(typeid > 0, "invalid type_id")
- return self.type_info_table[typeid].fixedsize
+ return self.get(typeid).fixedsize
def q_varsize_item_sizes(self, typeid):
- ll_assert(typeid > 0, "invalid type_id")
- return self.type_info_table[typeid].varitemsize
+ return self.get_varsize(typeid).varitemsize
def q_varsize_offset_to_variable_part(self, typeid):
- ll_assert(typeid > 0, "invalid type_id")
- return self.type_info_table[typeid].ofstovar
+ return self.get_varsize(typeid).ofstovar
def q_varsize_offset_to_length(self, typeid):
- ll_assert(typeid > 0, "invalid type_id")
- return self.type_info_table[typeid].ofstolength
+ return self.get_varsize(typeid).ofstolength
def q_varsize_offsets_to_gcpointers_in_var_part(self, typeid):
- ll_assert(typeid > 0, "invalid type_id")
- return self.type_info_table[typeid].varofstoptrs
+ return self.get_varsize(typeid).varofstoptrs
def q_weakpointer_offset(self, typeid):
- ll_assert(typeid > 0, "invalid type_id")
- return self.type_info_table[typeid].weakptrofs
+ infobits = self.get(typeid).infobits
+ if infobits & T_IS_WEAKREF:
+ return weakptr_offset
+ return -1
def set_query_functions(self, gc):
gc.set_query_functions(
@@ -93,88 +106,59 @@
self.q_varsize_offsets_to_gcpointers_in_var_part,
self.q_weakpointer_offset)
-# For the q_xxx functions that return flags, we use bit patterns
-# in the typeid instead of entries in the type_info_table. The
-# following flag combinations are used (the idea being that it's
-# very fast on CPUs to check if all flags in a set are all zero):
-
-# * if T_IS_FIXSIZE is set, the gc object is not var-sized
-# * if T_IS_FIXSIZE and T_NO_GCPTR_IN_VARSIZE are both cleared,
-# there are gc ptrs in the var-sized part
-# * if T_IS_FIXSIZE, T_NO_GCPTR_IN_VARSIZE and T_NOT_SIMPLE_GCARRAY
-# are all cleared, the shape is just like GcArray(gcptr)
-
-T_IS_FIXSIZE = 0x4
-T_NO_GCPTR_IN_VARSIZE = 0x2
-T_NOT_SIMPLE_GCARRAY = 0x1
-
-def get_typeid_bitmask(TYPE):
- """Return the bits that we would like to be set or cleared in the type_id
- corresponding to TYPE. This returns (mask, expected_value), where
- the condition is that 'type_id & mask == expected_value'.
- """
- if not TYPE._is_varsize():
- return (T_IS_FIXSIZE, T_IS_FIXSIZE) # not var-sized
- if (isinstance(TYPE, lltype.GcArray)
- and isinstance(TYPE.OF, lltype.Ptr)
- and TYPE.OF.TO._gckind == 'gc'):
- # a simple GcArray(gcptr)
- return (T_IS_FIXSIZE|T_NO_GCPTR_IN_VARSIZE|T_NOT_SIMPLE_GCARRAY, 0)
-
- if isinstance(TYPE, lltype.Struct):
- ARRAY = TYPE._flds[TYPE._arrayfld]
- else:
- ARRAY = TYPE
- assert isinstance(ARRAY, lltype.Array)
- if ARRAY.OF != lltype.Void and len(offsets_to_gc_pointers(ARRAY.OF)) > 0:
- # var-sized, with gc pointers in the variable part
- return (T_IS_FIXSIZE|T_NO_GCPTR_IN_VARSIZE|T_NOT_SIMPLE_GCARRAY,
- T_NOT_SIMPLE_GCARRAY)
- else:
- # var-sized, but no gc pointer in the variable part
- return (T_IS_FIXSIZE|T_NO_GCPTR_IN_VARSIZE, T_NO_GCPTR_IN_VARSIZE)
+T_IS_VARSIZE = 0x01
+T_HAS_GCPTR_IN_VARSIZE = 0x02
+T_IS_GCARRAY_OF_GCPTR = 0x04
+T_IS_WEAKREF = 0x08
+
+def _check_typeid(typeid):
+ ll_assert(llop.is_group_member_nonzero(lltype.Bool, typeid),
+ "invalid type_id")
def encode_type_shape(builder, info, TYPE):
"""Encode the shape of the TYPE into the TYPE_INFO structure 'info'."""
offsets = offsets_to_gc_pointers(TYPE)
+ infobits = 0
info.ofstoptrs = builder.offsets2table(offsets, TYPE)
info.finalizer = builder.make_finalizer_funcptr_for_type(TYPE)
- info.weakptrofs = weakpointer_offset(TYPE)
if not TYPE._is_varsize():
- #info.isvarsize = False
- #info.gcptrinvarsize = False
info.fixedsize = llarena.round_up_for_allocation(
- llmemory.sizeof(TYPE))
- info.ofstolength = -1
+ llmemory.sizeof(TYPE), builder.GCClass.object_minimal_size)
# note about round_up_for_allocation(): in the 'info' table
# we put a rounded-up size only for fixed-size objects. For
# varsize ones, the GC must anyway compute the size at run-time
# and round up that result.
else:
- #info.isvarsize = True
+ infobits |= T_IS_VARSIZE
+ varinfo = lltype.cast_pointer(GCData.VARSIZE_TYPE_INFO_PTR, info)
info.fixedsize = llmemory.sizeof(TYPE, 0)
if isinstance(TYPE, lltype.Struct):
ARRAY = TYPE._flds[TYPE._arrayfld]
ofs1 = llmemory.offsetof(TYPE, TYPE._arrayfld)
- info.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
- info.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
+ varinfo.ofstolength = ofs1 + llmemory.ArrayLengthOffset(ARRAY)
+ varinfo.ofstovar = ofs1 + llmemory.itemoffsetof(ARRAY, 0)
else:
+ assert isinstance(TYPE, lltype.GcArray)
ARRAY = TYPE
- info.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
- info.ofstovar = llmemory.itemoffsetof(TYPE, 0)
+ if (isinstance(ARRAY.OF, lltype.Ptr)
+ and ARRAY.OF.TO._gckind == 'gc'):
+ infobits |= T_IS_GCARRAY_OF_GCPTR
+ varinfo.ofstolength = llmemory.ArrayLengthOffset(ARRAY)
+ varinfo.ofstovar = llmemory.itemoffsetof(TYPE, 0)
assert isinstance(ARRAY, lltype.Array)
if ARRAY.OF != lltype.Void:
offsets = offsets_to_gc_pointers(ARRAY.OF)
else:
offsets = ()
- info.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
- info.varitemsize = llmemory.sizeof(ARRAY.OF)
- #info.gcptrinvarsize = len(offsets) > 0
- #info.gcarrayofgcptr = (isinstance(TYPE, lltype.GcArray)
- # and isinstance(TYPE.OF, lltype.Ptr)
- # and TYPE.OF.TO._gckind == 'gc')
+ if len(offsets) > 0:
+ infobits |= T_HAS_GCPTR_IN_VARSIZE
+ varinfo.varofstoptrs = builder.offsets2table(offsets, ARRAY.OF)
+ varinfo.varitemsize = llmemory.sizeof(ARRAY.OF)
+ if TYPE == WEAKREF:
+ infobits |= T_IS_WEAKREF
+ info.infobits = infobits
# ____________________________________________________________
@@ -183,8 +167,12 @@
can_add_new_types = True
can_encode_type_shape = True # set to False initially by the JIT
- def __init__(self):
- self.type_info_list = [None] # don't use typeid 0, helps debugging
+ size_of_fixed_type_info = llmemory.sizeof(GCData.TYPE_INFO)
+
+ def __init__(self, GCClass, lltype2vtable):
+ self.GCClass = GCClass
+ self.lltype2vtable = lltype2vtable
+ self.make_type_info_group()
self.id_of_type = {} # {LLTYPE: type_id}
self.seen_roots = {}
# the following are lists of addresses of gc pointers living inside the
@@ -199,7 +187,13 @@
self.all_prebuilt_gc = []
self.finalizer_funcptrs = {}
self.offsettable_cache = {}
- self.next_typeid_cache = {}
+
+ def make_type_info_group(self):
+ self.type_info_group = llgroup.group("typeinfo")
+ # don't use typeid 0, may help debugging
+ DUMMY = lltype.Struct("dummy", ('x', lltype.Signed))
+ dummy = lltype.malloc(DUMMY, immortal=True, zero=True)
+ self.type_info_group.add_member(dummy)
def get_type_id(self, TYPE):
try:
@@ -208,33 +202,46 @@
assert self.can_add_new_types
assert isinstance(TYPE, (lltype.GcStruct, lltype.GcArray))
# Record the new type_id description as a TYPE_INFO structure.
- # It goes into a list for now, which will be turned into a
- # TYPE_INFO_TABLE in flatten_table() by the gc transformer.
-
- # pick the next type_id with the correct bits set or cleared
- mask, expected = get_typeid_bitmask(TYPE)
- type_id = self.next_typeid_cache.get((mask, expected), 1)
- while True:
- if type_id == len(self.type_info_list):
- self.type_info_list.append(None)
- if (self.type_info_list[type_id] is None and
- (type_id & mask) == expected):
- break # can use this type_id
- else:
- type_id += 1 # continue searching
- self.next_typeid_cache[mask, expected] = type_id + 1
- assert type_id & 0xffff == type_id # make sure it fits into 2 bytes
-
# build the TYPE_INFO structure
- info = lltype.malloc(GCData.TYPE_INFO, immortal=True, zero=True)
+ if not TYPE._is_varsize():
+ fullinfo = lltype.malloc(GCData.TYPE_INFO,
+ immortal=True, zero=True)
+ info = fullinfo
+ else:
+ fullinfo = lltype.malloc(GCData.VARSIZE_TYPE_INFO,
+ immortal=True, zero=True)
+ info = fullinfo.header
if self.can_encode_type_shape:
encode_type_shape(self, info, TYPE)
else:
self._pending_type_shapes.append((info, TYPE))
- self.type_info_list[type_id] = info
+ # store it
+ type_id = self.type_info_group.add_member(fullinfo)
self.id_of_type[TYPE] = type_id
+ # store the vtable of the type (if any) immediately thereafter
+ # (note that if gcconfig.removetypeptr is False, lltype2vtable
+ # is empty)
+ vtable = self.lltype2vtable.get(TYPE, None)
+ if vtable is not None:
+ # check that if we have a vtable, we are not varsize
+ assert lltype.typeOf(fullinfo) == GCData.TYPE_INFO_PTR
+ vtable = lltype.normalizeptr(vtable)
+ self.type_info_group.add_member(vtable)
return type_id
+ def get_info(self, type_id):
+ return llop.get_group_member(GCData.TYPE_INFO_PTR,
+ self.type_info_group._as_ptr(),
+ type_id)
+
+ def get_info_varsize(self, type_id):
+ return llop.get_group_member(GCData.VARSIZE_TYPE_INFO_PTR,
+ self.type_info_group._as_ptr(),
+ type_id)
+
+ def is_weakref(self, type_id):
+ return self.get_info(type_id).infobits & T_IS_WEAKREF
+
def encode_type_shapes_now(self):
if not self.can_encode_type_shape:
self.can_encode_type_shape = True
@@ -260,20 +267,11 @@
self.offsettable_cache[TYPE] = cachedarray
return cachedarray
- def flatten_table(self):
+ def close_table(self):
+ # make sure we no longer add members to the type_info_group.
self.can_add_new_types = False
self.offsettable_cache = None
- table = lltype.malloc(GCData.TYPE_INFO_TABLE, len(self.type_info_list),
- immortal=True)
- fieldnames = GCData.TYPE_INFO._names
- for tableentry, newcontent in zip(table, self.type_info_list):
- if newcontent is None: # empty entry
- tableentry.weakptrofs = -1
- tableentry.ofstolength = -1
- else:
- for name in fieldnames:
- setattr(tableentry, name, getattr(newcontent, name))
- return table
+ return self.type_info_group
def finalizer_funcptr_for_type(self, TYPE):
if TYPE in self.finalizer_funcptrs:
@@ -287,7 +285,7 @@
return lltype.nullptr(GCData.ADDRESS_VOID_FUNC)
def initialize_gc_query_function(self, gc):
- return GCData(self.type_info_list).set_query_functions(gc)
+ return GCData(self.type_info_group).set_query_functions(gc)
def consider_constant(self, TYPE, value, gc):
if value is not lltype.top_container(value):
@@ -349,11 +347,6 @@
offsets.append(0)
return offsets
-def weakpointer_offset(TYPE):
- if TYPE == WEAKREF:
- return llmemory.offsetof(WEAKREF, "weakptr")
- return -1
-
def gc_pointers_inside(v, adr, mutable_only=False):
t = lltype.typeOf(v)
if isinstance(t, lltype.Struct):
@@ -409,6 +402,7 @@
sizeof_weakref= llmemory.sizeof(WEAKREF)
empty_weakref = lltype.malloc(WEAKREF, immortal=True)
empty_weakref.weakptr = llmemory.NULL
+weakptr_offset = llmemory.offsetof(WEAKREF, "weakptr")
def ll_weakref_deref(wref):
wref = llmemory.cast_weakrefptr_to_ptr(WEAKREFPTR, wref)
Modified: pypy/trunk/pypy/rpython/memory/gcwrapper.py
==============================================================================
--- pypy/trunk/pypy/rpython/memory/gcwrapper.py (original)
+++ pypy/trunk/pypy/rpython/memory/gcwrapper.py Sun Oct 11 16:32:27 2009
@@ -18,7 +18,10 @@
self.gc.setup()
def prepare_graphs(self, flowgraphs):
- layoutbuilder = DirectRunLayoutBuilder(self.llinterp)
+ lltype2vtable = self.llinterp.typer.lltype2vtable
+ layoutbuilder = DirectRunLayoutBuilder(self.gc.__class__,
+ lltype2vtable,
+ self.llinterp)
self.get_type_id = layoutbuilder.get_type_id
layoutbuilder.initialize_gc_query_function(self.gc)
@@ -159,9 +162,9 @@
class DirectRunLayoutBuilder(gctypelayout.TypeLayoutBuilder):
- def __init__(self, llinterp):
+ def __init__(self, GCClass, lltype2vtable, llinterp):
self.llinterp = llinterp
- super(DirectRunLayoutBuilder, self).__init__()
+ super(DirectRunLayoutBuilder, self).__init__(GCClass, lltype2vtable)
def make_finalizer_funcptr_for_type(self, TYPE):
from pypy.rpython.memory.gctransform.support import get_rtti, \
Modified: pypy/trunk/pypy/rpython/memory/lltypelayout.py
==============================================================================
--- pypy/trunk/pypy/rpython/memory/lltypelayout.py (original)
+++ pypy/trunk/pypy/rpython/memory/lltypelayout.py Sun Oct 11 16:32:27 2009
@@ -118,6 +118,10 @@
return 0
elif isinstance(offset, llarena.RoundedUpForAllocation):
basesize = convert_offset_to_int(offset.basesize)
+ if isinstance(offset.minsize, llmemory.AddressOffset):
+ minsize = convert_offset_to_int(offset.minsize)
+ if minsize > basesize:
+ basesize = minsize
mask = memory_alignment - 1
return (basesize + mask) & ~ mask
else:
Modified: pypy/trunk/pypy/rpython/memory/test/test_gctypelayout.py
==============================================================================
--- pypy/trunk/pypy/rpython/memory/test/test_gctypelayout.py (original)
+++ pypy/trunk/pypy/rpython/memory/test/test_gctypelayout.py Sun Oct 11 16:32:27 2009
@@ -1,6 +1,12 @@
+import py
from pypy.rpython.memory.gctypelayout import TypeLayoutBuilder, GCData
from pypy.rpython.memory.gctypelayout import offsets_to_gc_pointers
-from pypy.rpython.lltypesystem import lltype
+from pypy.rpython.lltypesystem import lltype, rclass
+from pypy.rpython.test.test_llinterp import get_interpreter
+from pypy.objspace.flow.model import Constant
+
+class FakeGC:
+ object_minimal_size = 0
def getname(T):
try:
@@ -30,13 +36,57 @@
for T, c in [(GC_S, 0), (GC_S2, 2), (GC_A, 0), (GC_A2, 0), (GC_S3, 2)]:
assert len(offsets_to_gc_pointers(T)) == c
-def test_layout_builder():
+def test_layout_builder(lltype2vtable={}):
# XXX a very minimal test
- layoutbuilder = TypeLayoutBuilder()
+ layoutbuilder = TypeLayoutBuilder(FakeGC, lltype2vtable)
for T1, T2 in [(GC_A, GC_S), (GC_A2, GC_S2), (GC_S3, GC_S2)]:
tid1 = layoutbuilder.get_type_id(T1)
tid2 = layoutbuilder.get_type_id(T2)
- gcdata = GCData(layoutbuilder.type_info_list)
+ gcdata = GCData(layoutbuilder.type_info_group)
lst1 = gcdata.q_varsize_offsets_to_gcpointers_in_var_part(tid1)
lst2 = gcdata.q_offsets_to_gc_pointers(tid2)
assert len(lst1) == len(lst2)
+ return layoutbuilder
+
+def test_layout_builder_with_vtable():
+ from pypy.rpython.lltypesystem.lloperation import llop
+ vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True)
+ layoutbuilder = test_layout_builder({GC_S: vtable})
+ tid1 = layoutbuilder.get_type_id(GC_S)
+ tid2 = layoutbuilder.get_type_id(GC_S2)
+ tid3 = layoutbuilder.get_type_id(GC_S3)
+ group = layoutbuilder.type_info_group
+ vt = llop.get_next_group_member(rclass.CLASSTYPE, group._as_ptr(), tid1,
+ layoutbuilder.size_of_fixed_type_info)
+ assert vt == vtable
+ for tid in [tid2, tid3]:
+ py.test.raises((lltype.InvalidCast, AssertionError),
+ llop.get_next_group_member,
+ rclass.CLASSTYPE, group._as_ptr(), tid,
+ layoutbuilder.size_of_fixed_type_info)
+
+def test_constfold():
+ layoutbuilder = TypeLayoutBuilder(FakeGC, {})
+ tid1 = layoutbuilder.get_type_id(GC_A)
+ tid2 = layoutbuilder.get_type_id(GC_S3)
+ class MockGC:
+ def set_query_functions(self, is_varsize,
+ has_gcptr_in_varsize,
+ is_gcarrayofgcptr,
+ *rest):
+ self.is_varsize = is_varsize
+ self.has_gcptr_in_varsize = has_gcptr_in_varsize
+ self.is_gcarrayofgcptr = is_gcarrayofgcptr
+ gc = MockGC()
+ layoutbuilder.initialize_gc_query_function(gc)
+ #
+ def f():
+ return (1 * gc.is_varsize(tid1) +
+ 10 * gc.has_gcptr_in_varsize(tid1) +
+ 100 * gc.is_gcarrayofgcptr(tid1) +
+ 1000 * gc.is_varsize(tid2) +
+ 10000 * gc.has_gcptr_in_varsize(tid2) +
+ 100000 * gc.is_gcarrayofgcptr(tid2))
+ interp, graph = get_interpreter(f, [], backendopt=True)
+ assert interp.eval_graph(graph, []) == 11001
+ assert graph.startblock.exits[0].args == [Constant(11001, lltype.Signed)]
Modified: pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py
==============================================================================
--- pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py (original)
+++ pypy/trunk/pypy/rpython/memory/test/test_transformed_gc.py Sun Oct 11 16:32:27 2009
@@ -21,6 +21,7 @@
t = TranslationContext()
# XXX XXX XXX mess
t.config.translation.gc = gcname
+ t.config.translation.gcconfig.removetypeptr = True
if stacklessgc:
t.config.translation.gcrootfinder = "stackless"
t.config.set(**extraconfigopts)
@@ -195,7 +196,6 @@
assert heap_size < 16000 * INT_SIZE / 4 # xxx
def test_nongc_static_root(self):
- from pypy.rpython.lltypesystem import lltype
T1 = lltype.GcStruct("C", ('x', lltype.Signed))
T2 = lltype.Struct("C", ('p', lltype.Ptr(T1)))
static = lltype.malloc(T2, immortal=True)
@@ -552,7 +552,7 @@
class A(object):
pass
def f():
- from pypy.rpython.lltypesystem import lltype, rffi
+ from pypy.rpython.lltypesystem import rffi
alist = [A() for i in range(50)]
idarray = lltype.malloc(rffi.INTP.TO, len(alist), flavor='raw')
# Compute the id of all the elements of the list. The goal is
@@ -592,7 +592,11 @@
def fix_graph_of_g(translator):
from pypy.translator.translator import graphof
from pypy.objspace.flow.model import Constant
- layoutbuilder = framework.TransformerLayoutBuilder()
+ from pypy.rpython.lltypesystem import rffi
+ GCClass = self.gcpolicy.transformerclass.GCClass
+ lltype2vtable = translator.rtyper.lltype2vtable
+ layoutbuilder = framework.TransformerLayoutBuilder(GCClass,
+ lltype2vtable)
layoutbuilder.delay_encoding()
translator._jit2gc = {
'layoutbuilder': layoutbuilder,
@@ -603,7 +607,7 @@
graph = graphof(translator, g)
for op in graph.startblock.operations:
if op.opname == 'do_malloc_fixedsize_clear':
- op.args = [Constant(type_id, lltype.Signed),
+ op.args = [Constant(type_id, rffi.USHORT),
Constant(llmemory.sizeof(P), lltype.Signed),
Constant(True, lltype.Bool), # can_collect
Constant(False, lltype.Bool), # has_finalizer
@@ -628,7 +632,11 @@
def fix_graph_of_g(translator):
from pypy.translator.translator import graphof
from pypy.objspace.flow.model import Constant
- layoutbuilder = framework.TransformerLayoutBuilder()
+ from pypy.rpython.lltypesystem import rffi
+ GCClass = self.gcpolicy.transformerclass.GCClass
+ lltype2vtable = translator.rtyper.lltype2vtable
+ layoutbuilder = framework.TransformerLayoutBuilder(GCClass,
+ lltype2vtable)
layoutbuilder.delay_encoding()
translator._jit2gc = {
'layoutbuilder': layoutbuilder,
@@ -639,7 +647,7 @@
graph = graphof(translator, g)
for op in graph.startblock.operations:
if op.opname == 'do_malloc_fixedsize_clear':
- op.args = [Constant(type_id, lltype.Signed),
+ op.args = [Constant(type_id, rffi.USHORT),
Constant(llmemory.sizeof(P), lltype.Signed),
Constant(True, lltype.Bool), # can_collect
Constant(False, lltype.Bool), # has_finalizer
@@ -934,7 +942,6 @@
assert res == 20 + 20
def test_nongc_static_root_minor_collect(self):
- from pypy.rpython.lltypesystem import lltype
T1 = lltype.GcStruct("C", ('x', lltype.Signed))
T2 = lltype.Struct("C", ('p', lltype.Ptr(T1)))
static = lltype.malloc(T2, immortal=True)
@@ -957,7 +964,6 @@
def test_static_root_minor_collect(self):
- from pypy.rpython.lltypesystem import lltype
class A:
pass
class B:
Modified: pypy/trunk/pypy/rpython/rtyper.py
==============================================================================
--- pypy/trunk/pypy/rpython/rtyper.py (original)
+++ pypy/trunk/pypy/rpython/rtyper.py Sun Oct 11 16:32:27 2009
@@ -65,6 +65,7 @@
self.class_pbc_attributes = {}
self.oo_meth_impls = {}
self.cache_dummy_values = {}
+ self.lltype2vtable = {}
self.typererrors = []
self.typererror_count = 0
# make the primitive_to_repr constant mapping
@@ -131,12 +132,6 @@
result[repr.lowleveltype] = classdef
return result
- def lltype_to_vtable_mapping(self):
- result = {}
- for repr in self.instance_reprs.itervalues():
- result[repr.lowleveltype.TO] = repr.rclass.getvtable()
- return result
-
def get_type_for_typeptr(self, typeptr):
try:
return self.type_for_typeptr[typeptr._obj]
Modified: pypy/trunk/pypy/translator/backendopt/inline.py
==============================================================================
--- pypy/trunk/pypy/translator/backendopt/inline.py (original)
+++ pypy/trunk/pypy/translator/backendopt/inline.py Sun Oct 11 16:32:27 2009
@@ -753,7 +753,7 @@
subcount = inline_function(translator, graph, parentgraph,
lltype_to_classdef, raise_analyzer,
call_count_pred, cleanup=False)
- to_cleanup[graph] = True
+ to_cleanup[parentgraph] = True
res = bool(subcount)
except CannotInline:
try_again[graph] = True
Modified: pypy/trunk/pypy/translator/c/database.py
==============================================================================
--- pypy/trunk/pypy/translator/c/database.py (original)
+++ pypy/trunk/pypy/translator/c/database.py Sun Oct 11 16:32:27 2009
@@ -5,6 +5,7 @@
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.lltypesystem.llmemory import WeakRef, _WeakRefType, GCREF
from pypy.rpython.lltypesystem.rffi import CConstant
+from pypy.rpython.lltypesystem import llgroup
from pypy.tool.sourcetools import valid_identifier
from pypy.translator.c.primitive import PrimitiveName, PrimitiveType
from pypy.translator.c.node import StructDefNode, ArrayDefNode
@@ -141,6 +142,8 @@
#raise Exception("don't know about opaque type %r" % (T,))
return 'struct %s @' % (
valid_identifier('pypy_opaque_' + T.tag),)
+ elif isinstance(T, llgroup.GroupType):
+ return "/*don't use me*/ void @"
else:
raise Exception("don't know about type %r" % (T,))
@@ -285,6 +288,8 @@
finish_callbacks.append(('Stackless transformer: finished',
self.stacklesstransformer.finish))
if self.gctransformer:
+ finish_callbacks.append(('GC transformer: tracking vtables',
+ self.gctransformer.get_final_dependencies))
finish_callbacks.append(('GC transformer: finished tables',
self.gctransformer.finish_tables))
Modified: pypy/trunk/pypy/translator/c/funcgen.py
==============================================================================
--- pypy/trunk/pypy/translator/c/funcgen.py (original)
+++ pypy/trunk/pypy/translator/c/funcgen.py Sun Oct 11 16:32:27 2009
@@ -781,5 +781,22 @@
def OP_PROMOTE_VIRTUALIZABLE(self, op):
return '/* PROMOTE_VIRTUALIZABLE %s */' % op
+ def OP_GET_GROUP_MEMBER(self, op):
+ typename = self.db.gettype(op.result.concretetype)
+ return '%s = (%s)_OP_GET_GROUP_MEMBER(%s, %s);' % (
+ self.expr(op.result),
+ cdecl(typename, ''),
+ self.expr(op.args[0]),
+ self.expr(op.args[1]))
+
+ def OP_GET_NEXT_GROUP_MEMBER(self, op):
+ typename = self.db.gettype(op.result.concretetype)
+ return '%s = (%s)_OP_GET_NEXT_GROUP_MEMBER(%s, %s, %s);' % (
+ self.expr(op.result),
+ cdecl(typename, ''),
+ self.expr(op.args[0]),
+ self.expr(op.args[1]),
+ self.expr(op.args[2]))
+
assert not USESLOTS or '__dict__' not in dir(FunctionCodeGenerator)
Modified: pypy/trunk/pypy/translator/c/gc.py
==============================================================================
--- pypy/trunk/pypy/translator/c/gc.py (original)
+++ pypy/trunk/pypy/translator/c/gc.py Sun Oct 11 16:32:27 2009
@@ -1,4 +1,5 @@
import sys
+from pypy.objspace.flow.model import Constant
from pypy.translator.c.support import cdecl
from pypy.translator.c.node import ContainerNode
from pypy.rpython.lltypesystem.lltype import \
@@ -11,7 +12,7 @@
class BasicGcPolicy(object):
requires_stackless = False
-
+
def __init__(self, db, thread_enabled=False):
self.db = db
self.thread_enabled = thread_enabled
@@ -49,6 +50,9 @@
post_include_bits=['typedef void *GC_hidden_pointer;']
)
+ def need_no_typeptr(self):
+ return False
+
def gc_startup_code(self):
return []
@@ -312,8 +316,11 @@
return framework.convert_weakref_to(ptarget)
def OP_GC_RELOAD_POSSIBLY_MOVED(self, funcgen, op):
- args = [funcgen.expr(v) for v in op.args]
- return '%s = %s; /* for moving GCs */' % (args[1], args[0])
+ if isinstance(op.args[1], Constant):
+ return '/* %s */' % (op,)
+ else:
+ args = [funcgen.expr(v) for v in op.args]
+ return '%s = %s; /* for moving GCs */' % (args[1], args[0])
def common_gcheader_definition(self, defnode):
return defnode.db.gctransformer.gc_fields()
@@ -322,6 +329,25 @@
o = top_container(defnode.obj)
return defnode.db.gctransformer.gc_field_values_for(o)
+ def need_no_typeptr(self):
+ config = self.db.translator.config
+ return config.translation.gcconfig.removetypeptr
+
+ def OP_GC_GETTYPEPTR_GROUP(self, funcgen, op):
+ # expands to a number of steps, as per rpython/lltypesystem/opimpl.py,
+ # all implemented by a single call to a C macro.
+ [v_obj, c_grpptr, c_skipoffset, c_vtableinfo] = op.args
+ typename = funcgen.db.gettype(op.result.concretetype)
+ fieldname = c_vtableinfo.value[2]
+ return (
+ '%s = (%s)_OP_GET_NEXT_GROUP_MEMBER(%s, (unsigned short)%s->_%s, %s);'
+ % (funcgen.expr(op.result),
+ cdecl(typename, ''),
+ funcgen.expr(c_grpptr),
+ funcgen.expr(v_obj),
+ fieldname,
+ funcgen.expr(c_skipoffset)))
+
class AsmGcRootFrameworkGcPolicy(FrameworkGcPolicy):
transformerclass = asmgcroot.AsmGcRootFrameworkGCTransformer
Modified: pypy/trunk/pypy/translator/c/node.py
==============================================================================
--- pypy/trunk/pypy/translator/c/node.py (original)
+++ pypy/trunk/pypy/translator/c/node.py Sun Oct 11 16:32:27 2009
@@ -3,7 +3,7 @@
GcStruct, GcArray, RttiStruct, ContainerType, \
parentlink, Ptr, PyObject, Void, OpaqueType, Float, \
RuntimeTypeInfo, getRuntimeTypeInfo, Char, _subarray
-from pypy.rpython.lltypesystem import llmemory
+from pypy.rpython.lltypesystem import llmemory, llgroup
from pypy.translator.c.funcgen import FunctionCodeGenerator
from pypy.translator.c.external import CExternalFunctionCodeGenerator
from pypy.translator.c.support import USESLOTS # set to False if necessary while refactoring
@@ -67,6 +67,12 @@
bare=True)
self.prefix = somelettersfrom(STRUCT._name) + '_'
self.dependencies = {}
+ #
+ self.fieldnames = STRUCT._names
+ if STRUCT._hints.get('typeptr', False):
+ if db.gcpolicy.need_no_typeptr():
+ assert self.fieldnames == ('typeptr',)
+ self.fieldnames = ()
def setup(self):
# this computes self.fields
@@ -80,7 +86,7 @@
if needs_gcheader(self.STRUCT):
for fname, T in db.gcpolicy.struct_gcheader_definition(self):
self.fields.append((fname, db.gettype(T, who_asks=self)))
- for name in STRUCT._names:
+ for name in self.fieldnames:
T = self.c_struct_field_type(name)
if name == STRUCT._arrayfld:
typename = db.gettype(T, varlength=self.varlength,
@@ -147,8 +153,7 @@
yield line
def visitor_lines(self, prefix, on_field):
- STRUCT = self.STRUCT
- for name in STRUCT._names:
+ for name in self.fieldnames:
FIELD_T = self.c_struct_field_type(name)
cname = self.c_struct_field_name(name)
for line in on_field('%s.%s' % (prefix, cname),
@@ -157,8 +162,7 @@
def debug_offsets(self):
# generate number exprs giving the offset of the elements in the struct
- STRUCT = self.STRUCT
- for name in STRUCT._names:
+ for name in self.fieldnames:
FIELD_T = self.c_struct_field_type(name)
if FIELD_T is Void:
yield '-1'
@@ -464,11 +468,15 @@
return hasattr(self.T, "_hints") and self.T._hints.get('thread_local')
def forward_declaration(self):
+ if llgroup.member_of_group(self.obj):
+ return
yield '%s;' % (
forward_cdecl(self.implementationtypename,
self.name, self.db.standalone, self.is_thread_local()))
def implementation(self):
+ if llgroup.member_of_group(self.obj):
+ return []
lines = list(self.initializationexpr())
lines[0] = '%s = %s' % (
cdecl(self.implementationtypename, self.name, self.is_thread_local()),
@@ -514,7 +522,7 @@
for i, thing in enumerate(self.db.gcpolicy.struct_gcheader_initdata(self)):
data.append(('gcheader%d'%i, thing))
- for name in self.T._names:
+ for name in defnode.fieldnames:
data.append((name, getattr(self.obj, name)))
# Reasonably, you should only initialise one of the fields of a union
@@ -898,6 +906,67 @@
#obj._converted_weakref = container # hack for genllvm :-/
return db.getcontainernode(container, _dont_write_c_code=False)
+class GroupNode(ContainerNode):
+ nodekind = 'group'
+ count_members = None
+
+ def __init__(self, *args):
+ ContainerNode.__init__(self, *args)
+ self.implementationtypename = 'struct group_%s_s @' % self.name
+
+ def basename(self):
+ return self.obj.name
+
+ def enum_dependencies(self):
+ # note: for the group used by the GC, it can grow during this phase,
+ # which means that we might not return all members yet. This is
+ # fixed by finish_tables() in rpython/memory/gctransform/framework.py
+ for member in self.obj.members:
+ yield member._as_ptr()
+
+ def _fix_members(self):
+ if self.obj.outdated:
+ raise Exception(self.obj.outdated)
+ if self.count_members is None:
+ self.count_members = len(self.obj.members)
+ else:
+ # make sure no new member showed up, because it's too late
+ assert len(self.obj.members) == self.count_members
+
+ def forward_declaration(self):
+ self._fix_members()
+ yield ''
+ ctype = ['%s {' % cdecl(self.implementationtypename, '')]
+ for i, member in enumerate(self.obj.members):
+ structtypename = self.db.gettype(typeOf(member))
+ ctype.append('\t%s;' % cdecl(structtypename, 'member%d' % i))
+ ctype.append('} @')
+ ctype = '\n'.join(ctype)
+ yield '%s;' % (
+ forward_cdecl(ctype, self.name, self.db.standalone,
+ self.is_thread_local()))
+ yield '#include "src/llgroup.h"'
+ yield 'PYPY_GROUP_CHECK_SIZE(%s);' % self.name
+ for i, member in enumerate(self.obj.members):
+ structnode = self.db.getcontainernode(member)
+ yield '#define %s %s.member%d' % (structnode.name,
+ self.name, i)
+ yield ''
+
+ def initializationexpr(self):
+ self._fix_members()
+ lines = ['{']
+ lasti = len(self.obj.members) - 1
+ for i, member in enumerate(self.obj.members):
+ structnode = self.db.getcontainernode(member)
+ lines1 = list(structnode.initializationexpr())
+ lines1[0] += '\t/* member%d: %s */' % (i, structnode.name)
+ if i != lasti:
+ lines1[-1] += ','
+ lines.extend(lines1)
+ lines.append('}')
+ return lines
+
ContainerNodeFactory = {
Struct: StructNode,
@@ -909,4 +978,5 @@
OpaqueType: opaquenode_factory,
PyObjectType: PyObjectNode,
llmemory._WeakRefType: weakrefnode_factory,
+ llgroup.GroupType: GroupNode,
}
Modified: pypy/trunk/pypy/translator/c/primitive.py
==============================================================================
--- pypy/trunk/pypy/translator/c/primitive.py (original)
+++ pypy/trunk/pypy/translator/c/primitive.py Sun Oct 11 16:32:27 2009
@@ -3,7 +3,7 @@
from pypy.rlib.objectmodel import CDefinedIntSymbolic
from pypy.rlib.rarithmetic import r_longlong, isinf, isnan
from pypy.rpython.lltypesystem.lltype import *
-from pypy.rpython.lltypesystem import rffi
+from pypy.rpython.lltypesystem import rffi, llgroup
from pypy.rpython.lltypesystem.llmemory import Address, \
AddressOffset, ItemOffset, ArrayItemsOffset, FieldOffset, \
CompositeOffset, ArrayLengthOffset, \
@@ -50,12 +50,15 @@
elif type(value) == GCHeaderOffset:
return '0'
elif type(value) == RoundedUpForAllocation:
- return 'ROUND_UP_FOR_ALLOCATION(%s)' % (
- name_signed(value.basesize, db))
+ return 'ROUND_UP_FOR_ALLOCATION(%s, %s)' % (
+ name_signed(value.basesize, db),
+ name_signed(value.minsize, db))
elif isinstance(value, CDefinedIntSymbolic):
return str(value.expr)
elif isinstance(value, ComputedIntSymbolic):
value = value.compute_fn()
+ elif isinstance(value, llgroup.CombinedSymbolic):
+ return '(%s|%dL)' % (name_ushort(value.lowpart, db), value.rest)
else:
raise Exception("unimplemented symbolic %r"%value)
if value is None:
@@ -136,6 +139,19 @@
else:
return 'NULL'
+def name_ushort(value, db):
+ if isinstance(value, Symbolic):
+ if isinstance(value, llgroup.GroupMemberOffset):
+ groupnode = db.getcontainernode(value.grpptr._as_obj())
+ structnode = db.getcontainernode(value.member._as_obj())
+ return 'GROUP_MEMBER_OFFSET(%s, %s)' % (
+ groupnode.name,
+ structnode.name,
+ )
+ else:
+ raise Exception("unimplemented symbolic %r" % value)
+ return str(value)
+
# On 64 bit machines, SignedLongLong and Signed are the same, so the
# order matters, because we want the Signed implementation.
PrimitiveName = {
@@ -151,6 +167,7 @@
Void: name_void,
Address: name_address,
GCREF: name_gcref,
+ rffi.USHORT: name_ushort,
}
PrimitiveType = {
@@ -166,6 +183,7 @@
Void: 'void @',
Address: 'void* @',
GCREF: 'void* @',
+ rffi.USHORT: 'unsigned short @',
}
def define_c_primitive(ll_type, c_name):
@@ -181,7 +199,7 @@
for ll_type, c_name in [(rffi.SIGNEDCHAR, 'signed char'),
(rffi.UCHAR, 'unsigned char'),
(rffi.SHORT, 'short'),
- (rffi.USHORT, 'unsigned short'),
+ #(rffi.USHORT, 'unsigned short'),
(rffi.INT, 'int'),
(rffi.UINT, 'unsigned int'),
(rffi.LONG, 'long'),
Modified: pypy/trunk/pypy/translator/c/src/g_include.h
==============================================================================
--- pypy/trunk/pypy/translator/c/src/g_include.h (original)
+++ pypy/trunk/pypy/translator/c/src/g_include.h Sun Oct 11 16:32:27 2009
@@ -34,6 +34,7 @@
#ifndef AVR
#include "src/unichar.h"
#endif
+#include "src/llgroup.h"
#include "src/instrument.h"
Modified: pypy/trunk/pypy/translator/c/src/mem.h
==============================================================================
--- pypy/trunk/pypy/translator/c/src/mem.h (original)
+++ pypy/trunk/pypy/translator/c/src/mem.h Sun Oct 11 16:32:27 2009
@@ -14,8 +14,9 @@
struct rpy_memory_alignment_test1 s;
};
#define MEMORY_ALIGNMENT offsetof(struct rpy_memory_alignment_test2, s)
-#define ROUND_UP_FOR_ALLOCATION(x) \
- (((x) + (MEMORY_ALIGNMENT-1)) & ~(MEMORY_ALIGNMENT-1))
+#define ROUND_UP_FOR_ALLOCATION(x, minsize) \
+ ((((x)>=(minsize)?(x):(minsize)) \
+ + (MEMORY_ALIGNMENT-1)) & ~(MEMORY_ALIGNMENT-1))
extern char __gcmapstart;
extern char __gcmapend;
Modified: pypy/trunk/pypy/translator/c/test/test_lltyped.py
==============================================================================
--- pypy/trunk/pypy/translator/c/test/test_lltyped.py (original)
+++ pypy/trunk/pypy/translator/c/test/test_lltyped.py Sun Oct 11 16:32:27 2009
@@ -701,5 +701,74 @@
fn = self.getcompiled(llf)
fn()
+ def test_llgroup(self):
+ from pypy.rpython.lltypesystem.test import test_llgroup
+ f = test_llgroup.build_test()
+ fn = self.getcompiled(f)
+ res = fn()
+ assert res == 42
-
+ def test_llgroup_size_limit(self):
+ yield self._test_size_limit, True
+ yield self._test_size_limit, False
+
+ def _test_size_limit(self, toobig):
+ from pypy.rpython.lltypesystem import llgroup
+ from pypy.rpython.lltypesystem.lloperation import llop
+ from pypy.translator.platform import CompilationError
+ grp = llgroup.group("big")
+ S1 = Struct('S1', ('x', Signed), ('y', Signed),
+ ('z', Signed), ('u', Signed),
+ ('x2', Signed), ('y2', Signed),
+ ('z2', Signed), ('u2', Signed),
+ ('x3', Signed), ('y3', Signed),
+ ('z3', Signed), ('u3', Signed),
+ ('x4', Signed), ('y4', Signed),
+ ('z4', Signed), ('u4', Signed))
+ goffsets = []
+ for i in range(4096 + toobig):
+ goffsets.append(grp.add_member(malloc(S1, immortal=True)))
+ grpptr = grp._as_ptr()
+ def f(n):
+ p = llop.get_group_member(Ptr(S1), grpptr, goffsets[n])
+ q = llop.get_group_member(Ptr(S1), grpptr, goffsets[0])
+ p.x = 5
+ q.x = 666
+ return p.x
+ if toobig:
+ py.test.raises(CompilationError, self.getcompiled, f, [int])
+ else:
+ fn = self.getcompiled(f, [int])
+ res = fn(-1)
+ assert res == 5
+
+ def test_round_up_for_allocation(self):
+ from pypy.rpython.lltypesystem import llmemory, llarena
+ S = Struct('S', ('x', Char), ('y', Char))
+ M = Struct('M', ('x', Char), ('y', Signed))
+ #
+ def g():
+ ssize = llarena.round_up_for_allocation(llmemory.sizeof(S))
+ msize = llarena.round_up_for_allocation(llmemory.sizeof(M))
+ smsize = llarena.round_up_for_allocation(llmemory.sizeof(S),
+ llmemory.sizeof(M))
+ mssize = llarena.round_up_for_allocation(llmemory.sizeof(M),
+ llmemory.sizeof(S))
+ return ssize, msize, smsize, mssize
+ #
+ glob_sizes = g()
+ #
+ def check((ssize, msize, smsize, mssize)):
+ assert ssize == llmemory.sizeof(Signed)
+ assert msize == llmemory.sizeof(Signed) * 2
+ assert smsize == msize
+ assert mssize == msize
+ #
+ def f():
+ check(glob_sizes)
+ check(g())
+ return 42
+ #
+ fn = self.getcompiled(f, [])
+ res = fn()
+ assert res == 42
Modified: pypy/trunk/pypy/translator/c/test/test_newgc.py
==============================================================================
--- pypy/trunk/pypy/translator/c/test/test_newgc.py (original)
+++ pypy/trunk/pypy/translator/c/test/test_newgc.py Sun Oct 11 16:32:27 2009
@@ -16,6 +16,7 @@
class TestUsingFramework(object):
gcpolicy = "marksweep"
should_be_moving = False
+ removetypeptr = False
GC_CAN_MOVE = False
GC_CANNOT_MALLOC_NONMOVABLE = False
@@ -25,6 +26,7 @@
def _makefunc2(cls, f):
t = Translation(f, [int, int], gc=cls.gcpolicy,
policy=annpolicy.StrictAnnotatorPolicy())
+ t.config.translation.gcconfig.removetypeptr = cls.removetypeptr
t.disable(['backendopt'])
t.set_backend_extra_options(c_isolated=True, c_debug_defines=True)
t.rtype()
@@ -796,6 +798,9 @@
def test_gc_set_max_heap_size(self):
py.test.skip("not implemented")
+class TestHybridGCRemoveTypePtr(TestHybridGC):
+ removetypeptr = True
+
class TestMarkCompactGC(TestSemiSpaceGC):
gcpolicy = "markcompact"
should_be_moving = True
More information about the Pypy-commit
mailing list