[pypy-svn] r51439 - in pypy/dist/pypy/rpython/memory: . gc gctransform test
arigo at codespeak.net
arigo at codespeak.net
Wed Feb 13 15:41:07 CET 2008
Author: arigo
Date: Wed Feb 13 15:41:06 2008
New Revision: 51439
Modified:
pypy/dist/pypy/rpython/memory/gc/generation.py
pypy/dist/pypy/rpython/memory/gc/marksweep.py
pypy/dist/pypy/rpython/memory/gc/semispace.py
pypy/dist/pypy/rpython/memory/gctransform/framework.py
pypy/dist/pypy/rpython/memory/gcwrapper.py
pypy/dist/pypy/rpython/memory/support.py
pypy/dist/pypy/rpython/memory/test/test_support.py
Log:
Rename AddressLinkedList to AddressStack everywhere,
and give it a complete stack behavior (the previous
class would ignore append(NULL)).
While I'm at it, here is an AddressDeque too.
Modified: pypy/dist/pypy/rpython/memory/gc/generation.py
==============================================================================
--- pypy/dist/pypy/rpython/memory/gc/generation.py (original)
+++ pypy/dist/pypy/rpython/memory/gc/generation.py Wed Feb 13 15:41:06 2008
@@ -3,6 +3,7 @@
GCFLAG_IMMORTAL
from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage
from pypy.rpython.lltypesystem import lltype, llmemory, llarena
+from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
from pypy.rlib.objectmodel import free_non_gc_object
from pypy.rlib.debug import ll_assert
from pypy.rpython.lltypesystem.lloperation import llop
@@ -30,13 +31,13 @@
needs_write_barrier = True
prebuilt_gc_objects_are_static_roots = False
- def __init__(self, AddressLinkedList,
+ def __init__(self, chunk_size=DEFAULT_CHUNK_SIZE,
nursery_size=128,
min_nursery_size=128,
auto_nursery_size=False,
space_size=4096,
max_space_size=sys.maxint//2+1):
- SemiSpaceGC.__init__(self, AddressLinkedList,
+ SemiSpaceGC.__init__(self, chunk_size = chunk_size,
space_size = space_size,
max_space_size = max_space_size)
assert min_nursery_size <= nursery_size <= space_size // 2
@@ -55,7 +56,7 @@
# of such objects is abused for this linked list; it needs to be
# reset to its correct value when GCFLAG_NO_YOUNG_PTRS is set
# again at the start of a collection.
- self.young_objects_with_weakrefs = self.AddressLinkedList()
+ self.young_objects_with_weakrefs = self.AddressStack()
self.set_nursery_size(self.initial_nursery_size)
# the GC is fully setup now. The rest can make use of it.
if self.auto_nursery_size:
Modified: pypy/dist/pypy/rpython/memory/gc/marksweep.py
==============================================================================
--- pypy/dist/pypy/rpython/memory/gc/marksweep.py (original)
+++ pypy/dist/pypy/rpython/memory/gc/marksweep.py Wed Feb 13 15:41:06 2008
@@ -1,7 +1,8 @@
from pypy.rpython.lltypesystem.llmemory import raw_malloc, raw_free
from pypy.rpython.lltypesystem.llmemory import raw_memcopy, raw_memclear
from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage
-from pypy.rpython.memory.support import get_address_linked_list
+from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
+from pypy.rpython.memory.support import get_address_stack
from pypy.rpython.memory.gcheader import GCHeaderBuilder
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rlib.objectmodel import free_non_gc_object
@@ -38,12 +39,12 @@
POOLNODE.become(lltype.Struct('gc_pool_node', ('linkedlist', HDRPTR),
('nextnode', POOLNODEPTR)))
- def __init__(self, AddressLinkedList, start_heap_size=4096):
+ def __init__(self, chunk_size=DEFAULT_CHUNK_SIZE, start_heap_size=4096):
self.heap_usage = 0 # at the end of the latest collection
self.bytes_malloced = 0 # since the latest collection
self.bytes_malloced_threshold = start_heap_size
self.total_collection_time = 0.0
- self.AddressLinkedList = AddressLinkedList
+ self.AddressStack = get_address_stack(chunk_size)
self.malloced_objects = lltype.nullptr(self.HDR)
self.malloced_objects_with_finalizer = lltype.nullptr(self.HDR)
# these are usually only the small bits of memory that make a
@@ -226,7 +227,7 @@
## size_gc_header)
# push the roots on the mark stack
- objects = self.AddressLinkedList() # mark stack
+ objects = self.AddressStack() # mark stack
self._mark_stack = objects
self.root_walker.walk_roots(
MarkSweepGC._mark_root, # stack roots
@@ -477,7 +478,9 @@
self.trace(obj, self._add_reachable, objects)
def _add_reachable(pointer, objects):
- objects.append(pointer.address[0])
+ obj = pointer.address[0]
+ if obj:
+ objects.append(obj)
_add_reachable = staticmethod(_add_reachable)
def statistics(self, index):
@@ -558,7 +561,7 @@
curpool = self.x_swap_pool(lltype.nullptr(X_POOL))
size_gc_header = self.gcheaderbuilder.size_gc_header
- oldobjects = self.AddressLinkedList()
+ oldobjects = self.AddressStack()
# if no pool specified, use the current pool as the 'source' pool
oldpool = clonedata.pool or curpool
oldpool = lltype.cast_opaque_ptr(self.POOLPTR, oldpool)
@@ -576,7 +579,7 @@
# a stack of addresses of places that still point to old objects
# and that must possibly be fixed to point to a new copy
- stack = self.AddressLinkedList()
+ stack = self.AddressStack()
stack.append(llmemory.cast_ptr_to_adr(clonedata)
+ llmemory.offsetof(X_CLONE, 'gcobjectptr'))
while stack.non_empty():
@@ -679,9 +682,11 @@
self.trace(obj, self._add_reachable_and_rename, objects)
def _add_reachable_and_rename(self, pointer, objects):
- if pointer.address[0] == self.x_become_target_addr:
- pointer.address[0] = self.x_become_source_addr
- objects.append(pointer.address[0])
+ obj = pointer.address[0]
+ if obj:
+ if obj == self.x_become_target_addr:
+ obj = pointer.address[0] = self.x_become_source_addr
+ objects.append(obj)
def x_become(self, target_addr, source_addr):
# 1. mark from the roots, and also the objects that objects-with-del
@@ -699,7 +704,7 @@
## size_gc_header)
# push the roots on the mark stack
- objects = self.AddressLinkedList() # mark stack
+ objects = self.AddressStack() # mark stack
self._mark_stack = objects
# the last sweep did not clear the mark bit of static roots,
# since they are not in the malloced_objects list
@@ -843,8 +848,8 @@
_alloc_flavor_ = "raw"
COLLECT_EVERY = 2000
- def __init__(self, AddressLinkedList, start_heap_size=4096):
- MarkSweepGC.__init__(self, AddressLinkedList, start_heap_size)
+ def __init__(self, chunk_size=DEFAULT_CHUNK_SIZE, start_heap_size=4096):
+ MarkSweepGC.__init__(self, chunk_size, start_heap_size)
self.count_mallocs = 0
def write_malloc_statistics(self, typeid, size, result, varsize):
@@ -1021,7 +1026,7 @@
## size_gc_header)
# push the roots on the mark stack
- objects = self.AddressLinkedList() # mark stack
+ objects = self.AddressStack() # mark stack
self._mark_stack = objects
self.root_walker.walk_roots(
MarkSweepGC._mark_root, # stack roots
Modified: pypy/dist/pypy/rpython/memory/gc/semispace.py
==============================================================================
--- pypy/dist/pypy/rpython/memory/gc/semispace.py (original)
+++ pypy/dist/pypy/rpython/memory/gc/semispace.py Wed Feb 13 15:41:06 2008
@@ -1,7 +1,8 @@
from pypy.rpython.lltypesystem.llmemory import raw_malloc, raw_free
from pypy.rpython.lltypesystem.llmemory import raw_memcopy, raw_memclear
from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage
-from pypy.rpython.memory.support import get_address_linked_list
+from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
+from pypy.rpython.memory.support import get_address_stack
from pypy.rpython.memory.gcheader import GCHeaderBuilder
from pypy.rpython.lltypesystem import lltype, llmemory, llarena
from pypy.rlib.objectmodel import free_non_gc_object
@@ -27,13 +28,13 @@
HDR = lltype.Struct('header', ('forw', llmemory.Address),
('tid', lltype.Signed))
- def __init__(self, AddressLinkedList, space_size=4096,
+ def __init__(self, chunk_size=DEFAULT_CHUNK_SIZE, space_size=4096,
max_space_size=sys.maxint//2+1):
MovingGCBase.__init__(self)
self.space_size = space_size
self.max_space_size = max_space_size
self.gcheaderbuilder = GCHeaderBuilder(self.HDR)
- self.AddressLinkedList = AddressLinkedList
+ self.AddressStack = get_address_stack(chunk_size)
def setup(self):
self.tospace = llarena.arena_malloc(self.space_size, True)
@@ -42,9 +43,9 @@
self.fromspace = llarena.arena_malloc(self.space_size, True)
ll_assert(bool(self.fromspace), "couldn't allocate fromspace")
self.free = self.tospace
- self.objects_with_finalizers = self.AddressLinkedList()
- self.run_finalizers = self.AddressLinkedList()
- self.objects_with_weakrefs = self.AddressLinkedList()
+ self.objects_with_finalizers = self.AddressStack()
+ self.run_finalizers = self.AddressStack()
+ self.objects_with_weakrefs = self.AddressStack()
self.finalizer_lock_count = 0
self.red_zone = 0
@@ -312,7 +313,7 @@
# if it is not copied, add it to the list of to-be-called finalizers
# and copy it, to make the finalizer runnable
# NOTE: the caller is calling scan_copied, so no need to do it here
- new_with_finalizer = self.AddressLinkedList()
+ new_with_finalizer = self.AddressStack()
while self.objects_with_finalizers.non_empty():
obj = self.objects_with_finalizers.pop()
if self.is_forwarded(obj):
@@ -326,7 +327,7 @@
# walk over list of objects that contain weakrefs
# if the object it references survives then update the weakref
# otherwise invalidate the weakref
- new_with_weakref = self.AddressLinkedList()
+ new_with_weakref = self.AddressStack()
while self.objects_with_weakrefs.non_empty():
obj = self.objects_with_weakrefs.pop()
if not self.is_forwarded(obj):
@@ -348,7 +349,7 @@
def update_run_finalizers(self):
# we are in an inner collection, caused by a finalizer
# the run_finalizers objects need to be copied
- new_run_finalizer = self.AddressLinkedList()
+ new_run_finalizer = self.AddressStack()
while self.run_finalizers.non_empty():
obj = self.run_finalizers.pop()
new_run_finalizer.append(self.copy(obj))
Modified: pypy/dist/pypy/rpython/memory/gctransform/framework.py
==============================================================================
--- pypy/dist/pypy/rpython/memory/gctransform/framework.py (original)
+++ pypy/dist/pypy/rpython/memory/gctransform/framework.py Wed Feb 13 15:41:06 2008
@@ -97,10 +97,8 @@
root_stack_depth = 163840
def __init__(self, translator):
- from pypy.rpython.memory.support import get_address_linked_list
from pypy.rpython.memory.gc.base import choose_gc_from_config
super(FrameworkGCTransformer, self).__init__(translator, inline=True)
- AddressLinkedList = get_address_linked_list()
if hasattr(self, 'GC_PARAMS'):
# for tests: the GC choice can be specified as class attributes
from pypy.rpython.memory.gc.marksweep import MarkSweepGC
@@ -131,7 +129,7 @@
self.gcdata = gcdata
self.malloc_fnptr_cache = {}
- gcdata.gc = GCClass(AddressLinkedList, **GC_PARAMS)
+ gcdata.gc = GCClass(**GC_PARAMS)
root_walker = self.build_root_walker()
gcdata.set_query_functions(gcdata.gc)
gcdata.gc.set_root_walker(root_walker)
Modified: pypy/dist/pypy/rpython/memory/gcwrapper.py
==============================================================================
--- pypy/dist/pypy/rpython/memory/gcwrapper.py (original)
+++ pypy/dist/pypy/rpython/memory/gcwrapper.py Wed Feb 13 15:41:06 2008
@@ -1,7 +1,6 @@
from pypy.rpython.lltypesystem import lltype, llmemory, llheap
from pypy.rpython import llinterp
from pypy.rpython.annlowlevel import llhelper
-from pypy.rpython.memory.support import get_address_linked_list
from pypy.rpython.memory import gctypelayout
from pypy.objspace.flow.model import Constant
@@ -9,8 +8,7 @@
class GCManagedHeap(object):
def __init__(self, llinterp, flowgraphs, gc_class, GC_PARAMS={}):
- self.AddressLinkedList = get_address_linked_list(10)
- self.gc = gc_class(self.AddressLinkedList, **GC_PARAMS)
+ self.gc = gc_class(chunk_size = 10, **GC_PARAMS)
self.gc.set_root_walker(LLInterpRootWalker(self))
self.llinterp = llinterp
self.prepare_graphs(flowgraphs)
Modified: pypy/dist/pypy/rpython/memory/support.py
==============================================================================
--- pypy/dist/pypy/rpython/memory/support.py (original)
+++ pypy/dist/pypy/rpython/memory/support.py Wed Feb 13 15:41:06 2008
@@ -1,14 +1,19 @@
from pypy.rpython.lltypesystem import lltype, llmemory
-from pypy.rlib.objectmodel import free_non_gc_object
+from pypy.rlib.objectmodel import free_non_gc_object, we_are_translated
+from pypy.rlib.debug import ll_assert
DEFAULT_CHUNK_SIZE = 1019
-def get_address_linked_list(chunk_size=DEFAULT_CHUNK_SIZE):
+
+def get_chunk_manager(chunk_size=DEFAULT_CHUNK_SIZE, cache={}):
+ try:
+ return cache[chunk_size]
+ except KeyError:
+ pass
CHUNK = lltype.ForwardReference()
- CHUNK.become(lltype.Struct('AddressLinkedListChunk',
- ('previous', lltype.Ptr(CHUNK)),
- ('length', lltype.Signed),
+ CHUNK.become(lltype.Struct('AddressChunk',
+ ('next', lltype.Ptr(CHUNK)),
('items', lltype.FixedSizeArray(
llmemory.Address, chunk_size))))
null_chunk = lltype.nullptr(CHUNK)
@@ -24,67 +29,150 @@
return lltype.malloc(CHUNK, flavor="raw")
result = self.free_list
- self.free_list = result.previous
+ self.free_list = result.next
return result
def put(self, chunk):
- chunk.previous = self.free_list
- self.free_list = chunk
+ if we_are_translated():
+ chunk.next = self.free_list
+ self.free_list = chunk
+ else:
+ # Don't cache the old chunks but free them immediately.
+ # Helps debugging, and avoids that old chunks full of
+ # addresses left behind by a test end up in genc...
+ lltype.free(chunk, flavor="raw")
unused_chunks = FreeList()
+ cache[chunk_size] = unused_chunks, null_chunk
+ return unused_chunks, null_chunk
+
+
+def get_address_stack(chunk_size=DEFAULT_CHUNK_SIZE, cache={}):
+ try:
+ return cache[chunk_size]
+ except KeyError:
+ pass
+
+ unused_chunks, null_chunk = get_chunk_manager(chunk_size)
- class AddressLinkedList(object):
+ class AddressStack(object):
_alloc_flavor_ = "raw"
def __init__(self):
self.chunk = unused_chunks.get()
- self.chunk.previous = null_chunk
- self.chunk.length = 0
+ self.chunk.next = null_chunk
+ self.used_in_last_chunk = 0
+ # invariant: self.used_in_last_chunk == 0 if and only if
+ # the AddressStack is empty
def enlarge(self):
new = unused_chunks.get()
- new.previous = self.chunk
- new.length = 0
+ new.next = self.chunk
self.chunk = new
- return new
+ self.used_in_last_chunk = 0
enlarge._dont_inline_ = True
def shrink(self):
old = self.chunk
- self.chunk = old.previous
+ self.chunk = old.next
unused_chunks.put(old)
- return self.chunk
+ self.used_in_last_chunk = chunk_size
shrink._dont_inline_ = True
def append(self, addr):
- if addr == llmemory.NULL:
- return
- chunk = self.chunk
- if chunk.length == chunk_size:
- chunk = self.enlarge()
- used_chunks = chunk.length
- chunk.length = used_chunks + 1
- chunk.items[used_chunks] = addr
+ used = self.used_in_last_chunk
+ if used == chunk_size:
+ self.enlarge()
+ used = 0
+ self.chunk.items[used] = addr
+ self.used_in_last_chunk = used + 1 # always > 0 here
def non_empty(self):
- chunk = self.chunk
- return chunk.length != 0 or bool(chunk.previous)
+ return self.used_in_last_chunk != 0
def pop(self):
- chunk = self.chunk
- if chunk.length == 0:
- chunk = self.shrink()
- used_chunks = self.chunk.length - 1
- result = chunk.items[used_chunks]
- chunk.length = used_chunks
+ used = self.used_in_last_chunk - 1
+ ll_assert(used >= 0, "pop on empty AddressStack")
+ result = self.chunk.items[used]
+ self.used_in_last_chunk = used
+ if used == 0 and self.chunk.next:
+ self.shrink()
return result
def delete(self):
cur = self.chunk
while cur:
- prev = cur.previous
+ next = cur.next
+ unused_chunks.put(cur)
+ cur = next
+ free_non_gc_object(self)
+
+ cache[chunk_size] = AddressStack
+ return AddressStack
+
+
+def get_address_deque(chunk_size=DEFAULT_CHUNK_SIZE, cache={}):
+ try:
+ return cache[chunk_size]
+ except KeyError:
+ pass
+
+ unused_chunks, null_chunk = get_chunk_manager(chunk_size)
+
+ class AddressDeque(object):
+ _alloc_flavor_ = "raw"
+
+ def __init__(self):
+ chunk = unused_chunks.get()
+ chunk.next = null_chunk
+ self.oldest_chunk = self.newest_chunk = chunk
+ self.index_in_oldest = 0
+ self.index_in_newest = 0
+
+ def enlarge(self):
+ new = unused_chunks.get()
+ new.next = null_chunk
+ self.newest_chunk.next = new
+ self.newest_chunk = new
+ self.index_in_newest = 0
+ enlarge._dont_inline_ = True
+
+ def shrink(self):
+ old = self.oldest_chunk
+ self.oldest_chunk = old.next
+ unused_chunks.put(old)
+ self.index_in_oldest = 0
+ shrink._dont_inline_ = True
+
+ def append(self, addr):
+ index = self.index_in_newest
+ if index == chunk_size:
+ self.enlarge()
+ index = 0
+ self.newest_chunk.items[index] = addr
+ self.index_in_newest = index + 1
+
+ def non_empty(self):
+ return (self.oldest_chunk != self.newest_chunk
+ or self.index_in_oldest < self.index_in_newest)
+
+ def popleft(self):
+ ll_assert(self.non_empty(), "pop on empty AddressDeque")
+ index = self.index_in_oldest
+ if index == chunk_size:
+ self.shrink()
+ index = 0
+ result = self.oldest_chunk.items[index]
+ self.index_in_oldest = index + 1
+ return result
+
+ def delete(self):
+ cur = self.oldest_chunk
+ while cur:
+ next = cur.next
unused_chunks.put(cur)
- cur = prev
+ cur = next
free_non_gc_object(self)
- return AddressLinkedList
+ cache[chunk_size] = AddressDeque
+ return AddressDeque
Modified: pypy/dist/pypy/rpython/memory/test/test_support.py
==============================================================================
--- pypy/dist/pypy/rpython/memory/test/test_support.py (original)
+++ pypy/dist/pypy/rpython/memory/test/test_support.py Wed Feb 13 15:41:06 2008
@@ -1,17 +1,18 @@
from pypy.rlib.objectmodel import free_non_gc_object
-from pypy.rpython.memory.support import get_address_linked_list
+from pypy.rpython.memory.support import get_address_stack
+from pypy.rpython.memory.support import get_address_deque
from pypy.rpython.test.test_llinterp import interpret
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.lltypesystem.llmemory import raw_malloc, raw_free, NULL
-class TestAddressLinkedList(object):
+class TestAddressStack(object):
def test_simple_access(self):
- AddressLinkedList = get_address_linked_list()
+ AddressStack = get_address_stack()
addr0 = raw_malloc(llmemory.sizeof(lltype.Signed))
addr1 = raw_malloc(llmemory.sizeof(lltype.Signed))
addr2 = raw_malloc(llmemory.sizeof(lltype.Signed))
- ll = AddressLinkedList()
+ ll = AddressStack()
ll.append(addr0)
ll.append(addr1)
ll.append(addr2)
@@ -27,20 +28,23 @@
assert not ll.non_empty()
ll.append(addr0)
ll.delete()
- ll = AddressLinkedList()
+ ll = AddressStack()
ll.append(addr0)
ll.append(addr1)
ll.append(addr2)
+ ll.append(NULL)
+ a = ll.pop()
+ assert a == NULL
ll.delete()
raw_free(addr2)
raw_free(addr1)
raw_free(addr0)
def test_big_access(self):
- AddressLinkedList = get_address_linked_list()
+ AddressStack = get_address_stack()
addrs = [raw_malloc(llmemory.sizeof(lltype.Signed))
for i in range(3000)]
- ll = AddressLinkedList()
+ ll = AddressStack()
for i in range(3000):
print i
ll.append(addrs[i])
@@ -57,12 +61,32 @@
for addr in addrs:
raw_free(addr)
-def test_linked_list_annotate():
- AddressLinkedList = get_address_linked_list(60)
+
+class TestAddressDeque:
+ def test_big_access(self):
+ import random
+ AddressDeque = get_address_deque(10)
+ deque = AddressDeque()
+ expected = []
+ for i in range(3000):
+ assert deque.non_empty() == (len(expected) > 0)
+ r = random.random()
+ if r < 0.51 and expected:
+ x = deque.popleft()
+ y = expected.pop(0)
+ assert x == y
+ else:
+ x = raw_malloc(llmemory.sizeof(lltype.Signed))
+ deque.append(x)
+ expected.append(x)
+
+
+def test_stack_annotate():
+ AddressStack = get_address_stack(60)
INT_SIZE = llmemory.sizeof(lltype.Signed)
def f():
addr = raw_malloc(INT_SIZE*100)
- ll = AddressLinkedList()
+ ll = AddressStack()
ll.append(addr)
ll.append(addr + INT_SIZE*1)
ll.append(addr + INT_SIZE*2)
@@ -86,7 +110,7 @@
a = ll.pop()
res = res and (a - INT_SIZE*i == addr)
ll.delete()
- ll = AddressLinkedList()
+ ll = AddressStack()
ll.append(addr)
ll.append(addr + INT_SIZE*1)
ll.append(addr + INT_SIZE*2)
@@ -95,6 +119,6 @@
return res
assert f()
- AddressLinkedList = get_address_linked_list()
+ AddressStack = get_address_stack()
res = interpret(f, [], malloc_check=False)
assert res
More information about the Pypy-commit
mailing list