[pypy-svn] r77081 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . test
arigo at codespeak.net
Wed Sep 15 11:51:45 CEST 2010
Author: arigo
Date: Wed Sep 15 11:51:44 2010
New Revision: 77081
Modified:
pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py
pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py
Log:
Start to work on mass_free().
Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py
==============================================================================
--- pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py (original)
+++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/minimarkpage.py Wed Sep 15 11:51:44 2010
@@ -27,11 +27,11 @@
PAGE_PTR = lltype.Ptr(lltype.ForwardReference())
PAGE_HEADER = lltype.Struct('PageHeader',
- # -- The following two pointers make a chained list of pages with the same
- # size class. Warning, 'prevpage' contains random garbage for the first
- # entry in the list.
+ # -- The following pointer makes a chained list of pages. For non-full
+ # pages, it is a chained list of pages having the same size class,
+ # rooted in 'page_for_size[size_class]'. For full pages, it is a
+ # different chained list rooted in 'full_page_for_size[size_class]'.
('nextpage', PAGE_PTR),
- ('prevpage', PAGE_PTR),
# -- The number of free blocks, and the number of uninitialized blocks.
# The number of allocated blocks is the rest.
('nuninitialized', lltype.Signed),
@@ -61,11 +61,13 @@
length = small_request_threshold / WORD + 1
self.page_for_size = lltype.malloc(rffi.CArray(PAGE_PTR), length,
flavor='raw', zero=True)
+ self.full_page_for_size = lltype.malloc(rffi.CArray(PAGE_PTR), length,
+ flavor='raw', zero=True)
self.nblocks_for_size = lltype.malloc(rffi.CArray(lltype.Signed),
length, flavor='raw')
- hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))
+ self.hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))
for i in range(1, length):
- self.nblocks_for_size[i] = (page_size - hdrsize) // (WORD * i)
+ self.nblocks_for_size[i] = (page_size - self.hdrsize) // (WORD * i)
#
self.uninitialized_pages = PAGE_NULL
self.num_uninitialized_pages = 0
@@ -91,7 +93,7 @@
#
# The 'result' was part of the chained list; read the next.
page.nfree -= 1
- page.freeblock = result.address[0]
+ freeblock = result.address[0]
llarena.arena_reset(result,
llmemory.sizeof(llmemory.Address),
False)
@@ -100,13 +102,19 @@
# The 'result' is part of the uninitialized blocks.
ll_assert(page.nuninitialized > 0,
"fully allocated page found in the page_for_size list")
- page.freeblock = result + nsize
page.nuninitialized -= 1
- if page.nuninitialized == 0:
- #
- # This was the last free block, so unlink the page from the
- # chained list.
- self.page_for_size[size_class] = page.nextpage
+ if page.nuninitialized > 0:
+ freeblock = result + nsize
+ else:
+ freeblock = NULL
+ #
+ page.freeblock = freeblock
+ if freeblock == NULL:
+ # This was the last free block, so unlink the page from the
+ # chained list and put it in the 'full_page_for_size' list.
+ self.page_for_size[size_class] = page.nextpage
+ page.nextpage = self.full_page_for_size[size_class]
+ self.full_page_for_size[size_class] = page
#
llarena.arena_reserve(result, _dummy_size(size), False)
return result
@@ -133,10 +141,9 @@
llarena.arena_reserve(page, llmemory.sizeof(PAGE_HEADER))
result = llmemory.cast_adr_to_ptr(page, PAGE_PTR)
#
- hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))
result.nuninitialized = self.nblocks_for_size[size_class]
result.nfree = 0
- result.freeblock = page + hdrsize
+ result.freeblock = page + self.hdrsize
result.nextpage = PAGE_NULL
ll_assert(self.page_for_size[size_class] == PAGE_NULL,
"allocate_new_page() called but a page is already waiting")
@@ -171,6 +178,72 @@
allocate_new_arena._dont_inline_ = True
+ def mass_free(self, ok_to_free_func):
+ """For each object, if ok_to_free_func(obj) returns True, then free
+ the object.
+ """
+ #
+ # For each size class:
+ size_class = self.small_request_threshold / WORD
+ while size_class >= 1:
+ #
+ # Walk the pages in 'page_for_size[size_class]' and free objects.
+ # Pages completely freed are added to 'self.free_pages', and
+ # become available for reuse by any size class. Pages not
+ # completely freed are re-chained in 'newlist'.
+ newlist = self.mass_free_in_list(self.page_for_size[size_class],
+ size_class, ok_to_free_func)
+ self.page_for_size[size_class] = newlist
+ #
+ size_class -= 1
+
+
+ def mass_free_in_list(self, page, size_class, ok_to_free_func):
+ remaining_list = PAGE_NULL
+ nblocks = self.nblocks_for_size[size_class]
+ block_size = size_class * WORD
+ #
+ while page != PAGE_NULL:
+ self.walk_page(page, block_size, nblocks, ok_to_free_func)
+ page = page.nextpage
+ #
+ return remaining_list
+
+
+ def walk_page(self, page, block_size, nblocks, ok_to_free_func):
+ """Walk over all objects in a page, and ask ok_to_free_func()."""
+ #
+ freeblock = page.freeblock
+ obj = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
+ obj += self.hdrsize
+ surviving_count = 0
+ #
+ nblocks -= page.nuninitialized
+ while nblocks > 0:
+ #
+ if obj == freeblock:
+ #
+ # 'obj' points to a free block.
+ freeblock = obj.address[0]
+ #
+ else:
+ # 'obj' points to a valid object.
+ ll_assert(not freeblock or freeblock > obj,
+ "freeblocks are linked out of order")
+ #
+ if ok_to_free_func(obj):
+ xxx
+ else:
+ # The object should survive.
+ surviving_count += 1
+ #
+ obj += block_size
+ nblocks -= 1
+ #
+ # Return the number of objects left
+ return surviving_count
+
+
def free(self, obj, size):
"""Free a previously malloc'ed block."""
ll_assert(size > 0, "free: size is null or negative")
Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py
==============================================================================
--- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py (original)
+++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_minimarkpage.py Wed Sep 15 11:51:44 2010
@@ -2,6 +2,7 @@
from pypy.rpython.memory.gc.minimarkpage import ArenaCollection
from pypy.rpython.memory.gc.minimarkpage import PAGE_HEADER, PAGE_PTR
from pypy.rpython.memory.gc.minimarkpage import PAGE_NULL, WORD
+from pypy.rpython.memory.gc.minimarkpage import _dummy_size
from pypy.rpython.lltypesystem import lltype, llmemory, llarena
from pypy.rpython.lltypesystem.llmemory import cast_ptr_to_adr
@@ -57,7 +58,7 @@
assert ac.page_for_size[4] == page
-def arena_collection_for_test(pagesize, pagelayout):
+def arena_collection_for_test(pagesize, pagelayout, fill_with_objects=False):
assert " " not in pagelayout.rstrip(" ")
nb_pages = len(pagelayout)
arenasize = pagesize * (nb_pages + 1) - 1
@@ -69,10 +70,16 @@
page.nfree = 0
page.nuninitialized = nblocks - nusedblocks
page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
- page.nextpage = ac.page_for_size[size_class]
- ac.page_for_size[size_class] = page
- if page.nextpage:
- page.nextpage.prevpage = page
+ if nusedblocks < nblocks:
+ chainedlists = ac.page_for_size
+ else:
+ chainedlists = ac.full_page_for_size
+ page.nextpage = chainedlists[size_class]
+ chainedlists[size_class] = page
+ if fill_with_objects:
+ for i in range(nusedblocks):
+ objaddr = pageaddr + hdrsize + i * size_block
+ llarena.arena_reserve(objaddr, _dummy_size(size_block))
#
ac.allocate_new_arena()
num_initialized_pages = len(pagelayout.rstrip(" "))
@@ -81,20 +88,22 @@
ac.num_uninitialized_pages -= num_initialized_pages
#
for i in reversed(range(num_initialized_pages)):
+ pageaddr = pagenum(ac, i)
c = pagelayout[i]
if '1' <= c <= '9': # a partially used page (1 block free)
size_class = int(c)
size_block = WORD * size_class
nblocks = (pagesize - hdrsize) // size_block
- link(pagenum(ac, i), size_class, size_block,
- nblocks, nblocks-1)
+ link(pageaddr, size_class, size_block, nblocks, nblocks-1)
elif c == '.': # a free, but initialized, page
- pageaddr = pagenum(ac, i)
llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address))
pageaddr.address[0] = ac.free_pages
ac.free_pages = pageaddr
- elif c == '#': # a random full page, not in any linked list
- pass
+ elif c == '#': # a random full page, in the list 'full_pages'
+ size_class = fill_with_objects or 1
+ size_block = WORD * size_class
+ nblocks = (pagesize - hdrsize) // size_block
+ link(pageaddr, size_class, size_block, nblocks, nblocks)
#
ac.allocate_new_arena = lambda: should_not_allocate_new_arenas
return ac
@@ -187,3 +196,24 @@
- 1 # for start_of_page()
- 1 # the just-allocated page
)
+
+class OkToFree(object):
+ def __init__(self, ac, answer):
+ self.ac = ac
+ self.answer = answer
+ self.seen = []
+
+ def __call__(self, addr):
+ self.seen.append(addr - self.ac._startpageaddr)
+ if isinstance(self.answer, bool):
+ return self.answer
+ else:
+ return self.answer(addr)
+
+def test_mass_free_partial_remains():
+ pagesize = hdrsize + 7*WORD
+ ac = arena_collection_for_test(pagesize, "2", fill_with_objects=2)
+ ok_to_free = OkToFree(ac, False)
+ ac.mass_free(ok_to_free)
+ assert ok_to_free.seen == [hdrsize + 0*WORD,
+ hdrsize + 2*WORD]
More information about the Pypy-commit
mailing list