[pypy-svn] r77043 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . test

arigo at codespeak.net
Mon Sep 13 17:08:09 CEST 2010


Author: arigo
Date: Mon Sep 13 17:08:07 2010
New Revision: 77043

Modified:
   pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py
   pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py
   pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py
Log:
Progress.  Tests are lagging behind the code :-/


Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py
==============================================================================
--- pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py	(original)
+++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py	Mon Sep 13 17:08:07 2010
@@ -1,10 +1,12 @@
-from pypy.rpython.lltypesystem import lltype, llarena
+from pypy.rpython.lltypesystem import lltype, llmemory, llarena, rffi
 from pypy.rpython.memory.gc.base import MovingGCBase
 from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
 from pypy.rlib.rarithmetic import LONG_BIT
 from pypy.rlib.objectmodel import we_are_translated
+from pypy.rlib.debug import ll_assert
 
 WORD = LONG_BIT // 8
+NULL = llmemory.NULL
 
 first_gcflag = 1 << (LONG_BIT//2)
 GCFLAG_BIG   = first_gcflag
@@ -56,6 +58,10 @@
 
 # ____________________________________________________________
 
+# Terminology: Arenas are collections of pages; both are fixed-size.
+# A page contains a number of allocated objects, called "blocks".
+
+
 class Arena(object):
     _alloc_flavor_ = "raw"
 
@@ -67,12 +73,155 @@
         self.arena_base = llarena.arena_malloc(self.arena_size, False)
         if not self.arena_base:
             raise MemoryError("couldn't allocate the next arena")
-        # 'freepages' points to the first unused page
-        self.freepages = start_of_page(self.arena_base + page_size - 1,
-                                       page_size)
+        # 'freepage' points to the first unused page
         # 'nfreepages' is the number of unused pages
+        self.freepage = start_of_page(self.arena_base + page_size - 1,
+                                      page_size)
         arena_end = self.arena_base + self.arena_size
-        self.nfreepages = (arena_end - self.freepages) / page_size
+        self.nfreepages = (arena_end - self.freepage) // page_size
+        self.nuninitializedpages = self.nfreepages
+        #
+        # The arenas containing at least one free page are linked in a
+        # doubly-linked list.  We keep this list ordered: it starts
+        # with the arenas that have the largest number of allocated
+        # pages, so that the least allocated arenas near the end of the
+        # list have a chance to become completely empty and be freed.
+        self.nextarena = None
+        self.prevarena = None
+
+
+# Each initialized page in the arena starts with a PAGE_HEADER.  The
+# arena typically also contains uninitialized pages at the end.
+# Similarly, each page contains blocks of a given size, which can be
+# either allocated or freed, and a number of free blocks at the end of
+# the page are uninitialized.  The free but initialized blocks contain a
+# pointer to the next free block, forming a chained list.
+
+PAGE_PTR = lltype.Ptr(lltype.ForwardReference())
+PAGE_HEADER = lltype.Struct('page_header',
+    ('nfree', lltype.Signed),   # number of free blocks in this page
+    ('nuninitialized', lltype.Signed),   # num. uninitialized blocks (<= nfree)
+    ('freeblock', llmemory.Address),  # first free block, chained list
+    ('prevpage', PAGE_PTR),  # chained list of pages with the same size class
+    )
+PAGE_PTR.TO.become(PAGE_HEADER)
+PAGE_NULL = lltype.nullptr(PAGE_HEADER)
+
+
+class ArenaCollection(object):
+    _alloc_flavor_ = "raw"
+
+    def __init__(self, arena_size, page_size, small_request_threshold):
+        self.arena_size = arena_size
+        self.page_size = page_size
+        self.small_request_threshold = small_request_threshold
+        #
+        # 'page_for_size': for each size N between WORD and
+        # small_request_threshold (inclusive), contains either NULL or
+        # a pointer to a page that has room for at least one more
+        # allocation of the given size.
+        length = small_request_threshold / WORD + 1
+        self.page_for_size = lltype.malloc(rffi.CArray(PAGE_PTR), length,
+                                           flavor='raw', zero=True)
+        self.arenas_start = None   # the most allocated (but not full) arena
+        self.arenas_end   = None   # the least allocated (but not empty) arena
+
+
+    def malloc(self, size):
+        """Allocate a block from a page in an arena."""
+        ll_assert(size > 0, "malloc: size is zero or negative")
+        ll_assert(size <= self.small_request_threshold, "malloc: size too big")
+        ll_assert((size & (WORD-1)) == 0, "malloc: size is not aligned")
+        #
+        # Get the page to use from the size
+        size_class = size / WORD
+        page = self.page_for_size[size_class]
+        if page == PAGE_NULL:
+            page = self.allocate_new_page(size_class)
+        #
+        # The result is simply 'page.freeblock'
+        ll_assert(page.nfree > 0, "page_for_size lists a page with nfree <= 0")
+        result = page.freeblock
+        page.nfree -= 1
+        if page.nfree == 0:
+            #
+            # This was the last free block, so unlink the page from the
+            # chained list.
+            self.page_for_size[size_class] = page.prevpage
+            #
+        else:
+            # This was not the last free block, so update 'page.freeblock'
+            # to point to the next free block.  Two cases here...
+            if page.nfree < page.nuninitialized:
+                # The 'result' was not initialized at all.  We must compute
+                # the next free block by adding 'size' to 'page.freeblock'.
+                page.freeblock = result + size
+                page.nuninitialized -= 1
+                ll_assert(page.nfree == page.nuninitialized,
+                          "bad value of page.nuninitialized")
+            else:
+                # The 'result' was part of the chained list; read the next.
+                page.freeblock = result.address[0]
+        #
+        return result
+
+
+    def allocate_new_page(self, size_class):
+        """Allocate a new page for the given size_class."""
+        #
+        # Get the arena with the highest number of pages already allocated
+        arena = self.arenas_start
+        if arena is None:
+            # No arenas.  Get a fresh new arena.
+            ll_assert(self.arenas_end is None, "!arenas_start && arenas_end")
+            arena = Arena(self.arena_size, self.page_size)
+            self.arenas_start = arena
+            self.arenas_end = arena
+        #
+        # Get the page from there (same logic as in malloc() except on
+        # pages instead of on blocks)
+        result = arena.freepage
+        arena.nfreepages -= 1
+        if arena.nfreepages == 0:
+            #
+            # This was the last free page, so unlink the arena from the
+            # chained list.
+            self.arenas_start = arena.nextarena
+            if self.arenas_start is None:
+                self.arenas_end = None
+            else:
+                self.arenas_start.prevarena = None
+            #
+        else:
+            # This was not the last free page, so update 'arena.freepage'
+            # to point to the next free page.  Two cases here...
+            if arena.nfreepages < arena.nuninitializedpages:
+                # The 'result' was not initialized at all.  We must compute
+                # the next free page by adding 'page_size' to 'arena.freepage'.
+                arena.freepage = result + self.page_size
+                arena.nuninitializedpages -= 1
+                ll_assert(arena.nfreepages == arena.nuninitializedpages,
+                          "bad value of page.nuninitialized")
+            else:
+                # The 'result' was part of the chained list; read the next.
+                arena.freepage = result.address[0]
+                llarena.arena_reset(result,
+                                    llmemory.sizeof(llmemory.Address),
+                                    False)
+        #
+        # Initialize the fields of the resulting page
+        llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER))
+        page = llmemory.cast_adr_to_ptr(result, PAGE_PTR)
+        #
+        hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))
+        page.nfree = ((self.page_size - hdrsize) / WORD) // size_class
+        #
+        page.nuninitialized = page.nfree
+        page.freeblock = result + hdrsize
+        page.prevpage = PAGE_NULL
+        ll_assert(self.page_for_size[size_class] == PAGE_NULL,
+                  "allocate_new_page() called but a page is already waiting")
+        self.page_for_size[size_class] = page
+        return page
 
 # ____________________________________________________________
 # Helpers to go from a pointer to the start of its page
@@ -86,8 +235,8 @@
 
 def _start_of_page_untranslated(addr, page_size):
     assert isinstance(addr, llarena.fakearenaaddress)
-    shift = page_size // 2     # for testing, assuming the whole arena is not
-                               # on a page boundary
+    shift = 4     # for testing, we assume that the whole arena is not
+                  # on a page boundary
     ofs = ((addr.offset - shift) & ~(page_size-1)) + shift
     return llarena.fakearenaaddress(addr.arena, ofs)
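
The block-allocation logic in malloc() above (a chained list of already-freed
blocks, plus a tail of uninitialized blocks tracked by 'nuninitialized') can be
illustrated with a small pure-Python model.  This is only a sketch: PageModel,
alloc_block and free_block are hypothetical names, it works on integer offsets
instead of llmemory addresses, and freeing blocks is not part of this checkin.

    class PageModel(object):
        def __init__(self, nblocks, block_size):
            self.block_size = block_size
            self.nfree = nblocks            # number of free blocks
            self.nuninitialized = nblocks   # uninitialized blocks, <= nfree
            self.freeblock = 0              # offset of the first free block
            self.next_free = {}             # offset -> offset of next free block

        def alloc_block(self):
            assert self.nfree > 0
            result = self.freeblock
            self.nfree -= 1
            if self.nfree < self.nuninitialized:
                # 'result' was never initialized: the next free block
                # simply follows it in memory.
                self.freeblock = result + self.block_size
                self.nuninitialized -= 1
            else:
                # 'result' was a previously freed block: follow the chained list.
                self.freeblock = self.next_free[result]
            return result

        def free_block(self, offset):
            # Hypothetical: freed blocks are pushed on the chained list
            # and stay "initialized".
            self.next_free[offset] = self.freeblock
            self.freeblock = offset
            self.nfree += 1

    page = PageModel(nblocks=4, block_size=16)
    assert [page.alloc_block() for _ in range(3)] == [0, 16, 32]
    page.free_block(16)
    assert page.alloc_block() == 16   # reused from the chained list
    assert page.alloc_block() == 48   # taken from the uninitialized tail

allocate_new_page() applies the same two-case logic to whole pages inside an
arena, with 'arena.freepage' and 'arena.nuninitializedpages' playing the roles
of 'freeblock' and 'nuninitialized'.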
 

Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py
==============================================================================
--- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py	(original)
+++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_direct.py	Mon Sep 13 17:08:07 2010
@@ -456,3 +456,7 @@
     def test_varsized_from_prebuilt_gc(self):
         DirectGCTest.test_varsized_from_prebuilt_gc(self)
     test_varsized_from_prebuilt_gc.GC_PARAMS = {'space_size': 3 * 1024 * WORD}
+
+
+class TestGen2GC(DirectGCTest):
+    from pypy.rpython.memory.gc.gen2 import Gen2GC as GCClass

Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py
==============================================================================
--- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py	(original)
+++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py	Mon Sep 13 17:08:07 2010
@@ -1,12 +1,157 @@
 from pypy.rpython.memory.gc import gen2
+from pypy.rpython.memory.gc.gen2 import WORD, PAGE_NULL, PAGE_HEADER, PAGE_PTR
+from pypy.rpython.lltypesystem import lltype, llmemory, llarena
 
-def test_arena():
-    SHIFT = 4
-    #
+SHIFT = 4
+hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))
+
+
+def test_allocate_arena():
     a = gen2.Arena(SHIFT + 8*20, 8)
-    assert a.freepages == a.arena_base + SHIFT
+    assert a.freepage == a.arena_base + SHIFT
     assert a.nfreepages == 20
+    assert a.nuninitializedpages == 20
+    assert a.prevarena is None
+    assert a.nextarena is None
     #
     a = gen2.Arena(SHIFT + 8*20 + 7, 8)
-    assert a.freepages == a.arena_base + SHIFT
+    assert a.freepage == a.arena_base + SHIFT
     assert a.nfreepages == 20
+    assert a.nuninitializedpages == 20
+    assert a.prevarena is None
+    assert a.nextarena is None
+
+
+def test_allocate_new_page():
+    pagesize = hdrsize + 16
+    arenasize = pagesize * 4 - 1
+    #
+    def checknewpage(page, size_class):
+        size = WORD * size_class
+        assert page.nfree == (pagesize - hdrsize) // size
+        assert page.nuninitialized == page.nfree
+        page2 = page.freeblock - hdrsize
+        assert llmemory.cast_ptr_to_adr(page) == page2
+        assert page.prevpage == PAGE_NULL
+    #
+    ac = gen2.ArenaCollection(arenasize, pagesize, 99)
+    assert ac.arenas_start is ac.arenas_end is None
+    #
+    page = ac.allocate_new_page(5)
+    checknewpage(page, 5)
+    a = ac.arenas_start
+    assert a is not None
+    assert a is ac.arenas_end
+    assert a.nfreepages == 2
+    assert a.freepage == a.arena_base + SHIFT + pagesize
+    assert ac.page_for_size[5] == page
+    #
+    page = ac.allocate_new_page(3)
+    checknewpage(page, 3)
+    assert a is ac.arenas_start is ac.arenas_end
+    assert a.nfreepages == 1
+    assert a.freepage == a.arena_base + SHIFT + 2*pagesize
+    assert ac.page_for_size[3] == page
+    #
+    page = ac.allocate_new_page(4)
+    checknewpage(page, 4)
+    assert ac.arenas_start is ac.arenas_end is None    # has been unlinked
+    assert ac.page_for_size[4] == page
+
+
+def arena_collection_for_test(pagesize, *pagelayouts):
+    nb_pages = len(pagelayouts[0])
+    arenasize = pagesize * (nb_pages + 1) - 1
+    ac = gen2.ArenaCollection(arenasize, pagesize, 9*WORD)
+    #
+    def link(pageaddr, size_class, size_block, nblocks, nusedblocks):
+        llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
+        page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
+        page.nfree = nblocks - nusedblocks
+        page.nuninitialized = page.nfree
+        page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
+        page.prevpage = ac.page_for_size[size_class]
+        ac.page_for_size[size_class] = page
+    #
+    alist = []
+    for layout in pagelayouts:
+        assert len(layout) == nb_pages
+        assert " " not in layout.rstrip(" ")
+        a = gen2.Arena(arenasize, pagesize)
+        alist.append(a)
+        assert lltype.typeOf(a.freepage) == llmemory.Address
+        startpageaddr = a.freepage
+        a.freepage += pagesize * min((layout + " ").index(" "),
+                                     (layout + ".").index("."))
+        a.nfreepages = layout.count(" ") + layout.count(".")
+        a.nuninitializedpages = layout.count(" ")
+        #
+        pageaddr = startpageaddr
+        for i, c in enumerate(layout):
+            if '1' <= c <= '9':   # a partially used page (1 block free)
+                size_class = int(c)
+                size_block = WORD * size_class
+                nblocks = (pagesize - hdrsize) // size_block
+                link(pageaddr, size_class, size_block, nblocks, nblocks-1)
+            elif c == '.':    # a free, but initialized, page
+                next_free_num = min((layout + " ").find(" ", i+1),
+                                    (layout + ".").find(".", i+1))
+                addr = startpageaddr + pagesize * next_free_num
+                llarena.arena_reserve(pageaddr,
+                                      llmemory.sizeof(llmemory.Address))
+                pageaddr.address[0] = addr
+            elif c == '#':    # a random full page, not in any linked list
+                pass
+            elif c == ' ':    # the tail is uninitialized free pages
+                break
+            pageaddr += pagesize
+    #
+    assert alist == sorted(alist, key=lambda a: a.nfreepages)
+    #
+    ac.arenas_start = alist[0]
+    ac.arenas_end   = alist[-1]
+    for a, b in zip(alist[:-1], alist[1:]):
+        a.nextarena = b
+        b.prevarena = a
+    return ac
+
+
+def getarena(ac, num, total=None):
+    if total is not None:
+        a = getarena(ac, total-1)
+        assert a is ac.arenas_end
+        assert a.nextarena is None
+    prev = None
+    a = ac.arenas_start
+    for i in range(num):
+        assert a.prevarena is prev
+        prev = a
+        a = a.nextarena
+    return a
+
+def checkpage(ac, page, arena, nb_page):
+    pageaddr = llmemory.cast_ptr_to_adr(page)
+    assert pageaddr == arena.arena_base + SHIFT + nb_page * ac.page_size
+
+
+def test_simple_arena_collection():
+    # Test supposing that we have two partially-used arenas
+    pagesize = hdrsize + 16
+    ac = arena_collection_for_test(pagesize,
+                                   "##.. ",
+                                   ".#   ")
+    assert ac.arenas_start.nfreepages == 3
+    assert ac.arenas_end.nfreepages == 4
+    #
+    a0 = getarena(ac, 0, 2)
+    a1 = getarena(ac, 1, 2)
+    page = ac.allocate_new_page(1); checkpage(ac, page, a0, 2)
+    page = ac.allocate_new_page(2); checkpage(ac, page, a0, 3)
+    assert getarena(ac, 0, 2) is a0
+    page = ac.allocate_new_page(3); checkpage(ac, page, a0, 4)
+    assert getarena(ac, 0, 1) is a1
+    page = ac.allocate_new_page(4); checkpage(ac, page, a1, 0)
+    page = ac.allocate_new_page(5); checkpage(ac, page, a1, 2)
+    page = ac.allocate_new_page(6); checkpage(ac, page, a1, 3)
+    page = ac.allocate_new_page(7); checkpage(ac, page, a1, 4)
+    assert ac.arenas_start is ac.arenas_end is None
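
For reference, the one-character-per-page layout strings accepted by
arena_collection_for_test() above follow the notation restated by the
hypothetical helper below (not part of test_gen2.py): '1'..'9' is a partially
used page of that size class with one block free, '#' a full page not in any
linked list, '.' a free but initialized page, and ' ' the uninitialized free
pages at the tail of the arena.

    def describe_layout(layout):
        meaning = []
        for c in layout:
            if '1' <= c <= '9':
                meaning.append('partial page, size class %s (1 block free)' % c)
            elif c == '#':
                meaning.append('full page')
            elif c == '.':
                meaning.append('free, initialized page')
            elif c == ' ':
                meaning.append('uninitialized free page')
        return meaning

    assert describe_layout("##.. ") == ['full page', 'full page',
                                        'free, initialized page',
                                        'free, initialized page',
                                        'uninitialized free page']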


