[pypy-svn] r77597 - in pypy/branch/32ptr-on-64bit/pypy: config rlib rpython/lltypesystem rpython/memory rpython/memory/gc rpython/memory/gc/test rpython/memory/gctransform translator/c

arigo at codespeak.net
Tue Oct 5 13:09:42 CEST 2010


Author: arigo
Date: Tue Oct  5 13:09:40 2010
New Revision: 77597

Added:
   pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/minimarkpage2.py
      - copied, changed from r77571, pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/minimarkpage.py
   pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/test/test_minimarkpage2.py
      - copied, changed from r77571, pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/test/test_minimarkpage.py
Modified:
   pypy/branch/32ptr-on-64bit/pypy/config/translationoption.py
   pypy/branch/32ptr-on-64bit/pypy/rlib/rmmap.py
   pypy/branch/32ptr-on-64bit/pypy/rpython/lltypesystem/ll2ctypes.py
   pypy/branch/32ptr-on-64bit/pypy/rpython/lltypesystem/rcompressed.py
   pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/base.py
   pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/minimark.py
   pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/minimarkpage.py
   pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gctransform/framework.py
   pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gctypelayout.py
   pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gcwrapper.py
   pypy/branch/32ptr-on-64bit/pypy/translator/c/genc.py
   pypy/branch/32ptr-on-64bit/pypy/translator/c/node.py
Log:
Random progress.


Modified: pypy/branch/32ptr-on-64bit/pypy/config/translationoption.py
==============================================================================
--- pypy/branch/32ptr-on-64bit/pypy/config/translationoption.py	(original)
+++ pypy/branch/32ptr-on-64bit/pypy/config/translationoption.py	Tue Oct  5 13:09:40 2010
@@ -196,7 +196,8 @@
     BoolOption("compressptr", "Compress pointers; limits the program to 32GB",
                default=False, cmdline="--compressptr",
                requires=[("translation.type_system", "lltype"),
-                         ("translation.taggedpointers", False)]
+                         ("translation.taggedpointers", False),
+                         ("translation.gc", "minimark")]    # for now
                + [("compressptr (64-bit only)", True)]*(not IS_64_BITS)),
 
     # options for ootype

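For readers following along: turning on --compressptr now forces the minimark GC through the `requires` chain above. A minimal sketch of exercising that, assuming the usual config helper in this module behaves as elsewhere in PyPy (and noting it can only succeed on a 64-bit host, because of the last requirement):

    from pypy.config.translationoption import get_combined_translation_config

    config = get_combined_translation_config(translating=True)
    config.translation.compressptr = True
    # the requires-list above should now have forced these two settings:
    assert config.translation.gc == "minimark"
    assert not config.translation.taggedpointers
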
Modified: pypy/branch/32ptr-on-64bit/pypy/rlib/rmmap.py
==============================================================================
--- pypy/branch/32ptr-on-64bit/pypy/rlib/rmmap.py	(original)
+++ pypy/branch/32ptr-on-64bit/pypy/rlib/rmmap.py	Tue Oct  5 13:09:40 2010
@@ -50,7 +50,7 @@
     constant_names = ['MAP_SHARED', 'MAP_PRIVATE',
                       'PROT_READ', 'PROT_WRITE',
                       'MS_SYNC']
-    opt_constant_names = ['MAP_ANON', 'MAP_ANONYMOUS',
+    opt_constant_names = ['MAP_ANON', 'MAP_ANONYMOUS', 'MAP_FIXED',
                           'PROT_EXEC',
                           'MAP_DENYWRITE', 'MAP_EXECUTABLE']
     for name in constant_names:

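MAP_FIXED is exported here because minimarkpage2.py (below) needs to pin arenas at chosen addresses in the low 32GB. A standalone ctypes sketch of the same system call, with Linux/x86-64 constant values hard-coded as an assumption (rmmap normally discovers them via CConfig); note that plain MAP_FIXED silently replaces any mapping already at that address:

    import ctypes, ctypes.util

    libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
    libc.mmap.restype = ctypes.c_void_p
    libc.mmap.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_int,
                          ctypes.c_int, ctypes.c_int, ctypes.c_long]

    PROT_READ, PROT_WRITE = 0x1, 0x2                  # Linux x86-64 values
    MAP_PRIVATE, MAP_ANONYMOUS, MAP_FIXED = 0x02, 0x20, 0x10

    addr = 0x10000000      # same ARENA_ADDR_START as minimarkpage2.py
    size = 0x100000        # one 1MB arena
    result = libc.mmap(addr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0)
    assert result == addr  # MAP_FIXED maps exactly there, or fails with -1
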
Modified: pypy/branch/32ptr-on-64bit/pypy/rpython/lltypesystem/ll2ctypes.py
==============================================================================
--- pypy/branch/32ptr-on-64bit/pypy/rpython/lltypesystem/ll2ctypes.py	(original)
+++ pypy/branch/32ptr-on-64bit/pypy/rpython/lltypesystem/ll2ctypes.py	Tue Oct  5 13:09:40 2010
@@ -1154,16 +1154,14 @@
         return hop.genop('direct_ptradd', [v_ptr, v_n],
                          resulttype = v_ptr.concretetype)
 
-class _lladdress(long):
+class _lladdress(object):
     _TYPE = llmemory.Address
 
-    def __new__(cls, void_p):
+    def __init__(self, void_p):
         if isinstance(void_p, (int, long)):
             void_p = ctypes.c_void_p(void_p)
-        self = long.__new__(cls, void_p.value)
         self.void_p = void_p
         self.intval = intmask(void_p.value)
-        return self
 
     def _cast_to_ptr(self, TP):
         return force_cast(TP, self.intval)
@@ -1179,6 +1177,12 @@
     def __ne__(self, other):
         return not self == other
 
+    def __add__(self, other):
+        return _lladdress(self.intval + other)
+
+    def __sub__(self, other):
+        return _lladdress(self.intval - other)
+
 class _llgcopaque(lltype._container):
     _TYPE = llmemory.GCREF.TO
     _name = "_llgcopaque"

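_lladdress used to inherit from long, so + and - came for free but produced plain longs; now that it is a plain object, the arithmetic is supplied explicitly and keeps returning _lladdress instances. A condensed, standalone model of the resulting behaviour (toy names; the real class also wraps a ctypes void_p and compares on intval):

    class Address(object):          # toy stand-in for _lladdress
        def __init__(self, intval):
            self.intval = intval
        def __add__(self, n):       # address + integer -> address
            return Address(self.intval + n)
        def __sub__(self, n):       # address - integer -> address
            return Address(self.intval - n)

    p = Address(0x1000)
    assert (p + 8).intval == 0x1008
    assert (p - 8).intval == 0x0ff8
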
Modified: pypy/branch/32ptr-on-64bit/pypy/rpython/lltypesystem/rcompressed.py
==============================================================================
--- pypy/branch/32ptr-on-64bit/pypy/rpython/lltypesystem/rcompressed.py	(original)
+++ pypy/branch/32ptr-on-64bit/pypy/rpython/lltypesystem/rcompressed.py	Tue Oct  5 13:09:40 2010
@@ -7,6 +7,13 @@
 
 
 def get_compressed_gcref_repr(rtyper, baserepr):
+    # Return either the original baserepr, or another repr standing for
+    # a HiddenGcRef32.  The idea is that we only get a HiddenGcRef32 for
+    # fixed-size structures (XXX that are not too big); thus this is only
+    # for structures that get allocated by the minimarkpage2 mmap()-
+    # within-32GB-of-RAM allocator.
+    if baserepr.lowleveltype.TO._is_varsize():
+        return baserepr
     try:
         comprmgr = rtyper.compressed_gcref_manager
     except AttributeError:

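The compression scheme itself is not in this file, but the constants elsewhere in the commit (a 32GB ceiling, 8-byte-aligned allocation) point at the standard trick. The names HIDE_INTO_ADR32/show_from_adr32 appear later in this diff; the shift-by-3 encoding below is a hedged sketch of the idea, not necessarily the branch's actual implementation:

    ADDR_LIMIT = 0x800000000        # 32GB, matching ARENA_ADDR_STOP below

    def hide_into_adr32(addr):
        # an 8-byte-aligned address below 32GB needs 35 bits; dropping
        # the 3 alignment bits makes it fit an unsigned 32-bit field
        assert 0 <= addr < ADDR_LIMIT and addr % 8 == 0
        return addr >> 3

    def show_from_adr32(h32):
        return h32 << 3

    assert show_from_adr32(hide_into_adr32(0x7FFFFFF8)) == 0x7FFFFFF8
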
Modified: pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/base.py
==============================================================================
--- pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/base.py	(original)
+++ pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/base.py	Tue Oct  5 13:09:40 2010
@@ -50,7 +50,7 @@
     # collection.  It is automatically set to True by test_gc.py.  The
     # checking logic is translatable, so the flag can be set to True
     # here before translation.
-    DEBUG = False
+    DEBUG = True
 
     def set_query_functions(self, is_varsize, has_gcptr_in_varsize,
                             is_gcarrayofgcptr,

Modified: pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/minimark.py
==============================================================================
--- pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/minimark.py	(original)
+++ pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/minimark.py	Tue Oct  5 13:09:40 2010
@@ -3,7 +3,7 @@
 from pypy.rpython.lltypesystem.lloperation import llop
 from pypy.rpython.lltypesystem.llmemory import raw_malloc_usage
 from pypy.rpython.memory.gc.base import GCBase, MovingGCBase
-from pypy.rpython.memory.gc import minimarkpage, base, generation
+from pypy.rpython.memory.gc import base, generation
 from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask, r_uint
 from pypy.rlib.rarithmetic import LONG_BIT_SHIFT
 from pypy.rlib.debug import ll_assert, debug_print, debug_start, debug_stop
@@ -102,11 +102,12 @@
 
         # The system page size.  Like obmalloc.c, we assume that it is 4K
         # for 32-bit systems; unlike obmalloc.c, we assume that it is 8K
-        # for 64-bit systems, for consistent results.
+        # for 64-bit systems, for consistent results.  (ignored if we
+        # use minimarkpage2.py)
         "page_size": 1024*WORD,
 
         # The size of an arena.  Arenas are groups of pages allocated
-        # together.
+        # together.  (ignored if we use minimarkpage2.py)
         "arena_size": 65536*WORD,
 
         # The maximum size of an object allocated compactly.  All objects
@@ -181,7 +182,14 @@
         #
         # The ArenaCollection() handles the nonmovable objects allocation.
         if ArenaCollectionClass is None:
-            ArenaCollectionClass = minimarkpage.ArenaCollection
+            if self.translated_to_c and self.config.compressptr:
+                from pypy.rpython.memory.gc import minimarkpage2
+                ArenaCollectionClass = minimarkpage2.ArenaCollection2
+                arena_size = minimarkpage2.ARENA_SIZE
+                page_size = 4096
+            else:
+                from pypy.rpython.memory.gc import minimarkpage
+                ArenaCollectionClass = minimarkpage.ArenaCollection
         self.ac = ArenaCollectionClass(arena_size, page_size,
                                        small_request_threshold)
         #
@@ -267,7 +275,7 @@
                 self.max_heap_size = float(max_heap_size)
             #
             self.minor_collection()    # to empty the nursery
-            llarena.arena_free(self.nursery)
+            self.ac.free_big_chunk(self.nursery)
             self.nursery_size = newsize
             self.allocate_nursery()
 
@@ -280,9 +288,11 @@
         # in malloc_fixedsize_clear().  The few extra pages are never used
         # anyway so it doesn't even count.
         extra = self.nonlarge_gcptrs_max + 1
-        self.nursery = llarena.arena_malloc(self.nursery_size + extra, 2)
+        fullsize = self.nursery_size + extra
+        self.nursery = self.ac.allocate_big_chunk(fullsize)
         if not self.nursery:
             raise MemoryError("cannot allocate nursery")
+        llarena.arena_reset(self.nursery, fullsize, 2)
         # the current position in the nursery:
         self.nursery_free = self.nursery
         # the end of the nursery:
@@ -1526,6 +1536,12 @@
         self.all_objects = []
         self.total_memory_used = 0
 
+    def allocate_big_chunk(self, arena_size):
+        return llarena.arena_malloc(arena_size, False)
+
+    def free_big_chunk(self, arena):
+        llarena.arena_free(arena)
+
     def malloc(self, size):
         nsize = raw_malloc_usage(size)
         ll_assert(nsize > 0, "malloc: size is null or negative")

Modified: pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/minimarkpage.py
==============================================================================
--- pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/minimarkpage.py	(original)
+++ pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/minimarkpage.py	Tue Oct  5 13:09:40 2010
@@ -138,6 +138,13 @@
         self.total_memory_used = r_uint(0)
 
 
+    def allocate_big_chunk(self, arena_size):
+        return llarena.arena_malloc(arena_size, False)
+
+    def free_big_chunk(self, arena):
+        llarena.arena_free(arena)
+
+
     def malloc(self, size):
         """Allocate a block from a page in an arena."""
         nsize = llmemory.raw_malloc_usage(size)

Copied: pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/minimarkpage2.py (from r77571, pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/minimarkpage.py)
==============================================================================
--- pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/minimarkpage.py	(original)
+++ pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/minimarkpage2.py	Tue Oct  5 13:09:40 2010
@@ -2,6 +2,7 @@
 from pypy.rlib.rarithmetic import LONG_BIT, r_uint
 from pypy.rlib.objectmodel import we_are_translated
 from pypy.rlib.debug import ll_assert
+from pypy.rlib import rmmap
 
 WORD = LONG_BIT // 8
 NULL = llmemory.NULL
@@ -13,32 +14,12 @@
 # A page contains a number of allocated objects, called "blocks".
 
 # The actual allocation occurs in whole arenas, which are then subdivided
-# into pages.  For each arena we allocate one of the following structures:
+# into pages.  Arenas are allocated (after translation to C) as an mmap()
+# at fixed addresses:
 
-ARENA_PTR = lltype.Ptr(lltype.ForwardReference())
-ARENA = lltype.Struct('ArenaReference',
-    # -- The address of the arena, as returned by malloc()
-    ('base', llmemory.Address),
-    # -- The number of free and the total number of pages in the arena
-    ('nfreepages', lltype.Signed),
-    ('totalpages', lltype.Signed),
-    # -- A chained list of free pages in the arena.  Ends with NULL.
-    ('freepages', llmemory.Address),
-    # -- A linked list of arenas.  See below.
-    ('nextarena', ARENA_PTR),
-    )
-ARENA_PTR.TO.become(ARENA)
-ARENA_NULL = lltype.nullptr(ARENA)
-
-# The idea is that when we need a free page, we take it from the arena
-# which currently has the *lowest* number of free pages.  This allows
-# arenas with a lot of free pages to eventually become entirely free, at
-# which point they are returned to the OS.  If an arena has a total of
-# 64 pages, then we have 64 global lists, arenas_lists[0] to
-# arenas_lists[63], such that arenas_lists[i] contains exactly those
-# arenas that have 'nfreepages == i'.  We allocate pages out of the
-# arena in 'current_arena'; when it is exhausted we pick another arena
-# with the smallest value for nfreepages (but > 0).
+ARENA_SIZE       = 0x100000      # 1MB
+ARENA_ADDR_START = 0x10000000    # 256MB  (a lower value segfaults on Linux)
+ARENA_ADDR_STOP  = 0x800000000   # 32GB
 
 # ____________________________________________________________
 #
@@ -63,28 +44,30 @@
     #    pages, it is a chained list of pages having the same size class,
     #    rooted in 'page_for_size[size_class]'.  For full pages, it is a
     #    different chained list rooted in 'full_page_for_size[size_class]'.
-    #    For free pages, it is the list 'freepages' in the arena header.
+    #    For free pages, it is the list 'freepages'.
     ('nextpage', PAGE_PTR),
-    # -- The arena this page is part of.
-    ('arena', ARENA_PTR),
     # -- The number of free blocks.  The numbers of uninitialized and
     #    allocated blocks can be deduced from the context if needed.
-    ('nfree', lltype.Signed),
-    # -- The chained list of free blocks.  It ends as a pointer to the
+    ('nfree', rffi.INT),
+    # -- The chained list of free blocks.  It ends as a reference to the
     #    first uninitialized block (pointing to data that is uninitialized,
-    #    or to the end of the page).
-    ('freeblock', llmemory.Address),
-    # -- The structure above is 4 words, which is a good value:
-    #    '(1024-4) % N' is zero or very small for various small N's,
+    #    or to the end of the page).  Each entry in the free list is encoded
+    #    as an offset from the start of the page.
+    ('freeblock', rffi.INT),
+    # -- The structure above is 2 words, which is a good value:
+    #    '(512-2) % N' is zero or very small for various small N's,
     #    i.e. there is not much wasted space.
     )
 PAGE_PTR.TO.become(PAGE_HEADER)
 PAGE_NULL = lltype.nullptr(PAGE_HEADER)
 
+FREEBLOCK = lltype.Struct('FreeBlock', ('freeblock', rffi.INT))
+FREEBLOCK_PTR = lltype.Ptr(FREEBLOCK)
+
 # ----------
 
 
-class ArenaCollection(object):
+class ArenaCollection2(object):
     _alloc_flavor_ = "raw"
 
     def __init__(self, arena_size, page_size, small_request_threshold):
@@ -111,31 +94,19 @@
         for i in range(1, length):
             self.nblocks_for_size[i] = (page_size - self.hdrsize) // (WORD * i)
         #
-        self.max_pages_per_arena = arena_size // page_size
-        self.arenas_lists = lltype.malloc(rffi.CArray(ARENA_PTR),
-                                          self.max_pages_per_arena,
-                                          flavor='raw', zero=True)
-        # this is used in mass_free() only
-        self.old_arenas_lists = lltype.malloc(rffi.CArray(ARENA_PTR),
-                                              self.max_pages_per_arena,
-                                              flavor='raw', zero=True)
-        #
-        # the arena currently consumed; it must have at least one page
-        # available, or be NULL.  The arena object that we point to is
-        # not in any 'arenas_lists'.  We will consume all its pages before
-        # we choose a next arena, even if there is a major collection
-        # in-between.
-        self.current_arena = ARENA_NULL
+        # The next address to get an arena from
+        self.next_arena_addr = ARENA_ADDR_START
         #
-        # guarantee that 'arenas_lists[1:min_empty_nfreepages]' are all empty
-        self.min_empty_nfreepages = self.max_pages_per_arena
-        #
-        # part of current_arena might still contain uninitialized pages
+        # Uninitialized pages from the current arena
+        self.next_uninitialized_page = NULL
         self.num_uninitialized_pages = 0
         #
         # the total memory used, counting every block in use, without
         # the additional bookkeeping stuff.
         self.total_memory_used = r_uint(0)
+        #
+        # Chained list of pages that used to contain stuff but are now free.
+        self.freepages = NULL
 
 
     def malloc(self, size):
@@ -153,24 +124,28 @@
             page = self.allocate_new_page(size_class)
         #
         # The result is simply 'page.freeblock'
-        result = page.freeblock
-        if page.nfree > 0:
+        pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
+        resultofs = rffi.getintfield(page, 'freeblock')
+        result = pageaddr + resultofs
+        page_nfree = rffi.getintfield(page, 'nfree')
+        if page_nfree > 0:
             #
             # The 'result' was part of the chained list; read the next.
-            page.nfree -= 1
-            freeblock = result.address[0]
+            page_nfree -= 1
+            rffi.setintfield(page, 'nfree', page_nfree)
+            freeblockptr = llmemory.cast_adr_to_ptr(result, FREEBLOCK_PTR)
+            freeblock = rffi.getintfield(freeblockptr, 'freeblock')
             llarena.arena_reset(result,
-                                llmemory.sizeof(llmemory.Address),
+                                llmemory.sizeof(FREEBLOCK),
                                 0)
             #
         else:
             # The 'result' is part of the uninitialized blocks.
-            freeblock = result + nsize
+            freeblock = resultofs + nsize
         #
-        page.freeblock = freeblock
+        rffi.setintfield(page, 'freeblock', freeblock)
         #
-        pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
-        if freeblock - pageaddr > self.page_size - nsize:
+        if freeblock > self.page_size - nsize:
             # This was the last free block, so unlink the page from the
             # chained list and put it in the 'full_page_for_size' list.
             self.page_for_size[size_class] = page.nextpage
@@ -184,48 +159,38 @@
     def allocate_new_page(self, size_class):
         """Allocate and return a new page for the given size_class."""
         #
-        # Allocate a new arena if needed.
-        if self.current_arena == ARENA_NULL:
-            self.allocate_new_arena()
-        #
-        # The result is simply 'current_arena.freepages'.
-        arena = self.current_arena
-        result = arena.freepages
-        if arena.nfreepages > 0:
-            #
-            # The 'result' was part of the chained list; read the next.
-            arena.nfreepages -= 1
-            freepages = result.address[0]
+        # If available, return the next page in self.freepages
+        if self.freepages != NULL:
+            result = self.freepages
+            self.freepages = result.address[0]
             llarena.arena_reset(result,
                                 llmemory.sizeof(llmemory.Address),
                                 0)
             #
         else:
-            # The 'result' is part of the uninitialized pages.
+            #
+            # No more free page.  Allocate a new arena if needed.
+            if self.next_uninitialized_page == NULL:
+                self.allocate_new_arena()
+            #
+            # The result is simply 'self.next_uninitialized_page'.
+            result = self.next_uninitialized_page
+            #
             ll_assert(self.num_uninitialized_pages > 0,
-                      "fully allocated arena found in self.current_arena")
+                      "fully allocated arena found in next_uninitialized_page")
             self.num_uninitialized_pages -= 1
             if self.num_uninitialized_pages > 0:
                 freepages = result + self.page_size
             else:
                 freepages = NULL
-        #
-        arena.freepages = freepages
-        if freepages == NULL:
-            # This was the last page, so put the arena away into
-            # arenas_lists[0].
-            ll_assert(arena.nfreepages == 0, 
-                      "freepages == NULL but nfreepages > 0")
-            arena.nextarena = self.arenas_lists[0]
-            self.arenas_lists[0] = arena
-            self.current_arena = ARENA_NULL
+            #
+            self.next_uninitialized_page = freepages
         #
         # Initialize the fields of the resulting page
         llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER))
         page = llmemory.cast_adr_to_ptr(result, PAGE_PTR)
-        page.arena = arena
-        page.nfree = 0
-        page.freeblock = result + self.hdrsize
+        rffi.setintfield(page, 'nfree', 0)
+        rffi.setintfield(page, 'freeblock', self.hdrsize)
         page.nextpage = PAGE_NULL
         ll_assert(self.page_for_size[size_class] == PAGE_NULL,
                   "allocate_new_page() called but a page is already waiting")
@@ -233,63 +198,52 @@
         return page
 
 
-    def _all_arenas(self):
-        """For testing.  Enumerates all arenas."""
-        if self.current_arena:
-            yield self.current_arena
-        for arena in self.arenas_lists:
-            while arena:
-                yield arena
-                arena = arena.nextarena
+    def allocate_new_arena(self):
+        """Allocates an arena and load it in self.next_uninitialized_page."""
+        arena_base = self.allocate_big_chunk(self.arena_size)
+        self.next_uninitialized_page = arena_base
+        self.num_uninitialized_pages = self.arena_size // self.page_size
+    allocate_new_arena._dont_inline_ = True
 
+    def allocate_big_chunk(self, arena_size):
+        if we_are_translated():
+            return self._allocate_new_arena_mmap(arena_size)
+        else:
+            return llarena.arena_malloc(arena_size, False)
 
-    def allocate_new_arena(self):
-        """Loads in self.current_arena the arena to allocate from next."""
+    def free_big_chunk(self, arena):
+        if we_are_translated():
+            pass     # arenas are never unmapped; good enough for now
+        else:
+            llarena.arena_free(arena)
+
+    def _allocate_new_arena_mmap(self, arena_size):
         #
-        # Pick an arena from 'arenas_lists[i]', with i as small as possible
-        # but > 0.  Use caching with 'min_empty_nfreepages', which guarantees
-        # that 'arenas_lists[1:min_empty_nfreepages]' are all empty.
-        i = self.min_empty_nfreepages
-        while i < self.max_pages_per_arena:
-            #
-            if self.arenas_lists[i] != ARENA_NULL:
-                #
-                # Found it.
-                self.current_arena = self.arenas_lists[i]
-                self.arenas_lists[i] = self.current_arena.nextarena
-                return
-            #
-            i += 1
-            self.min_empty_nfreepages = i
-        #
-        # No more arena with any free page.  We must allocate a new arena.
-        if not we_are_translated():
-            for a in self._all_arenas():
-                assert a.nfreepages == 0
-        #
-        # 'arena_base' points to the start of malloced memory; it might not
-        # be a page-aligned address
-        arena_base = llarena.arena_malloc(self.arena_size, False)
-        if not arena_base:
-            raise MemoryError("couldn't allocate the next arena")
-        arena_end = arena_base + self.arena_size
-        #
-        # 'firstpage' points to the first unused page
-        firstpage = start_of_page(arena_base + self.page_size - 1,
-                                  self.page_size)
-        # 'npages' is the number of full pages just allocated
-        npages = (arena_end - firstpage) // self.page_size
-        #
-        # Allocate an ARENA object and initialize it
-        arena = lltype.malloc(ARENA, flavor='raw')
-        arena.base = arena_base
-        arena.nfreepages = 0        # they are all uninitialized pages
-        arena.totalpages = npages
-        arena.freepages = firstpage
-        self.num_uninitialized_pages = npages
-        self.current_arena = arena
+        # Round arena_size up to a multiple of ARENA_SIZE.
+        arena_size = (arena_size + ARENA_SIZE - 1) & ~(ARENA_SIZE-1)
         #
-    allocate_new_arena._dont_inline_ = True
+        # Try to mmap() at a MAP_FIXED address, in a 'while' loop until it
+        # succeeds.  The important part is that it must return an address
+        # that is in the lower 32GB of the addressable space.
+        while 1:
+            addr = self.next_arena_addr
+            if addr + arena_size > ARENA_ADDR_STOP:
+                raise MemoryError("exhausted the 32GB of memory")
+            self.next_arena_addr = addr + arena_size
+            flags = rmmap.MAP_PRIVATE | rmmap.MAP_ANONYMOUS | rmmap.MAP_FIXED
+            prot = rmmap.PROT_READ | rmmap.PROT_WRITE
+            arena_base = rmmap.c_mmap_safe(rffi.cast(rffi.CCHARP, addr),
+                                           arena_size, prot, flags, -1, 0)
+            if arena_base != rffi.cast(rffi.CCHARP, -1):
+                break
+        #
+        # 'arena_base' points to the start of mmap()ed memory.
+        # Sanity-check it.
+        if rffi.cast(lltype.Unsigned, arena_base) >= ARENA_ADDR_STOP:
+            raise MMapIgnoredFIXED("mmap() ignored the MAP_FIXED and returned"
+                                   " an address that is not in the first 32GB")
+        #
+        return rffi.cast(llmemory.Address, arena_base)
 
 
     def mass_free(self, ok_to_free_func):
@@ -304,48 +258,13 @@
             #
             # Walk the pages in 'page_for_size[size_class]' and
             # 'full_page_for_size[size_class]' and free some objects.
-            # Pages completely freed are added to 'page.arena.freepages',
+            # Pages completely freed are added to 'self.freepages',
             # and become available for reuse by any size class.  Pages
             # not completely freed are re-chained either in
             # 'full_page_for_size[]' or 'page_for_size[]'.
             self.mass_free_in_pages(size_class, ok_to_free_func)
             #
             size_class -= 1
-        #
-        # Rehash arenas into the correct arenas_lists[i].  If
-        # 'self.current_arena' contains an arena too, it remains there.
-        (self.old_arenas_lists, self.arenas_lists) = (
-            self.arenas_lists, self.old_arenas_lists)
-        #
-        i = 0
-        while i < self.max_pages_per_arena:
-            self.arenas_lists[i] = ARENA_NULL
-            i += 1
-        #
-        i = 0
-        while i < self.max_pages_per_arena:
-            arena = self.old_arenas_lists[i]
-            while arena != ARENA_NULL:
-                nextarena = arena.nextarena
-                #
-                if arena.nfreepages == arena.totalpages:
-                    #
-                    # The whole arena is empty.  Free it.
-                    llarena.arena_free(arena.base)
-                    lltype.free(arena, flavor='raw')
-                    #
-                else:
-                    # Insert 'arena' in the correct arenas_lists[n]
-                    n = arena.nfreepages
-                    ll_assert(n < self.max_pages_per_arena,
-                             "totalpages != nfreepages >= max_pages_per_arena")
-                    arena.nextarena = self.arenas_lists[n]
-                    self.arenas_lists[n] = arena
-                #
-                arena = nextarena
-            i += 1
-        #
-        self.min_empty_nfreepages = 1
 
 
     def mass_free_in_pages(self, size_class, ok_to_free_func):
@@ -398,33 +317,29 @@
     def free_page(self, page):
         """Free a whole page."""
         #
-        # Insert the freed page in the arena's 'freepages' list.
-        # If nfreepages == totalpages, then it will be freed at the
-        # end of mass_free().
-        arena = page.arena
-        arena.nfreepages += 1
+        # Insert the freed page in the 'freepages' list.
         pageaddr = llmemory.cast_ptr_to_adr(page)
         pageaddr = llarena.getfakearenaaddress(pageaddr)
         llarena.arena_reset(pageaddr, self.page_size, 0)
         llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address))
-        pageaddr.address[0] = arena.freepages
-        arena.freepages = pageaddr
+        pageaddr.address[0] = self.freepages
+        self.freepages = pageaddr
 
 
     def walk_page(self, page, block_size, ok_to_free_func):
         """Walk over all objects in a page, and ask ok_to_free_func()."""
         #
+        pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
+        #
         # 'freeblock' is the next free block
-        freeblock = page.freeblock
+        freeblock = pageaddr + rffi.getintfield(page, 'freeblock')
         #
         # 'prevfreeblockat' is the address of where 'freeblock' was read from.
         prevfreeblockat = lltype.direct_fieldptr(page, 'freeblock')
-        prevfreeblockat = llmemory.cast_ptr_to_adr(prevfreeblockat)
         #
-        obj = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
-        obj += self.hdrsize
+        obj = pageaddr + self.hdrsize
         surviving = 0    # initially
-        skip_free_blocks = page.nfree
+        skip_free_blocks = rffi.getintfield(page, 'nfree')
         #
         while True:
             #
@@ -437,11 +352,14 @@
                     break
                 #
                 # 'obj' points to a free block.  It means that
-                # 'prevfreeblockat.address[0]' does not need to be updated.
+                # 'prevfreeblockat[0]' does not need to be updated.
                 # Just read the next free block from 'obj.address[0]'.
                 skip_free_blocks -= 1
-                prevfreeblockat = obj
-                freeblock = obj.address[0]
+                prevfreeblockat = llmemory.cast_adr_to_ptr(obj, FREEBLOCK_PTR)
+                freeblock = pageaddr + rffi.getintfield(prevfreeblockat,
+                                                        'freeblock')
+                prevfreeblockat = lltype.direct_fieldptr(prevfreeblockat,
+                                                         'freeblock')
                 #
             else:
                 # 'obj' points to a valid object.
@@ -452,15 +370,20 @@
                     #
                     # The object should die.
                     llarena.arena_reset(obj, _dummy_size(block_size), 0)
-                    llarena.arena_reserve(obj,
-                                          llmemory.sizeof(llmemory.Address))
+                    llarena.arena_reserve(obj, llmemory.sizeof(FREEBLOCK))
                     # Insert 'obj' in the linked list of free blocks.
-                    prevfreeblockat.address[0] = obj
-                    prevfreeblockat = obj
-                    obj.address[0] = freeblock
+                    prevfreeblockat[0] = rffi.cast(rffi.INT, obj - pageaddr)
+                    prevfreeblockat = llmemory.cast_adr_to_ptr(obj,
+                                                               FREEBLOCK_PTR)
+                    prevfreeblockat.freeblock = rffi.cast(rffi.INT,
+                                                          freeblock - pageaddr)
+                    prevfreeblockat = lltype.direct_fieldptr(prevfreeblockat,
+                                                             'freeblock')
                     #
                     # Update the number of free objects in the page.
-                    page.nfree += 1
+                    page_nfree = rffi.getintfield(page, 'nfree')
+                    page_nfree += 1
+                    rffi.setintfield(page, 'nfree', page_nfree)
                     #
                 else:
                     # The object survives.
@@ -477,35 +400,26 @@
 
     def _nuninitialized(self, page, size_class):
         # Helper for debugging: count the number of uninitialized blocks
-        freeblock = page.freeblock
+        freeblock = rffi.getintfield(page, 'freeblock')
+        pageaddr = llmemory.cast_ptr_to_adr(page)
+        pageaddr = llarena.getfakearenaaddress(pageaddr)
         for i in range(page.nfree):
-            freeblock = freeblock.address[0]
-        assert freeblock != NULL
-        pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
+            freeblockaddr = pageaddr + freeblock
+            freeblockptr = llmemory.cast_adr_to_ptr(freeblockaddr,
+                                                    FREEBLOCK_PTR)
+            freeblock = rffi.getintfield(freeblockptr, 'freeblock')
+        assert freeblock != 0
         num_initialized_blocks, rem = divmod(
-            freeblock - pageaddr - self.hdrsize, size_class * WORD)
+            freeblock - self.hdrsize, size_class * WORD)
         assert rem == 0, "page size_class misspecified?"
         nblocks = self.nblocks_for_size[size_class]
         return nblocks - num_initialized_blocks
 
 
 # ____________________________________________________________
-# Helpers to go from a pointer to the start of its page
 
-def start_of_page(addr, page_size):
-    """Return the address of the start of the page that contains 'addr'."""
-    if we_are_translated():
-        offset = llmemory.cast_adr_to_int(addr) % page_size
-        return addr - offset
-    else:
-        return _start_of_page_untranslated(addr, page_size)
-
-def _start_of_page_untranslated(addr, page_size):
-    assert isinstance(addr, llarena.fakearenaaddress)
-    shift = WORD  # for testing, we assume that the whole arena is not
-                  # on a page boundary
-    ofs = ((addr.offset - shift) // page_size) * page_size + shift
-    return llarena.fakearenaaddress(addr.arena, ofs)
+class MMapIgnoredFIXED(Exception):
+    pass
 
 def _dummy_size(size):
     if we_are_translated():

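The PAGE_HEADER rewrite above replaces two pointer fields with rffi.INT offsets, shrinking the header from 4 words to 2 and keeping every free-list entry within 32 bits. A toy, dictionary-backed model of one page's offset-encoded free list, purely to illustrate the encoding (invented names; the real code works on raw arena memory via llarena):

    class PageModel(object):
        # toy page: the free list stores offsets from the page start,
        # not full pointers
        def __init__(self, page_size, hdrsize, block_size):
            self.page_size = page_size
            self.block_size = block_size
            self.nfree = 0              # length of the chained free list
            self.freeblock = hdrsize    # offset of the first free block
            self.chain = {}             # free block offset -> next offset

        def malloc(self):
            result = self.freeblock
            if self.nfree > 0:
                self.nfree -= 1
                self.freeblock = self.chain.pop(result)  # follow the chain
            else:
                # 'result' was the first uninitialized block
                self.freeblock = result + self.block_size
            assert result + self.block_size <= self.page_size
            return result

        def free(self, offset):
            self.chain[offset] = self.freeblock    # link in at the front
            self.freeblock = offset
            self.nfree += 1

    page = PageModel(page_size=512, hdrsize=16, block_size=32)
    a = page.malloc(); b = page.malloc()
    page.free(a)
    assert page.malloc() == a     # recycled through the offset chain
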
Copied: pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/test/test_minimarkpage2.py (from r77571, pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/test/test_minimarkpage.py)
==============================================================================
--- pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/test/test_minimarkpage.py	(original)
+++ pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gc/test/test_minimarkpage2.py	Tue Oct  5 13:09:40 2010
@@ -1,30 +1,23 @@
 import py
-from pypy.rpython.memory.gc.minimarkpage import ArenaCollection
-from pypy.rpython.memory.gc.minimarkpage import PAGE_HEADER, PAGE_PTR
-from pypy.rpython.memory.gc.minimarkpage import PAGE_NULL, WORD
-from pypy.rpython.memory.gc.minimarkpage import _dummy_size
-from pypy.rpython.lltypesystem import lltype, llmemory, llarena
+from pypy.rpython.memory.gc.minimarkpage2 import ArenaCollection2
+from pypy.rpython.memory.gc.minimarkpage2 import PAGE_HEADER, PAGE_PTR
+from pypy.rpython.memory.gc.minimarkpage2 import PAGE_NULL, WORD
+from pypy.rpython.memory.gc.minimarkpage2 import FREEBLOCK, FREEBLOCK_PTR
+from pypy.rpython.memory.gc.minimarkpage2 import _dummy_size
+from pypy.rpython.lltypesystem import lltype, llmemory, llarena, rffi
 from pypy.rpython.lltypesystem.llmemory import cast_ptr_to_adr
 
 NULL = llmemory.NULL
-SHIFT = WORD
 hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))
 
 
 def test_allocate_arena():
-    ac = ArenaCollection(SHIFT + 64*20, 64, 1)
+    ac = ArenaCollection2(64*20, 64, 1)
     ac.allocate_new_arena()
     assert ac.num_uninitialized_pages == 20
-    upages = ac.current_arena.freepages
+    upages = ac.next_uninitialized_page
     upages + 64*20   # does not raise
     py.test.raises(llarena.ArenaError, "upages + 64*20 + 1")
-    #
-    ac = ArenaCollection(SHIFT + 64*20 + 7, 64, 1)
-    ac.allocate_new_arena()
-    assert ac.num_uninitialized_pages == 20
-    upages = ac.current_arena.freepages
-    upages + 64*20 + 7   # does not raise
-    py.test.raises(llarena.ArenaError, "upages + 64*20 + 64")
 
 
 def test_allocate_new_page():
@@ -36,24 +29,24 @@
         assert (ac._nuninitialized(page, size_class) ==
                     (pagesize - hdrsize) // size)
         assert page.nfree == 0
-        page1 = page.freeblock - hdrsize
-        assert llmemory.cast_ptr_to_adr(page) == page1
+        assert page.freeblock == hdrsize
         assert page.nextpage == PAGE_NULL
     #
-    ac = ArenaCollection(arenasize, pagesize, 99)
+    ac = ArenaCollection2(arenasize, pagesize, 99)
     assert ac.num_uninitialized_pages == 0
     assert ac.total_memory_used == 0
     #
     page = ac.allocate_new_page(5)
     checknewpage(page, 5)
     assert ac.num_uninitialized_pages == 2
-    assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page)
+    assert lltype.typeOf(ac.next_uninitialized_page) == llmemory.Address
+    assert ac.next_uninitialized_page - pagesize == cast_ptr_to_adr(page)
     assert ac.page_for_size[5] == page
     #
     page = ac.allocate_new_page(3)
     checknewpage(page, 3)
     assert ac.num_uninitialized_pages == 1
-    assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page)
+    assert ac.next_uninitialized_page - pagesize == cast_ptr_to_adr(page)
     assert ac.page_for_size[3] == page
     #
     page = ac.allocate_new_page(4)
@@ -66,25 +59,24 @@
     assert " " not in pagelayout.rstrip(" ")
     nb_pages = len(pagelayout)
     arenasize = pagesize * (nb_pages + 1) - 1
-    ac = ArenaCollection(arenasize, pagesize, 9*WORD)
+    ac = ArenaCollection2(arenasize, pagesize, 9*WORD)
     #
     def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
         assert step in (1, 2)
         llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
         page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
         if step == 1:
-            page.nfree = 0
+            page.nfree = rffi.cast(rffi.INT, 0)
             nuninitialized = nblocks - nusedblocks
         else:
-            page.nfree = nusedblocks
+            page.nfree = rffi.cast(rffi.INT, nusedblocks)
             nuninitialized = nblocks - 2*nusedblocks
-        page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
+        page.freeblock = rffi.cast(rffi.INT, hdrsize + nusedblocks*size_block)
         if nusedblocks < nblocks:
             chainedlists = ac.page_for_size
         else:
             chainedlists = ac.full_page_for_size
         page.nextpage = chainedlists[size_class]
-        page.arena = ac.current_arena
         chainedlists[size_class] = page
         if fill_with_objects:
             for i in range(0, nusedblocks*step, step):
@@ -93,23 +85,27 @@
             if step == 2:
                 prev = 'page.freeblock'
                 for i in range(1, nusedblocks*step, step):
-                    holeaddr = pageaddr + hdrsize + i * size_block
-                    llarena.arena_reserve(holeaddr,
-                                          llmemory.sizeof(llmemory.Address))
-                    exec '%s = holeaddr' % prev in globals(), locals()
-                    prevhole = holeaddr
-                    prev = 'prevhole.address[0]'
-                endaddr = pageaddr + hdrsize + 2*nusedblocks * size_block
-                exec '%s = endaddr' % prev in globals(), locals()
+                    holeofs = hdrsize + i * size_block
+                    llarena.arena_reserve(pageaddr + holeofs,
+                                          llmemory.sizeof(FREEBLOCK))
+                    exec '%s = rffi.cast(rffi.INT, holeofs)' % prev \
+                         in globals(), locals()
+                    prevhole = pageaddr + holeofs
+                    prevhole = llmemory.cast_adr_to_ptr(prevhole,
+                                                        FREEBLOCK_PTR)
+                    prev = 'prevhole.freeblock'
+                endofs = hdrsize + 2*nusedblocks * size_block
+                exec '%s = rffi.cast(rffi.INT, endofs)' % prev \
+                     in globals(), locals()
         assert ac._nuninitialized(page, size_class) == nuninitialized
     #
     ac.allocate_new_arena()
     num_initialized_pages = len(pagelayout.rstrip(" "))
-    ac._startpageaddr = ac.current_arena.freepages
+    ac._startpageaddr = ac.next_uninitialized_page
     if pagelayout.endswith(" "):
-        ac.current_arena.freepages += pagesize * num_initialized_pages
+        ac.next_uninitialized_page += pagesize * num_initialized_pages
     else:
-        ac.current_arena.freepages = NULL
+        ac.next_uninitialized_page = NULL
     ac.num_uninitialized_pages -= num_initialized_pages
     #
     for i in reversed(range(num_initialized_pages)):
@@ -122,9 +118,8 @@
             link(pageaddr, size_class, size_block, nblocks, nblocks-1)
         elif c == '.':    # a free, but initialized, page
             llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address))
-            pageaddr.address[0] = ac.current_arena.freepages
-            ac.current_arena.freepages = pageaddr
-            ac.current_arena.nfreepages += 1
+            pageaddr.address[0] = ac.freepages
+            ac.freepages = pageaddr
         elif c == '#':    # a random full page, in the list 'full_pages'
             size_class = fill_with_objects or 1
             size_block = WORD * size_class
@@ -151,7 +146,7 @@
     assert llmemory.cast_ptr_to_adr(page) == pagenum(ac, expected_position)
 
 def freepages(ac):
-    return ac.current_arena.freepages
+    return ac.freepages or ac.next_uninitialized_page
 
 
 def test_simple_arena_collection():
@@ -172,7 +167,7 @@
     page = ac.allocate_new_page(6); checkpage(ac, page, 8)
     assert freepages(ac) == pagenum(ac, 9) and ac.num_uninitialized_pages == 1
     page = ac.allocate_new_page(7); checkpage(ac, page, 9)
-    assert not ac.current_arena and ac.num_uninitialized_pages == 0
+    assert freepages(ac) == NULL and ac.num_uninitialized_pages == 0
 
 
 def chkob(ac, num_page, pos_obj, obj):
@@ -217,18 +212,18 @@
     page = getpage(ac, 0)
     assert page.nfree == 3
     assert ac._nuninitialized(page, 2) == 3
-    chkob(ac, 0, 2*WORD, page.freeblock)
+    assert page.freeblock == hdrsize + 2*WORD
     #
     obj = ac.malloc(2*WORD); chkob(ac, 0,  2*WORD, obj)
     obj = ac.malloc(2*WORD); chkob(ac, 0,  6*WORD, obj)
     assert page.nfree == 1
     assert ac._nuninitialized(page, 2) == 3
-    chkob(ac, 0, 10*WORD, page.freeblock)
+    assert page.freeblock == hdrsize + 10*WORD
     #
     obj = ac.malloc(2*WORD); chkob(ac, 0, 10*WORD, obj)
     assert page.nfree == 0
     assert ac._nuninitialized(page, 2) == 3
-    chkob(ac, 0, 12*WORD, page.freeblock)
+    assert page.freeblock == hdrsize + 12*WORD
     #
     obj = ac.malloc(2*WORD); chkob(ac, 0, 12*WORD, obj)
     assert ac._nuninitialized(page, 2) == 2
@@ -288,7 +283,7 @@
     assert page.nextpage == PAGE_NULL
     assert ac._nuninitialized(page, 2) == 1
     assert page.nfree == 0
-    chkob(ac, 0, 4*WORD, page.freeblock)
+    assert page.freeblock == hdrsize + 4*WORD
     assert freepages(ac) == NULL
 
 def test_mass_free_emptied_page():
@@ -319,6 +314,14 @@
     assert freepages(ac) == NULL
     assert ac.page_for_size[2] == PAGE_NULL
 
+def deref(page, freeblock, repeat=1):
+    for i in range(repeat):
+        pageaddr = llmemory.cast_ptr_to_adr(page)
+        pageaddr = llarena.getfakearenaaddress(pageaddr)
+        obj = llmemory.cast_adr_to_ptr(pageaddr + freeblock, FREEBLOCK_PTR)
+        freeblock = obj.freeblock
+    return freeblock
+
 def test_mass_free_full_is_partially_emptied():
     pagesize = hdrsize + 9*WORD
     ac = arena_collection_for_test(pagesize, "#", fill_with_objects=2)
@@ -334,9 +337,9 @@
     assert page.nextpage == PAGE_NULL
     assert ac._nuninitialized(page, 2) == 0
     assert page.nfree == 2
-    assert page.freeblock == pageaddr + hdrsize + 2*WORD
-    assert page.freeblock.address[0] == pageaddr + hdrsize + 6*WORD
-    assert page.freeblock.address[0].address[0] == pageaddr + hdrsize + 8*WORD
+    assert page.freeblock == hdrsize + 2*WORD
+    assert deref(page, page.freeblock) == hdrsize + 6*WORD
+    assert deref(page, page.freeblock, 2) == hdrsize + 8*WORD
     assert freepages(ac) == NULL
     assert ac.full_page_for_size[2] == PAGE_NULL
 
@@ -359,12 +362,10 @@
     assert page.nextpage == PAGE_NULL
     assert ac._nuninitialized(page, 2) == 4
     assert page.nfree == 4
-    assert page.freeblock == pageaddr + hdrsize + 2*WORD
-    assert page.freeblock.address[0] == pageaddr + hdrsize + 6*WORD
-    assert page.freeblock.address[0].address[0] == \
-                                        pageaddr + hdrsize + 10*WORD
-    assert page.freeblock.address[0].address[0].address[0] == \
-                                        pageaddr + hdrsize + 14*WORD
+    assert deref(page, page.freeblock, 0) == hdrsize + 2*WORD
+    assert deref(page, page.freeblock, 1) == hdrsize + 6*WORD
+    assert deref(page, page.freeblock, 2) == hdrsize + 10*WORD
+    assert deref(page, page.freeblock, 3) == hdrsize + 14*WORD
     assert freepages(ac) == NULL
     assert ac.full_page_for_size[2] == PAGE_NULL
 
@@ -387,16 +388,12 @@
     assert page.nextpage == PAGE_NULL
     assert ac._nuninitialized(page, 2) == 4
     assert page.nfree == 6
-    fb = page.freeblock
-    assert fb == pageaddr + hdrsize + 2*WORD
-    assert fb.address[0] == pageaddr + hdrsize + 4*WORD
-    assert fb.address[0].address[0] == pageaddr + hdrsize + 6*WORD
-    assert fb.address[0].address[0].address[0] == \
-                                       pageaddr + hdrsize + 10*WORD
-    assert fb.address[0].address[0].address[0].address[0] == \
-                                       pageaddr + hdrsize + 12*WORD
-    assert fb.address[0].address[0].address[0].address[0].address[0] == \
-                                       pageaddr + hdrsize + 14*WORD
+    assert deref(page, page.freeblock, 0) == hdrsize + 2*WORD
+    assert deref(page, page.freeblock, 1) == hdrsize + 4*WORD
+    assert deref(page, page.freeblock, 2) == hdrsize + 6*WORD
+    assert deref(page, page.freeblock, 3) == hdrsize + 10*WORD
+    assert deref(page, page.freeblock, 4) == hdrsize + 12*WORD
+    assert deref(page, page.freeblock, 5) == hdrsize + 14*WORD
     assert freepages(ac) == NULL
     assert ac.full_page_for_size[2] == PAGE_NULL
 
@@ -408,25 +405,20 @@
     num_pages = 3
     ac = arena_collection_for_test(pagesize, " " * num_pages)
     live_objects = {}
+    all_arenas = []
     #
-    # Run the test until three arenas are freed.  This is a quick test
-    # that the arenas are really freed by the logic.
     class DoneTesting(Exception):
         counter = 0
     def my_allocate_new_arena():
-        # the following output looks cool on a 112-character-wide terminal.
-        lst = sorted(ac._all_arenas(), key=lambda a: a.base.arena._arena_index)
-        for a in lst:
-            print a.base.arena, a.base.arena.usagemap
+        # the following output looks cool on a 208-character-wide terminal.
+        DoneTesting.counter += 1
+        if DoneTesting.counter > 9:
+            raise DoneTesting
+        for a in all_arenas:
+            print a.arena.usagemap
         print '-' * 80
         ac.__class__.allocate_new_arena(ac)
-        a = ac.current_arena.base.arena
-        def my_mark_freed():
-            a.freed = True
-            DoneTesting.counter += 1
-            if DoneTesting.counter > 3:
-                raise DoneTesting
-        a.mark_freed = my_mark_freed
+        all_arenas.append(ac.next_uninitialized_page)
     ac.allocate_new_arena = my_allocate_new_arena
     try:
         while True:

Modified: pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gctransform/framework.py
==============================================================================
--- pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gctransform/framework.py	(original)
+++ pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gctransform/framework.py	Tue Oct  5 13:09:40 2010
@@ -10,12 +10,12 @@
 from pypy.rlib import rstack, rgc
 from pypy.rlib.debug import ll_assert
 from pypy.translator.backendopt import graphanalyze
-from pypy.translator.backendopt.support import var_needsgc
 from pypy.annotation import model as annmodel
 from pypy.rpython import annlowlevel
 from pypy.rpython.rbuiltin import gen_cast
 from pypy.rpython.memory.gctypelayout import ll_weakref_deref, WEAKREF
 from pypy.rpython.memory.gctypelayout import convert_weakref_to, WEAKREFPTR
+from pypy.rpython.memory.gctypelayout import is_gc_pointer_or_hidden
 from pypy.rpython.memory.gctransform.log import log
 from pypy.tool.sourcetools import func_with_new_name
 from pypy.rpython.lltypesystem.lloperation import llop, LL_OPERATIONS
@@ -68,9 +68,7 @@
                     mallocvars[op.result] = True
             elif op.opname in ("setfield", "setarrayitem", "setinteriorfield"):
                 TYPE = op.args[-1].concretetype
-                if (op.args[0] in mallocvars and
-                    isinstance(TYPE, lltype.Ptr) and
-                    TYPE.TO._gckind == "gc"):
+                if op.args[0] in mallocvars and is_gc_pointer_or_hidden(TYPE):
                     result.add(op)
             else:
                 if collect_analyzer.analyze(op):
@@ -1017,15 +1015,20 @@
         v_struct = hop.spaceop.args[0]
         v_newvalue = hop.spaceop.args[-1]
         assert opname in ('setfield', 'setarrayitem', 'setinteriorfield')
-        assert isinstance(v_newvalue.concretetype, lltype.Ptr)
+        assert is_gc_pointer_or_hidden(v_newvalue.concretetype)
         # XXX for some GCs the skipping if the newvalue is a constant won't be
         # ok
         if (self.write_barrier_ptr is not None
             and not isinstance(v_newvalue, Constant)
             and v_struct.concretetype.TO._gckind == "gc"
             and hop.spaceop not in self.clean_sets):
-            v_newvalue = hop.genop("cast_ptr_to_adr", [v_newvalue],
-                                   resulttype = llmemory.Address)
+            if v_newvalue.concretetype == llmemory.HiddenGcRef32:
+                ...
+                v_newvalue = hop.genop("cast_ptr_to_adr", [v_newvalue],
+                                       resulttype = llmemory.Address)
+            else:
+                v_newvalue = hop.genop("cast_ptr_to_adr", [v_newvalue],
+                                       resulttype = llmemory.Address)
             v_structaddr = hop.genop("cast_ptr_to_adr", [v_struct],
                                      resulttype = llmemory.Address)
             if (self.write_barrier_from_array_ptr is not None and
@@ -1089,7 +1092,7 @@
             GCTransformer.gct_setfield(self, hop)
 
     def var_needs_set_transform(self, var):
-        return var_needsgc(var)
+        return is_gc_pointer_or_hidden(var.concretetype)
 
     def push_alive_nopyobj(self, var, llops):
         pass

Modified: pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gctypelayout.py
==============================================================================
--- pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gctypelayout.py	(original)
+++ pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gctypelayout.py	Tue Oct  5 13:09:40 2010
@@ -366,6 +366,10 @@
 #
 # Helpers to discover GC pointers inside structures
 
+def is_gc_pointer_or_hidden(TYPE):
+    return ((isinstance(TYPE, lltype.Ptr) and TYPE.TO._gckind == 'gc')
+            or TYPE == llmemory.HiddenGcRef32)
+
 def offsets_to_gc_pointers(TYPE):
     offsets = []
     if isinstance(TYPE, lltype.Struct):

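A quick illustration of what the new predicate accepts; HiddenGcRef32 exists only on this branch, so the last assertion is branch-specific:

    from pypy.rpython.lltypesystem import lltype, llmemory
    from pypy.rpython.memory.gctypelayout import is_gc_pointer_or_hidden

    S = lltype.GcStruct('S', ('x', lltype.Signed))
    assert is_gc_pointer_or_hidden(lltype.Ptr(S))           # ordinary GC ptr
    assert not is_gc_pointer_or_hidden(lltype.Signed)       # plain integer
    assert is_gc_pointer_or_hidden(llmemory.HiddenGcRef32)  # compressed ref
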
Modified: pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gcwrapper.py
==============================================================================
--- pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gcwrapper.py	(original)
+++ pypy/branch/32ptr-on-64bit/pypy/rpython/memory/gcwrapper.py	Tue Oct  5 13:09:40 2010
@@ -1,4 +1,5 @@
 from pypy.rpython.lltypesystem import lltype, llmemory, llheap
+from pypy.rpython.lltypesystem.lloperation import llop
 from pypy.rpython import llinterp
 from pypy.rpython.annlowlevel import llhelper
 from pypy.rpython.memory import gctypelayout
@@ -88,9 +89,12 @@
     def setinterior(self, toplevelcontainer, inneraddr, INNERTYPE, newvalue,
                     offsets=()):
         if (lltype.typeOf(toplevelcontainer).TO._gckind == 'gc' and
-            (isinstance(INNERTYPE, lltype.Ptr) and
-             INNERTYPE.TO._gckind == 'gc')
-            or INNERTYPE == llmemory.HiddenGcRef32):
+            is_gc_pointer_or_hidden(INNERTYPE)):
+            #
+            if INNERTYPE == llmemory.HiddenGcRef32:
+                newvalueaddr = llop.show_from_adr32(llmemory.Address, newvalue)
+            else:
+                newvalueaddr = llmemory.cast_ptr_to_adr(newvalue)
             #
             wb = True
             if self.has_write_barrier_from_array:
@@ -99,7 +103,7 @@
                         assert (type(index) is int    # <- fast path
                                 or lltype.typeOf(index) == lltype.Signed)
                         self.gc.write_barrier_from_array(
-                            llmemory.cast_ptr_to_adr(newvalue),
+                            newvalueaddr,
                             llmemory.cast_ptr_to_adr(toplevelcontainer),
                             index)
                         wb = False
@@ -107,7 +111,7 @@
             #
             if wb:
                 self.gc.write_barrier(
-                    llmemory.cast_ptr_to_adr(newvalue),
+                    newvalueaddr,
                     llmemory.cast_ptr_to_adr(toplevelcontainer))
         llheap.setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue)
 

Modified: pypy/branch/32ptr-on-64bit/pypy/translator/c/genc.py
==============================================================================
--- pypy/branch/32ptr-on-64bit/pypy/translator/c/genc.py	(original)
+++ pypy/branch/32ptr-on-64bit/pypy/translator/c/genc.py	Tue Oct  5 13:09:40 2010
@@ -881,15 +881,20 @@
 
 def gen_startupcode(f, database):
     # generate the start-up code and put it into a function
+    if database.late_initializations_hiddengcref32:
+        gen_late_initializations_hiddengcref32(f, database)
     print >> f, 'char *RPython_StartupCode(void) {'
     print >> f, '\tchar *error = NULL;'
-    for line in database.gcpolicy.gc_startup_code():
-        print >> f,"\t" + line
 
     # put float infinities in global constants, we should not have so many of them for now to make
     # a table+loop preferable
     for dest, value in database.late_initializations:
         print >> f, "\t%s = %s;" % (dest, value)
+    if database.late_initializations_hiddengcref32:
+        print >> f, "\tpypy_init_hiddengcref32();"
+
+    for line in database.gcpolicy.gc_startup_code():
+        print >> f,"\t" + line
 
     firsttime = True
     for node in database.containerlist:
@@ -904,6 +909,25 @@
     print >> f, '\treturn error;'
     print >> f, '}'
 
+def gen_late_initializations_hiddengcref32(f, database):
+    print >> f, 'static void* pypy_hiddengcref32[] = {'
+    for access_expr, name in database.late_initializations_hiddengcref32:
+        print >> f, '\t&%s, %s,' % (access_expr, name)
+    print >> f, '''\tNULL  /* sentinel */
+};
+
+static void pypy_init_hiddengcref32(void)
+{
+\tvoid** p;
+\tfor (p = pypy_hiddengcref32; p[0] != NULL; p += 2)
+\t{
+\t\thiddengcref32_t h;
+\t\tOP_HIDE_INTO_ADR32((p[1]), h);
+\t\t*(hiddengcref32_t**)(p[0]) = h;
+\t}
+}
+'''
+
 def commondefs(defines):
     from pypy.rlib.rarithmetic import LONG_BIT
     defines['PYPY_LONG_BIT'] = LONG_BIT

Modified: pypy/branch/32ptr-on-64bit/pypy/translator/c/node.py
==============================================================================
--- pypy/branch/32ptr-on-64bit/pypy/translator/c/node.py	(original)
+++ pypy/branch/32ptr-on-64bit/pypy/translator/c/node.py	Tue Oct  5 13:09:40 2010
@@ -790,9 +790,12 @@
                 '-+'[value > 0])
         elif TYPE == llmemory.HiddenGcRef32:
             if value.adr64:
+                name = db.get(value.adr64.ptr)
                 db.late_initializations_hiddengcref32.append((access_expr,
-                                                              value))
-                expr = '0 /*HIDE_INTO_ADR32%s*/' % db.get(value.adr64.ptr)
+                                                              name))
+                if not name.startswith('('):
+                    name = '(%s)' % name
+                expr = '0 /*HIDE_INTO_ADR32%s*/' % name
             else:
                 expr = '0'
         else:


