[pypy-commit] stmgc c7-refactor: Progress on copying the logic from minimark.py.
arigo
noreply at buildbot.pypy.org
Sat Feb 22 18:15:55 CET 2014
Author: Armin Rigo <arigo at tunes.org>
Branch: c7-refactor
Changeset: r810:3993d902abc7
Date: 2014-02-22 18:15 +0100
http://bitbucket.org/pypy/stmgc/changeset/3993d902abc7/
Log: Progress on copying the logic from minimark.py.
diff --git a/c7/stm/core.c b/c7/stm/core.c
--- a/c7/stm/core.c
+++ b/c7/stm/core.c
@@ -13,11 +13,10 @@
{
assert(_running_transaction());
- LIST_APPEND(STM_PSEGMENT->old_objects_to_trace, obj);
-
/* for old objects from the same transaction, we are done now */
if (obj_from_same_transaction(obj)) {
obj->stm_flags |= GCFLAG_WRITE_BARRIER_CALLED;
+ LIST_APPEND(STM_PSEGMENT->old_objects_pointing_to_young, obj);
return;
}
@@ -221,6 +220,15 @@
list_clear(STM_PSEGMENT->modified_objects);
}
+static void _finish_transaction(void)
+{
+ stm_thread_local_t *tl = STM_SEGMENT->running_thread;
+ release_thread_segment(tl);
+ STM_PSEGMENT->safe_point = SP_NO_TRANSACTION;
+ STM_PSEGMENT->transaction_state = TS_NONE;
+ list_clear(STM_PSEGMENT->old_objects_pointing_to_young);
+}
+
void stm_commit_transaction(void)
{
mutex_lock();
@@ -267,10 +275,7 @@
reset_all_creation_markers_and_push_created_data();
/* done */
- stm_thread_local_t *tl = STM_SEGMENT->running_thread;
- release_thread_segment(tl);
- STM_PSEGMENT->safe_point = SP_NO_TRANSACTION;
- STM_PSEGMENT->transaction_state = TS_NONE;
+ _finish_transaction();
/* we did cond_broadcast() above already, in
try_wait_for_other_safe_points(). It may wake up
@@ -345,13 +350,13 @@
/* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */
reset_modified_from_other_segments();
+ reset_all_creation_markers();
+
stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr;
stm_thread_local_t *tl = STM_SEGMENT->running_thread;
tl->shadowstack = STM_PSEGMENT->shadowstack_at_start_of_transaction;
- release_thread_segment(tl);
- STM_PSEGMENT->safe_point = SP_NO_TRANSACTION;
- STM_PSEGMENT->transaction_state = TS_NONE;
- reset_all_creation_markers();
+
+ _finish_transaction();
cond_broadcast();
mutex_unlock();
diff --git a/c7/stm/core.h b/c7/stm/core.h
--- a/c7/stm/core.h
+++ b/c7/stm/core.h
@@ -52,7 +52,7 @@
struct stm_priv_segment_info_s {
struct stm_segment_info_s pub;
- struct list_s *old_objects_to_trace;
+ struct list_s *old_objects_pointing_to_young;
struct list_s *modified_objects;
struct list_s *creation_markers;
uint64_t start_time;
diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c
--- a/c7/stm/gcpage.c
+++ b/c7/stm/gcpage.c
@@ -21,13 +21,13 @@
{
memset(small_alloc_shared, 0, sizeof(small_alloc_shared));
memset(small_alloc_privtz, 0, sizeof(small_alloc_privtz));
- free_pages = NULL;
+ free_uniform_pages = NULL;
}
-static void check_gcpage_still_shared(void)
-{
- //...;
-}
+//static void check_gcpage_still_shared(void)
+//{
+// //...;
+//}
#define GCPAGE_NUM_PAGES 20
@@ -54,8 +54,8 @@
char *p = uninitialized_page_start;
long i;
for (i = 0; i < 16; i++) {
- *(char **)p = free_pages;
- free_pages = p;
+ *(char **)p = free_uniform_pages;
+ free_uniform_pages = p;
}
return;
@@ -69,7 +69,7 @@
/* not thread-safe! Use only when holding the mutex */
assert(_has_mutex());
- if (free_pages == NULL)
+ if (free_uniform_pages == NULL)
grab_more_free_pages_for_small_allocations();
abort();//...
diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h
--- a/c7/stm/gcpage.h
+++ b/c7/stm/gcpage.h
@@ -19,15 +19,31 @@
char *next_object; /* the next address we will return, or NULL */
char *range_last; /* if equal to next_object: next_object starts with
a next pointer; if greater: last item of a
- contigous range of unallocated objs */
+ contiguous range of unallocated objs */
};
+
+/* For each small request size, we have three independent chained lists
+ of address ranges:
+
+ - 'small_alloc_shared': ranges are within pages that are likely to be
+ shared. We don't know for sure, because pages can be privatized
+ by normal run of stm_write().
+
+ - 'small_alloc_sh_old': moved from 'small_alloc_shared' when we're
+ looking for a range with the creation_marker set; this collects
+ the unsuitable ranges, i.e. the ones with already at least one
+ object and no creation marker.
+
+ - 'small_alloc_privtz': ranges are within pages that are privatized.
+*/
static struct small_alloc_s small_alloc_shared[GC_N_SMALL_REQUESTS];
+static struct small_alloc_s small_alloc_sh_old[GC_N_SMALL_REQUESTS];
static struct small_alloc_s small_alloc_privtz[GC_N_SMALL_REQUESTS];
-static char *free_pages;
+static char *free_uniform_pages;
static void setup_gcpage(void);
static void teardown_gcpage(void);
-static void check_gcpage_still_shared(void);
+//static void check_gcpage_still_shared(void);
static char *allocate_outside_nursery_large(uint64_t size);
diff --git a/c7/stm/list.h b/c7/stm/list.h
--- a/c7/stm/list.h
+++ b/c7/stm/list.h
@@ -45,6 +45,7 @@
static inline uintptr_t list_pop_item(struct list_s *lst)
{
+ assert(lst->count > 0);
return lst->items[--lst->count];
}
diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c
--- a/c7/stm/nursery.c
+++ b/c7/stm/nursery.c
@@ -33,6 +33,8 @@
char reserved[64];
} nursery_ctl __attribute__((aligned(64)));
+static struct list_s *old_objects_pointing_to_young;
+
/************************************************************/
@@ -43,6 +45,12 @@
assert(MEDIUM_OBJECT < LARGE_OBJECT);
assert(LARGE_OBJECT < NURSERY_SECTION_SIZE);
nursery_ctl.used = 0;
+ old_objects_pointing_to_young = list_create();
+}
+
+static void teardown_nursery(void)
+{
+ list_free(old_objects_pointing_to_young);
}
static inline bool _is_in_nursery(object_t *obj)
@@ -100,7 +108,6 @@
}
}
-
static void minor_trace_if_young(object_t **pobj)
{
/* takes a normal pointer to a thread-local pointer to an object */
@@ -110,7 +117,9 @@
if (!_is_young(obj))
return;
- /* the location the object moved to is the second word in 'obj' */
+ /* If the object was already seen here, its first word was set
+ to GCWORD_MOVED. In that case, the forwarding location, i.e.
+ where the object moved to, is stored in the second word in 'obj'. */
char *realobj = (char *)REAL_ADDRESS(stm_object_pages, obj);
object_t **pforwarded_array = (object_t **)realobj;
@@ -129,7 +138,9 @@
The pages S or W above are both pages of uniform sizes obtained
from the end of the address space. The difference is that page S
- can be shared, but page W needs to be privatized.
+ can be shared, but page W needs to be privatized. Moreover,
+ cases 2 and 4 differ in the creation_marker they need to set,
+ which has a granularity of 256 bytes.
*/
size_t size = stmcb_size_rounded_up((struct object_s *)realobj);
uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START;
@@ -214,9 +225,12 @@
pforwarded_array[0] = GCWORD_MOVED;
pforwarded_array[1] = nobj;
*pobj = nobj;
+
+ /* Must trace the object later */
+ LIST_APPEND(old_objects_pointing_to_young, nobj);
}
-static void minor_trace_roots(void)
+static void collect_roots_in_nursery(void)
{
stm_thread_local_t *tl = stm_thread_locals;
do {
@@ -229,6 +243,48 @@
} while (tl != stm_thread_locals);
}
+static void trace_and_drag_out_of_nursery(object_t *obj)
+{
+ if (is_in_shared_pages(obj)) {
+ /* the object needs fixing only in one copy, because all copies
+ are shared and identical. */
+ char *realobj = (char *)REAL_ADDRESS(stm_object_pages, obj);
+ stmcb_trace((struct object_s *)realobj, &minor_trace_if_young);
+ }
+ else {
+ /* every segment needs fixing */
+ long i;
+ for (i = 0; i < NB_SEGMENTS; i++) {
+ char *realobj = (char *)REAL_ADDRESS(get_segment_base(i), obj);
+ stmcb_trace((struct object_s *)realobj, &minor_trace_if_young);
+ }
+ }
+}
+
+static void collect_oldrefs_to_nursery(struct list_s *lst)
+{
+ while (!list_is_empty(lst)) {
+ object_t *obj = (object_t *)list_pop_item(lst);
+ assert(!_is_in_nursery(obj));
+
+ /* We must have GCFLAG_WRITE_BARRIER_CALLED so far. If we
+ don't, it's because the same object was stored in several
+ segment's old_objects_pointing_to_young. It's fine to
+ ignore duplicates. */
+ if ((obj->stm_flags & GCFLAG_WRITE_BARRIER_CALLED) == 0)
+ continue;
+
+ /* Remove the flag GCFLAG_WRITE_BARRIER_CALLED. No live object
+ should have this flag set after a nursery collection. */
+ obj->stm_flags &= ~GCFLAG_WRITE_BARRIER_CALLED;
+
+ /* Trace the 'obj' to replace pointers to nursery with pointers
+ outside the nursery, possibly forcing nursery objects out
+ and adding them to 'old_objects_pointing_to_young' as well. */
+ trace_and_drag_out_of_nursery(obj);
+ }
+}
+
static void reset_nursery(void)
{
/* reset the global amount-of-nursery-used-so-far */
@@ -243,6 +299,7 @@
struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i);
/* no race condition here, because all other threads are paused
in safe points, so cannot be e.g. in _stm_allocate_slowpath() */
+ uintptr_t old_end = other_pseg->real_nursery_section_end;
other_pseg->real_nursery_section_end = 0;
other_pseg->pub.v_nursery_section_end = 0;
@@ -252,7 +309,10 @@
'transaction_read_version' without changing
'min_read_version_outside_nursery'.
*/
- if (other_pseg->pub.transaction_read_version < 0xff) {
+ if (other_pseg->transaction_state == TS_NONE) {
+ /* no transaction running now, nothing to do */
+ }
+ else if (other_pseg->pub.transaction_read_version < 0xff) {
other_pseg->pub.transaction_read_version++;
assert(0 < other_pseg->min_read_version_outside_nursery &&
other_pseg->min_read_version_outside_nursery
@@ -268,9 +328,15 @@
}
/* reset the creation markers */
- char *creation_markers = REAL_ADDRESS(other_pseg->pub.segment_base,
- NURSERY_START >> 8);
- memset(creation_markers, 0, NURSERY_SIZE >> 8);
+ if (old_end > NURSERY_START) {
+ char *creation_markers = REAL_ADDRESS(other_pseg->pub.segment_base,
+ NURSERY_START >> 8);
+ assert(old_end < NURSERY_START + NURSERY_SIZE);
+ memset(creation_markers, 0, (old_end - NURSERY_START) >> 8);
+ }
+ else {
+ assert(old_end == 0 || old_end == NURSERY_START);
+ }
}
}
@@ -278,18 +344,43 @@
{
/* all other threads are paused in safe points during the whole
minor collection */
+ dprintf(("minor_collection\n"));
assert(_has_mutex());
+ assert(list_is_empty(old_objects_pointing_to_young));
- check_gcpage_still_shared();
+ /* List of what we need to do and invariants we need to preserve
+ -------------------------------------------------------------
- minor_trace_roots();
+ We must move out of the nursery any object found within the
+ nursery. This requires either one or NB_SEGMENTS copies,
+ depending on the current write-state of the object.
- // copy modified_objects
+ We need to move the mark stored in the write_locks, read_markers
+ and creation_markers arrays. The creation_markers need some care
+ because they work at a coarser granularity of 256 bytes, so
+ objects with an "on" mark should not be moved too close to
+ objects with an "off" mark and vice-versa.
+ Then we must trace (= look inside) some objects outside the
+ nursery, and fix any pointer found that goes to a nursery object.
+ This tracing itself needs to be done either once or NB_SEGMENTS
+ times, depending on whether the object is fully in shared pages
+ or not. We assume that 'stmcb_size_rounded_up' produces the same
+ results on all copies (i.e. don't depend on modifiable
+ information).
+ */
- fprintf(stderr, "minor_collection\n");
- abort(); //...;
+ //check_gcpage_still_shared();
+ collect_roots_in_nursery();
+
+ long i;
+ for (i = 0; i < NB_SEGMENTS; i++) {
+ struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i);
+ collect_oldrefs_to_nursery(other_pseg->old_objects_pointing_to_young);
+ }
+
+ collect_oldrefs_to_nursery(old_objects_pointing_to_young);
reset_nursery();
}
diff --git a/c7/stm/pages.c b/c7/stm/pages.c
--- a/c7/stm/pages.c
+++ b/c7/stm/pages.c
@@ -205,3 +205,22 @@
list_clear(STM_PSEGMENT->creation_markers);
}
+
+static bool is_in_shared_pages(object_t *obj)
+{
+ uintptr_t first_page = ((uintptr_t)obj) / 4096UL;
+
+ if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0)
+ return (flag_page_private[first_page] == SHARED_PAGE);
+
+ ssize_t obj_size = stmcb_size_rounded_up(
+ (struct object_s *)REAL_ADDRESS(stm_object_pages, obj));
+
+ uintptr_t end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL;
+ /* that's the page *following* the last page with the object */
+
+ while (first_page < end_page)
+ if (flag_page_private[first_page++] != SHARED_PAGE)
+ return false;
+ return true;
+}
diff --git a/c7/stm/pages.h b/c7/stm/pages.h
--- a/c7/stm/pages.h
+++ b/c7/stm/pages.h
@@ -36,3 +36,4 @@
static void set_single_creation_marker(stm_char *p, int newvalue);
static void reset_all_creation_markers(void);
static void reset_all_creation_markers_and_push_created_data(void);
+static bool is_in_shared_pages(object_t *obj);
diff --git a/c7/stm/setup.c b/c7/stm/setup.c
--- a/c7/stm/setup.c
+++ b/c7/stm/setup.c
@@ -64,7 +64,7 @@
pr->write_lock_num = i + 1;
pr->pub.segment_num = i;
pr->pub.segment_base = segment_base;
- pr->old_objects_to_trace = list_create();
+ pr->old_objects_pointing_to_young = list_create();
pr->modified_objects = list_create();
pr->creation_markers = list_create();
}
@@ -96,7 +96,9 @@
long i;
for (i = 0; i < NB_SEGMENTS; i++) {
struct stm_priv_segment_info_s *pr = get_priv_segment(i);
- list_free(pr->old_objects_to_trace);
+ list_free(pr->old_objects_pointing_to_young);
+ list_free(pr->modified_objects);
+ list_free(pr->creation_markers);
}
munmap(stm_object_pages, TOTAL_MEMORY);
@@ -107,6 +109,7 @@
teardown_core();
teardown_sync();
teardown_gcpage();
+ teardown_nursery();
}
void stm_register_thread_local(stm_thread_local_t *tl)
More information about the pypy-commit
mailing list