📄 mark.c
字号:
/* But this may be achieved by copying the	*/
/* local mark stack back into the global one.	*/

/* Drain the local mark stack [local_mark_stack, local_top], giving	*/
/* work back to the global mark stack when it is empty and helpers are	*/
/* idle, or when the local stack risks overflowing.			*/
/* Caller must NOT hold the mark lock (checked under GC_ASSERTIONS).	*/
void GC_do_local_mark(mse *local_mark_stack, mse *local_top)
{
    unsigned n;
#   define N_LOCAL_ITERS 1

#   ifdef GC_ASSERTIONS
      /* Make sure we don't hold mark lock. */
      GC_acquire_mark_lock();
      GC_release_mark_lock();
#   endif
    for (;;) {
        for (n = 0; n < N_LOCAL_ITERS; ++n) {
	    local_top = GC_mark_from(local_top, local_mark_stack,
				     local_mark_stack + LOCAL_MARK_STACK_SIZE);
	    /* A top below the stack base means the local stack is empty. */
	    if (local_top < local_mark_stack) return;
	    if (local_top - local_mark_stack >= LOCAL_MARK_STACK_SIZE/2) {
		/* Local stack at least half full: return everything to	*/
		/* the global stack rather than risk local overflow.	*/
		GC_return_mark_stack(local_mark_stack, local_top);
		return;
	    }
	}
	if (GC_mark_stack_top < GC_first_nonempty &&
	    GC_active_count < GC_helper_count
	    && local_top > local_mark_stack + 1) {
	    /* Try to share the load, since the main stack is empty,	*/
	    /* and helper threads are waiting for a refill.		*/
	    /* The entries near the bottom of the stack are likely	*/
	    /* to require more work.  Thus we return those, even	*/
	    /* though it's harder.					*/
	    mse * p;
	    mse * new_bottom = local_mark_stack
				+ (local_top - local_mark_stack)/2;
	    GC_ASSERT(new_bottom > local_mark_stack
		      && new_bottom < local_top);
	    /* Donate the bottom half of the local stack ...		*/
	    GC_return_mark_stack(local_mark_stack, new_bottom - 1);
	    /* ... then slide the kept entries down to the stack base.	*/
	    memmove(local_mark_stack, new_bottom,
		    (local_top - new_bottom + 1) * sizeof(mse));
	    local_top -= (new_bottom - local_mark_stack);
	}
    }
}

#define ENTRIES_TO_GET 5

long GC_markers = 2;		/* Normally changed by thread-library-	*/
				/* -specific code.			*/

/* Mark using the local mark stack until the global mark stack is empty	*/
/* and there are no active workers. Update GC_first_nonempty to reflect	*/
/* progress.								*/
/* Caller does not hold mark lock.					*/
/* Caller has already incremented GC_helper_count.  We decrement it,	*/
/* and maintain GC_active_count.
*/
void GC_mark_local(mse *local_mark_stack, int id)
{
    mse * my_first_nonempty;

    GC_acquire_mark_lock();
    GC_active_count++;
    /* Take a private snapshot of the global scan position.	*/
    my_first_nonempty = GC_first_nonempty;
    GC_ASSERT(GC_first_nonempty >= GC_mark_stack &&
	      GC_first_nonempty <= GC_mark_stack_top + 1);
#   ifdef PRINTSTATS
	GC_printf1("Starting mark helper %lu\n", (unsigned long)id);
#   endif
    GC_release_mark_lock();
    for (;;) {
	size_t n_on_stack;
	size_t n_to_get;
	mse *next;
	mse * my_top;
	mse * local_top;
	mse * global_first_nonempty = GC_first_nonempty;

	GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
		  my_first_nonempty <= GC_mark_stack_top + 1);
	GC_ASSERT(global_first_nonempty >= GC_mark_stack &&
		  global_first_nonempty <= GC_mark_stack_top + 1);
	/* Reconcile our snapshot with the published global position:	*/
	/* adopt the global value if it is ahead of ours, otherwise	*/
	/* try to publish ours.						*/
	if (my_first_nonempty < global_first_nonempty) {
	    my_first_nonempty = global_first_nonempty;
	} else if (global_first_nonempty < my_first_nonempty) {
	    GC_compare_and_exchange((word *)(&GC_first_nonempty),
				    (word) global_first_nonempty,
				    (word) my_first_nonempty);
	    /* If this fails, we just go ahead, without updating	*/
	    /* GC_first_nonempty.					*/
	}
	/* Perhaps we should also update GC_first_nonempty, if it	*/
	/* is less. But that would require using atomic updates.	*/
	my_top = GC_mark_stack_top;
	n_on_stack = my_top - my_first_nonempty + 1;
	if (0 == n_on_stack) {
	    /* Recheck under the mark lock before deciding the global	*/
	    /* stack is really empty.					*/
	    GC_acquire_mark_lock();
	    my_top = GC_mark_stack_top;
	    n_on_stack = my_top - my_first_nonempty + 1;
	    if (0 == n_on_stack) {
		GC_active_count--;
		GC_ASSERT(GC_active_count <= GC_helper_count);
		/* Other markers may redeposit objects		*/
		/* on the stack.				*/
		if (0 == GC_active_count) GC_notify_all_marker();
		while (GC_active_count > 0
		       && GC_first_nonempty > GC_mark_stack_top) {
		    /* We will be notified if either GC_active_count	*/
		    /* reaches zero, or if more objects are pushed on	*/
		    /* the global mark stack.				*/
		    GC_wait_marker();
		}
		if (GC_active_count == 0 &&
		    GC_first_nonempty > GC_mark_stack_top) {
		    GC_bool need_to_notify = FALSE;
		    /* The above conditions can't be falsified while we	*/
		    /* hold the mark lock, since neither		*/
		    /* GC_active_count nor GC_mark_stack_top can	*/
		    /* change.  GC_first_nonempty can only be		*/
		    /* incremented asynchronously.  Thus we know that	*/
		    /* both conditions actually held simultaneously.	*/
		    GC_helper_count--;
		    if (0 == GC_helper_count) need_to_notify = TRUE;
#		    ifdef PRINTSTATS
		      GC_printf1("Finished mark helper %lu\n",
				 (unsigned long)id);
#		    endif
		    GC_release_mark_lock();
		    if (need_to_notify) GC_notify_all_marker();
		    return;
		}
		/* else there's something on the stack again, or	*/
		/* another helper may push something.			*/
		GC_active_count++;
		GC_ASSERT(GC_active_count > 0);
		GC_release_mark_lock();
		continue;
	    } else {
		GC_release_mark_lock();
	    }
	}
	/* Steal a few entries (just one if the stack is nearly empty,	*/
	/* to leave work for other helpers) and process them locally.	*/
	n_to_get = ENTRIES_TO_GET;
	if (n_on_stack < 2 * ENTRIES_TO_GET) n_to_get = 1;
	local_top = GC_steal_mark_stack(my_first_nonempty, my_top,
					local_mark_stack, n_to_get,
					&my_first_nonempty);
	GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
		  my_first_nonempty <= GC_mark_stack_top + 1);
	GC_do_local_mark(local_mark_stack, local_top);
    }
}

/* Perform Parallel mark.			*/
/* We hold the GC lock, not the mark lock.	*/
/* Currently runs until the mark stack is	*/
/* empty.					*/
void GC_do_parallel_mark()
{
    mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
    mse * local_top;
    mse * my_top;

    GC_acquire_mark_lock();
    GC_ASSERT(I_HOLD_LOCK());
    /* This could be a GC_ASSERT, but it seems safer to keep it on	*/
    /* all the time, especially since it's cheap.			*/
    if (GC_help_wanted || GC_active_count != 0 || GC_helper_count != 0)
	ABORT("Tried to start parallel mark in bad state");
#   ifdef PRINTSTATS
	GC_printf1("Starting marking for mark phase number %lu\n",
		   (unsigned long)GC_mark_no);
#   endif
    /* Initialize the shared marking state and open the door for	*/
    /* helper threads.							*/
    GC_first_nonempty = GC_mark_stack;
    GC_active_count = 0;
    GC_helper_count = 1;
    GC_help_wanted = TRUE;
    GC_release_mark_lock();
    GC_notify_all_marker();
	/* Wake up potential helpers.	*/
    GC_mark_local(local_mark_stack, 0);
    GC_acquire_mark_lock();
    GC_help_wanted = FALSE;
    /* Done; clean up.	*/
    while (GC_helper_count > 0) GC_wait_marker();
    /* GC_helper_count cannot be incremented while GC_help_wanted == FALSE */
#   ifdef PRINTSTATS
	GC_printf1("Finished marking for mark phase number %lu\n",
		   (unsigned long)GC_mark_no);
#   endif
    GC_mark_no++;
    GC_release_mark_lock();
    GC_notify_all_marker();
}

/* Try to help out the marker, if it's running.		*/
/* We do not hold the GC lock, but the requestor does.	*/
void GC_help_marker(word my_mark_no)
{
    mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
    unsigned my_id;
    mse * my_first_nonempty;

    if (!GC_parallel) return;
    GC_acquire_mark_lock();
    /* Wait until the mark phase we were asked to help with actually	*/
    /* starts (GC_help_wanted set for our mark number).			*/
    while (GC_mark_no < my_mark_no
	   || !GC_help_wanted && GC_mark_no == my_mark_no) {
	GC_wait_marker();
    }
    my_id = GC_helper_count;
    if (GC_mark_no != my_mark_no || my_id >= GC_markers) {
	/* Second test is useful only if original threads can also	*/
	/* act as helpers.  Under Linux they can't.			*/
	GC_release_mark_lock();
	return;
    }
    GC_helper_count = my_id + 1;
    GC_release_mark_lock();
    GC_mark_local(local_mark_stack, my_id);
    /* GC_mark_local decrements GC_helper_count. */
}

#endif /* PARALLEL_MARK */

/* Allocate or reallocate space for mark stack of size s words */
/* May silently fail.
*/
static void alloc_mark_stack(n)
word n;
{
    mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct GC_ms_entry));

    GC_mark_stack_too_small = FALSE;
    if (GC_mark_stack_size != 0) {
	/* Growing an existing stack. */
	if (new_stack != 0) {
	    word displ = (word)GC_mark_stack & (GC_page_size - 1);
	    signed_word size = GC_mark_stack_size * sizeof(struct GC_ms_entry);

	    /* Recycle old space: return the page-aligned portion of	*/
	    /* the old stack to the heap.				*/
	    if (0 != displ) displ = GC_page_size - displ;
	    size = (size - displ) & ~(GC_page_size - 1);
	    if (size > 0) {
		GC_add_to_heap((struct hblk *)
				((word)GC_mark_stack + displ), (word)size);
	    }
	    GC_mark_stack = new_stack;
	    GC_mark_stack_size = n;
	    GC_mark_stack_limit = new_stack + n;
#	    ifdef CONDPRINT
	      if (GC_print_stats) {
		  GC_printf1("Grew mark stack to %lu frames\n",
			     (unsigned long) GC_mark_stack_size);
	      }
#	    endif
	} else {
	    /* Allocation failed; keep the old (too small) stack.	*/
#	    ifdef CONDPRINT
	      if (GC_print_stats) {
		  GC_printf1("Failed to grow mark stack to %lu frames\n",
			     (unsigned long) n);
	      }
#	    endif
	}
    } else {
	/* Initial allocation: failure here is fatal.	*/
	if (new_stack == 0) {
	    GC_err_printf0("No space for mark stack\n");
	    EXIT();
	}
	GC_mark_stack = new_stack;
	GC_mark_stack_size = n;
	GC_mark_stack_limit = new_stack + n;
    }
    /* An empty stack is denoted by top == base - 1.	*/
    GC_mark_stack_top = GC_mark_stack-1;
}

/* One-time mark-phase initialization: allocate the initial mark stack. */
void GC_mark_init()
{
    alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
}

/*
 * Push all locations between b and t onto the mark stack.
 * b is the first location to be checked. t is one past the last
 * location to be checked.
 * Should only be used if there is no possibility of mark stack
 * overflow.
*/
void GC_push_all(bottom, top)
ptr_t bottom;
ptr_t top;
{
    register word sz;

    /* Round both limits inward to pointer alignment.	*/
    bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));

    /* Nothing to do for an empty (or degenerate) range.	*/
    if (top == 0) return;
    if (bottom == top) return;

    /* Byte length of the range, with room reserved for the	*/
    /* descriptor tag bits when they exceed the alignment.	*/
    sz = top - bottom;
#   if GC_DS_TAGS > ALIGNMENT - 1
      sz += GC_DS_TAGS;
      sz &= ~GC_DS_TAGS;
#   endif

    /* Claim the next mark-stack slot and fill it in.	*/
    GC_mark_stack_top++;
    if (GC_mark_stack_top >= GC_mark_stack_limit) {
	ABORT("unexpected mark stack overflow");
    }
    GC_mark_stack_top -> mse_start = (word *)bottom;
    GC_mark_stack_top -> mse_descr = sz;
}

/*
 * Analogous to the above, but push only those pages h with dirty_fn(h) != 0.
 * We use push_fn to actually push the block.
 * Used both to selectively push dirty pages, or to push a block
 * in piecemeal fashion, to allow for more marking concurrency.
 * Will not overflow mark stack if push_fn pushes a small fixed number
 * of entries.  (This is invoked only if push_fn pushes a single entry,
 * or if it marks each object before pushing it, thus ensuring progress
 * in the event of a stack overflow.)
*/
void GC_push_selected(bottom, top, dirty_fn, push_fn)
ptr_t bottom;
ptr_t top;
int (*dirty_fn) GC_PROTO((struct hblk * h));
void (*push_fn) GC_PROTO((ptr_t bottom, ptr_t top));
{
    register struct hblk * h;

    bottom = (ptr_t)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((long) top) & ~(ALIGNMENT-1));

    if (top == 0 || bottom == top) return;
    /* First heap-block boundary at or above bottom.	*/
    h = HBLKPTR(bottom + HBLKSIZE);
    if (top <= (ptr_t) h) {
	/* The whole range lies within a single block.	*/
	if ((*dirty_fn)(h-1)) {
	    (*push_fn)(bottom, top);
	}
	return;
    }
    /* Leading partial block, from bottom to the first boundary.	*/
    if ((*dirty_fn)(h-1)) {
	(*push_fn)(bottom, (ptr_t)h);
    }
    /* Whole blocks in the interior of the range.	*/
    while ((ptr_t)(h+1) <= top) {
	if ((*dirty_fn)(h)) {
	    if ((word)(GC_mark_stack_top - GC_mark_stack)
		> 3 * GC_mark_stack_size / 4) {
		/* Danger of mark stack overflow: push the whole	*/
		/* remainder as one entry and stop subdividing.		*/
		(*push_fn)((ptr_t)h, top);
		return;
	    } else {
		(*push_fn)((ptr_t)h, (ptr_t)(h+1));
	    }
	}
	h++;
    }
    /* Trailing partial block, if any.	*/
    if ((ptr_t)h != top) {
	if ((*dirty_fn)(h)) {
	    (*push_fn)((ptr_t)h, top);
	}
    }
    if (GC_mark_stack_top >= GC_mark_stack_limit) {
	ABORT("unexpected mark stack overflow");
    }
}

# ifndef SMALL_CONFIG

#ifdef PARALLEL_MARK
    /* Break up root sections into page size chunks to better spread */
    /* out work.
*/
  /* Trivial dirty predicate: treat every block as dirty.	*/
  GC_bool GC_true_func(struct hblk *h) { return TRUE; }
  /* Parallel build: push page-at-a-time so helpers can share work. */
# define GC_PUSH_ALL(b,t) GC_push_selected(b,t,GC_true_func,GC_push_all);
#else
# define GC_PUSH_ALL(b,t) GC_push_all(b,t);
#endif

/* Push [bottom, top): everything if all is set (subject to virtual	*/
/* dirty-bit filtering under PROC_VDB), otherwise only pages reported	*/
/* dirty since the last collection.					*/
void GC_push_conditional(bottom, top, all)
ptr_t bottom;
ptr_t top;
int all;
{
    if (all) {
      if (GC_dirty_maintained) {
#	ifdef PROC_VDB
	    /* Pages that were never dirtied cannot contain pointers	*/
	    GC_push_selected(bottom, top, GC_page_was_ever_dirty,
			     GC_push_all);
#	else
	    GC_push_all(bottom, top);
#	endif
      } else {
	GC_push_all(bottom, top);
      }
    } else {
	GC_push_selected(bottom, top, GC_page_was_dirty, GC_push_all);
    }
}
#endif

/* Push a single candidate pointer found in a register/stack word.	*/
# if defined(MSWIN32) || defined(MSWINCE)
  void __cdecl GC_push_one(p)
# else
  void GC_push_one(p)
# endif
word p;
{
    GC_PUSH_ONE_STACK(p, MARKED_FROM_REGISTER);
}

/* Mark obj and push it on the mark stack if it was not already	*/
/* marked; returns the (possibly advanced) mark stack pointer.	*/
struct GC_ms_entry *GC_mark_and_push(obj, mark_stack_ptr,
				     mark_stack_limit, src)
GC_PTR obj;
struct GC_ms_entry * mark_stack_ptr;
struct GC_ms_entry * mark_stack_limit;
GC_PTR *src;
{
   PREFETCH(obj);
   PUSH_CONTENTS(obj, mark_stack_ptr /* modified */, mark_stack_limit, src,
		 was_marked /* internally generated exit label */);
   return mark_stack_ptr;
}

# ifdef __STDC__
#   define BASE(p) (word)GC_base((void *)(p))
# else
#   define BASE(p) (word)GC_base((char *)(p))
# endif

/* Mark and push (i.e. gray) a single object p onto the main	*/
/* mark stack.  Consider p to be valid if it is an interior	*/
/* pointer.							*/
/* The object p has passed a preliminary pointer validity	*/
/* test, but we do not definitely know whether it is valid.	*/
/* Mark bits are NOT atomically updated.  Thus this must be the	*/
/* only thread setting them.
*/# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS) void GC_mark_and_push_stack(p, source) ptr_t source;# else void GC_mark_and_push_stack(p)# define source 0# endifregister word p;{ register word r; register hdr * hhdr; register int displ; GET_HDR(p, hhdr); if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) { if (hhdr != 0) { r = BASE(p); hhdr = HDR(r); displ = BYTES_TO_WORDS(HBLKDISPL(r)); } } else { register map_entry_type map_entry; displ = HBLKDISPL(p); map_entry = MAP_ENTRY((hhdr -> hb_map), displ); if (map_entry >= MAX_OFFSET) {
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -