
📄 mark.c

💻 C
📖 Page 1 of 4
        /* Ensures visibility of previously written stack contents. */
    }
    GC_release_mark_lock();
    GC_notify_all_marker();
}

/* Mark from the local mark stack.              */
/* On return, the local mark stack is empty.    */
/* But this may be achieved by copying the      */
/* local mark stack back into the global one.   */
void GC_do_local_mark(mse *local_mark_stack, mse *local_top)
{
    unsigned n;
#   define N_LOCAL_ITERS 1
#   ifdef GC_ASSERTIONS
      /* Make sure we don't hold mark lock. */
        GC_acquire_mark_lock();
        GC_release_mark_lock();
#   endif
    for (;;) {
        for (n = 0; n < N_LOCAL_ITERS; ++n) {
            local_top = GC_mark_from(local_top, local_mark_stack,
                                     local_mark_stack + LOCAL_MARK_STACK_SIZE);
            if (local_top < local_mark_stack) return;
            if (local_top - local_mark_stack >= LOCAL_MARK_STACK_SIZE/2) {
                GC_return_mark_stack(local_mark_stack, local_top);
                return;
            }
        }
        if ((mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top))
            < (mse *)AO_load(&GC_first_nonempty)
            && GC_active_count < GC_helper_count
            && local_top > local_mark_stack + 1) {
            /* Try to share the load, since the main stack is empty,   */
            /* and helper threads are waiting for a refill.            */
            /* The entries near the bottom of the stack are likely     */
            /* to require more work.  Thus we return those, even       */
            /* though it's harder.                                     */
            mse * new_bottom = local_mark_stack
                                + (local_top - local_mark_stack)/2;
            GC_ASSERT(new_bottom > local_mark_stack
                      && new_bottom < local_top);
            GC_return_mark_stack(local_mark_stack, new_bottom - 1);
            memmove(local_mark_stack, new_bottom,
                    (local_top - new_bottom + 1) * sizeof(mse));
            local_top -= (new_bottom - local_mark_stack);
        }
    }
}
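/* Example of the split above: if local_top == local_mark_stack + 8    */
/* (nine entries), new_bottom is local_mark_stack + 4.  Entries 0..3   */
/* are handed back to the global mark stack, entries 4..8 are slid     */
/* down to positions 0..4 by the memmove, and local_top ends up at     */
/* local_mark_stack + 4.  The bottom (oldest) entries are the ones     */
/* donated because they tend to represent more remaining work.         */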
#define ENTRIES_TO_GET 5

long GC_markers = 2;            /* Normally changed by thread-library- */
                                /* specific code.                      */

/* Mark using the local mark stack until the global mark stack is empty  */
/* and there are no active workers.  Update GC_first_nonempty to reflect */
/* progress.                                                             */
/* Caller does not hold mark lock.                                       */
/* Caller has already incremented GC_helper_count.  We decrement it,     */
/* and maintain GC_active_count.                                          */
void GC_mark_local(mse *local_mark_stack, int id)
{
    mse * my_first_nonempty;

    GC_acquire_mark_lock();
    GC_active_count++;
    my_first_nonempty = (mse *)AO_load(&GC_first_nonempty);
    GC_ASSERT((mse *)AO_load(&GC_first_nonempty) >= GC_mark_stack &&
              (mse *)AO_load(&GC_first_nonempty) <=
              (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
    if (GC_print_stats == VERBOSE)
        GC_log_printf("Starting mark helper %lu\n", (unsigned long)id);
    GC_release_mark_lock();
    for (;;) {
        size_t n_on_stack;
        size_t n_to_get;
        mse * my_top;
        mse * local_top;
        mse * global_first_nonempty = (mse *)AO_load(&GC_first_nonempty);

        GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
                  my_first_nonempty <=
                  (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
        GC_ASSERT(global_first_nonempty >= GC_mark_stack &&
                  global_first_nonempty <=
                  (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
        if (my_first_nonempty < global_first_nonempty) {
            my_first_nonempty = global_first_nonempty;
        } else if (global_first_nonempty < my_first_nonempty) {
            AO_compare_and_swap(&GC_first_nonempty,
                                (AO_t) global_first_nonempty,
                                (AO_t) my_first_nonempty);
            /* If this fails, we just go ahead, without updating        */
            /* GC_first_nonempty.                                       */
        }
        /* Perhaps we should also update GC_first_nonempty, if it is   */
        /* less.  But that would require using atomic updates.         */
        my_top = (mse *)AO_load_acquire((volatile AO_t *)(&GC_mark_stack_top));
        n_on_stack = my_top - my_first_nonempty + 1;
        if (0 == n_on_stack) {
            GC_acquire_mark_lock();
            my_top = GC_mark_stack_top;
                /* Asynchronous modification impossible here,   */
                /* since we hold mark lock.                     */
            n_on_stack = my_top - my_first_nonempty + 1;
            if (0 == n_on_stack) {
                GC_active_count--;
                GC_ASSERT(GC_active_count <= GC_helper_count);
                /* Other markers may redeposit objects  */
                /* on the stack.                        */
                if (0 == GC_active_count) GC_notify_all_marker();
                while (GC_active_count > 0
                       && (mse *)AO_load(&GC_first_nonempty)
                          > GC_mark_stack_top) {
                    /* We will be notified if either GC_active_count    */
                    /* reaches zero, or if more objects are pushed on   */
                    /* the global mark stack.                           */
                    GC_wait_marker();
                }
                if (GC_active_count == 0 &&
                    (mse *)AO_load(&GC_first_nonempty) > GC_mark_stack_top) {
                    GC_bool need_to_notify = FALSE;
                    /* The above conditions can't be falsified while we */
                    /* hold the mark lock, since neither                */
                    /* GC_active_count nor GC_mark_stack_top can        */
                    /* change.  GC_first_nonempty can only be           */
                    /* incremented asynchronously.  Thus we know that   */
                    /* both conditions actually held simultaneously.    */
                    GC_helper_count--;
                    if (0 == GC_helper_count) need_to_notify = TRUE;
                    if (GC_print_stats == VERBOSE)
                      GC_log_printf(
                        "Finished mark helper %lu\n", (unsigned long)id);
                    GC_release_mark_lock();
                    if (need_to_notify) GC_notify_all_marker();
                    return;
                }
                /* else there's something on the stack again, or        */
                /* another helper may push something.                   */
                GC_active_count++;
                GC_ASSERT(GC_active_count > 0);
                GC_release_mark_lock();
                continue;
            } else {
                GC_release_mark_lock();
            }
        }
        n_to_get = ENTRIES_TO_GET;
        if (n_on_stack < 2 * ENTRIES_TO_GET) n_to_get = 1;
        local_top = GC_steal_mark_stack(my_first_nonempty, my_top,
                                        local_mark_stack, n_to_get,
                                        &my_first_nonempty);
        GC_ASSERT(my_first_nonempty >= GC_mark_stack &&
                  my_first_nonempty <=
                  (mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top)) + 1);
        GC_do_local_mark(local_mark_stack, local_top);
    }
}
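/* Summary of the protocol implemented by the routines below:          */
/* GC_do_parallel_mark (run by the initiating marker) sets             */
/* GC_help_wanted and wakes waiting threads; helpers enter through     */
/* GC_help_marker, register by incrementing GC_helper_count (at most   */
/* GC_markers of them), and loop in GC_mark_local, stealing up to      */
/* ENTRIES_TO_GET entries at a time from the portion of the global     */
/* stack at or above GC_first_nonempty.  A helper deregisters once     */
/* the global stack is drained and GC_active_count drops to zero.      */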
/* Perform Parallel mark.                       */
/* We hold the GC lock, not the mark lock.      */
/* Currently runs until the mark stack is       */
/* empty.                                       */
void GC_do_parallel_mark()
{
    mse local_mark_stack[LOCAL_MARK_STACK_SIZE];

    GC_acquire_mark_lock();
    GC_ASSERT(I_HOLD_LOCK());
    /* This could be a GC_ASSERT, but it seems safer to keep it on      */
    /* all the time, especially since it's cheap.                       */
    if (GC_help_wanted || GC_active_count != 0 || GC_helper_count != 0)
        ABORT("Tried to start parallel mark in bad state");
    if (GC_print_stats == VERBOSE)
        GC_log_printf("Starting marking for mark phase number %lu\n",
                      (unsigned long)GC_mark_no);
    GC_first_nonempty = (AO_t)GC_mark_stack;
    GC_active_count = 0;
    GC_helper_count = 1;
    GC_help_wanted = TRUE;
    GC_release_mark_lock();
    GC_notify_all_marker();
        /* Wake up potential helpers.   */
    GC_mark_local(local_mark_stack, 0);
    GC_acquire_mark_lock();
    GC_help_wanted = FALSE;
    /* Done; clean up.  */
    while (GC_helper_count > 0) GC_wait_marker();
    /* GC_helper_count cannot be incremented while GC_help_wanted == FALSE */
    if (GC_print_stats == VERBOSE)
        GC_log_printf(
            "Finished marking for mark phase number %lu\n",
            (unsigned long)GC_mark_no);
    GC_mark_no++;
    GC_release_mark_lock();
    GC_notify_all_marker();
}


/* Try to help out the marker, if it's running.         */
/* We do not hold the GC lock, but the requestor does.  */
void GC_help_marker(word my_mark_no)
{
    mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
    unsigned my_id;

    if (!GC_parallel) return;
    GC_acquire_mark_lock();
    while (GC_mark_no < my_mark_no
           || (!GC_help_wanted && GC_mark_no == my_mark_no)) {
      GC_wait_marker();
    }
    my_id = GC_helper_count;
    if (GC_mark_no != my_mark_no || my_id >= GC_markers) {
      /* Second test is useful only if original threads can also        */
      /* act as helpers.  Under Linux they can't.                       */
      GC_release_mark_lock();
      return;
    }
    GC_helper_count = my_id + 1;
    GC_release_mark_lock();
    GC_mark_local(local_mark_stack, my_id);
    /* GC_mark_local decrements GC_helper_count. */
}

#endif /* PARALLEL_MARK */

/* Allocate or reallocate space for mark stack of size n entries.  */
/* May silently fail.                                              */
static void alloc_mark_stack(size_t n)
{
    mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct GC_ms_entry));
#   ifdef GWW_VDB
      /* Don't recycle a stack segment obtained with the wrong flags.   */
      /* Win32 GetWriteWatch requires the right kind of memory.         */
      static GC_bool GC_incremental_at_stack_alloc = 0;
      GC_bool recycle_old = (!GC_incremental || GC_incremental_at_stack_alloc);

      GC_incremental_at_stack_alloc = GC_incremental;
#   else
#     define recycle_old 1
#   endif

    GC_mark_stack_too_small = FALSE;
    if (GC_mark_stack_size != 0) {
        if (new_stack != 0) {
          if (recycle_old) {
            /* Recycle old space */
              size_t page_offset = (word)GC_mark_stack & (GC_page_size - 1);
              size_t size = GC_mark_stack_size * sizeof(struct GC_ms_entry);
              size_t displ = 0;

              if (0 != page_offset) displ = GC_page_size - page_offset;
              size = (size - displ) & ~(GC_page_size - 1);
              if (size > 0) {
                GC_add_to_heap((struct hblk *)
                                ((word)GC_mark_stack + displ), (word)size);
              }
          }
          GC_mark_stack = new_stack;
          GC_mark_stack_size = n;
          GC_mark_stack_limit = new_stack + n;
          if (GC_print_stats) {
              GC_log_printf("Grew mark stack to %lu frames\n",
                            (unsigned long) GC_mark_stack_size);
          }
        } else {
          if (GC_print_stats) {
              GC_log_printf("Failed to grow mark stack to %lu frames\n",
                            (unsigned long) n);
          }
        }
    } else {
        if (new_stack == 0) {
            GC_err_printf("No space for mark stack\n");
            EXIT();
        }
        GC_mark_stack = new_stack;
        GC_mark_stack_size = n;
        GC_mark_stack_limit = new_stack + n;
    }
    GC_mark_stack_top = GC_mark_stack-1;
}
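/* Example of the recycling arithmetic above: with GC_page_size 4096,  */
/* an old stack starting at address 0x10100 and occupying 0x3000       */
/* bytes, page_offset is 0x100, displ is 0xF00, and size becomes       */
/* (0x3000 - 0xF00) & ~0xFFF == 0x2000.  The two whole pages at        */
/* 0x11000..0x12FFF are returned to the heap, while the partial        */
/* pages at either end of the old stack are simply dropped.            */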
void GC_mark_init()
{
    alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
}

/*
 * Push all locations between b and t onto the mark stack.
 * b is the first location to be checked.  t is one past the last
 * location to be checked.
 * Should only be used if there is no possibility of mark stack
 * overflow.
 */
void GC_push_all(ptr_t bottom, ptr_t top)
{
    register word length;

    bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
    if (top == 0 || bottom == top) return;
    GC_mark_stack_top++;
    if (GC_mark_stack_top >= GC_mark_stack_limit) {
        ABORT("unexpected mark stack overflow");
    }
    length = top - bottom;
#   if GC_DS_TAGS > ALIGNMENT - 1
        length += GC_DS_TAGS;
        length &= ~GC_DS_TAGS;
#   endif
    GC_mark_stack_top -> mse_start = bottom;
    GC_mark_stack_top -> mse_descr = length;
}
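/* Note on the entry written above: mse_descr here is a simple length  */
/* descriptor.  The GC_DS_TAGS rounding keeps the low tag bits of the  */
/* length clear, and since the GC_DS_LENGTH tag is zero in this        */
/* collector's descriptor encoding, the rounded length is itself a     */
/* valid descriptor.  For instance, pushing the range [0x10000,        */
/* 0x10104) on a platform where GC_DS_TAGS is 3 yields the entry       */
/* { mse_start = 0x10000, mse_descr = 0x104 }.                         */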
/*
 * Analogous to the above, but push only those pages h with dirty_fn(h) != 0.
 * We use push_fn to actually push the block.
 * Used both to selectively push dirty pages, or to push a block
 * in piecemeal fashion, to allow for more marking concurrency.
 * Will not overflow mark stack if push_fn pushes a small fixed number
 * of entries.  (This is invoked only if push_fn pushes a single entry,
 * or if it marks each object before pushing it, thus ensuring progress
 * in the event of a stack overflow.)
 */
void GC_push_selected(ptr_t bottom, ptr_t top,
                      int (*dirty_fn) (struct hblk *),
                      void (*push_fn) (ptr_t, ptr_t))
{
    struct hblk * h;

    bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
    if (top == 0 || bottom == top) return;
    h = HBLKPTR(bottom + HBLKSIZE);
    if (top <= (ptr_t) h) {
        if ((*dirty_fn)(h-1)) {
            (*push_fn)(bottom, top);
        }
        return;
    }
    if ((*dirty_fn)(h-1)) {
        (*push_fn)(bottom, (ptr_t)h);
    }
    while ((ptr_t)(h+1) <= top) {
        if ((*dirty_fn)(h)) {
            if ((word)(GC_mark_stack_top - GC_mark_stack)
                > 3 * GC_mark_stack_size / 4) {
                /* Danger of mark stack overflow */
                (*push_fn)((ptr_t)h, top);
                return;
            } else {
                (*push_fn)((ptr_t)h, (ptr_t)(h+1));
            }
        }
        h++;
    }
    if ((ptr_t)h != top) {
        if ((*dirty_fn)(h)) {
            (*push_fn)((ptr_t)h, top);
        }
    }
    if (GC_mark_stack_top >= GC_mark_stack_limit) {
        ABORT("unexpected mark stack overflow");
    }
}

# ifndef SMALL_CONFIG

#ifdef PARALLEL_MARK
    /* Break up root sections into page size chunks to better spread   */
    /* out work.                                                       */
    GC_bool GC_true_func(struct hblk *h) { return TRUE; }
#   define GC_PUSH_ALL(b,t) GC_push_selected(b,t,GC_true_func,GC_push_all);
#else
#   define GC_PUSH_ALL(b,t) GC_push_all(b,t);
#endif

void GC_push_conditional(ptr_t bottom, ptr_t top, GC_bool all)
{
    if (all) {
      if (GC_dirty_maintained) {
#       ifdef PROC_VDB
            /* Pages that were never dirtied cannot contain pointers    */
            GC_push_selected(bottom, top, GC_page_was_ever_dirty, GC_push_all);
#       else
            GC_push_all(bottom, top);
#       endif
      } else {
        GC_push_all(bottom, top);
      }
    } else {
        GC_push_selected(bottom, top, GC_page_was_dirty, GC_push_all);
    }
}
#endif

# if defined(MSWIN32) || defined(MSWINCE)
  void __cdecl GC_push_one(word p)
# else
  void GC_push_one(word p)
# endif
{
    GC_PUSH_ONE_STACK((ptr_t)p, MARKED_FROM_REGISTER);
}

struct GC_ms_entry *GC_mark_and_push(void *obj,
                                     mse *mark_stack_ptr,
                                     mse *mark_stack_limit,
                                     void **src)
{
    hdr * hhdr;

    PREFETCH(obj);
    GET_HDR(obj, hhdr);
    if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr),FALSE)) {
      if (GC_all_interior_pointers) {
        hhdr = GC_find_header(GC_base(obj));
        if (hhdr == 0) {
          GC_ADD_TO_BLACK_LIST_NORMAL(obj, src);
          return mark_stack_ptr;
        }
      } else {
        GC_ADD_TO_BLACK_LIST_NORMAL(obj, src);
        return mark_stack_ptr;
      }
    }
    if (EXPECT(HBLK_IS_FREE(hhdr),0)) {
        GC_ADD_TO_BLACK_LIST_NORMAL(obj, src);
        return mark_stack_ptr;
    }
    PUSH_CONTENTS_HDR(obj, mark_stack_ptr /* modified */, mark_stack_limit,
                      src, was_marked, hhdr, TRUE);
 was_marked:
    return mark_stack_ptr;
}

/* Mark and push (i.e. gray) a single object p onto the main    */
/* mark stack.  Consider p to be valid if it is an interior     */
/* pointer.                                                     */
/* The object p has passed a preliminary pointer validity       */
/* test, but we do not definitely know whether it is valid.     */
/* Mark bits are NOT atomically updated.  Thus this must be the */
/* only thread setting them.                                    */
# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
    void GC_mark_and_push_stack(ptr_t p, ptr_t source)
# else
    void GC_mark_and_push_stack(ptr_t p)
#   define source 0
# endif
{
    hdr * hhdr;
    ptr_t r = p;

    PREFETCH(p);
    GET_HDR(p, hhdr);
    if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr),FALSE)) {
        if (hhdr != 0) {
          r = GC_base(p);
          hhdr = HDR(r);
        }
