
mark.c

        if (hhdr == 0) {
            GC_ADD_TO_BLACK_LIST_STACK(p, source);
            return;
        }
    }
    if (EXPECT(HBLK_IS_FREE(hhdr),0)) {
        GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
        return;
    }
#   if defined(MANUAL_VDB) && defined(THREADS)
      /* Pointer is on the stack.  We may have dirtied the object */
      /* it points to, but not yet have called GC_dirty();        */
      GC_dirty(p);      /* Implicitly affects entire object.      */
#   endif
    PUSH_CONTENTS_HDR(r, GC_mark_stack_top, GC_mark_stack_limit,
                      source, mark_and_push_exit, hhdr, FALSE);
  mark_and_push_exit: ;
    /* We silently ignore pointers to near the end of a block, */
    /* which is very mildly suboptimal.                        */
    /* FIXME: We should probably add a header word to address  */
    /* this.                                                   */
}

# ifdef TRACE_BUF

# define TRACE_ENTRIES 1000

struct trace_entry {
    char * kind;
    word gc_no;
    word bytes_allocd;
    word arg1;
    word arg2;
} GC_trace_buf[TRACE_ENTRIES];

int GC_trace_buf_ptr = 0;

void GC_add_trace_entry(char *kind, word arg1, word arg2)
{
    GC_trace_buf[GC_trace_buf_ptr].kind = kind;
    GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
    GC_trace_buf[GC_trace_buf_ptr].bytes_allocd = GC_bytes_allocd;
    GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
    GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
    GC_trace_buf_ptr++;
    if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
}

void GC_print_trace(word gc_no, GC_bool lock)
{
    int i;
    struct trace_entry *p;

    if (lock) LOCK();
    for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
        if (i < 0) i = TRACE_ENTRIES-1;
        p = GC_trace_buf + i;
        if (p -> gc_no < gc_no || p -> kind == 0) return;
        printf("Trace:%s (gc:%d,bytes:%d) 0x%X, 0x%X\n",
               p -> kind, p -> gc_no, p -> bytes_allocd,
               (p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
    }
    printf("Trace incomplete\n");
    if (lock) UNLOCK();
}

# endif /* TRACE_BUF */

/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans the entire region immediately, in case the contents
 * change.
 */
void GC_push_all_eager(ptr_t bottom, ptr_t top)
{
    word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
    register word *p;
    register ptr_t q;
    register word *lim;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    if (top == 0) return;
    /* check all pointers in range and push if they appear */
    /* to be valid.                                        */
      lim = t - 1 /* longword */;
      for (p = b; p <= lim; p = (word *)(((ptr_t)p) + ALIGNMENT)) {
        q = (ptr_t)(*p);
        GC_PUSH_ONE_STACK((ptr_t)q, p);
      }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}
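/*
 * A minimal standalone sketch of the eager scan above, assuming
 * word-sized alignment: round bottom up and top down to word
 * boundaries, then treat every word in the range as a candidate
 * pointer and range-check it against plausible heap bounds.
 * demo_heap_lo/demo_heap_hi and consider_pointer() are simplified
 * stand-ins for the collector's bounds and GC_PUSH_ONE_STACK, not
 * the real API.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ALIGNMENT sizeof(uintptr_t)

static char *demo_heap_lo, *demo_heap_hi;   /* plausible heap bounds */

static void consider_pointer(uintptr_t q, uintptr_t *p)
{
    /* Only values that fall inside the plausible heap range would */
    /* actually be pushed onto the mark stack.                     */
    if ((char *)q > demo_heap_lo && (char *)q < demo_heap_hi)
        printf("word at %p looks like a heap pointer: %p\n",
               (void *)p, (void *)q);
}

static void scan_eager(char *bottom, char *top)
{
    /* Round bottom up and top down to word alignment, as above. */
    uintptr_t *b = (uintptr_t *)(((uintptr_t)bottom + DEMO_ALIGNMENT - 1)
                                 & ~(uintptr_t)(DEMO_ALIGNMENT - 1));
    uintptr_t *t = (uintptr_t *)((uintptr_t)top
                                 & ~(uintptr_t)(DEMO_ALIGNMENT - 1));
    uintptr_t *p;

    for (p = b; p <= t - 1; p++)    /* every aligned word is a candidate */
        consider_pointer(*p, p);
}

int main(void)
{
    char heap[64];
    uintptr_t frame[4];

    demo_heap_lo = heap;
    demo_heap_hi = heap + sizeof heap;
    frame[0] = (uintptr_t)(heap + 8);   /* plausible interior pointer */
    frame[1] = 12345;                   /* not a pointer              */
    frame[2] = (uintptr_t)(heap + 32);  /* plausible interior pointer */
    frame[3] = 0;
    scan_eager((char *)frame, (char *)(frame + 4));
    return 0;
}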
#ifndef THREADS
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans part of the area immediately, to make sure that saved
 * register values are not lost.
 * Cold_gc_frame delimits the stack section that must be scanned
 * eagerly.  A zero value indicates that no eager scanning is needed.
 * We don't need to worry about the MANUAL_VDB case here, since this
 * is only called in the single-threaded case.  We assume that we
 * cannot collect between an assignment and the corresponding
 * GC_dirty() call.
 */
void GC_push_all_stack_partially_eager(ptr_t bottom, ptr_t top,
                                       ptr_t cold_gc_frame)
{
  if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
    /* Push the hot end of the stack eagerly, so that register values */
    /* saved inside GC frames are marked before they disappear.       */
    /* The rest of the marking can be deferred until later.           */
    if (0 == cold_gc_frame) {
        GC_push_all_stack(bottom, top);
        return;
    }
    GC_ASSERT(bottom <= cold_gc_frame && cold_gc_frame <= top);
#   ifdef STACK_GROWS_DOWN
        GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
        GC_push_all_eager(bottom, cold_gc_frame);
#   else /* STACK_GROWS_UP */
        GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
        GC_push_all_eager(cold_gc_frame, top);
#   endif /* STACK_GROWS_UP */
  } else {
    GC_push_all_eager(bottom, top);
  }
# ifdef TRACE_BUF
      GC_add_trace_entry("GC_push_all_stack", bottom, top);
# endif
}
#endif /* !THREADS */

void GC_push_all_stack(ptr_t bottom, ptr_t top)
{
# if defined(THREADS) && defined(MPROTECT_VDB)
    GC_push_all_eager(bottom, top);
# else
    if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
      GC_push_all(bottom, top);
    } else {
      GC_push_all_eager(bottom, top);
    }
# endif
}
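/*
 * A worked sketch of the hot/cold split above for a downward-growing
 * stack: the hot (recently active) end [bottom, cold_gc_frame) is
 * scanned eagerly, while the range from one pointer below
 * cold_gc_frame up to top can be deferred.  The one-word overlap
 * mirrors the code above, which starts the deferred range just below
 * cold_gc_frame so no word on the boundary is missed.  split_stack()
 * and the printfs are illustration only, not collector API.
 */
#include <stdio.h>

static void split_stack(char *bottom, char *top, char *cold_gc_frame)
{
    if (cold_gc_frame == 0) {
        printf("no cold frame: push the whole range [%p, %p)\n",
               (void *)bottom, (void *)top);
        return;
    }
    /* STACK_GROWS_DOWN case from the code above. */
    printf("eager:    [%p, %p)\n", (void *)bottom, (void *)cold_gc_frame);
    printf("deferred: [%p, %p)\n",
           (void *)(cold_gc_frame - sizeof(char *)), (void *)top);
}

int main(void)
{
    char stack[256];                    /* pretend this is the C stack */

    split_stack(stack, stack + 256, stack + 64);
    return 0;
}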
#if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES) && \
    defined(MARK_BIT_PER_GRANULE)
# if GC_GRANULE_WORDS == 1
#   define USE_PUSH_MARKED_ACCELERATORS
#   define PUSH_GRANULE(q) \
                { ptr_t qcontents = (ptr_t)((q)[0]); \
                  GC_PUSH_ONE_HEAP(qcontents, (q)); }
# elif GC_GRANULE_WORDS == 2
#   define USE_PUSH_MARKED_ACCELERATORS
#   define PUSH_GRANULE(q) \
                { ptr_t qcontents = (ptr_t)((q)[0]); \
                  GC_PUSH_ONE_HEAP(qcontents, (q)); \
                  qcontents = (ptr_t)((q)[1]); \
                  GC_PUSH_ONE_HEAP(qcontents, (q)+1); }
# elif GC_GRANULE_WORDS == 4
#   define USE_PUSH_MARKED_ACCELERATORS
#   define PUSH_GRANULE(q) \
                { ptr_t qcontents = (ptr_t)((q)[0]); \
                  GC_PUSH_ONE_HEAP(qcontents, (q)); \
                  qcontents = (ptr_t)((q)[1]); \
                  GC_PUSH_ONE_HEAP(qcontents, (q)+1); \
                  qcontents = (ptr_t)((q)[2]); \
                  GC_PUSH_ONE_HEAP(qcontents, (q)+2); \
                  qcontents = (ptr_t)((q)[3]); \
                  GC_PUSH_ONE_HEAP(qcontents, (q)+3); }
# endif
#endif

#ifdef USE_PUSH_MARKED_ACCELERATORS
/* Push all objects reachable from marked objects in the given block */
/* containing objects of size 1 granule.                             */
void GC_push_marked1(struct hblk *h, hdr *hhdr)
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    word *p;
    word *plim;
    word *q;
    word mark_word;

    /* Allow registers to be used for some frequently accessed */
    /* global variables.  Otherwise aliasing issues are likely */
    /* to prevent that.                                        */
    ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    ptr_t least_ha = GC_least_plausible_heap_addr;
    mse * mark_stack_top = GC_mark_stack_top;
    mse * mark_stack_limit = GC_mark_stack_limit;
#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
    while( p < plim ) {
        mark_word = *mark_word_addr++;
        q = p;
        while(mark_word != 0) {
          if (mark_word & 1) {
              PUSH_GRANULE(q);
          }
          q += GC_GRANULE_WORDS;
          mark_word >>= 1;
        }
        p += WORDSZ*GC_GRANULE_WORDS;
    }

#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit
    GC_mark_stack_top = mark_stack_top;
}

#ifndef UNALIGNED
/* Push all objects reachable from marked objects in the given block */
/* of size 2 (granules) objects.                                     */
void GC_push_marked2(struct hblk *h, hdr *hhdr)
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    word *p;
    word *plim;
    word *q;
    word mark_word;
    ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    ptr_t least_ha = GC_least_plausible_heap_addr;
    mse * mark_stack_top = GC_mark_stack_top;
    mse * mark_stack_limit = GC_mark_stack_limit;
#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
    while( p < plim ) {
        mark_word = *mark_word_addr++;
        q = p;
        while(mark_word != 0) {
          if (mark_word & 1) {
              PUSH_GRANULE(q);
              PUSH_GRANULE(q + GC_GRANULE_WORDS);
          }
          q += 2 * GC_GRANULE_WORDS;
          mark_word >>= 2;
        }
        p += WORDSZ*GC_GRANULE_WORDS;
    }

#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit
    GC_mark_stack_top = mark_stack_top;
}
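/*
 * A standalone sketch of the mark-word walk used by the accelerators
 * above, assuming one mark bit per granule: grab a whole word of mark
 * bits, then peel bits off the low end, advancing a cursor by one
 * granule per bit.  The early exit once the remaining word is zero is
 * what makes sparsely marked blocks cheap.  GRANULE_BYTES and the
 * printf are illustrative, not collector definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define GRANULE_BYTES 16    /* assumed granule size for the demo */

static void walk_mark_word(uint64_t mark_word, size_t base_offset)
{
    size_t offset = base_offset;

    while (mark_word != 0) {        /* stop as soon as no bits remain */
        if (mark_word & 1)
            printf("granule at block offset %zu is marked\n", offset);
        offset += GRANULE_BYTES;    /* one bit per granule */
        mark_word >>= 1;
    }
}

int main(void)
{
    /* Bits 0, 3 and 10 set: granules at offsets 0, 48 and 160. */
    walk_mark_word((1u << 0) | (1u << 3) | (1u << 10), 0);
    return 0;
}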
# if GC_GRANULE_WORDS < 4
/* Push all objects reachable from marked objects in the given block */
/* of size 4 (granules) objects.                                     */
/* There is a risk of mark stack overflow here.  But we handle that. */
/* And only unmarked objects get pushed, so it's not very likely.    */
void GC_push_marked4(struct hblk *h, hdr *hhdr)
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    word *p;
    word *plim;
    word *q;
    word mark_word;
    ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    ptr_t least_ha = GC_least_plausible_heap_addr;
    mse * mark_stack_top = GC_mark_stack_top;
    mse * mark_stack_limit = GC_mark_stack_limit;
#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
    while( p < plim ) {
        mark_word = *mark_word_addr++;
        q = p;
        while(mark_word != 0) {
          if (mark_word & 1) {
              PUSH_GRANULE(q);
              PUSH_GRANULE(q + GC_GRANULE_WORDS);
              PUSH_GRANULE(q + 2*GC_GRANULE_WORDS);
              PUSH_GRANULE(q + 3*GC_GRANULE_WORDS);
          }
          q += 4 * GC_GRANULE_WORDS;
          mark_word >>= 4;
        }
        p += WORDSZ*GC_GRANULE_WORDS;
    }

#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit
    GC_mark_stack_top = mark_stack_top;
}
#endif /* GC_GRANULE_WORDS < 4 */
#endif /* UNALIGNED */
#endif /* USE_PUSH_MARKED_ACCELERATORS */

/* Push all objects reachable from marked objects in the given block */
void GC_push_marked(struct hblk *h, hdr *hhdr)
{
    size_t sz = hhdr -> hb_sz;
    word descr = hhdr -> hb_descr;
    ptr_t p;
    word bit_no;
    ptr_t lim;
    mse * GC_mark_stack_top_reg;
    mse * mark_stack_limit = GC_mark_stack_limit;

    /* Some quick shortcuts: */
    if ((0 | GC_DS_LENGTH) == descr) return;
    if (GC_block_empty(hhdr)/* nothing marked */) return;
    GC_n_rescuing_pages++;
    GC_objects_are_marked = TRUE;
    if (sz > MAXOBJBYTES) {
        lim = h -> hb_body;
    } else {
        lim = (h + 1)->hb_body - sz;
    }

    switch(BYTES_TO_GRANULES(sz)) {
#   if defined(USE_PUSH_MARKED_ACCELERATORS)
     case 1:
       GC_push_marked1(h, hhdr);
       break;
#    if !defined(UNALIGNED)
       case 2:
         GC_push_marked2(h, hhdr);
         break;
#     if GC_GRANULE_WORDS < 4
       case 4:
         GC_push_marked4(h, hhdr);
         break;
#     endif
#    endif
#   endif
    default:
      GC_mark_stack_top_reg = GC_mark_stack_top;
      for (p = h -> hb_body, bit_no = 0; p <= lim;
           p += sz, bit_no += MARK_BIT_OFFSET(sz)) {
         if (mark_bit_from_hdr(hhdr, bit_no)) {
           /* Mark from fields inside the object */
             PUSH_OBJ(p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
         }
      }
      GC_mark_stack_top = GC_mark_stack_top_reg;
    }
}

#ifndef SMALL_CONFIG
/* Test whether any page in the given block is dirty */
GC_bool GC_block_was_dirty(struct hblk *h, hdr *hhdr)
{
    size_t sz = hhdr -> hb_sz;

    if (sz <= MAXOBJBYTES) {
         return(GC_page_was_dirty(h));
    } else {
         ptr_t p = (ptr_t)h;

         while (p < (ptr_t)h + sz) {
             if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
             p += HBLKSIZE;
         }
         return(FALSE);
    }
}
#endif /* SMALL_CONFIG */

/* Similar to GC_push_marked, but return address of next block */
struct hblk * GC_push_next_marked(struct hblk *h)
{
    hdr * hhdr = HDR(h);

    if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)) {
      h = GC_next_used_block(h);
      if (h == 0) return(0);
      hhdr = GC_find_header((ptr_t)h);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
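/*
 * A small sketch of the page-stepping idiom in GC_block_was_dirty
 * above: a large object can span several HBLKSIZE pages, so the check
 * walks the object page by page and succeeds if any page was written.
 * DEMO_HBLKSIZE and page_was_dirty() are stand-ins for the real
 * macros and virtual-dirty-bit machinery.
 */
#include <stddef.h>
#include <stdio.h>

#define DEMO_HBLKSIZE 4096

/* Pretend page 2 of the object was written since the last GC. */
static int page_was_dirty(size_t page_index)
{
    return page_index == 2;
}

static int block_was_dirty(size_t object_bytes)
{
    size_t offset;

    /* Step through every page the object overlaps. */
    for (offset = 0; offset < object_bytes; offset += DEMO_HBLKSIZE) {
        if (page_was_dirty(offset / DEMO_HBLKSIZE))
            return 1;
    }
    return 0;
}

int main(void)
{
    printf("12KB object dirty? %d\n", block_was_dirty(3 * DEMO_HBLKSIZE));
    printf("4KB object dirty?  %d\n", block_was_dirty(DEMO_HBLKSIZE));
    return 0;
}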
#ifndef SMALL_CONFIG
/* Identical to above, but mark only from dirty pages */
struct hblk * GC_push_next_marked_dirty(struct hblk *h)
{
    hdr * hhdr = HDR(h);

    if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
    for (;;) {
        if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)) {
          h = GC_next_used_block(h);
          if (h == 0) return(0);
          hhdr = GC_find_header((ptr_t)h);
        }
#       ifdef STUBBORN_ALLOC
          if (hhdr -> hb_obj_kind == STUBBORN) {
            if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
                break;
            }
          } else {
            if (GC_block_was_dirty(h, hhdr)) break;
          }
#       else
          if (GC_block_was_dirty(h, hhdr)) break;
#       endif
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
        hhdr = HDR(h);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#endif

/* Similar to above, but for uncollectable pages.  Needed since we */
/* do not clear marks for such pages, even for full collections.   */
struct hblk * GC_push_next_marked_uncollectable(struct hblk *h)
{
    hdr * hhdr = HDR(h);

    for (;;) {
        if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)) {
          h = GC_next_used_block(h);
          if (h == 0) return(0);
          hhdr = GC_find_header((ptr_t)h);
        }
        if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break;
        h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
        hhdr = HDR(h);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
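/*
 * A usage sketch of the push_next_marked iterator pattern above: the
 * caller repeatedly hands the cursor back to the iterator, which
 * skips uninteresting blocks, does its work on the next qualifying
 * one, and returns the position just past it; a zero return ends the
 * scan.  next_interesting_block() is a hypothetical stand-in for
 * GC_push_next_marked and friends, working on array indices instead
 * of heap block addresses.
 */
#include <stdio.h>

#define DEMO_NBLOCKS 8

static int interesting[DEMO_NBLOCKS] = { 0, 1, 1, 0, 0, 1, 0, 0 };

/* Visit the next interesting block at or after cursor; return 0 when
 * the scan is done, else the index just past the visited block. */
static int next_interesting_block(int cursor)
{
    while (cursor < DEMO_NBLOCKS && !interesting[cursor])
        cursor++;                         /* skip, like unmarked blocks */
    if (cursor >= DEMO_NBLOCKS) return 0; /* end of heap */
    printf("visiting block %d\n", cursor);
    return cursor + 1;                    /* position just past the block */
}

int main(void)
{
    int h = 1;                            /* start past the header block */

    while ((h = next_interesting_block(h)) != 0)
        ;                                 /* iterator does all the work */
    return 0;
}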
