📄 mark.c
字号:
if (map_entry == OFFSET_TOO_BIG || !GC_all_interior_pointers) {
	    /* Fall back to the block base: treat p as pointing at the  */
	    /* object that starts the block.                            */
	    r = BASE(p);
	    displ = BYTES_TO_WORDS(HBLKDISPL(r));
	    if (r == 0) hhdr = 0;
	} else {
	    /* Offset invalid, but map reflects interior pointers	*/
	    hhdr = 0;
	}
    } else {
	/* Valid map entry: back up displ to the start of the object.	*/
	displ = BYTES_TO_WORDS(displ);
	displ -= map_entry;
	r = (word)((word *)(HBLKPTR(p)) + displ);
    }
}
    /* If hhdr != 0 then r == GC_base(p), only we did it faster. */
    /* displ is the word index within the block.		 */
    if (hhdr == 0) {
	/* No valid header: record p on the stack black list.	*/
#	ifdef PRINT_BLACK_LIST
	    GC_add_to_black_list_stack(p, source);
#	else
	    GC_add_to_black_list_stack(p);
#	endif
#	undef source  /* In case we had to define it. */
    } else {
	if (!mark_bit_from_hdr(hhdr, displ)) {
	    /* First visit: set the mark bit, record the back pointer	*/
	    /* (debug builds only), and push the object's contents.	*/
	    set_mark_bit_from_hdr(hhdr, displ);
	    GC_STORE_BACK_PTR(source, (ptr_t)r);
	    PUSH_OBJ((word *)r, hhdr, GC_mark_stack_top,
		     GC_mark_stack_limit);
	}
    }
}

# ifdef TRACE_BUF

# define TRACE_ENTRIES 1000

/* Circular buffer of recent mark-phase events, for debugging.	*/
struct trace_entry {
    char * kind;		/* static event-name string	*/
    word gc_no;			/* collection number at the event */
    word words_allocd;
    word arg1;			/* stored XORed with 0x80000000	*/
    word arg2;			/* (undone by GC_print_trace)	*/
} GC_trace_buf[TRACE_ENTRIES];

int GC_trace_buf_ptr = 0;	/* Next slot to overwrite.	*/

/* Record one event in GC_trace_buf, advancing (and wrapping) the	*/
/* buffer pointer.  kind must point to a string with static lifetime.	*/
void GC_add_trace_entry(char *kind, word arg1, word arg2)
{
    GC_trace_buf[GC_trace_buf_ptr].kind = kind;
    GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
    GC_trace_buf[GC_trace_buf_ptr].words_allocd = GC_words_allocd;
    GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
    GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
    GC_trace_buf_ptr++;
    if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
}

/* Print, newest first, all buffered events whose gc_no is >= the	*/
/* given value; stop at the first older or empty slot.			*/
/* NOTE(review): the early return below skips UNLOCK() when lock is	*/
/* set — confirm whether that is intended.				*/
void GC_print_trace(word gc_no, GC_bool lock)
{
    int i;
    struct trace_entry *p;

    if (lock) LOCK();
    for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
	if (i < 0) i = TRACE_ENTRIES-1;	/* wrap around	*/
	p = GC_trace_buf + i;
	if (p -> gc_no < gc_no || p -> kind == 0) return;
	printf("Trace:%s (gc:%d,words:%d) 0x%X, 0x%X\n",
		p -> kind, p -> gc_no, p -> words_allocd,
		(p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
    }
    printf("Trace incomplete\n");
    if (lock) UNLOCK();
}

# endif /* TRACE_BUF */

/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans the entire region immediately, in case the
contents
 * change.
 */
void GC_push_all_eager(bottom, top)
ptr_t bottom;
ptr_t top;
{
    word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
    register word *p;
    register word q;
    register word *lim;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
    /* Shadow the two globals in registers; the #defines below make	*/
    /* GC_PUSH_ONE_STACK read the register copies inside the loop.	*/
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    if (top == 0) return;
    /* check all pointers in range and push if they appear */
    /* to be valid.					   */
      lim = t - 1 /* longword */;
      for (p = b; p <= lim; p = (word *)(((char *)p) + ALIGNMENT)) {
	q = *p;
	GC_PUSH_ONE_STACK(q, p);
      }
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
}

#ifndef THREADS
/*
 * A version of GC_push_all that treats all interior pointers as valid
 * and scans part of the area immediately, to make sure that saved
 * register values are not lost.
 * Cold_gc_frame delimits the stack section that must be scanned
 * eagerly.  A zero value indicates that no eager scanning is needed.
 */
void GC_push_all_stack_partially_eager(bottom, top, cold_gc_frame)
ptr_t bottom;
ptr_t top;
ptr_t cold_gc_frame;
{
  if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
    /* NOTE(review): EAGER_BYTES appears unused in this function —	*/
    /* presumably a remnant of an earlier heuristic; confirm.		*/
#   define EAGER_BYTES 1024
    /* Push the hot end of the stack eagerly, so that register values */
    /* saved inside GC frames are marked before they disappear.	      */
    /* The rest of the marking can be deferred until later.
*/
    if (0 == cold_gc_frame) {
	/* No cold section: everything may be deferred.	*/
	GC_push_all_stack(bottom, top);
	return;
    }
    GC_ASSERT(bottom <= cold_gc_frame && cold_gc_frame <= top);
#   ifdef STACK_GROWS_DOWN
      GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
      GC_push_all_eager(bottom, cold_gc_frame);
#   else /* STACK_GROWS_UP */
      GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
      GC_push_all_eager(cold_gc_frame, top);
#   endif /* STACK_GROWS_UP */
  } else {
    /* Interior pointers are not all valid: scan everything eagerly.  */
    GC_push_all_eager(bottom, top);
  }
# ifdef TRACE_BUF
      GC_add_trace_entry("GC_push_all_stack", bottom, top);
# endif
}
#endif /* !THREADS */

/* Push the whole stack section, treating all interior pointers as    */
/* valid when the collector configuration allows it.		      */
void GC_push_all_stack(bottom, top)
ptr_t bottom;
ptr_t top;
{
  if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
    GC_push_all(bottom, top);
  } else {
    GC_push_all_eager(bottom, top);
  }
}

#if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
/* Push all objects reachable from marked objects in the given block */
/* of size 1 objects.						     */
void GC_push_marked1(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
    register mse * mark_stack_top = GC_mark_stack_top;
    register mse * mark_stack_limit = GC_mark_stack_limit;
    /* Shadow hot globals in registers for the scan loop; the	*/
    /* #defines make GC_PUSH_ONE_HEAP use the register copies.	*/
#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
	while( p < plim ) {
	    /* Each mark word covers the next WORDSZ heap words;	*/
	    /* bit i set => object at p[i] is marked.			*/
	    mark_word = *mark_word_addr++;
	    i = 0;
	    while(mark_word != 0) {
	      if (mark_word & 1) {
		  q = p[i];
		  GC_PUSH_ONE_HEAP(q, p + i);
	      }
	      i++;
	      mark_word >>= 1;
	    }
	    p += WORDSZ;
	}
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit
    /* Write the possibly advanced stack top back to the global. */
    GC_mark_stack_top = mark_stack_top;
}

#ifndef UNALIGNED

/* Push all objects reachable from marked objects in the
given block *//* of size 2 objects. */void GC_push_marked2(h, hhdr)struct hblk *h;register hdr * hhdr;{ word * mark_word_addr = &(hhdr->hb_marks[0]); register word *p; word *plim; register int i; register word q; register word mark_word; register ptr_t greatest_ha = GC_greatest_plausible_heap_addr; register ptr_t least_ha = GC_least_plausible_heap_addr; register mse * mark_stack_top = GC_mark_stack_top; register mse * mark_stack_limit = GC_mark_stack_limit;# define GC_mark_stack_top mark_stack_top# define GC_mark_stack_limit mark_stack_limit# define GC_greatest_plausible_heap_addr greatest_ha# define GC_least_plausible_heap_addr least_ha p = (word *)(h->hb_body); plim = (word *)(((word)h) + HBLKSIZE); /* go through all words in block */ while( p < plim ) { mark_word = *mark_word_addr++; i = 0; while(mark_word != 0) { if (mark_word & 1) { q = p[i]; GC_PUSH_ONE_HEAP(q, p + i); q = p[i+1]; GC_PUSH_ONE_HEAP(q, p + i); } i += 2; mark_word >>= 2; } p += WORDSZ; }# undef GC_greatest_plausible_heap_addr# undef GC_least_plausible_heap_addr # undef GC_mark_stack_top# undef GC_mark_stack_limit GC_mark_stack_top = mark_stack_top;}/* Push all objects reachable from marked objects in the given block *//* of size 4 objects. *//* There is a risk of mark stack overflow here. But we handle that. *//* And only unmarked objects get pushed, so it's not very likely. 
*/
void GC_push_marked4(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    word * mark_word_addr = &(hhdr->hb_marks[0]);
    register word *p;
    word *plim;
    register int i;
    register word q;
    register word mark_word;
    register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    register ptr_t least_ha = GC_least_plausible_heap_addr;
    register mse * mark_stack_top = GC_mark_stack_top;
    register mse * mark_stack_limit = GC_mark_stack_limit;
    /* Shadow hot globals in registers for the scan loop; the	*/
    /* #defines make GC_PUSH_ONE_HEAP use the register copies.	*/
#   define GC_mark_stack_top mark_stack_top
#   define GC_mark_stack_limit mark_stack_limit
#   define GC_greatest_plausible_heap_addr greatest_ha
#   define GC_least_plausible_heap_addr least_ha

    p = (word *)(h->hb_body);
    plim = (word *)(((word)h) + HBLKSIZE);

    /* go through all words in block */
	while( p < plim ) {
	    /* Mark bits appear every 4 words here; a set bit means	*/
	    /* the 4-word object starting at p[i] is marked.		*/
	    mark_word = *mark_word_addr++;
	    i = 0;
	    while(mark_word != 0) {
	      if (mark_word & 1) {
		  q = p[i];
		  GC_PUSH_ONE_HEAP(q, p + i);
		  q = p[i+1];
		  GC_PUSH_ONE_HEAP(q, p + i + 1);
		  q = p[i+2];
		  GC_PUSH_ONE_HEAP(q, p + i + 2);
		  q = p[i+3];
		  GC_PUSH_ONE_HEAP(q, p + i + 3);
	      }
	      i += 4;
	      mark_word >>= 4;
	    }
	    p += WORDSZ;
	}
#   undef GC_greatest_plausible_heap_addr
#   undef GC_least_plausible_heap_addr
#   undef GC_mark_stack_top
#   undef GC_mark_stack_limit
    /* Write the possibly advanced stack top back to the global. */
    GC_mark_stack_top = mark_stack_top;
}

#endif /* UNALIGNED */

#endif /* SMALL_CONFIG */

/* Push all objects reachable from marked objects in the given block */
void GC_push_marked(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    register int sz = hhdr -> hb_sz;		/* object size in words	*/
    register int descr = hhdr -> hb_descr;
    register word * p;
    register int word_no;
    register word * lim;
    register mse * GC_mark_stack_top_reg;
    register mse * mark_stack_limit = GC_mark_stack_limit;

    /* Some quick shortcuts: */
	/* A (0 | GC_DS_LENGTH) descriptor has length 0: nothing to trace. */
	if ((0 | GC_DS_LENGTH) == descr) return;
        if (GC_block_empty(hhdr)/* nothing marked */) return;
    GC_n_rescuing_pages++;
    GC_objects_are_marked = TRUE;
    /* lim is the last object start that still fits in the block. */
    if (sz > MAXOBJSZ) {
        lim = (word *)h;
    } else {
        lim = (word *)(h + 1) - sz;
    }

    /* Dispatch to a specialized scanner for the common small sizes. */
    switch(sz) {
#   if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES)
     case 1:
       GC_push_marked1(h, hhdr);
       break;
#   endif
#   if !defined(SMALL_CONFIG) \
&& !defined(UNALIGNED) && \
       !defined(USE_MARK_BYTES)
     case 2:
       GC_push_marked2(h, hhdr);
       break;
     case 4:
       GC_push_marked4(h, hhdr);
       break;
#   endif
     default:
      /* Generic path: walk mark bits at object granularity. */
      GC_mark_stack_top_reg = GC_mark_stack_top;
      for (p = (word *)h, word_no = 0; p <= lim; p += sz, word_no += sz) {
         if (mark_bit_from_hdr(hhdr, word_no)) {
           /* Mark from fields inside the object */
           PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
#	   ifdef GATHERSTATS
	    /* Subtract this object from total, since it was	*/
	    /* added in twice.					*/
	    GC_composite_in_use -= sz;
#	   endif
         }
      }
      GC_mark_stack_top = GC_mark_stack_top_reg;
    }
}

#ifndef SMALL_CONFIG
/* Test whether any page in the given block is dirty */
GC_bool GC_block_was_dirty(h, hhdr)
struct hblk *h;
register hdr * hhdr;
{
    register int sz = hhdr -> hb_sz;

    if (sz <= MAXOBJSZ) {
	/* Small objects: the block is a single page's worth. */
         return(GC_page_was_dirty(h));
    } else {
	/* Large object: check every page the object spans. */
    	 register ptr_t p = (ptr_t)h;
         sz = WORDS_TO_BYTES(sz);
         while (p < (ptr_t)h + sz) {
             if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
             p += HBLKSIZE;
         }
         return(FALSE);
    }
}
#endif /* SMALL_CONFIG */

/* Push contents of the next marked block at or after h; return the	*/
/* address just past that block, or 0 if there is none.			*/
struct hblk * GC_push_next_marked(h)
struct hblk *h;
{
    register hdr * hhdr;

    h = GC_next_used_block(h);
    if (h == 0) return(0);
    hhdr = HDR(h);
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}

#ifndef SMALL_CONFIG
/* Identical to above, but mark only from dirty pages	*/
struct hblk * GC_push_next_marked_dirty(h)
struct hblk *h;
{
    register hdr * hhdr;

    if (!GC_dirty_maintained) { ABORT("dirty bits not set up"); }
    for (;;) {
	h = GC_next_used_block(h);
	if (h == 0) return(0);
	hhdr = HDR(h);
#	ifdef STUBBORN_ALLOC
	  /* Stubborn objects additionally require an explicit	*/
	  /* "changed" notification before they are rescanned.	*/
          if (hhdr -> hb_obj_kind == STUBBORN) {
            if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
                break;
            }
          } else {
            if (GC_block_was_dirty(h, hhdr)) break;
          }
#	else
	  if (GC_block_was_dirty(h, hhdr)) break;
#	endif
	h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    }
    GC_push_marked(h, hhdr);
    return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
}
#endif

/* Similar to above, but for uncollectable pages.
Needed since we *//* do not clear marks for such pages, even for full collections. */struct hblk * GC_push_next_marked_uncollectable(h)struct hblk *h;{ register hdr * hhdr = HDR(h); for (;;) { h = GC_next_used_block(h); if (h == 0) return(0); hhdr = HDR(h); if (hhdr -> hb_obj_kind == UNCOLLECTABLE) break; h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz); } GC_push_marked(h, hhdr); return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -