📄 reclaim.c
字号:
GC_atomic_in_use += sz * hhdr -> hb_n_marks;
    /* NOTE(review): the lines above/below up to the closing braces are */
    /* the tail of a function whose start is outside this view          */
    /* (apparently the per-block reclaim routine); code left untouched. */
  }
  if (report_if_found) {
    /* Leak-check mode: just report still-allocated objects.            */
    GC_reclaim_small_nonempty_block(hbp, (int)report_if_found,
                                    &GC_bytes_found);
  } else if (empty) {
    /* No live objects at all: return the whole block.                  */
    GC_bytes_found += HBLKSIZE;
    GC_freehblk(hbp);
  } else if (TRUE != GC_block_nearly_full(hhdr)){
    /* group of smaller objects, enqueue the real work */
    rlh = &(ok -> ok_reclaim_list[BYTES_TO_GRANULES(sz)]);
    hhdr -> hb_next = *rlh;
    *rlh = hbp;
  } /* else not worth salvaging. */
  /* We used to do the nearly_full check later, but we    */
  /* already have the right cache context here.  Also     */
  /* doing it here avoids some silly lock contention in   */
  /* GC_malloc_many.                                      */
 }
}

#if !defined(NO_DEBUGGING)

/* Routines to gather and print heap block info         */
/* intended for debugging.  Otherwise should be called  */
/* with lock.                                           */
struct Print_stats
{
    size_t number_of_blocks;  /* heap blocks visited so far */
    size_t total_bytes;       /* their sizes, rounded up to HBLKSIZE */
};

#ifdef USE_MARK_BYTES

/* Return the number of set mark bits in the given header.      */
/* With USE_MARK_BYTES each mark position occupies a whole      */
/* byte in hb_marks, so we simply sum the 0/1 bytes found at    */
/* each object's mark offset.                                   */
int GC_n_set_marks(hdr *hhdr)
{
    int result = 0;
    int i;
    size_t sz = hhdr -> hb_sz;
    int offset = MARK_BIT_OFFSET(sz);  /* loop stride per object */
    int limit = FINAL_MARK_BIT(sz);    /* index of the final mark */

    for (i = 0; i < limit; i += offset) {
        result += hhdr -> hb_marks[i];
    }
    /* The final mark (one past the last real object) is expected       */
    /* to always be set; it is excluded from the count by the loop.     */
    GC_ASSERT(hhdr -> hb_marks[limit]);
    return(result);
}

#else

/* Number of set bits in a word.  Not performance critical.
*/static int set_bits(word n){ word m = n; int result = 0; while (m > 0) { if (m & 1) result++; m >>= 1; } return(result);}/* Return the number of set mark bits in the given header */int GC_n_set_marks(hdr *hhdr){ int result = 0; int i; int n_mark_words;# ifdef MARK_BIT_PER_OBJ int n_objs = HBLK_OBJS(hhdr -> hb_sz); if (0 == n_objs) n_objs = 1; n_mark_words = divWORDSZ(n_objs + WORDSZ - 1);# else /* MARK_BIT_PER_GRANULE */ n_mark_words = MARK_BITS_SZ;# endif for (i = 0; i < n_mark_words - 1; i++) { result += set_bits(hhdr -> hb_marks[i]); }# ifdef MARK_BIT_PER_OBJ result += set_bits((hhdr -> hb_marks[n_mark_words - 1]) << (n_mark_words * WORDSZ - n_objs));# else result += set_bits(hhdr -> hb_marks[n_mark_words - 1]);# endif return(result - 1);}#endif /* !USE_MARK_BYTES *//*ARGSUSED*/void GC_print_block_descr(struct hblk *h, word /* struct PrintStats */ raw_ps){ hdr * hhdr = HDR(h); size_t bytes = hhdr -> hb_sz; struct Print_stats *ps; unsigned n_marks = GC_n_set_marks(hhdr); if (hhdr -> hb_n_marks != n_marks) { GC_printf("(%u:%u,%u!=%u)", hhdr -> hb_obj_kind, bytes, hhdr -> hb_n_marks, n_marks); } else { GC_printf("(%u:%u,%u)", hhdr -> hb_obj_kind, bytes, n_marks); } bytes += HBLKSIZE-1; bytes &= ~(HBLKSIZE-1); ps = (struct Print_stats *)raw_ps; ps->total_bytes += bytes; ps->number_of_blocks++;}void GC_print_block_list(){ struct Print_stats pstats; GC_printf("(kind(0=ptrfree,1=normal,2=unc.):size_in_bytes, #_marks_set)\n"); pstats.number_of_blocks = 0; pstats.total_bytes = 0; GC_apply_to_all_blocks(GC_print_block_descr, (word)&pstats); GC_printf("\nblocks = %lu, bytes = %lu\n", (unsigned long)pstats.number_of_blocks, (unsigned long)pstats.total_bytes);}/* Currently for debugger use only: */void GC_print_free_list(int kind, size_t sz_in_granules){ struct obj_kind * ok = &GC_obj_kinds[kind]; ptr_t flh = ok -> ok_freelist[sz_in_granules]; struct hblk *lastBlock = 0; int n = 0; while (flh){ struct hblk *block = HBLKPTR(flh); if (block != lastBlock){ GC_printf("\nIn 
heap block at 0x%x:\n\t", block); lastBlock = block; } GC_printf("%d: 0x%x;", ++n, flh); flh = obj_link(flh); }}#endif /* NO_DEBUGGING *//* * Clear all obj_link pointers in the list of free objects *flp. * Clear *flp. * This must be done before dropping a list of free gcj-style objects, * since may otherwise end up with dangling "descriptor" pointers. * It may help for other pointer-containing objects. */void GC_clear_fl_links(void **flp){ void *next = *flp; while (0 != next) { *flp = 0; flp = &(obj_link(next)); next = *flp; }}/* * Perform GC_reclaim_block on the entire heap, after first clearing * small object free lists (if we are not just looking for leaks). */void GC_start_reclaim(GC_bool report_if_found){ unsigned kind; # if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC) GC_ASSERT(0 == GC_fl_builder_count);# endif /* Reset in use counters. GC_reclaim_block recomputes them. */ GC_composite_in_use = 0; GC_atomic_in_use = 0; /* Clear reclaim- and free-lists */ for (kind = 0; kind < GC_n_kinds; kind++) { void **fop; void **lim; struct hblk ** rlp; struct hblk ** rlim; struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list; GC_bool should_clobber = (GC_obj_kinds[kind].ok_descriptor != 0); if (rlist == 0) continue; /* This kind not used. */ if (!report_if_found) { lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES+1]); for( fop = GC_obj_kinds[kind].ok_freelist; fop < lim; fop++ ) { if (*fop != 0) { if (should_clobber) { GC_clear_fl_links(fop); } else { *fop = 0; } } } } /* otherwise free list objects are marked, */ /* and its safe to leave them */ rlim = rlist + MAXOBJGRANULES+1; for( rlp = rlist; rlp < rlim; rlp++ ) { *rlp = 0; } } /* Go through all heap blocks (in hblklist) and reclaim unmarked objects */ /* or enqueue the block for later processing. */ GC_apply_to_all_blocks(GC_reclaim_block, (word)report_if_found);# ifdef EAGER_SWEEP /* This is a very stupid thing to do. 
We make it possible anyway, */
    /* so that you can convince yourself that it really is very stupid. */
      GC_reclaim_all((GC_stop_func)0, FALSE);
#   endif
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      GC_ASSERT(0 == GC_fl_builder_count);
#   endif
}

/*
 * Sweep blocks of the indicated object size and kind until either the
 * appropriate free list is nonempty, or there are no more blocks to
 * sweep.
 */
void GC_continue_reclaim(size_t sz /* granules */, int kind)
{
    hdr * hhdr;
    struct hblk * hbp;
    struct obj_kind * ok = &(GC_obj_kinds[kind]);
    struct hblk ** rlh = ok -> ok_reclaim_list;
    void **flh = &(ok -> ok_freelist[sz]);

    if (rlh == 0) return;       /* No blocks of this kind.      */
    rlh += sz;
    /* Pop blocks off this size's reclaim list until sweeping one of    */
    /* them actually places something on the free list.                 */
    while ((hbp = *rlh) != 0) {
        hhdr = HDR(hbp);
        *rlh = hhdr -> hb_next;
        GC_reclaim_small_nonempty_block(hbp, FALSE, &GC_bytes_found);
        if (*flh != 0) break;
    }
}

/*
 * Reclaim all small blocks waiting to be reclaimed.
 * Abort and return FALSE when/if (*stop_func)() returns TRUE.
 * If this returns TRUE, then it's safe to restart the world
 * with incorrectly cleared mark bits.
 * If ignore_old is TRUE, then reclaim only blocks that have been
 * recently reclaimed, and discard the rest.
 * Stop_func may be 0.
 */
GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
{
    word sz;
    unsigned kind;
    hdr * hhdr;
    struct hblk * hbp;
    struct obj_kind * ok;
    struct hblk ** rlp;
    struct hblk ** rlh;
    CLOCK_TYPE start_time;
    CLOCK_TYPE done_time;

    if (GC_print_stats == VERBOSE)
        GET_TIME(start_time);
    /* Drain every kind's reclaim lists; each queued block is either    */
    /* swept or (with ignore_old, when not recently reclaimed) simply   */
    /* unlinked and dropped from the list.                              */
    for (kind = 0; kind < GC_n_kinds; kind++) {
        ok = &(GC_obj_kinds[kind]);
        rlp = ok -> ok_reclaim_list;
        if (rlp == 0) continue;         /* Kind unused; nothing queued. */
        for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
            rlh = rlp + sz;
            while ((hbp = *rlh) != 0) {
                /* Honor the caller-supplied abort predicate, if any.   */
                if (stop_func != (GC_stop_func)0 && (*stop_func)()) {
                    return(FALSE);
                }
                hhdr = HDR(hbp);
                *rlh = hhdr -> hb_next;
                if (!ignore_old || hhdr -> hb_last_reclaimed == GC_gc_no - 1) {
                    /* It's likely we'll need it this time, too */
                    /* It's been touched recently, so this      */
                    /* shouldn't trigger paging.                */
                    GC_reclaim_small_nonempty_block(hbp, FALSE,
                                                    &GC_bytes_found);
                }
            }
        }
    }
    if (GC_print_stats == VERBOSE) {
        GET_TIME(done_time);
        GC_log_printf("Disposing of reclaim lists took %lu msecs\n",
                      MS_TIME_DIFF(done_time,start_time));
    }
    return(TRUE);
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -