📄 reclaim.c
字号:
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include <stdio.h>
#include "private/gc_priv.h"

signed_word GC_bytes_found = 0;
                        /* Number of bytes of memory reclaimed.         */

#if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
  word GC_fl_builder_count = 0;
        /* Number of threads currently building free lists without      */
        /* holding GC lock.  It is not safe to collect if this is       */
        /* nonzero.                                                     */
#endif /* PARALLEL_MARK */

/* We defer printing of leaked objects until we're done with the GC     */
/* cycle, since the routine for printing objects needs to run outside   */
/* the collector, e.g. without the allocation lock.                     */
#define MAX_LEAKED 40
ptr_t GC_leaked[MAX_LEAKED];
        /* Addresses of leaked objects recorded during this cycle.      */
unsigned GC_n_leaked = 0;
        /* Number of valid entries in GC_leaked.                        */
GC_bool GC_have_errors = FALSE;
        /* Set once any leak has been recorded; tells the client/       */
        /* collector that GC_print_all_errors has work to do.           */

/* Record a leaked (unreferenced but not reclaimable) object at         */
/* address "leaked" for later reporting by GC_print_all_errors.         */
/* At most MAX_LEAKED objects are remembered per cycle; any further     */
/* leaks are silently dropped.  The object's mark bit is set so the     */
/* object survives the current cycle long enough to be printed.         */
void GC_add_leaked(ptr_t leaked)
{
    if (GC_n_leaked < MAX_LEAKED) {
      GC_have_errors = TRUE;
      GC_leaked[GC_n_leaked++] = leaked;
      /* Make sure it's not reclaimed this cycle */
      GC_set_mark_bit(leaked);
    }
}

/* TRUE while GC_print_all_errors is in progress; used to suppress      */
/* reentrant or concurrent error printing.                              */
static GC_bool printing_errors = FALSE;

/* Print all objects on the list after printing any smashed objs. */
/* Clear both lists.
*/
/* Report every leaked object recorded by GC_add_leaked (and, if the    */
/* debugging allocator is active, any smashed objects), then free the   */
/* reported objects and reset the leak list.  The printing_errors flag  */
/* is tested and set under the allocation lock to guard against         */
/* concurrent invocation; the printing itself runs unlocked, since the  */
/* print routines must execute outside the collector.                   */
void GC_print_all_errors ()
{
    unsigned i;

    LOCK();
    if (printing_errors) {
        /* Another thread is already printing; nothing to do.   */
        UNLOCK();
        return;
    }
    printing_errors = TRUE;
    UNLOCK();
    if (GC_debugging_started) GC_print_all_smashed();
    for (i = 0; i < GC_n_leaked; ++i) {
        ptr_t p = GC_leaked[i];
        /* hb_descr is not consulted here; the object kind alone        */
        /* distinguishes pointer-free (atomic) objects.                 */
        if (HDR(p) -> hb_obj_kind == PTRFREE) {
            GC_err_printf("Leaked atomic object at ");
        } else {
            GC_err_printf("Leaked composite object at ");
        }
        GC_print_heap_obj(p);
        GC_err_printf("\n");
        GC_free(p);
        GC_leaked[i] = 0;
    }
    GC_n_leaked = 0;
    printing_errors = FALSE;
}

/*
 * reclaim phase
 *
 */

/*
 * Test whether a block is completely empty, i.e. contains no marked
 * objects.  This does not require the block to be in physical
 * memory.
 */
GC_bool GC_block_empty(hdr *hhdr)
{
    return (hhdr -> hb_n_marks == 0);
}

/* TRUE if more than 7/8 of the block's object slots are marked.        */
GC_bool GC_block_nearly_full(hdr *hhdr)
{
    return (hhdr -> hb_n_marks > 7 * HBLK_OBJS(hhdr -> hb_sz)/8);
}

/* FIXME: This should perhaps again be specialized for USE_MARK_BYTES   */
/* and USE_MARK_BITS cases.                                             */

/*
 * Restore unmarked small objects in h of size sz to the object
 * free list.  Returns the new list.
 * Clears unmarked objects.  Sz is in bytes.
*/
/*ARGSUSED*/
ptr_t GC_reclaim_clear(struct hblk *hbp, hdr *hhdr, size_t sz,
                       ptr_t list, signed_word *count)
{
    word bit_no = 0;
    word *p, *q, *plim;
    signed_word n_bytes_found = 0;

    GC_ASSERT(hhdr == GC_find_header((ptr_t)hbp));
    GC_ASSERT(sz == hhdr -> hb_sz);
    GC_ASSERT((sz & (BYTES_PER_WORD-1)) == 0);
    p = (word *)(hbp->hb_body);
    plim = (word *)(hbp->hb_body + HBLKSIZE - sz);

    /* go through all words in block */
        while( p <= plim ) {
            if( mark_bit_from_hdr(hhdr, bit_no) ) {
                /* Object is marked (live): just skip over it.  */
                p = (word *)((ptr_t)p + sz);
            } else {
                n_bytes_found += sz;
                /* object is available - put on list */
                    obj_link(p) = list;
                    list = ((ptr_t)p);
                /* Clear object, advance p to next object in the process */
                    q = (word *)((ptr_t)p + sz);
#                   ifdef USE_MARK_BYTES
                      /* Clear in double-word units.  The first double  */
                      /* holds the free-list link in word 0, so only    */
                      /* its second word needs clearing.                */
                      GC_ASSERT(!(sz & 1)
                                && !((word)p & (2 * sizeof(word) - 1)));
                      p[1] = 0;
                      p += 2;
                      while (p < q) {
                        CLEAR_DOUBLE(p);
                        p += 2;
                      }
#                   else
                      p++; /* Skip link field */
                      while (p < q) {
                        *p++ = 0;
                      }
#                   endif
            }
            bit_no += MARK_BIT_OFFSET(sz);
        }
    /* Credit the caller's counter with the bytes made available.       */
    *count += n_bytes_found;
    return(list);
}

/* The same thing, but don't clear objects: */
/*ARGSUSED*/
ptr_t GC_reclaim_uninit(struct hblk *hbp, hdr *hhdr, size_t sz,
                        ptr_t list, signed_word *count)
{
    word bit_no = 0;
    word *p, *plim;
    signed_word n_bytes_found = 0;

    GC_ASSERT(sz == hhdr -> hb_sz);
    p = (word *)(hbp->hb_body);
    /* NOTE(review): plim is computed from hbp itself here, while       */
    /* GC_reclaim_clear uses hbp->hb_body; presumably hb_body is at     */
    /* offset 0 of struct hblk, making the two equivalent -- confirm.   */
    plim = (word *)((ptr_t)hbp + HBLKSIZE - sz);

    /* go through all words in block */
        while( p <= plim ) {
            if( !mark_bit_from_hdr(hhdr, bit_no) ) {
                n_bytes_found += sz;
                /* object is available - put on list */
                    obj_link(p) = list;
                    list = ((ptr_t)p);
            }
            p = (word *)((ptr_t)p + sz);
            bit_no += MARK_BIT_OFFSET(sz);
        }
    *count += n_bytes_found;
    return(list);
}

/* Don't really reclaim objects, just check for unmarked ones: */
/*ARGSUSED*/
void GC_reclaim_check(struct hblk *hbp, hdr *hhdr, word sz)
{
    word bit_no = 0;
    ptr_t p, plim;

    GC_ASSERT(sz == hhdr -> hb_sz);
    p = hbp->hb_body;
    plim = p + HBLKSIZE - sz;

    /* go through all words in block */
        while( p <= plim ) {
            if( !mark_bit_from_hdr(hhdr, bit_no) ) {
                /* Unmarked object: report it as leaked (leak-detection */
                /* mode) instead of reclaiming it.                      */
                GC_add_leaked(p);
            }
            p += sz;
            bit_no += MARK_BIT_OFFSET(sz);
        }
}

/*
 * Generic procedure to rebuild a free list in hbp.
 * Also called directly from GC_malloc_many.
 * Sz is now in bytes.
 */
ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
                         GC_bool init, ptr_t list, signed_word *count)
{
    ptr_t result = list;

    GC_ASSERT(GC_find_header((ptr_t)hbp) == hhdr);
    /* Unprotect the block before writing free-list links into it.      */
    GC_remove_protection(hbp, 1, (hhdr)->hb_descr == 0 /* Pointer-free? */);
    if (init) {
      /* Objects must be handed out zeroed: clear while reclaiming.     */
      result = GC_reclaim_clear(hbp, hhdr, sz, list, count);
    } else {
      /* Skipping the clear is only legal for pointer-free blocks.      */
      GC_ASSERT((hhdr)->hb_descr == 0 /* Pointer-free block */);
      result = GC_reclaim_uninit(hbp, hhdr, sz, list, count);
    }
    /* Uncollectable objects are never deallocated by the collector:    */
    /* re-set all mark bits so they stay live.                          */
    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) GC_set_hdr_marks(hhdr);
    return result;
}

/*
 * Restore unmarked small objects in the block pointed to by hbp
 * to the appropriate object free list.
 * If entirely empty blocks are to be completely deallocated, then
 * caller should perform that check.
 */
void GC_reclaim_small_nonempty_block(struct hblk *hbp,
                                     int report_if_found, signed_word *count)
{
    hdr *hhdr = HDR(hbp);
    size_t sz = hhdr -> hb_sz;
    int kind = hhdr -> hb_obj_kind;
    struct obj_kind * ok = &GC_obj_kinds[kind];
    void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);

    /* Stamp the block with the current GC cycle number.                */
    hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;

    if (report_if_found) {
        /* Leak-detection mode: only report unmarked objects.           */
        GC_reclaim_check(hbp, hhdr, sz);
    } else {
        /* Prepend the block's free objects to the kind's free list;    */
        /* clear them if this kind requires initialized objects or the  */
        /* debugging allocator is active.                               */
        *flh = GC_reclaim_generic(hbp, hhdr, sz,
                                  (ok -> ok_init || GC_debugging_started),
                                  *flh, &GC_bytes_found);
    }
}

/*
 * Restore an unmarked large object or an entirely empty blocks of small objects
 * to the heap block free list.
 * Otherwise enqueue the block for later processing
 * by GC_reclaim_small_nonempty_block.
 * If report_if_found is TRUE, then process any block immediately, and
 * simply report free objects; do not actually reclaim them.
*/void GC_reclaim_block(struct hblk *hbp, word report_if_found){ hdr * hhdr = HDR(hbp); size_t sz = hhdr -> hb_sz; /* size of objects in current block */ struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind]; struct hblk ** rlh; if( sz > MAXOBJBYTES ) { /* 1 big object */ if( !mark_bit_from_hdr(hhdr, 0) ) { if (report_if_found) { GC_add_leaked((ptr_t)hbp); } else { size_t blocks = OBJ_SZ_TO_BLOCKS(sz); if (blocks > 1) { GC_large_allocd_bytes -= blocks * HBLKSIZE; } GC_bytes_found += sz; GC_freehblk(hbp); } } else { if (hhdr -> hb_descr != 0) { GC_composite_in_use += sz; } else { GC_atomic_in_use += sz; } } } else { GC_bool empty = GC_block_empty(hhdr);# ifdef PARALLEL_MARK /* Count can be low or one too high because we sometimes */ /* have to ignore decrements. Objects can also potentially */ /* be repeatedly marked by each marker. */ /* Here we assume two markers, but this is extremely */ /* unlikely to fail spuriously with more. And if it does, it */ /* should be looked at. */ GC_ASSERT(hhdr -> hb_n_marks <= 2 * (HBLKSIZE/sz + 1) + 16);# else GC_ASSERT(sz * hhdr -> hb_n_marks <= HBLKSIZE);# endif if (hhdr -> hb_descr != 0) { GC_composite_in_use += sz * hhdr -> hb_n_marks; } else {
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -