mark.c
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */

# include <stdio.h>
# include "private/gc_pmark.h"

#if defined(MSWIN32) && defined(__GNUC__)
# include <excpt.h>
#endif

/* We put this here to minimize the risk of inlining. */
/*VARARGS*/
#ifdef __WATCOMC__
  void GC_noop(void *p, ...) {}
#else
  void GC_noop() {}
#endif

/* Single argument version, robust against whole program analysis. */
void GC_noop1(word x)
{
    static volatile word sink;

    sink = x;
}

/* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */

unsigned GC_n_mark_procs = GC_RESERVED_MARK_PROCS;

/* Initialize GC_obj_kinds properly and standard free lists properly.  */
/* This must be done statically since they may be accessed before      */
/* GC_init is called.                                                  */
/* It's done here, since we need to deal with mark descriptors.        */
struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
/* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
                0 | GC_DS_LENGTH, FALSE, FALSE },
/* NORMAL  */ { &GC_objfreelist[0], 0, 0 | GC_DS_LENGTH,
                /* Adjusted in GC_init_inner for EXTRA_BYTES */
                TRUE /* add length to descr */, TRUE },
/* UNCOLLECTABLE */
              { &GC_uobjfreelist[0], 0, 0 | GC_DS_LENGTH,
                TRUE /* add length to descr */, TRUE },
# ifdef ATOMIC_UNCOLLECTABLE
   /* AUNCOLLECTABLE */
              { &GC_auobjfreelist[0], 0, 0 | GC_DS_LENGTH,
                FALSE /* add length to descr */, FALSE },
# endif
# ifdef STUBBORN_ALLOC
/*STUBBORN*/  { &GC_sobjfreelist[0], 0, 0 | GC_DS_LENGTH,
                TRUE /* add length to descr */, TRUE },
# endif
};

# ifdef ATOMIC_UNCOLLECTABLE
#   ifdef STUBBORN_ALLOC
      unsigned GC_n_kinds = 5;
#   else
      unsigned GC_n_kinds = 4;
#   endif
# else
#   ifdef STUBBORN_ALLOC
      unsigned GC_n_kinds = 4;
#   else
      unsigned GC_n_kinds = 3;
#   endif
# endif

# ifndef INITIAL_MARK_STACK_SIZE
#   define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
                /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a    */
                /* multiple of HBLKSIZE.                                */
                /* The incremental collector actually likes a larger    */
                /* size, since it wants to push all marked dirty objs   */
                /* before marking anything new.  Currently we let it    */
                /* grow dynamically.                                    */
# endif

/*
 * Limits of stack for GC_mark routine.
 * All ranges between GC_mark_stack(incl.) and GC_mark_stack_top(incl.) still
 * need to be marked from.
 */

word GC_n_rescuing_pages;       /* Number of dirty pages we marked from */
                                /* excludes ptrfree pages, etc.         */

mse * GC_mark_stack;

mse * GC_mark_stack_limit;

size_t GC_mark_stack_size = 0;

#ifdef PARALLEL_MARK
# include "atomic_ops.h"

  mse * volatile GC_mark_stack_top;
        /* Updated only with mark lock held, but read asynchronously.  */
  volatile AO_t GC_first_nonempty;
        /* Lowest entry on mark stack   */
        /* that may be nonempty.        */
        /* Updated only by initiating   */
        /* thread.                      */
#else
  mse * GC_mark_stack_top;
#endif

static struct hblk * scan_ptr;

mark_state_t GC_mark_state = MS_NONE;

GC_bool GC_mark_stack_too_small = FALSE;

GC_bool GC_objects_are_marked = FALSE;  /* Are there collectable marked */
                                        /* objects in the heap?         */
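/* Hedged illustration, not part of the original file: client code does    */
/* not edit the static GC_obj_kinds table above; recent collector releases */
/* expose hooks in gc_mark.h for registering an extra object kind.  The    */
/* helper names (init_my_kind, my_free_list, my_kind) are hypothetical,    */
/* and the exact prototypes of GC_new_free_list/GC_new_kind may differ     */
/* between versions, so treat this only as a sketch.                       */
#if 0
# include "gc_mark.h"

  static void **my_free_list;
  static unsigned my_kind;

  static void init_my_kind(void)
  {
    my_free_list = GC_new_free_list();
    /* 0 | GC_DS_LENGTH with a nonzero add_size_to_descriptor argument     */
    /* gives objects of this kind a simple "scan the whole object"         */
    /* descriptor, like the NORMAL kind in GC_obj_kinds above.             */
    my_kind = GC_new_kind(my_free_list, 0 | GC_DS_LENGTH,
                          1 /* add_size_to_descriptor */,
                          1 /* clear_new_objects */);
  }
#endif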
/* Is a collection in progress?  Note that this can return true in the */
/* nonincremental case, if a collection has been abandoned and the     */
/* mark state is now MS_INVALID.                                       */
GC_bool GC_collection_in_progress(void)
{
    return(GC_mark_state != MS_NONE);
}

/* clear all mark bits in the header */
void GC_clear_hdr_marks(hdr *hhdr)
{
    size_t last_bit = FINAL_MARK_BIT(hhdr -> hb_sz);

#   ifdef USE_MARK_BYTES
      BZERO(hhdr -> hb_marks, MARK_BITS_SZ);
      hhdr -> hb_marks[last_bit] = 1;
#   else
      BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
      set_mark_bit_from_hdr(hhdr, last_bit);
#   endif
    hhdr -> hb_n_marks = 0;
}

/* Set all mark bits in the header.  Used for uncollectable blocks. */
void GC_set_hdr_marks(hdr *hhdr)
{
    unsigned i;
    size_t sz = hhdr -> hb_sz;
    size_t n_marks = FINAL_MARK_BIT(sz);

#   ifdef USE_MARK_BYTES
      for (i = 0; i <= n_marks; i += MARK_BIT_OFFSET(sz)) {
        hhdr -> hb_marks[i] = 1;
      }
#   else
      for (i = 0; i < divWORDSZ(n_marks + WORDSZ); ++i) {
        hhdr -> hb_marks[i] = ONES;
      }
#   endif
#   ifdef MARK_BIT_PER_OBJ
      hhdr -> hb_n_marks = n_marks - 1;
#   else
      hhdr -> hb_n_marks = HBLK_OBJS(sz);
#   endif
}

/*
 * Clear all mark bits associated with block h.
 */
/*ARGSUSED*/
static void clear_marks_for_block(struct hblk *h, word dummy)
{
    register hdr * hhdr = HDR(h);

    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
        /* Mark bit for these is cleared only once the object is       */
        /* explicitly deallocated.  This either frees the block, or    */
        /* the bit is cleared once the object is on the free list.     */
    GC_clear_hdr_marks(hhdr);
}

/* Slow but general routines for setting/clearing/asking about mark bits */
void GC_set_mark_bit(ptr_t p)
{
    struct hblk *h = HBLKPTR(p);
    hdr * hhdr = HDR(h);
    word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);

    if (!mark_bit_from_hdr(hhdr, bit_no)) {
      set_mark_bit_from_hdr(hhdr, bit_no);
      ++hhdr -> hb_n_marks;
    }
}

void GC_clear_mark_bit(ptr_t p)
{
    struct hblk *h = HBLKPTR(p);
    hdr * hhdr = HDR(h);
    word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);

    if (mark_bit_from_hdr(hhdr, bit_no)) {
      size_t n_marks;
      clear_mark_bit_from_hdr(hhdr, bit_no);
      n_marks = hhdr -> hb_n_marks - 1;
#     ifdef PARALLEL_MARK
        if (n_marks != 0)
          hhdr -> hb_n_marks = n_marks;
        /* Don't decrement to zero.  The counts are approximate due to */
        /* concurrency issues, but we need to ensure that a count of   */
        /* zero implies an empty block.                                */
#     else
        hhdr -> hb_n_marks = n_marks;
#     endif
    }
}

GC_bool GC_is_marked(ptr_t p)
{
    struct hblk *h = HBLKPTR(p);
    hdr * hhdr = HDR(h);
    word bit_no = MARK_BIT_NO(p - (ptr_t)h, hhdr -> hb_sz);

    return((GC_bool)mark_bit_from_hdr(hhdr, bit_no));
}

/*
 * Clear mark bits in all allocated heap blocks.  This invalidates
 * the marker invariant, and sets GC_mark_state to reflect this.
 * (This implicitly starts marking to reestablish the invariant.)
 */
void GC_clear_marks(void)
{
    GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
    GC_objects_are_marked = FALSE;
    GC_mark_state = MS_INVALID;
    scan_ptr = 0;
}

/* Initiate a garbage collection.  Initiates a full collection if the  */
/* mark state is invalid.                                              */
/*ARGSUSED*/
void GC_initiate_gc(void)
{
    if (GC_dirty_maintained) GC_read_dirty();
#   ifdef STUBBORN_ALLOC
      GC_read_changed();
#   endif
#   ifdef CHECKSUMS
      {
        extern void GC_check_dirty();

        if (GC_dirty_maintained) GC_check_dirty();
      }
#   endif
    GC_n_rescuing_pages = 0;
    if (GC_mark_state == MS_NONE) {
        GC_mark_state = MS_PUSH_RESCUERS;
    } else if (GC_mark_state != MS_INVALID) {
        ABORT("unexpected state");
    } /* else this is really a full collection, and mark        */
      /* bits are invalid.                                      */
    scan_ptr = 0;
}
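/* Hedged usage sketch, not part of the original file: incremental and     */
/* generational collection is enabled by the client through the public     */
/* API in gc.h, after which GC_dirty_maintained is set and GC_initiate_gc  */
/* above consults the dirty-page information read by GC_read_dirty().      */
/* The client program below is purely illustrative.                        */
#if 0
# include "gc.h"

  int main(void)
  {
    GC_INIT();
    GC_enable_incremental();  /* virtual-dirty-bit based incremental mode  */
    /* ... allocate with GC_MALLOC() as usual; marking then proceeds in    */
    /* small increments rather than in a single stop-the-world phase ...   */
    return 0;
  }
#endif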
static void alloc_mark_stack(size_t);

# if defined(MSWIN32) || defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS)
    /* Under rare conditions, we may end up marking from nonexistent memory. */
    /* Hence we need to be prepared to recover by running GC_mark_some       */
    /* with a suitable handler in place.                                      */
#   define WRAP_MARK_SOME
# endif

/* Perform a small amount of marking.                   */
/* We try to touch roughly a page of memory.            */
/* Return TRUE if we just finished a mark phase.        */
/* Cold_gc_frame is an address inside a GC frame that   */
/* remains valid until all marking is complete.         */
/* A zero value indicates that it's OK to miss some     */
/* register values.                                     */
/* We hold the allocation lock.  In the case of         */
/* incremental collection, the world may not be stopped.*/
#ifdef WRAP_MARK_SOME
  /* For win32, this is called after we establish a structured  */
  /* exception handler, in case Windows unmaps one of our root  */
  /* segments.  See below.  In either case, we acquire the      */
  /* allocator lock long before we get here.                    */
  GC_bool GC_mark_some_inner(ptr_t cold_gc_frame)
#else
  GC_bool GC_mark_some(ptr_t cold_gc_frame)
#endif
{
    switch(GC_mark_state) {
        case MS_NONE:
            return(FALSE);

        case MS_PUSH_RESCUERS:
            if (GC_mark_stack_top
                >= GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/2) {
                /* Go ahead and mark, even though that might cause us to */
                /* see more marked dirty objects later on.  Avoid this   */
                /* in the future.                                        */
                GC_mark_stack_too_small = TRUE;
                MARK_FROM_MARK_STACK();
                return(FALSE);
            } else {
                scan_ptr = GC_push_next_marked_dirty(scan_ptr);
                if (scan_ptr == 0) {
                    if (GC_print_stats) {
                        GC_log_printf("Marked from %u dirty pages\n",
                                      GC_n_rescuing_pages);
                    }
                    GC_push_roots(FALSE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;
                    }
                }
            }
            return(FALSE);

        case MS_PUSH_UNCOLLECTABLE:
            if (GC_mark_stack_top
                >= GC_mark_stack + GC_mark_stack_size/4) {
#               ifdef PARALLEL_MARK
                  /* Avoid this, since we don't parallelize the marker  */
                  /* here.                                              */
                  if (GC_parallel) GC_mark_stack_too_small = TRUE;
#               endif
                MARK_FROM_MARK_STACK();
                return(FALSE);
            } else {
                scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
                if (scan_ptr == 0) {
                    GC_push_roots(TRUE, cold_gc_frame);
                    GC_objects_are_marked = TRUE;
                    if (GC_mark_state != MS_INVALID) {
                        GC_mark_state = MS_ROOTS_PUSHED;
                    }
                }
            }
            return(FALSE);

        case MS_ROOTS_PUSHED:
#           ifdef PARALLEL_MARK
              /* In the incremental GC case, this currently doesn't     */
              /* quite do the right thing, since it runs to             */
              /* completion.  On the other hand, starting a             */
              /* parallel marker is expensive, so perhaps it is         */
              /* the right thing?                                       */
              /* Eventually, incremental marking should run             */
              /* asynchronously in multiple threads, without grabbing   */
              /* the allocation lock.                                   */
              if (GC_parallel) {
                GC_do_parallel_mark();
                GC_ASSERT(GC_mark_stack_top < (mse *)GC_first_nonempty);
                GC_mark_stack_top = GC_mark_stack - 1;
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                if (GC_mark_state == MS_ROOTS_PUSHED) {
                    GC_mark_state = MS_NONE;
                    return(TRUE);
                } else {
                    return(FALSE);
                }
              }
#           endif
            if (GC_mark_stack_top >= GC_mark_stack) {
                MARK_FROM_MARK_STACK();
                return(FALSE);
            } else {
                GC_mark_state = MS_NONE;
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                return(TRUE);
            }

        case MS_INVALID:
        case MS_PARTIALLY_INVALID:
            if (!GC_objects_are_marked) {
                GC_mark_state = MS_PUSH_UNCOLLECTABLE;
                return(FALSE);
            }
            if (GC_mark_stack_top >= GC_mark_stack) {
                MARK_FROM_MARK_STACK();
                return(FALSE);
            }
            if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
                /* About to start a heap scan for marked objects. */
                /* Mark stack is empty.  OK to reallocate.        */
                if (GC_mark_stack_too_small) {
                    alloc_mark_stack(2*GC_mark_stack_size);
                }
                GC_mark_state = MS_PARTIALLY_INVALID;
            }
            scan_ptr = GC_push_next_marked(scan_ptr);
            if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
                GC_push_roots(TRUE, cold_gc_frame);
                GC_objects_are_marked = TRUE;
                if (GC_mark_state != MS_INVALID) {
                    GC_mark_state = MS_ROOTS_PUSHED;
                }
            }
            return(FALSE);

        default:
            ABORT("GC_mark_some: bad state");
            return(FALSE);
    }
}
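/* Hedged sketch, not part of the original file: a stop-the-world mark     */
/* phase is driven by a loop along these lines (compare GC_stopped_mark    */
/* in alloc.c); the surrounding code here is a simplified assumption.      */
#if 0
  GC_initiate_gc();                 /* pick the starting mark state        */
  while (!GC_mark_some((ptr_t)0)) {
    /* Each call does roughly a page worth of marking work and returns    */
    /* TRUE once the mark phase is complete.  A zero cold_gc_frame says   */
    /* it is OK to miss some register values, per the contract above.     */
  }
#endif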
#if defined(MSWIN32) && defined(__GNUC__)

  typedef struct {
    EXCEPTION_REGISTRATION ex_reg;
    void *alt_path;
  } ext_ex_regn;

  static EXCEPTION_DISPOSITION mark_ex_handler(
        struct _EXCEPTION_RECORD *ex_rec,
        void *est_frame,
        struct _CONTEXT *context,
        void *disp_ctxt)
  {
    if (ex_rec->ExceptionCode == STATUS_ACCESS_VIOLATION) {
      ext_ex_regn *xer = (ext_ex_regn *)est_frame;

      /* Unwind from the inner function assuming the standard */
      /* function prologue.                                    */
      /* Assumes code has not been compiled with               */
      /* -fomit-frame-pointer.                                 */
      context->Esp = context->Ebp;
      context->Ebp = *((DWORD *)context->Esp);
      context->Esp = context->Esp - 8;

      /* Resume execution at the "real" handler within the     */
      /* wrapper function.                                     */
      context->Eip = (DWORD )(xer->alt_path);
      return ExceptionContinueExecution;

    } else {
      return ExceptionContinueSearch;
    }
  }
# endif /* __GNUC__ && MSWIN32 */

#ifdef GC_WIN32_THREADS
  extern GC_bool GC_started_thread_while_stopped(void);
  /* In win32_threads.c.  Did we invalidate mark phase with an */