⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 mark.c

📁 A garbage collector for C and C++
💻 C
📖 第 1 页 / 共 4 页
字号:
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */

# include <stdio.h>
# include "private/gc_pmark.h"

#if defined(MSWIN32) && defined(__GNUC__)
# include <excpt.h>
#endif

/* We put this here to minimize the risk of inlining. */
/*VARARGS*/
#ifdef __WATCOMC__
  void GC_noop(void *p, ...) {}
#else
  void GC_noop() {}
#endif

/* Single argument version, robust against whole program analysis.	*/
/* The argument is written to a static volatile sink, so the store	*/
/* (and hence the argument) cannot be optimized away.			*/
void GC_noop1(x)
word x;
{
    static VOLATILE word sink;
    sink = x;
}

/* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */

/* Number of entries currently in use in GC_mark_procs; initialized	*/
/* past the reserved slots (see GC_RESERVED_MARK_PROCS in the headers).	*/
word GC_n_mark_procs = GC_RESERVED_MARK_PROCS;

/* Initialize GC_obj_kinds properly and standard free lists properly.  	*/
/* This must be done statically since they may be accessed before 	*/
/* GC_init is called.							*/
/* It's done here, since we need to deal with mark descriptors.		
*/
/* NOTE(review): entry layout is { free-list head, reclaim list (filled	*/
/* in dynamically), mark descriptor, add-length-to-descr flag, init	*/
/* flag } -- confirm field names against struct obj_kind in gc_priv.h.	*/
struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
/* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
		0 | GC_DS_LENGTH, FALSE, FALSE },
/* NORMAL  */ { &GC_objfreelist[0], 0,
		0 | GC_DS_LENGTH,  /* Adjusted in GC_init_inner for EXTRA_BYTES */
		TRUE /* add length to descr */, TRUE },
/* UNCOLLECTABLE */
	      { &GC_uobjfreelist[0], 0,
		0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
# ifdef ATOMIC_UNCOLLECTABLE
   /* AUNCOLLECTABLE */
	      { &GC_auobjfreelist[0], 0,
		0 | GC_DS_LENGTH, FALSE /* add length to descr */, FALSE },
# endif
# ifdef STUBBORN_ALLOC
/*STUBBORN*/ { &GC_sobjfreelist[0], 0,
		0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE },
# endif
};

/* Number of kinds statically defined in GC_obj_kinds above; must match	*/
/* the ATOMIC_UNCOLLECTABLE / STUBBORN_ALLOC conditionals in the table.	*/
# ifdef ATOMIC_UNCOLLECTABLE
#   ifdef STUBBORN_ALLOC
      int GC_n_kinds = 5;
#   else
      int GC_n_kinds = 4;
#   endif
# else
#   ifdef STUBBORN_ALLOC
      int GC_n_kinds = 4;
#   else
      int GC_n_kinds = 3;
#   endif
# endif

# ifndef INITIAL_MARK_STACK_SIZE
#   define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
		/* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a 	*/
		/* multiple of HBLKSIZE.				*/
		/* The incremental collector actually likes a larger	*/
		/* size, since it wants to push all marked dirty objs	*/
		/* before marking anything new.  Currently we let it	*/
		/* grow dynamically.					*/
# endif

/*
 * Limits of stack for GC_mark routine.
 * All ranges between GC_mark_stack(incl.) and GC_mark_stack_top(incl.) still
 * need to be marked from.
 */

word GC_n_rescuing_pages;	/* Number of dirty pages we marked from */
				/* excludes ptrfree pages, etc.		*/

mse * GC_mark_stack;		/* Base of the mark stack.		*/

mse * GC_mark_stack_limit;	/* One past the last usable entry.	*/

word GC_mark_stack_size = 0;
 
#ifdef PARALLEL_MARK
  /* Written by multiple marker threads when marking in parallel.	*/
  mse * VOLATILE GC_mark_stack_top;
#else
  mse * GC_mark_stack_top;
#endif

/* Cursor passed to and returned by the GC_push_next_marked* routines;	*/
/* 0 means the current heap traversal is finished (or not started).	*/
static struct hblk * scan_ptr;

mark_state_t GC_mark_state = MS_NONE;

GC_bool GC_mark_stack_too_small = FALSE;

GC_bool GC_objects_are_marked = FALSE;	/* Are there collectable marked	*/
					/* objects in the heap?		*/

/* Is a collection in progress?  
Note that this can return true in the	*/
/* nonincremental case, if a collection has been abandoned and the	*/
/* mark state is now MS_INVALID.					*/
GC_bool GC_collection_in_progress()
{
    return(GC_mark_state != MS_NONE);
}

/* clear all mark bits in the header */
void GC_clear_hdr_marks(hhdr)
register hdr * hhdr;
{
    /* hb_marks is an array of bytes or of words depending on	*/
    /* USE_MARK_BYTES, hence the differing BZERO lengths.	*/
#   ifdef USE_MARK_BYTES
      BZERO(hhdr -> hb_marks, MARK_BITS_SZ);
#   else
      BZERO(hhdr -> hb_marks, MARK_BITS_SZ*sizeof(word));
#   endif
}

/* Set all mark bits in the header.  Used for uncollectable blocks. */
void GC_set_hdr_marks(hhdr)
register hdr * hhdr;
{
    register int i;

    for (i = 0; i < MARK_BITS_SZ; ++i) {
#     ifdef USE_MARK_BYTES
    	hhdr -> hb_marks[i] = 1;
#     else
    	hhdr -> hb_marks[i] = ONES;
#     endif
    }
}

/*
 * Clear all mark bits associated with block h.
 */
/*ARGSUSED*/
# if defined(__STDC__) || defined(__cplusplus)
    static void clear_marks_for_block(struct hblk *h, word dummy)
# else
    static void clear_marks_for_block(h, dummy)
    struct hblk *h;
    word dummy;
# endif
{
    register hdr * hhdr = HDR(h);
    
    /* Uncollectable blocks are deliberately left marked.		*/
    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
        /* Mark bit for these is cleared only once the object is 	*/
        /* explicitly deallocated.  This either frees the block, or	*/
        /* the bit is cleared once the object is on the free list.	*/
    GC_clear_hdr_marks(hhdr);
}

/* Slow but general routines for setting/clearing/asking about mark bits */
/* word_no in each case is p's word offset within its heap block.	 */
void GC_set_mark_bit(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;
    
    set_mark_bit_from_hdr(hhdr, word_no);
}

void GC_clear_mark_bit(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;
    
    clear_mark_bit_from_hdr(hhdr, word_no);
}

GC_bool GC_is_marked(p)
ptr_t p;
{
    register struct hblk *h = HBLKPTR(p);
    register hdr * hhdr = HDR(h);
    register int word_no = (word *)p - (word *)h;
    
    return(mark_bit_from_hdr(hhdr, word_no));
}

/*
 * Clear mark bits in all allocated heap blocks.  This invalidates
 * the marker invariant, and sets GC_mark_state to reflect this.
 * (This implicitly starts marking to reestablish the invariant.)
 */
void GC_clear_marks()
{
    GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
    GC_objects_are_marked = FALSE;
    GC_mark_state = MS_INVALID;
    scan_ptr = 0;		/* Restart the heap traversal.		*/
#   ifdef GATHERSTATS
	/* Counters reflect currently marked objects: reset here */
        GC_composite_in_use = 0;
        GC_atomic_in_use = 0;
#   endif
}

/* Initiate a garbage collection.  Initiates a full collection if the	*/
/* mark	state is invalid.						*/
/*ARGSUSED*/
void GC_initiate_gc()
{
    /* Snapshot dirty-page (and stubborn-object) information before	*/
    /* marking begins.							*/
    if (GC_dirty_maintained) GC_read_dirty();
#   ifdef STUBBORN_ALLOC
    	GC_read_changed();
#   endif
#   ifdef CHECKSUMS
	{
	    extern void GC_check_dirty();
	    
	    if (GC_dirty_maintained) GC_check_dirty();
	}
#   endif
    GC_n_rescuing_pages = 0;
    if (GC_mark_state == MS_NONE) {
        GC_mark_state = MS_PUSH_RESCUERS;
    } else if (GC_mark_state != MS_INVALID) {
    	ABORT("unexpected state");
    } /* else this is really a full collection, and mark	*/
      /* bits are invalid.					*/
    scan_ptr = 0;
}

static void alloc_mark_stack();

/* Perform a small amount of marking.			*/
/* We try to touch roughly a page of memory.		
*/
/* Return TRUE if we just finished a mark phase.	*/
/* Cold_gc_frame is an address inside a GC frame that	*/
/* remains valid until all marking is complete.		*/
/* A zero value indicates that it's OK to miss some	*/
/* register values.					*/
/* We hold the allocation lock.  In the case of 	*/
/* incremental collection, the world may not be stopped.*/
/* One step of the marker state machine: MS_PUSH_RESCUERS pushes from	*/
/* dirty pages, MS_PUSH_UNCOLLECTABLE pushes uncollectable blocks and	*/
/* roots, MS_ROOTS_PUSHED drains the mark stack, and MS_INVALID /	*/
/* MS_PARTIALLY_INVALID rescan the heap after an abandoned collection.	*/
#ifdef MSWIN32
  /* For win32, this is called after we establish a structured	*/
  /* exception handler, in case Windows unmaps one of our root	*/
  /* segments.  See below.  In either case, we acquire the 	*/
  /* allocator lock long before we get here.			*/
  GC_bool GC_mark_some_inner(cold_gc_frame)
  ptr_t cold_gc_frame;
#else
  GC_bool GC_mark_some(cold_gc_frame)
  ptr_t cold_gc_frame;
#endif
{
    switch(GC_mark_state) {
    	case MS_NONE:
    	    return(FALSE);
    	
    	case MS_PUSH_RESCUERS:
    	    if (GC_mark_stack_top
    	        >= GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/2) {
		/* Go ahead and mark, even though that might cause us to */
		/* see more marked dirty objects later on.  Avoid this	 */
		/* in the future.					 */
		GC_mark_stack_too_small = TRUE;
    	        MARK_FROM_MARK_STACK();
    	        return(FALSE);
    	    } else {
    	        scan_ptr = GC_push_next_marked_dirty(scan_ptr);
    	        if (scan_ptr == 0) {
		    /* Done with dirty pages; push the roots next.	*/
#		    ifdef CONDPRINT
		      if (GC_print_stats) {
			GC_printf1("Marked from %lu dirty pages\n",
				   (unsigned long)GC_n_rescuing_pages);
		      }
#		    endif
    	    	    GC_push_roots(FALSE, cold_gc_frame);
    	    	    GC_objects_are_marked = TRUE;
    	    	    if (GC_mark_state != MS_INVALID) {
    	    	        GC_mark_state = MS_ROOTS_PUSHED;
    	    	    }
    	    	}
    	    }
    	    return(FALSE);
    	
    	case MS_PUSH_UNCOLLECTABLE:
    	    if (GC_mark_stack_top
    	        >= GC_mark_stack + GC_mark_stack_size/4) {
#		ifdef PARALLEL_MARK
		  /* Avoid this, since we don't parallelize the marker	*/
		  /* here.						*/
		  if (GC_parallel) GC_mark_stack_too_small = TRUE;
#		endif
    	        MARK_FROM_MARK_STACK();
    	        return(FALSE);
    	    } else {
    	        scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
    	        if (scan_ptr == 0) {
    	    	    GC_push_roots(TRUE, cold_gc_frame);
    	    	    GC_objects_are_marked = TRUE;
    	    	    if (GC_mark_state != MS_INVALID) {
    	    	        GC_mark_state = MS_ROOTS_PUSHED;
    	    	    }
    	    	}
    	    }
    	    return(FALSE);
    	
    	case MS_ROOTS_PUSHED:
#	    ifdef PARALLEL_MARK
	      /* In the incremental GC case, this currently doesn't	*/
	      /* quite do the right thing, since it runs to		*/
	      /* completion.  On the other hand, starting a		*/
	      /* parallel marker is expensive, so perhaps it is		*/
	      /* the right thing?					*/
	      /* Eventually, incremental marking should run		*/
	      /* asynchronously in multiple threads, without grabbing	*/
	      /* the allocation lock.					*/
	        if (GC_parallel) {
		  GC_do_parallel_mark();
		  GC_ASSERT(GC_mark_stack_top < GC_first_nonempty);
		  /* Parallel mark ran to completion: empty the stack.	*/
		  GC_mark_stack_top = GC_mark_stack - 1;
    	          if (GC_mark_stack_too_small) {
    	            alloc_mark_stack(2*GC_mark_stack_size);
    	          }
		  if (GC_mark_state == MS_ROOTS_PUSHED) {
    	            GC_mark_state = MS_NONE;
    	            return(TRUE);
		  } else {
		    return(FALSE);
	          }
		}
#	    endif
    	    if (GC_mark_stack_top >= GC_mark_stack) {
    	        MARK_FROM_MARK_STACK();
    	        return(FALSE);
    	    } else {
    	        /* Mark stack drained: this mark phase is complete.	*/
    	        GC_mark_state = MS_NONE;
    	        if (GC_mark_stack_too_small) {
    	            alloc_mark_stack(2*GC_mark_stack_size);
    	        }
    	        return(TRUE);
    	    }
    	
    	case MS_INVALID:
    	case MS_PARTIALLY_INVALID:
	    if (!GC_objects_are_marked) {
		/* Nothing marked yet: restart from the beginning.	*/
		GC_mark_state = MS_PUSH_UNCOLLECTABLE;
		return(FALSE);
	    }
    	    if (GC_mark_stack_top >= GC_mark_stack) {
    	        MARK_FROM_MARK_STACK();
    	        return(FALSE);
    	    }
    	    if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
		/* About to start a heap scan for marked objects. */
		/* Mark stack is empty.  OK to reallocate.	  */
		if (GC_mark_stack_too_small) {
    	            alloc_mark_stack(2*GC_mark_stack_size);
		}
		GC_mark_state = MS_PARTIALLY_INVALID;
    	    }
    	    scan_ptr = GC_push_next_marked(scan_ptr);
    	    if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
    	    	GC_push_roots(TRUE, cold_gc_frame);
    	    	GC_objects_are_marked = TRUE;
    	    	if (GC_mark_state != MS_INVALID) {
    	    	    GC_mark_state = MS_ROOTS_PUSHED;
    	    	}
    	    }
    	    return(FALSE);
    	default:
    	    ABORT("GC_mark_some: bad state");
    	    return(FALSE);
    }
}

#ifdef MSWIN32

# ifdef __GNUC__

    /* Extended SEH registration record: alt_path holds the address to	*/
    /* resume at inside the wrapper when an access violation occurs.	*/
    typedef struct {
      EXCEPTION_REGISTRATION ex_reg;
      void *alt_path;
    } ext_ex_regn;

    /* SEH handler used while marking: on STATUS_ACCESS_VIOLATION,	*/
    /* unwind one frame and resume at the wrapper's alternate path;	*/
    /* any other exception is passed on.				*/
    static EXCEPTION_DISPOSITION mark_ex_handler(
        struct _EXCEPTION_RECORD *ex_rec, 
        void *est_frame,
        struct _CONTEXT *context,
        void *disp_ctxt)
    {
        if (ex_rec->ExceptionCode == STATUS_ACCESS_VIOLATION) {
          ext_ex_regn *xer = (ext_ex_regn *)est_frame;

          /* Unwind from the inner function assuming the standard */
          /* function prologue.                                   */
          /* Assumes code has not been compiled with              */
          /* -fomit-frame-pointer.                                */
          context->Esp = context->Ebp;
          context->Ebp = *((DWORD *)context->Esp);
          context->Esp = context->Esp - 8;

          /* Resume execution at the "real" handler within the    */
          /* wrapper function.                                    
*/          context->Eip = (DWORD )(xer->alt_path);          return ExceptionContinueExecution;        } else {            return ExceptionContinueSearch;        }    }# endif /* __GNUC__ */  GC_bool GC_mark_some(cold_gc_frame)  ptr_t cold_gc_frame;  {      GC_bool ret_val;#   ifndef __GNUC__      /* Windows 98 appears to asynchronously create and remove  */      /* writable memory mappings, for reasons we haven't yet    */      /* understood.  Since we look for writable regions to      */      /* determine the root set, we may try to mark from an      */      /* address range that disappeared since we started the     */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -