
misc.c

Language: C
Page 1 of 3
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1999-2001 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, July 31, 1995 5:02 pm PDT */

#include <stdio.h>
#include <limits.h>
#include <stdarg.h>
#ifndef _WIN32_WCE
# include <signal.h>
#endif

#define I_HIDE_POINTERS /* To make GC_call_with_alloc_lock visible */
#include "private/gc_pmark.h"

#ifdef GC_SOLARIS_THREADS
# include <sys/syscall.h>
#endif
#if defined(MSWIN32) || defined(MSWINCE)
# define WIN32_LEAN_AND_MEAN
# define NOSERVICE
# include <windows.h>
# include <tchar.h>
#endif

#ifdef UNIX_LIKE
# include <fcntl.h>
# include <sys/types.h>
# include <sys/stat.h>
  int GC_log;  /* Forward decl, so we can set it.       */
#endif

#ifdef NONSTOP
# include <floss.h>
#endif

#if defined(THREADS) && defined(PCR)
# include "il/PCR_IL.h"
  PCR_Th_ML GC_allocate_ml;
#endif
/* For other platforms with threads, the lock and possibly             */
/* GC_lock_holder variables are defined in the thread support code.    */

#if defined(NOSYS) || defined(ECOS)
#undef STACKBASE
#endif

/* Don't unnecessarily call GC_register_main_static_data() in case     */
/* dyn_load.c isn't linked in.                                         */
#ifdef DYNAMIC_LOADING
# define GC_REGISTER_MAIN_STATIC_DATA() GC_register_main_static_data()
#else
# define GC_REGISTER_MAIN_STATIC_DATA() TRUE
#endif

GC_FAR struct _GC_arrays GC_arrays /* = { 0 } */;

GC_bool GC_debugging_started = FALSE;
        /* defined here so we don't have to load debug_malloc.o */

void (*GC_check_heap) (void) = (void (*) (void))0;
void (*GC_print_all_smashed) (void) = (void (*) (void))0;
void (*GC_start_call_back) (void) = (void (*) (void))0;

ptr_t GC_stackbottom = 0;

#ifdef IA64
  ptr_t GC_register_stackbottom = 0;
#endif

GC_bool GC_dont_gc = 0;
GC_bool GC_dont_precollect = 0;
GC_bool GC_quiet = 0;

#ifndef SMALL_CONFIG
  GC_bool GC_print_stats = 0;
#endif

GC_bool GC_print_back_height = 0;

#ifndef NO_DEBUGGING
  GC_bool GC_dump_regularly = 0;  /* Generate regular debugging dumps. */
#endif

#ifdef KEEP_BACK_PTRS
  long GC_backtraces = 0;       /* Number of random backtraces to      */
                                /* generate for each GC.               */
#endif

#ifdef FIND_LEAK
  int GC_find_leak = 1;
#else
  int GC_find_leak = 0;
#endif

#ifdef ALL_INTERIOR_POINTERS
  int GC_all_interior_pointers = 1;
#else
  int GC_all_interior_pointers = 0;
#endif

long GC_large_alloc_warn_interval = 5;
        /* Interval between unsuppressed warnings.      */

long GC_large_alloc_warn_suppressed = 0;
        /* Number of warnings suppressed so far.        */

/*ARGSUSED*/
void * GC_default_oom_fn(size_t bytes_requested)
{
    return(0);
}

void * (*GC_oom_fn) (size_t bytes_requested) = GC_default_oom_fn;

void * GC_project2(void *arg1, void *arg2)
{
  return arg2;
}

/* Set things up so that GC_size_map[i] >= granules(i),                */
/* but not too much bigger,                                            */
/* and so that size_map contains relatively few distinct entries.      */
/* This was originally stolen from Russ Atkinson's Cedar               */
/* quantization algorithm (but we precompute it).                      */
void GC_init_size_map(void)
{
    int i;

    /* Map size 0 to something bigger.                  */
    /* This avoids problems at lower levels.            */
    GC_size_map[0] = 1;
    for (i = 1; i <= GRANULES_TO_BYTES(TINY_FREELISTS-1) - EXTRA_BYTES; i++) {
        GC_size_map[i] = ROUNDED_UP_GRANULES(i);
        GC_ASSERT(GC_size_map[i] < TINY_FREELISTS);
    }
    /* We leave the rest of the array to be filled in on demand. */
}

/* Fill in additional entries in GC_size_map, including the ith one.   */
/* We assume the ith entry is currently 0.                             */
/* Note that a filled in section of the array ending at n always       */
/* has length at least n/4.                                            */
void GC_extend_size_map(size_t i)
{
    size_t orig_granule_sz = ROUNDED_UP_GRANULES(i);
    size_t granule_sz = orig_granule_sz;
    size_t byte_sz = GRANULES_TO_BYTES(granule_sz);
                        /* The size we try to preserve.         */
                        /* Close to i, unless this would        */
                        /* introduce too many distinct sizes.   */
    size_t smaller_than_i = byte_sz - (byte_sz >> 3);
    size_t much_smaller_than_i = byte_sz - (byte_sz >> 2);
    size_t low_limit;   /* The lowest indexed entry we  */
                        /* initialize.                  */
    size_t j;

    if (GC_size_map[smaller_than_i] == 0) {
        low_limit = much_smaller_than_i;
        while (GC_size_map[low_limit] != 0) low_limit++;
    } else {
        low_limit = smaller_than_i + 1;
        while (GC_size_map[low_limit] != 0) low_limit++;
        granule_sz = ROUNDED_UP_GRANULES(low_limit);
        granule_sz += granule_sz >> 3;
        if (granule_sz < orig_granule_sz) granule_sz = orig_granule_sz;
    }
    /* For these larger sizes, we use an even number of granules.      */
    /* This makes it easier to, for example, construct a 16-byte-      */
    /* aligned allocator even if GRANULE_BYTES is 8.                   */
    granule_sz += 1;
    granule_sz &= ~1;
    if (granule_sz > MAXOBJGRANULES) {
        granule_sz = MAXOBJGRANULES;
    }
    /* If we can fit the same number of larger objects in a block,     */
    /* do so.                                                          */
    {
        size_t number_of_objs = HBLK_GRANULES/granule_sz;
        granule_sz = HBLK_GRANULES/number_of_objs;
        granule_sz &= ~1;
    }
    byte_sz = GRANULES_TO_BYTES(granule_sz);
    /* We may need one extra byte;                      */
    /* don't always fill in GC_size_map[byte_sz].       */
    byte_sz -= EXTRA_BYTES;

    for (j = low_limit; j <= byte_sz; j++) GC_size_map[j] = granule_sz;
}

/*
 * The following is a gross hack to deal with a problem that can occur
 * on machines that are sloppy about stack frame sizes, notably SPARC.
 * Bogus pointers may be written to the stack and not cleared for
 * a LONG time, because they always fall into holes in stack frames
 * that are not written.  We partially address this by clearing
 * sections of the stack whenever we get control.
 */
word GC_stack_last_cleared = 0; /* GC_no when we last did this */
# ifdef THREADS
#   define BIG_CLEAR_SIZE 2048  /* Clear this much now and then.       */
#   define SMALL_CLEAR_SIZE 256 /* Clear this much every time.         */
# endif
# define CLEAR_SIZE 213  /* Granularity for GC_clear_stack_inner */
# define DEGRADE_RATE 50

ptr_t GC_min_sp;        /* Coolest stack pointer value from which      */
                        /* we've already cleared the stack.            */

ptr_t GC_high_water;    /* "hottest" stack pointer value we have seen  */
                        /* recently.  Degrades over time.              */

word GC_bytes_allocd_at_reset;

#if defined(ASM_CLEAR_CODE)
  extern void *GC_clear_stack_inner(void *, ptr_t);
#else
/* Clear the stack up to about limit.  Return arg. */
/*ARGSUSED*/
void * GC_clear_stack_inner(void *arg, ptr_t limit)
{
    word dummy[CLEAR_SIZE];

    BZERO(dummy, CLEAR_SIZE*sizeof(word));
    if ((ptr_t)(dummy) COOLER_THAN limit) {
        (void) GC_clear_stack_inner(arg, limit);
    }
    /* Make sure the recursive call is not a tail call, and the bzero  */
    /* call is not recognized as dead code.                            */
    GC_noop1((word)dummy);
    return(arg);
}
#endif

/* Clear some of the inaccessible part of the stack.  Returns its      */
/* argument, so it can be used in a tail call position, hence clearing */
/* another frame.                                                      */
void * GC_clear_stack(void *arg)
{
    ptr_t sp = GC_approx_sp();  /* Hotter than actual sp */
#   ifdef THREADS
        word dummy[SMALL_CLEAR_SIZE];
        static unsigned random_no = 0;
                                /* Should be more random than it is ... */
                                /* Used to occasionally clear a bigger  */
                                /* chunk.                               */
#   endif
    ptr_t limit;

#   define SLOP 400
        /* Extra bytes we clear every time.  This clears our own       */
        /* activation record, and should cause more frequent           */
        /* clearing near the cold end of the stack, a good thing.      */
#   define GC_SLOP 4000
        /* We make GC_high_water this much hotter than we really saw   */
        /* it, to cover for GC noise etc. above our current frame.     */
#   define CLEAR_THRESHOLD 100000
        /* We restart the clearing process after this many bytes of    */
        /* allocation.  Otherwise very heavily recursive programs      */
        /* with sparse stacks may result in heaps that grow almost     */
        /* without bounds.  As the heap gets larger, collection        */
        /* frequency decreases, thus clearing frequency would decrease, */
        /* thus more junk remains accessible, thus the heap gets       */
        /* larger ...                                                  */
# ifdef THREADS
    if (++random_no % 13 == 0) {
        limit = sp;
        MAKE_HOTTER(limit, BIG_CLEAR_SIZE*sizeof(word));
        limit = (ptr_t)((word)limit & ~0xf);
                        /* Make it sufficiently aligned for assembly   */
                        /* implementations of GC_clear_stack_inner.    */
        return GC_clear_stack_inner(arg, limit);
    } else {
        BZERO(dummy, SMALL_CLEAR_SIZE*sizeof(word));
        return arg;
    }
# else
    if (GC_gc_no > GC_stack_last_cleared) {
        /* Start things over, so we clear the entire stack again */
        if (GC_stack_last_cleared == 0) GC_high_water = (ptr_t)GC_stackbottom;
        GC_min_sp = GC_high_water;
        GC_stack_last_cleared = GC_gc_no;
        GC_bytes_allocd_at_reset = GC_bytes_allocd;
    }
    /* Adjust GC_high_water */
    MAKE_COOLER(GC_high_water, WORDS_TO_BYTES(DEGRADE_RATE) + GC_SLOP);
    if (sp HOTTER_THAN GC_high_water) {
        GC_high_water = sp;
    }
    MAKE_HOTTER(GC_high_water, GC_SLOP);
    limit = GC_min_sp;
    MAKE_HOTTER(limit, SLOP);
    if (sp COOLER_THAN limit) {
        limit = (ptr_t)((word)limit & ~0xf);
                        /* Make it sufficiently aligned for assembly   */
                        /* implementations of GC_clear_stack_inner.    */
        GC_min_sp = sp;
        return(GC_clear_stack_inner(arg, limit));
    } else if (GC_bytes_allocd - GC_bytes_allocd_at_reset > CLEAR_THRESHOLD) {
        /* Restart clearing process, but limit how much clearing we do. */
        GC_min_sp = sp;
        MAKE_HOTTER(GC_min_sp, CLEAR_THRESHOLD/4);
        if (GC_min_sp HOTTER_THAN GC_high_water) GC_min_sp = GC_high_water;
        GC_bytes_allocd_at_reset = GC_bytes_allocd;
    }
    return(arg);
# endif
}

/* Return a pointer to the base address of p, given a pointer to       */
/* an address within an object.  Return 0 otherwise.                   */
void * GC_base(void * p)
{
    ptr_t r;
    struct hblk *h;
    bottom_index *bi;
    hdr *candidate_hdr;
    ptr_t limit;

    r = p;
    if (!GC_is_initialized) return 0;
    h = HBLKPTR(r);
    GET_BI(r, bi);
    candidate_hdr = HDR_FROM_BI(bi, r);
    if (candidate_hdr == 0) return(0);
    /* If it's a pointer to the middle of a large object, move it      */
    /* to the beginning.                                               */
    while (IS_FORWARDING_ADDR_OR_NIL(candidate_hdr)) {
        h = FORWARDED_ADDR(h,candidate_hdr);
        r = (ptr_t)h;
        candidate_hdr = HDR(h);
    }
    if (HBLK_IS_FREE(candidate_hdr)) return(0);
    /* Make sure r points to the beginning of the object */
    r = (ptr_t)((word)r & ~(WORDS_TO_BYTES(1) - 1));
    {
        size_t offset = HBLKDISPL(r);
        signed_word sz = candidate_hdr -> hb_sz;
        size_t obj_displ = offset % sz;

        r -= obj_displ;
        limit = r + sz;
        if (limit > (ptr_t)(h + 1) && sz <= HBLKSIZE) {
            return(0);
        }
        if ((ptr_t)p >= limit) return(0);
    }
    return((void *)r);
}

/* Return the size of an object, given a pointer to its base.          */
/* (For small objects this also happens to work from interior          */
/* pointers, but that shouldn't be relied upon.)                       */
size_t GC_size(void * p)
{
    hdr * hhdr = HDR(p);

    return hhdr -> hb_sz;
}

size_t GC_get_heap_size(void)
{
    return GC_heapsize;
}

size_t GC_get_free_bytes(void)
{
    return GC_large_free_bytes;
}

size_t GC_get_bytes_since_gc(void)
{
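
The listing above defines several of the collector's public entry points: GC_base (interior pointer to object base), GC_size (allocated object size after size-map rounding), GC_get_heap_size, and the GC_oom_fn out-of-memory hook. As a quick orientation, here is a minimal usage sketch, not part of misc.c, showing how these might be exercised through the public gc.h header when linking against libgc (e.g. cc demo.c -lgc). The handler name die_on_oom is a hypothetical illustration; the listing itself shows GC_oom_fn is an assignable function pointer defaulting to GC_default_oom_fn.

/* Usage sketch only -- not part of misc.c.  Assumes gc.h/libgc are    */
/* available; die_on_oom is a hypothetical replacement for the default */
/* GC_default_oom_fn defined above.                                    */
#include <stdio.h>
#include <stdlib.h>
#include <gc.h>

static void * die_on_oom(size_t bytes_requested)
{
    fprintf(stderr, "GC out of memory (%lu bytes requested)\n",
            (unsigned long)bytes_requested);
    exit(1);
    return 0;   /* not reached */
}

int main(void)
{
    char *obj;

    GC_INIT();
    GC_oom_fn = die_on_oom;   /* replace the 0-returning default hook */

    obj = GC_MALLOC(100);     /* collectable allocation               */
    /* GC_base maps an interior pointer back to the object's base;    */
    /* GC_size reports the size after the size-map rounding performed */
    /* by GC_init_size_map/GC_extend_size_map, so it is >= 100 here.  */
    printf("base matches: %d\n", GC_base(obj + 50) == (void *)obj);
    printf("object size:  %lu\n", (unsigned long)GC_size(obj));
    printf("heap size:    %lu\n", (unsigned long)GC_get_heap_size());
    return 0;
}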
