📄 misc.c
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1999-2001 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, July 31, 1995 5:02 pm PDT */

#include <stdio.h>
#include <limits.h>
#ifndef _WIN32_WCE
#include <signal.h>
#endif

#define I_HIDE_POINTERS /* To make GC_call_with_alloc_lock visible */
#include "private/gc_pmark.h"

#ifdef GC_SOLARIS_THREADS
# include <sys/syscall.h>
#endif
#if defined(MSWIN32) || defined(MSWINCE)
# define WIN32_LEAN_AND_MEAN
# define NOSERVICE
# include <windows.h>
# include <tchar.h>
#endif

# ifdef THREADS
#   ifdef PCR
#     include "il/PCR_IL.h"
      PCR_Th_ML GC_allocate_ml;
#   else
#     ifdef SRC_M3
        /* Critical section counter is defined in the M3 runtime. */
        /* That's all we use.                                      */
#     else
#       ifdef GC_SOLARIS_THREADS
          mutex_t GC_allocate_ml;       /* Implicitly initialized. */
#       else
#         if defined(GC_WIN32_THREADS)
#           if defined(GC_PTHREADS)
              pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;
#           elif defined(GC_DLL)
              __declspec(dllexport) CRITICAL_SECTION GC_allocate_ml;
#           else
              CRITICAL_SECTION GC_allocate_ml;
#           endif
#         else
#           if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS)
#             if defined(USE_SPIN_LOCK)
                pthread_t GC_lock_holder = NO_THREAD;
#             else
                pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;
                pthread_t GC_lock_holder = NO_THREAD;
                        /* Used only for assertions, and to prevent      */
                        /* recursive reentry in the system call wrapper. */
#             endif
#           else
                --> declare allocator lock here
#           endif
#         endif
#       endif
#     endif
#   endif
# endif

#if defined(NOSYS) || defined(ECOS)
#undef STACKBASE
#endif

/* Don't unnecessarily call GC_register_main_static_data() in case */
/* dyn_load.c isn't linked in.                                     */
#ifdef DYNAMIC_LOADING
# define GC_REGISTER_MAIN_STATIC_DATA() GC_register_main_static_data()
#else
# define GC_REGISTER_MAIN_STATIC_DATA() TRUE
#endif

GC_FAR struct _GC_arrays GC_arrays /* = { 0 } */;

GC_bool GC_debugging_started = FALSE;
        /* defined here so we don't have to load debug_malloc.o */

void (*GC_check_heap) GC_PROTO((void)) = (void (*) GC_PROTO((void)))0;
void (*GC_print_all_smashed) GC_PROTO((void)) = (void (*) GC_PROTO((void)))0;

void (*GC_start_call_back) GC_PROTO((void)) = (void (*) GC_PROTO((void)))0;

ptr_t GC_stackbottom = 0;

#ifdef IA64
  ptr_t GC_register_stackbottom = 0;
#endif

GC_bool GC_dont_gc = 0;

GC_bool GC_dont_precollect = 0;

GC_bool GC_quiet = 0;

GC_bool GC_print_stats = 0;

GC_bool GC_print_back_height = 0;

#ifndef NO_DEBUGGING
  GC_bool GC_dump_regularly = 0;  /* Generate regular debugging dumps. */
#endif

#ifdef KEEP_BACK_PTRS
  long GC_backtraces = 0;       /* Number of random backtraces to */
                                /* generate for each GC.          */
#endif

#ifdef FIND_LEAK
  int GC_find_leak = 1;
#else
  int GC_find_leak = 0;
#endif

#ifdef ALL_INTERIOR_POINTERS
  int GC_all_interior_pointers = 1;
#else
  int GC_all_interior_pointers = 0;
#endif

long GC_large_alloc_warn_interval = 5;
        /* Interval between unsuppressed warnings. */

long GC_large_alloc_warn_suppressed = 0;
        /* Number of warnings suppressed so far.   */
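The defaults above (GC_find_leak, GC_all_interior_pointers, and friends) are fixed by build-time macros, but they are ordinary globals that client code may override before the collector initializes. A minimal client-side sketch, not part of misc.c, assuming only the public declarations in gc.h:

#include "gc.h"

int main(void)
{
    /* Override the build-time defaults; these are meant to be set    */
    /* before GC_INIT(), since initialization reads the settings.     */
    GC_find_leak = 1;              /* report unreachable, never-freed objects */
    GC_all_interior_pointers = 0;  /* recognize only pointers to object bases */
    GC_INIT();

    void *p = GC_MALLOC(64);       /* managed by the collector */
    (void)p;
    return 0;
}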
/*ARGSUSED*/
GC_PTR GC_default_oom_fn GC_PROTO((size_t bytes_requested))
{
    return(0);
}

GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested)) = GC_default_oom_fn;

extern signed_word GC_mem_found;

void * GC_project2(arg1, arg2)
void *arg1;
void *arg2;
{
  return arg2;
}

# ifdef MERGE_SIZES
    /* Set things up so that GC_size_map[i] >= words(i),               */
    /* but not too much bigger,                                        */
    /* and so that size_map contains relatively few distinct entries.  */
    /* This is stolen from Russ Atkinson's Cedar quantization          */
    /* algorithm (but we precompute it).                               */
    void GC_init_size_map()
    {
        register unsigned i;

        /* Map size 0 to something bigger.                      */
        /* This avoids problems at lower levels.                */
        /* One word objects don't have to be 2 word aligned,    */
        /* unless we're using mark bytes.                       */
          for (i = 0; i < sizeof(word); i++) {
              GC_size_map[i] = MIN_WORDS;
          }
#         if MIN_WORDS > 1
            GC_size_map[sizeof(word)] = MIN_WORDS;
#         else
            GC_size_map[sizeof(word)] = ROUNDED_UP_WORDS(sizeof(word));
#         endif
        for (i = sizeof(word) + 1; i <= 8 * sizeof(word); i++) {
            GC_size_map[i] = ALIGNED_WORDS(i);
        }
        for (i = 8*sizeof(word) + 1; i <= 16 * sizeof(word); i++) {
              GC_size_map[i] = (ROUNDED_UP_WORDS(i) + 1) & (~1);
        }
#       ifdef GC_GCJ_SUPPORT
           /* Make all sizes up to 32 words predictable, so that a     */
           /* compiler can statically perform the same computation,    */
           /* or at least a computation that results in similar size   */
           /* classes.                                                 */
           for (i = 16*sizeof(word) + 1; i <= 32 * sizeof(word); i++) {
              GC_size_map[i] = (ROUNDED_UP_WORDS(i) + 3) & (~3);
           }
#       endif
        /* We leave the rest of the array to be filled in on demand. */
    }

    /* Fill in additional entries in GC_size_map, including the ith one. */
    /* We assume the ith entry is currently 0.                           */
    /* Note that a filled in section of the array ending at n always     */
    /* has length at least n/4.                                          */
    void GC_extend_size_map(i)
    word i;
    {
        word orig_word_sz = ROUNDED_UP_WORDS(i);
        word word_sz = orig_word_sz;
        register word byte_sz = WORDS_TO_BYTES(word_sz);
                                /* The size we try to preserve.       */
                                /* Close to i, unless this would      */
                                /* introduce too many distinct sizes. */
        word smaller_than_i = byte_sz - (byte_sz >> 3);
        word much_smaller_than_i = byte_sz - (byte_sz >> 2);
        register word low_limit;        /* The lowest indexed entry we */
                                        /* initialize.                 */
        register word j;

        if (GC_size_map[smaller_than_i] == 0) {
            low_limit = much_smaller_than_i;
            while (GC_size_map[low_limit] != 0) low_limit++;
        } else {
            low_limit = smaller_than_i + 1;
            while (GC_size_map[low_limit] != 0) low_limit++;
            word_sz = ROUNDED_UP_WORDS(low_limit);
            word_sz += word_sz >> 3;
            if (word_sz < orig_word_sz) word_sz = orig_word_sz;
        }
#       ifdef ALIGN_DOUBLE
            word_sz += 1;
            word_sz &= ~1;
#       endif
        if (word_sz > MAXOBJSZ) {
            word_sz = MAXOBJSZ;
        }
        /* If we can fit the same number of larger objects in a block, */
        /* do so.                                                      */
        {
            size_t number_of_objs = BODY_SZ/word_sz;
            word_sz = BODY_SZ/number_of_objs;
#           ifdef ALIGN_DOUBLE
                word_sz &= ~1;
#           endif
        }
        byte_sz = WORDS_TO_BYTES(word_sz);
        if (GC_all_interior_pointers) {
            /* We need one extra byte; don't fill in GC_size_map[byte_sz] */
            byte_sz -= EXTRA_BYTES;
        }

        for (j = low_limit; j <= byte_sz; j++) GC_size_map[j] = word_sz;
    }
# endif
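GC_oom_fn is the hook the allocator calls when it cannot satisfy a request; its return value is handed back to the caller, and the default above simply returns 0. A hedged client-side sketch of installing a replacement handler (report_oom is a made-up name; only the gc.h declaration of GC_oom_fn is assumed):

#include <stdio.h>
#include "gc.h"

/* Hypothetical handler: receives the size of the failed request.  */
/* Returning 0, like GC_default_oom_fn, makes the allocation call  */
/* itself return 0 to its caller.                                  */
static GC_PTR report_oom(size_t bytes_requested)
{
    fprintf(stderr, "GC: out of memory for a %lu byte request\n",
            (unsigned long)bytes_requested);
    return 0;
}

int main(void)
{
    GC_INIT();
    GC_oom_fn = report_oom;    /* replaces GC_default_oom_fn */
    return 0;
}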
/*
 * The following is a gross hack to deal with a problem that can occur
 * on machines that are sloppy about stack frame sizes, notably SPARC.
 * Bogus pointers may be written to the stack and not cleared for
 * a LONG time, because they always fall into holes in stack frames
 * that are not written.  We partially address this by clearing
 * sections of the stack whenever we get control.
 */
word GC_stack_last_cleared = 0; /* GC_no when we last did this */
# ifdef THREADS
#   define BIG_CLEAR_SIZE 2048  /* Clear this much now and then. */
#   define SMALL_CLEAR_SIZE 256 /* Clear this much every time.   */
# endif
# define CLEAR_SIZE 213  /* Granularity for GC_clear_stack_inner */
# define DEGRADE_RATE 50

word GC_min_sp;         /* Coolest stack pointer value from which we've */
                        /* already cleared the stack.                   */

word GC_high_water;     /* "hottest" stack pointer value we have seen   */
                        /* recently.  Degrades over time.               */

word GC_words_allocd_at_reset;

#if defined(ASM_CLEAR_CODE)
  extern ptr_t GC_clear_stack_inner();
#else
/* Clear the stack up to about limit.  Return arg. */
/*ARGSUSED*/
ptr_t GC_clear_stack_inner(arg, limit)
ptr_t arg;
word limit;
{
    word dummy[CLEAR_SIZE];

    BZERO(dummy, CLEAR_SIZE*sizeof(word));
    if ((word)(dummy) COOLER_THAN limit) {
        (void) GC_clear_stack_inner(arg, limit);
    }
    /* Make sure the recursive call is not a tail call, and the bzero */
    /* call is not recognized as dead code.                           */
    GC_noop1((word)dummy);
    return(arg);
}
#endif

/* Clear some of the inaccessible part of the stack.  Returns its      */
/* argument, so it can be used in a tail call position, hence clearing */
/* another frame.                                                      */
ptr_t GC_clear_stack(arg)
ptr_t arg;
{
    register word sp = (word)GC_approx_sp();  /* Hotter than actual sp */
#   ifdef THREADS
        word dummy[SMALL_CLEAR_SIZE];
        static unsigned random_no = 0;
                                /* Should be more random than it is ... */
                                /* Used to occasionally clear a bigger  */
                                /* chunk.                               */
#   endif
    register word limit;

#   define SLOP 400
        /* Extra bytes we clear every time.  This clears our own      */
        /* activation record, and should cause more frequent          */
        /* clearing near the cold end of the stack, a good thing.     */
#   define GC_SLOP 4000
        /* We make GC_high_water this much hotter than we really saw  */
        /* it, to cover for GC noise etc. above our current frame.    */
#   define CLEAR_THRESHOLD 100000
        /* We restart the clearing process after this many bytes of     */
        /* allocation.  Otherwise very heavily recursive programs       */
        /* with sparse stacks may result in heaps that grow almost      */
        /* without bounds.  As the heap gets larger, collection         */
        /* frequency decreases, thus clearing frequency would decrease, */
        /* thus more junk remains accessible, thus the heap gets        */
        /* larger ...                                                   */
# ifdef THREADS
    if (++random_no % 13 == 0) {
        limit = sp;
        MAKE_HOTTER(limit, BIG_CLEAR_SIZE*sizeof(word));
        limit &= ~0xf;  /* Make it sufficiently aligned for assembly */
                        /* implementations of GC_clear_stack_inner.  */
        return GC_clear_stack_inner(arg, limit);
    } else {
        BZERO(dummy, SMALL_CLEAR_SIZE*sizeof(word));
        return arg;
    }
# else
    if (GC_gc_no > GC_stack_last_cleared) {
        /* Start things over, so we clear the entire stack again */
        if (GC_stack_last_cleared == 0) GC_high_water = (word) GC_stackbottom;
        GC_min_sp = GC_high_water;
        GC_stack_last_cleared = GC_gc_no;
        GC_words_allocd_at_reset = GC_words_allocd;
    }
    /* Adjust GC_high_water */
        MAKE_COOLER(GC_high_water, WORDS_TO_BYTES(DEGRADE_RATE) + GC_SLOP);
        if (sp HOTTER_THAN GC_high_water) {
            GC_high_water = sp;
        }
        MAKE_HOTTER(GC_high_water, GC_SLOP);
    limit = GC_min_sp;
    MAKE_HOTTER(limit, SLOP);
    if (sp COOLER_THAN limit) {
        limit &= ~0xf;  /* Make it sufficiently aligned for assembly */
                        /* implementations of GC_clear_stack_inner.  */
        GC_min_sp = sp;
        return(GC_clear_stack_inner(arg, limit));
    } else if (WORDS_TO_BYTES(GC_words_allocd - GC_words_allocd_at_reset)
               > CLEAR_THRESHOLD) {
        /* Restart clearing process, but limit how much clearing we do. */
        GC_min_sp = sp;
        MAKE_HOTTER(GC_min_sp, CLEAR_THRESHOLD/4);
        if (GC_min_sp HOTTER_THAN GC_high_water) GC_min_sp = GC_high_water;
        GC_words_allocd_at_reset = GC_words_allocd;
    }
    return(arg);
# endif
}

/* Return a pointer to the base address of p, given a pointer to an */
/* address within an object.  Return 0 o.w.                         */
# ifdef __STDC__
    GC_PTR GC_base(GC_PTR p)
# else
    GC_PTR GC_base(p)
    GC_PTR p;
# endif
{
    register word r;
    register struct hblk *h;
    register bottom_index *bi;
    register hdr *candidate_hdr;
    register word limit;
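The listing is cut off inside GC_base; per the comment above, it maps an address anywhere inside a heap object to that object's base address and returns 0 for addresses the collector does not manage. A small client-side sketch of that contract, not part of misc.c, assuming the public gc.h API:

#include <assert.h>
#include "gc.h"

int main(void)
{
    char *obj;
    int on_stack;

    GC_INIT();
    obj = (char *)GC_MALLOC(100);

    /* An interior pointer is mapped back to the start of its object. */
    assert(GC_base((GC_PTR)(obj + 42)) == (GC_PTR)obj);

    /* Addresses outside the collected heap yield 0. */
    assert(GC_base((GC_PTR)&on_stack) == 0);
    return 0;
}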