⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 alloc.c

📁 Boost provides free peer-reviewed portable C++ source libraries. We emphasize libraries that work well with the C++ Standard Library.
💻 C
📖 第 1 页 / 共 3 页
字号:
		        (unsigned long)GC_atomic_in_use);	}
    /* NOTE(review): the statements above are the tail of a function	*/
    /* whose opening lies before this chunk.  From here on it wraps up	*/
    /* a collection cycle: decide whether the next GC must be a full	*/
    /* one, log reclaim statistics, and reset the per-cycle counters.	*/
        if (GC_is_full_gc)  {
	    GC_used_heap_size_after_full = USED_HEAP_SIZE;
	    GC_need_full_gc = FALSE;
	} else {
	    /* Request a full GC once allocation since the last full	*/
	    /* collection exceeds min_bytes_allocd().			*/
	    GC_need_full_gc =
		 USED_HEAP_SIZE - GC_used_heap_size_after_full
		 > min_bytes_allocd();
	}
    if (GC_print_stats == VERBOSE) {
	GC_log_printf(
		  "Immediately reclaimed %ld bytes in heap of size %lu bytes",
	          (long)GC_bytes_found,
	          (unsigned long)GC_heapsize);
#	ifdef USE_MUNMAP
	  GC_log_printf("(%lu unmapped)", (unsigned long)GC_unmapped_bytes);
#	endif
	GC_log_printf("\n");
    }
    /* Reset or increment counters for next cycle */
      GC_n_attempts = 0;
      GC_is_full_gc = FALSE;
      GC_bytes_allocd_before_gc += GC_bytes_allocd;
      GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
      GC_bytes_allocd = 0;
      GC_bytes_freed = 0;
      GC_finalizer_bytes_freed = 0;

#   ifdef USE_MUNMAP
      GC_unmap_old();
#   endif
    if (GC_print_stats) {
	GET_TIME(done_time);
	GC_log_printf("Finalize + initiate sweep took %lu + %lu msecs\n",
	              MS_TIME_DIFF(finalize_time,start_time),
	              MS_TIME_DIFF(done_time,finalize_time));
    }
}

/* Externally callable routine to invoke full, stop-world collection.	*/
/* Invokes finalizers before the collection and again on success, and	*/
/* prints smashed-object reports when debugging is active.  Returns	*/
/* the (int-converted) result of GC_try_to_collect_inner.		*/
int GC_try_to_collect(GC_stop_func stop_func)
{
    int result;
    DCL_LOCK_STATE;

    if (!GC_is_initialized) GC_init();
    if (GC_debugging_started) GC_print_all_smashed();
    GC_INVOKE_FINALIZERS();
    LOCK();
    ENTER_GC();
    if (!GC_is_initialized) GC_init_inner();
    /* Minimize junk left in my registers */
      GC_noop(0,0,0,0,0,0);
    result = (int)GC_try_to_collect_inner(stop_func);
    EXIT_GC();
    UNLOCK();
    if(result) {
        if (GC_debugging_started) GC_print_all_smashed();
        GC_INVOKE_FINALIZERS();
    }
    return(result);
}

/* Convenience entry point: run GC_try_to_collect with			*/
/* GC_never_stop_func, then print any accumulated errors.		*/
void GC_gcollect(void)
{
    (void)GC_try_to_collect(GC_never_stop_func);
    if (GC_have_errors) GC_print_all_errors();
}

word GC_n_heap_sects = 0;	/* Number of sections currently in heap. 
*//* * Use the chunk of memory starting at p of size bytes as part of the heap. * Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE. */void GC_add_to_heap(struct hblk *p, size_t bytes){    hdr * phdr;        if (GC_n_heap_sects >= MAX_HEAP_SECTS) {    	ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");    }    phdr = GC_install_header(p);    if (0 == phdr) {    	/* This is extremely unlikely. Can't add it.  This will		*/    	/* almost certainly result in a	0 return from the allocator,	*/    	/* which is entirely appropriate.				*/    	return;    }    GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;    GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;    GC_n_heap_sects++;    phdr -> hb_sz = bytes;    phdr -> hb_flags = 0;    GC_freehblk(p);    GC_heapsize += bytes;    if ((ptr_t)p <= (ptr_t)GC_least_plausible_heap_addr        || GC_least_plausible_heap_addr == 0) {        GC_least_plausible_heap_addr = (void *)((ptr_t)p - sizeof(word));        	/* Making it a little smaller than necessary prevents	*/        	/* us from getting a false hit from the variable	*/        	/* itself.  There's some unintentional reflection	*/        	/* here.						
*/    }    if ((ptr_t)p + bytes >= (ptr_t)GC_greatest_plausible_heap_addr) {        GC_greatest_plausible_heap_addr = (void *)((ptr_t)p + bytes);    }}# if !defined(NO_DEBUGGING)void GC_print_heap_sects(void){    unsigned i;        GC_printf("Total heap size: %lu\n", (unsigned long) GC_heapsize);    for (i = 0; i < GC_n_heap_sects; i++) {        ptr_t start = GC_heap_sects[i].hs_start;        size_t len = GC_heap_sects[i].hs_bytes;        struct hblk *h;        unsigned nbl = 0;            	GC_printf("Section %d from %p to %p ", i,    		   start, start + len);    	for (h = (struct hblk *)start; h < (struct hblk *)(start + len); h++) {    	    if (GC_is_black_listed(h, HBLKSIZE)) nbl++;    	}    	GC_printf("%lu/%lu blacklisted\n", (unsigned long)nbl,    		   (unsigned long)(len/HBLKSIZE));    }}# endifvoid * GC_least_plausible_heap_addr = (void *)ONES;void * GC_greatest_plausible_heap_addr = 0;static INLINE ptr_t GC_max(ptr_t x, ptr_t y){    return(x > y? x : y);}static INLINE ptr_t GC_min(ptr_t x, ptr_t y){    return(x < y? x : y);}void GC_set_max_heap_size(GC_word n){    GC_max_heapsize = n;}GC_word GC_max_retries = 0;/* * this explicitly increases the size of the heap.  It is used * internally, but may also be invoked from GC_expand_hp by the user. * The argument is in units of HBLKSIZE. * Tiny values of n are rounded up. * Returns FALSE on failure. */GC_bool GC_expand_hp_inner(word n){    word bytes;    struct hblk * space;    word expansion_slop;	/* Number of bytes by which we expect the */    				/* heap to expand soon.			  
*/    if (n < MINHINCR) n = MINHINCR;    bytes = n * HBLKSIZE;    /* Make sure bytes is a multiple of GC_page_size */      {	word mask = GC_page_size - 1;	bytes += mask;	bytes &= ~mask;      }        if (GC_max_heapsize != 0 && GC_heapsize + bytes > GC_max_heapsize) {        /* Exceeded self-imposed limit */        return(FALSE);    }    space = GET_MEM(bytes);    if( space == 0 ) {	if (GC_print_stats) {	    GC_log_printf("Failed to expand heap by %ld bytes\n",		          (unsigned long)bytes);	}	return(FALSE);    }    if (GC_print_stats) {	GC_log_printf("Increasing heap size by %lu after %lu allocated bytes\n",	              (unsigned long)bytes,	              (unsigned long)GC_bytes_allocd);    }    expansion_slop = min_bytes_allocd() + 4*MAXHINCR*HBLKSIZE;    if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))        || (GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space)) {        /* Assume the heap is growing up */        GC_greatest_plausible_heap_addr =            (void *)GC_max((ptr_t)GC_greatest_plausible_heap_addr,                           (ptr_t)space + bytes + expansion_slop);    } else {        /* Heap is growing down */        GC_least_plausible_heap_addr =            (void *)GC_min((ptr_t)GC_least_plausible_heap_addr,                           (ptr_t)space - expansion_slop);    }#   if defined(LARGE_CONFIG)      if (((ptr_t)GC_greatest_plausible_heap_addr <= (ptr_t)space + bytes           || (ptr_t)GC_least_plausible_heap_addr >= (ptr_t)space)	  && GC_heapsize > 0) {	/* GC_add_to_heap will fix this, but ... 
*/	WARN("Too close to address space limit: blacklisting ineffective\n", 0);      }#   endif    GC_prev_heap_addr = GC_last_heap_addr;    GC_last_heap_addr = (ptr_t)space;    GC_add_to_heap(space, bytes);    /* Force GC before we are likely to allocate past expansion_slop */      GC_collect_at_heapsize =         GC_heapsize + expansion_slop - 2*MAXHINCR*HBLKSIZE;#     if defined(LARGE_CONFIG)        if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)         GC_collect_at_heapsize = (word)(-1);#     endif    return(TRUE);}/* Really returns a bool, but it's externally visible, so that's clumsy. *//* Arguments is in bytes.						*/int GC_expand_hp(size_t bytes){    int result;    DCL_LOCK_STATE;        LOCK();    if (!GC_is_initialized) GC_init_inner();    result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));    if (result) GC_requested_heapsize += bytes;    UNLOCK();    return(result);}unsigned GC_fail_count = 0;  			/* How many consecutive GC/expansion failures?	*/			/* Reset by GC_allochblk.			*/GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page){    if (!GC_incremental && !GC_dont_gc &&	((GC_dont_expand && GC_bytes_allocd > 0) || GC_should_collect())) {      GC_gcollect_inner();    } else {      word blocks_to_get = GC_heapsize/(HBLKSIZE*GC_free_space_divisor)      			   + needed_blocks;            if (blocks_to_get > MAXHINCR) {          word slop;          	  /* Get the minimum required to make it likely that we		*/	  /* can satisfy the current request in the presence of black-	*/	  /* listing.  This will probably be more than MAXHINCR.	
*/          if (ignore_off_page) {              slop = 4;          } else {	      slop = 2*divHBLKSZ(BL_LIMIT);	      if (slop > needed_blocks) slop = needed_blocks;	  }          if (needed_blocks + slop > MAXHINCR) {              blocks_to_get = needed_blocks + slop;          } else {              blocks_to_get = MAXHINCR;          }      }      if (!GC_expand_hp_inner(blocks_to_get)        && !GC_expand_hp_inner(needed_blocks)) {      	if (GC_fail_count++ < GC_max_retries) {      	    WARN("Out of Memory!  Trying to continue ...\n", 0);	    GC_gcollect_inner();	} else {#	    if !defined(AMIGA) || !defined(GC_AMIGA_FASTALLOC)	      WARN("Out of Memory!  Returning NIL!\n", 0);#	    endif	    return(FALSE);	}      } else {          if (GC_fail_count && GC_print_stats) {	      GC_printf("Memory available again ...\n");	  }      }    }    return(TRUE);}/* * Make sure the object free list for size gran (in granules) is not empty. * Return a pointer to the first object on the free list. * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER. * Assumes we hold the allocator lock and signals are disabled. * */ptr_t GC_allocobj(size_t gran, int kind){    void ** flh = &(GC_obj_kinds[kind].ok_freelist[gran]);    GC_bool tried_minor = FALSE;        if (gran == 0) return(0);    while (*flh == 0) {      ENTER_GC();      /* Do our share of marking work */        if(TRUE_INCREMENTAL) GC_collect_a_little_inner(1);      /* Sweep blocks for objects of this size */        GC_continue_reclaim(gran, kind);      EXIT_GC();      if (*flh == 0) {        GC_new_hblk(gran, kind);      }      if (*flh == 0) {        ENTER_GC();	if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED	    && ! tried_minor ) {	    GC_collect_a_little_inner(1);	    tried_minor = TRUE;	} else {          if (!GC_collect_or_expand((word)1,FALSE)) {	    EXIT_GC();	    return(0);	  }	}	EXIT_GC();      }    }    /* Successful allocation; reset failure count.	*/    GC_fail_count = 0;        return(*flh);}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -