
allchblk.c (C)

This file is part of the Boehm-Demers-Weiser conservative garbage collector. It implements allocation and deallocation of large heap blocks: segregated free lists indexed by block count, block splitting, coalescing of adjacent free blocks, and (under USE_MUNMAP) unmapping of blocks that have gone unused. The listing below begins mid-file, inside the USE_MUNMAP loop that merges adjacent free blocks whose mapped states differ.
	GET_HDR(next, nexthdr);
	/* Coalesce with successor, if possible */
	  if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
	    nextsize = nexthdr -> hb_sz;
	    if (IS_MAPPED(hhdr)) {
	      GC_ASSERT(!IS_MAPPED(nexthdr));
	      /* make both consistent, so that we can merge */
	        if (size > nextsize) {
		  GC_remap((ptr_t)next, nextsize);
		} else {
		  GC_unmap((ptr_t)h, size);
		  hhdr -> hb_flags |= WAS_UNMAPPED;
		}
	    } else if (IS_MAPPED(nexthdr)) {
	      GC_ASSERT(!IS_MAPPED(hhdr));
	      if (size > nextsize) {
		GC_unmap((ptr_t)next, nextsize);
	      } else {
		GC_remap((ptr_t)h, size);
		hhdr -> hb_flags &= ~WAS_UNMAPPED;
		hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
	      }
	    } else {
	      /* Unmap any gap in the middle */
		GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
	    }
	    /* If they are both unmapped, we merge, but leave unmapped. */
	    GC_remove_from_fl(hhdr, i);
	    GC_remove_from_fl(nexthdr, FL_UNKNOWN);
	    hhdr -> hb_sz += nexthdr -> hb_sz;
	    GC_remove_header(next);
	    GC_add_to_fl(h, hhdr);
	    /* Start over at beginning of list */
	    h = GC_hblkfreelist[i];
	  } else /* not mergeable with successor */ {
	    h = hhdr -> hb_next;
	  }
      } /* while (h != 0) ... */
    } /* for ... */
}

#endif /* USE_MUNMAP */

/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
			        size_t bytes, int index)
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) {
	/* FIXME: This is likely to be very bad news ... */
	WARN("Header allocation failed: Dropping block.\n", 0);
	return(0);
    }
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
        hhdr -> hb_flags &= ~FREE_BLK;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}
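To make GC_get_first_part's splitting arithmetic concrete: the caller receives the first `bytes` of the free region, and the remainder is re-listed starting at h + bytes with size total_size - bytes (when total_size == bytes, the whole region is returned and no remainder header is installed). A minimal standalone sketch, not part of allchblk.c; the 4 KB HBLKSIZE and all addresses are illustrative assumptions:

    #include <stdio.h>

    #define HBLKSIZE 4096UL   /* assumed heap block size, for illustration */

    int main(void)
    {
        unsigned long h = 0x100000UL;            /* hypothetical free region start */
        unsigned long total_size = 4 * HBLKSIZE; /* region spans four heap blocks  */
        unsigned long bytes = 1 * HBLKSIZE;      /* size actually requested        */

        /* The split mirrors GC_get_first_part: the first part is returned, */
        /* the rest goes back on a free list.                               */
        unsigned long rest = h + bytes;
        unsigned long rest_size = total_size - bytes;

        printf("returned: %#lx, %lu bytes\n", h, bytes);
        printf("re-listed remainder: %#lx, %lu bytes\n", rest, rest_size);
        return 0;
    }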
/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, and move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
		    hdr *nhdr, int index /* Index of free list */)
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
      nhdr -> hb_prev = prev;
      nhdr -> hb_next = next;
      nhdr -> hb_sz = total_size - h_size;
      nhdr -> hb_flags = 0;
      if (0 != prev) {
	HDR(prev) -> hb_next = n;
      } else {
        GC_hblkfreelist[index] = n;
      }
      if (0 != next) {
	HDR(next) -> hb_prev = n;
      }
      INCR_FREE_BYTES(index, -(signed_word)h_size);
      FREE_ASSERT(GC_free_bytes[index] > 0);
#     ifdef GC_ASSERTIONS
	nhdr -> hb_flags &= ~FREE_BLK;
				/* Don't fail test for consecutive	*/
				/* free blocks in GC_add_to_fl.		*/
#     endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    nhdr -> hb_flags |= FREE_BLK;
}

struct hblk *
GC_allochblk_nth(size_t sz /* bytes */, int kind, unsigned flags, int n);

/*
 * Allocate (and return pointer to) a heap block
 *   for objects of size sz bytes, searching the nth free list.
 *
 * NOTE: We set obj_map field in header correctly.
 *       Caller is responsible for building an object freelist in block.
 *
 * The client is responsible for clearing the block, if necessary.
 */
struct hblk *
GC_allochblk(size_t sz, int kind, unsigned flags /* IGNORE_OFF_PAGE or 0 */)
{
    word blocks;
    int start_list;
    int i;

    GC_ASSERT((sz & (GRANULE_BYTES - 1)) == 0);
    blocks = OBJ_SZ_TO_BLOCKS(sz);
    start_list = GC_hblk_fl_from_blocks(blocks);
    for (i = start_list; i <= N_HBLK_FLS; ++i) {
	struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
	if (0 != result) {
	    return result;
	}
    }
    return 0;
}
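GC_allochblk's outer loop shows the segregated-fit strategy: compute the smallest free-list index that could hold the request, then search that list and every larger one via GC_allochblk_nth. A standalone sketch of that search order, assuming purely for illustration a power-of-two mapping from block count to list index; the real GC_hblk_fl_from_blocks is defined elsewhere and differs:

    #include <stdio.h>

    #define N_HBLK_FLS 28   /* assumed free-list count, for illustration */

    /* Hypothetical stand-in for GC_hblk_fl_from_blocks: smallest index i */
    /* such that 2^i blocks would cover the request.                      */
    static int fl_from_blocks(unsigned long blocks)
    {
        int i = 0;
        while ((1UL << i) < blocks && i < N_HBLK_FLS) ++i;
        return i;
    }

    int main(void)
    {
        unsigned long blocks = 6;   /* request spanning six heap blocks */
        int i;

        /* Same shape as the loop in GC_allochblk: start at the first */
        /* plausible list, fall through to larger ones on failure.    */
        for (i = fl_from_blocks(blocks); i <= N_HBLK_FLS; ++i)
            printf("search free list %d\n", i);
        return 0;
    }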
/*
 * The same, but with search restricted to nth free list.
 * Flags is IGNORE_OFF_PAGE or zero.
 * Unlike the above, sz is in bytes.
 */
struct hblk *
GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n)
{
    struct hblk *hbp;
    hdr * hhdr;			/* Header corr. to hbp			*/
    				/* Initialized after loop if hbp != 0.	*/
    				/* Gcc uninitialized use warning is bogus. */
    struct hblk *thishbp;
    hdr * thishdr;		/* Header corr. to thishbp		*/
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;	/* bytes available in this block	*/

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* search for a big enough block in free list */
	hbp = GC_hblkfreelist[n];
	for(; 0 != hbp; hbp = hhdr -> hb_next) {
	    GET_HDR(hbp, hhdr);
	    size_avail = hhdr->hb_sz;
	    if (size_avail < size_needed) continue;
	    if (size_avail != size_needed
		&& !GC_use_entire_heap
		&& !GC_dont_gc
		&& USED_HEAP_SIZE >= GC_requested_heapsize
		&& !TRUE_INCREMENTAL && GC_should_collect()) {
#		ifdef USE_MUNMAP
		    continue;
#		else
		    /* If we have enough large blocks left to cover any	*/
		    /* previous request for large blocks, we go ahead	*/
		    /* and split.  Assuming a steady state, that should	*/
		    /* be safe.  It means that we can use the full 	*/
		    /* heap if we allocate only small objects.		*/
		    if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
		      continue;
		    }
		    /* If we are deallocating lots of memory from	*/
		    /* finalizers, fail and collect sooner rather	*/
		    /* than later.					*/
		    if (GC_finalizer_bytes_freed > (GC_heapsize >> 4)) {
		      continue;
		    }
#		endif /* !USE_MUNMAP */
	    }
	    /* If the next heap block is obviously better, go on.	*/
	    /* This prevents us from disassembling a single large block */
	    /* to get tiny blocks.					*/
	    {
	      signed_word next_size;

	      thishbp = hhdr -> hb_next;
	      if (thishbp != 0) {
		GET_HDR(thishbp, thishdr);
	        next_size = (signed_word)(thishdr -> hb_sz);
	        if (next_size < size_avail
	          && next_size >= size_needed
	          && !GC_is_black_listed(thishbp, (word)size_needed)) {
	          continue;
	        }
	      }
	    }
	    if ( !IS_UNCOLLECTABLE(kind) &&
	         (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
	      struct hblk * lasthbp = hbp;
	      ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
	      signed_word orig_avail = size_avail;
	      signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE) ?
	      					HBLKSIZE
	      					: size_needed);

	      while ((ptr_t)lasthbp <= search_end
	             && (thishbp = GC_is_black_listed(lasthbp,
	             				      (word)eff_size_needed))
		        != 0) {
	        lasthbp = thishbp;
	      }
	      size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
	      thishbp = lasthbp;
	      if (size_avail >= size_needed) {
	        if (thishbp != hbp &&
		    0 != (thishdr = GC_install_header(thishbp))) {
		  /* Make sure it's mapped before we mangle it. */
#		    ifdef USE_MUNMAP
		      if (!IS_MAPPED(hhdr)) {
		        GC_remap((ptr_t)hbp, hhdr -> hb_sz);
		        hhdr -> hb_flags &= ~WAS_UNMAPPED;
		      }
#		    endif
	          /* Split the block at thishbp */
		      GC_split_block(hbp, hhdr, thishbp, thishdr, n);
		  /* Advance to thishbp */
		      hbp = thishbp;
		      hhdr = thishdr;
		      /* We must now allocate thishbp, since it may	*/
		      /* be on the wrong free list.			*/
		}
	      } else if (size_needed > (signed_word)BL_LIMIT
	                 && orig_avail - size_needed
			    > (signed_word)BL_LIMIT) {
	        /* Punt, since anything else risks unreasonable heap growth. */
		if (++GC_large_alloc_warn_suppressed
		    >= GC_large_alloc_warn_interval) {
	          WARN("Repeated allocation of very large block "
		       "(appr. size %ld):\n"
		       "\tMay lead to memory leak and poor performance.\n",
		       size_needed);
		  GC_large_alloc_warn_suppressed = 0;
		}
	        size_avail = orig_avail;
	      } else if (size_avail == 0 && size_needed == HBLKSIZE
			 && IS_MAPPED(hhdr)) {
		if (!GC_find_leak) {
	      	  static unsigned count = 0;

	      	  /* The block is completely blacklisted.  We need 	*/
	      	  /* to drop some such blocks, since otherwise we spend */
	      	  /* all our time traversing them if pointer-free	*/
	      	  /* blocks are unpopular.				*/
	          /* A dropped block will be reconsidered at next GC.	*/
	          if ((++count & 3) == 0) {
	            /* Allocate and drop the block in small chunks, to	*/
	            /* maximize the chance that we will recover some	*/
	            /* later.						*/
		      word total_size = hhdr -> hb_sz;
	              struct hblk * limit = hbp + divHBLKSZ(total_size);
	              struct hblk * h;
		      struct hblk * prev = hhdr -> hb_prev;

		      GC_large_free_bytes -= total_size;
		      GC_remove_from_fl(hhdr, n);
	              for (h = hbp; h < limit; h++) {
	                if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
	                  (void) setup_header(
	                	  hhdr, h,
	              		  HBLKSIZE,
	              		  PTRFREE, 0); /* Can't fail */
	              	  if (GC_debugging_started) {
	              	    BZERO(h, HBLKSIZE);
	              	  }
	                }
	              }
	            /* Restore hbp to point at free block */
		      hbp = prev;
		      if (0 == hbp) {
			return GC_allochblk_nth(sz, kind, flags, n);
		      }
	   	      hhdr = HDR(hbp);
	          }
		}
	      }
	    }
	    if( size_avail >= size_needed ) {
#		ifdef USE_MUNMAP
		  if (!IS_MAPPED(hhdr)) {
		    GC_remap((ptr_t)hbp, hhdr -> hb_sz);
		    hhdr -> hb_flags &= ~WAS_UNMAPPED;
		  }
#	        endif
		/* hbp may be on the wrong freelist; the parameter n	*/
		/* is important.					*/
		hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
		break;
	    }
	}

    if (0 == hbp) return 0;

    /* Add it to map of valid blocks */
    	if (!GC_install_counts(hbp, (word)size_needed)) return(0);
    	/* This leaks memory under very rare conditions. */

    /* Set up header */
        if (!setup_header(hhdr, hbp, sz, kind, flags)) {
            GC_remove_counts(hbp, (word)size_needed);
            return(0); /* ditto */
        }

    /* Notify virtual dirty bit implementation that we are about to write.   */
    /* Ensure that pointer-free objects are not protected if it's avoidable. */
    	GC_remove_protection(hbp, divHBLKSZ(size_needed),
			     (hhdr -> hb_descr == 0) /* pointer-free */);

    /* We just successfully allocated a block.  Restart count of	*/
    /* consecutive failures.						*/
    {
	extern unsigned GC_fail_count;

	GC_fail_count = 0;
    }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}
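The black-list scan inside GC_allochblk_nth walks forward through a candidate region, hopping over blacklisted sub-blocks, until either enough clean space remains for the request or the region is exhausted. A self-contained sketch of that scan using plain byte offsets instead of hblk pointers; the blacklist layout and all numbers are invented for illustration:

    #include <stdio.h>

    #define HBLKSIZE 4096UL

    /* Hypothetical blacklist query: like GC_is_black_listed, returns the  */
    /* next plausible start past a blacklisted region, or 0 if p is clean. */
    /* Here the first two blocks of the region are "blacklisted".          */
    static unsigned long next_clean_start(unsigned long p)
    {
        if (p < 2 * HBLKSIZE) return 2 * HBLKSIZE;
        return 0;
    }

    int main(void)
    {
        unsigned long start  = 0;             /* offset of candidate block */
        unsigned long avail  = 6 * HBLKSIZE;  /* bytes free at start       */
        unsigned long needed = 2 * HBLKSIZE;  /* bytes requested           */
        unsigned long search_end = start + avail - needed;
        unsigned long last = start, hit;

        /* Same shape as the while loop over GC_is_black_listed. */
        while (last <= search_end && (hit = next_clean_start(last)) != 0)
            last = hit;

        avail -= last - start;   /* usable bytes left after the dirty run */
        printf("clean space at offset %lu: %lu bytes available, %lu needed\n",
               last, avail, needed);
        return 0;
    }

In the real code, if the scan leaves too little room but the request and remainder are both still large (above BL_LIMIT), allocation is refused outright rather than risking unreasonable heap growth.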
struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(struct hblk *hbp)
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;

    GET_HDR(hbp, hhdr);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif

    /* Check for duplicate deallocation in the easy case */
      if (HBLK_IS_FREE(hhdr)) {
        GC_printf("Duplicate large block deallocation of %p\n", hbp);
	ABORT("Duplicate large block deallocation");
      }

    GC_ASSERT(IS_MAPPED(hhdr));
    hhdr -> hb_flags |= FREE_BLK;
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
      if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
	GC_remove_from_fl(nexthdr, FL_UNKNOWN);
	hhdr -> hb_sz += nexthdr -> hb_sz;
	GC_remove_header(next);
      }
    /* Coalesce with predecessor, if possible. */
      if (0 != prev) {
	prevhdr = HDR(prev);
	if (IS_MAPPED(prevhdr)) {
	  GC_remove_from_fl(prevhdr, FL_UNKNOWN);
	  prevhdr -> hb_sz += hhdr -> hb_sz;
#	  ifdef USE_MUNMAP
	    prevhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#	  endif
	  GC_remove_header(hbp);
	  hbp = prev;
	  hhdr = prevhdr;
	}
      }
    /* FIXME: It is not clear we really always want to do these merges	*/
    /* with -DUSE_MUNMAP, since it updates ages and hence prevents	*/
    /* unmapping.							*/

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}
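GC_freehblk finds its merge partners purely by address arithmetic: the successor is whatever block starts exactly at hbp + size, and the predecessor (located via GC_free_block_ending_at) is a free block ending exactly at hbp. A minimal sketch of that coalescing logic with made-up offsets standing in for block addresses:

    #include <stdio.h>

    int main(void)
    {
        unsigned long prev_start = 0,     prev_sz = 4096;  /* free predecessor  */
        unsigned long hbp        = 4096,  sz      = 8192;  /* block being freed */
        unsigned long next_start = 12288, next_sz = 4096;  /* free successor    */

        /* Successor merge: next_start == hbp + sz, so absorb it. */
        if (next_start == hbp + sz) sz += next_sz;

        /* Predecessor merge: prev_start + prev_sz == hbp, so extend prev */
        /* and let the merged block take over its start address.          */
        if (prev_start + prev_sz == hbp) {
            prev_sz += sz;
            hbp = prev_start;
            sz = prev_sz;
        }

        printf("coalesced block at %lu, size %lu\n", hbp, sz);  /* 0, 16384 */
        return 0;
    }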
