⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 gmalloc.c

📁 UNIX下SH的实现源码
💻 C
📖 第 1 页 / 共 3 页
字号:
	      /* (Tail of `morecore': the path that grows the heap-info
		 table when new core will not fit under the old one.)  */
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((size_t) BLOCK ((char *) newinfo + newsize * sizeof (malloc_info)) < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  default_morecore (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0, (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;
      register_heapinfo ();

      /* Reset _heaplimit so ifree never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      ifree (oldinfo);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  This is the internal worker behind
   `malloc': requests of at most BLOCKSIZE / 2 bytes are served from a
   power-of-two fragment of a single block; larger requests take one or
   more whole blocks off the circular free list, asking the system for
   more core when the list has no big-enough run.  Returns NULL only
   when more core cannot be obtained.  */
static genptr_t
imalloc (size)
     size_t size;
{
  genptr_t result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */
#if 0
  if (size == 0)
    return NULL;
#endif

  /* Every free chunk must be able to hold the free-list links,
     so never hand out anything smaller than a `struct list'.  */
  if (size < sizeof (struct list))
    size = sizeof (struct list);

#ifdef SUNOS_LOCALTIME_BUG
  if (size < 16)
    size = 16;
#endif

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      /* After this loop, 1 << log is the smallest power of two
	 that is >= the (adjusted) request size.  */
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;

      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = (genptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  /* `first' is the new head fragment's index within its block:
	     the (char *) NULL subtraction is an old-style pointer-to-
	     integer conversion, then offset-within-block >> log.  */
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++chunks_used;
	  bytes_used += 1 << log;
	  --chunks_free;
	  bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
	  result = imalloc (BLOCKSIZE);
	  if (result == NULL)
	    return NULL;

	  /* Link all fragments but the first into the free list.
	     Fragment 1 is pushed first; the loop then pushes fragments
	     2 .. (BLOCKSIZE >> log) - 1 on the head of _fraghead[log].  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  chunks_free += (BLOCKSIZE >> log) - 1;
	  bytes_free += BLOCKSIZE - (1 << log);
	  bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		return NULL;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++chunks_free;
	      bytes_free += wantblocks * BLOCKSIZE;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++chunks_used;
      bytes_used += blocks * BLOCKSIZE;
      bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  return result;
}

/* Public entry point.  Counts the call, lazily initializes the
   allocator, then delegates to `imalloc'.  Under RCHECK a header and a
   trailing magic byte are added around the user region and the region
   is flooded, to catch overruns and use of uninitialized memory.  */
genptr_t
malloc (size)
     size_t size;
{
#ifdef RCHECK
  struct hdr *hdr;
#endif

  nmalloc++;

  if (malloc_initialized == 0 && malloc_initialize () == 0)
    return NULL;

#ifdef RCHECK
  hdr = (struct hdr *) imalloc (sizeof (struct hdr) + size + 1);
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  zmemset ((genptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (genptr_t) (hdr + 1);
#else
  return (imalloc (size));
#endif
}

/* Free a block of memory allocated by `malloc'. */
/* Return memory to the heap.
*/
/* Internal worker behind `free': PTR must have come from `imalloc'
   (or be NULL, which is a no-op).  A fragment is threaded back onto its
   per-size fragment list; a whole-block allocation is coalesced into the
   circular free list, after which the info table may be relocated and
   trailing free core handed back to the system.  */
static void
ifree (ptr)
     genptr_t ptr;
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  genptr_t curbrk;
  size_t lesscore_threshold;
  register struct alignlist *l;

  if (ptr == NULL)
    return;

  /* Threshold of free space at which we will return some to the system.  */
  lesscore_threshold = FINAL_FREE_BLOCKS + 2 * malloc_extra_blocks;

  /* If PTR is an aligned alias, translate it back to the exact
     address that was originally allocated.  */
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }

  block = BLOCK (ptr);

  /* type == 0 means whole-block allocation; any other value is the
     log2 fragment size recorded by imalloc's small path.  */
  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --chunks_used;
      bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?
	 (default_morecore (0) reports the break without moving it.)  */
      curbrk = default_morecore (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      ifree (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.
		 NOTE(review): the imalloc result is used without a NULL
		 check; the conditions above presumably guarantee the just-
		 freed space satisfies this request — confirm.  */
	      newinfo = (malloc_info *) imalloc (info_blocks
						 * BLOCKSIZE);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      default_morecore (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --chunks_free;
	      bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Freeing a fragment of size 1 << type.
	 Do some of the statistics.  */
      --chunks_used;
      bytes_used -= 1 << type;
      ++chunks_free;
      bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++chunks_used;
	  bytes_used += BLOCKSIZE;
	  chunks_free -= BLOCKSIZE >> type;
	  bytes_free -= BLOCKSIZE;

	  /* Recurse to free the block through the whole-block path.  */
	  ifree (ADDRESS (block));
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.
	     (The (char *) NULL subtraction is an old-style pointer-to-
	     integer conversion: offset within block >> type.)  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -