malloc.c

Package: android-w.song.android.widget
Language: C
Page 1 of 3
#ifdef MALLOC_STATS
  _mstats.tbsplit++;
  _mstats.nsplit[nbuck]++;
#endif

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);
  nblks = binsize (nbuck) / siz;

  /* Split the block and put it on the requested chain. */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;
}

/* Take the memory block MP and add it to a chain < NU.  NU is the right bucket,
   but is busy.  This avoids memory orphaning. */
static void
xsplit (mp, nu)
     union mhead *mp;
     int nu;
{
  union mhead *nh;
  int nbuck, nblks, split_max;
  unsigned long siz;

  nbuck = nu - 1;
  while (nbuck >= SPLIT_MIN && busy[nbuck])
    nbuck--;
  if (nbuck < SPLIT_MIN)
    return;

#ifdef MALLOC_STATS
  _mstats.tbsplit++;
  _mstats.nsplit[nu]++;
#endif

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);			/* original block size */
  nblks = siz / binsize (nbuck);	/* should be 2 most of the time */

  /* And add it to nextf[nbuck] */
  siz = binsize (nbuck);		/* XXX - resetting here */
  nh = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nbuck;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  busy[nbuck] = 1;
  CHAIN (mp) = nextf[nbuck];
  nextf[nbuck] = nh;
  busy[nbuck] = 0;
}

static void
block_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigfillset (setp);
  sigemptyset (osetp);
  sigprocmask (SIG_BLOCK, setp, osetp);
#else
#  if defined (HAVE_BSD_SIGNALS)
  *osetp = sigsetmask (-1);
#  endif
#endif
}

static void
unblock_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigprocmask (SIG_SETMASK, osetp, (sigset_t *)NULL);
#else
#  if defined (HAVE_BSD_SIGNALS)
  sigsetmask (*osetp);
#  endif
#endif
}
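/* A minimal, self-contained sketch of the block_signals/unblock_signals
   pattern above, assuming POSIX signals (sigprocmask) are available.  The
   function protected_update and its counter are illustrative names only;
   they are not part of this allocator. */
#if 0	/* illustration only -- not compiled */
#include <signal.h>

static int update_count;

static void
protected_update (void)
{
  sigset_t set, oset;

  sigfillset (&set);			/* block every signal ... */
  sigprocmask (SIG_BLOCK, &set, &oset);	/* ... remembering the old mask */

  update_count++;			/* critical section: no handler runs here */

  sigprocmask (SIG_SETMASK, &oset, (sigset_t *)NULL);	/* restore the old mask */
}
#endif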
/* Return some memory to the system by reducing the break.  This is only
   called with NU > pagebucket, so we're always assured of giving back
   more than one page of memory. */
static void
lesscore (nu)			/* give system back some memory */
     register int nu;		/* size index we're discarding  */
{
  long siz;

  siz = binsize (nu);
  /* Should check for errors here, I guess. */
  sbrk (-siz);
  memtop -= siz;

#ifdef MALLOC_STATS
  _mstats.nsbrk++;
  _mstats.tsbrk -= siz;
  _mstats.nlesscore[nu]++;
#endif
}

/* Ask system for more memory; add to NEXTF[NU].  BUSY[NU] must be set to 1. */
static void
morecore (nu)
     register int nu;		/* size index to get more of  */
{
  register union mhead *mp;
  register int nblks;
  register long siz;
  long sbrk_amt;		/* amount to get via sbrk() */
  sigset_t set, oset;
  int blocked_sigs;

  /* Block all signals in case we are executed from a signal handler. */
  blocked_sigs = 0;
#ifdef SHELL
  if (interrupt_immediately || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
#endif
    {
      block_signals (&set, &oset);
      blocked_sigs = 1;
    }

  siz = binsize (nu);	/* size of desired block for nextf[nu] */

  if (siz < 0)
    goto morecore_done;		/* oops */

#ifdef MALLOC_STATS
  _mstats.nmorecore[nu]++;
#endif

  /* Try to split a larger block here, if we're within the range of sizes
     to split. */
  if (nu >= SPLIT_MIN)
    {
      bsplit (nu);
      if (nextf[nu] != 0)
        goto morecore_done;
    }

  /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1],
     if we can, and we're within the range of the block coalescing limits. */
  if (nu >= COMBINE_MIN && nu < COMBINE_MAX && busy[nu - 1] == 0 && nextf[nu - 1])
    {
      bcoalesce (nu);
      if (nextf[nu] != 0)
        goto morecore_done;
    }

  /* Take at least a page, and figure out how many blocks of the requested
     size we're getting. */
  if (siz <= pagesz)
    {
      sbrk_amt = pagesz;
      nblks = sbrk_amt / siz;
    }
  else
    {
      /* We always want to request an integral multiple of the page size
         from the kernel, so let's compute whether or not `siz' is such
         an amount.  If it is, we can just request it.  If not, we want
         the smallest integral multiple of pagesize that is larger than
         `siz' and will satisfy the request. */
      sbrk_amt = siz & (pagesz - 1);
      if (sbrk_amt == 0)
        sbrk_amt = siz;
      else
        sbrk_amt = siz + pagesz - sbrk_amt;
      nblks = 1;
    }

#ifdef MALLOC_STATS
  _mstats.nsbrk++;
  _mstats.tsbrk += sbrk_amt;
#endif

  mp = (union mhead *) sbrk (sbrk_amt);

  /* Totally out of memory. */
  if ((long)mp == -1)
    goto morecore_done;

  memtop += sbrk_amt;

  /* shouldn't happen, but just in case -- require 8-byte alignment */
  if ((long)mp & MALIGN_MASK)
    {
      mp = (union mhead *) (((long)mp + MALIGN_MASK) & ~MALIGN_MASK);
      nblks--;
    }

  /* save new header and link the nblks blocks together */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;

morecore_done:
  if (blocked_sigs)
    unblock_signals (&set, &oset);
}

static void
malloc_debug_dummy ()
{
  write (1, "malloc_debug_dummy\n", 19);
}

#define PREPOP_BIN	2
#define PREPOP_SIZE	32

static int
pagealign ()
{
  register int nunits;
  register union mhead *mp;
  long sbrk_needed;
  char *curbrk;

  pagesz = getpagesize ();
  if (pagesz < 1024)
    pagesz = 1024;

  /* OK, how much do we need to allocate to make things page-aligned?
     Some of this partial page will be wasted space, but we'll use as
     much as we can.  Once we figure out how much to advance the break
     pointer, go ahead and do it. */
  memtop = curbrk = sbrk (0);
  sbrk_needed = pagesz - ((long)curbrk & (pagesz - 1));	/* sbrk(0) % pagesz */
  if (sbrk_needed < 0)
    sbrk_needed += pagesz;

  /* Now allocate the wasted space. */
  if (sbrk_needed)
    {
#ifdef MALLOC_STATS
      _mstats.nsbrk++;
      _mstats.tsbrk += sbrk_needed;
#endif
      curbrk = sbrk (sbrk_needed);
      if ((long)curbrk == -1)
        return -1;
      memtop += sbrk_needed;

      /* Take the memory which would otherwise be wasted and populate the most
         popular bin (2 == 32 bytes) with it.  Add whatever we need to curbrk
         to make things 32-byte aligned, compute how many 32-byte chunks we're
         going to get, and set up the bin. */
      curbrk += sbrk_needed & (PREPOP_SIZE - 1);
      sbrk_needed -= sbrk_needed & (PREPOP_SIZE - 1);
      nunits = sbrk_needed / PREPOP_SIZE;

      if (nunits > 0)
        {
          mp = (union mhead *)curbrk;
          nextf[PREPOP_BIN] = mp;
          while (1)
            {
              mp->mh_alloc = ISFREE;
              mp->mh_index = PREPOP_BIN;
              if (--nunits <= 0) break;
              CHAIN(mp) = (union mhead *)((char *)mp + PREPOP_SIZE);
              mp = (union mhead *)((char *)mp + PREPOP_SIZE);
            }
          CHAIN(mp) = 0;
        }
    }

  /* compute which bin corresponds to the page size. */
  for (nunits = 7; nunits < NBUCKETS; nunits++)
    if (pagesz <= binsize(nunits))
      break;
  pagebucket = nunits;

  return 0;
}
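/* A minimal sketch of the rounding trick used in morecore () and pagealign ()
   above: because pagesz is a power of two, (x & (pagesz - 1)) is x % pagesz,
   so a request can be rounded up to the next multiple of the page size
   without a division.  round_to_page is an illustrative name, not part of
   this file. */
#if 0	/* illustration only -- not compiled */
static long
round_to_page (siz, pagesz)
     long siz, pagesz;		/* pagesz must be a power of two */
{
  long rem;

  rem = siz & (pagesz - 1);	/* bytes past the last page boundary */
  if (rem == 0)
    return siz;			/* already an integral number of pages */
  return siz + pagesz - rem;	/* e.g. siz = 5000, pagesz = 4096 -> 8192 */
}
#endif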
static PTR_T
internal_malloc (n, file, line, flags)		/* get a block */
     size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register int nunits;
  register char *m, *z;
  long nbytes;
  mguard_t mg;

  /* Get the system page size and align break pointer so future sbrks will
     be page-aligned.  The page size must be at least 1K -- anything
     smaller is increased. */
  if (pagesz == 0)
    if (pagealign () < 0)
      return ((PTR_T)NULL);

  /* Figure out how many bytes are required, rounding up to the nearest
     multiple of 8, then figure out which nextf[] area to use.  Try to
     be smart about where to start searching -- if the number of bytes
     needed is greater than the page size, we can start at pagebucket. */
  nbytes = ALLOCATED_BYTES(n);
  nunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket;
  for ( ; nunits < NBUCKETS; nunits++)
    if (nbytes <= binsize(nunits))
      break;

  /* Silently reject too-large requests. */
  if (nunits >= NBUCKETS)
    return ((PTR_T) NULL);

  /* In case this is reentrant use of malloc from signal handler,
     pick a block size that no other malloc level is currently
     trying to allocate.  That's the easiest harmless way not to
     interfere with the other level of execution.  */
#ifdef MALLOC_STATS
  if (busy[nunits]) _mstats.nrecurse++;
#endif
  while (busy[nunits]) nunits++;
  busy[nunits] = 1;

  if (nunits > maxbuck)
    maxbuck = nunits;

  /* If there are no blocks of the appropriate size, go get some */
  if (nextf[nunits] == 0)
    morecore (nunits);

  /* Get one block off the list, and set the new list head */
  if ((p = nextf[nunits]) == NULL)
    {
      busy[nunits] = 0;
      return NULL;
    }
  nextf[nunits] = CHAIN (p);
  busy[nunits] = 0;

  /* Check for free block clobbered */
  /* If not for this check, we would gobble a clobbered free chain ptr
     and bomb out on the NEXT allocate of this size block */
  if (p->mh_alloc != ISFREE || p->mh_index != nunits)
    xbotch ((PTR_T)(p+1), 0, _("malloc: block on free list clobbered"), file, line);

  /* Fill in the info, and set up the magic numbers for range checking. */
  p->mh_alloc = ISALLOC;
  p->mh_magic2 = MAGIC2;
  p->mh_nbytes = n;

  /* End guard */
  mg.i = n;
  z = mg.s;
  m = (char *) (p + 1) + n;
  *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

#ifdef MEMSCRAMBLE
  if (n)
    MALLOC_MEMSET ((char *)(p + 1), 0xdf, n);	/* scramble previous contents */
#endif

#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]++;
  _mstats.tmalloc[nunits]++;
  _mstats.nmal++;
  _mstats.bytesreq += n;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("malloc", p + 1, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_ALLOC, n);
#endif

  return (PTR_T) (p + 1);
}
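/* A minimal sketch of the "end guard" written above: the requested size n is
   copied byte-for-byte into the 4 bytes just past the caller's region, so a
   later check (for instance at free time) can detect writes that ran off the
   end of the block.  The guard union below is only assumed to resemble this
   file's mguard_t -- its real definition is not shown on this page -- and
   put_guard/guard_intact are illustrative helpers, not allocator entry points. */
#if 0	/* illustration only -- not compiled */
union guard {
  char s[4];
  unsigned int i;
};

static void
put_guard (region, n)
     char *region;		/* start of the caller-visible block */
     unsigned int n;		/* bytes the caller asked for */
{
  union guard mg;
  char *m, *z;

  mg.i = n;			/* encode the requested size */
  z = mg.s;
  m = region + n;		/* first byte past the caller's data */
  *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;
}

static int
guard_intact (region, n)	/* 1 if the guard still matches n */
     char *region;
     unsigned int n;
{
  union guard mg;
  char *m;

  mg.i = n;
  m = region + n;
  return m[0] == mg.s[0] && m[1] == mg.s[1] && m[2] == mg.s[2] && m[3] == mg.s[3];
}
#endif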
static void
internal_free (mem, file, line, flags)
     PTR_T mem;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register char *ap, *z;
  register int nunits;
  register unsigned int nbytes;
  int ubytes;		/* caller-requested size */
  mguard_t mg;

  if ((ap = (char *)mem) == 0)
    return;

  p = (union mhead *) ap - 1;

  if (p->mh_alloc == ISMEMALIGN)
    {
      ap -= p->mh_nbytes;
      p = (union mhead *) ap - 1;
    }

#if defined (MALLOC_TRACE) || defined (MALLOC_REGISTER)
  if (malloc_trace || malloc_register)
    ubytes = p->mh_nbytes;
#endif

  if (p->mh_alloc != ISALLOC)
