📄 malloc.c
字号:
/* NOTE(review): this chunk begins in the middle of morecore(); the function
   header and the declarations of mp, nu, nblks, siz, sbrk_amt, and the saved
   signal mask (oldmask / oset) are above the visible portion of the file.
   What follows is the tail of morecore(): grab memory from the kernel with
   sbrk(), carve it into nblks blocks of siz bytes, and thread them onto the
   bucket-nu free list.  Source layout was collapsed; only comments added. */
#ifdef MALLOC_STATS
  _mstats.nsbrk++;
  _mstats.tsbrk += sbrk_amt;
#endif
  mp = (union mhead *) sbrk (sbrk_amt);

  /* Totally out of memory. */
  if ((long)mp == -1)
    goto morecore_done;

  /* shouldn't happen, but just in case -- require 8-byte alignment */
  if ((long)mp & 7)
    {
      /* Round up to the next 8-byte boundary; one block's worth of the
	 region is sacrificed to pay for the alignment slop. */
      mp = (union mhead *) (((long)mp + 8) & ~7);
      nblks--;
    }

  /* save new header and link the nblks blocks together */
  nextf[nu] = mp;
  while (1)
    {
      /* Mark each carved block free and stamp it with its bucket index,
	 then chain it to the next block siz bytes further on. */
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;		/* terminate the new free list */

morecore_done:
  /* Restore the signal mask that was blocked at the top of morecore()
     (above this view), using whichever signal API the platform has. */
#if defined (HAVE_BSD_SIGNALS)
  sigsetmask (oldmask);
#else
# if defined (HAVE_POSIX_SIGNALS)
  sigprocmask (SIG_SETMASK, &oset, (sigset_t *)NULL);
# else
  ;	/* nothing to do, but need a null statement before the brace */
# endif
#endif /* HAVE_BSD_SIGNALS */
}

#if defined (MEMSCRAMBLE) || !defined (NO_CALLOC)
/* Fill the N bytes at S with the byte C and return S.  Private stand-in
   for memset(), used so this allocator does not depend on the C library
   it may itself be replacing. */
static char *
zmemset (s, c, n)
     char *s;
     int c;
     register int n;
{
  register char *sp;

  sp = s;
  while (--n >= 0)
    *sp++ = c;
  return (s);
}
#endif /* MEMSCRAMBLE || !NO_CALLOC */

/* Debugging hook: writes a marker string to fd 1.  Presumably exists so a
   breakpoint or the write itself is observable -- callers not visible in
   this chunk; TODO confirm who references it. */
static void
malloc_debug_dummy ()
{
  write (1, "malloc_debug_dummy\n", 19);
}

/* Allocate at least N bytes and return a pointer to the usable region,
   which immediately follows a union mhead header.  Blocks come from
   power-of-two buckets threaded through nextf[]; the busy[] flags make
   the bucket lists safe against reentrant calls from signal handlers.
   Returns NULL only when sbrk() fails and no block can be found. */
PTR_T
malloc (n)		/* get a block */
     size_t n;
{
  register union mhead *p;
  register long nbytes;
  register int nunits;

  /* Get the system page size and align break pointer so everything will
     be page-aligned.  The page size must be at least 1K -- anything
     smaller is increased. */
  if (pagesz == 0)
    {
      register long sbrk_needed;

      pagesz = getpagesize ();
      if (pagesz < 1024)
	pagesz = 1024;
      /* OK, how much do we need to allocate to make things page-aligned?
	 This partial page is wasted space.  Once we figure out how much
	 to advance the break pointer, go ahead and do it. */
      sbrk_needed = pagesz - ((long)sbrk (0) & (pagesz - 1));	/* sbrk(0) % pagesz */
      if (sbrk_needed < 0)
	sbrk_needed += pagesz;
      /* Now allocate the wasted space. */
      if (sbrk_needed)
	{
#ifdef MALLOC_STATS
	  _mstats.nsbrk++;
	  _mstats.tsbrk += sbrk_needed;
#endif
	  if ((long)sbrk (sbrk_needed) == -1)
	    return (NULL);
	}
      /* Compute pagebucket = log2(pagesz) - 3, i.e. the bucket index whose
	 block size equals one page (bucket i holds blocks of 8 << i bytes,
	 judging from the sizing arithmetic below). */
      nunits = 0;
      nbytes = 8;
      while (pagesz > nbytes)
	{
	  nbytes <<= 1;
	  nunits++;
	}
      pagebucket = nunits;
    }

  /* Figure out how many bytes are required, rounding up to the nearest
     multiple of 4, then figure out which nextf[] area to use.  Try to
     be smart about where to start searching -- if the number of bytes
     needed is greater than the page size, we can start at pagebucket. */
  nbytes = (n + sizeof *p + MSLOP + 3) & ~3;	/* header + MSLOP guard bytes */
  nunits = 0;
  if (nbytes <= (pagesz >> 1))
    {
      register unsigned int shiftr;

      shiftr = (nbytes - 1) >> 2;	/* == (nbytes - 1) / 4 */
      while (shiftr >>= 1)		/* == (nbytes - 1) / {8,16,32,...} */
	nunits++;
    }
  else
    {
      register u_bits32_t amt;

      /* Request is at least half a page: start the search at the bucket
	 that holds one-page blocks instead of walking up from 0. */
      nunits = pagebucket;
      amt = pagesz;
      while (nbytes > amt)
	{
	  amt <<= 1;
	  nunits++;
	}
    }

  /* In case this is reentrant use of malloc from signal handler,
     pick a block size that no other malloc level is currently
     trying to allocate.  That's the easiest harmless way not to
     interfere with the other level of execution. */
#ifdef MALLOC_STATS
  if (busy[nunits]) _mstats.nrecurse++;
#endif
  while (busy[nunits]) nunits++;	/* skip to a bucket not in use */
  busy[nunits] = 1;

  if (nunits > maxbuck)
    maxbuck = nunits;

  /* If there are no blocks of the appropriate size, go get some */
  if (nextf[nunits] == 0)
    morecore (nunits);

  /* Get one block off the list, and set the new list head */
  if ((p = nextf[nunits]) == NULL)
    {
      /* morecore() failed -- release the bucket before reporting OOM. */
      busy[nunits] = 0;
      return NULL;
    }
  nextf[nunits] = CHAIN (p);
  busy[nunits] = 0;

  /* Check for free block clobbered */
  /* If not for this check, we would gobble a clobbered free chain ptr
     and bomb out on the NEXT allocate of this size block */
  if (p->mh_alloc != ISFREE || p->mh_index != nunits)
    botch ("malloc: block on free list clobbered");

  /* Fill in the info, and if range checking, set up the magic numbers */
  p->mh_alloc = ISALLOC;
  p->mh_nbytes = n;		/* record the REQUESTED size, not bucket size */
  p->mh_magic2 = MAGIC2;
  {
    /* Plant four MAGIC1 guard bytes right after the user's n bytes;
       free() verifies them to catch overruns. */
    register char *m = (char *) (p + 1) + n;

    *m++ = MAGIC1, *m++ = MAGIC1, *m++ = MAGIC1, *m = MAGIC1;
  }

#ifdef MEMSCRAMBLE
  zmemset ((char *)(p + 1), 0xdf, n);	/* scramble previous contents */
#endif
#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]++;
  _mstats.tmalloc[nunits]++;
  _mstats.nmal++;
#endif /* MALLOC_STATS */
  return (char *) (p + 1);	/* XXX - should be cast to PTR_T? */
}

/* Return MEM, previously obtained from malloc/realloc/memalign, to its
   bucket's free list.  Verifies the header state and the MAGIC1 guard
   bytes, aborting via botch() on double free, bad pointer, or overrun. */
void
free (mem)
     PTR_T mem;
{
  register union mhead *p;
  register char *ap;
  register int nunits;

  if ((ap = (char *)mem) == 0)
    return;

  p = (union mhead *) ap - 1;

  /* A memalign'd block carries the offset back to the real start of the
     allocation in mh_nbytes; step back to the true header. */
  if (p->mh_alloc == ISMEMALIGN)
    {
      ap -= p->mh_nbytes;
      p = (union mhead *) ap - 1;
    }

  if (p->mh_alloc != ISALLOC)
    {
      if (p->mh_alloc == ISFREE)
	botch ("free: called with already freed block argument");
      else
	botch ("free: called with unallocated block argument");
    }

  ASSERT (p->mh_magic2 == MAGIC2);
  /* Verify the four trailing MAGIC1 guard bytes written by malloc(). */
  ap += p->mh_nbytes;
  ASSERT (*ap++ == MAGIC1);
  ASSERT (*ap++ == MAGIC1);
  ASSERT (*ap++ == MAGIC1);
  ASSERT (*ap == MAGIC1);

#ifdef MEMSCRAMBLE
  zmemset (mem, 0xcf, p->mh_nbytes);	/* poison freed contents */
#endif

  nunits = p->mh_index;
  ASSERT (nunits < NBUCKETS);
  p->mh_alloc = ISFREE;

#if 0
  if (busy[nunits] == 1)
    botch ("calling free %d while in malloc for %d", nunits, nunits);
#endif

  /* Protect against signal handlers calling malloc. */
  busy[nunits] = 1;
  /* Put this block on the free list. */
  CHAIN (p) = nextf[nunits];
  nextf[nunits] = p;
  busy[nunits] = 0;

#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]--;
  _mstats.nfre++;
#endif /* MALLOC_STATS */
}

/* Resize MEM to N bytes.  realloc(mem, 0) frees and returns NULL;
   realloc(NULL, n) behaves like malloc(n).  If the new size still fits
   the block's power-of-two bucket the block is reused in place (guard
   bytes are relocated); otherwise a new block is allocated, the lesser
   of old/new sizes copied, and the old block freed. */
PTR_T
realloc (mem, n)
     PTR_T mem;
     register size_t n;
{
  register union mhead *p;
  register u_bits32_t tocopy;
  register unsigned int nbytes;
  register int nunits;
  register char *m;

#ifdef MALLOC_STATS
  _mstats.nrealloc++;
#endif

  if (n == 0)
    {
      free (mem);
      return (NULL);
    }
  if ((p = (union mhead *) mem) == 0)
    return malloc (n);
  p--;				/* back up to the block header */
  nunits = p->mh_index;
  ASSERT (p->mh_alloc == ISALLOC);
  ASSERT (p->mh_magic2 == MAGIC2);

  /* Check the trailing guard bytes before touching the block. */
  m = (char *)mem + (tocopy = p->mh_nbytes);
  ASSERT (*m++ == MAGIC1);
  ASSERT (*m++ == MAGIC1);
  ASSERT (*m++ == MAGIC1);
  ASSERT (*m == MAGIC1);

  /* See if desired size rounds to same power of 2 as actual size. */
  nbytes = (n + sizeof *p + MSLOP + 7) & ~7;

  /* If ok, use the same block, just marking its size as changed.
     Bucket nunits holds blocks of (8 << nunits) bytes, so "same bucket"
     means nbytes is in (4 << nunits, 8 << nunits]. */
  if (nbytes > (4 << nunits) && nbytes <= (8 << nunits))
    {
      /* Clear the old guard bytes, record the new size, and plant fresh
	 guard bytes at the new end of the user region. */
      m = (char *)mem + tocopy;
      *m++ = 0;  *m++ = 0;  *m++ = 0;  *m++ = 0;
      p->mh_nbytes = n;
      m = (char *)mem + n;
      *m++ = MAGIC1;  *m++ = MAGIC1;  *m++ = MAGIC1;  *m++ = MAGIC1;
      return mem;
    }

#ifdef MALLOC_STATS
  _mstats.nrcopy++;
#endif

  if (n < tocopy)
    tocopy = n;		/* shrinking: copy only the new, smaller size */
  if ((m = malloc (n)) == 0)
    return 0;
  FASTCOPY (mem, m, tocopy);
  free (mem);
  return m;
}

/* Return a block of SIZE bytes whose address is a multiple of ALIGNMENT.
   Over-allocates by ALIGNMENT, then (if needed) marks an aligned interior
   address ISMEMALIGN with the offset back to the real block so free() can
   recover the true start.
   NOTE(review): casts pointers through (int) -- truncates on LP64
   platforms; should be long/intptr_t.  Presumably predates 64-bit
   targets -- flagged, not changed. */
PTR_T
memalign (alignment, size)
     unsigned int alignment;
     size_t size;
{
  register char *ptr;
  register char *aligned;
  register union mhead *p;

  ptr = malloc (size + alignment);

  if (ptr == 0)
    return 0;
  /* If entire block has the desired alignment, just accept it. */
  if (((int) ptr & (alignment - 1)) == 0)
    return ptr;
  /* Otherwise, get address of byte in the block that has that alignment. */
  aligned = (char *) (((int) ptr + alignment - 1) & -alignment);

  /* Store a suitable indication of how to free the block,
     so that free can find the true beginning of it. */
  p = (union mhead *) aligned - 1;
  p->mh_nbytes = aligned - ptr;	/* offset back to malloc'd start */
  p->mh_alloc = ISMEMALIGN;
  return aligned;
}

#if !defined (HPUX)
/* This runs into trouble with getpagesize on HPUX, and Multimax machines.
   Patching out seems cleaner than the ugly fix needed. */
/* Return SIZE bytes aligned on a page boundary. */
PTR_T
valloc (size)
     size_t size;
{
  return memalign (getpagesize (), size);
}
#endif /* !HPUX */

#ifndef NO_CALLOC
/* Allocate an array of N elements of S bytes each, zero-filled.
   NOTE(review): n * s is not checked for overflow before the multiply;
   a huge n/s pair could wrap and under-allocate -- flagged, not changed. */
PTR_T
calloc (n, s)
     size_t n, s;
{
  size_t total;
  char *result;

  total = n * s;
  result = malloc (total);
  if (result)
    zmemset (result, 0, total);
  return result;
}

/* Historical synonym for free(). */
void
cfree (p)
     PTR_T p;
{
  free (p);
}
#endif /* !NO_CALLOC */

#ifdef MALLOC_STATS

/* Return a struct bucket_stats describing bucket SIZE: block size
   (8 << size bytes), counts in use / ever allocated / morecore calls /
   splits from _mstats, and the current free-list length (counted by
   walking the chain).  Out-of-range SIZE yields an all-zero record. */
struct bucket_stats
malloc_bucket_stats (size)
     int size;
{
  struct bucket_stats v;
  register union mhead *p;

  v.nfree = 0;

  if (size < 0 || size >= NBUCKETS)
    {
      v.blocksize = 0;
      v.nused = v.nmal = v.nmorecore = v.nsplit = 0;
      return v;
    }

  v.blocksize = 1 << (size + 3);
  v.nused = _mstats.nmalloc[size];
  v.nmal = _mstats.tmalloc[size];
  v.nmorecore = _mstats.nmorecore[size];
  v.nsplit = _mstats.nsplit[size];

  for (p = nextf[size]; p; p = CHAIN (p))
    v.nfree++;

  return v;
}

/* Return a copy of _MSTATS, with two additional fields filled in:
   BYTESFREE is the total number of bytes on free lists.  BYTESUSED
   is the total number of bytes in use.  These two fields are fairly
   expensive to compute, so we do it only when asked to. */
struct _malstats
malloc_stats ()
{
  struct _malstats result;
  struct bucket_stats v;
  register int i;

  result = _mstats;
  result.bytesused = result.bytesfree = 0;
  for (i = 0; i < NBUCKETS; i++)
    {
      v = malloc_bucket_stats (i);
      result.bytesfree += v.nfree * v.blocksize;
      result.bytesused += v.nused * v.blocksize;
    }
  return (result);
}

/* Write a per-bucket allocation report, labeled S, to stream FP. */
static void
_print_malloc_stats (s, fp)
     char *s;
     FILE *fp;
{
  register int i;
  int totused, totfree;
  struct bucket_stats v;

  fprintf (fp, "Memory allocation statistics: %s\n\tsize\tfree\tin use\ttotal\tmorecore\tsplit\n", s ? s : "");
  for (i = totused = totfree = 0; i < NBUCKETS; i++)
    {
      v = malloc_bucket_stats (i);
      fprintf (fp, "%12lu\t%4d\t%6d\t%5d\t%8d\t%5d\n", v.blocksize, v.nfree, v.nused, v.nmal, v.nmorecore, v.nsplit);
      totfree += v.nfree * v.blocksize;
      totused += v.nused * v.blocksize;
    }
  fprintf (fp, "\nTotal bytes in use: %d, total bytes free: %d\n",
	   totused, totfree);
  fprintf (fp, "Total mallocs: %d, total frees: %d, total reallocs: %d (%d copies)\n",
	   _mstats.nmal, _mstats.nfre, _mstats.nrealloc, _mstats.nrcopy);
  fprintf (fp, "Total sbrks: %d, total bytes via sbrk: %d\n",
	   _mstats.nsbrk, _mstats.tsbrk);
  fprintf (fp, "Total blocks split: %d, total block coalesces: %d\n",
	   _mstats.tbsplit, _mstats.tbcoalesce);
}

/* Print the allocation report, labeled S, to stderr. */
void
print_malloc_stats (s)
{
  _print_malloc_stats (s, stderr);
}

#define TRACEROOT "/var/tmp/maltrace/trace."

extern char *inttostr ();

/* Dump the allocation report to /var/tmp/maltrace/trace.<pid>.
   NOTE(review): fname[64] is filled with unbounded strcpy/strcat; safe
   only while TRACEROOT plus a pid string fits in 64 bytes -- flagged,
   not changed.  Writing under the world-writable /var/tmp with a
   predictable name is also a classic symlink-attack risk. */
void
trace_malloc_stats (s)
{
  char ibuf[32], *ip;
  char fname[64];
  int p;
  FILE *fp;

  p = (int)getpid();
  ip = inttostr(p, ibuf, sizeof(ibuf));
  strcpy (fname, TRACEROOT);
  strcat (fname, ip);
  fp = fopen(fname, "w");
  if (fp)
    {
      _print_malloc_stats (s, fp);
      fflush(fp);
      fclose(fp);
    }
}
#endif /* MALLOC_STATS */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -