malloc.c
    {
      if (p->mh_alloc == ISFREE)
        xbotch (mem, ERR_DUPFREE,
                _("free: called with already freed block argument"), file, line);
      else
        xbotch (mem, ERR_UNALLOC,
                _("free: called with unallocated block argument"), file, line);
    }

  ASSERT (p->mh_magic2 == MAGIC2);

  nunits = p->mh_index;
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */
  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
            _("free: underflow detected; mh_nbytes out of range"), file, line);

  ap += p->mh_nbytes;
  z = mg.s;
  *z++ = *ap++, *z++ = *ap++, *z++ = *ap++, *z++ = *ap++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED,
            _("free: start and end chunk sizes differ"), file, line);

#if 1
  if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == memtop))
#else
  if (((char *)p + binsize(nunits) == memtop) && nunits >= LESSCORE_MIN)
#endif
    {
      /* If above LESSCORE_FRC, give back unconditionally.  This should be set
         high enough to be infrequently encountered.  If between LESSCORE_MIN
         and LESSCORE_FRC, call lesscore if the bucket is marked as busy or if
         there's already a block on the free list. */
      if ((nunits >= LESSCORE_FRC) || busy[nunits] || nextf[nunits] != 0)
        {
          lesscore (nunits);	/* keeps the tracing and registering code in one place */
          goto free_return;
        }
    }

#ifdef MEMSCRAMBLE
  if (p->mh_nbytes)
    MALLOC_MEMSET (mem, 0xcf, p->mh_nbytes);
#endif

  ASSERT (nunits < NBUCKETS);

  if (busy[nunits] == 1)
    {
      xsplit (p, nunits);	/* split block and add to different chain */
      goto free_return;
    }

  p->mh_alloc = ISFREE;
  /* Protect against signal handlers calling malloc. */
  busy[nunits] = 1;

  /* Put this block on the free list. */
  CHAIN (p) = nextf[nunits];
  nextf[nunits] = p;
  busy[nunits] = 0;

free_return:	;	/* Empty statement in case this is the end of the function */

#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]--;
  _mstats.nfre++;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_free (mem, ubytes, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (mem, file, line, W_FREE, ubytes);
#endif
}
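/* Illustrative sketch (not part of malloc.c): the check above reads the four
   bytes stored just past the user region into the mguard_t union and compares
   them with mh_nbytes, so a write one byte past the requested size is caught
   at free time.  The standalone program below reimplements that trailing-guard
   technique with hypothetical names (guard_t, put_guard, check_guard) and the
   system malloc; it does not use this allocator's real data structures. */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef union {
  unsigned int i;                 /* size viewed as an integer */
  char s[sizeof (unsigned int)];  /* same bytes, addressable one at a time */
} guard_t;

/* Append the request size after the usable region.  The copy is byte-wise
   because the guard location is not necessarily aligned for an unsigned int. */
static void
put_guard (char *mem, unsigned int nbytes)
{
  guard_t mg;
  mg.i = nbytes;
  memcpy (mem + nbytes, mg.s, sizeof (mg.s));
}

/* Re-read the trailing guard and compare it with the recorded size. */
static int
check_guard (char *mem, unsigned int nbytes)
{
  guard_t mg;
  memcpy (mg.s, mem + nbytes, sizeof (mg.s));
  return mg.i == nbytes;
}

int
main (void)
{
  unsigned int n = 13;
  char *mem = malloc (n + sizeof (guard_t));

  put_guard (mem, n);
  mem[n] = 'x';			/* simulate a one-byte overflow */
  if (check_guard (mem, n) == 0)
    fprintf (stderr, "start and end chunk sizes differ\n");
  free (mem);
  return 0;
}
#endif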
static PTR_T
internal_realloc (mem, n, file, line, flags)
     PTR_T mem;
     register size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register u_bits32_t tocopy;
  register unsigned int nbytes;
  register int nunits;
  register char *m, *z;
  mguard_t mg;

#ifdef MALLOC_STATS
  _mstats.nrealloc++;
#endif

  if (n == 0)
    {
      internal_free (mem, file, line, MALLOC_INTERNAL);
      return (NULL);
    }
  if ((p = (union mhead *) mem) == 0)
    return internal_malloc (n, file, line, MALLOC_INTERNAL);

  p--;
  nunits = p->mh_index;
  ASSERT (nunits < NBUCKETS);

  if (p->mh_alloc != ISALLOC)
    xbotch (mem, ERR_UNALLOC,
            _("realloc: called with unallocated block argument"), file, line);

  ASSERT (p->mh_magic2 == MAGIC2);
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */
  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
            _("realloc: underflow detected; mh_nbytes out of range"), file, line);

  m = (char *)mem + (tocopy = p->mh_nbytes);
  z = mg.s;
  *z++ = *m++, *z++ = *m++, *z++ = *m++, *z++ = *m++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED,
            _("realloc: start and end chunk sizes differ"), file, line);

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_REALLOC, n);
#endif
#ifdef MALLOC_STATS
  _mstats.bytesreq += (n < tocopy) ? 0 : n - tocopy;
#endif

  /* See if desired size rounds to same power of 2 as actual size. */
  nbytes = ALLOCATED_BYTES(n);

  /* If ok, use the same block, just marking its size as changed. */
  if (RIGHT_BUCKET(nbytes, nunits))
    {
#if 0
      m = (char *)mem + p->mh_nbytes;
#else
      /* Compensate for increment above. */
      m -= 4;
#endif
      *m++ = 0;  *m++ = 0;  *m++ = 0;  *m++ = 0;
      m = (char *)mem + (p->mh_nbytes = n);

      mg.i = n;
      z = mg.s;
      *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

      return mem;
    }

  if (n < tocopy)
    tocopy = n;

#ifdef MALLOC_STATS
  _mstats.nrcopy++;
#endif

  if ((m = internal_malloc (n, file, line, MALLOC_INTERNAL|MALLOC_NOTRACE|MALLOC_NOREG)) == 0)
    return 0;
  FASTCOPY (mem, m, tocopy);
  internal_free (mem, file, line, MALLOC_INTERNAL);

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("realloc", m, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (m, file, line, W_RESIZED, n);
#endif

  return m;
}
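/* Illustrative sketch (not part of malloc.c): internal_realloc above avoids a
   copy when the new request still rounds to the chunk's current bucket, and
   only falls back to malloc + copy + free otherwise.  The hypothetical helper
   below (grow_or_move, bucket_size, and the 32-byte minimum are all assumed
   names/values) shows that shape for a plain power-of-two sizing scheme,
   without the header and trailing-guard bookkeeping the real code does. */
#if 0
#include <stdlib.h>
#include <string.h>

/* Smallest power of two >= nbytes. */
static size_t
bucket_size (size_t nbytes)
{
  size_t b = 32;	/* assumed smallest bucket */
  while (b < nbytes)
    b <<= 1;
  return b;
}

/* Return the old pointer unchanged when the new request rounds to the same
   bucket; otherwise move the data to a freshly allocated block. */
static void *
grow_or_move (void *mem, size_t oldsize, size_t newsize)
{
  void *m;

  if (bucket_size (newsize) == bucket_size (oldsize))
    return mem;			/* same chunk is still big enough */

  if ((m = malloc (newsize)) == NULL)
    return NULL;
  memcpy (m, mem, oldsize < newsize ? oldsize : newsize);
  free (mem);
  return m;
}
#endif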
static PTR_T
internal_memalign (alignment, size, file, line, flags)
     size_t alignment;
     size_t size;
     const char *file;
     int line, flags;
{
  register char *ptr;
  register char *aligned;
  register union mhead *p;

  ptr = internal_malloc (size + alignment, file, line, MALLOC_INTERNAL);

  if (ptr == 0)
    return 0;
  /* If entire block has the desired alignment, just accept it. */
  if (((long) ptr & (alignment - 1)) == 0)
    return ptr;
  /* Otherwise, get address of byte in the block that has that alignment. */
#if 0
  aligned = (char *) (((long) ptr + alignment - 1) & -alignment);
#else
  aligned = (char *) (((long) ptr + alignment - 1) & (~alignment + 1));
#endif

  /* Store a suitable indication of how to free the block,
     so that free can find the true beginning of it. */
  p = (union mhead *) aligned - 1;
  p->mh_nbytes = aligned - ptr;
  p->mh_alloc = ISMEMALIGN;

  return aligned;
}
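/* Illustrative sketch (not part of malloc.c): internal_memalign over-allocates
   by `alignment' bytes, rounds the pointer up, and records the offset back to
   the real start in the header just below the aligned address so free() can
   undo the adjustment.  The hypothetical fragment below shows only the
   rounding step, using uintptr_t instead of long; for a power-of-two
   alignment, ~(alignment - 1) is the same mask as -alignment above. */
#if 0
#include <stdint.h>

/* Round addr up to the next multiple of alignment (a power of two). */
static uintptr_t
align_up (uintptr_t addr, uintptr_t alignment)
{
  return (addr + alignment - 1) & ~(alignment - 1);
}
#endif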
#if !defined (NO_VALLOC)
/* This runs into trouble with getpagesize on HPUX, and Multimax machines.
   Patching out seems cleaner than the ugly fix needed. */
static PTR_T
internal_valloc (size, file, line, flags)
     size_t size;
     const char *file;
     int line, flags;
{
  return internal_memalign (getpagesize (), size, file, line, flags|MALLOC_INTERNAL);
}
#endif /* !NO_VALLOC */

#ifndef NO_CALLOC
static PTR_T
internal_calloc (n, s, file, line, flags)
     size_t n, s;
     const char *file;
     int line, flags;
{
  size_t total;
  PTR_T result;

  total = n * s;
  result = internal_malloc (total, file, line, flags|MALLOC_INTERNAL);
  if (result)
    memset (result, 0, total);
  return result;
}

static void
internal_cfree (p, file, line, flags)
     PTR_T p;
     const char *file;
     int line, flags;
{
  internal_free (p, file, line, flags|MALLOC_INTERNAL);
}
#endif /* !NO_CALLOC */

#ifdef MALLOC_STATS
int
malloc_free_blocks (size)
     int size;
{
  int nfree;
  register union mhead *p;

  nfree = 0;
  for (p = nextf[size]; p; p = CHAIN (p))
    nfree++;

  return nfree;
}
#endif

#if defined (MALLOC_WRAPFUNCS)
PTR_T
sh_malloc (bytes, file, line)
     size_t bytes;
     const char *file;
     int line;
{
  return internal_malloc (bytes, file, line, MALLOC_WRAPPER);
}

PTR_T
sh_realloc (ptr, size, file, line)
     PTR_T ptr;
     size_t size;
     const char *file;
     int line;
{
  return internal_realloc (ptr, size, file, line, MALLOC_WRAPPER);
}

void
sh_free (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_free (mem, file, line, MALLOC_WRAPPER);
}

PTR_T
sh_memalign (alignment, size, file, line)
     size_t alignment;
     size_t size;
     const char *file;
     int line;
{
  return internal_memalign (alignment, size, file, line, MALLOC_WRAPPER);
}

#ifndef NO_CALLOC
PTR_T
sh_calloc (n, s, file, line)
     size_t n, s;
     const char *file;
     int line;
{
  return internal_calloc (n, s, file, line, MALLOC_WRAPPER);
}

void
sh_cfree (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_cfree (mem, file, line, MALLOC_WRAPPER);
}
#endif

#ifndef NO_VALLOC
PTR_T
sh_valloc (size, file, line)
     size_t size;
     const char *file;
     int line;
{
  return internal_valloc (size, file, line, MALLOC_WRAPPER);
}
#endif /* !NO_VALLOC */

#endif /* MALLOC_WRAPFUNCS */

/* Externally-available functions that call their internal counterparts. */

PTR_T
malloc (size)
     size_t size;
{
  return internal_malloc (size, (char *)NULL, 0, 0);
}

PTR_T
realloc (mem, nbytes)
     PTR_T mem;
     size_t nbytes;
{
  return internal_realloc (mem, nbytes, (char *)NULL, 0, 0);
}

void
free (mem)
     PTR_T mem;
{
  internal_free (mem, (char *)NULL, 0, 0);
}

PTR_T
memalign (alignment, size)
     size_t alignment;
     size_t size;
{
  return internal_memalign (alignment, size, (char *)NULL, 0, 0);
}

#ifndef NO_VALLOC
PTR_T
valloc (size)
     size_t size;
{
  return internal_valloc (size, (char *)NULL, 0, 0);
}
#endif

#ifndef NO_CALLOC
PTR_T
calloc (n, s)
     size_t n, s;
{
  return internal_calloc (n, s, (char *)NULL, 0, 0);
}

void
cfree (mem)
     PTR_T mem;
{
  internal_cfree (mem, (char *)NULL, 0, 0);
}
#endif
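/* Illustrative sketch (not part of malloc.c): the external malloc/realloc/free
   entry points above pass a null file name and line 0, while the sh_* wrappers
   (compiled under MALLOC_WRAPFUNCS) let a caller forward its own location to
   the tracing, registering and error-reporting paths.  A hypothetical caller-
   side header could route allocations through them like this; the xmalloc,
   xrealloc and xfree macro names are assumptions, not part of this file. */
#if 0
#define xmalloc(n)      sh_malloc ((n), __FILE__, __LINE__)
#define xrealloc(p, n)  sh_realloc ((p), (n), __FILE__, __LINE__)
#define xfree(p)        sh_free ((p), __FILE__, __LINE__)
#endif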