📄 dlmalloc.c
            ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
            ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
            ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
                                                     126)

/*
  bins for chunks < 512 are all spaced 8 bytes apart, and hold
  identically sized chunks. This is exploited in malloc.
*/

#define MAX_SMALLBIN         63
#define MAX_SMALLBIN_SIZE   512
#define SMALLBIN_WIDTH        8

#define smallbin_index(sz)  (((unsigned long)(sz)) >> 3)

/*
   Requests are `small' if both the corresponding and the next bin are small
*/

#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)

/*
    To help compensate for the large number of bins, a one-level index
    structure is used for bin-by-bin searching.  `binblocks' is a
    one-word bitvector recording whether groups of BINBLOCKWIDTH bins
    have any (possibly) non-empty bins, so they can be skipped over
    all at once during traversals.  The bits are NOT always cleared
    as soon as all bins in a block are empty, but instead only when
    all are noticed to be empty during traversal in malloc.
*/

#define BINBLOCKWIDTH     4   /* bins per block */

#define binblocks      (bin_at(0)->size) /* bitvector of nonempty blocks */

/* bin<->block macros */
#define idx2binblock(ix)    ((unsigned)1 << (ix / BINBLOCKWIDTH))
#define mark_binblock(ii)   (binblocks |= idx2binblock(ii))
#define clear_binblock(ii)  (binblocks &= ~(idx2binblock(ii)))



/*  Other static bookkeeping data */

/* variables holding tunable values */

static unsigned long trim_threshold   = DEFAULT_TRIM_THRESHOLD;
static unsigned long top_pad          = DEFAULT_TOP_PAD;
static unsigned int  n_mmaps_max      = DEFAULT_MMAP_MAX;
static unsigned long mmap_threshold   = DEFAULT_MMAP_THRESHOLD;

/* The first value returned from sbrk */
static char* sbrk_base = (char*)(-1);

/* The maximum memory obtained from system via sbrk */
static unsigned long max_sbrked_mem = 0;

/* The maximum via either sbrk or mmap */
static unsigned long max_total_mem = 0;

/* internal working copy of mallinfo */
static struct mallinfo current_mallinfo = {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

/* The total memory obtained from system via sbrk */
#define sbrked_mem  (current_mallinfo.arena)

/* Tracking mmaps */

#if 0
static unsigned int n_mmaps = 0;
#endif /* 0 */

static unsigned long mmapped_mem = 0;
#if HAVE_MMAP
static unsigned int max_n_mmaps = 0;
static unsigned long max_mmapped_mem = 0;
#endif



/*
  Debugging support
*/

#if DEBUG


/*
  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc. In which case, please report it!)
*/

#if __STD_C
static void do_check_chunk(mchunkptr p)
#else
static void do_check_chunk(p) mchunkptr p;
#endif
{
#if 0	/* causes warnings because assert() is off */
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
#endif	/* 0 */

  /* No checkable chunk is mmapped */
  assert(!chunk_is_mmapped(p));

  /* Check for legal address ... */
  assert((char*)p >= sbrk_base);
  if (p != top)
    assert((char*)p + sz <= (char*)top);
  else
    assert((char*)p + sz <= sbrk_base + sbrked_mem);

}


#if __STD_C
static void do_check_free_chunk(mchunkptr p)
#else
static void do_check_free_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
#if 0	/* causes warnings because assert() is off */
  mchunkptr next = chunk_at_offset(p, sz);
#endif	/* 0 */

  do_check_chunk(p);

  /* Check whether it claims to be free ... */
  assert(!inuse(p));

  /* Unless a special marker, must have OK fields */
  if ((long)sz >= (long)MINSIZE)
  {
    assert((sz & MALLOC_ALIGN_MASK) == 0);
    assert(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    assert(next->prev_size == sz);
    /* ... and is fully consolidated */
    assert(prev_inuse(p));
    assert (next == top || inuse(next));

    /* ... and has minimally sane links */
    assert(p->fd->bk == p);
    assert(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    assert(sz == SIZE_SZ);
}

#if __STD_C
static void do_check_inuse_chunk(mchunkptr p)
#else
static void do_check_inuse_chunk(p) mchunkptr p;
#endif
{
  mchunkptr next = next_chunk(p);
  do_check_chunk(p);

  /* Check whether it claims to be in use ... */
  assert(inuse(p));

  /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))
  {
    mchunkptr prv = prev_chunk(p);
    assert(next_chunk(prv) == p);
    do_check_free_chunk(prv);
  }
  if (next == top)
  {
    assert(prev_inuse(next));
    assert(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(next);

}

#if __STD_C
static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
#if 0	/* causes warnings because assert() is off */
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  long room = sz - s;
#endif	/* 0 */

  do_check_inuse_chunk(p);

  /* Legal size ... */
  assert((long)sz >= (long)MINSIZE);
  assert((sz & MALLOC_ALIGN_MASK) == 0);
  assert(room >= 0);
  assert(room < (long)MINSIZE);

  /* ... and alignment */
  assert(aligned_OK(chunk2mem(p)));


  /* ... and was allocated at front of an available chunk */
  assert(prev_inuse(p));

}


#define check_free_chunk(P)  do_check_free_chunk(P)
#define check_inuse_chunk(P) do_check_inuse_chunk(P)
#define check_chunk(P) do_check_chunk(P)
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
#else
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_chunk(P)
#define check_malloced_chunk(P,N)
#endif



/*
  Macro-based internal utilities
*/


/*
  Linking chunks in bin lists.
  Call these only with variables, not arbitrary expressions, as arguments.
*/

/*
  Place chunk p of size s in its bin, in size order,
  putting it ahead of others of same size.
*/


#define frontlink(P, S, IDX, BK, FD)                                          \
{                                                                             \
  if (S < MAX_SMALLBIN_SIZE)                                                  \
  {                                                                           \
    IDX = smallbin_index(S);                                                  \
    mark_binblock(IDX);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
  else                                                                        \
  {                                                                           \
    IDX = bin_index(S);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    if (FD == BK) mark_binblock(IDX);                                         \
    else                                                                      \
    {                                                                         \
      while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
      BK = FD->bk;                                                            \
    }                                                                         \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
}


/* take a chunk off a list */

#define unlink(P, BK, FD)                                                     \
{                                                                             \
  BK = P->bk;                                                                 \
  FD = P->fd;                                                                 \
  FD->bk = BK;                                                                \
  BK->fd = FD;                                                                \
}                                                                             \

/* Place p as the last remainder */

#define link_last_remainder(P)                                                \
{                                                                             \
  last_remainder->fd = last_remainder->bk =  P;                               \
  P->fd = P->bk = last_remainder;                                             \
}

/* Clear the last_remainder bin */

#define clear_last_remainder \
  (last_remainder->fd = last_remainder->bk = last_remainder)
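

/*
  Editorial aside: the block below is an illustrative sketch, not part of the
  original dlmalloc source.  It mirrors the arithmetic of smallbin_index()
  and idx2binblock() defined above to show how a small request size
  (< MAX_SMALLBIN_SIZE) picks an 8-byte-spaced bin and which bit of
  `binblocks' covers that bin -- the same mapping frontlink() performs before
  it marks the block and links the chunk at the head of its bin.  The sizes
  are assumptions chosen only for the example, and the block is kept inside
  #if 0 so the file compiles unchanged.
*/
#if 0
#include <stdio.h>

int main(void)
{
  unsigned long sizes[] = { 16, 40, 200, 504 };   /* all < MAX_SMALLBIN_SIZE */
  int i;

  for (i = 0; i < 4; i++)
  {
    unsigned long sz  = sizes[i];
    unsigned long idx = sz >> 3;   /* smallbin_index: one bin per 8 bytes     */
    unsigned long bit = idx / 4;   /* idx2binblock: 4 bins per binblocks bit  */

    printf("size %3lu -> small bin %2lu, binblocks bit %2lu\n", sz, idx, bit);
  }
  return 0;
}
#endif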


/*
  Routines dealing with mmap().
*/

#if HAVE_MMAP

#if __STD_C
static mchunkptr mmap_chunk(size_t size)
#else
static mchunkptr mmap_chunk(size) size_t size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  mchunkptr p;

#ifndef MAP_ANONYMOUS
  static int fd = -1;
#endif

  if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */

  /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
   * there is no following chunk whose prev_size field could be used.
   */
  size = (size + SIZE_SZ + page_mask) & ~page_mask;

#ifdef MAP_ANONYMOUS
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
		      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
#else /* !MAP_ANONYMOUS */
  if (fd < 0)
  {
    fd = open("/dev/zero", O_RDWR);
    if(fd < 0) return 0;
  }
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif

  if(p == (mchunkptr)-1) return 0;

  n_mmaps++;
  if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;

  /* We demand that eight bytes into a page must be 8-byte aligned. */
  assert(aligned_OK(chunk2mem(p)));

  /* The offset to the start of the mmapped region is stored
   * in the prev_size field of the chunk; normally it is zero,
   * but that can be changed in memalign().
   */
  p->prev_size = 0;
  set_head(p, size|IS_MMAPPED);

  mmapped_mem += size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#if __STD_C
static void munmap_chunk(mchunkptr p)
#else
static void munmap_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T size = chunksize(p);
  int ret;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);

  n_mmaps--;
  mmapped_mem -= (size + p->prev_size);

  ret = munmap((char *)p - p->prev_size, size + p->prev_size);

  /* munmap returns non-zero on failure */
  assert(ret == 0);
}

#if HAVE_MREMAP

#if __STD_C
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
#else
static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  INTERNAL_SIZE_T offset = p->prev_size;
  INTERNAL_SIZE_T size = chunksize(p);
  char *cp;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((size + offset) & (malloc_getpagesize-1)) == 0);

  /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
  new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
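
  /*
   * Editorial aside (not in the original source): a worked example of the
   * rounding above, assuming a 4096-byte page (page_mask = 0xfff),
   * SIZE_SZ = 8 and offset = 0.  A request of new_size = 10000 becomes
   *   (10000 + 0 + 8 + 4095) & ~4095 = 14103 & ~4095 = 12288,
   * i.e. three full pages, matching the rounding performed in mmap_chunk().
   */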