dlmalloc.src
static unsigned long max_total_mem = 0;

/* internal working copy of mallinfo */
static struct mallinfo current_mallinfo = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

/* The total memory obtained from system via sbrk */
#define sbrked_mem (current_mallinfo.arena)

/* Tracking mmaps */
static unsigned int n_mmaps = 0;
static unsigned int max_n_mmaps = 0;
static unsigned long mmapped_mem = 0;
static unsigned long max_mmapped_mem = 0;



/*
  Debugging support
*/

#if DEBUG

/*
  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc. In which case, please report it!)
*/

#if __STD_C
static void do_check_chunk(mchunkptr p)
#else
static void do_check_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;

  /* No checkable chunk is mmapped */
  assert(!chunk_is_mmapped(p));

  /* Check for legal address ... */
  assert((char*)p >= sbrk_base);
  if (p != top)
    assert((char*)p + sz <= (char*)top);
  else
    assert((char*)p + sz <= sbrk_base + sbrked_mem);
}


#if __STD_C
static void do_check_free_chunk(mchunkptr p)
#else
static void do_check_free_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  mchunkptr next = chunk_at_offset(p, sz);

  do_check_chunk(p);

  /* Check whether it claims to be free ... */
  assert(!inuse(p));

  /* Unless a special marker, must have OK fields */
  if ((long)sz >= (long)MINSIZE)
  {
    assert((sz & MALLOC_ALIGN_MASK) == 0);
    assert(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    assert(next->prev_size == sz);
    /* ... and is fully consolidated */
    assert(prev_inuse(p));
    assert(next == top || inuse(next));

    /* ... and has minimally sane links */
    assert(p->fd->bk == p);
    assert(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    assert(sz == SIZE_SZ);
}

#if __STD_C
static void do_check_inuse_chunk(mchunkptr p)
#else
static void do_check_inuse_chunk(p) mchunkptr p;
#endif
{
  mchunkptr next = next_chunk(p);
  do_check_chunk(p);

  /* Check whether it claims to be in use ... */
  assert(inuse(p));

  /* ... and is surrounded by OK chunks.
     Since more things can be checked with free chunks than inuse ones,
     if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))
  {
    mchunkptr prv = prev_chunk(p);
    assert(next_chunk(prv) == p);
    do_check_free_chunk(prv);
  }
  if (next == top)
  {
    assert(prev_inuse(next));
    assert(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(next);
}

#if __STD_C
static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  long room = sz - s;

  do_check_inuse_chunk(p);

  /* Legal size ... */
  assert((long)sz >= (long)MINSIZE);
  assert((sz & MALLOC_ALIGN_MASK) == 0);
  assert(room >= 0);
  assert(room < (long)MINSIZE);

  /* ... and alignment */
  assert(aligned_OK(chunk2mem(p)));

  /* ... and was allocated at front of an available chunk */
  assert(prev_inuse(p));
}


#define check_free_chunk(P)       do_check_free_chunk(P)
#define check_inuse_chunk(P)      do_check_inuse_chunk(P)
#define check_chunk(P)            do_check_chunk(P)
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
#else
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_chunk(P)
#define check_malloced_chunk(P,N)
#endif



/*
  Macro-based internal utilities
*/
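/* ------------------------------------------------------------------
   Example (a standalone sketch, not part of dlmalloc): the checks
   above rely on the boundary-tag trick. Chunk sizes are multiples of
   the alignment, so the low bit of the size field is free to hold
   the PREV_INUSE flag, and a free chunk repeats its size in the
   prev_size footer of the chunk after it. The EX_* names and the
   48-byte size below are assumed stand-ins, not dlmalloc's own.
   ------------------------------------------------------------------ */
#include <assert.h>
#include <stddef.h>

#define EX_PREV_INUSE 0x1UL        /* low bit of size: previous chunk in use */

struct ex_chunk {
  size_t prev_size;                /* footer of the previous chunk, if free */
  size_t size;                     /* this chunk's size plus flag bits */
};

int main(void)
{
  struct ex_chunk c;

  c.size = 48 | EX_PREV_INUSE;     /* 48-byte chunk, previous chunk in use */

  /* Masking off the flag recovers the true size, mirroring
     "sz = p->size & ~PREV_INUSE" in do_check_chunk() above. */
  assert((c.size & ~EX_PREV_INUSE) == 48);

  /* The flag bit itself answers prev_inuse()-style queries. */
  assert((c.size & EX_PREV_INUSE) != 0);
  return 0;
}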
/*
  Linking chunks in bin lists.
  Call these only with variables, not arbitrary expressions, as arguments.
*/

/*
  Place chunk p of size s in its bin, in size order,
  putting it ahead of others of same size.
*/

#define frontlink(P, S, IDX, BK, FD)                       \
{                                                          \
  if (S < MAX_SMALLBIN_SIZE)                               \
  {                                                        \
    IDX = smallbin_index(S);                               \
    mark_binblock(IDX);                                    \
    BK = bin_at(IDX);                                      \
    FD = BK->fd;                                           \
    P->bk = BK;                                            \
    P->fd = FD;                                            \
    FD->bk = BK->fd = P;                                   \
  }                                                        \
  else                                                     \
  {                                                        \
    IDX = bin_index(S);                                    \
    BK = bin_at(IDX);                                      \
    FD = BK->fd;                                           \
    if (FD == BK) mark_binblock(IDX);                      \
    else                                                   \
    {                                                      \
      while (FD != BK && S < chunksize(FD)) FD = FD->fd;   \
      BK = FD->bk;                                         \
    }                                                      \
    P->bk = BK;                                            \
    P->fd = FD;                                            \
    FD->bk = BK->fd = P;                                   \
  }                                                        \
}


/* take a chunk off a list */

#define unlink(P, BK, FD)                                  \
{                                                          \
  BK = P->bk;                                              \
  FD = P->fd;                                              \
  FD->bk = BK;                                             \
  BK->fd = FD;                                             \
}                                                          \

/* Place p as the last remainder */

#define link_last_remainder(P)                             \
{                                                          \
  last_remainder->fd = last_remainder->bk = P;             \
  P->fd = P->bk = last_remainder;                          \
}

/* Clear the last_remainder bin */

#define clear_last_remainder \
  (last_remainder->fd = last_remainder->bk = last_remainder)



/* Routines dealing with mmap(). */

#if HAVE_MMAP

#if __STD_C
static mchunkptr mmap_chunk(size_t size)
#else
static mchunkptr mmap_chunk(size) size_t size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  mchunkptr p;

#ifndef MAP_ANONYMOUS
  static int fd = -1;
#endif

  if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */

  /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
   * there is no following chunk whose prev_size field could be used.
   */
  size = (size + SIZE_SZ + page_mask) & ~page_mask;

#ifdef MAP_ANONYMOUS
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
                      MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
#else /* !MAP_ANONYMOUS */
  if (fd < 0)
  {
    fd = open("/dev/zero", O_RDWR);
    if(fd < 0) return 0;
  }
  p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif

  if(p == (mchunkptr)-1) return 0;

  n_mmaps++;
  if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;

  /* We demand that eight bytes into a page must be 8-byte aligned. */
  assert(aligned_OK(chunk2mem(p)));

  /* The offset to the start of the mmapped region is stored
   * in the prev_size field of the chunk; normally it is zero,
   * but that can be changed in memalign().
   */
  p->prev_size = 0;
  set_head(p, size|IS_MMAPPED);

  mmapped_mem += size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#if __STD_C
static void munmap_chunk(mchunkptr p)
#else
static void munmap_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T size = chunksize(p);
  int ret;

  assert(chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);

  n_mmaps--;
  mmapped_mem -= (size + p->prev_size);

  ret = munmap((char *)p - p->prev_size, size + p->prev_size);

  /* munmap returns non-zero on failure */
  assert(ret == 0);
}
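/* ------------------------------------------------------------------
   Example (a standalone sketch, not part of dlmalloc): the request
   rounding used by mmap_chunk() above. A request is padded by one
   SIZE_SZ, since no following chunk supplies a prev_size footer, and
   then rounded up to a page multiple with the usual mask arithmetic.
   The 4096-byte page and 8-byte SIZE_SZ here are assumed values.
   ------------------------------------------------------------------ */
#include <assert.h>
#include <stddef.h>

#define EX_PAGESIZE 4096UL
#define EX_SIZE_SZ  8UL

static size_t ex_round_mmap_request(size_t size)
{
  size_t page_mask = EX_PAGESIZE - 1;
  /* Same expression as in mmap_chunk():
     size = (size + SIZE_SZ + page_mask) & ~page_mask; */
  return (size + EX_SIZE_SZ + page_mask) & ~page_mask;
}

int main(void)
{
  assert(ex_round_mmap_request(1) == 4096);        /* small request: one page */
  assert(ex_round_mmap_request(4096) == 8192);     /* SIZE_SZ pad spills over */
  assert(ex_round_mmap_request(8192 - 8) == 8192); /* exact fit after padding */
  return 0;
}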
#if HAVE_MREMAP

#if __STD_C
static mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
#else
static mchunkptr mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  INTERNAL_SIZE_T offset = p->prev_size;
  INTERNAL_SIZE_T size = chunksize(p);
  char *cp;

  assert(chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((size + offset) & (malloc_getpagesize-1)) == 0);

  /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
  new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;

  cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);

  if (cp == (char *)-1) return 0;

  p = (mchunkptr)(cp + offset);

  assert(aligned_OK(chunk2mem(p)));

  assert((p->prev_size == offset));
  set_head(p, (new_size - offset)|IS_MMAPPED);

  mmapped_mem -= size + offset;
  mmapped_mem += new_size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
    max_total_mem = mmapped_mem + sbrked_mem;
  return p;
}

#endif /* HAVE_MREMAP */

#endif /* HAVE_MMAP */



/*
  Extend the top-most chunk by obtaining memory from system.
  Main interface to sbrk (but see also malloc_trim).
*/

#if __STD_C
static void malloc_extend_top(INTERNAL_SIZE_T nb)
#else
static void malloc_extend_top(nb) INTERNAL_SIZE_T nb;
#endif
{
  char*           brk;            /* return value from sbrk */
  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
  INTERNAL_SIZE_T correction;     /* bytes for 2nd sbrk call */
  char*           new_brk;        /* return of 2nd sbrk call */
  INTERNAL_SIZE_T top_size;       /* new size of top chunk */

  mchunkptr old_top = top;        /* Record state of old top */
  INTERNAL_SIZE_T old_top_size = chunksize(old_top);
  char* old_end = (char*)(chunk_at_offset(old_top, old_top_size));

  /* Pad request with top_pad plus minimal overhead */
  INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE;
  unsigned long pagesz = malloc_getpagesize;

  /* If not the first time through, round to preserve page boundary */
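/* ------------------------------------------------------------------
   Example (a standalone sketch, not part of dlmalloc; the listing
   above is cut off inside malloc_extend_top): how the sbrk request
   is padded before page rounding. The request grows by top_pad plus
   MINSIZE of overhead, as in "sbrk_size = nb + top_pad + MINSIZE"
   above. The EX_* constants are assumed values, and unlike the real
   code, which per the truncated comment skips rounding on the first
   call, this sketch always rounds.
   ------------------------------------------------------------------ */
#include <assert.h>
#include <stddef.h>

#define EX_MINSIZE  16UL   /* assumed minimum chunk size */
#define EX_PAGESIZE 4096UL /* assumed page size */

static unsigned long ex_top_pad = 0; /* stand-in for the top_pad parameter */

static size_t ex_sbrk_request(size_t nb)
{
  size_t sbrk_size = nb + ex_top_pad + EX_MINSIZE;
  return (sbrk_size + (EX_PAGESIZE - 1)) & ~(EX_PAGESIZE - 1);
}

int main(void)
{
  assert(ex_sbrk_request(100) == 4096);  /* small request: one page */
  ex_top_pad = 8192;                     /* e.g. set via mallopt(M_TOP_PAD, ...) */
  assert(ex_sbrk_request(100) == 12288); /* padding buys fewer sbrk calls */
  return 0;
}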