/* malloc.c */
/*
  ...use by mmap, and using more than a few of them may degrade
  performance.  The default is set to a value that serves only as a
  safeguard.  Setting to 0 disables use of mmap for servicing large
  requests.  If HAVE_MMAP is not set, the default value is 0, and
  attempts to set it to non-zero values in mallopt will fail.
*/

#define M_MMAP_MAX             -4

#ifndef DEFAULT_MMAP_MAX
#if HAVE_MMAP
#define DEFAULT_MMAP_MAX       (65536)
#else
#define DEFAULT_MMAP_MAX       (0)
#endif
#endif

#ifdef __cplusplus
}  /* end of extern "C" */
#endif

#include "malloc.h"

#ifndef BOUNDED_N
#define BOUNDED_N(ptr, sz) (ptr)
#endif
#ifndef RETURN_ADDRESS
#define RETURN_ADDRESS(X_) (NULL)
#endif

/* On some platforms we can compile internal, not exported functions better.
   Let the environment provide a macro and define it to be empty if it
   is not available.  */
#ifndef internal_function
# define internal_function
#endif

/* Forward declarations.  */
struct malloc_chunk;
typedef struct malloc_chunk* mchunkptr;

/* Internal routines.  */

#if __STD_C

Void_t*         _int_malloc(mstate, size_t);
void            _int_free(mstate, Void_t*);
Void_t*         _int_realloc(mstate, Void_t*, size_t);
Void_t*         _int_memalign(mstate, size_t, size_t);
Void_t*         _int_valloc(mstate, size_t);
static Void_t*  _int_pvalloc(mstate, size_t);
/*static Void_t*  cALLOc(size_t, size_t);*/
static Void_t** _int_icalloc(mstate, size_t, size_t, Void_t**);
static Void_t** _int_icomalloc(mstate, size_t, size_t*, Void_t**);
static int      mTRIm(size_t);
static size_t   mUSABLe(Void_t*);
static int      mALLOPt(int, int);

static Void_t* internal_function mem2mem_check(Void_t *p, size_t sz);
static int internal_function top_check(void);
static void internal_function munmap_chunk(mchunkptr p);
#if HAVE_MREMAP
static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
#endif

static Void_t*   malloc_check(size_t sz, const Void_t *caller);
static void      free_check(Void_t* mem, const Void_t *caller);
static Void_t*   realloc_check(Void_t* oldmem, size_t bytes,
                               const Void_t *caller);
static Void_t*   memalign_check(size_t alignment, size_t bytes,
                                const Void_t *caller);
#ifndef NO_THREADS
# if USE_STARTER
static Void_t*   malloc_starter(size_t sz, const Void_t *caller);
static Void_t*   memalign_starter(size_t aln, size_t sz, const Void_t *caller);
static void      free_starter(Void_t* mem, const Void_t *caller);
# endif
static Void_t*   malloc_atfork(size_t sz, const Void_t *caller);
static void      free_atfork(Void_t* mem, const Void_t *caller);
#endif

#else

Void_t*         _int_malloc();
void            _int_free();
Void_t*         _int_realloc();
Void_t*         _int_memalign();
Void_t*         _int_valloc();
Void_t*         _int_pvalloc();
/*static Void_t*  cALLOc();*/
static Void_t** _int_icalloc();
static Void_t** _int_icomalloc();
static int      mTRIm();
static size_t   mUSABLe();
static int      mALLOPt();

#endif


/* ------------- Optional versions of memcopy ---------------- */

#if USE_MEMCPY

/*
  Note: memcpy is ONLY invoked with non-overlapping regions,
  so the (usually slower) memmove is not needed.
*/

#define MALLOC_COPY(dest, src, nbytes)  memcpy(dest, src, nbytes)
#define MALLOC_ZERO(dest, nbytes)       memset(dest, 0,   nbytes)

#else /* !USE_MEMCPY */

/* Use Duff's device for good zeroing/copying performance. */

#define MALLOC_ZERO(charp, nbytes)                                          \
do {                                                                        \
  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                         \
  unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                   \
  long mcn;                                                                 \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }           \
  switch (mctmp) {                                                          \
    case 0: for(;;) { *mzp++ = 0;                                           \
    case 7:           *mzp++ = 0;                                           \
    case 6:           *mzp++ = 0;                                           \
    case 5:           *mzp++ = 0;                                           \
    case 4:           *mzp++ = 0;                                           \
    case 3:           *mzp++ = 0;                                           \
    case 2:           *mzp++ = 0;                                           \
    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }              \
  }                                                                         \
} while(0)

#define MALLOC_COPY(dest,src,nbytes)                                        \
do {                                                                        \
  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                          \
  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                         \
  unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);                   \
  long mcn;                                                                 \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }           \
  switch (mctmp) {                                                          \
    case 0: for(;;) { *mcdst++ = *mcsrc++;                                  \
    case 7:           *mcdst++ = *mcsrc++;                                  \
    case 6:           *mcdst++ = *mcsrc++;                                  \
    case 5:           *mcdst++ = *mcsrc++;                                  \
    case 4:           *mcdst++ = *mcsrc++;                                  \
    case 3:           *mcdst++ = *mcsrc++;                                  \
    case 2:           *mcdst++ = *mcsrc++;                                  \
    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }     \
  }                                                                         \
} while(0)

#endif
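/*
  Illustrative sketch (not part of the original file): Duff's device
  interleaves a switch with an unrolled loop, so the (n mod 8) leftover
  words are handled by jumping into the middle of the first pass, and
  every later pass performs eight stores.  The same idiom as a
  standalone function, assuming n >= 1 just as the macros above do:
*/
#if 0 /* example only */
static void example_zero_words(unsigned long* p, unsigned long n)
{
  unsigned long rem = n;  /* words written on the first, partial pass */
  long rounds = 0;        /* remaining full passes of 8 stores        */
  if (rem >= 8) { rounds = (long)((rem - 1) / 8); rem %= 8; }
  switch (rem) {
    case 0: for (;;) { *p++ = 0;  /* rem == 0: start with a full pass */
    case 7:            *p++ = 0;
    case 6:            *p++ = 0;
    case 5:            *p++ = 0;
    case 4:            *p++ = 0;
    case 3:            *p++ = 0;
    case 2:            *p++ = 0;
    case 1:            *p++ = 0; if (rounds <= 0) break; rounds--; }
  }
}
#endif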
/* ------------------ MMAP support ------------------ */

#if HAVE_MMAP

#include <fcntl.h>
#ifndef LACKS_SYS_MMAN_H
#include <sys/mman.h>
#endif

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
# define MAP_ANONYMOUS MAP_ANON
#endif
#if !defined(MAP_FAILED)
# define MAP_FAILED ((char*)-1)
#endif

#ifndef MAP_NORESERVE
# ifdef MAP_AUTORESRV
#  define MAP_NORESERVE MAP_AUTORESRV
# else
#  define MAP_NORESERVE 0
# endif
#endif

/*
  Nearly all versions of mmap support MAP_ANONYMOUS,
  so the following is unlikely to be needed, but is
  supplied just in case.
*/

#ifndef MAP_ANONYMOUS

static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */

#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
 (dev_zero_fd = open("/dev/zero", O_RDWR), \
  mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
  mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))

#else

#define MMAP(addr, size, prot, flags) \
 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))

#endif

#endif /* HAVE_MMAP */
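/*
  Illustration (not part of the original file): whichever branch defined
  MMAP above, callers obtain anonymous, demand-zeroed memory the same
  way on every platform.  A hypothetical request for `size' bytes would
  look like:
*/
#if 0 /* example only */
  char* mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
  if (mm != MAP_FAILED) {
    /* success: mm points to size bytes of zeroed, page-aligned memory */
  }
#endif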
/* ----------------------- Chunk representations ----------------------- */

/*
  This struct declaration is misleading (but accurate and necessary).
  It declares a "view" into memory allowing access to necessary
  fields at known offsets from a given base. See explanation below.
*/

struct malloc_chunk {

  INTERNAL_SIZE_T      prev_size;  /* Size of previous chunk (if free).  */
  INTERNAL_SIZE_T      size;       /* Size in bytes, including overhead. */

  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;
};


/*
   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:


    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if allocated            | |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_space() bytes)                     .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk                                     |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary,
    and thus at least double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look
    like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory. If
    prev_inuse is set for any given chunk, then you CANNOT determine
    the size of the previous chunk, and might even get a memory
    addressing fault when trying to do so.

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk. This makes it easier to
    deal with alignments etc but can be very confusing when trying
    to extend or adapt this code.

    The two exceptions to all this are

     1. The special chunk `top' doesn't bother using the
        trailing size field since there is no
        next contiguous chunk that would have to index off it. After
        initialization, `top' is forced to always exist.  If it would
        become less than MINSIZE bytes long, it is replenished.

     2. Chunks allocated via mmap, which have the second-lowest-order
        bit (IS_MMAPPED) set in their size fields.  Because they are
        allocated one-by-one, each must contain its own trailing size
        field.

*/

/* ---------- Size and alignment checks and conversions ---------- */

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

/* The smallest possible chunk */
#define MIN_CHUNK_SIZE        (sizeof(struct malloc_chunk))

/* The smallest size we can malloc is an aligned minimal chunk */

#define MINSIZE  \
  (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))

/* Check if m has acceptable alignment */

#define aligned_OK(m)  (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
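/*
  Worked example (illustrative; assumes 4-byte INTERNAL_SIZE_T and
  pointers, so SIZE_SZ == 4 and 8-byte alignment): for a chunk at
  address 0x1000, chunk2mem yields 0x1000 + 2*4 == 0x1008, which
  aligned_OK accepts (0x1008 & 7 == 0), and mem2chunk(0x1008) recovers
  0x1000.  sizeof(struct malloc_chunk) is then 16, so MINSIZE ==
  (16 + 7) & ~7 == 16.  A size field of 0x19 describes the same chunk
  as 0x18 (24 bytes) but with the PREV_INUSE bit set.
*/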
/*
   Check if a request is so large that it would wrap around zero when
   padded and aligned. To simplify some other code, the bound is made
   low enough so that adding MINSIZE will also not wrap around zero.
*/

#define REQUEST_OUT_OF_RANGE(req)                                 \
  ((unsigned long)(req) >=                                        \
   (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))

/* pad request bytes into a usable size -- internal version */

#define request2size(req)                                         \
  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
   MINSIZE :                                                      \
   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

/*  Same, except also perform argument check */

#define checked_request2size(req, sz)                             \
  if (REQUEST_OUT_OF_RANGE(req)) {                                \
    MALLOC_FAILURE_ACTION;                                        \
    return 0;                                                     \
  }                                                               \
  (sz) = request2size(req);
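/*
  Worked example (illustrative; same assumptions as above: SIZE_SZ == 4,
  MALLOC_ALIGN_MASK == 7, MINSIZE == 16):

    request2size(1)  : 1 + 4 + 7 == 12 < 16, so the result is MINSIZE (16)
    request2size(20) : (20 + 4 + 7) & ~7 == 24
    request2size(24) : (24 + 4 + 7) & ~7 == 32

  i.e. SIZE_SZ bytes of overhead are added and the sum is rounded up to
  a multiple of the alignment, with MINSIZE as a floor.
*/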