/* malloc.c — dlmalloc (Doug Lea's malloc): public locking wrappers,
   copy/zero macros, MMAP support, and chunk representation.
   NOTE(extraction): the original file header and the opening of the
   configuration comment below were lost when this chunk was extracted;
   the next line is the tail of that comment and closes it:
action to take on failure.*/#ifdef USE_MALLOC_LOCK#ifdef WIN32static int mALLOC_MUTEx;#define MALLOC_PREACTION slwait(&mALLOC_MUTEx)#define MALLOC_POSTACTION slrelease(&mALLOC_MUTEx)#else#include <pthread.h>static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;#define MALLOC_PREACTION pthread_mutex_lock(&mALLOC_MUTEx)#define MALLOC_POSTACTION pthread_mutex_unlock(&mALLOC_MUTEx)#endif /* USE_MALLOC_LOCK */#else/* Substitute anything you like for these */#define MALLOC_PREACTION (0)#define MALLOC_POSTACTION (0)#endifVoid_t* public_mALLOc(size_t bytes) { Void_t* m; if (MALLOC_PREACTION != 0) { return 0; } m = mALLOc(bytes); if (MALLOC_POSTACTION != 0) { } return m;}void public_fREe(Void_t* m) { if (MALLOC_PREACTION != 0) { return; } fREe(m); if (MALLOC_POSTACTION != 0) { }}Void_t* public_rEALLOc(Void_t* m, size_t bytes) { if (MALLOC_PREACTION != 0) { return 0; } m = rEALLOc(m, bytes); if (MALLOC_POSTACTION != 0) { } return m;}Void_t* public_mEMALIGn(size_t alignment, size_t bytes) { Void_t* m; if (MALLOC_PREACTION != 0) { return 0; } m = mEMALIGn(alignment, bytes); if (MALLOC_POSTACTION != 0) { } return m;}Void_t* public_vALLOc(size_t bytes) { Void_t* m; if (MALLOC_PREACTION != 0) { return 0; } m = vALLOc(bytes); if (MALLOC_POSTACTION != 0) { } return m;}Void_t* public_pVALLOc(size_t bytes) { Void_t* m; if (MALLOC_PREACTION != 0) { return 0; } m = pVALLOc(bytes); if (MALLOC_POSTACTION != 0) { } return m;}Void_t* public_cALLOc(size_t n, size_t elem_size) { Void_t* m; if (MALLOC_PREACTION != 0) { return 0; } m = cALLOc(n, elem_size); if (MALLOC_POSTACTION != 0) { } return m;}Void_t** public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks) { Void_t** m; if (MALLOC_PREACTION != 0) { return 0; } m = iCALLOc(n, elem_size, chunks); if (MALLOC_POSTACTION != 0) { } return m;}Void_t** public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks) { Void_t** m; if (MALLOC_PREACTION != 0) { return 0; } m = iCOMALLOc(n, sizes, chunks); if (MALLOC_POSTACTION != 0) { } 
return m;}void public_cFREe(Void_t* m) { if (MALLOC_PREACTION != 0) { return; } cFREe(m); if (MALLOC_POSTACTION != 0) { }}int public_mTRIm(size_t s) { int result; if (MALLOC_PREACTION != 0) { return 0; } result = mTRIm(s); if (MALLOC_POSTACTION != 0) { } return result;}size_t public_mUSABLe(Void_t* m) { size_t result; if (MALLOC_PREACTION != 0) { return 0; } result = mUSABLe(m); if (MALLOC_POSTACTION != 0) { } return result;}void public_mSTATs() { if (MALLOC_PREACTION != 0) { return; } mSTATs(); if (MALLOC_POSTACTION != 0) { }}struct mallinfo public_mALLINFo() { struct mallinfo m; if (MALLOC_PREACTION != 0) { struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; return nm; } m = mALLINFo(); if (MALLOC_POSTACTION != 0) { } return m;}int public_mALLOPt(int p, int v) { int result; if (MALLOC_PREACTION != 0) { return 0; } result = mALLOPt(p, v); if (MALLOC_POSTACTION != 0) { } return result;}#endif/* ------------- Optional versions of memcopy ---------------- */#if USE_MEMCPY/* Note: memcpy is ONLY invoked with non-overlapping regions, so the (usually slower) memmove is not needed.*/#define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)#define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)#else /* !USE_MEMCPY *//* Use Duff's device for good zeroing/copying performance. 
*/#define MALLOC_ZERO(charp, nbytes) \do { \ INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \ CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \ long mcn; \ if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \ switch (mctmp) { \ case 0: for(;;) { *mzp++ = 0; \ case 7: *mzp++ = 0; \ case 6: *mzp++ = 0; \ case 5: *mzp++ = 0; \ case 4: *mzp++ = 0; \ case 3: *mzp++ = 0; \ case 2: *mzp++ = 0; \ case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \ } \} while(0)#define MALLOC_COPY(dest,src,nbytes) \do { \ INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \ INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \ CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \ long mcn; \ if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \ switch (mctmp) { \ case 0: for(;;) { *mcdst++ = *mcsrc++; \ case 7: *mcdst++ = *mcsrc++; \ case 6: *mcdst++ = *mcsrc++; \ case 5: *mcdst++ = *mcsrc++; \ case 4: *mcdst++ = *mcsrc++; \ case 3: *mcdst++ = *mcsrc++; \ case 2: *mcdst++ = *mcsrc++; \ case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \ } \} while(0)#endif/* ------------------ MMAP support ------------------ */#if HAVE_MMAP#ifndef LACKS_FCNTL_H#include <fcntl.h>#endif#ifndef LACKS_SYS_MMAN_H#include <sys/mman.h>#endif#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)#define MAP_ANONYMOUS MAP_ANON#endif/* Nearly all versions of mmap support MAP_ANONYMOUS, so the following is unlikely to be needed, but is supplied just in case.*/#ifndef MAP_ANONYMOUSstatic int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? 
\ (dev_zero_fd = open("/dev/zero", O_RDWR), \ mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \ mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))#else#define MMAP(addr, size, prot, flags) \ (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))#endif#endif /* HAVE_MMAP *//* ----------------------- Chunk representations -----------------------*//* This struct declaration is misleading (but accurate and necessary). It declares a "view" into memory allowing access to necessary fields at known offsets from a given base. See explanation below.*/struct malloc_chunk { INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */ INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */ struct malloc_chunk* fd; /* double links -- used only if free. */ struct malloc_chunk* bk;};typedef struct malloc_chunk* mchunkptr;/* malloc_chunk details: (The following includes lightly edited explanations by Colin Plumb.) Chunks of memory are maintained using a `boundary tag' method as described in e.g., Knuth or Standish. (See the paper by Paul Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such techniques.) Sizes of free chunks are stored both in the front of each chunk and at the end. This makes consolidating fragmented chunks into bigger chunks very fast. The size fields also hold bits representing whether chunks are free or in use. An allocated chunk looks like this: chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Size of previous chunk, if allocated | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Size of chunk, in bytes |P| mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | User data starts here... . . . . (malloc_usable_space() bytes) . . 
|nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Size of chunk | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Where "chunk" is the front of the chunk for the purpose of most of the malloc code, but "mem" is the pointer that is returned to the user. "Nextchunk" is the beginning of the next contiguous chunk. Chunks always begin on even word boundries, so the mem portion (which is returned to the user) is also on an even word boundary, and thus at least double-word aligned. Free chunks are stored in circular doubly-linked lists, and look like this: chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Size of previous chunk | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ `head:' | Size of chunk, in bytes |P| mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Forward pointer to next chunk in list | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Back pointer to previous chunk in list | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Unused space (may be 0 bytes long) . . . . |nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ `foot:' | Size of chunk, in bytes | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ The P (PREV_INUSE) bit, stored in the unused low-order bit of the chunk size (which is always a multiple of two words), is an in-use bit for the *previous* chunk. If that bit is *clear*, then the word before the current chunk size contains the previous chunk size, and can be used to find the front of the previous chunk. The very first chunk allocated always has this bit set, preventing access to non-existent (or non-owned) memory. If pr
   ... [remainder of original explanatory comment truncated in extraction] */