/* malloc.h -- internal declarations for the malloc implementation. */
/* Take a chunk off a bin list */#define unlink(P, BK, FD) { \ FD = P->fd; \ BK = P->bk; \ if (FD->bk != P || BK->fd != P) \ abort(); \ FD->bk = BK; \ BK->fd = FD; \}/* Indexing Bins for sizes < 512 bytes contain chunks of all the same size, spaced 8 bytes apart. Larger bins are approximately logarithmically spaced: 64 bins of size 8 32 bins of size 64 16 bins of size 512 8 bins of size 4096 4 bins of size 32768 2 bins of size 262144 1 bin of size what's left The bins top out around 1MB because we expect to service large requests via mmap.*/#define NBINS 96#define NSMALLBINS 32#define SMALLBIN_WIDTH 8#define MIN_LARGE_SIZE 256#define in_smallbin_range(sz) \ ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)#define smallbin_index(sz) (((unsigned)(sz)) >> 3)#define bin_index(sz) \ ((in_smallbin_range(sz)) ? smallbin_index(sz) : __malloc_largebin_index(sz))/* FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the first bin that is maintained in sorted order. This must be the smallest size corresponding to a given bin. Normally, this should be MIN_LARGE_SIZE. But you can weaken best fit guarantees to sometimes speed up malloc by increasing value. Doing this means that malloc may choose a chunk that is non-best-fitting by up to the width of the bin. Some useful cutoff values: 512 - all bins sorted 2560 - leaves bins <= 64 bytes wide unsorted 12288 - leaves bins <= 512 bytes wide unsorted 65536 - leaves bins <= 4096 bytes wide unsorted 262144 - leaves bins <= 32768 bytes wide unsorted -1 - no bins sorted (not recommended!)*/#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE/* #define FIRST_SORTED_BIN_SIZE 65536 *//* Unsorted chunks All remainders from chunk splits, as well as all returned chunks, are first placed in the "unsorted" bin. They are then placed in regular bins after malloc gives them ONE chance to be used before binning. 
So, basically, the unsorted_chunks list acts as a queue, with chunks being placed on it in free (and __malloc_consolidate), and taken off (to be either used or placed in bins) in malloc.*//* The otherwise unindexable 1-bin is used to hold unsorted chunks. */#define unsorted_chunks(M) (bin_at(M, 1))/* Top The top-most available chunk (i.e., the one bordering the end of available memory) is treated specially. It is never included in any bin, is used only if no other chunk is available, and is released back to the system if it is very large (see M_TRIM_THRESHOLD). Because top initially points to its own bin with initial zero size, thus forcing extension on the first malloc request, we avoid having any special code in malloc to check whether it even exists yet. But we still need to do so when getting memory from system, so we make initial_top treat the bin as a legal but unusable chunk during the interval between initialization and the first call to __malloc_alloc. (This is somewhat delicate, since it relies on the 2 preceding words to be zero during this interval as well.)*//* Conveniently, the unsorted bin can be used as dummy top on first call */#define initial_top(M) (unsorted_chunks(M))/* Binmap To help compensate for the large number of bins, a one-level index structure is used for bin-by-bin searching. `binmap' is a bitvector recording whether bins are definitely empty so they can be skipped over during during traversals. 
The bits are NOT always cleared as soon as bins are empty, but instead only when they are noticed to be empty during traversal in malloc.*//* Conservatively use 32 bits per map word, even if on 64bit system */#define BINMAPSHIFT 5#define BITSPERMAP (1U << BINMAPSHIFT)#define BINMAPSIZE (NBINS / BITSPERMAP)#define idx2block(i) ((i) >> BINMAPSHIFT)#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))/* Fastbins An array of lists holding recently freed small chunks. Fastbins are not doubly linked. It is faster to single-link them, and since chunks are never removed from the middles of these lists, double linking is not necessary. Also, unlike regular bins, they are not even processed in FIFO order (they use faster LIFO) since ordering doesn't much matter in the transient contexts in which fastbins are normally used. Chunks in fastbins keep their inuse bit set, so they cannot be consolidated with other free chunks. __malloc_consolidate releases all chunks in fastbins and consolidates them with other free chunks.*/typedef struct malloc_chunk* mfastbinptr;/* offset 2 to use otherwise unindexable first 2 bins */#define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)/* The maximum fastbin request size we support */#define MAX_FAST_SIZE 80#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)/* FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free() that triggers automatic consolidation of possibly-surrounding fastbin chunks. This is a heuristic, so the exact value should not matter too much. It is defined at half the default trim threshold as a compromise heuristic to only attempt consolidation if it is likely to lead to trimming. 
However, it is not dynamically tunable, since consolidation reduces fragmentation surrounding loarge chunks even if trimming is not used.*/#define FASTBIN_CONSOLIDATION_THRESHOLD \ ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)/* Since the lowest 2 bits in max_fast don't matter in size comparisons, they are used as flags.*//* ANYCHUNKS_BIT held in max_fast indicates that there may be any freed chunks at all. It is set true when entering a chunk into any bin.*/#define ANYCHUNKS_BIT (1U)#define have_anychunks(M) (((M)->max_fast & ANYCHUNKS_BIT))#define set_anychunks(M) ((M)->max_fast |= ANYCHUNKS_BIT)#define clear_anychunks(M) ((M)->max_fast &= ~ANYCHUNKS_BIT)/* FASTCHUNKS_BIT held in max_fast indicates that there are probably some fastbin chunks. It is set true on entering a chunk into any fastbin, and cleared only in __malloc_consolidate.*/#define FASTCHUNKS_BIT (2U)#define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT))#define set_fastchunks(M) ((M)->max_fast |= (FASTCHUNKS_BIT|ANYCHUNKS_BIT))#define clear_fastchunks(M) ((M)->max_fast &= ~(FASTCHUNKS_BIT))/* Set value of max_fast. Use impossibly small value if 0. */#define set_max_fast(M, s) \ (M)->max_fast = (((s) == 0)? 
SMALLBIN_WIDTH: request2size(s)) | \ ((M)->max_fast & (FASTCHUNKS_BIT|ANYCHUNKS_BIT))#define get_max_fast(M) \ ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))/* morecore_properties is a status word holding dynamically discovered or controlled properties of the morecore function*/#define MORECORE_CONTIGUOUS_BIT (1U)#define contiguous(M) \ (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT))#define noncontiguous(M) \ (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT) == 0)#define set_contiguous(M) \ ((M)->morecore_properties |= MORECORE_CONTIGUOUS_BIT)#define set_noncontiguous(M) \ ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)/* ----------- Internal state representation and initialization -----------*/struct malloc_state { /* The maximum chunk size to be eligible for fastbin */ size_t max_fast; /* low 2 bits used as flags */ /* Fastbins */ mfastbinptr fastbins[NFASTBINS]; /* Base of the topmost chunk -- not otherwise kept in a bin */ mchunkptr top; /* The remainder from the most recent split of a small request */ mchunkptr last_remainder; /* Normal bins packed as described above */ mchunkptr bins[NBINS * 2]; /* Bitmap of bins. Trailing zero map handles cases of largest binned size */ unsigned int binmap[BINMAPSIZE+1]; /* Tunable parameters */ unsigned long trim_threshold; size_t top_pad; size_t mmap_threshold; /* Memory map support */ int n_mmaps; int n_mmaps_max; int max_n_mmaps; /* Cache malloc_getpagesize */ unsigned int pagesize; /* Track properties of MORECORE */ unsigned int morecore_properties; /* Statistics */ size_t mmapped_mem; size_t sbrked_mem; size_t max_sbrked_mem; size_t max_mmapped_mem; size_t max_total_mem;};typedef struct malloc_state *mstate;/* There is exactly one instance of this struct in this malloc. If you are adapting this malloc in a way that does NOT use a static malloc_state, you MUST explicitly zero-fill it before using. 
This malloc relies on the property that malloc_state is initialized to all zeroes (as is true of C statics).*/extern struct malloc_state __malloc_state; /* never directly referenced *//* All uses of av_ are via get_malloc_state(). At most one "call" to get_malloc_state is made per invocation of the public versions of malloc and free, but other routines that in turn invoke malloc and/or free may call more then once. Also, it is called in check* routines if __MALLOC_DEBUGGING is set.*/#define get_malloc_state() (&(__malloc_state))/* External internal utilities operating on mstates */void __malloc_consolidate(mstate);/* Debugging support */#if ! __MALLOC_DEBUGGING#define check_chunk(P)#define check_free_chunk(P)#define check_inuse_chunk(P)#define check_remalloced_chunk(P,N)#define check_malloced_chunk(P,N)#define check_malloc_state()#define assert(x) ((void)0)#else#define check_chunk(P) __do_check_chunk(P)#define check_free_chunk(P) __do_check_free_chunk(P)#define check_inuse_chunk(P) __do_check_inuse_chunk(P)#define check_remalloced_chunk(P,N) __do_check_remalloced_chunk(P,N)#define check_malloced_chunk(P,N) __do_check_malloced_chunk(P,N)#define check_malloc_state() __do_check_malloc_state()extern void __do_check_chunk(mchunkptr p);extern void __do_check_free_chunk(mchunkptr p);extern void __do_check_inuse_chunk(mchunkptr p);extern void __do_check_remalloced_chunk(mchunkptr p, size_t s);extern void __do_check_malloced_chunk(mchunkptr p, size_t s);extern void __do_check_malloc_state(void);#include <assert.h>#endif