dlmalloc.c
  /* Require that there be a remainder, ensuring top always exists */
  if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
  {

#if HAVE_MMAP
    /* If big and would otherwise need to extend, try to use mmap instead */
    if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
        (victim = mmap_chunk(nb)) != 0)
      return chunk2mem(victim);
#endif

    /* Try to extend */
    malloc_extend_top(nb);
    if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
      return 0; /* propagate failure */
  }

  victim = top;
  set_head(victim, nb | PREV_INUSE);
  top = chunk_at_offset(victim, nb);
  set_head(top, remainder_size | PREV_INUSE);
  check_malloced_chunk(victim, nb);
  return chunk2mem(victim);

}


/*
  free() algorithm :

  cases:

       1. free(0) has no effect.

       2. If the chunk was allocated via mmap, it is released via
          munmap().

       3. If a returned chunk borders the current high end of memory,
          it is consolidated into the top, and if the total unused
          topmost memory exceeds the trim threshold, malloc_trim is
          called.

       4. Other chunks are consolidated as they arrive, and
          placed in corresponding bins. (This includes the case of
          consolidating with the current `last_remainder').

*/

#if __STD_C
void fREe(Void_t* mem)
#else
void fREe(mem) Void_t* mem;
#endif
{
  mchunkptr p;            /* chunk corresponding to mem */
  INTERNAL_SIZE_T hd;     /* its head field */
  INTERNAL_SIZE_T sz;     /* its size */
  int       idx;          /* its bin index */
  mchunkptr next;         /* next contiguous chunk */
  INTERNAL_SIZE_T nextsz; /* its size */
  INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
  mchunkptr bck;          /* misc temp for linking */
  mchunkptr fwd;          /* misc temp for linking */
  int       islr;         /* track whether merging with last_remainder */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);
  hd = p->size;

#if HAVE_MMAP
  if (hd & IS_MMAPPED)                       /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

  check_inuse_chunk(p);

  sz = hd & ~PREV_INUSE;
  next = chunk_at_offset(p, sz);
  nextsz = chunksize(next);

  if (next == top)                           /* merge with top */
  {
    sz += nextsz;

    if (!(hd & PREV_INUSE))                  /* consolidate backward */
    {
      prevsz = p->prev_size;
      p = chunk_at_offset(p, -((long) prevsz));
      sz += prevsz;
      unlink(p, bck, fwd);
    }

    set_head(p, sz | PREV_INUSE);
    top = p;
    if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
      malloc_trim(top_pad);
    return;
  }

  set_head(next, nextsz);                    /* clear inuse bit */

  islr = 0;

  if (!(hd & PREV_INUSE))                    /* consolidate backward */
  {
    prevsz = p->prev_size;
    p = chunk_at_offset(p, -((long) prevsz));
    sz += prevsz;

    if (p->fd == last_remainder)             /* keep as last_remainder */
      islr = 1;
    else
      unlink(p, bck, fwd);
  }

  if (!(inuse_bit_at_offset(next, nextsz)))  /* consolidate forward */
  {
    sz += nextsz;

    if (!islr && next->fd == last_remainder) /* re-insert last_remainder */
    {
      islr = 1;
      link_last_remainder(p);
    }
    else
      unlink(next, bck, fwd);
  }

  set_head(p, sz | PREV_INUSE);
  set_foot(p, sz);
  if (!islr)
    frontlink(p, sz, idx, bck, fwd);
}
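
/*
  Illustrative sketch, not part of the allocator (kept out of the build
  with #if 0): what the free() cases above look like from the caller's
  side.  It assumes the usual public-name mapping set up earlier in this
  file (mALLOc/fREe compiled as malloc/free), HAVE_MMAP, and the default
  mmap_threshold and trim_threshold values; the function name and sizes
  are made up for the example.
*/
#if 0
#include <stdlib.h>

static void free_cases_sketch(void)
{
  void* small = malloc(64);          /* served from the sbrk'ed arena */
  void* big   = malloc(1024 * 1024); /* above mmap_threshold: mmapped */

  free(0);     /* case 1: no effect                                    */
  free(big);   /* case 2: an mmapped chunk is given back via munmap()  */
  free(small); /* cases 3/4: consolidated with free neighbors; if it
                  ends up bordering top and the unused topmost space
                  exceeds trim_threshold, malloc_trim() releases memory
                  back to the system                                   */
}
#endif
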
/*
  Realloc algorithm:

  Chunks that were obtained via mmap cannot be extended or shrunk
  unless HAVE_MREMAP is defined, in which case mremap is used.
  Otherwise, if their reallocation is for additional space, they are
  copied.  If for less, they are just left alone.

  Otherwise, if the reallocation is for additional space, and the
  chunk can be extended, it is, else a malloc-copy-free sequence is
  taken.  There are several different ways that a chunk could be
  extended. All are tried:

     * Extending forward into following adjacent free chunk.
     * Shifting backwards, joining preceding adjacent space
     * Both shifting backwards and extending forward.
     * Extending into newly sbrked space

  Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
  size argument of zero (re)allocates a minimum-sized chunk.

  If the reallocation is for less space, and the new request is for
  a `small' (<512 bytes) size, then the newly unused space is lopped
  off and freed.

  The old unix realloc convention of allowing the last-free'd chunk
  to be used as an argument to realloc is no longer supported.
  I don't know of any programs still relying on this feature,
  and allowing it would also allow too many other incorrect
  usages of realloc to be sensible.

*/

#if __STD_C
Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
#else
Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
#endif
{
  INTERNAL_SIZE_T    nb;            /* padded request size */

  mchunkptr oldp;                   /* chunk corresponding to oldmem */
  INTERNAL_SIZE_T    oldsize;       /* its size */

  mchunkptr newp;                   /* chunk to return */
  INTERNAL_SIZE_T    newsize;       /* its size */
  Void_t*   newmem;                 /* corresponding user mem */

  mchunkptr next;                   /* next contiguous chunk after oldp */
  INTERNAL_SIZE_T  nextsize;        /* its size */

  mchunkptr prev;                   /* previous contiguous chunk before oldp */
  INTERNAL_SIZE_T  prevsize;        /* its size */

  mchunkptr remainder;              /* holds split off extra space from newp */
  INTERNAL_SIZE_T  remainder_size;  /* its size */

  mchunkptr bck;                    /* misc temp for linking */
  mchunkptr fwd;                    /* misc temp for linking */

#ifdef REALLOC_ZERO_BYTES_FREES
  if (bytes == 0) { fREe(oldmem); return 0; }
#endif

  if ((long)bytes < 0) return 0;

  /* realloc of null is supposed to be same as malloc */
  if (oldmem == 0) return mALLOc(bytes);

  newp    = oldp    = mem2chunk(oldmem);
  newsize = oldsize = chunksize(oldp);

  nb = request2size(bytes);

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp))
  {
#if HAVE_MREMAP
    newp = mremap_chunk(oldp, nb);
    if(newp) return chunk2mem(newp);
#endif
    /* Note the extra SIZE_SZ overhead. */
    if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
    /* Must alloc, copy, free. */
    newmem = mALLOc(bytes);
    if (newmem == 0) return 0; /* propagate failure */
    MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
    munmap_chunk(oldp);
    return newmem;
  }
#endif

  check_inuse_chunk(oldp);

  if ((long)(oldsize) < (long)(nb))
  {

    /* Try expanding forward */

    next = chunk_at_offset(oldp, oldsize);
    if (next == top || !inuse(next))
    {
      nextsize = chunksize(next);

      /* Forward into top only if a remainder */
      if (next == top)
      {
        if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
        {
          newsize += nextsize;
          top = chunk_at_offset(oldp, nb);
          set_head(top, (newsize - nb) | PREV_INUSE);
          set_head_size(oldp, nb);
          return chunk2mem(oldp);
        }
      }

      /* Forward into next chunk */
      else if (((long)(nextsize + newsize) >= (long)(nb)))
      {
        unlink(next, bck, fwd);
        newsize  += nextsize;
        goto split;
      }
    }
    else
    {
      next = 0;
      nextsize = 0;
    }

    /* Try shifting backwards. */

    if (!prev_inuse(oldp))
    {
      prev = prev_chunk(oldp);
      prevsize = chunksize(prev);

      /* try forward + backward first to save a later consolidation */

      if (next != 0)
      {
        /* into top */
        if (next == top)
        {
          if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
          {
            unlink(prev, bck, fwd);
            newp = prev;
            newsize += prevsize + nextsize;
            newmem = chunk2mem(newp);
            MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
            top = chunk_at_offset(newp, nb);
            set_head(top, (newsize - nb) | PREV_INUSE);
            set_head_size(newp, nb);
            return newmem;
          }
        }

        /* into next chunk */
        else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
        {
          unlink(next, bck, fwd);
          unlink(prev, bck, fwd);
          newp = prev;
          newsize += nextsize + prevsize;
          newmem = chunk2mem(newp);
          MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
          goto split;
        }
      }

      /* backward only */
      if (prev != 0 && (long)(prevsize + newsize) >= (long)nb)
      {
        unlink(prev, bck, fwd);
        newp = prev;
        newsize += prevsize;
        newmem = chunk2mem(newp);
        MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
        goto split;
      }
    }

    /* Must allocate */

    newmem = mALLOc (bytes);

    if (newmem == 0)  /* propagate failure */
      return 0;

    /* Avoid copy if newp is next chunk after oldp. */
    /* (This can only happen when new chunk is sbrk'ed.) */

    if ( (newp = mem2chunk(newmem)) == next_chunk(oldp))
    {
      newsize += chunksize(newp);
      newp = oldp;
      goto split;
    }

    /* Otherwise copy, free, and exit */
    MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
    fREe(oldmem);
    return newmem;
  }


 split:  /* split off extra room in old or expanded chunk */

  if (newsize - nb >= MINSIZE) /* split off remainder */
  {
    remainder = chunk_at_offset(newp, nb);
    remainder_size = newsize - nb;
    set_head_size(newp, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_inuse_bit_at_offset(remainder, remainder_size);
    fREe(chunk2mem(remainder)); /* let free() deal with it */
  }
  else
  {
    set_head_size(newp, newsize);
    set_inuse_bit_at_offset(newp, newsize);
  }

  check_inuse_chunk(newp);
  return chunk2mem(newp);
}
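
/*
  Illustrative sketch, again excluded from the build with #if 0: the
  caller-visible behavior of the realloc algorithm described above.  It
  assumes rEALLOc is compiled as the public realloc and that
  REALLOC_ZERO_BYTES_FREES is left undefined; the function name and
  sizes are made up for the example.
*/
#if 0
#include <stdlib.h>
#include <string.h>

static void realloc_sketch(void)
{
  char* p = malloc(100);
  char* q;

  if (p == 0) return;
  memset(p, 'x', 100);

  /* Growing: extension in place is tried first (forward into a free
     neighbor or top, shifting backward, or both); otherwise a
     malloc-copy-free sequence runs.  The first 100 bytes survive. */
  q = realloc(p, 4000);
  if (q != 0) p = q;

  /* Shrinking to a `small' (<512 byte) request: the unused tail is
     split off and freed; the data up to the new size is untouched. */
  q = realloc(p, 50);
  if (q != 0) p = q;

  /* realloc(0, n) behaves like malloc(n); with
     REALLOC_ZERO_BYTES_FREES undefined, realloc(p, 0) returns a
     minimum-sized chunk rather than freeing p. */
  q = realloc(0, 32);

  free(p);
  free(q);
}
#endif
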
/*

  memalign algorithm:

  memalign requests more than enough space from malloc, finds a spot
  within that chunk that meets the alignment request, and then
  possibly frees the leading and trailing space.

  The alignment argument must be a power of two. This property is not
  checked by memalign, so misuse may result in random runtime errors.

  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.

*/


#if __STD_C
Void_t* mEMALIGn(size_t alignment, size_t bytes)
#else