ptmalloc.c
This is a memory allocation management program. (C source; page 1 of 5 of the listing.)
      they can be individually obtained and released from the host
      system. A request serviced through mmap is never reused by any
      other request (at least not directly; the system may just so
      happen to remap successive requests to the same locations).
      Segregating space in this way has the benefit that mmapped space
      can ALWAYS be individually released back to the system, which
      helps keep the system level memory demands of a long-lived
      program low. Mapped memory can never become `locked' between
      other chunks, as can happen with normally allocated chunks, which
      means that even trimming via malloc_trim would not release them.
      However, it has the disadvantages that:

         1. The space cannot be reclaimed, consolidated, and then
            used to service later requests, as happens with normal chunks.
         2. It can lead to more wastage because of mmap page alignment
            requirements.
         3. It causes malloc performance to be more dependent on host
            system memory management support routines which may vary in
            implementation quality and may impose arbitrary
            limitations. Generally, servicing a request via normal
            malloc steps is faster than going through a system's mmap.

      All together, these considerations should lead you to use mmap
      only for relatively large requests.

*/

#ifndef DEFAULT_MMAP_MAX
#if HAVE_MMAP
#define DEFAULT_MMAP_MAX       (1024)
#else
#define DEFAULT_MMAP_MAX       (0)
#endif
#endif

/*
    M_MMAP_MAX is the maximum number of requests to simultaneously
      service using mmap. This parameter exists because:

         1. Some systems have a limited number of internal tables for
            use by mmap.
         2. In most systems, overreliance on mmap can degrade overall
            performance.
         3. If a program allocates many large regions, it is probably
            better off using normal sbrk-based allocation routines that
            can reclaim and reallocate normal heap memory. Using a
            small value allows transition into this mode after the
            first few allocations.

      Setting to 0 disables all use of mmap.  If HAVE_MMAP is not set,
      the default value is 0, and attempts to set it to non-zero values
      in mallopt will fail.
*/

#ifndef DEFAULT_CHECK_ACTION
#define DEFAULT_CHECK_ACTION 1
#endif

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */

#define HEAP_MIN_SIZE (32*1024)
#define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than
   the mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps. */

#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed. */

/* Macro to set errno.  */
#ifndef __set_errno
# define __set_errno(val) errno = (val)
#endif
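/*
  [Illustrative note added for this listing; not part of the original
  ptmalloc source.]  The mmap discussion above can be exercised from an
  application through mallopt(), which the M_MMAP_MAX comment mentions.
  A minimal sketch, assuming a glibc-style <malloc.h> that provides the
  M_MMAP_THRESHOLD and M_MMAP_MAX parameters:
*/
#if 0   /* example only, not compiled as part of this file */
#include <malloc.h>
#include <stdlib.h>

int main(void)
{
  /* Serve requests of 256 KiB and larger via mmap ...                  */
  mallopt(M_MMAP_THRESHOLD, 256 * 1024);
  /* ... but keep at most 64 mmapped regions alive at any one time.     */
  mallopt(M_MMAP_MAX, 64);

  void *big   = malloc(512 * 1024);  /* likely mmapped; returned to the OS on free() */
  void *small = malloc(64);          /* served from the normal sbrk-based heap       */
  free(big);
  free(small);
  return 0;
}
#endif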
/* On some platforms we can compile internal, not exported functions better.
   Let the environment provide a macro and define it to be empty if it
   is not available.  */
#ifndef internal_function
# define internal_function
#endif

/*
  Special defines for the Linux/GNU C library.
*/

#ifdef _LIBC

#if __STD_C
Void_t * __default_morecore (ptrdiff_t);
Void_t *(*__morecore)(ptrdiff_t) = __default_morecore;
#else
Void_t * __default_morecore ();
Void_t *(*__morecore)() = __default_morecore;
#endif

#define MORECORE (*__morecore)
#define MORECORE_FAILURE 0

#ifndef MORECORE_CLEARS
#define MORECORE_CLEARS 1
#endif

static size_t __libc_pagesize;

#define mmap     __mmap
#define munmap   __munmap
#define mremap   __mremap
#define mprotect __mprotect
#undef  malloc_getpagesize
#define malloc_getpagesize __libc_pagesize

#else /* _LIBC */

#if __STD_C
extern Void_t*     sbrk(ptrdiff_t);
#else
extern Void_t*     sbrk();
#endif

#ifndef MORECORE
#define MORECORE sbrk
#endif

#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE -1
#endif

#ifndef MORECORE_CLEARS
#define MORECORE_CLEARS 1
#endif

#endif /* _LIBC */

#ifdef _LIBC

#define cALLOc          __libc_calloc
#define fREe            __libc_free
#define mALLOc          __libc_malloc
#define mEMALIGn        __libc_memalign
#define rEALLOc         __libc_realloc
#define vALLOc          __libc_valloc
#define pvALLOc         __libc_pvalloc
#define mALLINFo        __libc_mallinfo
#define mALLOPt         __libc_mallopt

#define mALLOC_STATs       __malloc_stats
#define mALLOC_USABLE_SIZe __malloc_usable_size
#define mALLOC_TRIm        __malloc_trim
#define mALLOC_GET_STATe   __malloc_get_state
#define mALLOC_SET_STATe   __malloc_set_state

#else

#define cALLOc          calloc
#define fREe            free
#define mALLOc          malloc
#define mEMALIGn        memalign
#define rEALLOc         realloc
#define vALLOc          valloc
#define pvALLOc         pvalloc
#define mALLINFo        mallinfo
#define mALLOPt         mallopt

#define mALLOC_STATs       malloc_stats
#define mALLOC_USABLE_SIZe malloc_usable_size
#define mALLOC_TRIm        malloc_trim
#define mALLOC_GET_STATe   malloc_get_state
#define mALLOC_SET_STATe   malloc_set_state

#endif

/* Public routines */

#if __STD_C

#ifndef _LIBC
void    ptmalloc_init(void);
#endif
Void_t* mALLOc(size_t);
void    fREe(Void_t*);
Void_t* rEALLOc(Void_t*, size_t);
Void_t* mEMALIGn(size_t, size_t);
Void_t* vALLOc(size_t);
Void_t* pvALLOc(size_t);
Void_t* cALLOc(size_t, size_t);
void    cfree(Void_t*);
int     mALLOC_TRIm(size_t);
size_t  mALLOC_USABLE_SIZe(Void_t*);
void    mALLOC_STATs(void);
int     mALLOPt(int, int);
struct mallinfo mALLINFo(void);
Void_t* mALLOC_GET_STATe(void);
int     mALLOC_SET_STATe(Void_t*);

#else /* !__STD_C */

#ifndef _LIBC
void    ptmalloc_init();
#endif
Void_t* mALLOc();
void    fREe();
Void_t* rEALLOc();
Void_t* mEMALIGn();
Void_t* vALLOc();
Void_t* pvALLOc();
Void_t* cALLOc();
void    cfree();
int     mALLOC_TRIm();
size_t  mALLOC_USABLE_SIZe();
void    mALLOC_STATs();
int     mALLOPt();
struct mallinfo mALLINFo();
Void_t* mALLOC_GET_STATe();
int     mALLOC_SET_STATe();

#endif /* __STD_C */

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#if !defined(NO_THREADS) && !HAVE_MMAP
 "Can't have threads support without mmap"
#endif
#if USE_ARENAS && !HAVE_MMAP
 "Can't have multiple arenas without mmap"
#endif

/*
  Type declarations
*/

struct malloc_chunk
{
  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;   /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk* mchunkptr;
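/*
  [Illustrative note added for this listing; not part of the original
  ptmalloc source.]  The MORECORE / __morecore hook declared above is how
  the allocator asks the system for more heap; outside of glibc it simply
  defaults to sbrk().  A program could, in principle, install its own
  routine.  A minimal sketch, assuming the glibc-era declaration of
  __morecore in <malloc.h> and the _LIBC convention above of returning 0
  on failure:
*/
#if 0   /* example only, not compiled as part of this file */
#include <malloc.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>

/* Obtain (or release) `increment' bytes of heap from the kernel via
   sbrk(), tracing each call.  sbrk() reports failure as (void *) -1;
   map that to a null pointer for the hook's caller.                  */
static void *traced_morecore(ptrdiff_t increment)
{
  void *p = sbrk(increment);
  fprintf(stderr, "morecore(%ld) -> %p\n", (long) increment, p);
  return (p == (void *) -1) ? NULL : p;
}

int main(void)
{
  __morecore = traced_morecore;   /* install before the first malloc() */
  void *p = malloc(1000);
  free(p);
  return 0;
}
#endif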
/*
   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if allocated            | |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_space() bytes)                     .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk                                     |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    (The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory.)
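    [Illustrative aside added for this listing, not part of the original
    comment.]  In code, the layout in the diagrams above reduces to a few
    lines of pointer arithmetic.  The names below mirror macros that the
    real implementation defines further on (not shown on this page);
    SIZE_SZ stands for sizeof(INTERNAL_SIZE_T), i.e. one size field:

        #define SIZE_SZ         (sizeof(INTERNAL_SIZE_T))
        #define PREV_INUSE      0x1    // low-order bit of `size'
        #define IS_MMAPPED      0x2    // second-lowest bit of `size'

        // "mem", the pointer handed to the user, lies two size fields
        // past "chunk"; going back is the mirror image.
        #define chunk2mem(p)    ((Void_t*)((char*)(p) + 2*SIZE_SZ))
        #define mem2chunk(mem)  ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

        // Mask off the status bits to get the true length, then step
        // forward to the next contiguous chunk.
        #define chunksize(p)    ((p)->size & ~(PREV_INUSE|IS_MMAPPED))
        #define next_chunk(p)   ((mchunkptr)((char*)(p) + chunksize(p)))

        // prev_size is only meaningful when the previous chunk is free,
        // i.e. when PREV_INUSE is clear.
        #define prev_inuse(p)   ((p)->size & PREV_INUSE)
        #define prev_chunk(p)   ((mchunkptr)((char*)(p) - (p)->prev_size))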
    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk. (This makes it easier to
    deal with alignments etc).

    The two exceptions to all this are

     1. The special chunk `top', which doesn't bother using the
        trailing size field since there is no
        next contiguous chunk that would have to index off it. (After
        initialization, `top' is forced to always exist.  If it would
        become less than MINSIZE bytes long, it is replenished via
        malloc_extend_top.)

     2. Chunks allocated via mmap, which have the second-lowest-order
        bit (IS_MMAPPED) set in their size fields.  Because they are
        never merged or traversed from any other chunk, they have no
        foot size or inuse information.

    Available chunks are kept in any of several places (all declared below):

    * `av': An array of chunks serving as bin headers for consolidated
       chunks. Each bin is doubly linked.  The bins are approximately
       proportionally (log) spaced.  There are a lot of these bins
       (128). This may look excessive, but works very well in
       practice.  All procedures maintain the invariant that no
       consolidated chunk physically borders another one. Chunks in
       bins are kept in size order, with ties going to the
       approximately least recently used chunk.

       The chunks in each bin are maintained in decreasing sorted order by
       size.  This is irrelevant for the small bins, which all contain
       the same-sized chunks, but facilitates best-fit allocation for
       larger chunks. (These lists are just sequential. Keeping them in
       order almost never requires enough traversal to warrant using
       fancier ordered data structures.)  Chunks of the same size are
       linked with the most recently freed at the front, and allocations
       are taken from the back.  This results in LRU or FIFO allocation
