
malloc.c

Full source code of the Pocket GCC compiler (a GCC port that runs on the Pocket PC) by the Russian developer Mamaich.
Language: C
Page 1 of 5
*/

#if MALLOC_DEBUG
#include <assert.h>
#else
#define assert(x) ((void)0)
#endif

/*
  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
  of chunk sizes. On a 64-bit machine, you can reduce malloc
  overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
  at the expense of not being able to handle requests greater than
  2^31. This limitation is hardly ever a concern; you are encouraged
  to set this. However, the default version is the same as size_t.
*/

#ifndef INTERNAL_SIZE_T
#define INTERNAL_SIZE_T size_t
#endif

/*
  REALLOC_ZERO_BYTES_FREES should be set if a call to realloc with
  zero bytes should be the same as a call to free.  The C standard
  requires this. Otherwise, since this malloc returns a unique pointer
  for malloc(0), so does realloc(p, 0).
*/

#define REALLOC_ZERO_BYTES_FREES

/*
  HAVE_MEMCPY should be defined if you are not otherwise using
  ANSI STD C, but still have memcpy and memset in your C library
  and want to use them in calloc and realloc. Otherwise simple
  macro versions are defined here.

  USE_MEMCPY should be defined as 1 if you actually want to
  have memset and memcpy called. People report that the macro
  versions are often enough faster than libc versions on many
  systems that it is better to use them.
*/

#define HAVE_MEMCPY 1

#ifndef USE_MEMCPY
#ifdef HAVE_MEMCPY
#define USE_MEMCPY 1
#else
#define USE_MEMCPY 0
#endif
#endif

#if (__STD_C || defined(HAVE_MEMCPY))

#if __STD_C
void* memset(void*, int, size_t);
void* memcpy(void*, const void*, size_t);
void* memmove(void*, const void*, size_t);
#else
Void_t* memset();
Void_t* memcpy();
Void_t* memmove();
#endif

#endif

/* The following macros are only invoked with (2n+1)-multiples of
   INTERNAL_SIZE_T units, with a positive integer n. This is exploited
   for fast inline execution when n is small.  If the regions to be
   copied do overlap, the destination lies always _below_ the source.  */

#if USE_MEMCPY

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T mzsz = (nbytes);                                            \
  if(mzsz <= 9*sizeof(mzsz)) {                                                \
    INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
    if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
      if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0;                               \
                                     *mz++ = 0;                               \
        if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
                                     *mz++ = 0; }}}                           \
                                     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
                                     *mz   = 0;                               \
  } else memset((charp), 0, mzsz);                                            \
} while(0)
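/*
  A rough worked example of the unrolling above (sizes chosen only for
  illustration): with nbytes == 7*sizeof(INTERNAL_SIZE_T), the
  mzsz <= 9*sizeof(mzsz) test succeeds, the >= 5 and >= 7 branches each
  emit two stores, the >= 9 branch is skipped, and the three
  unconditional stores at the end bring the total to exactly seven
  zeroed words.  With a 3-word request only the final three stores run,
  and anything above nine words is handed straight to
  memset((charp), 0, mzsz).
*/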
/* If the regions overlap, dest is always _below_ src.  */

#define MALLOC_COPY(dest,src,nbytes,overlap)                                  \
do {                                                                          \
  INTERNAL_SIZE_T mcsz = (nbytes);                                            \
  if(mcsz <= 9*sizeof(mcsz)) {                                                \
    INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
    INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
    if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
      if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
        if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++; }}}                 \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst   = *mcsrc  ;                     \
  } else if(overlap)                                                          \
    memmove(dest, src, mcsz);                                                 \
  else                                                                        \
    memcpy(dest, src, mcsz);                                                  \
} while(0)

#else /* !USE_MEMCPY */

/* Use Duff's device for good zeroing/copying performance. */

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mzp++ = 0;                                             \
    case 7:           *mzp++ = 0;                                             \
    case 6:           *mzp++ = 0;                                             \
    case 5:           *mzp++ = 0;                                             \
    case 4:           *mzp++ = 0;                                             \
    case 3:           *mzp++ = 0;                                             \
    case 2:           *mzp++ = 0;                                             \
    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
  }                                                                           \
} while(0)
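/*
  A rough worked trace of the Duff's-device dispatch above, assuming a
  19-word request: mctmp starts at 19, so mcn becomes (19-1)/8 = 2 and
  mctmp %= 8 leaves 3.  The switch jumps into the loop body at case 3
  and stores three words; at case 1 mcn is still positive, so it is
  decremented and the for(;;) loop runs full eight-word passes from
  case 0.  After two such passes mcn has reached 0 and the break fires:
  3 + 8 + 8 = 19 words zeroed, with only one count check per eight
  stores.
*/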
/* If the regions overlap, dest is always _below_ src.  */

#define MALLOC_COPY(dest,src,nbytes,overlap)                                  \
do {                                                                          \
  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
    case 7:           *mcdst++ = *mcsrc++;                                    \
    case 6:           *mcdst++ = *mcsrc++;                                    \
    case 5:           *mcdst++ = *mcsrc++;                                    \
    case 4:           *mcdst++ = *mcsrc++;                                    \
    case 3:           *mcdst++ = *mcsrc++;                                    \
    case 2:           *mcdst++ = *mcsrc++;                                    \
    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
  }                                                                           \
} while(0)

#endif

#ifndef LACKS_UNISTD_H
#  include <unistd.h>
#endif

/*
  Define HAVE_MMAP to optionally make malloc() use mmap() to allocate
  very large blocks.  These will be returned to the operating system
  immediately after a free().  HAVE_MMAP is also a prerequisite to
  support multiple `arenas' (see USE_ARENAS below).
*/

#ifndef HAVE_MMAP
# ifdef _POSIX_MAPPED_FILES
#  define HAVE_MMAP 1
# endif
#endif

/*
  Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
  large blocks.  This is currently only possible on Linux with
  kernel versions newer than 1.3.77.
*/

#ifndef HAVE_MREMAP
#define HAVE_MREMAP defined(__linux__)
#endif

/* Define USE_ARENAS to enable support for multiple `arenas'.  These
   are allocated using mmap(), are necessary for threads and
   occasionally useful to overcome address space limitations affecting
   sbrk(). */

#ifndef USE_ARENAS
#define USE_ARENAS HAVE_MMAP
#endif

#if HAVE_MMAP

#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif
#if !defined(MAP_FAILED)
#define MAP_FAILED ((char*)-1)
#endif

#ifndef MAP_NORESERVE
# ifdef MAP_AUTORESRV
#  define MAP_NORESERVE MAP_AUTORESRV
# else
#  define MAP_NORESERVE 0
# endif
#endif

#endif /* HAVE_MMAP */
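/*
  For illustration only, the kind of anonymous-mapping call the macros
  above are set up for; this is a sketch, not the allocator's actual
  code, and size, p and fd are placeholder names.  On systems without
  MAP_ANONYMOUS the usual fallback is to map /dev/zero instead:

    #ifdef MAP_ANONYMOUS
      p = (char*) mmap(0, size, PROT_READ|PROT_WRITE,
                       MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    #else
      fd = open("/dev/zero", O_RDWR);
      p = (char*) mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
    #endif
    if (p == (char*) MAP_FAILED)
      ... fall back to sbrk() ...
*/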
/*
  Access to system page size. To the extent possible, this malloc
  manages memory from the system in page-size units.

  The following mechanics for getpagesize were adapted from
  bsd/gnu getpagesize.h
*/

#ifndef malloc_getpagesize
#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
#    ifndef _SC_PAGE_SIZE
#      define _SC_PAGE_SIZE _SC_PAGESIZE
#    endif
#  endif
#  ifdef _SC_PAGE_SIZE
#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#  else
#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
       extern size_t getpagesize();
#      define malloc_getpagesize getpagesize()
#    else
#      include <sys/param.h>
#      ifdef EXEC_PAGESIZE
#        define malloc_getpagesize EXEC_PAGESIZE
#      else
#        ifdef NBPG
#          ifndef CLSIZE
#            define malloc_getpagesize NBPG
#          else
#            define malloc_getpagesize (NBPG * CLSIZE)
#          endif
#        else
#          ifdef NBPC
#            define malloc_getpagesize NBPC
#          else
#            ifdef PAGESIZE
#              define malloc_getpagesize PAGESIZE
#            else
#              define malloc_getpagesize (4096) /* just guess */
#            endif
#          endif
#        endif
#      endif
#    endif
#  endif
#endif

/*
  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing the same kind of
  information you can get from malloc_stats. It should work on
  any SVID/XPG compliant system that has a /usr/include/malloc.h
  defining struct mallinfo. (If you'd like to install such a thing
  yourself, cut out the preliminary declarations as described above
  and below and save them in a malloc.h file. But there's no
  compelling reason to bother to do this.)

  The main declaration needed is the mallinfo struct that is returned
  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
  bunch of fields, most of which are not even meaningful in this
  version of malloc. Some of these fields are instead filled by
  mallinfo() with other numbers that might possibly be of interest.

  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
  /usr/include/malloc.h file that includes a declaration of struct
  mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
  version is declared below.  These must be precisely the same for
  mallinfo() to work.
*/

/* #define HAVE_USR_INCLUDE_MALLOC_H */

#if HAVE_USR_INCLUDE_MALLOC_H
# include "/usr/include/malloc.h"
#else
# ifdef _LIBC
#  include "malloc.h"
# else
#  include "ptmalloc.h"
# endif
#endif

#include <bp-checks.h>
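/*
  A small usage sketch of the interface pulled in above; the field
  names are the standard SVID ones, and, as noted, not every field is
  meaningful in this malloc:

    struct mallinfo mi = mallinfo();
    fprintf(stderr, "in use: %d  free: %d  mmapped: %d\n",
            mi.uordblks, mi.fordblks, mi.hblkhd);
*/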
#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
#endif

/*
    M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
      to keep before releasing via malloc_trim in free().

      Automatic trimming is mainly useful in long-lived programs.
      Because trimming via sbrk can be slow on some systems, and can
      sometimes be wasteful (in cases where programs immediately
      afterward allocate more large chunks) the value should be high
      enough so that your overall system performance would improve by
      releasing.

      The trim threshold and the mmap control parameters (see below)
      can be traded off with one another. Trimming and mmapping are
      two different ways of releasing unused memory back to the
      system. Between these two, it is often possible to keep
      system-level demands of a long-lived program down to a bare
      minimum. For example, in one test suite of sessions measuring
      the XF86 X server on Linux, using a trim threshold of 128K and a
      mmap threshold of 192K led to near-minimal long term resource
      consumption.

      If you are using this malloc in a long-lived program, it should
      pay to experiment with these values.  As a rough guide, you
      might set to a value close to the average size of a process
      (program) running on your system.  Releasing this much memory
      would allow such a process to run in memory.  Generally, it's
      worth it to tune for trimming rather than memory mapping when a
      program undergoes phases where several large chunks are
      allocated and released in ways that can reuse each other's
      storage, perhaps mixed with phases where there are no such
      chunks at all.  And in well-behaved long-lived programs,
      controlling release of large blocks via trimming versus mapping
      is usually faster.

      However, in most programs, these parameters serve mainly as
      protection against the system-level effects of carrying around
      massive amounts of unneeded memory. Since frequent calls to
      sbrk, mmap, and munmap otherwise degrade performance, the default
      parameters are set to relatively high values that serve only as
      safeguards.

      The default trim value is high enough to cause trimming only in
      fairly extreme (by current memory consumption standards) cases.
      It must be greater than page size to have any useful effect.  To
      disable trimming completely, you can set to (unsigned long)(-1);
*/

#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD        (0)
#endif

/*
    M_TOP_PAD is the amount of extra `padding' space to allocate or
      retain whenever sbrk is called. It is used in two ways internally:

      * When sbrk is called to extend the top of the arena to satisfy
        a new malloc request, this much padding is added to the sbrk
        request.

      * When malloc_trim is called automatically from free(),
        it is used as the `pad' argument.

      In both cases, the actual amount of padding is rounded
      so that the end of the arena is always a system page boundary.
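/*
  A rough tuning sketch for the two parameters described above, via the
  mallopt() interface this malloc provides; the values are examples
  only, not recommendations:

    mallopt(M_TRIM_THRESHOLD, 128 * 1024);   trim when more than 128K is unused
    mallopt(M_TRIM_THRESHOLD, -1);           or disable trimming entirely
    mallopt(M_TOP_PAD, 64 * 1024);           pad each sbrk request by 64K
*/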
