📄 ptmalloc.c
           IAV(80), IAV(81), IAV(82), IAV(83), IAV(84), IAV(85), IAV(86), IAV(87),
           IAV(88), IAV(89), IAV(90), IAV(91), IAV(92), IAV(93), IAV(94), IAV(95),
           IAV(96), IAV(97), IAV(98), IAV(99), IAV(100), IAV(101), IAV(102), IAV(103),
           IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
           IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
           IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127) },
 &main_arena, /* next */
 0, /* size */
#if THREAD_STATS
 0, 0, 0, /* stat_lock_direct, stat_lock_loop, stat_lock_wait */
#endif
 MUTEX_INITIALIZER /* mutex */
};

#undef IAV

/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock = MUTEX_INITIALIZER;

#if THREAD_STATS
static int stat_n_heaps = 0;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* variables holding tunable values */

static unsigned long trim_threshold  = DEFAULT_TRIM_THRESHOLD;
static unsigned long top_pad         = DEFAULT_TOP_PAD;
static unsigned int  n_mmaps_max     = DEFAULT_MMAP_MAX;
static unsigned long mmap_threshold  = DEFAULT_MMAP_THRESHOLD;
static int           check_action    = DEFAULT_CHECK_ACTION;

/* The first value returned from sbrk */
static char* sbrk_base = (char*)(-1);

/* The maximum memory obtained from system via sbrk */
static unsigned long max_sbrked_mem = 0;

/* The maximum via either sbrk or mmap (too difficult to track with threads) */
#ifdef NO_THREADS
static unsigned long max_total_mem = 0;
#endif

/* The total memory obtained from system via sbrk */
#define sbrked_mem (main_arena.size)

/* Tracking mmaps */

static unsigned int n_mmaps = 0;
static unsigned int max_n_mmaps = 0;
static unsigned long mmapped_mem = 0;
static unsigned long max_mmapped_mem = 0;

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem = 0;

#ifndef _LIBC
#define weak_variable
#else
/* In GNU libc we want the hook variables to be weak definitions to
   avoid a problem with Emacs.  */
#define weak_variable weak_function
#endif

/* Already initialized? */
int __malloc_initialized = -1;

#ifndef NO_THREADS

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

#if defined _LIBC || defined MALLOC_HOOKS
static __malloc_ptr_t (*save_malloc_hook) __MALLOC_P ((size_t __size));
static void           (*save_free_hook) __MALLOC_P ((__malloc_ptr_t __ptr));
static Void_t*        save_arena;
#endif

static void
ptmalloc_lock_all __MALLOC_P((void))
{
  arena *ar_ptr;

  (void)mutex_lock(&list_lock);
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
#if defined _LIBC || defined MALLOC_HOOKS
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, (Void_t*)0);
#endif
}

static void
ptmalloc_unlock_all __MALLOC_P((void))
{
  arena *ar_ptr;

#if defined _LIBC || defined MALLOC_HOOKS
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}

#ifdef __linux__

/* In LinuxThreads, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 __MALLOC_P((void))
{
  arena *ar_ptr;

#if defined _LIBC || defined MALLOC_HOOKS
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
  for(ar_ptr = &main_arena;;) {
    (void)mutex_init(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_init(&list_lock);
}

#else

#define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif

#endif /* !defined NO_THREADS */

/* Initialization routine. */
#if defined(_LIBC)
#if 0
static void ptmalloc_init __MALLOC_P ((void)) __attribute__ ((constructor));
#endif

static void
ptmalloc_init __MALLOC_P((void))
#else
void
ptmalloc_init __MALLOC_P((void))
#endif
{
#if defined _LIBC || defined MALLOC_HOOKS
# if __STD_C
  const char* s;
# else
  char* s;
# endif
#endif

  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;
#ifdef _LIBC
  __libc_pagesize = __getpagesize();
#endif
#ifndef NO_THREADS
#if defined _LIBC || defined MALLOC_HOOKS
  /* With some threads implementations, creating thread-specific data
     or initializing a mutex may call malloc() itself.  Provide a
     simple starter version (realloc() won't work). */
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __free_hook = free_starter;
#endif
#ifdef _LIBC
  /* Initialize the pthreads interface. */
  if (__pthread_initialize != NULL)
    __pthread_initialize();
#endif
#endif /* !defined NO_THREADS */
  mutex_init(&main_arena.mutex);
  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (Void_t *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
#if defined _LIBC || defined MALLOC_HOOKS
  if((s = __secure_getenv("MALLOC_TRIM_THRESHOLD_")))
    mALLOPt(M_TRIM_THRESHOLD, atoi(s));
  if((s = __secure_getenv("MALLOC_TOP_PAD_")))
    mALLOPt(M_TOP_PAD, atoi(s));
  if((s = __secure_getenv("MALLOC_MMAP_THRESHOLD_")))
    mALLOPt(M_MMAP_THRESHOLD, atoi(s));
  if((s = __secure_getenv("MALLOC_MMAP_MAX_")))
    mALLOPt(M_MMAP_MAX, atoi(s));
  s = getenv("MALLOC_CHECK_");
#ifndef NO_THREADS
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
  if(s) {
    if(s[0]) mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
    __malloc_check_init();
  }
  if(__malloc_initialize_hook != NULL)
    (*__malloc_initialize_hook)();
#endif
  __malloc_initialized = 1;
}

/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif

#if defined _LIBC || defined MALLOC_HOOKS

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static Void_t*
#if __STD_C
malloc_hook_ini(size_t sz)
#else
malloc_hook_ini(sz) size_t sz;
#endif
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return mALLOc(sz);
}

static Void_t*
#if __STD_C
realloc_hook_ini(Void_t* ptr, size_t sz)
#else
realloc_hook_ini(ptr, sz) Void_t* ptr; size_t sz;
#endif
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return rEALLOc(ptr, sz);
}

static Void_t*
#if __STD_C
memalign_hook_ini(size_t sz, size_t alignment)
#else
memalign_hook_ini(sz, alignment) size_t sz; size_t alignment;
#endif
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return mEMALIGn(sz, alignment);
}

void weak_variable (*__malloc_initialize_hook) __MALLOC_P ((void)) = NULL;
void weak_variable (*__free_hook) __MALLOC_P ((__malloc_ptr_t __ptr)) = NULL;
__malloc_ptr_t weak_variable (*__malloc_hook)
 __MALLOC_P ((size_t __size)) = malloc_hook_ini;
__malloc_ptr_t weak_variable (*__realloc_hook)
 __MALLOC_P ((__malloc_ptr_t __ptr, size_t __size)) = realloc_hook_ini;
__malloc_ptr_t weak_variable (*__memalign_hook)
 __MALLOC_P ((size_t __size, size_t __alignment)) = memalign_hook_ini;
void weak_variable (*__after_morecore_hook) __MALLOC_P ((void)) = NULL;

/* Whether we are using malloc checking.  */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the
   MALLOC_CHECK_ environment variable).  It is reset by
   __malloc_check_init to tell malloc_set_state that the user has
   requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.
   If it turns out that the heap was created with malloc checking and
   the user has requested it malloc_set_state just calls
   __malloc_check_init again to enable it.  On the other hand, reusing
   such a heap without further malloc checking is safe.  */
static int disallow_malloc_check;

/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
  if(check_action & 1)
    fprintf(stderr, "malloc: using debugging hooks\n");
}

#endif

/* Routines dealing with mmap(). */

#if HAVE_MMAP

#ifndef MAP_ANONYMOUS

static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */

#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
 (dev_zero_fd = open("/dev/zero", O_RDWR), \
  mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
   mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))

#else

#define MMAP(addr, size, prot, flags) \
 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))

#endif

#if defined __GNUC__ && __GNUC__ >= 2
/* This function is only called from one place, inline it.  */
__inline__
#endif
static mchunkptr
internal_function
#if __STD_C
mmap_chunk(size_t size)
#else
mmap_chunk(size) size_t size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  mchunkptr p;

  if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */

  /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
   * there is no following chunk whose prev_size field could be used.
   */
  size = (size + SIZE_SZ + page_mask) & ~page_mask;

  p = (mc
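
A few usage sketches follow; none of them are part of ptmalloc.c. First, the thread_atfork() registration of ptmalloc_lock_all / ptmalloc_unlock_all / ptmalloc_unlock_all2 above is the standard prepare/parent/child pattern for keeping locks consistent across fork(). A minimal sketch of the same idea for an ordinary library, written against plain pthreads (state_lock and my_lib_init are invented names for the sketch):

/* --- Usage sketch, not part of ptmalloc.c --- */
#include <pthread.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/* Run in the parent just before fork(): take the lock so no other
   thread holds it while the address space is duplicated. */
static void prepare (void) { pthread_mutex_lock (&state_lock); }

/* Run in the parent after fork(): simply release the lock again. */
static void parent (void) { pthread_mutex_unlock (&state_lock); }

/* Run in the child after fork(): as the comment above
   ptmalloc_unlock_all2 explains for LinuxThreads, re-initializing
   the mutex is safer in the child than unlocking it. */
static void child (void) { pthread_mutex_init (&state_lock, NULL); }

void
my_lib_init (void)
{
  pthread_atfork (prepare, parent, child);
}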
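Second, ptmalloc_init() above turns the MALLOC_TRIM_THRESHOLD_, MALLOC_TOP_PAD_, MALLOC_MMAP_THRESHOLD_ and MALLOC_MMAP_MAX_ environment variables into mALLOPt() calls. A program can set the same tunables directly with mallopt() before its first allocation; the values below are arbitrary examples, not recommendations:

/* --- Usage sketch, not part of ptmalloc.c --- */
#include <malloc.h>

int
main (void)
{
  /* Must run before the first allocation to take full effect. */
  mallopt (M_TRIM_THRESHOLD, 256 * 1024); /* trim heap top above 256 kB free */
  mallopt (M_TOP_PAD,        64 * 1024);  /* extra slack on each sbrk() */
  mallopt (M_MMAP_THRESHOLD, 512 * 1024); /* mmap() only requests >= 512 kB */
  mallopt (M_MMAP_MAX,       1024);       /* at most 1024 mmapped regions */
  /* ... rest of the program ... */
  return 0;
}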
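Third, the weak hook variables defined above (__malloc_initialize_hook, __malloc_hook, __free_hook, ...) are the user-visible customization points; ptmalloc_init() calls __malloc_initialize_hook at the end, which is the intended place to install application hooks. A rough sketch, assuming the pre-glibc-2.2 hook signatures declared in this listing (no caller argument; logging_malloc_hook and my_init_hook are invented names):

/* --- Usage sketch, not part of ptmalloc.c --- */
#include <stdio.h>
#include <malloc.h>

static __malloc_ptr_t (*old_malloc_hook) (size_t);

static __malloc_ptr_t
logging_malloc_hook (size_t size)
{
  __malloc_ptr_t result;

  /* Restore the saved hook so the real malloc runs, and so any malloc
     done inside fprintf below cannot recurse into this hook. */
  __malloc_hook = old_malloc_hook;
  result = malloc (size);
  fprintf (stderr, "malloc(%lu) = %p\n", (unsigned long) size, result);
  old_malloc_hook = __malloc_hook;      /* may have changed meanwhile */
  __malloc_hook = logging_malloc_hook;  /* re-install ourselves */
  return result;
}

/* Installed by ptmalloc once initialization is finished. */
static void
my_init_hook (void)
{
  old_malloc_hook = __malloc_hook;
  __malloc_hook = logging_malloc_hook;
}

void (*__malloc_initialize_hook) (void) = my_init_hook;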
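Finally, the MMAP macro and the size computation at the start of mmap_chunk() amount to: round the request (plus one bookkeeping word) up to whole pages, then take a private anonymous mapping, falling back to /dev/zero where MAP_ANONYMOUS does not exist. A standalone sketch of that logic (anon_map is an invented name, and sizeof(size_t) merely stands in for SIZE_SZ):

/* --- Usage sketch, not part of ptmalloc.c --- */
#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static void *
anon_map (size_t size)
{
  size_t page_mask = (size_t) sysconf (_SC_PAGESIZE) - 1;
  size_t overhead  = sizeof (size_t);   /* stands in for SIZE_SZ */
  void *p;

  /* Same rounding as mmap_chunk(): request plus overhead, rounded up
     to a whole number of pages. */
  size = (size + overhead + page_mask) & ~page_mask;

#ifdef MAP_ANONYMOUS
  p = mmap (0, size, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#else
  /* Pre-MAP_ANONYMOUS systems: map /dev/zero instead; the real MMAP
     macro caches this descriptor in dev_zero_fd. */
  {
    int fd = open ("/dev/zero", O_RDWR);
    if (fd < 0)
      return NULL;
    p = mmap (0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    close (fd);   /* the mapping stays valid after the fd is closed */
  }
#endif
  return p == MAP_FAILED ? NULL : p;
}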