arena.c
/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

/* $Id: arena.c,v 1.9 2004/11/05 14:42:23 wg Exp $ */

/* Compile-time constants. */

#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
#define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps. */

#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed. */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  Not used unless compiling with
   USE_ARENAS. */

typedef struct _heap_info {
  mstate ar_ptr;           /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;             /* Current size in bytes. */
  size_t pad;              /* Make sure the following data is properly aligned. */
} heap_info;

/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock;

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/

#if USE_ARENAS

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  Void_t *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size)); \
} while(0)

/* find the heap and corresponding arena for a given ptr */
#define heap_for_ptr(ptr) \
  ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
  (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)

#else /* !USE_ARENAS */

/* There is only one arena, main_arena. */

#if THREAD_STATS
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  if(!mutex_trylock(&ar_ptr->mutex)) \
    ++(ar_ptr->stat_lock_direct); \
  else { \
    (void)mutex_lock(&ar_ptr->mutex); \
    ++(ar_ptr->stat_lock_wait); \
  } \
} while(0)
#else
#define arena_get(ar_ptr, sz) do { \
  ar_ptr = &main_arena; \
  (void)mutex_lock(&ar_ptr->mutex); \
} while(0)
#endif
#define arena_for_chunk(ptr) (&main_arena)

#endif /* USE_ARENAS */

/**************************************************************************/

#ifndef NO_THREADS

/* atfork support. */

static __malloc_ptr_t (*save_malloc_hook) __MALLOC_P ((size_t __size,
                                                       __const __malloc_ptr_t));
# if !defined _LIBC || !defined USE_TLS || (defined SHARED && !USE___THREAD)
static __malloc_ptr_t (*save_memalign_hook) __MALLOC_P ((size_t __align,
                                                         size_t __size,
                                                         __const __malloc_ptr_t));
# endif
static void (*save_free_hook) __MALLOC_P ((__malloc_ptr_t __ptr,
                                           __const __malloc_ptr_t));
static Void_t* save_arena;

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use. */

#define ATFORK_ARENA_PTR ((Void_t*)-1)

/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static Void_t*
malloc_atfork(size_t sz, const Void_t *caller)
{
  Void_t *vptr = NULL;
  Void_t *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all. */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return public_mALLOc(sz);
  }
}

static void
free_atfork(Void_t* mem, const Void_t *caller)
{
  Void_t *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                         /* free(0) has no effect */
    return;

  p = mem2chunk(mem);  /* do not bother to replicate free_check here */

#if HAVE_MMAP
  if (chunk_is_mmapped(p))              /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  _int_free(ar_ptr, mem);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
}

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

static void
ptmalloc_lock_all __MALLOC_P((void))
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  (void)mutex_lock(&list_lock);
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
}

static void
ptmalloc_unlock_all __MALLOC_P((void))
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}

#ifdef __linux__

/* In LinuxThreads, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child. */

static void
ptmalloc_unlock_all2 __MALLOC_P((void))
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
#if defined _LIBC || defined MALLOC_HOOKS
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
  for(ar_ptr = &main_arena;;) {
    (void)mutex_init(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_init(&list_lock);
}

#else

#define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif

#endif /* !defined NO_THREADS */

/* Initialization routine. */
#ifdef _LIBC
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit. */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
#endif /* _LIBC */

/* Set up basic state so that _int_malloc et al can work. */
static void
ptmalloc_init_minimal __MALLOC_P((void))
{
#if DEFAULT_TOP_PAD != 0
  mp_.top_pad        = DEFAULT_TOP_PAD;
#endif
  mp_.n_mmaps_max    = DEFAULT_MMAP_MAX;
  mp_.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  mp_.trim_threshold = DEFAULT_TRIM_THRESHOLD;
  mp_.pagesize       = malloc_getpagesize;
}

#if !(USE_STARTER & 2)
static
#endif
void
ptmalloc_init __MALLOC_P((void))
{
#if __STD_C
  const char* s;
#else
  char* s;
#endif
  int secure = 0;

  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

  if (mp_.pagesize == 0)
    ptmalloc_init_minimal();

#ifndef NO_THREADS
# if USE_STARTER & 1
  /* With some threads implementations, creating thread-specific data
     or initializing a mutex may call malloc() itself.  Provide a
     simple starter version (realloc() won't work). */
  save_malloc_hook = __malloc_hook;
  save_memalign_hook = __memalign_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __memalign_hook = memalign_starter;
  __free_hook = free_starter;
#  ifdef _LIBC
  /* Initialize the pthreads interface. */
  if (__pthread_initialize != NULL)
    __pthread_initialize();
#  endif /* !defined _LIBC */
# endif /* USE_STARTER & 1 */
#endif /* !defined NO_THREADS */
  mutex_init(&main_arena.mutex);
  main_arena.next = &main_arena;

  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (Void_t *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
#ifndef NO_THREADS
# if USE_STARTER & 1
  __malloc_hook = save_malloc_hook;
  __memalign_hook = save_memalign_hook;
  __free_hook = save_free_hook;
# endif
# if USE_STARTER & 2
  __malloc_hook = 0;
  __memalign_hook = 0;
  __free_hook = 0;
# endif
#endif
#ifdef _LIBC
  secure = __libc_enable_secure;
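
A note on heap_for_ptr(): it works only because HEAP_MAX_SIZE is a power of two and every non-main heap is mmap()ed at a HEAP_MAX_SIZE-aligned address, so clearing the low bits of any chunk address inside a heap recovers the heap_info header at the heap's base. The following standalone sketch (not part of arena.c; the addresses are made up for illustration) demonstrates the same mask arithmetic:

/* demo_heap_for_ptr.c -- illustrative only, compiles on its own. */
#include <stdio.h>

#define HEAP_MAX_SIZE (1024*1024)   /* must be a power of two */

int main(void)
{
  /* Pretend a heap was mmap()ed at a HEAP_MAX_SIZE-aligned address. */
  unsigned long heap_base  = 16UL * HEAP_MAX_SIZE; /* hypothetical base */
  unsigned long chunk_addr = heap_base + 0x4242;   /* a chunk inside it */

  /* Same arithmetic as heap_for_ptr(): mask off the low bits. */
  unsigned long recovered = chunk_addr & ~((unsigned long)HEAP_MAX_SIZE - 1);

  printf("chunk = %#lx, heap = %#lx (%s)\n", chunk_addr, recovered,
         recovered == heap_base ? "recovered" : "mismatch");
  return 0;
}

Because the mask costs one AND instruction, arena_for_chunk() can map any chunk to its owning arena without a lookup table; this is the reason the comment above insists HEAP_MAX_SIZE be a power of two.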