📄 arena.c
  s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL, 0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! secure && memcmp (envline, "TOP_PAD_", 8) == 0)
                mALLOPt(M_TOP_PAD, atoi(&envline[9]));
              break;
            case 9:
              if (! secure && memcmp (envline, "MMAP_MAX_", 9) == 0)
                mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
              break;
            case 15:
              if (! secure)
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
#else
  if (! secure)
    {
      if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
        mALLOPt(M_TRIM_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_TOP_PAD_")))
        mALLOPt(M_TOP_PAD, atoi(s));
      if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
        mALLOPt(M_MMAP_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_MMAP_MAX_")))
        mALLOPt(M_MMAP_MAX, atoi(s));
    }
  s = getenv("MALLOC_CHECK_");
#endif
  if(s) {
    if(s[0])
      mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
    __malloc_check_init();
  }
  if(__malloc_initialize_hook != NULL)
    (*__malloc_initialize_hook)();
  __malloc_initialized = 1;
}

/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif
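/* Illustration (not part of the original file): the environment scan
   above is what makes a setting such as

       MALLOC_TRIM_THRESHOLD_=131072 ./prog

   behave like an explicit mallopt() call made before the first
   allocation (and the `secure` checks above are why such variables
   are ignored for set-uid programs).  A minimal standalone sketch of
   that equivalence follows; the #if 0 guard keeps it out of any
   build of this file. */
#if 0
#include <malloc.h>   /* mallopt, M_TRIM_THRESHOLD */
#include <stdlib.h>

int main(void)
{
  /* Equivalent to running with MALLOC_TRIM_THRESHOLD_=131072 in the
     environment: both paths end up in mALLOPt(M_TRIM_THRESHOLD, ...).
     mallopt() returns 1 on success. */
  if (mallopt(M_TRIM_THRESHOLD, 131072) != 1)
    return EXIT_FAILURE;

  free(malloc(1));   /* force allocator initialization */
  return EXIT_SUCCESS;
}
#endif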
/* Managing heaps and arenas (for concurrent threads) */

#if USE_ARENAS

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
#if __STD_C
dump_heap(heap_info *heap)
#else
dump_heap(heap) heap_info *heap;
#endif
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
#if __STD_C
new_heap(size_t size, size_t top_pad)
#else
new_heap(size, top_pad) size_t size, top_pad;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
  if(p1 != MAP_FAILED) {
    p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                  & ~(HEAP_MAX_SIZE-1));
    ul = p2 - p1;
    munmap(p1, ul);
    munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
  } else {
    /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
       is already aligned. */
    p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
    if(p2 == MAP_FAILED)
      return 0;
    if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
      munmap(p2, HEAP_MAX_SIZE);
      return 0;
    }
  }
  if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}
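/* Illustration (not part of the original file): the over-map-and-trim
   trick used by new_heap() above, shown as a standalone program.  It
   maps twice the desired alignment with PROT_NONE, rounds the start
   up to an aligned boundary, and unmaps the slack on both sides.
   ALIGN is a 1 MiB stand-in for HEAP_MAX_SIZE, and plain mmap with
   MAP_ANONYMOUS stands in for the MMAP macro; the #if 0 guard keeps
   the sketch out of any build. */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>

#define ALIGN (1UL << 20)   /* stand-in for HEAP_MAX_SIZE */

int main(void)
{
  /* Over-map twice the alignment; an aligned block must lie inside. */
  char *p1 = mmap(NULL, ALIGN << 1, PROT_NONE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p1 == MAP_FAILED)
    return 1;

  /* Round up to the next ALIGN boundary, then unmap both slack ends. */
  char *p2 = (char *)(((uintptr_t)p1 + (ALIGN - 1)) & ~(ALIGN - 1));
  size_t head = (size_t)(p2 - p1);
  if (head)
    munmap(p1, head);
  munmap(p2 + ALIGN, ALIGN - head);

  printf("aligned region at %p\n", (void *)p2);
  munmap(p2, ALIGN);
  return 0;
}
#endif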
/* Grow or shrink a heap.  size is automatically rounded up to a
   multiple of the page size if it is positive. */

static int
#if __STD_C
grow_heap(heap_info *h, long diff)
#else
grow_heap(h, diff) heap_info *h; long diff;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  long new_size;

  if(diff >= 0) {
    diff = (diff + page_mask) & ~page_mask;
    new_size = (long)h->size + diff;
    if(new_size > HEAP_MAX_SIZE)
      return -1;
    if(mprotect((char *)h + h->size, diff, PROT_READ|PROT_WRITE) != 0)
      return -2;
  } else {
    new_size = (long)h->size + diff;
    if(new_size < (long)sizeof(*h))
      return -1;
    /* Try to re-map the extra heap space freshly to save memory, and
       make it inaccessible. */
    if((char *)MMAP((char *)h + new_size, -diff, PROT_NONE,
                    MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
      return -2;
    /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
  }
  h->size = new_size;
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) munmap((char*)(heap), HEAP_MAX_SIZE)

static int
internal_function
#if __STD_C
heap_trim(heap_info *heap, size_t pad)
#else
heap_trim(heap, pad) heap_info *heap; size_t pad;
#endif
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = mp_.pagesize;
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = ((top_size - pad - MINSIZE + (pagesz-1))/pagesz - 1) * pagesz;
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(grow_heap(heap, -extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}

static mstate
internal_function
#if __STD_C
arena_get2(mstate a_tsd, size_t size)
#else
arena_get2(a_tsd, size) mstate a_tsd; size_t size;
#endif
{
  mstate a;
  int err;

  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (Void_t *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(mutex_trylock(&list_lock)) {
    a = a_tsd;
    goto repeat;
  }
  (void)mutex_unlock(&list_lock);

  /* Nothing immediately available, so generate a new arena. */
  a = _int_new_arena(size);
  if(!a)
    return 0;

  tsd_setspecific(arena_key, (Void_t *)a);
  mutex_init(&a->mutex);
  err = mutex_lock(&a->mutex); /* remember result */

  /* Add the new arena to the global list. */
  (void)mutex_lock(&list_lock);
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;
  (void)mutex_unlock(&list_lock);

  if(err) /* locking failed; keep arena for further attempts later */
    return 0;

  THREAD_STAT(++(a->stat_lock_loop));
  return a;
}

/* Create a new arena with initial size "size". */

mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk(). */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;
#ifdef NO_THREADS
  if((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
     mp_.max_total_mem)
    mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
#endif

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  return a;
}

/* Obtain the arena number n.  Needed in malloc_stats. */

mstate
_int_get_arena (int n)
{
  mstate a = &main_arena;

  while (n-- != 0) {
    a = a->next;
    if (a == &main_arena)
      return 0;
  }
  return a;
}

#endif /* USE_ARENAS */

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */
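/* Context note (not part of the original excerpt): the reason
   new_heap() works so hard to obtain HEAP_MAX_SIZE-aligned mappings
   is that the alignment lets the allocator map any chunk address in a
   non-main arena back to its owning heap_info -- and from there to
   the arena -- with a single mask, no search and no per-chunk
   back-pointer.  In glibc's arena.c this is the heap_for_ptr() macro,
   defined elsewhere in the file; from memory it is essentially:

     #define heap_for_ptr(ptr) \
       ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))

   so free() called from any thread locates the right arena in O(1). */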