/*
 * apr_pools.c -- APR memory pool implementation (fragment).
 */
/*
 * NOTE(review): this fragment opens inside a function whose head is outside
 * the visible chunk.  From its shape it is the tail of a block-list byte
 * counter: it walks blok->h.next, accumulating the usable bytes of each
 * block (endp minus the space taken by the header itself).
 */
        size += blok->h.endp - (char *) (blok + 1);
        blok = blok->h.next;
    }
    return size;
}

/*****************************************************************
 *
 * Pool internals and management...
 * NB that subprocesses are not handled by the generic cleanup code,
 * basically because we don't want cleanups for multiple subprocesses
 * to result in multiple three-second pauses.
 */

struct process_chain;
struct cleanup;

static void run_cleanups(struct cleanup *c);
static void free_proc_chain(struct process_chain *p);

/* The root pool; set once in apr_pool_alloc_init() and used as the
 * starting point for apr_pool_cleanup_for_exec(). */
static apr_pool_t *permanent_pool;

/* Each pool structure is allocated in the start of its own first block,
 * so we need to know how many bytes that is (once properly aligned...).
 * This also means that when a pool's sub-pool is destroyed, the storage
 * associated with it is *completely* gone, so we have to make sure it
 * gets taken off the parent's sub-pool list...
 */
#define POOL_HDR_CLICKS (1 + ((sizeof(struct apr_pool_t) - 1) / CLICK_SZ))
#define POOL_HDR_BYTES (POOL_HDR_CLICKS * CLICK_SZ)

/*
 * Create a new pool, optionally as a child of p.  The pool header itself
 * lives at the start of the pool's first block (see POOL_HDR_BYTES above).
 * apr_abort is the caller's out-of-memory handler, forwarded to new_block().
 * NOTE(review): new_block() is defined outside this chunk -- presumably it
 * pulls from the global free list, hence the alloc_mutex around it; confirm.
 */
APR_DECLARE(apr_pool_t *) apr_pool_sub_make(apr_pool_t *p,
                                            int (*apr_abort)(int retcode))
{
    union block_hdr *blok;
    apr_pool_t *new_pool;

#if APR_HAS_THREADS
    if (alloc_mutex) {
        apr_lock_acquire(alloc_mutex);
    }
#endif

    /* Carve the pool header out of the front of its own first block. */
    blok = new_block(POOL_HDR_BYTES, apr_abort);
    new_pool = (apr_pool_t *) blok->h.first_avail;
    blok->h.first_avail += POOL_HDR_BYTES;
#ifdef APR_POOL_DEBUG
    blok->h.owning_pool = new_pool;
#endif

    memset((char *) new_pool, '\0', sizeof(struct apr_pool_t));
    /* Remember where allocations may resume after a clear: just past the
     * embedded pool header. */
    new_pool->free_first_avail = blok->h.first_avail;
    new_pool->first = new_pool->last = blok;

    if (p) {
        /* Link the new pool at the head of the parent's sub-pool list. */
        new_pool->parent = p;
        new_pool->sub_next = p->sub_pools;
        if (new_pool->sub_next) {
            new_pool->sub_next->sub_prev = new_pool;
        }
        p->sub_pools = new_pool;
    }

#if APR_HAS_THREADS
    if (alloc_mutex) {
        apr_lock_release(alloc_mutex);
    }
#endif

    return new_pool;
}

#ifdef APR_POOL_DEBUG
/* Determine at runtime which way the stack grows, by comparing the address
 * of a caller-frame variable (s) with one in our own frame (t).  Result is
 * stored in the file-scope stack_direction (defined outside this chunk). */
static void stack_var_init(char *s)
{
    char t;

    if (s < &t) {
        stack_direction = 1; /* stack grows up */
    }
    else {
        stack_direction = -1; /* stack grows down */
    }
}
#endif

#ifdef ALLOC_STATS
/* Dump the allocator counters (maintained elsewhere in this file) to
 * stderr; registered via atexit() in apr_pool_alloc_init(). */
static void dump_stats(void)
{
    fprintf(stderr,
            "alloc_stats: [%d] #free_blocks %llu #blocks %llu max "
            "%u #malloc %u #bytes %u\n",
            (int) getpid(),
            num_free_blocks_calls,
            num_blocks_freed,
            max_blocks_in_one_free,
            num_malloc_calls,
            num_malloc_bytes);
}
#endif

/* ### why do we have this, in addition to apr_pool_sub_make? */
/*
 * Public constructor: wraps apr_pool_sub_make(), inheriting the parent's
 * apr_abort handler when a parent is given, and reports failure as
 * APR_ENOPOOL instead of returning NULL.
 */
APR_DECLARE(apr_status_t) apr_pool_create(apr_pool_t **newcont,
                                          apr_pool_t *cont)
{
    apr_pool_t *newpool;

    if (cont) {
        newpool = apr_pool_sub_make(cont, cont->apr_abort);
    }
    else {
        newpool = apr_pool_sub_make(NULL, NULL);
    }

    if (newpool == NULL) {
        return APR_ENOPOOL;
    }

    newpool->prog_data = NULL;
    if (cont) {
        newpool->apr_abort = cont->apr_abort;
    }
    else {
        newpool->apr_abort = NULL;
    }
    *newcont = newpool;
    return APR_SUCCESS;
}

/*****************************************************************
 *
 * Managing generic cleanups.
 */

/* One registered cleanup: the same data pointer with two callbacks --
 * plain_cleanup runs on pool clear/destroy, child_cleanup runs in the
 * child before exec (see cleanup_pool_for_exec). */
struct cleanup {
    const void *data;
    apr_status_t (*plain_cleanup) (void *);
    apr_status_t (*child_cleanup) (void *);
    struct cleanup *next;
};

/* Register a cleanup pair against pool p.  The record is allocated from p
 * itself and pushed onto the head of p->cleanups, so cleanups run in
 * reverse (LIFO) registration order.  Silently ignores a NULL pool. */
APR_DECLARE(void) apr_pool_cleanup_register(apr_pool_t *p, const void *data,
                                            apr_status_t (*plain_cleanup) (void *),
                                            apr_status_t (*child_cleanup) (void *))
{
    struct cleanup *c;

    if (p != NULL) {
        c = (struct cleanup *) apr_palloc(p, sizeof(struct cleanup));
        c->data = data;
        c->plain_cleanup = plain_cleanup;
        c->child_cleanup = child_cleanup;
        c->next = p->cleanups;
        p->cleanups = c;
    }
}

/* Unregister the first cleanup matching (data, plain_cleanup) without
 * running it.  The record's memory is pool-owned, so it is simply unlinked,
 * not freed.  Only the first match is removed. */
APR_DECLARE(void) apr_pool_cleanup_kill(apr_pool_t *p, const void *data,
                                        apr_status_t (*cleanup) (void *))
{
    struct cleanup *c;
    struct cleanup **lastp;

    if (p == NULL)
        return;
    c = p->cleanups;
    lastp = &p->cleanups;
    while (c) {
        if (c->data == data && c->plain_cleanup == cleanup) {
            *lastp = c->next;
            break;
        }

        lastp = &c->next;
        c = c->next;
    }
}

/* Unregister the cleanup, then run it once, returning its status. */
APR_DECLARE(apr_status_t) apr_pool_cleanup_run(apr_pool_t *p, void *data,
                                               apr_status_t (*cleanup) (void *))
{
    apr_pool_cleanup_kill(p, data, cleanup);
    return (*cleanup) (data);
}

/* Run every plain_cleanup in list order (LIFO registration order);
 * return statuses are discarded. */
static void run_cleanups(struct cleanup *c)
{
    while (c) {
        (*c->plain_cleanup) ((void *)c->data);
        c = c->next;
    }
}

/* As run_cleanups(), but invoking the child_cleanup callbacks. */
static void run_child_cleanups(struct cleanup *c)
{
    while (c) {
        (*c->child_cleanup) ((void *)c->data);
        c = c->next;
    }
}

/* Recursively run (and discard) the child cleanups of p and all of its
 * sub-pools; used just before exec'ing a child process. */
static void cleanup_pool_for_exec(apr_pool_t *p)
{
    run_child_cleanups(p->cleanups);
    p->cleanups = NULL;

    for (p = p->sub_pools; p; p = p->sub_next) {
        cleanup_pool_for_exec(p);
    }
}

APR_DECLARE(void) apr_pool_cleanup_for_exec(void)
{
#if !defined(WIN32) && !defined(OS2)
    /*
     * Don't need to do anything on NT or OS/2, because I
     * am actually going to spawn the new process - not
     * exec it. All handles that are not inheritable, will
     * be automajically closed. The only problem is with
     * file handles that are open, but there isn't much
     * I can do about that (except if the child decides
     * to go out and close them
     */
    cleanup_pool_for_exec(permanent_pool);
#endif /* ndef WIN32 */
}

APR_DECLARE_NONSTD(apr_status_t) apr_pool_cleanup_null(void *data)
{
    /* do nothing cleanup routine */
    return APR_SUCCESS;
}

/*
 * One-time allocator initialization: set up the stack-direction probe
 * (debug builds), create the global alloc/spawn mutexes (threaded builds),
 * create the permanent (root) pool, and register the stats dump (if
 * ALLOC_STATS).  Must run before any other pool operation.
 */
APR_DECLARE(apr_status_t) apr_pool_alloc_init(apr_pool_t *globalp)
{
#if APR_HAS_THREADS
    apr_status_t status;
#endif
#ifdef APR_POOL_DEBUG
    char s;

    known_stack_point = &s;
    stack_var_init(&s);
#endif
#if APR_HAS_THREADS
    status = apr_lock_create(&alloc_mutex, APR_MUTEX, APR_INTRAPROCESS,
                             NULL, globalp);
    if (status != APR_SUCCESS) {
        /* NOTE(review): destroying a lock whose creation just failed looks
         * suspect -- confirm apr_lock_create's failure contract before
         * relying on this path. */
        apr_lock_destroy(alloc_mutex);
        return status;
    }
    status = apr_lock_create(&spawn_mutex, APR_MUTEX, APR_INTRAPROCESS,
                             NULL, globalp);
    if (status != APR_SUCCESS) {
        apr_lock_destroy(spawn_mutex);
        return status;
    }
#endif
    permanent_pool = apr_pool_sub_make(globalp, NULL);

#ifdef ALLOC_STATS
    atexit(dump_stats);
#endif

    return APR_SUCCESS;
}

/* Tear down the global mutexes (threaded builds) and destroy the given
 * pool; counterpart of apr_pool_alloc_init(). */
APR_DECLARE(void) apr_pool_alloc_term(apr_pool_t *globalp)
{
#if APR_HAS_THREADS
    apr_lock_destroy(alloc_mutex);
    apr_lock_destroy(spawn_mutex);
    alloc_mutex = NULL;
    spawn_mutex = NULL;
#endif
    apr_pool_destroy(globalp);
}

/* We only want to lock the mutex if we are being called from apr_pool_clear.
 * This is because if we also call this function from apr_destroy_real_pool,
 * which also locks the same mutex, and recursive locks aren't portable.
 * This way, we are guaranteed that we only lock this mutex once when calling
 * either one of these functions.
 */
/*
 * Reset pool a to its freshly-created state: destroy all sub-pools, run and
 * drop its cleanups, free its subprocess chain and all blocks after the
 * first, and rewind the first block's allocation pointer.
 */
APR_DECLARE(void) apr_pool_clear(apr_pool_t *a)
{
    while (a->sub_pools) {
        apr_pool_destroy(a->sub_pools);
    }
    /*
     * Don't hold the mutex during cleanups.
     */
    run_cleanups(a->cleanups);
    a->cleanups = NULL;
    free_proc_chain(a->subprocesses);
    a->subprocesses = NULL;

    /* Return every block except the first (which holds the pool header)
     * to the allocator; free_blocks() is defined outside this chunk. */
    free_blocks(a->first->h.next);
    a->first->h.next = NULL;

    a->prog_data = NULL;
    a->last = a->first;
    a->first->h.first_avail = a->free_first_avail;
    debug_fill(a->first->h.first_avail,
               a->first->h.endp - a->first->h.first_avail);

#ifdef ALLOC_USE_MALLOC
    {
        void *c, *n;

        /* Each malloc'd allocation stores the next-pointer in its first
         * word; walk and free the whole chain. */
        for (c = a->allocation_list; c; c = n) {
            n = *(void **)c;
            free(c);
        }
        a->allocation_list = NULL;
    }
#endif
}

/*
 * Clear pool a, unlink it from its parent's sub-pool list (under the
 * alloc mutex -- the list is shared), then release its remaining blocks.
 */
APR_DECLARE(void) apr_pool_destroy(apr_pool_t *a)
{
    apr_pool_clear(a);
#if APR_HAS_THREADS
    if (alloc_mutex) {
        apr_lock_acquire(alloc_mutex);
    }
#endif

    if (a->parent) {
        if (a->parent->sub_pools == a) {
            a->parent->sub_pools = a->sub_next;
        }
        if (a->sub_prev) {
            a->sub_prev->sub_next = a->sub_next;
        }
        if (a->sub_next) {
            a->sub_next->sub_prev = a->sub_prev;
        }
    }
#if APR_HAS_THREADS
    if (alloc_mutex) {
        apr_lock_release(alloc_mutex);
    }
#endif
    /* The pool header lives inside a->first, so after this the pool
     * structure itself is gone too. */
    free_blocks(a->first);
}

APR_DECLARE(apr_size_t) apr_pool_num_bytes(apr_pool_t *p)
{
    return bytes_in_block_list(p->first);
}

APR_DECLARE(apr_size_t) apr_pool_free_blocks_num_bytes(void)
{
    return bytes_in_block_list(block_freelist);
}

/*****************************************************************
 * APR_POOL_DEBUG support
 */
#ifdef APR_POOL_DEBUG

/* the unix linker defines this symbol as the last byte + 1 of
 * the executable... so it includes TEXT, BSS, and DATA
 */
extern char _end;

/* is ptr in the range [lo,hi) */
/* Single unsigned comparison: (ptr - lo) wraps for ptr < lo, so one
 * "<" test covers both bounds. */
#define is_ptr_in_range(ptr, lo, hi) \
    (((unsigned long)(ptr) - (unsigned long)(lo)) \
     < (unsigned long)(hi) - (unsigned long)(lo))

/* Find the pool that ts belongs to, return NULL if it doesn't
 * belong to any pool.
 */
APR_DECLARE(apr_pool_t *) apr_find_pool(const void *ts)
{
    const char *s = ts;
    union block_hdr **pb;
    union block_hdr *b;

    /* short-circuit stuff which is in TEXT, BSS, or DATA */
    if (is_ptr_in_range(s, 0, &_end)) {
        return NULL;
    }
    /* consider stuff on the stack to also be in the NULL pool...
     * XXX: there's cases where we don't want to assume this
     */
    if ((stack_direction == -1 && is_ptr_in_range(s, &ts, known_stack_point))
        || (stack_direction == 1 && is_ptr_in_range(s, known_stack_point, &ts))) {
        /* NOTE(review): abort() before "return NULL" makes the return
         * unreachable -- looks like a debugging trap left in; confirm
         * whether stack pointers should abort or return NULL here. */
        abort();
        return NULL;
    }
    /* search the global_block_list */
    for (pb = &global_block_list; *pb; pb = &b->h.global_next) {
        b = *pb;
        if (is_ptr_in_range(s, b, b->h.endp)) {
            if (b->h.owning_pool == FREE_POOL) {
                fprintf(stderr,
                        "Ouch!  find_pool() called on pointer in a free block\n");
                abort();
                exit(1);
            }
            if (b != global_block_list) {
                /*
                 * promote b to front of list, this is a hack to speed
                 * up the lookup
                 */
                *pb = b->h.global_next;
                b->h.global_next = global_block_list;
                global_block_list = b;
            }
            return b->h.owning_pool;
        }
    }
    return NULL;
}

/* return TRUE iff a is an ancestor of b
 * NULL is considered an ancestor of all pools
 */
APR_DECLARE(int) apr_pool_is_ancestor(apr_pool_t *a, apr_pool_t *b)
{
    if (a == NULL) {
        return 1;
    }
    /* Follow join links so joined pools compare as one. */
    while (a->joined) {
        a = a->joined;
    }
    while (b) {
        if (a == b) {
            return 1;
        }
/* NOTE(review): apr_pool_is_ancestor continues past the end of this
 * visible chunk; the remainder (walking b's parent chain) is not shown. */
/* End of visible fragment.  (Non-source page chrome -- the code-hosting
 * site's keyboard-shortcut help -- removed from this copy.) */