apr_pools.c
    apr_pool_t           *joined; /* the caller has guaranteed that this pool
                                   * will survive as long as ->joined */
    debug_node_t         *nodes;
    const char           *file_line;
    apr_uint32_t          creation_flags;
    unsigned int          stat_alloc;
    unsigned int          stat_total_alloc;
    unsigned int          stat_clear;
#if APR_HAS_THREADS
    apr_os_thread_t       owner;
    apr_thread_mutex_t   *mutex;
#endif /* APR_HAS_THREADS */
#endif /* APR_POOL_DEBUG */
#ifdef NETWARE
    apr_os_proc_t         owner_proc;
#endif /* defined(NETWARE) */
    cleanup_t            *pre_cleanups;
    cleanup_t            *free_pre_cleanups;
};

#define SIZEOF_POOL_T       APR_ALIGN_DEFAULT(sizeof(apr_pool_t))


/*
 * Variables
 */

static apr_byte_t   apr_pools_initialized = 0;
static apr_pool_t  *global_pool = NULL;

#if !APR_POOL_DEBUG
static apr_allocator_t *global_allocator = NULL;
#endif /* !APR_POOL_DEBUG */

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
static apr_file_t *file_stderr = NULL;
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */

/*
 * Local functions
 */

static void run_cleanups(cleanup_t **c);
static void run_child_cleanups(cleanup_t **c);
static void free_proc_chain(struct process_chain *procs);

#if APR_POOL_DEBUG
static void pool_destroy_debug(apr_pool_t *pool, const char *file_line);
#endif

#if !APR_POOL_DEBUG
/*
 * Initialization
 */

APR_DECLARE(apr_status_t) apr_pool_initialize(void)
{
    apr_status_t rv;

    if (apr_pools_initialized++)
        return APR_SUCCESS;

    if ((rv = apr_allocator_create(&global_allocator)) != APR_SUCCESS) {
        apr_pools_initialized = 0;
        return rv;
    }

    if ((rv = apr_pool_create_ex(&global_pool, NULL, NULL,
                                 global_allocator)) != APR_SUCCESS) {
        apr_allocator_destroy(global_allocator);
        global_allocator = NULL;
        apr_pools_initialized = 0;
        return rv;
    }

    apr_pool_tag(global_pool, "apr_global_pool");

    /* This has to happen here because mutexes might be backed by
     * atomics.  It used to be snug and safe in apr_initialize().
     *
     * Warning: apr_atomic_init() must always be called, by any
     * means possible, from apr_initialize().
     */
    if ((rv = apr_atomic_init(global_pool)) != APR_SUCCESS) {
        return rv;
    }

#if APR_HAS_THREADS
    {
        apr_thread_mutex_t *mutex;

        if ((rv = apr_thread_mutex_create(&mutex,
                                          APR_THREAD_MUTEX_DEFAULT,
                                          global_pool)) != APR_SUCCESS) {
            return rv;
        }

        apr_allocator_mutex_set(global_allocator, mutex);
    }
#endif /* APR_HAS_THREADS */

    apr_allocator_owner_set(global_allocator, global_pool);

    return APR_SUCCESS;
}

APR_DECLARE(void) apr_pool_terminate(void)
{
    if (!apr_pools_initialized)
        return;

    if (--apr_pools_initialized)
        return;

    apr_pool_destroy(global_pool); /* This will also destroy the mutex */
    global_pool = NULL;

    global_allocator = NULL;
}


/* Node list management helper macros; list_insert() inserts 'node'
 * before 'point'. */
#define list_insert(node, point) do {           \
    node->ref = point->ref;                     \
    *node->ref = node;                          \
    node->next = point;                         \
    point->ref = &node->next;                   \
} while (0)

/* list_remove() removes 'node' from its list. */
#define list_remove(node) do {                  \
    *node->ref = node->next;                    \
    node->next->ref = node->ref;                \
} while (0)

/* Returns the amount of free space in the given node. */
#define node_free_space(node_) ((apr_size_t)(node_->endp - node_->first_avail))
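/* Illustrative sketch (not part of the original file): a minimal
 * demonstration of the list_insert()/list_remove() invariants above.
 * The type demo_node_t and function demo_ring() are hypothetical
 * stand-ins for apr_memnode_t; the macros only require the 'next' and
 * 'ref' fields.  'ref' always points at the pointer that holds the
 * node (the previous node's 'next', or the list head), which is what
 * makes list_remove() possible without a back pointer.
 */
typedef struct demo_node_t {
    struct demo_node_t  *next;
    struct demo_node_t **ref;
} demo_node_t;

static void demo_ring(void)
{
    demo_node_t a, b;
    demo_node_t *pa = &a, *pb = &b;

    /* A one-element ring points at itself, exactly like pool->self
     * after apr_pool_create_ex() sets node->next = node. */
    pa->next = pa;
    pa->ref = &pa->next;

    list_insert(pb, pa);    /* two-node ring: a.next == &b, b.next == &a */
    list_remove(pb);        /* back to the singleton ring in O(1) */
}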
/*
 * Memory allocation
 */

APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size)
{
    apr_memnode_t *active, *node;
    void *mem;
    apr_size_t free_index;

    size = APR_ALIGN_DEFAULT(size);
    active = pool->active;

    /* If the active node has enough bytes left, use it. */
    if (size <= node_free_space(active)) {
        mem = active->first_avail;
        active->first_avail += size;
        return mem;
    }

    node = active->next;
    if (size <= node_free_space(node)) {
        list_remove(node);
    }
    else {
        if ((node = allocator_alloc(pool->allocator, size)) == NULL) {
            if (pool->abort_fn)
                pool->abort_fn(APR_ENOMEM);

            return NULL;
        }
    }

    node->free_index = 0;

    mem = node->first_avail;
    node->first_avail += size;

    list_insert(node, active);

    pool->active = node;

    free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
                            BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;

    active->free_index = (APR_UINT32_TRUNC_CAST)free_index;
    node = active->next;
    if (free_index >= node->free_index)
        return mem;

    do {
        node = node->next;
    }
    while (free_index < node->free_index);

    list_remove(active);
    list_insert(active, node);

    return mem;
}

/* Provide an implementation of apr_pcalloc for backward compatibility
 * with code built before apr_pcalloc was a macro
 */
#ifdef apr_pcalloc
#undef apr_pcalloc
#endif

APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size);
APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size)
{
    void *mem;

    size = APR_ALIGN_DEFAULT(size);
    if ((mem = apr_palloc(pool, size)) != NULL) {
        memset(mem, 0, size);
    }

    return mem;
}


/*
 * Pool creation/destruction
 */

APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool)
{
    apr_memnode_t *active;

    /* Run pre destroy cleanups */
    run_cleanups(&pool->pre_cleanups);
    pool->pre_cleanups = NULL;
    pool->free_pre_cleanups = NULL;

    /* Destroy the subpools.  The subpools will detach themselves from
     * this pool, so this loop is safe and easy.
     */
    while (pool->child)
        apr_pool_destroy(pool->child);

    /* Run cleanups */
    run_cleanups(&pool->cleanups);
    pool->cleanups = NULL;
    pool->free_cleanups = NULL;

    /* Free subprocesses */
    free_proc_chain(pool->subprocesses);
    pool->subprocesses = NULL;

    /* Clear the user data. */
    pool->user_data = NULL;

    /* Find the node attached to the pool structure, reset it, make
     * it the active node and free the rest of the nodes.
     */
    active = pool->active = pool->self;
    active->first_avail = pool->self_first_avail;

    if (active->next == active)
        return;

    *active->ref = NULL;
    allocator_free(pool->allocator, active->next);
    active->next = active;
    active->ref = &active->next;
}
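/* Illustrative sketch (not part of the original file): the intended use
 * of apr_pool_clear() is a clear-and-reuse loop, e.g. one scratch pool
 * cleared per request.  The function name example_clear_loop() is
 * hypothetical.
 */
static apr_status_t example_clear_loop(apr_pool_t *parent, int iterations)
{
    apr_pool_t *scratch;
    apr_status_t rv;
    int i;

    if ((rv = apr_pool_create(&scratch, parent)) != APR_SUCCESS)
        return rv;

    for (i = 0; i < iterations; i++) {
        char *buf = apr_palloc(scratch, 1024);
        if (buf != NULL)
            memset(buf, 0, 1024);

        /* Releases everything allocated this iteration in one step;
         * the pool keeps its first node, so the next iteration's
         * allocations are typically serviced without hitting the
         * allocator at all. */
        apr_pool_clear(scratch);
    }

    apr_pool_destroy(scratch);
    return APR_SUCCESS;
}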
APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool)
{
    apr_memnode_t *active;
    apr_allocator_t *allocator;

    /* Run pre destroy cleanups */
    run_cleanups(&pool->pre_cleanups);
    pool->pre_cleanups = NULL;
    pool->free_pre_cleanups = NULL;

    /* Destroy the subpools.  The subpools will detach themselves from
     * this pool, so this loop is safe and easy.
     */
    while (pool->child)
        apr_pool_destroy(pool->child);

    /* Run cleanups */
    run_cleanups(&pool->cleanups);

    /* Free subprocesses */
    free_proc_chain(pool->subprocesses);

    /* Remove the pool from the parent's child list */
    if (pool->parent) {
#if APR_HAS_THREADS
        apr_thread_mutex_t *mutex;

        if ((mutex = apr_allocator_mutex_get(pool->parent->allocator)) != NULL)
            apr_thread_mutex_lock(mutex);
#endif /* APR_HAS_THREADS */

        if ((*pool->ref = pool->sibling) != NULL)
            pool->sibling->ref = pool->ref;

#if APR_HAS_THREADS
        if (mutex)
            apr_thread_mutex_unlock(mutex);
#endif /* APR_HAS_THREADS */
    }

    /* Find the block attached to the pool structure.  Save a copy of
     * the allocator pointer, because the pool struct soon will be no more.
     */
    allocator = pool->allocator;
    active = pool->self;
    *active->ref = NULL;

#if APR_HAS_THREADS
    if (apr_allocator_owner_get(allocator) == pool) {
        /* Make sure to remove the lock, since it is highly likely to
         * be invalid now.
         */
        apr_allocator_mutex_set(allocator, NULL);
    }
#endif /* APR_HAS_THREADS */

    /* Free all the nodes in the pool (including the node holding the
     * pool struct), by giving them back to the allocator.
     */
    allocator_free(allocator, active);

    /* If this pool happens to be the owner of the allocator, free
     * everything in the allocator (that includes the pool struct
     * and the allocator).  Don't worry about destroying the optional
     * mutex in the allocator, it will have been destroyed by the
     * cleanup function.
     */
    if (apr_allocator_owner_get(allocator) == pool) {
        apr_allocator_destroy(allocator);
    }
}

APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
                                             apr_pool_t *parent,
                                             apr_abortfunc_t abort_fn,
                                             apr_allocator_t *allocator)
{
    apr_pool_t *pool;
    apr_memnode_t *node;

    *newpool = NULL;

    if (!parent)
        parent = global_pool;

    /* parent will always be non-NULL here except the first time a
     * pool is created, in which case allocator is guaranteed to be
     * non-NULL. */

    if (!abort_fn && parent)
        abort_fn = parent->abort_fn;

    if (allocator == NULL)
        allocator = parent->allocator;

    if ((node = allocator_alloc(allocator,
                                MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) {
        if (abort_fn)
            abort_fn(APR_ENOMEM);

        return APR_ENOMEM;
    }

    node->next = node;
    node->ref = &node->next;

    pool = (apr_pool_t *)node->first_avail;
    node->first_avail = pool->self_first_avail = (char *)pool + SIZEOF_POOL_T;

    pool->allocator = allocator;
    pool->active = pool->self = node;
    pool->abort_fn = abort_fn;
    pool->child = NULL;
    pool->cleanups = NULL;
    pool->free_cleanups = NULL;
    pool->pre_cleanups = NULL;
    pool->free_pre_cleanups = NULL;
    pool->subprocesses = NULL;
    pool->user_data = NULL;
    pool->tag = NULL;

#ifdef NETWARE
    pool->owner_proc = (apr_os_proc_t)getnlmhandle();
#endif /* defined(NETWARE) */

    if ((pool->parent = parent) != NULL) {
#if APR_HAS_THREADS
        apr_thread_mutex_t *mutex;

        if ((mutex = apr_allocator_mutex_get(parent->allocator)) != NULL)
            apr_thread_mutex_lock(mutex);
#endif /* APR_HAS_THREADS */

        if ((pool->sibling = parent->child) != NULL)
            pool->sibling->ref = &pool->sibling;

        parent->child = pool;
        pool->ref = &parent->child;

#if APR_HAS_THREADS
        if (mutex)
            apr_thread_mutex_unlock(mutex);
#endif /* APR_HAS_THREADS */
    }
    else {
        pool->sibling = NULL;
        pool->ref = NULL;
    }

    *newpool = pool;

    return APR_SUCCESS;
}
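/* Illustrative sketch (not part of the original file): creating a child
 * pool through apr_pool_create_ex() with a custom abort function.  The
 * names example_abort() and example_create_child() are hypothetical.
 * A NULL allocator makes the child share its parent's allocator, as
 * the code above shows.
 */
static int example_abort(int retcode)
{
    /* Invoked when allocator_alloc() fails and an abort_fn is set; a
     * real abort function is expected not to return. */
    abort();
    return retcode; /* not reached */
}

static apr_status_t example_create_child(apr_pool_t *parent,
                                         apr_pool_t **child)
{
    return apr_pool_create_ex(child, parent, example_abort, NULL);
}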
/* Deprecated.  Renamed to apr_pool_create_unmanaged_ex.
 */
APR_DECLARE(apr_status_t) apr_pool_create_core_ex(apr_pool_t **newpool,
                                                  apr_abortfunc_t abort_fn,
                                                  apr_allocator_t *allocator)
{
    return apr_pool_create_unmanaged_ex(newpool, abort_fn, allocator);
}

APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool,
                                                  apr_abortfunc_t abort_fn,
                                                  apr_allocator_t *allocator)
{
    apr_pool_t *pool;
    apr_memnode_t *node;
    apr_allocator_t *pool_allocator;

    *newpool = NULL;

    if (!apr_pools_initialized)
        return APR_ENOPOOL;

    if ((pool_allocator = allocator) == NULL) {
        if ((pool_allocator = malloc(SIZEOF_ALLOCATOR_T)) == NULL) {
            if (abort_fn)
                abort_fn(APR_ENOMEM);

            return APR_ENOMEM;
        }
        memset(pool_allocator, 0, SIZEOF_ALLOCATOR_T);
        pool_allocator->max_free_index = APR_ALLOCATOR_MAX_FREE_UNLIMITED;
    }

    if ((node = allocator_alloc(pool_allocator,
                                MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) {
        if (abort_fn)
            abort_fn(APR_ENOMEM);

        return APR_ENOMEM;
    }

    node->next = node;
    node->ref = &node->next;

    pool = (apr_pool_t *)node->first_avail;
    node->first_avail = pool->self_first_avail = (char *)pool + SIZEOF_POOL_T;

    pool->allocator = pool_allocator;
    pool->active = pool->self = node;
    pool->abort_fn = abort_fn;
    pool->child = NULL;
    pool->cleanups = NULL;
    pool->free_cleanups = NULL;
    pool->pre_cleanups = NULL;
    pool->free_pre_cleanups = NULL;
    pool->subprocesses = NULL;
    pool->user_data = NULL;
    pool->tag = NULL;
    pool->parent = NULL;
    pool->sibling = NULL;
    pool->ref = NULL;

#ifdef NETWARE
    pool->owner_proc = (apr_os_proc_t)getnlmhandle();
#endif /* defined(NETWARE) */

    if (!allocator)
        pool_allocator->owner = pool;

    *newpool = pool;

    return APR_SUCCESS;
}
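/* Illustrative sketch (not part of the original file): an unmanaged pool
 * has no parent, so it is never destroyed by a parent's cleanup; the
 * caller owns it and must destroy it explicitly.  Passing a NULL
 * allocator gives the pool a private allocator that it owns, which
 * apr_pool_destroy() tears down along with the pool.  The function name
 * example_unmanaged() is hypothetical.
 */
static apr_status_t example_unmanaged(void)
{
    apr_pool_t *p;
    apr_status_t rv;

    if ((rv = apr_pool_create_unmanaged_ex(&p, NULL, NULL)) != APR_SUCCESS)
        return rv;

    /* ... use p independently of any pool hierarchy ... */

    apr_pool_destroy(p);
    return APR_SUCCESS;
}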