
apr_pools.c

From the log4cxx 0.10 Unix build package
Language: C
Page 1 of 4 (the file continues beyond this excerpt)
/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "apr.h"
#include "apr_private.h"

#include "apr_atomic.h"
#include "apr_portable.h" /* for get_os_proc */
#include "apr_strings.h"
#include "apr_general.h"
#include "apr_pools.h"
#include "apr_allocator.h"
#include "apr_lib.h"
#include "apr_thread_mutex.h"
#include "apr_hash.h"
#include "apr_time.h"
#define APR_WANT_MEMFUNC
#include "apr_want.h"
#include "apr_env.h"

#if APR_HAVE_STDLIB_H
#include <stdlib.h>     /* for malloc, free and abort */
#endif

#if APR_HAVE_UNISTD_H
#include <unistd.h>     /* for getpid */
#endif


/*
 * Magic numbers
 */

#define MIN_ALLOC 8192
#define MAX_INDEX   20

#define BOUNDARY_INDEX 12
#define BOUNDARY_SIZE (1 << BOUNDARY_INDEX)

/*
 * Timing constants for killing subprocesses
 * There is a total 3-second delay between sending a SIGINT
 * and sending of the final SIGKILL.
 * TIMEOUT_INTERVAL should be set to TIMEOUT_USECS / 64
 * for the exponential timeout algorithm.
 */
#define TIMEOUT_USECS    3000000
#define TIMEOUT_INTERVAL   46875

/*
 * Allocator
 */

struct apr_allocator_t {
    apr_uint32_t        max_index;
    apr_uint32_t        max_free_index;
    apr_uint32_t        current_free_index;
#if APR_HAS_THREADS
    apr_thread_mutex_t *mutex;
#endif /* APR_HAS_THREADS */
    apr_pool_t         *owner;
    apr_memnode_t      *free[MAX_INDEX];
};

#define SIZEOF_ALLOCATOR_T  APR_ALIGN_DEFAULT(sizeof(apr_allocator_t))


/*
 * Allocator
 */

APR_DECLARE(apr_status_t) apr_allocator_create(apr_allocator_t **allocator)
{
    apr_allocator_t *new_allocator;

    *allocator = NULL;

    if ((new_allocator = malloc(SIZEOF_ALLOCATOR_T)) == NULL)
        return APR_ENOMEM;

    memset(new_allocator, 0, SIZEOF_ALLOCATOR_T);
    new_allocator->max_free_index = APR_ALLOCATOR_MAX_FREE_UNLIMITED;

    *allocator = new_allocator;

    return APR_SUCCESS;
}

APR_DECLARE(void) apr_allocator_destroy(apr_allocator_t *allocator)
{
    apr_uint32_t index;
    apr_memnode_t *node, **ref;

    for (index = 0; index < MAX_INDEX; index++) {
        ref = &allocator->free[index];
        while ((node = *ref) != NULL) {
            *ref = node->next;
            free(node);
        }
    }

    free(allocator);
}

#if APR_HAS_THREADS
APR_DECLARE(void) apr_allocator_mutex_set(apr_allocator_t *allocator,
                                          apr_thread_mutex_t *mutex)
{
    allocator->mutex = mutex;
}

APR_DECLARE(apr_thread_mutex_t *) apr_allocator_mutex_get(
                                      apr_allocator_t *allocator)
{
    return allocator->mutex;
}
#endif /* APR_HAS_THREADS */

APR_DECLARE(void) apr_allocator_owner_set(apr_allocator_t *allocator,
                                          apr_pool_t *pool)
{
    allocator->owner = pool;
}

APR_DECLARE(apr_pool_t *) apr_allocator_owner_get(apr_allocator_t *allocator)
{
    return allocator->owner;
}
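/*
 * Editor's note: a minimal usage sketch for the allocator API above, not
 * part of the original file.  It assumes APR has been initialized; the
 * function and variable names are illustrative only.
 */
#if 0
static apr_status_t allocator_usage_sketch(void)
{
    apr_allocator_t *allocator;
    apr_memnode_t *node;
    apr_status_t rv;

    if ((rv = apr_allocator_create(&allocator)) != APR_SUCCESS)
        return rv;

    /* Keep at most ~1 MiB of freed nodes cached on the free lists. */
    apr_allocator_max_free_set(allocator, 1024 * 1024);

    /* Get a block with at least 4000 usable bytes. */
    node = apr_allocator_alloc(allocator, 4000);
    if (node == NULL) {
        apr_allocator_destroy(allocator);
        return APR_ENOMEM;
    }

    /* ... write into the range [node->first_avail, node->endp) ... */

    /* Hand the block back to the allocator's free lists for reuse. */
    apr_allocator_free(allocator, node);

    /* Frees all cached nodes and the allocator itself. */
    apr_allocator_destroy(allocator);
    return APR_SUCCESS;
}
#endif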
APR_DECLARE(void) apr_allocator_max_free_set(apr_allocator_t *allocator,
                                             apr_size_t in_size)
{
    apr_uint32_t max_free_index;
    apr_uint32_t size = (APR_UINT32_TRUNC_CAST)in_size;

#if APR_HAS_THREADS
    apr_thread_mutex_t *mutex;

    mutex = apr_allocator_mutex_get(allocator);
    if (mutex != NULL)
        apr_thread_mutex_lock(mutex);
#endif /* APR_HAS_THREADS */

    max_free_index = APR_ALIGN(size, BOUNDARY_SIZE) >> BOUNDARY_INDEX;
    allocator->current_free_index += max_free_index;
    allocator->current_free_index -= allocator->max_free_index;
    allocator->max_free_index = max_free_index;
    if (allocator->current_free_index > max_free_index)
        allocator->current_free_index = max_free_index;

#if APR_HAS_THREADS
    if (mutex != NULL)
        apr_thread_mutex_unlock(mutex);
#endif
}

static APR_INLINE
apr_memnode_t *allocator_alloc(apr_allocator_t *allocator, apr_size_t size)
{
    apr_memnode_t *node, **ref;
    apr_uint32_t max_index;
    apr_size_t i, index;

    /* Round up the block size to the next boundary, but always
     * allocate at least a certain size (MIN_ALLOC).
     */
    size = APR_ALIGN(size + APR_MEMNODE_T_SIZE, BOUNDARY_SIZE);
    if (size < MIN_ALLOC)
        size = MIN_ALLOC;

    /* Find the index for this node size by
     * dividing its size by the boundary size
     */
    index = (size >> BOUNDARY_INDEX) - 1;

    if (index > APR_UINT32_MAX) {
        return NULL;
    }

    /* First see if there are any nodes in the area we know
     * our node will fit into.
     */
    if (index <= allocator->max_index) {
#if APR_HAS_THREADS
        if (allocator->mutex)
            apr_thread_mutex_lock(allocator->mutex);
#endif /* APR_HAS_THREADS */

        /* Walk the free list to see if there are
         * any nodes on it of the requested size
         *
         * NOTE: an optimization would be to check
         * allocator->free[index] first and if no
         * node is present, directly use
         * allocator->free[max_index].  This seems
         * like overkill though and could cause
         * memory waste.
         */
        max_index = allocator->max_index;
        ref = &allocator->free[index];
        i = index;
        while (*ref == NULL && i < max_index) {
            ref++;
            i++;
        }

        if ((node = *ref) != NULL) {
            /* If we have found a node and it doesn't have any
             * nodes waiting in line behind it _and_ we are on
             * the highest available index, find the new highest
             * available index
             */
            if ((*ref = node->next) == NULL && i >= max_index) {
                do {
                    ref--;
                    max_index--;
                }
                while (*ref == NULL && max_index > 0);

                allocator->max_index = max_index;
            }

            allocator->current_free_index += node->index;
            if (allocator->current_free_index > allocator->max_free_index)
                allocator->current_free_index = allocator->max_free_index;

#if APR_HAS_THREADS
            if (allocator->mutex)
                apr_thread_mutex_unlock(allocator->mutex);
#endif /* APR_HAS_THREADS */

            node->next = NULL;
            node->first_avail = (char *)node + APR_MEMNODE_T_SIZE;

            return node;
        }

#if APR_HAS_THREADS
        if (allocator->mutex)
            apr_thread_mutex_unlock(allocator->mutex);
#endif /* APR_HAS_THREADS */
    }

    /* If we found nothing, seek the sink (at index 0), if
     * it is not empty.
     */
    else if (allocator->free[0]) {
#if APR_HAS_THREADS
        if (allocator->mutex)
            apr_thread_mutex_lock(allocator->mutex);
#endif /* APR_HAS_THREADS */

        /* Walk the free list to see if there are
         * any nodes on it of the requested size
         */
        ref = &allocator->free[0];
        while ((node = *ref) != NULL && index > node->index)
            ref = &node->next;

        if (node) {
            *ref = node->next;

            allocator->current_free_index += node->index;
            if (allocator->current_free_index > allocator->max_free_index)
                allocator->current_free_index = allocator->max_free_index;

#if APR_HAS_THREADS
            if (allocator->mutex)
                apr_thread_mutex_unlock(allocator->mutex);
#endif /* APR_HAS_THREADS */

            node->next = NULL;
            node->first_avail = (char *)node + APR_MEMNODE_T_SIZE;

            return node;
        }

#if APR_HAS_THREADS
        if (allocator->mutex)
            apr_thread_mutex_unlock(allocator->mutex);
#endif /* APR_HAS_THREADS */
    }

    /* If we haven't got a suitable node, malloc a new one
     * and initialize it.
     */
    if ((node = malloc(size)) == NULL)
        return NULL;

    node->next = NULL;
    node->index = (APR_UINT32_TRUNC_CAST)index;
    node->first_avail = (char *)node + APR_MEMNODE_T_SIZE;
    node->endp = (char *)node + size;

    return node;
}
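/*
 * Editor's note: a worked example of the index arithmetic above, not part
 * of the original file.  With BOUNDARY_SIZE = 4096, MIN_ALLOC = 8192, and
 * assuming APR_MEMNODE_T_SIZE is small (a few dozen bytes):
 *
 *   request 4000 bytes  -> 4000 + APR_MEMNODE_T_SIZE aligns up to 4096,
 *                          which is below MIN_ALLOC, so size becomes 8192;
 *                          index = (8192 >> 12) - 1 = 1
 *   request 20000 bytes -> aligns up to 20480;
 *                          index = (20480 >> 12) - 1 = 4
 *
 * So free[i] holds blocks of exactly (i + 1) * 4096 bytes, and a block
 * larger than MAX_INDEX * 4096 bytes (80 KiB) ends up in the sink at
 * free[0].
 */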
static APR_INLINE
void allocator_free(apr_allocator_t *allocator, apr_memnode_t *node)
{
    apr_memnode_t *next, *freelist = NULL;
    apr_uint32_t index, max_index;
    apr_uint32_t max_free_index, current_free_index;

#if APR_HAS_THREADS
    if (allocator->mutex)
        apr_thread_mutex_lock(allocator->mutex);
#endif /* APR_HAS_THREADS */

    max_index = allocator->max_index;
    max_free_index = allocator->max_free_index;
    current_free_index = allocator->current_free_index;

    /* Walk the list of submitted nodes and free them one by one,
     * shoving them in the right 'size' buckets as we go.
     */
    do {
        next = node->next;
        index = node->index;

        if (max_free_index != APR_ALLOCATOR_MAX_FREE_UNLIMITED
            && index > current_free_index) {
            node->next = freelist;
            freelist = node;
        }
        else if (index < MAX_INDEX) {
            /* Add the node to the appropriate 'size' bucket.  Adjust
             * the max_index when appropriate.
             */
            if ((node->next = allocator->free[index]) == NULL
                && index > max_index) {
                max_index = index;
            }
            allocator->free[index] = node;
            current_free_index -= index;
        }
        else {
            /* This node is too large to keep in a specific size bucket,
             * just add it to the sink (at index 0).
             */
            node->next = allocator->free[0];
            allocator->free[0] = node;
            current_free_index -= index;
        }
    } while ((node = next) != NULL);

    allocator->max_index = max_index;
    allocator->current_free_index = current_free_index;

#if APR_HAS_THREADS
    if (allocator->mutex)
        apr_thread_mutex_unlock(allocator->mutex);
#endif /* APR_HAS_THREADS */

    while (freelist != NULL) {
        node = freelist;
        freelist = node->next;
        free(node);
    }
}

APR_DECLARE(apr_memnode_t *) apr_allocator_alloc(apr_allocator_t *allocator,
                                                 apr_size_t size)
{
    return allocator_alloc(allocator, size);
}

APR_DECLARE(void) apr_allocator_free(apr_allocator_t *allocator,
                                     apr_memnode_t *node)
{
    allocator_free(allocator, node);
}
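/*
 * Editor's note: an illustrative sketch of the recycling behaviour of
 * allocator_free()/allocator_alloc(), not part of the original file.
 * With the default unlimited max_free setting and a single thread, a
 * freed node is cached on a free list and handed back verbatim on the
 * next request in the same size class.
 */
#if 0
static void allocator_reuse_sketch(apr_allocator_t *allocator)
{
    apr_memnode_t *first, *second;

    first = apr_allocator_alloc(allocator, 4000);
    apr_allocator_free(allocator, first);

    /* Same size class, so the cached node is returned instead of a
     * freshly malloc()ed one: second == first here. */
    second = apr_allocator_alloc(allocator, 4000);

    apr_allocator_free(allocator, second);
}
#endif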
/*
 * Debug level
 */

#define APR_POOL_DEBUG_GENERAL  0x01
#define APR_POOL_DEBUG_VERBOSE  0x02
#define APR_POOL_DEBUG_LIFETIME 0x04
#define APR_POOL_DEBUG_OWNER    0x08
#define APR_POOL_DEBUG_VERBOSE_ALLOC 0x10

#define APR_POOL_DEBUG_VERBOSE_ALL (APR_POOL_DEBUG_VERBOSE \
                                    | APR_POOL_DEBUG_VERBOSE_ALLOC)


/*
 * Structures
 */

typedef struct cleanup_t cleanup_t;

/** A list of processes */
struct process_chain {
    /** The process ID */
    apr_proc_t *proc;
    apr_kill_conditions_e kill_how;
    /** The next process in the list */
    struct process_chain *next;
};

#if APR_POOL_DEBUG

typedef struct debug_node_t debug_node_t;

struct debug_node_t {
    debug_node_t *next;
    apr_uint32_t  index;
    void         *beginp[64];
    void         *endp[64];
};

#define SIZEOF_DEBUG_NODE_T APR_ALIGN_DEFAULT(sizeof(debug_node_t))

#endif /* APR_POOL_DEBUG */
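/*
 * Editor's note: a minimal sketch of how the cleanup_t list above gets
 * populated from user code, not part of the original file.  It uses the
 * public apr_pool_cleanup_register() API declared in apr_pools.h; the
 * close_handle() callback and the my_handle argument are hypothetical.
 */
#if 0
static apr_status_t close_handle(void *data)
{
    /* ... release whatever 'data' refers to ... */
    return APR_SUCCESS;
}

static void cleanup_sketch(apr_pool_t *pool, void *my_handle)
{
    /* close_handle() runs when the pool is cleared or destroyed;
     * apr_pool_cleanup_null is the no-op used for the child-process
     * (post-fork) variant of the cleanup. */
    apr_pool_cleanup_register(pool, my_handle,
                              close_handle, apr_pool_cleanup_null);
}
#endif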
/* The ref field in the apr_pool_t struct holds a
 * pointer to the pointer referencing this pool.
 * It is used for parent, child, sibling management.
 * Look at apr_pool_create_ex() and apr_pool_destroy()
 * to see how it is used.
 */
struct apr_pool_t {
    apr_pool_t           *parent;
    apr_pool_t           *child;
    apr_pool_t           *sibling;
    apr_pool_t          **ref;
    cleanup_t            *cleanups;
    cleanup_t            *free_cleanups;
    apr_allocator_t      *allocator;
    struct process_chain *subprocesses;
    apr_abortfunc_t       abort_fn;
    apr_hash_t           *user_data;
    const char           *tag;

#if !APR_POOL_DEBUG
    apr_memnode_t        *active;
    apr_memnode_t        *self; /* The node containing the pool itself */
    char                 *self_first_avail;

#else /* APR_POOL_DEBUG */
    apr_pool_t           *joined; /* the caller has guaranteed that this pool
                                   * will survive as long as ->joined */
    debug_node_t         *nodes;
    const char           *file_line;
    apr_uint32_t          creation_flags;
    unsigned int          stat_alloc;
    unsigned int          stat_total_alloc;
    unsigned int          stat_clear;
#if APR_HAS_THREADS
    apr_os_thread_t       owner;
    apr_thread_mutex_t   *mutex;
#endif /* APR_HAS_THREADS */
#endif /* APR_POOL_DEBUG */
#ifdef NETWARE
    apr_os_proc_t         owner_proc;
#endif /* defined(NETWARE) */
};

#define SIZEOF_POOL_T       APR_ALIGN_DEFAULT(sizeof(apr_pool_t))


/*
 * Variables
 */

static apr_byte_t   apr_pools_initialized = 0;
static apr_pool_t  *global_pool = NULL;

#if !APR_POOL_DEBUG
static apr_allocator_t *global_allocator = NULL;
#endif /* !APR_POOL_DEBUG */

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
static apr_file_t *file_stderr = NULL;
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */

/*
 * Local functions
 */

static void run_cleanups(cleanup_t **c);
static void run_child_cleanups(cleanup_t **c);
static void free_proc_chain(struct process_chain *procs);

#if APR_POOL_DEBUG
static void pool_destroy_debug(apr_pool_t *pool, const char *file_line);
#endif

#if !APR_POOL_DEBUG
/*
 * Initialization
 */

APR_DECLARE(apr_status_t) apr_pool_initialize(void)
{
    apr_status_t rv;

    if (apr_pools_initialized++)
        return APR_SUCCESS;

    if ((rv = apr_allocator_create(&global_allocator)) != APR_SUCCESS) {
        apr_pools_initialized = 0;
        return rv;
    }

    if ((rv = apr_pool_create_ex(&global_pool, NULL, NULL,
                                 global_allocator)) != APR_SUCCESS) {
        apr_allocator_destroy(global_allocator);
        global_allocator = NULL;
        apr_pools_initialized = 0;
        return rv;
    }

    apr_pool_tag(global_pool, "apr_global_pool");

    /* This has to happen here because mutexes might be backed by
     * atomics.  It used to be snug and safe in apr_initialize().
     */
    if ((rv = apr_atomic_init(global_pool)) != APR_SUCCESS) {
        return rv;
    }

#if APR_HAS_THREADS
    {
        apr_thread_mutex_t *mutex;

        if ((rv = apr_thread_mutex_create(&mutex,
                                          APR_THREAD_MUTEX_DEFAULT,
                                          global_pool)) != APR_SUCCESS) {
            return rv;
        }

        apr_allocator_mutex_set(global_allocator, mutex);
    }
#endif /* APR_HAS_THREADS */

    apr_allocator_owner_set(global_allocator, global_pool);

    return APR_SUCCESS;
}

APR_DECLARE(void) apr_pool_terminate(void)
{
    if (!apr_pools_initialized)
        return;

    if (--apr_pools_initialized)
        return;

    apr_pool_destroy(global_pool); /* This will also destroy the mutex */
    global_pool = NULL;

    global_allocator = NULL;
}
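/*
 * Editor's note: a minimal sketch of the pool lifecycle driven by the
 * functions above, not part of the original file.  Applications normally
 * call apr_initialize()/apr_terminate(), which invoke
 * apr_pool_initialize()/apr_pool_terminate() internally; the function
 * name below is illustrative.
 */
#if 0
#include <stdio.h>

int pool_lifecycle_sketch(void)
{
    apr_pool_t *pool;
    char *buf;

    if (apr_pool_initialize() != APR_SUCCESS)
        return 1;

    /* A NULL parent makes this a child of the global pool set up above. */
    if (apr_pool_create(&pool, NULL) != APR_SUCCESS) {
        apr_pool_terminate();
        return 1;
    }

    /* Pool allocations need no individual free; they go away with the pool. */
    buf = apr_palloc(pool, 128);
    apr_snprintf(buf, 128, "allocated from pool '%s'", "example");
    puts(buf);

    apr_pool_destroy(pool);
    apr_pool_terminate();
    return 0;
}
#endif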
