⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 proxy_util.c

📁 Apache官方在今天放出产品系列2.2的最新版本2.2.11的源码包，它是最流行的HTTP服务器软件之一。
💻 C
📖 第 1 页 / 共 5 页
字号:
/*
 * NOTE(review): the lines below are the tail of a function whose beginning
 * lies above this chunk (it scans conf->balancers for a case-insensitive
 * name match against `uri`). The start of that function is not visible
 * here, so its header documentation cannot be written from this view.
 */
    c = strchr(uri, ':');
    if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') {
       return NULL;
    }
    /* remove path from uri */
    if ((c = strchr(c + 3, '/'))) {
        *c = '\0';
    }
    balancer = (proxy_balancer *)conf->balancers->elts;
    for (i = 0; i < conf->balancers->nelts; i++) {
        if (strcasecmp(balancer->name, uri) == 0) {
            return balancer;
        }
        balancer++;
    }
    return NULL;
}

/*
 * Create a new balancer entry in conf->balancers from a balancer URL.
 *
 * The URL must have the shape scheme://name[/...]; any path component is
 * stripped and the remaining scheme://name is lowercased and used as the
 * balancer's name. The balancer starts with the "byrequests" lb method
 * (looked up via the provider API) and an empty workers array.
 *
 * Returns NULL on success, or a static error string describing the
 * failure (bad syntax, missing lb method, or mutex creation failure).
 */
PROXY_DECLARE(const char *) ap_proxy_add_balancer(proxy_balancer **balancer,
                                                  apr_pool_t *p,
                                                  proxy_server_conf *conf,
                                                  const char *url)
{
    char *c, *q, *uri = apr_pstrdup(p, url);
    proxy_balancer_method *lbmethod;

    /* Require at least scheme://x before accepting the name. */
    c = strchr(uri, ':');
    if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0')
       return "Bad syntax for a balancer name";
    /* remove path from uri */
    if ((q = strchr(c + 3, '/')))
        *q = '\0';

    ap_str_tolower(uri);
    *balancer = apr_array_push(conf->balancers);
    memset(*balancer, 0, sizeof(proxy_balancer));

    /*
     * NOTE: The default method is byrequests, which we assume
     * exists!
     */
    lbmethod = ap_lookup_provider(PROXY_LBMETHOD, "byrequests", "0");
    if (!lbmethod) {
        return "Can't find 'byrequests' lb method";
    }

    (*balancer)->name = uri;
    (*balancer)->lbmethod = lbmethod;
    (*balancer)->workers = apr_array_make(p, 5, sizeof(proxy_worker));
    /* XXX Is this a right place to create mutex */
#if APR_HAS_THREADS
    if (apr_thread_mutex_create(&((*balancer)->mutex),
                APR_THREAD_MUTEX_DEFAULT, p) != APR_SUCCESS) {
        /* XXX: Do we need to log something here */
        return "can not create thread mutex";
    }
#endif

    return NULL;
}

/*
 * Find the configured worker that best matches the given URL.
 *
 * Performs a longest-prefix match of each worker name against a
 * lowercased copy of url's scheme://hostname[:port] part (only that part
 * is lowercased; the path, if any, is left untouched). A worker name must
 * cover at least the scheme://hostname[:port] prefix (min_match) to be
 * considered, so a partial hostname never matches.
 *
 * Returns the best-matching worker, or NULL if the URL is malformed or
 * no worker matches.
 */
PROXY_DECLARE(proxy_worker *) ap_proxy_get_worker(apr_pool_t *p,
                                                  proxy_server_conf *conf,
                                                  const char *url)
{
    proxy_worker *worker;
    proxy_worker *max_worker = NULL;
    int max_match = 0;
    int url_length;
    int min_match;
    int worker_name_length;
    const char *c;
    char *url_copy;
    int i;

    c = ap_strchr_c(url, ':');
    if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') {
       return NULL;
    }

    url_copy = apr_pstrdup(p, url);
    url_length = strlen(url);

    /*
     * We need to find the start of the path and
     * therefore we know the length of the scheme://hostname/
     * part to we can force-lowercase everything up to
     * the start of the path.
     */
    c = ap_strchr_c(c+3, '/');
    if (c) {
        char *pathstart;
        /* Temporarily terminate at the path so only the
         * scheme://host[:port] prefix is lowercased. */
        pathstart = url_copy + (c - url);
        *pathstart = '\0';
        ap_str_tolower(url_copy);
        min_match = strlen(url_copy);
        *pathstart = '/';
    }
    else {
        ap_str_tolower(url_copy);
        min_match = strlen(url_copy);
    }

    worker = (proxy_worker *)conf->workers->elts;

    /*
     * Do a "longest match" on the worker name to find the worker that
     * fits best to the URL, but keep in mind that we must have at least
     * a minimum matching of length min_match such that
     * scheme://hostname[:port] matches between worker and url.
     */
    for (i = 0; i < conf->workers->nelts; i++) {
        if ( ((worker_name_length = strlen(worker->name)) <= url_length)
           && (worker_name_length >= min_match)
           && (worker_name_length > max_match)
           && (strncmp(url_copy, worker->name, worker_name_length) == 0) ) {
            max_worker = worker;
            max_match = worker_name_length;
        }
        worker++;
    }
    return max_worker;
}

#if APR_HAS_THREADS
/*
 * Pool cleanup for a worker's connection pool: when the worker owns a
 * reslist, null out the pool pointer so later users can tell the pool
 * subpool has been torn down.
 */
static apr_status_t conn_pool_cleanup(void *theworker)
{
    proxy_worker *worker = (proxy_worker *)theworker;
    if (worker->cp->res) {
        worker->cp->pool = NULL;
    }
    return APR_SUCCESS;
}
#endif

/*
 * Allocate and attach a connection pool (proxy_conn_pool) to a worker.
 * The pool's subpool is used for recycling connections.
 */
static void init_conn_pool(apr_pool_t *p, proxy_worker *worker)
{
    apr_pool_t *pool;
    proxy_conn_pool *cp;

    /*
     * Create a connection pool's subpool.
     * This pool is used for connection recycling.
     * Once the worker is added it is never removed but
     * it can be disabled.
     */
    apr_pool_create(&pool, p);
    apr_pool_tag(pool, "proxy_worker_cp");
    /*
     * Alloc from the same pool as worker.
     * proxy_conn_pool is permanently attached to the worker.
     */
    cp = (proxy_conn_pool *)apr_pcalloc(p, sizeof(proxy_conn_pool));
    cp->pool = pool;
    worker->cp = cp;
}

/*
 * Create a new worker entry in conf->workers from an absolute URL.
 *
 * The URL is parsed with apr_uri_parse; scheme and hostname are
 * lowercased, and the worker name is the re-assembled URL (password
 * revealed so identical workers compare equal). The worker gets the next
 * global id (proxy_lb_workers — presumably a module-global counter
 * defined above this chunk; not visible here), default flush settings,
 * smax of -1 ("unset"), and its own connection pool.
 *
 * Returns NULL on success, or a static error string on failure.
 *
 * NOTE(review): rv is declared int but apr_uri_parse returns
 * apr_status_t — harmless here, but worth confirming against apr.h.
 */
PROXY_DECLARE(const char *) ap_proxy_add_worker(proxy_worker **worker,
                                                apr_pool_t *p,
                                                proxy_server_conf *conf,
                                                const char *url)
{
    int rv;
    apr_uri_t uri;

    rv = apr_uri_parse(p, url, &uri);
    if (rv != APR_SUCCESS) {
        return "Unable to parse URL";
    }
    if (!uri.hostname || !uri.scheme) {
        return "URL must be absolute!";
    }

    ap_str_tolower(uri.hostname);
    ap_str_tolower(uri.scheme);
    *worker = apr_array_push(conf->workers);
    memset(*worker, 0, sizeof(proxy_worker));
    (*worker)->name = apr_uri_unparse(p, &uri, APR_URI_UNP_REVEALPASSWORD);
    (*worker)->scheme = uri.scheme;
    (*worker)->hostname = uri.hostname;
    (*worker)->port = uri.port;
    (*worker)->id   = proxy_lb_workers;
    (*worker)->flush_packets = flush_off;
    (*worker)->flush_wait = PROXY_FLUSH_WAIT;
    /* Increase the total worker count */
    (*worker)->smax = -1;
    proxy_lb_workers++;
    init_conn_pool(p, *worker);
#if APR_HAS_THREADS
    if (apr_thread_mutex_create(&((*worker)->mutex),
                APR_THREAD_MUTEX_DEFAULT, p) != APR_SUCCESS) {
        /* XXX: Do we need to log something here */
        return "can not create thread mutex";
    }
#endif

    return NULL;
}

/*
 * Allocate a bare, zeroed worker that is NOT added to any configuration
 * array. It still consumes a global id and gets its own connection pool.
 */
PROXY_DECLARE(proxy_worker *) ap_proxy_create_worker(apr_pool_t *p)
{
    proxy_worker *worker;
    worker = (proxy_worker *)apr_pcalloc(p, sizeof(proxy_worker));
    worker->id = proxy_lb_workers;
    worker->smax = -1;
    /* Increase the total worker count */
    proxy_lb_workers++;
    init_conn_pool(p, worker);

    return worker;
}

/*
 * Append a copy of an existing worker to a balancer's workers array.
 * This is a shallow memcpy — pointer members (name, cp, ...) are shared
 * with the source worker; only the id is given a fresh global value.
 */
PROXY_DECLARE(void)
ap_proxy_add_worker_to_balancer(apr_pool_t *pool, proxy_balancer *balancer,
                                proxy_worker *worker)
{
    proxy_worker *runtime;

    runtime = apr_array_push(balancer->workers);
    memcpy(runtime, worker, sizeof(proxy_worker));
    runtime->id = proxy_lb_workers;
    /* Increase the total runtime count */
    proxy_lb_workers++;
}

/*
 * Select the worker (and possibly balancer) that will handle a request.
 *
 * First gives registered pre_request hooks a chance to pick; if they
 * DECLINED with no balancer chosen, falls back to longest-match worker
 * lookup, then to the generic forward/reverse proxy workers depending on
 * r->proxyreq. If a balancer was chosen but the hook still DECLINED, all
 * of the balancer's workers are busy and 503 is returned.
 *
 * Returns OK on success, HTTP_SERVICE_UNAVAILABLE when a balancer has no
 * available worker, or whatever the pre_request hook returned.
 */
PROXY_DECLARE(int) ap_proxy_pre_request(proxy_worker **worker,
                                        proxy_balancer **balancer,
                                        request_rec *r,
                                        proxy_server_conf *conf, char **url)
{
    int access_status;

    access_status = proxy_run_pre_request(worker, balancer, r, conf, url);
    if (access_status == DECLINED && *balancer == NULL) {
        *worker = ap_proxy_get_worker(r->pool, conf, *url);
        if (*worker) {
            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                          "proxy: %s: found worker %s for %s",
                           (*worker)->scheme, (*worker)->name, *url);
            *balancer = NULL;
            access_status = OK;
        }
        else if (r->proxyreq == PROXYREQ_PROXY) {
            if (conf->forward) {
                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                              "proxy: *: found forward proxy worker for %s",
                               *url);
                *balancer = NULL;
                *worker = conf->forward;
                access_status = OK;
            }
        }
        else if (r->proxyreq == PROXYREQ_REVERSE) {
            if (conf->reverse) {
                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                              "proxy: *: found reverse proxy worker for %s",
                               *url);
                *balancer = NULL;
                *worker = conf->reverse;
                access_status = OK;
            }
        }
    }
    else if (access_status == DECLINED && *balancer != NULL) {
        /* All the workers are busy */
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
          "proxy: all workers are busy.  Unable to serve %s",
          *url);
        access_status = HTTP_SERVICE_UNAVAILABLE;
    }
    return access_status;
}

/*
 * Run post_request hooks after a proxied request completes, but only
 * when a balancer was involved; plain worker requests return OK.
 */
PROXY_DECLARE(int) ap_proxy_post_request(proxy_worker *worker,
                                         proxy_balancer *balancer,
                                         request_rec *r,
                                         proxy_server_conf *conf)
{
    int access_status;
    if (balancer) {
        access_status = proxy_run_post_request(worker, balancer, r, conf);
    }
    else {
        access_status = OK;
    }

    return access_status;
}

/*
 * DEPRECATED
 *
 * Create a socket and connect it to the backend, walking the address
 * list (backend_addr->next) until one address connects. Applies
 * ProxyReceiveBufferSize, TCP_NODELAY, and the configured (or server
 * default) socket timeout before connecting. Failures on an address that
 * has alternatives are logged at DEBUG; the last address's failure is
 * logged at ERR.
 *
 * Returns 0 on success, 1 when no address could be connected.
 */
PROXY_DECLARE(int) ap_proxy_connect_to_backend(apr_socket_t **newsock,
                                               const char *proxy_function,
                                               apr_sockaddr_t *backend_addr,
                                               const char *backend_name,
                                               proxy_server_conf *conf,
                                               server_rec *s,
                                               apr_pool_t *p)
{
    apr_status_t rv;
    int connected = 0;
    int loglevel;

    while (backend_addr && !connected) {
        if ((rv = apr_socket_create(newsock, backend_addr->family,
                                    SOCK_STREAM, 0, p)) != APR_SUCCESS) {
            loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
            ap_log_error(APLOG_MARK, loglevel, rv, s,
                         "proxy: %s: error creating fam %d socket for target %s",
                         proxy_function,
                         backend_addr->family,
                         backend_name);
            /*
             * this could be an IPv6 address from the DNS but the
             * local machine won't give us an IPv6 socket; hopefully the
             * DNS returned an additional address to try
             */
            backend_addr = backend_addr->next;
            continue;
        }

#if !defined(TPF) && !defined(BEOS)
        if (conf->recv_buffer_size > 0 &&
            (rv = apr_socket_opt_set(*newsock, APR_SO_RCVBUF,
                                     conf->recv_buffer_size))) {
            ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
                         "apr_socket_opt_set(SO_RCVBUF): Failed to set "
                         "ProxyReceiveBufferSize, using default");
        }
#endif

        rv = apr_socket_opt_set(*newsock, APR_TCP_NODELAY, 1);
        if (rv != APR_SUCCESS && rv != APR_ENOTIMPL) {
             ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
                          "apr_socket_opt_set(APR_TCP_NODELAY): "
                          "Failed to set");
        }

        /* Set a timeout on the socket */
        if (conf->timeout_set == 1) {
            apr_socket_timeout_set(*newsock, conf->timeout);
        }
        else {
             apr_socket_timeout_set(*newsock, s->timeout);
        }

        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                     "proxy: %s: fam %d socket created to connect to %s",
                     proxy_function, backend_addr->family, backend_name);

        /* make the connection out of the socket */
        rv = apr_socket_connect(*newsock, backend_addr);

        /* if an error occurred, loop round and try again */
        if (rv != APR_SUCCESS) {
            apr_socket_close(*newsock);
            loglevel = backend_addr->next ? APLOG_DEBUG : APLOG_ERR;
            ap_log_error(APLOG_MARK, loglevel, rv, s,
                         "proxy: %s: attempt to connect to %pI (%s) failed",
                         proxy_function,
                         backend_addr,
                         backend_name);
            backend_addr = backend_addr->next;
            continue;
        }
        connected = 1;
    }
    return connected ? 0 : 1;
}

/*
 * Pool cleanup for a pooled backend connection: either recycle the
 * connection into the worker's reslist or tear it down.
 *
 * NOTE(review): this function is TRUNCATED in this chunk — it continues
 * past the visible source (the dangling `else` below is completed
 * further on). Do not treat the visible portion as complete.
 */
static apr_status_t connection_cleanup(void *theconn)
{
    proxy_conn_rec *conn = (proxy_conn_rec *)theconn;
    proxy_worker *worker = conn->worker;

    /*
     * If the connection pool is NULL the worker
     * cleanup has been run. Just return.
     */
    if (!worker->cp) {
        return APR_SUCCESS;
    }

#if APR_HAS_THREADS
    /* Sanity check: Did we already return the pooled connection? */
    if (conn->inreslist) {
        ap_log_perror(APLOG_MARK, APLOG_ERR, 0, conn->pool,
                      "proxy: Pooled connection 0x%pp for worker %s has been"
                      " already returned to the connection pool.", conn,
                      worker->name);
        return APR_SUCCESS;
    }
#endif

    /* determine if the connection need to be closed */
    if (conn->close_on_recycle || conn->close || worker->disablereuse ||
        !worker->is_address_reusable) {
        /* Clear the connection's subpool and rebuild a fresh, empty
         * proxy_conn_rec in it so the pooled slot can be reused. */
        apr_pool_t *p = conn->pool;
        apr_pool_clear(p);
        conn = apr_pcalloc(p, sizeof(proxy_conn_rec));
        conn->pool = p;
        conn->worker = worker;
        apr_pool_create(&(conn->scpool), p);
        apr_pool_tag(conn->scpool, "proxy_conn_scpool");
    }
#if APR_HAS_THREADS
    if (worker->hmax && worker->cp->res) {
        conn->inreslist = 1;
        apr_reslist_release(worker->cp->res, (void *)conn);
    }
    else
#endif

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -