📄 mod_caucho.c
  case HMUX_EXIT:
    break;

  default:
    len = hmux_read_len(s);
    cse_skip(s, len);
    break;
  }
  } while (code > 0 && code != HMUX_QUIT && code != HMUX_EXIT && code != ack);

  return code;
}

/**
 * handles a client request
 */
static int
write_request(stream_t *s, request_rec *r, config_t *config,
              cluster_t *cluster, int *keepalive,
              int session_index, int backup_index,
              char *ip, char *session_id)
{
  int len;
  int code;
  int write_length;
  time_t new_time;
  time_t start_time = r->request_time;

  hmux_start_channel(s, 1);
  write_env(s, r, session_id);
  write_headers(s, r);
  write_added_headers(s, r);

  /* read post data */
  if (ap_should_client_block(r)) {
    char buf[BUF_LENGTH];
    int ack_size = s->cluster_srun->srun->send_buffer_size;
    int send_length = 0;

    while ((len = ap_get_client_block(r, buf, BUF_LENGTH)) > 0) {
      /* ap_reset_timeout(r); */

      cse_write_packet(s, HMUX_DATA, buf, len);

      send_length += len;

      /* after filling the server's send buffer, yield and wait for an ACK */
      if (ack_size <= send_length) {
        send_length = 0;
        cse_write_byte(s, HMUX_YIELD);
        code = send_data(s, r, HMUX_ACK, keepalive);

        if (code < 0 || code == HMUX_QUIT || code == HMUX_EXIT)
          break;
      }
    }
  }

  cse_write_byte(s, HMUX_QUIT);
  code = send_data(s, r, HMUX_QUIT, keepalive);

  if (code >= 0 || s->sent_data)
    return code;

  /* nothing reached the client yet, so retry once on a fresh connection */
  write_length = s->write_length;

  if (cse_open_connection(s, cluster, session_index, backup_index,
                          r->request_time, r->pool)) {
    s->write_length = write_length;

    LOG(("retry connection %d\n", s->socket));

    return send_data(s, r, HMUX_QUIT, keepalive);
  }
  else {
    return HTTP_SERVICE_UNAVAILABLE;
  }
}

#ifdef WIN32
int random() { return 0; }
#endif

/**
 * Handle a request.
 */
static int
caucho_request(request_rec *r)
{
  config_t *config = cse_get_module_config(r);
  resin_host_t *host = 0;
  stream_t s;
  int retval;
  int keepalive = 0;
  int reuse;
  int session_index;
  int backup_index;
  char *ip;
  time_t now = r->request_time;
  char *session_id = 0;

  if (! config)
    return HTTP_SERVICE_UNAVAILABLE;

  if ((retval = ap_setup_client_block(r, REQUEST_CHUNKED_DECHUNK)))
    return retval;

  /* ap_soft_timeout("servlet request", r); */

  if (r->request_config && ! *config->alt_session_url_prefix
      && ((session_id = ap_get_module_config(r->request_config, &caucho_module))
          || r->prev
             && (session_id = ap_get_module_config(r->prev->request_config,
                                                   &caucho_module)))) {
    /* *session_id = *config->session_url_prefix; */
  }

  session_index = get_session_index(config, r, &backup_index);

  ip = r->connection->remote_ip;

  if (host) {
    /* host already chosen */
  }
  else if (config->manual_host)
    host = config->manual_host;
  else {
    host = cse_match_host(config,
                          ap_get_server_name(r),
                          ap_get_server_port(r),
                          now);
  }

  if (! host
      || ! cse_open_connection(&s, &host->cluster,
                               session_index, backup_index,
                               now, r->pool)) {
    return HTTP_SERVICE_UNAVAILABLE;
  }

  reuse = write_request(&s, r, config, &host->cluster, &keepalive,
                        session_index, backup_index, ip, session_id);

  /* ap_kill_timeout(r); */

  ap_rflush(r);

  if (reuse == HMUX_QUIT)
    cse_recycle(&s, now);
  else
    cse_close(&s, "no reuse");

  if (reuse == HTTP_SERVICE_UNAVAILABLE)
    return reuse;
  else
    return OK;
}
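The flow control in write_request depends on how HMUX packets are framed on the wire. The fragment below is a minimal, self-contained sketch of the framing implied by cse_write_packet() and hmux_read_len(): a one-byte command code followed by a two-byte big-endian payload length. This layout is an assumption drawn from those call sites, and the buffer type and helper name (hmux_buf_t, hmux_write_frame) are illustrative, not part of the module's API.

#include <string.h>

/* Illustrative output buffer; the real module writes through its stream_t. */
typedef struct {
  unsigned char data[8192];
  int len;
} hmux_buf_t;

/*
 * Sketch of HMUX framing as implied by cse_write_packet()/hmux_read_len():
 * one command byte, a 16-bit big-endian length, then the payload.
 * Assumed layout -- verify against the real protocol before relying on it.
 */
static int
hmux_write_frame(hmux_buf_t *out, int code, const char *payload, int len)
{
  if (len < 0 || len > 0xffff || out->len + 3 + len > (int) sizeof(out->data))
    return -1;

  out->data[out->len++] = (unsigned char) code;
  out->data[out->len++] = (unsigned char) (len >> 8);   /* high byte */
  out->data[out->len++] = (unsigned char) (len & 0xff); /* low byte  */

  memcpy(out->data + out->len, payload, len);
  out->len += len;

  return 0;
}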
/**
 * Print the statistics for each JVM.
 */
static void
jvm_status(cluster_t *cluster, request_rec *r)
{
  int i;
  stream_t s;

  ap_rputs("<center><table border=2 width='80%'>\n", r);
  ap_rputs("<tr><th width=\"30%\">Host</th>\n", r);
  ap_rputs("    <th>Active</th>\n", r);
  ap_rputs("    <th>Pooled</th>\n", r);
  ap_rputs("    <th>Connect<br>Timeout</th>\n", r);
  ap_rputs("    <th>Live<br>Time</th>\n", r);
  ap_rputs("    <th>Dead<br>Time</th>\n", r);
  ap_rputs("</tr>\n", r);

  for (; cluster; cluster = cluster->next) {
    for (i = 0; i < cluster->srun_capacity; i++) {
      cluster_srun_t *cluster_srun = cluster->srun_list + i;
      srun_t *srun = cluster_srun->srun;
      int port;
      int pool_count;

      if (! srun)
        continue;

      port = srun->port;
      /* occupancy of the circular connection pool */
      pool_count = ((srun->conn_head - srun->conn_tail + CONN_POOL_SIZE)
                    % CONN_POOL_SIZE);

      ap_rputs("<tr>", r);

      if (! cse_open(&s, cluster, cluster_srun, r->pool, 0)) {
        ap_rprintf(r, "<td bgcolor='#ff6666'>%d. %s:%d%s (down)</td>",
                   cluster_srun->index + 1,
                   srun->hostname ? srun->hostname : "localhost",
                   port,
                   cluster_srun->is_backup ? "*" : "");
      }
      else {
        ap_rprintf(r, "<td bgcolor='#66ff66'>%d. %s:%d%s (ok)</td>",
                   cluster_srun->index + 1,
                   srun->hostname ? srun->hostname : "localhost",
                   port,
                   cluster_srun->is_backup ? "*" : "");
      }

      /* This needs to be a close, because cse_open doesn't use recycle. */
      cse_close(&s, "caucho-status");
      LOG(("close\n"));

      ap_rprintf(r, "<td align=right>%d</td><td align=right>%d</td>",
                 srun->active_sockets, pool_count);

      ap_rprintf(r, "<td align=right>%d</td><td align=right>%d</td><td align=right>%d</td>",
                 srun->connect_timeout, srun->live_time, srun->dead_time);

      ap_rputs("</tr>\n", r);
    }
  }

  ap_rputs("</table></center>\n", r);
}

static void
escape_html(char *dst, char *src)
{
  int ch;

  for (; (ch = *src); src++) {
    switch (ch) {
    case '<':
      *dst++ = '&';
      *dst++ = 'l';
      *dst++ = 't';
      *dst++ = ';';
      break;

    case '&':
      *dst++ = '&';
      *dst++ = 'a';
      *dst++ = 'm';
      *dst++ = 'p';
      *dst++ = ';';
      break;

    default:
      *dst++ = ch;
      break;
    }
  }

  *dst = 0;
}
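escape_html above rewrites only '<' and '&', which is enough for the element content it is used on (the error message inside an h2). If the escaped text ever had to go inside an HTML attribute, a broader variant would be needed. The sketch below is an illustrative extension, not part of the module: it also escapes '>', '"' and the single quote, and takes the destination size so it cannot overrun the buffer.

#include <string.h>

/*
 * Illustrative, bounds-checked variant of escape_html (not the module's code):
 * also escapes '>', '"' and '\'' so the result is safe inside attribute values.
 * Returns 0 on success, -1 if dst_size is too small for the escaped string.
 */
static int
escape_html_attr(char *dst, size_t dst_size, const char *src)
{
  size_t used = 0;

  for (; *src; src++) {
    const char *rep = 0;
    size_t rep_len;

    switch (*src) {
    case '<':  rep = "&lt;";   break;
    case '>':  rep = "&gt;";   break;
    case '&':  rep = "&amp;";  break;
    case '"':  rep = "&quot;"; break;
    case '\'': rep = "&#39;";  break;
    }

    rep_len = rep ? strlen(rep) : 1;

    if (used + rep_len + 1 > dst_size)
      return -1;                     /* not enough room; refuse to overflow */

    if (rep) {
      memcpy(dst + used, rep, rep_len);
      used += rep_len;
    }
    else
      dst[used++] = *src;
  }

  dst[used] = 0;
  return 0;
}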
/**
 * Print a summary of the configuration so users can understand what's
 * going on.  Ping the server to check that it's up.
 */
static int
caucho_status(request_rec *r)
{
  resin_host_t *host;
  web_app_t *app;
  location_t *loc;
  time_t now = r->request_time;
  config_t *config = cse_get_module_config(r);

  r->content_type = "text/html";
  /* ap_soft_timeout("caucho status", r); */

  if (r->header_only) {
    /* ap_kill_timeout(r); */
    return OK;
  }

  ap_send_http_header(r);

  ap_rputs("<html><title>Status : Caucho Servlet Engine</title>\n", r);
  ap_rputs("<body bgcolor=white>\n", r);
  ap_rputs("<h1>Status : Caucho Servlet Engine</h1>\n", r);

  if (config->error) {
    char buf[BUF_LENGTH];

    escape_html(buf, config->error);
    ap_rprintf(r, "<h2 color='red'>Error : %s</h2>\n", buf);
  }

  ap_rprintf(r, "<h2>Configuration Cluster</h2>\n");
  jvm_status(&config->config_cluster, r);

  host = config ? config->hosts : 0;
  for (; host; host = host->next) {
    if (host != host->canonical)
      continue;

    /* check updates as appropriate */
    cse_match_host(config, host->name, host->port, now);

    if (! *host->name)
      ap_rprintf(r, "<h2>Default Virtual Host</h2>\n");
    else if (host->port)
      ap_rprintf(r, "<h2>Virtual Host: %s:%d</h2>\n",
                 host->name, host->port);
    else
      ap_rprintf(r, "<h2>Virtual Host: %s</h2>\n", host->name);

    jvm_status(&host->cluster, r);

    ap_rputs("<p><center><table border=2 cellspacing=0 cellpadding=2 width='80%'>\n", r);
    ap_rputs("<tr><th width=\"50%\">web-app\n", r);
    ap_rputs("    <th>url-pattern\n", r);

    app = host->applications;
    for (; app; app = app->next) {
      for (loc = app->locations; loc; loc = loc->next) {
        if (! strcasecmp(loc->prefix, "/META-INF")
            || ! strcasecmp(loc->prefix, "/WEB-INF"))
          continue;

        ap_rprintf(r, "<tr bgcolor='#ffcc66'><td>%s<td>%s%s%s%s%s</tr>\n",
                   *app->context_path ? app->context_path : "/",
                   loc->prefix,
                   ! loc->is_exact && ! loc->suffix ? "/*"
                   : loc->suffix && loc->prefix[0] ? "/" : "",
                   loc->suffix ? "*" : "",
                   loc->suffix ? loc->suffix : "",
                   loc->ignore ? " (ignore)" : "");
      }
    }

    ap_rputs("</table></center>\n", r);
  }

  ap_rputs("<hr>", r);
  ap_rprintf(r, "<em>%s</em>", VERSION);
  ap_rputs("</body></html>\n", r);

  /* ap_kill_timeout(r); */

  return OK;
}

/**
 * When a child process starts, clear the srun structure so it doesn't
 * mistakenly think the old sockets are open.
 */
static void
cse_open_child(server_rec *server, pool *p)
{
  LOG(("[%d] open child\n", getpid()));

  cse_close_all();
}

/**
 * Close all the connections cleanly when the Apache child process exits.
 *
 * @param server the Apache server object
 * @param p the Apache memory pool for the server.
 */
static void
cse_close_child(server_rec *server, pool *p)
{
  LOG(("[%d] close child\n", getpid()));

  cse_close_all();

  if (g_pool) {
    ap_destroy_pool(g_pool);
    g_pool = 0;
  }

  g_config = 0;

  LOG(("[%d] close child done\n", getpid()));
}

/*
 * The only required configuration is a pointer to resin.conf.
 */
static command_rec caucho_commands[] = {
    {"ResinConfigServer", cse_config_server_command, NULL, RSRC_CONF|ACCESS_CONF, TAKE12,
     "Adds a new configuration server."},
    {"CauchoStatus", cse_caucho_status_command, NULL, RSRC_CONF|ACCESS_CONF, TAKE1,
     "Configures the caucho-status."},
    {"CauchoConfigFile", cse_config_file_command, NULL, RSRC_CONF|ACCESS_CONF, TAKE1,
     "Pointer to the Caucho configuration file."},
    {"CauchoServerRoot", cse_config_server_root, NULL, RSRC_CONF|ACCESS_CONF, TAKE1,
     "The root server directory."},
    {"CauchoHost", cse_host_command, NULL, RSRC_CONF|ACCESS_CONF, TAKE12,
     "Servlet runner host."},
    {"CauchoBackup", cse_backup_command, NULL, RSRC_CONF|ACCESS_CONF, TAKE12,
     "Servlet runner backup."},
    {"CauchoErrorPage", cse_error_page_command, NULL, RSRC_CONF|ACCESS_CONF, TAKE1,
     "Error page when connections fail."},
    {NULL}
};

/*
 * Caucho right now has two content handlers:
 *   caucho-status: summary information for debugging
 *   caucho-request: dispatch a Caucho request
 */
static const handler_rec caucho_handlers[] =
{
    {"caucho-status", caucho_status},
    {"caucho-request", caucho_request},
    {NULL}
};

/*
 * module configuration
 *
 * cse_clean_jsessionid needs to be at [2] to clean up the ;jsessionid=
 * dispatch to make urls like /foo;jsessionid=aaaXXX work.
 *
 * cse_dispatch itself must be after [2] to make DirectoryIndex work.
 * cse_dispatch must be before [8] for mod_gzip to work.
 */
module caucho_module = {
    STANDARD_MODULE_STUFF,
    cse_module_init,            /* module initializer */
    cse_create_dir_config,      /* per-directory config creator */
    NULL,                       /* dir config merger */
    cse_create_server_config,   /* server config creator */
    NULL,                       /* server config merger */
    caucho_commands,            /* command table */
    caucho_handlers,            /* [7] list of handlers */
    cse_clean_jsessionid,       /* [2] filename-to-URI translation */
    NULL,                       /* [5] check/validate user_id */
    NULL,                       /* [6] check user_id is valid *here* */
    NULL,                       /* [4] check access by host address */
    cse_dispatch,               /* [7] MIME type checker/setter */
    NULL,                       /* [8] fixups */
    NULL,                       /* [10] logger */
    NULL,                       /* [3] header parser */
    cse_open_child,             /* apache child process init */
    cse_close_child,            /* apache child process exit/cleanup */
    NULL,                       /* [1] post read_request handling */
#if defined(EAPI)
    NULL,                       /* add_module */
    NULL,                       /* del_module */
    NULL,                       /* rewrite_command */
    NULL,                       /* new_connection */
#endif
};
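The module-table comment above notes that cse_clean_jsessionid runs in the filename-to-URI translation slot so that URLs like /foo;jsessionid=aaaXXX still dispatch correctly. As a rough illustration of that rewrite, and only an illustration (the real hook works on Apache's request_rec and the module configuration, not bare strings; strip_jsessionid is a hypothetical helper), here is a minimal sketch:

#include <stdio.h>
#include <string.h>

/*
 * Illustrative only: split "/foo;jsessionid=aaaXXX" into the clean URI "/foo"
 * and the session id "aaaXXX".  Returns 1 if a session id was found.
 */
static int
strip_jsessionid(char *uri, char *session_id, size_t session_size)
{
  char *match = strstr(uri, ";jsessionid=");

  if (! match)
    return 0;                         /* nothing to strip */

  /* copy out the session id that follows the prefix */
  strncpy(session_id, match + strlen(";jsessionid="), session_size - 1);
  session_id[session_size - 1] = 0;

  /* truncate the URI at the start of the ";jsessionid=" suffix */
  *match = 0;

  return 1;
}

int
main(void)
{
  char uri[] = "/foo;jsessionid=aaaXXX";
  char session[64];

  if (strip_jsessionid(uri, session, sizeof(session)))
    printf("uri=%s session=%s\n", uri, session);   /* uri=/foo session=aaaXXX */

  return 0;
}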