prefork.c
int curr_pollfd, last_pollfd = 0;
apr_pollfd_t *pollset;
int offset;
void *csd;
ap_sb_handle_t *sbh;
apr_status_t rv;
apr_bucket_alloc_t *bucket_alloc;
mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
* child initializes
*/
my_child_num = child_num_arg;
ap_my_pid = getpid();
csd = NULL;
requests_this_child = 0;
ap_fatal_signal_child_setup(ap_server_conf);
/* Get a sub context for global allocations in this child, so that
* we can have cleanups occur when the child exits.
*/
apr_allocator_create(&allocator);
apr_allocator_max_free_set(allocator, ap_max_mem_free);
apr_pool_create_ex(&pchild, pconf, NULL, allocator);
apr_allocator_owner_set(allocator, pchild);
apr_pool_create(&ptrans, pchild);
apr_pool_tag(ptrans, "transaction");
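/* Editorial sketch (not in the original source): because pchild is a
* sub-pool of pconf, a module can tie per-child state to the child's
* lifetime by registering a cleanup on it from its child_init hook:
*
*     apr_pool_cleanup_register(pchild, my_state, my_cleanup_fn,
*                               apr_pool_cleanup_null);
*
* my_state and my_cleanup_fn are hypothetical module-supplied values;
* the cleanup runs when the child exits and pchild is destroyed.
*/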
/* needs to be done before we switch UIDs so we have permissions */
ap_reopen_scoreboard(pchild, NULL, 0);
rv = apr_proc_mutex_child_init(&accept_mutex, ap_lock_fname, pchild);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
"Couldn't initialize cross-process lock in child");
clean_child_exit(APEXIT_CHILDFATAL);
}
if (unixd_setup_child()) {
clean_child_exit(APEXIT_CHILDFATAL);
}
ap_run_child_init(pchild, ap_server_conf);
ap_create_sb_handle(&sbh, pchild, my_child_num, 0);
(void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);
/* Set up the pollfd array */
listensocks = apr_pcalloc(pchild,
sizeof(*listensocks) * (num_listensocks));
for (lr = ap_listeners, i = 0; i < num_listensocks; lr = lr->next, i++) {
listensocks[i].accept_func = lr->accept_func;
listensocks[i].sd = lr->sd;
}
pollset = apr_palloc(pchild, sizeof(*pollset) * num_listensocks);
pollset[0].p = pchild;
for (i = 0; i < num_listensocks; i++) {
pollset[i].desc.s = listensocks[i].sd;
pollset[i].desc_type = APR_POLL_SOCKET;
pollset[i].reqevents = APR_POLLIN;
}
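/* Editorial note: only pollset[0].p is assigned above, presumably
* because the APR 0.9 apr_poll() takes its working pool from the first
* element of the array; the remaining elements don't need one.
*/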
mpm_state = AP_MPMQ_RUNNING;
bucket_alloc = apr_bucket_alloc_create(pchild);
while (!die_now) {
/*
* (Re)initialize this child to a pre-connection state.
*/
current_conn = NULL;
apr_pool_clear(ptrans);
if ((ap_max_requests_per_child > 0
&& requests_this_child++ >= ap_max_requests_per_child)) {
clean_child_exit(0);
}
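/* (ap_max_requests_per_child is set from the MaxRequestsPerChild
* directive; a value of 0 means the child never retires on count.)
*/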
(void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);
/*
* Wait for an acceptable connection to arrive.
*/
/* Lock around "accept", if necessary */
SAFE_ACCEPT(accept_mutex_on());
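/* SAFE_ACCEPT() is defined earlier in prefork.c; on platforms that can
* accept() on a single listener without serialization it reduces to a
* no-op unless there are multiple listeners, roughly:
*
*     #ifdef SINGLE_LISTEN_UNSERIALIZED_ACCEPT
*     #define SAFE_ACCEPT(stmt) do { if (ap_listeners->next) { stmt; } } while (0)
*     #else
*     #define SAFE_ACCEPT(stmt) do { stmt; } while (0)
*     #endif
*/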
if (num_listensocks == 1) {
offset = 0;
}
else {
/* multiple listening sockets - need to poll */
for (;;) {
apr_status_t ret;
apr_int32_t n;
ret = apr_poll(pollset, num_listensocks, &n, -1);
if (ret != APR_SUCCESS) {
if (APR_STATUS_IS_EINTR(ret)) {
continue;
}
/* Single Unix documents select as returning errnos
* EBADF, EINTR, and EINVAL... and in none of those
* cases does it make sense to continue. In fact
* on Linux 2.0.x we seem to end up with EFAULT
* occasionally, and we'd loop forever due to it.
*/
ap_log_error(APLOG_MARK, APLOG_ERR, ret, ap_server_conf,
"apr_poll: (listen)");
clean_child_exit(1);
}
/* find a listener */
curr_pollfd = last_pollfd;
do {
curr_pollfd++;
if (curr_pollfd >= num_listensocks) {
curr_pollfd = 0;
}
/* XXX: Should we check for POLLERR? */
if (pollset[curr_pollfd].rtnevents & APR_POLLIN) {
last_pollfd = curr_pollfd;
offset = curr_pollfd;
goto got_fd;
}
} while (curr_pollfd != last_pollfd);
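/* The scan above starts one slot past last_pollfd and wraps, so ready
* listeners are serviced round-robin rather than always favoring the
* lowest descriptor. E.g. with three listeners and last_pollfd == 1,
* the probe order is 2, 0, 1.
*/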
continue;
}
}
got_fd:
/* if we accept() something we don't want to die, so we have to
* defer the exit
*/
status = listensocks[offset].accept_func(&csd,
&listensocks[offset], ptrans);
SAFE_ACCEPT(accept_mutex_off()); /* unlock after "accept" */
if (status == APR_EGENERAL) {
/* resource shortage or should-not-occur occurred */
clean_child_exit(1);
}
else if (status != APR_SUCCESS) {
continue;
}
/*
* We now have a connection, so set it up with the appropriate
* socket options, file descriptors, and read/write buffers.
*/
current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd,
my_child_num, sbh, bucket_alloc);
if (current_conn) {
ap_process_connection(current_conn, csd);
ap_lingering_close(current_conn);
}
/* Check the pod and the generation number after processing a
* connection so that we'll go away if a graceful restart occurred
* while we were processing the connection or we are the lucky
* idle server process that gets to die.
*/
if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
die_now = 1;
}
else if (ap_my_generation !=
ap_scoreboard_image->global->running_generation) { /* restart? */
/* yeah, this could be non-graceful restart, in which case the
* parent will kill us soon enough, but why bother checking?
*/
die_now = 1;
}
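/* (The "pod" is the pipe of death: the parent writes one byte per child
* it wants to retire, and whichever idle child reads a byte exits
* gracefully -- see the ap_mpm_pod_signal() call in
* perform_idle_server_maintenance() below.)
*/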
}
clean_child_exit(0);
}
static int make_child(server_rec *s, int slot)
{
int pid;
if (slot + 1 > ap_max_daemons_limit) {
ap_max_daemons_limit = slot + 1;
}
if (one_process) {
apr_signal(SIGHUP, just_die);
/* Don't catch AP_SIG_GRACEFUL in ONE_PROCESS mode :) */
apr_signal(SIGINT, just_die);
#ifdef SIGQUIT
apr_signal(SIGQUIT, SIG_DFL);
#endif
apr_signal(SIGTERM, just_die);
child_main(slot);
}
(void) ap_update_child_status_from_indexes(slot, 0, SERVER_STARTING,
(request_rec *) NULL);
#ifdef _OSD_POSIX
/* BS2000 requires a "special" version of fork() before a setuid() call */
if ((pid = os_fork(unixd_config.user_name)) == -1) {
#elif defined(TPF)
if ((pid = os_fork(s, slot)) == -1) {
#else
if ((pid = fork()) == -1) {
#endif
ap_log_error(APLOG_MARK, APLOG_ERR, errno, s, "fork: Unable to fork new process");
/* fork didn't succeed. Fix the scoreboard or else
* it will say SERVER_STARTING forever and ever
*/
(void) ap_update_child_status_from_indexes(slot, 0, SERVER_DEAD,
(request_rec *) NULL);
/* In case system resources are maxed out, we don't want
Apache running away with the CPU trying to fork over and
over and over again. */
sleep(10);
return -1;
}
if (!pid) {
#ifdef HAVE_BINDPROCESSOR
/* by default AIX binds to a single processor
* this bit unbinds children which will then bind to another cpu
*/
int status = bindprocessor(BINDPROCESS, (int)getpid(),
PROCESSOR_CLASS_ANY);
if (status != OK) {
ap_log_error(APLOG_MARK, APLOG_WARNING, errno,
ap_server_conf, "processor unbind failed %d", status);
}
#endif
RAISE_SIGSTOP(MAKE_CHILD);
AP_MONCONTROL(1);
/* Disable the parent's signal handlers and set up proper handling in
* the child.
*/
apr_signal(SIGHUP, just_die);
apr_signal(SIGTERM, just_die);
/* The child process doesn't do anything for AP_SIG_GRACEFUL.
* Instead, the pod is used for signalling graceful restart.
*/
apr_signal(AP_SIG_GRACEFUL, SIG_IGN);
child_main(slot);
}
ap_scoreboard_image->parent[slot].pid = pid;
return 0;
}
/* start up a bunch of children */
static void startup_children(int number_to_start)
{
int i;
for (i = 0; number_to_start && i < ap_daemons_limit; ++i) {
if (ap_scoreboard_image->servers[i][0].status != SERVER_DEAD) {
continue;
}
if (make_child(ap_server_conf, i) < 0) {
break;
}
--number_to_start;
}
}
/*
* idle_spawn_rate is the number of children that will be spawned on the
* next maintenance cycle if there aren't enough idle servers. It is
* doubled up to MAX_SPAWN_RATE, and reset only when a cycle goes by
* without the need to spawn.
*/
static int idle_spawn_rate = 1;
#ifndef MAX_SPAWN_RATE
#define MAX_SPAWN_RATE (32)
#endif
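/* Worked example: with the defaults above, consecutive busy maintenance
* cycles spawn 1, 2, 4, 8, 16, and then 32 children per cycle; any
* cycle that needs no spawning resets the rate to 1.
*/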
static int hold_off_on_exponential_spawning;
static void perform_idle_server_maintenance(apr_pool_t *p)
{
int i;
int to_kill;
int idle_count;
worker_score *ws;
int free_length;
int free_slots[MAX_SPAWN_RATE];
int last_non_dead;
int total_non_dead;
/* initialize the free_list */
free_length = 0;
to_kill = -1;
idle_count = 0;
last_non_dead = -1;
total_non_dead = 0;
for (i = 0; i < ap_daemons_limit; ++i) {
int status;
if (i >= ap_max_daemons_limit && free_length == idle_spawn_rate)
break;
ws = &ap_scoreboard_image->servers[i][0];
status = ws->status;
if (status == SERVER_DEAD) {
/* try to keep children numbers as low as possible */
if (free_length < idle_spawn_rate) {
free_slots[free_length] = i;
++free_length;
}
}
else {
/* We consider a starting server as idle because we started it
* at least a cycle ago, and if it still hasn't finished starting
* then we're just going to swamp things worse by forking more.
* So we hopefully won't need to fork more if we count it.
* This depends on the ordering of SERVER_READY and SERVER_STARTING.
*/
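/* (Per scoreboard.h, SERVER_DEAD < SERVER_STARTING < SERVER_READY <
* the SERVER_BUSY_* states, which is what lets the <= test below count
* both STARTING and READY children as idle.)
*/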
if (status <= SERVER_READY) {
++idle_count;
/* Always kill the highest-numbered child if we have to...
* for no really well-thought-out reason other than observing
* server behaviour under Linux, where lower-numbered children
* tend to service more hits (and hence are more likely to have
* their data in CPU caches).
*/
to_kill = i;
}
++total_non_dead;
last_non_dead = i;
}
}
ap_max_daemons_limit = last_non_dead + 1;
if (idle_count > ap_daemons_max_free) {
/* kill off one child... we use the pod because that'll cause it to
* shut down gracefully, in case it happened to pick up a request
* while we were counting
*/
ap_mpm_pod_signal(pod);
idle_spawn_rate = 1;
}
else if (idle_count < ap_daemons_min_free) {
/* terminate the free list */
if (free_length == 0) {
/* only report this condition once */
static int reported = 0;
if (!reported) {
ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf,
"server reached MaxClients setting, consider"
" raising the MaxClients setting");
reported = 1;
}
idle_spawn_rate = 1;
}
else {
if (idle_spawn_rate >= 8) {
ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf,
"server seems busy, (you may need "
"to increase StartServers, or Min/MaxSpareServers), "
"spawning %d children, there are %d idle, and "
"%d total children", idle_spawn_rate,
idle_count, total_non_dead);
}
for (i = 0; i < free_length; ++i) {
#ifdef TPF
if (make_child(ap_server_conf, free_slots[i]) == -1) {
if (free_length == 1) {
shutdown_pending = 1;
ap_log_error(APLOG_MARK, APLOG_EMERG, 0, ap_server_conf,
"No active child processes: shutting down");
}
}
#else
make_child(ap_server_conf, free_slots[i]);
#endif /* TPF */
}
/* the next time around we want to spawn twice as many if this
* wasn't good enough, but not if we've just done a graceful
*/
if (hold_off_on_exponential_spawning) {
--hold_off_on_exponential_spawning;
}
else if (idle_spawn_rate < MAX_SPAWN_RATE) {
idle_spawn_rate *= 2;
}
}
}
else {
idle_spawn_rate = 1;
}
}
/*****************************************************************
* Executive routines.
*/
int ap_mpm_run(apr_pool_t *_pconf, apr_pool_t *plog, server_rec *s)
{
int index;
int remaining_children_to_start;
apr_status_t rv;
ap_log_pid(pconf, ap_pid_fname);
first_server_limit = server_limit;
if (changed_limit_at_restart) {
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
"WARNING: Attempt to change ServerLimit "
"ignored during restart");
changed_limit_at_restart = 0;
}
/* Initialize cross-process accept lock */
ap_lock_fname = apr_psprintf(_pconf, "%s.%" APR_PID_T_FMT,
ap_server_root_relative(_pconf, ap_lock_fname),
ap_my_pid);
rv = apr_proc_mutex_create(&accept_mutex, ap_lock_fname,
ap_accept_lock_mech, _pconf);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s,
"Couldn't create accept lock");
mpm_state = AP_MPMQ_STOPPING;
return 1;
}
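/* Editorial note: the mutex created here in the parent is re-attached
* in each child by the apr_proc_mutex_child_init() call near the top of
* child_main(); create-in-parent / child_init-in-child is the standard
* APR pattern for cross-process locks.
*/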
#if APR_USE_SYSVSEM_SERIALIZE
if (ap_accept_lock_mech == APR_LOCK_DEFAULT ||
ap_accept_lock_mech == APR_LOCK_SYSVSEM) {
#else
if (ap_accept_lock_mech == APR_LOCK_SYSVSEM) {
#endif
rv = unixd_set_proc_mutex_perms(accept_mutex);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s,
"Couldn't set permissions on cross-process lock; "
"check User and Group directives");
mpm_state = AP_MPMQ_STOPPING;
return 1;