beos.c
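/* Worker and process-management code from the BeOS MPM of Apache httpd
 * (beos.c): the tail of the worker thread's accept loop, worker
 * creation and replacement, idle-server maintenance, the parent's main
 * loop, and the MPM query and run hooks.
 */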
apr_thread_mutex_lock(accept_mutex);
while (!this_worker_should_exit) {
apr_int16_t event;
apr_status_t ret;
ret = apr_poll(pollset, num_listening_sockets + 1, &srv, -1);
if (ret != APR_SUCCESS) {
if (APR_STATUS_IS_EINTR(ret)) {
continue;
}
/* poll() will only return errors in catastrophic
* circumstances. Let's try exiting gracefully, for now. */
ap_log_error(APLOG_MARK, APLOG_ERR, ret, (const server_rec *)
ap_server_conf, "apr_poll: (listen)");
this_worker_should_exit = 1;
} else {
/* if we've bailed in apr_poll what's the point of trying to use the data? */
apr_poll_revents_get(&event, listening_sockets[0], pollset);
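/* listening_sockets[0] is the UDP control socket: readable data there
 * is treated as a request for this worker to drain the datagram and
 * exit.
 */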
if (event & APR_POLLIN){
apr_sockaddr_t *rec_sa;
apr_size_t len = 5;
char *tmpbuf = apr_palloc(ptrans, sizeof(char) * 5);
apr_sockaddr_info_get(&rec_sa, "127.0.0.1", APR_UNSPEC, 7772, 0, ptrans);
if ((ret = apr_recvfrom(rec_sa, listening_sockets[0], 0, tmpbuf, &len))
!= APR_SUCCESS){
ap_log_error(APLOG_MARK, APLOG_ERR, ret, NULL,
"error getting data from UDP!!");
}else {
/* add checking??? */
}
this_worker_should_exit = 1;
}
}
if (this_worker_should_exit) break;
if (num_listening_sockets == 1) {
sd = ap_listeners->sd;
goto got_fd;
}
else {
/* find a listener */
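/* Scan round-robin, starting just after the listener that was used
 * last time, so that one busy listener cannot starve the others.
 */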
curr_pollfd = last_pollfd;
do {
curr_pollfd++;
if (curr_pollfd > num_listening_sockets)
curr_pollfd = 1;
/* Get the revent... */
apr_poll_revents_get(&event, listening_sockets[curr_pollfd], pollset);
if (event & APR_POLLIN) {
last_pollfd = curr_pollfd;
sd = listening_sockets[curr_pollfd];
goto got_fd;
}
} while (curr_pollfd != last_pollfd);
}
}
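/* We arrive here still holding accept_mutex, either with sd pointing
 * at a listener that has a connection pending or with
 * this_worker_should_exit set.
 */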
got_fd:
if (!this_worker_should_exit) {
rv = apr_accept(&csd, sd, ptrans);
apr_thread_mutex_unlock(accept_mutex);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
"apr_accept");
} else {
process_socket(ptrans, csd, child_slot, bucket_alloc);
requests_this_child--;
}
}
else {
apr_thread_mutex_unlock(accept_mutex);
break;
}
apr_pool_clear(ptrans);
}
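/* This worker is finished: mark its slot dead in the scoreboard,
 * destroy its bucket allocator, and decrement the global worker count
 * before the thread returns.
 */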
ap_update_child_status_from_indexes(0, child_slot, SERVER_DEAD, (request_rec*)NULL);
apr_bucket_alloc_destroy(bucket_alloc);
ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, NULL,
"worker_thread %ld exiting", find_thread(NULL));
apr_thread_mutex_lock(worker_thread_count_mutex);
worker_thread_count--;
apr_thread_mutex_unlock(worker_thread_count_mutex);
return (0);
}
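/* Start a worker thread for the given scoreboard slot: allocate its
 * proc_info, create its pool, spawn the thread, and record its tid in
 * the scoreboard. Returns 0 on success, -1 if the thread could not be
 * spawned.
 */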
static int make_worker(int slot)
{
thread_id tid;
proc_info *my_info = (proc_info *)malloc(sizeof(proc_info)); /* freed by thread... */
if (my_info == NULL) {
ap_log_error(APLOG_MARK, APLOG_ALERT, errno, ap_server_conf,
"malloc: out of memory");
clean_child_exit(APEXIT_CHILDFATAL);
}
my_info->slot = slot;
apr_pool_create(&my_info->tpool, pchild);
if (slot + 1 > ap_max_child_assigned)
ap_max_child_assigned = slot + 1;
if (one_process) {
set_signals();
ap_scoreboard_image->parent[0].pid = getpid();
return 0;
}
(void) ap_update_child_status_from_indexes(0, slot, SERVER_STARTING, (request_rec*)NULL);
tid = spawn_thread(worker_thread, "apache_worker", B_NORMAL_PRIORITY,
my_info);
if (tid < B_NO_ERROR) {
ap_log_error(APLOG_MARK, APLOG_ERR, errno, NULL,
"spawn_thread: Unable to start a new thread");
* In case system resources are maxed out, we don't want
* Apache running away with the CPU trying to fork over and
* over and over again.
*/
(void) ap_update_child_status_from_indexes(0, slot, SERVER_DEAD,
(request_rec*)NULL);
sleep(10);
free(my_info);
return -1;
}
resume_thread(tid);
ap_scoreboard_image->servers[0][slot].tid = tid;
return 0;
}
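/* Callback that respawns a worker for the given slot unless a restart
 * or shutdown is already pending (the slot index arrives cast through
 * the void *data argument).
 */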
static void check_restart(void *data)
{
if (!restart_pending && !shutdown_pending) {
int slot = (int)data;
make_worker(slot);
ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL,
"spawning a new worker thread in slot %d", slot);
}
}
/* start up a bunch of children */
static void startup_threads(int number_to_start)
{
int i;
for (i = 0; number_to_start && i < ap_thread_limit; ++i) {
if (ap_scoreboard_image->servers[0][i].tid) {
continue;
}
if (make_worker(i) < 0) {
break;
}
--number_to_start;
}
}
/*
 * spawn_rate is the number of workers that will be spawned on the
 * next maintenance cycle if there aren't enough idle ones. It is
 * doubled up to MAX_SPAWN_RATE, and reset only when a cycle goes by
 * without the need to spawn.
 */
static int spawn_rate = 1;
#ifndef MAX_SPAWN_RATE
#define MAX_SPAWN_RATE (32)
#endif
static int hold_off_on_exponential_spawning;
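/* Scan the scoreboard for unused slots and start up to spawn_rate new
 * workers in them. spawn_rate doubles (up to MAX_SPAWN_RATE) while
 * spawning is still needed and resets to 1 on a cycle where nothing
 * had to be spawned.
 */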
static void perform_idle_server_maintenance(void)
{
int i;
int free_length;
int free_slots[MAX_SPAWN_RATE];
int last_non_dead = -1;
/* initialize the free_list */
free_length = 0;
for (i = 0; i < ap_thread_limit; ++i) {
if (ap_scoreboard_image->servers[0][i].tid == 0) {
if (free_length < spawn_rate) {
free_slots[free_length] = i;
++free_length;
}
}
else {
last_non_dead = i;
}
if (i >= ap_max_child_assigned && free_length >= spawn_rate) {
break;
}
}
ap_max_child_assigned = last_non_dead + 1;
if (free_length > 0) {
for (i = 0; i < free_length; ++i) {
make_worker(free_slots[i]);
}
/* the next time around we want to spawn twice as many if this
 * wasn't good enough, but not if we've just done a graceful restart
 */
if (hold_off_on_exponential_spawning) {
--hold_off_on_exponential_spawning;
} else if (spawn_rate < MAX_SPAWN_RATE) {
spawn_rate *= 2;
}
} else {
spawn_rate = 1;
}
}
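/* The parent's main loop: wait for a worker to exit or for a timeout.
 * A fatal worker exit triggers shutdown; a normal exit frees the slot
 * and, while the initial workers are still being started, is replaced
 * 1-for-1. On a timeout, start any remaining initial workers or run
 * idle-server maintenance.
 */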
static void server_main_loop(int remaining_threads_to_start)
{
int child_slot;
apr_exit_why_e exitwhy;
int status;
apr_proc_t pid;
int i;
while (!restart_pending && !shutdown_pending) {
ap_wait_or_timeout(&exitwhy, &status, &pid, pconf);
if (pid.pid >= 0) {
if (ap_process_child_status(&pid, exitwhy, status) == APEXIT_CHILDFATAL) {
shutdown_pending = 1;
child_fatal = 1;
return;
}
/* non-fatal death... note that it's gone in the scoreboard. */
child_slot = -1;
for (i = 0; i < ap_max_child_assigned; ++i) {
if (ap_scoreboard_image->servers[0][i].tid == pid.pid) {
child_slot = i;
break;
}
}
if (child_slot >= 0) {
ap_scoreboard_image->servers[0][child_slot].tid = 0;
(void) ap_update_child_status_from_indexes(0, child_slot,
SERVER_DEAD,
(request_rec*)NULL);
if (remaining_threads_to_start
&& child_slot < ap_thread_limit) {
/* we're still doing a 1-for-1 replacement of dead
* children with new children
*/
make_worker(child_slot);
--remaining_threads_to_start;
}
#if APR_HAS_OTHER_CHILD
}
else if (apr_proc_other_child_read(&pid, status) == 0) {
/* handled */
#endif
}
else if (is_graceful) {
/* Great, we've probably just lost a slot in the
* scoreboard. Somehow we don't know about this
* child.
*/
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ap_server_conf,
"long lost child came home! (pid %ld)", pid.pid);
}
/* Don't perform idle maintenance when a child dies,
* only do it when there's a timeout. Remember only a
* finite number of children can die, and it's pretty
* pathological for a lot to die suddenly.
*/
continue;
}
else if (remaining_threads_to_start) {
/* we hit a 1 second timeout in which none of the previous
* generation of children needed to be reaped... so assume
* they're all done, and pick up the slack if any is left.
*/
startup_threads(remaining_threads_to_start);
remaining_threads_to_start = 0;
/* In any event we really shouldn't do the code below because
* few of the servers we just started are in the IDLE state
* yet, so we'd mistakenly create an extra server.
*/
continue;
}
perform_idle_server_maintenance();
}
}
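/* Report this MPM's limits and current settings to the core. */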
AP_DECLARE(apr_status_t) ap_mpm_query(int query_code, int *result)
{
switch(query_code){
case AP_MPMQ_MAX_DAEMON_USED:
*result = ap_max_child_assigned;
return APR_SUCCESS;
case AP_MPMQ_IS_THREADED:
*result = AP_MPMQ_DYNAMIC;
return APR_SUCCESS;
case AP_MPMQ_IS_FORKED:
*result = AP_MPMQ_NOT_SUPPORTED;
return APR_SUCCESS;
case AP_MPMQ_HARD_LIMIT_DAEMONS:
*result = HARD_SERVER_LIMIT;
return APR_SUCCESS;
case AP_MPMQ_HARD_LIMIT_THREADS:
*result = HARD_THREAD_LIMIT;
return APR_SUCCESS;
case AP_MPMQ_MAX_THREADS:
*result = HARD_THREAD_LIMIT;
return APR_SUCCESS;
case AP_MPMQ_MIN_SPARE_DAEMONS:
*result = 0;
return APR_SUCCESS;
case AP_MPMQ_MIN_SPARE_THREADS:
*result = min_spare_threads;
return APR_SUCCESS;
case AP_MPMQ_MAX_SPARE_DAEMONS:
*result = 0;
return APR_SUCCESS;
case AP_MPMQ_MAX_SPARE_THREADS:
*result = max_spare_threads;
return APR_SUCCESS;
case AP_MPMQ_MAX_REQUESTS_DAEMON:
*result = ap_max_requests_per_thread;
return APR_SUCCESS;
case AP_MPMQ_MAX_DAEMONS:
*result = HARD_SERVER_LIMIT;
return APR_SUCCESS;
case AP_MPMQ_MPM_STATE:
*result = mpm_state;
return APR_SUCCESS;
}
return APR_ENOTIMPL;
}
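/* The MPM's run hook: raise the file-descriptor limit, create the UDP
 * control socket that the workers poll alongside the listeners, and
 * then run the server.
 */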
int ap_mpm_run(apr_pool_t *_pconf, apr_pool_t *plog, server_rec *s)
{
int remaining_threads_to_start, i,j;
apr_status_t rv;
ap_listen_rec *lr;
pconf = _pconf;
ap_server_conf = s;
/* Increase the available pool of fd's. This code from
* Joe Kloss <joek@be.com>
*/
if( FD_SETSIZE > 128 && (i = _kset_fd_limit_( 128 )) < 0 ){
ap_log_error(APLOG_MARK, APLOG_ERR, i, s,
"could not set FD_SETSIZE (_kset_fd_limit_ failed)");
}
/* BeOS R5 doesn't support pipes in select() calls, so we use a UDP
 * socket, which works on both R5 and BONE. If we only cared about BONE
 * we'd use a pipe, but there it is. Since APR has UDP support, we use
 * the APR functions and check all the return values...
 */
if (apr_sockaddr_info_get(&udp_sa, "127.0.0.1", APR_UNSPEC, 7772, 0, _pconf)
!= APR_SUCCESS){
ap_log_error(APLOG_MARK, APLOG_ALERT, errno, s,
"couldn't create control socket information, shutting down");
return 1;
}
if (apr_socket_create(&udp_sock, udp_sa->family, SOCK_DGRAM,
_pconf) != APR_SUCCESS){
ap_log_error(APLOG_MARK, APLOG_ALERT, errno, s,
"couldn't create control socket, shutting down");
return 1;