📄 ngx_process_cycle.c
字号:
/*
 * Tail of ngx_worker_process_init(): the function opens before this chunk
 * (privilege drop via setuid() is already in progress here).  The remainder
 * performs per-worker setup: CPU affinity, coredump enabling, working
 * directory, signal unblocking, module init_process hooks, and closing of
 * inherited channel descriptors that belong to other workers.
 */
                          "setuid(%d) failed", ccf->user);
            /* fatal */
            exit(2);
        }
    }

#if (NGX_HAVE_SCHED_SETAFFINITY)

    /* pin this worker to the configured CPU mask, if any */

    if (cpu_affinity) {
        ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0,
                      "sched_setaffinity(0x%08Xl)", cpu_affinity);

        if (sched_setaffinity(0, 32, (cpu_set_t *) &cpu_affinity) == -1) {
            /* non-fatal: the worker still runs, just unpinned */
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "sched_setaffinity(0x%08Xl) failed", cpu_affinity);
        }
    }

#endif

#if (NGX_HAVE_PR_SET_DUMPABLE)

    /* allow coredump after setuid() in Linux 2.4.x */

    if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0) == -1) {
        ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                      "prctl(PR_SET_DUMPABLE) failed");
    }

#endif

    if (ccf->working_directory.len) {
        if (chdir((char *) ccf->working_directory.data) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "chdir(\"%s\") failed",
                          ccf->working_directory.data);
            /* fatal */
            exit(2);
        }
    }

    /* unblock all signals: the master forked us with signals blocked */

    sigemptyset(&set);

    if (sigprocmask(SIG_SETMASK, &set, NULL) == -1) {
        ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                      "sigprocmask() failed");
    }

    ngx_init_temp_number();

    /*
     * disable deleting previous events for the listening sockets because
     * in the worker processes there are no events at all at this point
     */
    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {
        ls[i].previous = NULL;
    }

    /* run every module's per-process init hook; any failure is fatal */

    for (i = 0; ngx_modules[i]; i++) {
        if (ngx_modules[i]->init_process) {
            if (ngx_modules[i]->init_process(cycle) == NGX_ERROR) {
                /* fatal */
                exit(2);
            }
        }
    }

    /*
     * close the write ends of the channels inherited from the master
     * that belong to the other (sibling) worker processes
     */

    for (n = 0; n < ngx_last_process; n++) {

        if (ngx_processes[n].pid == -1) {
            continue;
        }

        if (n == ngx_process_slot) {
            continue;
        }

        if (ngx_processes[n].channel[1] == -1) {
            continue;
        }

        if (close(ngx_processes[n].channel[1]) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "close() channel failed");
        }
    }

    /* the master keeps channel[0] of our own pair; we use channel[1] */

    if (close(ngx_processes[ngx_process_slot].channel[0]) == -1) {
        ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                      "close() channel failed");
    }

#if 0
    ngx_last_process = 0;
#endif

    /* start listening for master commands on our end of the channel */

    if (ngx_add_channel_event(cycle, ngx_channel, NGX_READ_EVENT,
                              ngx_channel_handler)
        == NGX_ERROR)
    {
        /* fatal */
        exit(2);
    }
}


/*
 * Final teardown of a worker process: run module exit hooks, report any
 * connections left open during a graceful exit, then switch logging to a
 * static "exit cycle" so signal handlers can still log after the pool
 * (and the log allocated from it) is destroyed.  Never returns.
 */

static void
ngx_worker_process_exit(ngx_cycle_t *cycle)
{
    ngx_uint_t         i;
    ngx_connection_t  *c;

#if (NGX_THREADS)
    ngx_terminate = 1;

    /* wake and join all worker threads before tearing the process down */
    ngx_wakeup_worker_threads(cycle);
#endif

    for (i = 0; ngx_modules[i]; i++) {
        if (ngx_modules[i]->exit_process) {
            ngx_modules[i]->exit_process(cycle);
        }
    }

    if (ngx_exiting) {

        /*
         * on graceful exit every remaining open socket (other than
         * listening, channel, and resolver sockets) indicates a leak
         */

        c = cycle->connections;
        for (i = 0; i < cycle->connection_n; i++) {
            if (c[i].fd != -1
                && c[i].read
                && !c[i].read->accept
                && !c[i].read->channel
                && !c[i].read->resolver)
            {
                ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                              "open socket #%d left in %ui connection %s",
                              c[i].fd, i,
                              ngx_debug_quit ? ", aborting" : "");
                ngx_debug_point();
            }
        }

        if (ngx_debug_quit) {
            ngx_debug_point();
        }
    }

    /*
     * Copy ngx_cycle->log related data to the special static exit cycle,
     * log, and log file structures enough to allow a signal handler to log.
     * The handler may be called when standard ngx_cycle->log allocated from
     * ngx_cycle->pool is already destroyed.
     */
    ngx_exit_log_file.fd = ngx_cycle->log->file->fd;

    ngx_exit_log = *ngx_cycle->log;
    ngx_exit_log.file = &ngx_exit_log_file;

    ngx_exit_cycle.log = &ngx_exit_log;
    ngx_cycle = &ngx_exit_cycle;

    ngx_destroy_pool(cycle->pool);

    ngx_log_error(NGX_LOG_NOTICE, ngx_cycle->log, 0, "exit");

    exit(0);
}


/*
 * Read event handler for the master->worker control channel.  Drains all
 * pending ngx_channel_t commands and updates the worker's global flags
 * (quit/terminate/reopen) or its view of the process table accordingly.
 */

static void
ngx_channel_handler(ngx_event_t *ev)
{
    ngx_int_t          n;
    ngx_channel_t      ch;
    ngx_connection_t  *c;

    if (ev->timedout) {
        ev->timedout = 0;
        return;
    }

    c = ev->data;

    ngx_log_debug0(NGX_LOG_DEBUG_CORE, ev->log, 0, "channel handler");

    for ( ;; ) {

        n = ngx_read_channel(c->fd, &ch, sizeof(ngx_channel_t), ev->log);

        ngx_log_debug1(NGX_LOG_DEBUG_CORE, ev->log, 0, "channel: %i", n);

        if (n == NGX_ERROR) {

            /*
             * epoll removes a closed descriptor automatically, but the
             * connection must still be detached from the event mechanism
             * before ngx_close_connection()
             */

            if (ngx_event_flags & NGX_USE_EPOLL_EVENT) {
                ngx_del_conn(c, 0);
            }

            ngx_close_connection(c);
            return;
        }

        if (ngx_event_flags & NGX_USE_EVENTPORT_EVENT) {
            /* event ports are one-shot: re-arm the read event each time */
            if (ngx_add_event(ev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return;
            }
        }

        if (n == NGX_AGAIN) {
            return;
        }

        ngx_log_debug1(NGX_LOG_DEBUG_CORE, ev->log, 0,
                       "channel command: %d", ch.command);

        switch (ch.command) {

        case NGX_CMD_QUIT:
            ngx_quit = 1;
            break;

        case NGX_CMD_TERMINATE:
            ngx_terminate = 1;
            break;

        case NGX_CMD_REOPEN:
            ngx_reopen = 1;
            break;

        case NGX_CMD_OPEN_CHANNEL:

            /* a new sibling worker was forked; record its pid and channel fd
             * (the fd itself arrives via SCM_RIGHTS with the message) */

            ngx_log_debug3(NGX_LOG_DEBUG_CORE, ev->log, 0,
                           "get channel s:%i pid:%P fd:%d",
                           ch.slot, ch.pid, ch.fd);

            ngx_processes[ch.slot].pid = ch.pid;
            ngx_processes[ch.slot].channel[0] = ch.fd;
            break;

        case NGX_CMD_CLOSE_CHANNEL:

            ngx_log_debug4(NGX_LOG_DEBUG_CORE, ev->log, 0,
                           "close channel s:%i pid:%P our:%P fd:%d",
                           ch.slot, ch.pid, ngx_processes[ch.slot].pid,
                           ngx_processes[ch.slot].channel[0]);

            if (close(ngx_processes[ch.slot].channel[0]) == -1) {
                ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_errno,
                              "close() channel failed");
            }

            ngx_processes[ch.slot].channel[0] = -1;
            break;
        }
    }
}


#if (NGX_THREADS)

/*
 * Repeatedly signal all live worker threads until every one of them has
 * reached NGX_THREAD_EXIT and been joined, then release the event
 * machinery and its mutexes.  Called from the exiting worker process.
 */

static void
ngx_wakeup_worker_threads(ngx_cycle_t *cycle)
{
    ngx_int_t   i;
    ngx_uint_t  live;

    for ( ;; ) {

        live = 0;

        for (i = 0; i < ngx_threads_n; i++) {
            if (ngx_threads[i].state < NGX_THREAD_EXIT) {
                /* still running: prod it awake so it can observe
                 * ngx_terminate; on signal failure give up on the thread */
                if (ngx_cond_signal(ngx_threads[i].cv) == NGX_ERROR) {
                    ngx_threads[i].state = NGX_THREAD_DONE;

                } else {
                    live = 1;
                }
            }

            if (ngx_threads[i].state == NGX_THREAD_EXIT) {
                ngx_thread_join(ngx_threads[i].tid, NULL);
                ngx_threads[i].state = NGX_THREAD_DONE;
            }
        }

        if (live == 0) {
            ngx_log_debug0(NGX_LOG_DEBUG_CORE, cycle->log, 0,
                           "all worker threads are joined");

            /* STUB */
            ngx_done_events(cycle);
            ngx_mutex_destroy(ngx_event_timer_mutex);
            ngx_mutex_destroy(ngx_posted_events_mutex);

            return;
        }

        ngx_sched_yield();
    }
}


/*
 * Main loop of an auxiliary worker thread: block the signals the main
 * thread handles, set up thread-local storage, then wait on the thread's
 * condition variable and process posted events until ngx_terminate is set.
 * Returns 0 on clean shutdown, 1 on any setup or processing error.
 */

static ngx_thread_value_t
ngx_worker_thread_cycle(void *data)
{
    ngx_thread_t  *thr = data;

    sigset_t          set;
    ngx_err_t         err;
    ngx_core_tls_t   *tls;
    ngx_cycle_t      *cycle;

    cycle = (ngx_cycle_t *) ngx_cycle;

    /* these signals must only be delivered to the main thread */

    sigemptyset(&set);
    sigaddset(&set, ngx_signal_value(NGX_RECONFIGURE_SIGNAL));
    sigaddset(&set, ngx_signal_value(NGX_REOPEN_SIGNAL));
    sigaddset(&set, ngx_signal_value(NGX_CHANGEBIN_SIGNAL));

    err = ngx_thread_sigmask(SIG_BLOCK, &set, NULL);
    if (err) {
        ngx_log_error(NGX_LOG_ALERT, cycle->log, err,
                      ngx_thread_sigmask_n " failed");
        return (ngx_thread_value_t) 1;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_CORE, cycle->log, 0,
                   "thread " NGX_TID_T_FMT " started", ngx_thread_self());

    ngx_setthrtitle("worker thread");

    tls = ngx_calloc(sizeof(ngx_core_tls_t), cycle->log);
    if (tls == NULL) {
        return (ngx_thread_value_t) 1;
    }

    err = ngx_thread_set_tls(ngx_core_tls_key, tls);
    if (err != 0) {
        ngx_log_error(NGX_LOG_ALERT, cycle->log, err,
                      ngx_thread_set_tls_n " failed");
        return (ngx_thread_value_t) 1;
    }

    ngx_mutex_lock(ngx_posted_events_mutex);

    for ( ;; ) {
        thr->state = NGX_THREAD_FREE;

        /* sleep until the main thread posts work (or shutdown) for us;
         * the mutex is released while waiting and reacquired on wakeup */

        if (ngx_cond_wait(thr->cv, ngx_posted_events_mutex) == NGX_ERROR) {
            return (ngx_thread_value_t) 1;
        }

        if (ngx_terminate) {
            thr->state = NGX_THREAD_EXIT;

            ngx_mutex_unlock(ngx_posted_events_mutex);

            ngx_log_debug1(NGX_LOG_DEBUG_CORE, cycle->log, 0,
                           "thread " NGX_TID_T_FMT " is done",
                           ngx_thread_self());

            return (ngx_thread_value_t) 0;
        }

        thr->state = NGX_THREAD_BUSY;

        /* NOTE(review): the posted-event queue is deliberately drained
         * twice per wakeup in the upstream source as well */

        if (ngx_event_thread_process_posted(cycle) == NGX_ERROR) {
            return (ngx_thread_value_t) 1;
        }

        if (ngx_event_thread_process_posted(cycle) == NGX_ERROR) {
            return (ngx_thread_value_t) 1;
        }

        if (ngx_process_changes) {
            if (ngx_process_changes(cycle, 1) == NGX_ERROR) {
                return (ngx_thread_value_t) 1;
            }
        }
    }
}

#endif


#if 0

/*
 * Disabled prototype of a dedicated garbage-collector process: would wake
 * hourly to sweep each configured path with its cleaner handler, while
 * honoring the usual terminate/quit/reopen flags.  Dead code (#if 0).
 */

static void
ngx_garbage_collector_cycle(ngx_cycle_t *cycle, void *data)
{
    ngx_uint_t    i;
    ngx_gc_t      ctx;
    ngx_path_t  **path;
    ngx_event_t  *ev;

    ngx_worker_process_init(cycle, 0);

    ev = &cycle->read_events0[ngx_channel];

    ngx_accept_mutex = NULL;

    ngx_setproctitle("garbage collector");

#if 0
    ngx_add_timer(ev, 60 * 1000);
#endif

    for ( ;; ) {

        if (ngx_terminate || ngx_quit) {
            ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0, "exiting");
            exit(0);
        }

        if (ngx_reopen) {
            ngx_reopen = 0;

            ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0, "reopening logs");

            ngx_reopen_files(cycle, -1);
        }

        /* sweep every configured path with its registered cleaner */

        path = cycle->pathes.elts;
        for (i = 0; i < cycle->pathes.nelts; i++) {
            ctx.path = path[i];
            ctx.log = cycle->log;
            ctx.handler = path[i]->cleaner;

            ngx_collect_garbage(&ctx, &path[i]->name, 0);
        }

        ngx_add_timer(ev, 60 * 60 * 1000);

        ngx_process_events_and_timers(cycle);
    }
}

#endif
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -