/*
 * event.c (chunk) -- pluggable event system backends (kqueue / epoll)
 * for the State Threads style scheduler; see st_set_eventsys() below.
 */
    /*
     * NOTE(review): this is the tail of the kqueue dispatch routine; its
     * opening lines are outside this chunk.  The branch below recovers
     * after a fork: the kqueue descriptor is not inherited by the child,
     * so a fresh kqueue is created, every descriptor still on the io
     * queue is re-registered, and the kevent call is retried.
     */
    if (errno == EBADF && _st_kq_data->pid != getpid()) {
        /* We probably forked, reinitialize kqueue */
        if ((_st_kq_data->kq = kqueue()) < 0) {
            /* There is nothing we can do here, will retry later */
            return;
        }
        /* Keep the kqueue fd from leaking across exec() */
        fcntl(_st_kq_data->kq, F_SETFD, FD_CLOEXEC);
        _st_kq_data->pid = getpid();
        /* Re-register all descriptors on ioq with new kqueue */
        memset(_st_kq_data->fd_data, 0,
               _st_kq_data->fd_data_size * sizeof(_kq_fd_data_t));
        for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) {
            pq = _ST_POLLQUEUE_PTR(q);
            _st_kq_pollset_add(pq->pds, pq->npds);
        }
        goto retry_kevent;
        }
    }
}

/*
 * Grow the per-fd bookkeeping array (if needed) so that osfd is a valid
 * index into it.  Returns 0 on success, -1 on allocation failure.
 */
int _st_kq_fd_new(int osfd)
{
    if (osfd >= _st_kq_data->fd_data_size && _st_kq_fd_data_expand(osfd) < 0)
        return -1;
    return 0;
}

/*
 * Refuse to close a descriptor that some thread is still polling on:
 * a nonzero read or write interest count means it is in use.
 * Returns 0 if the fd may be closed, -1 with errno = EBUSY otherwise.
 */
int _st_kq_fd_close(int osfd)
{
    if (_ST_KQ_READ_CNT(osfd) || _ST_KQ_WRITE_CNT(osfd)) {
        errno = EBUSY;
        return -1;
    }
    return 0;
}

int _st_kq_fd_getlimit(void)
{
    /* zero means no specific limit */
    return 0;
}

/* vtable exposing the kqueue backend through the generic event-system API */
static _st_eventsys_t _st_kq_eventsys = {
    "kqueue",
    ST_EVENTSYS_ALT,
    _st_kq_init,
    _st_kq_dispatch,
    _st_kq_pollset_add,
    _st_kq_pollset_del,
    _st_kq_fd_new,
    _st_kq_fd_close,
    _st_kq_fd_getlimit
};
#endif  /* MD_HAVE_KQUEUE */


#ifdef MD_HAVE_EPOLL
/*****************************************
 * epoll event system
 */

/*
 * Allocate and initialize all epoll backend state (_st_epoll_data):
 * the epoll fd, the per-fd interest-count array, and the epoll_wait
 * event list.  Returns 0 on success; on failure frees everything,
 * restores the causing errno, and returns -1.
 */
int _st_epoll_init(void)
{
    int fdlim;
    int err = 0;
    int rv = 0;

    _st_epoll_data =
        (struct _st_epolldata *) calloc(1, sizeof(*_st_epoll_data));
    if (!_st_epoll_data)
        return -1;

    /* Size hint: the process fd limit, capped at ST_EPOLL_EVTLIST_SIZE */
    fdlim = st_getfdlimit();
    _st_epoll_data->fd_hint = (fdlim > 0 && fdlim < ST_EPOLL_EVTLIST_SIZE) ?
        fdlim : ST_EPOLL_EVTLIST_SIZE;

    if ((_st_epoll_data->epfd = epoll_create(_st_epoll_data->fd_hint)) < 0) {
        err = errno;
        rv = -1;
        goto cleanup_epoll;
    }
    fcntl(_st_epoll_data->epfd, F_SETFD, FD_CLOEXEC);
    _st_epoll_data->pid = getpid();

    /* Allocate file descriptor data array */
    _st_epoll_data->fd_data_size = _st_epoll_data->fd_hint;
    _st_epoll_data->fd_data =
        (_epoll_fd_data_t *)calloc(_st_epoll_data->fd_data_size,
                                   sizeof(_epoll_fd_data_t));
    if (!_st_epoll_data->fd_data) {
        err = errno;
        rv = -1;
        goto cleanup_epoll;
    }

    /* Allocate event lists */
    _st_epoll_data->evtlist_size = _st_epoll_data->fd_hint;
    _st_epoll_data->evtlist =
        (struct epoll_event *)malloc(_st_epoll_data->evtlist_size *
                                     sizeof(struct epoll_event));
    if (!_st_epoll_data->evtlist) {
        err = errno;
        rv = -1;
    }

 cleanup_epoll:
    if (rv < 0) {
        /* epfd is -1 here if epoll_create itself was what failed */
        if (_st_epoll_data->epfd >= 0)
            close(_st_epoll_data->epfd);
        free(_st_epoll_data->fd_data);
        free(_st_epoll_data->evtlist);
        free(_st_epoll_data);
        _st_epoll_data = NULL;
        errno = err;
    }
    return rv;
}

/*
 * Double the per-fd data array until maxfd is a valid index, zeroing
 * the newly added tail.  Returns 0 on success, -1 if realloc fails
 * (in which case the old array is left intact).
 */
int _st_epoll_fd_data_expand(int maxfd)
{
    _epoll_fd_data_t *ptr;
    int n = _st_epoll_data->fd_data_size;

    while (maxfd >= n)
        n <<= 1;

    ptr = (_epoll_fd_data_t *)realloc(_st_epoll_data->fd_data,
                                      n * sizeof(_epoll_fd_data_t));
    if (!ptr)
        return -1;

    memset(ptr + _st_epoll_data->fd_data_size, 0,
           (n - _st_epoll_data->fd_data_size) * sizeof(_epoll_fd_data_t));

    _st_epoll_data->fd_data = ptr;
    _st_epoll_data->fd_data_size = n;

    return 0;
}

/*
 * Grow the epoll_wait() event list to hold at least evtlist_cnt entries.
 * Best effort: a realloc failure is silently tolerated (epoll_wait will
 * simply report events in more than one pass).
 */
void _st_epoll_evtlist_expand(void)
{
    struct epoll_event *ptr;
    int n = _st_epoll_data->evtlist_size;

    while (_st_epoll_data->evtlist_cnt > n)
        n <<= 1;

    ptr = (struct epoll_event *)realloc(_st_epoll_data->evtlist,
                                        n * sizeof(struct epoll_event));
    if (ptr) {
        _st_epoll_data->evtlist = ptr;
        _st_epoll_data->evtlist_size = n;
    }
}

/*
 * Drop interest in the descriptors of a poll set: decrement the per-fd
 * interest counts and, where the combined event mask changed, modify or
 * delete the fd in the epoll set.  epoll_ctl failures are deliberately
 * ignored (see comment below).
 */
void _st_epoll_pollset_del(struct pollfd *pds, int npds)
{
    struct epoll_event ev;
    struct pollfd *pd;
    struct pollfd *epd = pds + npds;
    int old_events, events, op;

    /*
     * It's more or less OK if deleting fails because a descriptor
     * will either be closed or deleted in dispatch function after
     * it fires.
     */
    for (pd = pds; pd < epd; pd++) {
        old_events = _ST_EPOLL_EVENTS(pd->fd);

        if (pd->events & POLLIN)
            _ST_EPOLL_READ_CNT(pd->fd)--;
        if (pd->events & POLLOUT)
            _ST_EPOLL_WRITE_CNT(pd->fd)--;
        if (pd->events & POLLPRI)
            _ST_EPOLL_EXCEP_CNT(pd->fd)--;

        events = _ST_EPOLL_EVENTS(pd->fd);
        /*
         * The _ST_EPOLL_REVENTS check below is needed so we can use
         * this function inside dispatch(). Outside of dispatch()
         * _ST_EPOLL_REVENTS is always zero for all descriptors.
         */
        if (events != old_events && _ST_EPOLL_REVENTS(pd->fd) == 0) {
            op = events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL;
            ev.events = events;
            ev.data.fd = pd->fd;
            if (epoll_ctl(_st_epoll_data->epfd, op, pd->fd, &ev) == 0 &&
                op == EPOLL_CTL_DEL) {
                _st_epoll_data->evtlist_cnt--;
            }
        }
    }
}

/*
 * Register interest in the descriptors of a poll set.  Validates every
 * entry first, then bumps the per-fd interest counts and adds/modifies
 * fds in the epoll set.  On a mid-loop epoll_ctl failure the interest
 * counts already taken are unrolled via _st_epoll_pollset_del() and -1
 * is returned with the original errno preserved.
 */
int _st_epoll_pollset_add(struct pollfd *pds, int npds)
{
    struct epoll_event ev;
    int i, fd;
    int old_events, events, op;

    /* Do as many checks as possible up front */
    for (i = 0; i < npds; i++) {
        fd = pds[i].fd;
        if (fd < 0 || !pds[i].events ||
            (pds[i].events & ~(POLLIN | POLLOUT | POLLPRI))) {
            errno = EINVAL;
            return -1;
        }
        if (fd >= _st_epoll_data->fd_data_size &&
            _st_epoll_fd_data_expand(fd) < 0)
            return -1;
    }

    for (i = 0; i < npds; i++) {
        fd = pds[i].fd;
        old_events = _ST_EPOLL_EVENTS(fd);

        if (pds[i].events & POLLIN)
            _ST_EPOLL_READ_CNT(fd)++;
        if (pds[i].events & POLLOUT)
            _ST_EPOLL_WRITE_CNT(fd)++;
        if (pds[i].events & POLLPRI)
            _ST_EPOLL_EXCEP_CNT(fd)++;

        events = _ST_EPOLL_EVENTS(fd);
        if (events != old_events) {
            op = old_events ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;
            ev.events = events;
            ev.data.fd = fd;
            /* An EEXIST on ADD is tolerated: the fd is already in the set */
            if (epoll_ctl(_st_epoll_data->epfd, op, fd, &ev) < 0 &&
                (op != EPOLL_CTL_ADD || errno != EEXIST))
                break;
            if (op == EPOLL_CTL_ADD) {
                _st_epoll_data->evtlist_cnt++;
                if (_st_epoll_data->evtlist_cnt > _st_epoll_data->evtlist_size)
                    _st_epoll_evtlist_expand();
            }
        }
    }

    if (i < npds) {
        /* Error */
        int err = errno;
        /* Unroll the state */
        _st_epoll_pollset_del(pds, i + 1);
        errno = err;
        return -1;
    }

    return 0;
}

/*
 * One scheduler iteration of the epoll backend: compute the timeout from
 * the sleep queue, recover the epoll set after a fork if needed, wait for
 * events, translate EPOLL* bits back into poll() revents for every queued
 * poll set, and move threads whose descriptors fired onto the run queue.
 */
void _st_epoll_dispatch(void)
{
    st_utime_t min_timeout;
    _st_clist_t *q;
    _st_pollq_t *pq;
    struct pollfd *pds, *epds;
    struct epoll_event ev;
    int timeout, nfd, i, osfd, notify;
    int events, op;
    short revents;

    /* Block indefinitely if no thread is sleeping; otherwise wake in
       time for the earliest due thread (due times are in microseconds) */
    if (_ST_SLEEPQ == NULL) {
        timeout = -1;
    } else {
        min_timeout = (_ST_SLEEPQ->due <= _ST_LAST_CLOCK) ? 0 :
            (_ST_SLEEPQ->due - _ST_LAST_CLOCK);
        timeout = (int) (min_timeout / 1000);
    }

    if (_st_epoll_data->pid != getpid()) {
        /* We probably forked, reinitialize epoll set */
        close(_st_epoll_data->epfd);
        _st_epoll_data->epfd = epoll_create(_st_epoll_data->fd_hint);
        if (_st_epoll_data->epfd < 0) {
            /* There is nothing we can do here, will retry later */
            return;
        }
        fcntl(_st_epoll_data->epfd, F_SETFD, FD_CLOEXEC);
        _st_epoll_data->pid = getpid();

        /* Put all descriptors on ioq into new epoll set */
        memset(_st_epoll_data->fd_data, 0,
               _st_epoll_data->fd_data_size * sizeof(_epoll_fd_data_t));
        _st_epoll_data->evtlist_cnt = 0;
        for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) {
            pq = _ST_POLLQUEUE_PTR(q);
            _st_epoll_pollset_add(pq->pds, pq->npds);
        }
    }

    /* Check for I/O operations */
    nfd = epoll_wait(_st_epoll_data->epfd, _st_epoll_data->evtlist,
                     _st_epoll_data->evtlist_size, timeout);

    if (nfd > 0) {
        /* First pass: record raw epoll events per descriptor */
        for (i = 0; i < nfd; i++) {
            osfd = _st_epoll_data->evtlist[i].data.fd;
            _ST_EPOLL_REVENTS(osfd) = _st_epoll_data->evtlist[i].events;
            if (_ST_EPOLL_REVENTS(osfd) & (EPOLLERR | EPOLLHUP)) {
                /* Also set I/O bits on error */
                _ST_EPOLL_REVENTS(osfd) |= _ST_EPOLL_EVENTS(osfd);
            }
        }

        /* Second pass: fill in revents for every queued poll set and
           wake the owning thread if anything it asked for fired */
        for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) {
            pq = _ST_POLLQUEUE_PTR(q);
            notify = 0;
            epds = pq->pds + pq->npds;

            for (pds = pq->pds; pds < epds; pds++) {
                if (_ST_EPOLL_REVENTS(pds->fd) == 0) {
                    pds->revents = 0;
                    continue;
                }
                osfd = pds->fd;
                events = pds->events;
                revents = 0;
                if ((events & POLLIN) && (_ST_EPOLL_REVENTS(osfd) & EPOLLIN))
                    revents |= POLLIN;
                if ((events & POLLOUT) && (_ST_EPOLL_REVENTS(osfd) & EPOLLOUT))
                    revents |= POLLOUT;
                if ((events & POLLPRI) && (_ST_EPOLL_REVENTS(osfd) & EPOLLPRI))
                    revents |= POLLPRI;
                if (_ST_EPOLL_REVENTS(osfd) & EPOLLERR)
                    revents |= POLLERR;
                if (_ST_EPOLL_REVENTS(osfd) & EPOLLHUP)
                    revents |= POLLHUP;

                pds->revents = revents;
                if (revents) {
                    notify = 1;
                }
            }
            if (notify) {
                ST_REMOVE_LINK(&pq->links);
                pq->on_ioq = 0;
                /*
                 * Here we will only delete/modify descriptors that
                 * didn't fire (see comments in _st_epoll_pollset_del()).
                 */
                _st_epoll_pollset_del(pq->pds, pq->npds);

                if (pq->thread->flags & _ST_FL_ON_SLEEPQ)
                    _ST_DEL_SLEEPQ(pq->thread);
                pq->thread->state = _ST_ST_RUNNABLE;
                _ST_ADD_RUNQ(pq->thread);
            }
        }

        /* Third pass: reconcile the epoll set for descriptors that fired,
           now that the interest counts reflect the woken threads */
        for (i = 0; i < nfd; i++) {
            /* Delete/modify descriptors that fired */
            osfd = _st_epoll_data->evtlist[i].data.fd;
            _ST_EPOLL_REVENTS(osfd) = 0;
            events = _ST_EPOLL_EVENTS(osfd);
            op = events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL;
            ev.events = events;
            ev.data.fd = osfd;
            if (epoll_ctl(_st_epoll_data->epfd, op, osfd, &ev) == 0 &&
                op == EPOLL_CTL_DEL) {
                _st_epoll_data->evtlist_cnt--;
            }
        }
    }
}

/*
 * Grow the per-fd bookkeeping array (if needed) so that osfd is a valid
 * index into it.  Returns 0 on success, -1 on allocation failure.
 */
int _st_epoll_fd_new(int osfd)
{
    if (osfd >= _st_epoll_data->fd_data_size &&
        _st_epoll_fd_data_expand(osfd) < 0)
        return -1;
    return 0;
}

/*
 * Refuse to close a descriptor that some thread is still polling on:
 * a nonzero interest count means it is in use.  Returns 0 if the fd may
 * be closed, -1 with errno = EBUSY otherwise.
 */
int _st_epoll_fd_close(int osfd)
{
    if (_ST_EPOLL_READ_CNT(osfd) || _ST_EPOLL_WRITE_CNT(osfd) ||
        _ST_EPOLL_EXCEP_CNT(osfd)) {
        errno = EBUSY;
        return -1;
    }
    return 0;
}

int _st_epoll_fd_getlimit(void)
{
    /* zero means no specific limit */
    return 0;
}

/*
 * Check if epoll functions are just stubs.
 */
int _st_epoll_is_supported(void)
{
    struct epoll_event ev;

    ev.events = EPOLLIN;
    ev.data.ptr = NULL;
    /* Guaranteed to fail */
    epoll_ctl(-1, EPOLL_CTL_ADD, -1, &ev);

    /* A real kernel implementation fails with EBADF, not ENOSYS */
    return (errno != ENOSYS);
}

/* vtable exposing the epoll backend through the generic event-system API */
static _st_eventsys_t _st_epoll_eventsys = {
    "epoll",
    ST_EVENTSYS_ALT,
    _st_epoll_init,
    _st_epoll_dispatch,
    _st_epoll_pollset_add,
    _st_epoll_pollset_del,
    _st_epoll_fd_new,
    _st_epoll_fd_close,
    _st_epoll_fd_getlimit
};
#endif  /* MD_HAVE_EPOLL */


/*****************************************
 * Public functions
 */

/*
 * Select which event system the library will use.  Must be called before
 * any backend is active: fails with EBUSY once an event system is set,
 * and with EINVAL for an unknown eventsys value.  For ST_EVENTSYS_ALT the
 * best compiled-in alternative (kqueue, else epoll) is chosen; if epoll
 * turns out to be a stub the selection is deliberately left unchanged.
 */
int st_set_eventsys(int eventsys)
{
    if (_st_eventsys) {
        errno = EBUSY;
        return -1;
    }

    switch (eventsys) {
    case ST_EVENTSYS_DEFAULT:
#ifdef USE_POLL
        _st_eventsys = &_st_poll_eventsys;
#else
        _st_eventsys = &_st_select_eventsys;
#endif
        break;
    case ST_EVENTSYS_SELECT:
        _st_eventsys = &_st_select_eventsys;
        break;
#ifdef MD_HAVE_POLL
    case ST_EVENTSYS_POLL:
        _st_eventsys = &_st_poll_eventsys;
        break;
#endif
    case ST_EVENTSYS_ALT:
#if defined (MD_HAVE_KQUEUE)
        _st_eventsys = &_st_kq_eventsys;
#elif defined (MD_HAVE_EPOLL)
        if (_st_epoll_is_supported())
            _st_eventsys = &_st_epoll_eventsys;
#endif
        break;
    default:
        errno = EINVAL;
        return -1;
    }

    return 0;
}

/* Return the identifier of the selected event system, or -1 if none */
int st_get_eventsys(void)
{
    return _st_eventsys ? _st_eventsys->val : -1;
}

/* Return the human-readable name of the selected event system, or "" */
const char *st_get_eventsys_name(void)
{
    return _st_eventsys ? _st_eventsys->name : "";
}