os_unix.c
        return resultSock;
    } else {
        /*
         * Most likely (errno == ENOENT || errno == ECONNREFUSED)
         * and no FCGI application server is running.
         */
        close(resultSock);
        return -1;
    }
}

/*
 *--------------------------------------------------------------
 *
 * OS_Read --
 *
 *      Pass through to the unix read function.
 *
 * Results:
 *      Returns number of bytes read, 0, or -1 failure: errno
 *      contains actual error.
 *
 * Side effects:
 *      None.
 *
 *--------------------------------------------------------------
 */
int OS_Read(int fd, char * buf, size_t len)
{
    if (shutdownNow) return -1;
    return(read(fd, buf, len));
}

/*
 *--------------------------------------------------------------
 *
 * OS_Write --
 *
 *      Pass through to the unix write function.
 *
 * Results:
 *      Returns number of bytes written, 0, or -1 failure: errno
 *      contains actual error.
 *
 * Side effects:
 *      none.
 *
 *--------------------------------------------------------------
 */
int OS_Write(int fd, char * buf, size_t len)
{
    if (shutdownNow) return -1;
    return(write(fd, buf, len));
}

/*
 *----------------------------------------------------------------------
 *
 * OS_SpawnChild --
 *
 *      Spawns a new FastCGI listener process.
 *
 * Results:
 *      0 if success, -1 if error.
 *
 * Side effects:
 *      Child process spawned.
 *
 *----------------------------------------------------------------------
 */
int OS_SpawnChild(char *appPath, int listenFd)
{
    int forkResult;

    forkResult = fork();
    if(forkResult < 0) {
        return -1;
    }

    if(forkResult == 0) {
        /*
         * Close STDIN unconditionally.  It's used by the parent
         * process for CGI communication.  The FastCGI application
         * will be replacing this with the FastCGI listenFd IF
         * STDIN_FILENO is the same as FCGI_LISTENSOCK_FILENO
         * (which it is on Unix).  Regardless, STDIN, STDOUT, and
         * STDERR will be closed as the FastCGI process uses a
         * multiplexed socket in their place.
         */
        close(STDIN_FILENO);

        /*
         * If the listenFd is already the value of FCGI_LISTENSOCK_FILENO
         * we're set.  If not, change it so the child knows where to
         * get the listen socket from.
         */
        if(listenFd != FCGI_LISTENSOCK_FILENO) {
            dup2(listenFd, FCGI_LISTENSOCK_FILENO);
            close(listenFd);
        }

        close(STDOUT_FILENO);
        close(STDERR_FILENO);

        /*
         * We're a child.  Exec the application.
         *
         * XXX: entire environment passes through
         */
        execl(appPath, appPath, (char *) NULL);
        /*
         * XXX: Can't do this as we've already closed STDERR!!!
         *
         * perror("exec");
         */
        return -1;
    }
    return 0;
}
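/*
 * Usage sketch (illustrative only, not part of the original file):
 * spawning a FastCGI application over an already-bound listen socket.
 * "ExampleSpawn", "appPath", and "listenSock" are names assumed here;
 * the caller is expected to have created the listen socket elsewhere.
 */
#if 0
static int ExampleSpawn(char *appPath, int listenSock)
{
    /* OS_SpawnChild fork()s; in the child it dup2()s listenSock onto
     * FCGI_LISTENSOCK_FILENO and exec()s appPath, while the parent
     * returns immediately. */
    if (OS_SpawnChild(appPath, listenSock) < 0) {
        return -1;      /* fork() failed; errno has the reason */
    }
    return 0;           /* parent side: child is now running */
}
#endif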
/*
 *--------------------------------------------------------------
 *
 * OS_AsyncReadStdin --
 *
 *      This initiates an asynchronous read on the standard
 *      input handle.
 *
 *      The abstraction is necessary because Windows NT does not
 *      have a clean way of "select"ing a file descriptor for
 *      I/O.
 *
 * Results:
 *      -1 if error, 0 otherwise.
 *
 * Side effects:
 *      Asynchronous bit is set in the readfd variable and
 *      request is enqueued.
 *
 *--------------------------------------------------------------
 */
int OS_AsyncReadStdin(void *buf, int len, OS_AsyncProc procPtr,
                      ClientData clientData)
{
    int index = AIO_RD_IX(STDIN_FILENO);

    asyncIoInUse = TRUE;
    ASSERT(asyncIoTable[index].inUse == 0);
    asyncIoTable[index].procPtr = procPtr;
    asyncIoTable[index].clientData = clientData;
    asyncIoTable[index].fd = STDIN_FILENO;
    asyncIoTable[index].len = len;
    asyncIoTable[index].offset = 0;
    asyncIoTable[index].buf = buf;
    asyncIoTable[index].inUse = 1;
    FD_SET(STDIN_FILENO, &readFdSet);
    if(STDIN_FILENO > maxFd)
        maxFd = STDIN_FILENO;
    return 0;
}

static void GrowAsyncTable(void)
{
    int oldTableSize = asyncIoTableSize;

    asyncIoTableSize = asyncIoTableSize * 2;
    asyncIoTable = (AioInfo *)realloc(asyncIoTable,
                                      asyncIoTableSize * sizeof(AioInfo));
    if(asyncIoTable == NULL) {
        errno = ENOMEM;
        exit(errno);
    }
    memset((char *) &asyncIoTable[oldTableSize], 0,
           oldTableSize * sizeof(AioInfo));
}

/*
 *--------------------------------------------------------------
 *
 * OS_AsyncRead --
 *
 *      This initiates an asynchronous read on the file
 *      handle which may be a socket or named pipe.
 *
 *      We also must save the ProcPtr and ClientData, so later
 *      when the io completes, we know who to call.
 *
 *      We don't look at any results here (the ReadFile may
 *      return data if it is cached) but do all completion
 *      processing in OS_Select when we get the io completion
 *      port done notifications.  Then we call the callback.
 *
 * Results:
 *      -1 if error, 0 otherwise.
 *
 * Side effects:
 *      Asynchronous I/O operation is queued for completion.
 *
 *--------------------------------------------------------------
 */
int OS_AsyncRead(int fd, int offset, void *buf, int len,
                 OS_AsyncProc procPtr, ClientData clientData)
{
    int index = AIO_RD_IX(fd);

    ASSERT(asyncIoTable != NULL);
    asyncIoInUse = TRUE;

    if(fd > maxFd)
        maxFd = fd;

    while (index >= asyncIoTableSize) {
        GrowAsyncTable();
    }

    ASSERT(asyncIoTable[index].inUse == 0);
    asyncIoTable[index].procPtr = procPtr;
    asyncIoTable[index].clientData = clientData;
    asyncIoTable[index].fd = fd;
    asyncIoTable[index].len = len;
    asyncIoTable[index].offset = offset;
    asyncIoTable[index].buf = buf;
    asyncIoTable[index].inUse = 1;
    FD_SET(fd, &readFdSet);
    return 0;
}
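/*
 * Usage sketch (illustrative, not from the original source): queueing an
 * asynchronous read together with its completion callback.  It assumes
 * OS_AsyncProc callbacks are invoked as (*procPtr)(clientData, len) when
 * OS_DoIo dispatches completions; "connBuf", "ReadDone", and "QueueRead"
 * are hypothetical names.
 */
#if 0
static char connBuf[4096];

static void ReadDone(ClientData clientData, int len)
{
    /* len is the number of bytes transferred; <= 0 signals EOF/error. */
    if (len <= 0) {
        OS_Close((int)(long)clientData, FALSE);
    }
}

static int QueueRead(int connFd)
{
    /* Records connFd in asyncIoTable and sets it in readFdSet; the
     * actual read() is deferred until OS_DoIo sees the fd ready. */
    return OS_AsyncRead(connFd, 0, connBuf, sizeof(connBuf),
                        ReadDone, (ClientData)(long)connFd);
}
#endif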
/*
 *--------------------------------------------------------------
 *
 * OS_AsyncWrite --
 *
 *      This initiates an asynchronous write on the "fake" file
 *      descriptor (which may be a file, socket, or named pipe).
 *      We also must save the ProcPtr and ClientData, so later
 *      when the io completes, we know who to call.
 *
 *      We don't look at any results here (the WriteFile generally
 *      completes immediately) but do all completion processing
 *      in OS_DoIo when we get the io completion port done
 *      notifications.  Then we call the callback.
 *
 * Results:
 *      -1 if error, 0 otherwise.
 *
 * Side effects:
 *      Asynchronous I/O operation is queued for completion.
 *
 *--------------------------------------------------------------
 */
int OS_AsyncWrite(int fd, int offset, void *buf, int len,
                  OS_AsyncProc procPtr, ClientData clientData)
{
    int index = AIO_WR_IX(fd);

    asyncIoInUse = TRUE;

    if(fd > maxFd)
        maxFd = fd;

    while (index >= asyncIoTableSize) {
        GrowAsyncTable();
    }

    ASSERT(asyncIoTable[index].inUse == 0);
    asyncIoTable[index].procPtr = procPtr;
    asyncIoTable[index].clientData = clientData;
    asyncIoTable[index].fd = fd;
    asyncIoTable[index].len = len;
    asyncIoTable[index].offset = offset;
    asyncIoTable[index].buf = buf;
    asyncIoTable[index].inUse = 1;
    FD_SET(fd, &writeFdSet);
    return 0;
}

/*
 *--------------------------------------------------------------
 *
 * OS_Close --
 *
 *      Closes the descriptor.  This is a pass through to the
 *      Unix close.
 *
 * Results:
 *      0 for success, -1 on failure
 *
 * Side effects:
 *      None.
 *
 *--------------------------------------------------------------
 */
int OS_Close(int fd, int shutdown_ok)
{
    if (fd == -1)
        return 0;

    if (asyncIoInUse) {
        int index = AIO_RD_IX(fd);

        FD_CLR(fd, &readFdSet);
        FD_CLR(fd, &readFdSetPost);
        if (asyncIoTable[index].inUse != 0) {
            asyncIoTable[index].inUse = 0;
        }

        FD_CLR(fd, &writeFdSet);
        FD_CLR(fd, &writeFdSetPost);
        index = AIO_WR_IX(fd);
        if (asyncIoTable[index].inUse != 0) {
            asyncIoTable[index].inUse = 0;
        }

        if (maxFd == fd) {
            maxFd--;
        }
    }

    /*
     * shutdown() the send side and then read() from client until EOF
     * or a timeout expires.  This is done to minimize the potential
     * that a TCP RST will be sent by our TCP stack in response to
     * receipt of additional data from the client.  The RST would
     * cause the client to discard potentially useful response data.
     */

    if (shutdown_ok) {
        if (shutdown(fd, 1) == 0) {
            struct timeval tv;
            fd_set rfds;
            int rv;
            char trash[1024];

            FD_ZERO(&rfds);

            do {
                FD_SET(fd, &rfds);
                tv.tv_sec = 2;
                tv.tv_usec = 0;
                rv = select(fd + 1, &rfds, NULL, NULL, &tv);
            } while (rv > 0 && read(fd, trash, sizeof(trash)) > 0);
        }
    }

    return close(fd);
}

/*
 *--------------------------------------------------------------
 *
 * OS_CloseRead --
 *
 *      Cancel outstanding asynchronous reads and prevent subsequent
 *      reads from completing.
 *
 * Results:
 *      Socket or file is shutdown.  Return values mimic Unix shutdown:
 *              0 success, -1 failure
 *
 *--------------------------------------------------------------
 */
int OS_CloseRead(int fd)
{
    if(asyncIoTable[AIO_RD_IX(fd)].inUse != 0) {
        asyncIoTable[AIO_RD_IX(fd)].inUse = 0;
        FD_CLR(fd, &readFdSet);
    }

    return shutdown(fd, 0);
}
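/*
 * Usage sketch (illustrative): closing a connection gracefully once the
 * response has been written.  A non-zero shutdown_ok makes OS_Close shut
 * down the send side and drain the client until EOF or the 2-second
 * select() timeout above, reducing the chance that a TCP RST discards
 * buffered response data.  "FinishRequest" is a hypothetical caller.
 */
#if 0
static void FinishRequest(int clientFd)
{
    if (OS_Close(clientFd, TRUE) < 0) {
        /* close() failed; errno describes the error */
    }
}
#endif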
/*
 *--------------------------------------------------------------
 *
 * OS_DoIo --
 *
 *      This function was formerly OS_Select.  Its purpose is
 *      to pull I/O completion events off the queue and dispatch
 *      them to the appropriate place.
 *
 * Results:
 *      Returns 0.
 *
 * Side effects:
 *      Handlers are called.
 *
 *--------------------------------------------------------------
 */
int OS_DoIo(struct timeval *tmo)
{
    int fd, len, selectStatus;
    OS_AsyncProc procPtr;
    ClientData clientData;
    AioInfo *aioPtr;
    fd_set readFdSetCpy;
    fd_set writeFdSetCpy;

    asyncIoInUse = TRUE;
    FD_ZERO(&readFdSetCpy);
    FD_ZERO(&writeFdSetCpy);

    for(fd = 0; fd <= maxFd; fd++) {
        if(FD_ISSET(fd, &readFdSet)) {
            FD_SET(fd, &readFdSetCpy);
        }
        if(FD_ISSET(fd, &writeFdSet)) {
            FD_SET(fd, &writeFdSetCpy);
        }
    }

    /*
     * If there were no completed events from a prior call, see if there's
     * any work to do.
     */
    if(numRdPosted == 0 && numWrPosted == 0) {
        selectStatus = select((maxFd+1), &readFdSetCpy, &writeFdSetCpy,
                              NULL, tmo);
        if(selectStatus < 0) {
            /*exit(errno);*/ /* not sure what's best to do here */
            return -1;
        }

        for(fd = 0; fd <= maxFd; fd++) {
            /*
             * Build up a list of completed events.  We'll work off of
             * this list as opposed to looping through the read and write
             * fd sets since they can be affected by a callback routine.
             */
            if(FD_ISSET(fd, &readFdSetCpy)) {
                numRdPosted++;