uipc_sock.c - VxWorks source code (C) - page 1 of 3
    struct mbuf *nam;
{
    int s;
    int error;

#ifdef WV_INSTRUMENTATION
#ifdef INCLUDE_WVNET
    /* WV_NET_VERBOSE event */
    WV_NET_MARKER_1 (NET_AUX_EVENT, WV_NET_VERBOSE, 52, 18,
                     WV_NETEVENT_SOCONNECT_START, so->so_fd)
#endif  /* INCLUDE_WVNET */
#endif

    if (so->so_options & SO_ACCEPTCONN)
        {
#ifdef WV_INSTRUMENTATION
#ifdef INCLUDE_WVNET
        /* WV_NET_ERROR event */
        WV_NET_MARKER_1 (NET_AUX_EVENT, WV_NET_ERROR, 45, 8,
                         WV_NETEVENT_SOCONNECT_BADSOCK, so->so_fd)
#endif  /* INCLUDE_WVNET */
#endif
        return (EOPNOTSUPP);
        }

    s = splnet();

    /*
     * If protocol is connection-based, can only connect once.
     * Otherwise, if connected, try to disconnect first.
     * This allows user to disconnect by connecting to, e.g.,
     * a null address.
     */
    if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
        ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
        (error = sodisconnect(so))))
        error = EISCONN;
    else
        error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
            (struct mbuf *)0, nam, (struct mbuf *)0);
    splx(s);
    return (error);
}

int
soconnect2(so1, so2)
    register struct socket *so1;
    struct socket *so2;
{
    int s = splnet();
    int error;

/*
 * XXX - This event cannot currently occur: the socket operation which uses
 *       the soconnect2() routine is not supported by VxWorks
#ifdef WV_INSTRUMENTATION
#ifdef INCLUDE_WVNET
    /@ WV_NET_VERBOSE event @/
    WV_NET_MARKER_2 (NET_AUX_EVENT, WV_NET_VERBOSE, 53, 19,
                     WV_NETEVENT_SOCONNECT2_START, so1->so_fd, so2->so_fd)
#endif  /@ INCLUDE_WVNET @/
#endif
 * XXX - end of unused event
 */

    error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
        (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
    splx(s);
    return (error);
}

int
sodisconnect(so)
    register struct socket *so;
{
    int s = splnet();
    int error;

    if ((so->so_state & SS_ISCONNECTED) == 0) {
        error = ENOTCONN;
        goto bad;
    }
    if (so->so_state & SS_ISDISCONNECTING) {
        error = EALREADY;
        goto bad;
    }
    error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
        (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
bad:
    splx(s);

#ifdef WV_INSTRUMENTATION
#ifdef INCLUDE_WVNET
    /* WV_NET_ERROR event */
    WV_NET_MARKER_2 (NET_AUX_EVENT, WV_NET_ERROR, 46, 9,
                     WV_NETEVENT_SODISCONN_STATUS, so->so_fd, error)
#endif  /* INCLUDE_WVNET */
#endif

    return (error);
}

#define SBLOCKWAIT(f)   (((f) & MSG_DONTWAIT) ? M_DONTWAIT : M_WAIT)

/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 *
 * WRS mods removed sblock and sbunlock and replaced with splnet and splx,
 * which should serve the purpose.
 * Removed null check of uio so that blocking can be implemented for zbufs
 * also.
 * CAVEAT: the zbuf length cannot be more than the socket high water mark.
 * The user should implement his own flow control.
 * So this call would block only if the zbuf length is bigger than the space
 * available in the socket buffer and less than the socket high water mark.
 * Added a flag canWait which is set to M_DONTWAIT if the socket option SS_NBIO
 * is set. This prevents blocking if the user chose SS_NBIO and the system for
 * some reason runs out of mbufs. canWait defaults to M_WAIT -(vinai).
 */
int
sosend(so, addr, uio, top, control, flags)
    register struct socket *so;
    struct mbuf *addr;
    struct uio *uio;
    struct mbuf *top;
    struct mbuf *control;
    int flags;
{
    struct mbuf **mp;
    register struct mbuf *m;
    register long space, len, resid;
    int clen = 0, error, s, dontroute;
    int atomic = sosendallatonce(so) || top;
    register int canWait;
    int outcount = 0;

    if (uio)
        resid = uio->uio_resid;
    else
        resid = top->m_pkthdr.len;
    /*
     * In theory resid should be unsigned.
     * However, space must be signed, as it might be less than 0
     * if we over-committed, and we must use a signed comparison
     * of space and resid.  On the other hand, a negative resid
     * causes us to loop sending 0-length segments to the protocol.
     */
    if (resid < 0)
        return (EINVAL);
    dontroute =
        (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
        (so->so_proto->pr_flags & PR_ATOMIC);
    if (control)
        clen = control->m_len;

    canWait = (so->so_state & SS_NBIO) ? M_DONTWAIT : M_WAIT;

#define snderr(errno)   { error = errno; splx(s); goto out; }

    s = splnet();

restart:
    do {
        if (so->so_state & SS_CANTSENDMORE)
            snderr(EPIPE);
        if (so->so_error)
            snderr(so->so_error);
        if ((so->so_state & SS_ISCONNECTED) == 0) {
            if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
                if ((so->so_state & SS_ISCONFIRMING) == 0 &&
                    !(resid == 0 && clen != 0))
                    snderr(ENOTCONN);
            } else if (addr == 0)
                snderr(EDESTADDRREQ);
        }
        space = sbspace(&so->so_snd);
        if (flags & MSG_OOB)
            space += 1024;
        if (atomic && ((resid > so->so_snd.sb_hiwat) ||
            (clen > so->so_snd.sb_hiwat)))
            snderr(EMSGSIZE);
        if (space < resid + clen &&
            (atomic || space < so->so_snd.sb_lowat || space < clen)) {
            if (so->so_state & SS_NBIO)
                {
                if (flags & MSG_MBUF)
                    top = NULL; /* don't free the zero copy mbuf */
                snderr(EWOULDBLOCK);
                }
            sbwait(&so->so_snd);
            goto restart;
        }
        mp = &top;
        space -= clen;
        do {
            if (uio == NULL) {
                /*
                 * Data is prepackaged in "top".
                 */
                resid = 0;
                if (flags & MSG_EOR)
                    top->m_flags |= M_EOR;
            } else do {
                len = min(resid, space);

                if (top == 0) {
                    m = mBufClGet(canWait, MT_DATA, len + max_hdr,
                                  FALSE);
                    if (m == NULL)
                        snderr(ENOBUFS);
                    len = min(len, m->m_extSize);
                    m->m_flags |= M_PKTHDR;
                    m->m_pkthdr.len = 0;
                    m->m_pkthdr.rcvif = (struct ifnet *)0;
                    /*
                     * the assumption here is that max_hdr is
                     * always less than the minimum cluster size
                     * available.  Or don't set len, namely use
                     * len which is set by min() above.
                     */
                    if (atomic && m->m_extSize > max_hdr) {
                        len = min((m->m_extSize - max_hdr),
                                  len);
                        m->m_data += max_hdr;
                    }
                }
                else
                    {
                    m = mBufClGet(canWait, MT_DATA, len, FALSE);
                    if (m == NULL)
                        snderr(ENOBUFS);
                    len = min(len, m->m_extSize);
                    }

                space -= (m->m_extSize + MSIZE);
                error = uiomove(mtod(m, caddr_t), (int)len, uio);
                resid = uio->uio_resid;
                m->m_len = len;
                *mp = m;
                top->m_pkthdr.len += len;
                if (error)
                    goto release;
                mp = &m->m_next;
                if (resid <= 0) {
                    if (flags & MSG_EOR)
                        top->m_flags |= M_EOR;
                    break;
                }
            } while (space > 0 && atomic);

            if (dontroute)
                so->so_options |= SO_DONTROUTE;

            if (!sosendallatonce (so))
                {
                top->m_flags &= ~M_EOB;
                if (((resid == 0) || (space <= 0)) && (outcount > 0))
                    top->m_flags |= M_EOB;
                outcount++;
                if (resid)
                    top->m_flags &= ~M_EOB;
                }

            error = (*so->so_proto->pr_usrreq)(so,
                (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
                top, addr, control);

            if (dontroute)
                so->so_options &= ~SO_DONTROUTE;
            clen = 0;
            control = 0;
            top = 0;
            mp = &top;
            if (error)
                goto release;
        } while (resid && space > 0);
    } while (resid);

release:
    splx(s);
out:
    if (top)
        m_freem(top);
    if (control)
        m_freem(control);

    if (error != 0)
        {
#ifdef WV_INSTRUMENTATION
#ifdef INCLUDE_WVNET
        /* WV_NET_CRITICAL event */
        WV_NET_EVENT_2 (NET_CORE_EVENT, WV_NET_CRITICAL, 34, 4,
                        WV_NETEVENT_SOSEND_FAIL, WV_NET_SEND,
                        so->so_fd, error)
#endif  /* INCLUDE_WVNET */
#endif
        netErrnoSet (error);
        }
#ifdef WV_INSTRUMENTATION
#ifdef INCLUDE_WVNET
    /* WV_NET_VERBOSE event */
    else
        {
        WV_NET_EVENT_1 (NET_CORE_EVENT, WV_NET_VERBOSE, 54, 20,
                        WV_NETEVENT_SOSEND_FINISH, WV_NET_SEND, so->so_fd)
        }
#endif  /* INCLUDE_WVNET */
#endif

    return (error);
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 *
 * WRS mods: implement zero copy if out of band data is requested.
 */
int
soreceive(so, paddr, uio, mp0, controlp, flagsp)
    register struct socket *so;
    struct mbuf **paddr;
    struct uio *uio;
    struct mbuf **mp0;
    struct mbuf **controlp;
    int *flagsp;
{
    register struct mbuf *m, **mp;
    register int flags, len, error = 0, s, offset;
    struct protosw *pr = so->so_proto;
    struct mbuf *nextrecord;
    int moff, type = 0;
    int orig_resid = uio->uio_resid;

    mp = mp0;
    if (paddr)
        *paddr = 0;
    if (controlp)
        *controlp = 0;
    if (flagsp)
        flags = *flagsp &~ MSG_EOR;
    else
        flags = 0;

    /* this is the zero copy ptr to ptr to mbuf passed in */
    if (mp)
        *mp = (struct mbuf *)0;

    if (flags & MSG_OOB) {
        m = mBufClGet(M_WAIT, MT_DATA, CL_SIZE_128, TRUE);
        if (m == (struct mbuf *) NULL)
            {
            return (ENOBUFS);
            }
        error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
            (struct mbuf *)(long)(flags & MSG_PEEK), (struct mbuf *)0);
        if (error)
            goto bad;

        if (mp) do {            /* if zero copy interface */
            uio->uio_resid -= m->m_len;
            *mp = m;
            mp = &m->m_next;
            m = m->m_next;
        } while (*mp);
        else do {               /* if not zero copy iface */
            error = uiomove(mtod(m, caddr_t),
                (int) min(uio->uio_resid, m->m_len), uio);
            m = m_free(m);
        } while (uio->uio_resid && error == 0 && m);
bad:
        if (m)
            m_freem(m);
        netErrnoSet (error);
        return (error);
    }

    if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
        (*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
            (struct mbuf *)0, (struct mbuf *)0);

    s = splnet();

restart:
    m = so->so_rcv.sb_mb;
    /*
     * If we have less data than requested, block awaiting more
     * (subject to any timeout) if:
     *   1. the current count is less than the low water mark, or
     *   2. MSG_WAITALL is set, and it is possible to do the entire
     *      receive operation at once if we block (resid <= hiwat), or
     *   3. MSG_DONTWAIT is not set.
     * If MSG_WAITALL is set but resid is larger than the receive buffer,
     * we have to do the receive in sections, and thus risk returning
     * a short count if a timeout or signal occurs after we start.
     */
    if ( m == 0 ||
         ( ((flags & MSG_DONTWAIT) == 0) &&
           (so->so_rcv.sb_cc < uio->uio_resid) &&
           (so->so_rcv.sb_cc < so->so_rcv.sb_lowat)
         ) ||
         ( (flags & MSG_WAITALL) &&
           (uio->uio_resid <= so->so_rcv.sb_hiwat) &&
           (m->m_nextpkt == 0) &&
           ((pr->pr_flags & PR_ATOMIC) == 0)
         )
       ) {
#ifdef DIAGNOSTIC
        if (m == 0 && so->so_rcv.sb_cc)
            panic("receive 1");
#endif
        if (so->so_error) {
            if (m)
                goto dontblock;
            error = so->so_error;
            if ((flags & MSG_PEEK) == 0)
                so->so_error = 0;
            goto release;
        }
        if (so->so_state & SS_CANTRCVMORE) {
            if (m)
                goto dontblock;
            else
                goto release;
        }
        for (; m; m = m->m_next)
            if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
                m = so->so_rcv.sb_mb;
                goto dontblock;
            }
        if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
            (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
            error = ENOTCONN;
            goto release;
        }
        if (uio->uio_resid == 0)
            goto release;
        if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
            error = EWOULDBLOCK;
            goto release;
        }
        sbwait(&so->so_rcv);
        goto restart;
    }
dontblock:
    nextrecord = m->m_nextpkt;
    if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
        if (m->m_type != MT_SONAME)
            panic("receive 1a");
#endif
        orig_resid = 0;
        if (flags & MSG_PEEK) {
            if (paddr)
                *paddr = m_copy(m, 0, m->m_len);
            m = m->m_next;
        } else {
            sbfree(&so->so_rcv, m);
            if (paddr) {
                *paddr = m;
                so->so_rcv.sb_mb = m->m_next;
                m->m_next = 0;
                m = so->so_rcv.sb_mb;
            } else {
                so->so_rcv.sb_mb = m_free(m);
                m = so->so_rcv.sb_mb;
            }
        }
    }
    while (m && m->m_type == MT_CONTROL && error == 0) {
        if (flags & MSG_PEEK) {
            if (controlp)
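
The listing breaks off here, midway through soreceive(); pages 2 and 3 hold the rest of the function. For orientation, the sketch below shows how an application-level caller typically reaches the routines on this page when the BSD socket backend is in use: connect() descends into soconnect(), send() into sosend(), and recv() into soreceive(). It is a minimal illustration and not part of uipc_sock.c; the portable headers, the demoClient() name, the server address arguments, and the use of FIONBIO (which sets SS_NBIO, so sosend() and soreceive() return EWOULDBLOCK instead of sleeping in sbwait()) are all assumptions made for the example.

/* demoClient - hypothetical caller of the socket layer shown above */

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int demoClient (const char *serverIp, unsigned short port)
    {
    struct sockaddr_in sin;
    char   reply[128];
    int    on = 1;
    int    fd = socket (AF_INET, SOCK_STREAM, 0);

    if (fd < 0)
        return (-1);

    memset (&sin, 0, sizeof (sin));
    sin.sin_family      = AF_INET;
    sin.sin_port        = htons (port);
    sin.sin_addr.s_addr = inet_addr (serverIp);

    /* reaches soconnect(); a listening socket would get EOPNOTSUPP */
    if (connect (fd, (struct sockaddr *) &sin, sizeof (sin)) < 0)
        {
        close (fd);
        return (-1);
        }

    /* mark the socket non-blocking; with SS_NBIO set, sosend() and
     * soreceive() return EWOULDBLOCK rather than blocking in sbwait() */
    (void) ioctl (fd, FIONBIO, &on);

    /* reaches sosend(); may report EWOULDBLOCK if the send buffer is full */
    if (send (fd, "hello", 5, 0) < 0 && errno == EWOULDBLOCK)
        printf ("send would block\n");

    /* reaches soreceive(); EWOULDBLOCK if nothing is queued yet */
    if (recv (fd, reply, sizeof (reply), 0) < 0 && errno == EWOULDBLOCK)
        printf ("no data queued yet\n");

    close (fd);
    return (0);
    }

On a blocking socket (the default, with SS_NBIO clear), the same send() would instead sleep in sbwait() inside the restart loop of sosend() until sbspace() reports enough room, and recv() would sleep in sbwait() on the receive buffer.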
