
📄 tcp_subr.c

📁 RTEMS (Real-Time Executive for Multiprocessor Systems) is a free open source real-time operating system.
💻 C
📖 Page 1 of 2
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= (u_long)(tp->t_maxseg + sizeof (struct tcpiphdr));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}
	/* free the reassembly queue, if any */
	t = tp->seg_next;
	while (t != (struct tcpiphdr *)tp) {
		t = (struct tcpiphdr *)t->ti_next;
#if (defined(__GNUC__) && defined(__arm__))
		LD32_UNALGN((struct tcpiphdr *)t->ti_prev,m);
#else
		m = REASS_MBUF((struct tcpiphdr *)t->ti_prev);
#endif
		remque(t->ti_prev);
		m_freem(m);
	}
	if (tp->t_template)
		(void) m_free(dtom(tp->t_template));
	free(tp, M_PCB);
	inp->inp_ppcb = 0;
	soisdisconnected(so);
	in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	return ((struct tcpcb *)0);
}

void
tcp_drain()
{
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 */
static void
tcp_notify(inp, error)
	struct inpcb *inp;
	int error;
{
	register struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
	register struct socket *so = inp->inp_socket;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	     (error == EHOSTUNREACH || error == ENETUNREACH ||
	      error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror)
		so->so_error = error;
	else
		tp->t_softerror = error;
	soconnwakeup (so);
	sorwakeup(so);
	sowwakeup(so);
}

#ifdef __rtems__
#define INP_INFO_RLOCK(a)
#define INP_INFO_RUNLOCK(a)
#define INP_LOCK(a)
#define INP_UNLOCK(a)
#endif

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n, s;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = tcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
			+ (n + n/8) * sizeof(struct xtcpcb);
		return 0;
	}

	if (req->newptr != 0)
		return EPERM;

	/*
	 * OK, now we're committed to doing something.
	 */
	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	gencnt = tcbinfo.ipi_gencnt;
	n = tcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);

	sysctl_wire_old_buffer(req, 2 * (sizeof xig)
		+ n * sizeof(struct xtcpcb));

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
/*	xig.xig_sogen = so_gencnt; remove by ccj */
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return error;
	/* ccj add exit if the count is 0 */
	if (!n)
		return error;

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return ENOMEM;

	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_LOCK(inp);
		if (inp->inp_gencnt <= gencnt)
#if 0
		    && cr_canseesocket(req->td->td_ucred, inp->inp_socket) == 0)
#endif
			inp_list[i++] = inp;
		INP_UNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_LOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xtcpcb xt;
			caddr_t inp_ppcb;
			xt.xt_len = sizeof xt;
			/* XXX should avoid extra copy */
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb != NULL)
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			else
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
#if 0
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
#endif
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		}
		INP_UNLOCK(inp);
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		s = splnet();
		INP_INFO_RLOCK(&tcbinfo);
		xig.xig_gen = tcbinfo.ipi_gencnt;
#if 0
		xig.xig_sogen = so_gencnt;
#endif
		xig.xig_count = tcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&tcbinfo);
		splx(s);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return error;
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
	    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");

void
tcp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	register struct ip *ip = vip;
	register struct tcphdr *th;
	void (*notify) __P((struct inpcb *, int)) = tcp_notify;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
#if 1
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
#endif
	else if (!PRC_IS_REDIRECT(cmd) &&
		 ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0))
		return;
	if (ip) {
		th = (struct tcphdr *)((caddr_t)ip
				       + (IP_VHL_HL(ip->ip_vhl) << 2));
		in_pcbnotify(&tcb, sa, th->th_dport, ip->ip_src, th->th_sport,
			cmd, notify);
	} else
		in_pcbnotify(&tcb, sa, 0, zeroin_addr, 0, cmd, notify);
}

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(inp, errnum)
	struct inpcb *inp;
	int errnum;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp)
		tp->snd_cwnd = tp->t_maxseg;
}

#if 1
/*
 * When `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(inp, errnum)
	struct inpcb *inp;
	int errnum;
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct rmxp_tao *taop;
	struct socket *so = inp->inp_socket;
	int offered;
	int mss;

	if (tp) {
		rt = tcp_rtlookup(inp);
		if (!rt || !rt->rt_rmx.rmx_mtu) {
			tp->t_maxopd = tp->t_maxseg = tcp_mssdflt;
			return;
		}
		taop = rmx_taop(rt->rt_rmx);
		offered = taop->tao_mssopt;
		mss = rt->rt_rmx.rmx_mtu - sizeof(struct tcpiphdr);
		if (offered)
			mss = min(mss, offered);
		/*
		 * XXX - The above conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (tp->t_maxopd <= mss)
			return;
		tp->t_maxopd = mss;

		if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
			mss -= TCPOLEN_TSTAMP_APPA;
		if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
		    (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
			mss -= TCPOLEN_CC_APPA;
#if	(MCLBYTES & (MCLBYTES - 1)) == 0
		if (mss > MCLBYTES)
			mss &= ~(MCLBYTES-1);
#else
		if (mss > MCLBYTES)
			mss = mss / MCLBYTES * MCLBYTES;
#endif
		if (so->so_snd.sb_hiwat < mss)
			mss = so->so_snd.sb_hiwat;

		tp->t_maxseg = mss;

		tcpstat.tcps_mturesent++;
		tp->t_rtt = 0;
		tp->snd_nxt = tp->snd_una;
		tcp_output(tp);
	}
}
#endif

/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(inp)
	struct inpcb *inp;
{
	struct route *ro;
	struct rtentry *rt;

	ro = &inp->inp_route;
	rt = ro->ro_rt;
	if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inp->inp_faddr.s_addr != INADDR_ANY) {
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(ro->ro_dst);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
				inp->inp_faddr;
			rtalloc(ro);
			rt = ro->ro_rt;
		}
	}
	return rt;
}

/*
 * Return a pointer to the cached information about the remote host.
 * The cached information is stored in the protocol specific part of
 * the route metrics.
 */
struct rmxp_tao *
tcp_gettaocache(inp)
	struct inpcb *inp;
{
	struct rtentry *rt = tcp_rtlookup(inp);

	/* Make sure this is a host route and is up. */
	if (rt == NULL ||
	    (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST))
		return NULL;

	return rmx_taop(rt->rt_rmx);
}

/*
 * Clear all the TAO cache entries, called from tcp_init.
 *
 * XXX
 * This routine is just an empty one, because we assume that the routing
 * tables are initialized at the same time as TCP, so there is
 * nothing in the cache left over.
 */
static void
tcp_cleartaocache(void)
{ }
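The ssthresh caching at the top of this page packs a unit conversion into a few lines: the threshold kept in user data bytes is rounded to a whole number of segments, clamped to at least two, and then re-expressed in packet data bytes that include the TCP/IP header. A minimal standalone sketch of that arithmetic, with hypothetical segment and header sizes standing in for tp->t_maxseg and sizeof(struct tcpiphdr):

#include <stdio.h>

/* Illustrative stand-ins for the kernel values used above:
 * a 1460-byte MSS and a 40-byte TCP/IP header. */
#define T_MAXSEG   1460UL   /* hypothetical tp->t_maxseg */
#define TCPIP_HDR    40UL   /* stand-in for sizeof(struct tcpiphdr) */

/* Convert a byte-based ssthresh into the cached rmx_ssthresh form:
 * round to the nearest whole segment, clamp to at least 2 segments,
 * then scale back up counting header bytes as well. */
static unsigned long cache_ssthresh(unsigned long ssthresh_bytes)
{
	unsigned long i = (ssthresh_bytes + T_MAXSEG / 2) / T_MAXSEG;
	if (i < 2)
		i = 2;
	return i * (T_MAXSEG + TCPIP_HDR);
}

int main(void)
{
	/* 8192 bytes of user data rounds to 6 segments: 6 * 1500 = 9000. */
	printf("%lu\n", cache_ssthresh(8192));
	/* A tiny threshold is still cached as 2 full packets: 2 * 1500 = 3000. */
	printf("%lu\n", cache_ssthresh(100));
	return 0;
}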
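tcp_pcblist() avoids walking the PCB list twice: when the caller only probes for a size (req->oldptr == 0) it reports an estimate padded by roughly 12.5% so that connections created while userland allocates the buffer still fit. A sketch of that estimate, with hypothetical record sizes standing in for sizeof(struct xinpgen) and sizeof(struct xtcpcb):

#include <stdio.h>

/* Hypothetical record sizes; the real ones come from the netinet headers
 * and depend on the build. */
#define SIZEOF_XINPGEN   24UL   /* stand-in for sizeof(struct xinpgen) */
#define SIZEOF_XTCPCB   512UL   /* stand-in for sizeof(struct xtcpcb) */

/* Mirror of the size reported when req->oldptr == 0: two generation
 * records plus one xtcpcb per PCB, with n/8 extra entries of slack. */
static unsigned long pcblist_estimate(unsigned long n)
{
	return 2 * SIZEOF_XINPGEN + (n + n / 8) * SIZEOF_XTCPCB;
}

int main(void)
{
	printf("estimate for 64 PCBs: %lu bytes\n", pcblist_estimate(64));
	return 0;
}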
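In tcp_mtudisc(), once the option-adjusted MSS exceeds an mbuf cluster it is rounded down to a multiple of MCLBYTES, using a bitmask when MCLBYTES is a power of two and a divide/multiply otherwise; the two forms agree exactly in the power-of-two case. A small check of that identity, assuming a 2048-byte cluster size purely for illustration:

#include <assert.h>
#include <stdio.h>

#define MCLBYTES 2048u   /* assumed cluster size for the demonstration */

int main(void)
{
	unsigned mss;

	for (mss = MCLBYTES + 1; mss < 4 * MCLBYTES; mss++) {
		unsigned masked  = mss & ~(MCLBYTES - 1);     /* power-of-two path */
		unsigned divided = mss / MCLBYTES * MCLBYTES; /* generic path */
		assert(masked == divided);
	}
	/* e.g. an MSS of 2800 is clamped down to 2048. */
	printf("2800 -> %u\n", 2800u & ~(MCLBYTES - 1));
	return 0;
}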
