tcp_subr.c

来自「基于组件方式开发操作系统的OSKIT源代码」· C语言 代码 · 共 609 行 · 第 1/2 页

C
609
字号
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as the 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (tp->t_rttupdated >= 16 &&
	    (rt = inp->inp_route.ro_rt) &&
	    ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr != INADDR_ANY) {
		register u_long i = 0;
		/* Cache the smoothed RTT in the route metrics, unless the
		 * user has locked that metric against kernel updates. */
		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
			i = tp->t_srtt *
			    (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		/* Likewise cache the RTT variance, unless locked. */
		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshhold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * NOTE(review): when the first || operand short-circuits
		 * before assigning i, the second test reuses whatever i
		 * was left holding by the rttvar computation above (or 0).
		 * This matches the historical BSD code but is subtle.
		 */
		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
		    ((i = tp->snd_ssthresh) != 0) && rt->rt_rmx.rmx_ssthresh) ||
		    i < (rt->rt_rmx.rmx_sendpipe / 2)) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= (u_long)(tp->t_maxseg + sizeof (struct tcpiphdr));
			if (rt->rt_rmx.rmx_ssthresh)
				/* Average with the previously cached value. */
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}
#endif /* RTV_RTT */
	/*
	 * free the reassembly queue, if any.  Advance t past each entry
	 * before unlinking/freeing it so the walk never dereferences a
	 * freed element.
	 */
	t = tp->seg_next;
	while (t != (struct tcpiphdr *)tp) {
		t = (struct tcpiphdr *)t->ti_next;
		m = REASS_MBUF((struct tcpiphdr *)t->ti_prev);
		remque(t->ti_prev);
		m_freem(m);
	}
	/* Release the cached header template, then the tcpcb itself. */
	if (tp->t_template)
		(void) m_free(dtom(tp->t_template));
	free(tp, M_PCB);
	inp->inp_ppcb = 0;
	soisdisconnected(so);
	in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	return ((struct tcpcb *)0);
}

/*
 * Protocol drain hook.  Intentionally empty: this TCP keeps no
 * reclaimable caches here.
 * NOTE(review): presumably invoked when the system is short of mbufs —
 * confirm against the protosw registration.
 */
void
tcp_drain()
{

}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 */
void
tcp_notify(inp, error)
	struct inpcb *inp;
	int error;
{
	register struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
	register struct socket *so = inp->inp_socket;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	     (error == EHOSTUNREACH || error == ENETUNREACH ||
	      error == EHOSTDOWN)) {
		/* Unreachability on an established connection is treated
		 * as transient: ignore it entirely. */
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror)
		/* Second error while still connecting after >3 rexmits:
		 * give up and report it to the user as a hard error. */
		so->so_error = error;
	else
		/* Otherwise just remember it as a soft error. */
		tp->t_softerror = error;
	/* Wake anybody sleeping on the socket so the error is noticed. */
	wakeup((caddr_t) &so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
}

/*
 * Control-input hook: translate an ICMP-derived control command into a
 * notification on the matching TCP PCB(s), selecting the notify routine
 * by command type.
 */
void
tcp_ctlinput(cmd, sa, ip)
	int cmd;
	struct sockaddr *sa;
	register struct ip *ip;
{
	register struct tcphdr *th;
	void (*notify) __P((struct inpcb *, int)) = tcp_notify;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
#if 1
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
#endif
	else if (!PRC_IS_REDIRECT(cmd) &&
		 ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0))
		/* Not a redirect and not a mapped error: nothing to do. */
		return;
	if (ip) {
		/* Locate the TCP header just past the IP header (ip_hl is
		 * in 32-bit words) and notify the specific connection. */
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
		in_pcbnotify(&tcb, sa, th->th_dport, ip->ip_src, th->th_sport,
			cmd, notify);
	} else
		/* No offending packet available: notify by address only. */
		in_pcbnotify(&tcb, sa, 0, zeroin_addr, 0, cmd, notify);
}

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp)
		tp->snd_cwnd = tp->t_maxseg;
}

#if 1
/*
 * When `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct rmxp_tao *taop;
	struct socket *so = inp->inp_socket;
	int offered;
	int mss;

	if (tp) {
		rt = tcp_rtlookup(inp);
		if (!rt || !rt->rt_rmx.rmx_mtu) {
			/* No route or no MTU metric cached: fall back to
			 * the conservative system default MSS. */
			tp->t_maxopd = tp->t_maxseg = tcp_mssdflt;
			return;
		}
		taop = rmx_taop(rt->rt_rmx);
		offered = taop->tao_mssopt;
		/* Candidate MSS from the route MTU, capped by the peer's
		 * recorded MSS option when we have one. */
		mss = rt->rt_rmx.rmx_mtu - sizeof(struct tcpiphdr);
		if (offered)
			mss = min(mss, offered);
		/*
		 * XXX - The above conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (tp->t_maxopd <= mss)
			/* Nothing to shrink: current value already fits. */
			return;
		tp->t_maxopd = mss;
		/* Subtract the per-segment option overhead we expect to
		 * carry (RFC 1323 timestamps, T/TCP CC) from the data MSS. */
		if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
			mss -= TCPOLEN_TSTAMP_APPA;
		if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
		    (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
			mss -= TCPOLEN_CC_APPA;
		/* Round large MSS values down to a multiple of MCLBYTES so
		 * segments fill mbuf clusters exactly; the first branch is
		 * the cheap mask when MCLBYTES is a power of two. */
#if	(MCLBYTES & (MCLBYTES - 1)) == 0
		if (mss > MCLBYTES)
			mss &= ~(MCLBYTES-1);
#else
		if (mss > MCLBYTES)
			mss = mss / MCLBYTES * MCLBYTES;
#endif
		/* Never exceed the send buffer high-water mark. */
		if (so->so_snd.sb_hiwat < mss)
			mss = so->so_snd.sb_hiwat;
		tp->t_maxseg = mss;
		/* Rewind to the oldest unacked data and retransmit, since
		 * the packet that triggered the ICMP was dropped. */
		tcpstat.tcps_mturesent++;
		tp->t_rtt = 0;
		tp->snd_nxt = tp->snd_una;
		tcp_output(tp);
	}
}
#endif

/*
 * Look-up the routing entry to the peer of this inpcb.
If no route
 * is found and it cannot be allocated, then return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(inp)
	struct inpcb *inp;
{
	struct route *ro;
	struct rtentry *rt;

	ro = &inp->inp_route;
	rt = ro->ro_rt;
	if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inp->inp_faddr.s_addr != INADDR_ANY) {
			/* Build the destination sockaddr from the foreign
			 * address and ask the routing code for a route. */
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(ro->ro_dst);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
				inp->inp_faddr;
			rtalloc(ro);
			rt = ro->ro_rt;
		}
	}
	return rt;
}

/*
 * Return a pointer to the cached information about the remote host.
 * The cached information is stored in the protocol specific part of
 * the route metrics.
 */
struct rmxp_tao *
tcp_gettaocache(inp)
	struct inpcb *inp;
{
	struct rtentry *rt = tcp_rtlookup(inp);

	/* Make sure this is a host route and is up. */
	if (rt == NULL ||
	    (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST))
		return NULL;

	return rmx_taop(rt->rt_rmx);
}

/*
 * Clear all the TAO cache entries, called from tcp_init.
 *
 * XXX
 * This routine is just an empty one, because we assume that the
 * routing tables are initialized at the same time as TCP, so there is
 * nothing in the cache left over.
 */
static void
tcp_cleartaocache(void)
{ }

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?