
📄 tcp_output.c

📁 Linux Kernel 2.6.9 for OMAP1710
💻 C
📖 Page 1 of 4
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse = 1;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor = 8;

static __inline__
void update_send_head(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
{
	sk->sk_send_head = skb->next;
	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
		sk->sk_send_head = NULL;
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
	tcp_packets_out_inc(sk, tp, skb);
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static __inline__ __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_opt *tp)
{
	if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tp->snd_una+tp->snd_wnd;
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not make 3, we advertise MSS, calculated from first
 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 *    This may be overridden via information stored in routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_opt *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
		mss = dst_metric(dst, RTAX_ADVMSS);
		tp->advmss = mss;
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
 * This is the first part of cwnd validation mechanism.
 */
static void tcp_cwnd_restart(struct tcp_opt *tp, struct dst_entry *dst)
{
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	if (tcp_is_vegas(tp))
		tcp_vegas_enable(tp);

	tp->snd_ssthresh = tcp_current_ssthresh(tp);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= tp->rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}

static __inline__ void tcp_event_data_sent(struct tcp_opt *tp,
					   struct sk_buff *skb, struct sock *sk)
{
	u32 now = tcp_time_stamp;

	if (!tcp_get_pcount(&tp->packets_out) &&
	    (s32)(now - tp->lsndtime) > tp->rto)
		tcp_cwnd_restart(tp, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply for ato after last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - tp->ack.lrcvtime) < tp->ack.ato)
		tp->ack.pingpong = 1;
}

static __inline__ void tcp_event_ack_sent(struct sock *sk)
{
	struct tcp_opt *tp = tcp_sk(sk);

	tcp_dec_quickack_mode(tp);
	tcp_clear_xmit_timer(sk, TCP_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. We try to be nice.
	 * If we are not window scaling, then this truncates
	 * our initial window offering to 32k. There should also
	 * be a sysctl option to stop being nice.
	 */
	(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window
		 * See RFC1323 for an explanation of the limit to 14
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set initial window to a value enough for senders
	 * following RFC2414. Senders not following this RFC
	 * will be satisfied with 2.
	 */
	if (mss > (1<<*rcv_wscale)) {
		int init_cwnd = 4;
		if (mss > 1460*3)
			init_cwnd = 2;
		else if (mss > 1460)
			init_cwnd = 3;
		if (*rcv_wnd > init_cwnd*mss)
			*rcv_wnd = init_cwnd*mss;
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}

/* Choose a new window to advertise, update state in tcp_opt for the
 * socket, and return result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static __inline__ u16 tcp_select_window(struct sock *sk)
{
	struct tcp_opt *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = cur_win;
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rcv_wscale)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}

/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (skb != NULL) {
		struct inet_opt *inet = inet_sk(sk);
		struct tcp_opt *tp = tcp_sk(sk);
		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
		int tcp_header_size = tp->tcp_header_len;
		struct tcphdr *th;
		int sysctl_flags;
		int err;

		BUG_ON(!tcp_skb_pcount(skb));

#define SYSCTL_FLAG_TSTAMPS	0x1
#define SYSCTL_FLAG_WSCALE	0x2
#define SYSCTL_FLAG_SACK	0x4

		sysctl_flags = 0;
		if (tcb->flags & TCPCB_FLAG_SYN) {
			tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
			if (sysctl_tcp_timestamps) {
				tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
				sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
			}
			if (sysctl_tcp_window_scaling) {
				tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
				sysctl_flags |= SYSCTL_FLAG_WSCALE;
			}
			if (sysctl_tcp_sack) {
				sysctl_flags |= SYSCTL_FLAG_SACK;
				if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
					tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
			}
		} else if (tp->eff_sacks) {
			/* A SACK is 2 pad bytes, a 2 byte header, plus
			 * 2 32-bit sequence numbers for each SACK block.
			 */
			tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
					    (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK));
		}

		/*
		 * If the connection is idle and we are restarting,
		 * then we don't want to do any Vegas calculations
		 * until we get fresh RTT samples.  So when we
		 * restart, we reset our Vegas state to a clean
		 * slate. After we get acks for this flight of
		 * packets, _then_ we can make Vegas calculations
		 * again.
		 */
		if (tcp_is_vegas(tp) && tcp_packets_in_flight(tp) == 0)
			tcp_vegas_enable(tp);

		th = (struct tcphdr *) skb_push(skb, tcp_header_size);
		skb->h.th = th;
		skb_set_owner_w(skb, sk);

		/* Build TCP header and checksum it. */
		th->source		= inet->sport;
		th->dest		= inet->dport;
		th->seq			= htonl(tcb->seq);
		th->ack_seq		= htonl(tp->rcv_nxt);
		*(((__u16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) | tcb->flags);
		if (tcb->flags & TCPCB_FLAG_SYN) {
			/* RFC1323: The window in SYN & SYN/ACK segments
			 * is never scaled.
			 */
			th->window	= htons(tp->rcv_wnd);
		} else {
			th->window	= htons(tcp_select_window(sk));
		}
		th->check		= 0;
		th->urg_ptr		= 0;

		if (tp->urg_mode &&
		    between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF)) {
			th->urg_ptr		= htons(tp->snd_up-tcb->seq);
			th->urg			= 1;
		}

		if (tcb->flags & TCPCB_FLAG_SYN) {
			tcp_syn_build_options((__u32 *)(th + 1),
					      tcp_advertise_mss(sk),
					      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
					      (sysctl_flags & SYSCTL_FLAG_SACK),
					      (sysctl_flags & SYSCTL_FLAG_WSCALE),
					      tp->rcv_wscale,
					      tcb->when,
					      tp->ts_recent);
		} else {
			tcp_build_and_update_options((__u32 *)(th + 1),
						     tp, tcb->when);

			TCP_ECN_send(sk, tp, skb, tcp_header_size);
		}
		tp->af_specific->send_check(sk, th, skb->len, skb);

		if (tcb->flags & TCPCB_FLAG_ACK)
			tcp_event_ack_sent(sk);

		if (skb->len != tcp_header_size)
			tcp_event_data_sent(tp, skb, sk);

		TCP_INC_STATS(TCP_MIB_OUTSEGS);

		err = tp->af_specific->queue_xmit(skb, 0);
		if (err <= 0)
			return err;

		tcp_enter_cwr(tp);

		/* NET_XMIT_CN is special. It does not guarantee,
		 * that this packet is lost. It tells that device
		 * is about to start to drop packets or already
		 * drops some packets of the same priority and
		 * invokes us to send less aggressively.
		 */
		return err == NET_XMIT_CN ? 0 : err;
	}
	return -ENOBUFS;
#undef SYSCTL_FLAG_TSTAMPS
#undef SYSCTL_FLAG_WSCALE
#undef SYSCTL_FLAG_SACK
}

/* This routine just queues the buffer.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_opt *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_write_queue, skb);
	sk_charge_skb(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = skb;
}

/* Send _single_ skb sitting at the send head. This function requires
 * true push pending frames to setup probe timer etc.
 */
void tcp_push_one(struct sock *sk, unsigned cur_mss)
{
	struct tcp_opt *tp = tcp_sk(sk);
	struct sk_buff *skb = sk->sk_send_head;

	if (tcp_snd_test(tp, skb, cur_mss, TCP_NAGLE_PUSH)) {
		/* Send it out now. */
		TCP_SKB_CB(skb)->when = tcp_time_stamp;
		if (!tcp_transmit_skb(sk, skb_clone(skb, sk->sk_allocation))) {
			sk->sk_send_head = NULL;
			tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
			tcp_packets_out_inc(sk, tp, skb);
			return;
		}
	}
}

void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_std)
{
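The RFC2861 restart logic in tcp_cwnd_restart() above is self-contained enough to model in isolation: for every full RTO that elapses while the connection sits idle, the congestion window is halved, but never below the restart window. A minimal user-space sketch of just that arithmetic (plain C, not kernel code; the concrete values standing in for tp->snd_cwnd, the restart window, and tp->rto are made-up examples):

#include <stdio.h>

/* Model of the tcp_cwnd_restart() arithmetic: halve cwnd once per
 * full RTO of idle time, but never drop below the restart window
 * (which itself never exceeds the current cwnd). Units here are
 * illustrative; the kernel works in jiffies and segments. */
static unsigned int cwnd_after_idle(unsigned int cwnd,
				    unsigned int restart_cwnd,
				    int idle_ms, int rto_ms)
{
	if (restart_cwnd > cwnd)
		restart_cwnd = cwnd;
	while ((idle_ms -= rto_ms) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;		/* one halving per elapsed RTO */
	return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}

int main(void)
{
	/* Made-up sample values: cwnd 32 segments, restart window 4,
	 * RTO 200 ms. */
	printf("idle 700 ms -> cwnd %u\n", cwnd_after_idle(32, 4, 700, 200));
	printf("idle 300 ms -> cwnd %u\n", cwnd_after_idle(32, 4, 300, 200));
	return 0;
}

With a 200 ms RTO, 700 ms of idle time collapses the 32-segment window all the way to the restart window of 4, while 300 ms of idle time costs a single halving to 16.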
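Likewise, the window-scale loop in tcp_select_initial_window() simply finds the smallest right shift (capped at 14, per RFC1323) that makes the largest possible receive buffer fit the 16-bit window field. A sketch of that loop alone, using a hypothetical 4 MB buffer in place of max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max):

#include <stdio.h>

int main(void)
{
	/* Hypothetical receive-buffer ceiling; the kernel uses the
	 * larger of sysctl_tcp_rmem[2] and sysctl_rmem_max here. */
	unsigned int space = 4 * 1024 * 1024;
	unsigned int rcv_wscale = 0;

	/* Shift until the window fits in 16 bits; RFC1323 caps the
	 * scale factor at 14. */
	while (space > 65535 && rcv_wscale < 14) {
		space >>= 1;
		rcv_wscale++;
	}
	printf("rcv_wscale = %u\n", rcv_wscale);	/* prints 7 */
	return 0;
}

4 MB >> 7 = 32 KB is the first value that fits in the 16-bit field, so a peer must shift our advertised window left by 7 to recover the real size.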
