
📄 tcp.h

📁 Embedded Systems Design and Experiments, Textbook 2 source code: Linux kernel porting and compilation
💻 C header (.h)
📖 Page 1 of 4
 */
static inline void tcp_initialize_rcv_mss(struct sock *sk)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	unsigned int hint = min(tp->advmss, tp->mss_cache);

	hint = min(hint, tp->rcv_wnd/2);
	hint = min(hint, TCP_MIN_RCVMSS);
	hint = max(hint, TCP_MIN_MSS);

	tp->ack.rcv_mss = hint;
}

static __inline__ void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static __inline__ void tcp_fast_path_on(struct tcp_opt *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
{
	if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->rmem_alloc) < sk->rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static __inline__ u32 tcp_receive_window(struct tcp_opt *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32	__tcp_select_window(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decidedly
 * only use the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))
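
tcp_receive_window() does its arithmetic in a signed 32-bit intermediate because a fast peer can push data beyond the offered window, driving rcv_nxt past rcv_wup + rcv_wnd. A minimal user-space sketch with invented sequence numbers (plain ints, not the kernel structures) shows the clamp:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Invented values: the peer has pushed 904 bytes past the
	 * window we last advertised. */
	uint32_t rcv_wup = 1000;	/* rcv_nxt at last window update */
	uint32_t rcv_wnd = 4096;	/* window advertised at that point */
	uint32_t rcv_nxt = 6000;	/* next sequence we expect */

	int32_t win = (int32_t)(rcv_wup + rcv_wnd - rcv_nxt);	/* -904 */
	if (win < 0)
		win = 0;	/* never advertise a negative window */

	printf("advertised window now: %u\n", (uint32_t)win);	/* prints 0 */
	return 0;
}
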
/* This is what the send packet queueing engine uses to pass
 * TCP per-packet control information to the transmission
 * code.  We also store the host-order sequence numbers in
 * here too.  This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines, if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 *       real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

#define TCPCB_URG		0x20	/* Urgent pointer advanced here	*/

#define TCPCB_AT_TAIL		(TCPCB_URG)

	__u16		urg_ptr;	/* Valid if URG flag is set.	*/
	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

#define for_retrans_queue(skb, sk, tp) \
		for (skb = (sk)->write_queue.next;			\
		     (skb != (tp)->send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->write_queue);	\
		     skb=skb->next)

#include <net/tcp_ecn.h>

/*
 *	Compute minimal free write space needed to queue new packets.
 */
static inline int tcp_min_write_space(struct sock *sk)
{
	return sk->wmem_queued/2;
}

static inline int tcp_wspace(struct sock *sk)
{
	return sk->sndbuf - sk->wmem_queued;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static __inline__ unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
{
	return tp->packets_out - tp->left_out + tp->retrans_out;
}

/* Recalculate snd_ssthresh, we want to set it to:
 *
 *	one half the current congestion window, but no
 *	less than two segments
 */
static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
{
	return max(tp->snd_cwnd >> 1U, 2U);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is the rate halving phase, when cwnd is decreasing
 * towards ssthresh.
 */
static inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
{
	if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

static inline void tcp_sync_left_out(struct tcp_opt *tp)
{
	if (tp->sack_ok && tp->sacked_out >= tp->packets_out - tp->lost_out)
		tp->sacked_out = tp->packets_out - tp->lost_out;
	tp->left_out = tp->sacked_out + tp->lost_out;
}

extern void tcp_cwnd_application_limited(struct sock *sk);

/* Congestion window validation. (RFC2861) */
static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp)
{
	if (tp->packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
			tcp_cwnd_application_limited(sk);
	}
}

/* Set the slow start threshold and cwnd, without falling back to slow start */
static inline void __tcp_enter_cwr(struct tcp_opt *tp)
{
	tp->undo_marker = 0;
	tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
	tp->snd_cwnd = min(tp->snd_cwnd,
			   tcp_packets_in_flight(tp) + 1U);
	tp->snd_cwnd_cnt = 0;
	tp->high_seq = tp->snd_nxt;
	tp->snd_cwnd_stamp = tcp_time_stamp;
	TCP_ECN_queue_cwr(tp);
}

static inline void tcp_enter_cwr(struct tcp_opt *tp)
{
	tp->prior_ssthresh = 0;
	if (tp->ca_state < TCP_CA_CWR) {
		__tcp_enter_cwr(tp);
		tp->ca_state = TCP_CA_CWR;
	}
}

extern __u32 tcp_init_cwnd(struct tcp_opt *tp);
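
The equation in tcp_packets_in_flight() and the two-segment floor in tcp_recalc_ssthresh() are easier to sanity-check with concrete numbers. A stand-alone sketch with invented counter values (plain ints, not the kernel structures):

#include <stdio.h>

int main(void)
{
	/* Invented counters. */
	unsigned packets_out = 10;	/* sent once on the transmission queue */
	unsigned left_out    = 4;	/* left the network, not honestly ACKed */
	unsigned retrans_out = 1;	/* fast retransmitted */
	unsigned snd_cwnd    = 8;

	unsigned in_flight = packets_out - left_out + retrans_out;
	unsigned half      = snd_cwnd >> 1;
	unsigned ssthresh  = half < 2 ? 2 : half;	/* floor of two segments */

	printf("in flight = %u, new ssthresh = %u\n", in_flight, ssthresh);	/* 7, 4 */
	return 0;
}
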
/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto".
 */
static __inline__ __u32 tcp_max_burst(struct tcp_opt *tp)
{
	return 3;
}

static __inline__ int tcp_minshall_check(struct tcp_opt *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}

static __inline__ void tcp_minshall_update(struct tcp_opt *tp, int mss, struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

/* Return 0 if the packet can be sent now without violating Nagle's rules:
   1. It is full sized.
   2. Or it contains FIN.
   3. Or TCP_NODELAY was set.
   4. Or TCP_CORK is not set, and all sent packets are ACKed.
      With Minshall's modification: all sent small packets are ACKed.
 */
static __inline__ int
tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int nonagle)
{
	return (skb->len < mss_now &&
		!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
		(nonagle == 2 ||
		 (!nonagle &&
		  tp->packets_out &&
		  tcp_minshall_check(tp))));
}

/* This checks if the data-bearing packet SKB (usually tp->send_head)
 * should be put on the wire right now.
 */
static __inline__ int tcp_snd_test(struct tcp_opt *tp, struct sk_buff *skb,
				   unsigned cur_mss, int nonagle)
{
	/*	RFC 1122 - section 4.2.3.4
	 *
	 *	We must queue if
	 *
	 *	a) The right edge of this frame exceeds the window
	 *	b) There are packets in flight and we have a small segment
	 *	   [SWS avoidance and Nagle algorithm]
	 *	   (part of SWS is done on packetization)
	 *	   The Minshall version reads: there are no _small_
	 *	   segments in flight. (tcp_nagle_check)
	 *	c) We have too many packets 'in flight'
	 *
	 *	Don't use the Nagle rule for urgent data (or
	 *	for the final FIN -DaveM).
	 *
	 *	Also, the Nagle rule does not apply to frames which
	 *	sit in the middle of the queue (they have no chance
	 *	to get new data) or if room at the tail of the skb is
	 *	not enough to save something seriously (<32 for now).
	 */

	/* Don't be strict about the congestion window for the
	 * final FIN frame.  -DaveM
	 */
	return ((nonagle==1 || tp->urg_mode
		 || !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
		((tcp_packets_in_flight(tp) < tp->snd_cwnd) ||
		 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
		!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
}

static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_opt *tp)
{
	if (!tp->packets_out && !tp->pending)
		tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
}

static __inline__ int tcp_skb_is_last(struct sock *sk, struct sk_buff *skb)
{
	return (skb->next == (struct sk_buff*)&sk->write_queue);
}

/* Push out any pending frames which were held back due to
 * TCP_CORK or attempt at coalescing tiny packets.
 * The socket must be locked by the caller.
 */
static __inline__ void __tcp_push_pending_frames(struct sock *sk,
						 struct tcp_opt *tp,
						 unsigned cur_mss,
						 int nonagle)
{
	struct sk_buff *skb = tp->send_head;

	if (skb) {
		if (!tcp_skb_is_last(sk, skb))
			nonagle = 1;
		if (!tcp_snd_test(tp, skb, cur_mss, nonagle) ||
		    tcp_write_xmit(sk, nonagle))
			tcp_check_probe_timer(sk, tp);
	}
	tcp_cwnd_validate(sk, tp);
}

static __inline__ void tcp_push_pending_frames(struct sock *sk,
					       struct tcp_opt *tp)
{
	__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk), tp->nonagle);
}
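
tcp_nagle_check() returns nonzero when a sub-MSS segment must be held back: either TCP_CORK is in effect (nonagle == 2), or Nagle is active and a small segment is still unacknowledged. A stand-alone restatement with invented names and values (not the kernel API):

#include <stdio.h>

/* Returns nonzero if a sub-MSS segment must be queued rather than sent. */
static int nagle_must_queue(unsigned len, unsigned mss,
			    int has_fin, int nonagle,
			    unsigned packets_out, int small_unacked)
{
	return len < mss && !has_fin &&
	       (nonagle == 2 ||			/* TCP_CORK: always coalesce */
		(!nonagle && packets_out && small_unacked));
}

int main(void)
{
	/* 100-byte write, 1460-byte MSS, one small segment still unacked: */
	printf("%d\n", nagle_must_queue(100, 1460, 0, 0, 1, 1));	/* 1: queue it */
	/* Same write with TCP_NODELAY (nonagle == 1): */
	printf("%d\n", nagle_must_queue(100, 1460, 0, 1, 1, 1));	/* 0: send now */
	return 0;
}
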
static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_opt *tp)
{
	struct sk_buff *skb = tp->send_head;

	return (skb &&
		tcp_snd_test(tp, skb, tcp_current_mss(sk),
			     tcp_skb_is_last(sk, skb) ? 1 : tp->nonagle));
}

static __inline__ void tcp_init_wl(struct tcp_opt *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}

static __inline__ void tcp_update_wl(struct tcp_opt *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}

extern void			tcp_destroy_sock(struct sock *sk);

/*
 * Calculate(/check) TCP checksum
 */
static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
				   unsigned long saddr, unsigned long daddr,
				   unsigned long base)
{
	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}

static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
{
	return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
}

static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */
static __inline__ void tcp_prequeue_init(struct tcp_opt *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 * NOTE: is this not too big to inline?
 */
static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	if (tp->ucopy.task) {
		__skb_queue_tail(&tp->ucopy.prequeue, skb);
		tp->ucopy.memory += skb->truesize;
		if (tp->ucopy.memory > sk->rcvbuf) {
			struct sk_buff *skb1;

			if (sk->lock.users) BUG();

			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
				sk->backlog_rcv(sk, skb1);
				NET_INC_STATS_BH(TCPPrequeueDropped);
			}

			tp->ucopy.memory = 0;
		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
			wake_up_interruptible(sk->sleep);
			if (!tcp_ack_scheduled(tp))
				tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
		}
		return 1;
	}
	return 0;
}

#undef STATE_TRACE

#ifdef STATE_TRACE
static char *statename[]={
	"Unused","Established","Syn Sent","Syn Recv",
	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
	"Close Wait","Last ACK","Listen","Closing"
};
#endif

static __inline__ void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(TcpCurrEstab);
		break;

	case TCP_CLOSE:
		sk->prot->unhash(sk);
		if (sk->prev && !(sk->userlocks&SOCK_BINDPORT_LOCK))
			tcp_put_port(sk);
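
The listing breaks off here, inside tcp_set_state() (page 1 of 4). Stepping back to the checksum helpers above: a packet verifies when folding the accumulated 32-bit partial sum to 16 bits and complementing it yields zero, which is why __tcp_checksum_complete() can simply return the folded value. A minimal user-space sketch of that fold (the partial sum is invented; this is not the kernel's csum_fold()):

#include <stdio.h>
#include <stdint.h>

/* Fold a 32-bit one's-complement partial sum to 16 bits, then complement. */
static uint16_t fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the high half in */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb any new carry */
	return (uint16_t)~sum;
}

int main(void)
{
	uint32_t partial = 0x0003fffd;		/* invented partial sum */
	printf("folded: 0x%04x\n", fold(partial));	/* prints 0xfffe */
	return 0;
}
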
