
📄 tcp.h

📁 Embedded System Design and Experiments (Textbook 2) source code: Linux kernel porting and compilation
📖 Page 1 of 4
        /* fall through */
    default:
        if (oldstate == TCP_ESTABLISHED)
            tcp_statistics[smp_processor_id()*2+!in_softirq()].TcpCurrEstab--;
    }

    /* Change state AFTER socket is unhashed to avoid closed
     * socket sitting in hash tables.
     */
    sk->state = state;

#ifdef STATE_TRACE
    SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
#endif
}

static __inline__ void tcp_done(struct sock *sk)
{
    tcp_set_state(sk, TCP_CLOSE);
    tcp_clear_xmit_timers(sk);

    sk->shutdown = SHUTDOWN_MASK;

    if (!sk->dead)
        sk->state_change(sk);
    else
        tcp_destroy_sock(sk);
}

static __inline__ void tcp_sack_reset(struct tcp_opt *tp)
{
    tp->dsack = 0;
    tp->eff_sacks = 0;
    tp->num_sacks = 0;
}

static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_opt *tp, __u32 tstamp)
{
    if (tp->tstamp_ok) {
        *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
                                  (TCPOPT_NOP << 16) |
                                  (TCPOPT_TIMESTAMP << 8) |
                                  TCPOLEN_TIMESTAMP);
        *ptr++ = htonl(tstamp);
        *ptr++ = htonl(tp->ts_recent);
    }
    if (tp->eff_sacks) {
        struct tcp_sack_block *sp = tp->dsack ? tp->duplicate_sack : tp->selective_acks;
        int this_sack;

        *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
                                  (TCPOPT_NOP << 16) |
                                  (TCPOPT_SACK << 8) |
                                  (TCPOLEN_SACK_BASE +
                                   (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK)));
        for (this_sack = 0; this_sack < tp->eff_sacks; this_sack++) {
            *ptr++ = htonl(sp[this_sack].start_seq);
            *ptr++ = htonl(sp[this_sack].end_seq);
        }
        if (tp->dsack) {
            tp->dsack = 0;
            tp->eff_sacks--;
        }
    }
}

/* Construct a tcp options header for a SYN or SYN_ACK packet.
 * If this is ever changed make sure to change the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 */
static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
                                         int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
{
    /* We always get an MSS option.
     * The option bytes which will be seen in normal data
     * packets should timestamps be used, must be in the MSS
     * advertised.  But we subtract them from tp->mss_cache so
     * that calculations in tcp_sendmsg are simpler etc.
     * So account for this fact here if necessary.  If we
     * don't do this correctly, as a receiver we won't
     * recognize data packets as being full sized when we
     * should, and thus we won't abide by the delayed ACK
     * rules correctly.
     * SACKs don't matter, we never delay an ACK when we
     * have any of those going out.
     */
    *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
    if (ts) {
        if (sack)
            *ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
                                      (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
        else
            *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                      (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
        *ptr++ = htonl(tstamp);        /* TSVAL */
        *ptr++ = htonl(ts_recent);     /* TSECR */
    } else if (sack)
        *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                  (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
    if (offer_wscale)
        *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
}
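Editor's note: each 32-bit word that tcp_syn_build_options() emits packs an option's kind, length and value in network byte order, so the word is exactly the option as it appears on the wire. The standalone userspace sketch below (not part of tcp.h; the TCPOPT_*/TCPOLEN_* constants are redefined locally with the standard RFC 793/1323 values that the kernel header also uses, and the helper names are illustrative) prints the byte layout of the MSS word and of the NOP-padded window-scale word for an MSS of 1460 and a scale factor of 7.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define TCPOPT_NOP      1    /* standard TCP option kinds (RFC 793/1323) */
#define TCPOPT_MSS      2
#define TCPOPT_WINDOW   3
#define TCPOLEN_MSS     4
#define TCPOLEN_WINDOW  3

static void dump_word(const char *name, uint32_t word)
{
    unsigned char b[4];
    memcpy(b, &word, 4);    /* word is already in network byte order */
    printf("%-12s %02x %02x %02x %02x\n", name, b[0], b[1], b[2], b[3]);
}

int main(void)
{
    int mss = 1460, wscale = 7;

    /* Same packing as tcp_syn_build_options(): kind, length, value. */
    uint32_t mss_word = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
    uint32_t ws_word  = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) |
                              (TCPOLEN_WINDOW << 8) | wscale);

    dump_word("MSS option", mss_word);    /* prints 02 04 05 b4 */
    dump_word("WS option", ws_word);      /* prints 01 03 03 07 */
    return 0;
}

Compiled with any hosted C compiler this prints "02 04 05 b4" and "01 03 03 07", i.e. kind 2 / length 4 / value 1460, and NOP followed by kind 3 / length 3 / value 7.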
/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
static inline void tcp_select_initial_window(int __space, __u32 mss,
                                             __u32 *rcv_wnd,
                                             __u32 *window_clamp,
                                             int wscale_ok,
                                             __u8 *rcv_wscale)
{
    unsigned int space = (__space < 0 ? 0 : __space);

    /* If no clamp set the clamp to the max possible scaled window */
    if (*window_clamp == 0)
        (*window_clamp) = (65535 << 14);
    space = min(*window_clamp, space);

    /* Quantize space offering to a multiple of mss if possible. */
    if (space > mss)
        space = (space / mss) * mss;

    /* NOTE: offering an initial window larger than 32767
     * will break some buggy TCP stacks. We try to be nice.
     * If we are not window scaling, then this truncates
     * our initial window offering to 32k. There should also
     * be a sysctl option to stop being nice.
     */
    (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
    (*rcv_wscale) = 0;
    if (wscale_ok) {
        /* See RFC1323 for an explanation of the limit to 14 */
        while (space > 65535 && (*rcv_wscale) < 14) {
            space >>= 1;
            (*rcv_wscale)++;
        }
        if (*rcv_wscale && sysctl_tcp_app_win && space >= mss &&
            space - max((space>>sysctl_tcp_app_win), mss>>*rcv_wscale) < 65536/2)
            (*rcv_wscale)--;
    }

    /* Set initial window to value enough for senders,
     * following RFC2414. Senders, not following this RFC,
     * will be satisfied with 2.
     */
    if (mss > (1<<*rcv_wscale)) {
        int init_cwnd = 4;
        if (mss > 1460*3)
            init_cwnd = 2;
        else if (mss > 1460)
            init_cwnd = 3;
        if (*rcv_wnd > init_cwnd*mss)
            *rcv_wnd = init_cwnd*mss;
    }
    /* Set the clamp no higher than max representable value */
    (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}

static inline int tcp_win_from_space(int space)
{
    return sysctl_tcp_adv_win_scale<=0 ?
        (space>>(-sysctl_tcp_adv_win_scale)) :
        space - (space>>sysctl_tcp_adv_win_scale);
}

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(struct sock *sk)
{
    return tcp_win_from_space(sk->rcvbuf - atomic_read(&sk->rmem_alloc));
}

static inline int tcp_full_space(struct sock *sk)
{
    return tcp_win_from_space(sk->rcvbuf);
}

static inline void tcp_acceptq_removed(struct sock *sk)
{
    sk->ack_backlog--;
}

static inline void tcp_acceptq_added(struct sock *sk)
{
    sk->ack_backlog++;
}

static inline int tcp_acceptq_is_full(struct sock *sk)
{
    return sk->ack_backlog > sk->max_ack_backlog;
}

static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
                                     struct sock *child)
{
    struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

    req->sk = child;
    tcp_acceptq_added(sk);

    if (!tp->accept_queue_tail) {
        tp->accept_queue = req;
    } else {
        tp->accept_queue_tail->dl_next = req;
    }
    tp->accept_queue_tail = req;
    req->dl_next = NULL;
}

struct tcp_listen_opt
{
    u8                    max_qlen_log;    /* log_2 of maximal queued SYNs */
    int                   qlen;
    int                   qlen_young;
    int                   clock_hand;
    struct open_request   *syn_table[TCP_SYNQ_HSIZE];
};

static inline void
tcp_synq_removed(struct sock *sk, struct open_request *req)
{
    struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;

    if (--lopt->qlen == 0)
        tcp_delete_keepalive_timer(sk);
    if (req->retrans == 0)
        lopt->qlen_young--;
}

static inline void tcp_synq_added(struct sock *sk)
{
    struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;

    if (lopt->qlen++ == 0)
        tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
    lopt->qlen_young++;
}

static inline int tcp_synq_len(struct sock *sk)
{
    return sk->tp_pinfo.af_tcp.listen_opt->qlen;
}

static inline int tcp_synq_young(struct sock *sk)
{
    return sk->tp_pinfo.af_tcp.listen_opt->qlen_young;
}

static inline int tcp_synq_is_full(struct sock *sk)
{
    return tcp_synq_len(sk) >> sk->tp_pinfo.af_tcp.listen_opt->max_qlen_log;
}
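Editor's note: the window-scale selection in tcp_select_initial_window() above simply keeps halving the offered space until it fits in the 16-bit window field, capping the shift at 14 as RFC 1323 requires. The standalone sketch below re-implements only that loop to show the resulting scale for a few buffer sizes; the function name pick_rcv_wscale and the simplified signature are illustrative, and the real kernel function additionally applies the sysctl_tcp_app_win adjustment and the RFC 2414 initial-window clamp shown above.

#include <stdio.h>

/* Smallest shift (capped at 14) such that the offered space fits in the
 * 16-bit TCP window field; mirrors only the while-loop above. */
static unsigned int pick_rcv_wscale(unsigned int space)
{
    unsigned int wscale = 0;

    while (space > 65535 && wscale < 14) {
        space >>= 1;
        wscale++;
    }
    return wscale;
}

int main(void)
{
    unsigned int bufs[] = { 32768, 87380, 1048576, 16777216 };
    unsigned int i;

    for (i = 0; i < sizeof(bufs)/sizeof(bufs[0]); i++)
        printf("space %8u -> wscale %2u (window field %u)\n",
               bufs[i], pick_rcv_wscale(bufs[i]),
               bufs[i] >> pick_rcv_wscale(bufs[i]));
    return 0;
}

For example, a 1 MB receive buffer ends up with wscale 5 and a window-field value of 32768, while a 32 KB buffer needs no scaling at all.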
static inline void
tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req,
                struct open_request **prev)
{
    write_lock(&tp->syn_wait_lock);
    *prev = req->dl_next;
    write_unlock(&tp->syn_wait_lock);
}

static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
                                 struct open_request **prev)
{
    tcp_synq_unlink(&sk->tp_pinfo.af_tcp, req, prev);
    tcp_synq_removed(sk, req);
    tcp_openreq_free(req);
}

static __inline__ void tcp_openreq_init(struct open_request *req,
                                        struct tcp_opt *tp,
                                        struct sk_buff *skb)
{
    req->rcv_wnd = 0;        /* So that tcp_send_synack() knows! */
    req->rcv_isn = TCP_SKB_CB(skb)->seq;
    req->mss = tp->mss_clamp;
    req->ts_recent = tp->saw_tstamp ? tp->rcv_tsval : 0;
    req->tstamp_ok = tp->tstamp_ok;
    req->sack_ok = tp->sack_ok;
    req->snd_wscale = tp->snd_wscale;
    req->wscale_ok = tp->wscale_ok;
    req->acked = 0;
    req->ecn_ok = 0;
    req->rmt_port = skb->h.th->source;
}

#define TCP_MEM_QUANTUM    ((int)PAGE_SIZE)

static inline void tcp_free_skb(struct sock *sk, struct sk_buff *skb)
{
    sk->tp_pinfo.af_tcp.queue_shrunk = 1;
    sk->wmem_queued -= skb->truesize;
    sk->forward_alloc += skb->truesize;
    __kfree_skb(skb);
}

static inline void tcp_charge_skb(struct sock *sk, struct sk_buff *skb)
{
    sk->wmem_queued += skb->truesize;
    sk->forward_alloc -= skb->truesize;
}

extern void __tcp_mem_reclaim(struct sock *sk);
extern int tcp_mem_schedule(struct sock *sk, int size, int kind);

static inline void tcp_mem_reclaim(struct sock *sk)
{
    if (sk->forward_alloc >= TCP_MEM_QUANTUM)
        __tcp_mem_reclaim(sk);
}

static inline void tcp_enter_memory_pressure(void)
{
    if (!tcp_memory_pressure) {
        NET_INC_STATS(TCPMemoryPressures);
        tcp_memory_pressure = 1;
    }
}

static inline void tcp_moderate_sndbuf(struct sock *sk)
{
    if (!(sk->userlocks&SOCK_SNDBUF_LOCK)) {
        sk->sndbuf = min(sk->sndbuf, sk->wmem_queued/2);
        sk->sndbuf = max(sk->sndbuf, SOCK_MIN_SNDBUF);
    }
}

static inline struct sk_buff *tcp_alloc_pskb(struct sock *sk, int size, int mem, int gfp)
{
    struct sk_buff *skb = alloc_skb(size+MAX_TCP_HEADER, gfp);

    if (skb) {
        skb->truesize += mem;
        if (sk->forward_alloc >= (int)skb->truesize ||
            tcp_mem_schedule(sk, skb->truesize, 0)) {
            skb_reserve(skb, MAX_TCP_HEADER);
            return skb;
        }
        __kfree_skb(skb);
    } else {
        tcp_enter_memory_pressure();
        tcp_moderate_sndbuf(sk);
    }
    return NULL;
}

static inline struct sk_buff *tcp_alloc_skb(struct sock *sk, int size, int gfp)
{
    return tcp_alloc_pskb(sk, size, 0, gfp);
}

static inline struct page *tcp_alloc_page(struct sock *sk)
{
    if (sk->forward_alloc >= (int)PAGE_SIZE ||
        tcp_mem_schedule(sk, PAGE_SIZE, 0)) {
        struct page *page = alloc_pages(sk->allocation, 0);
        if (page)
            return page;
    }
    tcp_enter_memory_pressure();
    tcp_moderate_sndbuf(sk);
    return NULL;
}

static inline void tcp_writequeue_purge(struct sock *sk)
{
    struct sk_buff *skb;

    while ((skb = __skb_dequeue(&sk->write_queue)) != NULL)
        tcp_free_skb(sk, skb);
    tcp_mem_reclaim(sk);
}

extern void tcp_rfree(struct sk_buff *skb);

static inline void tcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
    skb->sk = sk;
    skb->destructor = tcp_rfree;
    atomic_add(skb->truesize, &sk->rmem_alloc);
    sk->forward_alloc -= skb->truesize;
}

extern void tcp_listen_wlock(void);
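Editor's note: the send-side memory accounting above works on a prepaid per-socket credit. tcp_charge_skb() deducts a queued skb's truesize from sk->forward_alloc, tcp_free_skb() refunds it, and tcp_mem_reclaim() hands the credit back in whole TCP_MEM_QUANTUM (page-sized) chunks once it grows that large. The toy model below mimics this bookkeeping with plain integers; all names are illustrative, and the actual borrowing from and returning to the global counter is done by tcp_mem_schedule() and __tcp_mem_reclaim(), which are only declared here and defined elsewhere in the kernel.

#include <stdio.h>

#define MEM_QUANTUM 4096        /* stand-in for TCP_MEM_QUANTUM (PAGE_SIZE) */

static int global_pool;         /* stand-in for the global allocated counter */
static int forward_alloc;       /* per-socket prepaid credit */

static void charge(int truesize)    /* cf. tcp_charge_skb() */
{
    while (forward_alloc < truesize) {   /* borrow whole quanta, cf. tcp_mem_schedule() */
        forward_alloc += MEM_QUANTUM;
        global_pool += MEM_QUANTUM;
    }
    forward_alloc -= truesize;
}

static void uncharge(int truesize)   /* cf. tcp_free_skb() */
{
    forward_alloc += truesize;
}

static void reclaim(void)            /* cf. tcp_mem_reclaim()/__tcp_mem_reclaim() */
{
    if (forward_alloc >= MEM_QUANTUM) {
        int quanta = forward_alloc / MEM_QUANTUM;
        global_pool -= quanta * MEM_QUANTUM;
        forward_alloc -= quanta * MEM_QUANTUM;
    }
}

int main(void)
{
    charge(1500);    /* queue two segments */
    charge(1500);
    printf("after charge:  forward_alloc=%d global=%d\n", forward_alloc, global_pool);
    uncharge(1500);  /* both segments acked and freed */
    uncharge(1500);
    reclaim();
    printf("after reclaim: forward_alloc=%d global=%d\n", forward_alloc, global_pool);
    return 0;
}

Only one 4096-byte quantum is ever borrowed for the two 1500-byte segments, and it is returned in full once the queue drains, which is the point of keeping per-socket credit between the skbs and the global pool.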
/* - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&tcp_lhash_lock).
 */
static inline void tcp_listen_lock(void)
{
    /* read_lock synchronizes with candidates to writers */
    read_lock(&tcp_lhash_lock);
    atomic_inc(&tcp_lhash_users);
    read_unlock(&tcp_lhash_lock);
}

static inline void tcp_listen_unlock(void)
{
    if (atomic_dec_and_test(&tcp_lhash_users))
        wake_up(&tcp_lhash_wait);
}

static inline int keepalive_intvl_when(struct tcp_opt *tp)
{
    return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(struct tcp_opt *tp)
{
    return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int tcp_fin_time(struct tcp_opt *tp)
{
    int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;

    if (fin_timeout < (tp->rto<<2) - (tp->rto>>1))
        fin_timeout = (tp->rto<<2) - (tp->rto>>1);

    return fin_timeout;
}

static inline int tcp_paws_check(struct tcp_opt *tp, int rst)
{
    if ((s32)(tp->rcv_tsval - tp->ts_recent) >= 0)
        return 0;
    if (xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_24DAYS)
        return 0;

    /* RST segments are not recommended to carry timestamps,
       and, if they do, it is recommended to ignore PAWS because
       "their cleanup function should take precedence over timestamps."
       Certainly, this is a mistake. It is necessary to understand the reasons
       for this constraint before relaxing it: if the peer reboots, its clock may go
       out-of-sync and half-open connections will not be reset.
       Actually, the problem would not exist if all
       implementations followed the draft about maintaining clocks
       across reboots. Linux-2.2 DOES NOT!

       However, we can relax time bounds for RST segments to MSL.
     */
    if (rst && xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_MSL)
        return 0;
    return 1;
}

#define TCP_CHECK_TIMER(sk) do { } while (0)

#endif    /* _TCP_H */
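Editor's note: tcp_paws_check() above compares the 32-bit timestamps as serial numbers. Subtracting the values and interpreting the result as a signed 32-bit integer makes the "not older than" test robust against wraparound of the timestamp clock. A standalone illustration (the sample values are arbitrary and the helper name is illustrative):

#include <stdio.h>
#include <stdint.h>

/* Mirrors the first test in tcp_paws_check(): nonzero when rcv_tsval is
 * not older than ts_recent under serial-number arithmetic. */
static int tsval_not_older(uint32_t rcv_tsval, uint32_t ts_recent)
{
    return (int32_t)(rcv_tsval - ts_recent) >= 0;
}

int main(void)
{
    /* Ordinary case: 1000 is newer than 900. */
    printf("%d\n", tsval_not_older(1000, 900));                 /* 1 */

    /* Stale segment: 900 is older than 1000, so PAWS would reject it. */
    printf("%d\n", tsval_not_older(900, 1000));                 /* 0 */

    /* Wrapped clock: 0x00000010 really follows 0xfffffff0, and the
     * signed difference (+32) says so despite the numeric wrap. */
    printf("%d\n", tsval_not_older(0x00000010u, 0xfffffff0u));  /* 1 */
    return 0;
}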
