
📄 tcp.h

📁 Linux Kernel 2.6.9 for OMAP1710
extern void			tcp_destroy_sock(struct sock *sk);

/*
 * Calculate(/check) TCP checksum
 */
static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
				   unsigned long saddr, unsigned long daddr,
				   unsigned long base)
{
	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}

static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
{
	return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
}

static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static __inline__ void tcp_prequeue_init(struct tcp_opt *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 * NOTE: is this not too big to inline?
 */
static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_opt *tp = tcp_sk(sk);

	if (!sysctl_tcp_low_latency && tp->ucopy.task) {
		__skb_queue_tail(&tp->ucopy.prequeue, skb);
		tp->ucopy.memory += skb->truesize;
		if (tp->ucopy.memory > sk->sk_rcvbuf) {
			struct sk_buff *skb1;

			BUG_ON(sock_owned_by_user(sk));

			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
				sk->sk_backlog_rcv(sk, skb1);
				NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED);
			}

			tp->ucopy.memory = 0;
		} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
			wake_up_interruptible(sk->sk_sleep);
			if (!tcp_ack_scheduled(tp))
				tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
		}
		return 1;
	}
	return 0;
}

#undef STATE_TRACE

#ifdef STATE_TRACE
static char *statename[]={
	"Unused","Established","Syn Sent","Syn Recv",
	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
	"Close Wait","Last ACK","Listen","Closing"
};
#endif

static __inline__ void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(TCP_MIB_CURRESTAB);
		break;

	case TCP_CLOSE:
		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
			TCP_INC_STATS(TCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (tcp_sk(sk)->bind_hash &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			tcp_put_port(sk);
		/* fall through */
	default:
		if (oldstate==TCP_ESTABLISHED)
			TCP_DEC_STATS(TCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;

#ifdef STATE_TRACE
	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
#endif
}

static __inline__ void tcp_done(struct sock *sk)
{
	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		tcp_destroy_sock(sk);
}

static __inline__ void tcp_sack_reset(struct tcp_opt *tp)
{
	tp->dsack = 0;
	tp->eff_sacks = 0;
	tp->num_sacks = 0;
}

static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_opt *tp, __u32 tstamp)
{
	if (tp->tstamp_ok) {
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_TIMESTAMP << 8) |
					  TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);
		*ptr++ = htonl(tp->ts_recent);
	}
	if (tp->eff_sacks) {
		struct tcp_sack_block *sp = tp->dsack ? tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK << 8) |
					  (TCPOLEN_SACK_BASE +
					   (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK)));
		for(this_sack = 0; this_sack < tp->eff_sacks; this_sack++) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}
		if (tp->dsack) {
			tp->dsack = 0;
			tp->eff_sacks--;
		}
	}
}

/* Construct a tcp options header for a SYN or SYN_ACK packet.
 * If this is ever changed make sure to change the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 */
static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
					 int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
{
	/* We always get an MSS option.
	 * The option bytes which will be seen in normal data
	 * packets should timestamps be used, must be in the MSS
	 * advertised.  But we subtract them from tp->mss_cache so
	 * that calculations in tcp_sendmsg are simpler etc.
	 * So account for this fact here if necessary.  If we
	 * don't do this correctly, as a receiver we won't
	 * recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK
	 * rules correctly.
	 * SACKs don't matter, we never delay an ACK when we
	 * have any of those going out.
	 */
	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
	if (ts) {
		if(sack)
			*ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		else
			*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);		/* TSVAL */
		*ptr++ = htonl(ts_recent);	/* TSECR */
	} else if(sack)
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
	if (offer_wscale)
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale<=0 ?
		(space>>(-sysctl_tcp_adv_win_scale)) :
		space - (space>>sysctl_tcp_adv_win_scale);
}

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
					 struct sock *child)
{
	struct tcp_opt *tp = tcp_sk(sk);

	req->sk = child;
	sk_acceptq_added(sk);

	if (!tp->accept_queue_tail) {
		tp->accept_queue = req;
	} else {
		tp->accept_queue_tail->dl_next = req;
	}
	tp->accept_queue_tail = req;
	req->dl_next = NULL;
}

struct tcp_listen_opt
{
	u8			max_qlen_log;	/* log_2 of maximal queued SYNs */
	int			qlen;
	int			qlen_young;
	int			clock_hand;
	u32			hash_rnd;
	struct open_request	*syn_table[TCP_SYNQ_HSIZE];
};

static inline void
tcp_synq_removed(struct sock *sk, struct open_request *req)
{
	struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;

	if (--lopt->qlen == 0)
		tcp_delete_keepalive_timer(sk);
	if (req->retrans == 0)
		lopt->qlen_young--;
}

static inline void tcp_synq_added(struct sock *sk)
{
	struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;

	if (lopt->qlen++ == 0)
		tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
	lopt->qlen_young++;
}

static inline int tcp_synq_len(struct sock *sk)
{
	return tcp_sk(sk)->listen_opt->qlen;
}

static inline int tcp_synq_young(struct sock *sk)
{
	return tcp_sk(sk)->listen_opt->qlen_young;
}

static inline int tcp_synq_is_full(struct sock *sk)
{
	return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log;
}

static inline void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req,
				       struct open_request **prev)
{
	write_lock(&tp->syn_wait_lock);
	*prev = req->dl_next;
	write_unlock(&tp->syn_wait_lock);
}

static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
				     struct open_request **prev)
{
	tcp_synq_unlink(tcp_sk(sk), req, prev);
	tcp_synq_removed(sk, req);
	tcp_openreq_free(req);
}

static __inline__ void tcp_openreq_init(struct open_request *req,
					struct tcp_opt *tp,
					struct sk_buff *skb)
{
	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = tp->mss_clamp;
	req->ts_recent = tp->saw_tstamp ? tp->rcv_tsval : 0;
	req->tstamp_ok = tp->tstamp_ok;
	req->sack_ok = tp->sack_ok;
	req->snd_wscale = tp->snd_wscale;
	req->wscale_ok = tp->wscale_ok;
	req->acked = 0;
	req->ecn_ok = 0;
	req->rmt_port = skb->h.th->source;
}

extern void tcp_enter_memory_pressure(void);

extern void tcp_listen_wlock(void);

/* - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&tcp_lhash_lock).
 */

static inline void tcp_listen_lock(void)
{
	/* read_lock synchronizes to candidates to writers */
	read_lock(&tcp_lhash_lock);
	atomic_inc(&tcp_lhash_users);
	read_unlock(&tcp_lhash_lock);
}

static inline void tcp_listen_unlock(void)
{
	if (atomic_dec_and_test(&tcp_lhash_users))
		wake_up(&tcp_lhash_wait);
}

static inline int keepalive_intvl_when(struct tcp_opt *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(struct tcp_opt *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int tcp_fin_time(struct tcp_opt *tp)
{
	int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;

	if (fin_timeout < (tp->rto<<2) - (tp->rto>>1))
		fin_timeout = (tp->rto<<2) - (tp->rto>>1);

	return fin_timeout;
}

static inline int tcp_paws_check(struct tcp_opt *tp, int rst)
{
	if ((s32)(tp->rcv_tsval - tp->ts_recent) >= 0)
		return 0;
	if (xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_24DAYS)
		return 0;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, it is mistake. It is necessary to understand the reasons
	   of this constraint to relax it: if peer reboots, clock may go
	   out-of-sync and half-open connections will not be reset.
	   Actually, the problem would be not existing if all
	   the implementations followed draft about maintaining clock
	   via reboots. Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}

static inline void tcp_v4_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_TSO) {
		if (sk->sk_no_largesend || dst->header_len)
			sk->sk_route_caps &= ~NETIF_F_TSO;
	}
}

#define TCP_CHECK_TIMER(sk) do { } while (0)

static inline int tcp_use_frto(const struct sock *sk)
{
	const struct tcp_opt *tp = tcp_sk(sk);

	/* F-RTO must be activated in sysctl and there must be some
	 * unsent new data, and the advertised window should allow
	 * sending it.
	 */
	return (sysctl_tcp_frto && sk->sk_send_head &&
		!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
		       tp->snd_una + tp->snd_wnd));
}

static inline void tcp_mib_init(void)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
}

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

struct tcp_seq_afinfo {
	struct module		*owner;
	char			*name;
	sa_family_t		family;
	int			(*seq_show) (struct seq_file *m, void *v);
	struct file_operations	*seq_fops;
};

struct tcp_iter_state {
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, sbucket, num, uid;
	struct seq_operations	seq_ops;
};

extern int tcp_proc_register(struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo);

/* TCP Westwood functions and constants */

#define TCP_WESTWOOD_INIT_RTT  (20*HZ)           /* maybe too conservative?! */
#define TCP_WESTWOOD_RTT_MIN   (HZ/20)           /* 50ms */

static inline void tcp_westwood_update_rtt(struct tcp_opt *tp, __u32 rtt_seq)
{
        if (tcp_is_westwood(tp))
                tp->westwood.rtt = rtt_seq;
}

void __tcp_westwood_fast_bw(struct sock *, struct sk_buff *);
void __tcp_westwood_slow_bw(struct sock *, struct sk_buff *);

static inline void tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
{
        if (tcp_is_westwood(tcp_sk(sk)))
                __tcp_westwood_fast_bw(sk, skb);
}

static inline void tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
{
        if (tcp_is_westwood(tcp_sk(sk)))
                __tcp_westwood_slow_bw(sk, skb);
}

static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_opt *tp)
{
        return max((tp->westwood.bw_est) * (tp->westwood.rtt_min) /
		   (__u32) (tp->mss_cache_std),
		   2U);
}

static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_opt *tp)
{
	return tcp_is_westwood(tp) ? __tcp_westwood_bw_rttmin(tp) : 0;
}

static inline int tcp_westwood_ssthresh(struct tcp_opt *tp)
{
	__u32 ssthresh = 0;

	if (tcp_is_westwood(tp)) {
		ssthresh = __tcp_westwood_bw_rttmin(tp);
		if (ssthresh)
			tp->snd_ssthresh = ssthresh;
	}

	return (ssthresh != 0);
}

static inline int tcp_westwood_cwnd(struct tcp_opt *tp)
{
	__u32 cwnd = 0;

	if (tcp_is_westwood(tp)) {
		cwnd = __tcp_westwood_bw_rttmin(tp);
		if (cwnd)
			tp->snd_cwnd = cwnd;
	}
	return (cwnd != 0);
}

#endif	/* _TCP_H */
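
Editor's note: two of the helpers above lean on compact shift arithmetic. tcp_win_from_space() advertises either space >> (-scale) or space minus a 1/2^scale overhead reserve, and tcp_synq_is_full() uses a right shift by max_qlen_log as a "qlen >= 2^max_qlen_log" test. The following standalone sketch is an editor's illustration, not part of tcp.h; the helper names and sample numbers are invented for the example.

#include <stdio.h>

/* Mirrors the arithmetic of tcp_win_from_space(): a positive scale reserves
 * space/2^scale bytes for protocol overhead and advertises the remainder;
 * a non-positive scale advertises space/2^(-scale) directly. */
static int win_from_space(int space, int adv_win_scale)
{
	return adv_win_scale <= 0 ?
		(space >> (-adv_win_scale)) :
		space - (space >> adv_win_scale);
}

/* Mirrors tcp_synq_is_full(): the shifted value is non-zero exactly when
 * qlen >= 2^max_qlen_log, i.e. the SYN queue has reached its limit. */
static int synq_is_full(unsigned int qlen, unsigned int max_qlen_log)
{
	return qlen >> max_qlen_log;
}

int main(void)
{
	/* Assuming adv_win_scale = 2: 3/4 of a 174760-byte buffer, i.e. 131070. */
	printf("win_from_space(174760, 2) = %d\n", win_from_space(174760, 2));
	/* Assuming max_qlen_log = 8: the queue counts as full once qlen reaches 256. */
	printf("synq_is_full(255, 8) = %d\n", synq_is_full(255, 8));
	printf("synq_is_full(256, 8) = %d\n", synq_is_full(256, 8));
	return 0;
}

Compiled with any C compiler, this prints 131070, 0 and 1 for the sample inputs, which is the behaviour the inline helpers implement for the same values.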
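
Editor's note: tcp_syn_build_options() above packs each TCP option group into aligned 32-bit words before byte-swapping them with htonl(). A second standalone sketch (again an editor's illustration with assumed sample values, not kernel code) prints the raw words for a SYN that offers MSS, SACK-permitted and timestamps, using the standard option kind/length pairs behind TCPOPT_MSS, TCPOPT_SACK_PERM and TCPOPT_TIMESTAMP (2/4, 4/2 and 8/10 respectively).

#include <stdio.h>

int main(void)
{
	unsigned int mss   = 1460;	/* assumed sample MSS */
	unsigned int tsval = 12345;	/* assumed sample timestamp value */
	unsigned int tsecr = 0;		/* nothing to echo on an initial SYN */

	/* MSS option word, as built by (TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss. */
	unsigned int mss_word = (2u << 24) | (4u << 16) | mss;

	/* SACK-permitted folded in front of the timestamp option header, matching
	 * the "ts && sack" branch of tcp_syn_build_options(). */
	unsigned int sack_ts_word = (4u << 24) | (2u << 16) | (8u << 8) | 10u;

	printf("MSS word:        0x%08x\n", mss_word);		/* 0x020405b4 */
	printf("SACK+TS header:  0x%08x\n", sack_ts_word);	/* 0x0402080a */
	printf("TSVAL, TSECR:    %u %u\n", tsval, tsecr);
	return 0;
}

These are the host-order values the kernel hands to htonl()/__constant_htonl(); on the wire the words appear in network byte order.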
