
tcp.h
From: "Embedded Systems Design and Experiments" textbook 2 source, Linux kernel porting and compilation (page 1 of 4)
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;

extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
extern int tcp_memory_pressure;

struct open_request;

struct or_calltable {
	int  family;
	int  (*rtx_syn_ack)	(struct sock *sk, struct open_request *req, struct dst_entry *);
	void (*send_ack)	(struct sk_buff *skb, struct open_request *req);
	void (*destructor)	(struct open_request *req);
	void (*send_reset)	(struct sk_buff *skb);
};

struct tcp_v4_open_req {
	__u32			loc_addr;
	__u32			rmt_addr;
	struct ip_options	*opt;
};

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct tcp_v6_open_req {
	struct in6_addr		loc_addr;
	struct in6_addr		rmt_addr;
	struct sk_buff		*pktopts;
	int			iif;
};
#endif

/* this structure is too big */
struct open_request {
	struct open_request	*dl_next;	/* Must be first member! */
	__u32			rcv_isn;
	__u32			snt_isn;
	__u16			rmt_port;
	__u16			mss;
	__u8			retrans;
	__u8			index;
	__u16			snd_wscale : 4,
				rcv_wscale : 4,
				tstamp_ok : 1,
				sack_ok : 1,
				wscale_ok : 1,
				ecn_ok : 1,
				acked : 1;
	/* The following two fields can be easily recomputed I think -AK */
	__u32			window_clamp;	/* window clamp at creation time */
	__u32			rcv_wnd;	/* rcv_wnd offered first time */
	__u32			ts_recent;
	unsigned long		expires;
	struct or_calltable	*class;
	struct sock		*sk;
	union {
		struct tcp_v4_open_req v4_req;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		struct tcp_v6_open_req v6_req;
#endif
	} af;
};

/* SLAB cache for open requests. */
extern kmem_cache_t *tcp_openreq_cachep;

#define tcp_openreq_alloc()		kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
#define tcp_openreq_fastfree(req)	kmem_cache_free(tcp_openreq_cachep, req)

static inline void tcp_openreq_free(struct open_request *req)
{
	req->class->destructor(req);
	tcp_openreq_fastfree(req);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define TCP_INET_FAMILY(fam) 1
#endif
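/* [Editor's illustration -- not part of the original header.]
 * A minimal sketch of the open_request lifecycle implied by the macros
 * above: allocate from the SLAB cache, stamp a few fields, and release.
 * tcp_openreq_free() runs the family-specific destructor before the slab
 * free; tcp_openreq_fastfree() skips it.  "example_class" and the field
 * values are hypothetical.
 */
#if 0
static void openreq_example(struct or_calltable *example_class)
{
	struct open_request *req = tcp_openreq_alloc();	/* may fail: SLAB_ATOMIC */

	if (req == NULL)
		return;
	req->rcv_isn = 0x12345678;	/* peer's initial sequence number */
	req->snt_isn = 0x9abcdef0;	/* our initial sequence number */
	req->retrans = 0;
	req->class   = example_class;	/* supplies the destructor hook */
	tcp_openreq_free(req);		/* destructor, then kmem_cache_free */
}
#endif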
/*
 *	Pointers to address related TCP functions
 *	(i.e. things that depend on the address family)
 *
 *	BUGGG_FUTURE: all the idea behind this struct is wrong.
 *	It mixes socket frontend with transport function.
 *	With port sharing between IPv6/v4 it gives the only advantage,
 *	only poor IPv6 needs to permanently recheck, that it
 *	is still IPv6 8)8) It must be cleaned up as soon as possible.
 *						--ANK (980802)
 */
struct tcp_func {
	int		(*queue_xmit)		(struct sk_buff *skb);

	void		(*send_check)		(struct sock *sk,
						 struct tcphdr *th,
						 int len,
						 struct sk_buff *skb);

	int		(*rebuild_header)	(struct sock *sk);

	int		(*conn_request)		(struct sock *sk,
						 struct sk_buff *skb);

	struct sock *	(*syn_recv_sock)	(struct sock *sk,
						 struct sk_buff *skb,
						 struct open_request *req,
						 struct dst_entry *dst);

	int		(*hash_connecting)	(struct sock *sk);

	int		(*remember_stamp)	(struct sock *sk);

	__u16		net_header_len;

	int		(*setsockopt)		(struct sock *sk,
						 int level,
						 int optname,
						 char *optval,
						 int optlen);

	int		(*getsockopt)		(struct sock *sk,
						 int level,
						 int optname,
						 char *optval,
						 int *optlen);

	void		(*addr2sockaddr)	(struct sock *sk,
						 struct sockaddr *);

	int		sockaddr_len;
};

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

extern __inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}

extern __inline int after(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq2-seq1) < 0;
}

/* is s2<=s1<=s3 ? */
extern __inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
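/* [Editor's illustration -- not part of the original header.]
 * Why the signed-difference trick above works: sequence numbers live on
 * a 2^32 circle, so "seq1 before seq2" is decided by which half of the
 * circle the unsigned difference lands in, read back as a signed value.
 * A hypothetical sanity check using values straddling the wrap point:
 */
#if 0
static void seq_wrap_example(void)
{
	__u32 a = 0xfffffff0;	/* just below the 2^32 wrap */
	__u32 b = 0x00000010;	/* just above it */

	/* before(a, b): (__s32)(a - b) == (__s32)0xffffffe0 < 0, so true.
	 * between(): 0x05 lies on the short arc from a to b, so true. */
	if (before(a, b) && after(b, a) && between(0x00000005, a, b))
		printk(KERN_DEBUG "wraparound comparisons behave as expected\n");
}
#endif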
extern struct proto tcp_prot;

extern struct tcp_mib tcp_statistics[NR_CPUS*2];
#define TCP_INC_STATS(field)		SNMP_INC_STATS(tcp_statistics, field)
#define TCP_INC_STATS_BH(field)		SNMP_INC_STATS_BH(tcp_statistics, field)
#define TCP_INC_STATS_USER(field)	SNMP_INC_STATS_USER(tcp_statistics, field)

extern void	tcp_put_port(struct sock *sk);
extern void	__tcp_put_port(struct sock *sk);
extern void	tcp_inherit_port(struct sock *sk, struct sock *child);

extern void	tcp_v4_err(struct sk_buff *skb, u32);

extern void	tcp_shutdown(struct sock *sk, int how);

extern int	tcp_v4_rcv(struct sk_buff *skb);

extern int	tcp_v4_remember_stamp(struct sock *sk);

extern int	tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw);

extern int	tcp_sendmsg(struct sock *sk, struct msghdr *msg, int size);
extern ssize_t	tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);

extern int	tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);

extern int	tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
				      struct tcphdr *th, unsigned len);

extern int	tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
				    struct tcphdr *th, unsigned len);

enum tcp_ack_state_t {
	TCP_ACK_SCHED	= 1,
	TCP_ACK_TIMER	= 2,
	TCP_ACK_PUSHED	= 4
};

static inline void tcp_schedule_ack(struct tcp_opt *tp)
{
	tp->ack.pending |= TCP_ACK_SCHED;
}

static inline int tcp_ack_scheduled(struct tcp_opt *tp)
{
	return tp->ack.pending & TCP_ACK_SCHED;
}

static __inline__ void tcp_dec_quickack_mode(struct tcp_opt *tp)
{
	if (tp->ack.quick && --tp->ack.quick == 0) {
		/* Leaving quickack mode we deflate ATO. */
		tp->ack.ato = TCP_ATO_MIN;
	}
}
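/* [Editor's illustration -- not part of the original header.]
 * The tcp_ack_state_t values are distinct single bits, so ack.pending
 * acts as a small flag word: SCHED marks that an ACK is wanted, TIMER
 * that the delayed-ACK timer is armed, PUSHED that one was already
 * forced out.  A hypothetical walk through the flag arithmetic:
 */
#if 0
static void ack_flags_example(struct tcp_opt *tp)
{
	tcp_schedule_ack(tp);			/* pending |= TCP_ACK_SCHED */
	tp->ack.pending |= TCP_ACK_TIMER;	/* delack timer armed */

	if (tcp_ack_scheduled(tp))		/* true: SCHED bit is set */
		tp->ack.pending &= ~TCP_ACK_TIMER; /* e.g. timer cancelled */
}
#endif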
extern void tcp_enter_quickack_mode(struct tcp_opt *tp);

static __inline__ void tcp_delack_init(struct tcp_opt *tp)
{
	memset(&tp->ack, 0, sizeof(tp->ack));
}

static inline void tcp_clear_options(struct tcp_opt *tp)
{
	tp->tstamp_ok = tp->sack_ok = tp->wscale_ok = tp->snd_wscale = 0;
}

enum tcp_tw_status {
	TCP_TW_SUCCESS	= 0,
	TCP_TW_RST	= 1,
	TCP_TW_ACK	= 2,
	TCP_TW_SYN	= 3
};

extern enum tcp_tw_status	tcp_timewait_state_process(struct tcp_tw_bucket *tw,
							   struct sk_buff *skb,
							   struct tcphdr *th,
							   unsigned len);

extern struct sock *	tcp_check_req(struct sock *sk, struct sk_buff *skb,
				      struct open_request *req,
				      struct open_request **prev);
extern int	tcp_child_process(struct sock *parent, struct sock *child,
				  struct sk_buff *skb);
extern void	tcp_enter_loss(struct sock *sk, int how);
extern void	tcp_clear_retrans(struct tcp_opt *tp);
extern void	tcp_update_metrics(struct sock *sk);

extern void	tcp_close(struct sock *sk, long timeout);
extern struct sock *	tcp_accept(struct sock *sk, int flags, int *err);
extern unsigned int	tcp_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);

extern void	tcp_write_space(struct sock *sk);

extern int	tcp_getsockopt(struct sock *sk, int level, int optname,
			       char *optval, int *optlen);
extern int	tcp_setsockopt(struct sock *sk, int level, int optname,
			       char *optval, int optlen);
extern void	tcp_set_keepalive(struct sock *sk, int val);
extern int	tcp_recvmsg(struct sock *sk, struct msghdr *msg, int len,
			    int nonblock, int flags, int *addr_len);

extern int	tcp_listen_start(struct sock *sk);

extern void	tcp_parse_options(struct sk_buff *skb, struct tcp_opt *tp,
				  int estab);

/*
 *	TCP v4 functions exported for the inet6 API
 */

extern int	tcp_v4_rebuild_header(struct sock *sk);

extern int	tcp_v4_build_header(struct sock *sk, struct sk_buff *skb);

extern void	tcp_v4_send_check(struct sock *sk, struct tcphdr *th,
				  int len, struct sk_buff *skb);

extern int	tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);

extern struct sock *	tcp_create_openreq_child(struct sock *sk,
						 struct open_request *req,
						 struct sk_buff *skb);

extern struct sock *	tcp_v4_syn_recv_sock(struct sock *sk,
					     struct sk_buff *skb,
					     struct open_request *req,
					     struct dst_entry *dst);

extern int	tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);

extern int	tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			       int addr_len);

extern int	tcp_connect(struct sock *sk, struct sk_buff *skb);

extern struct sk_buff *	tcp_make_synack(struct sock *sk,
					struct dst_entry *dst,
					struct open_request *req);

extern int	tcp_disconnect(struct sock *sk, int flags);

extern void	tcp_unhash(struct sock *sk);

extern int	tcp_v4_hash_connecting(struct sock *sk);

/* From syncookies.c */
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

/* tcp_output.c */

extern int  tcp_write_xmit(struct sock *, int nonagle);
extern int  tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int  tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, int priority);
extern int  tcp_send_synack(struct sock *);
extern int  tcp_transmit_skb(struct sock *, struct sk_buff *);
extern void tcp_send_skb(struct sock *, struct sk_buff *, int force_queue, unsigned mss_now);
extern void tcp_push_one(struct sock *, unsigned mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
extern void tcp_clear_xmit_timers(struct sock *);
extern void tcp_delete_keepalive_timer(struct sock *);
extern void tcp_reset_keepalive_timer(struct sock *, unsigned long);
extern int  tcp_sync_mss(struct sock *sk, u32 pmtu);

extern const char timer_bug_msg[];

static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	switch (what) {
	case TCP_TIME_RETRANS:
	case TCP_TIME_PROBE0:
		tp->pending = 0;
#ifdef TCP_CLEAR_TIMERS
		if (timer_pending(&tp->retransmit_timer) &&
		    del_timer(&tp->retransmit_timer))
			__sock_put(sk);
#endif
		break;
	case TCP_TIME_DACK:
		tp->ack.blocked = 0;
		tp->ack.pending = 0;
#ifdef TCP_CLEAR_TIMERS
		if (timer_pending(&tp->delack_timer) &&
		    del_timer(&tp->delack_timer))
			__sock_put(sk);
#endif
		break;
	default:
		printk(timer_bug_msg);
		return;
	};
}

/*
 *	Reset the retransmission timer
 */
static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	if (when > TCP_RTO_MAX) {
#ifdef TCP_DEBUG
		printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n",
		       sk, what, when, current_text_addr());
#endif
		when = TCP_RTO_MAX;
	}

	switch (what) {
	case TCP_TIME_RETRANS:
	case TCP_TIME_PROBE0:
		tp->pending = what;
		tp->timeout = jiffies + when;
		if (!mod_timer(&tp->retransmit_timer, tp->timeout))
			sock_hold(sk);
		break;

	case TCP_TIME_DACK:
		tp->ack.pending |= TCP_ACK_TIMER;
		tp->ack.timeout = jiffies + when;
		if (!mod_timer(&tp->delack_timer, tp->ack.timeout))
			sock_hold(sk);
		break;

	default:
		printk(KERN_DEBUG "bug: unknown timer value\n");
	};
}

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 */
static __inline__ unsigned int tcp_current_mss(struct sock *sk)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss_now = tp->mss_cache;

	if (dst && dst->pmtu != tp->pmtu_cookie)
		mss_now = tcp_sync_mss(sk, dst->pmtu);

	if (tp->eff_sacks)
		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
			    (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK));
	return mss_now;
}

/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We have no direct information about the MSS.
 * It's better to underestimate the RCV_MSS rather than overestimate.
 * Overestimations make us ACK less frequently than needed.
 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
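/* [Editor's illustration -- not part of the original header.]
 * A worked instance of the SACK adjustment in tcp_current_mss() above,
 * assuming the standard option sizes TCPOLEN_SACK_BASE_ALIGNED == 4 and
 * TCPOLEN_SACK_PERBLOCK == 8.  The starting MSS is hypothetical.
 */
#if 0
static void current_mss_example(void)
{
	int mss_now = 1460;	/* mss_cache for a 1500-byte MTU, no TCP options */
	int eff_sacks = 2;	/* two SACK blocks to echo in each segment */

	/* 1460 - (4 + 2 * 8) = 1440 bytes of payload per segment. */
	mss_now -= TCPOLEN_SACK_BASE_ALIGNED + eff_sacks * TCPOLEN_SACK_PERBLOCK;
	printk(KERN_DEBUG "effective mss: %d\n", mss_now);	/* prints 1440 */
}
#endif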
