⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 tcp.h

📁 GNU Hurd 源代码
💻 H
📖 第 1 页 / 共 3 页
字号:
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#include <linux/config.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <net/checksum.h>

/* This is for all connections with a full identity, no wildcards.
 * Half of the table is for TIME_WAIT, the other half is for the
 * rest.
 *
 * This needs to be shared by v4 and v6 because the lookup and hashing
 * code needs to work with different AF's yet the port space is
 * shared.
 */
extern unsigned int tcp_ehash_size;	/* number of chains in tcp_ehash */
extern struct sock **tcp_ehash;		/* established-state hash table */

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define TCP_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */
extern struct sock *tcp_listening_hash[TCP_LHTABLE_SIZE];

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific sk->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point, is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.
 * As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->reuse && (newsk->state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit, if it is set and the socket trying to bind has
 * sk->reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (ie. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 */
/* One bucket per in-use local port; chained off tcp_bhash. */
struct tcp_bind_bucket {
	unsigned short		port;		/* local port number this bucket covers */
	unsigned short		fastreuse;	/* flag bit described above: all owners have
						 * sk->reuse set and none are TCP_LISTEN */
	struct tcp_bind_bucket	*next;		/* hash-chain forward link */
	struct sock		*owners;	/* sockets bound to this port */
	struct tcp_bind_bucket	**pprev;	/* hash-chain back link (for O(1) unlink) */
};

extern unsigned int tcp_bhash_size;		/* number of chains in tcp_bhash */
extern struct tcp_bind_bucket **tcp_bhash;	/* bind-bucket hash table */
extern kmem_cache_t *tcp_bucket_cachep;		/* SLAB cache for tcp_bind_bucket */
extern struct tcp_bind_bucket *tcp_bucket_create(unsigned short snum);
extern void tcp_bucket_unlock(struct sock *sk);
extern int tcp_port_rover;			/* rover for local-port allocation */

/* Level-1 socket-demux cache. */
#define TCP_NUM_REGS		32

extern struct sock *tcp_regs[TCP_NUM_REGS];

/* Hash the (16-bit) foreign port into a demux-cache slot. */
#define TCP_RHASH_FN(__fport) \
	((((__fport) >> 7) ^ (__fport)) & (TCP_NUM_REGS - 1))
#define TCP_RHASH(__fport)	tcp_regs[TCP_RHASH_FN((__fport))]
#define TCP_SK_RHASH_FN(__sock)	TCP_RHASH_FN((__sock)->dport)
#define TCP_SK_RHASH(__sock)	tcp_regs[TCP_SK_RHASH_FN((__sock))]

/* Remove sk from the level-1 demux cache, but only if it is the
 * socket currently occupying its slot (the cache holds at most one
 * socket per slot and is only a hint).
 */
static __inline__ void tcp_reg_zap(struct sock *sk)
{
	struct sock **rpp;

	rpp = &(TCP_SK_RHASH(sk));
	if(*rpp == sk)
		*rpp = NULL;
}

/* These are AF independent. */
/* Map a local port to its tcp_bhash chain.  The mask-based hash
 * assumes tcp_bhash_size is a power of two — confirm at the site
 * where tcp_bhash_size is chosen.
 */
static __inline__ int tcp_bhashfn(__u16 lport)
{
	return (lport & (tcp_bhash_size - 1));
}

/* This is a TIME_WAIT bucket.
 * It works around the memory consumption
 * problems of sockets in such a state on heavily loaded servers, but
 * without violating the protocol specification.
 */
struct tcp_tw_bucket {
	/* These _must_ match the beginning of struct sock precisely.
	 * XXX Yes I know this is gross, but I'd have to edit every single
	 * XXX networking file if I created a "struct sock_header". -DaveM
	 * Just don't forget -fno-strict-aliasing, but it should be really
	 * fixed -AK
	 *
	 * NOTE(review): do not reorder or resize anything in this first
	 * group — lookup code casts between this struct and struct sock.
	 */
	struct sock		*sklist_next;
	struct sock		*sklist_prev;
	struct sock		*bind_next;
	struct sock		**bind_pprev;
	__u32			daddr;
	__u32			rcv_saddr;
	__u16			dport;
	unsigned short		num;
	int			bound_dev_if;
	struct sock		*next;
	struct sock		**pprev;
	unsigned char		state,
				zapped;
	__u16			sport;
	unsigned short		family;
	unsigned char		reuse,
				nonagle;

	/* And these are ours. */
	__u32			rcv_nxt, snd_nxt;	/* sequence state frozen at TIME_WAIT entry */
	__u16			window;
	struct tcp_func		*af_specific;
	struct tcp_bind_bucket	*tb;			/* bind bucket still held by this bucket */
	struct tcp_tw_bucket	*next_death;		/* links for the TIME_WAIT reaper (see */
	struct tcp_tw_bucket	**pprev_death;		/* TCP_TWKILL_* below) */
	int			death_slot;		/* reaper wheel slot this bucket sits in */
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct in6_addr		v6_daddr;
	struct in6_addr		v6_rcv_saddr;
#endif
};

extern kmem_cache_t *tcp_timewait_cachep;	/* SLAB cache for tcp_tw_bucket */

/* Socket demux engine toys.
 */
/* Pack source and destination ports into one __u32 so a single
 * compare matches both; layout follows host byte order so the pair
 * can be compared directly against the two adjacent __u16 fields.
 */
#ifdef __BIG_ENDIAN
#define TCP_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__sport)<<16) | (__u32)(__dport))
#else /* __LITTLE_ENDIAN */
#define TCP_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__dport)<<16) | (__u32)(__sport))
#endif

#if (BITS_PER_LONG == 64)
/* On 64-bit, pre-combine saddr/daddr into one __u64 cookie so the
 * match is two word compares.  NOTE(review): the __u64 read through
 * &sk->daddr aliases the adjacent daddr/rcv_saddr fields — depends on
 * struct sock field adjacency and -fno-strict-aliasing.
 */
#ifdef __BIG_ENDIAN
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
	__u64 __name = (((__u64)(__saddr))<<32)|((__u64)(__daddr));
#else /* __LITTLE_ENDIAN */
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
	__u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr));
#endif /* __BIG_ENDIAN */
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
	(((*((__u64 *)&((__sk)->daddr)))== (__cookie))	&&		\
	 ((*((__u32 *)&((__sk)->dport)))== (__ports))   &&		\
	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
	(((__sk)->daddr			== (__saddr))	&&		\
	 ((__sk)->rcv_saddr		== (__daddr))	&&		\
	 ((*((__u32 *)&((__sk)->dport)))== (__ports))   &&		\
	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
#endif /* 64-bit arch */

#define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif)			   \
	(((*((__u32 *)&((__sk)->dport)))== (__ports))   			&& \
	 ((__sk)->family		== AF_INET6)				&& \
	 !ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.daddr, (__saddr))		&& \
	 !ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.rcv_saddr, (__daddr))	&& \
	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))

/* These can have wildcards, don't try too hard. */
static __inline__ int tcp_lhashfn(unsigned short num)
{
	return num & (TCP_LHTABLE_SIZE - 1);
}

/* Listening-table slot for a socket, keyed by its local port (sk->num). */
static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
{
	return tcp_lhashfn(sk->num);
}

/* Note, that it is > than ipv6 header */
/* Worst-case network header: IPv4 header plus 40 bytes of IP options
 * (which also covers the 40-byte fixed IPv6 header).
 */
#define NETHDR_SIZE	(sizeof(struct iphdr) + 40)

/*
 * 40 is maximal IP options size
 * 20 is the maximum TCP options size we can currently construct on a SYN.
 * 40 is the maximum possible TCP options size.
 */
/* Worst-case skb sizes for each packet type; the trailing "+ 15"
 * leaves slack for link-layer alignment on top of MAX_HEADER.
 */
#define MAX_SYN_SIZE	(NETHDR_SIZE + sizeof(struct tcphdr) + 20 + MAX_HEADER + 15)
#define MAX_FIN_SIZE	(NETHDR_SIZE + sizeof(struct tcphdr) + MAX_HEADER + 15)
#define BASE_ACK_SIZE	(NETHDR_SIZE + MAX_HEADER + 15)
#define MAX_ACK_SIZE	(NETHDR_SIZE + sizeof(struct tcphdr) + MAX_HEADER + 15)
#define MAX_RESET_SIZE	(NETHDR_SIZE + sizeof(struct tcphdr) + MAX_HEADER + 15)
#define MAX_TCPHEADER_SIZE (NETHDR_SIZE + sizeof(struct tcphdr) + 20 + MAX_HEADER + 15)

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_WINDOW	32767
#define MIN_WINDOW	2048
#define MAX_ACK_BACKLOG	2
#define MAX_DELAY_ACK	2
#define TCP_WINDOW_DIFF	2048

/* urg_data states */
#define URG_VALID	0x0100
#define URG_NOTYET	0x0200
#define URG_READ	0x0400

#define TCP_RETR1	7	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 */

/* All of the following are expressed in jiffies (HZ ticks per second). */
#define TCP_TIMEOUT_LEN	(15*60*HZ) /* should be about 15 mins		*/
#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to successfully
				  * close the socket, about 60 seconds	*/
#define TCP_FIN_TIMEOUT (3*60*HZ) /* BSD style FIN_WAIT2 deadlock breaker */
#define TCP_ACK_TIME	(3*HZ)	/* time to delay before sending an ACK	*/
#define TCP_DONE_TIME	(5*HZ/2)/* maximum time to wait before actually
				 * destroying a socket			*/
#define TCP_WRITE_TIME	(30*HZ)	/* initial time to wait for an ACK,
			         * after last transmit			*/
#define TCP_TIMEOUT_INIT (3*HZ)	/* RFC 1122 initial timeout value	*/
#define TCP_SYN_RETRIES	 10	/* number of times to retry opening a
				 * connection 	(TCP_RETR2-....)
				 */
#define TCP_PROBEWAIT_LEN (1*HZ)/* time to wait between probes when
				 * I've got something to write and
				 * there is no window			*/
#define TCP_KEEPALIVE_TIME (120*60*HZ)		/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_PERIOD ((75*HZ)>>2)	/* period of keepalive check	*/

#define TCP_SYNACK_PERIOD	(HZ/2) /* How often to run the synack slow timer */
#define TCP_QUICK_TRIES		8  /* How often we try to retransmit, until
				    * we tell the link layer that it is something
				    * wrong (e.g. that it can expire redirects) */

#define TCP_BUCKETGC_PERIOD	(HZ)	/* bind-bucket garbage collection interval */

/* TIME_WAIT reaping mechanism.  TIME_WAIT buckets are spread over
 * TCP_TWKILL_SLOTS wheel slots; one slot is reaped per period, so a
 * full sweep takes about one TCP_TIMEWAIT_LEN.
 */
#define TCP_TWKILL_SLOTS	8	/* Please keep this a power of 2. */
#define TCP_TWKILL_PERIOD	((HZ*60)/TCP_TWKILL_SLOTS)

/*
 *	TCP option
 */
#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
#define TCPOPT_SACK             5       /* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */

/*
 *     TCP option lengths
 */
#define TCPOLEN_MSS            4
#define TCPOLEN_WINDOW         3
#define TCPOLEN_SACK_PERM      2
#define TCPOLEN_TIMESTAMP      10

/* But this is what stacks really send out.
 */
/* On-the-wire option sizes padded (with NOPs) to 32-bit alignment. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8

struct open_request;

/* Per-address-family operations on a pending (embryonic) connection
 * request: retransmit the SYN-ACK, free AF-specific state, send a RST.
 */
struct or_calltable {
	void (*rtx_syn_ack)	(struct sock *sk, struct open_request *req);
	void (*destructor)	(struct open_request *req);
	void (*send_reset)	(struct sk_buff *skb);
};

/* IPv4-specific part of an open_request. */
struct tcp_v4_open_req {
	__u32			loc_addr;	/* local address the SYN arrived on */
	__u32			rmt_addr;	/* remote (client) address */
	struct ip_options	*opt;		/* IP options echoed back on the SYN-ACK */
};

#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
/* IPv6-specific part of an open_request. */
struct tcp_v6_open_req {
	struct in6_addr		loc_addr;
	struct in6_addr		rmt_addr;
	struct sk_buff		*pktopts;
	int			iif;		/* incoming interface index */
};
#endif

/* this structure is too big */
/* State kept for a connection between receiving a SYN and completing
 * the three-way handshake (one per entry in the listen socket's queue).
 */
struct open_request {
	struct open_request	*dl_next; /* Must be first member! */
	__u32			rcv_isn;	/* peer's initial sequence number */
	__u32			snt_isn;	/* ISN we sent in the SYN-ACK */
	__u16			rmt_port;	/* remote port, network order presumably —
						 * matches dport usage above; verify at callers */
	__u16			mss;
	__u8			retrans;	/* SYN-ACK retransmit count */
	__u8			__pad;
	/* Option flags negotiated from the SYN. */
	unsigned snd_wscale : 4,
		rcv_wscale : 4,
		tstamp_ok : 1,
		sack_ok : 1,
		wscale_ok : 1;
	/* The following two fields can be easily recomputed I think -AK */
	__u32			window_clamp;	/* window clamp at creation time */
	__u32			rcv_wnd;	/* rcv_wnd offered first time */
	__u32			ts_recent;	/* timestamp echo value (PAWS) */
	unsigned long		expires;	/* when to retransmit / give up (jiffies) */
	struct or_calltable	*class;		/* AF-specific operations, see above */
	struct sock		*sk;
	union {
		struct tcp_v4_open_req v4_req;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct tcp_v6_open_req v6_req;
#endif
	} af;
#ifdef CONFIG_IP_TRANSPARENT_PROXY
	__u16			lcl_port; /* LVE */
#endif
};

/* SLAB cache for open requests. */
extern kmem_cache_t *tcp_openreq_cachep;

#define tcp_openreq_alloc()	kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -