
📄 tcp_ipv6.c

📁 ARM Embedded System Design and Example Development: Lab Textbook (II) source code
💻 C
📖 Page 1 of 4
/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: tcp_ipv6.c,v 1.142.2.1 2001/12/21 05:06:08 davem Exp $
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#define __NO_VERSION__
#include <linux/module.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/sched.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/ipsec.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/inet_ecn.h>

#include <asm/uaccess.h>

static void	tcp_v6_send_reset(struct sk_buff *skb);
static void	tcp_v6_or_send_ack(struct sk_buff *skb, struct open_request *req);
static void	tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
				  struct sk_buff *skb);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static int	tcp_v6_xmit(struct sk_buff *skb);

static struct tcp_func ipv6_mapped;
static struct tcp_func ipv6_specific;

/* I have no idea if this is a good hash for v6 or not. -DaveM */
static __inline__ int tcp_v6_hashfn(struct in6_addr *laddr, u16 lport,
				    struct in6_addr *faddr, u16 fport)
{
	int hashent = (lport ^ fport);

	hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
	hashent ^= hashent>>16;
	hashent ^= hashent>>8;
	return (hashent & (tcp_ehash_size - 1));
}

static __inline__ int tcp_v6_sk_hashfn(struct sock *sk)
{
	struct in6_addr *laddr = &sk->net_pinfo.af_inet6.rcv_saddr;
	struct in6_addr *faddr = &sk->net_pinfo.af_inet6.daddr;
	__u16 lport = sk->num;
	__u16 fport = sk->dport;
	return tcp_v6_hashfn(laddr, lport, faddr, fport);
}

/* Grrr, addr_type already calculated by caller, but I don't want
 * to add some silly "cookie" argument to this method just for that.
 * But it doesn't matter, the recalculation is in the rarest path
 * this function ever takes.
 */
static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
	struct tcp_bind_hashbucket *head;
	struct tcp_bind_bucket *tb;
	int ret;

	local_bh_disable();
	if (snum == 0) {
		int low = sysctl_local_port_range[0];
		int high = sysctl_local_port_range[1];
		int remaining = (high - low) + 1;
		int rover;

		spin_lock(&tcp_portalloc_lock);
		rover = tcp_port_rover;
		do {	rover++;
			if ((rover < low) || (rover > high))
				rover = low;
			head = &tcp_bhash[tcp_bhashfn(rover)];
			spin_lock(&head->lock);
			for (tb = head->chain; tb; tb = tb->next)
				if (tb->port == rover)
					goto next;
			break;
		next:
			spin_unlock(&head->lock);
		} while (--remaining > 0);
		tcp_port_rover = rover;
		spin_unlock(&tcp_portalloc_lock);

		/* Exhausted local port range during search? */
		ret = 1;
		if (remaining <= 0)
			goto fail;

		/* OK, here is the one we will use. */
		snum = rover;
		tb = NULL;
	} else {
		head = &tcp_bhash[tcp_bhashfn(snum)];
		spin_lock(&head->lock);
		for (tb = head->chain; tb != NULL; tb = tb->next)
			if (tb->port == snum)
				break;
	}
	if (tb != NULL && tb->owners != NULL) {
		if (tb->fastreuse != 0 && sk->reuse != 0 && sk->state != TCP_LISTEN) {
			goto success;
		} else {
			struct sock *sk2 = tb->owners;
			int sk_reuse = sk->reuse;
			int addr_type = ipv6_addr_type(&sk->net_pinfo.af_inet6.rcv_saddr);

			/* We must walk the whole port owner list in this case. -DaveM */
			for( ; sk2 != NULL; sk2 = sk2->bind_next) {
				if (sk != sk2 &&
				    sk->bound_dev_if == sk2->bound_dev_if) {
					if (!sk_reuse	||
					    !sk2->reuse	||
					    sk2->state == TCP_LISTEN) {
						/* NOTE: IPv6 tw bucket have different format */
						if (!sk2->rcv_saddr	||
						    addr_type == IPV6_ADDR_ANY ||
						    !ipv6_addr_cmp(&sk->net_pinfo.af_inet6.rcv_saddr,
								   sk2->state != TCP_TIME_WAIT ?
								   &sk2->net_pinfo.af_inet6.rcv_saddr :
								   &((struct tcp_tw_bucket*)sk)->v6_rcv_saddr) ||
						    (addr_type==IPV6_ADDR_MAPPED && sk2->family==AF_INET &&
						     sk->rcv_saddr==sk2->rcv_saddr))
							break;
					}
				}
			}
			/* If we found a conflict, fail. */
			ret = 1;
			if (sk2 != NULL)
				goto fail_unlock;
		}
	}
	ret = 1;
	if (tb == NULL &&
	    (tb = tcp_bucket_create(head, snum)) == NULL)
			goto fail_unlock;
	if (tb->owners == NULL) {
		if (sk->reuse && sk->state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
	} else if (tb->fastreuse &&
		   ((sk->reuse == 0) || (sk->state == TCP_LISTEN)))
		tb->fastreuse = 0;

success:
	sk->num = snum;
	if (sk->prev == NULL) {
		if ((sk->bind_next = tb->owners) != NULL)
			tb->owners->bind_pprev = &sk->bind_next;
		tb->owners = sk;
		sk->bind_pprev = &tb->owners;
		sk->prev = (struct sock *) tb;
	} else {
		BUG_TRAP(sk->prev == (struct sock *) tb);
	}
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}

static __inline__ void __tcp_v6_hash(struct sock *sk)
{
	struct sock **skp;
	rwlock_t *lock;

	BUG_TRAP(sk->pprev==NULL);

	if(sk->state == TCP_LISTEN) {
		skp = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
		lock = &tcp_lhash_lock;
		tcp_listen_wlock();
	} else {
		skp = &tcp_ehash[(sk->hashent = tcp_v6_sk_hashfn(sk))].chain;
		lock = &tcp_ehash[sk->hashent].lock;
		write_lock(lock);
	}

	if((sk->next = *skp) != NULL)
		(*skp)->pprev = &sk->next;
	*skp = sk;
	sk->pprev = skp;
	sock_prot_inc_use(sk->prot);
	write_unlock(lock);
}

static void tcp_v6_hash(struct sock *sk)
{
	if(sk->state != TCP_CLOSE) {
		if (sk->tp_pinfo.af_tcp.af_specific == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__tcp_v6_hash(sk);
		local_bh_enable();
	}
}

static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned short hnum, int dif)
{
	struct sock *sk;
	struct sock *result = NULL;
	int score, hiscore;

	hiscore=0;
	read_lock(&tcp_lhash_lock);
	sk = tcp_listening_hash[tcp_lhashfn(hnum)];
	for(; sk; sk = sk->next) {
		if((sk->num == hnum) && (sk->family == PF_INET6)) {
			struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;

			score = 1;
			if(!ipv6_addr_any(&np->rcv_saddr)) {
				if(ipv6_addr_cmp(&np->rcv_saddr, daddr))
					continue;
				score++;
			}
			if (sk->bound_dev_if) {
				if (sk->bound_dev_if != dif)
					continue;
				score++;
			}
			if (score == 3) {
				result = sk;
				break;
			}
			if (score > hiscore) {
				hiscore = score;
				result = sk;
			}
		}
	}
	if (result)
		sock_hold(result);
	read_unlock(&tcp_lhash_lock);
	return result;
}

/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
 * we need not
 * check it for TCP lookups anymore, thanks Alexey. -DaveM
 *
 * The sockhash lock must be held as a reader here.
 */
static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u16 sport,
						       struct in6_addr *daddr, u16 hnum,
						       int dif)
{
	struct tcp_ehash_bucket *head;
	struct sock *sk;
	__u32 ports = TCP_COMBINED_PORTS(sport, hnum);
	int hash;

	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	hash = tcp_v6_hashfn(daddr, hnum, saddr, sport);
	head = &tcp_ehash[hash];
	read_lock(&head->lock);
	for(sk = head->chain; sk; sk = sk->next) {
		/* For IPV6 do the cheaper port and family tests first. */
		if(TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif))
			goto hit; /* You sunk my battleship! */
	}
	/* Must check for a TIME_WAIT'er before going to listener hash. */
	for(sk = (head + tcp_ehash_size)->chain; sk; sk = sk->next) {
		if(*((__u32 *)&(sk->dport))	== ports	&&
		   sk->family			== PF_INET6) {
			struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;

			if(!ipv6_addr_cmp(&tw->v6_daddr, saddr)	&&
			   !ipv6_addr_cmp(&tw->v6_rcv_saddr, daddr) &&
			   (!sk->bound_dev_if || sk->bound_dev_if == dif))
				goto hit;
		}
	}
	read_unlock(&head->lock);
	return NULL;

hit:
	sock_hold(sk);
	read_unlock(&head->lock);
	return sk;
}

static inline struct sock *__tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
					   struct in6_addr *daddr, u16 hnum,
					   int dif)
{
	struct sock *sk;

	sk = __tcp_v6_lookup_established(saddr, sport, daddr, hnum, dif);

	if (sk)
		return sk;

	return tcp_v6_lookup_listener(daddr, hnum, dif);
}

__inline__ struct sock *tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
				      struct in6_addr *daddr, u16 dport,
				      int dif)
{
	struct sock *sk;

	local_bh_disable();
	sk = __tcp_v6_lookup(saddr, sport, daddr, ntohs(dport), dif);
	local_bh_enable();

	return sk;
}

/*
 * Open request hash tables.
 */
static __inline__ unsigned tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport)
{
	unsigned h = raddr->s6_addr32[3] ^ rport;
	h ^= h>>16;
	h ^= h>>8;
	return h&(TCP_SYNQ_HSIZE-1);
}

static struct open_request *tcp_v6_search_req(struct tcp_opt *tp,
					      struct ipv6hdr *ip6h,
					      struct tcphdr *th,
					      int iif,
					      struct open_request ***prevp)
{
	struct tcp_listen_opt *lopt = tp->listen_opt;
	struct open_request *req, **prev;
	__u16 rport = th->source;

	for (prev = &lopt->syn_table[tcp_v6_synq_hash(&ip6h->saddr, rport)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		if (req->rmt_port == rport &&
		    req->class->family == AF_INET6 &&
		    !ipv6_addr_cmp(&req->af.v6_req.rmt_addr, &ip6h->saddr) &&
		    !ipv6_addr_cmp(&req->af.v6_req.loc_addr, &ip6h->daddr) &&
		    (!req->af.v6_req.iif || req->af.v6_req.iif == iif)) {
			BUG_TRAP(req->sk == NULL);
			*prevp = prev;
			return req;
		}
	}

	return NULL;
}

static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
				   struct in6_addr *saddr,
				   struct in6_addr *daddr,
				   unsigned long base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
		return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
						    skb->nh.ipv6h->saddr.s6_addr32,
						    skb->h.th->dest,
						    skb->h.th->source);
	} else {
		return secure_tcp_sequence_number(skb->nh.iph->daddr,
						  skb->nh.iph->saddr,
						  skb->h.th->dest,
						  skb->h.th->source);
	}
}

static int tcp_v6_check_established(struct sock *sk)
{
	struct in6_addr *daddr = &sk->net_pinfo.af_inet6.rcv_saddr;
	struct in6_addr *saddr = &sk->net_pinfo.af_inet6.daddr;
	int dif = sk->bound_dev_if;
	u32 ports = TCP_COMBINED_PORTS(sk->dport, sk->num);
	int hash = tcp_v6_hashfn(daddr, sk->num, saddr, sk->dport);
	struct tcp_ehash_bucket *head = &tcp_ehash[hash];
	struct sock *sk2, **skp;
	struct tcp_tw_bucket *tw;

	write_lock_bh(&head->lock);

	for(skp = &(head + tcp_ehash_size)->chain; (sk2=*skp)!=NULL; skp = &sk2->next) {
		tw = (struct tcp_tw_bucket*)sk2;

		if(*((__u32 *)&(sk2->dport))	== ports	&&
		   sk2->family			== PF_INET6	&&
		   !ipv6_addr_cmp(&tw->v6_daddr, saddr)		&&
		   !ipv6_addr_cmp(&tw->v6_rcv_saddr, daddr)	&&
		   sk2->bound_dev_if == sk->bound_dev_if) {
			struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

			if (tw->ts_recent_stamp) {
				/* See comment in tcp_ipv4.c */
				if ((tp->write_seq = tw->snd_nxt+65535+2) == 0)
					tp->write_seq = 1;
				tp->ts_recent = tw->ts_recent;
				tp->ts_recent_stamp = tw->ts_recent_stamp;
				sock_hold(sk2);
				skp = &head->chain;
				goto unique;
			} else
				goto not_unique;
		}
	}
	tw = NULL;

	for(skp = &head->chain; (sk2=*skp)!=NULL; skp = &sk2->next) {
		if(TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif))
			goto not_unique;
	}

unique:
	BUG_TRAP(sk->pprev==NULL);
	if ((sk->next = *skp) != NULL)
		(*skp)->pprev = &sk->next;

	*skp = sk;
	sk->pprev = skp;
	sk->hashent = hash;
	sock_prot_inc_use(sk->prot);
	write_unlock_bh(&head->lock);

	if (tw) {
		/* Silly. Should hash-dance instead... */
		local_bh_disable();
		tcp_tw_deschedule(tw);
		tcp_timewait_kill(tw);
		NET_INC_STATS_BH(TimeWaitRecycled);
		local_bh_enable();
		tcp_tw_put(tw);
	}
	return 0;

not_unique:
	write_unlock_bh(&head->lock);
	return -EADDRNOTAVAIL;
}

static int tcp_v6_hash_connecting(struct sock *sk)
{
	unsigned short snum = sk->num;
	struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(snum)];
	struct tcp_bind_bucket *tb = head->chain;

	spin_lock_bh(&head->lock);

	if (tb->owners == sk && sk->bind_next == NULL) {
		__tcp_v6_hash(sk);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock_bh(&head->lock);
		return tcp_v6_check_established(sk);
	}
}

static __inline__ int tcp_v6_iif(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = (struct inet6_skb_parm *) skb->cb;
	return opt->iif;
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	struct in6_addr *saddr = NULL;
	struct in6_addr saddr_buf;
	struct flowi fl;
	struct dst_entry *dst;
	struct sk_buff *buff;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return(-EAFNOSUPPORT);

	fl.fl6_flowlabel = 0;
	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
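
The listing above breaks off inside tcp_v6_connect() and continues on the remaining pages. As a reading aid for the connection-table hash used throughout this page, the sketch below re-implements the XOR-and-fold of tcp_v6_hashfn() as a standalone userspace program. It is not part of tcp_ipv6.c: TCP_EHASH_SIZE is an assumed stand-in for the kernel's tcp_ehash_size (which, like any such table size, must be a power of two for the final mask to yield a valid bucket index), and the addresses and ports in main() are arbitrary examples.

/*
 * Userspace sketch (not kernel code): mirrors the folding done by
 * tcp_v6_hashfn() so the bucket selection can be observed directly.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define TCP_EHASH_SIZE 512	/* assumed power-of-two table size */

static int demo_tcp_v6_hashfn(const struct in6_addr *laddr, uint16_t lport,
			      const struct in6_addr *faddr, uint16_t fport)
{
	uint32_t l3, f3;
	uint32_t hashent = lport ^ fport;

	/* Only the last 32 bits of each address enter the hash,
	 * exactly as in the kernel routine (s6_addr32[3]). */
	memcpy(&l3, &laddr->s6_addr[12], 4);
	memcpy(&f3, &faddr->s6_addr[12], 4);
	hashent ^= l3 ^ f3;
	hashent ^= hashent >> 16;
	hashent ^= hashent >> 8;
	return hashent & (TCP_EHASH_SIZE - 1);
}

int main(void)
{
	struct in6_addr laddr, faddr;

	inet_pton(AF_INET6, "2001:db8::1", &laddr);
	inet_pton(AF_INET6, "2001:db8::2", &faddr);
	printf("bucket = %d\n", demo_tcp_v6_hashfn(&laddr, 80, &faddr, 40000));
	return 0;
}

Because only s6_addr32[3] of each address is mixed in, peers whose addresses differ only in their upper 96 bits land in the same bucket, which is one reason the author's comment wonders whether this is "a good hash for v6".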
