
tcp_ipv6.c

Collection: ipv6地址转换器 (IPv6 address converter)
Language: C
Page 1 of 3
/*
 *  TCP over IPv6
 *  Linux INET6 implementation
 *
 *  Authors:
 *  Pedro Roque     <roque@di.fc.ul.pt>
 *
 *  $Id: tcp_ipv6.c,v 1.104.2.10 1999/09/23 19:21:46 davem Exp $
 *
 *  Based on:
 *  linux/net/ipv4/tcp.c
 *  linux/net/ipv4/tcp_input.c
 *  linux/net/ipv4/tcp_output.c
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/sched.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>

#include <asm/uaccess.h>

extern int sysctl_max_syn_backlog;

static void tcp_v6_send_reset(struct sk_buff *skb);
static void tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
                              struct sk_buff *skb);

static int  tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_xmit(struct sk_buff *skb);
static struct open_request *tcp_v6_search_req(struct tcp_opt *tp,
                                              struct ipv6hdr *ip6h,
                                              struct tcphdr *th,
                                              int iif,
                                              struct open_request **prevp);

static struct tcp_func ipv6_mapped;
static struct tcp_func ipv6_specific;

/* I have no idea if this is a good hash for v6 or not. -DaveM */
static __inline__ int tcp_v6_hashfn(struct in6_addr *laddr, u16 lport,
                                    struct in6_addr *faddr, u16 fport)
{
    int hashent = (lport ^ fport);

    hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
    return (hashent & ((tcp_ehash_size/2) - 1));
}

static __inline__ int tcp_v6_sk_hashfn(struct sock *sk)
{
    struct in6_addr *laddr = &sk->net_pinfo.af_inet6.rcv_saddr;
    struct in6_addr *faddr = &sk->net_pinfo.af_inet6.daddr;
    __u16 lport = sk->num;
    __u16 fport = sk->dport;

    return tcp_v6_hashfn(laddr, lport, faddr, fport);
}
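The two hash helpers above feed the established-connection table: only the port pair and the last 32 bits of each IPv6 address enter the hash, which is then masked into the first half of tcp_ehash (the second half holds TIME_WAIT buckets, as the lookup code further down shows). Below is a minimal user-space sketch of the same folding, with a made-up table size; it illustrates the idea and is not part of the kernel file.

/* Illustrative sketch only -- not part of tcp_ipv6.c. Mirrors the idea of
 * tcp_v6_hashfn(): XOR the ports with the low 32 bits of both addresses,
 * then mask into the first half of a power-of-two sized table. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>

#define EHASH_SIZE 512          /* hypothetical table size (power of two) */

static unsigned int demo_v6_hashfn(const struct in6_addr *laddr, uint16_t lport,
                                   const struct in6_addr *faddr, uint16_t fport)
{
    uint32_t l32, f32;

    /* Only the last 32 bits of each address enter the hash. */
    memcpy(&l32, &laddr->s6_addr[12], sizeof(l32));
    memcpy(&f32, &faddr->s6_addr[12], sizeof(f32));

    return ((lport ^ fport) ^ l32 ^ f32) & ((EHASH_SIZE / 2) - 1);
}

int main(void)
{
    struct in6_addr a = IN6ADDR_LOOPBACK_INIT;   /* ::1 */
    struct in6_addr b = IN6ADDR_ANY_INIT;        /* ::  */

    printf("bucket = %u\n", demo_v6_hashfn(&a, 4096, &b, 80));
    return 0;
}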
/* Grrr, addr_type already calculated by caller, but I don't want
 * to add some silly "cookie" argument to this method just for that.
 * But it doesn't matter, the recalculation is in the rarest path
 * this function ever takes.
 */
static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
    struct tcp_bind_bucket *tb;

    SOCKHASH_LOCK();
    if (snum == 0) {
        int rover = tcp_port_rover;
        int low = sysctl_local_port_range[0];
        int high = sysctl_local_port_range[1];
        int remaining = (high - low) + 1;

        do {
            rover++;
            if ((rover < low) || (rover > high))
                rover = low;
            tb = tcp_bhash[tcp_bhashfn(rover)];
            for ( ; tb; tb = tb->next)
                if (tb->port == rover)
                    goto next;
            break;
        next:
        } while (--remaining > 0);
        tcp_port_rover = rover;

        /* Exhausted local port range during search? */
        if (remaining <= 0)
            goto fail;

        /* OK, here is the one we will use. */
        snum = rover;
        tb = NULL;
    } else {
        for (tb = tcp_bhash[tcp_bhashfn(snum)];
             tb != NULL;
             tb = tb->next)
            if (tb->port == snum)
                break;
    }
    if (tb != NULL && tb->owners != NULL) {
        if (tb->fastreuse != 0 && sk->reuse != 0) {
            goto success;
        } else {
            struct sock *sk2 = tb->owners;
            int sk_reuse = sk->reuse;
            int addr_type = ipv6_addr_type(&sk->net_pinfo.af_inet6.rcv_saddr);

            for ( ; sk2 != NULL; sk2 = sk2->bind_next) {
                if (sk->bound_dev_if == sk2->bound_dev_if) {
                    if (!sk_reuse ||
                        !sk2->reuse ||
                        sk2->state == TCP_LISTEN) {
                        /* NOTE: IPv6 tw bucket have different format */
                        if (!sk2->rcv_saddr ||
                            addr_type == IPV6_ADDR_ANY ||
                            !ipv6_addr_cmp(&sk->net_pinfo.af_inet6.rcv_saddr,
                                           sk2->state != TCP_TIME_WAIT ?
                                           &sk2->net_pinfo.af_inet6.rcv_saddr :
                                           &((struct tcp_tw_bucket*)sk)->v6_rcv_saddr))
                            break;
                    }
                }
            }
            /* If we found a conflict, fail. */
            if (sk2 != NULL)
                goto fail;
        }
    }
    if (tb == NULL &&
        (tb = tcp_bucket_create(snum)) == NULL)
        goto fail;
    if (tb->owners == NULL) {
        if (sk->reuse && sk->state != TCP_LISTEN)
            tb->fastreuse = 1;
        else
            tb->fastreuse = 0;
    } else if (tb->fastreuse &&
               ((sk->reuse == 0) || (sk->state == TCP_LISTEN)))
        tb->fastreuse = 0;

success:
    sk->num = snum;
    if ((sk->bind_next = tb->owners) != NULL)
        tb->owners->bind_pprev = &sk->bind_next;
    tb->owners = sk;
    sk->bind_pprev = &tb->owners;
    sk->prev = (struct sock *) tb;

    SOCKHASH_UNLOCK();
    return 0;

fail:
    SOCKHASH_UNLOCK();
    return 1;
}

static void tcp_v6_hash(struct sock *sk)
{
    if (sk->state != TCP_CLOSE) {
        struct sock **skp;

        /* Well, I know that it is ugly...
         * All this ->prot, ->af_specific etc. need LARGE cleanup --ANK
         */
        if (sk->tp_pinfo.af_tcp.af_specific == &ipv6_mapped) {
            tcp_prot.hash(sk);
            return;
        }
        if (sk->state == TCP_LISTEN)
            skp = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
        else
            skp = &tcp_ehash[(sk->hashent = tcp_v6_sk_hashfn(sk))];

        SOCKHASH_LOCK();
        if ((sk->next = *skp) != NULL)
            (*skp)->pprev = &sk->next;
        *skp = sk;
        sk->pprev = skp;
        SOCKHASH_UNLOCK();
    }
}

static void tcp_v6_unhash(struct sock *sk)
{
    SOCKHASH_LOCK();
    if (sk->pprev) {
        if (sk->next)
            sk->next->pprev = sk->pprev;
        *sk->pprev = sk->next;
        sk->pprev = NULL;
        tcp_reg_zap(sk);
        __tcp_put_port(sk);
    }
    SOCKHASH_UNLOCK();
}

static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned short hnum, int dif)
{
    struct sock *sk;
    struct sock *result = NULL;
    int score, hiscore;

    hiscore = 0;
    sk = tcp_listening_hash[tcp_lhashfn(hnum)];
    for (; sk; sk = sk->next) {
        if ((sk->num == hnum) && (sk->family == PF_INET6)) {
            struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;

            score = 1;
            if (!ipv6_addr_any(&np->rcv_saddr)) {
                if (ipv6_addr_cmp(&np->rcv_saddr, daddr))
                    continue;
                score++;
            }
            if (sk->bound_dev_if) {
                if (sk->bound_dev_if != dif)
                    continue;
                score++;
            }
            if (score == 3)
                return sk;
            if (score > hiscore) {
                hiscore = score;
                result = sk;
            }
        }
    }
    return result;
}
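tcp_v6_lookup_listener() walks a single listening-hash chain and scores each candidate: 1 for a matching port and family, plus 1 if the socket is bound to exactly the destination address, plus 1 if it is bound to the incoming interface; a score of 3 is an exact match and returns immediately, otherwise the best scorer wins. The sketch below reproduces that preference order in user space with a simplified, hypothetical listener table; the field and function names are invented for the example.

/* Illustrative sketch only -- not part of tcp_ipv6.c. Reproduces the
 * "best match wins, exact match returns early" scoring used by
 * tcp_v6_lookup_listener(): base score 1, +1 for a bound address match,
 * +1 for a bound interface match. */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>

struct demo_listener {
    struct in6_addr bound_addr;   /* in6addr_any means "listen on all" */
    int bound_ifindex;            /* 0 means "any interface" */
    const char *name;
};

static const struct demo_listener *
demo_lookup_listener(const struct demo_listener *tbl, int n,
                     const struct in6_addr *daddr, int dif)
{
    const struct demo_listener *best = NULL;
    int hiscore = 0, i;

    for (i = 0; i < n; i++) {
        int score = 1;

        if (memcmp(&tbl[i].bound_addr, &in6addr_any, sizeof(in6addr_any))) {
            if (memcmp(&tbl[i].bound_addr, daddr, sizeof(*daddr)))
                continue;               /* bound to another address */
            score++;
        }
        if (tbl[i].bound_ifindex) {
            if (tbl[i].bound_ifindex != dif)
                continue;               /* bound to another interface */
            score++;
        }
        if (score == 3)
            return &tbl[i];             /* exact match, stop searching */
        if (score > hiscore) {
            hiscore = score;
            best = &tbl[i];
        }
    }
    return best;
}

int main(void)
{
    struct demo_listener tbl[2] = {
        { IN6ADDR_ANY_INIT,      0, "wildcard" },
        { IN6ADDR_LOOPBACK_INIT, 0, "loopback-only" },
    };
    struct in6_addr dst = IN6ADDR_LOOPBACK_INIT;
    const struct demo_listener *hit = demo_lookup_listener(tbl, 2, &dst, 1);

    printf("chosen: %s\n", hit ? hit->name : "none");
    return 0;
}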
/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
 * It is assumed that this code only gets called from within NET_BH.
 */
static inline struct sock *__tcp_v6_lookup(struct tcphdr *th,
                                           struct in6_addr *saddr, u16 sport,
                                           struct in6_addr *daddr, u16 dport,
                                           int dif)
{
    struct sock *sk;
    __u16 hnum = ntohs(dport);
    __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
    int hash;

    /* Check TCP register quick cache first. */
    sk = TCP_RHASH(sport);
    if (sk && TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif))
        goto hit;

    /* Optimize here for direct hit, only listening connections can
     * have wildcards anyways.
     */
    hash = tcp_v6_hashfn(daddr, hnum, saddr, sport);
    for (sk = tcp_ehash[hash]; sk; sk = sk->next) {
        /* For IPV6 do the cheaper port and family tests first. */
        if (TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif)) {
            if (sk->state == TCP_ESTABLISHED)
                TCP_RHASH(sport) = sk;
            goto hit; /* You sunk my battleship! */
        }
    }
    /* Must check for a TIME_WAIT'er before going to listener hash. */
    for (sk = tcp_ehash[hash+(tcp_ehash_size/2)]; sk; sk = sk->next) {
        if (*((__u32 *)&(sk->dport)) == ports &&
            sk->family == PF_INET6) {
            struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;

            if (!ipv6_addr_cmp(&tw->v6_daddr, saddr) &&
                !ipv6_addr_cmp(&tw->v6_rcv_saddr, daddr) &&
                (!sk->bound_dev_if || sk->bound_dev_if == dif))
                goto hit;
        }
    }
    sk = tcp_v6_lookup_listener(daddr, hnum, dif);
hit:
    return sk;
}

#define tcp_v6_lookup(sa, sp, da, dp, dif) __tcp_v6_lookup((0),(sa),(sp),(da),(dp),(dif))

static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
                                   struct in6_addr *saddr,
                                   struct in6_addr *daddr,
                                   unsigned long base)
{
    return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
{
    __u32 si;
    __u32 di;

    if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
        si = skb->nh.ipv6h->saddr.s6_addr32[3];
        di = skb->nh.ipv6h->daddr.s6_addr32[3];
    } else {
        si = skb->nh.iph->saddr;
        di = skb->nh.iph->daddr;
    }

    return secure_tcp_sequence_number(di, si,
                                      skb->h.th->dest,
                                      skb->h.th->source);
}

static int tcp_v6_unique_address(struct sock *sk)
{
    struct tcp_bind_bucket *tb;
    unsigned short snum = sk->num;
    int retval = 1;

    /* Freeze the hash while we snoop around. */
    SOCKHASH_LOCK();
    tb = tcp_bhash[tcp_bhashfn(snum)];
    for (; tb; tb = tb->next) {
        if (tb->port == snum && tb->owners != NULL) {
            /* Almost certainly the re-use port case, search the real hashes
             * so it actually scales.  (we hope that all ipv6 ftp servers will
             * use passive ftp, I just cover this case for completeness)
             */
            sk = __tcp_v6_lookup(NULL, &sk->net_pinfo.af_inet6.daddr,
                                 sk->dport,
                                 &sk->net_pinfo.af_inet6.rcv_saddr,
                                 htons(snum),
                                 sk->bound_dev_if);
            if ((sk != NULL) && (sk->state != TCP_LISTEN))
                retval = 0;
            break;
        }
    }
    SOCKHASH_UNLOCK();
    return retval;
}

static __inline__ int tcp_v6_iif(struct sk_buff *skb)
{
    struct inet6_skb_parm *opt = (struct inet6_skb_parm *) skb->cb;
    return opt->iif;
}
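tcp_v6_check() simply hands its running sum to csum_ipv6_magic(), which completes the TCP checksum over the IPv6 pseudo-header: source address, destination address, upper-layer length and the next-header value. The sketch below computes the same checksum in portable user-space C with a naive byte-wise one's-complement sum rather than the kernel's optimized folding; it assumes the checksum field inside the passed TCP segment is already zeroed.

/* Illustrative sketch only -- not part of tcp_ipv6.c. Computes a TCP
 * checksum over the IPv6 pseudo-header the way csum_ipv6_magic() does
 * conceptually: sum saddr, daddr, upper-layer length and next header,
 * then the TCP segment itself, and fold to 16 bits. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <netinet/in.h>

static uint32_t sum_bytes(uint32_t sum, const uint8_t *p, size_t len)
{
    while (len > 1) {
        sum += ((uint32_t)p[0] << 8) | p[1];   /* 16-bit big-endian words */
        p += 2;
        len -= 2;
    }
    if (len)                                   /* odd trailing byte */
        sum += (uint32_t)p[0] << 8;
    return sum;
}

static uint16_t tcp6_checksum(const struct in6_addr *saddr,
                              const struct in6_addr *daddr,
                              const uint8_t *tcp_segment, size_t len)
{
    uint32_t sum = 0;

    sum = sum_bytes(sum, saddr->s6_addr, 16);                 /* source      */
    sum = sum_bytes(sum, daddr->s6_addr, 16);                 /* destination */
    sum += (uint32_t)(len >> 16) + (uint32_t)(len & 0xffff);  /* length      */
    sum += IPPROTO_TCP;                                       /* next header */

    /* TCP header + payload; its checksum field must be zero here. */
    sum = sum_bytes(sum, tcp_segment, len);

    while (sum >> 16)                                         /* fold carries */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    struct in6_addr s = IN6ADDR_LOOPBACK_INIT, d = IN6ADDR_LOOPBACK_INIT;
    uint8_t seg[20] = { 0x04, 0xd2, 0x00, 0x50 };  /* ports 1234 -> 80, rest 0 */

    printf("checksum = 0x%04x\n", tcp6_checksum(&s, &d, seg, sizeof(seg)));
    return 0;
}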
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                          int addr_len)
{
    struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
    struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
    struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
    struct in6_addr *saddr = NULL;
    struct in6_addr saddr_buf;
    struct flowi fl;
    struct dst_entry *dst;
    struct sk_buff *buff;
    int addr_type;
    int err;

    if (sk->state != TCP_CLOSE)
        return(-EISCONN);

    /*
     *  Don't allow a double connect.
     */
    if (!ipv6_addr_any(&np->daddr))
        return -EINVAL;

    if (addr_len < sizeof(struct sockaddr_in6))
        return(-EINVAL);

    if (usin->sin6_family && usin->sin6_family != AF_INET6)
        return(-EAFNOSUPPORT);

    fl.fl6_flowlabel = 0;
    if (np->sndflow) {
        fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
        if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
            struct ip6_flowlabel *flowlabel;
            flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
            if (flowlabel == NULL)
                return -EINVAL;
            ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
            fl6_sock_release(flowlabel);
        }
    }

    /*
     *  connect() to INADDR_ANY means loopback (BSD'ism).
     */
    if (ipv6_addr_any(&usin->sin6_addr))
        usin->sin6_addr.s6_addr[15] = 0x1;

    addr_type = ipv6_addr_type(&usin->sin6_addr);

    if (addr_type & IPV6_ADDR_MULTICAST)
        return -ENETUNREACH;

    /*
     *  connect to self not allowed
     */
    if (ipv6_addr_cmp(&usin->sin6_addr, &np->saddr) == 0 &&
        usin->sin6_port == sk->sport)
        return (-EINVAL);

    memcpy(&np->daddr, &usin->sin6_addr, sizeof(struct in6_addr));
    np->flow_label = fl.fl6_flowlabel;

    /*
     *  TCP over IPv4
     */
    if (addr_type == IPV6_ADDR_MAPPED) {
        u32 exthdrlen = tp->ext_header_len;
        struct sockaddr_in sin;

        SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

        sin.sin_family = AF_INET;
        sin.sin_port = usin->sin6_port;
        sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

        sk->tp_pinfo.af_tcp.af_specific = &ipv6_mapped;
        sk->backlog_rcv = tcp_v4_do_rcv;

        err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

        if (err) {
            tp->ext_header_len = exthdrlen;
            sk->tp_pinfo.af_tcp.af_specific = &ipv6_specific;
            sk->backlog_rcv = tcp_v6_do_rcv;
            goto failure;
        } else {
            ipv6_addr_set(&np->saddr, 0, 0, __constant_htonl(0x0000FFFF),
                          sk->saddr);
            ipv6_addr_set(&np->rcv_saddr, 0, 0, __constant_htonl(0x0000FFFF),
                          sk->rcv_saddr);
        }

        return err;
    }

    if (!ipv6_addr_any(&np->rcv_saddr))
        saddr = &np->rcv_saddr;

    fl.proto = IPPROTO_TCP;
    fl.fl6_dst = &np->daddr;
    fl.fl6_src = saddr;
    fl.oif = sk->bound_dev_if;
    fl.uli_u.ports.dport = usin->sin6_port;
    fl.uli_u.ports.sport = sk->sport;

    if (np->opt && np->opt->srcrt) {
        struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
        fl.nl_u.ip6_u.daddr = rt0->addr;
    }

    dst = ip6_route_output(sk, &fl);

    if ((err = dst->error) != 0) {
        dst_release(dst);
        goto failure;
    }

    if (fl.oif == 0 && addr_type&IPV6_ADDR_LINKLOCAL) {
        /* Ough! This guy tries to connect to link local
         * address and did not specify interface.
         * Actually we should kick him out, but
         * we will be patient :) --ANK
         */
        sk->bound_dev_if = dst->dev->ifindex;
    }

    ip6_dst_store(sk, dst, NULL);

    if (saddr == NULL) {
        err = ipv6_get_saddr(dst, &np->daddr, &saddr_buf);
        if (err)
            goto failure;

        saddr = &saddr_buf;
    }

    /* set the source address */
    ipv6_addr_copy(&np->rcv_saddr, saddr);
    ipv6_addr_copy(&np->saddr, saddr);

    tp->ext_header_len = 0;
    if (np->opt)
        tp->ext_header_len = np->opt->opt_flen+np->opt->opt_nflen;
    /* Reset mss clamp */
    tp->mss_clamp = ~0;

    err = -ENOBUFS;
    buff = sock_wmalloc(sk, (MAX_HEADER + sk->prot->max_header),
                        0, GFP_KERNEL);

    if (buff == NULL)
        goto failure;

    sk->dport = usin->sin6_port;

    if (!tcp_v6_unique_address(sk)) {
        kfree_skb(buff);
        err = -EADDRNOTAVAIL;
        goto failure;
    }

    /*
     *  Init variables
     */
    tp->write_seq = secure_tcp_sequence_number(np->saddr.s6_addr32[3],
                                               np->daddr.s6_addr32[3],
                                               sk->sport, sk->dport);

    tcp_connect(sk, buff, dst->pmtu);

    return 0;

failure:
    dst_release(xchg(&sk->dst_cache, NULL));
    memset(&np->daddr, 0, sizeof(struct in6_addr));
    sk->daddr = 0;
    return err;
}
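When tcp_v6_connect() sees an IPv4-mapped destination (::ffff:a.b.c.d), it switches the socket to the ipv6_mapped operations and lets tcp_v4_connect() do the real work, rewriting saddr and rcv_saddr into mapped form on success. The user-space sketch below shows only the detection and extraction step; demo_map_to_v4() is a made-up helper name, not a kernel function.

/* Illustrative sketch only -- not part of tcp_ipv6.c. Shows the
 * "TCP over IPv4" branch in miniature: recognize ::ffff:a.b.c.d,
 * pull out the embedded IPv4 address, and build a sockaddr_in for it. */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* Build a sockaddr_in from a v4-mapped IPv6 destination; returns 0 on
 * success, -1 if the address is not v4-mapped. */
static int demo_map_to_v4(const struct sockaddr_in6 *usin, struct sockaddr_in *sin)
{
    if (!IN6_IS_ADDR_V4MAPPED(&usin->sin6_addr))
        return -1;

    memset(sin, 0, sizeof(*sin));
    sin->sin_family = AF_INET;
    sin->sin_port = usin->sin6_port;            /* already network order */
    /* The IPv4 address lives in the last 4 bytes of the IPv6 address,
     * which is what the kernel reads via s6_addr32[3]. */
    memcpy(&sin->sin_addr.s_addr, &usin->sin6_addr.s6_addr[12], 4);
    return 0;
}

int main(void)
{
    struct sockaddr_in6 usin;
    struct sockaddr_in sin;
    char buf[INET_ADDRSTRLEN];

    memset(&usin, 0, sizeof(usin));
    usin.sin6_family = AF_INET6;
    usin.sin6_port = htons(80);
    inet_pton(AF_INET6, "::ffff:192.0.2.10", &usin.sin6_addr);

    if (demo_map_to_v4(&usin, &sin) == 0)
        printf("connect via IPv4 to %s:%u\n",
               inet_ntop(AF_INET, &sin.sin_addr, buf, sizeof(buf)),
               ntohs(sin.sin_port));
    return 0;
}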
static int tcp_v6_sendmsg(struct sock *sk, struct msghdr *msg, int len)
{
    struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
    int retval = -EINVAL;

    /*
     *  Do sanity checking for sendmsg/sendto/send
     */
    if (msg->msg_flags & ~(MSG_OOB|MSG_DONTROUTE|MSG_DONTWAIT|MSG_NOSIGNAL))
        goto out;

    if (msg->msg_name) {
        struct sockaddr_in6 *addr = (struct sockaddr_in6 *)msg->msg_name;

        if (msg->msg_namelen < sizeof(*addr))
            goto out;

        if (addr->sin6_family && addr->sin6_family != AF_INET6)
            goto out;

        retval = -ENOTCONN;

        if (sk->state == TCP_CLOSE)
            goto out;

        retval = -EISCONN;

        if (addr->sin6_port != sk->dport)
            goto out;

        if (ipv6_addr_cmp(&addr->sin6_addr, &np->daddr))
            goto out;

        if (np->sndflow && np->flow_label != (addr->sin6_flowinfo&IPV6_FLOWINFO_MASK))
            goto out;
    }
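Both the connect path and the sendmsg fragment above compare flow information under two masks: the full flowinfo field (traffic class plus flow label) and the 20-bit flow label proper. A small sketch of that masking follows; the mask values are written out locally and are assumed to mirror the kernel's IPV6_FLOWINFO_MASK and IPV6_FLOWLABEL_MASK definitions.

/* Illustrative sketch only -- not part of tcp_ipv6.c. Splits the
 * sin6_flowinfo word the way the connect/sendmsg paths do: keep the
 * 28-bit flowinfo (traffic class + flow label), then test the 20-bit
 * flow label part. Mask values are assumed to match the kernel's. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define DEMO_FLOWINFO_MASK  htonl(0x0FFFFFFF)   /* traffic class + label */
#define DEMO_FLOWLABEL_MASK htonl(0x000FFFFF)   /* flow label only       */

int main(void)
{
    uint32_t sin6_flowinfo = htonl(0x12ABCDE);  /* example on-wire value */
    uint32_t flowinfo  = sin6_flowinfo & DEMO_FLOWINFO_MASK;
    uint32_t flowlabel = sin6_flowinfo & DEMO_FLOWLABEL_MASK;

    printf("flowinfo  = 0x%07x\n", ntohl(flowinfo));
    printf("flowlabel = 0x%05x\n", ntohl(flowlabel));

    if (flowlabel)
        printf("non-zero label: connect() would look up an ip6_flowlabel\n");
    return 0;
}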
