
tcp_ipv6.c

IPv6 address converter
C
Page 1 of 3
	retval = tcp_do_sendmsg(sk, msg);

out:
	return retval;
}

void tcp_v6_err(struct sk_buff *skb, struct ipv6hdr *hdr,
		struct inet6_skb_parm *opt,
		int type, int code, unsigned char *header, __u32 info)
{
	struct in6_addr *saddr = &hdr->saddr;
	struct in6_addr *daddr = &hdr->daddr;
	struct tcphdr *th = (struct tcphdr *)header;
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_opt *tp;
	__u32 seq;

	if (header + 8 > skb->tail)
		return;

	sk = tcp_v6_lookup(daddr, th->dest, saddr, th->source, skb->dev->ifindex);

	if (sk == NULL || sk->state == TCP_TIME_WAIT) {
		/* XXX: Update ICMP error count */
		return;
	}

	tp = &sk->tp_pinfo.af_tcp;
	seq = ntohl(th->seq);
	if (sk->state != TCP_LISTEN && !between(seq, tp->snd_una, tp->snd_nxt)) {
		net_statistics.OutOfWindowIcmps++;
		return;
	}

	np = &sk->net_pinfo.af_inet6;

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (atomic_read(&sk->sock_readers))
			return;

		if (sk->state == TCP_LISTEN)
			return;

		/* icmp should have updated the destination cache entry */
		if (sk->dst_cache)
			dst = dst_check(&sk->dst_cache, np->dst_cookie);

		if (dst == NULL) {
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			fl.proto = IPPROTO_TCP;
			fl.nl_u.ip6_u.daddr = &np->daddr;
			fl.nl_u.ip6_u.saddr = &np->saddr;
			fl.oif = sk->bound_dev_if;
			fl.uli_u.ports.dport = sk->dport;
			fl.uli_u.ports.sport = sk->sport;

			dst = ip6_route_output(sk, &fl);
		} else
			dst = dst_clone(dst);

		if (dst->error) {
			sk->err_soft = -dst->error;
		} else if (tp->pmtu_cookie > dst->pmtu) {
			tcp_sync_mss(sk, dst->pmtu);
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		return;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an open_request */
	switch (sk->state) {
		struct open_request *req, *prev;
		struct ipv6hdr hd;
	case TCP_LISTEN:
		if (atomic_read(&sk->sock_readers)) {
			net_statistics.LockDroppedIcmps++;
			/* If too many ICMPs get dropped on busy
			 * servers this needs to be solved differently.
			 */
			return;
		}

		/* Grrrr - fix this later. */
		ipv6_addr_copy(&hd.saddr, saddr);
		ipv6_addr_copy(&hd.daddr, daddr);
		req = tcp_v6_search_req(tp, &hd, th, tcp_v6_iif(skb), &prev);
		if (!req)
			return;
		if (seq != req->snt_isn) {
			net_statistics.OutOfWindowIcmps++;
			return;
		}

		if (req->sk) {
			sk = req->sk; /* report error in accept */
		} else {
			tp->syn_backlog--;
			tcp_synq_unlink(tp, req, prev);
			req->class->destructor(req);
			tcp_openreq_free(req);
		}
		/* FALL THROUGH */
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen */
		tcp_statistics.TcpAttemptFails++;
		sk->err = err;
		sk->zapped = 1;

		mb();
		sk->error_report(sk);
		return;
	}

	if (np->recverr) {
		/* This code isn't serialized with the socket code */
		/* ANK (980927) ... which is harmless now,
		   sk->err's may be safely lost.
		 */
		sk->err = err;
		mb();
		sk->error_report(sk);
	} else {
		sk->err_soft = err;
		mb();
	}
}

static void tcp_v6_send_synack(struct sock *sk, struct open_request *req)
{
	struct sk_buff *skb;
	struct dst_entry *dst;
	struct ipv6_txoptions *opt = NULL;
	struct flowi fl;
	int mss;

	fl.proto = IPPROTO_TCP;
	fl.nl_u.ip6_u.daddr = &req->af.v6_req.rmt_addr;
	fl.nl_u.ip6_u.saddr = &req->af.v6_req.loc_addr;
	fl.fl6_flowlabel = 0;
	fl.oif = req->af.v6_req.iif;
	fl.uli_u.ports.dport = req->rmt_port;
	fl.uli_u.ports.sport = sk->sport;

	opt = sk->net_pinfo.af_inet6.opt;
	if (opt == NULL &&
	    sk->net_pinfo.af_inet6.rxopt.bits.srcrt == 2 &&
	    req->af.v6_req.pktopts) {
		struct sk_buff *pktopts = req->af.v6_req.pktopts;
		struct inet6_skb_parm *rxopt = (struct inet6_skb_parm *)pktopts->cb;
		if (rxopt->srcrt)
			opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(pktopts->nh.raw + rxopt->srcrt));
	}

	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		fl.nl_u.ip6_u.daddr = rt0->addr;
	}

	dst = ip6_route_output(sk, &fl);

	if (dst->error)
		goto done;

	mss = dst->pmtu - sizeof(struct ipv6hdr) - sizeof(struct tcphdr);

	skb = tcp_make_synack(sk, dst, req, mss);

	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v6_check(th, skb->len,
					 &req->af.v6_req.loc_addr, &req->af.v6_req.rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));

		fl.nl_u.ip6_u.daddr = &req->af.v6_req.rmt_addr;
		ip6_xmit(sk, skb, &fl, opt);
	}

done:
	dst_release(dst);
	if (opt && opt != sk->net_pinfo.af_inet6.opt)
		sock_kfree_s(sk, opt, opt->tot_len);
}

static void tcp_v6_or_free(struct open_request *req)
{
	if (req->af.v6_req.pktopts) {
		kfree_skb(req->af.v6_req.pktopts);
		req->af.v6_req.pktopts = NULL;
	}
}

static struct or_calltable or_ipv6 = {
	tcp_v6_send_synack,
	tcp_v6_or_free,
	tcp_v6_send_reset
};

static int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = (struct inet6_skb_parm *)skb->cb;

	if (sk->net_pinfo.af_inet6.rxopt.all) {
		if ((opt->hop && sk->net_pinfo.af_inet6.rxopt.bits.hopopts) ||
		    ((IPV6_FLOWINFO_MASK & *(u32 *)skb->nh.raw) &&
		     sk->net_pinfo.af_inet6.rxopt.bits.rxflow) ||
		    (opt->srcrt && sk->net_pinfo.af_inet6.rxopt.bits.srcrt) ||
		    ((opt->dst1 || opt->dst0) && sk->net_pinfo.af_inet6.rxopt.bits.dstopts))
			return 1;
	}
	return 0;
}

#define BACKLOG(sk)	((sk)->tp_pinfo.af_tcp.syn_backlog) /* lvalue! */
#define BACKLOGMAX(sk)	sysctl_max_syn_backlog

/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb, __u32 isn)
{
	struct tcp_opt tp;
	struct open_request *req;

	/* If the socket is dead, don't accept the connection. */
	if (sk->dead) {
		SOCK_DEBUG(sk, "Reset on %p: Connect on dead socket.\n", sk);
		tcp_statistics.TcpAttemptFails++;
		return -ENOTCONN;
	}

	if (skb->protocol == __constant_htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb, isn);

	/* FIXME: do the same check for anycast */
	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr))
		goto drop;

	if (isn == 0)
		isn = tcp_v6_init_sequence(sk, skb);

	/*
	 *	There are no SYN attacks on IPv6, yet...
	 */
	if (BACKLOG(sk) >= BACKLOGMAX(sk)) {
		(void)(net_ratelimit() &&
		       printk(KERN_INFO "dropping syn ack:%d max:%d\n",
			      BACKLOG(sk), BACKLOGMAX(sk)));
		goto drop;
	}

	req = tcp_openreq_alloc();
	if (req == NULL) {
		goto drop;
	}

	BACKLOG(sk)++;

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->snt_isn = isn;
	tp.tstamp_ok = tp.sack_ok = tp.wscale_ok = tp.snd_wscale = 0;
	tp.mss_clamp = 65535;
	tcp_parse_options(NULL, skb->h.th, &tp, 0);
	if (tp.mss_clamp == 65535)
		tp.mss_clamp = 576 - sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	if (sk->tp_pinfo.af_tcp.user_mss && sk->tp_pinfo.af_tcp.user_mss < tp.mss_clamp)
		tp.mss_clamp = sk->tp_pinfo.af_tcp.user_mss;
	req->mss = tp.mss_clamp;

	if (tp.saw_tstamp)
		req->ts_recent = tp.rcv_tsval;
	req->tstamp_ok = tp.tstamp_ok;
	req->sack_ok = tp.sack_ok;
	req->snd_wscale = tp.snd_wscale;
	req->wscale_ok = tp.wscale_ok;
	req->rmt_port = skb->h.th->source;
	ipv6_addr_copy(&req->af.v6_req.rmt_addr, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&req->af.v6_req.loc_addr, &skb->nh.ipv6h->daddr);
	req->af.v6_req.pktopts = NULL;
	if (ipv6_opt_accepted(sk, skb)) {
		atomic_inc(&skb->users);
		req->af.v6_req.pktopts = skb;
	}
	req->af.v6_req.iif = sk->bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->bound_dev_if &&
	    ipv6_addr_type(&req->af.v6_req.rmt_addr) & IPV6_ADDR_LINKLOCAL)
		req->af.v6_req.iif = tcp_v6_iif(skb);

	req->class = &or_ipv6;
	req->retrans = 0;
	req->sk = NULL;

	tcp_v6_send_synack(sk, req);

	req->expires = jiffies + TCP_TIMEOUT_INIT;
	tcp_inc_slow_timer(TCP_SLT_SYNACK);
	tcp_synq_queue(&sk->tp_pinfo.af_tcp, req);

	return 0;

drop:
	tcp_statistics.TcpAttemptFails++;
	return 0; /* don't send reset */
}

static void tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
			      struct sk_buff *skb)
{
	struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;

	th->check = 0;
	th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
				    csum_partial((char *)th, th->doff<<2,
						 skb->csum));
}

static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct open_request *req,
					 struct dst_entry *dst)
{
	struct ipv6_pinfo *np;
	struct flowi fl;
	struct tcp_opt *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;

	if (skb->protocol == __constant_htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		np = &newsk->net_pinfo.af_inet6;

		ipv6_addr_set(&np->daddr, 0, 0, __constant_htonl(0x0000FFFF),
			      newsk->daddr);

		ipv6_addr_set(&np->saddr, 0, 0, __constant_htonl(0x0000FFFF),
			      newsk->saddr);

		ipv6_addr_copy(&np->rcv_saddr, &np->saddr);

		newsk->tp_pinfo.af_tcp.af_specific = &ipv6_mapped;
		newsk->backlog_rcv = tcp_v4_do_rcv;
		newsk->net_pinfo.af_inet6.pktoptions = NULL;
		newsk->net_pinfo.af_inet6.opt = NULL;

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 af_tcp.af_specific.
		   Sync it now. */
		tcp_sync_mss(newsk, newsk->tp_pinfo.af_tcp.pmtu_cookie);

		return newsk;
	}

	opt = sk->net_pinfo.af_inet6.opt;

	if (sk->ack_backlog > sk->max_ack_backlog)
		goto out;

	if (sk->net_pinfo.af_inet6.rxopt.bits.srcrt == 2 &&
	    opt == NULL && req->af.v6_req.pktopts) {
		struct inet6_skb_parm *rxopt = (struct inet6_skb_parm *)req->af.v6_req.pktopts->cb;
		if (rxopt->srcrt)
			opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(req->af.v6_req.pktopts->nh.raw + rxopt->srcrt));
	}

	if (dst == NULL) {
		fl.proto = IPPROTO_TCP;
		fl.nl_u.ip6_u.daddr = &req->af.v6_req.rmt_addr;
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			fl.nl_u.ip6_u.daddr = rt0->addr;
		}
		fl.nl_u.ip6_u.saddr = &req->af.v6_req.loc_addr;
		fl.fl6_flowlabel = 0;
		fl.oif = sk->bound_dev_if;
		fl.uli_u.ports.dport = req->rmt_port;
		fl.uli_u.ports.sport = sk->sport;

		dst = ip6_route_output(sk, &fl);
	}

	if (dst->error)
		goto out;

	sk->tp_pinfo.af_tcp.syn_backlog--;
	sk->ack_backlog++;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	ip6_dst_store(newsk, dst, NULL);

	newtp = &(newsk->tp_pinfo.af_tcp);

	np = &newsk->net_pinfo.af_inet6;
	ipv6_addr_copy(&np->daddr, &req->af.v6_req.rmt_addr);
	ipv6_addr_copy(&np->saddr, &req->af.v6_req.loc_addr);
	ipv6_addr_copy(&np->rcv_saddr, &req->af.v6_req.loc_addr);
	newsk->bound_dev_if = req->af.v6_req.iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newsk->opt = NULL;

	/* Clone RX bits */
	np->rxopt.all = sk->net_pinfo.af_inet6.rxopt.all;

	/* Clone pktoptions received with SYN */
	np->pktoptions = req->af.v6_req.pktopts;
	if (np->pktoptions)
		atomic_inc(&np->pktoptions->users);
	np->opt = NULL;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (opt) {
		np->opt = ipv6_dup_options(newsk, opt);
		if (opt != sk->net_pinfo.af_inet6.opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	newtp->ext_header_len = 0;
	if (np->opt)
		newtp->ext_header_len = np->opt->opt_nflen + np->opt->opt_flen;

	tcp_sync_mss(newsk, dst->pmtu);
	newtp->rcv_mss = newtp->mss_clamp;

	newsk->daddr	= LOOPBACK4_IPV6;
	newsk->saddr	= LOOPBACK4_IPV6;
	newsk->rcv_saddr = LOOPBACK4_IPV6;

	newsk->prot->hash(newsk);
	tcp_inherit_port(sk, newsk);
	add_to_prot_sklist(newsk);

	sk->data_ready(sk, 0); /* Deliver SIGIO */

	return newsk;

out:
	if (opt && opt != sk->net_pinfo.af_inet6.opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}

static void tcp_v6_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;

	if (th->rst)
		return;

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr))
		return;

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */
	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr), GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr));

	t1 = (struct tcphdr *) skb_push(buff, sizeof(struct tcphdr));

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = sizeof(*t1)/4;
	t1->rst = 1;

	if (th->ack) {
		t1->seq = th->ack_seq;
	} else {
		t1->ack = 1;
		if (!th->syn)
			t1->ack_seq = th->seq;
		else
			t1->ack_seq = htonl(ntohl(th->seq) + 1);
	}

	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

	fl.nl_u.ip6_u.daddr = &skb->nh.ipv6h->saddr;
	fl.nl_u.ip6_u.saddr = &skb->nh.ipv6h->daddr;
	fl.fl6_flowlabel = 0;

	t1->check = csum_ipv6_magic(fl.nl_u.ip6_u.saddr,
				    fl.nl_u.ip6_u.daddr,
				    sizeof(*t1), IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = tcp_v6_iif(skb);
	fl.uli_u.ports.dport = t1->dest;
	fl.uli_u.ports.sport = t1->source;

	/* sk = NULL, but it is safe for now. RST socket required. */
	buff->dst = ip6_route_output(NULL, &fl);

	if (buff->dst->error == 0) {
		ip6_xmit(NULL, buff, &fl, NULL);
		tcp_statistics.TcpOutSegs++;
		tcp_statistics.TcpOutRsts++;
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_ack(struct sk_buff *skb, __u32 seq, __u32 ack, __u16 window)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr), GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr));

	t1 = (struct tcphdr *) skb_push(buff, sizeof(struct tcphdr));

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = sizeof(*t1)/4;
	t1->ack = 1;
	t1->seq = seq;
	t1->ack_seq = ack;
	t1->window = htons(window);

	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

	fl.nl_u.ip6_u.daddr = &skb->nh.ipv6h->saddr;
	fl.nl_u.ip6_u.saddr = &skb->nh.ipv6h->daddr;
	fl.fl6_flowlabel = 0;

	t1->check = csum_ipv6_magic(fl.nl_u.ip6_u.saddr,
				    fl.nl_u.ip6_u.daddr,
				    sizeof(*t1), IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
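
The transmit paths above (tcp_v6_send_check, tcp_v6_send_synack, tcp_v6_send_reset, tcp_v6_send_ack) all finish the TCP checksum with csum_ipv6_magic(), which folds the IPv6 pseudo-header (source address, destination address, upper-layer length, and the next-header value) into the one's-complement sum that csum_partial() accumulated over the TCP header and payload. Below is a minimal standalone sketch of that RFC 2460 section 8.1 calculation; the helper names sum16 and tcp6_checksum are illustrative, not kernel API, and the segment's checksum field is assumed to be zeroed on input, just as tcp_v6_send_check() zeroes th->check before summing.

#include <stdint.h>
#include <stddef.h>
#include <netinet/in.h>		/* IPPROTO_TCP, struct in6_addr */
#include <arpa/inet.h>		/* htonl() */

/* Accumulate a buffer into a one's-complement sum, reading it as
 * big-endian 16-bit words; an odd trailing byte is zero-padded. */
static uint32_t sum16(const uint8_t *p, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;
	return sum;
}

/* TCP-over-IPv6 checksum (RFC 2460, section 8.1): sum the pseudo-header
 * (saddr, daddr, 32-bit upper-layer length, next header = 6) and the TCP
 * segment, fold the carries, and take the one's complement.  The result
 * is in host order; store it into the header with htons(). */
static uint16_t tcp6_checksum(const struct in6_addr *saddr,
			      const struct in6_addr *daddr,
			      const uint8_t *tcp_seg, size_t tcp_len)
{
	uint32_t len_be = htonl((uint32_t)tcp_len);
	uint32_t sum = 0;

	sum = sum16(saddr->s6_addr, 16, sum);
	sum = sum16(daddr->s6_addr, 16, sum);
	sum = sum16((const uint8_t *)&len_be, 4, sum);
	sum += IPPROTO_TCP;	/* three zero bytes, then next-header = 6 */
	sum = sum16(tcp_seg, tcp_len, sum);

	while (sum >> 16)	/* fold carries into the low 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

The split mirrors the kernel code on this page: csum_partial() plays the role of the running sum over the segment, and csum_ipv6_magic() adds the pseudo-header fields and performs the final fold and complement.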
