⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 tcp_ipv6.c

📁 ipv6地址转换器
💻 C
📖 第 1 页 / 共 3 页
字号:
	/*
	 * NOTE(review): tail of tcp_v6_send_reset() — the head of this
	 * function lies before the start of this chunk.  At this point a RST
	 * segment has been built in `buff` (TCP header `t1`); here we route
	 * it and transmit it, or drop it if no route exists.
	 */
	fl.oif = tcp_v6_iif(skb);
	fl.uli_u.ports.dport = t1->dest;
	fl.uli_u.ports.sport = t1->source;

	/* sk = NULL, but it is safe for now. static socket required. */
	buff->dst = ip6_route_output(NULL, &fl);

	if (buff->dst->error == 0) {
		ip6_xmit(NULL, buff, &fl, NULL);
		tcp_statistics.TcpOutSegs++;
		return;
	}

	/* Routing failed: free the reply segment instead of sending it. */
	kfree_skb(buff);
}

/*
 * Walk the listener's SYN queue looking for the open_request that matches
 * this segment: same remote/local IPv6 addresses, same remote port, and —
 * when the request was bound to an interface — the same incoming interface.
 *
 * On success *prevp is set to the predecessor entry so the caller can
 * unlink the request with tcp_synq_unlink(); returns NULL on no match.
 */
static struct open_request *tcp_v6_search_req(struct tcp_opt *tp,
					      struct ipv6hdr *ip6h,
					      struct tcphdr *th,
					      int iif,
					      struct open_request **prevp)
{
	struct open_request *req, *prev;
	__u16 rport = th->source;	/* remote port, network byte order */

	/*	assumption: the socket is not in use.
	 *	as we checked the user count on tcp_rcv and we're
	 *	running from a soft interrupt.
	 */
	/* The queue head doubles as the initial "previous" element. */
	prev = (struct open_request *) (&tp->syn_wait_queue);
	for (req = prev->dl_next; req; req = req->dl_next) {
		if (!ipv6_addr_cmp(&req->af.v6_req.rmt_addr, &ip6h->saddr) &&
		    !ipv6_addr_cmp(&req->af.v6_req.loc_addr, &ip6h->daddr) &&
		    req->rmt_port == rport &&
		    (!req->af.v6_req.iif || req->af.v6_req.iif == iif)) {
			*prevp = prev;
			return req;
		}
		prev = req;
	}
	return NULL;
}

/*
 * Handle a RST aimed at an embryonic (not yet accepted) connection:
 * find the matching open_request, validate the RST's sequence number
 * against the request's ISN (RFC 793), then unlink and free the request.
 */
static void tcp_v6_rst_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	struct open_request *req, *prev;

	req = tcp_v6_search_req(tp,skb->nh.ipv6h,skb->h.th,tcp_v6_iif(skb),&prev);
	if (!req)
		return;
	/* Sequence number check required by RFC793 */
	if (before(TCP_SKB_CB(skb)->seq, req->rcv_isn) ||
	    after(TCP_SKB_CB(skb)->seq, req->rcv_isn+1))
		return;
	/* Drop the right backlog counter depending on how far the
	 * handshake progressed (child socket created or not). */
	if(req->sk)
		sk->ack_backlog--;
	else
		tp->syn_backlog--;
	tcp_synq_unlink(tp, req, prev);
	req->class->destructor(req);
	tcp_openreq_free(req);
	net_statistics.EmbryonicRsts++;
}

/*
 * Segment arrived on a LISTEN socket.  `((u32 *)th)[3]` is the 4th 32-bit
 * word of the TCP header (data offset / flags / window), so the htonl
 * masks below test flag bits in network byte order: 0x00040000 is RST,
 * 0x00120000 is SYN|ACK.
 *
 * Returns the socket that should continue processing the segment: NULL if
 * the segment was a RST against a pending request, a newly minted child
 * socket from tcp_check_req(), or the listener itself.
 */
static inline struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	u32 flg = ((u32 *)th)[3];

	/* Check for RST */
	if (flg & __constant_htonl(0x00040000)) {
		tcp_v6_rst_req(sk, skb);
		return NULL;
	}

	/* Check SYN|ACK */
	if (flg & __constant_htonl(0x00120000)) {
		struct open_request *req, *dummy;
		struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

		req = tcp_v6_search_req(tp, skb->nh.ipv6h, th, tcp_v6_iif(skb), &dummy);
		if (req) {
			sk = tcp_check_req(sk, skb, req);
		}
#if 0 /*def CONFIG_SYN_COOKIES */
		 else {
			sk = cookie_v6_check(sk, skb);
		 }
#endif
	}
	return sk;
}

/*
 * Per-socket receive work for TCP over IPv6 (also the backlog_rcv hook of
 * tcpv6_prot).  Dispatches IPv4-mapped traffic to tcp_v4_do_rcv(), runs
 * the socket filter, then hands the segment to the fast path
 * (ESTABLISHED), the listen path, or the generic state machine.
 * Always returns 0; the skb is consumed on every path.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_FILTER
	struct sk_filter *filter;
#endif
	int users = 0;	/* non-zero once we hold an extra skb ref for pktoptions */

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */
	if (skb->protocol == __constant_htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_FILTER
	filter = sk->filter;
	if (filter && sk_filter(skb, filter))
		goto discard;
#endif /* CONFIG_FILTER */

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

  	ipv6_statistics.Ip6InDelivers++;

	/*
	 * This doesn't check if the socket has enough room for the packet.
	 * Either process the packet _without_ queueing it and then free it,
	 * or do the check later.
	 */
	skb_set_owner_r(skb, sk);

	/* Do Stevens' IPV6_PKTOPTIONS.
	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.
	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
	                                       --ANK (980728)
	 */
	if (sk->net_pinfo.af_inet6.rxopt.all) {
		/* Take an extra reference so the skb survives the TCP input
		 * path and we can inspect its options afterwards. */
		users = atomic_read(&skb->users);
		atomic_inc(&skb->users);
	}

	if (sk->state == TCP_ESTABLISHED) { /* Fast path */
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		if (users)
			goto ipv6_pktoptions;
		return 0;
	}

	if (sk->state == TCP_LISTEN) {
		struct sock *nsk;

		nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (atomic_read(&nsk->sock_readers)) {
			skb_orphan(skb);
			__skb_queue_tail(&nsk->back_log, skb);
			return 0;
		}
		sk = nsk;
	}

	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	if (users)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(skb);
discard:
	/* The extra kfree_skb() balances the atomic_inc() taken above for
	 * the pktoptions path; both drops are needed to release the skb. */
	if (users)
		kfree_skb(skb);
	kfree_skb(skb);
	return 0;

ipv6_pktoptions:
	/* Do you ask, what is it?
	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	if (atomic_read(&skb->users) > users &&
	    TCP_SKB_CB(skb)->end_seq == sk->tp_pinfo.af_tcp.rcv_nxt &&
	    !((1<<sk->state)&(TCPF_CLOSE|TCPF_LISTEN))) {
		if (ipv6_opt_accepted(sk, skb)) {
			/* Latch a private clone as the socket's most recent
			 * pktoptions; the old one (if any) comes back from
			 * xchg() and is freed below. */
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			kfree_skb(skb);
			skb = NULL;
			if (skb2) {
				skb_set_owner_r(skb2, sk);
				skb = xchg(&sk->net_pinfo.af_inet6.pktoptions, skb2);
			}
		} else {
			kfree_skb(skb);
			skb = xchg(&sk->net_pinfo.af_inet6.pktoptions, NULL);
		}
	}

	if (skb)
		kfree_skb(skb);
	return 0;
}

/*
 * Protocol entry point for TCP segments arriving from the IPv6 layer.
 * Validates length and checksum, looks up the owning socket, fills in the
 * TCP control block (seq/end_seq/ack_seq), then either processes the
 * segment immediately or queues it on the socket backlog.
 */
int tcp_v6_rcv(struct sk_buff *skb, unsigned long len)
{
	struct tcphdr *th;
	struct sock *sk;
	struct in6_addr *saddr = &skb->nh.ipv6h->saddr;
	struct in6_addr *daddr = &skb->nh.ipv6h->daddr;

	th = skb->h.th;

	/* Only packets addressed to this host are processed. */
	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Pull up the IP header.
	 */
	__skb_pull(skb, skb->h.raw - skb->data);

	/*
	 *	Count it even if it's bad.
	 */
	tcp_statistics.TcpInSegs++;

	len = skb->len;
	if (len < sizeof(struct tcphdr))
		goto bad_packet;

	/*
	 *	Try to use the device checksum if provided.
	 */
	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		skb->csum = csum_partial((char *)th, len, 0);
		/* fall through: verify the sum we just computed */
	case CHECKSUM_HW:
		if (tcp_v6_check(th,len,saddr,daddr,skb->csum)) {
			printk(KERN_DEBUG "tcp csum failed\n");
	bad_packet:
			tcp_statistics.TcpInErrs++;
			goto discard_it;
		}
	default:
		/* CHECKSUM_UNNECESSARY */
	};

	/* Data offset must cover at least the fixed header and must not
	 * exceed the actual segment length. */
	if((th->doff * 4) < sizeof(struct tcphdr) ||
	   len < (th->doff * 4))
		goto bad_packet;

	sk = __tcp_v6_lookup(th, saddr, th->source, daddr, th->dest, tcp_v6_iif(skb));

	if (!sk)
		goto no_tcp_socket;

	/* end_seq counts SYN and FIN as one sequence number each. */
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);

	skb->used = 0;

	if(sk->state == TCP_TIME_WAIT)
		goto do_time_wait;

	/* Process now if nobody holds the socket, else backlog it. */
	if (!atomic_read(&sk->sock_readers))
		return tcp_v6_do_rcv(sk, skb);

	__skb_queue_tail(&sk->back_log, skb);
	return(0);

no_tcp_socket:
	tcp_v6_send_reset(skb);

discard_it:
	/*
	 *	Discard frame
	 */
	kfree_skb(skb);
	return 0;

do_time_wait:
	switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
					   skb, th, skb->len)) {
	case TCP_TW_ACK:
		tcp_v6_send_ack(skb,
				((struct tcp_tw_bucket *)sk)->snd_nxt,
				((struct tcp_tw_bucket *)sk)->rcv_nxt,
				((struct tcp_tw_bucket *)sk)->window);
		goto discard_it;
	case TCP_TW_RST:
		goto no_tcp_socket;
	default:
		goto discard_it;
	}
}

/*
 * Revalidate (or rebuild) the socket's cached route.  Called when the
 * cached dst has gone stale; reconstructs the flow — honouring a type-0
 * source route when present — and stores the fresh dst on the socket.
 * Returns 0 on success or the routing error code.
 */
static int tcp_v6_rebuild_header(struct sock *sk)
{
	struct dst_entry *dst = NULL;
	struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;

	if (sk->dst_cache)
		dst = dst_check(&sk->dst_cache, np->dst_cookie);

	if (dst == NULL) {
		struct flowi fl;

		fl.proto = IPPROTO_TCP;
		fl.nl_u.ip6_u.daddr = &np->daddr;
		fl.nl_u.ip6_u.saddr = &np->saddr;
		fl.fl6_flowlabel = np->flow_label;
		fl.oif = sk->bound_dev_if;
		fl.uli_u.ports.dport = sk->dport;
		fl.uli_u.ports.sport = sk->sport;

		/* With a source route, route towards the first hop. */
		if (np->opt && np->opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
			fl.nl_u.ip6_u.daddr = rt0->addr;
		}

		dst = ip6_route_output(sk, &fl);

		if (dst->error) {
			dst_release(dst);
			return dst->error;
		}

		ip6_dst_store(sk, dst, NULL);
	}

	return dst->error;
}

/*
 * Demultiplex helper: find the socket a segment belongs to.  Delegates
 * IPv4 frames to the v4 af_specific handler, otherwise does a full
 * IPv6 address/port/interface lookup.
 */
static struct sock * tcp_v6_get_sock(struct sk_buff *skb, struct tcphdr *th)
{
	struct in6_addr *saddr;
	struct in6_addr *daddr;

	if (skb->protocol == __constant_htons(ETH_P_IP))
		return ipv4_specific.get_sock(skb, th);

	saddr = &skb->nh.ipv6h->saddr;
	daddr = &skb->nh.ipv6h->daddr;
	return tcp_v6_lookup(saddr, th->source, daddr, th->dest, tcp_v6_iif(skb));
}

/*
 * Transmit a TCP segment over IPv6 (the queue_xmit hook of ipv6_specific).
 * Builds the flow from socket state, revalidates or re-creates the cached
 * route, then hands the skb to ip6_xmit().  On routing failure the error
 * is latched in sk->err_soft and the segment is silently dropped.
 */
static void tcp_v6_xmit(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct ipv6_pinfo * np = &sk->net_pinfo.af_inet6;
	struct flowi fl;
	struct dst_entry *dst = sk->dst_cache;

	fl.proto = IPPROTO_TCP;
	fl.fl6_dst = &np->daddr;
	fl.fl6_src = &np->saddr;
	fl.fl6_flowlabel = np->flow_label;
	fl.oif = sk->bound_dev_if;
	fl.uli_u.ports.sport = sk->sport;
	fl.uli_u.ports.dport = sk->dport;

	/* With a source route, route towards the first hop. */
	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
		fl.nl_u.ip6_u.daddr = rt0->addr;
	}

	if (sk->dst_cache)
		dst = dst_check(&sk->dst_cache, np->dst_cookie);

	if (dst == NULL) {
		dst = ip6_route_output(sk, &fl);

		if (dst->error) {
			sk->err_soft = -dst->error;
			dst_release(dst);
			return;
		}

		ip6_dst_store(sk, dst, NULL);
	}

	skb->dst = dst_clone(dst);

	/* Restore final destination back after routing done */
	fl.nl_u.ip6_u.daddr = &np->daddr;

	ip6_xmit(sk, skb, &fl, np->opt);
}

/*
 * Fill a sockaddr_in6 with the peer's address and port (getpeername
 * support for AF_INET6 TCP sockets).
 */
static void v6_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
{
	struct ipv6_pinfo * np = &sk->net_pinfo.af_inet6;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;

	sin6->sin6_family = AF_INET6;
	memcpy(&sin6->sin6_addr, &np->daddr, sizeof(struct in6_addr));
	sin6->sin6_port	= sk->dport;
	/* We do not store received flowlabel for TCP */
	sin6->sin6_flowinfo = 0;
}

/* AF-specific operations for native TCP-over-IPv6 sockets. */
static struct tcp_func ipv6_specific = {
	tcp_v6_xmit,			/* queue_xmit		*/
	tcp_v6_send_check,		/* send_check		*/
	tcp_v6_rebuild_header,		/* rebuild_header	*/
	tcp_v6_conn_request,		/* conn_request		*/
	tcp_v6_syn_recv_sock,		/* syn_recv_sock	*/
	tcp_v6_get_sock,		/* get_sock		*/
	sizeof(struct ipv6hdr),		/* net_header_len	*/
	ipv6_setsockopt,		/* setsockopt		*/
	ipv6_getsockopt,		/* getsockopt		*/
	v6_addr2sockaddr,		/* addr2sockaddr	*/
	sizeof(struct sockaddr_in6)	/* sockaddr_len		*/
};

/*
 *	TCP over IPv4 via INET6 API
 */
static struct tcp_func ipv6_mapped = {
	ip_queue_xmit,			/* queue_xmit		*/
	tcp_v4_send_check,		/* send_check		*/
	tcp_v4_rebuild_header,		/* rebuild_header	*/
	tcp_v6_conn_request,		/* conn_request		*/
	tcp_v6_syn_recv_sock,		/* syn_recv_sock	*/
	tcp_v6_get_sock,		/* get_sock		*/
	sizeof(struct iphdr),		/* net_header_len	*/
	ipv6_setsockopt,		/* setsockopt		*/
	ipv6_getsockopt,		/* getsockopt		*/
	v6_addr2sockaddr,		/* addr2sockaddr	*/
	sizeof(struct sockaddr_in6)	/* sockaddr_len		*/
};

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);

	tp->rto  = TCP_TIMEOUT_INIT;		/*TCP_WRITE_TIME*/
	tp->mdev = TCP_TIMEOUT_INIT;
	tp->mss_clamp = ~0;	/* no clamp until peer announces an MSS */

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_cwnd_cnt = 0;
	tp->snd_ssthresh = 0x7fffffff;

	sk->state = TCP_CLOSE;
	sk->max_ack_backlog = SOMAXCONN;
	tp->rcv_mss = 536;

	/* Init SYN queue. */
	tcp_synq_init(tp);

	/* New sockets start out native IPv6; switched to ipv6_mapped
	 * elsewhere when they connect to an IPv4-mapped peer. */
	sk->tp_pinfo.af_tcp.af_specific = &ipv6_specific;

	sk->write_space = tcp_write_space;

	return 0;
}

/*
 * Tear down a TCPv6 socket: stop timers, drain the write and
 * out-of-order queues, release any still-held bind-bucket port, then
 * finish with the generic inet6 destructor.
 */
static int tcp_v6_destroy_sock(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	struct sk_buff *skb;

	tcp_clear_xmit_timers(sk);

	if (sk->keepopen)
		tcp_dec_slow_timer(TCP_SLT_KEEPALIVE);

	/*
	 *	Cleanup up the write buffer.
	 */

  	while((skb = __skb_dequeue(&sk->write_queue)) != NULL)
		kfree_skb(skb);

	/*
	 *  Cleans up our, hopefuly empty, out_of_order_queue
	 */

  	while((skb = __skb_dequeue(&tp->out_of_order_queue)) != NULL)
		kfree_skb(skb);

	/* Clean up a locked TCP bind bucket, this only happens if a
	 * port is allocated for a socket, but it never fully connects.
	 */
	if(sk->prev != NULL)
		tcp_put_port(sk);

	return inet6_destroy_sock(sk);
}

/* Protocol operations table registered for SOCK_STREAM/IPPROTO_TCP
 * on PF_INET6 sockets. */
struct proto tcpv6_prot = {
	(struct sock *)&tcpv6_prot,	/* sklist_next */
	(struct sock *)&tcpv6_prot,	/* sklist_prev */
	tcp_close,			/* close */
	tcp_v6_connect,			/* connect */
	tcp_accept,			/* accept */
	NULL,				/* retransmit */
	tcp_write_wakeup,		/* write_wakeup */
	tcp_read_wakeup,		/* read_wakeup */
	tcp_poll,			/* poll */
	tcp_ioctl,			/* ioctl */
	tcp_v6_init_sock,		/* init */
	tcp_v6_destroy_sock,		/* destroy */
	tcp_shutdown,			/* shutdown */
	tcp_setsockopt,			/* setsockopt */
	tcp_getsockopt,			/* getsockopt */
	tcp_v6_sendmsg,			/* sendmsg */
	tcp_recvmsg,			/* recvmsg */
	NULL,				/* bind */
	tcp_v6_do_rcv,			/* backlog_rcv */
	tcp_v6_hash,			/* hash */
	tcp_v6_unhash,			/* unhash */
	tcp_v6_get_port,		/* get_port */
	128,				/* max_header */
	0,				/* retransmits */
	"TCPv6",			/* name */
	0,				/* inuse */
	0				/* highestinuse */
};

/* Inet6 protocol handler hooking TCP into the IPv6 input path. */
static struct inet6_protocol tcpv6_protocol =
{
	tcp_v6_rcv,		/* TCP handler		*/
	tcp_v6_err,		/* TCP error control	*/
	NULL,			/* next			*/
	IPPROTO_TCP,		/* protocol ID		*/
	0,			/* copy			*/
	NULL,			/* data			*/
	"TCPv6"			/* name			*/
};

/* Boot-time initialisation: register the TCPv6 protocol handler. */
__initfunc(void tcpv6_init(void))
{
	/* register inet6 protocol */
	inet6_add_protocol(&tcpv6_protocol);
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -