
📄 tcp_ipv6.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 4
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_key tw_key;
#endif

#ifdef CONFIG_TCP_MD5SIG
	if (!tw && skb->sk) {
		key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr);
	} else if (tw && tw->tw_md5_keylen) {
		tw_key.key = tw->tw_md5_key;
		tw_key.keylen = tw->tw_md5_keylen;
		key = &tw_key;
	} else {
		key = NULL;
	}
#endif

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = 1;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_do_calc_md5_hash((__u8 *)topt, key,
					&ipv6_hdr(skb)->daddr,
					&ipv6_hdr(skb)->saddr,
					t1, IPPROTO_TCP, tot_len);
	}
#endif

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			return;
		}
	}

	kfree_skb(buff);
}
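/* [Listing annotation, not part of the kernel source.] In
 * tcp_v6_send_ack() above, each TCP option is emitted as one aligned,
 * big-endian 32-bit word: two NOP pad bytes, then the option kind and
 * length, followed by the option payload. A minimal sketch of that
 * packing, using a hypothetical helper name:
 */
#if 0
static inline __be32 tcp_opt_word(u8 kind, u8 len)
{
	/* two NOP pad bytes, then option kind and option length */
	return htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
		     (kind << 8) | len);
}

/* tcp_opt_word(TCPOPT_TIMESTAMP, TCPOLEN_TIMESTAMP) yields the same word
 * that tcp_v6_send_ack() builds inline before the two timestamp values,
 * which is also why tot_len grows by the *_ALIGNED option lengths rather
 * than the raw option sizes. */
#endif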
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
}


static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(&tcp_hashinfo, &ipv6_hdr(skb)->saddr,
					 th->source, &ipv6_hdr(skb)->daddr,
					 ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#if 0 /*def CONFIG_SYN_COOKIES*/
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/*
	 *	There are no SYN attacks on IPv6, yet...
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	TCP_ECN_create_request(req, tcp_hdr(skb));
	treq->pktopts = NULL;
	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		treq->pktopts = skb;
	}
	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (isn == 0)
		isn = tcp_v6_init_sequence(skb);

	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req, NULL))
		goto drop;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop:
	if (req)
		reqsk_free(req);

	return 0; /* don't send reset */
}

static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */
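		/* [Listing annotation, not part of the kernel source.]
		 * This branch handles an IPv4 connection accepted on an
		 * IPv6 listener: the new socket keeps IPv4 semantics, and
		 * the two ipv6_addr_set() calls above store the peer and
		 * local addresses in IPv4-mapped form (::ffff:a.b.c.d),
		 * which is what the htonl(0x0000FFFF) word encodes. */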
		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
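	/* [Listing annotation, not part of the kernel source.] The IPv4
	 * address fields of this pure-IPv6 socket are filled with the
	 * LOOPBACK4_IPV6 placeholder (127.0.0.6) rather than left zero,
	 * so code that inspects the IPv4 fields sees a recognizable
	 * dummy value instead of a wild address. */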
#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet6_hash(&tcp_hashinfo, newsk);
	inet_inherit_port(&tcp_hashinfo, sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}

static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have it's spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);

		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if(nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;
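	/* [Listing annotation, not part of the kernel source.] The receive
	 * path above dispatches on socket state: ESTABLISHED segments take
	 * the fast path through tcp_rcv_established(), LISTEN sockets
	 * resolve the segment to a request or child socket via
	 * tcp_v6_hnd_req() and tcp_child_process(), and all other states
	 * fall through to tcp_rcv_state_process(). The reset, discard,
	 * csum_err and ipv6_pktoptions labels targeted by the gotos appear
	 * later in the function, on the next page of this listing. */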
