
tcp_ipv4.c
Linux kernel source code (C), page 1 of 5
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

		if (!p)
			return -EINVAL;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}

static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
				   __be32 saddr, __be32 daddr,
				   struct tcphdr *th, int protocol,
				   int tcplen)
{
	struct scatterlist sg[4];
	__u16 data_len;
	int block = 0;
	__sum16 old_checksum;
	struct tcp_md5sig_pool *hp;
	struct tcp4_pseudohdr *bp;
	struct hash_desc *desc;
	int err;
	unsigned int nbytes = 0;

	/*
	 * Okay, so RFC2385 is turned on for this connection,
	 * so we need to generate the MD5 hash for the packet now.
	 */

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;

	bp = &hp->md5_blk.ip4;
	desc = &hp->md5_desc;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = protocol;
	bp->len = htons(tcplen);

	sg_init_table(sg, 4);

	sg_set_buf(&sg[block++], bp, sizeof(*bp));
	nbytes += sizeof(*bp);

	/* 2. the TCP header, excluding options, and assuming a
	 * checksum of zero
	 */
	old_checksum = th->check;
	th->check = 0;
	sg_set_buf(&sg[block++], th, sizeof(struct tcphdr));
	nbytes += sizeof(struct tcphdr);

	/* 3. the TCP segment data (if any) */
	data_len = tcplen - (th->doff << 2);
	if (data_len > 0) {
		unsigned char *data = (unsigned char *)th + (th->doff << 2);
		sg_set_buf(&sg[block++], data, data_len);
		nbytes += data_len;
	}

	/* 4. an independently-specified key or password, known to both
	 * TCPs and presumably connection-specific
	 */
	sg_set_buf(&sg[block++], key->key, key->keylen);
	nbytes += key->keylen;

	sg_mark_end(&sg[block - 1]);

	/* Now store the Hash into the packet */
	err = crypto_hash_init(desc);
	if (err)
		goto clear_hash;
	err = crypto_hash_update(desc, sg, nbytes);
	if (err)
		goto clear_hash;
	err = crypto_hash_final(desc, md5_hash);
	if (err)
		goto clear_hash;

	/* Reset header, and free up the crypto */
	tcp_put_md5sig_pool();
	th->check = old_checksum;

out:
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	goto out;
}

int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
			 struct sock *sk,
			 struct dst_entry *dst,
			 struct request_sock *req,
			 struct tcphdr *th, int protocol,
			 int tcplen)
{
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->saddr;
		daddr = inet_sk(sk)->daddr;
	} else {
		struct rtable *rt = (struct rtable *)dst;
		BUG_ON(!rt);
		saddr = rt->rt_src;
		daddr = rt->rt_dst;
	}
	return tcp_v4_do_calc_md5_hash(md5_hash, key,
				       saddr, daddr,
				       th, protocol, tcplen);
}

EXPORT_SYMBOL(tcp_v4_calc_md5_hash);
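/*
 * For context, the key-management code above is reached from user space
 * through the TCP_MD5SIG socket option. A minimal, hypothetical sketch
 * (the socket fd, peer address, and key are assumptions, not taken from
 * this file):
 *
 *	struct tcp_md5sig md5;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	memset(&md5, 0, sizeof(md5));
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * Passing tcpm_keylen == 0 removes the key for that peer, which is the
 * tcp_v4_md5_do_del() branch at the top of this fragment.
 */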
static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int length = (th->doff << 2) - sizeof(struct tcphdr);
	int genhash;
	unsigned char *ptr;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);

	/*
	 * If the TCP option length is less than the TCP_MD5SIG
	 * option length, then we can shortcut
	 */
	if (length < TCPOLEN_MD5SIG) {
		if (hash_expected)
			return 1;
		else
			return 0;
	}

	/* Okay, we can't shortcut - we have to grub through the options */
	ptr = (unsigned char *)(th + 1);
	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			goto done_opts;
		case TCPOPT_NOP:
			length--;
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2)
				goto done_opts;
			if (opsize > length)
				goto done_opts;
			if (opcode == TCPOPT_MD5SIG) {
				hash_location = ptr;
				goto done_opts;
			}
		}
		ptr += opsize - 2;
		length -= opsize;
	}
done_opts:
	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		LIMIT_NETDEBUG(KERN_INFO "MD5 Hash expected but NOT found "
			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
			       NIPQUAD(iph->saddr), ntohs(th->source),
			       NIPQUAD(iph->daddr), ntohs(th->dest));
		return 1;
	}

	if (!hash_expected && hash_location) {
		LIMIT_NETDEBUG(KERN_INFO "MD5 Hash NOT expected but found "
			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
			       NIPQUAD(iph->saddr), ntohs(th->source),
			       NIPQUAD(iph->daddr), ntohs(th->dest));
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_do_calc_md5_hash(newhash,
					  hash_expected,
					  iph->saddr, iph->daddr,
					  th, sk->sk_protocol,
					  skb->len);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for "
			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n",
			       NIPQUAD(iph->saddr), ntohs(th->source),
			       NIPQUAD(iph->daddr), ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif
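/*
 * For reference, the option that the parsing loop above looks for is the
 * RFC 2385 TCP MD5 signature option: kind 19 (TCPOPT_MD5SIG) and total
 * length 18 (TCPOLEN_MD5SIG), i.e. two header bytes followed by the
 * 16-byte digest that hash_location ends up pointing at:
 *
 *	+---------+---------+----------------------------------+
 *	| Kind=19 | Len=18  |      MD5 digest (16 bytes)       |
 *	+---------+---------+----------------------------------+
 */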
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_send_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
};
#endif

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	 = sizeof(struct tcp_timewait_sock),
	.twsk_unique	 = tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer to SYNs sent to broadcast or multicast */
	if (((struct rtable *)skb->dst)->rt_flags &
	    (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = 536;
	tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie) {
		tcp_clear_options(&tmp_opt);
		tmp_opt.saw_tstamp = 0;
	}

	if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
		/* Some OSes (unknown ones, but I see them on web server, which
		 * contains information interesting only for windows'
		 * users) do not send their stamp in SYN. It is easy case.
		 * We simply do not advertise TS support.
		 */
		tmp_opt.saw_tstamp = 0;
		tmp_opt.tstamp_ok  = 0;
	}
	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

	tcp_openreq_init(req, &tmp_opt, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->opt = tcp_v4_save_options(sk, skb);
	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
				dst_release(dst);
				goto drop_and_free;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
				       "request from %u.%u.%u.%u/%u\n",
				       NIPQUAD(saddr),
				       ntohs(tcp_hdr(skb)->source));
			dst_release(dst);
			goto drop_and_free;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v4_send_synack(sk, req, dst))
		goto drop_and_free;

	if (want_cookie) {
		reqsk_free(req);
	} else {
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	}
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
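/*
 * A worked example of the "last quarter" clause above, assuming a
 * sysctl_max_syn_backlog of 1024: once fewer than 1024 >> 2 == 256
 * request slots remain free and syncookies are disabled, a SYN from a
 * peer with no remembered timestamp (peer->tcp_ts_stamp == 0) and no
 * cached route RTT is dropped rather than queued, so the tail of the
 * queue is reserved for destinations already proven to be alive.
 */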
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->daddr	      = ireq->rmt_addr;
	newinet->rcv_saddr    = ireq->loc_addr;
	newinet->saddr	      = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->id = newtp->write_seq ^ jiffies;

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet_hash(&tcp_hashinfo, newsk, 0);
	__inet_inherit_port(&tcp_hashinfo, sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}
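/*
 * Taken together, the passive-open path in this fragment runs roughly:
 *
 *	SYN arrives  -> tcp_v4_conn_request()   request_sock queued, or a
 *	                                        syncookie used instead
 *	             -> tcp_v4_send_synack()    SYN/ACK transmitted
 *	ACK arrives  -> tcp_v4_syn_recv_sock()  full socket cloned from the
 *	                                        listener; any MD5 key for
 *	                                        the peer is copied across,
 *	                                        then the socket is hashed
 *	                                        and inherits the listener's
 *	                                        port
 */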
