tcp_ipv4.c

Linux kernel source code (C)
Page 1 of 5
}

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
        struct tcphdr *th = tcp_hdr(skb);
        const struct iphdr *iph = ip_hdr(skb);
        struct sock *nsk;
        struct request_sock **prev;
        /* Find possible connection requests. */
        struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
                                                       iph->saddr, iph->daddr);
        if (req)
                return tcp_check_req(sk, skb, req, prev);

        nsk = inet_lookup_established(&tcp_hashinfo, iph->saddr, th->source,
                                      iph->daddr, th->dest, inet_iif(skb));

        if (nsk) {
                if (nsk->sk_state != TCP_TIME_WAIT) {
                        bh_lock_sock(nsk);
                        return nsk;
                }
                inet_twsk_put(inet_twsk(nsk));
                return NULL;
        }

#ifdef CONFIG_SYN_COOKIES
        if (!th->rst && !th->syn && th->ack)
                sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
        return sk;
}

static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
        const struct iphdr *iph = ip_hdr(skb);

        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                if (!tcp_v4_check(skb->len, iph->saddr,
                                  iph->daddr, skb->csum)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        return 0;
                }
        }

        skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
                                       skb->len, IPPROTO_TCP, 0);

        if (skb->len <= 76) {
                return __skb_checksum_complete(skb);
        }
        return 0;
}

/* The socket must have it's spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
        /*
         * We really want to reject the packet as early as possible
         * if:
         *  o We're expecting an MD5'd packet and this is no MD5 tcp option
         *  o There is an MD5 option and we're not expecting one
         */
        if (tcp_v4_inbound_md5_hash(sk, skb))
                goto discard;
#endif

        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                TCP_CHECK_TIMER(sk);
                if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
                        rsk = sk;
                        goto reset;
                }
                TCP_CHECK_TIMER(sk);
                return 0;
        }

        if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
                goto csum_err;

        if (sk->sk_state == TCP_LISTEN) {
                struct sock *nsk = tcp_v4_hnd_req(sk, skb);
                if (!nsk)
                        goto discard;

                if (nsk != sk) {
                        if (tcp_child_process(sk, nsk, skb)) {
                                rsk = nsk;
                                goto reset;
                        }
                        return 0;
                }
        }

        TCP_CHECK_TIMER(sk);
        if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
                rsk = sk;
                goto reset;
        }
        TCP_CHECK_TIMER(sk);
        return 0;

reset:
        tcp_v4_send_reset(rsk, skb);
discard:
        kfree_skb(skb);
        /* Be careful here. If this function gets more complicated and
         * gcc suffers from register pressure on the x86, sk (in %ebx)
         * might be destroyed here. This current version compiles correctly,
         * but you have been warned.
         */
        return 0;

csum_err:
        TCP_INC_STATS_BH(TCP_MIB_INERRS);
        goto discard;
}

/*
 *      From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
        const struct iphdr *iph;
        struct tcphdr *th;
        struct sock *sk;
        int ret;

        if (skb->pkt_type != PACKET_HOST)
                goto discard_it;

        /* Count it even if it's bad */
        TCP_INC_STATS_BH(TCP_MIB_INSEGS);

        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                goto discard_it;

        th = tcp_hdr(skb);

        if (th->doff < sizeof(struct tcphdr) / 4)
                goto bad_packet;
        if (!pskb_may_pull(skb, th->doff * 4))
                goto discard_it;

        /* An explanation is required here, I think.
         * Packet length and doff are validated by header prediction,
         * provided case of th->doff==0 is eliminated.
         * So, we defer the checks. */

        if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
                goto bad_packet;

        th = tcp_hdr(skb);
        iph = ip_hdr(skb);
        TCP_SKB_CB(skb)->seq = ntohl(th->seq);
        TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
                                    skb->len - th->doff * 4);
        TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
        TCP_SKB_CB(skb)->when    = 0;
        TCP_SKB_CB(skb)->flags   = iph->tos;
        TCP_SKB_CB(skb)->sacked  = 0;

        sk = __inet_lookup(&tcp_hashinfo, iph->saddr, th->source,
                           iph->daddr, th->dest, inet_iif(skb));
        if (!sk)
                goto no_tcp_socket;

process:
        if (sk->sk_state == TCP_TIME_WAIT)
                goto do_time_wait;

        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;
        nf_reset(skb);

        if (sk_filter(sk, skb))
                goto discard_and_relse;

        skb->dev = NULL;

        bh_lock_sock_nested(sk);
        ret = 0;
        if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
                struct tcp_sock *tp = tcp_sk(sk);
                if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
                        tp->ucopy.dma_chan = get_softnet_dma();
                if (tp->ucopy.dma_chan)
                        ret = tcp_v4_do_rcv(sk, skb);
                else
#endif
                {
                        if (!tcp_prequeue(sk, skb))
                                ret = tcp_v4_do_rcv(sk, skb);
                }
        } else
                sk_add_backlog(sk, skb);
        bh_unlock_sock(sk);

        sock_put(sk);

        return ret;

no_tcp_socket:
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;

        if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
                TCP_INC_STATS_BH(TCP_MIB_INERRS);
        } else {
                tcp_v4_send_reset(NULL, skb);
        }

discard_it:
        /* Discard frame. */
        kfree_skb(skb);
        return 0;

discard_and_relse:
        sock_put(sk);
        goto discard_it;

do_time_wait:
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                inet_twsk_put(inet_twsk(sk));
                goto discard_it;
        }

        if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
                TCP_INC_STATS_BH(TCP_MIB_INERRS);
                inet_twsk_put(inet_twsk(sk));
                goto discard_it;
        }
        switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
        case TCP_TW_SYN: {
                struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
                                                        iph->daddr, th->dest,
                                                        inet_iif(skb));
                if (sk2) {
                        inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
                        inet_twsk_put(inet_twsk(sk));
                        sk = sk2;
                        goto process;
                }
                /* Fall through to ACK */
        }
        case TCP_TW_ACK:
                tcp_v4_timewait_ack(sk, skb);
                break;
        case TCP_TW_RST:
                goto no_tcp_socket;
        case TCP_TW_SUCCESS:;
        }
        goto discard_it;
}

/* VJ's idea. Save last timestamp seen from this destination
 * and hold it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter synchronized
 * state.
 */
int tcp_v4_remember_stamp(struct sock *sk)
{
        struct inet_sock *inet = inet_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
        struct inet_peer *peer = NULL;
        int release_it = 0;

        if (!rt || rt->rt_dst != inet->daddr) {
                peer = inet_getpeer(inet->daddr, 1);
                release_it = 1;
        } else {
                if (!rt->peer)
                        rt_bind_peer(rt, 1);
                peer = rt->peer;
        }

        if (peer) {
                if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
                    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
                     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
                        peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
                        peer->tcp_ts = tp->rx_opt.ts_recent;
                }
                if (release_it)
                        inet_putpeer(peer);
                return 1;
        }

        return 0;
}

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
        struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

        if (peer) {
                const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

                if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
                    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
                     peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
                        peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
                        peer->tcp_ts       = tcptw->tw_ts_recent;
                }
                inet_putpeer(peer);
                return 1;
        }

        return 0;
}

struct inet_connection_sock_af_ops ipv4_specific = {
        .queue_xmit        = ip_queue_xmit,
        .send_check        = tcp_v4_send_check,
        .rebuild_header    = inet_sk_rebuild_header,
        .conn_request      = tcp_v4_conn_request,
        .syn_recv_sock     = tcp_v4_syn_recv_sock,
        .remember_stamp    = tcp_v4_remember_stamp,
        .net_header_len    = sizeof(struct iphdr),
        .setsockopt        = ip_setsockopt,
        .getsockopt        = ip_getsockopt,
        .addr2sockaddr     = inet_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ip_setsockopt,
        .compat_getsockopt = compat_ip_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
        .md5_lookup     = tcp_v4_md5_lookup,
        .calc_md5_hash  = tcp_v4_calc_md5_hash,
        .md5_add        = tcp_v4_md5_add_func,
        .md5_parse      = tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        skb_queue_head_init(&tp->out_of_order_queue);
        tcp_init_xmit_timers(sk);
        tcp_prequeue_init(tp);

        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->mdev = TCP_TIMEOUT_INIT;

        /* So many TCP implementations out there (incorrectly) count the
         * initial SYN frame in their delayed-ACK and congestion control
         * algorithms that we must have the following bandaid to talk
         * efficiently to them.  -DaveM
         */
        tp->snd_cwnd = 2;

        /* See draft-stevens-tcpca-spec-01 for discussion of the
         * initialization of these values.
         */
        tp->snd_ssthresh = 0x7fffffff;  /* Infinity */
        tp->snd_cwnd_clamp = ~0;
        tp->mss_cache = 536;

        tp->reordering = sysctl_tcp_reordering;
        icsk->icsk_ca_ops = &tcp_init_congestion_ops;

        sk->sk_state = TCP_CLOSE;

        sk->sk_write_space = sk_stream_write_space;
        sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

        icsk->icsk_af_ops = &ipv4_specific;
        icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
        tp->af_specific = &tcp_sock_ipv4_specific;
#endif

        sk->sk_sndbuf = sysctl_tcp_wmem[1];
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];

        atomic_inc(&tcp_sockets_allocated);

        return 0;
}

int tcp_v4_destroy_sock(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tcp_clear_xmit_timers(sk);

        tcp_cleanup_congestion_control(sk);

        /* Cleanup up the write buffer. */
        tcp_write_queue_purge(sk);

        /* Cleans up our, hopefully empty, out_of_order_queue. */
        __skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
        /* Clean up the MD5 key list, if any */
        if (tp->md5sig_info) {
                tcp_v4_clear_md5_list(sk);
                kfree(tp->md5sig_info);
                tp->md5sig_info = NULL;
        }
#endif

#ifdef CONFIG_NET_DMA
        /* Cleans up our sk_async_wait_queue */
        __skb_queue_purge(&sk->sk_async_wait_queue);
#endif

        /* Clean prequeue, it must be empty really */
        __skb_queue_purge(&tp->ucopy.prequeue);

        /* Clean up a referenced TCP bind bucket. */
        if (inet_csk(sk)->icsk_bind_hash)
                inet_put_port(&tcp_hashinfo, sk);

        /*
         * If sendmsg cached page exists, toss it.
         */
        if (sk->sk_sndmsg_page) {
                __free_page(sk->sk_sndmsg_page);
                sk->sk_sndmsg_page = NULL;
        }

        atomic_dec(&tcp_sockets_allocated);

        return 0;
}

EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
{
        return hlist_empty(head) ? NULL :
                list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
        return tw->tw_node.next ?
                hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}

static void *listening_get_next(struct seq_file *seq, void *cur)
{
        struct inet_connection_sock *icsk;
        struct hlist_node *node;
        struct sock *sk = cur;
        struct tcp_iter_state* st = seq->private;

        if (!sk) {
                st->bucket = 0;
                sk = sk_head(&tcp_hashinfo.listening_hash[0]);
                goto get_sk;
        }

        ++st->num;

        if (st->state == TCP_SEQ_STATE_OPENREQ) {
                struct request_sock *req = cur;

                icsk = inet_csk(st->syn_wait_sk);
                req = req->dl_next;
                while (1) {
                        while (req) {
                                if (req->rsk_ops->family == st->family) {
                                        cur = req;
                                        goto out;
                                }
                                req = req->dl_next;
                        }
                        if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
