
tcp_ipv4.c
Linux Kernel 2.6.9 for OMAP1710
Language: C
Page 1 of 5
	newinet->rcv_saddr    = req->af.v4_req.loc_addr;
	newinet->saddr	      = req->af.v4_req.loc_addr;
	newinet->opt	      = req->af.v4_req.opt;
	req->af.v4_req.opt    = NULL;
	newinet->mc_index     = tcp_v4_iif(skb);
	newinet->mc_ttl	      = skb->nh.iph->ttl;
	newtp->ext_header_len = 0;
	if (newinet->opt)
		newtp->ext_header_len = newinet->opt->optlen;
	newtp->ext2_header_len = dst->header_len;
	newinet->id = newtp->write_seq ^ jiffies;

	tcp_sync_mss(newsk, dst_pmtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	__tcp_v4_hash(newsk, 0);
	__tcp_inherit_port(sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	struct iphdr *iph = skb->nh.iph;
	struct tcp_opt *tp = tcp_sk(sk);
	struct sock *nsk;
	struct open_request **prev;
	/* Find possible connection requests. */
	struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
						     iph->saddr, iph->daddr);

	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
					  th->source,
					  skb->nh.iph->daddr,
					  ntohs(th->dest),
					  tcp_v4_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		tcp_tw_put((struct tcp_tw_bucket *)nsk);
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static int tcp_v4_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_HW) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
				  skb->nh.iph->daddr, skb->csum))
			return 0;

		NETDEBUG(if (net_ratelimit())
				printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
		skb->ip_summed = CHECKSUM_NONE;
	}
	if (skb->len <= 76) {
		if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
				 skb->nh.iph->daddr,
				 skb_checksum(skb, 0, skb->len, 0)))
			return -1;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
					  skb->nh.iph->saddr,
					  skb->nh.iph->daddr, 0);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;
}

/*
 *	From tcp_input.c
 */
int tcp_v4_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = skb->h.th;

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided the case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
	     tcp_v4_checksum_init(skb) < 0))
		goto bad_packet;

	th = skb->h.th;
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = skb->nh.iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
			     skb->nh.iph->daddr, ntohs(th->dest),
			     tcp_v4_iif(skb));

	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		tcp_tw_put((struct tcp_tw_bucket *) sk);
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		tcp_tw_put((struct tcp_tw_bucket *) sk);
		goto discard_it;
	}
	switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
					   skb, th, skb->len)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
							  ntohs(th->dest),
							  tcp_v4_iif(skb));
		if (sk2) {
			tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
			tcp_tw_put((struct tcp_tw_bucket *)sk);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

/* With per-bucket locks this operation is not atomic, so this
 * version is no worse.
 */
static void __tcp_v4_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

static int tcp_v4_reselect_saddr(struct sock *sk)
{
	struct inet_opt *inet = inet_sk(sk);
	int err;
	struct rtable *rt;
	__u32 old_saddr = inet->saddr;
	__u32 new_saddr;
	__u32 daddr = inet->daddr;

	if (inet->opt && inet->opt->srr)
		daddr = inet->opt->faddr;

	/* Query new route. */
	err = ip_route_connect(&rt, daddr, 0,
			       RT_TOS(inet->tos) | sk->sk_localroute,
			       sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->sport, inet->dport, sk);
	if (err)
		return err;

	__sk_dst_set(sk, &rt->u.dst);
	tcp_v4_setup_caps(sk, &rt->u.dst);
	tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;

	new_saddr = rt->rt_src;

	if (new_saddr == old_saddr)
		return 0;

	if (sysctl_ip_dynaddr > 1) {
		printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
				 "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
		       NIPQUAD(old_saddr),
		       NIPQUAD(new_saddr));
	}

	inet->saddr = new_saddr;
	inet->rcv_saddr = new_saddr;

	/* XXX The only one ugly spot where we need to
	 * XXX really change the sockets identity after
	 * XXX it has entered the hashes. -DaveM
	 *
	 * Besides that, it does not check for connection
	 * uniqueness. Wait for troubles.
	 */
	__tcp_v4_rehash(sk);
	return 0;
}

int tcp_v4_rebuild_header(struct sock *sk)
{
	struct inet_opt *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
	u32 daddr;
	int err;

	/* Route is OK, nothing to do. */
	if (rt)
		return 0;

	/* Reroute. */
	daddr = inet->daddr;
	if (inet->opt && inet->opt->srr)
		daddr = inet->opt->faddr;

	{
		struct flowi fl = { .oif = sk->sk_bound_dev_if,
				    .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = inet->saddr,
						.tos = RT_CONN_FLAGS(sk) } },
				    .proto = IPPROTO_TCP,
				    .uli_u = { .ports =
					       { .sport = inet->sport,
						 .dport = inet->dport } } };

		err = ip_route_output_flow(&rt, &fl, sk, 0);
	}
	if (!err) {
		__sk_dst_set(sk, &rt->u.dst);
		tcp_v4_setup_caps(sk, &rt->u.dst);
		tcp_sk(sk)->ext2_header_len = rt->u.dst.header_len;
		return 0;
	}

	/* Routing failed... */
	sk->sk_route_caps = 0;

	if (!sysctl_ip_dynaddr ||
	    sk->sk_state != TCP_SYN_SENT ||
	    (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
	    (err = tcp_v4_reselect_saddr(sk)) != 0)
		sk->sk_err_soft = -err;

	return err;
}

static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
	struct inet_opt *inet = inet_sk(sk);

	sin->sin_family		= AF_INET;
	sin->sin_addr.s_addr	= inet->daddr;
	sin->sin_port		= inet->dport;
}

/* VJ's idea. Save last timestamp seen from this destination
 * and hold it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter synchronized
 * state.
 */
int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_opt *inet = inet_sk(sk);
	struct tcp_opt *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->daddr) {
		peer = inet_getpeer(inet->daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		     peer->tcp_ts_stamp <= tp->ts_recent_stamp)) {
			peer->tcp_ts_stamp = tp->ts_recent_stamp;
			peer->tcp_ts = tp->ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}

int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
{
	struct inet_peer *peer = NULL;

	peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
		     peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
			peer->tcp_ts = tw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}

struct tcp_func ipv4_specific = {
	.queue_xmit	=	ip_queue_xmit,
	.send_check	=	tcp_v4_send_check,
	.rebuild_header	=	tcp_v4_rebuild_header,
	.conn_request	=	tcp_v4_conn_request,
	.syn_recv_sock	=	tcp_v4_syn_recv_sock,
	.remember_stamp	=	tcp_v4_remember_stamp,
	.net_header_len	=	sizeof(struct iphdr),
	.setsockopt	=	ip_setsockopt,
	.getsockopt	=	ip_getsockopt,
	.addr2sockaddr	=	v4_addr2sockaddr,
	.sockaddr_len	=	sizeof(struct sockaddr_in),
};

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct tcp_opt *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	tp->rto  = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;	/* Infinity */
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache_std = tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sk->sk_use_write_queue = 1;

	tp->af_specific = &ipv4_specific;

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}

int tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_opt *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	/* Clean up the write buffer. */
	sk_stream_writequeue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

	/* Clean up the prequeue; it really must be empty. */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
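The checksum logic in tcp_v4_checksum_init() above verifies or defers the TCP checksum, which is computed over an IPv4 pseudo-header (source address, destination address, protocol, TCP length) plus the TCP header and payload. Below is a minimal user-space sketch of that standard computation (RFC 793/1071), assuming addresses are passed as host-order numbers; the names tcp_checksum and csum_add are illustrative, not kernel API.

#include <stdint.h>
#include <stddef.h>

/* Accumulate 16-bit big-endian words from a byte stream; an odd
 * trailing byte is zero-padded, per RFC 1071. */
static uint32_t csum_add(uint32_t sum, const uint8_t *data, size_t len)
{
	while (len > 1) {
		sum += ((uint32_t)data[0] << 8) | data[1];
		data += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)data[0] << 8;
	return sum;
}

/* saddr/daddr are IPv4 addresses as host-order numbers; segment points
 * at the TCP header (checksum field zeroed) followed by the payload. */
uint16_t tcp_checksum(uint32_t saddr, uint32_t daddr,
		      const uint8_t *segment, size_t len)
{
	uint32_t sum = 0;

	/* Pseudo-header: source, destination, protocol, TCP length. */
	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += 6;			/* IPPROTO_TCP */
	sum += (uint32_t)len;

	sum = csum_add(sum, segment, len);

	/* Fold carries back into the low 16 bits, then complement. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

A result of zero when the received checksum field is left in place is what tcp_v4_check() tests for above: a correct segment sums to 0xffff, whose complement is 0.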

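One idiom in tcp_v4_remember_stamp() deserves a note: the test (s32)(peer->tcp_ts - tp->ts_recent) <= 0 compares 32-bit TCP timestamps in modulo-2^32 (serial number) arithmetic, so it remains correct after the timestamp counter wraps. A self-contained sketch of that comparison follows; ts_before_or_eq is an illustrative name, not a kernel function.

#include <stdint.h>
#include <stdio.h>

/* Nonzero when a is at or before b modulo 2^32, mirroring the
 * (s32)(a - b) <= 0 test in the listing above. */
static int ts_before_or_eq(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}

int main(void)
{
	/* Near the wrap point, 0xfffffff0 counts as "before" 0x10
	 * even though it is numerically larger. */
	printf("%d\n", ts_before_or_eq(0xfffffff0u, 0x00000010u)); /* 1 */
	printf("%d\n", ts_before_or_eq(0x00000010u, 0xfffffff0u)); /* 0 */
	return 0;
}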