tcp_ipv4.c

Linux Kernel 2.6.9 for OMAP1710
Language: C
Page 1 of 5
static __inline__ struct sock *tcp_v4_lookup(u32 saddr, u16 sport,
                                             u32 daddr, u16 dport, int dif)
{
        struct sock *sk;

        local_bh_disable();
        sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
        local_bh_enable();

        return sk;
}

static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
{
        return secure_tcp_sequence_number(skb->nh.iph->daddr,
                                          skb->nh.iph->saddr,
                                          skb->h.th->dest,
                                          skb->h.th->source);
}

/* called with local bh disabled */
static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
                                      struct tcp_tw_bucket **twp)
{
        struct inet_opt *inet = inet_sk(sk);
        u32 daddr = inet->rcv_saddr;
        u32 saddr = inet->daddr;
        int dif = sk->sk_bound_dev_if;
        TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
        __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
        int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
        struct tcp_ehash_bucket *head = &tcp_ehash[hash];
        struct sock *sk2;
        struct hlist_node *node;
        struct tcp_tw_bucket *tw;

        write_lock(&head->lock);

        /* Check TIME-WAIT sockets first. */
        sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
                tw = (struct tcp_tw_bucket *)sk2;

                if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
                        struct tcp_opt *tp = tcp_sk(sk);

                        /* With PAWS, it is safe from the viewpoint
                           of data integrity. Even without PAWS it
                           is safe provided sequence spaces do not
                           overlap i.e. at data rates <= 80Mbit/sec.

                           Actually, the idea is close to VJ's one,
                           only the timestamp cache is held not per host,
                           but per port pair, and the TW bucket is used
                           as the state holder.

                           If the TW bucket has already been destroyed we
                           fall back to VJ's scheme and use the initial
                           timestamp retrieved from the peer table.
                         */
                        if (tw->tw_ts_recent_stamp &&
                            (!twp || (sysctl_tcp_tw_reuse &&
                                      xtime.tv_sec -
                                      tw->tw_ts_recent_stamp > 1))) {
                                if ((tp->write_seq =
                                                tw->tw_snd_nxt + 65535 + 2) == 0)
                                        tp->write_seq = 1;
                                tp->ts_recent       = tw->tw_ts_recent;
                                tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
                                sock_hold(sk2);
                                goto unique;
                        } else
                                goto not_unique;
                }
        }
        tw = NULL;

        /* And established part... */
        sk_for_each(sk2, node, &head->chain) {
                if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
                        goto not_unique;
        }

unique:
        /* Must record num and sport now. Otherwise we will see
         * a socket with a funny identity in the hash table. */
        inet->num = lport;
        inet->sport = htons(lport);
        sk->sk_hashent = hash;
        BUG_TRAP(sk_unhashed(sk));
        __sk_add_node(sk, &head->chain);
        sock_prot_inc_use(sk->sk_prot);
        write_unlock(&head->lock);

        if (twp) {
                *twp = tw;
                NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
        } else if (tw) {
                /* Silly. Should hash-dance instead... */
                tcp_tw_deschedule(tw);
                NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
                tcp_tw_put(tw);
        }

        return 0;

not_unique:
        write_unlock(&head->lock);
        return -EADDRNOTAVAIL;
}
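/*
 * Illustrative sketch, not part of tcp_ipv4.c: the TIME-WAIT reuse path
 * above restarts the new incarnation's sequence space strictly beyond the
 * old socket's snd_nxt, so the two connections' sequence ranges cannot
 * overlap.  A minimal user-space model of that initial-sequence choice
 * (reuse_isn() and the sample snd_nxt values are hypothetical):
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t reuse_isn(uint32_t tw_snd_nxt)
{
        /* Same arithmetic as the kernel path: skip 65535 + 2 sequence
         * numbers past the old incarnation.  The sum wraps mod 2^32, and
         * a result of 0 is replaced by 1 because write_seq == 0 means
         * "unset" to the kernel. */
        uint32_t isn = tw_snd_nxt + 65535 + 2;
        return isn ? isn : 1;
}

int main(void)
{
        /* Ordinary case: just a jump past the old snd_nxt. */
        printf("0x%08" PRIx32 " -> 0x%08" PRIx32 "\n",
               (uint32_t)0xdeadbeef, reuse_isn(0xdeadbeef));
        /* Wrap-around case: 0xfffeffff + 65537 == 0 mod 2^32, so 1 is used. */
        printf("0x%08" PRIx32 " -> 0x%08" PRIx32 "\n",
               (uint32_t)0xfffeffff, reuse_isn(0xfffeffff));
        return 0;
}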
/*
 * Bind a port for a connect operation and hash it.
 */
static int tcp_v4_hash_connect(struct sock *sk)
{
        unsigned short snum = inet_sk(sk)->num;
        struct tcp_bind_hashbucket *head;
        struct tcp_bind_bucket *tb;
        int ret;

        if (!snum) {
                int rover;
                int low = sysctl_local_port_range[0];
                int high = sysctl_local_port_range[1];
                int remaining = (high - low) + 1;
                struct hlist_node *node;
                struct tcp_tw_bucket *tw = NULL;

                local_bh_disable();

                /* TODO. Actually it is not so bad an idea to remove
                 * tcp_portalloc_lock before the next submission to Linus.
                 * As soon as we touch this place at all it is time to think.
                 *
                 * Now it protects the single _advisory_ variable
                 * tcp_port_rover, hence it is mostly useless.
                 * The code will work nicely if we just delete it, but
                 * I am afraid in the contended case it will work no better
                 * or even worse: another cpu will just hit the same bucket
                 * and spin there.
                 * So some cpu salt could remove both the contention and
                 * the memory pingpong. Any ideas how to do this in a nice way?
                 */
                spin_lock(&tcp_portalloc_lock);
                rover = tcp_port_rover;

                do {
                        rover++;
                        if ((rover < low) || (rover > high))
                                rover = low;
                        head = &tcp_bhash[tcp_bhashfn(rover)];
                        spin_lock(&head->lock);

                        /* Does not bother with rcv_saddr checks,
                         * because the established check is already
                         * unique enough.
                         */
                        tb_for_each(tb, node, &head->chain) {
                                if (tb->port == rover) {
                                        BUG_TRAP(!hlist_empty(&tb->owners));
                                        if (tb->fastreuse >= 0)
                                                goto next_port;
                                        if (!__tcp_v4_check_established(sk,
                                                                        rover,
                                                                        &tw))
                                                goto ok;
                                        goto next_port;
                                }
                        }

                        tb = tcp_bucket_create(head, rover);
                        if (!tb) {
                                spin_unlock(&head->lock);
                                break;
                        }
                        tb->fastreuse = -1;
                        goto ok;

                next_port:
                        spin_unlock(&head->lock);
                } while (--remaining > 0);
                tcp_port_rover = rover;
                spin_unlock(&tcp_portalloc_lock);
                local_bh_enable();

                return -EADDRNOTAVAIL;

ok:
                /* All locks still held and bhs disabled */
                tcp_port_rover = rover;
                spin_unlock(&tcp_portalloc_lock);

                tcp_bind_hash(sk, tb, rover);
                if (sk_unhashed(sk)) {
                        inet_sk(sk)->sport = htons(rover);
                        __tcp_v4_hash(sk, 0);
                }
                spin_unlock(&head->lock);

                if (tw) {
                        tcp_tw_deschedule(tw);
                        tcp_tw_put(tw);
                }

                ret = 0;
                goto out;
        }

        head = &tcp_bhash[tcp_bhashfn(snum)];
        tb = tcp_sk(sk)->bind_hash;
        spin_lock_bh(&head->lock);
        if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
                __tcp_v4_hash(sk, 0);
                spin_unlock_bh(&head->lock);
                return 0;
        } else {
                spin_unlock(&head->lock);
                /* No definite answer... Walk to established hash table */
                ret = __tcp_v4_check_established(sk, snum, NULL);
out:
                local_bh_enable();
                return ret;
        }
}
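/*
 * Illustrative sketch, not part of tcp_ipv4.c: the ephemeral-port search
 * in tcp_v4_hash_connect() above is a circular walk from a shared rover
 * through [low, high], stopping at the first port that is free or
 * provably reusable.  A user-space model of the walk, with a hypothetical
 * is_usable() predicate standing in for the bind-bucket and
 * established-hash checks:
 */
#include <stdbool.h>
#include <stdio.h>

static int pick_port(int *rover, int low, int high, bool (*is_usable)(int))
{
        int remaining = (high - low) + 1;       /* try each port at most once */

        do {
                (*rover)++;
                if (*rover < low || *rover > high)
                        *rover = low;           /* wrap around like the kernel */
                if (is_usable(*rover))
                        return *rover;
        } while (--remaining > 0);

        return -1;      /* whole range exhausted: maps to -EADDRNOTAVAIL */
}

static bool fake_is_usable(int port)
{
        return port % 5 == 0;   /* arbitrary stand-in for the real checks */
}

int main(void)
{
        /* 32768..61000 is the usual 2.6-era ip_local_port_range default. */
        int rover = 32767;
        int port = pick_port(&rover, 32768, 61000, fake_is_usable);

        printf("picked port %d\n", port);
        return 0;
}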
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct inet_opt *inet = inet_sk(sk);
        struct tcp_opt *tp = tcp_sk(sk);
        struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
        struct rtable *rt;
        u32 daddr, nexthop;
        int tmp;
        int err;

        if (addr_len < sizeof(struct sockaddr_in))
                return -EINVAL;

        if (usin->sin_family != AF_INET)
                return -EAFNOSUPPORT;

        nexthop = daddr = usin->sin_addr.s_addr;
        if (inet->opt && inet->opt->srr) {
                if (!daddr)
                        return -EINVAL;
                nexthop = inet->opt->faddr;
        }

        tmp = ip_route_connect(&rt, nexthop, inet->saddr,
                               RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
                               IPPROTO_TCP,
                               inet->sport, usin->sin_port, sk);
        if (tmp < 0)
                return tmp;

        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                ip_rt_put(rt);
                return -ENETUNREACH;
        }

        if (!inet->opt || !inet->opt->srr)
                daddr = rt->rt_dst;

        if (!inet->saddr)
                inet->saddr = rt->rt_src;
        inet->rcv_saddr = inet->saddr;

        if (tp->ts_recent_stamp && inet->daddr != daddr) {
                /* Reset inherited state */
                tp->ts_recent       = 0;
                tp->ts_recent_stamp = 0;
                tp->write_seq       = 0;
        }

        if (sysctl_tcp_tw_recycle &&
            !tp->ts_recent_stamp && rt->rt_dst == daddr) {
                struct inet_peer *peer = rt_get_peer(rt);

                /* VJ's idea. We save the last timestamp seen from
                 * the destination in the peer table, when entering
                 * TIME-WAIT state, and initialize ts_recent from it
                 * when trying a new connection.
                 */
                if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
                        tp->ts_recent_stamp = peer->tcp_ts_stamp;
                        tp->ts_recent = peer->tcp_ts;
                }
        }

        inet->dport = usin->sin_port;
        inet->daddr = daddr;

        tp->ext_header_len = 0;
        if (inet->opt)
                tp->ext_header_len = inet->opt->optlen;

        tp->mss_clamp = 536;

        /* Socket identity is still unknown (sport may be zero).
         * However we set state to SYN-SENT and, without releasing the
         * socket lock, select a source port, enter ourselves into the
         * hash tables and complete initialization after this.
         */
        tcp_set_state(sk, TCP_SYN_SENT);
        err = tcp_v4_hash_connect(sk);
        if (err)
                goto failure;

        err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
        if (err)
                goto failure;

        /* OK, now commit destination to socket.  */
        __sk_dst_set(sk, &rt->u.dst);
        tcp_v4_setup_caps(sk, &rt->u.dst);
        tp->ext2_header_len = rt->u.dst.header_len;

        if (!tp->write_seq)
                tp->write_seq = secure_tcp_sequence_number(inet->saddr,
                                                           inet->daddr,
                                                           inet->sport,
                                                           usin->sin_port);

        inet->id = tp->write_seq ^ jiffies;

        err = tcp_connect(sk);
        rt = NULL;
        if (err)
                goto failure;

        return 0;

failure:
        /* This unhashes the socket and releases the local port, if necessary. */
        tcp_set_state(sk, TCP_CLOSE);
        ip_rt_put(rt);
        sk->sk_route_caps = 0;
        inet->dport = 0;
        return err;
}

static __inline__ int tcp_v4_iif(struct sk_buff *skb)
{
        return ((struct rtable *)skb->dst)->rt_iif;
}

static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
{
        return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
}

static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
                                              struct open_request ***prevp,
                                              __u16 rport,
                                              __u32 raddr, __u32 laddr)
{
        struct tcp_listen_opt *lopt = tp->listen_opt;
        struct open_request *req, **prev;

        for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
             (req = *prev) != NULL;
             prev = &req->dl_next) {
                if (req->rmt_port == rport &&
                    req->af.v4_req.rmt_addr == raddr &&
                    req->af.v4_req.loc_addr == laddr &&
                    TCP_INET_FAMILY(req->class->family)) {
                        BUG_TRAP(!req->sk);
                        *prevp = prev;
                        break;
                }
        }

        return req;
}

static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
{
        struct tcp_opt *tp = tcp_sk(sk);
        struct tcp_listen_opt *lopt = tp->listen_opt;
        u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port,
                                 lopt->hash_rnd);

        req->expires = jiffies + TCP_TIMEOUT_INIT;
        req->retrans = 0;
        req->sk = NULL;
        req->dl_next = lopt->syn_table[h];

        write_lock(&tp->syn_wait_lock);
        lopt->syn_table[h] = req;
        write_unlock(&tp->syn_wait_lock);

        tcp_synq_added(sk);
}

/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
                                     u32 mtu)
{
        struct dst_entry *dst;
        struct inet_opt *inet = inet_sk(sk);
        struct tcp_opt *tp = tcp_sk(sk);

        /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
         * sent out by Linux are always < 576 bytes, so they should go
         * through unfragmented).
         */
        if (sk->sk_state == TCP_LISTEN)
                return;

        /* We don't check in the dst entry if pmtu discovery is forbidden
         * on this route. We just assume that no packet-too-big packets
         * are sent back when pmtu discovery is not active.
         * There is a small race when the user changes this flag in the
         * route, but I think that's acceptable.
         */
        if ((dst = __sk_dst_check(sk, 0)) == NULL)
                return;

        dst->ops->update_pmtu(dst, mtu);

        /* Something is about to go wrong... Remember the soft error
         * for the case that this connection will not be able to recover.
         */
        if (mtu < dst_pmtu(dst) && ip_dont_fragment(sk, dst))
                sk->sk_err_soft = EMSGSIZE;

        mtu = dst_pmtu(dst);

        if (inet->pmtudisc != IP_PMTUDISC_DONT &&
            tp->pmtu_cookie > mtu) {
                tcp_sync_mss(sk, mtu);

                /* Resend the TCP packet because it's
                 * clear that the old packet has been
                 * dropped. This is the new "fast" path mtu
                 * discovery.
                 */
                tcp_simple_retransmit(sk);
        } /* else let the usual retransmit timer handle it */
}
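/*
 * Illustrative sketch, not part of tcp_ipv4.c: when do_pmtu_discovery()
 * above lowers the cached path MTU, tcp_sync_mss() shrinks the segment
 * size the socket may send.  For IPv4 TCP without options the usable MSS
 * is the path MTU minus 20 bytes of IP header and 20 bytes of TCP header;
 * note that the mss_clamp of 536 set in tcp_v4_connect() is exactly the
 * MSS for a 576-byte datagram.  A minimal model of that relation
 * (mss_for_pmtu() is a hypothetical helper):
 */
#include <stdio.h>

static unsigned int mss_for_pmtu(unsigned int pmtu)
{
        return pmtu - 20 /* IPv4 header */ - 20 /* TCP header */;
}

int main(void)
{
        /* 1500: Ethernet; 1492: PPPoE; 576: classic minimum-MTU fallback */
        unsigned int mtus[] = { 1500, 1492, 576 };
        size_t i;

        for (i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++)
                printf("pmtu %4u -> mss %4u\n", mtus[i], mss_for_pmtu(mtus[i]));
        return 0;
}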
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *skb, u32 info)
{
        struct iphdr *iph = (struct iphdr *)skb->data;
        struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
        struct tcp_opt *tp;
        struct inet_opt *inet;
        int type = skb->h.icmph->type;
        int code = skb->h.icmph->code;
        struct sock *sk;
        __u32 seq;
        int err;

        if (skb->len < (iph->ihl << 2) + 8) {
                ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
                return;
        }

        sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
                           th->source, tcp_v4_iif(skb));
        if (!sk) {
                ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
                return;
        }
        if (sk->sk_state == TCP_TIME_WAIT) {
                tcp_tw_put((struct tcp_tw_bucket *)sk);
                return;
        }

        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
         * servers this needs to be solved differently.
         */
        if (sock_owned_by_user(sk))
                NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == TCP_CLOSE)
                goto out;

        tp = tcp_sk(sk);
        seq = ntohl(th->seq);
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, tp->snd_una, tp->snd_nxt)) {
                NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        switch (type) {
        case ICMP_SOURCE_QUENCH:
                /* Just silently ignore these. */
                goto out;
        case ICMP_PARAMETERPROB:
                err = EPROTO;
                break;
        case ICMP_DEST_UNREACH:
                if (code > NR_ICMP_UNREACH)
                        goto out;

                if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
                        if (!sock_owned_by_user(sk))
                                do_pmtu_discovery(sk, iph, info);
                        goto out;
                }

                err = icmp_err_convert[code].errno;
                break;
        case ICMP_TIME_EXCEEDED:
                err = EHOSTUNREACH;
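/*
 * Illustrative sketch, not part of tcp_ipv4.c: tcp_v4_connect() above is
 * the kernel-side handler reached when user space calls connect(2) on an
 * AF_INET stream socket, and tcp_v4_err() is what turns incoming ICMP
 * errors into the errno such a caller sees.  A minimal user-space program
 * that exercises this path (192.0.2.1:80 is a placeholder destination):
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_in sin;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_port = htons(80);
        inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);

        /* No bind() first: the local port is still 0, so
         * tcp_v4_hash_connect() must pick an ephemeral source port before
         * the SYN can be sent. */
        if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
                perror("connect");      /* e.g. EHOSTUNREACH via tcp_v4_err() */

        close(fd);
        return 0;
}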
