📄 tcp_input.c

📁 GNU Hurd source code
💻 C
📖 Page 1 of 5
		return;

	sp->start_seq = TCP_SKB_CB(skb)->end_seq;
	if(!before(sp->start_seq, sp->end_seq)) {
		/* Zap this SACK, by moving forward any other SACKs. */
		for(this_sack += 1; this_sack < num_sacks; this_sack++, sp++) {
			struct tcp_sack_block *next = (sp + 1);
			sp->start_seq = next->start_seq;
			sp->end_seq = next->end_seq;
		}
		tp->num_sacks--;
	}
}

static void tcp_sack_extend(struct tcp_opt *tp, struct sk_buff *old_skb, struct sk_buff *new_skb)
{
	struct tcp_sack_block *sp = &tp->selective_acks[0];
	int num_sacks = tp->num_sacks;
	int this_sack;

	for(this_sack = 0; this_sack < num_sacks; this_sack++, sp++) {
		if(sp->end_seq == TCP_SKB_CB(old_skb)->end_seq)
			break;
	}
	if(this_sack >= num_sacks)
		return;

	sp->end_seq = TCP_SKB_CB(new_skb)->end_seq;
}

/* This one checks to see if we can put data from the
 * out_of_order queue into the receive_queue.
 */
static void tcp_ofo_queue(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	while ((skb = skb_peek(&tp->out_of_order_queue))) {
		if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
			break;

		if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
			SOCK_DEBUG(sk, "ofo packet was already received\n");
			__skb_unlink(skb, skb->list);
			kfree_skb(skb);
			continue;
		}
		SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
			   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
			   TCP_SKB_CB(skb)->end_seq);

		if(tp->sack_ok)
			tcp_sack_remove_skb(tp, skb);
		__skb_unlink(skb, skb->list);
		__skb_queue_tail(&sk->receive_queue, skb);
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if(skb->h.th->fin)
			tcp_fin(skb, sk, skb->h.th);
	}
}
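/* Illustration (recalled from <net/tcp.h> of this era, not verbatim from
 * this file): the queue walks above and below depend on the wrap-safe
 * sequence comparisons before() and after(), essentially:
 *
 *	extern __inline int before(__u32 seq1, __u32 seq2)
 *	{
 *		return (__s32)(seq1-seq2) < 0;
 *	}
 *
 * with after(seq2, seq1) defined as before(seq1, seq2).  The signed
 * subtraction keeps the ordering correct across 32-bit wraparound:
 * before(0xfffffff0, 0x10) is true because 0xfffffff0 - 0x10 is
 * 0xffffffe0, which is negative as an __s32.
 */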
static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *skb1;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	/*  Queue data for delivery to the user.
	 *  Packets in sequence go to the receive queue.
	 *  Out of sequence packets to the out_of_order_queue.
	 */
	if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
		/* Ok. In sequence. */
	queue_and_out:
		dst_confirm(sk->dst_cache);
		__skb_queue_tail(&sk->receive_queue, skb);
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if(skb->h.th->fin) {
			tcp_fin(skb, sk, skb->h.th);
		} else {
			tcp_remember_ack(tp, skb->h.th, skb);
		}
		/* This may have eaten into a SACK block. */
		if(tp->sack_ok && tp->num_sacks)
			tcp_sack_remove_skb(tp, skb);
		tcp_ofo_queue(sk);

		/* Turn on fast path. */
		if (skb_queue_len(&tp->out_of_order_queue) == 0)
			tp->pred_flags = htonl(((tp->tcp_header_len >> 2) << 28) |
					       (0x10 << 16) |
					       tp->snd_wnd);
		return;
	}

	/* An old packet, either a retransmit or some packet got lost. */
	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
		/* A retransmit, 2nd most common case.  Force an immediate ack. */
		SOCK_DEBUG(sk, "retransmit received: seq %X\n", TCP_SKB_CB(skb)->seq);
		tcp_enter_quickack_mode(tp);
		kfree_skb(skb);
		return;
	}

	if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
		/* Partial packet, seq < rcv_next < end_seq */
		SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
			   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
			   TCP_SKB_CB(skb)->end_seq);
		goto queue_and_out;
	}

	/* Ok. This is an out_of_order segment, force an ack. */
	tp->delayed_acks++;
	tcp_enter_quickack_mode(tp);

	/* Disable header prediction. */
	tp->pred_flags = 0;

	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);

	if (skb_peek(&tp->out_of_order_queue) == NULL) {
		/* Initial out of order segment, build 1 SACK. */
		if(tp->sack_ok) {
			tp->num_sacks = 1;
			tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
			tp->selective_acks[0].end_seq = TCP_SKB_CB(skb)->end_seq;
		}
		__skb_queue_head(&tp->out_of_order_queue, skb);
	} else {
		for(skb1 = tp->out_of_order_queue.prev; ; skb1 = skb1->prev) {
			/* Already there. */
			if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb1)->seq) {
				if (skb->len >= skb1->len) {
					if(tp->sack_ok)
						tcp_sack_extend(tp, skb1, skb);
					__skb_append(skb1, skb);
					__skb_unlink(skb1, skb1->list);
					kfree_skb(skb1);
				} else {
					/* A duplicate, smaller than what is in the
					 * out-of-order queue right now, toss it.
					 */
					kfree_skb(skb);
				}
				break;
			}

			if (after(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq)) {
				__skb_append(skb1, skb);
				if(tp->sack_ok)
					tcp_sack_new_ofo_skb(sk, skb);
				break;
			}

			/* See if we've hit the start. If so insert. */
			if (skb1 == skb_peek(&tp->out_of_order_queue)) {
				__skb_queue_head(&tp->out_of_order_queue, skb);
				if(tp->sack_ok)
					tcp_sack_new_ofo_skb(sk, skb);
				break;
			}
		}
	}
}

/*
 *	This routine handles the data.  If there is room in the buffer, it
 *	will have already been moved into it.  If there is no room, then
 *	we will just have to discard the packet.
 */
static int tcp_data(struct sk_buff *skb, struct sock *sk, unsigned int len)
{
	struct tcphdr *th;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	th = skb->h.th;
	skb_pull(skb, th->doff*4);
	skb_trim(skb, len - (th->doff*4));

	if (skb->len == 0 && !th->fin)
		return(0);

	/*
	 *	If our receive queue has grown past its limits shrink it.
	 *	Make sure to do this before moving rcv_nxt, otherwise
	 *	data might be acked that we don't have enough room for.
	 */
	if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf) {
		if (prune_queue(sk) < 0) {
			/* Still not enough room. That can happen when
			 * skb->truesize differs significantly from skb->len.
			 */
			return 0;
		}
	}

	tcp_data_queue(sk, skb);

	if (before(tp->rcv_nxt, tp->copied_seq)) {
		printk(KERN_DEBUG "*** tcp.c:tcp_data bug acked < copied\n");
		tp->rcv_nxt = tp->copied_seq;
	}

	/* Above, tcp_data_queue() increments delayed_acks appropriately.
	 * Now tell the user we may have some data.
	 */
	if (!sk->dead) {
		sk->data_ready(sk,0);
	}
	return(1);
}

static void __tcp_data_snd_check(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd) &&
	    tcp_packets_in_flight(tp) < tp->snd_cwnd) {
		/* Put more data onto the wire. */
		tcp_write_xmit(sk);
	} else if (tp->packets_out == 0 && !tp->pending) {
		/* Start probing the receiver's window. */
		tcp_reset_xmit_timer(sk, TIME_PROBE0, tp->rto);
	}
}

static __inline__ void tcp_data_snd_check(struct sock *sk)
{
	struct sk_buff *skb = sk->tp_pinfo.af_tcp.send_head;

	if (skb != NULL)
		__tcp_data_snd_check(sk, skb);
}

/*
 * Adapt the MSS value used to make delayed ack decision to the
 * real world.
 */
static __inline__ void tcp_measure_rcv_mss(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	unsigned int len = skb->len, lss;

	if (len > tp->rcv_mss)
		tp->rcv_mss = len;
	lss = tp->last_seg_size;
	tp->last_seg_size = 0;
	if (len >= 536) {
		if (len == lss)
			tp->rcv_mss = len;
		tp->last_seg_size = len;
	}
}
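/* Illustration (numbers assumed, not from this file): 536 above is the
 * default TCP MSS (a 576-byte IP datagram minus 40 bytes of IP and TCP
 * headers), so rcv_mss is only re-learned from segments that could
 * plausibly be full sized.  Assuming an Ethernet rcv_mss of 1460 and
 * MAX_DELAY_ACK == 2, the "two full frames" test in __tcp_ack_snd_check()
 * below works out as:
 *
 *	rcv_wup = 1000	(rcv_nxt when the window was last advertised)
 *	rcv_nxt = 3920	(two 1460-byte segments later)
 *	3920 - 1000 >= 2 * 1460  -->  ACK immediately
 *
 * which matches RFC 1122's rule of acknowledging at least every second
 * full-sized segment.
 */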
/*
 * Check if sending an ack is needed.
 */
static __inline__ void __tcp_ack_snd_check(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	/* This also takes care of updating the window.
	 * This if statement needs to be simplified.
	 *
	 * Rules for delaying an ack:
	 *	- delay time <= 0.5 HZ
	 *	- we don't have a window update to send
	 *	- must send at least every 2 full sized packets
	 *	- must send an ACK if we have any out of order data
	 *
	 * With an extra heuristic to handle loss of packet
	 * situations and also helping the sender leave slow
	 * start in an expedient manner.
	 */

	/* Two full frames received or... */
	if (((tp->rcv_nxt - tp->rcv_wup) >= tp->rcv_mss * MAX_DELAY_ACK) ||
	    /* We will update the window "significantly" or... */
	    tcp_raise_window(sk) ||
	    /* We entered "quick ACK" mode or... */
	    tcp_in_quickack_mode(tp) ||
	    /* We have out of order data */
	    (skb_peek(&tp->out_of_order_queue) != NULL)) {
		/* Then ack it now */
		tcp_send_ack(sk);
	} else {
		/* Else, send delayed ack. */
		tcp_send_delayed_ack(tp, HZ/2);
	}
}

static __inline__ void tcp_ack_snd_check(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	if (tp->delayed_acks == 0) {
		/* We sent a data segment already. */
		return;
	}
	__tcp_ack_snd_check(sk);
}

/*
 *	This routine is only called when we have urgent data
 *	signalled. It's the 'slow' part of tcp_urg. It could be
 *	moved inline now as tcp_urg is only called from one
 *	place. We handle URGent data wrong. We have to - as
 *	BSD still doesn't use the correction from RFC 961.
 *	For 1003.1g we should support a new option TCP_STDURG to permit
 *	either form (or just set the sysctl tcp_stdurg).
 */
static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	u32 ptr = ntohs(th->urg_ptr);

	if (ptr && !sysctl_tcp_stdurg)
		ptr--;
	ptr += ntohl(th->seq);

	/* Ignore urgent data that we've already seen and read. */
	if (after(tp->copied_seq, ptr))
		return;

	/* Do we already have a newer (or duplicate) urgent pointer? */
	if (tp->urg_data && !after(ptr, tp->urg_seq))
		return;

	/* Tell the world about our new urgent pointer. */
	if (sk->proc != 0) {
		if (sk->proc > 0)
			kill_proc(sk->proc, SIGURG, 1);
		else
			kill_pg(-sk->proc, SIGURG, 1);
	}

	/* We may be adding urgent data when the last byte read was
	 * urgent. To do this requires some care. We cannot just ignore
	 * tp->copied_seq since we would read the last urgent byte again
	 * as data, nor can we alter copied_seq until this data arrives
	 * or we break the semantics of SIOCATMARK (and thus sockatmark()).
	 */
	if (tp->urg_seq == tp->copied_seq)
		tp->copied_seq++;	/* Move the copied sequence on correctly */
	tp->urg_data = URG_NOTYET;
	tp->urg_seq = ptr;

	/* Disable header prediction. */
	tp->pred_flags = 0;
}

/* This is the 'fast' part of urgent handling. */
static inline void tcp_urg(struct sock *sk, struct tcphdr *th, unsigned long len)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	/* Check if we get a new urgent pointer - normally not. */
	if (th->urg)
		tcp_check_urg(sk, th);

	/* Do we wait for any urgent data? - normally not... */
	if (tp->urg_data == URG_NOTYET) {
		u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff*4);

		/* Is the urgent pointer pointing into this packet? */
		if (ptr < len) {
			tp->urg_data = URG_VALID | *(ptr + (unsigned char *) th);
			if (!sk->dead)
				sk->data_ready(sk,0);
		}
	}
}
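/* Illustration (numbers assumed, not from this file): suppose a segment
 * arrives with ntohl(th->seq) == 1000, ntohs(th->urg_ptr) == 5 and
 * sysctl_tcp_stdurg == 0.  tcp_check_urg() above takes the BSD-compatible
 * branch and decrements the pointer, so the urgent byte is at sequence
 * 1000 + 4 == 1004, one position before where the raw urgent pointer
 * points; with sysctl_tcp_stdurg set it would be at 1005.  tcp_urg()
 * later maps that absolute sequence back to an offset inside a segment,
 * ptr = 1004 - ntohl(th->seq) + (th->doff*4), and if it lands within the
 * packet it latches the byte into tp->urg_data as URG_VALID | <byte>,
 * where a subsequent MSG_OOB read can find it.
 */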
/* Clean the out_of_order queue if we can, trying to get
 * the socket within its memory limits again.
 *
 * Return less than zero if we should start dropping frames
 * until the socket owning process reads some of the data
 * to stabilize the situation.
 */
static int prune_queue(struct sock *sk)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	struct sk_buff *skb;

	SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);

	net_statistics.PruneCalled++;

	/* First, purge the out_of_order queue. */
	skb = __skb_dequeue_tail(&tp->out_of_order_queue);
	if(skb != NULL) {
		/* Free it all. */
		do {
			net_statistics.OfoPruned += skb->len;
			kfree_skb(skb);
			skb = __skb_dequeue_tail(&tp->out_of_order_queue);
		} while(skb != NULL);

		/* Reset SACK state.  A conforming SACK implementation will
		 * do the same at a timeout based retransmit.  When a connection
		 * is in a sad state like this, we care only about integrity
		 * of the connection, not performance.
		 */
		if(tp->sack_ok)
			tp->num_sacks = 0;
	}

	/* If we are really being abused, tell the caller to silently
	 * drop receive data on the floor.  It will get retransmitted
	 * and hopefully then we'll have sufficient space.
	 *
	 * We used to try to purge the in-order packets too, but that
	 * turns out to be deadly and fraught with races.  Consider:
	 *
	 * 1) If we acked the data, we absolutely cannot drop the
	 *    packet.  This data would then never be retransmitted.
	 * 2) It is possible, with a proper sequence of events involving
	 *    delayed acks and backlog queue handling, to have the user
	 *    read the data before it gets acked.  The previous code
	 *    here got this wrong, and it led to data corruption.
	 * 3) Too many state changes happen when the FIN arrives, so once
	 *    we've seen that we can't remove any in-order data safely.
	 *
	 * The net result is that removing in-order receive data is too
	 * complex for anyone's sanity.  So we don't do it anymore.  But
	 * if we are really having our buffer space abused we stop accepting
	 * new receive data.
	 */
	if(atomic_read(&sk->rmem_alloc) < (sk->rcvbuf << 1))
		return 0;

	/* Massive buffer overcommit. */
	return -1;
}

/*
 *	TCP receive function for the ESTABLISHED state.
 *
 *	It is split into a fast path and a slow path. The fast path is
