
📄 mytcp_input.c

📁 A Linux-based TCP/IP protocol stack implementation
💻 C
📖 Page 1 of 5
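/*
 * This page of mytcp_input.c picks up inside the per-skb loop of the SACK
 * tagging routine and continues through retransmit-queue cleanup on ACK,
 * F-RTO processing, loss-state entry, and the congestion-window undo helpers.
 */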
					}
				} else {
					if (!(sacked & TCPCB_RETRANS) &&
					    fack_count < prior_fackets)
						reord = min(fack_count, reord);

					if (sacked & TCPCB_LOST) {
						TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
						tp->lost_out -= tcp_skb_pcount(skb);
						tp->retransmit_skb_hint = NULL;
					}
				}

				TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED;
				flag |= FLAG_DATA_SACKED;
				tp->sacked_out += tcp_skb_pcount(skb);

				if (fack_count > tp->fackets_out)
					tp->fackets_out = fack_count;
			} else {
				if (dup_sack && (sacked & TCPCB_RETRANS))
					reord = min(fack_count, reord);
			}

			if (dup_sack &&
			    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) {
				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
				tp->retrans_out -= tcp_skb_pcount(skb);
				tp->retransmit_skb_hint = NULL;
			}
		}
	}

	if (lost_retrans && icsk->icsk_ca_state == TCP_CA_Recovery) {
		struct sk_buff *skb;

		sk_stream_for_retrans_queue(skb, sk) {
			if (after(TCP_SKB_CB(skb)->seq, lost_retrans))
				break;
			if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
				continue;
			if ((TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) &&
			    after(lost_retrans, TCP_SKB_CB(skb)->ack_seq) &&
			    (IsFack(tp) ||
			     !before(lost_retrans,
				     TCP_SKB_CB(skb)->ack_seq + tp->reordering *
				     tp->mss_cache))) {
				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
				tp->retrans_out -= tcp_skb_pcount(skb);
				tp->retransmit_skb_hint = NULL;

				if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
					tp->lost_out += tcp_skb_pcount(skb);
					TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
					flag |= FLAG_DATA_SACKED;
					MYNET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
				}
			}
		}
	}

	tp->left_out = tp->sacked_out + tp->lost_out;

	if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss)
		mytcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0);

#if FASTRETRANS_DEBUG > 0
	BUG_TRAP((int)tp->sacked_out >= 0);
	BUG_TRAP((int)tp->lost_out >= 0);
	BUG_TRAP((int)tp->retrans_out >= 0);
	BUG_TRAP((int)tcp_packets_in_flight(tp) >= 0);
#endif
	return flag;
}

static int mytcp_tso_acked(struct sock *sk, struct sk_buff *skb,
			   __u32 now, __s32 *seq_rtt)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
	__u32 seq = tp->snd_una;
	__u32 packets_acked;
	int acked = 0;

	BUG_ON(!after(scb->end_seq, seq));

	packets_acked = tcp_skb_pcount(skb);
	if (mytcp_trim_head(sk, skb, seq - scb->seq))
		return 0;
	packets_acked -= tcp_skb_pcount(skb);

	if (packets_acked) {
		__u8 sacked = scb->sacked;

		acked |= FLAG_DATA_ACKED;
		if (sacked) {
			if (sacked & TCPCB_RETRANS) {
				if (sacked & TCPCB_SACKED_RETRANS)
					tp->retrans_out -= packets_acked;
				acked |= FLAG_RETRANS_DATA_ACKED;
				*seq_rtt = -1;
			} else if (*seq_rtt < 0)
				*seq_rtt = now - scb->when;
			if (sacked & TCPCB_SACKED_ACKED)
				tp->sacked_out -= packets_acked;
			if (sacked & TCPCB_LOST)
				tp->lost_out -= packets_acked;
			if (sacked & TCPCB_URG) {
				if (tp->urg_mode &&
				    !before(seq, tp->snd_up))
					tp->urg_mode = 0;
			}
		} else if (*seq_rtt < 0)
			*seq_rtt = now - scb->when;

		if (tp->fackets_out) {
			__u32 dval = min(tp->fackets_out, packets_acked);
			tp->fackets_out -= dval;
		}
		tp->packets_out -= packets_acked;

		BUG_ON(tcp_skb_pcount(skb) == 0);
		BUG_ON(!before(scb->seq, scb->end_seq));
	}

	return acked;
}

static u32 mytcp_usrtt(const struct sk_buff *skb)
{
	struct timeval tv, now;

	do_gettimeofday(&now);
	skb_get_timestamp(skb, &tv);
	return (now.tv_sec - tv.tv_sec) * 1000000 + (now.tv_usec - tv.tv_usec);
}
static int mytcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb;
	__u32 now = tcp_time_stamp;
	int acked = 0;
	__s32 seq_rtt = -1;
	u32 pkts_acked = 0;
	void (*rtt_sample)(struct sock *sk, u32 usrtt)
		= icsk->icsk_ca_ops->rtt_sample;

	while ((skb = skb_peek(&sk->sk_write_queue)) && skb != sk->sk_send_head) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		__u8 sacked = scb->sacked;

		if (after(scb->end_seq, tp->snd_una)) {
			if (tcp_skb_pcount(skb) > 1 &&
			    after(tp->snd_una, scb->seq))
				acked |= mytcp_tso_acked(sk, skb, now, &seq_rtt);
			break;
		}

		if (!(scb->flags & TCPCB_FLAG_SYN)) {
			acked |= FLAG_DATA_ACKED;
			++pkts_acked;
		} else {
			acked |= FLAG_SYN_ACKED;
			tp->retrans_stamp = 0;
		}

		if (sacked) {
			if (sacked & TCPCB_RETRANS) {
				if (sacked & TCPCB_SACKED_RETRANS)
					tp->retrans_out -= tcp_skb_pcount(skb);
				acked |= FLAG_RETRANS_DATA_ACKED;
				seq_rtt = -1;
			} else if (seq_rtt < 0) {
				seq_rtt = now - scb->when;
				if (rtt_sample)
					(*rtt_sample)(sk, mytcp_usrtt(skb));
			}
			if (sacked & TCPCB_SACKED_ACKED)
				tp->sacked_out -= tcp_skb_pcount(skb);
			if (sacked & TCPCB_LOST)
				tp->lost_out -= tcp_skb_pcount(skb);
			if (sacked & TCPCB_URG) {
				if (tp->urg_mode &&
				    !before(scb->end_seq, tp->snd_up))
					tp->urg_mode = 0;
			}
		} else if (seq_rtt < 0) {
			seq_rtt = now - scb->when;
			if (rtt_sample)
				(*rtt_sample)(sk, mytcp_usrtt(skb));
		}
		tcp_dec_pcount_approx(&tp->fackets_out, skb);
		tcp_packets_out_dec(tp, skb);
		__skb_unlink(skb, &sk->sk_write_queue);
		sk_stream_free_skb(sk, skb);
		clear_all_retrans_hints(tp);
	}

	/* Hand the RTT sample and the accumulated FLAG_* bits back to the caller. */
	*seq_rtt_p = seq_rtt;
	return acked;
}

/* Rate-halving style reduction used in CWR/Recovery: lower snd_cwnd by one
 * segment for every second call, never below the CA module's minimum and
 * never above packets in flight + 1. */
static void mytcp_cwnd_down(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int decr = tp->snd_cwnd_cnt + 1;

	tp->snd_cwnd_cnt = decr & 1;
	decr >>= 1;

	if (decr && tp->snd_cwnd > icsk->icsk_ca_ops->min_cwnd(sk))
		tp->snd_cwnd -= decr;

	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* F-RTO fallback: mark every un-SACKed segment below frto_highmark as lost
 * and enter the Loss state. */
static void mytcp_enter_frto_loss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int cnt = 0;

	tp->sacked_out = 0;
	tp->lost_out = 0;
	tp->fackets_out = 0;

	sk_stream_for_retrans_queue(skb, sk) {
		cnt += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
			if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) {
				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
				tp->lost_out += tcp_skb_pcount(skb);
			}
		} else {
			tp->sacked_out += tcp_skb_pcount(skb);
			tp->fackets_out = cnt;
		}
	}
	tcp_sync_left_out(tp);

	tp->snd_cwnd = tp->frto_counter + tcp_packets_in_flight(tp) + 1;
	tp->snd_cwnd_cnt = 0;
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->undo_marker = 0;
	tp->frto_counter = 0;

	tp->reordering = min_t(unsigned int, tp->reordering,
			       mysysctl_tcp_reordering);
	tcp_set_ca_state(sk, TCP_CA_Loss);
	tp->high_seq = tp->frto_highmark;
	TCP_ECN_queue_cwr(tp);

	clear_all_retrans_hints(tp);
}

static inline void mytcp_moderate_cwnd(struct tcp_sock *tp)
{
	tp->snd_cwnd = min(tp->snd_cwnd,
			   tcp_packets_in_flight(tp) + tcp_max_burst(tp));
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* Process an ACK that arrives while the F-RTO algorithm is in progress. */
static void mytcp_process_frto(struct sock *sk, u32 prior_snd_una)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_sync_left_out(tp);

	if (tp->snd_una == prior_snd_una ||
	    !before(tp->snd_una, tp->frto_highmark)) {
		mytcp_enter_frto_loss(sk);
		return;
	}

	if (tp->frto_counter == 1) {
		tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
	} else {
		tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
		mytcp_moderate_cwnd(tp);
	}
	tp->frto_counter = (tp->frto_counter + 1) % 3;
}

static inline int mytcp_ack_is_dubious(const struct sock *sk, const int flag)
{
	return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
		inet_csk(sk)->icsk_ca_state != TCP_CA_Open);
}

static inline int mytcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
		!((1 << inet_csk(sk)->icsk_ca_state) &
		  (TCPF_CA_Recovery | TCPF_CA_CWR));
}

static inline int mytcp_fackets_out(struct tcp_sock *tp)
{
	return IsReno(tp) ? tp->sacked_out + 1 : tp->fackets_out;
}

static inline int mytcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
{
	return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
}

static inline int mytcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
{
	return tp->packets_out &&
	       mytcp_skb_timedout(sk, skb_peek(&sk->sk_write_queue));
}

static void mytcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
			     u32 in_flight, int good)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good);
	tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
}

/* Enter the Loss state: collapse snd_cwnd to one segment and mark the
 * un-SACKed part of the write queue (all of it when 'how' is set) as lost. */
void mytcp_enter_loss(struct sock *sk, int how)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int cnt = 0;

	if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
		tp->prior_ssthresh = tcp_current_ssthresh(sk);
		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
		tcp_ca_event(sk, CA_EVENT_LOSS);
	}
	tp->snd_cwnd	   = 1;
	tp->snd_cwnd_cnt   = 0;
	tp->snd_cwnd_stamp = tcp_time_stamp;

	tp->bytes_acked = 0;
	mytcp_clear_retrans(tp);

	if (!how)
		tp->undo_marker = tp->snd_una;

	sk_stream_for_retrans_queue(skb, sk) {
		cnt += tcp_skb_pcount(skb);
		if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
			tp->undo_marker = 0;
		TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS) | TCPCB_SACKED_ACKED;
		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) || how) {
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
			tp->lost_out += tcp_skb_pcount(skb);
		} else {
			tp->sacked_out += tcp_skb_pcount(skb);
			tp->fackets_out = cnt;
		}
	}
	tcp_sync_left_out(tp);

	tp->reordering = min_t(unsigned int, tp->reordering,
			       sysctl_tcp_reordering);
	tcp_set_ca_state(sk, TCP_CA_Loss);
	tp->high_seq = tp->snd_nxt;
	TCP_ECN_queue_cwr(tp);

	clear_all_retrans_hints(tp);
}

/* The receiver SACKed data and then "forgot" it (reneging): treat it as
 * loss, retransmit the head of the queue and restart the RTO timer. */
static int mytcp_check_sack_reneging(struct sock *sk)
{
	struct sk_buff *skb;

	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
		struct inet_connection_sock *icsk = inet_csk(sk);

		MYNET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);

		mytcp_enter_loss(sk, 1);
		icsk->icsk_retransmits++;
		mytcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  icsk->icsk_rto, TCP_RTO_MAX);
		return 1;
	}
	return 0;
}

/* Mark up to 'packets' untagged segments below high_seq as lost, starting
 * from the head of the write queue (or the cached lost_skb_hint). */
static void mytcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
				 int packets, u32 high_seq)
{
	struct sk_buff *skb;
	int cnt;

	BUG_TRAP(packets <= tp->packets_out);
	if (tp->lost_skb_hint) {
		skb = tp->lost_skb_hint;
		cnt = tp->lost_cnt_hint;
	} else {
		skb = sk->sk_write_queue.next;
		cnt = 0;
	}

	sk_stream_for_retrans_queue_from(skb, sk) {
		tp->lost_skb_hint = skb;
		tp->lost_cnt_hint = cnt;
		cnt += tcp_skb_pcount(skb);
		if (cnt > packets || after(TCP_SKB_CB(skb)->end_seq, high_seq))
			break;
		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)) {
			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
			tp->lost_out += tcp_skb_pcount(skb);
			if (tp->retransmit_skb_hint != NULL &&
			    before(TCP_SKB_CB(skb)->seq,
				   TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) {
				tp->retransmit_skb_hint = NULL;
			}
		}
	}
	tcp_sync_left_out(tp);
}

static void mytcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
{
	if (IsFack(tp)) {
		int lost = tp->fackets_out - tp->reordering;
		if (lost <= 0)
			lost = 1;
		mytcp_mark_head_lost(sk, tp, lost, tp->high_seq);
	} else {
		mytcp_mark_head_lost(sk, tp, 1, tp->high_seq);
	}

	if (mytcp_head_timedout(sk, tp)) {
		struct sk_buff *skb;

		skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
					      : sk->sk_write_queue.next;

		sk_stream_for_retrans_queue_from(skb, sk) {
			if (!mytcp_skb_timedout(sk, skb))
				break;
			if (!(TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)) {
				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
				tp->lost_out += tcp_skb_pcount(skb);
				if (tp->retransmit_skb_hint &&
				    before(TCP_SKB_CB(skb)->seq,
					   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
					tp->retransmit_skb_hint = NULL;
			}
		}

		tp->scoreboard_skb_hint = skb;

		tcp_sync_left_out(tp);
	}
}

static inline int mytcp_packet_delayed(struct tcp_sock *tp)
{
	return !tp->retrans_stamp ||
		(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
		 (__s32)(tp->rx_opt.rcv_tsecr - tp->retrans_stamp) < 0);
}

static inline int mytcp_may_undo(struct tcp_sock *tp)
{
	return tp->undo_marker &&
		(!tp->undo_retrans || mytcp_packet_delayed(tp));
}

static void mytcp_undo_cwr(struct sock *sk, const int undo)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->prior_ssthresh) {
		const struct inet_connection_sock *icsk = inet_csk(sk);

		if (icsk->icsk_ca_ops->undo_cwnd)
			tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
		else
			tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);

		if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
			tp->snd_ssthresh = tp->prior_ssthresh;
			TCP_ECN_withdraw_cwr(tp);
		}
	} else {
		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
	}
	mytcp_moderate_cwnd(tp);
	tp->snd_cwnd_stamp = tcp_time_stamp;

	clear_all_retrans_hints(tp);
}

static int mytcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
{
	if (mytcp_may_undo(tp)) {
		mytcp_undo_cwr(sk, 1);
		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
