
📄 mytcp_input.c

📁 An implementation of a TCP/IP protocol stack based on Linux
💻 C
📖 Page 1 of 5
/* ...continued from the previous page: the tail of mytcp_try_undo_recovery(),
 * which rolls a cwnd reduction back once the retransmissions prove spurious. */
            MYNET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
        else
            MYNET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
        tp->undo_marker = 0;
    }
    if (tp->snd_una == tp->high_seq && IsReno(tp)) {
        /* Hold the old state until something above high_seq is ACKed;
         * for Reno this prevents false fast retransmits (RFC 2582). */
        mytcp_moderate_cwnd(tp);
        return 1;
    }
    tcp_set_ca_state(sk, TCP_CA_Open);
    return 0;
}

/* The CWR phase is over: clamp cwnd to ssthresh and notify the
 * congestion-control module. */
static inline void mytcp_complete_cwr(struct sock *sk)
{
    struct tcp_sock *tp = tcp_sk(sk);

    tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
    tp->snd_cwnd_stamp = tcp_time_stamp;
    tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
}

/* D-SACKs told us every retransmission was unnecessary: undo the
 * cwnd reduction. */
static void mytcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp)
{
    if (tp->undo_marker && !tp->undo_retrans) {
        mytcp_undo_cwr(sk, 1);
        tp->undo_marker = 0;
        MYNET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
    }
}

/* Without real SACK blocks, Reno counts duplicate ACKs as implicitly
 * SACKed segments; clamp sacked_out so the implied scoreboard never
 * exceeds what is actually in flight, updating the reordering metric
 * when it would. */
static void mytcp_check_reno_reordering(struct sock *sk, const int addend)
{
    struct tcp_sock *tp = tcp_sk(sk);
    u32 holes;

    holes = max(tp->lost_out, 1U);
    holes = min(holes, tp->packets_out);

    if ((tp->sacked_out + holes) > tp->packets_out) {
        tp->sacked_out = tp->packets_out - holes;
        mytcp_update_reordering(sk, tp->packets_out + addend, 0);
    }
}

/* A cumulative ACK for `acked` segments consumes one really-ACKed segment
 * plus up to acked-1 implicitly SACKed ones. */
static void mytcp_remove_reno_sacks(struct sock *sk, struct tcp_sock *tp, int acked)
{
    if (acked > 0) {
        if (acked - 1 >= tp->sacked_out)
            tp->sacked_out = 0;
        else
            tp->sacked_out -= acked - 1;
    }
    mytcp_check_reno_reordering(sk, acked);
    tcp_sync_left_out(tp);
}

/* A duplicate ACK counts as one implicitly SACKed segment. */
static void mytcp_add_reno_sack(struct sock *sk)
{
    struct tcp_sock *tp = tcp_sk(sk);

    tp->sacked_out++;
    mytcp_check_reno_reordering(sk, 0);
    tcp_sync_left_out(tp);
}

/* Try to move the connection back toward the Open state, or keep
 * reducing cwnd while we remain in CWR. */
static void mytcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
{
    tp->left_out = tp->sacked_out;

    if (tp->retrans_out == 0)
        tp->retrans_stamp = 0;

    if (flag & FLAG_ECE)
        mytcp_enter_cwr(sk);

    if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
        int state = TCP_CA_Open;

        if (tp->left_out || tp->retrans_out || tp->undo_marker)
            state = TCP_CA_Disorder;

        if (inet_csk(sk)->icsk_ca_state != state) {
            tcp_set_ca_state(sk, state);
            tp->high_seq = tp->snd_nxt;
        }
        mytcp_moderate_cwnd(tp);
    } else {
        mytcp_cwnd_down(sk);
    }
}

/* Undo the Loss state after a spurious retransmission timeout: clear the
 * LOST tags, restore cwnd and, for SACK connections, go straight to Open. */
static int mytcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
{
    if (mytcp_may_undo(tp)) {
        struct sk_buff *skb;

        sk_stream_for_retrans_queue(skb, sk) {
            TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
        }

        clear_all_retrans_hints(tp);

        tp->lost_out = 0;
        tp->left_out = tp->sacked_out;
        mytcp_undo_cwr(sk, 1);
        MYNET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
        inet_csk(sk)->icsk_retransmits = 0;
        tp->undo_marker = 0;
        if (!IsReno(tp))
            tcp_set_ca_state(sk, TCP_CA_Open);
        return 1;
    }
    return 0;
}

/* Partial ACK during Recovery: undo what can be undone and report whether
 * the partial ACK should still be treated as a sign of loss. */
static int mytcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp, int acked)
{
    /* Partial ACKs stay dubious for Reno, and when reordering is high. */
    int failed = IsReno(tp) || tp->fackets_out > tp->reordering;

    if (mytcp_may_undo(tp)) {
        if (tp->retrans_out == 0)
            tp->retrans_stamp = 0;

        mytcp_update_reordering(sk, mytcp_fackets_out(tp) + acked, 1);

        mytcp_undo_cwr(sk, 0);
        MYNET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);

        failed = 0;
    }
    return failed;
}

/* Heuristics deciding when it is time to enter the Recovery state. */
static int mytcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
{
    __u32 packets_out;

    /* Something was already marked lost. */
    if (tp->lost_out)
        return 1;

    /* The dupACK/FACK threshold was crossed. */
    if (mytcp_fackets_out(tp) > tp->reordering)
        return 1;

    /* The head of the queue has waited longer than the RTO. */
    if (mytcp_head_timedout(sk, tp))
        return 1;

    /* A small in-flight window is mostly SACKed and nothing new can be sent. */
    packets_out = tp->packets_out;
    if (packets_out <= tp->reordering &&
        tp->sacked_out >= max_t(__u32, packets_out / 2, sysctl_tcp_reordering) &&
        !mytcp_may_send_now(sk, tp)) {
        return 1;
    }
    return 0;
}

static inline void mytcp_reset_reno_sack(struct tcp_sock *tp)
{
    tp->sacked_out = 0;
    tp->left_out = tp->lost_out;
}

/* Walk the write queue and retransmit: first every segment tagged LOST,
 * then (for SACK connections in Recovery with window to spare) forward
 * retransmissions. The *_skb_hint fields cache the walk position. */
void mytcp_xmit_retransmit_queue(struct sock *sk)
{
    const struct inet_connection_sock *icsk = inet_csk(sk);
    struct tcp_sock *tp = tcp_sk(sk);
    struct sk_buff *skb;
    int packet_cnt;

    if (tp->retransmit_skb_hint) {
        skb = tp->retransmit_skb_hint;
        packet_cnt = tp->retransmit_cnt_hint;
    } else {
        skb = sk->sk_write_queue.next;
        packet_cnt = 0;
    }

    if (tp->lost_out) {
        sk_stream_for_retrans_queue_from(skb, sk) {
            __u8 sacked = TCP_SKB_CB(skb)->sacked;

            tp->retransmit_skb_hint = skb;
            tp->retransmit_cnt_hint = packet_cnt;

            if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
                return;

            if (sacked & TCPCB_LOST) {
                if (!(sacked & (TCPCB_SACKED_ACKED | TCPCB_SACKED_RETRANS))) {
                    if (mytcp_retransmit_skb(sk, skb)) {
                        tp->retransmit_skb_hint = NULL;
                        return;
                    }
                    if (icsk->icsk_ca_state != TCP_CA_Loss)
                        MYNET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
                    else
                        MYNET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);

                    if (skb == skb_peek(&sk->sk_write_queue))
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                      inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
                }

                packet_cnt += tcp_skb_pcount(skb);
                if (packet_cnt >= tp->lost_out)
                    break;
            }
        }
    }

    /* Forward retransmissions only make sense in Recovery, with SACK,
     * and when no fresh data can be sent instead. */
    if (icsk->icsk_ca_state != TCP_CA_Recovery)
        return;

    if (!tp->rx_opt.sack_ok)
        return;

    if (mytcp_may_send_now(sk, tp))
        return;

    if (tp->forward_skb_hint) {
        skb = tp->forward_skb_hint;
        packet_cnt = tp->forward_cnt_hint;
    } else {
        skb = sk->sk_write_queue.next;
        packet_cnt = 0;
    }

    sk_stream_for_retrans_queue_from(skb, sk) {
        tp->forward_cnt_hint = packet_cnt;
        tp->forward_skb_hint = skb;

        if (++packet_cnt > tp->fackets_out)
            break;

        if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
            break;

        if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)
            continue;

        if (mytcp_retransmit_skb(sk, skb)) {
            tp->forward_skb_hint = NULL;
            break;
        }

        if (skb == skb_peek(&sk->sk_write_queue))
            inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                          inet_csk(sk)->icsk_rto, TCP_RTO_MAX);

        MYNET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
    }
}

/* The heart of NewReno/SACK loss recovery, run for every "dubious" ACK:
 * update the scoreboard, drive the Open/Disorder/CWR/Recovery/Loss state
 * machine, try the various undo paths, and (re)transmit as allowed. */
static void mytcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
                    int prior_packets, int flag)
{
    struct inet_connection_sock *icsk = inet_csk(sk);
    struct tcp_sock *tp = tcp_sk(sk);
    int is_dupack = (tp->snd_una == prior_snd_una && !(flag & FLAG_NOT_DUP));

    if (!tp->packets_out)
        tp->sacked_out = 0;
    if (tp->sacked_out == 0)
        tp->fackets_out = 0;

    /* An ECN echo forbids undo: the reduction was asked for explicitly. */
    if (flag & FLAG_ECE)
        tp->prior_ssthresh = 0;

    if (tp->sacked_out && mytcp_check_sack_reneging(sk))
        return;

    if ((flag & FLAG_DATA_LOST) && before(tp->snd_una, tp->high_seq) &&
        icsk->icsk_ca_state != TCP_CA_Open &&
        tp->fackets_out > tp->reordering) {
        mytcp_mark_head_lost(sk, tp, tp->fackets_out - tp->reordering,
                     tp->high_seq);
        MYNET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
    }

    tcp_sync_left_out(tp);

    if (icsk->icsk_ca_state == TCP_CA_Open) {
        if (!mysysctl_tcp_frto)
            BUG_TRAP(tp->retrans_out == 0);
        tp->retrans_stamp = 0;
    } else if (!before(tp->snd_una, tp->high_seq)) {
        /* high_seq has been fully ACKed: see whether the state can be
         * left, undoing the cwnd reduction where possible. */
        switch (icsk->icsk_ca_state) {
        case TCP_CA_Loss:
            icsk->icsk_retransmits = 0;
            if (mytcp_try_undo_recovery(sk, tp))
                return;
            break;

        case TCP_CA_CWR:
            if (tp->snd_una != tp->high_seq) {
                mytcp_complete_cwr(sk);
                tcp_set_ca_state(sk, TCP_CA_Open);
            }
            break;

        case TCP_CA_Disorder:
            mytcp_try_undo_dsack(sk, tp);
            if (!tp->undo_marker || IsReno(tp) ||
                tp->snd_una != tp->high_seq) {
                tp->undo_marker = 0;
                tcp_set_ca_state(sk, TCP_CA_Open);
            }
            break;

        case TCP_CA_Recovery:
            if (IsReno(tp))
                mytcp_reset_reno_sack(tp);
            if (mytcp_try_undo_recovery(sk, tp))
                return;
            mytcp_complete_cwr(sk);
            break;
        }
    }

    switch (icsk->icsk_ca_state) {
    case TCP_CA_Recovery:
        if (prior_snd_una == tp->snd_una) {
            if (IsReno(tp) && is_dupack)
                mytcp_add_reno_sack(sk);
        } else {
            int acked = prior_packets - tp->packets_out;

            if (IsReno(tp))
                mytcp_remove_reno_sacks(sk, tp, acked);
            is_dupack = mytcp_try_undo_partial(sk, tp, acked);
        }
        break;

    case TCP_CA_Loss:
        if (flag & FLAG_DATA_ACKED)
            icsk->icsk_retransmits = 0;
        if (!mytcp_try_undo_loss(sk, tp)) {
            mytcp_moderate_cwnd(tp);
            mytcp_xmit_retransmit_queue(sk);
            return;
        }
        if (icsk->icsk_ca_state != TCP_CA_Open)
            return;
        /* Loss was undone: fall through to the Open-state processing. */

    default:
        if (IsReno(tp)) {
            if (tp->snd_una != prior_snd_una)
                mytcp_reset_reno_sack(tp);
            if (is_dupack)
                mytcp_add_reno_sack(sk);
        }

        if (icsk->icsk_ca_state == TCP_CA_Disorder)
            mytcp_try_undo_dsack(sk, tp);

        if (!mytcp_time_to_recover(sk, tp)) {
            mytcp_try_to_open(sk, tp, flag);
            return;
        }

        /* Otherwise enter Recovery, reducing ssthresh and cwnd. */
        if (IsReno(tp))
            MYNET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY);
        else
            MYNET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY);

        tp->high_seq = tp->snd_nxt;
        tp->prior_ssthresh = 0;
        tp->undo_marker = tp->snd_una;
        tp->undo_retrans = tp->retrans_out;

        if (icsk->icsk_ca_state < TCP_CA_CWR) {
            if (!(flag & FLAG_ECE))
                tp->prior_ssthresh = tcp_current_ssthresh(sk);
            tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
            TCP_ECN_queue_cwr(tp);
        }

        tp->bytes_acked = 0;
        tp->snd_cwnd_cnt = 0;
        tcp_set_ca_state(sk, TCP_CA_Recovery);
    }

    if (is_dupack || mytcp_head_timedout(sk, tp))
        mytcp_update_scoreboard(sk, tp);
    mytcp_cwnd_down(sk);
    mytcp_xmit_retransmit_queue(sk);
}

/* Zero-window probing: not yet implemented. */
static void mytcp_ack_probe(struct sock *sk)
{
}

/* Main entry point for processing an incoming ACK. */
static int mytcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
{
    struct inet_connection_sock *icsk = inet_csk(sk);
    struct tcp_sock *tp = tcp_sk(sk);
    u32 prior_snd_una = tp->snd_una;
    u32 ack_seq = TCP_SKB_CB(skb)->seq;
    u32 ack = TCP_SKB_CB(skb)->ack_seq;
    u32 prior_in_flight;
    s32 seq_rtt;
    int prior_packets;

    /* An ACK for data we have not sent yet. */
    if (after(ack, tp->snd_nxt))
        goto uninteresting_ack;

    /* An ACK older than our current snd_una. */
    if (before(ack, prior_snd_una))
        goto old_ack;

    if (mysysctl_tcp_abc && icsk->icsk_ca_state < TCP_CA_CWR) {
        tp->bytes_acked += ack - prior_snd_una;
        printk(KERN_INFO "the icsk_ca_state: %d, bytes_acked: %u\n",
               icsk->icsk_ca_state, tp->bytes_acked);
    }

    if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
        /* Fast path: only the window edge advanced. */
        //printk(KERN_INFO "go here: %s, %d\n", __FUNCTION__, __LINE__);
        tcp_update_wl(tp, ack, ack_seq);
        tp->snd_una = ack;
        flag |= FLAG_WIN_UPDATE;

        tcp_ca_event(sk, CA_EVENT_FAST_ACK);

        MYNET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
    } else {
        /* Slow path: data, window updates, SACK and ECN processing. */
        //printk(KERN_INFO "go here!! %x, %x\n", ack_seq, TCP_SKB_CB(skb)->end_seq );
        if (ack_seq != TCP_SKB_CB(skb)->end_seq)
            flag |= FLAG_DATA;
        else
            MYNET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);

        flag |= mytcp_ack_update_window(sk, tp, skb, ack, ack_seq);

        printk(KERN_INFO "%s:%d: sacked: %d\n", __FUNCTION__, __LINE__,
               TCP_SKB_CB(skb)->sacked);
        if (TCP_SKB_CB(skb)->sacked)
            flag |= mytcp_sacktag_write_queue(sk, skb, prior_snd_una);

        if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
            flag |= FLAG_ECE;

        tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
    }

    sk->sk_err_soft = 0;
    tp->rcv_tstamp = tcp_time_stamp;
    prior_packets = tp->packets_out;
    if (!prior_packets)
        goto no_queue;

    prior_in_flight = tcp_packets_in_flight(tp);

    /* Drop everything the ACK covers from the retransmission queue. */
    flag |= mytcp_clean_rtx_queue(sk, &seq_rtt);

    if (tp->frto_counter)
        mytcp_process_frto(sk, prior_snd_una);

    if (mytcp_ack_is_dubious(sk, flag)) {
        /* A questionable ACK: raise cwnd cautiously and run recovery. */
        if ((flag & FLAG_DATA_ACKED) && mytcp_may_raise_cwnd(sk, flag))
            mytcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0);
        mytcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
    } else {
        printk(KERN_INFO "%s:%d: the flag: %x\n", __FUNCTION__, __LINE__, flag);
        if (flag & FLAG_DATA_ACKED) {
            mytcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1);
            printk(KERN_INFO "%s:%d: the cwnd: %u\n", __FUNCTION__,
                   __LINE__, tp->snd_cwnd);
        }
    }

    if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
        dst_confirm(sk->sk_dst_cache);

    return 1;

no_queue:
    icsk->icsk_probes_out = 0;

    /* If there is data to send, check whether a window probe is needed. */
    if (sk->sk_send_head)
        mytcp_ack_probe(sk);
    return 1;

old_ack:
    if (TCP_SKB_CB(skb)->sacked)
        mytcp_sacktag_write_queue(sk, skb, prior_snd_una);

uninteresting_ack:
    SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
    return 0;
}

/* Metric initialization from the route cache: not yet implemented. */
static void mytcp_init_metrics(struct sock *sk)
{
}

/* Receive/send buffer sizing: not yet implemented. */
static void mytcp_init_buffer_space(struct sock *sk)
{
}

static inline int mykeepalive_time_when(const struct tcp_sock *tp)
{
    return tp->keepalive_time ? : mysysctl_tcp_keepalive_time;
}

static int mytcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                       struct tcphdr *th, unsigned len)
{
    struct tcp_sock *tp = tcp_sk(sk);
    struct inet
/* (listing truncated here; continues on page 2 of 5) */
