
📄 mytcp_input.c

📁 Project: a Linux-based implementation of the TCP/IP protocol stack
💻 Language: C
📖 Page 1 of 5
static int __mytcp_grow_window(struct sock *sk, struct tcp_sock *tp,
			       struct sk_buff *skb)
{
	/* Slow path: halve both estimates until the skb would fit. */
	int truesize = mytcp_win_from_space(skb->truesize)/2;
	int window = mytcp_win_from_space(mysysctl_tcp_rmem[2])/2;

	printk(KERN_INFO "%s:%d: the return: %d\n", __FUNCTION__, __LINE__,
	       2 * inet_csk(sk)->icsk_ack.rcv_mss);

	while (tp->rcv_ssthresh <= window) {
		if (truesize <= skb->len)
			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;

		truesize >>= 1;
		window >>= 1;
	}
	return 0;
}

/* Grow the receive-window slow-start threshold while memory allows. */
static void mytcp_grow_window(struct sock *sk, struct tcp_sock *tp,
			      struct sk_buff *skb)
{
	if (tp->rcv_ssthresh < tp->window_clamp &&
	    (int)tp->rcv_ssthresh < mytcp_space(sk) &&
	    !mytcp_memory_pressure) {
		int incr;

		printk(KERN_INFO "%s:%d: win from truesize: %d, len: %d\n",
		       __FUNCTION__, __LINE__,
		       mytcp_win_from_space(skb->truesize), skb->len);

		/* A well-behaved skb carries more payload than overhead. */
		if (mytcp_win_from_space(skb->truesize) <= skb->len)
			incr = 2*tp->advmss;
		else
			incr = __mytcp_grow_window(sk, tp, skb);

		if (incr) {
			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
					       tp->window_clamp);
			inet_csk(sk)->icsk_ack.quick |= 1;
		}
	}
}

/* Start or complete a receiver-side RTT sample (no timestamps needed). */
static inline void mytcp_rcv_rtt_measure(struct tcp_sock *tp)
{
	if (tp->rcv_rtt_est.time == 0)
		goto new_measure;
	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
		return;
	mytcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1);

new_measure:
	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
	tp->rcv_rtt_est.time = tcp_time_stamp;
}

/* Estimate the sender's MSS from the sizes of incoming segments. */
static void mytcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const unsigned int lss = icsk->icsk_ack.last_seg_size;
	unsigned int len;

	icsk->icsk_ack.last_seg_size = 0;

	len = skb->len;
	if (len >= icsk->icsk_ack.rcv_mss) {
		icsk->icsk_ack.rcv_mss = len;
	} else {
		len += skb->data - skb->h.raw;
		if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
		    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
		     !(tcp_flag_word(skb->h.th) & TCP_REMNANT))) {
			len -= tcp_sk(sk)->tcp_header_len;
			icsk->icsk_ack.last_seg_size = len;
			if (len == lss) {
				icsk->icsk_ack.rcv_mss = len;
				return;
			}
		}
		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
	}
}

static inline void MYTCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (tp->ecn_flags & TCP_ECN_OK) {
		if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags))
			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
		else if (INET_ECN_is_not_ect(TCP_SKB_CB(skb)->flags))
			mytcp_enter_quickack_mode((struct sock *)tp);
	}
}

/* Per-segment receive bookkeeping: ACK scheduling, MSS/RTT estimation,
 * delayed-ACK timeout (ato) adaptation and window growth. */
static void mytcp_event_data_recv(struct sock *sk, struct tcp_sock *tp,
				  struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now;

	inet_csk_schedule_ack(sk);

	mytcp_measure_rcv_mss(sk, skb);

	mytcp_rcv_rtt_measure(tp);

	now = tcp_time_stamp;

	if (!icsk->icsk_ack.ato) {
		/* First packet: start with the minimum ATO. */
		mytcp_incr_quickack(sk);
		icsk->icsk_ack.ato = TCP_ATO_MIN;
	} else {
		int m = now - icsk->icsk_ack.lrcvtime;

		if (m <= TCP_ATO_MIN/2) {
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN/2;
		} else if (m < icsk->icsk_ack.ato) {
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
			if (icsk->icsk_ack.ato > icsk->icsk_rto)
				icsk->icsk_ack.ato = icsk->icsk_rto;
		} else if (m > icsk->icsk_rto) {
			/* Very long gap: probably a restart after idle. */
			mytcp_incr_quickack(sk);
			sk_stream_mem_reclaim(sk);
		}
	}
	icsk->icsk_ack.lrcvtime = now;

	MYTCP_ECN_check_ce(tp, skb);

	if (skb->len >= 128) {
		printk(KERN_INFO "%s:%d: grow the window!\n",
		       __FUNCTION__, __LINE__);
		mytcp_grow_window(sk, tp, skb);
	}
}
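The ato branch in mytcp_event_data_recv above adapts the delayed-ACK timeout to the inter-arrival gap: tightly spaced segments pull it toward TCP_ATO_MIN, moderate gaps are blended in as a decaying average capped by the RTO, and a gap longer than the RTO re-enters quickack mode. A minimal userspace sketch of just that update rule (the constants and the ato_update helper are illustrative, not part of this source):

	#include <stdio.h>

	#define ATO_MIN 40   /* illustrative stand-in for TCP_ATO_MIN (ms) */
	#define RTO     200  /* illustrative stand-in for icsk_rto (ms)    */

	/* Decaying-average update of the delayed-ACK timeout, mirroring the
	 * branch in mytcp_event_data_recv(); purely illustrative. */
	static unsigned int ato_update(unsigned int ato, unsigned int m)
	{
		if (ato == 0)
			return ATO_MIN;                 /* first segment seen */
		if (m <= ATO_MIN/2)
			return (ato >> 1) + ATO_MIN/2;  /* dense stream */
		if (m < ato) {
			ato = (ato >> 1) + m;           /* blend the gap in */
			return ato > RTO ? RTO : ato;
		}
		return ato;       /* m >= ato: quickack may fire, ato unchanged */
	}

	int main(void)
	{
		unsigned int ato = 0;
		unsigned int gaps[] = { 0, 10, 10, 120, 35, 35 };
		for (int i = 0; i < 6; i++) {
			ato = ato_update(ato, gaps[i]);
			printf("gap=%3u -> ato=%u\n", gaps[i], ato);
		}
		return 0;
	}

Feeding it a dense burst followed by a pause shows the timeout shrinking toward ATO_MIN and then stretching again as the gaps widen.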
/* Process an incoming FIN: half-close the receive side and advance the
 * connection state machine. */
static void mytcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
{
	struct tcp_sock *tp = tcp_sk(sk);

	inet_csk_schedule_ack(sk);

	sk->sk_shutdown |= RCV_SHUTDOWN;
	sock_set_flag(sk, SOCK_DONE);

	switch (sk->sk_state) {
	case TCP_SYN_RECV:
	case TCP_ESTABLISHED:
		tcp_set_state(sk, TCP_CLOSE_WAIT);
		inet_csk(sk)->icsk_ack.pingpong = 1;
		break;
	case TCP_CLOSE_WAIT:
	case TCP_CLOSING:
		/* Duplicate FIN in these states: nothing to do. */
		break;
	case TCP_LAST_ACK:
		break;
	case TCP_FIN_WAIT1:
		/* Simultaneous close: ACK their FIN and wait for our ACK. */
		mytcp_send_ack(sk);
		tcp_set_state(sk, TCP_CLOSING);
		break;
	case TCP_FIN_WAIT2:
		mytcp_send_ack(sk);
		mytcp_time_wait(sk, TCP_TIME_WAIT, 0);
		break;
	default:
		printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
		       __FUNCTION__, sk->sk_state);
		break;
	}

	/* A FIN empties the out-of-order queue and resets SACK state. */
	__skb_queue_purge(&tp->out_of_order_queue);
	if (tp->rx_opt.sack_ok)
		tcp_sack_reset(&tp->rx_opt);
	sk_stream_mem_reclaim(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		sk->sk_state_change(sk);

		if (sk->sk_shutdown == SHUTDOWN_MASK ||
		    sk->sk_state == TCP_CLOSE)
			sk_wake_async(sk, 1, POLL_HUP);
		else
			sk_wake_async(sk, 1, POLL_IN);
	}
}

/* Widen *sp to cover [seq, end_seq] if the two ranges touch or overlap;
 * returns 1 on success, 0 if they are disjoint. */
static inline int mytcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
				    u32 end_seq)
{
	if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
		if (before(seq, sp->start_seq))
			sp->start_seq = seq;
		if (after(end_seq, sp->end_seq))
			sp->end_seq = end_seq;
		return 1;
	}
	return 0;
}

static void mytcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq)
{
	if (!tp->rx_opt.dsack)
		mytcp_dsack_set(tp, seq, end_seq);
	else
		mytcp_sack_extend(tp->duplicate_sack, seq, end_seq);
}

/* Move now-contiguous segments from the out-of-order queue to the
 * receive queue, reporting any duplicate ranges with D-SACK. */
static void mytcp_ofo_queue(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 dsack_high = tp->rcv_nxt;
	struct sk_buff *skb;

	while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
		if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
			break;

		if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
			__u32 dsack = dsack_high;
			if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
				dsack_high = TCP_SKB_CB(skb)->end_seq;
			mytcp_dsack_extend(tp, TCP_SKB_CB(skb)->seq, dsack);
		}

		if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
			/* Entirely old data: drop it. */
			__skb_unlink(skb, &tp->out_of_order_queue);
			__kfree_skb(skb);
			continue;
		}

		__skb_unlink(skb, &tp->out_of_order_queue);
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (skb->h.th->fin)
			mytcp_fin(skb, sk, skb->h.th);
	}
}

/* Drop SACK blocks that the advancing rcv_nxt has made redundant. */
static void mytcp_sack_remove(struct tcp_sock *tp)
{
	struct tcp_sack_block *sp = &tp->selective_acks[0];
	int num_sacks = tp->rx_opt.num_sacks;
	int this_sack;

	/* Empty ofo queue: only a pending D-SACK may remain. */
	if (skb_queue_empty(&tp->out_of_order_queue)) {
		tp->rx_opt.num_sacks = 0;
		tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
		return;
	}

	for (this_sack = 0; this_sack < num_sacks; ) {
		if (!before(tp->rcv_nxt, sp->start_seq)) {
			int i;

			/* rcv_nxt must cover the whole block. */
			BUG_TRAP(!before(tp->rcv_nxt, sp->end_seq));

			for (i = this_sack+1; i < num_sacks; i++)
				tp->selective_acks[i-1] = tp->selective_acks[i];
			num_sacks--;
			continue;
		}
		this_sack++;
		sp++;
	}
	if (num_sacks != tp->rx_opt.num_sacks) {
		tp->rx_opt.num_sacks = num_sacks;
		tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack,
					   4 - tp->rx_opt.tstamp_ok);
	}
}

/* Merge the first SACK block with any later block it now touches. */
static void mytcp_sack_maybe_coalesce(struct tcp_sock *tp)
{
	int this_sack;
	struct tcp_sack_block *sp = &tp->selective_acks[0];
	struct tcp_sack_block *swalk = sp+1;

	for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; ) {
		if (mytcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
			int i;

			/* Zap swalk by sliding every later block up a slot. */
			tp->rx_opt.num_sacks--;
			tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack,
						   4 - tp->rx_opt.tstamp_ok);
			for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
				sp[i] = sp[i+1];
			continue;
		}
		this_sack++;
		swalk++;
	}
}
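All the comparisons above go through the kernel's before()/after() macros, which subtract in 32-bit space and test the sign, so they stay correct across sequence-number wraparound; mytcp_sack_extend relies on this when it widens a block. A standalone sketch of the same merge rule (seq_before, seq_after, and sack_extend are my userspace stand-ins):

	#include <stdint.h>
	#include <stdio.h>

	/* Wraparound-safe sequence comparisons, as in the kernel's net/tcp.h. */
	static int seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
	static int seq_after(uint32_t a, uint32_t b)  { return seq_before(b, a); }

	struct sack_block { uint32_t start_seq, end_seq; };

	/* Same merge rule as mytcp_sack_extend(): grow the block when the
	 * two ranges touch or overlap in sequence space. */
	static int sack_extend(struct sack_block *sp, uint32_t seq, uint32_t end_seq)
	{
		if (!seq_after(seq, sp->end_seq) && !seq_after(sp->start_seq, end_seq)) {
			if (seq_before(seq, sp->start_seq))
				sp->start_seq = seq;
			if (seq_after(end_seq, sp->end_seq))
				sp->end_seq = end_seq;
			return 1;
		}
		return 0;
	}

	int main(void)
	{
		/* A block that straddles the 2^32 wrap still merges correctly. */
		struct sack_block b = { 0xfffffff0u, 0x00000010u };
		printf("merge: %d\n", sack_extend(&b, 0x00000008u, 0x00000020u));
		printf("block: %#x..%#x\n", b.start_seq, b.end_seq);
		return 0;
	}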
static inline void mytcp_sack_swap(struct tcp_sack_block *sack1,
				   struct tcp_sack_block *sack2)
{
	__u32 tmp;

	tmp = sack1->start_seq;
	sack1->start_seq = sack2->start_seq;
	sack2->start_seq = tmp;

	tmp = sack1->end_seq;
	sack1->end_seq = sack2->end_seq;
	sack2->end_seq = tmp;
}

/* Record a new out-of-order segment in the SACK table, keeping the most
 * recently changed block first and at most four blocks in total. */
static void mytcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_sack_block *sp = &tp->selective_acks[0];
	int cur_sacks = tp->rx_opt.num_sacks;
	int this_sack;

	if (!cur_sacks)
		goto new_sack;

	/* Try to extend an existing block, then bubble it to the front. */
	for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) {
		if (mytcp_sack_extend(sp, seq, end_seq)) {
			for (; this_sack > 0; this_sack--, sp--)
				mytcp_sack_swap(sp, sp-1);
			if (cur_sacks > 1)
				mytcp_sack_maybe_coalesce(tp);
			return;
		}
	}

	/* Table full: drop the oldest block to make room. */
	if (this_sack >= 4) {
		this_sack--;
		tp->rx_opt.num_sacks--;
		sp--;
	}
	for (; this_sack > 0; this_sack--, sp--)
		*sp = *(sp-1);

new_sack:
	sp->start_seq = seq;
	sp->end_seq = end_seq;
	tp->rx_opt.num_sacks++;
	tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack,
				   4 - tp->rx_opt.tstamp_ok);
}
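The recurring min(num_sacks + dsack, 4 - tp->rx_opt.tstamp_ok) bound comes from TCP option space: the option area holds at most 40 bytes, a SACK option costs 2 + 8n bytes, and the timestamp option (10 bytes, normally padded to 12) leaves room for only three blocks. A quick check of that arithmetic (option sizes assumed from the RFCs, not taken from this source):

	#include <stdio.h>

	int main(void)
	{
		/* 40 bytes of TCP option space; the SACK option header is
		 * 2 bytes and each block is 8 bytes (two 32-bit seq numbers). */
		int opt_space = 40;
		int ts_bytes  = 12;	/* timestamp option, padded to a word */

		for (int tstamp_ok = 0; tstamp_ok <= 1; tstamp_ok++) {
			int room = opt_space - (tstamp_ok ? ts_bytes : 0);
			printf("tstamp_ok=%d -> max SACK blocks = %d\n",
			       tstamp_ok, (room - 2) / 8);
		}
		return 0;
	}

This prints 4 blocks without timestamps and 3 with them, which is exactly the 4 - tstamp_ok cap used throughout the SACK code above.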
/* Main receive-path queueing: in-order data goes to sk_receive_queue (or
 * straight to a waiting reader), everything else is D-SACKed, dropped,
 * or filed into the out-of-order queue. */
static void mytcp_data_queue(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th;
	struct tcp_sock *tp = tcp_sk(sk);
	int eaten = -1;

	printk(KERN_INFO "enter mytcp_data_queue!\n");

	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
		goto drop;

	__skb_pull(skb, th->doff*4);

	TCP_ECN_accept_cwr(tp, skb);

	printk(KERN_INFO "%s:%d: dsack: %d\n", __FUNCTION__, __LINE__,
	       tp->rx_opt.dsack);
	if (tp->rx_opt.dsack) {
		tp->rx_opt.dsack = 0;
		tp->rx_opt.eff_sacks = min_t(unsigned int, tp->rx_opt.num_sacks,
					     4 - tp->rx_opt.tstamp_ok);
	}

	if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
		/* In order: the common case. */
		if (tcp_receive_window(tp) == 0)
			goto out_of_window;

		printk(KERN_INFO "%s:%d: %p==%p\n", __FUNCTION__, __LINE__,
		       tp->ucopy.task, current);
		if (tp->ucopy.task == current &&
		    tp->copied_seq == tp->rcv_nxt && tp->ucopy.len &&
		    sock_owned_by_user(sk) && !tp->urg_data) {
			int chunk = min_t(unsigned int, skb->len, tp->ucopy.len);

			/* Copy straight into the waiting reader's iovec. */
			__set_current_state(TASK_RUNNING);

			local_bh_enable();
			if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
				tp->ucopy.len -= chunk;
				tp->copied_seq += chunk;
				eaten = (chunk == skb->len && !th->fin);
				mytcp_rcv_space_adjust(sk);
			}
			local_bh_disable();
		}

		if (eaten <= 0) {
queue_and_out:
			if (eaten < 0 &&
			    (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
			     !sk_stream_rmem_schedule(sk, skb))) {
				if (mytcp_prune_queue(sk) < 0 ||
				    !sk_stream_rmem_schedule(sk, skb))
					goto drop;
			}
			sk_stream_set_owner_r(skb, sk);
			__skb_queue_tail(&sk->sk_receive_queue, skb);
		}
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (skb->len)
			mytcp_event_data_recv(sk, tp, skb);
		if (th->fin)
			mytcp_fin(skb, sk, th);

		if (!skb_queue_empty(&tp->out_of_order_queue)) {
			mytcp_ofo_queue(sk);

			if (skb_queue_empty(&tp->out_of_order_queue))
				inet_csk(sk)->icsk_ack.pingpong = 0;
		}

		if (tp->rx_opt.num_sacks)
			mytcp_sack_remove(tp);

		tcp_fast_path_check(sk, tp);

		if (eaten > 0)
			__kfree_skb(skb);
		else if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk, 0);
		return;
	}

	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
		/* Completely old data: D-SACK it and ACK quickly. */
		MYNET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
		mytcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);

out_of_window:
		mytcp_enter_quickack_mode(sk);
		inet_csk_schedule_ack(sk);
drop:
		__kfree_skb(skb);
		return;
	}

	/* Entirely beyond the advertised window. */
	if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
		goto out_of_window;

	mytcp_enter_quickack_mode(sk);

	if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
		/* Partial overlap: D-SACK the duplicate part, queue the rest. */
		mytcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);

		if (!tcp_receive_window(tp))
			goto out_of_window;
		goto queue_and_out;
	}

	MYTCP_ECN_check_ce(tp, skb);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    !sk_stream_rmem_schedule(sk, skb)) {
		if (mytcp_prune_queue(sk) < 0 ||
		    !sk_stream_rmem_schedule(sk, skb))
			goto drop;
	}

	/* Out of order: disable the header-prediction fast path and file
	 * the segment into the sorted out-of-order queue. */
	tp->pred_flags = 0;
	inet_csk_schedule_ack(sk);

	sk_stream_set_owner_r(skb, sk);

	if (!skb_peek(&tp->out_of_order_queue)) {
		/* First ofo segment: start a fresh SACK table. */
		if (tp->rx_opt.sack_ok) {
			tp->rx_opt.num_sacks = 1;
			tp->rx_opt.dsack = 0;
			tp->rx_opt.eff_sacks = 1;
			tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
			tp->selective_acks[0].end_seq = TCP_SKB_CB(skb)->end_seq;
		}
		__skb_queue_head(&tp->out_of_order_queue, skb);
	} else {
		struct sk_buff *skb1 = tp->out_of_order_queue.prev;
		u32 seq = TCP_SKB_CB(skb)->seq;
		u32 end_seq = TCP_SKB_CB(skb)->end_seq;

		if (seq == TCP_SKB_CB(skb1)->end_seq) {
			/* Common case: append right after the last segment. */
			__skb_append(skb1, skb, &tp->out_of_order_queue);

			if (!tp->rx_opt.num_sacks ||
			    tp->selective_acks[0].end_seq != seq)
				goto add_sack;

			tp->selective_acks[0].end_seq = end_seq;
			return;
		}

		/* Walk backwards to find the insertion point. */
		do {
			if (!after(TCP_SKB_CB(skb1)->seq, seq))
				break;
		} while ((skb1 = skb1->prev) !=
			 (struct sk_buff *)&tp->out_of_order_queue);

		/* Does the new segment overlap its predecessor? */
		if (skb1 != (struct sk_buff *)&tp->out_of_order_queue &&
		    before(seq, TCP_SKB_CB(skb1)->end_seq)) {
			if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
				/* Fully covered already: drop and D-SACK. */
				__kfree_skb(skb);
				mytcp_dsack_set(tp, seq, end_seq);
				goto add_sack;
			}
			if (after(seq, TCP_SKB_CB(skb1)->seq)) {
				/* Partial overlap: report the duplicate span. */
				mytcp_dsack_set(tp, seq,
						TCP_SKB_CB(skb1)->end_seq);
			} else {
				skb1 = skb1->prev;
			}
		}
		__skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue);

		/* Remove queued segments the new one fully covers. */
		while ((skb1 = skb->next) !=
		       (struct sk_buff *)&tp->out_of_order_queue &&
		       after(end_seq, TCP_SKB_CB(skb1)->seq)) {
			if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
				mytcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq,
						   end_seq);
				break;
			}
			__skb_unlink(skb1, &tp->out_of_order_queue);
			mytcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq,
					   TCP_SKB_CB(skb1)->end_seq);
			__kfree_skb(skb1);
		}

add_sack:
		if (tp->rx_opt.sack_ok)
			mytcp_sack_new_ofo_skb(sk, seq, end_seq);
	}
}

static int mytcp_ack(struct sock *sk, struct sk_buff *skb, int flag);
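Taken together, the branches of mytcp_data_queue classify each segment against rcv_nxt and the advertised window before deciding how to queue it. A userspace sketch of that branch order (classify() and its strings are mine; the zero-window and memory-pruning details are omitted):

	#include <stdint.h>
	#include <stdio.h>

	static int seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
	static int seq_after(uint32_t a, uint32_t b)  { return seq_before(b, a); }

	/* Mirror of the branch order in mytcp_data_queue(). */
	static const char *classify(uint32_t seq, uint32_t end_seq,
				    uint32_t rcv_nxt, uint32_t rcv_wnd)
	{
		if (seq == rcv_nxt)
			return "in order: queue (or copy to a waiting reader)";
		if (!seq_after(end_seq, rcv_nxt))
			return "entirely old: D-SACK and quick-ACK";
		if (!seq_before(seq, rcv_nxt + rcv_wnd))
			return "beyond the window: drop and quick-ACK";
		if (seq_before(seq, rcv_nxt))
			return "partial overlap: D-SACK the old part, queue the rest";
		return "out of order: insert into the ofo queue, record a SACK block";
	}

	int main(void)
	{
		uint32_t rcv_nxt = 1000, rcv_wnd = 4000;
		struct { uint32_t s, e; } segs[] = {
			{ 1000, 1500 }, { 200, 900 }, { 6000, 6500 },
			{ 800, 1600 }, { 2000, 2500 },
		};
		for (int i = 0; i < 5; i++)
			printf("[%u,%u) -> %s\n", segs[i].s, segs[i].e,
			       classify(segs[i].s, segs[i].e, rcv_nxt, rcv_wnd));
		return 0;
	}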
