⭐ 虫虫下载站

📄 mytcp_output.c

📁 An implementation of a Linux-based TCP/IP protocol stack
💻 C
📖 Page 1 of 3
	/* Tail of mytcp_transmit_skb() -- the listing opens mid-function. */
	mytcp_enter_cwr(sk);
	return err == NET_XMIT_CN ? 0 : err;

#undef SYSCTL_FLAG_TSTAMPS
#undef SYSCTL_FLAG_WSCALE
#undef SYSCTL_FLAG_SACK
}

/* Build the SYN for an active open, queue it on the write queue,
 * transmit it and arm the retransmission timer. */
int mytcp_connect(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;

	mytcp_connect_init(sk);

	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
	if (unlikely(buff == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(buff, MAX_TCP_HEADER);

	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
	MYTCP_ECN_send_syn(sk, tp, buff);
	TCP_SKB_CB(buff)->sacked = 0;
	skb_shinfo(buff)->tso_segs = 1;
	skb_shinfo(buff)->tso_size = 0;
	buff->csum = 0;
	TCP_SKB_CB(buff)->seq = tp->write_seq++;	/* the SYN consumes one sequence number */
	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
	tp->snd_nxt = tp->write_seq;
	tp->pushed_seq = tp->write_seq;

	/* Send it off. */
	TCP_SKB_CB(buff)->when = tcp_time_stamp;
	tp->retrans_stamp = TCP_SKB_CB(buff)->when;
	skb_header_release(buff);
	__skb_queue_tail(&sk->sk_write_queue, buff);
	sk_charge_skb(sk, buff);
	tp->packets_out += tcp_skb_pcount(buff);
	mytcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
	MYTCP_INC_STATS(TCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the SYN until an answer arrives. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
	return 0;
}

/* Pick a sequence number for a bare ACK: snd_nxt, unless that already
 * lies beyond the right edge of the offered window. */
static inline __u32 mytcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
{
	if (!before(tp->snd_una + tp->snd_wnd, tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tp->snd_una + tp->snd_wnd;
}

/* Send an ACK immediately.  If no skb can be allocated, fall back to
 * the delayed-ACK timer and try again later. */
void mytcp_send_ack(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct sk_buff *buff;

		buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
		if (buff == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX, TCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers and prepare control bits. */
		skb_reserve(buff, MAX_TCP_HEADER);
		buff->csum = 0;
		TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
		TCP_SKB_CB(buff)->sacked = 0;
		skb_shinfo(buff)->tso_segs = 1;
		skb_shinfo(buff)->tso_size = 0;

		/* Send it off. */
		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq =
			mytcp_acceptable_seq(sk, tp);
		TCP_SKB_CB(buff)->when = tcp_time_stamp;
		mytcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
	}
}

/* Compute the current effective MSS, accounting for PMTU changes and
 * SACK option overhead, and refresh tp->xmit_size_goal, the per-skb
 * size target used when TSO is in play. */
unsigned int mytcp_current_mss(struct sock *sk, int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	u16 xmit_size_goal;
	int doing_tso = 0;

	mss_now = tp->mss_cache;

	if (large_allowed && (sk->sk_route_caps & NETIF_F_TSO) && !tp->urg_mode)
		doing_tso = 1;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
			mss_now = mytcp_sync_mss(sk, mtu);
	}

	if (tp->rx_opt.eff_sacks)
		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
			    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));

	xmit_size_goal = mss_now;

	if (doing_tso) {
		xmit_size_goal = (65535 -
				  inet_csk(sk)->icsk_af_ops->net_header_len -
				  inet_csk(sk)->icsk_ext_hdr_len -
				  tp->tcp_header_len);

		if (tp->max_window &&
		    (xmit_size_goal > (tp->max_window >> 1)))
			xmit_size_goal = max((tp->max_window >> 1),
					     68U - tp->tcp_header_len);

		xmit_size_goal -= (xmit_size_goal % mss_now);
	}
	tp->xmit_size_goal = xmit_size_goal;

	return mss_now;
}

/* Initialize the TSO state of an skb: a single segment if it fits in
 * one MSS or the route cannot do TSO, otherwise ceil(len / mss)
 * segments of tso_size bytes each. */
static void mytcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
				   unsigned int mss_now)
{
	if (skb->len <= mss_now || !(sk->sk_route_caps & NETIF_F_TSO)) {
		skb_shinfo(skb)->tso_segs = 1;
		skb_shinfo(skb)->tso_size = 0;
	} else {
		unsigned int factor;

		factor = skb->len + (mss_now - 1);
		factor /= mss_now;
		skb_shinfo(skb)->tso_segs = factor;
		skb_shinfo(skb)->tso_size = mss_now;
	}
}

/* Make sure the packet count of this skb is initialized and agrees
 * with the current MSS; recompute it if not. */
static int mytcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
			       unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs ||
	    (tso_segs > 1 && skb_shinfo(skb)->tso_size != mss_now)) {
		mytcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}
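The `factor` computation in `mytcp_set_skb_tso_segs` above is plain integer ceiling division: a buffer of `len` bytes covers ceil(len / mss) segments. A minimal standalone sketch of just that arithmetic (the `tso_pcount` name is ours, not part of this source):

#include <assert.h>
#include <stdio.h>

/* Hypothetical standalone helper mirroring the factor computation in
 * mytcp_set_skb_tso_segs: how many MSS-sized segments cover len bytes. */
static unsigned int tso_pcount(unsigned int len, unsigned int mss)
{
	return (len + mss - 1) / mss;	/* ceil(len / mss) */
}

int main(void)
{
	/* 4000 bytes at an MSS of 1448 spans 3 segments: 1448+1448+1104. */
	assert(tso_pcount(4000, 1448) == 3);
	/* An exact multiple needs no rounding up. */
	assert(tso_pcount(2896, 1448) == 2);
	printf("tso_pcount checks passed\n");
	return 0;
}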
/* Minshall's variant of the Nagle check: true while our most recently
 * sent small segment is still unacknowledged. */
static inline int mytcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
	       !after(tp->snd_sml, tp->snd_nxt);
}

/* Return nonzero if the Nagle rules forbid sending this sub-MSS skb:
 * either the socket is corked, or classic Nagle applies and a small
 * segment is already in flight. */
static inline int mytcp_nagle_check(const struct tcp_sock *tp,
				    const struct sk_buff *skb,
				    unsigned mss_now, int nonagle)
{
	return (skb->len < mss_now &&
		((nonagle & TCP_NAGLE_CORK) ||
		 (!nonagle && tp->packets_out && mytcp_minshall_check(tp))));
}

/* Return nonzero if Nagle allows this skb to go out now. */
static inline int mytcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
				   unsigned int cur_mss, int nonagle)
{
	if (nonagle & TCP_NAGLE_PUSH)
		return 1;	/* forced push is always allowed */

	/* Urgent data and FINs must not be held back by Nagle. */
	if (tp->urg_mode || (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
		return 1;

	if (!mytcp_nagle_check(tp, skb, cur_mss, nonagle))
		return 1;

	return 0;
}

/* Congestion window check: how many whole packets of quota remain?
 * A FIN is always allowed one packet. */
static inline unsigned int mytcp_cwnd_test(struct tcp_sock *tp,
					   struct sk_buff *skb)
{
	u32 in_flight, cwnd;

	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	if (in_flight < cwnd)
		return (cwnd - in_flight);

	return 0;
}

/* Does at least the first segment of this skb fit into the receive
 * window offered by the peer? */
static inline int mytcp_snd_wnd_test(struct tcp_sock *tp,
				     struct sk_buff *skb,
				     unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tp->snd_una + tp->snd_wnd);
}

/* Combined send test: returns the remaining cwnd quota in packets if
 * Nagle, the congestion window and the receive window all allow this
 * skb to be sent now, else 0. */
static unsigned int mytcp_snd_test(struct sock *sk, struct sk_buff *skb,
				   unsigned int cur_mss, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	mytcp_init_tso_segs(sk, skb, cur_mss);

	if (!mytcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = mytcp_cwnd_test(tp, skb);
	if (cwnd_quota && !mytcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

/* How many bytes of this skb may go out right now: the smaller of the
 * remaining receive window and the cwnd quota expressed in bytes. */
static unsigned int mytcp_window_allows(struct tcp_sock *tp,
					struct sk_buff *skb,
					unsigned int mss_now,
					unsigned int cwnd)
{
	u32 window, cwnd_len;

	window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
	cwnd_len = mss_now * cwnd;
	return min(window, cwnd_len);
}

/* Split an skb on the write queue at len bytes, producing a second skb
 * for the remainder, and fix up the sequence, checksum, TSO and
 * SACK/retransmit bookkeeping for both halves. */
int mytcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
		   unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	u16 flags;

	BUG_ON(len > skb->len);

	clear_all_retrans_hints(tp);

	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_cloned(skb) && skb_is_nonlinear(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb for the second half. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM;

	sk_charge_skb(sk, buff);

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
	TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_HW) {
		/* Copy and checksum the data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len,
						       skb_put(buff, nsize),
						       nsize, 0);
		skb_trim(skb, len);
		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_HW;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Both halves keep the original send timestamp. */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up TSO segment counts for both halves. */
	mytcp_set_skb_tso_segs(sk, skb, mss_now);
	mytcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has already been sent out, adjust the various
	 * packet counts to match the new split. */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			   tcp_skb_pcount(buff);

		tp->packets_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
			tp->sacked_out -= diff;
		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
			tp->retrans_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
			tp->lost_out -= diff;
			tp->left_out -= diff;
		}

		if (diff > 0) {
			if (!tp->rx_opt.sack_ok) {
				tp->sacked_out -= diff;
				if ((int)tp->sacked_out < 0)
					tp->sacked_out = 0;
				tcp_sync_left_out(tp);
			}

			tp->fackets_out -= diff;
			if ((int)tp->fackets_out < 0)
				tp->fackets_out = 0;
		}
	}

	/* Link the new buffer into the queue right after skb. */
	skb_header_release(buff);
	__skb_append(skb, buff, &sk->sk_write_queue);

	return 0;
}
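`mytcp_snd_wnd_test` and `mytcp_acceptable_seq` lean on the kernel's wrap-safe sequence-number comparisons: `before()` (and `after()`, its mirror) takes the sign of the 32-bit difference, so the ordering stays correct when sequence numbers wrap past 2^32. A standalone sketch, with `seq_before`/`seq_after` as renamed stand-ins for the kernel helpers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe sequence comparison in the style of include/net/tcp.h:
 * subtract modulo 2^32 and let the sign decide the ordering. */
static int seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}
#define seq_after(seq2, seq1)	seq_before(seq1, seq2)

int main(void)
{
	/* Plain case. */
	assert(seq_before(100, 200));
	/* Wraparound: 0xFFFFFF00 is "before" 0x00000010 even though it is
	 * numerically larger, because the window spans the 2^32 boundary. */
	assert(seq_before(0xFFFFFF00u, 0x00000010u));
	/* The send-window test then reads like mytcp_snd_wnd_test:
	 * !seq_after(end_seq, snd_una + snd_wnd), here across a wrap. */
	uint32_t snd_una = 0xFFFFFFF0u, snd_wnd = 0x1000, end_seq = 0x200;
	assert(!seq_after(end_seq, snd_una + snd_wnd));
	printf("sequence comparison checks passed\n");
	return 0;
}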
/* Cheaper variant of mytcp_fragment() for the transmit path: when the
 * skb is purely paged data (no linear head), the split needs no
 * checksum copy and no packet-count adjustment. */
static int mytso_fragment(struct sock *sk, struct sk_buff *skb,
			  unsigned int len, unsigned int mss_now)
{
	struct sk_buff *buff;
	int nlen = skb->len - len;
	u16 flags;

	/* All of a TSO frame must be composed of paged data. */
	if (skb->len != skb->data_len)
		return mytcp_fragment(sk, skb, len, mss_now);

	buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
	if (unlikely(buff == NULL))
		return -ENOMEM;

	buff->truesize = nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;

	/* This packet was never sent out yet, so no SACK bits. */
	TCP_SKB_CB(buff)->sacked = 0;

	buff->ip_summed = skb->ip_summed = CHECKSUM_HW;
	skb_split(skb, buff, len);

	/* Fix up TSO segment counts for both halves. */
	mytcp_set_skb_tso_segs(sk, skb, mss_now);
	mytcp_set_skb_tso_segs(sk, buff, mss_now);

	/* Link buff into the send queue. */
	skb_header_release(buff);
	__skb_append(skb, buff, &sk->sk_write_queue);

	return 0;
}

/* The connection is application-limited: decay the congestion window
 * halfway toward what was actually used, remembering ssthresh. */
void mytcp_cwnd_application_limited(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
	    sk->sk_socket &&
	    !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		/* Limited by application or receiver window. */
		u32 win_used = max(tp->snd_cwnd_used, 2U);
		if (win_used < tp->snd_cwnd) {
			tp->snd_ssthresh = tcp_current_ssthresh(sk);
			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
		}
		tp->snd_cwnd_used = 0;
	}
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* Track whether the sender actually fills the congestion window; if
 * it has been under-used for a full RTO, shrink it. */
static void mytcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
{
	__u32 packets_out = tp->packets_out;

	if (packets_out >= tp->snd_cwnd) {
		/* Network is fully utilized. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >=
		    inet_csk(sk)->icsk_rto)
			mytcp_cwnd_application_limited(sk);
	}
}

/* Push exactly one skb (the send head) out, fragmenting it first if
 * it exceeds what the windows currently allow. */
void mytcp_push_one(struct sock *sk, unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = sk->sk_send_head;
	unsigned int tso_segs, cwnd_quota;

	BUG_ON(!skb || skb->len < mss_now);

	tso_segs = mytcp_init_tso_segs(sk, skb, mss_now);
	cwnd_quota = mytcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);

	if (likely(cwnd_quota)) {
		unsigned int limit;

		BUG_ON(!tso_segs);

		limit = mss_now;
		if (tso_segs > 1) {
			limit = mytcp_window_allows(tp, skb, mss_now,
						    cwnd_quota);

			/* Trim a short skb down to a whole number of MSS. */
			if (skb->len < limit) {
				unsigned int trim = skb->len % mss_now;

				if (trim)
					limit = skb->len - trim;
			}
		}

		if (skb->len > limit &&
		    unlikely(mytso_fragment(sk, skb, limit, mss_now)))
			return;

		/* Send it out now. */
		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (likely(!mytcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
			myupdate_send_head(sk, tp, skb);
			mytcp_cwnd_validate(sk, tp);
			return;
		}
	}
}
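The limit/trim logic in `mytcp_push_one` (the `tso_segs > 1` branch) grants min(remaining receive window, cwnd quota in bytes) and then rounds a short skb down to a whole number of MSS, so no partial segment goes out early. A hypothetical standalone model of that arithmetic, with `push_limit` standing in for the inlined code:

#include <assert.h>
#include <stdio.h>

/* Hypothetical model of the limit computation in mytcp_push_one:
 * take the window grant, then trim a short skb to a multiple of MSS. */
static unsigned int push_limit(unsigned int window, unsigned int cwnd_bytes,
			       unsigned int skb_len, unsigned int mss)
{
	unsigned int limit = window < cwnd_bytes ? window : cwnd_bytes;

	if (skb_len < limit) {
		unsigned int trim = skb_len % mss;
		if (trim)
			limit = skb_len - trim;
	}
	return limit;
}

int main(void)
{
	/* 10000-byte skb, generous windows: trim 10000 % 1448 = 1312,
	 * so the limit becomes 8688, i.e. 6 full segments. */
	assert(push_limit(65535, 20000, 10000, 1448) == 8688);
	/* Window is the binding constraint: the skb gets split at 5000. */
	assert(push_limit(5000, 20000, 10000, 1448) == 5000);
	printf("push_limit checks passed\n");
	return 0;
}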
/* Decide whether to defer transmission of a multi-segment TSO skb in
 * the hope that more data arrives and a larger burst can be sent in
 * one go.  Returns nonzero to defer, zero to send now. */
static int mytcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp,
				  struct sk_buff *skb)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 send_win, cong_win, limit, in_flight;

	/* Never defer a FIN. */
	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
		return 0;

	/* Only defer while the connection is in the Open state. */
	if (icsk->icsk_ca_state != TCP_CA_Open)
		return 0;

	in_flight = tcp_packets_in_flight(tp);

	BUG_ON(tcp_skb_pcount(skb) <= 1 ||
	       (tp->snd_cwnd <= in_flight));

	send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq;

	/* Remaining congestion window quota, in bytes. */
	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;

	limit = min(send_win, cong_win);

	/* If a full-sized (64 KB) TSO skb can be sent, do it. */
	if (limit >= 65536)
		return 0;

	if (mysysctl_tcp_tso_win_divisor) {
		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);

		/* If at least some fraction of a window is available,
		 * just use it. */
		chunk /= mysysctl_tcp_tso_win_divisor;
		if (limit >= chunk)
			return 0;
	} else {
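The listing breaks off here at the page boundary; the `else` branch of `mytcp_tso_should_defer` continues on page 2. The two deferral checks visible so far compare the current grant, min(send_win, cong_win), against a full 64 KB burst and against a 1/divisor slice of the window. A hypothetical model of just those checks (the trailing "defer" return is for illustration only; the real function has further cases):

#include <assert.h>
#include <stdio.h>

/* Hypothetical model of the two early-return checks above: send now
 * when the grant already allows a maximal 64 KB burst, or at least a
 * 1/divisor slice of the window. */
static int should_defer(unsigned int send_win, unsigned int cong_win,
			unsigned int snd_wnd, unsigned int cwnd_bytes,
			unsigned int win_divisor)
{
	unsigned int limit = send_win < cong_win ? send_win : cong_win;

	if (limit >= 65536)
		return 0;	/* can already send a full-sized burst */

	if (win_divisor) {
		unsigned int chunk = snd_wnd < cwnd_bytes ? snd_wnd : cwnd_bytes;

		chunk /= win_divisor;
		if (limit >= chunk)
			return 0;	/* grant is a big enough slice */
	}
	/* Remaining cases live on page 2 of the listing; treat them as
	 * "defer" here purely for illustration. */
	return 1;
}

int main(void)
{
	/* 20000 bytes allowed, divisor 3: chunk = min(30000, 40000) / 3
	 * = 10000, and 20000 >= 10000, so send now. */
	assert(should_defer(20000, 25000, 30000, 40000, 3) == 0);
	/* Only 2000 bytes allowed against the same windows: defer. */
	assert(should_defer(2000, 25000, 30000, 40000, 3) == 1);
	printf("should_defer checks passed\n");
	return 0;
}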
