📄 mytcp_output.c
/* Continued from the previous part of this listing: the tail of
 * mytcp_tso_should_defer().  Do not defer if more than a burst's worth
 * of data could be sent right away.
 */
		if (limit > tcp_max_burst(tp) * tp->mss_cache)
			return 0;
	}

	/* Ok, it looks like it is advisable to defer. */
	return 1;
}

/* Is this skb the last one currently sitting on the write queue? */
static inline int mytcp_skb_is_last(const struct sock *sk,
				    const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
}

/* Write as much of the queue to the network as the congestion window,
 * the receiver's advertised window, Nagle and TSO deferral allow.
 * Returns nonzero when nothing is in flight but data is still queued,
 * so the caller can arm the zero-window probe timer.
 */
static int mytcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int tso_segs, sent_pkts;
	int cwnd_quota;

	/* If we are closed, the bytes will have to remain here. */
	if (unlikely(sk->sk_state == TCP_CLOSE))
		return 0;

	sent_pkts = 0;
	while ((skb = sk->sk_send_head)) {
		unsigned int limit;

		tso_segs = mytcp_init_tso_segs(sk, skb, mss_now);
		BUG_ON(!tso_segs);

		/* Does the congestion window have room for this skb? */
		cwnd_quota = mytcp_cwnd_test(tp, skb);
		if (!cwnd_quota)
			break;

		/* Does the receiver's window have room for it? */
		if (unlikely(!mytcp_snd_wnd_test(tp, skb, mss_now)))
			break;

		if (tso_segs == 1) {
			/* Plain segment: apply the Nagle test.  Only the
			 * last skb on the queue honours the caller's
			 * nonagle policy; everything before it is pushed.
			 */
			if (unlikely(!mytcp_nagle_test(tp, skb, mss_now,
						       (mytcp_skb_is_last(sk, skb) ?
							nonagle : TCP_NAGLE_PUSH))))
				break;
		} else {
			/* TSO super-packet: maybe wait for more data
			 * to accumulate before sending.
			 */
			if (mytcp_tso_should_defer(sk, tp, skb))
				break;
		}

		limit = mss_now;
		if (tso_segs > 1) {
			limit = mytcp_window_allows(tp, skb, mss_now,
						    cwnd_quota);

			if (skb->len < limit) {
				/* Send only whole-MSS chunks now; the
				 * sub-MSS tail stays queued.
				 */
				unsigned int trim = skb->len % mss_now;

				if (trim)
					limit = skb->len - trim;
			}
		}

		if (skb->len > limit &&
		    unlikely(mytso_fragment(sk, skb, limit, mss_now)))
			break;

		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (unlikely(mytcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
			break;

		/* Advance the send head.  This one is sent out;
		 * this call will increment packets_out.
		 */
		myupdate_send_head(sk, tp, skb);

		tcp_minshall_update(tp, mss_now, skb);
		sent_pkts++;
	}

	if (likely(sent_pkts)) {
		mytcp_cwnd_validate(sk, tp);
		return 0;
	}
	return !tp->packets_out && sk->sk_send_head;
}

/* Push out any pending frames; if nothing could be sent but data is
 * still queued, arm the probe timer.
 */
void __mytcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
				 unsigned int cur_mss, int nonagle)
{
	struct sk_buff *skb = sk->sk_send_head;

	if (skb) {
		if (mytcp_write_xmit(sk, cur_mss, nonagle))
			tcp_check_probe_timer(sk, tp);
	}
}

/* Send an RST to abort the connection.  The segment carries no data,
 * so end_seq equals seq; it carries no options and is never
 * retransmitted.
 */
void mytcp_send_active_reset(struct sock *sk, gfp_t priority)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	skb = alloc_skb(MAX_TCP_HEADER, priority);
	if (!skb) {
		MYNET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, MAX_TCP_HEADER);
	skb->csum = 0;
	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
	TCP_SKB_CB(skb)->sacked = 0;
	skb_shinfo(skb)->tso_segs = 1;
	skb_shinfo(skb)->tso_size = 0;

	/* Send it off. */
	TCP_SKB_CB(skb)->seq = mytcp_acceptable_seq(sk, tp);
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	if (mytcp_transmit_skb(sk, skb, 0, priority))
		MYNET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
}

/* Append an skb to the write queue, account for its memory, and make it
 * the send head if everything before it has already been sent.
 */
static void mytcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the tail of the write queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	__skb_queue_tail(&sk->sk_write_queue, skb);
	sk_charge_skb(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = skb;
}

/* Send a FIN.  If unsent data is still queued, piggyback the FIN on the
 * last queued skb; otherwise allocate a fresh, data-less skb for it.
 */
void mytcp_send_fin(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
	int mss_now;

	mss_now = mytcp_current_mss(sk, 1);

	if (sk->sk_send_head != NULL) {
		/* The FIN rides on the tail skb and consumes one
		 * sequence number.
		 */
		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
		TCP_SKB_CB(skb)->end_seq++;
		tp->write_seq++;
	} else {
		/* Socket is locked; keep trying until memory is available. */
		for (;;) {
			skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
			if (skb)
				break;
			yield();
		}

		/* Reserve space for headers and prepare control bits. */
		skb_reserve(skb, MAX_TCP_HEADER);
		skb->csum = 0;
		TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
		TCP_SKB_CB(skb)->sacked = 0;
		skb_shinfo(skb)->tso_segs = 1;
		skb_shinfo(skb)->tso_size = 0;

		/* The FIN consumes one sequence number. */
		TCP_SKB_CB(skb)->seq = tp->write_seq;
		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
		mytcp_queue_skb(sk, skb);
	}
	__mytcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF);
}
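/* Added note (illustration, not in the original listing): how the delayed
 * ACK timeout in mytcp_send_delayed_ack() below gets clamped, assuming
 * HZ=1000 so that TCP_DELACK_MIN = HZ/25 = 40 and TCP_DELACK_MAX =
 * HZ/5 = 200 jiffies.  Take icsk_ack.ato = 120, no pingpong traffic and
 * no ICSK_ACK_PUSHED pending (so max_ato starts at HZ/2 = 500), and a
 * smoothed RTT of 60 ms (tp->srtt = 480, since srtt is stored
 * left-shifted by 3):
 *
 *	rtt     = max(480 >> 3, 40) = 60
 *	max_ato = min(500, 60)      = 60
 *	ato     = min(120, 60)      = 60 jiffies
 *
 * i.e. the ACK is delayed by roughly one RTT at most, and never by more
 * than half a second.
 */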
void mytcp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int ato = icsk->icsk_ack.ato;
	unsigned long timeout;

	if (ato > TCP_DELACK_MIN) {
		const struct tcp_sock *tp = tcp_sk(sk);
		int max_ato = HZ / 2;

		if (icsk->icsk_ack.pingpong ||
		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
			max_ato = TCP_DELACK_MAX;

		/* If some rtt estimate is known, use it to bound
		 * the delayed ack.
		 */
		if (tp->srtt) {
			int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);

			if (rtt < max_ato)
				max_ato = rtt;
		}

		ato = min(ato, max_ato);
	}

	/* Stay within the limit we were given. */
	timeout = jiffies + ato;

	/* Use a new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If the delack timer is about to expire, send the ACK now. */
		if (icsk->icsk_ack.blocked ||
		    time_before_eq(icsk->icsk_ack.timeout,
				   jiffies + (ato >> 2))) {
			mytcp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

/* Remove 'len' bytes of paged data from the front of the skb: fully
 * consumed fragments are released, the first surviving fragment is
 * advanced, and the rest are compacted to the front of the array.
 */
static unsigned char *__mypskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = len;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			/* Fragment is consumed entirely. */
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				/* Partially consumed fragment. */
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail = skb->data;
	skb->data_len -= len;
	skb->len = skb->data_len;
	return skb->tail;
}

/* Drop 'len' already-acknowledged bytes from the head of an skb that is
 * about to be retransmitted, fixing up sequence numbers and socket
 * memory accounting.
 */
int mytcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	if (len <= skb_headlen(skb)) {
		__skb_pull(skb, len);
	} else {
		if (__mypskb_trim_head(skb, len - skb_headlen(skb)) == NULL)
			return -ENOMEM;
	}

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_HW;

	skb->truesize -= len;
	sk->sk_wmem_queued -= len;
	sk->sk_forward_alloc += len;
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of the TSO
	 * factor and mss.
	 */
	if (tcp_skb_pcount(skb) > 1)
		mytcp_set_skb_tso_segs(sk, skb, mytcp_current_mss(sk, 1));

	return 0;
}

/* Collapse two adjacent small skbs on the retransmit queue into one, so
 * a retransmission fills the segment better.
 */
static void mytcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb,
				       int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *next_skb = skb->next;

	/* The first test we must make is that neither of these two
	 * SKB's are still referenced by someone else.
	 */
	if (!skb_cloned(skb) && !skb_cloned(next_skb)) {
		int skb_size = skb->len, next_skb_size = next_skb->len;
		u16 flags = TCP_SKB_CB(skb)->flags;

		/* Also punt if the next skb has been SACK'd. */
		if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
			return;

		/* Next skb is out of window. */
		if (after(TCP_SKB_CB(next_skb)->end_seq,
			  tp->snd_una + tp->snd_wnd))
			return;

		/* Punt if not enough space exists in the first SKB for
		 * the data in the second, or the total combined payload
		 * would exceed the MSS.
		 */
		if ((next_skb_size > skb_tailroom(skb)) ||
		    ((skb_size + next_skb_size) > mss_now))
			return;

		BUG_ON(tcp_skb_pcount(skb) != 1 ||
		       tcp_skb_pcount(next_skb) != 1);

		/* Changing the transmit queue under us, so clear hints. */
		clear_all_retrans_hints(tp);

		/* Ok.  We will be able to collapse the packet. */
		__skb_unlink(next_skb, &sk->sk_write_queue);

		memcpy(skb_put(skb, next_skb_size), next_skb->data,
		       next_skb_size);

		if (next_skb->ip_summed == CHECKSUM_HW)
			skb->ip_summed = CHECKSUM_HW;

		if (skb->ip_summed != CHECKSUM_HW)
			skb->csum = csum_block_add(skb->csum, next_skb->csum,
						   skb_size);

		/* Update sequence range on original skb. */
		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;

		/* Merge over control information; this moves PSH/FIN etc. */
		flags |= TCP_SKB_CB(next_skb)->flags;
		TCP_SKB_CB(skb)->flags = flags;

		/* All done, get rid of the second SKB and account for it
		 * so packet counting does not break.
		 */
		TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked &
			(TCPCB_EVER_RETRANS | TCPCB_AT_TAIL);
		if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_RETRANS)
			tp->retrans_out -= tcp_skb_pcount(next_skb);
		if (TCP_SKB_CB(next_skb)->sacked & TCPCB_LOST) {
			tp->lost_out -= tcp_skb_pcount(next_skb);
			tp->left_out -= tcp_skb_pcount(next_skb);
		}
		/* Reno case is special.  Sigh... */
		if (!tp->rx_opt.sack_ok && tp->sacked_out) {
			tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
			tp->left_out -= tcp_skb_pcount(next_skb);
		}

		/* Not quite right: it can be > snd.fack, but
		 * it is better to underestimate fackets.
		 */
		tcp_dec_pcount_approx(&tp->fackets_out, next_skb);
		tcp_packets_out_dec(tp, next_skb);
		sk_stream_free_skb(sk, next_skb);
	}
}
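/* Added note (illustration, not in the original listing): the collapse
 * above only fires for genuinely small segments.  With cur_mss = 1448,
 * mytcp_retransmit_skb() below attempts it only when skb->len < 724
 * (cur_mss >> 1), the skb is not a SYN, its neighbour is neither the
 * send head nor the queue sentinel, both skbs are linear
 * (nr_frags == 0) single segments (pcount == 1), and
 * mysysctl_tcp_retrans_collapse is enabled.  Even then the merged
 * payload must still fit within both skb_tailroom() and one MSS, and
 * the neighbour must be neither SACK'd nor outside the send window.
 */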
/* Can a new segment be sent right now, given cwnd, the receive window
 * and the Nagle rules?
 */
int mytcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
{
	struct sk_buff *skb = sk->sk_send_head;

	return (skb &&
		mytcp_snd_test(sk, skb, mytcp_current_mss(sk, 1),
			       (mytcp_skb_is_last(sk, skb) ?
				TCP_NAGLE_PUSH : tp->nonagle)));
}

/* This retransmits one SKB.  Policy decisions and retransmit queue
 * state updates are done by the caller.  Returns non-zero if an
 * error occurred which prevented the send.
 */
int mytcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cur_mss = mytcp_current_mss(sk, 0);
	int err;

	/* Do not send more than we queued.  1/4 is reserved for possible
	 * copying overhead: fragmentation, tunneling, mangling etc.
	 */
	if (atomic_read(&sk->sk_wmem_alloc) >
	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
		return -EAGAIN;

	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
			BUG();
		/* Part of the skb is already acknowledged; trim it off. */
		if (mytcp_trim_head(sk, skb,
				    tp->snd_una - TCP_SKB_CB(skb)->seq))
			return -ENOMEM;
	}

	/* If the receiver has shrunk his window and this skb is out of
	 * the new window, do not retransmit it.  The exception is when
	 * the window is shrunk to zero, in which case our retransmit
	 * serves as a zero window probe.
	 */
	if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una + tp->snd_wnd) &&
	    TCP_SKB_CB(skb)->seq != tp->snd_una)
		return -EAGAIN;

	if (skb->len > cur_mss) {
		if (mytcp_fragment(sk, skb, cur_mss, cur_mss))
			return -ENOMEM; /* We'll try again later. */
	}

	/* Collapse two adjacent packets if worthwhile and we can. */
	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
	    (skb->len < (cur_mss >> 1)) &&
	    (skb->next != sk->sk_send_head) &&
	    (skb->next != (struct sk_buff *)&sk->sk_write_queue) &&
	    (skb_shinfo(skb)->nr_frags == 0 &&
	     skb_shinfo(skb->next)->nr_frags == 0) &&
	    (tcp_skb_pcount(skb) == 1 &&
	     tcp_skb_pcount(skb->next) == 1) &&
	    (mysysctl_tcp_retrans_collapse != 0))
		mytcp_retrans_try_collapse(sk, skb, cur_mss);

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
		return -EHOSTUNREACH; /* Routing failure or similar. */

	/* Some stacks overoptimize and ignore the FIN on a retransmit
	 * when old data is attached, so strip it off since it is cheap
	 * to do so and saves bytes on the network.
	 */
	if (skb->len > 0 &&
	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
		if (!pskb_trim(skb, 0)) {
			TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
			skb_shinfo(skb)->tso_segs = 1;
			skb_shinfo(skb)->tso_size = 0;
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
		}
	}

	TCP_SKB_CB(skb)->when = tcp_time_stamp;

	err = mytcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);

	if (err == 0) {
		/* Update global TCP statistics. */
		MYTCP_INC_STATS(TCP_MIB_RETRANSSEGS);

		tp->total_retrans++;

#if FASTRETRANS_DEBUG > 0
		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
			if (net_ratelimit())
				printk(KERN_DEBUG "retrans_out leaked.\n");
		}
#endif
		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
		tp->retrans_out += tcp_skb_pcount(skb);

		/* Save the stamp of the first retransmit. */
		if (!tp->retrans_stamp)
			tp->retrans_stamp = TCP_SKB_CB(skb)->when;

		tp->undo_retrans++;

		/* snd_nxt is stored to detect loss of the retransmitted
		 * segment.
		 */
		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
	}
	return err;
}
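/* Added note (illustration, not in the original listing): the SYN-ACK
 * header size computed in mytcp_make_synack() below, using the standard
 * aligned option lengths (TCPOLEN_MSS = 4, TCPOLEN_TSTAMP_ALIGNED = 12,
 * TCPOLEN_WSCALE_ALIGNED = 4, TCPOLEN_SACKPERM_ALIGNED = 4).  For a peer
 * that offered timestamps, window scaling and SACK:
 *
 *	20 (struct tcphdr) + 4 (MSS) + 12 (timestamps) + 4 (wscale) = 40
 *
 * SACK-permitted adds nothing here: with timestamps enabled it is packed
 * into the timestamp option block by mytcp_syn_build_options(), hence the
 * "sack_ok && !tstamp_ok" condition.  th->doff then becomes 40 >> 2 = 10.
 */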
/* Prepare a SYN-ACK for a passive connection request. */
struct sk_buff *mytcp_make_synack(struct sock *sk, struct dst_entry *dst,
				  struct request_sock *req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcphdr *th;
	int tcp_header_size;
	struct sk_buff *skb;

	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_TCP_HEADER);

	skb->dst = dst_clone(dst);

	tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
			   (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
			   (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
			   /* SACK_PERM is in the place of NOP NOP of TS. */
			   ((ireq->sack_ok && !ireq->tstamp_ok) ?
			    TCPOLEN_SACKPERM_ALIGNED : 0));
	skb->h.th = th = (struct tcphdr *)skb_push(skb, tcp_header_size);

	memset(th, 0, sizeof(struct tcphdr));
	th->syn = 1;
	th->ack = 1;
	if (dst->dev->features & NETIF_F_TSO)
		ireq->ecn_ok = 0;
	TCP_ECN_make_synack(req, th);
	th->source = inet_sk(sk)->sport;
	th->dest = ireq->rmt_port;
	TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
	TCP_SKB_CB(skb)->sacked = 0;
	skb_shinfo(skb)->tso_segs = 1;
	skb_shinfo(skb)->tso_size = 0;
	th->seq = htonl(TCP_SKB_CB(skb)->seq);
	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
	if (req->rcv_wnd == 0) { /* Ignored for retransmitted SYNs. */
		__u8 rcv_wscale;
		/* Set this up on the first call only. */
		req->window_clamp = tp->window_clamp ? :
				    dst_metric(dst, RTAX_WINDOW);
		/* mytcp_full_space() because this is guaranteed to be
		 * the first packet.
		 */
		mytcp_select_initial_window(mytcp_full_space(sk),
			dst_metric(dst, RTAX_ADVMSS) -
			(ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
			&req->rcv_wnd,
			&req->window_clamp,
			ireq->wscale_ok,
			&rcv_wscale);
		ireq->rcv_wscale = rcv_wscale;
	}

	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
	th->window = htons(req->rcv_wnd);

	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	mytcp_syn_build_options((__u32 *)(th + 1),
				dst_metric(dst, RTAX_ADVMSS),
				ireq->tstamp_ok,
				ireq->sack_ok,
				ireq->wscale_ok,
				ireq->rcv_wscale,
				TCP_SKB_CB(skb)->when,
				req->ts_recent);

	skb->csum = 0;
	th->doff = (tcp_header_size >> 2);
	MYTCP_INC_STATS(TCP_MIB_OUTSEGS);
	return skb;
}
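/* Added usage sketch (assumption, not part of the original file): these
 * entry points mirror the stock kernel ones, so a caller holding the
 * socket lock would drive them roughly the way the stock stack does:
 *
 *	mss = mytcp_current_mss(sk, 1);
 *	__mytcp_push_pending_frames(sk, tp, mss, tp->nonagle); // after queueing data
 *	mytcp_send_fin(sk);                       // on close() with data sent
 *	mytcp_send_active_reset(sk, GFP_KERNEL);  // on abort
 *	mytcp_send_delayed_ack(sk);               // from the receive path
 *	mytcp_retransmit_skb(sk, skb);            // from the retransmit timer
 */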