📄 mytcp_minisocks.c
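Minisocket handling for the "my" TCP stack: TIME_WAIT state processing, SYN_RECV request checking, and creation of child sockets for accepted connections. The code appears to be adapted from net/ipv4/tcp_minisocks.c of a 2.6.1x-era kernel, with my-prefixed symbols and extra printk() tracing.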
/*
 * mytcp_minisocks.c - TIME_WAIT and SYN_RECV (minisock) handling for the
 * "my" TCP stack, adapted from the kernel's net/ipv4/tcp_minisocks.c.
 *
 * Headers follow the stock tcp_minisocks.c; "mytcp.h" is an assumed
 * project-local header declaring the my*-prefixed helpers used below.
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

#include "mytcp.h"      /* assumed: my* prototypes, MY_AF_INET, stats macros */

#ifdef CONFIG_SYSCTL
#define SYNC_INIT 0     /* let the user enable it */
#else
#define SYNC_INIT 1
#endif

int mysysctl_tcp_syncookies = SYNC_INIT;
int mysysctl_tcp_abort_on_overflow;

extern struct inet_hashinfo mytcp_hashinfo;

/* Referenced below but assumed to be defined elsewhere in this project,
 * mirroring sysctl_tcp_rfc1337, sysctl_tcp_fack and tcp_init_congestion_ops
 * in the stock kernel. */
extern int mysysctl_tcp_rfc1337;
extern int mysysctl_tcp_fack;
extern struct tcp_congestion_ops mytcp_init_congestion_ops;

/*
 * Bookkeeping for all TIME_WAIT sockets: tw_timer ("hangman") reaps one kill
 * slot per period, twkill_work handles overflow batches, and twcal_timer is
 * the finer-grained calendar wheel used for recycled (short-timeout) buckets.
 */
struct inet_timewait_death_row mytcp_death_row = {
    .sysctl_max_tw_buckets = NR_FILE * 2,
    .period         = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
    .death_lock     = SPIN_LOCK_UNLOCKED,
    .hashinfo       = &mytcp_hashinfo,
    .tw_timer       = TIMER_INITIALIZER(myinet_twdr_hangman, 0,
                                        (unsigned long)&mytcp_death_row),
    .twkill_work    = __WORK_INITIALIZER(mytcp_death_row.twkill_work,
                                         myinet_twdr_twkill_work,
                                         &mytcp_death_row),
    .twcal_hand     = -1,
    .twcal_timer    = TIMER_INITIALIZER(myinet_twdr_twcal_tick, 0,
                                        (unsigned long)&mytcp_death_row),
};

/*
 * Feed a queued segment to a child socket created from a SYN_RECV request.
 * If the child is locked by a user context the segment goes to its backlog;
 * otherwise it runs through the state machine directly, and the listening
 * parent is woken as soon as the child leaves SYN_RECV so that accept()
 * can return it.
 */
int mytcp_child_process(struct sock *parent, struct sock *child,
                        struct sk_buff *skb)
{
    int ret = 0;
    int state = child->sk_state;

    if (!sock_owned_by_user(child)) {
        ret = mytcp_rcv_state_process(child, skb, skb->h.th, skb->len);
        /* Wake up the parent, which delivers SIGIO / readiness. */
        if (state == TCP_SYN_RECV && child->sk_state != state)
            parent->sk_data_ready(parent, 0);
    } else {
        sk_add_backlog(child, skb);
    }

    bh_unlock_sock(child);
    sock_put(child);
    return ret;
}

/*
 * Move sk to TIME_WAIT (or the FIN_WAIT2 substate) by replacing it with a
 * small inet_timewait_sock bucket.  With tw_recycle and a remembered peer
 * timestamp the timeout shrinks to 3.5 * RTO; otherwise the full
 * TCP_TIMEWAIT_LEN applies.
 */
void mytcp_time_wait(struct sock *sk, int state, int timeo)
{
    struct inet_timewait_sock *tw = NULL;
    const struct inet_connection_sock *icsk = inet_csk(sk);
    const struct tcp_sock *tp = tcp_sk(sk);
    int recycle_ok = 0;

    if (mytcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
        recycle_ok = icsk->icsk_af_ops->remember_stamp(sk);

    if (mytcp_death_row.tw_count < mytcp_death_row.sysctl_max_tw_buckets)
        tw = myinet_twsk_alloc(sk, state);

    if (tw != NULL) {
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        /* rto = 4*RTO - RTO/2 = 3.5 * RTO */
        const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

        tw->tw_rcv_wscale         = tp->rx_opt.rcv_wscale;
        tcptw->tw_rcv_nxt         = tp->rcv_nxt;
        tcptw->tw_snd_nxt         = tp->snd_nxt;
        tcptw->tw_rcv_wnd         = tcp_receive_window(tp);
        tcptw->tw_ts_recent       = tp->rx_opt.ts_recent;
        tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;

        /* Link the bucket into the established hash in place of sk. */
        __myinet_twsk_hashdance(tw, sk, &mytcp_hashinfo);

        /* Get the TIME_WAIT timeout firing no earlier than 3.5 * RTO. */
        if (timeo < rto)
            timeo = rto;

        if (recycle_ok) {
            tw->tw_timeout = rto;
        } else {
            tw->tw_timeout = TCP_TIMEWAIT_LEN;
            if (state == TCP_TIME_WAIT)
                timeo = TCP_TIMEWAIT_LEN;
        }

        myinet_twsk_schedule(tw, &mytcp_death_row, timeo, TCP_TIMEWAIT_LEN);
        printk(KERN_INFO "call in minisock 61!\n");
        myinet_twsk_put(tw);
    } else {
        /* Out of TIME_WAIT buckets: close non-gracefully. */
        if (net_ratelimit())
            printk(KERN_INFO "TCP: time wait bucket table overflow\n");
    }

    mytcp_update_metrics(sk);
    mytcp_done(sk);
}

/*
 * RFC 793/1122-style acceptability test: does [seq, end_seq) overlap the
 * receive window [s_win, e_win)?  Zero-length segments sitting exactly on a
 * window edge are also accepted.
 */
static __inline__ int mytcp_in_window(u32 seq, u32 end_seq,
                                      u32 s_win, u32 e_win)
{
    if (seq == s_win)
        return 1;
    if (after(end_seq, s_win) && before(seq, e_win))
        return 1;
    return (seq == e_win && seq == end_seq);
}

/*
 * Process a segment that arrived for a TIME_WAIT (or FIN_WAIT2 substate)
 * bucket.  The return value tells the caller whether to re-ACK the peer
 * (TCP_TW_ACK), answer with a reset (TCP_TW_RST), treat the segment as a
 * valid SYN reopening the connection (TCP_TW_SYN), or drop it silently
 * (TCP_TW_SUCCESS).
 */
enum tcp_tw_status
mytcp_timewait_state_process(struct inet_timewait_sock *tw,
                             struct sk_buff *skb, const struct tcphdr *th)
{
    struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
    struct tcp_options_received tmp_opt;
    int paws_reject = 0;

    tmp_opt.saw_tstamp = 0;
    if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
        mytcp_parse_options(skb, &tmp_opt, 0);

        if (tmp_opt.saw_tstamp) {
            tmp_opt.ts_recent       = tcptw->tw_ts_recent;
            tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
            paws_reject = tcp_paws_check(&tmp_opt, th->rst);
        }
    }

    if (tw->tw_substate == TCP_FIN_WAIT2) {
        /* Out-of-window segments in FIN_WAIT2 just provoke an ACK. */
        if (paws_reject ||
            !mytcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                             tcptw->tw_rcv_nxt,
                             tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd)) {
            printk(KERN_INFO "nothing,just return!\n");
            return TCP_TW_ACK;
        }

        if (th->rst)
            goto kill;

        if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
            goto kill_with_rst;

        /* Pure duplicate or wholly old data: silently drop. */
        if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
            TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
            printk(KERN_INFO "call in minisock 112!\n");
            myinet_twsk_put(tw);
            return TCP_TW_SUCCESS;
        }

        /* New data, or a FIN other than the expected one: the peer is
         * misbehaving, answer with a reset. */
        if (!th->fin ||
            TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
            myinet_twsk_deschedule(tw, &mytcp_death_row);
            printk(KERN_INFO "call in minisock 119!\n");
            myinet_twsk_put(tw);
            return TCP_TW_RST;
        }

        /* The FIN arrived: enter real TIME_WAIT. */
        tw->tw_substate   = TCP_TIME_WAIT;
        tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        if (tmp_opt.saw_tstamp) {
            tcptw->tw_ts_recent_stamp = xtime.tv_sec;
            tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
        }

        if (tw->tw_family == MY_AF_INET &&
            mytcp_death_row.sysctl_tw_recycle &&
            tcptw->tw_ts_recent_stamp &&
            mytcp_v4_tw_remember_stamp(tw))
            myinet_twsk_schedule(tw, &mytcp_death_row, tw->tw_timeout,
                                 TCP_TIMEWAIT_LEN);
        else
            myinet_twsk_schedule(tw, &mytcp_death_row, TCP_TIMEWAIT_LEN,
                                 TCP_TIMEWAIT_LEN);
        printk(KERN_INFO "schedule and return!\n");
        return TCP_TW_ACK;
    }

    /* From here on the bucket is in genuine TIME_WAIT.  An exact duplicate
     * of the last segment, or an in-sequence RST, restarts (or, per
     * RFC 1337, terminates) the wait. */
    if (!paws_reject &&
        (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
         (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
        if (th->rst) {
            /* RFC 1337 recommends ignoring the RST to avoid TIME_WAIT
             * assassination; honour it only when the sysctl is off. */
            if (mysysctl_tcp_rfc1337 == 0) {
kill:
                myinet_twsk_deschedule(tw, &mytcp_death_row);
                printk(KERN_INFO "call in minisock 145!\n");
                myinet_twsk_put(tw);
                return TCP_TW_SUCCESS;
            }
        }
        myinet_twsk_schedule(tw, &mytcp_death_row, TCP_TIMEWAIT_LEN,
                             TCP_TIMEWAIT_LEN);

        if (tmp_opt.saw_tstamp) {
            tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
            tcptw->tw_ts_recent_stamp = xtime.tv_sec;
        }

        printk(KERN_INFO "call in minisock 159!\n");
        myinet_twsk_put(tw);
        return TCP_TW_SUCCESS;
    }

    /* A fresh SYN with a higher sequence number (or newer timestamp) may
     * legally reopen the connection.  Hand the caller an initial sequence
     * number safely above everything we ever sent. */
    if (th->syn && !th->rst && !th->ack && !paws_reject &&
        (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
         (tmp_opt.saw_tstamp &&
          (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
        u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
        if (isn == 0)
            isn++;
        TCP_SKB_CB(skb)->when = isn;
        return TCP_TW_SYN;
    }

    if (paws_reject)
        MYNET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);

    if (!th->rst) {
        /* Stray segment: re-ACK and, if it carried an ACK or failed PAWS,
         * restart the TIME_WAIT timer. */
        if (paws_reject || th->ack)
            myinet_twsk_schedule(tw, &mytcp_death_row, TCP_TIMEWAIT_LEN,
                                 TCP_TIMEWAIT_LEN);

        return TCP_TW_ACK;
    }
    printk(KERN_INFO "call in minisock 183!\n");
    myinet_twsk_put(tw);
    return TCP_TW_SUCCESS;
}

/*
 * Complete the three-way handshake for an embryonic (SYN_RECV) connection
 * represented by a request_sock.  Returns the newly created child socket,
 * the listener itself when the segment should be processed there, or NULL
 * when the segment was consumed (or the request dropped).
 */
struct sock *mytcp_check_req(struct sock *sk, struct sk_buff *skb,
                             struct request_sock *req,
                             struct request_sock **prev)
{
    struct tcphdr *th = skb->h.th;
    u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
    int paws_reject = 0;
    struct tcp_options_received tmp_opt;
    struct sock *child;

    tmp_opt.saw_tstamp = 0;
    if (th->doff > (sizeof(struct tcphdr) >> 2)) {
        tcp_parse_options(skb, &tmp_opt, 0);

        if (tmp_opt.saw_tstamp) {
            tmp_opt.ts_recent = req->ts_recent;
            /* No true timestamp is stored for the request; estimate it
             * from the retransmission count. */
            tmp_opt.ts_recent_stamp = xtime.tv_sec -
                ((TCP_TIMEOUT_INIT / HZ) << req->retrans);
            paws_reject = tcp_paws_check(&tmp_opt, th->rst);
        }
    }

    /* A retransmitted SYN: re-send the SYN-ACK. */
    if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
        flg == TCP_FLAG_SYN && !paws_reject) {
        req->rsk_ops->rtx_syn_ack(sk, req, NULL);
        return NULL;
    }

    /* An ACK that does not acknowledge our SYN-ACK: hand it back to the
     * listener, which will answer with a reset. */
    if ((flg & TCP_FLAG_ACK) &&
        (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
        return sk;

    /* PAWS failure or out-of-window segment: re-ACK unless it is a RST. */
    if (paws_reject ||
        !mytcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                         tcp_rsk(req)->rcv_isn + 1,
                         tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
        if (!(flg & TCP_FLAG_RST))
            req->rsk_ops->send_ack(skb, req);
        if (paws_reject)
            MYNET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
        return NULL;
    }

    /* In sequence and PAWS is OK: remember the peer's timestamp. */
    if (tmp_opt.saw_tstamp &&
        !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
        req->ts_recent = tmp_opt.rcv_tsval;

    if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
        /* Truncate the SYN: it is out of window starting at rcv_isn + 1. */
        flg &= ~TCP_FLAG_SYN;
    }

    /* A RST, or a SYN inside the window, kills the embryonic connection. */
    if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
        goto embryonic_reset;

    if (!(flg & TCP_FLAG_ACK))
        return NULL;

    /* With TCP_DEFER_ACCEPT a bare handshake ACK is remembered, but no
     * child is created until real data arrives. */
    if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
        TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
        inet_rsk(req)->acked = 1;
        return NULL;
    }

    /* The ACK is valid: create the full socket and queue it for accept(). */
    child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
    if (child == NULL)
        goto listen_overflow;

    inet_csk_reqsk_queue_unlink(sk, req, prev);
    inet_csk_reqsk_queue_removed(sk, req);
    inet_csk_reqsk_queue_add(sk, req, child);
    return child;

listen_overflow:
    if (!mysysctl_tcp_abort_on_overflow) {
        inet_rsk(req)->acked = 1;
        return NULL;
    }

embryonic_reset:
    MYNET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
    if (!(flg & TCP_FLAG_RST))
        req->rsk_ops->send_reset(skb);

    inet_csk_reqsk_queue_drop(sk, req, prev);
    return NULL;
}

/*
 * Clone the listening socket into a full child socket for an accepted
 * connection and initialize its TCP state (sequence numbers, congestion
 * control, negotiated options) from the request_sock and the final ACK.
 */
struct sock *mytcp_create_openreq_child(struct sock *sk,
                                        struct request_sock *req,
                                        struct sk_buff *skb)
{
    struct sock *newsk = myinet_csk_clone(sk, req, GFP_ATOMIC);

    if (newsk != NULL) {
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct tcp_request_sock *treq = tcp_rsk(req);
        /* Initialize the child's icsk, not the listener's. */
        struct inet_connection_sock *newicsk = inet_csk(newsk);
        struct tcp_sock *newtp;

        /* Now set up the tcp_sock. */
        newtp = tcp_sk(newsk);
        newtp->pred_flags = 0;
        newtp->rcv_nxt = treq->rcv_isn + 1;
        newtp->snd_nxt = newtp->snd_una = newtp->snd_sml =
            treq->snt_isn + 1;

        tcp_prequeue_init(newtp);

        tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);

        newtp->srtt = 0;
        newtp->mdev = TCP_TIMEOUT_INIT;
        newicsk->icsk_rto = TCP_TIMEOUT_INIT;

        newtp->packets_out = 0;
        newtp->left_out = 0;
        newtp->retrans_out = 0;
        newtp->sacked_out = 0;
        newtp->fackets_out = 0;
        newtp->snd_ssthresh = 0x7fffffff;

        /* Many TCP implementations (incorrectly) count the initial SYN in
         * their delayed-ACK and congestion control algorithms, so start
         * with a congestion window of 2 to interoperate efficiently. */
        newtp->snd_cwnd = 2;
        newtp->snd_cwnd_cnt = 0;
        newtp->bytes_acked = 0;

        newtp->frto_counter = 0;
        newtp->frto_highmark = 0;

        newicsk->icsk_ca_ops = &mytcp_init_congestion_ops;

        tcp_set_ca_state(newsk, TCP_CA_Open);
        mytcp_init_xmit_timers(newsk);
        skb_queue_head_init(&newtp->out_of_order_queue);
        newtp->rcv_wup = treq->rcv_isn + 1;
        newtp->write_seq = treq->snt_isn + 1;
        newtp->pushed_seq = newtp->write_seq;
        newtp->copied_seq = treq->rcv_isn + 1;

        newtp->rx_opt.saw_tstamp = 0;

        newtp->rx_opt.dsack = 0;
        newtp->rx_opt.eff_sacks = 0;

        newtp->rx_opt.num_sacks = 0;
        newtp->urg_data = 0;

        if (sock_flag(newsk, SOCK_KEEPOPEN))
            myinet_csk_reset_keepalive_timer(newsk,
                                             mykeepalive_time_when(newtp));

        /* Carry over the options negotiated during the handshake. */
        newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
        if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
            if (mysysctl_tcp_fack)
                newtp->rx_opt.sack_ok |= 2;
        }
        newtp->window_clamp = req->window_clamp;
        newtp->rcv_ssthresh = req->rcv_wnd;
        newtp->rcv_wnd = req->rcv_wnd;
        newtp->rx_opt.wscale_ok = ireq->wscale_ok;
        if (newtp->rx_opt.wscale_ok) {
            newtp->rx_opt.snd_wscale = ireq->snd_wscale;
            newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
        } else {
            newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
            newtp->window_clamp = min(newtp->window_clamp, 65535U);
        }
        newtp->snd_wnd = ntohs(skb->h.th->window) <<
                         newtp->rx_opt.snd_wscale;
        newtp->max_window = newtp->snd_wnd;

        if (newtp->rx_opt.tstamp_ok) {
            newtp->rx_opt.ts_recent = req->ts_recent;
            newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
            newtp->tcp_header_len = sizeof(struct tcphdr) +
                                    TCPOLEN_TSTAMP_ALIGNED;
        } else {
            newtp->rx_opt.ts_recent_stamp = 0;
            newtp->tcp_header_len = sizeof(struct tcphdr);
        }
        if (skb->len >= TCP_MIN_RCVMSS + newtp->tcp_header_len)
            newicsk->icsk_ack.last_seg_size =
                skb->len - newtp->tcp_header_len;
        newtp->rx_opt.mss_clamp = req->mss;
        TCP_ECN_openreq_child(newtp, req);
        if (newtp->ecn_flags & TCP_ECN_OK)
            sock_set_flag(newsk, SOCK_NO_LARGESEND);

        MYTCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
    }
    return newsk;
}
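For context, here is a minimal sketch of how the enum tcp_tw_status values returned by mytcp_timewait_state_process() are typically consumed, assuming this stack mirrors the do_time_wait path of the stock kernel's tcp_v4_rcv(). myinet_lookup_listener() and mytcp_v4_timewait_ack() are assumed my-prefixed analogs of inet_lookup_listener() and tcp_v4_timewait_ack(); neither is defined in the file above.

/* Sketch only: dispatch modeled on tcp_v4_rcv()'s do_time_wait label in a
 * 2.6.1x kernel.  myinet_lookup_listener() and mytcp_v4_timewait_ack() are
 * assumed analogs of the unprefixed kernel functions. */
do_time_wait:
    switch (mytcp_timewait_state_process(inet_twsk(sk), skb, skb->h.th)) {
    case TCP_TW_SYN: {
        /* A legitimate new SYN may reopen the pair: find the listener,
         * kill the tw bucket, and restart full processing on it. */
        struct sock *sk2 = myinet_lookup_listener(&mytcp_hashinfo,
                                                  skb->nh.iph->daddr,
                                                  ntohs(skb->h.th->dest),
                                                  inet_iif(skb));
        if (sk2 != NULL) {
            myinet_twsk_deschedule(inet_twsk(sk), &mytcp_death_row);
            myinet_twsk_put(inet_twsk(sk));
            sk = sk2;
            goto process;
        }
        /* No listener found: fall through and re-ACK as for TCP_TW_ACK. */
    }
    case TCP_TW_ACK:
        mytcp_v4_timewait_ack(sk, skb); /* retransmit our last ACK */
        break;
    case TCP_TW_RST:
        goto no_tcp_socket;             /* answer with a reset */
    case TCP_TW_SUCCESS:
        break;                          /* segment fully consumed */
    }
    goto discard_it;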