📄 tcp_timer.c
	BUG_TRAP(!skb_queue_empty(&sk->write_queue));

	if (tp->snd_wnd == 0 && !sk->dead &&
	    !((1<<sk->state)&(TCPF_SYN_SENT|TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
#ifdef TCP_DEBUG
		if (net_ratelimit())
			printk(KERN_DEBUG "TCP: Treason uncloaked! Peer %u.%u.%u.%u:%u/%u shrinks window %u:%u. Repaired.\n",
			       NIPQUAD(sk->daddr), htons(sk->dport), sk->num,
			       tp->snd_una, tp->snd_nxt);
#endif
		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk, 0);
		tcp_retransmit_skb(sk, skb_peek(&sk->write_queue));
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	if (tcp_write_timeout(sk))
		goto out;

	if (tp->retransmits == 0) {
		if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) {
			if (tp->sack_ok) {
				if (tp->ca_state == TCP_CA_Recovery)
					NET_INC_STATS_BH(TCPSackRecoveryFail);
				else
					NET_INC_STATS_BH(TCPSackFailures);
			} else {
				if (tp->ca_state == TCP_CA_Recovery)
					NET_INC_STATS_BH(TCPRenoRecoveryFail);
				else
					NET_INC_STATS_BH(TCPRenoFailures);
			}
		} else if (tp->ca_state == TCP_CA_Loss) {
			NET_INC_STATS_BH(TCPLossFailures);
		} else {
			NET_INC_STATS_BH(TCPTimeouts);
		}
	}

	tcp_enter_loss(sk, 0);

	if (tcp_retransmit_skb(sk, skb_peek(&sk->write_queue)) > 0) {
		/* Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (!tp->retransmits)
			tp->retransmits=1;
		tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS,
				     min(tp->rto, TCP_RESOURCE_PROBE_INTERVAL));
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	tp->backoff++;
	tp->retransmits++;

out_reset_timer:
	tp->rto = min(tp->rto << 1, TCP_RTO_MAX);
	tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
	if (tp->retransmits > sysctl_tcp_retries1)
		__sk_dst_reset(sk);

out:;
}

static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock*)data;
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	int event;

	bh_lock_sock(sk);
	if (sk->lock.users) {
		/* Try again later */
		if (!mod_timer(&tp->retransmit_timer, jiffies + (HZ/20)))
			sock_hold(sk);
		goto out_unlock;
	}

	if (sk->state == TCP_CLOSE || !tp->pending)
		goto out;

	if ((long)(tp->timeout - jiffies) > 0) {
		if (!mod_timer(&tp->retransmit_timer, tp->timeout))
			sock_hold(sk);
		goto out;
	}

	event = tp->pending;
	tp->pending = 0;

	switch (event) {
	case TCP_TIME_RETRANS:
		tcp_retransmit_timer(sk);
		break;
	case TCP_TIME_PROBE0:
		tcp_probe_timer(sk);
		break;
	}
	TCP_CHECK_TIMER(sk);

out:
	tcp_mem_reclaim(sk);
out_unlock:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/*
 *	Timer for listening sockets
 */

static void tcp_synack_timer(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	struct tcp_listen_opt *lopt = tp->listen_opt;
	int max_retries = tp->syn_retries ? : sysctl_tcp_synack_retries;
	int thresh = max_retries;
	unsigned long now = jiffies;
	struct open_request **reqp, *req;
	int i, budget;

	if (lopt == NULL || lopt->qlen == 0)
		return;

	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 3 seconds, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embrions; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	if (lopt->qlen>>(lopt->max_qlen_log-1)) {
		int young = (lopt->qlen_young<<1);

		while (thresh > 2) {
			if (lopt->qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}

	if (tp->defer_accept)
		max_retries = tp->defer_accept;

	budget = 2*(TCP_SYNQ_HSIZE/(TCP_TIMEOUT_INIT/TCP_SYNQ_INTERVAL));
	i = lopt->clock_hand;

	do {
		reqp=&lopt->syn_table[i];
		while ((req = *reqp) != NULL) {
			if ((long)(now - req->expires) >= 0) {
				if ((req->retrans < thresh ||
				     (req->acked && req->retrans < max_retries))
				    && !req->class->rtx_syn_ack(sk, req, NULL)) {
					unsigned long timeo;

					if (req->retrans++ == 0)
						lopt->qlen_young--;
					timeo = min((TCP_TIMEOUT_INIT << req->retrans),
						    TCP_RTO_MAX);
					req->expires = now + timeo;
					reqp = &req->dl_next;
					continue;
				}

				/* Drop this request */
				write_lock(&tp->syn_wait_lock);
				*reqp = req->dl_next;
				write_unlock(&tp->syn_wait_lock);
				lopt->qlen--;
				if (req->retrans == 0)
					lopt->qlen_young--;
				tcp_openreq_free(req);
				continue;
			}
			reqp = &req->dl_next;
		}

		i = (i+1)&(TCP_SYNQ_HSIZE-1);

	} while (--budget > 0);

	lopt->clock_hand = i;

	if (lopt->qlen)
		tcp_reset_keepalive_timer(sk, TCP_SYNQ_INTERVAL);
}

void tcp_delete_keepalive_timer (struct sock *sk)
{
	if (timer_pending(&sk->timer) && del_timer (&sk->timer))
		__sock_put(sk);
}

void tcp_reset_keepalive_timer (struct sock *sk, unsigned long len)
{
	if (!mod_timer(&sk->timer, jiffies+len))
		sock_hold(sk);
}

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1<<sk->state)&(TCPF_CLOSE|TCPF_LISTEN))
		return;

	if (val && !sk->keepopen)
		tcp_reset_keepalive_timer(sk, keepalive_time_when(&sk->tp_pinfo.af_tcp));
	else if (!val)
		tcp_delete_keepalive_timer(sk);
}

static void tcp_keepalive_timer (unsigned long data)
{
	struct sock *sk = (struct sock *) data;
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	__u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sk->lock.users) {
		/* Try again later. */
		tcp_reset_keepalive_timer (sk, HZ/20);
		goto out;
	}

	if (sk->state == TCP_LISTEN) {
		tcp_synack_timer(sk);
		goto out;
	}

	if (sk->state == TCP_FIN_WAIT2 && sk->dead) {
		if (tp->linger2 >= 0) {
			int tmo = tcp_fin_time(tp) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sk->keepopen || sk->state == TCP_CLOSE)
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || tp->send_head)
		goto resched;

	elapsed = tcp_time_stamp - tp->rcv_tstamp;

	if (elapsed >= keepalive_time_when(tp)) {
		if ((!tp->keepalive_probes && tp->probes_out >= sysctl_tcp_keepalive_probes) ||
		     (tp->keepalive_probes && tp->probes_out >= tp->keepalive_probes)) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk) <= 0) {
			tp->probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	TCP_CHECK_TIMER(sk);
	tcp_mem_reclaim(sk);

resched:
	tcp_reset_keepalive_timer (sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
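A few standalone sketches follow; they are not part of tcp_timer.c, just userspace C illustrating the arithmetic in the listing above. First, the retransmit path: tcp_retransmit_timer doubles tp->rto on every timeout and clamps it at TCP_RTO_MAX, exactly as the Jacobson comment describes. This sketch replays that doubling-with-clamp pattern; the 1-second starting RTO and HZ = 100 are illustrative assumptions, not values taken from the listing.

#include <stdio.h>

#define HZ          100              /* assumed tick rate for illustration */
#define TCP_RTO_MAX (120 * HZ)       /* 120 s, the protocol's RTT ceiling */

int main(void)
{
	unsigned long rto = 1 * HZ;  /* assume a 1 s RTO at the first timeout */
	int retransmits;

	for (retransmits = 1; retransmits <= 10; retransmits++) {
		/* mirrors: tp->rto = min(tp->rto << 1, TCP_RTO_MAX); */
		rto = rto << 1;
		if (rto > TCP_RTO_MAX)
			rto = TCP_RTO_MAX;
		printf("retransmit %2d: rto = %3lu s\n", retransmits, rto / HZ);
	}
	return 0;
}

Running it shows the sequence 2, 4, 8, ..., 64, 120, 120 seconds: the backoff grows geometrically and then sits at the 120-second clamp the comment complains about.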
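Second, the SYN-queue pruning rule in tcp_synack_timer: once the queue is more than half full, the retry threshold for old entries is lowered step by step until the doubled count of young entries would cover the queue, which is how "half of room for young embrions" is reserved. A minimal sketch of that loop, with the field names (qlen, qlen_young, max_qlen_log) reused as plain parameters and the example queue size and retry count chosen for illustration:

#include <stdio.h>

static int synack_thresh(int qlen, int qlen_young, int max_qlen_log,
			 int max_retries)
{
	int thresh = max_retries;

	if (qlen >> (max_qlen_log - 1)) {       /* more than half full? */
		int young = qlen_young << 1;

		while (thresh > 2) {
			if (qlen < young)
				break;          /* young entries still dominate */
			thresh--;               /* old entries get one retry less */
			young <<= 1;
		}
	}
	return thresh;
}

int main(void)
{
	/* Assume a 512-slot queue (max_qlen_log = 9) and 5 SYN-ACK retries. */
	printf("lightly loaded:   thresh = %d\n", synack_thresh(100, 90, 9, 5));
	printf("old reqs pile up: thresh = %d\n", synack_thresh(400, 20, 9, 5));
	return 0;
}

With 400 queued requests of which only 20 are young, thresh degrades from 5 to 2, so stale requests are dropped after two retransmits instead of five, freeing room during a flood.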
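Finally, the rearm arithmetic at the end of tcp_keepalive_timer: if the connection has been idle for at least the keepalive time, a probe goes out and the timer is set to the probe interval; otherwise it sleeps for exactly the remaining idle budget (tp->rcv_tstamp + keepalive_time_when(tp) minus now). A sketch in seconds, assuming the kernel's default 7200 s keepalive time and 75 s probe interval:

#include <stdio.h>

#define KEEPALIVE_TIME  7200UL  /* default keepalive idle time, seconds */
#define KEEPALIVE_INTVL   75UL  /* default interval between probes, seconds */

/* Returns seconds until the timer should fire next; *probe is set when a
 * keepalive probe would be sent now.
 */
static unsigned long keepalive_rearm(unsigned long idle, int *probe)
{
	if (idle >= KEEPALIVE_TIME) {
		*probe = 1;
		return KEEPALIVE_INTVL;        /* wait one probe interval */
	}
	*probe = 0;
	return KEEPALIVE_TIME - idle;          /* sleep out the idle budget */
}

int main(void)
{
	int probe;
	unsigned long next;

	next = keepalive_rearm(7000, &probe);
	printf("idle 7000 s -> probe=%d, next timer in %lu s\n", probe, next);
	next = keepalive_rearm(7200, &probe);
	printf("idle 7200 s -> probe=%d, next timer in %lu s\n", probe, next);
	return 0;
}

The first case rearms for the 200 s remainder without probing; the second sends a probe and rearms for 75 s, matching the elapsed bookkeeping in the function above.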