📄 tcp.cc
	}
	return 0;
}

/*
 * main reception path - should only see acks, otherwise the
 * network connections are misconfigured
 */
void TcpAgent::recv(Packet *pkt, Handler*)
{
	hdr_tcp *tcph = hdr_tcp::access(pkt);
	int valid_ack = 0;
	if (qs_approved_ == 1 && tcph->seqno() > last_ack_)
		endQuickStart();
	if (qs_requested_ == 1)
		processQuickStart(pkt);
#ifdef notdef
	if (pkt->type_ != PT_ACK) {
		Tcl::instance().evalf("%s error \"received non-ack\"",
				      name());
		Packet::free(pkt);
		return;
	}
#endif
	/* W.N.: check if this is from a previous incarnation */
	if (tcph->ts() < lastreset_) {
		// Remove packet and do nothing
		Packet::free(pkt);
		return;
	}
	++nackpack_;
	ts_peer_ = tcph->ts();
	int ecnecho = hdr_flags::access(pkt)->ecnecho();
	if (ecnecho && ecn_)
		ecn(tcph->seqno());
	recv_helper(pkt);
	recv_frto_helper(pkt);
	/* grow cwnd and check if the connection is done */
	if (tcph->seqno() > last_ack_) {
		recv_newack_helper(pkt);
		if (last_ack_ == 0 && delay_growth_) {
			cwnd_ = initial_window();
		}
	} else if (tcph->seqno() == last_ack_) {
		if (hdr_flags::access(pkt)->eln_ && eln_) {
			tcp_eln(pkt);
			return;
		}
		if (++dupacks_ == numdupacks_ && !noFastRetrans_) {
			dupack_action();
		} else if (dupacks_ < numdupacks_ && singledup_ ) {
			send_one();
		}
	}

	if (QOption_ && EnblRTTCtr_)
		process_qoption_after_ack (tcph->seqno());

	if (tcph->seqno() >= last_ack_)
		// Check if ACK is valid.  Suggestion by Mark Allman.
		valid_ack = 1;

	Packet::free(pkt);
	/*
	 * Try to send more data.
	 */
	if (valid_ack || aggressive_maxburst_)
		send_much(0, 0, maxburst_);
}

/*
 * Process timeout events other than rtx timeout. Having this as a separate
 * function allows derived classes to make alterations/enhancements (e.g.,
 * response to new types of timeout events).
 */
void TcpAgent::timeout_nonrtx(int tno)
{
	if (tno == TCP_TIMER_DELSND) {
		/*
		 * delayed-send timer, with random overhead
		 * to avoid phase effects
		 */
		send_much(1, TCP_REASON_TIMEOUT, maxburst_);
	}
}

void TcpAgent::timeout(int tno)
{
	/* retransmit timer */
	if (tno == TCP_TIMER_RTX) {

		// There has been a timeout - will trace this event
		trace_event("TIMEOUT");

		frto_ = 0;
		// Set pipe_prev as per Eifel Response
		pipe_prev_ = (window() > ssthresh_) ?
			window() : (int)ssthresh_;

		if (cwnd_ < 1) cwnd_ = 1;
		if (qs_approved_ == 1) qs_approved_ = 0;
		if (highest_ack_ == maxseq_ && !slow_start_restart_) {
			/*
			 * TCP option:
			 * If no outstanding data, then don't do anything.
			 */
			// Should this return be here?
			// What if CWND_ACTION_ECN and cwnd < 1?
			// return;
		} else {
			recover_ = maxseq_;
			if (highest_ack_ == -1 && wnd_init_option_ == 2)
				/*
				 * First packet dropped, so don't use larger
				 * initial windows.
				 */
				wnd_init_option_ = 1;
			if (highest_ack_ == maxseq_ && restart_bugfix_)
				/*
				 * if there is no outstanding data, don't cut
				 * down ssthresh_.
				 */
				slowdown(CLOSE_CWND_ONE|NO_OUTSTANDING_DATA);
			else if (highest_ack_ < recover_ &&
			    last_cwnd_action_ == CWND_ACTION_ECN) {
				/*
				 * if we are in recovery from a recent ECN,
				 * don't cut down ssthresh_.
				 */
				slowdown(CLOSE_CWND_ONE);
				if (frto_enabled_ || sfrto_enabled_) {
					frto_ = 1;
				}
			}
			else {
				++nrexmit_;
				last_cwnd_action_ = CWND_ACTION_TIMEOUT;
				slowdown(CLOSE_SSTHRESH_HALF|CLOSE_CWND_RESTART);
				if (frto_enabled_ || sfrto_enabled_) {
					frto_ = 1;
				}
			}
		}
		/* if there is no outstanding data, don't back off rtx timer */
		if (highest_ack_ == maxseq_ && restart_bugfix_) {
			reset_rtx_timer(0,0);
		}
		else {
			reset_rtx_timer(0,1);
		}
		last_cwnd_action_ = CWND_ACTION_TIMEOUT;
		send_much(0, TCP_REASON_TIMEOUT, maxburst_);
	}
	else {
		timeout_nonrtx(tno);
	}
}

/*
 * Check if the packet (ack) has the ELN bit set, and if it does, and if the
 * last ELN-rxmitted packet is smaller than this one, then retransmit the
 * packet.  Do not adjust the cwnd when this happens.
 */
void TcpAgent::tcp_eln(Packet *pkt)
{
	//int eln_rxmit;
	hdr_tcp *tcph = hdr_tcp::access(pkt);
	int ack = tcph->seqno();

	if (++dupacks_ == eln_rxmit_thresh_ && ack > eln_last_rxmit_) {
		/* Retransmit this packet */
		output(last_ack_ + 1, TCP_REASON_DUPACK);
		eln_last_rxmit_ = last_ack_+1;
	} else
		send_much(0, 0, maxburst_);

	Packet::free(pkt);
	return;
}

/*
 * This function is invoked when the connection is done. It in turn
 * invokes the Tcl finish procedure that was registered with TCP.
 */
void TcpAgent::finish()
{
	Tcl::instance().evalf("%s done", this->name());
}

void RtxTimer::expire(Event*)
{
	a_->timeout(TCP_TIMER_RTX);
}

void DelSndTimer::expire(Event*)
{
	a_->timeout(TCP_TIMER_DELSND);
}

void BurstSndTimer::expire(Event*)
{
	a_->timeout(TCP_TIMER_BURSTSND);
}

/*
 * THE FOLLOWING FUNCTIONS ARE OBSOLETE, but REMAIN HERE
 * DUE TO OTHER PEOPLE's TCPs THAT MIGHT USE THEM
 *
 * These functions are now replaced by ecn() and slowdown(),
 * respectively.
 */

/*
 * Respond either to a source quench or to a congestion indication bit.
 * This is done at most once a roundtrip time; after a source quench,
 * another one will not be done until the last packet transmitted before
 * the previous source quench has been ACKed.
 */
void TcpAgent::quench(int how)
{
	if (highest_ack_ >= recover_) {
		recover_ = maxseq_;
		last_cwnd_action_ = CWND_ACTION_ECN;
		closecwnd(how);
	}
}

/*
 * close down the congestion window
 */
void TcpAgent::closecwnd(int how)
{
	static int first_time = 1;
	if (first_time == 1) {
		fprintf(stderr, "the TcpAgent::closecwnd() function is now deprecated, please use the function slowdown() instead\n");
	}
	switch (how) {
	case 0:
		/* timeouts */
		ssthresh_ = int( window() / 2 );
		if (ssthresh_ < 2)
			ssthresh_ = 2;
		cwnd_ = int(wnd_restart_);
		break;

	case 1:
		/* Reno dup acks, or after a recent congestion indication. */
		// cwnd_ = window()/2;
		cwnd_ = decrease_num_ * window();
		ssthresh_ = int(cwnd_);
		if (ssthresh_ < 2)
			ssthresh_ = 2;
		break;

	case 2:
		/* Tahoe dup acks
		 * after a recent congestion indication */
		cwnd_ = wnd_init_;
		break;

	case 3:
		/* Retransmit timeout, but no outstanding data. */
		cwnd_ = int(wnd_init_);
		break;

	case 4:
		/* Tahoe dup acks */
		ssthresh_ = int( window() / 2 );
		if (ssthresh_ < 2)
			ssthresh_ = 2;
		cwnd_ = 1;
		break;

	default:
		abort();
	}
	fcnt_ = 0.;
	count_ = 0;
}

/*
 * Check if the sender has been idle or application-limited for more
 * than an RTO, and if so, reduce the congestion window.
 */
void TcpAgent::process_qoption_after_send ()
{
	int tcp_now = (int)(Scheduler::instance().clock()/tcp_tick_ + 0.5);
	int rto = (int)(t_rtxcur_/tcp_tick_) ;
	/* double ct = Scheduler::instance().clock(); */

	if (!EnblRTTCtr_) {
		if (tcp_now - T_last >= rto) {
			// The sender has been idle.
			slowdown(THREE_QUARTER_SSTHRESH|TCP_IDLE) ;
			for (int i = 0 ; i < (tcp_now - T_last)/rto; i ++) {
				slowdown(CWND_HALF_WITH_MIN|TCP_IDLE);
			}
			T_prev = tcp_now ;
			W_used = 0 ;
		}
		T_last = tcp_now ;
		if (t_seqno_ == highest_ack_ + window()) {
			T_prev = tcp_now ;
			W_used = 0 ;
		}
		else if (t_seqno_ == curseq_-1) {
			// The sender has no more data to send.
			int tmp = t_seqno_ - highest_ack_ ;
			if (tmp > W_used)
				W_used = tmp ;
			if (tcp_now - T_prev >= rto) {
				// The sender has been application-limited.
				slowdown(THREE_QUARTER_SSTHRESH|TCP_IDLE);
				slowdown(CLOSE_CWND_HALF_WAY|TCP_IDLE);
				T_prev = tcp_now ;
				W_used = 0 ;
			}
		}
	}
	else {
		rtt_counting();
	}
}

/*
 * Check if the sender has been idle or application-limited for more
 * than an RTO, and if so, reduce the congestion window, for a TCP sender
 * that "counts RTTs" by estimating the number of RTTs that fit into
 * a single clock tick.
 */
void
TcpAgent::rtt_counting()
{
	int tcp_now = (int)(Scheduler::instance().clock()/tcp_tick_ + 0.5);
	int rtt = (int(t_srtt_) >> T_SRTT_BITS) ;

	if (rtt < 1)
		rtt = 1 ;
	if (tcp_now - T_last >= 2*rtt) {
		// The sender has been idle.
		int RTTs ;
		RTTs = (tcp_now - T_last)*RTT_goodcount/(rtt*2) ;
		RTTs = RTTs - Backoffs ;
		Backoffs = 0 ;
		if (RTTs > 0) {
			slowdown(THREE_QUARTER_SSTHRESH|TCP_IDLE) ;
			for (int i = 0 ; i < RTTs ; i ++) {
				slowdown(CWND_HALF_WITH_MIN|TCP_IDLE);
				RTT_prev = RTT_count ;
				W_used = 0 ;
			}
		}
	}
	T_last = tcp_now ;
	if (tcp_now - T_start >= 2*rtt) {
		if ((RTT_count > RTT_goodcount) || (F_full == 1)) {
			RTT_goodcount = RTT_count ;
			if (RTT_goodcount < 1) RTT_goodcount = 1 ;
		}
		RTT_prev = RTT_prev - RTT_count ;
		RTT_count = 0 ;
		T_start = tcp_now ;
		F_full = 0;
	}
	if (t_seqno_ == highest_ack_ + window()) {
		W_used = 0 ;
		F_full = 1 ;
		RTT_prev = RTT_count ;
	}
	else if (t_seqno_ == curseq_-1) {
		// The sender has no more data to send.
		int tmp = t_seqno_ - highest_ack_ ;
		if (tmp > W_used)
			W_used = tmp ;
		if (RTT_count - RTT_prev >= 2) {
			// The sender has been application-limited.
			slowdown(THREE_QUARTER_SSTHRESH|TCP_IDLE) ;
			slowdown(CLOSE_CWND_HALF_WAY|TCP_IDLE);
			RTT_prev = RTT_count ;
			Backoffs ++ ;
			W_used = 0;
		}
	}
	if (F_counting == 0) {
		W_timed = t_seqno_ ;
		F_counting = 1 ;
	}
}

void TcpAgent::process_qoption_after_ack (int seqno)
{
	if (F_counting == 1) {
		if (seqno >= W_timed) {
			RTT_count ++ ;
			F_counting = 0 ;
		} else {
			if (dupacks_ == numdupacks_)
				RTT_count ++ ;
		}
	}
}

void TcpAgent::trace_event(char *eventtype)
{
	if (et_ == NULL) return;
	int seqno = t_seqno_;
	char *wrk = et_->buffer();
	char *nwrk = et_->nbuffer();
	if (wrk != 0)
		sprintf(wrk,
			"E "TIME_FORMAT" %d %d TCP %s %d %d %d",
			et_->round(Scheduler::instance().clock()),   // time
			addr(),                       // owner (src) node id
			daddr(),                      // dst node id
			eventtype,                    // event type
			fid_,                         // flow-id
			seqno,                        // current seqno
			int(cwnd_)                    //cong. window
			);

	if (nwrk != 0)
		sprintf(nwrk,
			"E -t "TIME_FORMAT" -o TCP -e %s -s %d.%d -d %d.%d",
			et_->round(Scheduler::instance().clock()),   // time
			eve
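The comment above TcpAgent::timeout_nonrtx() points out that non-retransmit timeouts are handled in a separate function precisely so derived classes can alter or extend the behaviour. The fragment below is a minimal sketch of such a subclass and is not part of tcp.cc: the class name ExampleTcpAgent and the traced event string are hypothetical, and the OTcl TclClass binding a real ns-2 agent needs is omitted.

#include "tcp.h"

/* Hypothetical derived agent (illustration only, not in ns-2). */
class ExampleTcpAgent : public TcpAgent {
protected:
	/* Hook the delayed-send timer, then defer to the base class. */
	virtual void timeout_nonrtx(int tno) {
		if (tno == TCP_TIMER_DELSND) {
			/* Example-only action: record the event in the
			 * event trace before the default send_much() runs. */
			trace_event("DELSND_TIMEOUT");
		}
		TcpAgent::timeout_nonrtx(tno);
	}
};

The base-class call at the end keeps the default delayed-send handling intact; a subclass that also reacts to new timer ids would add further branches before that call.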
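As a worked example of the idle check in process_qoption_after_send() (the numbers are assumptions for illustration, not values from the file): with tcp_tick_ = 0.01 s and t_rtxcur_ = 0.5 s, rto = (int)(0.5/0.01) = 50 ticks. If the sender last transmitted 1.2 s (120 ticks) earlier, tcp_now - T_last = 120 >= rto, so slowdown(THREE_QUARTER_SSTHRESH|TCP_IDLE) is applied once and the loop then applies slowdown(CWND_HALF_WITH_MIN|TCP_IDLE) 120/50 = 2 times, one reduction per elapsed RTO.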