/* tcp_input.c */
* (side2) ESTABLISHED -> CLOSE_WAIT -> LAST_ACK
* Side2, when in CLOSE_WAIT, is supposed to send a message
* to side1 to cause side1 to transition from FIN_WAIT_2 to
* TIME_WAIT. This is not implemented here. However,
* the way rfc793 reads, it seems optional that side2
* should send a message to force the state transition from
* FIN_WAIT_2 to TIME_WAIT. The problem is that without
* the FIN_WAIT_2 to TIME_WAIT transition, the system can
* crash in LOOPBACK MODE with tcp_input() and tcp_output()
* thrashing until they overwrite the stack. Side1
* will only transition from FIN_WAIT_2 to TIME_WAIT if it has
* received a FIN, as you can see in the case statement
* below. To avoid the thrashing, we force a transition
* from FIN_WAIT_2 to TIME_WAIT for loopback, even when we have
* not received FIN on side1.
*/
/* NOTE(review): this is the tail end of an "if" condition whose start is
 * above this chunk; the clause below detects the loopback special case
 * (template source address == destination address) while in FIN_WAIT_2. */
((tp->t_state==TCPS_FIN_WAIT_2) &&
 (tp->t_template->ti_src.s_addr==tp->t_template->ti_dst.s_addr))){
	/* Only do this conditional if you've received FIN */
	if (tiflags & TH_FIN){
		/*
		 * First FIN seen on this connection: stop further receives
		 * on the socket, schedule an immediate ACK, and advance
		 * rcv_nxt over the FIN's sequence number.
		 */
		if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
			socantrcvmore(so);
			tp->t_flags |= TF_ACKNOW;
			tp->rcv_nxt++;
		}
	}
	switch (tp->t_state) {

	/*
	 * In SYN_RECEIVED and ESTABLISHED STATES
	 * enter the CLOSE_WAIT state.
	 */
	case TCPS_SYN_RECEIVED:
	case TCPS_ESTABLISHED:
#ifdef XTI
		/* set T_ORDREL bit on in socket struct field
		 * so->so_xticb.xti_evtarray when so->so_xticb.xti_epvalid and
		 * *->xti_evtenabled are on.
		 */
		if (so->so_xticb.xti_epvalid) { /* valid xti endpoint */
			if (so->so_xticb.xti_evtenabled) {
				so->so_xticb.xti_evtarray[XTI_EVT_T_ORDREL]++;
				PRINTXTID(1, ("T_ORDREL (#1) \n"));
			}
		}
#endif XTI
		tp->t_state = TCPS_CLOSE_WAIT;
		break;

	/*
	 * If still in FIN_WAIT_1 STATE FIN has not been acked so
	 * enter the CLOSING state.
	 */
	case TCPS_FIN_WAIT_1:
#ifdef XTI
		/* set T_ORDREL bit on in socket struct field
		 * so->so_xticb.xti_evtarray when so->so_xticb.xti_epvalid and
		 * *->xti_evtenabled are on.
		 */
		if (so->so_xticb.xti_epvalid) { /* valid xti endpoint */
			if (so->so_xticb.xti_evtenabled) {
				so->so_xticb.xti_evtarray[XTI_EVT_T_ORDREL]++;
				PRINTXTID(1, ("T_ORDREL (#3) \n"));
			}
		}
#endif XTI
		tp->t_state = TCPS_CLOSING;
		break;

	/*
	 * In FIN_WAIT_2 state enter the TIME_WAIT state,
	 * starting the time-wait timer, turning off the other
	 * standard timers.
	 */
	case TCPS_FIN_WAIT_2:
		/***
		if (tp->t_template->ti_src.s_addr==tp->t_template->ti_dst.s_addr)
			mprintf("tcp_input: loopback and FIN_WAIT_2");
		***/
		tp->t_state = TCPS_TIME_WAIT;
#ifdef XTI
		/* set T_ORDREL bit on in socket struct field
		 * so->so_xticb.xti_evtarray when so->so_xticb.xti_epvalid and
		 * *->xti_evtenabled are on.
		 */
		if (so->so_xticb.xti_epvalid) { /* valid xti endpoint */
			if (so->so_xticb.xti_evtenabled) {
				so->so_xticb.xti_evtarray[XTI_EVT_T_ORDREL]++;
				PRINTXTID(1, ("T_ORDREL (#2) \n"));
			}
		}
#endif XTI
		/* Cancel retransmit/persist/keepalive and arm the 2*MSL
		 * timer; the connection is done as far as the user goes. */
		tcp_canceltimers(tp);
		tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
		soisdisconnected(so);
		break;

	/*
	 * In TIME_WAIT state restart the 2 MSL time_wait timer.
	 */
	case TCPS_TIME_WAIT:
		tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
		break;
	}
}
if (so->so_options & SO_DEBUG)
	tcp_trace(TA_INPUT, ostate, tp, &tcp_saveti, 0);

/*
 * Return any desired output.
 */
#ifdef XTI
/* NOTE(review): under XTI this extra "if" guards the tcp_output call;
 * without XTI it preprocesses away and output is gated only by
 * needoutput / TF_ACKNOW. */
if (tp->t_state >= TCPS_ESTABLISHED || (!(tp->t_acceptmode)))
#endif XTI
if (needoutput || (tp->t_flags & TF_ACKNOW))
	(void) tcp_output(tp);
smp_unlock(&so->lk_socket);
return;

dropafterack:
/*
 * Generate an ACK dropping incoming segment if it occupies
 * sequence space, where the ACK reflects our state.
 */
if (tiflags & TH_RST){
	goto drop;
}
m_freem(m);
tp->t_flags |= TF_ACKNOW;
(void) tcp_output(tp);
smp_unlock(&so->lk_socket);
return;

dropwithreset:
if (om) {
	(void) m_free(om);
	om = 0;
}
/*
 * Generate a RST, dropping incoming segment.
 * Make ACK acceptable to originator of segment.
 * Don't bother to respond if destination was broadcast.
 */
if ((tiflags & TH_RST) || in_broadcast(ti->ti_dst)){
	goto drop;
}
if (tiflags & TH_ACK){
	tcp_respond(tp, ti, (tcp_seq)0, ti->ti_ack, TH_RST, so);
} else {
	/* A SYN occupies one sequence number; account for it so the
	 * RST's ACK covers the whole segment. */
	if (tiflags & TH_SYN)
		ti->ti_len++;
	tcp_respond(tp, ti, ti->ti_seq+ti->ti_len, (tcp_seq)0, TH_RST|TH_ACK, so);
}
/* destroy temporarily created socket */
/* NOTE(review): on SMP, only the lock owner may abort/unlock; the
 * non-SMP path aborts unconditionally. */
if (dropsocket) {
	if (smp && smp_owner(&so->lk_socket) == 1)
		(void) soabort(so);
	if (smp && smp_owner(&so->lk_socket))
		smp_unlock(&so->lk_socket);
	if ( !(smp) )
		(void) soabort(so);
} else {
	if (smp && smp_owner(&so->lk_socket))
		smp_unlock(&so->lk_socket);
}
return;

drop:
if (om){
	(void) m_free(om);
}
/*
 * Drop space held by incoming segment and return.
 */
if (tp && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
	tcp_trace(TA_DROP, ostate, tp, &tcp_saveti, 0);
if ( (m != NULL) ){
	/* set m = 0 in ip_output's m_free call */
	m_freem(m);
}
/* destroy temporarily created socket */
if (dropsocket) {
	if (smp && smp_owner(&so->lk_socket) == 1)
		(void) soabort(so);
	if (smp && smp_owner(&so->lk_socket))
		smp_unlock(&so->lk_socket);
	if ( !(smp) )
		(void) soabort(so);
} else {
	if (smp && smp_owner(&so->lk_socket))
		smp_unlock(&so->lk_socket);
}
return;
}

/*
 * Parse TCP options from mbuf "om" into the connection "tp".
 * Only TCPOPT_MAXSEG is acted on (and only on SYN segments);
 * everything else is skipped. Consumes (frees) om.
 */
tcp_dooptions(tp, om, ti)
	struct tcpcb *tp;
	struct mbuf *om;
	struct tcpiphdr *ti;
{
	register u_char *cp;
	int opt, optlen, cnt;

	cp = mtod(om, u_char *);
	cnt = om->m_len;
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			/* NOTE(review): optlen is not checked against cnt, so a
			 * malformed option length could step past the end of the
			 * option buffer (cf. xti_ip_dooptions, which does check). */
			optlen = cp[1];
			if (optlen <= 0)
				break;
		}
		switch (opt) {

		default:
			break;

		case TCPOPT_MAXSEG:
			if (optlen != 4)
				continue;
			if (!(ti->ti_flags & TH_SYN))
				continue;
			/* Peer's advertised MSS, clamped by our own tcp_mss(). */
			tp->t_maxseg = *(u_short *)(cp + 2);
			tp->t_maxseg = ntohs((u_short)tp->t_maxseg);
			tp->t_maxseg = MIN(tp->t_maxseg, tcp_mss(tp));
			break;
		}
	}
	(void) m_free(om);
}
#ifdef XTI
/*
 * Pull out of band octets out of a segment so
 * it doesn't appear in the user's data queue.
 * It is still reflected in the segment length for
 * sequencing purposes.
*/
tcp_pulloobxti(so, ti)
	struct socket *so;
	struct tcpiphdr *ti;
{
	register struct mbuf *m;
	struct mbuf *m0;
	struct mbuf *m1;
	char *cp;		/* NOTE(review): unused; shadowed by the inner cp below */
	char save_urg;
	char *save_urg_ptr;
	int cnt = ti->ti_urp - 1;	/* offset of last urgent octet in the segment */
	struct tcpcb *tp = sototcpcb(so);
	int xti_flag = 0;
	int new_oob_len; /* oob data we are processing */

	if (so->so_xticb.xti_epvalid)
		xti_flag = 1;
	save_urg_ptr = &save_urg;
	m = dtom(ti);
	/*
	 * Overwrite section - Update event information
	 */
	if (xti_flag)
		if (so->so_xticb.xti_evtenabled) { /* write brand new record */
			so->so_xticb.xti_evtarray[XTI_EVT_T_EXDATA] = 1;
			PRINTXTID(1, ("T_EXDATA (#1) (brand new - 1 event) cnt=%d\n", cnt));
		} /* valid xti endpoint and events enabled */
	if (xti_flag) {
		/* XTI endpoint: copy up to XTI_EXPED_SIZE expedited octets
		 * into a contiguous mbuf m0. */
		new_oob_len = cnt + 1;
		new_oob_len = (new_oob_len <= XTI_EXPED_SIZE) ? new_oob_len : XTI_EXPED_SIZE;
		m0 = m_copy(m, 0, new_oob_len);
		m0 = m_pullup(m0, new_oob_len);
	} else {
		/* Socket case: extract the single urgent byte at offset cnt
		 * and close the gap in the original mbuf chain.
		 * NOTE(review): if cnt < 0 (ti_urp == 0) this loop never runs
		 * and m0 is read uninitialized below -- presumably callers
		 * guarantee ti_urp > 0; verify against the caller. */
		while (cnt >= 0) {
			if (m->m_len > cnt) {
				char *cp = mtod(m, caddr_t) + cnt;
				*save_urg_ptr = *cp;
				new_oob_len = 1;
				m0 = m_copy(m, 0, new_oob_len);
				bcopy(save_urg_ptr, mtod(m0, caddr_t), new_oob_len);
				/* Shift the remaining bytes left over the OOB byte. */
				bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
				m->m_len--;
				break;
			}
			cnt -= m->m_len;
			m = m->m_next;
			if (m == 0)
				panic("tcp_pulloutofband");
		}
	}
	if (m0 == 0)
		panic("tcp_pulloobxti no m0");
	/*
	 * RB: Since a user may be in the process of reading the
	 * out-of-band data on another CPU, the socket might
	 * not be locked. Therefore, we mustn't free the mbuf
	 * here and let soreceive free the mbuf after it has
	 * been read. So we handle the newly arrived out-of-
	 * band data by copying the data over the existing
	 * out-of-band data.
	 */
	if (so->so_exrcv.sb_mb == NULL) {
		sbappend(&so->so_exrcv, m0);
	} else {
		m1 = so->so_exrcv.sb_mb;
		so->so_exrcv.sb_cc -= m1->m_len;
		so->so_exrcv.sb_cc += (new_oob_len);
		m1->m_len = new_oob_len;
		/*
		 * Fix Ron's fix to smp problem.
		 * Reset the offset or we may walk off the end
		 * of the mbuf resulting in an unaligned access
		 * crash
		 */
		m1->m_off = MMINOFF;
		bcopy(mtod(m0, caddr_t), mtod(m1, caddr_t), new_oob_len);
		m_freem(m0);
	}
	tp->t_oobflags |= TCPOOB_HAVEDATA;
	/* XTI: trim the expedited octets we consumed from the front of
	 * the original chain (they were only copied, not removed, above). */
	if (xti_flag)
		while (1) {
			if (m->m_len >= new_oob_len) {
				m->m_len -= new_oob_len;
				if (m->m_len != 0 && m->m_off <= MMAXOFF)
					m->m_off += new_oob_len;
				break;
			} else {
				new_oob_len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
				if (!m)
					panic("pulloobxti no m->m_next");
			}
		}
	return;
}
#else
/*
 * Pull out of band byte out of a segment so
 * it doesn't appear in the user's data queue.
 * It is still reflected in the segment length for
 * sequencing purposes.
 */
tcp_pulloutofband(so, ti)
	struct socket *so;
	struct tcpiphdr *ti;
{
	register struct mbuf *m;
	int cnt = ti->ti_urp - 1;	/* offset of the urgent byte */

	m = dtom(ti);
	/* Walk the chain to the mbuf containing offset cnt. */
	while (cnt >= 0) {
		if (m->m_len > cnt) {
			char *cp = mtod(m, caddr_t) + cnt;
			struct tcpcb *tp = sototcpcb(so);

			/* Stash the byte in the tcpcb and squeeze it out of
			 * the data stream. */
			tp->t_iobc = *cp;
			tp->t_oobflags |= TCPOOB_HAVEDATA;
			bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
			m->m_len--;
			return;
		}
		cnt -= m->m_len;
		m = m->m_next;
		if (m == 0)
			break;
	}
	panic("tcp_pulloutofband");
}
#endif XTI
/*
 * Determine a reasonable value for maxseg size.
 * If the route is known, use one that can be handled
 * on the given interface without forcing IP to fragment.
 * If bigger than an mbuf cluster (NCLBYTES), round down to nearest size
 * to utilize large mbufs.
 * If interface pointer is unavailable, or the destination isn't local,
 * use a conservative size (512 or the default IP max size, but no more
 * than the mtu of the interface through which we route),
 * as we can't discover anything about intervening gateways or networks.
 * We also initialize the congestion/slow start window to be a single
 * segment if the destination isn't local; this information should
 * probably all be saved with the routing entry at the transport level.
 *
 * This is ugly, and doesn't belong at this level, but has to happen somehow.
*/
int tcp_trailers = 0;

tcp_mss(tp)
	register struct tcpcb *tp;
{
	struct route *ro;
	struct ifnet *ifp;
	int mss;
	struct inpcb *inp;

	inp = tp->t_inpcb;
	RTLOCK();
	ro = &inp->inp_route;
	if ((ro->ro_rt == (struct rtentry *)0) ||
	    (ifp = ro->ro_rt->rt_ifp) == (struct ifnet *)0) {
		/* No route yet, so try to acquire one */
		if (inp->inp_faddr.s_addr != INADDR_ANY) {
			ro->ro_dst.sa_family = AF_INET;
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr = inp->inp_faddr;
			rtalloc(ro);
		}
		/* Still routeless: fall back to the conservative default. */
		if ((ro->ro_rt == 0) || (ifp = ro->ro_rt->rt_ifp) == 0){
			RTUNLOCK();
			return (TCP_MSS);
		}
	}
	RTUNLOCK();
	/* Largest segment the outgoing interface carries without IP
	 * fragmentation. */
	mss = ifp->if_mtu - sizeof(struct tcpiphdr);
	if((ifp->if_flags & IFF_NOTRAILERS) == 0) {
#if (NCLBYTES & (NCLBYTES - 1)) == 0
		/* Cluster size is a power of two: clamp to one cluster. */
		if (mss > NCLBYTES) {
			mss = NCLBYTES;
		}
#else
		/* Otherwise round down to a multiple of the cluster size. */
		if (mss > NCLBYTES)
			mss = mss / NCLBYTES * NCLBYTES;
#endif
	}
	if (in_localaddr(inp->inp_faddr))
		return (mss);
	/* Non-local destination: be conservative and start slow-start
	 * with a one-segment congestion window. */
	mss = MIN(mss, TCP_MSS);
	tp->snd_cwnd = mss;
	return (mss);
}
#ifdef XTI
/*
 * Parse IP options from header "ip", extracting the fields of the
 * IP Security option (rfc791) into *xti_tcp_opt.
 * Returns 0 if a security option was found and copied, 1 otherwise
 * (including malformed option lengths).
 */
xti_ip_dooptions(ip,xti_tcp_opt)
	register struct ip *ip;
	struct secoptions *xti_tcp_opt;
{
	register u_char *cp;
	int opt, optlen, cnt;
	int return_stat = 1; /* default to failure */

	cp = (u_char *)(ip + 1);
	cnt = (ip->ip_hl << 2) - sizeof (struct ip);	/* total option bytes */
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[IPOPT_OPTVAL];
		if (opt == IPOPT_EOL)
			break;
		if (opt == IPOPT_NOP)
			optlen = 1;
		else {
			optlen = cp[IPOPT_OLEN];
			if (optlen <= 0 || optlen > cnt)
				return(1);
		}
		switch (opt) {

		case IPOPT_SECURITY:
		{
			/* Field layout per rfc791: S(2), C(2), H(2), TCC. */
			short security;
			short compartment;
			short handling;
			long tcc;
			int i;	/* NOTE(review): unused */

			bcopy(&cp[IPOPT_OFFSET], &security, sizeof(short));
			security = ntohs(security);
			bcopy(&cp[IPOPT_OFFSET + sizeof(short)], &compartment, sizeof(short));
			compartment = ntohs(compartment);
			bcopy(&cp[IPOPT_OFFSET + (2*sizeof(short))], &handling, sizeof(short));
			handling = ntohs(handling);
			bcopy(&cp[IPOPT_OFFSET + (3*sizeof(short))], &tcc, sizeof(long int));
			tcc = ntohl(tcc);
			xti_tcp_opt->security = security;
			xti_tcp_opt->compartment = compartment;
			xti_tcp_opt->handling = handling;
			xti_tcp_opt->tcc = tcc;
			return_stat = 0; /* success */
			/*
			 * bcopy(&cp[IPOPT_OFFSET], xti_tcp_opt, optlen - 2);
			 */
		}
			break;

		default:
			break;
		};
	}
	return (return_stat);
}
/*
 * Any data in this mbuf?
 */
int mbuf_any_len(m)
	struct mbuf *m;
{
	int tot_len = 0;

	/* Walk the chain, returning as soon as any byte is found. */
	if (m)
		do {
			tot_len += m->m_len;
			if (tot_len > 0)
				return (tot_len);
			m = m->m_next;
		} while (m);
	return(tot_len);
}
#endif XTI