
📄 tcpmisc.c

📁 TCP/IP protocol stack and several services for embedded systems
💻 C
📖 Page 1 of 5
		if (source == TCP_MSS_SOURCE_PEER) {
			/* We're configured to use the peer's value and this is it,
			   so use the passed-in value */
			its_been_decided = 1;
			svp->sv_flags2 |= SV_GOT_PEER_MSS_OPTION;
		} else if (svp->sv_flags2 & SV_GOT_PEER_MSS_OPTION) {
			/* We're configured to use the peer's value and we've already
			   got it, so keep the value we already have */
			mss = svp->sv_mss;
			its_been_decided = 1;
		}
	} /* if we're configured to use the peer's option */
#ifdef TCP_TRANSACTION_TCP
	/* If this is a transaction TCP connection, we start out using the cached MSS
	   for the peer on this connection. If this MSS comes from the peer, we replace
	   the cached value unconditionally with the MSS from the peer. */
	else if (svp->sv_t_tcp_flags & SV_TRANSACTION) {
		if (source == TCP_MSS_SOURCE_PEER) {
			its_been_decided = 1;
		}
	}
#endif /* TCP_TRANSACTION_TCP */

	/* Unless the above processing has already decided which value to use,
	   take the lesser of the passed-in value and the previously set value,
	   if any. */
	if (!its_been_decided) {
		if (svp->sv_mss  &&  svp->sv_mss < mss)
			mss = svp->sv_mss;
	}

	/* If the new value of the MSS is different from the previous value, purge
	   the list of saved mss-sized messages, to make sure that no smaller message
	   is re-used in sqxmit when sending an mss-sized segment with the new, larger
	   mss size (which would cause an overrun beyond the end of the message). This
	   situation could arise with transaction TCP when the MSS option from the peer
	   is larger than, and overrides, the cached value for that peer. In the future,
	   it could also arise via path MTU discovery if the MTU of a path changes
	   during the course of a connection. */
	critical;
	if (svp->sv_mss != mss) {
		tcp_free_saved_messages(svp);
	}

	svp->sv_mss = mss;
	svp->sv_mssd = (mss - tcp_seg_optsize(svp, 0, &(svp->sv_dseg_options)));
								/* max size of user data in a mss-sized segment */
	normal;

#ifdef TCP_SS_CA_FRETR_FREC
	/* The congestion window of a connection is initialized at a single MSS. (By
	   putting this line of code here, we are assuming that "tcp_set_send_mss" is
	   called only during connection setup. This could change later when and if we
	   implement some kind of dynamic adjustment of MSS based on receiving ICMP
	   notifications.) */
	svp->sv_cwnd = svp->sv_mssd;
	svp->sv_ssthresh = TCP_MAX_WINDOW_FIELD_VALUE;
#endif /* TCP_SS_CA_FRETR_FREC */

	/* Also need to know the size of messages that will be allocated
	   for mss-sized packets and ack-sized packets. */
	svp->sv_mss_m_len = M_NEW_ALLOCLEN_FOR_SIZE(nc_hsize + sop->so_hsize + mss);
} /* tcp_set_send_mss */
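/* Hedged sketch (not part of the original tcpmisc.c): a self-contained model of
 * the MSS selection rule applied above. Unless the peer's MSS option has been
 * declared authoritative, keep the smaller of the newly offered value and any
 * previously configured value, then seed the congestion window with one
 * data-sized segment, mirroring how sv_cwnd is seeded from sv_mssd under
 * TCP_SS_CA_FRETR_FREC. The names clamp_send_mss, prev_mss and opt_overhead
 * are illustrative assumptions, not identifiers from this stack. */
static unsigned short clamp_send_mss(unsigned short prev_mss,      /* 0 if none set yet */
                                     unsigned short offered_mss,   /* new candidate value */
                                     unsigned short opt_overhead,  /* TCP option bytes per segment */
                                     unsigned long *initial_cwnd)  /* out: one data MSS */
{
    unsigned short mss = offered_mss;

    if (prev_mss != 0 && prev_mss < mss)        /* the "take the lesser" rule */
        mss = prev_mss;

    if (initial_cwnd != (unsigned long *)0)
        *initial_cwnd = (unsigned long)(mss - opt_overhead);

    return mss;
}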
/***************************************************************************/
/* Set the receive queue limits to reflect the window setting & MSS.  This
 * routine is called once at the start of a connection: out of the tcp_dink
 * routine when sending a SYN|ACK or out of tcp_rsyn on receipt of a SYN|ACK.
 * Until then, the receive queue is not opened.
 */
export  void    tcp_rmax_set (fast tcpsv_t * svp)
{
    fast    so_t    * sop;

    sop = sv_valid_sop(svp, sop);
    if ( sop == (so_t *)0 )
        return;

#ifdef TCP_WND_SCALE
    /* If the receive buffer size has been set to a value larger than can be
       supported by the window scale, reduce it to the maximum supportable by
       the window scale. */
    if (svp->sv_rq_max > ( (u32) (65535 << svp->sv_rwndscale) ) ) {
        svp->sv_rq_max = (65535 << svp->sv_rwndscale);
    }
#endif

    if ( sop->so_rq.gq_max == 0 ) {
        svp->sv_rwindow = svp->sv_rq_max;

        /* Question from MBM: Why is the following statement here? The only use
           of so_psize at all related to TCP seems to be in the SO_MAXSEG get
           socket option. It would seem that this statement logically belongs
           in tcp_set_send_mss. */
        /* set maximum size of writes allowed */
        sop->so_psize = svp->sv_mss;
    }
    gq_max_set(&sop->so_rq, (int)svp->sv_rwindow);
    gq_max_set(&sop->so_hq, (int)svp->sv_rwindow);
} /* tcp_rmax_set */
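/* Hedged sketch (not from this file): the TCP_WND_SCALE clamp above enforces
 * the largest window advertisable with a given window-scale factor, namely
 * 65535 << scale. The helper below models that ceiling; its name and the use
 * of unsigned long are assumptions for illustration only. */
static unsigned long max_scaled_window(unsigned int rwndscale)
{
    if (rwndscale > 14)         /* RFC 7323 limits the shift count to 14 */
        rwndscale = 14;
    return ((unsigned long)65535) << rwndscale;
}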
/*************************************************************************
 *                                                                       *
 *  Function :                                                           *
 *                                                                       *
 *  Description :                                                        *
 *                                                                       *
 *  Parameters : None.                                                   *
 *                                                                       *
 *  Return : None.                                                       *
 *                                                                       *
 *************************************************************************/
/* Try to target a state vector */
export  st      tcp_target (fast m * mp)
{
    fast    tcpsv_t * svp, * tsvp;
    fast    u16     sport, dport;
    fast    TCPH_T  * tcphp;                /* TCP header pointer */
#ifdef TRACE
    so_t    * sop;
#endif
    use_critical;

#ifdef TCP_QUIET
    /* If TCP is in its quiet period after system bootup, don't process any
       incoming segments */
    if (tcp_quiet) {
        return (st)mp->m_dispfn;
    }
#endif

    tcphp = (TCPH_T *)mp->m_cp;
    sport = NetToHost16(&tcphp[TCPH_SPORT]);
    dport = NetToHost16(&tcphp[TCPH_DPORT]);
    TCP_TRACE_SEGMENT(mp, 1);

    tsvp = (tcpsv_t *)0;    /* state vector we are targeting */
    critical;               /* because we are examining state-vectors un-bound */

    /* This search assumes that any state vectors spawned from a bound listening
       socket will be encountered before the listening socket itself is
       encountered ("tcpattach", called from tcp_passive, puts them in that
       order via q_in). If this assumption is ever false, then later segments
       arriving on the spawned connection will be targeted to the listening
       socket and cause a reset (since listening sockets can only properly
       receive SYNs).  -- MBM */
    for (svp = (tcpsv_t *)tcp_q.q_next; svp != (tcpsv_t *)&tcp_q;
            svp = (tcpsv_t *)svp->sv_q.q_next)  {
        fast    u16     tport;
#ifdef MSD_DEBUG
        ++tcptargz;
#endif
        if (svp->sv_state == CLOSED)            /* don't target closed state vectors */
            continue;
        tport = ntohs(svp->sv_src.ip_port);     /* target port */
        if (tport == dport) {   /* destination port matches */
            /* Is this state vector connected to a remote peer yet, and if so,
               is this segment from that remote peer? */
            if ( (ntohs(svp->sv_dest.ip_port) == sport)
            &&  (svp->sv_flags & SV_SEQNO)) {
                /* source port matches */
                if (svp->sv_dest.ip_nethost == mp->m_src.a_ipa.ip_nethost) {
                    tsvp = svp;     /* bullseye */
                    break;          /* look no further */
                }
            }

            /* This state vector is not connected to a remote peer yet. Is it a
               listening socket, and if so, is the segment directed at the local
               IP address to which this listening socket is bound? Or, if it is
               not bound to a particular local IP address, provisionally set
               this as the target socket. */
            if (svp->sv_state == LISTEN) {
                if (svp->sv_src.ip_nethost == INADDR_ANY) {
                    tsvp = svp;     /* real close */
                } else if (svp->sv_src.ip_nethost == mp->m_dest.a_ipa.ip_nethost) {
                    tsvp = svp;     /* bullseye */
                    break;          /* look no further */
                }
            }
        /* MBM -- the case below seems to be: if we have a listening socket that
           is not bound to any particular port, and we haven't yet found a
           listening socket for this segment, use this socket. */
        } else if (tport == 0  &&  svp->sv_state == LISTEN  &&  tsvp == (tcpsv_t *)0)
            tsvp = svp;
    } /* for */

    if (tsvp != (tcpsv_t *)0) {
        sv_m_bind(tsvp, mp, sv_m_detach);
#ifdef  TRACE
        if (tcp_tstate) {
            os_printf("tcp_target: [s:%s, ", ipa2str(&tsvp->sv_src, (char *)0));
            os_printf("d:%s](%s) [", ipa2str(&tsvp->sv_dest, (char *)0), tcp_st(tsvp->sv_state));
            if (sv_valid_sop(tsvp, sop))
                os_printf("%d", tsvp->sv_soindx);
            else
                os_printf("*");
            os_printf("] <= [s:%s, ", ipa2str(&mp->m_src.a_ipa, (char *)0));
            os_printf("d:%s], <%s>\n", ipa2str(&mp->m_dest.a_ipa, (char *)0),
                                       tcp_pf(NetToHost16(&tcphp[TCPH_FLAGS])));
        }
#endif
        normal;
        /* Process the arrival of a TCP packet. */
#ifndef  AVOID_MSM
        return (st)tcp_net_deliver;
#else
        return(tcp_net_deliver(mp));
#endif
    }
    else
        debug3(tcp_debug, "tcp_target: %d <= %d, %s\n", dport, sport,
                          tcp_pf(NetToHost16(&tcphp[TCPH_FLAGS])));
    ;
    normal;

#ifdef MSD_DEBUG
    if (msd_debug)
    {
        os_printf("tcp_target no such port: %d <= %d, %x\n", dport, sport,
                  NetToHost16(&tcphp[TCPH_FLAGS]));
    }
#endif
    debug1(tcp_debug, "tcp_target: sending reset, no such port %d\n",
           NetToHost16(&tcphp[TCPH_DPORT]));
    return (st)tcp_reset;   /* Table V [105] */
}
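/* Hedged sketch (illustrative only, not code from this stack): the preference
 * order that tcp_target's search implements. A fully connected state vector
 * whose remote port and address both match wins outright and ends the search;
 * a LISTEN state vector bound to the segment's destination address also ends
 * the search; a LISTEN state vector bound to INADDR_ANY (or, failing a port
 * match, bound to port 0) is only remembered as a provisional target. The enum
 * name below is an assumption made for this sketch. */
enum tcp_target_rank {
    TARGET_NONE = 0,
    TARGET_PROVISIONAL_LISTENER,    /* wildcard address or wildcard port */
    TARGET_BOUND_LISTENER,          /* LISTEN bound to the destination address */
    TARGET_CONNECTION               /* remote port and address both match */
};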
#ifdef TCP_SELACK_ENHANCEMENTS
/*************************************************************************/
static void tcp_remove_out_sack_block(fast tcpsv_t *svp, int wbi)
{
    int sbi;

    for (sbi = (wbi - 1); sbi >= (MAX_NUM_OUT_SACK_BLOCKS - svp->sv_num_out_sack_blocks); sbi--) {
        OS_MOVE_N( &(svp->sv_out_sack_blocks[sbi + 1]), &(svp->sv_out_sack_blocks[sbi]),
                   sizeof(tcp_sack_block_) );
    } /* for */
    (svp->sv_num_out_sack_blocks)--;
} /* tcp_remove_out_sack_block */

/*****************************************************************************/
/*  Function to adjust the TCP outgoing SACK block list according to the     */
/*  sequence numbers of a new out-of-order segment that is being added to    */
/*  the holding queue.                                                       */
/*****************************************************************************/
export void tcp_add_out_sack_block(fast tcpsv_t *svp, fast u32 seqno, int length)
{
    int sbi;
    fast u32 end_seqno;

    end_seqno = seqno + length;

    /* Before adding the new block to the list, examine the existing blocks in
       the list for any that might overlap or be contiguous with the new block.
       If any such are found, adjust the new block to include the range of the
       overlapping/contiguous block, and delete that block from the list. This
       allows us to conform to the following passage from RFC 2018, section 4:
       "The first SACK block (i.e., the one immediately following the kind and
       length fields in the option) MUST specify the contiguous block of data
       containing the segment which triggered this ACK ..." */
    /* NOTE: The case where the new block is identical to an existing block is
       supposedly taken care of while it is being decided whether to add the
       new message to the holding queue, and we shouldn't get into this
       function in that case. */
    for (sbi = (MAX_NUM_OUT_SACK_BLOCKS - svp->sv_num_out_sack_blocks);
              sbi < MAX_NUM_OUT_SACK_BLOCKS; sbi++) {
        tcp_sack_block_ *sbp = &(svp->sv_out_sack_blocks[sbi]);
        u32 sb_seqno = NetToHost32(&(sbp->begin_seqno));
        u32 sb_end_seqno = NetToHost32(&(sbp->end_seqno));

        /* If no overlap or contiguity, skip to the next list item */
        if ( MODULO32(seqno, > , sb_end_seqno)  ||  MODULO32(end_seqno, < , sb_seqno) ) {
            continue;
        } /* if */

        /* The new block overlaps or is contiguous with the block we are looking
           at. Expand the range of the new block to include the examined block. */
        if ( MODULO32(sb_seqno, < , seqno) ) {
            seqno = sb_seqno;
        } /* if */
        if ( MODULO32(sb_end_seqno, > , end_seqno) ) {
            end_seqno = sb_end_seqno;
        } /* if */

        /* Now get rid of the examined block, since it is now redundant */
        tcp_remove_out_sack_block(svp, sbi);
    } /* for */

    /* When we get down here, all blocks contiguous/overlapping (if any) with
       the new one have been consolidated into the new one, and deleted. Now put
       the new block in. But first, if the list is full, we must remove the
       oldest item to make room for the new one. */
    if (svp->sv_num_out_sack_blocks == MAX_NUM_OUT_SACK_BLOCKS) {
        tcp_remove_out_sack_block(svp, (MAX_NUM_OUT_SACK_BLOCKS - 1) );
    } /* if */
    (svp->sv_num_out_sack_blocks)++;

    sbi = (MAX_NUM_OUT_SACK_BLOCKS - svp->sv_num_out_sack_blocks);
    HostToNet32(&(svp->sv_out_sack_blocks[sbi].begin_seqno), seqno);
    HostToNet32(&(svp->sv_out_sack_blocks[sbi].end_seqno), end_seqno);
} /* tcp_add_out_sack_block */
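/* Hedged usage sketch (hypothetical sequence numbers, not from this file): if
 * the outgoing SACK list currently holds only the block [1000, 1500) and an
 * out-of-order segment with seqno 1500 and 500 bytes of data is queued, then
 *
 *     tcp_add_out_sack_block(svp, (u32)1500, 500);
 *
 * consolidates the two contiguous ranges into a single block [1000, 2000) and
 * stores it at the lowest occupied index of sv_out_sack_blocks, so it is the
 * first block reported in the next SACK option, as RFC 2018 requires. */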
/*************************************************************************/
/*  Function to adjust the TCP SACK block list according to the sequence */
/*  numbers of a segment that is being added to the receive buffer (i.e. */
/*  sv_rnxt is being advanced, and we don't want any SACK blocks to      */
/*  refer to sequence numbers that have been delivered to the receive    */
/*  buffer).                                                             */
/*************************************************************************/
export void tcp_adjust_out_sack_blocks_for_rnxt(fast tcpsv_t *svp)
{
    int sbi;
    u32 rnxt = MU32(svp->sv_rnxt);

    /* Loop through the SACK block list looking for any that refer to sequence
       numbers that (based on sv_rnxt) have been delivered to the receive
       buffer. SACK blocks that have been totally delivered to the receive
       buffer are purged from the list. For those that have been partially
       delivered, the beginning of the block is advanced beyond the part that
       has been delivered. */
    /* NOTE: It is possible that this adjustment algorithm will leave a SACK
       block that is contiguous with sv_rnxt. However, that is a very transient
       condition. If it occurs, these blocks presumably correspond to segments
       that are still in the holding queue, but which the software is now in
       the process of removing from the holding queue and delivering to the
       receive buffer. As they are delivered to the receive buffer, this
       function will be called to adjust the SACK block list accordingly. */
    for (sbi = (MAX_NUM_OUT_SACK_BLOCKS - svp->sv_num_out_sack_blocks);
              sbi < MAX_NUM_OUT_SACK_BLOCKS; sbi++) {
        tcp_sack_block_ *sbp = &(svp->sv_out_sack_blocks[sbi]);
        u32 sb_seqno = NetToHost32(&(sbp->begin_seqno));
        u32 sb_end_seqno = NetToHost32(&(sbp->end_seqno));

        /* Has sv_rnxt reached the beginning of this SACK block yet? */
        if ( MODULO32(rnxt, <= , sb_seqno) ) {
            continue;   /* no, it hasn't */
        } /* if */

        /* Is this block totally within the span of sv_rnxt? If so, remove it. */
        if ( MODULO32(rnxt, >= , sb_end_seqno) ) {
            tcp_remove_out_sack_block(svp, sbi);
            /* Note: Removing the block does not disturb the operation of this
               for loop, because the removal affects only items with lower
               indexes than the item being removed, and the index will be
               increased. */
            continue;
        } /* if */

        /* This block is partially within the span of sv_rnxt. Adjust the block
           so that it no longer includes the delivered part of the data. */
        HostToNet32(&(sbp->begin_seqno), rnxt);
    } /* for */
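/* MODULO32 is used throughout these SACK routines for sequence-number
 * comparisons but is defined elsewhere in this stack. As a hedged sketch only
 * (not the stack's actual definition), wrap-safe comparison of 32-bit TCP
 * sequence numbers is conventionally done on the signed difference, assuming a
 * 32-bit signed long on this target:
 *
 *     #define SEQ32_CMP(a, op, b)   ( ((long)((u32)(a) - (u32)(b))) op 0 )
 *
 * so that, for example, SEQ32_CMP(rnxt, >=, sb_end_seqno) remains correct even
 * when the sequence space wraps past 2^32. The name SEQ32_CMP is illustrative. */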
