/* outqueue.c — SCTP (Stream Control Transmission Protocol) output queue.
 * (Code-viewer page header removed: file icon and "字号:" / font-size label
 * were artifacts of the site this source was scraped from, not source text.)
 */
			 * anymore for RTT measurements. Reset rto_pending so
			 * that a new RTT measurement is started when a new
			 * data chunk is sent.
			 */
			if (chunk->rtt_in_progress) {
				chunk->rtt_in_progress = 0;
				transport->rto_pending = 0;
			}

			/* Move the chunk to the retransmit queue. The chunks
			 * on the retransmit queue are always kept in order.
			 */
			list_del_init(lchunk);
			sctp_insert_list(&q->retransmit, lchunk);
		}
	}

	SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, "
			  "cwnd: %d, ssthresh: %d, flight_size: %d, "
			  "pba: %d\n", __FUNCTION__,
			  transport, reason,
			  transport->cwnd, transport->ssthresh,
			  transport->flight_size,
			  transport->partial_bytes_acked);
}

/* Mark all the eligible packets on a transport for retransmission and force
 * one packet out.
 *
 * @q:         the association's outqueue.
 * @transport: the path whose outstanding chunks should be retransmitted.
 * @reason:    why we are retransmitting; selects both the MIB counter to
 *             bump and the congestion-window adjustment applied below.
 *
 * Errors from the forced flush are reported asynchronously via sk_err
 * (this function itself returns void).
 */
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
		     sctp_retransmit_reason_t reason)
{
	int error = 0;

	switch(reason) {
	case SCTP_RTXR_T3_RTX:
		SCTP_INC_STATS(SCTP_MIB_T3_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
		/* Update the retran path if the T3-rtx timer has expired for
		 * the current retran path.
		 */
		if (transport == transport->asoc->peer.retran_path)
			sctp_assoc_update_retran_path(transport->asoc);
		break;
	case SCTP_RTXR_FAST_RTX:
		SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
		break;
	case SCTP_RTXR_PMTUD:
		/* Path-MTU change: no cwnd adjustment, just account for it. */
		SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
		break;
	case SCTP_RTXR_T1_RTX:
		SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
		break;
	default:
		/* Unknown retransmit reason is a programming error. */
		BUG();
	}

	/* Move the eligible chunks for this transport onto q->retransmit. */
	sctp_retransmit_mark(q, transport, reason);

	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
	 * following the procedures outlined in C1 - C5.
	 */
	sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);

	error = sctp_outq_flush(q, /* rtx_timeout */ 1);

	if (error)
		q->asoc->base.sk->sk_err = -error;
}

/*
 * Transmit DATA chunks on the retransmit queue.
 * Upon return from
 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
 * need to be transmitted by the caller.
 * We assume that pkt->transport has already been set.
 *
 * The return value is a normal kernel error return value.
 */
static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
			       int rtx_timeout, int *start_timer)
{
	struct list_head *lqueue;
	struct list_head *lchunk, *lchunk1;
	struct sctp_transport *transport = pkt->transport;
	sctp_xmit_t status;
	struct sctp_chunk *chunk, *chunk1;
	struct sctp_association *asoc;
	int error = 0;

	asoc = q->asoc;
	lqueue = &q->retransmit;

	/* RFC 2960 6.3.3 Handle T3-rtx Expiration
	 *
	 * E3) Determine how many of the earliest (i.e., lowest TSN)
	 * outstanding DATA chunks for the address for which the
	 * T3-rtx has expired will fit into a single packet, subject
	 * to the MTU constraint for the path corresponding to the
	 * destination transport address to which the retransmission
	 * is being sent (this may be different from the address for
	 * which the timer expires [see Section 6.4]). Call this value
	 * K. Bundle and retransmit those K DATA chunks in a single
	 * packet to the destination endpoint.
	 *
	 * [Just to be painfully clear, if we are retransmitting
	 * because a timeout just happened, we should send only ONE
	 * packet of retransmitted data.]
	 */
	lchunk = sctp_list_dequeue(lqueue);

	while (lchunk) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* Make sure that Gap Acked TSNs are not retransmitted. A
		 * simple approach is just to move such TSNs out of the
		 * way and into a 'transmitted' queue and skip to the
		 * next chunk.
		 */
		if (chunk->tsn_gap_acked) {
			list_add_tail(lchunk, &transport->transmitted);
			lchunk = sctp_list_dequeue(lqueue);
			continue;
		}

		/* Attempt to append this chunk to the packet. */
		status = sctp_packet_append_chunk(pkt, chunk);

		switch (status) {
		case SCTP_XMIT_PMTU_FULL:
			/* Send this packet.
			 */
			if ((error = sctp_packet_transmit(pkt)) == 0)
				*start_timer = 1;

			/* If we are retransmitting, we should only
			 * send a single packet.
			 */
			if (rtx_timeout) {
				/* Put the chunk back at the head so it
				 * stays the lowest outstanding TSN.
				 */
				list_add(lchunk, lqueue);
				lchunk = NULL;
			}

			/* Bundle lchunk in the next round. */
			break;

		case SCTP_XMIT_RWND_FULL:
			/* Send this packet. */
			if ((error = sctp_packet_transmit(pkt)) == 0)
				*start_timer = 1;

			/* Stop sending DATA as there is no more room
			 * at the receiver.
			 */
			list_add(lchunk, lqueue);
			lchunk = NULL;
			break;

		case SCTP_XMIT_NAGLE_DELAY:
			/* Send this packet. */
			if ((error = sctp_packet_transmit(pkt)) == 0)
				*start_timer = 1;

			/* Stop sending DATA because of nagle delay. */
			list_add(lchunk, lqueue);
			lchunk = NULL;
			break;

		default:
			/* The append was successful, so add this chunk to
			 * the transmitted list.
			 */
			list_add_tail(lchunk, &transport->transmitted);

			/* Mark the chunk as ineligible for fast retransmit
			 * after it is retransmitted.
			 */
			if (chunk->fast_retransmit > 0)
				chunk->fast_retransmit = -1;

			*start_timer = 1;
			q->empty = 0;

			/* Retrieve a new chunk to bundle. */
			lchunk = sctp_list_dequeue(lqueue);
			break;
		}

		/* If we are here due to a retransmit timeout or a fast
		 * retransmit and if there are any chunks left in the retransmit
		 * queue that could not fit in the PMTU sized packet, they need
		 * to be marked as ineligible for a subsequent fast retransmit.
		 */
		if (rtx_timeout && !lchunk) {
			list_for_each(lchunk1, lqueue) {
				chunk1 = list_entry(lchunk1, struct sctp_chunk,
						    transmitted_list);
				if (chunk1->fast_retransmit > 0)
					chunk1->fast_retransmit = -1;
			}
		}
	}

	return error;
}

/* Cork the outqueue so queued chunks are really queued. */
int sctp_outq_uncork(struct sctp_outq *q)
{
	int error = 0;

	/* Clear the cork flag if set; the flush below runs regardless, so
	 * uncorking an uncorked queue still pushes out pending chunks.
	 */
	if (q->cork)
		q->cork = 0;
	error = sctp_outq_flush(q, 0);
	return error;
}

/*
 * Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
 *
 * Note: This function can be called from multiple contexts so appropriate
 * locking concerns must be made.
 * Today we use the sock lock to protect
 * this function.
 */
int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
	struct sctp_packet *packet;
	struct sctp_packet singleton;
	struct sctp_association *asoc = q->asoc;
	__u16 sport = asoc->base.bind_addr.port;
	__u16 dport = asoc->peer.port;
	__u32 vtag = asoc->peer.i.init_tag;
	struct sctp_transport *transport = NULL;
	struct sctp_transport *new_transport;
	struct sctp_chunk *chunk, *tmp;
	sctp_xmit_t status;
	int error = 0;
	int start_timer = 0;

	/* These transports have chunks to send. */
	struct list_head transport_list;
	struct list_head *ltransport;

	INIT_LIST_HEAD(&transport_list);
	packet = NULL;

	/*
	 * 6.10 Bundling
	 *   ...
	 *   When bundling control chunks with DATA chunks, an
	 *   endpoint MUST place control chunks first in the outbound
	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
	 *   within a SCTP packet in increasing order of TSN.
	 *   ...
	 */
	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		list_del_init(&chunk->list);

		/* Pick the right transport to use. */
		new_transport = chunk->transport;

		if (!new_transport) {
			/*
			 * If we have a prior transport pointer, see if
			 * the destination address of the chunk
			 * matches the destination address of the
			 * current transport.  If not a match, then
			 * try to look up the transport with a given
			 * destination address.  We do this because
			 * after processing ASCONFs, we may have new
			 * transports created.
			 */
			if (transport &&
			    sctp_cmp_addr_exact(&chunk->dest,
						&transport->ipaddr))
				new_transport = transport;
			else
				new_transport = sctp_assoc_lookup_paddr(asoc,
								&chunk->dest);

			/* if we still don't have a new transport, then
			 * use the current active path.
			 */
			if (!new_transport)
				new_transport = asoc->peer.active_path;
		} else if ((new_transport->state == SCTP_INACTIVE) ||
			   (new_transport->state == SCTP_UNCONFIRMED)) {
			/* If the chunk is Heartbeat or Heartbeat Ack,
			 * send it to chunk->transport, even if it's
			 * inactive.
			 *
			 * 3.3.6 Heartbeat Acknowledgement:
			 * ...
			 * A HEARTBEAT ACK is always sent to the source IP
			 * address of the IP datagram containing the
			 * HEARTBEAT chunk to which this ack is responding.
			 * ...
			 *
			 * ASCONF_ACKs also must be sent to the source.
			 */
			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
			    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
			    chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
				new_transport = asoc->peer.active_path;
		}

		/* Are we switching transports?
		 * Take care of transport locks.
		 */
		if (new_transport != transport) {
			transport = new_transport;
			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}
			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		}

		switch (chunk->chunk_hdr->type) {
		/*
		 * 6.10 Bundling
		 *   ...
		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
		 *   COMPLETE with any other chunks.  [Send them immediately.]
		 */
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		case SCTP_CID_SHUTDOWN_COMPLETE:
			/* Build a throw-away singleton packet on the stack
			 * so these chunks never share a packet with others.
			 */
			sctp_packet_init(&singleton, transport, sport, dport);
			sctp_packet_config(&singleton, vtag, 0);
			sctp_packet_append_chunk(&singleton, chunk);
			error = sctp_packet_transmit(&singleton);
			if (error < 0)
				return error;
			break;

		case SCTP_CID_ABORT:
			/* A T-bit ABORT carries our own vtag, not the
			 * peer's; then fall through to normal transmit.
			 */
			if (sctp_test_T_bit(chunk)) {
				packet->vtag = asoc->c.my_vtag;
			}
		case SCTP_CID_SACK:
		case SCTP_CID_HEARTBEAT:
		case SCTP_CID_HEARTBEAT_ACK:
		case SCTP_CID_SHUTDOWN:
		case SCTP_CID_SHUTDOWN_ACK:
		case SCTP_CID_ERROR:
		case SCTP_CID_COOKIE_ECHO:
		case SCTP_CID_COOKIE_ACK:
		case SCTP_CID_ECN_ECNE:
		case SCTP_CID_ECN_CWR:
		case SCTP_CID_ASCONF:
		case SCTP_CID_ASCONF_ACK:
		case SCTP_CID_FWD_TSN:
			sctp_packet_transmit_chunk(packet, chunk);
			break;

		default:
			/* We built a chunk with an illegal type! */
			BUG();
		}
	}

	/* Is it OK to send data chunks? */
	switch (asoc->state) {
	case SCTP_STATE_COOKIE_ECHOED:
		/* Only allow bundling when this packet has a COOKIE-ECHO
		 * chunk.
		 */
		if (!packet || !packet->has_cookie_echo)
			break;

		/* fallthru */
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
		/*
		 * RFC 2960 6.1  Transmission of DATA Chunks
		 *
		 * C) When the time comes for the sender to transmit,
		 * before sending new DATA chunks, the sender MUST
		 * first transmit any outstanding DATA chunks which
		 * are marked for retransmission (limited by the
		 * current cwnd).
		 */
		if (!list_empty(&q->retransmit)) {
			if (transport == asoc->peer.retran_path)
				goto retran;

			/* Switch transports & prepare the packet. */
			transport = asoc->peer.retran_path;

			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}

			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		retran:
			error = sctp_outq_flush_rtx(q, packet,
						    rtx_timeout, &start_timer);

			if (start_timer)
				sctp_transport_reset_timers(transport);

			/* This can happen on COOKIE-ECHO resend.  Only
			 * one chunk can get bundled with a COOKIE-ECHO.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;

			/* Don't send new data if there is still data
			 * waiting to retransmit.
			 */
			if (!list_empty(&q->retransmit))
				goto sctp_flush_out;
		}

		/* Finally, transmit new packets. */
		start_timer = 0;
		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
			 * stream identifier.
			 */
			if (chunk->sinfo.sinfo_stream >=
			    asoc->c.sinit_num_ostreams) {

				/* Mark as failed send. */
				sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
				sctp_chunk_free(chunk);
				continue;
			}

			/* Has this chunk expired? */
			if (sctp_chunk_abandoned(chunk)) {
				sctp_chunk_fail(chunk, 0);
				sctp_chunk_free(chunk);
				continue;
			}

			/* If there is a specified transport, use it.
			 * Otherwise, we want to use the active path.
			 */
			new_transport = chunk->transport;
/* (Code-viewer footer removed — keyboard-shortcut help from the site this
 * source was scraped from, not part of the source file:
 *   copy code: Ctrl+C, search code: Ctrl+F, fullscreen: F11,
 *   toggle theme: Ctrl+Shift+D, show shortcuts: ?,
 *   increase font size: Ctrl+=, decrease font size: Ctrl+-.)
 */