
📄 ulpqueue.c

📁 Source code of the Stream Control Transmission Protocol (SCTP) for the Linux kernel
💻 C
📖 Page 1 of 2
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff_head temp;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		/* Do ordering if needed.  */
		if ((event) && (event->msg_flags & MSG_EOR)) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_ulpq_order(ulpq, event);
		}

		/* Send event to the ULP.  'event' is the
		 * sctp_ulpevent for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid;
	__u16 ssn, cssn;

	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}

/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
					   struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering.  */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	in  = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		/* see if this ssn has been marked by skipping */
		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
			sctp_ssn_next(in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		/* see if we have more ordered that we can deliver */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Skip over an SSN. This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine.  */
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
	return;
}

static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
		struct sk_buff_head *list, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = __skb_dequeue_tail(list)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partially deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				struct sctp_chunk *chunk,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* If the user enabled fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.   */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed) {
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
		}
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		__u32 tsn;
		tsn = ntohl(chunk->subh.data_hdr->tsn);
		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
		sctp_ulpq_tail_data(ulpq, chunk, gfp);

		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
	}

	sk_mem_reclaim(asoc->base.sk);
	return;
}

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk, 0);
}
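Note on the TSN/SSN comparisons used above: the flushing, reneging, and ordering logic relies on TSN_lte and SSN_lt rather than plain unsigned comparisons, because the 32-bit TSN and 16-bit SSN counters wrap around zero and ordinary "<" would misorder values across the wrap. Those macros are defined elsewhere in the kernel's SCTP headers and are not shown in this listing; the stand-alone sketch below, with hypothetical helper names tsn_lt, tsn_lte, and ssn_lt, only illustrates the serial-number arithmetic idea and is not the kernel's implementation.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helpers (not the kernel's macros): treat TSNs and SSNs as
 * serial numbers, so "a is before b" means the unsigned distance from a to b,
 * reinterpreted as signed, is positive.
 */
static int tsn_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;	/* a precedes b, even across a wrap */
}

static int tsn_lte(uint32_t a, uint32_t b)
{
	return a == b || tsn_lt(a, b);
}

static int ssn_lt(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) < 0;	/* same idea for 16-bit stream SSNs */
}

int main(void)
{
	/* 0xFFFFFFFF precedes 0x00000002 once the TSN space has wrapped. */
	printf("%d\n", tsn_lte(0xFFFFFFFFu, 0x00000002u));	/* prints 1 */
	/* A plain unsigned comparison would get this wrong. */
	printf("%d\n", 0xFFFFFFFFu <= 0x00000002u);		/* prints 0 */
	/* SSN 65535 precedes SSN 1 after the 16-bit counter wraps. */
	printf("%d\n", ssn_lt(65535, 1));			/* prints 1 */
	return 0;
}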
