
📄 ulpqueue.c

📁 Linux Kernel 2.6.9 for OMAP1710
💻 C
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */
	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(first_frag, last_frag);
	return retval;
}

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *event)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid;
	__u16 ssn, cssn;

	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, pos->list);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(sctp_event2skb(event)->list, pos);
	}
}
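/* Editorial note (not in the original source): the lobby queue walked
 * above holds out-of-order events sorted first by stream ID and then by
 * SSN, which is why sctp_ulpq_retrieve_ordered() can break out as soon
 * as it has passed the target stream.  sctp_ulpq_store_ordered() below
 * is what maintains that invariant on insertion.
 */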
/* Helper function to store chunks needing ordering.
 */
static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
					   struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering.  */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event = NULL;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	__u16 csid, cssn;

	in  = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (cssn != sctp_ssn_peek(in, csid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, csid);

		__skb_unlink(pos, pos->list);
		if (!event) {
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));
		} else {
			/* Attach all gathered skbs to the event.  */
			__skb_queue_tail(sctp_event2skb(event)->list, pos);
		}
	}

	/* Send event to the ULP.  */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);
}

/* Skip over an SSN. */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine.  */
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq);

	return;
}
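/* Editorial note (not in the original source): the renege helpers below
 * free buffered but not-yet-delivered events when memory is tight.  Each
 * walks its queue from the tail, so the newest (highest) TSNs are dropped
 * first, and sctp_tsnmap_renege() clears the corresponding marks so those
 * TSNs are no longer reported as received.
 */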
/* Renege 'needed' bytes from the ordering queue.
 */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	/* Walk backwards through the list, reneging the newest tsns. */
	while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Partially deliver the first message, as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				struct sctp_chunk *chunk, int gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;

	asoc = ulpq->asoc;

	/* Are we already in partial delivery mode?  */
	if (!sctp_sk(asoc->base.sk)->pd_mode) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);

		/* Send event to the ULP.  */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_sk(asoc->base.sk)->pd_mode = 1;
			ulpq->pd_mode = 1;
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      int gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed) {
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
		}
	}

	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		__u32 tsn;

		tsn = ntohl(chunk->subh.data_hdr->tsn);
		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
		sctp_ulpq_tail_data(ulpq, chunk, gfp);

		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
	}

	return;
}

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, int gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk, 0);
}
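
The SSN_lt() and TSN_lte() comparisons used throughout this file are serial (wraparound-safe) comparisons on 16-bit SSNs and 32-bit TSNs. The sketch below is a minimal user-space analogue of that idea for 16-bit SSNs, assuming two's-complement narrowing; ssn_lt() here is a hypothetical stand-in for illustration, not the kernel macro itself.

#include <stdio.h>
#include <stdint.h>

/* Serial comparison sketch: a precedes b if the signed 16-bit difference
 * is negative, so the ordering survives wraparound of the SSN space. */
static int ssn_lt(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) < 0;
}

int main(void)
{
	printf("%d\n", ssn_lt(5, 10));    /* 1: 5 precedes 10 */
	printf("%d\n", ssn_lt(65535, 2)); /* 1: 65535 precedes 2 across the wrap */
	printf("%d\n", ssn_lt(10, 5));    /* 0: 10 does not precede 5 */
	return 0;
}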
