⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ehci-sched.c

📁 底层驱动开发
💻 C
📖 第 1 页 / 共 4 页
字号:
			return 0;		/* we know urb->interval is 2^N uframes */
		uframe += period;
	} while (uframe < mod);
	return 1;
}

/*
 * Can a full/low speed stream (one that talks through a hub's transaction
 * translator) use the periodic schedule starting at 'uframe', recurring
 * every 'period_uframes' microframes?  Checks TT availability and the
 * per-microframe bandwidth budget for every occurrence in the schedule.
 *
 * Returns 1 (and records the SPLIT/CSPLIT mask in stream->splits) when
 * the slot fits; 0 when it does not.
 */
static inline int
sitd_slot_ok (
	struct ehci_hcd		*ehci,
	u32			mod,		/* schedule size, in uframes */
	struct ehci_iso_stream	*stream,
	u32			uframe,		/* proposed start uframe */
	struct ehci_iso_sched	*sched,
	u32			period_uframes	/* polling period, in uframes */
)
{
	u32			mask, tmp;
	u32			frame, uf;

	mask = stream->raw_mask << (uframe & 7);

	/* for IN, don't wrap CSPLIT into the next frame */
	if (mask & ~0xffff)
		return 0;

	/* this multi-pass logic is simple, but performance may
	 * suffer when the schedule data isn't cached.
	 */

	/* check bandwidth */
	uframe %= period_uframes;
	do {
		u32		max_used;

		frame = uframe >> 3;
		uf = uframe & 7;

		/* tt must be idle for start(s), any gap, and csplit.
		 * assume scheduling slop leaves 10+% for control/bulk.
		 */
		if (!tt_no_collision (ehci, period_uframes << 3,
				stream->udev, frame, mask))
			return 0;

		/* check starts (OUT uses more than one) */
		max_used = 100 - stream->usecs;
		for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) {
			if (periodic_usecs (ehci, frame, uf) > max_used)
				return 0;
		}

		/* for IN, check CSPLIT */
		if (stream->c_usecs) {
			max_used = 100 - stream->c_usecs;
			do {
				tmp = 1 << uf;
				tmp <<= 8;
				if ((stream->raw_mask & tmp) == 0)
					continue;
				if (periodic_usecs (ehci, frame, uf)
						> max_used)
					return 0;
			} while (++uf < 8);
		}

		/* we know urb->interval is 2^N uframes */
		uframe += period_uframes;
	} while (uframe < mod);

	/* NOTE(review): this relies on period_uframes being a multiple of 8
	 * (full speed periods are whole frames, shifted <<3 by the caller)
	 * so that (uframe & 7) still names the original start slot after the
	 * loop above incremented uframe -- confirm.
	 */
	stream->splits = cpu_to_le32(stream->raw_mask << (uframe & 7));
	return 1;
}

/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */

#define SCHEDULE_SLOP	10	/* frames */

/*
 * Pick the (u)frame at which this iso urb will start (URB_ISO_ASAP
 * semantics only).  Reuses the stream's running schedule when one is
 * active; otherwise probes candidate slots for bandwidth through
 * itd_slot_ok()/sitd_slot_ok().
 *
 * Called under ehci->lock (see itd_submit).  On success returns 0 with
 * stream->next_uframe and urb->start_frame set; on failure frees the
 * scheduling data, clears urb->hcpriv, and returns -EFBIG, -EL2NSYNC,
 * or -ENOSPC.
 */
static int
iso_stream_schedule (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct ehci_iso_stream	*stream
)
{
	u32			now, start, max, period;
	int			status;
	unsigned		mod = ehci->periodic_size << 3;	/* uframes */
	struct ehci_iso_sched	*sched = urb->hcpriv;

	/* the request must leave room for the scheduling slop */
	if (sched->span > (mod - 8 * SCHEDULE_SLOP)) {
		ehci_dbg (ehci, "iso request %p too long\n", urb);
		status = -EFBIG;
		goto fail;
	}

	if ((stream->depth + sched->span) > mod) {
		ehci_dbg (ehci, "request %p would overflow (%d+%d>%d)\n",
			urb, stream->depth, sched->span, mod);
		status = -EFBIG;
		goto fail;
	}

	now = readl (&ehci->regs->frame_index) % mod;

	/* when's the last uframe this urb could start? */
	max = now + mod;

	/* typical case: reuse current schedule. stream is still active,
	 * and no gaps from host falling behind (irq delays etc)
	 */
	if (likely (!list_empty (&stream->td_list))) {
		start = stream->next_uframe;
		if (start < now)
			start += mod;
		if (likely ((start + sched->span) < max))
			goto ready;
		/* else fell behind; someday, try to reschedule */
		status = -EL2NSYNC;
		goto fail;
	}

	/* need to schedule; when's the next (u)frame we could start?
	 * this is bigger than ehci->i_thresh allows; scheduling itself
	 * isn't free, the slop should handle reasonably slow cpus.  it
	 * can also help high bandwidth if the dma and irq loads don't
	 * jump until after the queue is primed.
	 */
	start = SCHEDULE_SLOP * 8 + (now & ~0x07);
	start %= mod;
	stream->next_uframe = start;

	/* NOTE:  assumes URB_ISO_ASAP, to limit complexity/bugs */

	period = urb->interval;
	if (!stream->highspeed)
		period <<= 3;	/* full speed interval is in frames */

	/* find a uframe slot with enough bandwidth */
	for (; start < (stream->next_uframe + period); start++) {
		int		enough_space;

		/* check schedule: enough space? */
		if (stream->highspeed)
			enough_space = itd_slot_ok (ehci, mod, start,
					stream->usecs, period);
		else {
			/* skip uframes 6 and 7 for full/low speed starts */
			if ((start % 8) >= 6)
				continue;
			enough_space = sitd_slot_ok (ehci, mod, stream,
					start, sched, period);
		}

		/* schedule it here if there's enough bandwidth */
		if (enough_space) {
			stream->next_uframe = start % mod;
			goto ready;
		}
	}

	/* no room in the schedule */
	ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n",
		list_empty (&stream->td_list) ? "" : "re",
		urb, now, max);
	status = -ENOSPC;

fail:
	iso_sched_free (stream, sched);
	urb->hcpriv = NULL;
	return status;

ready:
	/* report high speed start in uframes; full speed, in frames */
	urb->start_frame = stream->next_uframe;
	if (!stream->highspeed)
		urb->start_frame >>= 3;
	return 0;
}

/*-------------------------------------------------------------------------*/

/* fill in the static, per-stream fields of a freshly allocated iTD */
static inline void
itd_init (struct ehci_iso_stream *stream, struct ehci_itd *itd)
{
	int i;

	/* it's been recently zeroed */
	itd->hw_next = EHCI_LIST_END;
	itd->hw_bufp [0] = stream->buf0;
	itd->hw_bufp [1] = stream->buf1;
	itd->hw_bufp [2] = stream->buf2;

	/* -1 marks "no packet scheduled in this uframe" */
	for (i = 0; i < 8; i++)
		itd->index[i] = -1;

	/* All other fields are filled when scheduling */
}

/*
 * Copy one packet's transaction word and buffer pointer from the
 * precomputed iso_sched into the iTD slot for 'uframe'.
 */
static inline void
itd_patch (
	struct ehci_itd		*itd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index,	/* packet index within the urb */
	u16			uframe
)
{
	struct ehci_iso_packet	*uf = &iso_sched->packet [index];
	unsigned		pg = itd->pg;

	// BUG_ON (pg == 6 && uf->cross);

	uframe &= 0x07;
	itd->index [uframe] = index;

	itd->hw_transaction [uframe] = uf->transaction;
	itd->hw_transaction [uframe] |= cpu_to_le32 (pg << 12);
	itd->hw_bufp [pg] |= cpu_to_le32 (uf->bufp & ~(u32)0);
	itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely (uf->cross)) {
		/* buffer crosses into the next 4K page; use the next
		 * page-pointer slot for the remainder
		 */
		u64	bufp = uf->bufp + 4096;
		itd->pg = ++pg;
		itd->hw_bufp [pg] |= cpu_to_le32 (bufp & ~(u32)0);
		itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(bufp >> 32));
	}
}

/*
 * Activate an iTD by prepending it to the periodic schedule for 'frame'.
 * The wmb() orders all earlier descriptor writes before the frame-list
 * entry update that makes the iTD visible to the controller.
 */
static inline void
itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
	/* always prepend ITD/SITD ... only QH tree is order-sensitive */
	itd->itd_next = ehci->pshadow [frame];
	itd->hw_next = ehci->periodic [frame];
	ehci->pshadow [frame].itd = itd;
	itd->frame = frame;
	wmb ();
	ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD;
}

/* fit urb's itds into the selected schedule slot; activate as needed */
static int
itd_link_urb (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,	/* schedule size, in uframes */
	struct ehci_iso_stream	*stream
)
{
	int			packet;
	unsigned		next_uframe, uframe, frame;
	struct ehci_iso_sched	*iso_sched = urb->hcpriv;
	struct ehci_itd		*itd;

	next_uframe = stream->next_uframe % mod;

	/* first urb on this stream: account for its bandwidth */
	if (unlikely (list_empty(&stream->td_list))) {
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;
		ehci_vdbg (ehci,
			"schedule devp %s ep%d%s-iso period %d start %d.%d\n",
			urb->dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ?
				"in" : "out",
			urb->interval,
			next_uframe >> 3, next_uframe & 0x7);
		stream->start = jiffies;
	}
	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill iTDs uframe by uframe */
	for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
		if (itd == NULL) {
			/* ASSERT:  we have all necessary itds */
			// BUG_ON (list_empty (&iso_sched->td_list));

			/* ASSERT:  no itds for this endpoint in this uframe */

			itd = list_entry (iso_sched->td_list.next,
					struct ehci_itd, itd_list);
			list_move_tail (&itd->itd_list, &stream->td_list);
			itd->stream = iso_stream_get (stream);
			itd->urb = usb_get_urb (urb);
			itd_init (stream, itd);
		}

		uframe = next_uframe & 0x07;
		frame = next_uframe >> 3;

		itd->usecs [uframe] = stream->usecs;
		itd_patch (itd, iso_sched, packet, uframe);

		next_uframe += stream->interval;
		stream->depth += stream->interval;
		next_uframe %= mod;
		packet++;

		/* link completed itds into the schedule */
		if (((next_uframe >> 3) != frame)
				|| packet == urb->number_of_packets) {
			itd_link (ehci, frame % ehci->periodic_size, itd);
			itd = NULL;
		}
	}
	stream->next_uframe = next_uframe;

	/* don't need that schedule data any more */
	iso_sched_free (stream, iso_sched);
	urb->hcpriv = NULL;

	timer_action (ehci, TIMER_IO_WATCHDOG);
	/* first periodic transfer overall: turn the periodic schedule on */
	if (unlikely (!ehci->periodic_sched++))
		return enable_periodic (ehci);
	return 0;
}

#define	ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)

/*
 * Process a retired iTD: record per-packet status/length into the urb,
 * recycle the iTD onto the stream's free list, and -- when this iTD
 * carried the urb's last packet -- give the urb back and update the
 * periodic-schedule and bandwidth bookkeeping.
 *
 * Returns 1 when the urb was completed (given back), else 0.
 */
static unsigned
itd_complete (
	struct ehci_hcd	*ehci,
	struct ehci_itd	*itd,
	struct pt_regs	*regs
) {
	struct urb				*urb = itd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;
	unsigned				uframe;
	int					urb_index = -1;
	struct ehci_iso_stream			*stream = itd->stream;
	struct usb_device			*dev;

	/* for each uframe with a packet */
	for (uframe = 0; uframe < 8; uframe++) {
		if (likely (itd->index[uframe] == -1))
			continue;
		urb_index = itd->index[uframe];
		desc = &urb->iso_frame_desc [urb_index];

		t = le32_to_cpup (&itd->hw_transaction [uframe]);
		itd->hw_transaction [uframe]
			= 0;
		stream->depth -= stream->interval;

		/* report transfer status */
		if (unlikely (t & ISO_ERRS)) {
			urb->error_count++;
			if (t & EHCI_ISOC_BUF_ERR)
				desc->status = usb_pipein (urb->pipe)
					? -ENOSR  /* hc couldn't read */
					: -ECOMM; /* hc couldn't write */
			else if (t & EHCI_ISOC_BABBLE)
				desc->status = -EOVERFLOW;
			else /* (t & EHCI_ISOC_XACTERR) */
				desc->status = -EPROTO;

			/* HC need not update length with this error */
			if (!(t & EHCI_ISOC_BABBLE))
				desc->actual_length = EHCI_ITD_LENGTH (t);
		} else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
			desc->status = 0;
			desc->actual_length = EHCI_ITD_LENGTH (t);
		}
	}

	usb_put_urb (urb);
	itd->urb = NULL;
	itd->stream = NULL;
	list_move (&itd->itd_list, &stream->free_list);
	iso_stream_put (ehci, stream);

	/* handle completion now? */
	if (likely ((urb_index + 1) != urb->number_of_packets))
		return 0;

	/* ASSERT: it's really the last itd for this urb
	list_for_each_entry (itd, &stream->td_list, itd_list)
		BUG_ON (itd->urb == urb);
	 */

	/* give urb back to the driver ... can be out-of-order */
	dev = usb_get_dev (urb->dev);
	ehci_urb_done (ehci, urb, regs);
	urb = NULL;

	/* defer stopping schedule; completion can submit */
	ehci->periodic_sched--;
	if (unlikely (!ehci->periodic_sched))
		(void) disable_periodic (ehci);
	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;

	/* last urb on this stream: release its bandwidth accounting */
	if (unlikely (list_empty (&stream->td_list))) {
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;
		ehci_vdbg (ehci,
			"deschedule devp %s ep%d%s-iso\n",
			dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ?
				"in" : "out");
	}
	iso_stream_put (ehci, stream);
	usb_put_dev (dev);

	return 1;
}

/*-------------------------------------------------------------------------*/

/*
 * Submit one iso urb using iTDs: find/validate its stream, allocate the
 * needed iTDs without holding locks, then schedule and link the urb
 * under ehci->lock.  Returns 0 or a negative error; on any failure the
 * stream reference obtained here is dropped.
 */
static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
	unsigned mem_flags)
{
	int			status = -EINVAL;
	unsigned long		flags;
	struct ehci_iso_stream	*stream;

	/* Get iso_stream head */
	stream = iso_stream_find (ehci, urb);
	if (unlikely (stream == NULL)) {
		ehci_dbg (ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (unlikely (urb->interval != stream->interval)) {
		ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
			stream->interval, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
		__FUNCTION__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length,
		urb->number_of_packets, urb->interval,
		stream);
#endif

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction (stream, ehci, urb, mem_flags);
	if (unlikely (status < 0)) {
		ehci_dbg (ehci, "can't init itds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave (&ehci->lock, flags);
	status = iso_stream_schedule (ehci, urb, stream);
	if (likely (status == 0))
		itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
	spin_unlock_irqrestore (&ehci->lock, flags);

done:
	if (unlikely (status < 0))
		iso_stream_put (ehci, stream);
	return status;
}

#ifdef CONFIG_USB_EHCI_SPLIT_ISO

/*-------------------------------------------------------------------------*/

/*
 * "Split ISO TDs" ... used for USB 1.1 devices going through the
 * TTs in USB 2.0 hubs.  These need microframe scheduling.
*/static inline voidsitd_sched_init (	struct ehci_iso_sched	*iso_sched,	struct ehci_iso_stream	*stream,	struct urb		*urb){	unsigned	i;	dma_addr_t	dma = urb->transfer_dma;	/* how many frames are needed for these transfers */	iso_sched->span = urb->number_of_packets * stream->interval;	/* figure out per-frame sitd fields that we'll need later	 * when we fit new sitds into the schedule.	 */	for (i = 0; i < urb->number_of_packets; i++) {		struct ehci_iso_packet	*packet = &iso_sched->packet [i];		unsigned		length;		dma_addr_t		buf;		u32			trans;		length = urb->iso_frame_desc [i].length & 0x03ff;		buf = dma + urb->iso_frame_desc [i].offset;		trans = SITD_STS_ACTIVE;		if (((i + 1) == urb->number_of_packets)				&& !(urb->transfer_flags & URB_NO_INTERRUPT))

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -