ehci-sched.c

From the Linux 2.4.29 operating-system kernel source. C code, 1,350 lines
in total; this capture is page 1 of 3 (EHCI periodic-schedule support:
interrupt-URB submission and isochronous stream/iTD management).
	/* NOTE(review): this is the tail of the interrupt-URB submission path;
	 * the function head (signature and the epnum/qtd_list/flags/mem_flags
	 * declarations it uses) lies before this excerpt — confirm against the
	 * full file.
	 */
	struct ehci_qh		*qh;
	struct hcd_dev		*dev;
	int			is_input;
	int			status = 0;
	struct list_head	empty;

	/* get endpoint and transfer/schedule data */
	epnum = usb_pipeendpoint (urb->pipe);
	is_input = usb_pipein (urb->pipe);
	if (is_input)
		epnum |= 0x10;	/* IN endpoints live in the upper half of dev->ep[] */

	spin_lock_irqsave (&ehci->lock, flags);
	dev = (struct hcd_dev *)urb->dev->hcpriv;

	/* get qh and force any scheduling errors: probe with an empty td list
	 * first so a schedule failure leaves the caller's qtd_list untouched.
	 */
	INIT_LIST_HEAD (&empty);
	qh = qh_append_tds (ehci, urb, &empty, epnum, &dev->ep [epnum]);
	if (qh == 0) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		if ((status = qh_schedule (ehci, qh)) != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
	BUG_ON (qh == 0);	/* the probe above guarantees the qh exists */

	/* ... update usbfs periodic stats */
	hcd_to_bus (&ehci->hcd)->bandwidth_int_reqs++;

done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (status)
		qtd_list_free (ehci, urb, qtd_list);

	return status;
}

/*-------------------------------------------------------------------------*/

/* Allocate a zeroed iso stream descriptor.  The caller owns the initial
 * reference (refcount == 1); next_uframe == -1 marks "never scheduled".
 * Returns NULL on allocation failure.
 */
static inline struct ehci_iso_stream *
iso_stream_alloc (int mem_flags)
{
	struct ehci_iso_stream *stream;

	stream = kmalloc(sizeof *stream, mem_flags);
	if (likely (stream != 0)) {
		memset (stream, 0, sizeof(*stream));
		INIT_LIST_HEAD(&stream->itd_list);
		INIT_LIST_HEAD(&stream->free_itd_list);
		stream->next_uframe = -1;
		stream->refcount = 1;
	}
	return stream;
}

/* Initialize a freshly allocated stream for one iso endpoint of dev:
 * computes max packet size / high-bandwidth multiplier, precomputes the
 * hardware buffer-pointer words (buf0/buf1/buf2, little-endian), and the
 * per-frame bandwidth figures that usbfs reports.
 */
static inline void
iso_stream_init (
	struct ehci_iso_stream	*stream,
	struct usb_device	*dev,
	int			pipe,
	unsigned		interval
)
{
	u32			buf1;
	unsigned		epnum, maxp, multi;
	int			is_input;
	long			bandwidth;

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's wMaxPacket field
	 */
	epnum = usb_pipeendpoint (pipe);
	is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
	if (is_input) {
		maxp = dev->epmaxpacketin [epnum];
		buf1 = (1 << 11);	/* iTD buf1 I/O bit: IN direction */
	} else {
		maxp = dev->epmaxpacketout [epnum];
		buf1 = 0;
	}

	/* split wMaxPacket into per-microframe transactions (multi)
	 * times payload (maxp); maxp becomes total bytes per uframe.
	 */
	multi = hb_mult(maxp);
	maxp = max_packet(maxp);
	buf1 |= maxp;
	maxp *= multi;

	stream->dev = (struct hcd_dev *)dev->hcpriv;

	stream->bEndpointAddress = is_input | epnum;
	stream->interval = interval;
	stream->maxp = maxp;

	stream->buf0 = cpu_to_le32 ((epnum << 8) | dev->devnum);
	stream->buf1 = cpu_to_le32 (buf1);
	stream->buf2 = cpu_to_le32 (multi);

	/* usbfs wants to report the average usecs per frame tied up
	 * when transfers on this endpoint are scheduled ...
	 */
	stream->usecs = HS_USECS_ISO (maxp);
	bandwidth = stream->usecs * 8;
	bandwidth /= 1 << (interval - 1);
	stream->bandwidth = bandwidth;
}

/* Drop one reference to a stream.  When only the dev->ep[] reference
 * remains (refcount == 1), release the cached iTDs back to the DMA pool,
 * clear the dev->ep[] slot, log any rescheduling that occurred, and free
 * the stream itself.  Caller must hold whatever lock protects dev->ep[]
 * — presumably ehci->lock, as in iso_stream_find(); verify against callers.
 */
static void
iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
{
	stream->refcount--;

	/* free whenever just a dev->ep reference remains.
	 * not like a QH -- no persistent state (toggle, halt)
	 */
	if (stream->refcount == 1) {
		int is_in;

		// BUG_ON (!list_empty(&stream->itd_list));

		while (!list_empty (&stream->free_itd_list)) {
			struct ehci_itd	*itd;

			itd = list_entry (stream->free_itd_list.next,
				struct ehci_itd, itd_list);
			list_del (&itd->itd_list);
			pci_pool_free (ehci->itd_pool, itd, itd->itd_dma);
		}

		/* recover the dev->ep[] index (IN eps are offset by 0x10) */
		is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0;
		stream->bEndpointAddress &= 0x0f;
		stream->dev->ep [is_in + stream->bEndpointAddress] = 0;

		if (stream->rescheduled) {
			ehci_info (ehci, "ep%d%s-iso rescheduled "
				"%lu times in %lu seconds\n",
				stream->bEndpointAddress, is_in ? "in" : "out",
				stream->rescheduled,
				((jiffies - stream->start)/HZ)
				);
		}

		kfree(stream);
	}
}

/* Take an additional reference; NULL-safe, returns its argument. */
static inline struct ehci_iso_stream *
iso_stream_get (struct ehci_iso_stream *stream)
{
	if (likely (stream != 0))
		stream->refcount++;
	return stream;
}

/* Find (or create on first use) the iso stream for urb's endpoint, under
 * ehci->lock.  Returns the stream with one reference taken for the caller,
 * or NULL if allocation failed or the dev->ep[] slot is occupied by a
 * non-iso QH.
 */
static struct ehci_iso_stream *
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
{
	unsigned		epnum;
	struct hcd_dev		*dev;
	struct ehci_iso_stream	*stream;
	unsigned long		flags;

	epnum = usb_pipeendpoint (urb->pipe);
	if (usb_pipein(urb->pipe))
		epnum += 0x10;

	spin_lock_irqsave (&ehci->lock, flags);

	dev = (struct hcd_dev *)urb->dev->hcpriv;
	stream = dev->ep [epnum];

	if (unlikely (stream == 0)) {
		stream = iso_stream_alloc(GFP_ATOMIC);
		if (likely (stream != 0)) {
			/* dev->ep owns the initial refcount */
			dev->ep[epnum] = stream;
			iso_stream_init(stream, urb->dev, urb->pipe,
					urb->interval);
		}

	/* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */
	} else if (unlikely (stream->hw_info1 != 0)) {
		ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
			urb->dev->devpath, epnum & 0x0f,
			(epnum & 0x10) ? "in" : "out");
		stream = 0;
	}

	/* caller guarantees an eventual matching iso_stream_put */
	stream = iso_stream_get (stream);

	spin_unlock_irqrestore (&ehci->lock, flags);
	return stream;
}

/*-------------------------------------------------------------------------*/

/* Allocate a zeroed itd_sched with room for `packets` trailing
 * ehci_iso_uframe entries (one per iso packet in the urb).
 * Returns NULL on allocation failure.
 */
static inline struct ehci_itd_sched *
itd_sched_alloc (unsigned packets, int mem_flags)
{
	struct ehci_itd_sched	*itd_sched;
	int			size = sizeof *itd_sched;

	size += packets * sizeof (struct ehci_iso_uframe);
	itd_sched = kmalloc (size, mem_flags);
	if (likely (itd_sched != 0)) {
		memset(itd_sched, 0, size);
		INIT_LIST_HEAD (&itd_sched->itd_list);
	}
	return itd_sched;
}

/* Precompute the per-packet iTD transaction words and buffer-page info
 * from the urb's iso_frame_desc[] so fitting the iTDs into the schedule
 * later is cheap.  Always returns 0.
 */
static int
itd_sched_init (
	struct ehci_itd_sched	*itd_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many uframes are needed for these transfers */
	itd_sched->span = urb->number_of_packets * stream->interval;

	/* figure out per-uframe itd fields that we'll need later
	 * when we fit new itds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_uframe	*uframe = &itd_sched->packet [i];
		unsigned		length;
		dma_addr_t		buf;
		u32			trans;

		length = urb->iso_frame_desc [i].length;
		buf = dma + urb->iso_frame_desc [i].offset;

		trans = EHCI_ISOC_ACTIVE;
		trans |= buf & 0x0fff;	/* offset within the 4K buffer page */
		/* interrupt only on the last packet, unless suppressed */
		if (unlikely (((i + 1) == urb->number_of_packets))
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= EHCI_ITD_IOC;
		trans |= length << 16;
		uframe->transaction = cpu_to_le32 (trans);

		/* might need to cross a buffer page within a td */
		uframe->bufp = (buf & ~(u64)0x0fff);
		buf += length;
		if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
			uframe->cross = 1;
	}
	return 0;
}

/* Release an itd_sched: its iTDs go back onto the stream's free list for
 * reuse (not to the DMA pool); the schedule struct itself is freed.
 */
static void
itd_sched_free (
	struct ehci_iso_stream	*stream,
	struct ehci_itd_sched	*itd_sched
)
{
	list_splice (&itd_sched->itd_list, &stream->free_itd_list);
	kfree (itd_sched);
}

/* Build the iTD schedule for one iso urb: allocate the itd_sched,
 * precompute packet transactions, then obtain enough iTDs (preferring
 * ones recycled on stream->free_itd_list over new pool allocations).
 * On success stores the schedule in urb->hcpriv and returns 0;
 * returns -ENOMEM (after releasing everything) on allocation failure.
 */
static int
itd_urb_transaction (
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	int			mem_flags
)
{
	struct ehci_itd		*itd;
	int			status;
	dma_addr_t		itd_dma;
	int			i;
	unsigned		num_itds;
	struct ehci_itd_sched	*itd_sched;

	itd_sched = itd_sched_alloc (urb->number_of_packets, mem_flags);
	if (unlikely (itd_sched == 0))
		return -ENOMEM;

	status = itd_sched_init (itd_sched, stream, urb);
	if (unlikely (status != 0))  {
		itd_sched_free (stream, itd_sched);
		return status;
	}

	/* one iTD covers up to 8 uframes (one frame); sub-frame intervals
	 * pack several packets per iTD, larger intervals need one each.
	 */
	if (urb->interval < 8)
		num_itds = 1 + (itd_sched->span + 7) / 8;
	else
		num_itds = urb->number_of_packets;

	/* allocate/init ITDs */
	for (i = 0; i < num_itds; i++) {

		/* free_itd_list.next might be cache-hot ... but maybe
		 * the HC caches it too. avoid that issue for now.
		 */

		/* prefer previously-allocated itds */
		if (likely (!list_empty(&stream->free_itd_list))) {
			itd = list_entry (stream->free_itd_list.prev,
					 struct ehci_itd, itd_list);
			list_del (&itd->itd_list);
			itd_dma = itd->itd_dma;
		} else
			itd = pci_pool_alloc (ehci->itd_pool, mem_flags,
					&itd_dma);

		if (unlikely (0 == itd)) {
			itd_sched_free (stream, itd_sched);
			return -ENOMEM;
		}
		memset (itd, 0, sizeof *itd);
		itd->itd_dma = itd_dma;
		list_add (&itd->itd_list, &itd_sched->itd_list);
	}

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = itd_sched;
	urb->error_count = 0;
	return 0;
}

/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */

#define SCHEDULE_SLOP	10	/* frames */

/* Pick the starting uframe for an iso urb's iTDs, either continuing the
 * stream's current schedule or searching [start..max] for a slot with
 * enough periodic bandwidth.
 * NOTE(review): this function is truncated at the end of this excerpt
 * (the `ready:`/`fail:` labels and return paths lie on the next page).
 */
static int
itd_stream_schedule (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct ehci_iso_stream	*stream
)
{
	u32			now, start, end, max;
	int			status;
	unsigned		mod = ehci->periodic_size << 3;	/* schedule length in uframes */
	struct ehci_itd_sched	*itd_sched = urb->hcpriv;

	if (unlikely (itd_sched->span > (mod - 8 * SCHEDULE_SLOP))) {
		ehci_dbg (ehci, "iso request %p too long\n", urb);
		status = -EFBIG;
		goto fail;
	}

	now = readl (&ehci->regs->frame_index) % mod;

	/* when's the last uframe this urb could start? */
	max = now + mod;
	max -= itd_sched->span;
	max -= 8 * SCHEDULE_SLOP;

	/* typical case: reuse current schedule. stream is still active,
	 * and no gaps from host falling behind (irq delays etc)
	 */
	if (likely (!list_empty (&stream->itd_list))) {
		start = stream->next_uframe;
		if (start < now)
			start += mod;
		if (likely (start < max))
			goto ready;

		/* two cases:
		 * (a) we missed some uframes ... can reschedule
		 * (b) trying to overcommit the schedule
		 * FIXME (b) should be a hard failure
		 */
	}

	/* need to schedule; when's the next (u)frame we could start?
	 * this is bigger than ehci->i_thresh allows; scheduling itself
	 * isn't free, the slop should handle reasonably slow cpus.  it
	 * can also help high bandwidth if the dma and irq loads don't
	 * jump until after the queue is primed.
	 */
	start = SCHEDULE_SLOP * 8 + (now & ~0x07);
	end = start;

	ehci_vdbg (ehci, "%s schedule from %d (%d..%d), was %d\n",
			__FUNCTION__, now, start, max,
			stream->next_uframe);

	/* NOTE:  assumes URB_ISO_ASAP, to limit complexity/bugs */
	if (likely (max > (start + urb->interval)))
		max = start + urb->interval;

	/* hack:  account for itds already scheduled to this endpoint */
	if (unlikely (list_empty (&stream->itd_list)))
		end = max;

	/* within [start..max] find a uframe slot with enough bandwidth */
	end %= mod;
	do {
		unsigned	uframe;
		int		enough_space = 1;

		/* check schedule: enough space? */
		uframe = start;
		do {
			uframe %= mod;

			/* can't commit more than 80% periodic == 100 usec */
			if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
					> (100 - stream->usecs)) {
				enough_space = 0;
				break;
			}

			/* we know urb->interval is 2^N uframes */
			uframe += urb->interval;
		} while (uframe != end);

		/* (re)schedule it here if there's enough bandwidth */
		if (enough_space) {
			start %= mod;
			if (unlikely (!list_empty (&stream->itd_list))) {
				/* host fell behind ... maybe irq latencies
				 * delayed this request queue for too long.
				 */
				stream->rescheduled++;
				pr_debug ("ehci %s devpath %d "
					"iso%d%s %d.%d skip %d.%d\n",
					ehci->pdev->slot_name,
					urb->dev->devpath,
					stream->bEndpointAddress & 0x0f,
					(stream->bEndpointAddress & USB_DIR_IN)
						? "in" : "out",
					stream->next_uframe >> 3,
					stream->next_uframe & 0x7,
					start >> 3, start & 0x7);
			}
			stream->next_uframe = start;

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?