ehci-sched.c (EHCI periodic scheduling, C source, part 1 of 3)

				/* If this is a split transaction, check the
				 * bandwidth available for the completion
				 * too.  check both best and worst case gaps:
				 * worst case is SPLIT near uframe end, and
				 * CSPLIT near start ... best is vice versa.
				 * Difference can be almost two uframe times.
				 *
				 * FIXME don't even bother unless we know
				 * this TT is idle in that uframe ... right
				 * now we know only one interrupt transfer
				 * will be scheduled per frame, so we don't
				 * need to update/check TT state when we
				 * schedule a split (QH, SITD, or FSTN).
				 *
				 * FIXME ehci 0.96 and above can use FSTNs
				 */
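				/* gap_uf is the gap (in uframes) between the
				 * SSPLIT and the earliest CSPLIT, so the two
				 * check_period() calls below cover both
				 * uframes a CSPLIT may fall in.
				 */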
				if (!c_usecs)
					break;
				if (check_period (ehci, frame,
						uframe + gap_uf,
						period, c_usecs) == 0)
					continue;
				if (check_period (ehci, frame,
						uframe + gap_uf + 1,
						period, c_usecs) == 0)
					continue;
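				/* record the two verified uframes in the
				 * C-mask (hw_info2 bits 15:8, one bit per
				 * uframe).  Example: uframe = 1, gap_uf = 2
				 * sets bits 11 and 12, i.e. CSPLITs allowed
				 * in uframes 3 and 4.
				 */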

				c_mask = 0x03 << (8 + uframe + gap_uf);
				c_mask = cpu_to_le32 (c_mask);
				break;
			}
			if (uframe == 8)
				continue;

			/* QH will run once each period, starting there  */
			urb->start_frame = frame;
			status = 0;

			/* reset S-frame and (maybe) C-frame masks: hw_info2
			 * keeps the uframe S-mask in bits 7:0 and the split
			 * completion C-mask in bits 15:8
			 */
			qh->hw_info2 &= ~0xffff;
			qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
			// dbg_qh ("Schedule INTR qh", ehci, qh);

			/* stuff into the periodic schedule */
			qh->qh_state = QH_STATE_LINKED;
			vdbg ("qh %p usecs %d period %d.0 starting %d.%d",
				qh, qh->usecs, period, frame, uframe);
			do {
				if (unlikely (ehci->pshadow [frame].ptr != 0)) {
// FIXME -- just link toward the end, before any qh with a shorter period,
// AND handle it already being (implicitly) linked into this frame
// AS WELL AS updating the check_period() logic
					BUG ();
				} else {
					ehci->pshadow [frame].qh = qh_get (qh);
					ehci->periodic [frame] =
						QH_NEXT (qh->qh_dma);
				}
				wmb ();
				frame += period;
			} while (frame < ehci->periodic_size);
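			/* e.g. periodic_size = 1024 and period = 8 links
			 * this qh into 128 slots, one in every 8th frame
			 */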

			/* update bandwidth utilization records (for usbfs) */
			usb_claim_bandwidth (urb->dev, urb,
				(usecs + c_usecs) / period, 0);

			/* maybe enable periodic schedule processing */
			if (!ehci->periodic_urbs++)
				enable_periodic (ehci);
			break;

		} while (frame);
	}
	spin_unlock_irqrestore (&ehci->lock, flags);
done:
	if (status)
		qtd_list_free (ehci, urb, qtd_list);

	return status;
}

static unsigned long
intr_complete (
	struct ehci_hcd	*ehci,
	unsigned	frame,
	struct ehci_qh	*qh,
	unsigned long	flags		/* caller owns ehci->lock ... */
) {
	struct ehci_qtd	*qtd;
	struct urb	*urb;
	int		unlinking;

	/* nothing to report? */
	if (likely ((qh->hw_token & __constant_cpu_to_le32 (QTD_STS_ACTIVE))
			!= 0))
		return flags;
	if (unlikely (list_empty (&qh->qtd_list))) {
		dbg ("intr qh %p no TDs?", qh);
		return flags;
	}
	
	qtd = list_entry (qh->qtd_list.next, struct ehci_qtd, qtd_list);
	urb = qtd->urb;
	unlinking = (urb->status == -ENOENT) || (urb->status == -ECONNRESET);

	/* call any completions, after patching for reactivation;
	 * drop the lock since completion handlers may resubmit URBs
	 */
	spin_unlock_irqrestore (&ehci->lock, flags);
	/* NOTE:  currently restricted to one qtd per qh! */
	if (qh_completions (ehci, qh, 0) == 0)
		urb = 0;
	spin_lock_irqsave (&ehci->lock, flags);

	/* never reactivate requests that were unlinked ... */
	if (likely (urb != 0)) {
		if (unlinking
				|| urb->status == -ECONNRESET
				|| urb->status == -ENOENT
				// || (urb->dev == NULL)
				|| ehci->hcd.state == USB_STATE_HALT)
			urb = 0;
		// FIXME look at all those unlink cases ... we always
		// need exactly one completion that reports unlink.
		// the one above might not have been it!
	}

	/* normally reactivate */
	if (likely (urb != 0)) {
		if (usb_pipeout (urb->pipe))
			pci_dma_sync_single (ehci->hcd.pdev,
				qtd->buf_dma,
				urb->transfer_buffer_length,
				PCI_DMA_TODEVICE);
		urb->status = -EINPROGRESS;
		urb->actual_length = 0;

		/* patch qh and restart */
		qh_update (qh, qtd);
	}
	return flags;
}

/*-------------------------------------------------------------------------*/

static void
itd_free_list (struct ehci_hcd *ehci, struct urb *urb)
{
	struct ehci_itd *first_itd = urb->hcpriv;

	pci_unmap_single (ehci->hcd.pdev,
		first_itd->buf_dma, urb->transfer_buffer_length,
		usb_pipein (urb->pipe)
		    ? PCI_DMA_FROMDEVICE
		    : PCI_DMA_TODEVICE);
	while (!list_empty (&first_itd->itd_list)) {
		struct ehci_itd	*itd;

		itd = list_entry (
			first_itd->itd_list.next,
			struct ehci_itd, itd_list);
		list_del (&itd->itd_list);
		pci_pool_free (ehci->itd_pool, itd, itd->itd_dma);
	}
	pci_pool_free (ehci->itd_pool, first_itd, first_itd->itd_dma);
	urb->hcpriv = 0;
}

static int
itd_fill (
	struct ehci_hcd	*ehci,
	struct ehci_itd	*itd,
	struct urb	*urb,
	unsigned	index,		// urb->iso_frame_desc [index]
	dma_addr_t	dma		// mapped transfer buffer
) {
	u64		temp;
	u32		buf1;
	unsigned	i, epnum, maxp, multi;
	unsigned	length;
	int		is_input;

	itd->hw_next = EHCI_LIST_END;
	itd->urb = urb;
	itd->index = index;

	/* tell itd about its transfer buffer, max 2 pages */
	length = urb->iso_frame_desc [index].length;
	dma += urb->iso_frame_desc [index].offset;
	temp = dma & ~0x0fff;
	for (i = 0; i < 2; i++) {
		itd->hw_bufp [i] = cpu_to_le32 ((u32) temp);
		itd->hw_bufp_hi [i] = cpu_to_le32 ((u32)(temp >> 32));
		temp += 0x1000;
	}
	itd->buf_dma = dma;
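	/* Example: dma = 0x12345678 gives hw_bufp[0] = 0x12345000 and
	 * hw_bufp[1] = 0x12346000; the 12-bit offset 0x678 goes into
	 * itd->transaction below.  Two pages always suffice, since a
	 * transaction is at most 3 KBytes and the buffer is contiguous.
	 */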

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's maxpacket field
	 */
	epnum = usb_pipeendpoint (urb->pipe);
	is_input = usb_pipein (urb->pipe);
	if (is_input) {
		maxp = urb->dev->epmaxpacketin [epnum];
		buf1 = (1 << 11);
	} else {
		maxp = urb->dev->epmaxpacketout [epnum];
		buf1 = 0;
	}
	buf1 |= (maxp & 0x03ff);
	multi = 1;
	multi += (maxp >> 11) & 0x03;
	maxp &= 0x03ff;
	maxp *= multi;
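	/* Example: maxp = 0x0a00 encodes bits 12:11 = 01 (one extra
	 * transaction per uframe) and bits 10:0 = 512 bytes, so
	 * multi = 2 and the per-uframe budget is 2 * 512 = 1024 bytes.
	 */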

	/* transfer can't fit in any uframe? */
	if (maxp < length) {
		dbg ("BAD iso packet: %d bytes, max %d, urb %p [%d] (of %d)",
			length, maxp, urb, index,
			urb->iso_frame_desc [index].length);
		return -ENOSPC;
	}
	itd->usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 1, length);

	/* "plus" info in low order bits of buffer pointers */
	itd->hw_bufp [0] |= cpu_to_le32 ((epnum << 8) | urb->dev->devnum);
	itd->hw_bufp [1] |= cpu_to_le32 (buf1);
	itd->hw_bufp [2] |= cpu_to_le32 (multi);
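	/* per the EHCI iTD layout: page 0 low bits hold the endpoint
	 * and device address, page 1 low bits the maxpacket size and
	 * direction flag (bit 11), page 2 low bits the Mult count
	 */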

	/* figure hw_transaction[] value (it's scheduled later) */
	itd->transaction = EHCI_ISOC_ACTIVE;
	itd->transaction |= dma & 0x0fff;		/* offset; buffer=0 */
	if ((index + 1) == urb->number_of_packets)
		itd->transaction |= EHCI_ITD_IOC; 	/* end-of-urb irq */
	itd->transaction |= length << 16;
	cpu_to_le32s (&itd->transaction);
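	/* the dword now matches the EHCI iTD transaction layout:
	 * bits 31:28 status (Active set here), 27:16 transfer length,
	 * bit 15 IOC, 14:12 page select (zero), 11:0 buffer offset
	 */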

	return 0;
}

static int
itd_urb_transaction (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	int			mem_flags
) {
	int			frame_index;
	struct ehci_itd		*first_itd, *itd;
	int			status;
	dma_addr_t		buf_dma, itd_dma;

	/* set up one dma mapping for this urb */
	buf_dma = pci_map_single (ehci->hcd.pdev,
		urb->transfer_buffer, urb->transfer_buffer_length,
		usb_pipein (urb->pipe)
		    ? PCI_DMA_FROMDEVICE
		    : PCI_DMA_TODEVICE);
	if (buf_dma == 0)
		return -ENOMEM;

	/* allocate/init ITDs */
	for (frame_index = 0, first_itd = 0;
			frame_index < urb->number_of_packets;
			frame_index++) {
		itd = pci_pool_alloc (ehci->itd_pool, mem_flags, &itd_dma);
		if (!itd) {
			status = -ENOMEM;
			goto fail;
		}
		memset (itd, 0, sizeof *itd);
		itd->itd_dma = itd_dma;

		status = itd_fill (ehci, itd, urb, frame_index, buf_dma);
		if (status != 0)
			goto fail;

		if (first_itd)
			list_add_tail (&itd->itd_list,
					&first_itd->itd_list);
		else {
			INIT_LIST_HEAD (&itd->itd_list);
			urb->hcpriv = first_itd = itd;
		}
	}
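	/* first_itd heads the itd_list but is not itself linked on it,
	 * which is why itd_free_list() drains the list and then frees
	 * first_itd separately
	 */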
	urb->error_count = 0;
	return 0;

fail:
	if (urb->hcpriv)
		itd_free_list (ehci, urb);
	return status;
}

/*-------------------------------------------------------------------------*/
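/* Periodic schedule entries are bus addresses tagged in their low bits:
 * bit 0 is the EHCI "T" (terminate) flag, and bits 2:1 give the entry
 * type (00 iTD, 01 QH, 10 siTD, 11 FSTN), hence Q_TYPE_ITD below.
 * iTDs may be prepended freely; only QHs must keep relative order.
 */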

static inline void
itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
	/* always prepend ITD/SITD ... only QH tree is order-sensitive */
	itd->itd_next = ehci->pshadow [frame];
	itd->hw_next = ehci->periodic [frame];
	ehci->pshadow [frame].itd = itd;
	ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD;
}

/*
 * return zero on success, else -errno
 * - start holds first uframe to start scheduling into
 * - max is the first uframe it's NOT (!) OK to start scheduling into
 * math to be done modulo "mod" (ehci->periodic_size << 3)
 */
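/* Note that FRINDEX counts in uframes (eight per 1 msec frame), so the
 * math here is in uframes too: e.g. periodic_size = 1024 frames gives
 * mod = 8192 uframes, and "frame << 3" converts a frame number to a
 * uframe number (as in the USB_ISO_ASAP handling below).
 */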
static int get_iso_range (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		*start,
	unsigned		*max,
	unsigned		mod
) {
	struct list_head	*lh;
	struct hcd_dev		*dev = urb->dev->hcpriv;
	int			last = -1;
	unsigned		now, span, end;

	span = urb->interval * urb->number_of_packets;

	/* first see if we know when the next transfer SHOULD happen */
	list_for_each (lh, &dev->urb_list) {
		struct urb	*u;
		struct ehci_itd	*itd;
		unsigned	s;

		u = list_entry (lh, struct urb, urb_list);
		if (u == urb || u->pipe != urb->pipe)
			continue;
		if (u->interval != urb->interval) {	/* must not change! */ 
			dbg ("urb %p interval %d ... != %p interval %d",
				u, u->interval, urb, urb->interval);
			return -EINVAL;
		}
		
		/* URB for this endpoint... covers through when?  */
		itd = u->hcpriv;
		s = itd->uframe + u->interval * u->number_of_packets;
		if (last < 0)
			last = s;
		else {
			/*
			 * So far we can only queue two ISO URBs...
			 *
			 * FIXME do interval math, figure out whether
			 * this URB is "before" or not ... also, handle
			 * the case where the URB might have completed,
			 * but hasn't yet been processed.
			 */
			dbg ("NYET: queue >2 URBs per ISO endpoint");
			return -EDOM;
		}
	}

	/* calculate the legal range [start,max) */
	now = readl (&ehci->regs->frame_index) + 1;	/* next uframe */
	if (!ehci->periodic_urbs)
		now += 8;				/* startup delay */
	now %= mod;
	end = now + mod;
	if (last < 0) {
		*start = now + ehci->i_thresh + /* paranoia */ 1;
		*max = end - span;
		if (*max < *start + 1)
			*max = *start + 1;
	} else {
		*start = last % mod;
		*max = (last + 1) % mod;
	}

	/* explicit start frame? */
	if (!(urb->transfer_flags & USB_ISO_ASAP)) {
		unsigned	temp;

		/* sanity check: must be in range */
		urb->start_frame %= ehci->periodic_size;
		temp = urb->start_frame << 3;
		if (temp < *start)
			temp += mod;
		if (temp > *max)
			return -EDOM;

		/* use that explicit start frame */
		*start = urb->start_frame << 3;
		temp += 8;
		if (temp < *max)
			*max = temp;
	}

	// FIXME minimize wraparound to "now" ... insist max+span
	// (and start+span) remains a few frames short of "end"

	*max %= mod;
	if ((*start + span) < end)
		return 0;
	return -EFBIG;
}

static int
itd_schedule (struct ehci_hcd *ehci, struct urb *urb)
{
	unsigned	start, max, i;
	int		status;
	unsigned	mod = ehci->periodic_size << 3;

	for (i = 0; i < urb->number_of_packets; i++) {
		urb->iso_frame_desc [i].status = -EINPROGRESS;
		urb->iso_frame_desc [i].actual_length = 0;
	}

	if ((status = get_iso_range (ehci, urb, &start, &max, mod)) != 0)
		return status;

	do {
		unsigned	uframe;
		unsigned	usecs;
		struct ehci_itd	*itd;

		/* check schedule: enough space? */
		itd = urb->hcpriv;
		for (i = 0, uframe = start;
				i < urb->number_of_packets;
				i++, uframe += urb->interval) {
			uframe %= mod;

			/* can't commit more than 80% of any 125 usec
			 * uframe to periodic transfers: 0.8 * 125 = 100
			 */
			if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
					> (100 - itd->usecs)) {
