/* ehci-q.c */
if (unlikely (!buf))
goto cleanup;
} else
buf = map_buf = 0;
if (!buf || usb_pipein (urb->pipe))
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
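/* token bits 9:8 are the qtd PID code: 0 = OUT, 1 = IN, 2 = SETUP */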
maxpacket = usb_maxpacket (urb->dev, urb->pipe,
usb_pipeout (urb->pipe));
/*
* buffer gets wrapped in one or more qtds;
* last one may be "short" (including zero len)
* and may serve as a control status ack
*/
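/* a qtd holds five page pointers, so qtd_fill() maps 16KB..20KB
 * per qtd (depending on buffer alignment); bigger buffers loop
 */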
for (;;) {
int this_qtd_len;
qtd->urb = urb;
qtd->buf_dma = map_buf;
this_qtd_len = qtd_fill (qtd, buf, len, token);
len -= this_qtd_len;
buf += this_qtd_len;
/* qh makes control packets use qtd toggle; maybe switch it */
if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
token ^= QTD_TOGGLE;
if (likely (len <= 0))
break;
qtd_prev = qtd;
qtd = ehci_qtd_alloc (ehci, flags);
if (unlikely (!qtd))
goto cleanup;
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
list_add_tail (&qtd->qtd_list, head);
}
/*
* control requests may need a terminating data "status" ack;
* bulk ones may need a terminating short packet (zero length).
*/
if (likely (buf != 0)) {
int one_more = 0;
if (usb_pipecontrol (urb->pipe)) {
one_more = 1;
token ^= 0x0100; /* "in" <--> "out" */
token |= QTD_TOGGLE; /* force DATA1 */
} else if (usb_pipebulk (urb->pipe)
&& (urb->transfer_flags & USB_ZERO_PACKET)
&& !(urb->transfer_buffer_length % maxpacket)) {
one_more = 1;
}
if (one_more) {
qtd_prev = qtd;
qtd = ehci_qtd_alloc (ehci, flags);
if (unlikely (!qtd))
goto cleanup;
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
list_add_tail (&qtd->qtd_list, head);
/* never any data in such packets */
qtd_fill (qtd, 0, 0, token);
}
}
/* by default, enable interrupt on urb completion */
if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
qtd->hw_token |= __constant_cpu_to_le32 (QTD_IOC);
return head;
cleanup:
qtd_list_free (ehci, urb, head);
return 0;
}
/*-------------------------------------------------------------------------*/
/*
* Hardware maintains data toggle (like OHCI) ... here we (re)initialize
* the hardware data toggle in the QH, and set the pseudo-toggle in udev
* so we can see if usb_clear_halt() was called. NOP for control, since
* we set up qh->hw_info1 to always use the QTD toggle bits.
*/
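/* the pseudo-toggle set here (1) is how submit_async() later notices
 * that usb_clear_halt() reset it to zero
 */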
static inline void
clear_toggle (struct usb_device *udev, int ep, int is_out, struct ehci_qh *qh)
{
vdbg ("clear toggle, dev %d ep 0x%x-%s",
udev->devnum, ep, is_out ? "out" : "in");
qh->hw_token &= ~__constant_cpu_to_le32 (QTD_TOGGLE);
usb_settoggle (udev, ep, is_out, 1);
}
// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established. Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.5?)
/*
* Each QH holds a qtd list; a QH is used for everything except iso.
*
* For interrupt urbs, the scheduler must set the microframe scheduling
* mask(s) each time the QH gets scheduled. For highspeed, that's
* just one microframe in the s-mask. For split interrupt transactions
* there are additional complications: c-mask, maybe FSTNs.
*/
static struct ehci_qh *
ehci_qh_make (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
int flags
) {
struct ehci_qh *qh = ehci_qh_alloc (ehci, flags);
u32 info1 = 0, info2 = 0;
if (!qh)
return qh;
/*
* init endpoint/device data for this QH
*/
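/* info1/info2 become the QH's "endpoint characteristics" and
 * "endpoint capabilities" dwords
 */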
info1 |= usb_pipeendpoint (urb->pipe) << 8;
info1 |= usb_pipedevice (urb->pipe) << 0;
/* using TT? */
switch (urb->dev->speed) {
case USB_SPEED_LOW:
info1 |= (1 << 12); /* EPS "low" */
/* FALL THROUGH */
case USB_SPEED_FULL:
/* EPS 0 means "full" */
info1 |= (EHCI_TUNE_RL_TT << 28);
if (usb_pipecontrol (urb->pipe)) {
info1 |= (1 << 27); /* "C": control ep behind a TT */
info1 |= 1 << 14; /* toggle from qtd */
}
info1 |= usb_maxpacket (urb->dev, urb->pipe,
usb_pipeout (urb->pipe)) << 16;
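/* record which TT (hub address + port) carries the split transactions */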
info2 |= (EHCI_TUNE_MULT_TT << 30);
info2 |= urb->dev->ttport << 23;
info2 |= urb->dev->tt->hub->devnum << 16;
/* NOTE: if (usb_pipeint (urb->pipe)) { scheduler sets c-mask }
* ... and a 0.96 scheduler might use FSTN nodes too
*/
break;
case USB_SPEED_HIGH: /* no TT involved */
info1 |= (2 << 12); /* EPS "high" */
info1 |= (EHCI_TUNE_RL_HS << 28);
if (usb_pipecontrol (urb->pipe)) {
info1 |= 64 << 16; /* usb2 fixed maxpacket */
info1 |= 1 << 14; /* toggle from qtd */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else if (usb_pipebulk (urb->pipe)) {
info1 |= 512 << 16; /* usb2 fixed maxpacket */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else {
u32 temp;
temp = usb_maxpacket (urb->dev, urb->pipe,
usb_pipeout (urb->pipe));
info1 |= (temp & 0x3ff) << 16; /* maxpacket */
/* HS intr can be "high bandwidth": wMaxPacketSize bits 12:11
 * encode up to two extra transactions per microframe
 */
temp = 1 + ((temp >> 11) & 0x03);
info2 |= temp << 30; /* mult */
}
break;
default:
#ifdef DEBUG
BUG ();
#endif
}
/* NOTE: if (usb_pipeint (urb->pipe)) { scheduler sets s-mask } */
qh->qh_state = QH_STATE_IDLE;
qh->hw_info1 = cpu_to_le32 (info1);
qh->hw_info2 = cpu_to_le32 (info2);
/* initialize sw and hw queues with these qtds */
list_splice (qtd_list, &qh->qtd_list);
qh_update (qh, list_entry (qtd_list->next, struct ehci_qtd, qtd_list));
/* initialize data toggle state */
if (!usb_pipecontrol (urb->pipe))
clear_toggle (urb->dev,
usb_pipeendpoint (urb->pipe),
usb_pipeout (urb->pipe),
qh);
return qh;
}
/*-------------------------------------------------------------------------*/
/* move qh (and its qtds) onto async queue; maybe enable queue. */
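/* the async schedule is a circular list of QHs; exactly one has the H ("head") bit set */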
static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
u32 dma = QH_NEXT (qh->qh_dma);
struct ehci_qh *q;
if (unlikely (!(q = ehci->async))) {
u32 cmd = readl (&ehci->regs->command);
/* in case a clear of CMD_ASE didn't take yet */
while (readl (&ehci->regs->status) & STS_ASS)
udelay (100);
qh->hw_info1 |= __constant_cpu_to_le32 (QH_HEAD); /* H bit: list head [4.8] */
qh->qh_next.qh = qh;
qh->hw_next = dma;
wmb ();
ehci->async = qh;
writel ((u32)qh->qh_dma, &ehci->regs->async_next);
cmd |= CMD_ASE | CMD_RUN;
writel (cmd, &ehci->regs->command);
ehci->hcd.state = USB_STATE_RUNNING;
/* posted write need not be known to HC yet ... */
} else {
/* splice right after "start" of ring */
qh->hw_info1 &= ~__constant_cpu_to_le32 (QH_HEAD); /* not the list head [4.8] */
qh->qh_next = q->qh_next;
qh->hw_next = q->hw_next;
wmb ();
q->qh_next.qh = qh;
q->hw_next = dma;
}
qh->qh_state = QH_STATE_LINKED;
/* qtd completions reported later by interrupt */
}
/*-------------------------------------------------------------------------*/
static int
submit_async (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
int mem_flags
) {
struct ehci_qtd *qtd;
struct hcd_dev *dev;
int epnum;
unsigned long flags;
struct ehci_qh *qh = 0;
qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
dev = (struct hcd_dev *)urb->dev->hcpriv;
epnum = usb_pipeendpoint (urb->pipe);
if (usb_pipein (urb->pipe))
epnum |= 0x10;
vdbg ("%s: submit_async urb %p len %d ep %d-%s qtd %p [qh %p]",
ehci->hcd.self.bus_name, urb, urb->transfer_buffer_length,
epnum & 0x0f, (epnum & 0x10) ? "in" : "out",
qtd, dev ? dev->ep [epnum] : (void *)~0);
spin_lock_irqsave (&ehci->lock, flags);
qh = (struct ehci_qh *) dev->ep [epnum];
if (likely (qh != 0)) {
u32 hw_next = QTD_NEXT (qtd->qtd_dma);
/* maybe patch the qh used for set_address */
if (unlikely (epnum == 0
&& (le32_to_cpu (qh->hw_info1) & 0x7f) == 0))
qh->hw_info1 |= cpu_to_le32 (usb_pipedevice(urb->pipe));
/* is an URB already queued to this qh? */
if (unlikely (!list_empty (&qh->qtd_list))) {
struct ehci_qtd *last_qtd;
int short_rx = 0;
/* update the last qtd's "next" pointer */
// dbg_qh ("non-empty qh", ehci, qh);
last_qtd = list_entry (qh->qtd_list.prev,
struct ehci_qtd, qtd_list);
last_qtd->hw_next = hw_next;
/* previous urb allows short rx? maybe optimize. */
if (!(last_qtd->urb->transfer_flags & USB_DISABLE_SPD)
&& (epnum & 0x10)) {
// only the last QTD for now
last_qtd->hw_alt_next = hw_next;
short_rx = 1;
}
/* Adjust any old copies in qh overlay too.
* Interrupt code must cope with case of HC having it
* cached, and clobbering these updates.
* ... complicates getting rid of extra interrupts!
*/
if (qh->hw_current == cpu_to_le32 (last_qtd->qtd_dma)) {
wmb ();
qh->hw_qtd_next = hw_next;
if (short_rx)
qh->hw_alt_next = hw_next
| (qh->hw_alt_next & 0x1e);
vdbg ("queue to qh %p, patch", qh);
}
/* no URB queued */
} else {
// dbg_qh ("empty qh", ehci, qh);
/* NOTE: we already canceled any queued URBs
* when the endpoint halted.
*/
/* usb_clear_halt() means qh data toggle gets reset */
if (usb_pipebulk (urb->pipe)
&& unlikely (!usb_gettoggle (urb->dev,
(epnum & 0x0f),
!(epnum & 0x10)))) {
clear_toggle (urb->dev,
epnum & 0x0f, !(epnum & 0x10), qh);
}
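/* point the idle QH's overlay at the first new qtd, so the HC restarts there */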
qh_update (qh, qtd);
}
list_splice (qtd_list, qh->qtd_list.prev);
} else {
/* can't sleep here, we have ehci->lock... */
qh = ehci_qh_make (ehci, urb, qtd_list, SLAB_ATOMIC);
if (likely (qh != 0)) {
// dbg_qh ("new qh", ehci, qh);
dev->ep [epnum] = qh;
}
}
/* Control/bulk operations through TTs don't need scheduling,
* the HC and TT handle it when the TT has a buffer ready.
*/
if (likely (qh != 0)) {
urb->hcpriv = qh_get (qh);
if (likely (qh->qh_state == QH_STATE_IDLE))
qh_link_async (ehci, qh_get (qh));
}
spin_unlock_irqrestore (&ehci->lock, flags);
if (unlikely (qh == 0)) {
qtd_list_free (ehci, urb, qtd_list);
return -ENOMEM;
}
return 0;
}
/*-------------------------------------------------------------------------*/
/* the async qh for the qtds being reclaimed is now unlinked from the HC */
/* caller must not own ehci->lock */
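/* called only once the IAA doorbell interrupt, or a stopped/halted async
 * schedule, guarantees the HC no longer caches this QH
 */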
static void end_unlink_async (struct ehci_hcd *ehci)
{
struct ehci_qh *qh = ehci->reclaim;
qh->qh_state = QH_STATE_IDLE;
qh->qh_next.qh = 0;
qh_put (ehci, qh); // refcount from reclaim
ehci->reclaim = 0;
ehci->reclaim_ready = 0;
qh_completions (ehci, qh, 1);
// unlinking any urb should also unlink all urbs queued after it, so
// that relinking only resumes urbs queued before the unlinked one.
if (!list_empty (&qh->qtd_list)
&& HCD_IS_RUNNING (ehci->hcd.state))
qh_link_async (ehci, qh);
else
qh_put (ehci, qh); // refcount from async list
}
/* makes sure the async qh will become idle */
/* caller must own ehci->lock */
static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
int cmd = readl (&ehci->regs->command);
struct ehci_qh *prev;
#ifdef DEBUG
if (ehci->reclaim
|| !ehci->async
|| qh->qh_state != QH_STATE_LINKED
#ifdef CONFIG_SMP
// this macro lies except on SMP compiles
|| !spin_is_locked (&ehci->lock)
#endif
)
BUG ();
#endif
qh->qh_state = QH_STATE_UNLINK;
ehci->reclaim = qh = qh_get (qh);
// dbg_qh ("start unlink", ehci, qh);
/* Removing the only QH left (the list head)? Stop the async schedule first. */
if (unlikely (qh == ehci->async && qh->qh_next.qh == qh)) {
/* can't get here without STS_ASS set */
if (ehci->hcd.state != USB_STATE_HALT) {
if (cmd & CMD_PSE)
writel (cmd & ~CMD_ASE, &ehci->regs->command);
else {
ehci_ready (ehci);
while (readl (&ehci->regs->status) & STS_ASS)
udelay (100);
}
}
qh->qh_next.qh = ehci->async = 0;
ehci->reclaim_ready = 1;
tasklet_schedule (&ehci->tasklet);
return;
}
if (unlikely (ehci->hcd.state == USB_STATE_HALT)) {
ehci->reclaim_ready = 1;
tasklet_schedule (&ehci->tasklet);
return;
}
prev = ehci->async;
while (prev->qh_next.qh != qh && prev->qh_next.qh != ehci->async)
prev = prev->qh_next.qh;
#ifdef DEBUG
if (prev->qh_next.qh != qh)
BUG ();
#endif
if (qh->hw_info1 & __constant_cpu_to_le32 (QH_HEAD)) {
ehci->async = prev;
prev->hw_info1 |= __constant_cpu_to_le32 (QH_HEAD);
}
prev->hw_next = qh->hw_next;
prev->qh_next = qh->qh_next;
wmb ();
ehci->reclaim_ready = 0;
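/* ring the async-advance doorbell; the IAA interrupt says the HC is done with this QH */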
cmd |= CMD_IAAD;
writel (cmd, &ehci->regs->command);
/* posted write need not be known to HC yet ... */
}
/*-------------------------------------------------------------------------*/
static void scan_async (struct ehci_hcd *ehci)
{
struct ehci_qh *qh;
unsigned long flags;
spin_lock_irqsave (&ehci->lock, flags);
rescan:
qh = ehci->async;
if (likely (qh != 0)) {
do {
/* clean any finished work for this qh */
if (!list_empty (&qh->qtd_list)) {
// dbg_qh ("scan_async", ehci, qh);
qh = qh_get (qh);
spin_unlock_irqrestore (&ehci->lock, flags);
/* concurrent unlink could happen here */
qh_completions (ehci, qh, 1);
spin_lock_irqsave (&ehci->lock, flags);
qh_put (ehci, qh);
}
/* unlink idle entries (reduces PCI usage) */
if (list_empty (&qh->qtd_list) && !ehci->reclaim) {
if (qh->qh_next.qh != qh) {
// dbg ("irq/empty");
start_unlink_async (ehci, qh);
} else {
// FIXME: arrange to stop
// after it's been idle a while.
}
}
qh = qh->qh_next.qh;
if (!qh) /* unlinked? */
goto rescan;
} while (qh != ehci->async);
}
spin_unlock_irqrestore (&ehci->lock, flags);
}