ehci-sched.c
itd = 0;
break;
}
itd = list_entry (itd->itd_list.next,
struct ehci_itd, itd_list);
}
if (!itd)
continue;
/* that's where we'll schedule this! */
itd = urb->hcpriv;
urb->start_frame = start >> 3;
vdbg ("ISO urb %p (%d packets period %d) starting %d.%d",
urb, urb->number_of_packets, urb->interval,
urb->start_frame, start & 0x7);
for (i = 0, uframe = start, usecs = 0;
i < urb->number_of_packets;
i++, uframe += urb->interval) {
uframe %= mod;
itd->uframe = uframe;
itd->hw_transaction [uframe & 0x07] = itd->transaction;
itd_link (ehci, (uframe >> 3) % ehci->periodic_size,
itd);
wmb ();
usecs += itd->usecs;
itd = list_entry (itd->itd_list.next,
struct ehci_itd, itd_list);
}
/* update bandwidth utilization records (for usbfs)
*
* FIXME This claims each URB queued to an endpoint, as if
* transfers were concurrent, not sequential. So bandwidth
* typically gets double-billed ... comes from tying it to
* URBs rather than endpoints in the schedule. Luckily we
* don't use this usbfs data for serious decision making.
*/
usecs /= urb->number_of_packets;
usecs /= urb->interval;
usecs >>= 3;
if (usecs < 1)
usecs = 1;
usb_claim_bandwidth (urb->dev, urb, usecs, 1);
/* maybe enable periodic schedule processing */
if (!ehci->periodic_urbs++)
enable_periodic (ehci);
return 0;
} while ((start = ++start % mod) != max);
/* no room in the schedule */
dbg ("urb %p, CAN'T SCHEDULE", urb);
return -ENOSPC;
}
/*-------------------------------------------------------------------------*/
#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
/* Record status/length for one uframe's transaction of an ITD, and give
 * the URB back once its final packet has completed.
 *
 * Called with ehci->lock held and irqs saved in "flags"; may drop and
 * reacquire the lock around usb_hcd_giveback_urb().  Returns the
 * (possibly updated) saved irq flags for the caller to keep using.
 */
static unsigned long
itd_complete (
	struct ehci_hcd	*ehci,
	struct ehci_itd	*itd,
	unsigned	uframe,
	unsigned long	flags
) {
	struct urb				*urb = itd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;

	/* update status for this uframe's transfers */
	desc = &urb->iso_frame_desc [itd->index];

	/* consume the hw transaction word; zeroing it marks this
	 * uframe slot as handled */
	t = itd->hw_transaction [uframe];
	itd->hw_transaction [uframe] = 0;
	if (t & EHCI_ISOC_ACTIVE)
		/* still flagged active: the HC never completed it */
		desc->status = -EXDEV;
	else if (t & ISO_ERRS) {
		urb->error_count++;
		if (t & EHCI_ISOC_BUF_ERR)
			desc->status = usb_pipein (urb->pipe)
				? -ENOSR  /* couldn't read */
				: -ECOMM; /* couldn't write */
		else if (t & EHCI_ISOC_BABBLE)
			desc->status = -EOVERFLOW;
		else /* (t & EHCI_ISOC_XACTERR) */
			desc->status = -EPROTO;

		/* HC need not update length with this error */
		if (!(t & EHCI_ISOC_BABBLE))
			desc->actual_length += EHCI_ITD_LENGTH (t);
	} else {
		desc->status = 0;
		desc->actual_length += EHCI_ITD_LENGTH (t);
	}

	vdbg ("itd %p urb %p packet %d/%d trans %x status %d len %d",
		itd, urb, itd->index + 1, urb->number_of_packets,
		t, desc->status, desc->actual_length);

	/* handle completion now?  only after the last packet */
	if ((itd->index + 1) != urb->number_of_packets)
		return flags;

	/*
	 * Always give the urb back to the driver ... expect it to submit
	 * a new urb (or resubmit this), and to have another already queued
	 * when un-interrupted transfers are needed.
	 *
	 * NOTE that for now we don't accelerate ISO unlinks; they just
	 * happen according to the current schedule.  Means a delay of
	 * up to about a second (max).
	 */
	itd_free_list (ehci, urb);
	if (urb->status == -EINPROGRESS)
		urb->status = 0;

	/* drop the lock across the callback -- the completion handler
	 * may resubmit (see note below about periodic_urbs) */
	spin_unlock_irqrestore (&ehci->lock, flags);
	usb_hcd_giveback_urb (&ehci->hcd, urb);
	spin_lock_irqsave (&ehci->lock, flags);

	/* defer stopping schedule; completion can submit */
	ehci->periodic_urbs--;
	if (!ehci->periodic_urbs)
		disable_periodic (ehci);

	return flags;
}
/*-------------------------------------------------------------------------*/
/* Submit a high-speed ISO URB: build its ITDs, then schedule them.
 * Returns 0 on success or a negative errno; on scheduling failure the
 * already-allocated ITDs are released here.
 */
static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
{
	unsigned long	flags;
	int		rc;

	dbg ("itd_submit urb %p", urb);

	/* NOTE DMA mapping assumes this ... */
	if (urb->iso_frame_desc [0].offset != 0)
		return -EINVAL;

	/* allocate ITDs w/o locking anything */
	rc = itd_urb_transaction (ehci, urb, mem_flags);
	if (rc >= 0) {
		/* schedule ... need to lock */
		spin_lock_irqsave (&ehci->lock, flags);
		rc = itd_schedule (ehci, urb);
		spin_unlock_irqrestore (&ehci->lock, flags);

		/* no room?  give the ITDs back */
		if (rc < 0)
			itd_free_list (ehci, urb);
	}
	return rc;
}
#ifdef have_split_iso
/*-------------------------------------------------------------------------*/
/*
* "Split ISO TDs" ... used for USB 1.1 devices going through
* the TTs in USB 2.0 hubs.
*/
static void
sitd_free (struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{
pci_pool_free (ehci->sitd_pool, sitd, sitd->sitd_dma);
}
/* Allocate and initialize one split-ISO descriptor for one iso frame.
 *
 * Returns the new sitd, or NULL on allocation failure.  Still a stub:
 * the descriptor contents are not yet filled in, so for now it frees
 * the descriptor again and reports failure.
 *
 * BUGFIX: the original passed &dma (the caller's mapped transfer-buffer
 * address) to pci_pool_alloc, which overwrote it with the descriptor's
 * own DMA address -- corrupting the later "dma += offset" math -- and it
 * never stored sitd->sitd_dma, which sitd_free() reads.  Use a separate
 * variable for the descriptor's DMA handle and record it in the sitd.
 */
static struct ehci_sitd *
sitd_make (
	struct ehci_hcd	*ehci,
	struct urb	*urb,
	unsigned	index,		// urb->iso_frame_desc [index]
	unsigned	uframe,		// scheduled start
	dma_addr_t	dma,		// mapped transfer buffer
	int		mem_flags
) {
	struct ehci_sitd	*sitd;
	unsigned		length;
	dma_addr_t		sitd_dma;

	sitd = pci_pool_alloc (ehci->sitd_pool, mem_flags, &sitd_dma);
	if (!sitd)
		return NULL;
	sitd->urb = urb;
	sitd->sitd_dma = sitd_dma;	/* needed by sitd_free() */
	length = urb->iso_frame_desc [index].length;
	dma += urb->iso_frame_desc [index].offset;

#if 0
	// FIXME:  do the rest!
#else
	sitd_free (ehci, sitd);
	return NULL;
#endif
}
/* Link one sitd at the head of the given periodic frame's schedule,
 * updating both the software shadow list and the hardware list.
 * The sitd's own next pointers are set before it is published in the
 * frame slot, so the HC never sees a half-linked entry.
 */
static void
sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
	u32	ptr;

	ptr = cpu_to_le32 (sitd->sitd_dma | 2);	// type 2 == sitd

	if (ehci->pshadow [frame].ptr) {
		if (!sitd->sitd_next.ptr) {
			/* first link into this frame: chain the frame's
			 * current entries behind the sitd */
			sitd->sitd_next = ehci->pshadow [frame];
			sitd->hw_next = ehci->periodic [frame];
		} else if (sitd->sitd_next.ptr != ehci->pshadow [frame].ptr) {
			/* already linked, but not to this frame's head:
			 * internal inconsistency */
			dbg ("frame %d sitd link goof", frame);
			BUG ();
		}
	}
	/* publish: sitd becomes the head of this frame's schedule */
	ehci->pshadow [frame].sitd = sitd;
	ehci->periodic [frame] = ptr;
}
/* Split-ISO completion handling -- not yet implemented.
 * Stub keeps the same contract as itd_complete(): takes and returns
 * the caller's saved irq flags.
 */
static unsigned long
sitd_complete (
	struct ehci_hcd		*ehci,
	struct ehci_sitd	*sitd,
	unsigned long		flags
) {
	// FIXME -- implement!
	dbg ("NYI -- sitd_complete");
	return flags;
}
/*-------------------------------------------------------------------------*/
/* Submit a split-ISO URB (full/low-speed device behind a USB 2.0 TT).
 * NOT YET IMPLEMENTED: walks the packets making placeholder sitds
 * (which sitd_make currently releases again), then fails with -ENOSYS.
 * The commented-out code sketches the intended list management.
 */
static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
{
	// struct ehci_sitd	*first_sitd = 0;
	unsigned		frame_index;
	dma_addr_t		dma;

	dbg ("NYI -- sitd_submit");

	// FIXME -- implement!

	// FIXME:  setup one big dma mapping
	dma = 0;

	/* one sitd per iso_frame_desc entry */
	for (frame_index = 0;
			frame_index < urb->number_of_packets;
			frame_index++) {
		struct ehci_sitd	*sitd;
		unsigned		uframe;

		// FIXME:  use real arguments, schedule this!
		uframe = -1;

		sitd = sitd_make (ehci, urb, frame_index,
				uframe, dma, mem_flags);

		if (sitd) {
			/*
			if (first_sitd)
				list_add_tail (&sitd->sitd_list,
						&first_sitd->sitd_list);
			else
				first_sitd = sitd;
			*/
		} else {
			// FIXME:  clean everything up
		}
	}

	// if we have a first sitd, then
	// store them all into the periodic schedule!
	// urb->hcpriv = first sitd in sitd_list

	return -ENOSYS;
}
#endif /* have_split_iso */
/*-------------------------------------------------------------------------*/
/* Scan the periodic schedule for completed transfers.
 *
 * Walks each frame's hardware list from the last scan point up to the
 * HC's current frame index (or over the whole schedule when the HCD is
 * not running), dispatching QH/FSTN/ITD/SITD entries to their
 * completion handlers.  Takes ehci->lock; the completion handlers it
 * calls may drop and retake it (they thread the saved irq flags
 * through their return value), so list state is re-checked after each
 * completion via the "restart" path.
 */
static void scan_periodic (struct ehci_hcd *ehci)
{
	unsigned	frame, clock, now_uframe, mod;
	unsigned long	flags;

	/* schedule length in uframes: 8 uframes per frame */
	mod = ehci->periodic_size << 3;
	spin_lock_irqsave (&ehci->lock, flags);

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 * Don't scan ISO entries more than once, though.
	 */
	frame = ehci->next_uframe >> 3;
	if (HCD_IS_RUNNING (ehci->hcd.state))
		now_uframe = readl (&ehci->regs->frame_index);
	else
		/* halted: make "now" land just before the starting
		 * frame so the loop covers the whole schedule once */
		now_uframe = (frame << 3) - 1;
	now_uframe %= mod;
	clock = now_uframe >> 3;

	for (;;) {
		union ehci_shadow	q, *q_p;
		u32			type, *hw_p;
		unsigned		uframes;

restart:
		/* scan schedule to _before_ current frame index */
		if (frame == clock)
			uframes = now_uframe & 0x07;
		else
			uframes = 8;

		q_p = &ehci->pshadow [frame];
		hw_p = &ehci->periodic [frame];
		q.ptr = q_p->ptr;
		type = Q_NEXT_TYPE (*hw_p);

		/* scan each element in frame's queue for completions */
		while (q.ptr != 0) {
			int			last;
			unsigned		uf;
			union ehci_shadow	temp;

			switch (type) {
			case Q_TYPE_QH:
				last = (q.qh->hw_next == EHCI_LIST_END);
				/* remember the next entry before the
				 * completion call, which may unlink things */
				temp = q.qh->qh_next;
				type = Q_NEXT_TYPE (q.qh->hw_next);
				/* balanced get/put keeps the qh alive across
				 * intr_complete (which may drop the lock) */
				flags = intr_complete (ehci, frame,
						qh_get (q.qh), flags);
				qh_put (ehci, q.qh);
				q = temp;
				break;
			case Q_TYPE_FSTN:
				last = (q.fstn->hw_next == EHCI_LIST_END);
				/* for "save place" FSTNs, look at QH entries
				 * in the previous frame for completions.
				 */
				if (q.fstn->hw_prev != EHCI_LIST_END) {
					dbg ("ignoring completions from FSTNs");
				}
				type = Q_NEXT_TYPE (q.fstn->hw_next);
				q = q.fstn->fstn_next;
				break;
			case Q_TYPE_ITD:
				last = (q.itd->hw_next == EHCI_LIST_END);

				/* Unlink each (S)ITD we see, since the ISO
				 * URB model forces constant rescheduling.
				 * That complicates sharing uframes in ITDs,
				 * and means we need to skip uframes the HC
				 * hasn't yet processed.
				 */
				for (uf = 0; uf < uframes; uf++) {
					if (q.itd->hw_transaction [uf] != 0) {
						/* unlink from both shadow
						 * and hardware lists before
						 * completing */
						temp = q;
						*q_p = q.itd->itd_next;
						*hw_p = q.itd->hw_next;
						type = Q_NEXT_TYPE (*hw_p);

						/* might free q.itd ... */
						flags = itd_complete (ehci,
							temp.itd, uf, flags);
						break;
					}
				}
				/* we might skip this ITD's uframe ... */
				if (uf == uframes) {
					q_p = &q.itd->itd_next;
					hw_p = &q.itd->hw_next;
					type = Q_NEXT_TYPE (q.itd->hw_next);
				}

				q = *q_p;
				break;
#ifdef have_split_iso
			case Q_TYPE_SITD:
				last = (q.sitd->hw_next == EHCI_LIST_END);
				flags = sitd_complete (ehci, q.sitd, flags);
				type = Q_NEXT_TYPE (q.sitd->hw_next);
				// FIXME unlink SITD after split completes
				q = q.sitd->sitd_next;
				break;
#endif /* have_split_iso */
			default:
				dbg ("corrupt type %d frame %d shadow %p",
					type, frame, q.ptr);
				// BUG ();
				last = 1;
				q.ptr = 0;
			}

			/* did completion remove an interior q entry? */
			if (unlikely (q.ptr == 0 && !last))
				goto restart;
		}

		/* stop when we catch up to the HC */

		// FIXME:  this assumes we won't get lapped when
		// latencies climb; that should be rare, but...
		// detect it, and just go all the way around.
		// FLR might help detect this case, so long as latencies
		// don't exceed periodic_size msec (default 1.024 sec).

		// FIXME:  likewise assumes HC doesn't halt mid-scan

		if (frame == clock) {
			unsigned	now;

			if (!HCD_IS_RUNNING (ehci->hcd.state))
				break;
			ehci->next_uframe = now_uframe;
			/* HC may have advanced while we scanned; if so,
			 * rescan the remainder of this frame */
			now = readl (&ehci->regs->frame_index) % mod;
			if (now_uframe == now)
				break;

			/* rescan the rest of this frame, then ... */
			now_uframe = now;
			clock = now_uframe >> 3;
		} else
			frame = (frame + 1) % ehci->periodic_size;
	}

	spin_unlock_irqrestore (&ehci->lock, flags);
}
Keyboard shortcuts:
  Copy code ............. Ctrl + C
  Search code ........... Ctrl + F
  Fullscreen mode ....... F11
  Toggle theme .......... Ctrl + Shift + D
  Show shortcuts ........ ?
  Increase font size .... Ctrl + =
  Decrease font size .... Ctrl + -