ehci-sched.c
/*
* Copyright (c) 2001-2002 by David Brownell
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of ehci-hcd.c */
/*-------------------------------------------------------------------------*/
/*
* EHCI scheduled transaction support: interrupt, iso, split iso
* These are called "periodic" transactions in the EHCI spec.
*
* Note that for interrupt transfers, the QH/QTD manipulation is shared
* with the "asynchronous" transaction support (control/bulk transfers).
* The only real difference is in how interrupt transfers are scheduled.
* We get some funky API restrictions from the current URB model, which
* works notably better for reading transfers than for writing. (And
* which accordingly needs to change before it'll work inside devices,
* or with "USB On The Go" additions to USB 2.0 ...)
*/
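/*
 * Quick data-structure sketch (summarizing the code below, not adding to it):
 * ehci->periodic[] is the hardware frame list, one le32 link per frame, each
 * tagged (Q_TYPE_QH/ITD/SITD/FSTN) so the HC knows what kind of record it
 * points to; ehci->pshadow[] mirrors it with CPU pointers (union ehci_shadow)
 * so the driver can walk the same per-frame lists without byteswapping or bus
 * addresses.  Each frame is 8 microframes of 125 usecs, and an interrupt QH
 * with a period of N frames is linked into every Nth frame's list.
 */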
static int ehci_get_frame (struct usb_hcd *hcd);
/*-------------------------------------------------------------------------*/
/*
* periodic_next_shadow - return "next" pointer on shadow list
* @periodic: host pointer to qh/itd/sitd
* @tag: hardware tag for type of this record
*/
static union ehci_shadow *
periodic_next_shadow (union ehci_shadow *periodic, int tag)
{
switch (tag) {
case Q_TYPE_QH:
return &periodic->qh->qh_next;
case Q_TYPE_FSTN:
return &periodic->fstn->fstn_next;
case Q_TYPE_ITD:
return &periodic->itd->itd_next;
#ifdef have_split_iso
case Q_TYPE_SITD:
return &periodic->sitd->sitd_next;
#endif /* have_split_iso */
}
dbg ("BAD shadow %p tag %d", periodic->ptr, tag);
// BUG ();
return 0;
}
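/*
 * Illustrative use only (the real traversals are in periodic_unlink() and
 * periodic_usecs() below): hop from a frame's head to its first entry's
 * "next" field, using the hardware tag to pick the right union member.
 *
 *	union ehci_shadow	*q = &ehci->pshadow [frame];
 *	u32			tag = Q_NEXT_TYPE (ehci->periodic [frame]);
 *
 *	if (q->ptr)
 *		q = periodic_next_shadow (q, tag);
 */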
/* returns true after successful unlink */
/* caller must hold ehci->lock */
static int periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
union ehci_shadow *prev_p = &ehci->pshadow [frame];
u32 *hw_p = &ehci->periodic [frame];
union ehci_shadow here = *prev_p;
union ehci_shadow *next_p;
/* find predecessor of "ptr"; hw and shadow lists are in sync */
while (here.ptr && here.ptr != ptr) {
prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
hw_p = &here.qh->hw_next;
here = *prev_p;
}
/* an interrupt entry (at list end) could have been shared */
if (!here.ptr) {
dbg ("entry %p no longer on frame [%d]", ptr, frame);
return 0;
}
// vdbg ("periodic unlink %p from frame %d", ptr, frame);
/* update hardware list ... HC may still know the old structure, so
* don't change hw_next until it'll have purged its cache
*/
next_p = periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
*hw_p = here.qh->hw_next;
/* unlink from shadow list; HCD won't see old structure again */
*prev_p = *next_p;
next_p->ptr = 0;
return 1;
}
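/*
 * Typical caller pattern (intr_deschedule() below is the real one): a QH
 * scheduled every 'period' frames has to be unlinked from each of those
 * frames in turn, dropping one QH reference per unlinked frame:
 *
 *	while (frame < ehci->periodic_size) {
 *		periodic_unlink (ehci, frame, qh);
 *		qh_put (ehci, qh);
 *		frame += period;
 *	}
 */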
/* how many of the uframe's 125 usecs are allocated? */
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
u32 *hw_p = &ehci->periodic [frame];
union ehci_shadow *q = &ehci->pshadow [frame];
unsigned usecs = 0;
while (q->ptr) {
switch (Q_NEXT_TYPE (*hw_p)) {
case Q_TYPE_QH:
/* is it in the S-mask? */
if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe))
usecs += q->qh->usecs;
/* ... or C-mask? */
if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe)))
usecs += q->qh->c_usecs;
q = &q->qh->qh_next;
break;
case Q_TYPE_FSTN:
/* for "save place" FSTNs, count the relevant INTR
* bandwidth from the previous frame
*/
if (q->fstn->hw_prev != EHCI_LIST_END) {
dbg ("not counting FSTN bandwidth yet ...");
}
q = &q->fstn->fstn_next;
break;
case Q_TYPE_ITD:
/* NOTE the "one uframe per itd" policy */
if (q->itd->hw_transaction [uframe] != 0)
usecs += q->itd->usecs;
q = &q->itd->itd_next;
break;
#ifdef have_split_iso
case Q_TYPE_SITD: {
u32 temp;	/* bit 31 of the siTD's endpoint dword */
temp = q->sitd->hw_fullspeed_ep &
__constant_cpu_to_le32 (1 << 31);
// FIXME: this doesn't count data bytes right...
/* is it in the S-mask? (count SPLIT, DATA) */
if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) {
if (temp)
usecs += HS_USECS (188);
else
usecs += HS_USECS (1);
}
/* ... C-mask? (count CSPLIT, DATA) */
if (q->sitd->hw_uframe &
cpu_to_le32 (1 << (8 + uframe))) {
if (temp)
usecs += HS_USECS (0);
else
usecs += HS_USECS (188);
}
q = &q->sitd->sitd_next;
break;
}
#endif /* have_split_iso */
default:
BUG ();
}
}
#ifdef DEBUG
if (usecs > 100)
err ("overallocated uframe %d, periodic is %d usecs",
frame * 8 + uframe, usecs);
#endif
return usecs;
}
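/*
 * Illustrative budget check, assuming the 80% rule quoted in check_period()
 * below: at most 100 of each uframe's 125 usecs may go to periodic work, so
 * a new transfer needing 'needed' usecs (hypothetical name) fits only if
 *
 *	periodic_usecs (ehci, frame, uframe) + needed <= 100
 *
 * check_period() applies exactly this test across every frame of a period.
 */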
/*-------------------------------------------------------------------------*/
static void enable_periodic (struct ehci_hcd *ehci)
{
u32 cmd;
/* did clearing PSE take effect yet?
* takes effect only at frame boundaries...
*/
while (readl (&ehci->regs->status) & STS_PSS)
udelay (20);
cmd = readl (&ehci->regs->command) | CMD_PSE;
writel (cmd, &ehci->regs->command);
/* posted write ... PSS happens later */
ehci->hcd.state = USB_STATE_RUNNING;
/* make sure tasklet scans these */
ehci->next_uframe = readl (&ehci->regs->frame_index)
% (ehci->periodic_size << 3);
}
static void disable_periodic (struct ehci_hcd *ehci)
{
u32 cmd;
/* did setting PSE not take effect yet?
* takes effect only at frame boundaries...
*/
while (!(readl (&ehci->regs->status) & STS_PSS))
udelay (20);
cmd = readl (&ehci->regs->command) & ~CMD_PSE;
writel (cmd, &ehci->regs->command);
/* posted write ... */
ehci->next_uframe = -1;
}
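/*
 * The enable/disable pair above busy-waits for the Periodic Schedule Status
 * (PSS) bit to catch up with the Periodic Schedule Enable (PSE) command bit,
 * since the HC only samples PSE at frame boundaries.  A bounded version of
 * that wait might look like this hypothetical helper (illustrative sketch
 * only, not part of this file):
 *
 *	static int wait_for_pss (struct ehci_hcd *ehci, u32 wanted)
 *	{
 *		unsigned count = 64;	// 64 * 20 usecs > one 1 msec frame
 *
 *		while ((readl (&ehci->regs->status) & STS_PSS) != wanted) {
 *			if (count-- == 0)
 *				return -ETIMEDOUT;
 *			udelay (20);
 *		}
 *		return 0;
 *	}
 */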
/*-------------------------------------------------------------------------*/
static void intr_deschedule (
struct ehci_hcd *ehci,
unsigned frame,
struct ehci_qh *qh,
unsigned period
) {
unsigned long flags;
period >>= 3; // FIXME microframe periods not handled yet
spin_lock_irqsave (&ehci->lock, flags);
do {
periodic_unlink (ehci, frame, qh);
qh_put (ehci, qh);
frame += period;
} while (frame < ehci->periodic_size);
qh->qh_state = QH_STATE_UNLINK;
qh->qh_next.ptr = 0;
ehci->periodic_urbs--;
/* maybe turn off periodic schedule */
if (!ehci->periodic_urbs)
disable_periodic (ehci);
else
vdbg ("periodic schedule still enabled");
spin_unlock_irqrestore (&ehci->lock, flags);
/*
* If the hc may be looking at this qh, then delay a uframe
* (yeech!) to be sure it's done.
* No other threads may be mucking with this qh.
*/
if (((ehci_get_frame (&ehci->hcd) - frame) % period) == 0)
udelay (125);
qh->qh_state = QH_STATE_IDLE;
qh->hw_next = EHCI_LIST_END;
vdbg ("descheduled qh %p, per = %d frame = %d count = %d, urbs = %d",
qh, period, frame,
atomic_read (&qh->refcount), ehci->periodic_urbs);
}
static int check_period (
struct ehci_hcd *ehci,
unsigned frame,
int uframe,
unsigned period,
unsigned usecs
) {
/* complete split running into next frame?
* given FSTN support, we could sometimes check...
*/
if (uframe >= 8)
return 0;
/*
* 80% periodic == 100 usec/uframe available
* convert "usecs we need" to "max already claimed"
*/
usecs = 100 - usecs;
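/* e.g. needing 30 usecs leaves usecs == 70: any uframe already
 * claiming more than 70 usecs has no room for this transfer.
 */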
do {
int claimed;
// FIXME delete when intr_submit handles non-empty queues
// this gives us a one intr/frame limit (vs N/uframe)
// ... and also lets us avoid tracking split transactions
// that might collide at a given TT/hub.
if (ehci->pshadow [frame].ptr)
return 0;
claimed = periodic_usecs (ehci, frame, uframe);
if (claimed > usecs)
return 0;
// FIXME update to handle sub-frame periods
} while ((frame += period) < ehci->periodic_size);
// success!
return 1;
}
static int intr_submit (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
int mem_flags
) {
unsigned epnum, period;
unsigned short usecs, c_usecs, gap_uf;
unsigned long flags;
struct ehci_qh *qh;
struct hcd_dev *dev;
int is_input;
int status = 0;
/* get endpoint and transfer/schedule data */
epnum = usb_pipeendpoint (urb->pipe);
is_input = usb_pipein (urb->pipe);
if (is_input)
epnum |= 0x10;
/*
* HS interrupt transfers are simple -- only one microframe. FS/LS
* interrupt transfers involve a SPLIT in one microframe and CSPLIT
* sometime later. We need to know how much time each will be
* needed in each microframe and, for FS/LS, how many microframes
* separate the two in the best case.
*/
usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 0,
urb->transfer_buffer_length);
if (urb->dev->speed == USB_SPEED_HIGH) {
gap_uf = 0;
c_usecs = 0;
/* FIXME handle HS periods of less than 1 frame. */
period = urb->interval >> 3;
if (period < 1) {
dbg ("intr period %d uframes, NYET!", urb->interval);
status = -EINVAL;
goto done;
}
} else {
/* gap is a function of full/low speed transfer times */
gap_uf = 1 + usb_calc_bus_time (urb->dev->speed, is_input, 0,
urb->transfer_buffer_length) / (125 * 1000);
/* FIXME this just approximates SPLIT/CSPLIT times */
if (is_input) { // SPLIT, gap, CSPLIT+DATA
c_usecs = usecs + HS_USECS (0);
usecs = HS_USECS (1);
} else { // SPLIT+DATA, gap, CSPLIT
usecs = usecs + HS_USECS (1);
c_usecs = HS_USECS (0);
}
period = urb->interval;
}
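/* At this point: 'usecs' is the high-speed bus time claimed in the SPLIT
 * (or, for high speed devices, the only) microframe, 'c_usecs' the time
 * claimed in the CSPLIT microframe (zero for high speed), 'gap_uf' the
 * minimum microframes between the two, and 'period' the polling interval
 * in frames.
 */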
/*
* NOTE: current completion/restart logic doesn't handle more than
* one qtd in a periodic qh ... 16-20 KB/urb is pretty big for this.
* such big requests need many periods to transfer.
*
* FIXME want to change hcd core submit model to expect queuing
* for all transfer types ... not just ISO and (with flag) BULK.
* that means: getting rid of this check; handling the "interrupt
* urb already queued" case below like bulk queuing is handled (no
errors possible!); and completely getting rid of that annoying
* qh restart logic. simpler/smaller overall, and more flexible.
*/
if (unlikely (qtd_list->next != qtd_list->prev)) {
dbg ("only one intr qtd per urb allowed");
status = -EINVAL;
goto done;
}
spin_lock_irqsave (&ehci->lock, flags);
/* get the qh (must be empty and idle) */
dev = (struct hcd_dev *)urb->dev->hcpriv;
qh = (struct ehci_qh *) dev->ep [epnum];
if (qh) {
/* only allow one queued interrupt urb per EP */
if (unlikely (qh->qh_state != QH_STATE_IDLE
|| !list_empty (&qh->qtd_list))) {
dbg ("interrupt urb already queued");
status = -EBUSY;
} else {
/* maybe reset hardware's data toggle in the qh */
if (unlikely (!usb_gettoggle (urb->dev, epnum & 0x0f,
!(epnum & 0x10)))) {
qh->hw_token |=
__constant_cpu_to_le32 (QTD_TOGGLE);
usb_settoggle (urb->dev, epnum & 0x0f,
!(epnum & 0x10), 1);
}
/* trust the QH was set up as interrupt ... */
list_splice (qtd_list, &qh->qtd_list);
qh_update (qh, list_entry (qtd_list->next,
struct ehci_qtd, qtd_list));
qtd_list = &qh->qtd_list;
}
} else {
/* can't sleep here, we have ehci->lock... */
qh = ehci_qh_make (ehci, urb, qtd_list, SLAB_ATOMIC);
if (likely (qh != 0)) {
// dbg ("new INTR qh %p", qh);
dev->ep [epnum] = qh;
qtd_list = &qh->qtd_list;
} else
status = -ENOMEM;
}
/* Schedule this periodic QH. */
if (likely (status == 0)) {
unsigned frame = period;
qh->hw_next = EHCI_LIST_END;
qh->usecs = usecs;
qh->c_usecs = c_usecs;
urb->hcpriv = qh_get (qh);
status = -ENOSPC;
/* pick a set of schedule slots, link the QH into them */
do {
int uframe;
u32 c_mask = 0;
/* pick a set of slots such that all uframes have
* enough periodic bandwidth available.
*/
frame--;
for (uframe = 0; uframe < 8; uframe++) {
if (check_period (ehci, frame, uframe,
period, usecs) == 0)
continue;