ehci-sched.c
/*
 * Copyright (c) 2001-2003 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */

static int ehci_get_frame (struct usb_hcd *hcd);

/*-------------------------------------------------------------------------*/

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow (union ehci_shadow *periodic, int tag)
{
	switch (tag) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
	// case Q_TYPE_SITD:
	default:
		return &periodic->sitd->sitd_next;
	}
}

/* returns true after successful unlink */
/* caller must hold ehci->lock */
static int periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow [frame];
	u32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	here = *prev_p;
	union ehci_shadow	*next_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
		hw_p = &here.qh->hw_next;
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr) {
		dbg ("entry %p no longer on frame [%d]", ptr, frame);
		return 0;
	}
	// vdbg ("periodic unlink %p from frame %d", ptr, frame);

	/* update hardware list ... HC may still know the old structure, so
	 * don't change hw_next until it'll have purged its cache
	 */
	next_p = periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
	*hw_p = here.qh->hw_next;

	/* unlink from shadow list; HCD won't see old structure again */
	*prev_p = *next_p;
	next_p->ptr = NULL;

	return 1;
}

/* how many of the uframe's 125 usecs are allocated? */
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
	u32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	*q = &ehci->pshadow [frame];
	unsigned		usecs = 0;

	while (q->ptr) {
		switch (Q_NEXT_TYPE (*hw_p)) {
		case Q_TYPE_QH:
			/* is it in the S-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe))
				usecs += q->qh->usecs;
			/* ... or C-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			hw_p = &q->qh->hw_next;
			q = &q->qh->qh_next;
			break;
		case Q_TYPE_FSTN:
			/* for "save place" FSTNs, count the relevant INTR
			 * bandwidth from the previous frame
			 */
			if (q->fstn->hw_prev != EHCI_LIST_END) {
				ehci_dbg (ehci, "ignoring FSTN cost ...\n");
			}
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
			break;
		case Q_TYPE_ITD:
			usecs += q->itd->usecs [uframe];
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			break;
		case Q_TYPE_SITD:
			/* is it in the S-mask?  (count SPLIT, DATA) */
			if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) {
				if (q->sitd->hw_fullspeed_ep
						& __constant_cpu_to_le32 (1<<31))
					usecs += q->sitd->stream->usecs;
				else	/* worst case for OUT start-split */
					usecs += HS_USECS_ISO (188);
			}

			/* ... C-mask?  (count CSPLIT, DATA) */
			if (q->sitd->hw_uframe &
					cpu_to_le32 (1 << (8 + uframe))) {
				/* worst case for IN complete-split */
				usecs += q->sitd->stream->c_usecs;
			}

			hw_p = &q->sitd->hw_next;
			q = &q->sitd->sitd_next;
			break;
		default:
			BUG ();
		}
	}
#ifdef	DEBUG
	if (usecs > 100)
		err ("overallocated uframe %d, periodic is %d usecs",
			frame * 8 + uframe, usecs);
#endif
	return usecs;
}

/*-------------------------------------------------------------------------*/

static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
{
	if (!dev1->tt || !dev2->tt)
		return 0;
	if (dev1->tt != dev2->tt)
		return 0;
	if (dev1->tt->multi)
		return dev1->ttport == dev2->ttport;
	else
		return 1;
}

/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision (
	struct ehci_hcd		*ehci,
	unsigned		period,
	struct usb_device	*dev,
	unsigned		frame,
	u32			uf_mask
)
{
	if (period == 0)	/* error */
		return 0;

	/* note bandwidth wastage:  split never follows csplit
	 * (different dev or endpoint) until the next uframe.
	 * calling convention doesn't make that distinction.
	 */
	for (; frame < ehci->periodic_size; frame += period) {
		union ehci_shadow	here;
		u32			type;

		here = ehci->pshadow [frame];
		type = Q_NEXT_TYPE (ehci->periodic [frame]);
		while (here.ptr) {
			switch (type) {
			case Q_TYPE_ITD:
				type = Q_NEXT_TYPE (here.itd->hw_next);
				here = here.itd->itd_next;
				continue;
			case Q_TYPE_QH:
				if (same_tt (dev, here.qh->dev)) {
					u32	mask;

					mask = le32_to_cpu (here.qh->hw_info2);
					/* "knows" no gap is needed */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE (here.qh->hw_next);
				here = here.qh->qh_next;
				continue;
			case Q_TYPE_SITD:
				if (same_tt (dev, here.itd->urb->dev)) {
					u16	mask;

					mask = le32_to_cpu (here.sitd
								->hw_uframe);
					/* FIXME assumes no gap for IN! */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE (here.qh->hw_next);
				here = here.sitd->sitd_next;
				continue;
			// case Q_TYPE_FSTN:
			default:
				ehci_dbg (ehci,
					"periodic frame %d bogus type %d\n",
					frame, type);
			}

			/* collision or error */
			return 0;
		}
	}

	/* no collision */
	return 1;
}

/*-------------------------------------------------------------------------*/

static int enable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	/* did clearing PSE did take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake (&ehci->regs->status, STS_PSS, 0, 9 * 125);
	if (status != 0) {
		ehci->hcd.state = USB_STATE_HALT;
		return status;
	}

	cmd = readl (&ehci->regs->command) | CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* posted write ... PSS happens later */
	ehci->hcd.state = USB_STATE_RUNNING;

	/* make sure ehci_work scans these */
	ehci->next_uframe = readl (&ehci->regs->frame_index)
				% (ehci->periodic_size << 3);
	return 0;
}

static int disable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	/* did setting PSE not take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake (&ehci->regs->status, STS_PSS, STS_PSS, 9 * 125);
	if (status != 0) {
		ehci->hcd.state = USB_STATE_HALT;
		return status;
	}

	cmd = readl (&ehci->regs->command) & ~CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* posted write ... */

	ehci->next_uframe = -1;
	return 0;
}

/*-------------------------------------------------------------------------*/

// FIXME microframe periods not yet handled

static void intr_deschedule (
	struct ehci_hcd	*ehci,
	struct ehci_qh	*qh,
	int		wait
) {
	int		status;
	unsigned	frame = qh->start;

	do {
		periodic_unlink (ehci, frame, qh);
		qh_put (qh);
		frame += qh->period;
	} while (frame < ehci->periodic_size);

	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;
	ehci->periodic_sched--;

	/* maybe turn off periodic schedule */
	if (!ehci->periodic_sched)
		status = disable_periodic (ehci);
	else {
		status = 0;
		vdbg ("periodic schedule still enabled");
	}

	/*
	 * If the hc may be looking at this qh, then delay a uframe
	 * (yeech!) to be sure it's done.
	 * No other threads may be mucking with this qh.
	 */
	if (((ehci_get_frame (&ehci->hcd) - frame) % qh->period) == 0) {
		if (wait) {
			udelay (125);
			qh->hw_next = EHCI_LIST_END;
		} else {
			/* we may not be IDLE yet, but if the qh is empty
			 * the race is very short.  then if qh also isn't
			 * rescheduled soon, it won't matter.  otherwise...
			 */
			vdbg ("intr_deschedule...");
		}
	} else
		qh->hw_next = EHCI_LIST_END;

	qh->qh_state = QH_STATE_IDLE;

	/* update per-qh bandwidth utilization (for usbfs) */
	hcd_to_bus (&ehci->hcd)->bandwidth_allocated -=
		(qh->usecs + qh->c_usecs) / qh->period;

	dbg ("descheduled qh %p, period = %d frame = %d count = %d, urbs = %d",
		qh, qh->period, frame,
		atomic_read (&qh->kref.refcount), ehci->periodic_sched);
}

static int check_period (
	struct ehci_hcd *ehci,
	unsigned	frame,
	unsigned	uframe,
	unsigned	period,
	unsigned	usecs
) {
	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/*
	 * 80% periodic == 100 usec/uframe available
	 * convert "usecs we need" to "max already claimed"
	 */
	usecs = 100 - usecs;

	do {
		int	claimed;

// FIXME delete when intr_submit handles non-empty queues
// this gives us a one intr/frame limit (vs N/uframe)
// ... and also lets us avoid tracking split transactions
// that might collide at a given TT/hub.
		if (ehci->pshadow [frame].ptr)
			return 0;

		claimed = periodic_usecs (ehci, frame, uframe);
		if (claimed > usecs)
			return 0;

// FIXME update to handle sub-frame periods
	} while ((frame += period) < ehci->periodic_size);

	// success!
	return 1;
}

static int check_intr_schedule (
	struct ehci_hcd		*ehci,
	unsigned		frame,
	unsigned		uframe,
	const struct ehci_qh	*qh,
	u32			*c_maskp
)
{
	int	retval = -ENOSPC;

	if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
		goto done;
	if (!qh->c_usecs) {
		retval = 0;
		*c_maskp = cpu_to_le32 (0);
		goto done;
	}

	/* This is a split transaction; check the bandwidth available for
	 * the completion too.  Check both worst and best case gaps: worst
	 * case is SPLIT near uframe end, and CSPLIT near start ... best is
	 * vice versa.  Difference can be almost two uframe times, but we
	 * reserve unnecessary bandwidth (waste it) this way.  (Actually
	 * even better cases exist, like immediate device NAK.)
	 *
	 * FIXME don't even bother unless we know this TT is idle in that
	 * range of uframes ... for now, check_period() allows only one
	 * interrupt transfer per frame, so needn't check "TT busy" status
	 * when scheduling a split (QH, SITD, or FSTN).
	 *
	 * FIXME ehci 0.96 and above can use FSTNs
	 */
	if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
				qh->period, qh->c_usecs))
		goto done;
	if (!check_period (ehci, frame, uframe + qh->gap_uf,
				qh->period, qh->c_usecs))
		goto done;

	*c_maskp = cpu_to_le32 (0x03 << (8 + uframe + qh->gap_uf));
	retval = 0;
done:
	return retval;
}

static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		status;
	unsigned	uframe;
	u32		c_mask;
	unsigned	frame;		/* 0..(qh->period - 1), or NO_FRAME */

	qh->hw_next = EHCI_LIST_END;
	frame = qh->start;

	/* reuse the previous schedule slots, if we can */
	if (frame < qh->period) {
		uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff);
		status = check_intr_schedule (ehci, frame, --uframe,
				qh, &c_mask);
	} else {
		uframe = 0;
		c_mask = 0;
		status = -ENOSPC;
	}

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	if (status) {
		frame = qh->period - 1;
		do {
			for (uframe = 0; uframe < 8; uframe++) {
				status = check_intr_schedule (ehci,
						frame, uframe, qh,
						&c_mask);
				if (status == 0)
					break;
			}
		} while (status && frame--);
		if (status)
			goto done;
		qh->start = frame;

		/* reset S-frame and (maybe) C-frame masks */
		qh->hw_info2 &= ~__constant_cpu_to_le32(0xffff);
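
/*
 * Illustrative sketch, not part of ehci-hcd: the budget test used by
 * check_period() above.  Each microframe is 125 usec, and periodic
 * transfers may claim at most 80% of it (100 usec).  check_period()
 * inverts the comparison ("usecs = 100 - usecs") so the time already
 * claimed in a microframe can be compared directly against the
 * remaining budget.  The helper name below (uframe_has_room) is made
 * up for this example; it is not a driver symbol.
 */
static int uframe_has_room (unsigned claimed_usecs, unsigned wanted_usecs)
{
	if (wanted_usecs > 100)		/* more than 80% of 125 usec */
		return 0;		/* can never fit */
	/* same test as check_period(): claimed must not exceed 100 - wanted */
	return claimed_usecs <= (100 - wanted_usecs);
}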