ehci-sched.c
/*
 * Copyright (c) 2001-2002 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 * We get some funky API restrictions from the current URB model, which
 * works notably better for reading transfers than for writing.  (And
 * which accordingly needs to change before it'll work inside devices,
 * or with "USB On The Go" additions to USB 2.0 ...)
 */

static int ehci_get_frame (struct usb_hcd *hcd);

/*-------------------------------------------------------------------------*/

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow (union ehci_shadow *periodic, int tag)
{
	switch (tag) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
#ifdef have_split_iso
	case Q_TYPE_SITD:
		return &periodic->sitd->sitd_next;
#endif /* have_split_iso */
	}
	dbg ("BAD shadow %p tag %d", periodic->ptr, tag);
	// BUG ();
	return 0;
}

/* returns true after successful unlink */
/* caller must hold ehci->lock */
static int periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow [frame];
	u32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	here = *prev_p;
	union ehci_shadow	*next_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
		hw_p = &here.qh->hw_next;
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr) {
		dbg ("entry %p no longer on frame [%d]", ptr, frame);
		return 0;
	}
	// vdbg ("periodic unlink %p from frame %d", ptr, frame);

	/* update hardware list ... HC may still know the old structure, so
	 * don't change hw_next until it'll have purged its cache
	 */
	next_p = periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
	*hw_p = here.qh->hw_next;

	/* unlink from shadow list; HCD won't see old structure again */
	*prev_p = *next_p;
	next_p->ptr = 0;

	return 1;
}
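/* NOTE: each high-speed microframe is 125 usec long, and the USB 2.0 spec
 * allows periodic transfers to claim at most 80% of it (100 usec).
 * periodic_usecs() below totals what the entries on one frame's shadow
 * list already claim in a given microframe, so check_period() further
 * down can reject schedules that would exceed that budget.
 */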
/* how many of the uframe's 125 usecs are allocated? */
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
	u32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	*q = &ehci->pshadow [frame];
	unsigned		usecs = 0;

	while (q->ptr) {
		switch (Q_NEXT_TYPE (*hw_p)) {
		case Q_TYPE_QH:
			/* is it in the S-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe))
				usecs += q->qh->usecs;
			/* ... or C-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			q = &q->qh->qh_next;
			break;
		case Q_TYPE_FSTN:
			/* for "save place" FSTNs, count the relevant INTR
			 * bandwidth from the previous frame
			 */
			if (q->fstn->hw_prev != EHCI_LIST_END) {
				dbg ("not counting FSTN bandwidth yet ...");
			}
			q = &q->fstn->fstn_next;
			break;
		case Q_TYPE_ITD:
			/* NOTE the "one uframe per itd" policy */
			if (q->itd->hw_transaction [uframe] != 0)
				usecs += q->itd->usecs;
			q = &q->itd->itd_next;
			break;
#ifdef have_split_iso
		case Q_TYPE_SITD: {
			u32	temp;

			temp = q->sitd->hw_fullspeed_ep
					& __constant_cpu_to_le32 (1 << 31);

			// FIXME:  this doesn't count data bytes right...

			/* is it in the S-mask?  (count SPLIT, DATA) */
			if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) {
				if (temp)
					usecs += HS_USECS (188);
				else
					usecs += HS_USECS (1);
			}

			/* ... C-mask?  (count CSPLIT, DATA) */
			if (q->sitd->hw_uframe
					& cpu_to_le32 (1 << (8 + uframe))) {
				if (temp)
					usecs += HS_USECS (0);
				else
					usecs += HS_USECS (188);
			}
			q = &q->sitd->sitd_next;
			break;
			}
#endif /* have_split_iso */
		default:
			BUG ();
		}
	}
#ifdef	DEBUG
	if (usecs > 100)
		err ("overallocated uframe %d, periodic is %d usecs",
			frame * 8 + uframe, usecs);
#endif
	return usecs;
}

/*-------------------------------------------------------------------------*/

static int enable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	/* did clearing PSE take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake (&ehci->regs->status, STS_PSS, 0, 9 * 125);
	if (status != 0) {
		ehci->hcd.state = USB_STATE_HALT;
		return status;
	}

	cmd = readl (&ehci->regs->command) | CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* posted write ... PSS happens later */
	ehci->hcd.state = USB_STATE_RUNNING;

	/* make sure ehci_work scans these */
	ehci->next_uframe = readl (&ehci->regs->frame_index)
				% (ehci->periodic_size << 3);
	return 0;
}

static int disable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	/* did setting PSE not take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake (&ehci->regs->status, STS_PSS, STS_PSS, 9 * 125);
	if (status != 0) {
		ehci->hcd.state = USB_STATE_HALT;
		return status;
	}

	cmd = readl (&ehci->regs->command) & ~CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* posted write ... */

	ehci->next_uframe = -1;
	return 0;
}

/*-------------------------------------------------------------------------*/
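/* intr_deschedule() pulls an interrupt qh out of the periodic schedule:
 * it unlinks the qh from every frame it occupies (its start frame, then
 * every 'period' frames after that), drops the per-frame refcounts, and
 * shuts the periodic schedule off once nothing is left on it.  If the
 * controller might still be executing this qh in the current frame, the
 * caller can pass 'wait' to spin for one microframe (125 usec) so the
 * hardware is done with the qh before it gets reused.
 */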
*/ vdbg ("intr_deschedule..."); } } else qh->hw_next = EHCI_LIST_END; qh->qh_state = QH_STATE_IDLE; /* update per-qh bandwidth utilization (for usbfs) */ hcd_to_bus (&ehci->hcd)->bandwidth_allocated -= (qh->usecs + qh->c_usecs) / qh->period; dbg ("descheduled qh %p, period = %d frame = %d count = %d, urbs = %d", qh, qh->period, frame, atomic_read (&qh->refcount), ehci->periodic_sched);}static int check_period ( struct ehci_hcd *ehci, unsigned frame, unsigned uframe, unsigned period, unsigned usecs) { /* complete split running into next frame? * given FSTN support, we could sometimes check... */ if (uframe >= 8) return 0; /* * 80% periodic == 100 usec/uframe available * convert "usecs we need" to "max already claimed" */ usecs = 100 - usecs; do { int claimed;// FIXME delete when intr_submit handles non-empty queues// this gives us a one intr/frame limit (vs N/uframe)// ... and also lets us avoid tracking split transactions// that might collide at a given TT/hub. if (ehci->pshadow [frame].ptr) return 0; claimed = periodic_usecs (ehci, frame, uframe); if (claimed > usecs) return 0;// FIXME update to handle sub-frame periods } while ((frame += period) < ehci->periodic_size); // success! return 1;}static int check_intr_schedule ( struct ehci_hcd *ehci, unsigned frame, unsigned uframe, const struct ehci_qh *qh, u32 *c_maskp){ int retval = -ENOSPC; if (!check_period (ehci, frame, uframe, qh->period, qh->usecs)) goto done; if (!qh->c_usecs) { retval = 0; *c_maskp = cpu_to_le32 (0); goto done; } /* This is a split transaction; check the bandwidth available for * the completion too. Check both worst and best case gaps: worst * case is SPLIT near uframe end, and CSPLIT near start ... best is * vice versa. Difference can be almost two uframe times, but we * reserve unnecessary bandwidth (waste it) this way. (Actually * even better cases exist, like immediate device NAK.) * * FIXME don't even bother unless we know this TT is idle in that * range of uframes ... for now, check_period() allows only one * interrupt transfer per frame, so needn't check "TT busy" status * when scheduling a split (QH, SITD, or FSTN). * * FIXME ehci 0.96 and above can use FSTNs */ if (!check_period (ehci, frame, uframe + qh->gap_uf + 1, qh->period, qh->c_usecs)) goto done; if (!check_period (ehci, frame, uframe + qh->gap_uf, qh->period, qh->c_usecs)) goto done; *c_maskp = cpu_to_le32 (0x03 << (8 + uframe + qh->gap_uf)); retval = 0;done: return retval;}static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh){ int status; unsigned uframe; u32 c_mask; unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ qh->hw_next = EHCI_LIST_END; frame = qh->start; /* reuse the previous schedule slots, if we can */ if (frame < qh->period) { uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff); status = check_intr_schedule (ehci, frame, --uframe, qh, &c_mask); } else { uframe = 0; c_mask = 0; status = -ENOSPC; } /* else scan the schedule to find a group of slots such that all * uframes have enough periodic bandwidth available. 
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		status;
	unsigned	uframe;
	u32		c_mask;
	unsigned	frame;		/* 0..(qh->period - 1), or NO_FRAME */

	qh->hw_next = EHCI_LIST_END;
	frame = qh->start;

	/* reuse the previous schedule slots, if we can */
	if (frame < qh->period) {
		uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff);
		status = check_intr_schedule (ehci, frame, --uframe,
				qh, &c_mask);
	} else {
		uframe = 0;
		c_mask = 0;
		status = -ENOSPC;
	}

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	if (status) {
		frame = qh->period - 1;
		do {
			for (uframe = 0; uframe < 8; uframe++) {
				status = check_intr_schedule (ehci,
						frame, uframe, qh, &c_mask);
				if (status == 0)
					break;
			}
		} while (status && --frame);
		if (status)
			goto done;
		qh->start = frame;

		/* reset S-frame and (maybe) C-frame masks */
		qh->hw_info2 &= ~0xffff;
		qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
	} else
		dbg ("reused previous qh %p schedule", qh);

	/* stuff into the periodic schedule */
	qh->qh_state = QH_STATE_LINKED;
	dbg ("scheduled qh %p usecs %d/%d period %d.0 starting %d.%d (gap %d)",
		qh, qh->usecs, qh->c_usecs,
		qh->period, frame, uframe, qh->gap_uf);
	do {
		if (unlikely (ehci->pshadow [frame].ptr != 0)) {

// FIXME -- just link toward the end, before any qh with a shorter period,
// AND accommodate it already having been linked here (after some other qh)
// AS WELL AS updating the schedule checking logic

			BUG ();
		} else {
			ehci->pshadow [frame].qh = qh_get (qh);
			ehci->periodic [frame] = QH_NEXT (qh->qh_dma);
		}
		wmb ();
		frame += qh->period;
	} while (frame < ehci->periodic_size);

	/* update per-qh bandwidth for usbfs */
	hcd_to_bus (&ehci->hcd)->bandwidth_allocated +=
		(qh->usecs + qh->c_usecs) / qh->period;

	/* maybe enable periodic schedule processing */
	if (!ehci->periodic_sched++)
		status = enable_periodic (ehci);
done:
	return status;
}

static int intr_submit (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			mem_flags
) {
	unsigned		epnum;
	unsigned long		flags;
	struct ehci_qh		*qh;
	struct hcd_dev		*dev;
	int			is_input;
	int			status = 0;
	struct list_head	empty;

	/* get endpoint and transfer/schedule data */
	epnum = usb_pipeendpoint (urb->pipe);
	is_input = usb_pipein (urb->pipe);
	if (is_input)
		epnum |= 0x10;

	spin_lock_irqsave (&ehci->lock, flags);
	dev = (struct hcd_dev *)urb->dev->hcpriv;

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD (&empty);
	qh = qh_append_tds (ehci, urb, &empty, epnum, &dev->ep [epnum]);
	if (qh == 0) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		if ((status = qh_schedule (ehci, qh)) != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
	BUG_ON (qh == 0);

	/* ... update usbfs periodic stats */
	hcd_to_bus (&ehci->hcd)->bandwidth_int_reqs++;

done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (status)
		qtd_list_free (ehci, urb, qtd_list);

	return status;
}
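/* intr_complete() is called from the periodic-schedule scan for each frame
 * that holds this qh.  While the hardware still shows the ACTIVE bit the qh
 * is left alone; otherwise finished qtds are handed to qh_completions(),
 * and once the qtd list empties the qh is descheduled.
 */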
static unsigned
intr_complete (
	struct ehci_hcd	*ehci,
	unsigned	frame,
	struct ehci_qh	*qh,
	struct pt_regs	*regs
) {
	unsigned	count;

	/* nothing to report? */
	if (likely ((qh->hw_token & __constant_cpu_to_le32 (QTD_STS_ACTIVE))
			!= 0))
		return 0;
	if (unlikely (list_empty (&qh->qtd_list))) {
		dbg ("intr qh %p no TDs?", qh);
		return 0;
	}

	/* handle any completions */
	count = qh_completions (ehci, qh, regs);
	if (unlikely (list_empty (&qh->qtd_list)))
		intr_deschedule (ehci, qh, 0);

	return count;
}

/*-------------------------------------------------------------------------*/

static void
itd_free_list (struct ehci_hcd *ehci, struct urb *urb)
{
	struct ehci_itd	*first_itd = urb->hcpriv;

	while (!list_empty (&first_itd->itd_list)) {
		struct ehci_itd	*itd;

		itd = list_entry (
			first_itd->itd_list.next,
			struct ehci_itd, itd_list);
		list_del (&itd->itd_list);
		pci_pool_free (ehci->itd_pool, itd, itd->itd_dma);
	}
	pci_pool_free (ehci->itd_pool, first_itd, first_itd->itd_dma);
	urb->hcpriv = 0;
}

static int
itd_fill (
	struct ehci_hcd	*ehci,
	struct ehci_itd	*itd,
	struct urb	*urb,
	unsigned	index,		// urb->iso_frame_desc [index]
	dma_addr_t	dma		// mapped transfer buffer
) {
	u64		temp;
	u32		buf1;
	unsigned	i, epnum, maxp, multi;
	unsigned	length;
	int		is_input;

	itd->hw_next = EHCI_LIST_END;
	itd->urb = urb;
	itd->index = index;

	/* tell itd about its transfer buffer, max 2 pages */
	length = urb->iso_frame_desc [index].length;
	dma += urb->iso_frame_desc [index].offset;
	temp = dma & ~0x0fff;