📄 ehci-q.c
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely (len <= 0))
			break;

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
	}

	/* unless the bulk/interrupt caller wants a chance to clean
	 * up after short reads, hc should advance qh past this urb
	 */
	if (likely ((urb->transfer_flags & RTDM_URB_SHORT_NOT_OK) == 0
				|| rtdm_usb_pipecontrol (urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END;

	/*
	 * control requests may need a terminating data "status" ack;
	 * bulk ones may need a terminating short packet (zero length).
	 */
	if (likely (buf != 0)) {
		int	one_more = 0;

		if (rtdm_usb_pipecontrol (urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out" */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (rtdm_usb_pipebulk (urb->pipe)
				&& (urb->transfer_flags & RTDM_URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc (ehci);
			if (unlikely (!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
			list_add_tail (&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill (qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	if (likely (!(urb->transfer_flags & RTDM_URB_NO_INTERRUPT)))
		qtd->hw_token |= __constant_cpu_to_le32 (QTD_IOC);

	return head;

cleanup:
	qtd_list_free (ehci, urb, head);
	return NULL;
}

/*-------------------------------------------------------------------------*/

// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established.  Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.7?)

/*
 * Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *
qh_make (
	struct ehci_hcd		*ehci,
	struct rtdm_urb		*urb
) {
	struct ehci_qh		*qh = ehci_qh_alloc (ehci);
	u32			info1 = 0, info2 = 0;
	int			is_input, type;
	int			maxp = 0;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= rtdm_usb_pipeendpoint (urb->pipe) << 8;
	info1 |= rtdm_usb_pipedevice (urb->pipe) << 0;

	is_input = rtdm_usb_pipein (urb->pipe);
	type = rtdm_usb_pipetype (urb->pipe);
	maxp = rtdm_usb_maxpacket (urb->dev, urb->pipe, !is_input);

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == RTDM_PIPE_INTERRUPT) {
		qh->usecs = NS_TO_US (rtdm_usb_calc_bus_time (USB_SPEED_HIGH,
				is_input, 0,
				hb_mult (maxp) * max_packet (maxp)));
		qh->start = NO_FRAME;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->c_usecs = 0;
			qh->gap_uf = 0;

			qh->period = urb->interval >> 3;
			if (qh->period == 0 && urb->interval != 1) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
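				/* (editorial note: for high speed devices,
				 * urb->interval is in microframes, so the
				 * >> 3 above yields the frame-based
				 * qh->period; intervals of 2 or 4 uframes
				 * land here with period 0 and are rejected
				 * just below, while interval 1 passes as
				 * every-uframe polling.)
				 */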
*/ dbg ("intr period %d uframes, NYET!", urb->interval); goto done; } } else { struct rtdm_usb_tt *tt = urb->dev->tt; int think_time; /* gap is f(FS/LS transfer times) */ qh->gap_uf = 1 + rtdm_usb_calc_bus_time (urb->dev->speed, is_input, 0, maxp) / (125 * 1000); /* FIXME this just approximates SPLIT/CSPLIT times */ if (is_input) { // SPLIT, gap, CSPLIT+DATA qh->c_usecs = qh->usecs + HS_USECS (0); qh->usecs = HS_USECS (1); } else { // SPLIT+DATA, gap, CSPLIT qh->usecs += HS_USECS (1); qh->c_usecs = HS_USECS (0); } think_time = tt ? tt->think_time : 0; qh->tt_usecs = NS_TO_US (think_time + rtdm_usb_calc_bus_time (urb->dev->speed, is_input, 0, max_packet (maxp))); qh->period = urb->interval; } } /* support for tt scheduling, and access to toggles */ qh->dev = rtdm_usb_get_dev (urb->dev); /* using TT? */ switch (urb->dev->speed) { case USB_SPEED_LOW: info1 |= (1 << 12); /* EPS "low" */ /* FALL THROUGH */ case USB_SPEED_FULL: /* EPS 0 means "full" */ if (type != RTDM_PIPE_INTERRUPT) info1 |= (EHCI_TUNE_RL_TT << 28); if (type == RTDM_PIPE_CONTROL) { info1 |= (1 << 27); /* for TT */ info1 |= 1 << 14; /* toggle from qtd */ } info1 |= maxp << 16; info2 |= (EHCI_TUNE_MULT_TT << 30); info2 |= urb->dev->ttport << 23; /* set the address of the TT; for TDI's integrated * root hub tt, leave it zeroed. */ if (!ehci_is_TDI(ehci) || urb->dev->tt->hub != ehci_to_hcd(ehci)->self.root_hub) info2 |= urb->dev->tt->hub->devnum << 16; /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */ break; case USB_SPEED_HIGH: /* no TT involved */ info1 |= (2 << 12); /* EPS "high" */ if (type == RTDM_PIPE_CONTROL) { info1 |= (EHCI_TUNE_RL_HS << 28); info1 |= 64 << 16; /* usb2 fixed maxpacket */ info1 |= 1 << 14; /* toggle from qtd */ info2 |= (EHCI_TUNE_MULT_HS << 30); } else if (type == RTDM_PIPE_BULK) { info1 |= (EHCI_TUNE_RL_HS << 28); info1 |= 512 << 16; /* usb2 fixed maxpacket */ info2 |= (EHCI_TUNE_MULT_HS << 30); } else { /* PIPE_INTERRUPT */ info1 |= max_packet (maxp) << 16; info2 |= hb_mult (maxp) << 30; } break; default: dbg("bogus dev %p speed %d", urb->dev, urb->dev->speed);done: qh_put (qh); return NULL; } /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */ /* init as live, toggle clear, advance to dummy */ qh->qh_state = QH_STATE_IDLE; qh->hw_info1 = cpu_to_le32 (info1); qh->hw_info2 = cpu_to_le32 (info2); rtdm_usb_settoggle (urb->dev, rtdm_usb_pipeendpoint (urb->pipe), !is_input, 1); qh_refresh (ehci, qh); return qh;}/*-------------------------------------------------------------------------*//* move qh (and its qtds) onto async queue; maybe enable queue. */static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh){ __le32 dma = QH_NEXT (qh->qh_dma); struct ehci_qh *head; /* (re)start the async schedule? */ head = ehci->async; timer_action_done (ehci, TIMER_ASYNC_OFF); if (!head->qh_next.qh) { u32 cmd = readl (&ehci->regs->command); if (!(cmd & CMD_ASE)) { /* in case a clear of CMD_ASE didn't take yet */ (void) handshake (&ehci->regs->status, STS_ASS, 0, 150); cmd |= CMD_ASE | CMD_RUN; writel (cmd, &ehci->regs->command); ehci_to_hcd(ehci)->state = HC_STATE_RUNNING; /* posted write need not be known to HC yet ... 
		}
	}

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	if (qh->qh_state == QH_STATE_IDLE)
		qh_refresh (ehci, qh);

	/* splice right after start */
	qh->qh_next = head->qh_next;
	qh->hw_next = head->hw_next;
	wmb ();

	head->qh_next.qh = qh;
	head->hw_next = dma;

	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */
}

/*-------------------------------------------------------------------------*/

#define	QH_ADDR_MASK	__constant_cpu_to_le32(0x7f)

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds (
	struct ehci_hcd		*ehci,
	struct rtdm_urb		*urb,
	struct list_head	*qtd_list,
	int			epnum,
	void			**ptr
)
{
	struct ehci_qh		*qh = NULL;

	qh = (struct ehci_qh *) *ptr;
	if (unlikely (qh == NULL)) {
		/* can't sleep here, we have ehci->lock... */
		qh = qh_make (ehci, urb/*, GFP_ATOMIC*/);
		*ptr = qh;
	}
	if (likely (qh != NULL)) {
		struct ehci_qtd	*qtd;

		if (unlikely (list_empty (qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry (qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely (epnum == 0)) {
			/* usb_reset_device() briefly reverts to address 0 */
			if (rtdm_usb_pipedevice (urb->pipe) == 0)
				qh->hw_info1 &= ~QH_ADDR_MASK;
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely (qtd != NULL)) {
			struct ehci_qtd		*dummy;
			dma_addr_t		dma;
			__le32			token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT;
			wmb ();
			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del (&qtd->qtd_list);
			list_add (&dummy->qtd_list, qtd_list);
			__list_splice (qtd_list, qh->qtd_list.prev);

			ehci_qtd_init (qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry (qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT (dma);

			/* let the hc process these next qtds */
			wmb ();
			dummy->hw_token = token;

			urb->hcpriv = qh_get (qh);
		}
	}
	return qh;
}

/*-------------------------------------------------------------------------*/

static int
submit_async (
	struct ehci_hcd			*ehci,
	struct rtdm_usb_host_endpoint	*ep,
	struct rtdm_urb			*urb,
	struct list_head		*qtd_list//,
) {
	struct ehci_qtd		*qtd;
	int			epnum;
	unsigned long		context;
	struct ehci_qh		*qh = NULL;

	qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
	epnum = ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
		__FUNCTION__, urb->dev->devpath, urb,
		epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
		urb->transfer_buffer_length,
		qtd, ep->hcpriv);
#endif

	rtdm_lock_get_irqsave(&ehci->rt_lock, context);
	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv);

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
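	/* (editorial note: qh_append_tds() and the link below both run
	 * under ehci->rt_lock, so the IRQ-side scan_async() never sees a
	 * half-set-up qh; QH_STATE_IDLE means the qh is not yet on the
	 * async ring and must be linked before the HC can fetch its qtds.)
	 */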
	if (likely (qh != NULL)) {
		if (likely (qh->qh_state == QH_STATE_IDLE))
			qh_link_async (ehci, qh_get (qh));
	}
	rtdm_lock_put_irqrestore(&ehci->rt_lock, context);
	if (unlikely (qh == NULL)) {
		qtd_list_free (ehci, urb, qtd_list);
		return -ENOMEM;
	}
	return 0;
}

/*-------------------------------------------------------------------------*/

/* the async qh for the qtds being reclaimed is now unlinked from the HC */

static void end_unlink_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh = ehci->reclaim;
	struct ehci_qh		*next;

	timer_action_done (ehci, TIMER_IAA_WATCHDOG);

	// qh->hw_next = cpu_to_le32 (qh->qh_dma);
	qh->qh_state = QH_STATE_IDLE;
	qh->qh_next.qh = NULL;
	qh_put (qh);			// refcount from reclaim

	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
	next = qh->reclaim;
	ehci->reclaim = next;
	ehci->reclaim_ready = 0;
	qh->reclaim = NULL;

	qh_completions (ehci, qh);

	if (!list_empty (&qh->qtd_list)
			&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
		qh_link_async (ehci, qh);
	else {
		qh_put (qh);		// refcount from async list

		/* it's not free to turn the async schedule on/off; leave it
		 * active but idle for a while once it empties.
		 */
		if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
				&& ehci->async->qh_next.qh == NULL)
			timer_action (ehci, TIMER_ASYNC_OFF);
	}

	if (next) {
		ehci->reclaim = NULL;
		start_unlink_async (ehci, next);
	}
}

/* makes sure the async qh will become idle */
/* caller must own ehci->lock */

static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		cmd = readl (&ehci->regs->command);
	struct ehci_qh	*prev;

#ifdef DEBUG
	if (ehci->reclaim
			|| (qh->qh_state != QH_STATE_LINKED
				&& qh->qh_state != QH_STATE_UNLINK_WAIT))
		BUG ();
#endif

	/* stop async schedule right now? */
	if (unlikely (qh == ehci->async)) {
		/* can't get here without STS_ASS set */
		if (ehci_to_hcd(ehci)->state != HC_STATE_HALT) {
			writel (cmd & ~CMD_ASE, &ehci->regs->command);
			wmb ();
			// handshake later, if we need to
		}
		timer_action_done (ehci, TIMER_ASYNC_OFF);
		return;
	}

	qh->qh_state = QH_STATE_UNLINK;
	ehci->reclaim = qh = qh_get (qh);

	prev = ehci->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	prev->hw_next = qh->hw_next;
	prev->qh_next = qh->qh_next;
	wmb ();

	if (unlikely (ehci_to_hcd(ehci)->state == HC_STATE_HALT)) {
		/* if (unlikely (qh->reclaim != 0))
		 *	this will recurse, probably not much
		 */
		end_unlink_async (ehci);
		return;
	}

	ehci->reclaim_ready = 0;
	cmd |= CMD_IAAD;
	writel (cmd, &ehci->regs->command);
	(void) readl (&ehci->regs->command);
	timer_action (ehci, TIMER_IAA_WATCHDOG);
}

/*-------------------------------------------------------------------------*/

static void
scan_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;
	enum ehci_timer_action	action = TIMER_IO_WATCHDOG;

	if (!++(ehci->stamp))
		ehci->stamp++;
	timer_action_done (ehci, TIMER_ASYNC_SHRINK);

rescan:
	qh = ehci->async->qh_next.qh;
	if (likely (qh != NULL)) {
		do {
			/* clean any finished work for this qh */
			if (!list_empty (&qh->qtd_list)
					&& qh->stamp != ehci->stamp) {
				int temp;

				/* unlinks could happen here; completion
				 * reporting drops the lock.  rescan using
				 * the latest schedule, but don't rescan
				 * qhs we already finished (no looping).
				 */
				qh = qh_get (qh);
				qh->stamp = ehci->stamp;
				temp = qh_completions (ehci, qh);
				qh_put (qh);
				if (temp != 0)
					goto rescan;
			}

			/* unlink idle entries, reducing HC PCI usage as well
			 * as HCD schedule-scanning costs.  delay for any qh
			 * we just scanned, there's a not-unusual case that it
			 * doesn't stay idle for long.
			 * (plus, avoids some kind of re-activation race.)
			 */
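			/* (editorial note: ehci->reclaim tracks the single
			 * in-flight IAA unlink, hence the !ehci->reclaim
			 * test below; a qh that went idle during this very
			 * pass still carries the current stamp and is left
			 * to the ASYNC_SHRINK timer instead.)
			 */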
			if (list_empty (&qh->qtd_list)) {
				if (qh->stamp == ehci->stamp)
					action = TIMER_ASYNC_SHRINK;
				else if (!ehci->reclaim
						&& qh->qh_state == QH_STATE_LINKED)
					start_unlink_async (ehci, qh);
			}

			qh = qh->qh_next.qh;
		} while (qh);
	}
	if (action == TIMER_ASYNC_SHRINK)
		timer_action (ehci, TIMER_ASYNC_SHRINK);
}
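/*-------------------------------------------------------------------------*/

/* (editorial summary, inferred from this excerpt: submit_async() queues
 * qtds behind the qh's dummy via qh_append_tds(), links a still-idle qh
 * onto the async ring with qh_link_async(), and completions are reaped
 * by scan_async(); start_unlink_async()/end_unlink_async() use the
 * CMD_IAAD "interrupt on async advance" doorbell to detach a qh safely.
 *
 * A minimal caller sketch, NOT part of this file -- the qtd-list builder
 * whose tail appears at the top of this excerpt is assumed to be the
 * usual qh_urb_transaction(), with the allocation flags dropped as in
 * the rest of this port:
 *
 *	struct list_head head;
 *
 *	INIT_LIST_HEAD (&head);
 *	if (!qh_urb_transaction (ehci, urb, &head))	// assumed signature
 *		return -ENOMEM;
 *	return submit_async (ehci, ep, urb, &head);
 * )
 */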