ohci-q.c
	ohci_vdbg (ohci,
		"urb %p iso td %p (%d) len %d cc %d\n",
		urb, td, 1 + td->index, dlen, cc);

	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	} else {
		int	type = usb_pipetype (urb->pipe);
		u32	tdBE = hc32_to_cpup (ohci, &td->hwBE);

		cc = TD_CC_GET (tdINFO);

		/* update packet status if needed (short is normally ok) */
		if (cc == TD_DATAUNDERRUN
				&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
			cc = TD_CC_NOERROR;
		if (cc != TD_CC_NOERROR && cc < 0x0E) {
			spin_lock (&urb->lock);
			if (urb->status == -EINPROGRESS)
				urb->status = cc_to_error [cc];
			spin_unlock (&urb->lock);
		}

		/* count all non-empty packets except control SETUP packet */
		if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
			if (td->hwCBP == 0)
				urb->actual_length += tdBE - td->data_dma + 1;
			else
				urb->actual_length +=
					  hc32_to_cpup (ohci, &td->hwCBP)
					- td->data_dma;
		}

		if (cc != TD_CC_NOERROR && cc < 0x0E)
			ohci_vdbg (ohci,
				"urb %p td %p (%d) cc %d, len=%d/%d\n",
				urb, td, 1 + td->index, cc,
				urb->actual_length,
				urb->transfer_buffer_length);
	}
}

/*-------------------------------------------------------------------------*/

static inline struct td *
ed_halted (struct ohci_hcd *ohci, struct td *td, int cc, struct td *rev)
{
	struct urb		*urb = td->urb;
	struct ed		*ed = td->ed;
	struct list_head	*tmp = td->td_list.next;
	__hc32			toggle = ed->hwHeadP & cpu_to_hc32 (ohci, ED_C);

	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
	wmb ();
	ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_H);

	/* put any later tds from this urb onto the donelist, after 'td',
	 * order won't matter here: no errors, and nothing was transferred.
	 * also patch the ed so it looks as if those tds completed normally.
	 */
	while (tmp != &ed->td_list) {
		struct td	*next;
		__hc32		info;

		next = list_entry (tmp, struct td, td_list);
		tmp = next->td_list.next;

		if (next->urb != urb)
			break;

		/* NOTE: if multi-td control DATA segments get supported,
		 * this urb had one of them, this td wasn't the last td
		 * in that segment (TD_R clear), this ed halted because
		 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
		 * then we need to leave the control STATUS packet queued
		 * and clear ED_SKIP.
		 */
		info = next->hwINFO;
		info |= cpu_to_hc32 (ohci, TD_DONE);
		info &= ~cpu_to_hc32 (ohci, TD_CC);
		next->hwINFO = info;

		next->next_dl_td = rev;
		rev = next;

		ed->hwHeadP = next->hwNextTD | toggle;
	}

	/* help for troubleshooting:  report anything that
	 * looks odd ... that doesn't include protocol stalls
	 * (or maybe some other things)
	 */
	switch (cc) {
	case TD_DATAUNDERRUN:
		if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
			break;
		/* fallthrough */
	case TD_CC_STALL:
		if (usb_pipecontrol (urb->pipe))
			break;
		/* fallthrough */
	default:
		ohci_dbg (ohci,
			"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
			urb, urb->dev->devpath,
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			hc32_to_cpu (ohci, td->hwINFO),
			cc, cc_to_error [cc]);
	}

	return rev;
}

/* replies to the request have to be on a FIFO basis so
 * we unreverse the hc-reversed done-list
 */
static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
{
	u32		td_dma;
	struct td	*td_rev = NULL;
	struct td	*td = NULL;

	td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head);
	ohci->hcca->done_head = 0;
	wmb();

	/* get TD from hc's singly linked list, and
	 * prepend to ours.  ed->td_list changes later.
	 */
	while (td_dma) {
		int		cc;

		td = dma_to_td (ohci, td_dma);
		if (!td) {
			ohci_err (ohci, "bad entry %8x\n", td_dma);
			break;
		}

		td->hwINFO |= cpu_to_hc32 (ohci, TD_DONE);
		cc = TD_CC_GET (hc32_to_cpup (ohci, &td->hwINFO));

		/* Non-iso endpoints can halt on error; un-halt,
		 * and dequeue any other TDs from this urb.
		 * No other TD could have caused the halt.
		 */
		if (cc != TD_CC_NOERROR
				&& (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
			td_rev = ed_halted (ohci, td, cc, td_rev);

		td->next_dl_td = td_rev;
		td_rev = td;
		td_dma = hc32_to_cpup (ohci, &td->hwNextTD);
	}
	return td_rev;
}

/*-------------------------------------------------------------------------*/

/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void
finish_unlinks (struct ohci_hcd *ohci, u16 tick, struct pt_regs *regs)
{
	struct ed	*ed, **last;

rescan_all:
	for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
		struct list_head	*entry, *tmp;
		int			completed, modified;
		__hc32			*prev;

		/* only take off EDs that the HC isn't using, accounting for
		 * frame counter wraps and EDs with partially retired TDs
		 */
		if (likely (regs && HC_IS_RUNNING(ohci_to_hcd(ohci)->state))) {
			if (tick_before (tick, ed->tick)) {
skip_ed:
				last = &ed->ed_next;
				continue;
			}

			if (!list_empty (&ed->td_list)) {
				struct td	*td;
				u32		head;

				td = list_entry (ed->td_list.next, struct td,
							td_list);
				head = hc32_to_cpu (ohci, ed->hwHeadP) &
								TD_MASK;

				/* INTR_WDH may need to clean up first */
				if (td->td_dma != head)
					goto skip_ed;
			}
		}

		/* reentrancy:  if we drop the schedule lock, someone might
		 * have modified this list.  normally it's just prepending
		 * entries (which we'd ignore), but paranoia won't hurt.
		 */
		*last = ed->ed_next;
		ed->ed_next = NULL;
		modified = 0;

		/* unlink urbs as requested, but rescan the list after
		 * we call a completion since it might have unlinked
		 * another (earlier) urb
		 *
		 * When we get here, the HC doesn't see this ed.  But it
		 * must not be rescheduled until all completed URBs have
		 * been given back to the driver.
		 */
rescan_this:
		completed = 0;
		prev = &ed->hwHeadP;
		list_for_each_safe (entry, tmp, &ed->td_list) {
			struct td	*td;
			struct urb	*urb;
			urb_priv_t	*urb_priv;
			__hc32		savebits;

			td = list_entry (entry, struct td, td_list);
			urb = td->urb;
			urb_priv = td->urb->hcpriv;

			if (urb->status == -EINPROGRESS) {
				prev = &td->hwNextTD;
				continue;
			}

			/* patch pointer hc uses */
			savebits = *prev & ~cpu_to_hc32 (ohci, TD_MASK);
			*prev = td->hwNextTD | savebits;

			/* HC may have partly processed this TD */
			td_done (ohci, urb, td);
			urb_priv->td_cnt++;

			/* if URB is done, clean up */
			if (urb_priv->td_cnt == urb_priv->length) {
				modified = completed = 1;
				finish_urb (ohci, urb, regs);
			}
		}
		if (completed && !list_empty (&ed->td_list))
			goto rescan_this;

		/* ED's now officially unlinked, hc doesn't see */
		ed->state = ED_IDLE;
		ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
		ed->hwNextED = 0;
		wmb ();
		ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE);

		/* but if there's work queued, reschedule */
		if (!list_empty (&ed->td_list)) {
			if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))
				ed_schedule (ohci, ed);
		}

		if (modified)
			goto rescan_all;
	}

	/* maybe reenable control and bulk lists */
	if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state)
			&& ohci_to_hcd(ohci)->state != HC_STATE_QUIESCING
			&& !ohci->ed_rm_list) {
		u32	command = 0, control = 0;

		if (ohci->ed_controltail) {
			command |= OHCI_CLF;
			if (ohci->flags & OHCI_QUIRK_ZFMICRO)
				mdelay(1);
			if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
				control |= OHCI_CTRL_CLE;
				ohci_writel (ohci, 0,
					&ohci->regs->ed_controlcurrent);
			}
		}
		if (ohci->ed_bulktail) {
			command |= OHCI_BLF;
			if (ohci->flags & OHCI_QUIRK_ZFMICRO)
				mdelay(1);
			if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
				control |= OHCI_CTRL_BLE;
				ohci_writel (ohci, 0,
					&ohci->regs->ed_bulkcurrent);
			}
		}

		/* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
		if (control) {
			ohci->hc_control |= control;
			if (ohci->flags & OHCI_QUIRK_ZFMICRO)
				mdelay(1);
			ohci_writel (ohci, ohci->hc_control,
					&ohci->regs->control);
		}
		if (command) {
			if (ohci->flags & OHCI_QUIRK_ZFMICRO)
				mdelay(1);
			ohci_writel (ohci, command, &ohci->regs->cmdstatus);
		}
	}
}

/*-------------------------------------------------------------------------*/

/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers.  The only other
 * path is finish_unlinks(), which unlinks URBs using ed_rm_list, instead of
 * scanning the (re-reversed) donelist as this does.
 */
static void
dl_done_list (struct ohci_hcd *ohci, struct pt_regs *regs)
{
	struct td	*td = dl_reverse_done_list (ohci);

	while (td) {
		struct td	*td_next = td->next_dl_td;
		struct urb	*urb = td->urb;
		urb_priv_t	*urb_priv = urb->hcpriv;
		struct ed	*ed = td->ed;

		/* update URB's length and status from TD */
		td_done (ohci, urb, td);
		urb_priv->td_cnt++;

		/* If all this urb's TDs are done, call complete() */
		if (urb_priv->td_cnt == urb_priv->length)
			finish_urb (ohci, urb, regs);

		/* clean schedule:  unlink EDs that are no longer busy */
		if (list_empty (&ed->td_list)) {
			if (ed->state == ED_OPER)
				start_ed_unlink (ohci, ed);

		/* ... reenabling halted EDs only after fault cleanup */
		} else if ((ed->hwINFO & cpu_to_hc32 (ohci,
						ED_SKIP | ED_DEQUEUE))
					== cpu_to_hc32 (ohci, ED_SKIP)) {
			td = list_entry (ed->td_list.next,
					struct td, td_list);
			if (!(td->hwINFO & cpu_to_hc32 (ohci, TD_DONE))) {
				ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP);
				/* ... hc may need waking-up */
				switch (ed->type) {
				case PIPE_CONTROL:
					ohci_writel (ohci, OHCI_CLF,
						&ohci->regs->cmdstatus);
					break;
				case PIPE_BULK:
					ohci_writel (ohci, OHCI_BLF,
						&ohci->regs->cmdstatus);
					break;
				}
			}
		}

		td = td_next;
	}
}
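The byte accounting in td_done() above follows from how an OHCI TD reports buffer progress: hwBE holds the DMA address of the last byte of the buffer, and the controller zeroes hwCBP when it consumed the whole buffer, otherwise hwCBP points at the first byte it did not transfer. A minimal sketch of that arithmetic, with illustrative names only (this helper is not part of the driver):

/* Sketch of the transfer-length math used by td_done(), assuming the
 * OHCI convention described above.  Names are made up for illustration.
 */
#include <stdint.h>

static uint32_t td_bytes_transferred(uint32_t data_dma, uint32_t be,
				     uint32_t cbp)
{
	if (cbp == 0)
		return be - data_dma + 1;	/* whole buffer transferred */
	return cbp - data_dma;			/* short transfer */
}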
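As the comment above dl_reverse_done_list() notes, the done list handed back through the HCCA is in the reverse of submission order, while completions must be delivered FIFO; the function therefore prepends each retired TD onto a software list as it walks the hardware list, which reverses the order. The same prepend-to-reverse pattern, reduced to a self-contained sketch with made-up names (the real code walks DMA addresses via dma_to_td() rather than plain pointers):

/* Standalone illustration of reversing a singly linked list by
 * prepending each node onto a new head, as dl_reverse_done_list() does
 * with TDs.  Not driver code; struct and function names are invented.
 */
#include <stddef.h>
#include <stdio.h>

struct node {
	int value;
	struct node *next;
};

/* Return the list with its order reversed (LIFO -> FIFO). */
static struct node *reverse(struct node *head)
{
	struct node *rev = NULL;

	while (head) {
		struct node *next = head->next;

		head->next = rev;	/* prepend onto the new list */
		rev = head;
		head = next;
	}
	return rev;
}

int main(void)
{
	/* "done list" as the controller would hand it back: 3, 2, 1 */
	struct node c = { 1, NULL }, b = { 2, &c }, a = { 3, &b };
	struct node *n;

	for (n = reverse(&a); n; n = n->next)
		printf("%d\n", n->value);	/* prints 1, 2, 3 */
	return 0;
}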