/*
 * musb_host.c
 * Source: "omap3 linux 2.6, redundant code stripped with nocc" — C code,
 * 1,976 lines total, page 1 of 4.
 */
if (head) { idle = list_empty(head); list_add_tail(&qh->ring, head); goto success; } /* else, periodic transfers get muxed to other endpoints */ /* FIXME this doesn't consider direction, so it can only * work for one half of the endpoint hardware, and assumes * the previous cases handled all non-shared endpoints... */ /* we know this qh hasn't been scheduled, so all we need to do * is choose which hardware endpoint to put it on ... * * REVISIT what we really want here is a regular schedule tree * like e.g. OHCI uses, but for now musb->periodic is just an * array of the _single_ logical endpoint associated with a * given physical one (identity mapping logical->physical). * * that simplistic approach makes TT scheduling a lot simpler; * there is none, and thus none of its complexity... */ wBestDiff = 4096; nBestEnd = -1; for (nEnd = 1; nEnd < musb->bEndCount; nEnd++) { int diff; if (musb->periodic[nEnd]) continue; hw_ep = &musb->aLocalEnd[nEnd]; if (hw_ep == musb->bulk_ep) continue; if (is_in) diff = hw_ep->wMaxPacketSizeRx - qh->maxpacket; else diff = hw_ep->wMaxPacketSizeTx - qh->maxpacket; if (diff > 0 && wBestDiff > diff) { wBestDiff = diff; nBestEnd = nEnd; } } if (nBestEnd < 0) return -ENOSPC; idle = 1; hw_ep = musb->aLocalEnd + nBestEnd; musb->periodic[nBestEnd] = qh; DBG(4, "qh %p periodic slot %d\n", qh, nBestEnd);success: qh->hw_ep = hw_ep; qh->hep->hcpriv = qh; if (idle) musb_start_urb(musb, is_in, qh); return 0;}static int musb_urb_enqueue( struct usb_hcd *hcd, struct usb_host_endpoint *hep, struct urb *urb, gfp_t mem_flags){ unsigned long flags; struct musb *musb = hcd_to_musb(hcd); struct musb_qh *qh = hep->hcpriv; struct usb_endpoint_descriptor *epd = &hep->desc; int status; unsigned type_reg; unsigned interval; /* host role must be active */ if (!is_host_active(musb) || !musb->is_active) return -ENODEV; /* DMA mapping was already done, if needed, and this urb is on * hep->urb_list ... 
so there's little to do unless hep wasn't * yet scheduled onto a live qh. * * REVISIT best to keep hep->hcpriv valid until the endpoint gets * disabled, testing for empty qh->ring and avoiding qh setup costs * except for the first urb queued after a config change. */ if (qh) { urb->hcpriv = qh; return 0; } /* Allocate and initialize qh, minimizing the work done each time * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it. * * REVISIT consider a dedicated qh kmem_cache, so it's harder * for bugs in other kernel code to break this driver... */ qh = kzalloc(sizeof *qh, mem_flags); if (!qh) return -ENOMEM; qh->hep = hep; qh->dev = urb->dev; INIT_LIST_HEAD(&qh->ring); qh->is_ready = 1; qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize); /* no high bandwidth support yet */ if (qh->maxpacket & ~0x7ff) { status = -EMSGSIZE; goto done; } qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ qh->addr_reg = (u8) usb_pipedevice(urb->pipe); /* precompute rxtype/txtype/type0 register */ type_reg = (qh->type << 4) | qh->epnum; switch (urb->dev->speed) { case USB_SPEED_LOW: type_reg |= 0xc0; break; case USB_SPEED_FULL: type_reg |= 0x80; break; default: type_reg |= 0x40; } qh->type_reg = type_reg; /* precompute rxinterval/txinterval register */ interval = min((u8)16, epd->bInterval); /* log encoding */ switch (qh->type) { case USB_ENDPOINT_XFER_INT: /* fullspeed uses linear encoding */ if (USB_SPEED_FULL == urb->dev->speed) { interval = epd->bInterval; if (!interval) interval = 1; } /* FALLTHROUGH */ case USB_ENDPOINT_XFER_ISOC: /* iso always uses log encoding */ break; default: /* REVISIT we actually want to use NAK limits, hinting to the * transfer scheduling logic to try some other qh, e.g. try * for 2 msec first: * * interval = (USB_SPEED_HIGH == pUrb->dev->speed) ? 
16 : 2; * * The downside of disabling this is that transfer scheduling * gets VERY unfair for nonperiodic transfers; a misbehaving * peripheral could make that hurt. Or for reads, one that's * perfectly normal: network and other drivers keep reads * posted at all times, having one pending for a week should * be perfectly safe. * * The upside of disabling it is avoidng transfer scheduling * code to put this aside for while. */ interval = 0; } qh->intv_reg = interval; /* precompute addressing for external hub/tt ports */ if (musb->bIsMultipoint) { struct usb_device *parent = urb->dev->parent; if (parent != hcd->self.root_hub) { qh->h_addr_reg = (u8) parent->devnum; /* set up tt info if needed */ if (urb->dev->tt) { qh->h_port_reg = (u8) urb->dev->ttport; qh->h_addr_reg |= 0x80; } } } /* invariant: hep->hcpriv is null OR the qh that's already scheduled. * until we get real dma queues (with an entry for each urb/buffer), * we only have work to do in the former case. */ spin_lock_irqsave(&musb->Lock, flags); if (hep->hcpriv) { /* some concurrent activity submitted another urb to hep... * odd, rare, error prone, but legal. */ kfree(qh); status = 0; } else status = musb_schedule(musb, qh, epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK); if (status == 0) { urb->hcpriv = qh; /* FIXME set urb->start_frame for iso/intr, it's tested in * musb_start_urb(), but otherwise only konicawc cares ... */ } spin_unlock_irqrestore(&musb->Lock, flags);done: if (status != 0) kfree(qh); return status;}/* * abort a transfer that's at the head of a hardware queue. 
 * called with controller locked, irqs blocked
 * that hardware queue advances to the next transfer, unless prevented
 *
 * Aborts any in-flight DMA for the urb, flushes the endpoint FIFO and
 * clears its control bits, then (on success) advances the schedule.
 * Returns 0 or the DMA abort status.
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->bLocalEnd;
	void __iomem		*regs = ep->musb->pRegs;
	u16			csr;
	int			status = 0;

	MGC_SelectEnd(regs, hw_end);

	if (is_dma_capable()) {
		struct dma_channel	*dma;

		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->pDmaController->channel_abort(dma);
			DBG(status ? 1 : 3,
				"abort %cX%d DMA for urb %p --> %d\n",
				is_in ? 'R' : 'T', ep->bLocalEnd,
				urb, status);
			/* credit whatever the DMA engine moved before abort */
			urb->actual_length += dma->dwActualLength;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* REVISIT we still get an irq; should likely clear the
		 * endpoint's irq status here to avoid bogus irqs.
		 * clearing that status is platform-specific...
		 */
	} else {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MGC_O_HDRC_TXCSR);
		csr &= ~( MGC_M_TXCSR_AUTOSET
			| MGC_M_TXCSR_DMAENAB
			| MGC_M_TXCSR_H_RXSTALL
			| MGC_M_TXCSR_H_NAKTIMEOUT
			| MGC_M_TXCSR_H_ERROR
			| MGC_M_TXCSR_FIFONOTEMPTY
			| MGC_M_TXCSR_TXPKTRDY
			);
		musb_writew(epio, MGC_O_HDRC_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MGC_O_HDRC_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MGC_O_HDRC_TXCSR);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}

/*
 * Unlink an urb: give it back immediately if it isn't programmed into
 * the endpoint hardware, otherwise synchronously abort the transfer.
 */
static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct list_head	*sched;
	struct urb		*tmp;
	unsigned long		flags;
	int			status = -ENOENT;

	DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ?
"in" : "out");

	spin_lock_irqsave(&musb->Lock, flags);

	/* make sure the urb is still queued and not completed */
	spin_lock(&urb->lock);
	qh = urb->hcpriv;
	if (qh) {
		struct usb_host_endpoint	*hep;

		hep = qh->hep;
		/* status becomes 0 only if urb is still on hep's queue */
		list_for_each_entry(tmp, &hep->urb_list, urb_list) {
			if (urb == tmp) {
				status = 0;
				break;
			}
		}
	}
	spin_unlock(&urb->lock);

	/* already completed */
	if (!qh) {
		status = 0;
		goto done;
	}

	/* still queued but not found on the list */
	if (status)
		goto done;

	/* Any URB not actively programmed into endpoint hardware can be
	 * immediately given back.  Such an URB must be at the head of its
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * then, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending dma, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 */
	if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
		status = -EINPROGRESS;
	else {
		switch (qh->type) {
		case USB_ENDPOINT_XFER_CONTROL:
			sched = &musb->control;
			break;
		case USB_ENDPOINT_XFER_BULK:
			if (usb_pipein(urb->pipe))
				sched = &musb->in_bulk;
			else
				sched = &musb->out_bulk;
			break;
		default:
			/* REVISIT when we get a schedule tree, periodic
			 * transfers won't always be at the head of a
			 * singleton queue...
			 */
			sched = NULL;
			break;
		}
	}

	/* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
	/* sched is only read when status >= 0, i.e. after the else branch
	 * above set it; the short-circuit keeps the -EINPROGRESS path from
	 * touching it uninitialized.
	 */
	if (status < 0 || (sched && qh != first_qh(sched))) {
		int	ready = qh->is_ready;

		/* not at the hardware queue head: just unlink and give back,
		 * briefly clearing is_ready so giveback won't restart I/O
		 */
		status = 0;
		qh->is_ready = 0;
		__musb_giveback(musb, urb, 0);
		qh->is_ready = ready;
	} else
		status = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
done:
	spin_unlock_irqrestore(&musb->Lock, flags);
	return status;
}

/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			epnum = hep->desc.bEndpointAddress;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	u8			is_in = epnum & USB_DIR_IN;
	struct musb_qh		*qh = hep->hcpriv;
	struct urb		*urb, *tmp;
	struct list_head	*sched;

	/* no qh means nothing was ever scheduled on this endpoint */
	if (!qh)
		return;

	DBG(3, "Disable host ep %d\n", epnum);

	spin_lock_irqsave(&musb->Lock, flags);

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		sched = &musb->control;
		break;
	case USB_ENDPOINT_XFER_BULK:
		if (is_in)
			sched = &musb->in_bulk;
		else
			sched = &musb->out_bulk;
		break;
	default:
		/* REVISIT when we get a schedule tree, periodic transfers
		 * won't always be at the head of a singleton queue...
*/ sched = NULL; break; } /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ /* kick first urb off the hardware, if needed */ qh->is_ready = 0; if (!sched || qh == first_qh(sched)) { urb = next_urb(qh); /* make software (then hardware) stop ASAP */ spin_lock(&urb->lock); if (urb->status == -EINPROGRESS) urb->status = -ESHUTDOWN; spin_unlock(&urb->lock); /* cleanup */ musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); } else urb = NULL; /* then just nuke all the others */ list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list) musb_giveback(qh, urb, -ESHUTDOWN); spin_unlock_irqrestore(&musb->Lock, flags);}static int musb_h_get_frame_number(struct usb_hcd *hcd){ struct musb *musb = hcd_to_musb(hcd); return musb_readw(musb->pRegs, MGC_O_HDRC_FRAME);}static int musb_h_start(struct usb_hcd *hcd){ struct musb *musb = hcd_to_musb(hcd); /* NOTE: musb_start() is called when the hub driver turns * on port power, or when (OTG) peripheral starts. */ hcd->state = HC_STATE_RUNNING; musb->port1_status = 0; return 0;}static void musb_h_stop(struct usb_hcd *hcd){ musb_stop(hcd_to_musb(hcd)); hcd->state = HC_STATE_HALT;}static int musb_bus_suspend(struct usb_hcd *hcd){ struct musb *musb = hcd_to_musb(hcd); if (is_host_active(musb) && musb->is_active) return -EBUSY; else return 0;}static int musb_bus_resume(struct usb_hcd *hcd){ /* resuming child port does the work */ return 0;}const struct hc_driver musb_hc_driver = { .description = "musb-hcd", .product_desc = "MUSB HDRC host driver", .hcd_priv_size = sizeof (struct musb), .flags = HCD_USB2 | HCD_MEMORY, /* not using irq handler or reset hooks from usbcore, since * those must be shared with peripheral code for OTG configs */ .start = musb_h_start, .stop = musb_h_stop, .get_frame_number = musb_h_get_frame_number, .urb_enqueue = musb_urb_enqueue, .urb_dequeue = musb_urb_dequeue, .endpoint_disable = musb_h_disable, .hub_status_data = musb_hub_status_data, .hub_control = musb_hub_control, .bus_suspend = 
musb_bus_suspend, .bus_resume = musb_bus_resume,// .start_port_reset = NULL,// .hub_irq_enable = NULL,};
/*
 * (Code-viewer page chrome, not part of the driver source:
 *  keyboard shortcuts — copy Ctrl+C, search Ctrl+F, fullscreen F11,
 *  larger font Ctrl+=, smaller font Ctrl+-, show shortcuts ?)
 */