musb_gadget.c
来自「omap3 linux 2.6 用nocc去除了冗余代码」· C语言 代码 · 共 1,834 行 · 第 1/4 页
C
1,834 行
wFifoCount = pRequest->length - pRequest->actual;
				DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
						pEnd->end_point.name,
						wCount, wFifoCount,
						pEnd->wPacketSize);

				/* PIO path: copy at most one packet (and no more than
				 * the request has room for) out of the FIFO.
				 */
				wFifoCount = min(wCount, wFifoCount);
				musb_read_fifo(pEnd->hw_ep, wFifoCount,
						(u8 *) (pRequest->buf + pRequest->actual));
				pRequest->actual += wFifoCount;

				/* REVISIT if we left anything in the fifo, flush
				 * it and report -EOVERFLOW
				 */

				/* ack the read!  RXPKTRDY is cleared while the other
				 * write-1-to-clear bits are preserved via WZC_BITS.
				 */
				wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
				wCsrVal &= ~MGC_M_RXCSR_RXPKTRDY;
				musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
			}
	}

	/* reach the end or short packet detected */
	if (pRequest->actual == pRequest->length
			|| wCount < pEnd->wPacketSize)
		musb_g_giveback(pEnd, pRequest, 0);
}

/*
 * Data ready for a request; called from IRQ.
 *
 * OUT/RX endpoint interrupt handler: acknowledges stall/overrun status,
 * retires a completed DMA transfer if one just finished, and (re)starts
 * PIO/DMA for the next queued request via rxstate().
 */
void musb_g_rx(struct musb *musb, u8 bEnd)
{
	u16 wCsrVal;
	struct usb_request *pRequest;
	void __iomem *pBase = musb->pRegs;
	struct musb_ep *pEnd = &musb->aLocalEnd[bEnd].ep_out;
	void __iomem *epio = musb->aLocalEnd[bEnd].regs;
	struct dma_channel *dma;

	MGC_SelectEnd(pBase, bEnd);

	pRequest = next_request(pEnd);

	wCsrVal = musb_readw(epio, MGC_O_HDRC_RXCSR);
	dma = is_dma_capable() ? pEnd->dma : NULL;

	DBG(4, "<== %s, rxcsr %04x%s %p\n", pEnd->end_point.name,
			wCsrVal, dma ? " (dma)" : "", pRequest);

	if (wCsrVal & MGC_M_RXCSR_P_SENTSTALL) {
		/* Controller reported the stall handshake was sent: abort any
		 * DMA still in flight, clear the status bit, and fail the
		 * current request with -EPIPE.
		 */
		if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
			dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
			(void) musb->pDmaController->channel_abort(dma);
			/* NOTE(review): pRequest is dereferenced here but only
			 * NULL-checked below; presumably a busy DMA channel
			 * implies a queued request — confirm.
			 */
			pRequest->actual += pEnd->dma->dwActualLength;
		}

		wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
		wCsrVal &= ~MGC_M_RXCSR_P_SENTSTALL;
		musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);

		if (pRequest)
			musb_g_giveback(pEnd, pRequest, -EPIPE);
		goto done;
	}

	if (wCsrVal & MGC_M_RXCSR_P_OVERRUN) {
		/* iso OUT data arrived faster than it was consumed */
		// wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
		wCsrVal &= ~MGC_M_RXCSR_P_OVERRUN;
		musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);

		DBG(3, "%s iso overrun on %p\n", pEnd->name, pRequest);
		if (pRequest && pRequest->status == -EINPROGRESS)
			pRequest->status = -EOVERFLOW;
	}

	if (wCsrVal & MGC_M_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		DBG(4, "%s, incomprx\n", pEnd->end_point.name);
	}

	if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		DBG((wCsrVal & MGC_M_RXCSR_DMAENAB) ? 4 : 1,
			"%s busy, csr %04x\n",
			pEnd->end_point.name, wCsrVal);
		goto done;
	}

	if (dma && (wCsrVal & MGC_M_RXCSR_DMAENAB)) {
		/* A DMA transfer just completed: disable DMA on this
		 * endpoint, account the transferred bytes, and retire
		 * the request.
		 */
		wCsrVal &= ~(MGC_M_RXCSR_AUTOCLEAR
				| MGC_M_RXCSR_DMAENAB
				| MGC_M_RXCSR_DMAMODE);
		musb_writew(epio, MGC_O_HDRC_RXCSR,
			MGC_M_RXCSR_P_WZC_BITS | wCsrVal);

		pRequest->actual += pEnd->dma->dwActualLength;

		DBG(4, "RXCSR%d %04x, dma off, %04x, len %Zd, req %p\n",
			bEnd, wCsrVal,
			musb_readw(epio, MGC_O_HDRC_RXCSR),
			pEnd->dma->dwActualLength, pRequest);

		musb_g_giveback(pEnd, pRequest, 0);

		pRequest = next_request(pEnd);
		if (!pRequest)
			goto done;

		/* don't start more i/o till the stall clears */
		MGC_SelectEnd(pBase, bEnd);
		wCsrVal = musb_readw(epio, MGC_O_HDRC_RXCSR);
		if (wCsrVal & MGC_M_RXCSR_P_SENDSTALL)
			goto done;
	}

	/* analyze request if the ep is hot */
	if (pRequest)
		rxstate(musb, to_musb_request(pRequest));
	else
		DBG(3, "packet waiting for %s%s request\n",
				pEnd->desc ? "" : "inactive ",
				pEnd->end_point.name);

done:
	return;
}

/* ------------------------------------------------------------ */

/*
 * Enable an endpoint for peripheral-side use: validate the descriptor
 * against this hardware endpoint, program the max-packet size and CSR
 * mode bits, unmask the endpoint interrupt, and try to allocate a DMA
 * channel (PIO is used if none is available).
 *
 * Takes musb->Lock internally.  Returns 0 on success, -EBUSY if already
 * enabled, -EINVAL for a descriptor that doesn't fit this endpoint.
 */
static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long flags;
	struct musb_ep *pEnd;
	struct musb_hw_ep *hw_ep;
	void __iomem *regs;
	struct musb *musb;
	void __iomem *pBase;
	u8 bEnd;
	u16 csr;
	unsigned tmp;
	int status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	pEnd = to_musb_ep(ep);
	hw_ep = pEnd->hw_ep;
	regs = hw_ep->regs;
	musb = pEnd->pThis;
	pBase = musb->pRegs;
	bEnd = pEnd->bEndNumber;

	spin_lock_irqsave(&musb->Lock, flags);

	if (pEnd->desc) {
		status = -EBUSY;
		goto fail;
	}
	pEnd->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	/* check direction and (later) maxpacket size against endpoint */
	if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != bEnd)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = le16_to_cpu(desc->wMaxPacketSize);
	if (tmp & ~0x07ff)
		goto fail;
	pEnd->wPacketSize = tmp;

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	MGC_SelectEnd(pBase, bEnd);
	if (desc->bEndpointAddress & USB_DIR_IN) {
		u16 wIntrTxE = musb_readw(pBase, MGC_O_HDRC_INTRTXE);

		/* a shared FIFO serves only one direction at a time */
		if (hw_ep->bIsSharedFifo)
			pEnd->is_in = 1;
		if (!pEnd->is_in)
			goto fail;
		if (tmp > hw_ep->wMaxPacketSizeTx)
			goto fail;

		wIntrTxE |= (1 << bEnd);
		musb_writew(pBase, MGC_O_HDRC_INTRTXE, wIntrTxE);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		musb_writew(regs, MGC_O_HDRC_TXMAXP, tmp);

		csr = MGC_M_TXCSR_MODE | MGC_M_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MGC_O_HDRC_TXCSR)
				& MGC_M_TXCSR_FIFONOTEMPTY)
			csr |= MGC_M_TXCSR_FLUSHFIFO;
		if (pEnd->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MGC_M_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MGC_O_HDRC_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MGC_O_HDRC_TXCSR, csr);

	} else {
		u16 wIntrRxE = musb_readw(pBase, MGC_O_HDRC_INTRRXE);

		/* a shared FIFO serves only one direction at a time */
		if (hw_ep->bIsSharedFifo)
			pEnd->is_in = 0;
		if (pEnd->is_in)
			goto fail;
		if (tmp > hw_ep->wMaxPacketSizeRx)
			goto fail;

		wIntrRxE |= (1 << bEnd);
		musb_writew(pBase, MGC_O_HDRC_INTRRXE, wIntrRxE);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		musb_writew(regs, MGC_O_HDRC_RXMAXP, tmp);

		/* force shared fifo to OUT-only mode */
		if (hw_ep->bIsSharedFifo) {
			csr = musb_readw(regs, MGC_O_HDRC_TXCSR);
			csr &= ~(MGC_M_TXCSR_MODE | MGC_M_TXCSR_TXPKTRDY);
			musb_writew(regs, MGC_O_HDRC_TXCSR, csr);
		}

		csr = MGC_M_RXCSR_FLUSHFIFO | MGC_M_RXCSR_CLRDATATOG;
		if (pEnd->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MGC_M_RXCSR_P_ISO;
		else if (pEnd->type == USB_ENDPOINT_XFER_INT)
			csr |= MGC_M_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MGC_O_HDRC_RXCSR, csr);
		musb_writew(regs, MGC_O_HDRC_RXCSR, csr);
	}

	/* NOTE: all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->pDmaController) {
		struct dma_controller *c = musb->pDmaController;

		pEnd->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		pEnd->dma = NULL;

	pEnd->desc = desc;
	pEnd->busy = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, pEnd->end_point.name,
			({ char *s; switch (pEnd->type) {
			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
			default:			s = "iso"; break;
			}; s; }),
			pEnd->is_in ? "IN" : "OUT",
			pEnd->dma ? "dma, " : "",
			pEnd->wPacketSize);

	schedule_work(&musb->irq_work);

fail:
	spin_unlock_irqrestore(&musb->Lock, flags);
	return status;
}

/*
 * Disable an endpoint flushing all requests queued.
*/static int musb_gadget_disable(struct usb_ep *ep){ unsigned long flags; struct musb *musb; u8 bEnd; struct musb_ep *pEnd; void __iomem *epio; int status = 0; pEnd = to_musb_ep(ep); musb = pEnd->pThis; bEnd = pEnd->bEndNumber; epio = musb->aLocalEnd[bEnd].regs; pEnd->desc = NULL; if (musb->asleep) { /* this condition arises when the musb driver suspend calls stop_activity * which then calls the gadget's disconnect(). * stop_activity will have called nuke() for the endpoints already. * pEnd->desc = NULL has to be done before bailing out, as otherwise * the endpoint will remain blocked and won't be usable again. */ return status; } spin_lock_irqsave(&musb->Lock, flags); MGC_SelectEnd(musb->pRegs, bEnd); /* zero the endpoint sizes */ if (pEnd->is_in) { u16 wIntrTxE = musb_readw(musb->pRegs, MGC_O_HDRC_INTRTXE); wIntrTxE &= ~(1 << bEnd); musb_writew(musb->pRegs, MGC_O_HDRC_INTRTXE, wIntrTxE); musb_writew(epio, MGC_O_HDRC_TXMAXP, 0); } else { u16 wIntrRxE = musb_readw(musb->pRegs, MGC_O_HDRC_INTRRXE); wIntrRxE &= ~(1 << bEnd); musb_writew(musb->pRegs, MGC_O_HDRC_INTRRXE, wIntrRxE); musb_writew(epio, MGC_O_HDRC_RXMAXP, 0); } /* abort all pending DMA and requests */ nuke(pEnd, -ESHUTDOWN); schedule_work(&musb->irq_work); spin_unlock_irqrestore(&(musb->Lock), flags); DBG(2, "%s\n", pEnd->end_point.name); return status;}/* * Allocate a request for an endpoint. * Reused by ep0 code. */struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags){ struct musb_ep *musb_ep = to_musb_ep(ep); struct musb_request *pRequest = NULL; pRequest = kzalloc(sizeof *pRequest, gfp_flags); if (pRequest) { INIT_LIST_HEAD(&pRequest->request.list); pRequest->request.dma = DMA_ADDR_INVALID; pRequest->bEnd = musb_ep->bEndNumber; pRequest->ep = musb_ep; } return &pRequest->request;}/* * Free a request * Reused by ep0 code. 
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	kfree(to_musb_request(req));
}

/*
 * dma-coherent memory allocation (for dma-capable endpoints)
 *
 * NOTE: the dma_*_coherent() API calls suck; most implementations are
 * (a) page-oriented, so small buffers lose big, and (b) asymmetric with
 * respect to calls with irqs disabled: alloc is safe, free is not.
 */
static void *musb_gadget_alloc_buffer(struct usb_ep *ep, unsigned bytes,
			dma_addr_t * dma, gfp_t gfp_flags)
{
	struct musb_ep *musb_ep = to_musb_ep(ep);

	return dma_alloc_coherent(musb_ep->pThis->controller,
			bytes, dma, gfp_flags);
}

/* Protects the "buffers" list of coherent buffers pending release. */
static DEFINE_SPINLOCK(buflock);
static LIST_HEAD(buffers);

/* Bookkeeping for a deferred dma_free_coherent() call.  The record is
 * stored inside the buffer being freed, so no extra allocation is needed.
 */
struct free_record {
	struct list_head list;
	struct device *dev;
	unsigned bytes;
	dma_addr_t dma;
};

/* Tasklet body: drain the pending-free list.  The lock is dropped around
 * each dma_free_coherent() call because freeing coherent memory is not
 * safe with IRQs disabled (see NOTE above).
 */
static void do_free(unsigned long ignored)
{
	spin_lock_irq(&buflock);
	while (!list_empty(&buffers)) {
		struct free_record *buf;

		buf = list_entry(buffers.next, struct free_record, list);
		list_del(&buf->list);
		spin_unlock_irq(&buflock);

		/* "buf" is both the record and the buffer's virtual address */
		dma_free_coherent(buf->dev, buf->bytes, buf, buf->dma);

		spin_lock_irq(&buflock);
	}
	spin_unlock_irq(&buflock);
}

static DECLARE_TASKLET(deferred_free, do_free, 0);

/* May be called with IRQs disabled, so the actual free is deferred to
 * tasklet context via the "buffers" list.
 */
static void musb_gadget_free_buffer(struct usb_ep *ep,
		void *address, dma_addr_t dma, unsigned bytes)
{
	struct musb_ep *musb_ep = to_musb_ep(ep);
	struct free_record *buf = address;
	unsigned long flags;

	buf->dev = musb_ep->pThis->controller;
	buf->bytes = bytes;
	buf->dma = dma;

	spin_lock_irqsave(&buflock, flags);
	list_add_tail(&buf->list, &buffers);
	tasklet_schedule(&deferred_free);
	spin_unlock_irqrestore(&buflock, flags);
}

/*
 * Kick off I/O for a queued request on the matching hardware endpoint.
 * Context: controller locked, IRQs blocked.
 */
static void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	DBG(3, "<== %s request %p len %u on hw_ep%d\n",
		req->bTx ? "TX/IN" : "RX/OUT",
		&req->request, req->request.length, req->bEnd);

	MGC_SelectEnd(musb->pRegs, req->bEnd);
	if (req->bTx)
		txstate(musb, req);
	else
		rxstate(musb, req);
}

static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep *pEnd;
	struct musb_request *pRequest;
	struct musb *musb;
	int status = 0;
	unsigned long lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	pEnd = to_musb_ep(ep);
	musb = pEnd->pThis;

	pRequest = to_musb_request(req);
	pRequest->musb = musb;
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?