/* musb_gadget.c
 * Provenance (translated from the code viewer's Chinese metadata):
 * from "omap3 linux 2.6, redundant code removed with nocc" - C source,
 * 1,834 lines total, page 1 of 4.
 */
/******************************************************************
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 *
 * This file is part of the Inventra Controller Driver for Linux.
 *
 * The Inventra Controller Driver for Linux is free software; you
 * can redistribute it and/or modify it under the terms of the GNU
 * General Public License version 2 as published by the Free Software
 * Foundation.
 *
 * The Inventra Controller Driver for Linux is distributed in
 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
 * License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with The Inventra Controller Driver for Linux ; if not,
 * write to the Free Software Foundation, Inc., 59 Temple Place,
 * Suite 330, Boston, MA  02111-1307  USA
 *
 * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
 * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
 * OF THOSE TERMS.THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
 * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
 * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
 * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT.  MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
 * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
 * GRAPHICS SUPPORT CUSTOMER.
 ******************************************************************/

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>
#include <linux/dma-mapping.h>

#include "musbdefs.h"

/* MUSB PERIPHERAL status 3-mar:
 *
 * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
 *   Minor glitches:
 *
 *     + remote wakeup to Linux hosts work, but saw USBCV failures;
 *       in one test run (operator error?)
 *     + endpoint halt tests -- in both usbtest and usbcv -- seem
 *       to break when dma is enabled ... is something wrongly
 *       clearing SENDSTALL?
 *
 * - Mass storage behaved ok when last tested.  Network traffic patterns
 *   (with lots of short transfers etc) need retesting; they turn up the
 *   worst cases of the DMA, since short packets are typical but are not
 *   required.
 *
 * - TX/IN
 *     + both pio and dma behave in with network and g_zero tests
 *     + no cppi throughput issues other than no-hw-queueing
 *     + failed with FLAT_REG (DaVinci)
 *     + seems to behave with double buffering, PIO -and- CPPI
 *     + with gadgetfs + AIO, requests got lost?
 *
 * - RX/OUT
 *     + both pio and dma behave in with network and g_zero tests
 *     + dma is slow in typical case (short_not_ok is clear)
 *     + double buffering ok with PIO
 *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
 *     + request lossage observed with gadgetfs
 *
 * - ISO not tested ... might work, but only weakly isochronous
 *
 * - Gadget driver disabling of softconnect during bind() is ignored; so
 *   drivers can't hold off host requests until userspace is ready.
 *   (Workaround:  they can turn it off later.)
 *
 * - PORTABILITY (assumes PIO works):
 *     + DaVinci, basically works with cppi dma
 *     + OMAP 2430, ditto with mentor dma
 *     + TUSB 6010, platform-specific dma in the works
 */

/**************************************************************************
Handling completion
**************************************************************************/

/*
 * Immediately complete a request.
 *
 * @param ep the endpoint the request was queued on
 * @param pRequest the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*pRequest,
	int			status)
__releases(ep->musb->Lock)
__acquires(ep->musb->Lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(pRequest);
	list_del(&pRequest->list);
	/* don't overwrite a status already set by the fault path;
	 * ISO endpoints additionally let -EOVERFLOW be replaced
	 */
	if (req->request.status == -EINPROGRESS
			|| (ep->type == USB_ENDPOINT_XFER_ISOC
				&& req->request.status == -EOVERFLOW))
		req->request.status = status;
	musb = req->musb;

	/* mark the endpoint busy and drop the controller lock before the
	 * completion callback runs, so the gadget driver may re-queue from
	 * inside its complete(); busy is restored on return
	 */
	ep->busy = 1;
	spin_unlock(&musb->Lock);
	if (is_dma_capable()) {
		if (req->mapped) {
			/* buffer was mapped by this driver: unmap it and
			 * invalidate the handle so it can't be reused
			 */
			dma_unmap_single(musb->controller,
					req->request.dma,
					req->request.length,
					req->bTx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
			req->request.dma = DMA_ADDR_INVALID;
			req->mapped = 0;
		} else if (req->request.dma != DMA_ADDR_INVALID)
			/* caller-mapped buffer: just hand ownership of the
			 * memory back to the CPU
			 */
			dma_sync_single_for_cpu(musb->controller,
					req->request.dma,
					req->request.length,
					req->bTx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
	}
	if (pRequest->status == 0)
		DBG(5, "%s done request %p, %d/%d\n",
				ep->end_point.name, pRequest,
				req->request.actual, req->request.length);
	else
		DBG(2, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, pRequest,
				req->request.actual, req->request.length,
				pRequest->status);
	req->request.complete(&req->ep->end_point, &req->request);
	spin_lock(&musb->Lock);
	ep->busy = busy;
}

/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint using the status. Synchronous.
 * caller locked controller and blocked irqs, and selected this ep.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb_request	*req = NULL;
	/* register window for this endpoint's control/status registers */
	void __iomem		*epio = ep->pThis->aLocalEnd[ep->bEndNumber].regs;

	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->pThis->pDmaController;
		int value;
		/* NOTE(review): FLUSHFIFO is written twice on purpose,
		 * presumably to cover both halves of a double-buffered
		 * FIFO -- confirm against the MUSB programmer's guide.
		 */
		if (ep->is_in) {
			musb_writew(epio, MGC_O_HDRC_TXCSR,
					0 | MGC_M_TXCSR_FLUSHFIFO);
			musb_writew(epio, MGC_O_HDRC_TXCSR,
					0 | MGC_M_TXCSR_FLUSHFIFO);
		} else {
			musb_writew(epio, MGC_O_HDRC_RXCSR,
					0 | MGC_M_RXCSR_FLUSHFIFO);
			musb_writew(epio, MGC_O_HDRC_RXCSR,
					0 | MGC_M_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
		c->channel_release(ep->dma);
		/* channel released: clear the pointer so later paths
		 * (txstate/rxstate) fall back to PIO instead of touching
		 * a stale channel
		 */
		ep->dma = NULL;
	}

	/* complete every queued request with the abort status; giveback
	 * unlinks each request from req_list, so the loop terminates
	 */
	while (!list_empty(&(ep->req_list))) {
		req = container_of(ep->req_list.next, struct musb_request,
				request.list);
		musb_g_giveback(ep, &req->request, status);
	}
}

/**************************************************************************
 * TX/IN and RX/OUT Data transfers
 **************************************************************************/

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */
static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	/* bulk endpoints that split large packets can burst up to the
	 * hardware FIFO size; everything else is limited to maxpacket
	 */
	if (can_bulk_split(musb, ep->type))
		return ep->hw_ep->wMaxPacketSizeTx;
	else
		return ep->wPacketSize;
}

/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			bEnd = req->bEnd;
	struct musb_ep		*pEnd;
	void __iomem		*epio = musb->aLocalEnd[bEnd].regs;
	struct usb_request	*pRequest;
	u16			wFifoCount = 0, wCsrVal;
	int			use_dma = 0;

	pEnd = req->ep;

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(pEnd->dma) == MGC_DMA_STATUS_BUSY) {
		DBG(4, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);

	pRequest = &req->request;
	/* one FIFO load at most: remaining bytes, capped at the
	 * endpoint's maximum write size
	 */
	wFifoCount = min(max_ep_writesize(musb, pEnd),
			(int)(pRequest->length - pRequest->actual));

	/* previous packet not yet taken by the host: nothing to do now */
	if (wCsrVal & MGC_M_TXCSR_TXPKTRDY) {
		DBG(5, "%s old packet still ready , txcsr %03x\n",
				pEnd->end_point.name, wCsrVal);
		return;
	}

	/* endpoint is stalling: don't load data behind the stall */
	if (wCsrVal & MGC_M_TXCSR_P_SENDSTALL) {
		DBG(5, "%s stalling, txcsr %03x\n",
				pEnd->end_point.name, wCsrVal);
		return;
	}

	DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			bEnd, pEnd->wPacketSize, wFifoCount,
			wCsrVal);

	/* PIO path (use_dma is never set in this stripped-down build):
	 * copy into the FIFO, then arm TXPKTRDY and clear any underrun
	 */
	if (!use_dma) {
		musb_write_fifo(pEnd->hw_ep, wFifoCount,
				(u8 *) (pRequest->buf + pRequest->actual));
		pRequest->actual += wFifoCount;
		wCsrVal |= MGC_M_TXCSR_TXPKTRDY;
		wCsrVal &= ~MGC_M_TXCSR_P_UNDERRUN;
		musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
	}

	/* host may already have the data when this message shows... */
	DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			pEnd->end_point.name, use_dma ? "dma" : "pio",
			pRequest->actual, pRequest->length,
			musb_readw(epio, MGC_O_HDRC_TXCSR),
			wFifoCount,
			musb_readw(epio, MGC_O_HDRC_TXMAXP));
}

/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 bEnd)
{
	u16			wCsrVal;
	struct usb_request	*pRequest;
	u8 __iomem		*pBase = musb->pRegs;
	struct musb_ep		*pEnd = &musb->aLocalEnd[bEnd].ep_in;
	void __iomem		*epio = musb->aLocalEnd[bEnd].regs;
	struct dma_channel	*dma;

	MGC_SelectEnd(pBase, bEnd);
	pRequest = next_request(pEnd);

	wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
	DBG(4, "<== %s, txcsr %04x\n", pEnd->end_point.name, wCsrVal);

	dma = is_dma_capable() ? pEnd->dma : NULL;
	/* do/while(0): a single-pass block that break exits early from */
	do {
		/* REVISIT for high bandwidth, MGC_M_TXCSR_P_INCOMPTX
		 * probably rates reporting as a host error
		 */
		if (wCsrVal & MGC_M_TXCSR_P_SENTSTALL) {
			/* a STALL handshake went out: ack it, abort any
			 * in-flight DMA, and fail the current request
			 */
			wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
			wCsrVal &= ~MGC_M_TXCSR_P_SENTSTALL;
			musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
			if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
				dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
				musb->pDmaController->channel_abort(dma);
			}

			if (pRequest)
				musb_g_giveback(pEnd, pRequest, -EPIPE);
			break;
		}

		if (wCsrVal & MGC_M_TXCSR_P_UNDERRUN) {
			/* we NAKed, no big deal ... little reason to care */
			wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
			wCsrVal &= ~(MGC_M_TXCSR_P_UNDERRUN
					| MGC_M_TXCSR_TXPKTRDY);
			musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
			DBG(20, "underrun on ep%d, req %p\n", bEnd, pRequest);
		}

		if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
			/* SHOULD NOT HAPPEN ... has with cppi though, after
			 * changing SENDSTALL (and other cases); harmless?
			 */
			DBG(5, "%s dma still busy?\n", pEnd->end_point.name);
			break;
		}

		if (pRequest) {
			u8	is_dma = 0;

			if (dma && (wCsrVal & MGC_M_TXCSR_DMAENAB)) {
				/* DMA just finished: disable it, account
				 * the transferred bytes, and fall through
				 * to the completion check
				 */
				is_dma = 1;
				wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
				wCsrVal &= ~(MGC_M_TXCSR_DMAENAB
						| MGC_M_TXCSR_P_UNDERRUN
						| MGC_M_TXCSR_TXPKTRDY);
				musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
				/* ensure writebuffer is empty */
				wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
				pRequest->actual += pEnd->dma->dwActualLength;
				DBG(4, "TXCSR%d %04x, dma off, "
						"len %Zd, req %p\n",
					bEnd, wCsrVal,
					pEnd->dma->dwActualLength,
					pRequest);
			}

			if (is_dma || pRequest->actual == pRequest->length) {
				/* First, maybe a terminating short packet.
				 * Some DMA engines might handle this by
				 * themselves.
				 */
				if ((pRequest->zero
						&& pRequest->length
						&& (pRequest->length
							% pEnd->wPacketSize)
							== 0)
						) {
					/* on dma completion, fifo may not
					 * be available yet ...
					 */
					if (wCsrVal & MGC_M_TXCSR_TXPKTRDY)
						break;
					DBG(4, "sending zero pkt\n");
					musb_writew(epio, MGC_O_HDRC_TXCSR,
							wCsrVal
							| MGC_M_TXCSR_MODE
							| MGC_M_TXCSR_TXPKTRDY);
					/* only one ZLP per request */
					pRequest->zero = 0;
					break;
				}

				/* ... or if not, then complete it */
				musb_g_giveback(pEnd, pRequest, 0);

				/* kickstart next transfer if appropriate;
				 * the packet that just completed might not
				 * be transmitted for hours or days.
				 * REVISIT for double buffering...
				 * FIXME revisit for stalls too...
				 */
				MGC_SelectEnd(pBase, bEnd);
				wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
				if (wCsrVal & MGC_M_TXCSR_FIFONOTEMPTY)
					break;
				/* endpoint may have been disabled while
				 * the lock was dropped inside giveback
				 */
				pRequest = pEnd->desc
						? next_request(pEnd)
						: NULL;
				if (!pRequest) {
					DBG(4, "%s idle now\n",
						pEnd->end_point.name);
					break;
				}
			}

			txstate(musb, to_musb_request(pRequest));
		}
	} while (0);
}

/* ------------------------------------------------------------ */

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	u16			wCsrVal = 0;
	const u8		bEnd = req->bEnd;
	struct usb_request	*pRequest = &req->request;
	struct musb_ep		*pEnd = &musb->aLocalEnd[bEnd].ep_out;
	void __iomem		*epio = musb->aLocalEnd[bEnd].regs;
	u16			wFifoCount = 0;
	u16			wCount = pEnd->wPacketSize;

	wCsrVal = musb_readw(epio, MGC_O_HDRC_RXCSR);

	if (is_cppi_enabled() && pEnd->dma) {
		struct dma_controller	*c = musb->pDmaController;
		struct dma_channel	*channel = pEnd->dma;

		/* NOTE:  CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				pEnd->wPacketSize,
				!pRequest->short_not_ok,
				pRequest->dma + pRequest->actual,
				pRequest->length - pRequest->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			wCsrVal &= ~(MGC_M_RXCSR_AUTOCLEAR
					| MGC_M_RXCSR_DMAMODE);
			wCsrVal |= MGC_M_RXCSR_DMAENAB | MGC_M_RXCSR_P_WZC_BITS;
			musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
			return;
		}
	}

	if (wCsrVal & MGC_M_RXCSR_RXPKTRDY) {
		wCount = musb_readw(epio, MGC_O_HDRC_RXCOUNT);
		if (pRequest->actual < pRequest->length) {
/* (code-viewer UI text, translated from Chinese): Keyboard shortcuts --
 * Copy code: Ctrl+C; Search code: Ctrl+F; Fullscreen: F11;
 * Increase font size: Ctrl+=; Decrease font size: Ctrl+-;
 * Show shortcuts: ?
 */