musb_host.c
From "omap3 linux 2.6, redundant code removed with nocc" · C source · 1,976 lines total · page 1 of 4
C
1,976 lines
/****************************************************************** * Copyright 2005 Mentor Graphics Corporation * Copyright (C) 2005-2006 by Texas Instruments * Copyright (C) 2006 by Nokia Corporation * * This file is part of the Inventra Controller Driver for Linux. * * The Inventra Controller Driver for Linux is free software; you * can redistribute it and/or modify it under the terms of the GNU * General Public License version 2 as published by the Free Software * Foundation. * * The Inventra Controller Driver for Linux is distributed in * the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public * License for more details. * * You should have received a copy of the GNU General Public License * along with The Inventra Controller Driver for Linux ; if not, * write to the Free Software Foundation, Inc., 59 Temple Place, * Suite 330, Boston, MA 02111-1307 USA * * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE * OF THOSE TERMS.THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER. * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR * GRAPHICS SUPPORT CUSTOMER. 
******************************************************************/#include <linux/module.h>#include <linux/kernel.h>#include <linux/delay.h>#include <linux/sched.h>#include <linux/slab.h>#include <linux/errno.h>#include <linux/init.h>#include <linux/list.h>#include "musbdefs.h"#include "musb_host.h"/* MUSB HOST status 22-mar-2006 * * - There's still lots of partial code duplication for fault paths, so * they aren't handled as consistently as they need to be. * * - PIO mostly behaved when last tested. * + including ep0, with all usbtest cases 9, 10 * + usbtest 14 (ep0out) doesn't seem to run at all * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest * configurations, but otherwise double buffering passes basic tests. * + for 2.6.N, for N > ~10, needs API changes for hcd framework. * * - DMA (CPPI) ... partially behaves, not currently recommended * + about 1/15 the speed of typical EHCI implementations (PCI) * + RX, all too often reqpkt seems to misbehave after tx * + TX, no known issues (other than evident silicon issue) * * - DMA (Mentor/OMAP) ...has at least toggle update problems * * - Still no traffic scheduling code to make NAKing for bulk or control * transfers unable to starve other requests; or to make efficient use * of hardware with periodic transfers. (Note that network drivers * commonly post bulk reads that stay pending for a long time; these * would make very visible trouble.) * * - Not tested with HNP, but some SRP paths seem to behave. * * NOTE 24-August: * * - Bulk traffic finally uses both sides of hardware ep1, freeing up an * extra endpoint for periodic use enabling hub + keybd + mouse. That * mostly works, except that with "usbnet" it's easy to trigger cases * with "ping" where RX loses. (a) ping to davinci, even "ping -f", * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses * although ARP RX wins. (That test was done with a full speed link.) 
*/

/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 *
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it ... one remote device may easily be NAKing while others
 * need to perform transfers in that same direction.  The same thing could
 * be done in software though, assuming dma cooperates.)
 *
 * INTERUPPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */

/*************************** Forwards ***************************/

static void musb_ep_program(struct musb *pThis, u8 bEnd,
			struct urb *pUrb, unsigned int nOut,
			u8 * pBuffer, u32 dwLength);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 *
 * Repeatedly sets FLUSHFIFO in TXCSR until FIFONOTEMPTY clears (the flush
 * must be issued once per buffered packet).  Bounded to ~1000 iterations
 * with a 1 ms delay each so a wedged FIFO cannot hang the caller forever.
 */
static inline void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MGC_O_HDRC_TXCSR);
	while (csr & MGC_M_TXCSR_FIFONOTEMPTY) {
		DBG(5, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
		/* request a flush, then re-read to see if the FIFO drained */
		csr |= MGC_M_TXCSR_FLUSHFIFO;
		musb_writew(epio, MGC_O_HDRC_TXCSR, csr);
		csr = musb_readw(epio, MGC_O_HDRC_TXCSR);
		/* give up (with a log) rather than spin forever */
		if (retries-- < 1) {
			ERR("Could not flush host TX fifo: csr: %04x\n", csr);
			return;
		}
		mdelay(1);
	}
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * pThis must be locked.
*/
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->bLocalEnd) {
		/* regular TX endpoint: kick TXPKTRDY, preserving the
		 * write-1-to-clear status bits
		 */
		txcsr = musb_readw(ep->regs, MGC_O_HDRC_TXCSR);
		txcsr |= MGC_M_TXCSR_TXPKTRDY | MGC_M_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MGC_O_HDRC_TXCSR, txcsr);
	} else {
		/* endpoint 0: send a SETUP packet */
		txcsr = MGC_M_CSR0_H_SETUPPKT | MGC_M_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MGC_O_HDRC_CSR0, txcsr);
	}
}

/* Enable DMA on a TX endpoint (CPPI / TUSB OMAP DMA variants). */
static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MGC_O_HDRC_TXCSR);
	txcsr |= MGC_M_TXCSR_DMAENAB | MGC_M_TXCSR_H_WZC_BITS;
	musb_writew(ep->regs, MGC_O_HDRC_TXCSR, txcsr);
}

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			wFrame;
	u32			dwLength;
	void			*pBuffer;
	void __iomem		*pBase = musb->pRegs;
	struct urb		*urb = next_urb(qh);
	struct musb_hw_ep	*pEnd = qh->hw_ep;
	unsigned		nPipe = urb->pipe;
	u8			bAddress = usb_pipedevice(nPipe);
	int			bEnd = pEnd->bLocalEnd;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP (8-byte packet,
		 * always OUT regardless of the data stage direction)
		 */
		is_in = 0;
		pEnd->out_qh = qh;
		musb->bEnd0Stage = MGC_END0_START;
		pBuffer = urb->setup_packet;
		dwLength = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		/* iso transfers start from the first frame descriptor */
		qh->iso_idx = 0;
		qh->frame = 0;
		pBuffer = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
		dwLength = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		pBuffer = urb->transfer_buffer;
		dwLength = urb->transfer_buffer_length;
	}

	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, bAddress, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			}; s;}),
			bEnd, pBuffer, dwLength);

	/* Configure endpoint */
	if (is_in || pEnd->bIsSharedFifo)
		pEnd->in_qh = qh;
	else
		pEnd->out_qh = qh;
	musb_ep_program(musb, bEnd, urb, !is_in, pBuffer, dwLength);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		DBG(3, "check whether there's still time for periodic Tx\n");
		qh->iso_idx = 0;
		wFrame = musb_readw(pBase, MGC_O_HDRC_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (wFrame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			/* too early: defer to the SOF interrupt handler */
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			DBG(1,"SOF for %d\n", bEnd);
			musb_writeb(pBase, MGC_O_HDRC_INTRUSBE, 0xff);
		}
		break;
	default:
start:
		DBG(4, "Start TX%d %s\n", bEnd,
			pEnd->tx_channel ? "dma" : "pio");
		if (!pEnd->tx_channel)
			musb_h_tx_start(pEnd);
		else if (is_cppi_enabled() || tusb_dma_omap())
			cppi_host_txdma_start(pEnd);
	}
}

/* caller owns controller lock, irqs are blocked */
static void
__musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->Lock)
__acquires(musb->Lock)
{
	/* a short IN transfer is an error when URB_SHORT_NOT_OK is set */
	if ((urb->transfer_flags & URB_SHORT_NOT_OK)
			&& (urb->actual_length < urb->transfer_buffer_length)
			&& status == 0
			&& usb_pipein(urb->pipe))
		status = -EREMOTEIO;

	spin_lock(&urb->lock);
	urb->hcpriv = NULL;
	if (urb->status == -EINPROGRESS)
		urb->status = status;
	spin_unlock(&urb->lock);

	DBG(({ int level; switch (urb->status) {
				case 0:
					level = 4; break;
				/* common/boring faults */
				case -EREMOTEIO:
				case -ESHUTDOWN:
				case -ECONNRESET:
				case -EPIPE:
					level = 3; break;
				default:
					level = 2; break;
				}; level; }),
			"complete %p (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	/* drop the controller lock around the completion callback,
	 * which may resubmit urbs or re-enter the HCD
	 */
	spin_unlock(&musb->Lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb);
	spin_lock(&musb->Lock);
}

/* for bulk/interrupt endpoints only
 *
 * Records the hardware's current DATA toggle for this endpoint into usbcore
 * state (usb_settoggle), so the next transfer to the same device endpoint
 * starts with the right toggle.
 */
static inline void
musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
{
	struct usb_device	*udev = urb->dev;
	u16			csr;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*qh;

	/* FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */

	if (is_in || ep->bIsSharedFifo)
		qh = ep->in_qh;
	else
		qh = ep->out_qh;

	if (!is_in) {
		csr = musb_readw(epio, MGC_O_HDRC_TXCSR);
		usb_settoggle(udev, qh->epnum, 1,
			(csr & MGC_M_TXCSR_H_DATATOGGLE)
				? 1 : 0);
	} else {
		csr = musb_readw(epio, MGC_O_HDRC_RXCSR);
		usb_settoggle(udev, qh->epnum, 0,
			(csr & MGC_M_RXCSR_H_DATATOGGLE)
				? 1 : 0);
	}
}

/* caller owns controller lock, irqs are blocked
 *
 * Complete one urb on this qh; returns the qh to service next (the same qh
 * if it still has urbs queued, the following qh on the ring for async
 * endpoints, or NULL).  Frees the qh when its urb list empties.
 */
static struct musb_qh *
musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
{
	int			is_in;
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	int			ready = qh->is_ready;

	if (ep->bIsSharedFifo)
		is_in = 1;
	else
		is_in = usb_pipein(urb->pipe);

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(ep, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	/* mark not-ready across the giveback so a completion-handler
	 * resubmit doesn't restart the hardware underneath us
	 */
	qh->is_ready = 0;
	__musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;

		if (is_in)
			ep->rx_reinit = 1;
		else
			ep->tx_reinit = 1;

		/* clobber old pointers to this qh */
		if (is_in || ep->bIsSharedFifo)
			ep->in_qh = NULL;
		else
			ep->out_qh = NULL;
		qh->hep->hcpriv = NULL;

		switch (qh->type) {
		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			musb->periodic[ep->bLocalEnd] = NULL;
			kfree(qh);
			qh = NULL;
			break;
		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			head = qh->ring.prev;
			list_del(&qh->ring);
			kfree(qh);
			qh = first_qh(head);
			break;
		}
	}
	return qh;
}

/*
 * Advance this hardware endpoint's queue, completing the specified urb and
 * advancing to either the next urb queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, irqs are blocked
 */
static void
musb_advance_schedule(struct musb *pThis, struct urb *urb,
	struct musb_hw_ep *pEnd, int is_in)
{
	struct musb_qh	*qh;

	if (is_in || pEnd->bIsSharedFifo)
		qh = pEnd->in_qh;
	else
		qh = pEnd->out_qh;

	/* complete the urb; musb_giveback() hands back the next qh to run */
	qh = musb_giveback(qh, urb, 0);
	if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
		DBG(4, "... next ep%d %cX urb %p\n",
				pEnd->bLocalEnd, is_in ? 'R' : 'T',
				next_urb(qh));
		musb_start_urb(pThis, is_in, qh);
	}
}

/* Flush the RX FIFO without triggering a refill; returns the final RXCSR. */
static inline u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MGC_M_RXCSR_FLUSHFIFO | MGC_M_RXCSR_RXPKTRDY;
	csr &= ~( MGC_M_RXCSR_H_REQPKT
		| MGC_M_RXCSR_H_AUTOREQ
		| MGC_M_RXCSR_AUTOCLEAR
		);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MGC_O_HDRC_RXCSR, csr);
	musb_writew(hw_ep->regs, MGC_O_HDRC_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MGC_O_HDRC_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 */
static u8 musb_host_packet_rx(struct musb *pThis, struct urb *pUrb,
		u8 bEnd, u8 bIsochError)
{
	u16			wRxCount;
	u8			*pBuffer;
	u16			wCsr;
	u8			bDone = FALSE;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*pEnd = pThis->aLocalEnd + bEnd;
	void __iomem		*epio = pEnd->regs;
	struct musb_qh		*qh = pEnd->in_qh;
	int			nPipe = pUrb->pipe;
	void			*buffer = pUrb->transfer_buffer;

	// MGC_SelectEnd(pBase, bEnd);
	wRxCount = musb_readw(epio, MGC_O_HDRC_RXCOUNT);
	DBG(3, "RX%d count %d, buffer %p len %d/%d\n", bEnd, wRxCount,
			pUrb->transfer_buffer, qh->offset,
			pUrb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(nPipe)) {
		int			status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (bIsochError) {
			status = -EILSEQ;
⌨️ Keyboard shortcuts
Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?