📄 net2280.c
字号:
dma_addr_t dma, unsigned bytes) { /* free memory into the right allocator */#ifndef USE_KMALLOC if (dma != DMA_ADDR_INVALID) { struct net2280_ep *ep; ep = container_of(_ep, struct net2280_ep, ep); if (!_ep) return; /* one problem with this call is that some platforms * don't allow it to be used in_irq(). */ pci_free_consistent(ep->dev->pdev, bytes, buf, dma); } else#endif kfree (buf);}/*-------------------------------------------------------------------------*//* load a packet into the fifo we use for usb IN transfers. * works for all endpoints. * * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo * at a time, but this code is simpler because it knows it only writes * one packet. ep-a..ep-d should use dma instead. */static voidwrite_fifo (struct net2280_ep *ep, struct usb_request *req){ struct net2280_ep_regs *regs = ep->regs; u8 *buf; u32 tmp; unsigned count, total; /* INVARIANT: fifo is currently empty. (testable) */ if (req) { buf = req->buf + req->actual; prefetch (buf); total = req->length - req->actual; } else { total = 0; buf = 0; } /* write just one packet at a time */ count = ep->ep.maxpacket; if (count > total) /* min() cannot be used on a bitfield */ count = total; VDEBUG (ep->dev, "write %s fifo (IN) %d bytes%s req %p\n", ep->ep.name, count, (count != ep->ep.maxpacket) ? " (short)" : "", req); while (count >= 4) { /* NOTE be careful if you try to align these. fifo lines * should normally be full (4 bytes) and successive partial * lines are ok only in certain cases. */ tmp = get_unaligned ((u32 *)buf); cpu_to_le32s (&tmp); writel (tmp, ®s->ep_data); buf += 4; count -= 4; } /* last fifo entry is "short" unless we wrote a full packet. * also explicitly validate last word in (periodic) transfers * when maxpacket is not a multiple of 4 bytes. */ if (count || total < ep->ep.maxpacket) { tmp = count ? 
get_unaligned ((u32 *)buf) : count; cpu_to_le32s (&tmp); set_fifo_bytecount (ep, count & 0x03); writel (tmp, ®s->ep_data); } /* pci writes may still be posted */}/* work around erratum 0106: PCI and USB race over the OUT fifo. * caller guarantees chiprev 0100, out endpoint is NAKing, and * there's no real data in the fifo. * * NOTE: also used in cases where that erratum doesn't apply: * where the host wrote "too much" data to us. */static void out_flush (struct net2280_ep *ep){ u32 *statp, tmp; ASSERT_OUT_NAKING (ep); statp = &ep->regs->ep_stat; writel ( (1 << DATA_OUT_PING_TOKEN_INTERRUPT) | (1 << DATA_PACKET_RECEIVED_INTERRUPT) , statp); writel ((1 << FIFO_FLUSH), statp); mb (); tmp = readl (statp); if (tmp & (1 << DATA_OUT_PING_TOKEN_INTERRUPT) /* high speed did bulk NYET; fifo isn't filling */ && ep->dev->gadget.speed == USB_SPEED_FULL) { unsigned usec; usec = 50; /* 64 byte bulk/interrupt */ handshake (statp, (1 << USB_OUT_PING_NAK_SENT), (1 << USB_OUT_PING_NAK_SENT), usec); /* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */ }}/* unload packet(s) from the fifo we use for usb OUT transfers. * returns true iff the request completed, because of short packet * or the request buffer having filled with full packets. * * for ep-a..ep-d this will read multiple packets out when they * have been accepted. */static intread_fifo (struct net2280_ep *ep, struct net2280_request *req){ struct net2280_ep_regs *regs = ep->regs; u8 *buf = req->req.buf + req->req.actual; unsigned count, tmp, is_short; unsigned cleanup = 0, prevent = 0; /* erratum 0106 ... packets coming in during fifo reads might * be incompletely rejected. not all cases have workarounds. */ if (ep->dev->chiprev == 0x0100 && ep->dev->gadget.speed == USB_SPEED_FULL) { udelay (1); tmp = readl (&ep->regs->ep_stat); if ((tmp & (1 << NAK_OUT_PACKETS))) cleanup = 1; else if ((tmp & (1 << FIFO_FULL))) { start_out_naking (ep); prevent = 1; } /* else: hope we don't see the problem */ } /* never overflow the rx buffer. 
the fifo reads packets until * it sees a short one; we might not be ready for them all. */ prefetchw (buf); count = readl (®s->ep_avail); if (unlikely (count == 0)) { udelay (1); tmp = readl (&ep->regs->ep_stat); count = readl (®s->ep_avail); /* handled that data already? */ if (count == 0 && (tmp & (1 << NAK_OUT_PACKETS)) == 0) return 0; } tmp = req->req.length - req->req.actual; if (count > tmp) { /* as with DMA, data overflow gets flushed */ if ((tmp % ep->ep.maxpacket) != 0) { ERROR (ep->dev, "%s out fifo %d bytes, expected %d\n", ep->ep.name, count, tmp); req->req.status = -EOVERFLOW; cleanup = 1; /* NAK_OUT_PACKETS will be set, so flushing is safe; * the next read will start with the next packet */ } /* else it's a ZLP, no worries */ count = tmp; } req->req.actual += count; is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0); VDEBUG (ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n", ep->ep.name, count, is_short ? " (short)" : "", cleanup ? " flush" : "", prevent ? " nak" : "", req, req->req.actual, req->req.length); while (count >= 4) { tmp = readl (®s->ep_data); cpu_to_le32s (&tmp); put_unaligned (tmp, (u32 *)buf); buf += 4; count -= 4; } if (count) { tmp = readl (®s->ep_data); cpu_to_le32s (&tmp); do { *buf++ = (u8) tmp; tmp >>= 8; } while (--count); } if (cleanup) out_flush (ep); if (prevent) { writel ((1 << CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp); (void) readl (&ep->regs->ep_rsp); } return is_short || ((req->req.actual == req->req.length) && !req->req.zero);}/* fill out dma descriptor to match a given request */static voidfill_dma_desc (struct net2280_ep *ep, struct net2280_request *req, int valid){ struct net2280_dma *td = req->td; u32 dmacount = req->req.length; /* don't let DMA continue after a short OUT packet, * so overruns can't affect the next transfer. * in case of overruns on max-size packets, we can't * stop the fifo from filling but we can flush it. 
*/
	if (ep->is_in)
		dmacount |= (1 << DMA_DIRECTION);
	else if ((dmacount % ep->ep.maxpacket) != 0)
		dmacount |= (1 << END_OF_CHAIN);

	req->valid = valid;
	if (valid)
		dmacount |= (1 << VALID_BIT);
	if (likely(!req->req.no_interrupt || !use_dma_chaining))
		dmacount |= (1 << DMA_DONE_INTERRUPT_ENABLE);

	/* td->dmadesc = previously set by caller */
	td->dmaaddr = cpu_to_le32p (&req->req.dma);

	/* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
	wmb ();
	td->dmacount = cpu_to_le32p (&dmacount);
}

/* default dma control word: scatter-gather with valid-bit polling
 * instead of autostart, per the erratum 0116 workarounds below.
 */
static const u32 dmactl_default =
		  (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
		| (1 << DMA_CLEAR_COUNT_ENABLE)
		/* erratum 0116 workaround part 1 (use POLLING) */
		| (POLL_100_USEC << DESCRIPTOR_POLLING_RATE)
		| (1 << DMA_VALID_BIT_POLLING_ENABLE)
		| (1 << DMA_VALID_BIT_ENABLE)
		| (1 << DMA_SCATTER_GATHER_ENABLE)
		/* erratum 0116 workaround part 2 (no AUTOSTART) */
		| (1 << DMA_ENABLE);

/* busy-wait (up to 50 usec) for the channel to report DMA_ENABLE clear */
static inline void spin_stop_dma (struct net2280_dma_regs *dma)
{
	handshake (&dma->dmactl, (1 << DMA_ENABLE), 0, 50);
}

/* clear DMA_ENABLE, then wait until the channel has actually stopped */
static inline void stop_dma (struct net2280_dma_regs *dma)
{
	writel (readl (&dma->dmactl) & ~(1 << DMA_ENABLE), &dma->dmactl);
	spin_stop_dma (dma);
}

/* point the dma channel at descriptor td_dma and kick off the transfer */
static void start_queue (struct net2280_ep *ep, u32 dmactl, u32 td_dma)
{
	struct net2280_dma_regs	*dma = ep->dma;

	writel ((1 << VALID_BIT) | (ep->is_in << DMA_DIRECTION),
			&dma->dmacount);
	/* write-back acks any stale dma status bits */
	writel (readl (&dma->dmastat), &dma->dmastat);

	writel (td_dma, &dma->dmadesc);
	writel (dmactl, &dma->dmactl);

	/* erratum 0116 workaround part 3:  pci arbiter away from net2280 */
	(void) readl (&ep->dev->pci->pcimstctl);

	writel ((1 << DMA_START), &dma->dmastat);

	if (!ep->is_in)
		stop_out_naking (ep);
}

/* set up and start dma for a request on an idle channel */
static void start_dma (struct net2280_ep *ep, struct net2280_request *req)
{
	u32			tmp;
	struct net2280_dma_regs	*dma = ep->dma;

	/* FIXME can't use DMA for ZLPs */

	/* on this path we "know" there's no dma active (yet) */
	WARN_ON (readl (&dma->dmactl) & (1 << DMA_ENABLE));
	writel (0, &ep->dma->dmactl);

	/* previous OUT packet might have been short */
	if (!ep->is_in && ((tmp = readl
(&ep->regs->ep_stat)) & (1 << NAK_OUT_PACKETS)) != 0) {
		writel ((1 << SHORT_PACKET_TRANSFERRED_INTERRUPT),
				&ep->regs->ep_stat);

		tmp = readl (&ep->regs->ep_avail);
		if (tmp) {
			writel (readl (&dma->dmastat), &dma->dmastat);

			/* transfer all/some fifo data */
			writel (req->req.dma, &dma->dmaaddr);
			tmp = min (tmp, req->req.length);

			/* dma irq, faking scatterlist status */
			req->td->dmacount = cpu_to_le32 (req->req.length - tmp);
			writel ((1 << DMA_DONE_INTERRUPT_ENABLE) | tmp,
					&dma->dmacount);
			req->td->dmadesc = 0;
			req->valid = 1;

			writel ((1 << DMA_ENABLE), &dma->dmactl);
			writel ((1 << DMA_START), &dma->dmastat);
			return;
		}
	}

	tmp = dmactl_default;

	/* force packet boundaries between dma requests, but prevent the
	 * controller from automagically writing a last "short" packet
	 * (zero length) unless the driver explicitly said to do that.
	 */
	if (ep->is_in) {
		if (likely ((req->req.length % ep->ep.maxpacket) != 0
				|| req->req.zero)) {
			tmp |= (1 << DMA_FIFO_VALIDATE);
			ep->in_fifo_validate = 1;
		} else
			ep->in_fifo_validate = 0;
	}

	/* init req->td, pointing to the current dummy */
	req->td->dmadesc = cpu_to_le32 (ep->td_dma);
	fill_dma_desc (ep, req, 1);

	if (!use_dma_chaining)
		req->td->dmacount |= __constant_cpu_to_le32 (1 << END_OF_CHAIN);

	start_queue (ep, tmp, req->td_dma);
}

/* swap the request's descriptor with the endpoint's dummy, link it
 * into the chain, then fill it in (and maybe mark it valid).
 */
static inline void
queue_dma (struct net2280_ep *ep, struct net2280_request *req, int valid)
{
	struct net2280_dma	*end;
	dma_addr_t		tmp;

	/* swap new dummy for old, link; fill and maybe activate */
	end = ep->dummy;
	ep->dummy = req->td;
	req->td = end;

	tmp = ep->td_dma;
	ep->td_dma = req->td_dma;
	req->td_dma = tmp;

	end->dmadesc = cpu_to_le32 (ep->td_dma);

	fill_dma_desc (ep, req, valid);
}

/* complete a request: unlink it, unmap its dma buffer (if we mapped
 * it here), and invoke its completion callback with dev->lock dropped.
 */
static void
done (struct net2280_ep *ep, struct net2280_request *req, int status)
{
	struct net2280		*dev;
	unsigned		stopped = ep->stopped;

	list_del_init (&req->queue);

	/* first completion status wins; later calls don't overwrite it */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (req->mapped) {
		pci_unmap_single (dev->pdev, req->req.dma,
req->req.length,
			ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	}

	if (status && status != -ESHUTDOWN)
		VDEBUG (dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock (&dev->lock);
	req->req.complete (&ep->ep, &req->req);
	spin_lock (&dev->lock);
	ep->stopped = stopped;
}

/*-------------------------------------------------------------------------*/

/* queue a request on an endpoint; kicks off i/o (dma or pio) right
 * away when the endpoint's queue is empty and it isn't stopped.
 */
static int
net2280_queue (struct usb_ep *_ep, struct usb_request *_req, int gfp_flags)
{
	struct net2280_request	*req;
	struct net2280_ep	*ep;
	struct net2280		*dev;
	unsigned long		flags;

	/* we always require a cpu-view buffer, so that we can
	 * always use pio (as fallback or whatever).
	 */
	req = container_of (_req, struct net2280_request, req);
	if (!_req || !_req->complete || !_req->buf
			|| !list_empty (&req->queue))
		return -EINVAL;
	if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
		return -EDOM;
	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* FIXME implement PIO fallback for ZLPs with DMA */
	if (ep->dma && _req->length == 0)
		return -EOPNOTSUPP;

	/* set up dma mapping in case the caller didn't */
	if (ep->dma && _req->dma == DMA_ADDR_INVALID) {
		_req->dma = pci_map_single (dev->pdev, _req->buf, _req->length,
			ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
		req->mapped = 1;
	}

#if 0
	VDEBUG (dev, "%s queue req %p, len %d buf %p\n",
			_ep->name, _req, _req->length, _req->buf);
#endif

	spin_lock_irqsave (&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue?
*/
	if (list_empty (&ep->queue) && !ep->stopped) {
		/* use DMA if the endpoint supports it, else pio */
		if (ep->dma)
			start_dma (ep, req);
		else {
			/* maybe there's no control data, just status ack */
			if (ep->num == 0 && _req->length == 0) {
				allow_status (ep);
				done (ep, req, 0);
				VDEBUG (dev, "%s status ack\n", ep->ep.name);
				goto done;
			}

			/* PIO ... stuff the fifo, or unblock it. */
			if (ep->is_in)
				write_fifo (ep, _req);
			else if (list_empty (&ep->queue)) {
				u32	s;

				/* OUT FIFO might have packet(s) buffered */
				s = readl (&ep->regs->ep_stat);
				if ((s & (1 << FIFO_EMPTY)) == 0) {
					/* note: _req->short_not_ok is
					 * ignored here since PIO _always_
					 * stops queue advance here, and
					 * _req->status doesn't change for
					 * short reads (only _req->actual)
					 */
					if (read_fifo (ep, req)) {
						done (ep, req, 0);
						if (ep->num == 0)
							allow_status (ep);
						/* don't queue it */
						req = 0;
					} else
						s = readl (&ep->regs->ep_stat);
				}

				/* don't NAK, let the fifo fill */
				if (req && (s & (1 << NAK_OUT_PACKETS)))
					writel ((1 << CLEAR_NAK_OUT_PACKETS),
						&ep->regs->ep_rsp);
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -