📄 atmel_usba_udc.c
	}

	DBG(DBG_GADGET | DBG_REQ,
		"%s: req %p complete: status %d, actual %u\n",
		ep->ep.name, req, req->req.status, req->req.actual);

	spin_unlock(&udc->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&udc->lock);
}

static void
request_complete_list(struct usba_ep *ep, struct list_head *list, int status)
{
	struct usba_request *req, *tmp_req;

	list_for_each_entry_safe(req, tmp_req, list, queue) {
		list_del_init(&req->queue);
		request_complete(ep, req, status);
	}
}

static int
usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags, ept_cfg, maxpacket;
	unsigned int nr_trans;

	DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);

	maxpacket = le16_to_cpu(desc->wMaxPacketSize) & 0x7ff;

	if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index)
			|| ep->index == 0
			|| desc->bDescriptorType != USB_DT_ENDPOINT
			|| maxpacket == 0
			|| maxpacket > ep->fifo_size) {
		DBG(DBG_ERR, "ep_enable: Invalid argument");
		return -EINVAL;
	}

	ep->is_isoc = 0;
	ep->is_in = 0;

	if (maxpacket <= 8)
		ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
	else
		/* LSB is bit 1, not 0 */
		ept_cfg = USBA_BF(EPT_SIZE, fls(maxpacket - 1) - 3);

	DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
			ep->ep.name, ept_cfg, maxpacket);

	if ((desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) {
		ep->is_in = 1;
		ept_cfg |= USBA_EPT_DIR_IN;
	}

	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_CONTROL:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!ep->can_isoc) {
			DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n",
					ep->ep.name);
			return -EINVAL;
		}

		/*
		 * Bits 11:12 specify number of _additional_
		 * transactions per microframe.
		 */
		nr_trans = ((le16_to_cpu(desc->wMaxPacketSize) >> 11) & 3) + 1;
		if (nr_trans > 3)
			return -EINVAL;

		ep->is_isoc = 1;
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO);

		/*
		 * Do triple-buffering on high-bandwidth iso endpoints.
		 */
		if (nr_trans > 1 && ep->nr_banks == 3)
			ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_TRIPLE);
		else
			ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
		ept_cfg |= USBA_BF(NB_TRANS, nr_trans);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
		break;
	case USB_ENDPOINT_XFER_INT:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
		break;
	}

	spin_lock_irqsave(&ep->udc->lock, flags);

	if (ep->desc) {
		spin_unlock_irqrestore(&ep->udc->lock, flags);
		DBG(DBG_ERR, "ep%d already enabled\n", ep->index);
		return -EBUSY;
	}

	ep->desc = desc;
	ep->ep.maxpacket = maxpacket;

	usba_ep_writel(ep, CFG, ept_cfg);
	usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);

	if (ep->can_dma) {
		u32 ctrl;

		usba_writel(udc, INT_ENB,
				(usba_readl(udc, INT_ENB)
					| USBA_BF(EPT_INT, 1 << ep->index)
					| USBA_BF(DMA_INT, 1 << ep->index)));
		ctrl = USBA_AUTO_VALID | USBA_INTDIS_DMA;
		usba_ep_writel(ep, CTL_ENB, ctrl);
	} else {
		usba_writel(udc, INT_ENB,
				(usba_readl(udc, INT_ENB)
					| USBA_BF(EPT_INT, 1 << ep->index)));
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index,
			(unsigned long)usba_ep_readl(ep, CFG));
	DBG(DBG_HW, "INT_ENB after init: %#08lx\n",
			(unsigned long)usba_readl(udc, INT_ENB));

	return 0;
}
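/*
 * Worked example (editorial sketch, not part of the driver): the
 * EPT_SIZE computation in usba_ep_enable() above encodes maxpacket as
 * fls(maxpacket - 1) - 3, i.e. log2(maxpacket) - 3 for power-of-two
 * sizes, rounding any other value up to the next supported FIFO size.
 * The standalone userspace program below prints the resulting mapping;
 * demo_fls() is a stand-in for the kernel's fls() (1-based index of the
 * most significant set bit, 0 for an all-zero argument).
 */
#include <stdio.h>

static int demo_fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int maxpacket;

	/* Prints 8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3, ..., 1024 -> 7 */
	for (maxpacket = 8; maxpacket <= 1024; maxpacket <<= 1)
		printf("maxpacket %4u -> EPT_SIZE %d\n",
				maxpacket, demo_fls(maxpacket - 1) - 3);

	return 0;
}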
static int usba_ep_disable(struct usb_ep *_ep)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	LIST_HEAD(req_list);
	unsigned long flags;

	DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name);

	spin_lock_irqsave(&udc->lock, flags);

	if (!ep->desc) {
		spin_unlock_irqrestore(&udc->lock, flags);
		DBG(DBG_ERR, "ep_disable: %s not enabled\n", ep->ep.name);
		return -EINVAL;
	}
	ep->desc = NULL;

	list_splice_init(&ep->queue, &req_list);
	if (ep->can_dma) {
		usba_dma_writel(ep, CONTROL, 0);
		usba_dma_writel(ep, ADDRESS, 0);
		usba_dma_readl(ep, STATUS);
	}
	usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE);
	usba_writel(udc, INT_ENB,
			usba_readl(udc, INT_ENB)
			& ~USBA_BF(EPT_INT, 1 << ep->index));

	request_complete_list(ep, &req_list, -ESHUTDOWN);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static struct usb_request *
usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct usba_request *req;

	DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);
	req->req.dma = DMA_ADDR_INVALID;

	return &req->req;
}

static void
usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct usba_request *req = to_usba_req(_req);

	DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req);

	kfree(req);
}

static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
		struct usba_request *req, gfp_t gfp_flags)
{
	unsigned long flags;
	int ret;

	DBG(DBG_DMA, "%s: req l/%u d/%08x %c%c%c\n",
		ep->ep.name, req->req.length, req->req.dma,
		req->req.zero ? 'Z' : 'z',
		req->req.short_not_ok ? 'S' : 's',
		req->req.no_interrupt ? 'I' : 'i');

	if (req->req.length > 0x10000) {
		/* Lengths from 0 to 65536 (inclusive) are supported */
		DBG(DBG_ERR, "invalid request length %u\n", req->req.length);
		return -EINVAL;
	}

	req->using_dma = 1;

	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(
			&udc->pdev->dev,
			req->req.buf, req->req.length,
			ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	} else {
		dma_sync_single_for_device(
			&udc->pdev->dev,
			req->req.dma, req->req.length,
			ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 0;
	}

	req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
			| USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
			| USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
	if (ep->is_in)
		req->ctrl |= USBA_DMA_END_BUF_EN;

	/*
	 * Add this request to the queue and submit for DMA if
	 * possible. Check if we're still alive first -- we may have
	 * received a reset since last time we checked.
	 */
	ret = -ESHUTDOWN;
	spin_lock_irqsave(&udc->lock, flags);
	if (ep->desc) {
		if (list_empty(&ep->queue))
			submit_request(ep, req);
		list_add_tail(&req->queue, &ep->queue);
		ret = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}
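/*
 * For reference (an assumption about the companion atmel_usba_udc.h
 * header, which is not part of this listing): the USBA_BF()/USBA_BFEXT()
 * helpers used throughout pack a value into, or extract it from, a named
 * register field via per-field _OFFSET and _SIZE constants, along these
 * lines:
 *
 *	#define USBA_BF(name, value)				\
 *		(((value) & ((1 << USBA_##name##_SIZE) - 1))	\
 *		 << USBA_##name##_OFFSET)
 *	#define USBA_BFEXT(name, reg)				\
 *		(((reg) >> USBA_##name##_OFFSET)		\
 *		 & ((1 << USBA_##name##_SIZE) - 1))
 *
 * So USBA_BF(DMA_BUF_LEN, req->req.length) in queue_dma() above simply
 * shifts the transfer length into the buffer-length field of the DMA
 * control word, and usba_update_req() below uses USBA_BFEXT() to read
 * the remaining byte count back out of the DMA status.
 */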
static int
usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct usba_request *req = to_usba_req(_req);
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags;
	int ret;

	DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n",
			ep->ep.name, req, _req->length);

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN || !ep->desc)
		return -ESHUTDOWN;

	req->submitted = 0;
	req->using_dma = 0;
	req->last_transaction = 0;

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	if (ep->can_dma)
		return queue_dma(udc, ep, req, gfp_flags);

	/* May have received a reset since last time we checked */
	ret = -ESHUTDOWN;
	spin_lock_irqsave(&udc->lock, flags);
	if (ep->desc) {
		list_add_tail(&req->queue, &ep->queue);

		if (ep->is_in || (ep_is_control(ep)
				&& (ep->state == DATA_STAGE_IN
					|| ep->state == STATUS_STAGE_IN)))
			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
		else
			usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
		ret = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}

static void
usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status)
{
	req->req.actual = req->req.length - USBA_BFEXT(DMA_BUF_LEN, status);
}

static int stop_dma(struct usba_ep *ep, u32 *pstatus)
{
	unsigned int timeout;
	u32 status;

	/*
	 * Stop the DMA controller. When writing both CH_EN
	 * and LINK to 0, the other bits are not affected.
	 */
	usba_dma_writel(ep, CONTROL, 0);

	/* Wait for the FIFO to empty */
	for (timeout = 40; timeout; --timeout) {
		status = usba_dma_readl(ep, STATUS);
		if (!(status & USBA_DMA_CH_EN))
			break;
		udelay(1);
	}

	if (pstatus)
		*pstatus = status;

	if (timeout == 0) {
		dev_err(&ep->udc->pdev->dev,
			"%s: timed out waiting for DMA FIFO to empty\n",
			ep->ep.name);
		return -ETIMEDOUT;
	}

	return 0;
}

static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	struct usba_request *req = to_usba_req(_req);
	unsigned long flags;
	u32 status;

	DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n",
			ep->ep.name, req);

	spin_lock_irqsave(&udc->lock, flags);

	if (req->using_dma) {
		/*
		 * If this request is currently being transferred,
		 * stop the DMA controller and reset the FIFO.
		 */
		if (ep->queue.next == &req->queue) {
			status = usba_dma_readl(ep, STATUS);
			if (status & USBA_DMA_CH_EN)
				stop_dma(ep, &status);

#ifdef CONFIG_USB_GADGET_DEBUG_FS
			ep->last_dma_status = status;
#endif

			usba_writel(udc, EPT_RST, 1 << ep->index);

			usba_update_req(ep, req, status);
		}
	}

	/*
	 * Errors should stop the queue from advancing until the
	 * completion function returns.
	 */
	list_del_init(&req->queue);

	request_complete(ep, req, -ECONNRESET);

	/* Process the next request if any */
	submit_next_request(ep);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
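/*
 * Hedged illustration (not from this driver): how a gadget driver of
 * this kernel vintage typically reaches the queue/dequeue ops above,
 * via the generic wrappers from <linux/usb/gadget.h>. my_complete() and
 * my_submit() are hypothetical names. Leaving req->dma untouched after
 * allocation (DMA_ADDR_INVALID) is what steers queue_dma() into its
 * dma_map_single() path on DMA-capable endpoints.
 */
static void my_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* req->status is 0 on success, or -ECONNRESET if the request
	 * was unlinked by usba_ep_dequeue() above. */
}

static int my_submit(struct usb_ep *ep, void *buf, unsigned int len)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, GFP_ATOMIC);	/* -> usba_ep_alloc_request() */
	if (!req)
		return -ENOMEM;

	req->buf = buf;
	req->length = len;
	req->complete = my_complete;	/* invoked with udc->lock dropped */

	return usb_ep_queue(ep, req, GFP_ATOMIC);	/* -> usba_ep_queue() */
}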
static int usba_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags;
	int ret = 0;

	DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name,
			value ? "set" : "clear");

	if (!ep->desc) {
		DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n",
				ep->ep.name);
		return -ENODEV;
	}
	if (ep->is_isoc) {
		DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n",
				ep->ep.name);
		return -ENOTTY;
	}

	spin_lock_irqsave(&udc->lock, flags);

	/*
	 * We can't halt IN endpoints while there are still data to be
	 * transferred
	 */
	if (!list_empty(&ep->queue)
			|| ((value && ep->is_in && (usba_ep_readl(ep, STA)
					& USBA_BF(BUSY_BANKS, -1L))))) {
		ret = -EAGAIN;
	} else {
		if (value)
			usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
		else
			usba_ep_writel(ep, CLR_STA,
					USBA_FORCE_STALL | USBA_TOGGLE_CLR);
		usba_ep_readl(ep, STA);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}

static int usba_ep_fifo_status(struct usb_ep *_ep)
{
	struct usba_ep *ep = to_usba_ep(_ep);

	return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
}

static void usba_ep_fifo_flush(struct usb_ep *_ep)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;

	usba_writel(udc, EPT_RST, 1 << ep->index);
}

static const struct usb_ep_ops usba_ep_ops = {
	.enable		= usba_ep_enable,
	.disable	= usba_ep_disable,
	.alloc_request	= usba_ep_alloc_request,
	.free_request	= usba_ep_free_request,
	.queue		= usba_ep_queue,
	.dequeue	= usba_ep_dequeue,
	.set_halt	= usba_ep_set_halt,
	.fifo_status	= usba_ep_fifo_status,
	.fifo_flush	= usba_ep_fifo_flush,
};

static int usba_udc_get_frame(struct usb_gadget *gadget)
{
	struct usba_udc *udc = to_usba_udc(gadget);

	return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM));
}

static int usba_udc_wakeup(struct usb_gadget *gadget)
{
	struct usba_udc *udc = to_usba_udc(gadget);
	unsigned long flags;
	u32 ctrl;
	int ret = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);

	if (udc->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
		ctrl = usba_readl(udc, CTRL);
		usba_writel(udc, CTRL, ctrl | USBA_REMOTE_WAKE_UP);
		ret = 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}

static int
usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
{
	struct usba_udc *udc = to_usba_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);

	if (is_selfpowered)
		udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
	else
		udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static const struct usb_gadget_ops usba_udc_ops = {
	.get_frame		= usba_udc_get_frame,
	.wakeup			= usba_udc_wakeup,
	.set_selfpowered	= usba_udc_set_selfpowered,
};

#define EP(nam, idx, maxpkt, maxbk, dma, isoc)			\
{								\
	.ep	= {						\
		.ops		= &usba_ep_ops,			\
		.name		= nam,				\
		.maxpacket	= maxpkt,			\
	},							\
	.udc		= &the_udc,				\
	.queue		= LIST_HEAD_INIT(usba_ep[idx].queue),	\