fsl_usb2_udc.c
		/* Calculate transactions needed for high bandwidth iso */
		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
		max = max & 0x7ff;	/* bit 0~10 */
		/* 3 transactions at most */
		if (mult > 3)
			goto en_done;
		break;
	default:
		goto en_done;
	}

	spin_lock_irqsave(&udc->lock, flags);
	ep->ep.maxpacket = max;
	ep->desc = desc;
	ep->stopped = 0;

	/* Controller related setup */
	/* Init EPx Queue Head (EP capabilities field in QH
	 * according to max, zlt, mult) */
	struct_ep_qh_setup(udc, (unsigned char) ep_index(ep),
			(unsigned char) ((desc->bEndpointAddress & USB_DIR_IN)
					? USB_SEND : USB_RECV),
			(unsigned char) (desc->bmAttributes
					& USB_ENDPOINT_XFERTYPE_MASK),
			max, zlt, mult);

	/* Init endpoint ctrl register */
	dr_ep_setup((unsigned char) ep_index(ep),
			(unsigned char) ((desc->bEndpointAddress & USB_DIR_IN)
					? USB_SEND : USB_RECV),
			(unsigned char) (desc->bmAttributes
					& USB_ENDPOINT_XFERTYPE_MASK));

	spin_unlock_irqrestore(&udc->lock, flags);
	retval = 0;

	VDBG("enabled %s (ep%d%s) maxpacket %d", ep->ep.name,
			ep->desc->bEndpointAddress & 0x0f,
			(desc->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
			max);
en_done:
	return retval;
}

/*---------------------------------------------------------------------
 * @ep : the ep being unconfigured. May not be ep0
 * Any pending and incomplete req will complete with status (-ESHUTDOWN)
 *---------------------------------------------------------------------*/
static int fsl_ep_disable(struct usb_ep *_ep)
{
	struct fsl_udc *udc = NULL;
	struct fsl_ep *ep = NULL;
	unsigned long flags = 0;
	u32 epctrl;
	int ep_num;

	ep = container_of(_ep, struct fsl_ep, ep);
	if (!_ep || !ep->desc) {
		VDBG("%s not enabled", _ep ? ep->ep.name : NULL);
		return -EINVAL;
	}

	/* disable ep on controller */
	ep_num = ep_index(ep);
	epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
	if (ep_is_in(ep))
		epctrl &= ~EPCTRL_TX_ENABLE;
	else
		epctrl &= ~EPCTRL_RX_ENABLE;
	fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);

	udc = (struct fsl_udc *)ep->udc;
	spin_lock_irqsave(&udc->lock, flags);

	/* nuke all pending requests (does flush) */
	nuke(ep, -ESHUTDOWN);

	ep->desc = 0;
	ep->stopped = 1;
	spin_unlock_irqrestore(&udc->lock, flags);

	VDBG("disabled %s OK", _ep->name);
	return 0;
}

/*---------------------------------------------------------------------
 * allocate a request object used by this endpoint
 * the main operation is to insert the req->queue into the ep->queue
 * Returns the request, or null if one could not be allocated
 *---------------------------------------------------------------------*/
static struct usb_request *
fsl_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct fsl_req *req = NULL;

	req = kzalloc(sizeof *req, gfp_flags);
	if (!req)
		return NULL;

	req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct fsl_req *req = NULL;

	req = container_of(_req, struct fsl_req, req);

	if (_req)
		kfree(req);
}
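/*
 * Usage sketch (illustrative only, not part of this driver): these ops are
 * reached through the standard gadget API wrappers, so a function driver
 * submits I/O roughly as below. "my_ep", "my_buf", "my_len" and
 * "my_complete" are hypothetical placeholders.
 */
#if 0
	struct usb_request *my_req;

	my_req = usb_ep_alloc_request(my_ep, GFP_ATOMIC);  /* -> fsl_alloc_request() */
	if (!my_req)
		return -ENOMEM;

	my_req->buf = my_buf;
	my_req->length = my_len;
	my_req->complete = my_complete;	/* invoked when the transfer finishes */

	if (usb_ep_queue(my_ep, my_req, GFP_ATOMIC) != 0)  /* -> fsl_ep_queue() */
		usb_ep_free_request(my_ep, my_req);        /* -> fsl_free_request() */
#endif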
/*-------------------------------------------------------------------------*/
static int fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
{
	int i = ep_index(ep) * 2 + ep_is_in(ep);
	u32 temp, bitmask, tmp_stat;
	struct ep_queue_head *dQH = &ep->udc->ep_qh[i];

	/* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
	VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */

	bitmask = ep_is_in(ep) ? (1 << (ep_index(ep) + 16))
			: (1 << (ep_index(ep)));

	/* check if the pipe is empty */
	if (!(list_empty(&ep->queue))) {
		/* Add td to the end */
		struct fsl_req *lastreq;
		lastreq = list_entry(ep->queue.prev, struct fsl_req, queue);
		lastreq->tail->next_td_ptr =
			cpu_to_le32(req->head->td_dma & DTD_ADDR_MASK);
		/* Read prime bit, if 1 goto done */
		if (fsl_readl(&dr_regs->endpointprime) & bitmask)
			goto out;

		do {
			/* Set ATDTW bit in USBCMD */
			temp = fsl_readl(&dr_regs->usbcmd);
			fsl_writel(temp | USB_CMD_ATDTW, &dr_regs->usbcmd);

			/* Read correct status bit */
			tmp_stat = fsl_readl(&dr_regs->endptstatus) & bitmask;

		} while (!(fsl_readl(&dr_regs->usbcmd) & USB_CMD_ATDTW));

		/* Write ATDTW bit to 0 */
		temp = fsl_readl(&dr_regs->usbcmd);
		fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);

		if (tmp_stat)
			goto out;
	}

	/* Write dQH next pointer and terminate bit to 0 */
	temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
	dQH->next_dtd_ptr = cpu_to_le32(temp);

	/* Clear active and halt bit */
	temp = cpu_to_le32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
			| EP_QUEUE_HEAD_STATUS_HALT));
	dQH->size_ioc_int_sts &= temp;

	/* Prime endpoint by writing 1 to ENDPTPRIME */
	temp = ep_is_in(ep) ? (1 << (ep_index(ep) + 16))
			: (1 << (ep_index(ep)));
	fsl_writel(temp, &dr_regs->endpointprime);
out:
	return 0;
}

/* Fill in the dTD structure
 * @req: request that the transfer belongs to
 * @length: return the actual data length of the dTD
 * @dma: return the dma address of the dTD
 * @is_last: return flag if it is the last dTD of the request
 * return: pointer to the built dTD */
static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req,
		unsigned *length, dma_addr_t *dma, int *is_last)
{
	u32 swap_temp;
	struct ep_td_struct *dtd;

	/* how big will this transfer be? */
	*length = min(req->req.length - req->req.actual,
			(unsigned)EP_MAX_LENGTH_TRANSFER);

	dtd = dma_pool_alloc(udc_controller->td_pool, GFP_KERNEL, dma);
	if (dtd == NULL)
		return dtd;

	dtd->td_dma = *dma;
	/* Clear reserved field */
	swap_temp = cpu_to_le32(dtd->size_ioc_sts);
	swap_temp &= ~DTD_RESERVED_FIELDS;
	dtd->size_ioc_sts = cpu_to_le32(swap_temp);

	/* Init all of buffer page pointers */
	swap_temp = (u32) (req->req.dma + req->req.actual);
	dtd->buff_ptr0 = cpu_to_le32(swap_temp);
	dtd->buff_ptr1 = cpu_to_le32(swap_temp + 0x1000);
	dtd->buff_ptr2 = cpu_to_le32(swap_temp + 0x2000);
	dtd->buff_ptr3 = cpu_to_le32(swap_temp + 0x3000);
	dtd->buff_ptr4 = cpu_to_le32(swap_temp + 0x4000);

	req->req.actual += *length;

	/* zlp is needed if req->req.zero is set */
	if (req->req.zero) {
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual)
		*is_last = 1;
	else
		*is_last = 0;

	if ((*is_last) == 0)
		VDBG("multi-dtd request!\n");
	/* Fill in the transfer size; set active bit */
	swap_temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);

	/* Enable interrupt for the last dtd of a request */
	if (*is_last && !req->req.no_interrupt)
		swap_temp |= DTD_IOC;

	dtd->size_ioc_sts = cpu_to_le32(swap_temp);

	mb();

	VDBG("length = %d address= 0x%x", *length, (int)*dma);

	return dtd;
}
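/*
 * Note on fsl_build_dtd(): each dTD addresses its payload through five 4KB
 * page pointers, with buff_ptr0 holding the (possibly unaligned) start
 * address and buff_ptr1..4 the following page boundaries, so one dTD covers
 * at most EP_MAX_LENGTH_TRANSFER bytes of a request. Assuming the 16KB
 * value from the companion header, a 36KB request would therefore be
 * chained as three dTDs of 16KB, 16KB and 4KB by fsl_req_to_dtd() below.
 */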
/* Generate dtd chain for a request */
static int fsl_req_to_dtd(struct fsl_req *req)
{
	unsigned count;
	int is_last;
	int is_first = 1;
	struct ep_td_struct *last_dtd = NULL, *dtd;
	dma_addr_t dma;

	do {
		dtd = fsl_build_dtd(req, &count, &dma, &is_last);
		if (dtd == NULL)
			return -ENOMEM;

		if (is_first) {
			is_first = 0;
			req->head = dtd;
		} else {
			last_dtd->next_td_ptr = cpu_to_le32(dma);
			last_dtd->next_td_virt = dtd;
		}
		last_dtd = dtd;

		req->dtd_count++;
	} while (!is_last);

	dtd->next_td_ptr = cpu_to_le32(DTD_NEXT_TERMINATE);

	req->tail = dtd;

	return 0;
}

/* queues (submits) an I/O request to an endpoint */
static int
fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
	struct fsl_req *req = container_of(_req, struct fsl_req, req);
	struct fsl_udc *udc;
	unsigned long flags;
	int is_iso = 0;

	/* catch various bogus parameters */
	if (!_req || !req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		VDBG("%s, bad params\n", __FUNCTION__);
		return -EINVAL;
	}
	if (unlikely(!_ep || !ep->desc)) {
		VDBG("%s, bad ep\n", __FUNCTION__);
		return -EINVAL;
	}
	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		if (req->req.length > ep->ep.maxpacket)
			return -EMSGSIZE;
		is_iso = 1;
	}

	udc = ep->udc;
	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	req->ep = ep;

	/* map virtual address to hardware */
	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
					req->req.buf, req->req.length,
					ep_is_in(ep) ? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
		req->mapped = 1;
	} else {
		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
					req->req.dma, req->req.length,
					ep_is_in(ep) ? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
		req->mapped = 0;
	}

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->dtd_count = 0;

	spin_lock_irqsave(&udc->lock, flags);

	/* build dtds and push them to device queue */
	if (!fsl_req_to_dtd(req)) {
		fsl_queue_td(ep, req);
	} else {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -ENOMEM;
	}

	/* Update ep0 state */
	if (ep_index(ep) == 0)
		udc->ep0_state = DATA_STATE_XMIT;

	/* irq handler advances the queue */
	if (req != NULL)
		list_add_tail(&req->queue, &ep->queue);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
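/*
 * Note on the mapping above: a caller that leaves req->dma at
 * DMA_ADDR_INVALID gets its buffer mapped here and flagged (req->mapped)
 * for unmapping on completion; a caller that already holds a streaming DMA
 * mapping may pass it in, in which case only a cache sync is done. A
 * hypothetical pre-mapped submission ("my_*" names are placeholders):
 */
#if 0
	my_req->buf = my_vaddr;
	my_req->dma = my_dma_handle;	/* != DMA_ADDR_INVALID, so not remapped */
	my_req->length = my_len;
	usb_ep_queue(my_ep, my_req, GFP_ATOMIC);
#endif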
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
	struct fsl_req *req;
	unsigned long flags;
	int ep_num, stopped, ret = 0;
	u32 epctrl;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;

	/* Stop the ep before we deal with the queue */
	ep->stopped = 1;
	ep_num = ep_index(ep);
	epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
	if (ep_is_in(ep))
		epctrl &= ~EPCTRL_TX_ENABLE;
	else
		epctrl &= ~EPCTRL_RX_ENABLE;
	fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		fsl_ep_fifo_flush(_ep);	/* flush current transfer */

		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct ep_queue_head *qh;
			struct fsl_req *next_req;

			qh = ep->qh;
			next_req = list_entry(req->queue.next, struct fsl_req,
					queue);

			/* Point the QH to the first TD of next request */
			fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr);
		}

	/* The request hasn't been processed, patch up the TD chain */
	} else {
		struct fsl_req *prev_req;

		prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
		fsl_writel(fsl_readl(&req->tail->next_td_ptr),
				&prev_req->tail->next_td_ptr);
	}

	done(ep, req, -ECONNRESET);

	/* Enable EP */
out:
	epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
	if (ep_is_in(ep))
		epctrl |= EPCTRL_TX_ENABLE;
	else
		epctrl |= EPCTRL_RX_ENABLE;
	fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);

	ep->stopped = stopped;
	spin_unlock_irqrestore(&ep->udc->lock, flags);

	return ret;
}

/*-------------------------------------------------------------------------*/

/*-----------------------------------------------------------------
 * modify the endpoint halt feature
 * @ep: the non-isochronous endpoint being stalled
 * @value: 1--set halt  0--clear halt
 * Returns zero, or a negative error code.
 *----------------------------------------------------------------*/
static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct fsl_ep *ep = NULL;
	unsigned long flags = 0;
	int status = -EOPNOTSUPP;	/* operation not supported */
	unsigned char ep_dir = 0, ep_num = 0;
	struct fsl_udc *udc = NULL;

	ep = container_of(_ep, struct fsl_ep, ep);
	udc = ep->udc;
	if (!_ep || !ep->desc) {
		status = -EINVAL;
		goto out;
	}

	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		status = -EOPNOTSUPP;
		goto out;
	}

	/* An attempt to halt an IN ep will fail if any transfer requests
	 * are still queued */
	if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
		status = -EAGAIN;
		goto out;
	}

	status = 0;
	ep_dir = ep_is_in(ep) ? USB_SEND : USB_RECV;
	ep_num = (unsigned char)(ep_index(ep));
	spin_lock_irqsave(&ep->udc->lock, flags);
	dr_ep_change_stall(ep_num, ep_dir, value);
	spin_unlock_irqrestore(&ep->udc->lock, flags);

	if (ep_index(ep) == 0) {
		udc->ep0_state = WAIT_FOR_SETUP;
		udc->ep0_dir = 0;
	}
out:
	VDBG(" %s %s halt stat %d", ep->ep.name,
			value ? "set" : "clear", status);

	return status;
}

static void fsl_ep_fifo_flush(struct usb_ep *_ep)
{
	struct fsl_ep *ep;
	int ep_num, ep_dir;
	u32 bits;
	unsigned long timeout;
#define FSL_UDC_FLUSH_TIMEOUT 1000
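	/*
	 * FSL_UDC_FLUSH_TIMEOUT bounds the flush handshake that follows: on
	 * this controller family the endpoint's bit is written to ENDPTFLUSH
	 * and the hardware clears it again once the flush completes, so the
	 * function busy-waits on that bit (re-flushing while ENDPTSTAT still
	 * shows the endpoint primed) up to this budget.
	 */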