
📄 amd5536udc.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
			if (create_new_chain) {
				last->next = dma_addr;
			} else {
				/* last->next = virt_to_phys(td); */
			}
			if (ep->in) {
				/* write tx bytes */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		}
		last = td;
	}
	/* set last bit */
	if (td) {
		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* last desc. points to itself */
		req->td_data_last = td;
	}

	return 0;
}

/* Enabling RX DMA */
static void udc_set_rde(struct udc *dev)
{
	u32 tmp;

	VDBG(dev, "udc_set_rde()\n");
	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* set RDE */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
	writel(tmp, &dev->regs->ctl);
}

/* Queues a request packet, called by gadget driver */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
	int			retval = 0;
	u8			open_rxfifo = 0;
	unsigned long		iflags;
	struct udc_ep		*ep;
	struct udc_request	*req;
	struct udc		*dev;
	u32			tmp;

	/* check the inputs */
	req = container_of(usbreq, struct udc_request, req);

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
			|| !list_empty(&req->queue))
		return -EINVAL;
	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;

	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
	dev = ep->dev;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map dma (usually done before) */
	if (ep->dma && usbreq->length != 0
			&& (usbreq->dma == DMA_DONT_USE || usbreq->dma == 0)) {
		VDBG(dev, "DMA map req %p\n", req);
		if (ep->in)
			usbreq->dma = pci_map_single(dev->pdev,
						usbreq->buf,
						usbreq->length,
						PCI_DMA_TODEVICE);
		else
			usbreq->dma = pci_map_single(dev->pdev,
						usbreq->buf,
						usbreq->length,
						PCI_DMA_FROMDEVICE);
		req->dma_mapping = 1;
	}

	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
			usbep->name, usbreq, usbreq->length,
			req->td_data, usbreq->buf);

	spin_lock_irqsave(&dev->lock, iflags);
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;

	/* on empty queue just do first transfer */
	if (list_empty(&ep->queue)) {
		/* zlp */
		if (usbreq->length == 0) {
			/* IN zlp's are handled by hardware */
			complete_req(ep, req, 0);
			VDBG(dev, "%s: zlp\n", ep->ep.name);
			/*
			 * if set_config or set_intf is waiting for ack by zlp
			 * then set CSR_DONE
			 */
			if (dev->set_cfg_not_acked) {
				tmp = readl(&dev->regs->ctl);
				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
				writel(tmp, &dev->regs->ctl);
				dev->set_cfg_not_acked = 0;
			}
			/* setup command is ACK'ed now by zlp */
			if (dev->waiting_zlp_ack_ep0in) {
				/* clear NAK by writing CNAK in EP0_IN */
				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
				dev->ep[UDC_EP0IN_IX].naking = 0;
				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
							UDC_EP0IN_IX);
				dev->waiting_zlp_ack_ep0in = 0;
			}
			goto finished;
		}
		if (ep->dma) {
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
			/* write desc pointer to enable DMA */
			if (ep->in) {
				/* set HOST READY */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
			}

			/* disabled rx dma while descriptor update */
			if (!ep->in) {
				/* stop RDE timer */
				if (timer_pending(&udc_timer)) {
					set_rde = 0;
					mod_timer(&udc_timer, jiffies - 1);
				}
				/* clear RDE */
				tmp = readl(&dev->regs->ctl);
				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
				writel(tmp, &dev->regs->ctl);
				open_rxfifo = 1;

				/*
				 * if BNA occurred then let BNA dummy desc.
				 * point to current desc.
				 */
				if (ep->bna_occurred) {
					VDBG(dev, "copy to BNA dummy desc.\n");
					memcpy(ep->bna_dummy_req->td_data,
						req->td_data,
						sizeof(struct udc_data_dma));
				}
			}
			/* write desc pointer */
			writel(req->td_phys, &ep->regs->desptr);

			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

			if (ep->in) {
				/* enable ep irq */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}
		}

	} else if (ep->dma) {

		/*
		 * prep_dma not used for OUT ep's, this is not possible
		 * for PPB modes, because of chain creation reasons
		 */
		if (ep->in) {
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
		}
	}
	VDBG(dev, "list_add\n");
	/* add request to ep queue */
	if (req) {
		list_add_tail(&req->queue, &ep->queue);

		/* open rxfifo if out data queued */
		if (open_rxfifo) {
			/* enable DMA */
			req->dma_going = 1;
			udc_set_rde(dev);
			if (ep->num != UDC_EP0OUT_IX)
				dev->data_ep_queued = 1;
		}
		/* stop OUT naking */
		if (!ep->in) {
			if (!use_dma && udc_rxfifo_pending) {
				DBG(dev, "udc_queue(): pending bytes in "
					"rxfifo after nyet\n");
				/*
				 * read pending bytes after nyet:
				 * referring to isr
				 */
				if (udc_rxfifo_read(ep, req)) {
					/* finish */
					complete_req(ep, req, 0);
				}
				udc_rxfifo_pending = 0;
			}
		}
	}

finished:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}

/* Empty request queue of an endpoint; caller holds spinlock */
static void empty_req_queue(struct udc_ep *ep)
{
	struct udc_request	*req;

	ep->halted = 1;
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
			struct udc_request,
			queue);
		complete_req(ep, req, -ESHUTDOWN);
	}
}

/* Dequeues a request packet, called by gadget driver */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep		*ep;
	struct udc_request	*req;
	unsigned		halted;
	unsigned long		iflags;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return -EINVAL;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
				 */
				dma_sts = AMD_GETBITS(req->td_data->status,
							UDC_DMA_OUT_STS_BS);
				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
					ep->cancel_transfer = 1;
				else {
					udc_init_bna_dummy(ep->req);
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
				}
				writel(tmp, &udc->regs->ctl);
			}
		}
	}
	complete_req(ep, req, -ECONNRESET);
	ep->halted = halted;

	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}

/* Halt or clear halt of endpoint */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep	*ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty not taken into account
			 */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 1;

			/* setup poll timer */
			if (!timer_pending(&udc_pollstall_timer)) {
				udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
				if (!stop_pollstall_timer) {
					DBG(ep->dev, "start polltimer\n");
					add_timer(&udc_pollstall_timer);
				}
			}
		}
	} else {
		/* ep is halted by set_halt() before */
		if (ep->halted) {
			tmp = readl(&ep->regs->ctl);
			/* clear stall bit */
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return retval;
}

/* gadget interface */
static const struct usb_ep_ops udc_ep_ops = {
	.enable		= udc_ep_enable,
	.disable	= udc_ep_disable,

	.alloc_request	= udc_alloc_request,
	.free_request	= udc_free_request,

	.queue		= udc_queue,
	.dequeue	= udc_dequeue,

	.set_halt	= udc_set_halt,
	/* fifo ops not implemented */
};

/*-------------------------------------------------------------------------*/

/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
	return -EOPNOTSUPP;
}

/* Remote wakeup gadget interface */
static int udc_wakeup(struct usb_gadget *gadget)
{
	struct udc		*dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct udc, gadget);
	udc_remote_wakeup(dev);

	return 0;
}

/* gadget operations */
static const struct usb_gadget_ops udc_ops = {
	.wakeup		= udc_wakeup,
	.get_frame	= udc_get_frame,
};

/* Sets up endpoint parameters, adds endpoints to linked list */
static void make_ep_lists(struct udc *dev)
{
	/* make gadget ep lists */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
						&dev->gadget.ep_list);

	/* fifo config */
	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
	if (dev->gadget.speed == USB_SPEED_FULL)
		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
}

/* init registers at driver load time */
static int startup_registers(struct udc *dev)
{
	u32 tmp;

	/* init controller by soft reset */
	udc_soft_reset(dev);

	/* mask not needed interrupts */
	udc_mask_unused_interrupts(dev);

	/* put into initial config */
	udc_basic_init(dev);
	/* link up all endpoints */
	udc_setup_endpoints(dev);

	/* program speed */
	tmp = readl(&dev->regs->cfg);
	if (use_fullspeed) {
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
	} else {
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
	}
	writel(tmp, &dev->regs->cfg);

	return 0;
}

/* Inits UDC context */
static void udc_basic_init(struct udc *dev)
{
	u32	tmp;

	DBG(dev, "udc_basic_init()\n");

	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* stop poll stall timer */
	if (timer_pending(&udc_pollstall_timer)) {
		mod_timer(&udc_pollstall_timer, jiffies - 1);
	}
	/* disable DMA */
	tmp = readl(&dev->regs->ctl);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
	writel(tmp, &dev->regs->ctl);

	/* enable dynamic CSR programming */
	tmp = readl(&dev->regs->cfg);
	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
	/* set self powered */
	tmp |= AMD_BIT(UDC_DEVCFG_SP);
	/* set remote wakeupable */
	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
	writel(tmp, &dev->regs->cfg);

	make_ep_lists(dev);

	dev->data_ep_enabled = 0;
	dev->data_ep_queued = 0;
}

/* Sets initial endpoint parameters */
static void udc_setup_endpoints(struct udc *dev)
{
	struct udc_ep	*ep;
	u32	tmp;
	u32	reg;

	DBG(dev, "udc_setup_endpoints()\n");

	/* read enum speed */
	tmp = readl(&dev->regs->sts);
	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH) {
		dev->gadget.speed = USB_SPEED_HIGH;
	} else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL) {
		dev->gadget.speed = USB_SPEED_FULL;
	}

	/* set basic ep parameters */
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		ep = &dev->ep[tmp];
		ep->dev = dev;
		ep->ep.name = ep_string[tmp];
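
The listing is truncated here and continues on the following pages. As a minimal, illustrative sketch (not part of amd5536udc.c), the snippet below shows how a gadget driver would typically submit a transfer through the standard usb_ep_alloc_request()/usb_ep_queue() calls, which reach udc_alloc_request() and udc_queue() above via the udc_ep_ops table; the helper names queue_one_buffer() and my_complete() are hypothetical, and the fields filled in (buf, length, complete) correspond to the input checks at the top of udc_queue().

/*
 * Illustrative sketch only (not from amd5536udc.c): queue one transfer on
 * an endpoint exposed by this UDC.  usb_ep_alloc_request() lands in
 * udc_alloc_request() and usb_ep_queue() in udc_queue() via udc_ep_ops.
 * queue_one_buffer() and my_complete() are hypothetical helper names.
 */
#include <linux/errno.h>
#include <linux/usb/gadget.h>

static void my_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* req->status is 0 on success, -ESHUTDOWN/-ECONNRESET if aborted */
}

static int queue_one_buffer(struct usb_ep *ep, void *buf, unsigned len)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	/* udc_queue() rejects requests without a buffer or completion hook */
	req->buf = buf;
	req->length = len;
	req->complete = my_complete;

	return usb_ep_queue(ep, req, GFP_ATOMIC);
}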
