
📄 amd5536udc.c
📁 Linux kernel source code
💻 C
📖 Page 1 of 5
			reg |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(reg, &dev->ep[tmp].regs->ctl);
			dev->ep[tmp].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
		}
	}
	/* ... and ep0out */
	if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
		DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
		/* clear NAK by writing CNAK */
		reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
		reg |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
		dev->ep[UDC_EP0OUT_IX].naking = 0;
		UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
				dev->ep[UDC_EP0OUT_IX].num);
	}
}

/* Enabling RX DMA after setup packet */
static void udc_ep0_set_rde(struct udc *dev)
{
	if (use_dma) {
		/*
		 * only enable RXDMA when no data endpoint enabled
		 * or data is queued
		 */
		if (!dev->data_ep_enabled || dev->data_ep_queued) {
			udc_set_rde(dev);
		} else {
			/*
			 * setup timer for enabling RDE (to not enable
			 * RXFIFO DMA for data endpoints too early)
			 */
			if (set_rde != 0 && !timer_pending(&udc_timer)) {
				udc_timer.expires =
					jiffies + HZ/UDC_RDE_TIMER_DIV;
				set_rde = 1;
				if (!stop_timer) {
					add_timer(&udc_timer);
				}
			}
		}
	}
}

/* Interrupt handler for data OUT traffic */
static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t		ret_val = IRQ_NONE;
	u32			tmp;
	struct udc_ep		*ep;
	struct udc_request	*req;
	unsigned int		count;
	struct udc_data_dma	*td = NULL;
	unsigned		dma_done;

	VDBG(dev, "ep%d irq\n", ep_ix);
	ep = &dev->ep[ep_ix];

	tmp = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA event ? */
		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
			DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
					ep->num, readl(&ep->regs->desptr));
			/* clear BNA */
			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
			if (!ep->cancel_transfer)
				ep->bna_occurred = 1;
			else
				ep->cancel_transfer = 0;
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? */
	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);
		/* clear HE */
		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	if (!list_empty(&ep->queue)) {
		/* next request */
		req = list_entry(ep->queue.next,
			struct udc_request, queue);
	} else {
		req = NULL;
		udc_rxfifo_pending = 1;
	}
	VDBG(dev, "req = %p\n", req);
	/* fifo mode */
	if (!use_dma) {
		/* read fifo */
		if (req && udc_rxfifo_read(ep, req)) {
			ret_val = IRQ_HANDLED;

			/* finish */
			complete_req(ep, req, 0);
			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request, queue);
			} else
				req = NULL;
		}

	/* DMA */
	} else if (!ep->cancel_transfer && req != NULL) {
		ret_val = IRQ_HANDLED;

		/* check for DMA done */
		if (!use_dma_ppb) {
			dma_done = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_BS);
		/* packet per buffer mode - rx bytes */
		} else {
			/*
			 * if BNA occurred then recover desc. from
			 * BNA dummy desc.
			 */
			if (ep->bna_occurred) {
				VDBG(dev, "Recover desc. from BNA dummy\n");
				memcpy(req->td_data, ep->bna_dummy_req->td_data,
						sizeof(struct udc_data_dma));
				ep->bna_occurred = 0;
				udc_init_bna_dummy(ep->req);
			}
			td = udc_get_last_dma_desc(req);
			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
		}

		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
			/* buffer fill mode - rx bytes */
			if (!use_dma_ppb) {
				/* number of received bytes */
				count = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_RXBYTES);
				VDBG(dev, "rx bytes=%u\n", count);
			/* packet per buffer mode - rx bytes */
			} else {
				VDBG(dev, "req->td_data=%p\n", req->td_data);
				VDBG(dev, "last desc = %p\n", td);

				/* number of received bytes */
				if (use_dma_ppb_du) {
					/* every desc. counts bytes */
					count = udc_get_ppbdu_rxbytes(req);
				} else {
					/* last desc. counts bytes */
					count = AMD_GETBITS(td->status,
						UDC_DMA_OUT_STS_RXBYTES);
					if (!count && req->req.length
						== UDC_DMA_MAXPACKET) {
						/*
						 * on 64k packets the RXBYTES
						 * field is zero
						 */
						count = UDC_DMA_MAXPACKET;
					}
				}
				VDBG(dev, "last desc rx bytes=%u\n", count);
			}

			tmp = req->req.length - req->req.actual;
			if (count > tmp) {
				if ((tmp % ep->ep.maxpacket) != 0) {
					DBG(dev, "%s: rx %db, space=%db\n",
						ep->ep.name, count, tmp);
					req->req.status = -EOVERFLOW;
				}
				count = tmp;
			}
			req->req.actual += count;
			req->dma_going = 0;
			/* complete request */
			complete_req(ep, req, 0);

			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request,
					queue);
				/*
				 * DMA may already be started by udc_queue()
				 * called by the gadget driver's completion
				 * routine. This happens when the queue
				 * holds one request only.
				 */
				if (req->dma_going == 0) {
					/* next dma */
					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
						goto finished;
					/* write desc pointer */
					writel(req->td_phys,
						&ep->regs->desptr);
					req->dma_going = 1;
					/* enable DMA */
					udc_set_rde(dev);
				}
			} else {
				/*
				 * implant BNA dummy descriptor to allow
				 * RXFIFO opening by RDE
				 */
				if (ep->bna_dummy_req) {
					/* write desc pointer */
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
					ep->bna_occurred = 0;
				}

				/*
				 * schedule timer for setting RDE if queue
				 * remains empty to allow ep0 packets to pass
				 * through
				 */
				if (set_rde != 0
						&& !timer_pending(&udc_timer)) {
					udc_timer.expires =
						jiffies
						+ HZ*UDC_RDE_TIMER_SECONDS;
					set_rde = 1;
					if (!stop_timer) {
						add_timer(&udc_timer);
					}
				}
				if (ep->num != UDC_EP0OUT_IX)
					dev->data_ep_queued = 0;
			}

		} else {
			/*
			 * RX DMA must be re-enabled for each desc in PPBDU mode
			 * and must be enabled for PPBNDU mode in case of BNA
			 */
			udc_set_rde(dev);
		}

	} else if (ep->cancel_transfer) {
		ret_val = IRQ_HANDLED;
		ep->cancel_transfer = 0;
	}

	/* check pending CNAKS */
	if (cnak_pending) {
		/* CNAK processing only when rxfifo is empty */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			udc_process_cnak_queue(dev);
		}
	}

	/* clear OUT bits in ep status */
	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);

finished:
	return ret_val;
}

/* Interrupt handler for data IN traffic */
static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 epsts;
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc_data_dma *td;
	unsigned dma_done;
	unsigned len;

	ep = &dev->ep[ep_ix];
	epsts = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA ? */
		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
			dev_err(&dev->pdev->dev,
				"BNA ep%din occurred - DESPTR = %08lx\n",
				ep->num,
				(unsigned long) readl(&ep->regs->desptr));

			/* clear BNA */
			writel(epsts, &ep->regs->sts);
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? */
	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(&dev->pdev->dev,
			"HE ep%din occurred - DESPTR = %08lx\n",
			ep->num, (unsigned long) readl(&ep->regs->desptr));

		/* clear HE */
		writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	/* DMA completion */
	if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "TDC set - completion\n");
		ret_val = IRQ_HANDLED;
		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			if (req) {
				/*
				 * length bytes transferred
				 * check dma done of last desc. in PPBDU mode
				 */
				if (use_dma_ppb_du) {
					td = udc_get_last_dma_desc(req);
					if (td) {
						dma_done =
							AMD_GETBITS(td->status,
							UDC_DMA_IN_STS_BS);
						/* don't care DMA done */
						req->req.actual =
							req->req.length;
					}
				} else {
					/* assume all bytes transferred */
					req->req.actual = req->req.length;
				}

				if (req->req.actual == req->req.length) {
					/* complete req */
					complete_req(ep, req, 0);
					req->dma_going = 0;
					/* further request available ? */
					if (list_empty(&ep->queue)) {
						/* disable interrupt */
						tmp = readl(
							&dev->regs->ep_irqmsk);
						tmp |= AMD_BIT(ep->num);
						writel(tmp,
							&dev->regs->ep_irqmsk);
					}
				}
			}
		}
		ep->cancel_transfer = 0;
	}
	/*
	 * status reg has IN bit set and TDC not set (if TDC was handled,
	 * IN must not be handled - UDC defect?)
	 */
	if ((epsts & AMD_BIT(UDC_EPSTS_IN))
			&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
		ret_val = IRQ_HANDLED;
		if (!list_empty(&ep->queue)) {
			/* next request */
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/* FIFO mode */
			if (!use_dma) {
				/* write fifo */
				udc_txfifo_write(ep, &req->req);
				len = req->req.length - req->req.actual;
				if (len > ep->ep.maxpacket)
					len = ep->ep.maxpacket;
				req->req.actual += len;
				if (req->req.actual == req->req.length
					|| (len != ep->ep.maxpacket)) {
					/* complete req */
					complete_req(ep, req, 0);
				}

			/* DMA */
			} else if (req && !req->dma_going) {
				VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
					req, req->td_data);
				if (req->td_data) {
					req->dma_going = 1;

					/*
					 * unset L bit of first desc.
					 * for chain
					 */
					if (use_dma_ppb && req->req.length >
							ep->ep.maxpacket) {
						req->td_data->status &=
							AMD_CLEAR_BIT(
							UDC_DMA_IN_STS_L);
					}

					/* write desc pointer */
					writel(req->td_phys, &ep->regs->desptr);

					/* set HOST READY */
					req->td_data->status =
						AMD_ADDBITS(
						req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);

					/* set poll demand bit */
					tmp = readl(&ep->regs->ctl);
					tmp |= AMD_BIT(UDC_EPCTL_P);
					writel(tmp, &ep->regs->ctl);
				}
			}
		}
	}
	/* clear status bits */
	writel(epsts, &ep->regs->sts);

finished:
	return ret_val;
}

/* Interrupt handler for Control OUT traffic */
static irqreturn_t udc_control_out_isr(struct udc *dev)
__releases(dev->lock)
__acquires(dev->lock)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	int setup_supported;
	u32 count;
	int set = 0;
	struct udc_ep	*ep;
	struct udc_ep	*ep_tmp;

	ep = &dev->ep[UDC_EP0OUT_IX];

	/* clear irq */
	writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);

	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
	/* check BNA and clear if set */
	if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
		VDBG(dev, "ep0: BNA set\n");
		writel(AMD_BIT(UDC_EPSTS_BNA),
			&dev->ep[UDC_EP0OUT_IX].regs->sts);
		ep->bna_occurred = 1;
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	/* type of data: SETUP or DATA 0 bytes */
	tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
	VDBG(dev, "data_typ = %x\n", tmp);

	/* setup data */
	if (tmp == UDC_EPSTS_OUT_SETUP) {
		ret_val = IRQ_HANDLED;

		ep->dev->stall_ep0in = 0;
		dev->waiting_zlp_ack_ep0in = 0;

		/* set NAK for EP0_IN */
		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_SNAK);
		writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
		dev->ep[UDC_EP0IN_IX].naking = 1;
		/* get setup data */
		if (use_dma) {
			/* clear OUT bits in ep status */
			writel(UDC_EPSTS_OUT_CLEAR,
				&dev->ep[UDC_EP0OUT_IX].regs->sts);

			setup_data.data[0] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data12;
			setup_data.data[1] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data34;
			/* set HOST READY */
			dev->ep[UDC_EP0OUT_IX].td_stp->status =
					UDC_DMA_STP_STS_BS_HOST_READY;
		} else {
			/* read fifo */
			udc_rxfifo_read_dwords(dev, setup_data.data, 2);
		}

		/* determine direction of control data */
		if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
			/* enable RDE */
			udc_ep0_set_rde(dev);
			set = 0;
		} else {
			dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
			/*
			 * implant BNA dummy descriptor to allow RXFIFO opening
			 * by RDE
			 */
			if (ep->bna_dummy_req) {
				/* write desc pointer */
				writel(ep->bna_dummy_req->td_phys,
					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
				ep->bna_occurred = 0;
			}

			set = 1;
			dev->ep[UDC_EP0OUT_IX].naking = 1;
			/*
			 * setup timer for enabling RDE (to not enable
			 * RXFIFO DMA for data too early)
			 */
			set_rde = 1;
			if (!timer_pending(&udc_timer)) {
				udc_timer
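
The listing above leans throughout on the register/descriptor bitfield helpers AMD_BIT, AMD_CLEAR_BIT, AMD_GETBITS and AMD_ADDBITS, which are defined in the companion header amd5536udc.h rather than on this page. As a reading aid, here is a minimal, self-contained sketch of how such helpers can work, assuming each multi-bit field FOO is described by companion FOO_MASK and FOO_OFS constants (the pattern that call sites like AMD_GETBITS(tmp, UDC_EPSTS_OUT) suggest). The DEMO_BS field is hypothetical; this is an illustration, not the header's verbatim definitions.

#include <stdint.h>
#include <stdio.h>

/* single-bit helpers: build a one-bit mask, or its complement */
#define AMD_BIT(bit)		(1u << (bit))
#define AMD_CLEAR_BIT(bit)	(~AMD_BIT(bit))

/* token-paste the field name into its companion mask/offset constants */
#define AMD_MSK(field)		(field ## _MASK)
#define AMD_OFS(field)		(field ## _OFS)

/* extract a multi-bit field from a register or descriptor status word */
#define AMD_GETBITS(reg, field) \
	(((reg) & AMD_MSK(field)) >> AMD_OFS(field))

/* replace a multi-bit field, leaving all other bits untouched */
#define AMD_ADDBITS(reg, value, field) \
	(((reg) & ~AMD_MSK(field)) \
	 | (((value) << AMD_OFS(field)) & AMD_MSK(field)))

/* hypothetical field layout: a 2-bit buffer status at bits 31:30 */
#define DEMO_BS_MASK	0xC0000000u
#define DEMO_BS_OFS	30

int main(void)
{
	uint32_t status = 0x0000ffffu;

	status = AMD_ADDBITS(status, 3u, DEMO_BS);	/* set field to 3 */
	printf("status=%08x bs=%u\n", status,
	       (unsigned)AMD_GETBITS(status, DEMO_BS));	/* prints bs=3 */
	return 0;
}

With these semantics, a line such as req->td_data->status = AMD_ADDBITS(req->td_data->status, UDC_DMA_IN_STS_BS_HOST_READY, UDC_DMA_IN_STS_BS) reads as: overwrite only the buffer-status field of the DMA descriptor's status word with HOST_READY, preserving every other bit.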
