
amd5536udc.c

Linux kernel source code
C
Page 1 of 5
/* Allocates request packet, called by gadget driver */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
	struct udc_request	*req;
	struct udc_data_dma	*dma_desc;
	struct udc_ep		*ep;

	if (!usbep)
		return NULL;
	ep = container_of(usbep, struct udc_ep, ep);

	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
	req = kzalloc(sizeof(struct udc_request), gfp);
	if (!req)
		return NULL;

	req->req.dma = DMA_DONT_USE;
	INIT_LIST_HEAD(&req->queue);

	if (ep->dma) {
		/* ep0 in requests are allocated from data pool here */
		dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
						&req->td_phys);
		if (!dma_desc) {
			kfree(req);
			return NULL;
		}

		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
				"td_phys = %lx\n",
				req, dma_desc,
				(unsigned long)req->td_phys);
		/* prevent from using desc. - set HOST BUSY */
		dma_desc->status = AMD_ADDBITS(dma_desc->status,
						UDC_DMA_STP_STS_BS_HOST_BUSY,
						UDC_DMA_STP_STS_BS);
		dma_desc->bufptr = __constant_cpu_to_le32(DMA_DONT_USE);
		req->td_data = dma_desc;
		req->td_data_last = NULL;
		req->chain_len = 1;
	}

	return &req->req;
}

/* Frees request packet, called by gadget driver */
static void
udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep		*ep;
	struct udc_request	*req;

	if (!usbep || !usbreq)
		return;
	ep = container_of(usbep, struct udc_ep, ep);
	req = container_of(usbreq, struct udc_request, req);
	VDBG(ep->dev, "free_req req=%p\n", req);
	BUG_ON(!list_empty(&req->queue));
	if (req->td_data) {
		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);

		/* free dma chain if created */
		if (req->chain_len > 1) {
			udc_free_dma_chain(ep->dev, req);
		}

		pci_pool_free(ep->dev->data_requests, req->td_data,
							req->td_phys);
	}
	kfree(req);
}

/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
static void udc_init_bna_dummy(struct udc_request *req)
{
	if (req) {
		/* set last bit */
		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

		/* set next pointer to itself */
		req->td_data->next = req->td_phys;

		/* set HOST BUSY */
		req->td_data->status
			= AMD_ADDBITS(req->td_data->status,
					UDC_DMA_STP_STS_BS_DMA_DONE,
					UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
		pr_debug("bna desc = %p, sts = %08x\n",
			req->td_data, req->td_data->status);
#endif
	}
}

/* Allocate BNA dummy descriptor */
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
{
	struct udc_request *req = NULL;
	struct usb_request *_req = NULL;

	/* alloc the dummy request */
	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
	if (_req) {
		req = container_of(_req, struct udc_request, req);
		ep->bna_dummy_req = req;
		udc_init_bna_dummy(req);
	}
	return req;
}

/* Write data to TX fifo for IN packets */
static void
udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
	u8			*req_buf;
	u32			*buf;
	int			i, j;
	unsigned		bytes = 0;
	unsigned		remaining = 0;

	if (!req || !ep)
		return;

	req_buf = req->buf + req->actual;
	prefetch(req_buf);
	remaining = req->length - req->actual;

	buf = (u32 *) req_buf;

	bytes = ep->ep.maxpacket;
	if (bytes > remaining)
		bytes = remaining;

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
		writel(*(buf + i), ep->txfifo);
	}

	/* remaining bytes must be written by byte access */
	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
							ep->txfifo);
	}

	/* dummy write confirm */
	writel(0, &ep->regs->confirm);
}

/* Read dwords from RX fifo for OUT transfers */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
	int i;

	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);

	for (i = 0; i < dwords; i++) {
		*(buf + i) = readl(dev->rxfifo);
	}
	return 0;
}

/* Read bytes from RX fifo for OUT transfers */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
	int i, j;
	u32 tmp;

	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
		*((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
	}

	/* remaining bytes must be read by byte access */
	if (bytes % UDC_DWORD_BYTES) {
		tmp = readl(dev->rxfifo);
		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
			*(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
			tmp = tmp >> UDC_BITS_PER_BYTE;
		}
	}

	return 0;
}

/* Read data from RX fifo for OUT transfers */
static int
udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
	u8 *buf;
	unsigned buf_space;
	unsigned bytes = 0;
	unsigned finished = 0;

	/* received number bytes */
	bytes = readl(&ep->regs->sts);
	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);

	buf_space = req->req.length - req->req.actual;
	buf = req->req.buf + req->req.actual;
	if (bytes > buf_space) {
		if ((buf_space % ep->ep.maxpacket) != 0) {
			DBG(ep->dev,
				"%s: rx %d bytes, rx-buf space = %d bytesn\n",
				ep->ep.name, bytes, buf_space);
			req->req.status = -EOVERFLOW;
		}
		bytes = buf_space;
	}
	req->req.actual += bytes;

	/* last packet ? */
	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
		|| ((req->req.actual == req->req.length) && !req->req.zero))
		finished = 1;

	/* read rx fifo bytes */
	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
	udc_rxfifo_read_bytes(ep->dev, buf, bytes);

	return finished;
}

/* create/re-init a DMA descriptor or a DMA descriptor chain */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
	int	retval = 0;
	u32	tmp;

	VDBG(ep->dev, "prep_dma\n");
	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
			ep->num, req->td_data);

	/* set buffer pointer */
	req->td_data->bufptr = req->req.dma;

	/* set last bit */
	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
	if (use_dma_ppb) {

		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
		if (retval != 0) {
			if (retval == -ENOMEM)
				DBG(ep->dev, "Out of DMA memory\n");
			return retval;
		}
		if (ep->in) {
			if (req->req.length == ep->ep.maxpacket) {
				/* write tx bytes */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						ep->ep.maxpacket,
						UDC_DMA_IN_STS_TXBYTES);
			}
		}
	}

	if (ep->in) {
		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
				"maxpacket=%d ep%d\n",
				use_dma_ppb, req->req.length,
				ep->ep.maxpacket, ep->num);
		/*
		 * if bytes < max packet then tx bytes must
		 * be written in packet per buffer mode
		 */
		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
				|| ep->num == UDC_EP0OUT_IX
				|| ep->num == UDC_EP0IN_IX) {
			/* write tx bytes */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						req->req.length,
						UDC_DMA_IN_STS_TXBYTES);
			/* reset frame num */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						0,
						UDC_DMA_IN_STS_FRAMENUM);
		}
		/* set HOST BUSY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_BUSY,
				UDC_DMA_STP_STS_BS);
	} else {
		VDBG(ep->dev, "OUT set host ready\n");
		/* set HOST READY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_READY,
				UDC_DMA_STP_STS_BS);

		/* clear NAK by writing CNAK */
		if (ep->naking) {
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->naking = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}

	return retval;
}

/* Completes request packet ... caller MUST hold lock */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
	struct udc		*dev;
	unsigned		halted;

	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

	dev = ep->dev;
	/* unmap DMA */
	if (req->dma_mapping) {
		if (ep->in)
			pci_unmap_single(dev->pdev,
					req->req.dma,
					req->req.length,
					PCI_DMA_TODEVICE);
		else
			pci_unmap_single(dev->pdev,
					req->req.dma,
					req->req.length,
					PCI_DMA_FROMDEVICE);
		req->dma_mapping = 0;
		req->req.dma = DMA_DONT_USE;
	}

	halted = ep->halted;
	ep->halted = 1;

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = sts;

	/* remove from ep queue */
	list_del_init(&req->queue);

	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
		&req->req, req->req.length, ep->ep.name, sts);

	spin_unlock(&dev->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}

/* frees pci pool descriptors of a DMA chain */
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{
	int ret_val = 0;
	struct udc_data_dma	*td;
	struct udc_data_dma	*td_last = NULL;
	unsigned int i;

	DBG(dev, "free chain req = %p\n", req);

	/* do not free first desc., will be done by free for request */
	td_last = req->td_data;
	td = phys_to_virt(td_last->next);

	for (i = 1; i < req->chain_len; i++) {
		pci_pool_free(dev->data_requests, td,
				(dma_addr_t) td_last->next);
		td_last = td;
		td = phys_to_virt(td_last->next);
	}

	return ret_val;
}

/* Iterates to the end of a DMA chain and returns last descriptor */
static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
{
	struct udc_data_dma	*td;

	td = req->td_data;
	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
	}

	return td;
}

/* Iterates to the end of a DMA chain and counts bytes received */
static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
{
	struct udc_data_dma	*td;
	u32 count;

	td = req->td_data;
	/* received number bytes */
	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);

	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
		/* received number bytes */
		if (td) {
			count += AMD_GETBITS(td->status,
				UDC_DMA_OUT_STS_RXBYTES);
		}
	}

	return count;
}

/* Creates or re-inits a DMA chain */
static int udc_create_dma_chain(
	struct udc_ep *ep,
	struct udc_request *req,
	unsigned long buf_len, gfp_t gfp_flags
)
{
	unsigned long bytes = req->req.length;
	unsigned int i;
	dma_addr_t dma_addr;
	struct udc_data_dma	*td = NULL;
	struct udc_data_dma	*last = NULL;
	unsigned long txbytes;
	unsigned create_new_chain = 0;
	unsigned len;

	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
			bytes, buf_len);
	dma_addr = DMA_DONT_USE;

	/* unset L bit in first desc for OUT */
	if (!ep->in) {
		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
	}

	/* alloc only new desc's if not already available */
	len = req->req.length / ep->ep.maxpacket;
	if (req->req.length % ep->ep.maxpacket) {
		len++;
	}

	if (len > req->chain_len) {
		/* shorter chain already allocated before */
		if (req->chain_len > 1) {
			udc_free_dma_chain(ep->dev, req);
		}
		req->chain_len = len;
		create_new_chain = 1;
	}

	td = req->td_data;
	/* gen. required number of descriptors and buffers */
	for (i = buf_len; i < bytes; i += buf_len) {
		/* create or determine next desc. */
		if (create_new_chain) {
			td = pci_pool_alloc(ep->dev->data_requests,
					gfp_flags, &dma_addr);
			if (!td)
				return -ENOMEM;

			td->status = 0;
		} else if (i == buf_len) {
			/* first td */
			td = (struct udc_data_dma *) phys_to_virt(
						req->td_data->next);
			td->status = 0;
		} else {
			td = (struct udc_data_dma *) phys_to_virt(last->next);
			td->status = 0;
		}

		if (td)
			td->bufptr = req->req.dma + i; /* assign buffer */
		else
			break;

		/* short packet ? */
		if ((bytes - i) >= buf_len) {
			txbytes = buf_len;
		} else {
			/* short packet */
			txbytes = bytes - i;
		}

		/* link td and assign tx bytes */
		if (i == buf_len) {
			if (create_new_chain) {
				req->td_data->next = dma_addr;
			} else {
				/* req->td_data->next = virt_to_phys(td); */
			}
			/* write tx bytes */
			if (ep->in) {
				/* first desc */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
							ep->ep.maxpacket,
							UDC_DMA_IN_STS_TXBYTES);
				/* second desc */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		} else {
