
📄 usbdev.c

📁 Source code of LINUX 2.6.17.4
💻 C
📖 Page 1 of 3
	status = au_readl(USBD_INTSTAT);
	au_writel(status, USBD_INTSTAT);	// ack'em

	if (status & (1<<0))
		process_ep0_receive(dev);
	if (status & (1<<4))
		process_ep_receive(dev, &dev->ep[4]);
	if (status & (1<<5))
		process_ep_receive(dev, &dev->ep[5]);
}


/* This ISR handles the DMA done events on EP0 */
static void
dma_done_ep0_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct usb_dev *dev = (struct usb_dev *) dev_id;
	usbdev_pkt_t* pkt;
	endpoint_t *ep0 = &dev->ep[0];
	u32 cs0, buff_done;

	spin_lock(&ep0->lock);

	cs0 = au_readl(ep0->reg->ctrl_stat);

	// first check packet transmit done
	if ((buff_done = get_dma_buffer_done(ep0->indma)) != 0) {
		// transmitted a DATAx packet during DATA stage
		// on control endpoint 0
		// clear DMA done bit
		if (buff_done & DMA_D0)
			clear_dma_done0(ep0->indma);
		if (buff_done & DMA_D1)
			clear_dma_done1(ep0->indma);

		pkt = send_packet_complete(ep0);
		kfree(pkt);
	}

	/*
	 * Now check packet receive done. Shouldn't get these,
	 * the receive packet complete intr should happen
	 * before the DMA done intr occurs.
	 */
	if ((buff_done = get_dma_buffer_done(ep0->outdma)) != 0) {
		// clear DMA done bit
		if (buff_done & DMA_D0)
			clear_dma_done0(ep0->outdma);
		if (buff_done & DMA_D1)
			clear_dma_done1(ep0->outdma);

		//process_ep0_receive(dev);
	}

	spin_unlock(&ep0->lock);
}

/* This ISR handles the DMA done events on endpoints 2,3,4,5 */
static void
dma_done_ep_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct usb_dev *dev = (struct usb_dev *) dev_id;
	int i;

	for (i = 2; i < 6; i++) {
		u32 buff_done;
		usbdev_pkt_t* pkt;
		endpoint_t *ep = &dev->ep[i];

		if (!ep->active)
			continue;

		spin_lock(&ep->lock);

		if (ep->direction == USB_DIR_IN) {
			buff_done = get_dma_buffer_done(ep->indma);
			if (buff_done != 0) {
				// transmitted a DATAx pkt on the IN ep
				// clear DMA done bit
				if (buff_done & DMA_D0)
					clear_dma_done0(ep->indma);
				if (buff_done & DMA_D1)
					clear_dma_done1(ep->indma);

				pkt = send_packet_complete(ep);

				spin_unlock(&ep->lock);
				dev->func_cb(CB_PKT_COMPLETE,
					     (unsigned long)pkt,
					     dev->cb_data);
				spin_lock(&ep->lock);
			}
		} else {
			/*
			 * Check packet receive done (OUT ep). Shouldn't get
			 * these, the rx packet complete intr should happen
			 * before the DMA done intr occurs.
			 */
			buff_done = get_dma_buffer_done(ep->outdma);
			if (buff_done != 0) {
				// received a DATAx pkt on the OUT ep
				// clear DMA done bit
				if (buff_done & DMA_D0)
					clear_dma_done0(ep->outdma);
				if (buff_done & DMA_D1)
					clear_dma_done1(ep->outdma);

				//process_ep_receive(dev, ep);
			}
		}

		spin_unlock(&ep->lock);
	}
}


/***************************************************************************
 * Here begins the external interface functions
 ***************************************************************************
 */

/*
 * allocate a new packet
 */
int
usbdev_alloc_packet(int ep_addr, int data_size, usbdev_pkt_t** pkt)
{
	endpoint_t * ep = epaddr_to_ep(&usbdev, ep_addr);
	usbdev_pkt_t* lpkt = NULL;

	if (!ep || !ep->active || ep->address < 2)
		return -ENODEV;
	if (data_size > ep->max_pkt_size)
		return -EINVAL;

	lpkt = *pkt = alloc_packet(ep, data_size, NULL);
	if (!lpkt)
		return -ENOMEM;

	return 0;
}


/*
 * packet send
 */
int
usbdev_send_packet(int ep_addr, usbdev_pkt_t * pkt)
{
	unsigned long flags;
	int count;
	endpoint_t * ep;

	if (!pkt || !(ep = epaddr_to_ep(&usbdev, pkt->ep_addr)) ||
	    !ep->active || ep->address < 2)
		return -ENODEV;
	if (ep->direction != USB_DIR_IN)
		return -EINVAL;

	spin_lock_irqsave(&ep->lock, flags);
	count = send_packet(&usbdev, pkt, 1);
	spin_unlock_irqrestore(&ep->lock, flags);

	return count;
}

/*
 * packet receive
 */
int
usbdev_receive_packet(int ep_addr, usbdev_pkt_t** pkt)
{
	unsigned long flags;
	usbdev_pkt_t* lpkt = NULL;
	endpoint_t *ep = epaddr_to_ep(&usbdev, ep_addr);

	if (!ep || !ep->active || ep->address < 2)
		return -ENODEV;
	if (ep->direction != USB_DIR_OUT)
		return -EINVAL;

	spin_lock_irqsave(&ep->lock, flags);
	if (ep->outlist.count > 1)
		lpkt = unlink_head(&ep->outlist);
	spin_unlock_irqrestore(&ep->lock, flags);

	if (!lpkt) {
		/* no packet available */
		*pkt = NULL;
		return -ENODATA;
	}

	*pkt = lpkt;

	return lpkt->size;
}


/*
 * return total queued byte count on the endpoint.
 */
int
usbdev_get_byte_count(int ep_addr)
{
	unsigned long flags;
	pkt_list_t *list;
	usbdev_pkt_t *scan;
	int count = 0;
	endpoint_t * ep = epaddr_to_ep(&usbdev, ep_addr);

	if (!ep || !ep->active || ep->address < 2)
		return -ENODEV;

	if (ep->direction == USB_DIR_IN) {
		list = &ep->inlist;
		spin_lock_irqsave(&ep->lock, flags);
		for (scan = list->head; scan; scan = scan->next)
			count += scan->size;
		spin_unlock_irqrestore(&ep->lock, flags);
	} else {
		list = &ep->outlist;
		spin_lock_irqsave(&ep->lock, flags);
		if (list->count > 1) {
			for (scan = list->head; scan != list->tail;
			     scan = scan->next)
				count += scan->size;
		}
		spin_unlock_irqrestore(&ep->lock, flags);
	}

	return count;
}


void
usbdev_exit(void)
{
	endpoint_t *ep;
	int i;

	au_writel(0, USBD_INTEN);	// disable usb dev ints
	au_writel(0, USBD_ENABLE);	// disable usb dev

	free_irq(AU1000_USB_DEV_REQ_INT, &usbdev);
	free_irq(AU1000_USB_DEV_SUS_INT, &usbdev);

	// free all control endpoint resources
	ep = &usbdev.ep[0];
	free_au1000_dma(ep->indma);
	free_au1000_dma(ep->outdma);
	endpoint_flush(ep);

	// free ep resources
	for (i = 2; i < 6; i++) {
		ep = &usbdev.ep[i];
		if (!ep->active) continue;

		if (ep->direction == USB_DIR_IN) {
			free_au1000_dma(ep->indma);
		} else {
			free_au1000_dma(ep->outdma);
		}
		endpoint_flush(ep);
	}

	kfree(usbdev.full_conf_desc);
}


int
usbdev_init(struct usb_device_descriptor* dev_desc,
	    struct usb_config_descriptor* config_desc,
	    struct usb_interface_descriptor* if_desc,
	    struct usb_endpoint_descriptor* ep_desc,
	    struct usb_string_descriptor* str_desc[],
	    void (*cb)(usbdev_cb_type_t, unsigned long, void *),
	    void* cb_data)
{
	endpoint_t *ep0;
	int i, ret = 0;
	u8* fcd;

	if (dev_desc->bNumConfigurations > 1 ||
	    config_desc->bNumInterfaces > 1 ||
	    if_desc->bNumEndpoints > 4) {
		err("Only one config, one i/f, and no more "
		    "than 4 ep's allowed");
		ret = -EINVAL;
		goto out;
	}

	if (!cb) {
		err("Function-layer callback required");
		ret = -EINVAL;
		goto out;
	}

	if (dev_desc->bMaxPacketSize0 != USBDEV_EP0_MAX_PACKET_SIZE) {
		warn("EP0 Max Packet size must be %d",
		     USBDEV_EP0_MAX_PACKET_SIZE);
		dev_desc->bMaxPacketSize0 = USBDEV_EP0_MAX_PACKET_SIZE;
	}

	memset(&usbdev, 0, sizeof(struct usb_dev));

	usbdev.state = DEFAULT;
	usbdev.dev_desc = dev_desc;
	usbdev.if_desc = if_desc;
	usbdev.conf_desc = config_desc;
	for (i = 0; i < 6; i++)
		usbdev.str_desc[i] = str_desc[i];
	usbdev.func_cb = cb;
	usbdev.cb_data = cb_data;

	/* Initialize default control endpoint */
	ep0 = &usbdev.ep[0];
	ep0->active = 1;
	ep0->type = CONTROL_EP;
	ep0->max_pkt_size = USBDEV_EP0_MAX_PACKET_SIZE;
	spin_lock_init(&ep0->lock);
	ep0->desc = NULL;	// ep0 has no descriptor
	ep0->address = 0;
	ep0->direction = 0;
	ep0->reg = &ep_reg[0];

	/* Initialize the other requested endpoints */
	for (i = 0; i < if_desc->bNumEndpoints; i++) {
		struct usb_endpoint_descriptor* epd = &ep_desc[i];
		endpoint_t *ep;

		if ((epd->bEndpointAddress & 0x80) == USB_DIR_IN) {
			ep = &usbdev.ep[2];
			ep->address = 2;
			if (ep->active) {
				ep = &usbdev.ep[3];
				ep->address = 3;
				if (ep->active) {
					err("too many IN ep's requested");
					ret = -ENODEV;
					goto out;
				}
			}
		} else {
			ep = &usbdev.ep[4];
			ep->address = 4;
			if (ep->active) {
				ep = &usbdev.ep[5];
				ep->address = 5;
				if (ep->active) {
					err("too many OUT ep's requested");
					ret = -ENODEV;
					goto out;
				}
			}
		}

		ep->active = 1;
		epd->bEndpointAddress &= ~0x0f;
		epd->bEndpointAddress |= (u8)ep->address;
		ep->direction = epd->bEndpointAddress & 0x80;
		ep->type = epd->bmAttributes & 0x03;
		ep->max_pkt_size = le16_to_cpu(epd->wMaxPacketSize);
		spin_lock_init(&ep->lock);
		ep->desc = epd;
		ep->reg = &ep_reg[ep->address];
	}

	/*
	 * initialize the full config descriptor
	 */
	usbdev.full_conf_desc = fcd = kmalloc(le16_to_cpu(config_desc->wTotalLength),
					      ALLOC_FLAGS);
	if (!fcd) {
		err("failed to alloc full config descriptor");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(fcd, config_desc, USB_DT_CONFIG_SIZE);
	fcd += USB_DT_CONFIG_SIZE;
	memcpy(fcd, if_desc, USB_DT_INTERFACE_SIZE);
	fcd += USB_DT_INTERFACE_SIZE;
	for (i = 0; i < if_desc->bNumEndpoints; i++) {
		memcpy(fcd, &ep_desc[i], USB_DT_ENDPOINT_SIZE);
		fcd += USB_DT_ENDPOINT_SIZE;
	}

	/* Now we're ready to enable the controller */
	au_writel(0x0002, USBD_ENABLE);
	udelay(100);
	au_writel(0x0003, USBD_ENABLE);
	udelay(100);

	/* build and send config table based on ep descriptors */
	for (i = 0; i < 6; i++) {
		endpoint_t *ep;
		if (i == 1)
			continue;	// skip dummy ep
		ep = &usbdev.ep[i];
		if (ep->active) {
			au_writel((ep->address << 4) | 0x04, USBD_CONFIG);
			au_writel(((ep->max_pkt_size & 0x380) >> 7) |
				  (ep->direction >> 4) | (ep->type << 4),
				  USBD_CONFIG);
			au_writel((ep->max_pkt_size & 0x7f) << 1, USBD_CONFIG);
			au_writel(0x00, USBD_CONFIG);
			au_writel(ep->address, USBD_CONFIG);
		} else {
			u8 dir = (i == 2 || i == 3) ? DIR_IN : DIR_OUT;
			au_writel((i << 4) | 0x04, USBD_CONFIG);
			au_writel(((16 & 0x380) >> 7) | dir |
				  (BULK_EP << 4), USBD_CONFIG);
			au_writel((16 & 0x7f) << 1, USBD_CONFIG);
			au_writel(0x00, USBD_CONFIG);
			au_writel(i, USBD_CONFIG);
		}
	}

	/*
	 * Enable Receive FIFO Complete interrupts only. Transmit
	 * complete is being handled by the DMA done interrupts.
	 */
	au_writel(0x31, USBD_INTEN);

	/*
	 * Controller is now enabled, request DMA and IRQ
	 * resources.
	 */

	/* request the USB device transfer complete interrupt */
	if (request_irq(AU1000_USB_DEV_REQ_INT, req_sus_intr, SA_INTERRUPT,
			"USBdev req", &usbdev)) {
		err("Can't get device request intr");
		ret = -ENXIO;
		goto out;
	}
	/* request the USB device suspend interrupt */
	if (request_irq(AU1000_USB_DEV_SUS_INT, req_sus_intr, SA_INTERRUPT,
			"USBdev sus", &usbdev)) {
		err("Can't get device suspend intr");
		ret = -ENXIO;
		goto out;
	}

	/* Request EP0 DMA and IRQ */
	if ((ep0->indma = request_au1000_dma(ep_dma_id[0].id,
					     ep_dma_id[0].str,
					     dma_done_ep0_intr,
					     SA_INTERRUPT,
					     &usbdev)) < 0) {
		err("Can't get %s DMA", ep_dma_id[0].str);
		ret = -ENXIO;
		goto out;
	}
	if ((ep0->outdma = request_au1000_dma(ep_dma_id[1].id,
					      ep_dma_id[1].str,
					      NULL, 0, NULL)) < 0) {
		err("Can't get %s DMA", ep_dma_id[1].str);
		ret = -ENXIO;
		goto out;
	}

	// Flush the ep0 buffers and FIFOs
	endpoint_flush(ep0);
	// start packet reception on ep0
	kickstart_receive_packet(ep0);

	/* Request DMA and IRQ for the other endpoints */
	for (i = 2; i < 6; i++) {
		endpoint_t *ep = &usbdev.ep[i];
		if (!ep->active)
			continue;

		// Flush the endpoint buffers and FIFOs
		endpoint_flush(ep);

		if (ep->direction == USB_DIR_IN) {
			ep->indma =
				request_au1000_dma(ep_dma_id[ep->address].id,
						   ep_dma_id[ep->address].str,
						   dma_done_ep_intr,
						   SA_INTERRUPT,
						   &usbdev);
			if (ep->indma < 0) {
				err("Can't get %s DMA",
				    ep_dma_id[ep->address].str);
				ret = -ENXIO;
				goto out;
			}
		} else {
			ep->outdma =
				request_au1000_dma(ep_dma_id[ep->address].id,
						   ep_dma_id[ep->address].str,
						   NULL, 0, NULL);
			if (ep->outdma < 0) {
				err("Can't get %s DMA",
				    ep_dma_id[ep->address].str);
				ret = -ENXIO;
				goto out;
			}

			// start packet reception on OUT endpoint
			kickstart_receive_packet(ep);
		}
	}

 out:
	if (ret)
		usbdev_exit();
	return ret;
}


EXPORT_SYMBOL(usbdev_init);
EXPORT_SYMBOL(usbdev_exit);
EXPORT_SYMBOL(usbdev_alloc_packet);
EXPORT_SYMBOL(usbdev_receive_packet);
EXPORT_SYMBOL(usbdev_send_packet);
EXPORT_SYMBOL(usbdev_get_byte_count);
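
For orientation, here is a minimal, hypothetical sketch of how a function-layer module might drive the interface exported above (usbdev_init, usbdev_alloc_packet, usbdev_send_packet, usbdev_exit). It is not part of usbdev.c: the descriptor variables (my_dev_desc, my_config_desc, my_if_desc, my_ep_desc, my_str_desc), the callback my_func_cb, the header path, the low-nibble endpoint addressing, and the pkt->payload field name are all assumptions made for illustration.

/*
 * Hypothetical function-layer module (illustration only, not part of
 * usbdev.c). Header path, descriptor contents and the packet payload
 * field name are assumptions.
 */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/usb.h>
#include <asm/au1000_usbdev.h>	/* assumed location of the usbdev_* API */

/* Descriptors are assumed to be defined and filled in elsewhere in this
 * hypothetical module. */
extern struct usb_device_descriptor    my_dev_desc;
extern struct usb_config_descriptor    my_config_desc;
extern struct usb_interface_descriptor my_if_desc;
extern struct usb_endpoint_descriptor  my_ep_desc[2];
extern struct usb_string_descriptor   *my_str_desc[6];

static void my_func_cb(usbdev_cb_type_t cb, unsigned long data, void *priv)
{
	/* e.g. CB_PKT_COMPLETE arrives here with the finished packet,
	 * as issued by dma_done_ep_intr() above. */
}

static int __init my_func_init(void)
{
	usbdev_pkt_t *pkt;
	int ret, in_ep;

	ret = usbdev_init(&my_dev_desc, &my_config_desc, &my_if_desc,
			  my_ep_desc, my_str_desc, my_func_cb, NULL);
	if (ret)
		return ret;

	/* usbdev_init() rewrote bEndpointAddress with the endpoint it
	 * actually assigned (2/3 for IN, 4/5 for OUT); assume the low
	 * nibble is what epaddr_to_ep() expects. */
	in_ep = my_ep_desc[0].bEndpointAddress & 0x0f;

	/* queue one small packet on the IN endpoint */
	ret = usbdev_alloc_packet(in_ep, 5, &pkt);
	if (ret)
		goto fail;

	memcpy(pkt->payload, "hello", 5);	/* payload field name assumed */
	ret = usbdev_send_packet(in_ep, pkt);	/* queued byte count or <0 */
	if (ret < 0)
		goto fail;
	return 0;

 fail:
	usbdev_exit();
	return ret;
}

static void __exit my_func_exit(void)
{
	usbdev_exit();
}

module_init(my_func_init);
module_exit(my_func_exit);
MODULE_LICENSE("GPL");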
