
uhci-hcd.c
USB protocol stack for the RTAI hard real-time Linux patch (C source, part 1 of 2)
 */
	check_and_reset_hc(uhci);
	return 0;
}

/* Make sure the controller is quiescent and that we're not using it
 * any more.  This is mainly for the benefit of programs which, like kexec,
 * expect the hardware to be idle: not doing DMA or generating IRQs.
 *
 * This routine may be called in a damaged or failing kernel.  Hence we
 * do not acquire the spinlock before shutting down the controller.
 */
static void uhci_shutdown(struct pci_dev *pdev)
{
	struct usb_hcd *hcd = (struct usb_hcd *) pci_get_drvdata(pdev);

	hc_died(hcd_to_uhci(hcd));
}

/*
 * Allocate a frame list, and then set up the skeleton.
 *
 * The hardware doesn't really know any difference
 * in the queues, but the order does matter for the
 * protocols higher up. The order is:
 *
 *  - any isochronous events handled before any
 *    of the queues. We don't do that here, because
 *    we'll create the actual TD entries on demand.
 *  - The first queue is the interrupt queue.
 *  - The second queue is the control queue, split into low- and full-speed.
 *  - The third queue is the bulk queue.
 *  - The fourth queue is the bandwidth reclamation queue, which loops back
 *    to the full-speed control queue.
 */
static int uhci_start(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int retval = -EBUSY;
	int i;
	dma_addr_t dma_handle;
	struct dentry *dentry;

	hcd->uses_new_polling = 1;

	dentry = debugfs_create_file(hcd->self.bus_name,
			S_IFREG|S_IRUGO|S_IWUSR, uhci_debugfs_root, uhci,
			&uhci_debug_operations);
	if (!dentry) {
		rtdm_dev_err(uhci_dev(uhci),
				"couldn't create uhci debugfs entry\n");
		retval = -ENOMEM;
		goto err_create_debug_entry;
	}
	uhci->dentry = dentry;

	uhci->fsbr = 0;
	uhci->fsbrtimeout = 0;

	spin_lock_init(&uhci->rt_lock);
	INIT_LIST_HEAD(&uhci->qh_remove_list);
	INIT_LIST_HEAD(&uhci->td_remove_list);
	INIT_LIST_HEAD(&uhci->urb_remove_list);
	INIT_LIST_HEAD(&uhci->urb_list);
	INIT_LIST_HEAD(&uhci->complete_list);

	init_waitqueue_head(&uhci->waitqh);

	uhci->fl = dma_alloc_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
			&dma_handle, 0);
	if (!uhci->fl) {
		rtdm_dev_err(uhci_dev(uhci), "unable to allocate "
				"consistent memory for frame list\n");
		goto err_alloc_fl;
	}

	memset((void *)uhci->fl, 0, sizeof(*uhci->fl));

	uhci->fl->dma_handle = dma_handle;

	uhci->td_pool = dma_pool_create("rtdm_uhci_td", uhci_dev(uhci),
			sizeof(struct uhci_td), 16, 0);
	if (!uhci->td_pool) {
		rtdm_dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
		goto err_create_td_pool;
	}

	rtdm_sem_init(&uhci->td_buffer_semaphore, 1);
	for (i = 0; i < UHCI_MAX_TD; i++) {
		uhci->td_buffer_pool[i].buffer = dma_pool_alloc(uhci->td_pool,
				GFP_ATOMIC, &uhci->td_buffer_pool[i].dma_address);
		if (!uhci->td_buffer_pool[i].buffer) {
			rtdm_dev_err(uhci_dev(uhci),
					"unable to create td dma_pool memory buffers\n");
			goto err_create_td_pool;
		}
		uhci->td_buffer_pool[i].in_use = 0;
	}

	uhci->qh_pool = dma_pool_create("rtdm_uhci_qh", uhci_dev(uhci),
			sizeof(struct uhci_qh), 16, 0);
	if (!uhci->qh_pool) {
		rtdm_dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
		goto err_create_qh_pool;
	}

	rtdm_sem_init(&uhci->qh_buffer_semaphore, 1);
	for (i = 0; i < UHCI_MAX_QH; i++) {
		uhci->qh_buffer_pool[i].buffer = dma_pool_alloc(uhci->qh_pool,
				GFP_ATOMIC, &uhci->qh_buffer_pool[i].dma_address);
		if (!uhci->qh_buffer_pool[i].buffer) {
			rtdm_dev_err(uhci_dev(uhci),
					"unable to create qh dma_pool memory buffers\n");
			goto err_create_qh_pool;
		}
		uhci->qh_buffer_pool[i].in_use = 0;
	}
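	/*
	 * Added note: all TD and QH buffers are carved out of the DMA pools
	 * once, up front, and marked free here.  The allocation helpers can
	 * then hand them out by flipping in_use under the RTDM semaphores,
	 * presumably so real-time paths never call dma_pool_alloc() directly.
	 */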
	uhci->term_td = uhci_alloc_td(uhci);
	if (!uhci->term_td) {
		rtdm_dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
		goto err_alloc_term_td;
	}

	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
		uhci->skelqh[i] = uhci_alloc_qh(uhci);
		if (!uhci->skelqh[i]) {
			rtdm_dev_err(uhci_dev(uhci), "unable to allocate QH\n");
			goto err_alloc_skelqh;
		}
	}

	/*
	 * 8 interrupt queues; link all higher int queues to int1,
	 * then link int1 to control and control to bulk.
	 */
	uhci->skel_int128_qh->link =
			uhci->skel_int64_qh->link =
			uhci->skel_int32_qh->link =
			uhci->skel_int16_qh->link =
			uhci->skel_int8_qh->link =
			uhci->skel_int4_qh->link =
			uhci->skel_int2_qh->link =
			cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int1_qh->link =
			cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_ls_control_qh->link =
			cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_fs_control_qh->link =
			cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_bulk_qh->link =
			cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;

	/* This dummy TD is to work around a bug in Intel PIIX controllers */
	uhci_fill_td(uhci->term_td, 0, (UHCI_NULL_DATA_SIZE << 21) |
		(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
	uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);
	uhci->skel_term_qh->link = UHCI_PTR_TERM;
	uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);

	/*
	 * Fill the frame list: make all entries point to the proper
	 * interrupt queue.
	 *
	 * The interrupt queues will be interleaved as evenly as possible.
	 * There's not much to be done about period-1 interrupts; they have
	 * to occur in every frame.  But we can schedule period-2 interrupts
	 * in odd-numbered frames, period-4 interrupts in frames congruent
	 * to 2 (mod 4), and so on.  This way each frame only has two
	 * interrupt QHs, which will help spread out bandwidth utilization.
	 */
	for (i = 0; i < UHCI_NUMFRAMES; i++) {
		int irq;

		/*
		 * ffs (Find First bit Set) does exactly what we need:
		 * 1,3,5,...  => ffs = 0 => use skel_int2_qh = skelqh[6],
		 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[5], etc.
		 * ffs > 6 => not on any high-period queue, so use
		 *	skel_int1_qh = skelqh[7].
		 * Add UHCI_NUMFRAMES to ensure at least one bit is set.
		 */
		irq = 6 - (int) __ffs(i + UHCI_NUMFRAMES);
		if (irq < 0)
			irq = 7;

		/* Only place we don't use the frame list routines */
		uhci->fl->frame[i] = UHCI_PTR_QH |
				cpu_to_le32(uhci->skelqh[irq]->dma_handle);
	}
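	/*
	 * Added worked example of the mapping above, assuming the standard
	 * UHCI_NUMFRAMES of 1024 (an assumption; the macro is not defined in
	 * this fragment):
	 *   i = 0: __ffs(1024) = 10, irq = -4 -> clamped to 7 (skel_int1_qh)
	 *   i = 1: __ffs(1025) =  0, irq =  6 (skel_int2_qh)
	 *   i = 2: __ffs(1026) =  1, irq =  5 (skel_int4_qh)
	 *   i = 3: __ffs(1027) =  0, irq =  6 (skel_int2_qh)
	 *   i = 4: __ffs(1028) =  2, irq =  4 (skel_int8_qh)
	 * Each frame thus enters the schedule through exactly one interrupt
	 * QH, and all of them funnel into skel_int1_qh and then the control
	 * and bulk queues linked above.
	 */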
	/*
	 * Some architectures require a full mb() to enforce completion of
	 * the memory writes above before the I/O transfers in configure_hc().
	 */
	mb();

	configure_hc(uhci);
	start_rh(uhci);
	return 0;

/*
 * error exits:
 */
err_alloc_skelqh:
	for (i = 0; i < UHCI_NUM_SKELQH; i++)
		if (uhci->skelqh[i]) {
			uhci_free_qh(uhci, uhci->skelqh[i]);
			uhci->skelqh[i] = NULL;
		}

	uhci_free_td(uhci, uhci->term_td);
	uhci->term_td = NULL;

err_alloc_term_td:
	for (i = 0; i < UHCI_MAX_QH; i++) {
		dma_pool_free(uhci->qh_pool, uhci->qh_buffer_pool[i].buffer,
				uhci->qh_buffer_pool[i].dma_address);
		uhci->qh_buffer_pool[i].buffer = NULL;
	}
	rtdm_sem_destroy(&uhci->qh_buffer_semaphore);
	dma_pool_destroy(uhci->qh_pool);
	uhci->qh_pool = NULL;

err_create_qh_pool:
	for (i = 0; i < UHCI_MAX_TD; i++) {
		dma_pool_free(uhci->td_pool, uhci->td_buffer_pool[i].buffer,
				uhci->td_buffer_pool[i].dma_address);
		uhci->td_buffer_pool[i].buffer = NULL;
	}
	rtdm_sem_destroy(&uhci->td_buffer_semaphore);
	dma_pool_destroy(uhci->td_pool);
	uhci->td_pool = NULL;

err_create_td_pool:
	dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
			uhci->fl, uhci->fl->dma_handle);
	uhci->fl = NULL;

err_alloc_fl:
	debugfs_remove(uhci->dentry);
	uhci->dentry = NULL;

err_create_debug_entry:
	return retval;
}
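/*
 * Added note: the error labels above run in the reverse order of the
 * allocations in uhci_start(), so a goto from any failure point falls
 * through the cleanup stages below it.
 */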
static void uhci_stop(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long context;

	rtdm_lock_get_irqsave(&uhci->rt_lock, context);
	if (!uhci->hc_inaccessible)
		reset_hc(uhci);
	uhci_scan_schedule(uhci, NULL);
	rtdm_lock_put_irqrestore(&uhci->rt_lock, context);

	release_uhci(uhci);
}

#ifdef CONFIG_PM
static int uhci_rh_suspend(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long context;

	rtdm_lock_get_irqsave(&uhci->rt_lock, context);
	if (!uhci->hc_inaccessible)	/* Not dead */
		suspend_rh(uhci, UHCI_RH_SUSPENDED);
	rtdm_lock_put_irqrestore(&uhci->rt_lock, context);
	return 0;
}

static int uhci_rh_resume(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int rc = 0;
	unsigned long context;

	rtdm_lock_get_irqsave(&uhci->rt_lock, context);
	if (uhci->hc_inaccessible) {
		if (uhci->rh_state == UHCI_RH_SUSPENDED) {
			rtdm_dev_warn(uhci_dev(uhci), "HC isn't running!\n");
			rc = -ENODEV;
		}
		/* Otherwise the HC is dead */
	} else
		wakeup_rh(uhci);
	rtdm_lock_put_irqrestore(&uhci->rt_lock, context);
	return rc;
}

static int uhci_suspend(struct usb_hcd *hcd, pm_message_t message)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int rc = 0;
	unsigned long context;

	rtdm_dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);

	rtdm_lock_get_irqsave(&uhci->rt_lock, context);
	if (uhci->hc_inaccessible)	/* Dead or already suspended */
		goto done;

	/* Otherwise this would never happen */
	suspend_rh(uhci, UHCI_RH_SUSPENDED);
	if (uhci->rh_state > UHCI_RH_SUSPENDED) {
		rtdm_dev_warn(uhci_dev(uhci), "Root hub isn't suspended!\n");
		hcd->state = HC_STATE_RUNNING;
		rc = -EBUSY;
		goto done;
	}

	/* All PCI host controllers are required to disable IRQ generation
	 * at the source, so we must turn off PIRQ.
	 */
	pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP, 0);
	uhci->hc_inaccessible = 1;
	hcd->poll_rh = 0;

	/* FIXME: Enable non-PME# remote wakeup? */

done:
	rtdm_lock_put_irqrestore(&uhci->rt_lock, context);
	return rc;
}

static int uhci_resume(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long context;

	rtdm_dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);

	if (uhci->rh_state == UHCI_RH_RESET)	/* Dead */
		return 0;

	rtdm_lock_get_irqsave(&uhci->rt_lock, context);

	/* FIXME: Disable non-PME# remote wakeup? */
	uhci->hc_inaccessible = 0;

	/* The BIOS may have changed the controller settings during a
	 * system wakeup.  Check it and reconfigure to avoid problems.
	 */
	check_and_reset_hc(uhci);
	configure_hc(uhci);

	/* Otherwise this would never happen */
	wakeup_rh(uhci);
	if (uhci->rh_state == UHCI_RH_RESET)
		suspend_rh(uhci, UHCI_RH_SUSPENDED);

	rtdm_lock_put_irqrestore(&uhci->rt_lock, context);

	if (!uhci->working_RD) {
		/* Suspended root hub needs to be polled */
		hcd->poll_rh = 1;
		rtdm_usb_hcd_poll_rh_status(hcd);
	}
	return 0;
}
#endif

/* Wait until all the URBs for a particular device/endpoint are gone */
static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
		struct rtdm_usb_host_endpoint *ep)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	wait_event_interruptible(uhci->waitqh, list_empty(&ep->urb_list));
}

static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	int is_stopped;
	int frame_number;

	/* Minimize latency by avoiding the spinlock */
	local_irq_save(flags);
	is_stopped = uhci->is_stopped;
	smp_rmb();
	frame_number = (is_stopped ? uhci->frame_number :
			inw(uhci->io_addr + USBFRNUM));
	local_irq_restore(flags);
	return frame_number;
}

static const char hcd_name[] = "rtdm_uhci_hcd";

static const struct hc_driver uhci_driver = {
	.description =		hcd_name,
	.rt_product_desc =	"RTDM UHCI Host Controller",
	.hcd_priv_size =	sizeof(struct uhci_hcd),

	/* Generic hardware linkage */
	.rtdm_irq_routine =	rt_uhci_irq,
	.flags =		HCD_USB11,

	/* Basic lifecycle operations */
	.reset =		uhci_reset,
	.start =		uhci_start,
#ifdef CONFIG_PM
	.suspend =		uhci_suspend,
	.resume =		uhci_resume,
	.hub_suspend =		uhci_rh_suspend,
	.hub_resume =		uhci_rh_resume,
#endif
	.stop =			uhci_stop,

	.urb_enqueue =		uhci_urb_enqueue,
	.urb_dequeue =		uhci_urb_dequeue,

	.endpoint_disable =	uhci_hcd_endpoint_disable,
	.get_frame_number =	uhci_hcd_get_frame_number,

	.hub_status_data =	uhci_hub_status_data,
	.hub_control =		uhci_hub_control,
};

static const struct pci_device_id uhci_pci_ids[] = { {
	/* handle any USB UHCI controller */
	PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x00), ~0),
	.driver_data =	(unsigned long) &uhci_driver,
	}, { /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, uhci_pci_ids);

static struct pci_driver uhci_pci_driver = {
	.name =		(char *)hcd_name,
	.id_table =	uhci_pci_ids,

	.probe =	rtdm_usb_hcd_pci_probe,
	.remove =	rtdm_usb_hcd_pci_remove,
	.shutdown =	uhci_shutdown,

#ifdef	CONFIG_PM
	.suspend =	rtdm_usb_hcd_pci_suspend,
	.resume =	rtdm_usb_hcd_pci_resume,
#endif	/* PM */
};

static int __init uhci_hcd_init(void)
{
	int retval = -ENOMEM;

	rtdm_printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION "\n");

	if (rtdm_usb_disabled())
		return -ENODEV;

	if (debug) {
		errbuf = kmalloc(ERRBUF_LEN, GFP_ATOMIC);
		if (!errbuf)
			goto errbuf_failed;
	}

	uhci_debugfs_root = debugfs_create_dir("uhci", NULL);
	if (!uhci_debugfs_root)
		goto debug_failed;

	uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
		sizeof(struct urb_priv), 0, 0, NULL, NULL);
	if (!uhci_up_cachep)
		goto up_failed;

	retval = pci_register_driver(&uhci_pci_driver);
	if (retval)
		goto init_failed;

	return 0;

init_failed:
	if (kmem_cache_destroy(uhci_up_cachep))
		warn("not all urb_priv's were freed!");

up_failed:
	debugfs_remove(uhci_debugfs_root);

debug_failed:
	kfree(errbuf);

errbuf_failed:
	return retval;
}

static void __exit uhci_hcd_cleanup(void)
{
	pci_unregister_driver(&uhci_pci_driver);

	if (kmem_cache_destroy(uhci_up_cachep))
		warn("not all urb_priv's were freed!");

	debugfs_remove(uhci_debugfs_root);
	kfree(errbuf);
}

module_init(uhci_hcd_init);
module_exit(uhci_hcd_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
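To make the schedule construction concrete, below is a minimal user-space sketch of the frame-list computation performed by uhci_start(). It assumes the standard UHCI frame-list size of 1024 entries and the conventional UHCI link-pointer encoding (bit 0 = Terminate, bit 1 = QH select, matching UHCI_PTR_QH above); the queue names, the my_ffs() helper, and the fake DMA addresses are illustrative stand-ins, not part of the driver.

/* Build with: cc -o flsketch flsketch.c */
#include <stdio.h>

#define UHCI_NUMFRAMES	1024		/* assumed standard frame list size */
#define UHCI_PTR_QH	0x0002		/* link pointer bit 1: entry is a QH */

/* illustrative names for skelqh[0..7], mirroring the comment in uhci_start() */
static const char *skel_name[8] = {
	"int128", "int64", "int32", "int16", "int8", "int4", "int2", "int1"
};

/* index of the lowest set bit, like the kernel's __ffs() */
static int my_ffs(unsigned int v)
{
	return __builtin_ctz(v);
}

int main(void)
{
	unsigned int fake_qh_dma[8];	/* stand-ins for skelqh[i]->dma_handle */
	int i;

	for (i = 0; i < 8; i++)
		fake_qh_dma[i] = 0x1000u + 0x10u * i;

	for (i = 0; i < 16; i++) {
		int irq = 6 - my_ffs(i + UHCI_NUMFRAMES);

		if (irq < 0)
			irq = 7;

		/* same encoding as uhci->fl->frame[i], minus cpu_to_le32() */
		printf("frame %2d -> skelqh[%d] (%-6s) entry=0x%08x\n",
		       i, irq, skel_name[irq], fake_qh_dma[irq] | UHCI_PTR_QH);
	}
	return 0;
}

Running the sketch prints the first sixteen frame entries and shows the interleaving described in the driver comment: frame 0 enters at the period-1 queue, odd frames at the period-2 queue, frames congruent to 2 (mod 4) at the period-4 queue, and so on, with every path funneling into skel_int1_qh.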
