
📄 uhci-q.c

📁 A minimal USB host-side driver supporting SD card, mouse, and keyboard devices; builds with gcc
💻 C
📖 Page 1 of 4
/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
 */

/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}

/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
 */
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* The terminating skeleton QH always points back to the first
	 * FSBR QH.  Make the last async QH point to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 1;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = LINK_TO_QH(uhci->skel_term_qh);
}
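/*
 * Aside (illustrative sketch, not part of the original file): the link
 * fields written above use the UHCI link-pointer encoding.  Per the UHCI
 * 1.1 spec, bit 0 is Terminate, bit 1 selects QH vs. TD, and the upper
 * bits hold the 16-byte-aligned bus address.  The macros look roughly
 * like the definitions below; the real ones live in uhci-hcd.h.
 */
#if 0	/* sketch only -- assumes the uhci-hcd.h names */
#define UHCI_PTR_TERM		cpu_to_le32(0x0001)	/* Terminate bit */
#define UHCI_PTR_QH		cpu_to_le32(0x0002)	/* QH/TD select  */
#define LINK_TO_TD(td)		(cpu_to_le32((td)->dma_handle))
#define LINK_TO_QH(qh)		(UHCI_PTR_QH | cpu_to_le32((qh)->dma_handle))
#endif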
static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* Remove the link from the last async QH to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 0;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = UHCI_PTR_TERM;
}

static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;

	if (!(urb->transfer_flags & URB_NO_FSBR))
		urbp->fsbr = 1;
}

static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
{
	if (urbp->fsbr) {
		uhci->fsbr_is_wanted = 1;
		if (!uhci->fsbr_is_on)
			uhci_fsbr_on(uhci);
		else if (uhci->fsbr_expiring) {
			uhci->fsbr_expiring = 0;
			del_timer(&uhci->fsbr_timer);
		}
	}
}

static void uhci_fsbr_timeout(unsigned long _uhci)
{
	struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
	unsigned long flags;

	spin_lock_irqsave(&uhci->lock, flags);
	if (uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 0;
		uhci_fsbr_off(uhci);
	}
	spin_unlock_irqrestore(&uhci->lock, flags);
}

static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}

static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
	list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
	list_del_init(&td->list);
}
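/*
 * Aside (illustrative sketch, not part of the original file): TDs and
 * QHs are carved out of DMA pools so each descriptor gets a bus address
 * the host controller can follow.  The pools are created at HC setup,
 * roughly as below; the real call sites are in uhci-hcd.c and the exact
 * arguments here are assumptions.
 */
#if 0	/* sketch only -- UHCI requires 16-byte alignment of descriptors */
	uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
			sizeof(struct uhci_td), 16, 0);
	uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
			sizeof(struct uhci_qh), 16, 0);
#endif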
/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = LINK_TO_TD(td);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = LINK_TO_TD(td);
		uhci->frame_cpu[framenum] = td;
	}
}

static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td,
					fl_list);
			uhci->frame[td->frame] = LINK_TO_TD(ntd);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}
	list_del_init(&td->fl_list);
	td->frame = -1;
}

static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
		unsigned int framenum)
{
	struct uhci_td *ftd, *ltd;

	framenum &= (UHCI_NUMFRAMES - 1);

	ftd = uhci->frame_cpu[framenum];
	if (ftd) {
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
		uhci->frame[framenum] = ltd->link;
		uhci->frame_cpu[framenum] = NULL;

		while (!list_empty(&ftd->fl_list))
			list_del_init(ftd->fl_list.prev);
	}
}

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	memset(qh, 0, sizeof(*qh));
	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
			qh->dummy_td = uhci_alloc_td(uhci);
			if (!qh->dummy_td) {
				dma_pool_free(uhci->qh_pool, qh, dma_handle);
				return NULL;
			}
		}
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;

		if (qh->type == USB_ENDPOINT_XFER_INT ||
				qh->type == USB_ENDPOINT_XFER_ISOC)
			qh->load = usb_calc_bus_time(udev->speed,
					usb_endpoint_dir_in(&hep->desc),
					qh->type == USB_ENDPOINT_XFER_ISOC,
					le16_to_cpu(hep->desc.wMaxPacketSize))
				/ 1000 + 1;

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
		qh->type = -1;
	}
	return qh;
}

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue))
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);

	list_del(&qh->node);
	if (qh->udev) {
		qh->hep->hcpriv = NULL;
		if (qh->dummy_td)
			uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
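/*
 * Aside (illustrative sketch, not part of the original file): the insert
 * path above publishes a new TD with a classic two-step pattern, because
 * the host controller walks the frame list concurrently with the CPU.
 * The new entry is fully initialized before any live link points at it:
 */
#if 0	/* sketch only -- the ordering behind the wmb() calls above */
	new->link = successor_link;	/* 1: new TD points at its successor */
	wmb();				/* 2: make step 1 visible first      */
	*prev_link = LINK_TO_TD(new);	/* 3: publish; HC may now reach it   */
#endif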
/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 *
 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 */
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	int ret = 1;

	/* Isochronous pipes don't use toggles and their TD link pointers
	 * get adjusted during uhci_urb_dequeue().  But since their queues
	 * cannot truly be stopped, we have to watch out for dequeues
	 * occurring after the nominal unlink frame. */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		ret = (uhci->frame_number + uhci->is_stopped !=
				qh->unlink_frame);
		goto done;
	}

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB.  The toggle doesn't need
	 * to be saved since this URB can't be executing yet. */
	if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		WARN_ON(list_empty(&purbp->td_list));
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		td = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = td->link;
		goto done;
	}

	/* If the QH element pointer is UHCI_PTR_TERM then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
	if (qh_element(qh) == UHCI_PTR_TERM)
		goto done;
	qh->element = UHCI_PTR_TERM;

	/* Control pipes don't have to worry about toggles */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
		goto done;

	/* Save the next toggle value */
	WARN_ON(list_empty(&urbp->td_list));
	td = list_entry(urbp->td_list.next, struct uhci_td, list);
	qh->needs_fixup = 1;
	qh->initial_toggle = uhci_toggle(td_token(td));

done:
	return ret;
}
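/*
 * Aside (illustrative sketch, not part of the original file): the toggle
 * saved above is a single bit inside the TD token dword.  Per the UHCI
 * spec the token packs the PID (bits 0-7), device address (8-14),
 * endpoint (15-18), data toggle (19), and maximum length (21-31).  The
 * accessor is roughly as below; the real one lives in uhci-hcd.h.
 */
#if 0	/* sketch only -- assumes the uhci-hcd.h names */
#define TD_TOKEN_TOGGLE_SHIFT	19
#define TD_TOKEN_TOGGLE		(1 << TD_TOKEN_TOGGLE_SHIFT)
#define uhci_toggle(token)	(((token) >> TD_TOKEN_TOGGLE_SHIFT) & 1)
#endif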
/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned int toggle = qh->initial_toggle;
	unsigned int pipe;

	/* Fixups for a short transfer start with the second URB in the
	 * queue (the short URB is the first). */
	if (skip_first)
		urbp = list_entry(qh->queue.next, struct urb_priv, node);

	/* When starting with the first URB, if the QH element pointer is
	 * still valid then we know the URB's toggles are okay. */
	else if (qh_element(qh) != UHCI_PTR_TERM)
		toggle = 2;

	/* Fix up the toggle for the URBs in the queue.  Normally this
	 * loop won't run more than once: When an error or short transfer
	 * occurs, the queue usually gets emptied. */
	urbp = list_prepare_entry(urbp, &qh->queue, node);
	list_for_each_entry_continue(urbp, &qh->queue, node) {

		/* If the first TD has the right toggle value, we don't
		 * need to change any toggles in this URB */
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
			td = list_entry(urbp->td_list.prev, struct uhci_td,
					list);
			toggle = uhci_toggle(td_token(td)) ^ 1;

		/* Otherwise all the toggles in the URB have to be switched */
		} else {
			list_for_each_entry(td, &urbp->td_list, list) {
				td->token ^= __constant_cpu_to_le32(
							TD_TOKEN_TOGGLE);
				toggle ^= 1;
			}
		}
	}

	wmb();
	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
			usb_pipeout(pipe), toggle);
	qh->needs_fixup = 0;
}

/*
 * Link an Isochronous QH into its skeleton's list
 */
static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	list_add_tail(&qh->node, &uhci->skel_iso_qh->node);

	/* Isochronous QHs aren't linked by the hardware */
}

/*
 * Link a high-period interrupt QH into the schedule at the end of its
 * skeleton's list
 */
static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
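/*
 * Aside (illustrative sketch, not part of the original file): the
 * list_entry() calls used throughout this file resolve an embedded
 * struct list_head back to its containing structure.  In the kernel it
 * is simply container_of(), as defined in <linux/list.h>:
 */
#if 0	/* sketch only -- the standard kernel definitions */
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)

#define container_of(ptr, type, member) ({			\
	const typeof(((type *)0)->member) *__mptr = (ptr);	\
	(type *)((char *)__mptr - offsetof(type, member)); })
#endif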
