
uhci-q.c

USB host (master device) program: a minimal driver supporting SD card, mouse, and keyboard; compiled with gcc.
Language: C
Page 1 of 4
	qh->link = pqh->link;
	wmb();
	pqh->link = LINK_TO_QH(qh);
}

/*
 * Link a period-1 interrupt or async QH into the schedule at the
 * correct spot in the async skeleton's list, and update the FSBR link
 */
static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 link_to_new_qh;

	/* Find the predecessor QH for our new one and insert it in the list.
	 * The list of QHs is expected to be short, so linear search won't
	 * take too long. */
	list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
		if (pqh->skel <= qh->skel)
			break;
	}
	list_add(&qh->node, &pqh->node);

	/* Link it into the schedule */
	qh->link = pqh->link;
	wmb();
	link_to_new_qh = LINK_TO_QH(qh);
	pqh->link = link_to_new_qh;

	/* If this is now the first FSBR QH, link the terminating skeleton
	 * QH to it. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_new_qh;
}

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = LINK_TO_TD(td);
	}

	/* Treat the queue as if it has just advanced */
	qh->wait_expired = 0;
	qh->advance_jiffies = jiffies;

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the correct spot in the appropriate
	 * skeleton's list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_del(&qh->node);

	if (qh->skel == SKEL_ISO)
		link_iso(uhci, qh);
	else if (qh->skel < SKEL_ASYNC)
		link_interrupt(uhci, qh);
	else
		link_async(uhci, qh);
}

/*
 * Unlink a high-period interrupt QH from the schedule
 */
static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();
}

/*
 * Unlink a period-1 interrupt or async QH from the schedule
 */
static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 link_to_next_qh = qh->link;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = link_to_next_qh;

	/* If this was the old first FSBR QH, link the terminating skeleton
	 * QH to the next (new first FSBR) QH.
	 */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_next_qh;
	mb();
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it */
	if (qh->skel == SKEL_ISO)
		;
	else if (qh->skel < SKEL_ASYNC)
		unlink_interrupt(uhci, qh);
	else
		unlink_async(uhci, qh);

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* Now that the QH is idle, its post_td isn't being used */
	if (qh->post_td) {
		uhci_free_td(uhci, qh->post_td);
		qh->post_td = NULL;
	}

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}

/*
 * Find the highest existing bandwidth load for a given phase and period.
 */
static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
{
	int highest_load = uhci->load[phase];

	for (phase += period; phase < MAX_PHASE; phase += period)
		highest_load = max_t(int, highest_load, uhci->load[phase]);
	return highest_load;
}

/*
 * Set qh->phase to the optimal phase for a periodic transfer and
 * check whether the bandwidth requirement is acceptable.
 */
static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int minimax_load;

	/* Find the optimal phase (unless it is already set) and get
	 * its load value.
	 */
	if (qh->phase >= 0)
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
	else {
		int phase, load;
		int max_phase = min_t(int, MAX_PHASE, qh->period);

		qh->phase = 0;
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
		for (phase = 1; phase < max_phase; ++phase) {
			load = uhci_highest_load(uhci, phase, qh->period);
			if (load < minimax_load) {
				minimax_load = load;
				qh->phase = phase;
			}
		}
	}

	/* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
	if (minimax_load + qh->load > 900) {
		dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
				"period %d, phase %d, %d + %d us\n",
				qh->period, qh->phase, minimax_load, qh->load);
		return -ENOSPC;
	}
	return 0;
}

/*
 * Reserve a periodic QH's bandwidth in the schedule
 */
static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] += load;
		uhci->total_load += load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 1;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"reserve", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

/*
 * Release a periodic QH's bandwidth reservation
 */
static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] -= load;
		uhci->total_load -= load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 0;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"release", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
	if (!urbp)
		return NULL;

	urbp->urb = urb;
	urb->hcpriv = urbp;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);

	return urbp;
}

static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node))
		dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}

	urbp->urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)			/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)			/* Stalled */
		return -EPIPE;
	return 0;
}

/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	int skel;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 */
	if (usb_pipeout(urb->pipe))
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = min(len, maxsze);

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(td);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
				data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;
	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	status &= ~TD_CTRL_SPD;
	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;
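The bandwidth-allocation path above (uhci_highest_load() and uhci_check_bandwidth()) is self-contained enough to restate outside the driver. The sketch below is a minimal standalone illustration of the same minimax idea: for each candidate phase, take the worst-case load over every frame slot the transfer would occupy, keep the phase whose worst case is smallest, and reject the request if adding the new load would exceed the 900 us (90% of a 1 ms frame) ceiling used above. The names load_us, highest_load, check_bandwidth, MAX_FRAME_US and the value chosen for MAX_PHASE are stand-ins for illustration only; the excerpt itself does not define MAX_PHASE.

/*
 * Minimal standalone sketch of the minimax phase selection performed by
 * uhci_check_bandwidth() above.  MAX_PHASE is an assumed slot count; the
 * 900 us ceiling matches the 90%-per-frame limit in the driver code.
 */
#include <stdio.h>

#define MAX_PHASE	32	/* assumed number of frame slots tracked */
#define MAX_FRAME_US	900	/* 90% of a 1 ms frame, as in the code above */

static int load_us[MAX_PHASE];	/* reserved microseconds per frame slot */

/* Worst-case load over every slot hit by (phase, period). */
static int highest_load(int phase, int period)
{
	int worst = load_us[phase];

	for (phase += period; phase < MAX_PHASE; phase += period)
		if (load_us[phase] > worst)
			worst = load_us[phase];
	return worst;
}

/*
 * Pick the phase whose worst-case load is smallest, then check the
 * 900 us ceiling.  Returns the chosen phase, or -1 if the new load
 * would not fit.
 */
static int check_bandwidth(int period, int new_load_us)
{
	int max_phase = (period < MAX_PHASE) ? period : MAX_PHASE;
	int best_phase = 0;
	int minimax = highest_load(0, period);
	int phase, load;

	for (phase = 1; phase < max_phase; ++phase) {
		load = highest_load(phase, period);
		if (load < minimax) {
			minimax = load;
			best_phase = phase;
		}
	}
	if (minimax + new_load_us > MAX_FRAME_US)
		return -1;
	return best_phase;
}

int main(void)
{
	load_us[0] = 850;	/* pretend slot 0 is nearly full */
	printf("period 8 -> phase %d\n", check_bandwidth(8, 100));
	return 0;
}

Run as-is, this prints phase 1: slot 0 is nearly full, so the minimax walk steers the new periodic transfer to an empty slot. The driver performs the same walk over uhci->load[] before committing the reservation in uhci_reserve_bandwidth().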
