
uhci-q.c
usb driver for 2.6.17
C
Page 1 of 3
			(ret == -EREMOTEIO);
	return ret;
}

static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	qh->skel = uhci->skel_bulk_qh;
	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0)
		uhci_inc_fsbr(uhci, urb);
	return ret;
}

static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	/* USB 1.1 interrupt transfers only involve one packet per interval.
	 * Drivers can submit URBs of any length, but longer ones will need
	 * multiple intervals to complete.
	 */
	qh->skel = uhci->skelqh[__interval_to_skel(urb->interval)];
	return uhci_submit_common(uhci, urb, qh);
}

/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i, frame;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	if (urb->number_of_packets > 900)	/* 900? Why? */
		return -EFBIG;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	/* Figure out the starting frame number */
	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (list_empty(&qh->queue)) {
			uhci_get_current_frame_number(uhci);
			urb->start_frame = (uhci->frame_number + 10);

		} else {		/* Go right after the last one */
			struct urb *last_urb;

			last_urb = list_entry(qh->queue.prev,
					struct urb_priv, node)->urb;
			urb->start_frame = (last_urb->start_frame +
					last_urb->number_of_packets *
					last_urb->interval);
		}
	} else {
		/* FIXME: Sanity check */
	}
	urb->start_frame &= (UHCI_NUMFRAMES - 1);

	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
					urb->iso_frame_desc[i].offset);
	}

	/* Set the interrupt-on-completion flag on the last packet. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	qh->skel = uhci->skel_iso_qh;

	/* Add the TDs to the frame list */
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += urb->interval;
	}

	return 0;
}

static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int status;
	int i, ret = 0;

	urb->actual_length = urb->error_count = 0;
	i = 0;
	list_for_each_entry(td, &urbp->td_list, list) {
		int actlength;
		unsigned int ctrlstat = td_status(td);

		if (ctrlstat & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		actlength = uhci_actual_length(ctrlstat);
		urb->iso_frame_desc[i].actual_length = actlength;
		urb->actual_length += actlength;

		status = uhci_map_status(uhci_status_bits(ctrlstat),
				usb_pipeout(urb->pipe));
		urb->iso_frame_desc[i].status = status;
		if (status) {
			urb->error_count++;
			ret = status;
		}

		i++;
	}

	return ret;
}
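/*
 * Worked example of the URB_ISO_ASAP start-frame arithmetic above (the
 * numbers are purely illustrative): with an empty queue the URB is scheduled
 * 10 frames past the current frame counter; queued behind a previous URB
 * that began at frame 700 with 8 packets at interval 1, the new URB starts
 * at 700 + 8 * 1 = 708.  Masking with (UHCI_NUMFRAMES - 1) then wraps the
 * result onto the 1024-entry UHCI frame list.
 */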
static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct usb_host_endpoint *hep,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;
	int bustime;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = urb->status;
	if (ret != -EINPROGRESS)		/* URB already unlinked! */
		goto done;

	ret = -ENOMEM;
	urbp = uhci_alloc_urb_priv(uhci, urb);
	if (!urbp)
		goto done;

	if (hep->hcpriv)
		qh = (struct uhci_qh *) hep->hcpriv;
	else {
		qh = uhci_alloc_qh(uhci, urb->dev, hep);
		if (!qh)
			goto err_no_qh;
	}
	urbp->qh = qh;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_submit_control(uhci, urb, qh);
		break;
	case PIPE_BULK:
		ret = uhci_submit_bulk(uhci, urb, qh);
		break;
	case PIPE_INTERRUPT:
		if (list_empty(&qh->queue)) {
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, qh);
				if (ret == 0)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			struct urb_priv *eurbp;

			eurbp = list_entry(qh->queue.prev, struct urb_priv,
					node);
			urb->bandwidth = eurbp->urb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, qh);
		}
		break;
	case PIPE_ISOCHRONOUS:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb, qh);
		if (ret == 0)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}
	if (ret != 0)
		goto err_submit_failed;

	/* Add this URB to the QH */
	urbp->qh = qh;
	list_add_tail(&urbp->node, &qh->queue);

	/* If the new URB is the first and only one on this QH then either
	 * the QH is new and idle or else it's unlinked and waiting to
	 * become idle, so we can activate it right away. */
	if (qh->queue.next == &urbp->node)
		uhci_activate_qh(uhci, qh);
	goto done;

err_submit_failed:
	if (qh->state == QH_STATE_IDLE)
		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */

err_no_qh:
	uhci_free_urb_priv(uhci, urbp);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;

	spin_lock_irqsave(&uhci->lock, flags);
	urbp = urb->hcpriv;
	if (!urbp)			/* URB was never linked! */
		goto done;

	/* Remove Isochronous TDs from the frame list ASAP */
	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
		uhci_unlink_isochronous_tds(uhci, urb);
	uhci_unlink_qh(uhci, urbp->qh);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return 0;
}
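/*
 * Note on the dequeue path above: uhci_urb_dequeue() only unlinks the QH
 * (and, for isochronous URBs, removes the TDs from the frame list); the URB
 * itself is given back later, from uhci_scan_qh() via uhci_giveback_urb()
 * below, once the QH has stopped or finished unlinking.
 */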
/*
 * Finish unlinking an URB and give it back
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb, struct pt_regs *regs)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* Isochronous TDs get unlinked directly from the frame list */
	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
		uhci_unlink_isochronous_tds(uhci, urb);

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB. */
	else if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd, *ltd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		ltd = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = ltd->link;
	}

	/* Take the URB off the QH's queue.  If the queue is now empty,
	 * this is a perfect time for a toggle fixup. */
	list_del_init(&urbp->node);
	if (list_empty(&qh->queue) && qh->needs_fixup) {
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), qh->initial_toggle);
		qh->needs_fixup = 0;
	}

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */
	uhci_free_urb_priv(uhci, urbp);

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		break;
	case PIPE_INTERRUPT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		if (list_empty(&qh->queue) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		break;
	}

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, regs);
	spin_lock(&uhci->lock);

	/* If the queue is now empty, we can unlink the QH and give up its
	 * reserved bandwidth. */
	if (list_empty(&qh->queue)) {
		uhci_unlink_qh(uhci, qh);

		/* Bandwidth stuff not yet implemented */
	}
}

/*
 * Scan the URBs in a QH's queue
 */
#define QH_FINISHED_UNLINKING(qh)			\
		(qh->state == QH_STATE_UNLINKING &&	\
		uhci->frame_number + uhci->is_stopped != qh->unlink_frame)

static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct pt_regs *regs)
{
	struct urb_priv *urbp;
	struct urb *urb;
	int status;

	while (!list_empty(&qh->queue)) {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		urb = urbp->urb;

		switch (usb_pipetype(urb->pipe)) {
		case PIPE_CONTROL:
			status = uhci_result_control(uhci, urb);
			break;
		case PIPE_ISOCHRONOUS:
			status = uhci_result_isochronous(uhci, urb);
			break;
		default:	/* PIPE_BULK or PIPE_INTERRUPT */
			status = uhci_result_common(uhci, urb);
			break;
		}
		if (status == -EINPROGRESS)
			break;

		spin_lock(&urb->lock);
		if (urb->status == -EINPROGRESS)	/* Not dequeued */
			urb->status = status;
		else
			status = -ECONNRESET;
		spin_unlock(&urb->lock);

		/* Dequeued but completed URBs can't be given back unless
		 * the QH is stopped or has finished unlinking. */
		if (status == -ECONNRESET &&
				!(qh->is_stopped || QH_FINISHED_UNLINKING(qh)))
			return;

		uhci_giveback_urb(uhci, qh, urb, regs);
		if (qh->is_stopped)
			break;
	}

	/* If the QH is neither stopped nor finished unlinking (normal case),
	 * our work here is done. */
 restart:
	if (!(qh->is_stopped || QH_FINISHED_UNLINKING(qh)))
		return;

	/* Otherwise give back each of the dequeued URBs */
	list_for_each_entry(urbp, &qh->queue, node) {
		urb = urbp->urb;
		if (urb->status != -EINPROGRESS) {
			uhci_save_toggle(qh, urb);
			uhci_giveback_urb(uhci, qh, urb, regs);
			goto restart;
		}
	}
	qh->is_stopped = 0;

	/* There are no more dequeued URBs.  If there are still URBs on the
	 * queue, the QH can now be re-activated. */
	if (!list_empty(&qh->queue)) {
		if (qh->needs_fixup)
			uhci_fixup_toggles(qh, 0);
		uhci_activate_qh(uhci, qh);
	}

	/* The queue is empty.  The QH can become idle if it is fully
	 * unlinked. */
	else if (QH_FINISHED_UNLINKING(qh))
		uhci_make_qh_idle(uhci, qh);
}

static void uhci_free_pending_tds(struct uhci_hcd *uhci)
{
	struct uhci_td *td, *tmp;

	list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) {
		list_del_init(&td->remove_list);
		uhci_free_td(uhci, td);
	}
}

/*
 * Process events in the schedule, but only in one thread at a time
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	int i;
	struct uhci_qh *qh;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
 rescan:
	uhci->need_rescan = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);

	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age)
		uhci_free_pending_tds(uhci);

	/* Go through all the QH queues and process the URBs in each one */
	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
				struct uhci_qh, node);
		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
			uhci->next_qh = list_entry(qh->node.next,
					struct uhci_qh, node);
			uhci_scan_qh(uhci, qh, regs);
		}
	}

	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	/* If the controller is stopped, we can finish these off right now */
	if (uhci->is_stopped)
		uhci_free_pending_tds(uhci);

	if (list_empty(&uhci->td_remove_list) &&
			list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}

static void check_fsbr(struct uhci_hcd *uhci)
{
	/* For now, don't scan URBs for FSBR timeouts.
	 * Add it back in later... */

	/* Really disable FSBR */
	if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
		uhci->fsbrtimeout = 0;
		uhci->skel_term_qh->link = UHCI_PTR_TERM;
	}
}
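/*
 * Note on uhci_scan_schedule() above: re-entry is avoided with the
 * scan_in_progress / need_rescan pair.  A call that arrives while a scan is
 * already running just sets need_rescan and returns; the running scan then
 * jumps back to its rescan label and repeats until need_rescan stays clear,
 * so no events are lost.  (The callers and their locking are outside this
 * excerpt.)
 */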
