
📄 uhci-hcd.c

📁 Kernel
💻 C
📖 Page 1 of 5
			data);

		data += pktsze;
		len -= maxsze;

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
	    !len && urb->transfer_buffer_length) {
		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= cpu_to_le32(TD_CTRL_IOC);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	/* Always breadth first */
	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}

/*
 * Common result for bulk and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status = 0;
	int ret = 0;

	urb->actual_length = 0;

	list_for_each_entry(td, &urbp->td_list, list) {
		unsigned int ctrlstat = td_status(td);

		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(ctrlstat);

		if (status)
			goto td_error;

		if (uhci_actual_length(ctrlstat) <
				uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			} else
				return 0;
		}
	}

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	/*
	 * Enable this chunk of code if you want to see some more debugging.
	 * But be careful, it has the tendency to starve out khubd and prevent
	 * disconnects from happening successfully if you have a slow debug
	 * log interface (like a serial console).
	 */
#if 0
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}
#endif
	return ret;
}

static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
	if (ret == -EINPROGRESS)
		uhci_inc_fsbr(uhci, urb);

	return ret;
}

static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	/* USB 1.1 interrupt transfers only involve one packet per interval;
	 * that's the uhci_submit_common() "breadth first" policy.  Drivers
	 * can submit urbs of any length, but longer ones might need many
	 * intervals to complete.
	 */
	return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
}

/*
 * Isochronous transfers
 */
static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
{
	struct urb *last_urb = NULL;
	struct urb_priv *up;
	int ret = 0;

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		/* look for pending URBs with identical pipe handle */
		if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
		    (u->status == -EINPROGRESS) && (u != urb)) {
			if (!last_urb)
				*start = u->start_frame;
			last_urb = u;
		}
	}

	if (last_urb) {
		*end = (last_urb->start_frame + last_urb->number_of_packets *
				last_urb->interval) & (UHCI_NUMFRAMES-1);
		ret = 0;
	} else
		ret = -1;	/* no previous urb found */

	return ret;
}

static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
{
	int limits;
	unsigned int start = 0, end = 0;

	if (urb->number_of_packets > 900)	/* 900? Why? */
		return -EFBIG;

	limits = isochronous_find_limits(uhci, urb, &start, &end);

	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (limits)
			urb->start_frame =
					(uhci_get_current_frame_number(uhci) +
						10) & (UHCI_NUMFRAMES - 1);
		else
			urb->start_frame = end;
	} else {
		urb->start_frame &= (UHCI_NUMFRAMES - 1);
		/* FIXME: Sanity check */
	}

	return 0;
}

/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	int i, ret, frame;
	int status, destination;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	ret = isochronous_find_start(uhci, urb);
	if (ret)
		return ret;

	frame = urb->start_frame;
	for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
		if (!urb->iso_frame_desc[i].length)
			continue;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
			urb->transfer_dma + urb->iso_frame_desc[i].offset);

		if (i + 1 >= urb->number_of_packets)
			td->status |= cpu_to_le32(TD_CTRL_IOC);

		uhci_insert_td_frame_list(uhci, td, frame);
	}

	return -EINPROGRESS;
}

static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int status;
	int i, ret = 0;

	urb->actual_length = 0;

	i = 0;
	list_for_each_entry(td, &urbp->td_list, list) {
		int actlength;
		unsigned int ctrlstat = td_status(td);

		if (ctrlstat & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		actlength = uhci_actual_length(ctrlstat);
		urb->iso_frame_desc[i].actual_length = actlength;
		urb->actual_length += actlength;

		status = uhci_map_status(uhci_status_bits(ctrlstat),
				usb_pipeout(urb->pipe));
		urb->iso_frame_desc[i].status = status;
		if (status) {
			urb->error_count++;
			ret = status;
		}

		i++;
	}

	return ret;
}

static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *up;

	/* We don't match Isoc transfers since they are special */
	if (usb_pipeisoc(urb->pipe))
		return NULL;

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		if (u->dev == urb->dev && u->status == -EINPROGRESS) {
			/* For control, ignore the direction */
			if (usb_pipecontrol(urb->pipe) &&
			    (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
				return u;
			else if (u->pipe == urb->pipe)
				return u;
		}
	}

	return NULL;
}

static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep,
		struct urb *urb, int mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb *eurb;
	int bustime;

	spin_lock_irqsave(&uhci->schedule_lock, flags);

	ret = urb->status;
	if (ret != -EINPROGRESS)		/* URB already unlinked! */
		goto out;

	eurb = uhci_find_urb_ep(uhci, urb);

	if (!uhci_alloc_urb_priv(uhci, urb)) {
		ret = -ENOMEM;
		goto out;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_submit_control(uhci, urb, eurb);
		break;
	case PIPE_INTERRUPT:
		if (!eurb) {
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, eurb);
				if (ret == -EINPROGRESS)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			urb->bandwidth = eurb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, eurb);
		}
		break;
	case PIPE_BULK:
		ret = uhci_submit_bulk(uhci, urb, eurb);
		break;
	case PIPE_ISOCHRONOUS:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb);
		if (ret == -EINPROGRESS)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}

	if (ret != -EINPROGRESS) {
		/* Submit failed, so delete it from the urb_list */
		struct urb_priv *urbp = urb->hcpriv;

		list_del_init(&urbp->urb_list);
		uhci_destroy_urb_priv(uhci, urb);
	} else
		ret = 0;

out:
	spin_unlock_irqrestore(&uhci->schedule_lock, flags);
	return ret;
}

/*
 * Return the result of a transfer
 */
static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
{
	int ret = -EINPROGRESS;
	struct urb_priv *urbp;

	spin_lock(&urb->lock);

	urbp = (struct urb_priv *)urb->hcpriv;

	if (urb->status != -EINPROGRESS)	/* URB already dequeued */
		goto out;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_result_control(uhci, urb);
		break;
	case PIPE_BULK:
	case PIPE_INTERRUPT:
		ret = uhci_result_common(uhci, urb);
		break;
	case PIPE_ISOCHRONOUS:
		ret = uhci_result_isochronous(uhci, urb);
		break;
	}

	if (ret == -EINPROGRESS)
		goto out;
	urb->status = ret;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	case PIPE_ISOCHRONOUS:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		uhci_unlink_generic(uhci, urb);
		break;
	case PIPE_INTERRUPT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		if (list_empty(&urbp->queue_list) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		uhci_unlink_generic(uhci, urb);
		break;
	default:
		dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
				"for urb %p\n",
				__FUNCTION__, usb_pipetype(urb->pipe), urb);
	}

	/* Move it from uhci->urb_list to uhci->complete_list */
	uhci_moveto_complete(uhci, urbp);

out:
	spin_unlock(&urb->lock);
}

static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *head;
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int prevactive = 0;

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */

	/*
	 * Now we need to find out what the last successful toggle was
	 * so we can update the local data toggle for the next transfer
	 *
	 * There are 2 ways the last successful completed TD is found:
	 *
	 * 1) The TD is NOT active and the actual length < expected length
	 * 2) The TD is NOT active and it's the last TD in the chain
	 *
	 * and a third way the first uncompleted TD is found:
	 *
	 * 3) The TD is active and the previous TD is NOT active
	 *
	 * Control and Isochronous ignore the toggle, so this is safe
	 * for all types
	 *
	 * FIXME: The toggle fixups won't be 100% reliable until we
	 * change over to using a single queue for each endpoint and
	 * stop the queue before unlinking.
	 */

	head = &urbp->td_list;
	list_for_each_entry(td, head, list) {
		unsigned int ctrlstat = td_status(td);

		if (!(ctrlstat & TD_CTRL_ACTIVE) &&
				(uhci_actual_length(ctrlstat) <
				 uhci_expected_length(td_token(td)) ||
				td->list.next == head))
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)) ^ 1);
		else if ((ctrlstat & TD_CTRL_ACTIVE) && !prevactive)
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)));

		prevactive = ctrlstat & TD_CTRL_ACTIVE;
	}
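
For orientation, below is a minimal caller-side sketch (not part of uhci-hcd.c) of how a USB class driver would hand a bulk OUT transfer to usbcore, which then invokes the HCD's enqueue hook, uhci_urb_enqueue() above, and from there uhci_submit_bulk() for PIPE_BULK pipes. The device pointer, endpoint number 2, and 64-byte buffer are illustrative placeholders, and the two-argument completion callback assumes the same 2.6-era usbcore API this file is written against.

#include <linux/usb.h>
#include <linux/slab.h>

/* Completion handler: usbcore calls this once the HCD has finished the URB;
 * urb->status holds the result stored by uhci_transfer_result(). */
static void example_bulk_complete(struct urb *urb, struct pt_regs *regs)
{
	kfree(urb->transfer_buffer);
	usb_free_urb(urb);
}

/* Submit a 64-byte bulk OUT transfer to (hypothetical) endpoint 2 of udev. */
static int example_submit_bulk(struct usb_device *udev)
{
	struct urb *urb;
	char *buf;
	int ret;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return -ENOMEM;

	buf = kmalloc(64, GFP_KERNEL);
	if (!buf) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* Direction and transfer type are encoded in the pipe value,
	 * which uhci_urb_enqueue() inspects with usb_pipetype(). */
	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 2),
			buf, 64, example_bulk_complete, NULL);

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		kfree(buf);
		usb_free_urb(urb);
	}
	return ret;
}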
