📄 usb-uhci.c

📁 A practical example of Linux guest function definitions
💻 C
			set_td_ioc(td);	// last one generates INT

		insert_td (s, qh, td, UHCI_PTR_DEPTH * depth_first);
		if (!first_td)
			first_td=td;
		usb_dotoggle (urb->dev, usb_pipeendpoint (pipe), usb_pipeout (pipe));

	} while (!last);

	if (bulk_urb && bpriv)   // everything went OK, link with old bulk URB
		bpriv->next_queued_urb=urb;

	list_add (&qh->desc_list, &urb_priv->desc_list);

	if (urb->transfer_flags & USB_QUEUE_BULK)
		append_qh(s, td, bqh, UHCI_PTR_DEPTH * depth_first);

	queue_urb_unlocked (s, urb);
	
	if (urb->transfer_flags & USB_QUEUE_BULK)
		set_qh_element(qh, first_td->dma_addr);
	else
		qh->hw.qh.element &= cpu_to_le32(~UHCI_PTR_TERM);    // arm QH

	if (!bulk_urb) { 					// new bulk queue	
		if (urb->transfer_flags & USB_QUEUE_BULK) {
			spin_lock (&s->td_lock);		// both QHs in one go
			insert_qh (s, s->chain_end, qh, 0);	// Main QH
			insert_qh (s, s->chain_end, nqh, 0);	// Helper QH
			spin_unlock (&s->td_lock);
		}
		else
			insert_qh (s, s->chain_end, qh, 0);
	}
	
	//dbg("uhci_submit_bulk_urb: exit\n");
	return 0;
}
/*-------------------------------------------------------------------*/
_static void uhci_clean_iso_step1(uhci_t *s, urb_priv_t *urb_priv)
{
	struct list_head *p;
	uhci_desc_t *td;

	for (p = urb_priv->desc_list.next; p != &urb_priv->desc_list; p = p->next) {
		td = list_entry (p, uhci_desc_t, desc_list);
		unlink_td (s, td, 1);
	}
}
/*-------------------------------------------------------------------*/
_static void uhci_clean_iso_step2(uhci_t *s, urb_priv_t *urb_priv)
{
	struct list_head *p;
	uhci_desc_t *td;

	while ((p = urb_priv->desc_list.next) != &urb_priv->desc_list) {
		td = list_entry (p, uhci_desc_t, desc_list);
		list_del (p);
		delete_desc (s, td);
	}
}
/*-------------------------------------------------------------------*/
/* mode: CLEAN_TRANSFER_NO_DELETION: unlink but no deletion mark (step 1 of async_unlink)
         CLEAN_TRANSFER_REGULAR: regular (unlink/delete-mark)
         CLEAN_TRANSFER_DELETION_MARK: deletion mark for QH (step 2 of async_unlink)
 looks a bit complicated because of all the bulk queueing goodies
*/

_static void uhci_clean_transfer (uhci_t *s, struct urb *urb, uhci_desc_t *qh, int mode)
{
	uhci_desc_t *bqh, *nqh, *prevqh, *prevtd;
	int now;
	urb_priv_t *priv=(urb_priv_t*)urb->hcpriv;

	now=UHCI_GET_CURRENT_FRAME(s);

	bqh=priv->bottom_qh;	
	
	if (!priv->next_queued_urb)  { // no more appended bulk queues

		queue_dbg("uhci_clean_transfer: No more bulks for urb %p, qh %p, bqh %p, nqh %p", urb, qh, bqh, priv->next_qh);	
	
		if (priv->prev_queued_urb && mode != CLEAN_TRANSFER_DELETION_MARK) {  // qh not top of the queue
			unsigned long flags;
			urb_priv_t* ppriv=(urb_priv_t*)priv->prev_queued_urb->hcpriv;

			spin_lock_irqsave (&s->qh_lock, flags);
			prevqh = list_entry (ppriv->desc_list.next, uhci_desc_t, desc_list);
			prevtd = list_entry (prevqh->vertical.prev, uhci_desc_t, vertical);
			set_td_link(prevtd, priv->bottom_qh->dma_addr | UHCI_PTR_QH); // skip current qh
			mb();
			queue_dbg("uhci_clean_transfer: relink pqh %p, ptd %p", prevqh, prevtd);
			spin_unlock_irqrestore (&s->qh_lock, flags);

			ppriv->bottom_qh = priv->bottom_qh;
			ppriv->next_queued_urb = NULL;
		}
		else {   // queue is dead, qh is top of the queue
			
			if (mode != CLEAN_TRANSFER_DELETION_MARK) 				
				unlink_qh(s, qh); // remove qh from horizontal chain

			if (bqh) {  // remove remainder of bulk queue
				nqh=priv->next_qh;

				if (mode != CLEAN_TRANSFER_DELETION_MARK) 
					unlink_qh(s, nqh);  // remove nqh from horizontal chain
				
				if (mode != CLEAN_TRANSFER_NO_DELETION) {  // add helper QHs to free desc list
					nqh->last_used = bqh->last_used = now;
					list_add_tail (&nqh->horizontal, &s->free_desc);
					list_add_tail (&bqh->horizontal, &s->free_desc);
				}			
			}
		}
	}
	else { // there are queued urbs following
	
	  queue_dbg("uhci_clean_transfer: urb %p, prevurb %p, nexturb %p, qh %p, bqh %p, nqh %p",
		       urb, priv->prev_queued_urb,  priv->next_queued_urb, qh, bqh, priv->next_qh);	
       	
		if (mode != CLEAN_TRANSFER_DELETION_MARK) {	// no work for cleanup at unlink-completion
			struct urb *nurb;
			unsigned long flags;

			nurb = priv->next_queued_urb;
			spin_lock_irqsave (&s->qh_lock, flags);		

			if (!priv->prev_queued_urb) { // top QH
				
				prevqh = list_entry (qh->horizontal.prev, uhci_desc_t, horizontal);
				set_qh_head(prevqh, bqh->dma_addr | UHCI_PTR_QH);
				list_del (&qh->horizontal);  // remove this qh from the horizontal chain
				list_add (&bqh->horizontal, &prevqh->horizontal); // insert next bqh in horizontal chain
			}
			else {		// intermediate QH
				urb_priv_t* ppriv=(urb_priv_t*)priv->prev_queued_urb->hcpriv;
				urb_priv_t* npriv=(urb_priv_t*)nurb->hcpriv;
				uhci_desc_t * bnqh;
				
				bnqh = list_entry (npriv->desc_list.next, uhci_desc_t, desc_list);
				ppriv->bottom_qh = bnqh;
				ppriv->next_queued_urb = nurb;				
				prevqh = list_entry (ppriv->desc_list.next, uhci_desc_t, desc_list);
				set_qh_head(prevqh, bqh->dma_addr | UHCI_PTR_QH);
			}

			mb();
			((urb_priv_t*)nurb->hcpriv)->prev_queued_urb=priv->prev_queued_urb;
			spin_unlock_irqrestore (&s->qh_lock, flags);
		}		
	}

	if (mode != CLEAN_TRANSFER_NO_DELETION) {
		qh->last_used = now;	
		list_add_tail (&qh->horizontal, &s->free_desc); // mark qh for later deletion/kfree
	}
}
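/* Illustrative sketch (not part of the original driver): how the three
 * CLEAN_TRANSFER_* modes documented above are selected by the callers in
 * this file. The dispatch helper itself is hypothetical; the mode choices
 * mirror uhci_unlink_urb_async(), uhci_unlink_urb_sync() and the
 * process_transfer() calls in uhci_cleanup_unlink() below. */
_static void uhci_clean_transfer_modes_sketch (uhci_t *s, struct urb *urb,
					       uhci_desc_t *qh, int async_unlink_step)
{
	if (async_unlink_step == 1)
		// async unlink, step 1: pull the QH out of the schedule, defer deletion
		uhci_clean_transfer (s, urb, qh, CLEAN_TRANSFER_NO_DELETION);
	else if (async_unlink_step == 2)
		// async unlink, step 2 (or sync unlink): only mark the QH for deletion
		uhci_clean_transfer (s, urb, qh, CLEAN_TRANSFER_DELETION_MARK);
	else
		// regular completion: unlink and delete-mark in a single pass
		uhci_clean_transfer (s, urb, qh, CLEAN_TRANSFER_REGULAR);
}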
/*-------------------------------------------------------------------*/
// Release bandwidth for Interrupt or Isoc. transfers 
_static void uhci_release_bandwidth(struct urb *urb)
{       
	if (urb->bandwidth) {
		switch (usb_pipetype(urb->pipe)) {
		case PIPE_INTERRUPT:
			usb_release_bandwidth (urb->dev, urb, 0);
			break;
		case PIPE_ISOCHRONOUS:
			usb_release_bandwidth (urb->dev, urb, 1);
			break;
		default:
			break;
		}
	}	
}
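/* Illustrative counterpart (not in this excerpt): the bandwidth released
 * above is claimed in the submit path. A minimal sketch using the 2.4-era
 * usbcore helpers; the wrapper itself is hypothetical. */
_static int uhci_claim_bandwidth_sketch (struct urb *urb, int isoc)
{
	int bustime = usb_check_bandwidth (urb->dev, urb);	// approximate bus time

	if (bustime < 0)
		return bustime;		// not enough periodic bandwidth left

	usb_claim_bandwidth (urb->dev, urb, bustime, isoc);	// sets urb->bandwidth
	return 0;
}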

_static void uhci_urb_dma_sync(uhci_t *s, struct urb *urb, urb_priv_t *urb_priv)
{
	if (urb_priv->setup_packet_dma)
		pci_dma_sync_single(s->uhci_pci, urb_priv->setup_packet_dma,
				    sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);

	if (urb_priv->transfer_buffer_dma)
		pci_dma_sync_single(s->uhci_pci, urb_priv->transfer_buffer_dma,
				    urb->transfer_buffer_length,
				    usb_pipein(urb->pipe) ?
				    PCI_DMA_FROMDEVICE :
				    PCI_DMA_TODEVICE);
}

_static void uhci_urb_dma_unmap(uhci_t *s, struct urb *urb, urb_priv_t *urb_priv)
{
	if (urb_priv->setup_packet_dma) {
		pci_unmap_single(s->uhci_pci, urb_priv->setup_packet_dma,
				 sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
		urb_priv->setup_packet_dma = 0;
	}
	if (urb_priv->transfer_buffer_dma) {
		pci_unmap_single(s->uhci_pci, urb_priv->transfer_buffer_dma,
				 urb->transfer_buffer_length,
				 usb_pipein(urb->pipe) ?
				 PCI_DMA_FROMDEVICE :
				 PCI_DMA_TODEVICE);
		urb_priv->transfer_buffer_dma = 0;
	}
}
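/* Illustrative counterpart (not shown in this excerpt): submit-time mapping
 * that produces the DMA handles consumed by uhci_urb_dma_sync() and
 * uhci_urb_dma_unmap() above. The fields and PCI DMA calls match the ones
 * used in this file; the helper itself is a hypothetical sketch. */
_static void uhci_urb_dma_map_sketch (uhci_t *s, struct urb *urb, urb_priv_t *urb_priv)
{
	if (urb->setup_packet)
		urb_priv->setup_packet_dma =
			pci_map_single(s->uhci_pci, urb->setup_packet,
				       sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);

	if (urb->transfer_buffer && urb->transfer_buffer_length)
		urb_priv->transfer_buffer_dma =
			pci_map_single(s->uhci_pci, urb->transfer_buffer,
				       urb->transfer_buffer_length,
				       usb_pipein(urb->pipe) ?
				       PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
}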
/*-------------------------------------------------------------------*/
/* needs urb_list_lock!
   mode: UNLINK_ASYNC_STORE_URB: unlink and move URB into unlinked list
         UNLINK_ASYNC_DONT_STORE: unlink, don't move URB into unlinked list
*/
_static int uhci_unlink_urb_async (uhci_t *s, struct urb *urb, int mode)
{
	uhci_desc_t *qh;
	urb_priv_t *urb_priv;
	
	async_dbg("unlink_urb_async called %p",urb);

	if ((urb->status == -EINPROGRESS) ||
	    ((usb_pipetype (urb->pipe) ==  PIPE_INTERRUPT) && ((urb_priv_t*)urb->hcpriv)->flags))
	{
		((urb_priv_t*)urb->hcpriv)->started = ~0;  // mark
		dequeue_urb (s, urb);

		if (mode==UNLINK_ASYNC_STORE_URB)
			list_add_tail (&urb->urb_list, &s->urb_unlinked); // store urb

		uhci_switch_timer_int(s);
		s->unlink_urb_done = 1;
		uhci_release_bandwidth(urb);

		urb->status = -ECONNABORTED;	// mark urb as "waiting to be killed"	
		urb_priv = (urb_priv_t*)urb->hcpriv;

		switch (usb_pipetype (urb->pipe)) {
		case PIPE_INTERRUPT:
			usb_dotoggle (urb->dev, usb_pipeendpoint (urb->pipe), usb_pipeout (urb->pipe));
			/* fall through: interrupt TDs are unlinked the same way as iso TDs */
		case PIPE_ISOCHRONOUS:
			uhci_clean_iso_step1 (s, urb_priv);
			break;

		case PIPE_BULK:
		case PIPE_CONTROL:
			qh = list_entry (urb_priv->desc_list.next, uhci_desc_t, desc_list);
			uhci_clean_transfer (s, urb, qh, CLEAN_TRANSFER_NO_DELETION);
			break;
		}
		((urb_priv_t*)urb->hcpriv)->started = UHCI_GET_CURRENT_FRAME(s);
		return -EINPROGRESS;  // completion will follow
	}		

	return 0;    // URB already dead
}
/*-------------------------------------------------------------------*/
// kills an urb by unlinking descriptors and waiting for at least one frame
_static int uhci_unlink_urb_sync (uhci_t *s, struct urb *urb)
{
	uhci_desc_t *qh;
	urb_priv_t *urb_priv;
	unsigned long flags=0;
	struct usb_device *usb_dev;

	spin_lock_irqsave (&s->urb_list_lock, flags);

	if (urb->status == -EINPROGRESS) {

		// move descriptors out of the running chains, dequeue urb
		uhci_unlink_urb_async(s, urb, UNLINK_ASYNC_DONT_STORE);

		urb_priv = urb->hcpriv;
		urb->status = -ENOENT;	// prevent from double deletion after unlock		
		spin_unlock_irqrestore (&s->urb_list_lock, flags);
		
		// cleanup the rest
		switch (usb_pipetype (urb->pipe)) {

		case PIPE_INTERRUPT:
		case PIPE_ISOCHRONOUS:
			uhci_wait_ms(1);
			uhci_clean_iso_step2(s, urb_priv);
			break;

		case PIPE_BULK:
		case PIPE_CONTROL:
			qh = list_entry (urb_priv->desc_list.next, uhci_desc_t, desc_list);
			uhci_clean_transfer(s, urb, qh, CLEAN_TRANSFER_DELETION_MARK);
			uhci_wait_ms(1);
		}
		urb->status = -ENOENT;	// mark urb as killed		
					
		uhci_urb_dma_unmap(s, urb, urb->hcpriv);

#ifdef DEBUG_SLAB
		kmem_cache_free (urb_priv_kmem, urb->hcpriv);
#else
		kfree (urb->hcpriv);
#endif
		usb_dev = urb->dev;
		if (urb->complete) {
			dbg("unlink_urb: calling completion");
			urb->dev = NULL;
			urb->complete ((struct urb *) urb);
		}
		usb_put_dev (usb_dev);
		usb_put_urb (urb);
	}
	else
		spin_unlock_irqrestore (&s->urb_list_lock, flags);

	return 0;
}
/*-------------------------------------------------------------------*/
// async unlink_urb completion/cleanup work
// has to be protected by urb_list_lock!
// features: if USB_TIMEOUT_KILLED is set in transfer_flags, the resulting
// status of the killed transaction is not overwritten

_static void uhci_cleanup_unlink(uhci_t *s, int force)
{
	struct list_head *q;
	struct urb *urb;
	struct usb_device *dev;
	int now, type;
	urb_priv_t *urb_priv;

	q=s->urb_unlinked.next;
	now=UHCI_GET_CURRENT_FRAME(s);

	while (q != &s->urb_unlinked) {

		urb = list_entry (q, struct urb, urb_list);

		urb_priv = (urb_priv_t*)urb->hcpriv;
		q = urb->urb_list.next;
		
		if (!urb_priv) // avoid crash when URB is corrupted
			break;
			
		if (force || ((urb_priv->started != ~0) && (urb_priv->started != now))) {
			async_dbg("async cleanup %p",urb);
			type=usb_pipetype (urb->pipe);

			switch (type) { // process descriptors
			case PIPE_CONTROL:
				process_transfer (s, urb, CLEAN_TRANSFER_DELETION_MARK);  // don't unlink (already done)
				break;
			case PIPE_BULK:
				if (!s->avoid_bulk.counter)
					process_transfer (s, urb, CLEAN_TRANSFER_DELETION_MARK); // don't unlink (already done)
				else
					continue;
				break;
			case PIPE_ISOCHRONOUS:
				process_iso (s, urb, PROCESS_ISO_FORCE); // force, don't unlink
				break;
			case PIPE_INTERRUPT:
				process_interrupt (s, urb);
				break;
			}

			if (!(urb->transfer_flags & USB_TIMEOUT_KILLED))
		  		urb->status = -ECONNRESET; // mark as asynchronously killed

			dev = urb->dev;	// completion may destroy all...
			urb_priv = urb->hcpriv;
			list_del (&urb->urb_list);
			
			uhci_urb_dma_sync(s, urb, urb_priv);
			if (urb->complete) {
				spin_unlock(&s->urb_list_lock);
				urb->dev = NULL;
				urb->complete ((struct urb *) urb);
				spin_lock(&s->urb_list_lock);
			}

			if (!(urb->transfer_flags & USB_TIMEOUT_KILLED))
				urb->status = -ENOENT;  // now the urb is really dead

			switch (type) {
			case PIPE_ISOCHRONOUS:
			case PIPE_INTERRUPT:
				uhci_clean_iso_step2(s, urb_priv);
				break;
			}
	
			uhci_urb_dma_unmap(s, urb, urb_priv);

			usb_put_dev (dev);
#ifdef DEBUG_SLAB
			kmem_cache_free (urb_priv_kmem, urb_priv);
#else
			kfree (urb_priv);
#endif
			usb_put_urb (urb);
		}
	}
}
 
/*-------------------------------------------------------------------*/
_static int uhci_unlink_urb (struct urb *urb)
{
	uhci_t *s;
	unsigned long flags=0;
	dbg("uhci_unlink_urb called for %p",urb);
	if (!urb || !urb->dev)		// you never know...
		return -EINVAL;
	
	s = (uhci_t*) urb->dev->bus->hcpriv;

	if (usb_pipedevice (urb->pipe) == s->rh.devnum)
		return rh_unlink_urb (urb);

	if (!urb->hcpriv)
		return -EINVAL;

	if (urb->transfer_flags & USB_ASYNC_UNLINK) {
		int ret;
		spin_lock_irqsave (&s->urb_list_lock, flags);

		uhci_release_bandwidth(urb);
		ret = uhci_unlink_urb_async(s, urb, UNLINK_ASYNC_STORE_URB);

		spin_unlock_irqrestore (&s->urb_list_lock, flags);	
		return ret;
	}
	else
		return uhci_unlink_urb_sync(s, urb);
}
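/* Illustrative caller-side sketch (not part of this driver): how a class
 * driver of this kernel generation requests the asynchronous unlink path
 * handled by uhci_unlink_urb() above. 'my_urb' is hypothetical; the flag
 * and the resulting -ECONNRESET completion status come from the code above. */
static void example_async_cancel (struct urb *my_urb)
{
	my_urb->transfer_flags |= USB_ASYNC_UNLINK;	// take the async unlink path
	usb_unlink_urb (my_urb);			// returns -EINPROGRESS; the completion
							// handler later sees -ECONNRESET
}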
/*-------------------------------------------------------------------*/
// In case of ASAP iso transfer, search the URB-list for already queued URBs
// for this EP and calculate the earliest start frame for the new
// URB (easy seamless URB continuation!)
_static int find_iso_limits (struct urb *urb, unsigned int *start, unsigned int *end)
{
	struct urb *u, *last_urb = NULL;
	uhci_t *s = (uhci_t*) urb->dev->bus->hcpriv;
	struct list_head *p;
	int ret=-1;
	unsigned long flags;
	
	spin_lock_irqsave (&s->urb_list_lock, flags);
	p=s->urb_list.prev;

	for (; p != &s->urb_list; p = p->prev) {
		u = list_entry (p, struct urb, urb_list);
		// look for pending URBs with identical pipe handle
		// works only because iso doesn't toggle the data bit!
		if ((urb->pipe == u->pipe) && (urb->dev == u->dev) && (u->status == -EINPROGRESS)) {
			if (!last_urb)
				*start = u->start_frame;
			last_urb = u;
		}
	}
	
	if (last_urb) {
