usb-uhci-q.c

Collection: Real-world examples of Linux guest function definitions
Language: C
Page 1 of 3
			dbg("iso_find_start: now between start_frame and end");
			return -EAGAIN;
		}
	}

	/* check whether start_frame or the last scheduled frame lies between start_limit and stop_limit
	   (frame numbers wrap modulo 1024, the size of the UHCI frame list) */
	if (limits)
		return 0;

	if (((urb->start_frame - start_limit) & 1023) < queued_size ||
	    ((urb->start_frame + number_of_frames - 1 - start_limit) & 1023) < queued_size) {
		dbg("iso_find_start: start_frame %u number_of_packets %u start_limit %u stop_limit %u",
			urb->start_frame, urb->number_of_packets, start_limit, stop_limit);
		return -EAGAIN;
	}

	return 0;
}
/*-------------------------------------------------------------------*/
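/* Submit an isochronous URB: allocate one TD per packet up front, find a usable
   start frame, then link the TDs into the frame list with interrupts disabled. */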
static int uhci_submit_iso_urb (struct uhci_hcd *uhci, struct urb *urb, int mem_flags)
{
	urb_priv_t *urb_priv = urb->hcpriv;
	int n=0, i, ret, last=0;
	uhci_desc_t *td, **tdm;
	int status, destination;
	unsigned long flags;

	tdm = (uhci_desc_t **) kmalloc (urb->number_of_packets * sizeof (uhci_desc_t*), mem_flags);

	if (!tdm) 
		return -ENOMEM;

	memset(tdm, 0, urb->number_of_packets * sizeof (uhci_desc_t*));

	// First try to get all TDs. Reason: removing already inserted TDs can only be done
	// race-free in three steps: unlink the TDs, wait one frame, delete the TDs.
	// So this solution seems simpler...

	for (n = 0; n < urb->number_of_packets; n++) {
		dbg("n:%d urb->iso_frame_desc[n].length:%d", n, urb->iso_frame_desc[n].length);
		if (!urb->iso_frame_desc[n].length)
			continue;  // allows ISO striping by setting length to zero in iso_descriptor
		
		if (alloc_td (uhci, &td, UHCI_PTR_DEPTH)) {
			ret = -ENOMEM;
			goto fail_unmap_tds;
		}
		
		last=n;
		tdm[n] = td;
	}

	__save_flags(flags);
	__cli();		      // Disable IRQs to schedule all ISO-TDs in time
	ret = iso_find_start (uhci, urb);	// adjusts urb->start_frame for later use
	
	if (ret) {
		__restore_flags(flags);
		n = urb->number_of_packets;
		goto fail_unmap_tds;
	}

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;

	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid (urb->pipe);

	// Queue all allocated TDs
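	// The last TD additionally gets the IOC bit set, so the controller raises an
	// interrupt once the whole transfer has completed.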
	for (n = 0; n < urb->number_of_packets; n++) {
		td = tdm[n];
		if (!td)
			continue;
			
		if (n  == last) {
			status |= TD_CTRL_IOC;
			queue_urb (uhci, urb);
		}

		fill_td (td, status, destination | (((urb->iso_frame_desc[n].length - 1) & 0x7ff) << 21),
			 urb_priv->transfer_buffer_dma + urb->iso_frame_desc[n].offset);
		list_add_tail (&td->desc_list, &urb_priv->desc_list);
	
		insert_td_horizontal (uhci, uhci->iso_td[(urb->start_frame + n*urb->interval) & 1023], td);	// store in iso-tds
	}

	kfree (tdm);
	dbg("ISO-INT# %i, start %i, now %i", urb->number_of_packets, urb->start_frame, UHCI_GET_CURRENT_FRAME (uhci) & 1023);
	ret = 0;

	__restore_flags(flags);
	return ret;	

	// Cleanup allocated TDs
fail_unmap_tds:
	dbg("ISO failed, free %i, ret %i",n,ret);
	for (i = 0; i < n; i++)
		if (tdm[i])
			delete_desc(uhci, tdm[i]);
	kfree (tdm);
	return ret;
}
/*###########################################################################*/
//                        URB UNLINK PROCESSING
/*###########################################################################*/

static void uhci_clean_iso_step1(struct uhci_hcd *uhci, urb_priv_t *urb_priv)
{
	struct list_head *p;
	uhci_desc_t *td;
	dbg("uhci_clean_iso_step1");
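	// unlink every TD of this URB from the frame list; the TDs stay allocated
	// until the HC is guaranteed to have moved past them (see step 2)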
	for (p = urb_priv->desc_list.next; p != &urb_priv->desc_list; p = p->next) {
		td = list_entry (p, uhci_desc_t, desc_list);
		unlink_td (uhci, td, 1);
	}
}
/*-------------------------------------------------------------------*/
static void uhci_clean_iso_step2(struct uhci_hcd *uhci, urb_priv_t *urb_priv)
{
	struct list_head *p;
	uhci_desc_t *td;
	int now=UHCI_GET_CURRENT_FRAME(uhci);

	dbg("uhci_clean_iso_step2");
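	// at least one frame after step 1: move the TDs to the free list so they
	// can be reclaimed later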
	while ((p = urb_priv->desc_list.next) != &urb_priv->desc_list) {
		td = list_entry (p, uhci_desc_t, desc_list);
		list_del (p);
		INIT_LIST_HEAD(&td->horizontal);
		list_add_tail (&td->horizontal, &uhci->free_desc_td);
		td->last_used = now;
	}
}
/*-------------------------------------------------------------------*/
/* mode: CLEAN_TRANSFER_NO_DELETION: unlink but no deletion mark (step 1 of async_unlink)
         CLEAN_TRANSFER_REGULAR: regular (unlink/delete-mark)
         CLEAN_TRANSFER_DELETION_MARK: deletion mark for QH (step 2 of async_unlink)
 looks a bit complicated because of all the bulk queueing goodies
*/

static void uhci_clean_transfer (struct uhci_hcd *uhci, struct urb *urb, uhci_desc_t *qh, int mode)
{
	uhci_desc_t *bqh, *nqh, *prevqh, *prevtd;
	urb_priv_t *priv=(urb_priv_t*)urb->hcpriv;
	int now=UHCI_GET_CURRENT_FRAME(uhci);

	bqh=priv->bottom_qh;	
	
	if (!priv->next_queued_urb)  { // no more appended bulk queues

		queue_dbg("uhci_clean_transfer: No more bulks for urb %p, qh %p, bqh %p, nqh %p", 
			  urb, qh, bqh, priv->next_qh);	
	
		if (priv->prev_queued_urb && mode != CLEAN_TRANSFER_DELETION_MARK) {  // qh not top of the queue
			unsigned long flags;
			urb_priv_t* ppriv = (urb_priv_t*)priv->prev_queued_urb->hcpriv;

			spin_lock_irqsave (&uhci->qh_lock, flags);
			prevqh = list_entry (ppriv->desc_list.next, uhci_desc_t, desc_list);
			prevtd = list_entry (prevqh->vertical.prev, uhci_desc_t, vertical);
			set_td_link(prevtd, priv->bottom_qh->dma_addr | UHCI_PTR_QH); // skip current qh
			mb();
			queue_dbg("uhci_clean_transfer: relink pqh %p, ptd %p", prevqh, prevtd);
			spin_unlock_irqrestore (&uhci->qh_lock, flags);

			ppriv->bottom_qh = priv->bottom_qh;
			ppriv->next_queued_urb = NULL;
		}
		else {   // queue is dead, qh is top of the queue
			
			if (mode != CLEAN_TRANSFER_DELETION_MARK) 				
				unlink_qh(uhci, qh); // remove qh from horizontal chain

			if (bqh) {  // remove remainings of bulk queue
				nqh=priv->next_qh;

				if (mode != CLEAN_TRANSFER_DELETION_MARK) 
					unlink_qh(uhci, nqh);  // remove nqh from horizontal chain
				
				if (mode != CLEAN_TRANSFER_NO_DELETION) {  // add helper QHs to free desc list
					nqh->last_used = bqh->last_used = now;
					list_add_tail (&nqh->horizontal, &uhci->free_desc_qh);
					list_add_tail (&bqh->horizontal, &uhci->free_desc_qh);
				}			
			}
		}
	}
	else { // there are queued urbs following

		queue_dbg("uhci_clean_transfer: urb %p, prevurb %p, nexturb %p, qh %p, bqh %p, nqh %p",
			  urb, priv->prev_queued_urb, priv->next_queued_urb, qh, bqh, priv->next_qh);

		if (mode != CLEAN_TRANSFER_DELETION_MARK) {	// no work for cleanup at unlink-completion
			struct urb *nurb;
			unsigned long flags;

			nurb = priv->next_queued_urb;
			spin_lock_irqsave (&uhci->qh_lock, flags);		

			if (!priv->prev_queued_urb) { // top QH
				
				prevqh = list_entry (qh->horizontal.prev, uhci_desc_t, horizontal);
				set_qh_head(prevqh, bqh->dma_addr | UHCI_PTR_QH);
				list_del (&qh->horizontal);  // remove this qh from horizontal chain
				list_add (&bqh->horizontal, &prevqh->horizontal); // insert next bqh in horizontal chain
			}
			else {		// intermediate QH
				urb_priv_t* ppriv=(urb_priv_t*)priv->prev_queued_urb->hcpriv;
				urb_priv_t* npriv=(urb_priv_t*)nurb->hcpriv;
				uhci_desc_t * bnqh;
				
				bnqh = list_entry (npriv->desc_list.next, uhci_desc_t, desc_list);
				ppriv->bottom_qh = bnqh;
				ppriv->next_queued_urb = nurb;				
				prevqh = list_entry (ppriv->desc_list.next, uhci_desc_t, desc_list);
				set_qh_head(prevqh, bqh->dma_addr | UHCI_PTR_QH);
			}

			mb();
			((urb_priv_t*)nurb->hcpriv)->prev_queued_urb=priv->prev_queued_urb;
			spin_unlock_irqrestore (&uhci->qh_lock, flags);
		}		
	}

	if (mode != CLEAN_TRANSFER_NO_DELETION) {
		qh->last_used = now;	
		list_add_tail (&qh->horizontal, &uhci->free_desc_qh); // mark qh for later deletion/kfree
	}
}

/*-------------------------------------------------------------------*/
// async unlink_urb completion/cleanup work
// has to be protected by urb_list_lock!
// feature: if USB_TIMEOUT_KILLED is set in transfer_flags, the resulting
// status of the killed transaction is not overwritten

static void uhci_cleanup_unlink(struct uhci_hcd *uhci, int force)
{
	struct list_head *q;
	struct urb *urb;
	urb_priv_t *urb_priv;
	int type, now = UHCI_GET_CURRENT_FRAME(uhci);

	q = uhci->urb_unlinked.next;

	while (q != &uhci->urb_unlinked) {
		urb_priv = list_entry (q, urb_priv_t, urb_list);
		urb = urb_priv->urb;

		q = urb_priv->urb_list.next;
					
		if (force || ((urb_priv->started != ~0) && (urb_priv->started != now))) {
			async_dbg("async cleanup %p",urb);
			type=usb_pipetype (urb->pipe);

			switch (type) { // process descriptors
			case PIPE_CONTROL:
//				usb_show_device(urb->dev);
				process_transfer (uhci, urb, CLEAN_TRANSFER_DELETION_MARK);  // don't unlink (already done)
//				usb_show_device(urb->dev);
				break;
			case PIPE_BULK:
				if (!uhci->avoid_bulk.counter)
					process_transfer (uhci, urb, CLEAN_TRANSFER_DELETION_MARK); // don't unlink (already done)
				else
					continue;
				break;
			case PIPE_ISOCHRONOUS:
				process_iso (uhci, urb, PROCESS_ISO_FORCE); // force, don't unlink
				break;
			case PIPE_INTERRUPT:
				process_interrupt (uhci, urb, PROCESS_INT_REMOVE);
				break;
			}
			
			list_del (&urb_priv->urb_list);			
			uhci_urb_dma_sync(uhci, urb, urb_priv);
			// clean up descriptors for INT/ISO
//			if (type==PIPE_ISOCHRONOUS || type==PIPE_INTERRUPT) 
//				uhci_clean_iso_step2(uhci, urb_priv);
	
			uhci_free_priv(uhci, urb, urb_priv);		

			if (!(urb->transfer_flags & USB_TIMEOUT_KILLED))
				urb->status = -ENOENT;  // now the urb is really dead

			spin_unlock(&uhci->urb_list_lock);
			usb_hcd_giveback_urb(&uhci->hcd, urb);
			spin_lock(&uhci->urb_list_lock);
		}
	}
}
/*-------------------------------------------------------------------*/
/* needs urb_list_lock!
   mode: UNLINK_ASYNC_STORE_URB: unlink and move URB into unlinked list
         UNLINK_ASYNC_DONT_STORE: unlink, don't move URB into unlinked list
*/
static int uhci_unlink_urb_async (struct uhci_hcd *uhci, struct urb *urb, int mode)
{
	uhci_desc_t *qh;
	urb_priv_t *urb_priv;
	
	async_dbg("unlink_urb_async called %p",urb);
	urb_priv = (urb_priv_t*)urb->hcpriv;
	if (!urb_priv) {
		err("hc_priv for URB %p is zero!", urb);
		return -EINVAL;
	}
	urb_priv->started = ~0;  // mark unlink as in progress (keeps uhci_cleanup_unlink() away)
	dequeue_urb (uhci, urb);

	if (mode==UNLINK_ASYNC_STORE_URB)
		list_add_tail (&urb_priv->urb_list, &uhci->urb_unlinked); // store urb

	uhci_switch_timer_int(uhci);
	uhci->unlink_urb_done = 1;

	switch (usb_pipetype (urb->pipe)) {
	case PIPE_INTERRUPT:
		urb_priv->flags = 0; // mark as deleted (if called from completion)
		uhci_do_toggle (urb);
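		/* fall through: interrupt TDs are unlinked the same way as ISO TDs */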

	case PIPE_ISOCHRONOUS:
		uhci_clean_iso_step1 (uhci, urb_priv);
		break;

	case PIPE_BULK:
	case PIPE_CONTROL:
		qh = list_entry (urb_priv->desc_list.next, uhci_desc_t, desc_list);
		uhci_clean_transfer (uhci, urb, qh, CLEAN_TRANSFER_NO_DELETION);
		break;
	}
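	// record the unlink frame; for async unlinks, uhci_cleanup_unlink() gives the
	// URB back only after at least one more frame has passed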
	urb_priv->started = UHCI_GET_CURRENT_FRAME(uhci);
	return 0;  // completion will follow
}
/*-------------------------------------------------------------------*/
// kills an urb by unlinking descriptors and waiting for at least one frame
static int uhci_unlink_urb_sync (struct uhci_hcd *uhci, struct urb *urb)
{
	uhci_desc_t *qh;
	urb_priv_t *urb_priv;
	unsigned long flags=0;

	spin_lock_irqsave (&uhci->urb_list_lock, flags);
//	err("uhci_unlink_urb_sync %p, %i",urb,urb->status);

	// move the descriptors out of the running chains and dequeue the urb
	uhci_unlink_urb_async(uhci, urb, UNLINK_ASYNC_DONT_STORE);

	urb_priv = urb->hcpriv;

	spin_unlock_irqrestore (&uhci->urb_list_lock, flags);
		
	// cleanup the rest
	switch (usb_pipetype (urb->pipe)) {
	case PIPE_INTERRUPT:
	case PIPE_ISOCHRONOUS:
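		// wait at least one frame so the HC no longer references the unlinked TDs,
		// then free them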
		uhci_wait_ms(1);
		uhci_clean_iso_step2(uhci, urb_priv);
		break;

	case PIPE_BULK:
	case PIPE_CONTROL:
		qh = list_entry (urb_priv->desc_list.next, uhci_desc_t, desc_list);
		uhci_clean_transfer(uhci, urb, qh, CLEAN_TRANSFER_DELETION_MARK);
		uhci_wait_ms(1);
	}
	urb->status = -ENOENT;	// mark urb as killed		
		
	finish_urb(uhci,urb);
	
	return 0;
}
/*-------------------------------------------------------------------*/
// unlink urbs for specific device or all devices
static void uhci_unlink_urbs(struct uhci_hcd *uhci, struct usb_device *usb_dev, int remove_all)
{
	struct list_head *p;
	struct list_head *p2;
	struct urb *urb;
	urb_priv_t *priv;
	unsigned long flags;

	spin_lock_irqsave (&uhci->urb_list_lock, flags);
	p = uhci->urb_list.prev;	
	while (p != &uhci->urb_list) {
		p2 = p;
		p = p->prev;
		priv = list_entry (p2, urb_priv_t, urb_list);
		urb = priv->urb;

//		err("unlink urb: %p, dev %p, ud %p", urb, usb_dev,urb->dev);
		
		//urb->transfer_flags |=USB_ASYNC_UNLINK;
			
		if (remove_all || (usb_dev == urb->dev)) {
			spin_unlock_irqrestore (&uhci->urb_list_lock, flags);
			err("forced removing of queued URB %p due to disconnect",urb);
			uhci_urb_dequeue(&uhci->hcd, urb);
			urb->dev = NULL; // avoid further processing of this URB
			spin_lock_irqsave (&uhci->urb_list_lock, flags);
			p = uhci->urb_list.prev;	
		}
	}
	spin_unlock_irqrestore (&uhci->urb_list_lock, flags);
} 
/*-------------------------------------------------------------------*/

// Checks for URB timeout and removes bandwidth reclamation if URB idles too long
static void uhci_check_timeouts(struct uhci_hcd *uhci)
{
	struct list_head *p,*p2;
	struct urb *urb;
	int type;	
	
	p = uhci->urb_list.prev;	

	while (p != &uhci->urb_list) {
		urb_priv_t *hcpriv;

		p2 = p;
		p = p->prev;
		hcpriv = list_entry (p2,  urb_priv_t, urb_list);
		urb = hcpriv->urb;
		type = usb_pipetype (urb->pipe);

		if ( urb->timeout && time_after(jiffies, hcpriv->started + urb->timeout)) {
			urb->transfer_flags |= USB_TIMEOUT_KILLED | USB_ASYNC_UNLINK;
			async_dbg("uhci_check_timeout: timeout for %p",urb);
			uhci_unlink_urb_async(uhci, urb, UNLINK_ASYNC_STORE_URB);
		}
