ohci-q.c
A practical example of Linux client function definitions
Language: C
Page 1 of 2

			ohci->hc_control &= ~OHCI_CTRL_CLE;
			writel (ohci->hc_control,
				&ohci->regs->control); 
			break;
		case PIPE_BULK: /* stop bulk list */
			ohci->hc_control &= ~OHCI_CTRL_BLE;
			writel (ohci->hc_control,
				&ohci->regs->control); 
			break;
	}

	frame = le16_to_cpu (ohci->hcca->frame_no) & 0x1;
	ed->ed_rm_list = ohci->ed_rm_list [frame];
	ohci->ed_rm_list [frame] = ed;

	/* enable SOF interrupt */
	if (!ohci->sleeping) {
		writel (OHCI_INTR_SF, &ohci->regs->intrstatus);
		writel (OHCI_INTR_SF, &ohci->regs->intrenable);
	}
}
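
/* Illustrative sketch only, not part of this driver: the code above defers
 * ED reclamation by parking the ED on a two-slot pending list indexed by
 * the low bit of the current frame number, so it is not torn down until a
 * later SOF interrupt, when the controller can no longer be touching it.
 * The bare pattern, with hypothetical names, looks like this:
 */
#if 0
struct node {
	struct node	*next;
};

static struct node	*pending [2];	/* one slot per frame parity */

static void defer_reclaim (struct node *n, unsigned int frame_no)
{
	unsigned int	slot = frame_no & 0x1;

	/* push onto the slot for the current parity; the matching
	 * dl_del_list-style scan for this parity runs at least one
	 * frame later
	 */
	n->next = pending [slot];
	pending [slot] = n;
}
#endif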

/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/

/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */

static void
td_fill (struct ohci_hcd *ohci, unsigned int info,
	dma_addr_t data, int len,
	struct urb *urb, int index)
{
	volatile struct td	*td, *td_pt;
	urb_priv_t		*urb_priv = urb->hcpriv;

	if (index >= urb_priv->length) {
		err ("internal OHCI error: TD index > length");
		return;
	}

#if 0
	/* no interrupt needed except for URB's last TD */
	if (index != (urb_priv->length - 1))
		info |= TD_DI;
#endif

	/* use this td as the next dummy */
	td_pt = urb_priv->td [index];
	td_pt->hwNextTD = 0;

	/* fill the old dummy TD */
	td = urb_priv->td [index] = dma_to_td (ohci,
			le32_to_cpup (&urb_priv->ed->hwTailP));

	td->ed = urb_priv->ed;
	td->next_dl_td = NULL;
	td->index = index;
	td->urb = urb; 
	td->data_dma = data;
	if (!len)
		data = 0;

	td->hwINFO = cpu_to_le32 (info);
	if ((td->ed->type) == PIPE_ISOCHRONOUS) {
		td->hwCBP = cpu_to_le32 (data & 0xFFFFF000);
		td->ed->last_iso = info & 0xffff;
	} else {
		td->hwCBP = cpu_to_le32 (data); 
	}			
	if (data)
		td->hwBE = cpu_to_le32 (data + len - 1);
	else
		td->hwBE = 0;
	td->hwNextTD = cpu_to_le32 (td_pt->td_dma);
	td->hwPSW [0] = cpu_to_le16 ((data & 0x0FFF) | 0xE000);

	/* HC might read the TD right after we link it ... */
	wmb ();

	/* append to queue */
	td->ed->hwTailP = td->hwNextTD;
}
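
/* Illustrative sketch only, not part of this driver: td_fill() implements
 * the OHCI "dummy tail" enqueue (spec 5.2.8.2).  The ED's tail always
 * points at an empty placeholder TD, so the controller never fetches a
 * half-initialized descriptor: software fills the old dummy in place,
 * links a fresh dummy behind it, and only then advances the tail.
 * The bare pattern, with hypothetical types:
 */
#if 0
struct desc {
	struct desc	*next;
	int		payload;
};

struct queue {
	struct desc	*head;
	struct desc	*tail;		/* always the current dummy */
};

static void enqueue (struct queue *q, struct desc *new_dummy, int payload)
{
	struct desc	*old_dummy = q->tail;

	new_dummy->next = NULL;
	old_dummy->payload = payload;	/* fill the old dummy in place */
	old_dummy->next = new_dummy;	/* link the fresh dummy ... */
	wmb ();				/* ... make it visible ... */
	q->tail = new_dummy;		/* ... then publish the new tail */
}
#endif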

/*-------------------------------------------------------------------------*/

/* prepare all TDs of a transfer */

static void td_submit_urb (struct urb *urb)
{ 
	urb_priv_t	*urb_priv = urb->hcpriv;
	struct ohci_hcd *ohci = hcd_to_ohci (urb->dev->bus->hcpriv);
	dma_addr_t	data;
	int		data_len = urb->transfer_buffer_length;
	int		cnt = 0; 
	__u32		info = 0;
  	unsigned int	toggle = 0;

	/* OHCI handles the DATA-toggles itself, we just use the
	 * USB-toggle bits for resetting
	 */
  	if (usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe),
			usb_pipeout (urb->pipe))) {
  		toggle = TD_T_TOGGLE;
	} else {
  		toggle = TD_T_DATA0;
		usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe),
			usb_pipeout (urb->pipe), 1);
	}

	urb_priv->td_cnt = 0;

	if (data_len) {
#ifdef CONFIG_PCI
		data = pci_map_single (ohci->hcd.pdev,
			urb->transfer_buffer, data_len,
			usb_pipeout (urb->pipe)
				? PCI_DMA_TODEVICE
				: PCI_DMA_FROMDEVICE
			);
#else
#	error "what dma addr to use"
#endif
	} else
		data = 0;

	/* NOTE:  TD_CC is set so we can tell which TDs the HC processed by
	 * using TD_CC_GET, as well as by seeing them on the done list.
	 */
	switch (usb_pipetype (urb->pipe)) {
		case PIPE_BULK:
			info = usb_pipeout (urb->pipe)
				? TD_CC | TD_DP_OUT
				: TD_CC | TD_DP_IN ;
			while (data_len > 4096) {		
				td_fill (ohci,
					info | (cnt? TD_T_TOGGLE:toggle),
					data, 4096, urb, cnt);
				data += 4096; data_len -= 4096; cnt++;
			}
			info = usb_pipeout (urb->pipe)?
				TD_CC | TD_DP_OUT : TD_CC | TD_R | TD_DP_IN ;
			td_fill (ohci, info | (cnt? TD_T_TOGGLE:toggle),
				data, data_len, urb, cnt);
			cnt++;
			if ((urb->transfer_flags & USB_ZERO_PACKET)
					&& cnt < urb_priv->length) {
				td_fill (ohci, info | (cnt? TD_T_TOGGLE:toggle),
					0, 0, urb, cnt);
				cnt++;
			}
			/* start bulk list */
			if (!ohci->sleeping) {
				wmb ();
				writel (OHCI_BLF, &ohci->regs->cmdstatus);
			}
			break;

		case PIPE_INTERRUPT:
			info = TD_CC | toggle;
			info |= usb_pipeout (urb->pipe) 
				?  TD_DP_OUT
				:  TD_R | TD_DP_IN;
			td_fill (ohci, info, data, data_len, urb, cnt++);
			break;

		case PIPE_CONTROL:
			/* control requests don't use toggle state  */
			info = TD_CC | TD_DP_SETUP | TD_T_DATA0;
			td_fill (ohci, info,
#ifdef CONFIG_PCI
				pci_map_single (ohci->hcd.pdev,
					urb->setup_packet, 8,
					PCI_DMA_TODEVICE),
#else
#	error "what dma addr to use"				
#endif
				8, urb, cnt++); 
			if (data_len > 0) {  
				info = TD_CC | TD_R | TD_T_DATA1;
				info |= usb_pipeout (urb->pipe)
				    ? TD_DP_OUT
				    : TD_DP_IN;
				/* NOTE:  mishandles transfers >8K, some >4K */
				td_fill (ohci, info, data, data_len,
						urb, cnt++);  
			} 
			info = usb_pipeout (urb->pipe)
				? TD_CC | TD_DP_IN | TD_T_DATA1
				: TD_CC | TD_DP_OUT | TD_T_DATA1;
			td_fill (ohci, info, data, 0, urb, cnt++);
			/* start control list */
			if (!ohci->sleeping) {
				wmb ();
				writel (OHCI_CLF, &ohci->regs->cmdstatus);
			}
			break;

		case PIPE_ISOCHRONOUS:
			for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
				int	frame = urb->start_frame;

				// FIXME scheduling should handle frame counter
				// roll-around ... exotic case (and OHCI has
				// a 2^16 iso range, vs other HCs max of 2^10)
				frame += cnt * urb->interval;
				frame &= 0xffff;
				td_fill (ohci, TD_CC | TD_ISO | frame,
				    data + urb->iso_frame_desc [cnt].offset, 
				    urb->iso_frame_desc [cnt].length, urb, cnt); 
			}
			break;
	} 
	if (urb_priv->length != cnt) 
		dbg ("TD LENGTH %d != CNT %d", urb_priv->length, cnt);
}
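
/* Illustrative sketch only, not part of this driver: the bulk case above
 * carves the transfer buffer into 4096-byte TDs plus one trailing TD for
 * whatever is left, and optionally a zero-length TD when USB_ZERO_PACKET
 * is requested.  A rough count of the TDs that loop produces (useful for
 * sanity-checking against urb_priv->length) would be:
 */
#if 0
static int bulk_td_count (int data_len, int want_zero_packet)
{
	int	cnt = 0;

	while (data_len > 4096) {	/* full-sized TDs */
		data_len -= 4096;
		cnt++;
	}
	cnt++;				/* final, possibly short, TD */
	if (want_zero_packet)		/* optional zero-length terminator */
		cnt++;
	return cnt;
}

/* e.g. a 10000-byte transfer: two 4096-byte TDs plus one 1808-byte TD */
#endif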

/*-------------------------------------------------------------------------*
 * Done List handling functions
 *-------------------------------------------------------------------------*/

/* calculate transfer length/status and update the urb
 * PRECONDITION:  irqsafe (only for urb->status locking)
 */
static void td_done (struct urb *urb, struct td *td)
{
	u32	tdINFO = le32_to_cpup (&td->hwINFO);
	int	cc = 0;


	/* ISO ... drivers see per-TD length/status */
  	if (tdINFO & TD_ISO) {
 		u16	tdPSW = le16_to_cpu (td->hwPSW [0]);
		int	dlen = 0;

 		cc = (tdPSW >> 12) & 0xF;
		if (! ((urb->transfer_flags & USB_DISABLE_SPD)
				&& (cc == TD_DATAUNDERRUN)))
			cc = TD_CC_NOERROR;

		if (usb_pipeout (urb->pipe))
			dlen = urb->iso_frame_desc [td->index].length;
		else
			dlen = tdPSW & 0x3ff;
		urb->actual_length += dlen;
		urb->iso_frame_desc [td->index].actual_length = dlen;
		urb->iso_frame_desc [td->index].status = cc_to_error [cc];

		if (cc != 0)
			dbg ("  urb %p iso TD %d len %d CC %d",
				urb, td->index, dlen, cc);

	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	} else {
		int	type = usb_pipetype (urb->pipe);
		u32	tdBE = le32_to_cpup (&td->hwBE);

  		cc = TD_CC_GET (tdINFO);

		/* control endpoints only have soft stalls */
  		if (type != PIPE_CONTROL && cc == TD_CC_STALL)
			usb_endpoint_halt (urb->dev,
				usb_pipeendpoint (urb->pipe),
				usb_pipeout (urb->pipe));

		/* update packet status if needed (short may be ok) */
		if (((urb->transfer_flags & USB_DISABLE_SPD) != 0
				&& cc == TD_DATAUNDERRUN))
			cc = TD_CC_NOERROR;
		if (cc != TD_CC_NOERROR) {
			spin_lock (&urb->lock);
			if (urb->status == -EINPROGRESS)
				urb->status = cc_to_error [cc];
			spin_unlock (&urb->lock);
		}

		/* count all non-empty packets except control SETUP packet */
		if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
			if (td->hwCBP == 0)
				urb->actual_length += tdBE - td->data_dma + 1;
			else
				urb->actual_length +=
					  le32_to_cpup (&td->hwCBP)
					- td->data_dma;
		}

		if (cc != 0)
			dbg ("  urb %p TD %d CC %d, len=%d",
				urb, td->index, cc, urb->actual_length);
  	}
}
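
/* Illustrative sketch only, not part of this driver: the actual_length
 * update above follows from the OHCI TD layout: hwBE holds the address of
 * the buffer's last byte, and hwCBP is zeroed by the controller once the
 * whole buffer has been transferred (otherwise it points at the first
 * byte that was not transferred).  With hypothetical parameters:
 */
#if 0
static u32 bytes_transferred (u32 start, u32 be, u32 cbp)
{
	if (cbp == 0)
		return be - start + 1;	/* whole buffer was transferred */
	return cbp - start;		/* short transfer, stopped at cbp */
}

/* e.g. start 0x1000, be 0x13ff, cbp 0      -> 0x400 bytes (all 1024)
 *      start 0x1000, be 0x13ff, cbp 0x1200 -> 0x200 bytes (short)
 */
#endif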

/*-------------------------------------------------------------------------*/

/* replies to the request have to be on a FIFO basis so
 * we unreverse the hc-reversed done-list
 */
static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
{
	__u32		td_list_hc;
	struct td	*td_rev = NULL;
	struct td	*td_list = NULL;
  	urb_priv_t	*urb_priv = NULL;
  	unsigned long	flags;

  	spin_lock_irqsave (&ohci->lock, flags);

	td_list_hc = le32_to_cpup (&ohci->hcca->done_head);
	ohci->hcca->done_head = 0;

	while (td_list_hc) {		
		td_list = dma_to_td (ohci, td_list_hc);

		if (TD_CC_GET (le32_to_cpup (&td_list->hwINFO))) {
			urb_priv = (urb_priv_t *) td_list->urb->hcpriv;
			/* typically the endpoint halts on error; un-halt,
			 * and maybe dequeue other TDs from this urb
			 */
			if (td_list->ed->hwHeadP & ED_H) {
				if (urb_priv && ((td_list->index + 1)
						< urb_priv->length)) {
					dbg ("urb %p TD %d of %d, patch ED",
						td_list->urb,
						1 + td_list->index,
						urb_priv->length);
					td_list->ed->hwHeadP = 
			    (urb_priv->td [urb_priv->length - 1]->hwNextTD
				    & __constant_cpu_to_le32 (TD_MASK))
			    | (td_list->ed->hwHeadP & ED_C);
					urb_priv->td_cnt += urb_priv->length
						- td_list->index - 1;
				} else 
					td_list->ed->hwHeadP &= ~ED_H;
			}
		}

		td_list->next_dl_td = td_rev;	
		td_rev = td_list;
		td_list_hc = le32_to_cpup (&td_list->hwNextTD);
	}	
	spin_unlock_irqrestore (&ohci->lock, flags);
	return td_list;
}
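
/* Illustrative sketch only, not part of this driver: the controller
 * prepends each retired TD to hcca->done_head, so software receives the
 * done queue in LIFO order.  The loop above is essentially a singly
 * linked list reversal (kept in the separate next_dl_td field rather
 * than rewriting hwNextTD).  Stripped of the DMA mapping, endianness and
 * halted-ED patching, the pattern is:
 */
#if 0
struct item {
	struct item	*next;
};

static struct item *unreverse (struct item *lifo)
{
	struct item	*fifo = NULL;

	while (lifo) {
		struct item	*next = lifo->next;

		lifo->next = fifo;	/* push onto the reversed list */
		fifo = lifo;
		lifo = next;
	}
	return fifo;
}
#endif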

/*-------------------------------------------------------------------------*/

/* there are some pending requests to unlink 
 * - some URBs/TDs if urb_priv->state == URB_DEL
 */
static void dl_del_list (struct ohci_hcd *ohci, unsigned int frame)
{
	unsigned long	flags;
	struct ed	*ed;
	__u32		edINFO;
	__u32		tdINFO;
	struct td	*td = NULL, *td_next = NULL,
			*tdHeadP = NULL, *tdTailP;
	__u32		*td_p;
	int		ctrl = 0, bulk = 0;

	spin_lock_irqsave (&ohci->lock, flags);

	for (ed = ohci->ed_rm_list [frame]; ed != NULL; ed = ed->ed_rm_list) {

		tdTailP = dma_to_td (ohci, le32_to_cpup (&ed->hwTailP));
		tdHeadP = dma_to_td (ohci, le32_to_cpup (&ed->hwHeadP));
		edINFO = le32_to_cpup (&ed->hwINFO);
		td_p = &ed->hwHeadP;

		for (td = tdHeadP; td != tdTailP; td = td_next) {
			struct urb *urb = td->urb;
			urb_priv_t *urb_priv = td->urb->hcpriv;

			td_next = dma_to_td (ohci,
				le32_to_cpup (&td->hwNextTD));
			if ((urb_priv->state == URB_DEL)) {
				tdINFO = le32_to_cpup (&td->hwINFO);
				/* HC may have partly processed this TD */
				if (TD_CC_GET (tdINFO) < 0xE)
					td_done (urb, td);
				*td_p = td->hwNextTD | (*td_p
					& __constant_cpu_to_le32 (0x3));

				/* URB is done; clean up */
				if (++ (urb_priv->td_cnt) == urb_priv->length) {
     					spin_unlock_irqrestore (&ohci->lock,
						flags);
					finish_urb (ohci, urb);
					spin_lock_irqsave (&ohci->lock, flags);
				}
			} else {
				td_p = &td->hwNextTD;
			}
		}

		ed->state &= ~ED_URB_DEL;
		tdHeadP = dma_to_td (ohci, le32_to_cpup (&ed->hwHeadP));

		if (tdHeadP == tdTailP) {
			if (ed->state == ED_OPER)
				ep_unlink (ohci, ed);
			td_free (ohci, tdTailP);
			ed->hwINFO = ED_SKIP;
			ed->state = ED_NEW;
		} else
			ed->hwINFO &= ~ED_SKIP;

		switch (ed->type) {
			case PIPE_CONTROL:
				ctrl = 1;
				break;
			case PIPE_BULK:
				bulk = 1;
				break;
		}
   	}

	/* maybe reenable control and bulk lists */ 
	if (!ohci->disabled) {
		if (ctrl) 	/* reset control list */
			writel (0, &ohci->regs->ed_controlcurrent);
		if (bulk)	/* reset bulk list */
			writel (0, &ohci->regs->ed_bulkcurrent);
		if (!ohci->ed_rm_list [!frame]) {
			if (ohci->ed_controltail)
				ohci->hc_control |= OHCI_CTRL_CLE;
			if (ohci->ed_bulktail)
				ohci->hc_control |= OHCI_CTRL_BLE;
			writel (ohci->hc_control, &ohci->regs->control);   
		}
	}

   	ohci->ed_rm_list [frame] = NULL;
   	spin_unlock_irqrestore (&ohci->lock, flags);
}
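
/* Illustrative sketch only, not part of this driver: the inner loop of
 * dl_del_list() unlinks the URB_DEL TDs with a pointer-to-pointer walk
 * (td_p), so it never needs to track a separate "previous" node.  (The
 * real code also preserves the low control bits of hwHeadP, which the
 * sketch ignores.)  The bare pattern, with hypothetical names:
 */
#if 0
struct item {
	struct item	*next;
	int		doomed;
};

static void unlink_doomed (struct item **headp)
{
	struct item	**pp = headp;

	while (*pp) {
		struct item	*cur = *pp;

		if (cur->doomed)
			*pp = cur->next;	/* splice it out of the list */
		else
			pp = &cur->next;	/* keep it, advance */
	}
}
#endif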



/*-------------------------------------------------------------------------*/

/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers.  The only other
 * path is dl_del_list(), which unlinks URBs by scanning EDs, instead of
 * scanning the (re-reversed) donelist as this does.
 */
static void dl_done_list (struct ohci_hcd *ohci, struct td *td)
{
	unsigned long	flags;

  	spin_lock_irqsave (&ohci->lock, flags);
  	while (td) {
		struct td	*td_next = td->next_dl_td;
		struct urb	*urb = td->urb;
		urb_priv_t	*urb_priv = urb->hcpriv;
		struct ed	*ed = td->ed;

		/* update URB's length and status from TD */
   		td_done (urb, td);
  		urb_priv->td_cnt++;

		/* If all this urb's TDs are done, call complete().
		 * Interrupt transfers are the only special case:
		 * they're reissued, until "deleted" by usb_unlink_urb
		 * (real work done in a SOF intr, by dl_del_list).
		 */
  		if (urb_priv->td_cnt == urb_priv->length) {
			int	resubmit;

			resubmit = usb_pipeint (urb->pipe)
					&& (urb_priv->state != URB_DEL);

     			spin_unlock_irqrestore (&ohci->lock, flags);
			if (resubmit)
  				intr_resub (ohci, urb);
  			else
  				finish_urb (ohci, urb);
  			spin_lock_irqsave (&ohci->lock, flags);
  		}

		/* clean schedule:  unlink EDs that are no longer busy */
		if ((ed->hwHeadP & __constant_cpu_to_le32 (TD_MASK))
					== ed->hwTailP
				&& (ed->state == ED_OPER)) 
			ep_unlink (ohci, ed);
    		td = td_next;
  	}  
	spin_unlock_irqrestore (&ohci->lock, flags);
}
