
📄 ohci1394.c

📁 IEEE 1394 driver, not much to say! It can be used directly in the Linux 2.6 kernel.
💻 C
📖 Page 1 of 5
				PRINT(KERN_ERR,
				      "error initializing legacy IT context");
				return -ENOMEM;
			}

			initialize_dma_trm_ctx(&ohci->it_legacy_context);
		}

		d = &ohci->it_legacy_context;
	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock,flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);

	return 0;
}

static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0;
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG("Cycle master enabled");
				reg_write(ohci, OHCI1394_LinkControlSet,
					  OHCI1394_LinkControl_CycleTimerEnable |
					  OHCI1394_LinkControl_CycleMaster);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_CycleTimerEnable |
				  OHCI1394_LinkControl_CycleMaster |
				  OHCI1394_LinkControl_CycleSource);
		}
		break;

	case CANCEL_REQUESTS:
		DBGMSG("Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;

	case ISO_LISTEN_CHANNEL:
	{
		u64 mask;
		struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
		int ir_legacy_active;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: ISO listen channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (ohci->ISO_channel_usage & mask) {
			PRINT(KERN_ERR,
			      "%s: ISO listen channel %d is already used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ir_legacy_active = ohci->ir_legacy_channels;

		ohci->ISO_channel_usage |= mask;
		ohci->ir_legacy_channels |= mask;

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
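		/* The channel is now marked as used.  If this is the first
		 * legacy listener, the code below still has to claim an IR
		 * DMA context and program its control registers before the
		 * channel is enabled in the multi-channel mask. */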
		if (!ir_legacy_active) {
			if (ohci1394_register_iso_tasklet(ohci,
					  &ohci->ir_legacy_tasklet) < 0) {
				PRINT(KERN_ERR, "No IR DMA context available");
				return -EBUSY;
			}

			/* the IR context can be assigned to any DMA context
			 * by ohci1394_register_iso_tasklet */
			d->ctx = ohci->ir_legacy_tasklet.context;
			d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
				32*d->ctx;
			d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
				32*d->ctx;
			d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
			d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;

			initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);

			if (printk_ratelimit())
				DBGMSG("IR legacy activated");
		}

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening enabled on channel %d", arg);
		break;
	}
	case ISO_UNLISTEN_CHANNEL:
	{
		u64 mask;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: ISO unlisten channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (!(ohci->ISO_channel_usage & mask)) {
			PRINT(KERN_ERR,
			      "%s: ISO unlisten channel %d is not used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ohci->ISO_channel_usage &= ~mask;
		ohci->ir_legacy_channels &= ~mask;

		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening disabled on channel %d", arg);

		if (ohci->ir_legacy_channels == 0) {
			stop_dma_rcv_ctx(&ohci->ir_legacy_context);
			DBGMSG("ISO legacy receive context stopped");
		}

		break;
	}
	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}

/***********************************
 * rawiso ISO reception            *
 ***********************************/

/*
  We use either buffer-fill or packet-per-buffer DMA mode. The DMA
  buffer is split into "blocks" (regions described by one DMA
  descriptor). Each block must be one page or less in size, and
  must not cross a page boundary.

  There is one little wrinkle with buffer-fill mode: a packet that
  starts in the final block may wrap around into the first block. But
  the user API expects all packets to be contiguous.

  Our solution is to keep the very last page of the DMA buffer in
  reserve - if a packet spans the gap, we copy its tail into this page.
*/
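/*
 * Illustrative sketch only (not part of the original driver): how a
 * packet that wraps from the last DMA block back to the start of the
 * buffer can be made contiguous by copying its tail into the reserved
 * guard page at the end of the buffer.  The helper name and parameters
 * are hypothetical; the real handling lives in the driver's buffer-fill
 * parsing code.  Assumes memcpy() from <linux/string.h> and that the
 * packet is no larger than one page, so the tail fits in the guard page.
 */
static inline void example_copy_wrapped_tail(unsigned char *buf,
					     unsigned int buf_size,
					     unsigned int pkt_offset,
					     unsigned int pkt_len)
{
	/* the data area ends where the reserved guard page begins */
	unsigned int data_end = buf_size - PAGE_SIZE;

	if (pkt_offset + pkt_len > data_end) {
		/* bytes of the packet that wrapped to the start of the buffer */
		unsigned int tail = pkt_offset + pkt_len - data_end;

		/* copy the wrapped bytes into the guard page so the packet
		 * occupies one contiguous span for the user */
		memcpy(buf + data_end, buf, tail);
	}
}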
struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;

	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};

static void ohci_iso_recv_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
static void ohci_iso_recv_program(struct hpsb_iso *iso);

static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), GFP_KERNEL);
	if (!recv)
		return -ENOMEM;

	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */
	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
							((recv->nblocks+1)/iso->buf_packets);
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */
		max_packet_size = iso->buf_size / iso->buf_packets;

		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		    recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		   recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}
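	/*
	 * Worked example with illustrative numbers (not taken from the
	 * source): with 4 KiB pages and a 64-page DMA buffer in
	 * buffer-fill mode, nblocks = 64 - 1 = 63 and the last page is
	 * held back as the guard page.  For buf_packets = 32 and
	 * irq_interval = 4 packets, the interval is translated to blocks
	 * as 4 * ((63 + 1) / 32) = 8, which also satisfies the
	 * "at most nblocks/4" clamp above.
	 */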
	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				 sizeof(struct dma_cmd) * recv->nblocks,
				 recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
				                       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}

static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* disable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

	/* halt DMA */
	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}

static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->task_active) {
		ohci_iso_recv_stop(iso);
