⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ohci1394.c

📁 1394在linux下单独的驱动程序代码
💻 C
📖 第 1 页 / 共 5 页
字号:
		/* NOTE(review): tail of the transmit-flush routine whose start
		 * lies above this chunk — start or wake the transmit DMA context. */
		DBGMSG(ohci->id,"Starting transmit DMA ctx=%d",d->ctx);
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx]|z);
		run_context(ohci, d->ctrlSet, NULL);
	}
	else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {	//may see pdf_doc,p80,p119
			DBGMSG(ohci->id,"Waking transmit DMA ctx=%d",d->ctx);
		}

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);	//may see pdf_doc,p80,p119,p138
	}
	return 1;
}

/*
 * Transmission of an async or iso packet.
 *
 * Selects the DMA context matching the packet (AT request for raw packets,
 * the legacy IT context for iso data — allocating it lazily on first use,
 * which is refused from interrupt context because the allocation cannot run
 * there — AT response for response tcodes, AT request otherwise), queues
 * the packet on that context's pending list under d->lock, and flushes the
 * context so the DMA program runs or is woken.
 *
 * Returns 1 on success, 0 on error (packet larger than max_packet_size, or
 * the legacy IT context could not be initialized).
 */
//struct ti_ohci,struct dma_trm_ctx:	see ohci1394.h
//struct hpsb_packet:			see ieee1394_core.h
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR, ohci->id,
			"Transmit packet size %Zd is too big",
			packet->data_size);
		return 0;
	}

	/* Decide whether we have an iso, a request, or a response packet */
	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
		/* The legacy IT DMA context is initialized on first
		 * use.  However, the alloc cannot be run from
		 * interrupt context, so we bail out if that is the
		 * case. I don't see anyone sending ISO packets from
		 * interrupt context anyway...
		 */
		if (ohci->it_legacy_context.ohci == NULL) {
			if (in_interrupt()) {
				PRINT(KERN_ERR, ohci->id,
					"legacy IT context cannot be initialized during interrupt");
				return 0;
			}

			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
				DMA_CTX_ISO, 0, IT_NUM_DESC,
				OHCI1394_IsoXmitContextBase) < 0) {
				PRINT(KERN_ERR, ohci->id,
					"error initializing legacy IT context");
				return 0;
			}

			initialize_dma_trm_ctx(&ohci->it_legacy_context);
		}

		d = &ohci->it_legacy_context;
	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))	//TCODE_ISO_DATA:0xA,ieee1394.h
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock,flags);

	list_add_tail(&packet->driver_list, &d->pending_list);
	//This function fills the FIFO with the (eventual) pending packets
	//and runs or wakes up the DMA prg if necessary.
	//The function MUST be called with the d->lock held.
	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);

	return 1;
}

/*
 * Miscellaneous host-controller operations (devctl) dispatched by the
 * ieee1394 core: bus resets via PHY registers 1 (RHB/IBR) and 5 (ISBR),
 * cycle-timer read/write, cycle-master enable/disable, cancellation of
 * pending AT requests, module use-count adjustment, and legacy ISO
 * channel (un)listen.  Returns 0 or a command-specific positive value on
 * success, negative on error.
 */
static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0;
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if
			(!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		//OHCI1394_IsochronousCycleTimer:0xF0,see pdf_doc,p55
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		//OHCI1394_IsochronousCycleTimer:0xF0,see pdf_doc,p55
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		/* not supported by this driver */
		PRINT(KERN_ERR, ohci->id, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			//OHCI1394_NodeID:0xE8,see pdf_doc,p53
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG(ohci->id, "Cycle master enabled");
				//OHCI1394_LinkControlSet:0xE0,see pdf_doc,p51
				reg_write(ohci, OHCI1394_LinkControlSet,
					0x00300000);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			//OHCI1394_LinkControlClear:0xE4,see pdf_doc,p51
			reg_write(ohci, OHCI1394_LinkControlClear, 0x00700000);
		}
		break;

	case CANCEL_REQUESTS:
		/* flush all pending AT request/response packets */
		DBGMSG(ohci->id, "Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;

	case MODIFY_USAGE:
		if (arg) {
			MOD_INC_USE_COUNT;
		} else {
			MOD_DEC_USE_COUNT;
		}
		retval = 1;
		break;

	case ISO_LISTEN_CHANNEL:
		{
			u64 mask;

			/* NOTE(review): "IS0" in the messages below is a typo
			 * for "ISO" (string text left untouched here). */
			if (arg<0 || arg>63) {
				PRINT(KERN_ERR, ohci->id,
					"%s: IS0 listen channel %d is out of range",
					__FUNCTION__, arg);
				return -EFAULT;
			}

			/* activate the legacy IR context */
			if (ohci->ir_legacy_context.ohci == NULL) {
				if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
					DMA_CTX_ISO, 0, IR_NUM_DESC,	//DMA_CTX_ISO:enum context_type,IR_NUM_DESC:16
					IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,	//IR_BUF_SIZE:PAGE_SIZE,IR_SPLIT_BUF_SIZE:PAGE_SIZE
					OHCI1394_IsoRcvContextBase) < 0) {	//OHCI1394_IsoRcvContextBase:0x400,see pdf_doc,p138
					PRINT(KERN_ERR, ohci->id, "%s: failed to allocate an IR context",
						__FUNCTION__);
					return -ENOMEM;
				}
				ohci->ir_legacy_channels = 0;
				initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);

				DBGMSG(ohci->id, "ISO receive legacy context activated");
			}

			mask = (u64)0x1<<arg;

			spin_lock_irqsave(&ohci->IR_channel_lock, flags);

			if (ohci->ISO_channel_usage & mask) {
				PRINT(KERN_ERR, ohci->id,
					"%s: IS0 listen channel %d is already used",
					__FUNCTION__, arg);
				spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
				return -EFAULT;
			}

			ohci->ISO_channel_usage |= mask;
			ohci->ir_legacy_channels |= mask;

			//OHCI1394_IRMultiChanMaskHiSet:0x070,see pdf_doc,p141
			//OHCI1394_IRMultiChanMaskLoSet:0x078,see pdf_doc,p142
			if (arg>31)
				reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
				1<<(arg-32));
			else
				reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
				1<<arg);

			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			DBGMSG(ohci->id, "Listening enabled on channel %d", arg);
			break;
		}
	case ISO_UNLISTEN_CHANNEL:
		{
			u64 mask;

			if (arg<0 || arg>63) {
				PRINT(KERN_ERR, ohci->id,
					"%s: IS0 unlisten channel %d is out of range",
					__FUNCTION__, arg);
				return -EFAULT;
			}

			mask = (u64)0x1<<arg;

			spin_lock_irqsave(&ohci->IR_channel_lock, flags);

			if (!(ohci->ISO_channel_usage & mask)) {
				PRINT(KERN_ERR, ohci->id,
					"%s: IS0 unlisten channel %d is not used",
					__FUNCTION__, arg);
				spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
				return -EFAULT;
			}

			ohci->ISO_channel_usage &= ~mask;
			ohci->ir_legacy_channels &= ~mask;

			//OHCI1394_IRMultiChanMaskHiClear,0x74,see pdf_doc,p141
			//OHCI1394_IRMultiChanMaskLoClear,0x7C,see pdf_doc,p142
			if (arg>31)
				reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
				1<<(arg-32));
			else
				reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
				1<<arg);

			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			DBGMSG(ohci->id, "Listening disabled on channel %d", arg);

			/* last listener gone: tear the legacy IR context down */
			if (ohci->ir_legacy_channels == 0) {
				free_dma_rcv_ctx(&ohci->ir_legacy_context);
				DBGMSG(ohci->id, "ISO receive legacy context deactivated");
			}
			break;
		}
	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}

/***********************************
 * rawiso ISO reception            *
 ***********************************/

/*
We use either buffer-fill or packet-per-buffer DMA mode. The DMA
buffer is split into "blocks" (regions described by one DMA
descriptor). Each block must be one page or less in size, and
must not cross a page boundary.

There is one little wrinkle with buffer-fill mode: a packet that
starts in the final block may wrap around into the first block.
But the user API expects all packets to be contiguous. Our solution
is to keep the very last page of the DMA buffer in reserve - if a
packet spans the gap, we copy its tail into this page.
*/

/* per-iso-session state for rawiso reception */
struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;

	enum { BUFFER_FILL_MODE,
	       PACKET_PER_BUFFER_MODE } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};

static void ohci_iso_recv_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
static void ohci_iso_recv_program(struct hpsb_iso *iso);

/*
 * Initialize rawiso reception for one hpsb_iso session: allocate the
 * ohci_iso_recv hostdata, pick the DMA mode, size the block layout
 * (nblocks / buf_stride / block_irq_interval), and allocate the DMA
 * program (one descriptor per block).  The function continues past the
 * end of this chunk.
 */
static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;	/* DMA context number — used below, past this chunk */
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
	if (!recv)
		return -ENOMEM;

	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	// enum { BUFFER_FILL_MODE,PACKET_PER_BUFFER_MODE } dma_mode;
	if (iso->irq_interval == 1 && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG(ohci->id, "ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		/* (err, sort of... 1 is always the safest value) */
		recv->block_irq_interval = iso->irq_interval / recv->nblocks;
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		     recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		    recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG(ohci->id, "ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
		sizeof(struct dma_cmd) * recv->nblocks,

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -