⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 sba_iommu.c

📁 是关于linux2.5.1的完全源码
💻 C
📖 第 1 页 / 共 5 页
字号:
/** * sba_free_consistent - free/unmap shared mem for DMA * @hwdev: instance of PCI owned by the driver that's asking. * @size:  number of bytes mapped in driver buffer. * @vaddr:  virtual address IOVA of "consistent" buffer. * @dma_handler:  IO virtual address of "consistent" buffer. * * See Documentation/DMA-mapping.txt */void sba_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,		dma_addr_t dma_handle){	sba_unmap_single(hwdev, dma_handle, size, 0);	free_pages((unsigned long) vaddr, get_order(size));}/*** Since 0 is a valid pdir_base index value, can't use that** to determine if a value is valid or not. Use a flag to indicate** the SG list entry contains a valid pdir index.*/#define PIDE_FLAG 0x1UL#ifdef DEBUG_LARGE_SG_ENTRIESint dump_run_sg = 0;#endif/** * sba_fill_pdir - write allocated SG entries into IO PDIR * @ioc: IO MMU structure which owns the pdir we are interested in. * @startsg:  list of IOVA/size pairs * @nents: number of entries in startsg list * * Take preprocessed SG list and write corresponding entries * in the IO PDIR. */static SBA_INLINE intsba_fill_pdir(	struct ioc *ioc,	struct scatterlist *startsg,	int nents){	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */	int n_mappings = 0;	u64 *pdirp = 0;	unsigned long dma_offset = 0;	dma_sg--;	while (nents-- > 0) {		int     cnt = sba_sg_len(startsg);		sba_sg_len(startsg) = 0;#ifdef DEBUG_LARGE_SG_ENTRIES		if (dump_run_sg)			printk(" %2d : %08lx/%05x %p\n",				nents,				(unsigned long) sba_sg_iova(startsg), cnt,				sba_sg_buffer(startsg)		);#else		DBG_RUN_SG(" %d : %08lx/%05x %p\n",				nents,				(unsigned long) sba_sg_iova(startsg), cnt,				sba_sg_buffer(startsg)		);#endif		/*		** Look for the start of a new DMA stream		*/		if ((u64)sba_sg_iova(startsg) & PIDE_FLAG) {			u32 pide = (u64)sba_sg_iova(startsg) & ~PIDE_FLAG;			dma_offset = (unsigned long) pide & ~IOVP_MASK;			sba_sg_iova(startsg) = 0;			dma_sg++;			sba_sg_iova(dma_sg) = (char *)(pide | ioc->ibase);			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);			n_mappings++;		}		/*		** Look for a VCONTIG chunk		*/		if (cnt) {			unsigned long vaddr = (unsigned long) sba_sg_buffer(startsg);			ASSERT(pdirp);			/* Since multiple Vcontig blocks could make up			** one DMA stream, *add* cnt to dma_len.			*/			sba_sg_len(dma_sg) += cnt;			cnt += dma_offset;			dma_offset=0;	/* only want offset on first chunk */			cnt = ROUNDUP(cnt, IOVP_SIZE);#ifdef CONFIG_PROC_FS			ioc->msg_pages += cnt >> IOVP_SHIFT;#endif			do {				sba_io_pdir_entry(pdirp, vaddr);				vaddr += IOVP_SIZE;				cnt -= IOVP_SIZE;				pdirp++;			} while (cnt > 0);		}		startsg++;	}#ifdef DEBUG_LARGE_SG_ENTRIES	dump_run_sg = 0;#endif	return(n_mappings);}/*** Two address ranges are DMA contiguous *iff* "end of prev" and** "start of next" are both on a page boundry.**** (shift left is a quick trick to mask off upper bits)*/#define DMA_CONTIG(__X, __Y) \	(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - PAGE_SHIFT)) == 0UL)/** * sba_coalesce_chunks - preprocess the SG list * @ioc: IO MMU structure which owns the pdir we are interested in. * @startsg:  list of IOVA/size pairs * @nents: number of entries in startsg list * * First pass is to walk the SG list and determine where the breaks are * in the DMA stream. Allocates PDIR entries but does not fill them. * Returns the number of DMA chunks. * * Doing the fill seperate from the coalescing/allocation keeps the * code simpler. Future enhancement could make one pass through * the sglist do both. */static SBA_INLINE intsba_coalesce_chunks( struct ioc *ioc,	struct scatterlist *startsg,	int nents){	struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */	unsigned long vcontig_len;         /* len of VCONTIG chunk */	unsigned long vcontig_end;	struct scatterlist *dma_sg;        /* next DMA stream head */	unsigned long dma_offset, dma_len; /* start/len of DMA stream */	int n_mappings = 0;	while (nents > 0) {		unsigned long vaddr = (unsigned long) (startsg->address); 		/*		** Prepare for first/next DMA stream		*/		dma_sg = vcontig_sg = startsg;		dma_len = vcontig_len = vcontig_end = sba_sg_len(startsg);		vcontig_end +=  vaddr;		dma_offset = vaddr & ~IOVP_MASK;		/* PARANOID: clear entries */		sba_sg_buffer(startsg) = sba_sg_iova(startsg);		sba_sg_iova(startsg) = 0;		sba_sg_len(startsg) = 0;		/*		** This loop terminates one iteration "early" since		** it's always looking one "ahead".		*/		while (--nents > 0) {			unsigned long vaddr;	/* tmp */			startsg++;			/* catch brokenness in SCSI layer */			ASSERT(startsg->length <= DMA_CHUNK_SIZE);			/*			** First make sure current dma stream won't			** exceed DMA_CHUNK_SIZE if we coalesce the			** next entry.			*/			if (((dma_len + dma_offset + startsg->length + ~IOVP_MASK) & IOVP_MASK) > DMA_CHUNK_SIZE)				break;			/*			** Then look for virtually contiguous blocks.			**			** append the next transaction?			*/			vaddr = (unsigned long) sba_sg_iova(startsg);			if  (vcontig_end == vaddr)			{				vcontig_len += sba_sg_len(startsg);				vcontig_end += sba_sg_len(startsg);				dma_len     += sba_sg_len(startsg);				sba_sg_buffer(startsg) = (char *)vaddr;				sba_sg_iova(startsg) = 0;				sba_sg_len(startsg) = 0;				continue;			}#ifdef DEBUG_LARGE_SG_ENTRIES			dump_run_sg = (vcontig_len > IOVP_SIZE);#endif			/*			** Not virtually contigous.			** Terminate prev chunk.			** Start a new chunk.			**			** Once we start a new VCONTIG chunk, dma_offset			** can't change. And we need the offset from the first			** chunk - not the last one. Ergo Successive chunks			** must start on page boundaries and dove tail			** with it's predecessor.			*/			sba_sg_len(vcontig_sg) = vcontig_len;			vcontig_sg = startsg;			vcontig_len = sba_sg_len(startsg);			/*			** 3) do the entries end/start on page boundaries?			**    Don't update vcontig_end until we've checked.			*/			if (DMA_CONTIG(vcontig_end, vaddr))			{				vcontig_end = vcontig_len + vaddr;				dma_len += vcontig_len;				sba_sg_buffer(startsg) = (char *)vaddr;				sba_sg_iova(startsg) = 0;				continue;			} else {				break;			}		}		/*		** End of DMA Stream		** Terminate last VCONTIG block.		** Allocate space for DMA stream.		*/		sba_sg_len(vcontig_sg) = vcontig_len;		dma_len = (dma_len + dma_offset + ~IOVP_MASK) & IOVP_MASK;		ASSERT(dma_len <= DMA_CHUNK_SIZE);		sba_sg_iova(dma_sg) = (char *) (PIDE_FLAG 			| (sba_alloc_range(ioc, dma_len) << IOVP_SHIFT)			| dma_offset);		n_mappings++;	}	return n_mappings;}/** * sba_map_sg - map Scatter/Gather list * @dev: instance of PCI owned by the driver that's asking. * @sglist:  array of buffer/length pairs * @nents:  number of entries in list * @direction:  R/W or both. * * See Documentation/DMA-mapping.txt */int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,		int direction){	struct ioc *ioc;	int coalesced, filled = 0;	unsigned long flags;#ifdef ALLOW_IOV_BYPASS	struct scatterlist *sg;#endif	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);	ioc = GET_IOC(dev);	ASSERT(ioc);#ifdef ALLOW_IOV_BYPASS	if (dev->dma_mask >= ioc->dma_mask) {		for (sg = sglist ; filled < nents ; filled++, sg++){			sba_sg_buffer(sg) = sba_sg_iova(sg);			sba_sg_iova(sg) = (char *)virt_to_phys(sba_sg_buffer(sg));		}#ifdef CONFIG_PROC_FS		spin_lock_irqsave(&ioc->res_lock, flags);		ioc->msg_bypass++;		spin_unlock_irqrestore(&ioc->res_lock, flags);#endif		return filled;	}#endif	/* Fast path single entry scatterlists. */	if (nents == 1) {		sba_sg_buffer(sglist) = sba_sg_iova(sglist);		sba_sg_iova(sglist) = (char *)sba_map_single(dev,						sba_sg_buffer(sglist),						sba_sg_len(sglist), direction);#ifdef CONFIG_PROC_FS		/*		** Should probably do some stats counting, but trying to		** be precise quickly starts wasting CPU time.		*/#endif		return 1;	}	spin_lock_irqsave(&ioc->res_lock, flags);#ifdef ASSERT_PDIR_SANITY	if (sba_check_pdir(ioc,"Check before sba_map_sg()"))	{		sba_dump_sg(ioc, sglist, nents);		panic("Check before sba_map_sg()");	}#endif#ifdef CONFIG_PROC_FS	ioc->msg_calls++;#endif	/*	** First coalesce the chunks and allocate I/O pdir space	**	** If this is one DMA stream, we can properly map using the	** correct virtual address associated with each DMA page.	** w/o this association, we wouldn't have coherent DMA!	** Access to the virtual address is what forces a two pass algorithm.	*/	coalesced = sba_coalesce_chunks(ioc, sglist, nents); 	/*	** Program the I/O Pdir	**	** map the virtual addresses to the I/O Pdir	** o dma_address will contain the pdir index	** o dma_len will contain the number of bytes to map 	** o address contains the virtual address.	*/	filled = sba_fill_pdir(ioc, sglist, nents);#ifdef ASSERT_PDIR_SANITY	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))	{		sba_dump_sg(ioc, sglist, nents);		panic("Check after sba_map_sg()\n");	}#endif	spin_unlock_irqrestore(&ioc->res_lock, flags);	ASSERT(coalesced == filled);	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);	return filled;}/** * sba_unmap_sg - unmap Scatter/Gather list * @dev: instance of PCI owned by the driver that's asking. * @sglist:  array of buffer/length pairs * @nents:  number of entries in list * @direction:  R/W or both. * * See Documentation/DMA-mapping.txt */void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,		int direction){	struct ioc *ioc;#ifdef ASSERT_PDIR_SANITY	unsigned long flags;#endif	DBG_RUN_SG("%s() START %d entries,  %p,%x\n",		__FUNCTION__, nents, sba_sg_buffer(sglist), sglist->length);	ioc = GET_IOC(dev);	ASSERT(ioc);#ifdef CONFIG_PROC_FS	ioc->usg_calls++;#endif#ifdef ASSERT_PDIR_SANITY	spin_lock_irqsave(&ioc->res_lock, flags);	sba_check_pdir(ioc,"Check before sba_unmap_sg()");	spin_unlock_irqrestore(&ioc->res_lock, flags);#endif	while (sba_sg_len(sglist) && nents--) {		sba_unmap_single(dev, (dma_addr_t)sba_sg_iova(sglist),		                 sba_sg_len(sglist), direction);#ifdef CONFIG_PROC_FS		/*		** This leaves inconsistent data in the stats, but we can't		** tell which sg lists were mapped by map_single and which		** were coalesced to a single entry.  The stats are fun,		** but speed is more important.		*/		ioc->usg_pages += (((u64)sba_sg_iova(sglist) & ~IOVP_MASK) + sba_sg_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;#endif		++sglist;	}	DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__,  nents);#ifdef ASSERT_PDIR_SANITY	spin_lock_irqsave(&ioc->res_lock, flags);	sba_check_pdir(ioc,"Check after sba_unmap_sg()");	spin_unlock_irqrestore(&ioc->res_lock, flags);#endif}unsigned longsba_dma_address (struct scatterlist *sg){	return ((unsigned long)sba_sg_iova(sg));}/****************************************************************   Initialization and claim****************************************************************/static voidsba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num){	u32 iova_space_size, iova_space_mask;	void * pdir_base;	int pdir_size, iov_order, tcnfg;	/*	** Firmware programs the maximum IOV space size into the imask reg	*/	iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;#ifdef CONFIG_IA64_HP_PROTO	if (!iova_space_size)		iova_space_size = GB(1);#endif	/*	** iov_order is always based on a 1GB IOVA space since we want to	** turn on the other half for AGP GART.	*/	iov_order = get_order(iova_space_size >> (IOVP_SHIFT-PAGE_SHIFT));	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);	DBG_INIT("%s() hpa 0x%lx IOV %dMB (%d bits) PDIR size 0x%0x\n",		__FUNCTION__, ioc->ioc_hpa, iova_space_size>>20,		iov_order + PAGE_SHIFT, ioc->pdir_size);	/* FIXME : DMA HINTs not used */	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));	ioc->pdir_base =	pdir_base = (void *) __get_free_pages(GFP_KERNEL, get_order(pdir_size));	if (NULL == pdir_base)	{		panic(__FILE__ ":%s() could not allocate I/O Page Table\n", __FUNCTION__);	}	memset(pdir_base, 0, pdir_size);	DBG_INIT("%s() pdir %p size %x hint_shift_pdir %x hint_mask_pdir %lx\n",		__FUNCTION__, pdir_base, pdir_size,		ioc->hint_shift_pdir, ioc->hint_mask_pdir);	ASSERT((((unsigned long) pdir_base) & PAGE_MASK) == (unsigned long) pdir_base);	WRITE_REG(virt_to_phys(pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -