
sba_iommu.c

Linux kernel source code
Language: C
Page 1 of 4
 *
 * See Documentation/DMA-mapping.txt
 */
void *
sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
{
	struct ioc *ioc;
	void *addr;

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef CONFIG_NUMA
	{
		struct page *page;
		page = alloc_pages_node(ioc->node == MAX_NUMNODES ?
		                        numa_node_id() : ioc->node, flags,
		                        get_order(size));

		if (unlikely(!page))
			return NULL;

		addr = page_address(page);
	}
#else
	addr = (void *) __get_free_pages(flags, get_order(size));
#endif
	if (unlikely(!addr))
		return NULL;

	memset(addr, 0, size);
	*dma_handle = virt_to_phys(addr);

#ifdef ALLOW_IOV_BYPASS
	ASSERT(dev->coherent_dma_mask);
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
	if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
		DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
		           dev->coherent_dma_mask, *dma_handle);

		return addr;
	}
#endif

	/*
	 * If device can't bypass or bypass is disabled, pass the 32bit fake
	 * device to map single to get an iova mapping.
	 */
	*dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);

	return addr;
}


/**
 * sba_free_coherent - free/unmap shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address IOVA of "consistent" buffer.
 * @dma_handle:  IO virtual address of "consistent" buffer.
 *
 * See Documentation/DMA-mapping.txt
 */
void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	sba_unmap_single(dev, dma_handle, size, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}


/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x1UL

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif

/**
 * sba_fill_pdir - write allocated SG entries into IO PDIR
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * Take preprocessed SG list and write corresponding entries
 * in the IO PDIR.
 */
static SBA_INLINE int
sba_fill_pdir(
	struct ioc *ioc,
	struct scatterlist *startsg,
	int nents)
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	int n_mappings = 0;
	u64 *pdirp = NULL;
	unsigned long dma_offset = 0;

	while (nents-- > 0) {
		int cnt = startsg->dma_length;
		startsg->dma_length = 0;

#ifdef DEBUG_LARGE_SG_ENTRIES
		if (dump_run_sg)
			printk(" %2d : %08lx/%05x %p\n",
				nents, startsg->dma_address, cnt,
				sba_sg_address(startsg));
#else
		DBG_RUN_SG(" %d : %08lx/%05x %p\n",
				nents, startsg->dma_address, cnt,
				sba_sg_address(startsg));
#endif
		/*
		** Look for the start of a new DMA stream
		*/
		if (startsg->dma_address & PIDE_FLAG) {
			u32 pide = startsg->dma_address & ~PIDE_FLAG;
			dma_offset = (unsigned long) pide & ~iovp_mask;
			startsg->dma_address = 0;
			if (n_mappings)
				dma_sg = sg_next(dma_sg);
			dma_sg->dma_address = pide | ioc->ibase;
			pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
			n_mappings++;
		}

		/*
		** Look for a VCONTIG chunk
		*/
		if (cnt) {
			unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
			ASSERT(pdirp);

			/* Since multiple Vcontig blocks could make up
			** one DMA stream, *add* cnt to dma_len.
			*/
			dma_sg->dma_length += cnt;
			cnt += dma_offset;
			dma_offset = 0;	/* only want offset on first chunk */
			cnt = ROUNDUP(cnt, iovp_size);
			do {
				sba_io_pdir_entry(pdirp, vaddr);
				vaddr += iovp_size;
				cnt -= iovp_size;
				pdirp++;
			} while (cnt > 0);
		}
		startsg = sg_next(startsg);
	}
	/* force pdir update */
	wmb();

#ifdef DEBUG_LARGE_SG_ENTRIES
	dump_run_sg = 0;
#endif
	return(n_mappings);
}


/*
** Two address ranges are DMA contiguous *iff* "end of prev" and
** "start of next" are both on an IOV page boundary.
**
** (shift left is a quick trick to mask off upper bits)
*/
#define DMA_CONTIG(__X, __Y) \
	(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)


/**
 * sba_coalesce_chunks - preprocess the SG list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * First pass is to walk the SG list and determine where the breaks are
 * in the DMA stream. Allocates PDIR entries but does not fill them.
 * Returns the number of DMA chunks.
 *
 * Doing the fill separate from the coalescing/allocation keeps the
 * code simpler. Future enhancement could make one pass through
 * the sglist do both.
 */
static SBA_INLINE int
sba_coalesce_chunks(struct ioc *ioc,
	struct scatterlist *startsg,
	int nents)
{
	struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
	unsigned long vcontig_len;         /* len of VCONTIG chunk */
	unsigned long vcontig_end;
	struct scatterlist *dma_sg;        /* next DMA stream head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	int n_mappings = 0;

	while (nents > 0) {
		unsigned long vaddr = (unsigned long) sba_sg_address(startsg);

		/*
		** Prepare for first/next DMA stream
		*/
		dma_sg = vcontig_sg = startsg;
		dma_len = vcontig_len = vcontig_end = startsg->length;
		vcontig_end += vaddr;
		dma_offset = vaddr & ~iovp_mask;

		/* PARANOID: clear entries */
		startsg->dma_address = startsg->dma_length = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long vaddr;	/* tmp */

			startsg = sg_next(startsg);

			/* PARANOID */
			startsg->dma_address = startsg->dma_length = 0;

			/* catch brokenness in SCSI layer */
			ASSERT(startsg->length <= DMA_CHUNK_SIZE);

			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/
			if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
			    > DMA_CHUNK_SIZE)
				break;

			/*
			** Then look for virtually contiguous blocks.
			**
			** append the next transaction?
			*/
			vaddr = (unsigned long) sba_sg_address(startsg);
			if (vcontig_end == vaddr)
			{
				vcontig_len += startsg->length;
				vcontig_end += startsg->length;
				dma_len     += startsg->length;
				continue;
			}

#ifdef DEBUG_LARGE_SG_ENTRIES
			dump_run_sg = (vcontig_len > iovp_size);
#endif

			/*
			** Not virtually contiguous.
			** Terminate prev chunk.
			** Start a new chunk.
			**
			** Once we start a new VCONTIG chunk, dma_offset
			** can't change. And we need the offset from the first
			** chunk - not the last one. Ergo successive chunks
			** must start on page boundaries and dovetail
			** with their predecessors.
			*/
			vcontig_sg->dma_length = vcontig_len;
			vcontig_sg = startsg;
			vcontig_len = startsg->length;

			/*
			** 3) do the entries end/start on page boundaries?
			**    Don't update vcontig_end until we've checked.
			*/
			if (DMA_CONTIG(vcontig_end, vaddr))
			{
				vcontig_end = vcontig_len + vaddr;
				dma_len += vcontig_len;
				continue;
			} else {
				break;
			}
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		vcontig_sg->dma_length = vcontig_len;
		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
		ASSERT(dma_len <= DMA_CHUNK_SIZE);
		dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
			| (sba_alloc_range(ioc, dma_len) << iovp_shift)
			| dma_offset);
		n_mappings++;
	}

	return n_mappings;
}


/**
 * sba_map_sg - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @dir:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS_SG
	struct scatterlist *sg;
#endif

	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS_SG
	ASSERT(to_pci_dev(dev)->dma_mask);
	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
		for_each_sg(sglist, sg, nents, filled) {
			sg->dma_length = sg->length;
			sg->dma_address = virt_to_phys(sba_sg_address(sg));
		}
		return filled;
	}
#endif
	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sglist->dma_length = sglist->length;
		sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir);
		return 1;
	}

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc, "Check before sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg()");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	prefetch(ioc->res_hint);

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = sba_coalesce_chunks(ioc, sglist, nents);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o address contains the virtual address.
	*/
	filled = sba_fill_pdir(ioc, sglist, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc, "Check after sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg()\n");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	ASSERT(coalesced == filled);
	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);

	return filled;
}


/**
 * sba_unmap_sg - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @dir:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
{
#ifdef ASSERT_PDIR_SANITY
	struct ioc *ioc;
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries,  %p,%x\n",
		__FUNCTION__, nents, sba_sg_address(sglist), sglist->length);

#ifdef ASSERT_PDIR_SANITY
	ioc = GET_IOC(dev);
	ASSERT(ioc);

	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc, "Check before sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (nents && sglist->dma_length) {
		sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
		sglist = sg_next(sglist);
		nents--;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc, "Check after sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
}


/**************************************************************
*
*   Initialization and claim
*
***************************************************************/

static void __init
ioc_iova_init(struct ioc *ioc)
{
	int tcnfg;
	int agp_found = 0;
	struct pci_dev *device = NULL;
#ifdef FULL_VALID_PDIR
	unsigned long index;
#endif

	/*
	** Firmware programs the base and size of a "safe IOVA space"
	** (one that doesn't overlap memory or LMMIO space) in the
	** IBASE and IMASK registers.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
	ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;

	ioc->iov_size = ~ioc->imask + 1;

	DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
		__FUNCTION__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
		ioc->iov_size >> 20);

	switch (iovp_size) {
		case  4*1024: tcnfg = 0; break;
		case  8*1024: tcnfg = 1; break;
		case 16*1024: tcnfg = 2; break;
		case 64*1024: tcnfg = 3; break;
		default:
			panic(PFX "Unsupported IOTLB page size %ldK",
				iovp_size >> 10);
			break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic(PFX "Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__,
		iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);

	ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA.  Unfortunately we can't know ahead of time
	** whether GART support will actually be used; for now we
	** can just key on an AGP device found in the system.
	** We program the next pdir index after we stop w/ a key for
	** the GART code to handshake on.
	*/
	for_each_pci_dev(device)
		agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);

	if (agp_found && reserve_sba_gart) {
		printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n",
		      ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
		ioc->pdir_size /= 2;
		((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
	}
#ifdef FULL_VALID_PDIR
	/*
	** Check to see if the spill page has been allocated, we don't need more than
	** one across multiple SBAs.
	*/
	if (!prefetch_spill_page) {
		char *spill_poison = "SBAIOMMU POISON";
		int poison_size = 16;
		void *poison_addr, *addr;

		addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
		if (!addr)
			panic(PFX "Couldn't allocate PDIR spill page\n");

		poison_addr = addr;
		for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
			memcpy(poison_addr, spill_poison, poison_size);

		prefetch_spill_page = virt_to_phys(addr);

		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page);
	}
	/*
	** Set all the PDIR entries valid w/ the spill page as the target
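
The entry points in this excerpt, the coherent allocators (sba_alloc_coherent()/sba_free_coherent()) and the scatter-gather mappers (sba_map_sg()/sba_unmap_sg()), are not called by drivers directly; they back the generic DMA mapping API described in Documentation/DMA-mapping.txt. The following is a minimal sketch of how a driver would reach the coherent path on an HP zx1/sx1000 system; the helper names, the PCI device pointer, and the 4 KiB size are hypothetical placeholders, not part of this file.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/pci.h>

/* Hypothetical consumer of the coherent-DMA path.  On this platform,
** dma_alloc_coherent() ends up in sba_alloc_coherent(), which either
** returns the buffer for IOV bypass or maps it through the IO PDIR
** with sba_map_single(). */
static void *example_alloc_ring(struct pci_dev *pdev, dma_addr_t *bus_addr)
{
	return dma_alloc_coherent(&pdev->dev, 4096, bus_addr, GFP_KERNEL);
}

static void example_free_ring(struct pci_dev *pdev, void *ring, dma_addr_t bus_addr)
{
	/* Ends up in sba_free_coherent(): unmap the IOVA, then free the pages. */
	dma_free_coherent(&pdev->dev, 4096, ring, bus_addr);
}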
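
Likewise, a streaming scatter-gather transfer reaches sba_map_sg() through dma_map_sg(). Below is a minimal sketch, assuming an already-populated scatterlist and a DMA_FROM_DEVICE transfer; the function name and parameters are illustrative only. Each returned entry carries the IOVA in sg_dma_address() and the coalesced stream length in sg_dma_len(), which is exactly what sba_coalesce_chunks() and sba_fill_pdir() compute above.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* Hypothetical caller: map a scatterlist, program the device, unmap. */
static int example_map_and_run(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, count;

	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);	/* serviced by sba_map_sg() here */
	if (!count)
		return -ENOMEM;

	for_each_sg(sgl, sg, count, i) {
		/* hand sg_dma_address(sg) / sg_dma_len(sg) to the device here */
	}

	/* ... wait for the transfer to complete, then tear the mapping down;
	** note dma_unmap_sg() takes the original nents, not the returned count */
	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	return 0;
}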
