
📄 sba_iommu.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 4
 * Pluto is basically identical, supports fewer physical address bits:
 *
 *  0                       23                              51   55       63
 * +-+------------------------+-------------------------------+----+--------+
 * |V|        U               |         PPN[39:12]            | U  |   VI   |
 * +-+------------------------+-------------------------------+----+--------+
 *
 *  V  == Valid Bit  (Most Significant Bit is bit 0)
 *  U  == Unused
 * PPN == Physical Page Number
 * VI  == Virtual Index (aka Coherent Index)
 *
 * LPA instruction output is put into PPN field.
 * LCI (Load Coherence Index) instruction provides the "VI" bits.
 *
 * We pre-swap the bytes since PCX-W is Big Endian and the
 * IOMMU uses little endian for the pdir.
 */
void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		  unsigned long hint)
{
	u64 pa; /* physical address */
	register unsigned ci; /* coherent index */

	pa = virt_to_phys(vba);
	pa &= IOVP_MASK;

	mtsp(sid,1);
	asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	pa |= (ci >> 12) & 0xff;  /* move CI (8 bits) into lowest byte */

	pa |= SBA_PDIR_VALID_BIT;	/* set "valid" bit */
	*pdir_ptr = cpu_to_le64(pa);	/* swap and store into I/O Pdir */

	/*
	 * If the PDC_MODEL capabilities have the Non-coherent IO-PDIR bit set
	 * (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	if (ioc_needs_fdc)
		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
}
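
/*
** Worked example for the pdir entry built above (hypothetical values,
** just to illustrate the math): if the page backing "vba" lives at
** physical address 0x12345000 and the lci result gives
** (ci >> 12) & 0xff == 0x5e, the CPU-endian entry is
** SBA_PDIR_VALID_BIT | 0x1234505e, with the valid bit in the most
** significant bit (bit 0 in the HP numbering of the diagram above).
** After cpu_to_le64() that valid bit lands in byte 7 of the stored
** entry, which is exactly the byte sba_mark_invalid() below checks
** against 0x80 and later clears.
*/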

/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Mark the IO PDIR entry(ies) as invalid and invalidate the
 * corresponding IO TLB entry. The Ike PCOM (Purge Command Register)
 * is used to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc,iova);
	u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set.
	**
	** Even though this is a big-endian machine, the entries
	** in the iopdir are little endian. That's why we look at
	** the byte at +7 instead of at +0.
	*/
	if (0x80 != (((u8 *) pdir_ptr)[7])) {
		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt > IOVP_SIZE)
	{
#if 0
		unsigned long entries_per_cacheline = ioc_needs_fdc ?
				L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
					- (unsigned long) pdir_ptr;
				: 262144;
#endif

		/* set "size" field for PCOM */
		iovp |= get_order(byte_cnt) + PAGE_SHIFT;

		do {
			/* clear I/O Pdir entry "valid" bit first */
			((u8 *) pdir_ptr)[7] = 0;
			if (ioc_needs_fdc) {
				asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
#if 0
				entries_per_cacheline = L1_CACHE_SHIFT - 3;
#endif
			}
			pdir_ptr++;
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > IOVP_SIZE);
	} else
		iovp |= IOVP_SHIFT;     /* set "size" field for PCOM */

	/*
	** clear I/O PDIR entry "valid" bit.
	** We have to R/M/W the cacheline regardless how much of the
	** pdir entry that we clobber.
	** The rest of the entry would be useful for debugging if we
	** could dump core on HPMC.
	*/
	((u8 *) pdir_ptr)[7] = 0;
	if (ioc_needs_fdc)
		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));

	WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
}
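
/*
** Example of the PCOM "size" encoding used above (assuming the usual
** 4KB IOVP page, i.e. IOVP_SHIFT == PAGE_SHIFT == 12): purging a 16KB
** mapping gives get_order(16384) == 2, so the value OR'd into the low
** bits of iovp is 2 + 12 == 14 == log2(16384). A single-page purge just
** uses IOVP_SHIFT (12). Either way the purge size written to PCOM stays
** a power of two, as the hardware requires.
*/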

/**
 * sba_dma_supported - PCI driver can query DMA support
 * @dev: instance of PCI owned by the driver that's asking
 * @mask:  number of address bits this PCI device can handle
 *
 * See Documentation/DMA-mapping.txt
 */
static int sba_dma_supported( struct device *dev, u64 mask)
{
	struct ioc *ioc;

	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return(0);
	}

	/* Documentation/DMA-mapping.txt tells drivers to try 64-bit first,
	 * then fall back to 32-bit if that fails.
	 * We are just "encouraging" 32-bit DMA masks here since we can
	 * never allow IOMMU bypass unless we add special support for ZX1.
	 */
	if (mask > ~0U)
		return 0;

	ioc = GET_IOC(dev);

	/*
	 * check if mask is >= the current max IO Virt Address
	 * The max IO Virt address will *always* be < 30 bits.
	 */
	return((int)(mask >= (ioc->ibase - 1 +
			(ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
}


/**
 * sba_map_single - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr:  driver buffer to map.
 * @size:  number of bytes to map in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
static dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size,
	       enum dma_data_direction direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;

	ioc = GET_IOC(dev);

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = (size + offset + ~IOVP_MASK) & IOVP_MASK;

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check before sba_map_single()");
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
	pide = sba_alloc_range(ioc, size);
	iovp = (dma_addr_t) pide << IOVP_SHIFT;

	DBG_RUN("%s() 0x%p -> 0x%lx\n",
		__FUNCTION__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);

		DBG_RUN("	pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
			pdir_start,
			(u8) (((u8 *) pdir_start)[7]),
			(u8) (((u8 *) pdir_start)[6]),
			(u8) (((u8 *) pdir_start)[5]),
			(u8) (((u8 *) pdir_start)[4]),
			(u8) (((u8 *) pdir_start)[3]),
			(u8) (((u8 *) pdir_start)[2]),
			(u8) (((u8 *) pdir_start)[1]),
			(u8) (((u8 *) pdir_start)[0])
			);

		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
		pdir_start++;
	}

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check after sba_map_single()");
#endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
}
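
/*
** Worked example for the offset/size math in sba_map_single() above
** (assuming IOVP_SIZE is the usual 4KB page, so ~IOVP_MASK == 0xfff):
** mapping 0x100 bytes starting at an address whose low 12 bits are
** 0x345 keeps offset == 0x345 and rounds size up to
** (0x100 + 0x345 + 0xfff) & IOVP_MASK == 0x1000, i.e. one pdir entry.
** SBA_IOVA() then splices the saved offset back into the returned DMA
** address, so the device sees the same low 12 bits as the CPU does.
*/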

/**
 * sba_unmap_single - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
static void
sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
		 enum dma_data_direction direction)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	DBG_RUN("%s() iovp 0x%lx/%x\n", __FUNCTION__, (long) iova, size);

	ioc = GET_IOC(dev);
	offset = iova & ~IOVP_MASK;
	iova ^= offset;        /* clear offset bits */
	size += offset;
	size = ALIGN(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef SBA_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	sba_mark_invalid(ioc, iova, size);

#if DELAYED_RESOURCE_CNT > 0
	/* Delaying when we re-use an IO Pdir entry reduces the number
	 * of MMIO reads needed to flush writes to the PCOM register.
	 */
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
		int cnt = ioc->saved_cnt;
		while (cnt--) {
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;

		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	}
#else /* DELAYED_RESOURCE_CNT == 0 */
	sba_free_range(ioc, iova, size);

	/* If fdc's were issued, force fdc's to be visible now */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
	** For Astro based systems this isn't a big deal WRT performance.
	** As long as 2.4 kernels copyin/copyout data from/to userspace,
	** we don't need the syncdma. The issue here is I/O MMU cachelines
	** are *not* coherent in all cases.  May be hwrev dependent.
	** Need to investigate more.
	asm volatile("syncdma");
	*/
}


/**
 * sba_alloc_consistent - allocate/map shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 *
 * See Documentation/DMA-mapping.txt
 */
static void *sba_alloc_consistent(struct device *hwdev, size_t size,
					dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	if (!hwdev) {
		/* only support PCI */
		*dma_handle = 0;
		return NULL;
	}

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = sba_map_single(hwdev, ret, size, 0);
	}

	return ret;
}


/**
 * sba_free_consistent - free/unmap shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address IOVA of "consistent" buffer.
 * @dma_handle:  IO virtual address of "consistent" buffer.
 *
 * See Documentation/DMA-mapping.txt
 */
static void
sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
		    dma_addr_t dma_handle)
{
	sba_unmap_single(hwdev, dma_handle, size, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}


/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef SBA_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif


/**
 * sba_map_sg - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;

	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);

	ioc = GET_IOC(dev);

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = sba_map_single(dev,
						(void *)sg_virt_addr(sglist),
						sglist->length, direction);
		sg_dma_len(sglist)     = sglist->length;
		return 1;
	}

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg()");
	}
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = iommu_coalesce_chunks(ioc, sglist, nents, sba_alloc_range);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o address contains the virtual address.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg()\n");
	}
#endif

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);

	return filled;
}


/**
 * sba_unmap_sg - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	     enum dma_data_direction direction)
{
	struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries,  %p,%x\n",
		__FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);

	ioc = GET_IOC(dev);

#ifdef SBA_COLLECT_STATS
	ioc->usg_calls++;
#endif

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check before sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (sg_dma_len(sglist) && nents--) {

		sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
#ifdef SBA_COLLECT_STATS
		ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
		ioc->usingle_calls--;	/* kluge since call is unmap_sg() */
#endif
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__,  nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
}


static struct hppa_dma_ops sba_ops = {
	.dma_supported =	sba_dma_supported,
	.alloc_consistent =	sba_alloc_consistent,
