
sba_iommu.c

Part of the linux-2.4.29 operating system source code
Language: C
		mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;
		DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
		while(res_ptr < res_end)
		{
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			ASSERT(0 != mask);
			if(0 == ((*res_ptr) & mask)) {
				*res_ptr |= mask;     /* mark resources busy! */
				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
				pide <<= 3;	/* convert to bit address */
				pide += bitshiftcnt;
				break;
			}
			mask >>= o;
			bitshiftcnt += o;
			if (0 == mask) {
				mask = RESMAP_MASK(bits_wanted);
				bitshiftcnt=0;
				res_ptr++;
			}
		}
		/* look in the same word on the next pass */
		ioc->res_bitshift = bitshiftcnt + bits_wanted;
	}

	/* wrapped ? */
	if (res_end <= res_ptr) {
		ioc->res_hint = (unsigned long *) ioc->res_map;
		ioc->res_bitshift = 0;
	} else {
		ioc->res_hint = res_ptr;
	}
	return (pide);
}


/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits and then mark those bits
 * in the resource bit map.
 */
static int
sba_alloc_range(struct ioc *ioc, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef CONFIG_PROC_FS
	unsigned long cr_start = mfctl(16);
#endif
	unsigned long pide;

	ASSERT(pages_needed);
	ASSERT((pages_needed * IOVP_SIZE) <= DMA_CHUNK_SIZE);
	ASSERT(pages_needed <= BITS_PER_LONG);
	ASSERT(0 == (size & ~IOVP_MASK));

	/*
	** "seek and ye shall find"...praying never hurts either...
	** ggg sacrifices another 710 to the computer gods.
	*/

	pide = sba_search_bitmap(ioc, pages_needed);
	if (pide >= (ioc->res_size << 3)) {
		pide = sba_search_bitmap(ioc, pages_needed);
		if (pide >= (ioc->res_size << 3))
			panic(__FILE__ ": I/O MMU @ %lx is out of mapping resources\n", ioc->ioc_hpa);
	}

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__FUNCTION__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift );

#ifdef CONFIG_PROC_FS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ?  -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;

	ioc->used_pages += pages_needed;
#endif

	return (pide);
}
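

/*
** Illustrative sketch, not part of the original file: how the pide
** (page index) returned above becomes an IO virtual address. Each bit
** in ioc->res_map covers one IO page, so callers (see sba_map_single()
** below) just shift by IOVP_SHIFT. Assuming IOVP_SHIFT == 12 (4 KB IO
** pages) and a hypothetical return value of 0x42:
**
**	pide = sba_alloc_range(ioc, 3 * IOVP_SIZE);	// say this returns 0x42
**	iovp = (dma_addr_t) pide << IOVP_SHIFT;		// 0x42 << 12 == 0x42000
**
** Bits 0x42..0x44 of the resource map are now marked busy, reserving
** IO pages 0x42000, 0x43000 and 0x44000 for this mapping.
*/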


/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);

	int bits_not_wanted = size >> IOVP_SHIFT;

	/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
	unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));

	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
		__FUNCTION__, (uint) iova, size,
		bits_not_wanted, m, pide, res_ptr, *res_ptr);

#ifdef CONFIG_PROC_FS
	ioc->used_pages -= bits_not_wanted;
#endif

	ASSERT(m != 0);
	ASSERT(bits_not_wanted);
	ASSERT((bits_not_wanted * IOVP_SIZE) <= DMA_CHUNK_SIZE);
	ASSERT(bits_not_wanted <= BITS_PER_LONG);
	ASSERT((*res_ptr & m) == m); /* verify same bits are set */

	*res_ptr &= ~m;
}


/**************************************************************
*
*   "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)

typedef unsigned long space_t;
#define KERNEL_SPACE 0

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @sid: process Space ID
 * @vba: Virtual CPU address of buffer to map
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg2) and space id, (sid, arg1)
 * sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
 * pdir_ptr (arg0). Each IO Pdir entry consists of 8 bytes as
 * shown below (MSB == bit 0):
 *
 *  0                    19                                 51   55       63
 * +-+---------------------+----------------------------------+----+--------+
 * |V|        U            |            PPN[43:12]            | U  |   VI   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 *  V  == Valid Bit
 *  U  == Unused
 * PPN == Physical Page Number
 * VI  == Virtual Index (aka Coherent Index)
 *
 * The physical address fields are filled with the results of the LPA
 * instruction.  The virtual index field is filled with the results of
 * the LCI (Load Coherence Index) instruction.  The 8 bits used for
 * the virtual index are bits 12:19 of the value returned by LCI.
 *
 * We need to pre-swap the bytes since PCX-W is Big Endian.
 */

void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba)
{
	u64 pa; /* physical address */
	register unsigned ci; /* coherent index */

	/* We currently only support kernel addresses.
	 * fdc instr below will need to reload sr1 with KERNEL_SPACE
	 * once we try to support direct DMA to user space.
	 */
	ASSERT(sid == KERNEL_SPACE);

	pa = virt_to_phys(vba);
	pa &= ~4095ULL;			/* clear out offset bits */

	mtsp(sid,1);
	asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	pa |= (ci >> 12) & 0xff;  /* move CI (8 bits) into lowest byte */

	pa |= 0x8000000000000000ULL;	/* set "valid" bit */
	*pdir_ptr = cpu_to_le64(pa);	/* swap and store into I/O Pdir */

	/*
	 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
	 * (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	if (ioc_needs_fdc) {
		asm volatile("fdc 0(%%sr1,%0)\n\tsync" : : "r" (pdir_ptr));
	}
}
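

/*
** Illustrative sketch, not part of the original file: the entry built
** by sba_io_pdir_entry() above, worked through with made-up numbers.
** Assume virt_to_phys(vba) == 0x12345678 and the lci instruction
** returned ci == 0xab000:
**
**	pa  = 0x12345678 & ~4095ULL;		// 0x12345000: page-aligned PPN
**	pa |= (0xab000 >> 12) & 0xff;		// 0x123450ab: CI in low byte
**	pa |= 0x8000000000000000ULL;		// valid bit (bit 0, MSB first)
**	*pdir_ptr = cpu_to_le64(pa);		// store 0x80000000123450ab swapped
**
** This is why sba_mark_invalid() below clears the byte at +7 and the
** sanity checks test for 0x80 there: after the little-endian store,
** that byte holds the valid bit.
*/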


/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Marks the IO PDIR entry(ies) as Invalid and invalidates the
 * corresponding IO TLB entry. The Ike PCOM (Purge Command Register)
 * is used to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc,iova);

	/* Even though this is a big-endian machine, the entries
	** in the iopdir are little endian. That's why we clear the byte
	** at +7 instead of at +0.
	*/
	int off = PDIR_INDEX(iovp)*sizeof(u64)+7;

	/* Must be non-zero and rounded up */
	ASSERT(byte_cnt > 0);
	ASSERT(0 == (byte_cnt & ~IOVP_MASK));

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set */
	if (0x80 != (((u8 *) ioc->pdir_base)[off])) {
		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt <= IOVP_SIZE)
	{
		ASSERT( off < ioc->pdir_size);

		iovp |= IOVP_SHIFT;     /* set "size" field for PCOM */

		/*
		** clear I/O PDIR entry "valid" bit
		** Do NOT clear the rest - save it for debugging.
		** We should only clear bits that have previously
		** been enabled.
		*/
		((u8 *)(ioc->pdir_base))[off] = 0;
	} else {
		u32 t = get_order(byte_cnt) + PAGE_SHIFT;

		iovp |= t;
		ASSERT(t <= 31);   /* 2GB! Max value of "size" field */

		do {
			/* verify this pdir entry is enabled */
			ASSERT(0x80 == (((u8 *) ioc->pdir_base)[off] & 0x80));

			/* clear I/O Pdir entry "valid" bit first */
			((u8 *)(ioc->pdir_base))[off] = 0;

			off += sizeof(u64);
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > 0);
	}

	WRITE_REG(iovp, ioc->ioc_hpa+IOC_PCOM);
}


/**
 * sba_dma_supported - PCI driver can query DMA support
 * @dev: instance of PCI owned by the driver that's asking
 * @mask:  number of address bits this PCI device can handle
 *
 * See Documentation/DMA-mapping.txt
 */
static int
sba_dma_supported( struct pci_dev *dev, u64 mask)
{
	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return(0);
	}

	/* only support 32-bit PCI devices - no DAC support (yet) */
	return((int) (mask == 0xffffffffUL));
}
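

/*
** Illustrative sketch, not part of the original file: the PCOM "size"
** encoding used by sba_mark_invalid() above. The low bits of the value
** written to IOC_PCOM carry log2 of the range being purged, so purging
** a hypothetical 32 KB mapping at iovp 0x40000 (assuming
** PAGE_SHIFT == 12) works out to:
**
**	t = get_order(0x8000) + PAGE_SHIFT;	// 3 + 12 == 15, i.e. 2^15 bytes
**	WRITE_REG(0x40000 | t, ioc->ioc_hpa + IOC_PCOM);
**
** 0x40000 is 32 KB aligned, satisfying the alignment rule quoted in
** the comment block above.
*/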


/**
 * sba_map_single - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr:  driver buffer to map.
 * @size:  number of bytes to map in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
static dma_addr_t
sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;

	ASSERT(size > 0);
	ASSERT(size <= DMA_CHUNK_SIZE);

	ASSERT(dev->sysdata);
	ioc = GET_IOC(dev);
	ASSERT(ioc);

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = (size + offset + ~IOVP_MASK) & IOVP_MASK;

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check before sba_map_single()");
#endif

#ifdef CONFIG_PROC_FS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
	pide = sba_alloc_range(ioc, size);
	iovp = (dma_addr_t) pide << IOVP_SHIFT;

	DBG_RUN("%s() 0x%p -> 0x%lx",
		__FUNCTION__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
		sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr);

		DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
			pdir_start,
			(u8) (((u8 *) pdir_start)[7]),
			(u8) (((u8 *) pdir_start)[6]),
			(u8) (((u8 *) pdir_start)[5]),
			(u8) (((u8 *) pdir_start)[4]),
			(u8) (((u8 *) pdir_start)[3]),
			(u8) (((u8 *) pdir_start)[2]),
			(u8) (((u8 *) pdir_start)[1]),
			(u8) (((u8 *) pdir_start)[0])
			);

		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
		pdir_start++;
	}
	/* form complete address */
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check after sba_map_single()");
#endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
}


/**
 * sba_unmap_single - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
static void
sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size, int direction)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	ASSERT(dev->sysdata);
	ioc = GET_IOC(dev);
	ASSERT(ioc);

	offset = iova & ~IOVP_MASK;

	DBG_RUN("%s() iovp 0x%lx/%x\n",
		__FUNCTION__, (long) iova, size);

	iova ^= offset;        /* clear offset bits */
	size += offset;
	size = ROUNDUP(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef CONFIG_PROC_FS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

#if DELAYED_RESOURCE_CNT > 0
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
		int cnt = ioc->saved_cnt;
		while (cnt--) {
			sba_mark_invalid(ioc, d->iova, d->size);
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;
		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	}
#else /* DELAYED_RESOURCE_CNT == 0 */
	sba_mark_invalid(ioc, iova, size);
	sba_free_range(ioc, iova, size);
	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
	** For Astro based systems this isn't a big deal WRT performance.
	** As long as 2.4 kernels copyin/copyout data from/to userspace,
	** we don't need the syncdma. The issue here is I/O MMU cachelines
	** are *not* coherent in all cases.  May be hwrev dependent.
	** Need to investigate more.
	asm volatile("syncdma");
	*/
}
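

/*
** Illustrative sketch, not part of the original file: the driver-side
** call sequence these two routines serve. The names "dev", "buf" and
** "len" are hypothetical; on a 2.4 PA-RISC kernel the generic pci_*
** wrappers reach sba_map_single()/sba_unmap_single() through the DMA
** ops vector this driver registers.
**
**	dma_addr_t handle = pci_map_single(dev, buf, len, PCI_DMA_TODEVICE);
**	// ... point the device at "handle" and let it DMA ...
**	pci_unmap_single(dev, handle, len, PCI_DMA_TODEVICE);
*/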
asm volatile("syncdma");		*/}/** * sba_alloc_consistent - allocate/map shared mem for DMA * @hwdev: instance of PCI owned by the driver that's asking. * @size:  number of bytes mapped in driver buffer. * @dma_handle:  IOVA of new buffer. * * See Documentation/DMA-mapping.txt */static void *sba_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle){	void *ret;	if (!hwdev) {		/* only support PCI */		*dma_handle = 0;		return 0;	}        ret = (void *) __get_free_pages(GFP_ATOMIC, get_order(size));	if (ret) {		memset(ret, 0, size);		*dma_handle = sba_map_single(hwdev, ret, size, 0);	}	return ret;}/** * sba_free_consistent - free/unmap shared mem for DMA * @hwdev: instance of PCI owned by the driver that's asking. * @size:  number of bytes mapped in driver buffer. * @vaddr:  virtual address IOVA of "consistent" buffer. * @dma_handler:  IO virtual address of "consistent" buffer. * * See Documentation/DMA-mapping.txt */static voidsba_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle){	sba_unmap_single(hwdev, dma_handle, size, 0);	free_pages((unsigned long) vaddr, get_order(size));}/*** Since 0 is a valid pdir_base index value, can't use that** to determine if a value is valid or not. Use a flag to indicate** the SG list entry contains a valid pdir index.*/#define PIDE_FLAG 0x80000000UL#ifdef DEBUG_LARGE_SG_ENTRIESint dump_run_sg = 0;#endif
