📄 sba_iommu.c
		startsg++;
	}
}
#endif /* ASSERT_PDIR_SANITY */


/*
** One time initialization to let the world know the LBA was found.
** This is the only routine which is NOT static.
** Must be called exactly once before pci_init().
*/
void __init
sba_init(void)
{
	sba_list = (struct sba_device *) NULL;
	sba_count = 0;

#ifdef DEBUG_SBA_INIT
	sba_dump_ranges((char *) 0xFED00000L);
#endif

	register_driver(sba_drivers_for);
}


/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset) | ((hint_reg)<<(ioc->hint_shift_pdir)))
#define SBA_IOVP(ioc,iova) ((iova) & ioc->hint_mask_pdir)

/* FIXME : review these macros to verify correctness and usage */
#define PDIR_INDEX(iovp)	((iovp)>>IOVP_SHIFT)
#define MKIOVP(dma_hint,pide)	(dma_addr_t)((long)(dma_hint) | ((long)(pide) << IOVP_SHIFT))
#define MKIOVA(iovp,offset)	(dma_addr_t)((long)iovp | (long)offset)

#define RESMAP_MASK(n)		(~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK		(sizeof(unsigned long) - 1)

/*
** Perf optimizations:
** o search for log2(size) bits at a time.
**
** Search should use register width as "stride" to search the res_map.
*/

static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
{
	unsigned long *res_ptr = ioc->res_hint;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long pide = ~0UL;

	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
	ASSERT(res_ptr < res_end);

	if (bits_wanted > (BITS_PER_LONG/2)) {
		/* Search word at a time - no mask needed */
		for (; res_ptr < res_end; ++res_ptr) {
			if (*res_ptr == 0) {
				*res_ptr = RESMAP_MASK(bits_wanted);
				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
				pide <<= 3;	/* convert to bit address */
				ASSERT(0 != pide);
				break;
			}
		}
		/* point to the next word on next pass */
		res_ptr++;
		ioc->res_bitshift = 0;
	} else {
		/*
		** Search the resource bit map on well-aligned values.
		** "o" is the alignment.
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
		unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
		uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
		unsigned long mask;

		if (bitshiftcnt >= BITS_PER_LONG) {
			bitshiftcnt = 0;
			res_ptr++;
		}
		mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;

		DBG_RES("sba_search_bitmap() o %ld %p", o, res_ptr);
		while (res_ptr < res_end) {
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			ASSERT(0 != mask);
			if (0 == ((*res_ptr) & mask)) {
				*res_ptr |= mask;	/* mark resources busy! */
				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
				pide <<= 3;	/* convert to bit address */
				pide += bitshiftcnt;
				ASSERT(0 != pide);
				break;
			}
			mask >>= o;
			bitshiftcnt += o;
			if (0 == mask) {
				mask = RESMAP_MASK(bits_wanted);
				bitshiftcnt = 0;
				res_ptr++;
			}
		}
		/* look in the same word on the next pass */
		ioc->res_bitshift = bitshiftcnt + bits_wanted;
	}

	/* wrapped ? */
	ioc->res_hint = (res_end == res_ptr) ? (unsigned long *) ioc->res_map : res_ptr;
	return (pide);
}
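/*
** Worked example (illustrative, assuming a 64-bit kernel with
** BITS_PER_LONG == 64):
**
**   RESMAP_MASK(4) == ~0UL << 60 == 0xf000000000000000UL, i.e. the four
**   most-significant bits of a resource word.  With bits_wanted == 4,
**   get_order(4 << PAGE_SHIFT) == 2, so o == 4 and only 4-bit-aligned
**   positions are tried; that keeps the range naturally aligned so the
**   unmap path can invalidate it with the SBA purge hardware.
**
**   If a free run is found in the word 16 bytes past res_map with
**   bitshiftcnt == 8, then pide = (16 << 3) + 8 = 136, and the caller
**   turns that into an IOVA page with pide << IOVP_SHIFT.
*/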

static int
sba_alloc_range(struct ioc *ioc, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef CONFIG_PROC_FS
	unsigned long cr_start = mfctl(16);
#endif
	unsigned long pide;

	ASSERT(pages_needed);
	ASSERT((pages_needed * IOVP_SIZE) < DMA_CHUNK_SIZE);
	ASSERT(pages_needed < BITS_PER_LONG);
	ASSERT(0 == (size & ~IOVP_MASK));

	/*
	** "seek and ye shall find"...praying never hurts either...
	** ggg sacrifices another 710 to the computer gods.
	*/

	pide = sba_search_bitmap(ioc, pages_needed);
	if (pide >= (ioc->res_size << 3)) {
		pide = sba_search_bitmap(ioc, pages_needed);
		if (pide >= (ioc->res_size << 3))
			panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", ioc->ioc_hpa);
	}

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if (0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("sba_alloc_range(%x) %d -> %lx hint %x/%x\n",
		size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift);

#ifdef CONFIG_PROC_FS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;

	ioc->used_pages += pages_needed;
#endif

	return (pide);
}


/*
** clear bits in the ioc's resource map
*/
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);

	int bits_not_wanted = size >> IOVP_SHIFT;

	/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
	unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));

	DBG_RES("sba_free_range( ,%x,%x) %x/%lx %x %p %lx\n",
		(uint) iova, size, bits_not_wanted, m, pide, res_ptr, *res_ptr);

#ifdef CONFIG_PROC_FS
	ioc->used_pages -= bits_not_wanted;
#endif

	ASSERT(m != 0);
	ASSERT(bits_not_wanted);
	ASSERT((bits_not_wanted * IOVP_SIZE) < DMA_CHUNK_SIZE);
	ASSERT(bits_not_wanted < BITS_PER_LONG);
	ASSERT((*res_ptr & m) == m);	/* verify same bits are set */
	*res_ptr &= ~m;
}
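/*
** Worked example (illustrative, continuing the numbers used above and
** assuming BITS_PER_LONG == 64): for a 4-page mapping whose IOVP yields
** pide == 136,
**
**       ridx                      = 136 >> 3 = 17
**       ridx & ~RESMAP_IDX_MASK   = 16     (word-aligned byte offset)
**       pide & (BITS_PER_LONG-1)  = 8
**       m                         = RESMAP_MASK(4) >> 8
**
** which is the same word and the same mask that sba_search_bitmap() set
** when the range was allocated, so "*res_ptr &= ~m" releases exactly the
** bits that were marked busy.
*/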

/****************************************************************
**
**	"Dynamic DMA Mapping" support (aka "Coherent I/O")
**
*****************************************************************/

#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)

typedef unsigned long space_t;
#define KERNEL_SPACE 0

/*
* SBA Mapping Routine
*
* Given a virtual address (vba, arg2) and space id, (sid, arg1)
* sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
* pdir_ptr (arg0). Each IO Pdir entry consists of 8 bytes as
* shown below (MSB == bit 0):
*
*  0                    19                                 51   55       63
* +-+---------------------+----------------------------------+----+--------+
* |V|         U           |            PPN[43:12]            | U  |   VI   |
* +-+---------------------+----------------------------------+----+--------+
*
*  V  == Valid Bit
*  U  == Unused
* PPN == Physical Page Number
* VI  == Virtual Index (aka Coherent Index)
*
* The physical address fields are filled with the results of the LPA
* instruction.  The virtual index field is filled with the results of
* the LCI (Load Coherence Index) instruction.  The 8 bits used for the
* virtual index are bits 12:19 of the value returned by LCI.
*
* We need to pre-swap the bytes since PCX-W is Big Endian.
*/

void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba)
{
	u64 pa;			/* physical address */
	register unsigned ci;	/* coherent index */

	/* We currently only support kernel addresses */
	ASSERT(sid == 0);
	ASSERT(((unsigned long) vba & 0xc0000000UL) == 0xc0000000UL);

	pa = virt_to_phys(vba);
	pa &= ~4095ULL;			/* clear out offset bits */

	mtsp(sid, 1);
	asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	pa |= (ci >> 12) & 0xff;	/* move CI (8 bits) into lowest byte */

	pa |= 0x8000000000000000ULL;	/* set "valid" bit */
	*pdir_ptr = cpu_to_le64(pa);	/* swap and store into I/O Pdir */
}
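/*
** Worked example (illustrative): suppose the page at physical address
** 0x12345000 is mapped and the LCI result puts 0xab in the low byte of
** the coherence-index field.  Before the swap,
**
**       pa = 0x80000000123450abULL
**
** and cpu_to_le64() stores it byte-reversed, so in memory the entry
** reads ab 50 34 12 00 00 00 80.  That is why the "valid" bit is tested
** and cleared at byte offset +7 of each entry elsewhere in this file
** (sba_map_single(), sba_mark_invalid()), even though V is bit 0 (the
** MSB) of the 64-bit value.
*/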

/***********************************************************
 * The Ike PCOM (Purge Command Register) is used to purge
 * stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2.
 ***********************************************************/
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc, iova);

	/* Even though this is a big-endian machine, the entries
	** in the iopdir are swapped. That's why we clear the byte
	** at +7 instead of at +0.
	*/
	int off = PDIR_INDEX(iovp)*sizeof(u64) + 7;

	/* Must be non-zero and rounded up */
	ASSERT(byte_cnt > 0);
	ASSERT(0 == (byte_cnt & ~IOVP_MASK));

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set */
	if (0x80 != (((u8 *) ioc->pdir_base)[off])) {
		sba_dump_pdir_entry(ioc, "sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt <= IOVP_SIZE) {
		ASSERT(off < ioc->pdir_size);

		iovp |= IOVP_SHIFT;	/* set "size" field for PCOM */

		/*
		** clear I/O PDIR entry "valid" bit
		** Do NOT clear the rest - save it for debugging.
		** We should only clear bits that have previously
		** been enabled.
		*/
		((u8 *)(ioc->pdir_base))[off] = 0;
	} else {
		u32 t = get_order(byte_cnt) + PAGE_SHIFT;

		iovp |= t;
		ASSERT(t <= 31);	/* 2GB! Max value of "size" field */

		do {
			/* verify this pdir entry is enabled */
			ASSERT(0x80 == (((u8 *) ioc->pdir_base)[off] & 0x80));
			/* clear I/O Pdir entry "valid" bit first */
			((u8 *)(ioc->pdir_base))[off] = 0;
			off += sizeof(u64);
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > 0);
	}

	WRITE_REG32(iovp, ioc->ioc_hpa + IOC_PCOM);
}

static int
sba_dma_supported(struct pci_dev *dev, u64 mask)
{
	if (dev == NULL) {
		printk(MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return (0);
	}

	dev->dma_mask = mask;	/* save it */

	/* only support PCI devices */
	return ((int) (mask >= 0xffffffff));
}


/*
** map_single returns a fully formed IOVA
*/
static dma_addr_t
sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
{
	struct ioc *ioc = &sba_list->ioc[0];	/* FIXME : see Multi-IOC below */
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;

	ASSERT(size > 0);

	/* save offset bits */
	offset = ((dma_addr_t) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = (size + offset + ~IOVP_MASK) & IOVP_MASK;

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc, "Check before sba_map_single()");
#endif

#ifdef CONFIG_PROC_FS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
	pide = sba_alloc_range(ioc, size);
	iovp = (dma_addr_t) pide << IOVP_SHIFT;

	DBG_RUN("sba_map_single() 0x%p -> 0x%lx", addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		ASSERT(((u8 *)pdir_start)[7] == 0);	/* verify availability */

		sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr);

		DBG_RUN("	pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
			pdir_start,
			(u8) (((u8 *) pdir_start)[7]),
			(u8) (((u8 *) pdir_start)[6]),
			(u8) (((u8 *) pdir_start)[5]),
			(u8) (((u8 *) pdir_start)[4]),
			(u8) (((u8 *) pdir_start)[3]),
			(u8) (((u8 *) pdir_start)[2]),
			(u8) (((u8 *) pdir_start)[1]),
			(u8) (((u8 *) pdir_start)[0])
			);

		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
		pdir_start++;
	}
	/* form complete address */
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc, "Check after sba_map_single()");
#endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
}


static void
sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size, int direction)
{
#ifdef FIXME
/* Multi-IOC (ie N-class) : need to lookup IOC from dev
** o If we can't know about lba PCI data structs, that eliminates ->sysdata.
** o walking up pcidev->parent dead ends at elroy too
** o leaves hashing dev->bus->number into some lookup.
**   (may only work for N-class)
** o use (struct pci_hba) and put fields in there for DMA.
**   (ioc and per device dma_hint.)
**
** Last one seems the clearest and most promising.
** sba_dma_supported() fill in those fields when the driver queries
** the system for support.
*/