sba_iommu.c
	sba_free_consistent,	/* release cacheable host mem */
	sba_map_single,
	sba_unmap_single,
	sba_map_sg,
	sba_unmap_sg,
	NULL,			/* dma_sync_single */
	NULL			/* dma_sync_sg */
};


/**************************************************************************
**
**   SBA PAT PDC support
**
**   o call pdc_pat_cell_module()
**   o store ranges in PCI "resource" structures
**
**************************************************************************/

static void
sba_get_pat_resources(struct sba_device *sba_dev)
{
#if 0
/*
** TODO/REVISIT/FIXME: support for directed ranges requires calls to
**	PAT PDC to program the SBA/LBA directed range registers...this
**	burden may fall on the LBA code since it directly supports the
**	PCI subsystem. It's not clear yet. - ggg
*/
	PAT_MOD(mod)->mod_info.mod_pages = PAT_GET_MOD_PAGES(temp);
		FIXME : ???
	PAT_MOD(mod)->mod_info.dvi = PAT_GET_DVI(temp);
		Tells where the dvi bits are located in the address.
	PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp);
		FIXME : ???
#endif
}


/**************************************************************
*
*   Initialization and claim
*
***************************************************************/

static void
sba_ioc_init(struct ioc *ioc)
{
	extern unsigned long mem_max;			/* arch.../setup.c */
	extern void lba_init_iregs(void *, u32, u32);	/* arch.../lba_pci.c */

	u32 iova_space_size, iova_space_mask;
	void *pdir_base;
	int pdir_size, iov_order;

	/*
	** Determine IOVA Space size from memory size.
	** Using "mem_max" is a kluge.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own.  Next best thing would be to guess how much DMA
	** can be outstanding based on PCI Class/sub-class.  Both
	** methods still require some "extra" to support PCI
	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
	**
	** While we have 32-bit "IOVA" space, the top two bits are used
	** for DMA hints - ergo only 30 bits max.
	*/

	/* limit IOVA space size to 1MB-1GB */
	if (mem_max < (sba_mem_ratio*1024*1024)) {
		iova_space_size = 1024*1024;
#ifdef __LP64__
	} else if (mem_max > (sba_mem_ratio*512*1024*1024)) {
		iova_space_size = 512*1024*1024;
#endif
	} else {
		iova_space_size = (u32) (mem_max/sba_mem_ratio);
	}

	/*
	** iova space must be log2() in size.
	** thus, pdir/res_map will also be log2().
	*/
	iov_order = get_order(iova_space_size >> (IOVP_SHIFT-PAGE_SHIFT));
	ASSERT(iov_order <= (30 - IOVP_SHIFT));	/* iova_space_size <= 1GB */
	ASSERT(iov_order >= (20 - IOVP_SHIFT));	/* iova_space_size >= 1MB */
	iova_space_size = 1 << (iov_order + IOVP_SHIFT);

	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);

	ASSERT(pdir_size < 4*1024*1024);	/* max pdir size < 4MB */

	/* Verify it's a power of two */
	ASSERT((1 << get_order(pdir_size)) == (pdir_size >> PAGE_SHIFT));

	DBG_INIT("%s() hpa 0x%p mem %dMB IOV %dMB (%d bits) PDIR size 0x%0x",
		__FUNCTION__, ioc->ioc_hpa, (int) (mem_max>>20),
		iova_space_size>>20, iov_order + PAGE_SHIFT, pdir_size);

	/* FIXME : DMA HINTs not used */
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	ioc->pdir_base =
	pdir_base = (void *) __get_free_pages(GFP_KERNEL, get_order(pdir_size));
	if (NULL == pdir_base) {
		panic(__FILE__ ":%s() could not allocate I/O Page Table\n", __FUNCTION__);
	}
	memset(pdir_base, 0, pdir_size);

	DBG_INIT("sba_ioc_init() pdir %p size %x hint_shift_pdir %x hint_mask_pdir %lx\n",
		pdir_base, pdir_size, ioc->hint_shift_pdir, ioc->hint_mask_pdir);

	ASSERT((((unsigned long) pdir_base) & PAGE_MASK) == (unsigned long) pdir_base);
	WRITE_REG64(virt_to_phys(pdir_base), (u64 *)(ioc->ioc_hpa+IOC_PDIR_BASE));

	DBG_INIT(" base %p\n", pdir_base);

	/* build IMASK for IOC and Elroy */
	iova_space_mask = 0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);

	/*
	** On C3000 w/512MB mem, HP-UX 10.20 reports:
	** ibase=0, imask=0xFE000000, size=0x2000000.
	*/
	ioc->ibase = IOC_IOVA_SPACE_BASE | 1;	/* bit 0 == enable bit */
	ioc->imask = iova_space_mask;		/* save it */

	DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
		__FUNCTION__, ioc->ibase, ioc->imask);

	/*
	** FIXME: Hint registers are programmed with default hint
	** values during boot, so hints should be sane even if we
	** can't reprogram them the way drivers want.
	*/

	/*
	** setup Elroy IBASE/IMASK registers as well.
	*/
	lba_init_iregs(ioc->ioc_hpa, ioc->ibase, ioc->imask);

	/*
	** Program the IOC's ibase and enable IOVA translation
	*/
	WRITE_REG32(ioc->ibase, ioc->ioc_hpa+IOC_IBASE);
	WRITE_REG32(ioc->imask, ioc->ioc_hpa+IOC_IMASK);

	/* Set I/O PDIR Page size to 4K */
	WRITE_REG32(0, ioc->ioc_hpa+IOC_TCNFG);

	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG32(0 | 31, ioc->ioc_hpa+IOC_PCOM);

	DBG_INIT("%s() DONE\n", __FUNCTION__);
}


/**************************************************************************
**
**   SBA initialization code (HW and SW)
**
**   o identify SBA chip itself
**   o initialize SBA chip modes (HardFail)
**   o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/

static void
sba_hw_init(struct sba_device *sba_dev)
{
	int i;
	int num_ioc;
	u32 ioc_ctl;

	ioc_ctl = READ_REG32(sba_dev->sba_hpa+IOC_CTRL);
	DBG_INIT("%s() hpa 0x%p ioc_ctl 0x%x ->",
		__FUNCTION__, sba_dev->sba_hpa, ioc_ctl);
	ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC);
	ASSERT(ioc_ctl & IOC_CTRL_TE);	/* astro: firmware enables this */

	WRITE_REG32(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);

#ifdef SBA_DEBUG_INIT
	ioc_ctl = READ_REG32(sba_dev->sba_hpa+IOC_CTRL);
	DBG_INIT(" 0x%x\n", ioc_ctl);
#endif

	if (IS_ASTRO(sba_dev->iodc)) {
		/* PAT_PDC (L-class) also reports the same goofy base */
		sba_dev->ioc[0].ioc_hpa = (char *) ASTRO_IOC_OFFSET;
		num_ioc = 1;
	} else {
		sba_dev->ioc[0].ioc_hpa = sba_dev->ioc[1].ioc_hpa = 0;
		num_ioc = 2;
	}

	sba_dev->num_ioc = num_ioc;
	for (i = 0; i < num_ioc; i++) {
		(unsigned long) sba_dev->ioc[i].ioc_hpa +=
			(unsigned long) sba_dev->sba_hpa + IKE_IOC_OFFSET(i);

		/*
		** Make sure the box crashes if we get any errors on a rope.
		*/
		WRITE_REG32(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE0_CTL);
		WRITE_REG32(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE1_CTL);
		WRITE_REG32(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE2_CTL);
		WRITE_REG32(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE3_CTL);
		WRITE_REG32(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE4_CTL);
		WRITE_REG32(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE5_CTL);
		WRITE_REG32(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE6_CTL);
		WRITE_REG32(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);

		/* flush out the writes */
		READ_REG32(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);

		sba_ioc_init(&(sba_dev->ioc[i]));
	}
}

static void
sba_common_init(struct sba_device *sba_dev)
{
	int i;

	/* add this one to the head of the list (order doesn't matter)
	** This will be useful for debugging - especially if we get coredumps
	*/
	sba_dev->next = sba_list;
	sba_list = sba_dev;
	sba_count++;

	for (i = 0; i < sba_dev->num_ioc; i++) {
		int res_size;
#ifdef CONFIG_DMB_TRAP
		extern void iterate_pages(unsigned long, unsigned long,
				void (*)(pte_t *, unsigned long),
				unsigned long);
		void set_data_memory_break(pte_t *, unsigned long);
#endif
		/* resource map size dictated by pdir_size */
		res_size = sba_dev->ioc[i].pdir_size/sizeof(u64);	/* entries */
		res_size >>= 3;	/* convert bit count to byte count */
		DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, res_size);

		sba_dev->ioc[i].res_size = res_size;
		sba_dev->ioc[i].res_map =
			(char *) __get_free_pages(GFP_KERNEL, get_order(res_size));

#ifdef CONFIG_DMB_TRAP
		iterate_pages(sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
#endif

		if (NULL == sba_dev->ioc[i].res_map) {
			panic(__FILE__ ":%s() could not allocate resource map\n",
				__FUNCTION__);
		}

		memset(sba_dev->ioc[i].res_map, 0, res_size);

		/* next available IOVP - circular search */
		sba_dev->ioc[i].res_hint = (unsigned long *)
				&(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);

#ifdef ASSERT_PDIR_SANITY
		/* Mark first bit busy - ie no IOVA 0 */
		sba_dev->ioc[i].res_map[0] = 0x80;
		sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
#endif

#ifdef CONFIG_DMB_TRAP
		iterate_pages(sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
		iterate_pages(sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
				set_data_memory_break, 0);
#endif

		DBG_INIT("sba_common_init() %d res_map %x %p\n",
			i, res_size, sba_dev->ioc[i].res_map);
	}

	sba_dev->sba_lock = SPIN_LOCK_UNLOCKED;
}

#ifdef CONFIG_PROC_FS
static int sba_proc_info(char *buf, char **start, off_t offset, int len)
{
	struct sba_device *sba_dev = sba_list;
/* FIXME: Multi-IOC support broken! */
	struct ioc *ioc = &sba_dev->ioc[0];
	int total_pages = (int) (ioc->res_size << 3);	/* 8 bits per byte */
	unsigned long i = 0, avg = 0, min, max;

	sprintf(buf, "%s rev %d.%d\n",
		parisc_getHWdescription(sba_dev->iodc->hw_type,
			sba_dev->iodc->hversion, sba_dev->iodc->sversion),
		(sba_dev->hw_rev & 0x7) + 1,
		(sba_dev->hw_rev & 0x18) >> 3);

	sprintf(buf, "%sIO PDIR size    : %d bytes (%d entries)\n",
		buf,
		((ioc->res_size << 3) * sizeof(u64)),	/* 8 bits per byte */
		total_pages);

	sprintf(buf, "%sIO PDIR entries : %ld free  %ld used (%d%%)\n", buf,
		total_pages - ioc->used_pages, ioc->used_pages,
		(int) (ioc->used_pages * 100 / total_pages));

	sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n",
		buf, ioc->res_size, ioc->res_size << 3);	/* 8 bits per byte */

	min = max = ioc->avg_search[0];
	for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
		avg += ioc->avg_search[i];
		if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
		if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
	}
	avg /= SBA_SEARCH_SAMPLE;
	sprintf(buf, "%s  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
		buf, min, avg, max);

	sprintf(buf, "%spci_map_single(): %8ld calls  %8ld pages (avg %d/1000)\n",
		buf, ioc->msingle_calls, ioc->msingle_pages,
		(int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));

	/* KLUGE - unmap_sg calls unmap_single for each mapped page */
	min = ioc->usingle_calls - ioc->usg_calls;
	max = ioc->usingle_pages - ioc->usg_pages;
	sprintf(buf, "%spci_unmap_single: %8ld calls  %8ld pages (avg %d/1000)\n",
		buf, min, max, (int) ((max * 1000)/min));

	sprintf(buf, "%spci_map_sg()    : %8ld calls  %8ld pages (avg %d/1000)\n",
		buf, ioc->msg_calls, ioc->msg_pages,
		(int) ((ioc->msg_pages * 1000)/ioc->msg_calls));

	sprintf(buf, "%spci_unmap_sg()  : %8ld calls  %8ld pages (avg %d/1000)\n",
		buf, ioc->usg_calls, ioc->usg_pages,
		(int) ((ioc->usg_pages * 1000)/ioc->usg_calls));

	return strlen(buf);
}

static int
sba_resource_map(char *buf, char **start, off_t offset, int len)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];
	unsigned long *res_ptr = (unsigned long *)ioc->res_map;
	int i;

	for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
			strcat(buf, "\n   ");
		sprintf(buf, "%s %08lx", buf, *res_ptr);
	}
	strcat(buf, "\n");
	return strlen(buf);
}
#endif


/*
** Determine if sba should claim this chip (return 0) or not (return 1).
** If so, initialize the chip and tell other partners in crime they
** have work to do.
*/
int
sba_driver_callback(struct hp_device *d, struct pa_iodc_driver *dri)
{
	struct sba_device *sba_dev;
	u32 func_class;
	int i;

	if (IS_ASTRO(d)) {
		static char astro_rev[] = "Astro ?.?";
		/* Read HW Rev First */
		func_class = READ_REG32(d->hpa);

		astro_rev[6] = '1' + (char) (func_class & 0x7);
		astro_rev[8] = '0' + (char) ((func_class & 0x18) >> 3);
		dri->version = astro_rev;
	} else {
		static char ike_rev[] = "Ike rev ?";

		/* Read HW Rev First */
		func_class = READ_REG32(d->hpa + SBA_FCLASS);

		ike_rev[8] = '0' + (char) (func_class & 0xff);
		dri->version = ike_rev;
	}

	printk("%s found %s at 0x%p\n", dri->name, dri->version, d->hpa);

	sba_dev = kmalloc(sizeof(struct sba_device), GFP_KERNEL);
	if (NULL == sba_dev) {
		printk(MODULE_NAME " - couldn't alloc sba_device\n");
		return(1);
	}
	memset(sba_dev, 0, sizeof(struct sba_device));

	for (i = 0; i < MAX_IOC; i++)
		spin_lock_init(&(sba_dev->ioc[i].res_lock));

	sba_dev->hw_rev = func_class;
	sba_dev->iodc = d;
	sba_dev->sba_hpa = d->hpa;	/* faster access */

	sba_get_pat_resources(sba_dev);
	sba_hw_init(sba_dev);
	sba_common_init(sba_dev);

	hppa_dma_ops = &sba_ops;

#ifdef CONFIG_PROC_FS
	if (IS_ASTRO(d)) {
		create_proc_info_entry("Astro", 0, proc_runway_root, sba_proc_info);
	} else {
		create_proc_info_entry("Ike", 0, proc_runway_root, sba_proc_info);
	}
	create_proc_info_entry("bitmap", 0, proc_runway_root, sba_resource_map);
#endif
	return 0;
}
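
/*
** A minimal standalone sketch (not part of the driver) of the sizing
** arithmetic used by sba_ioc_init()/sba_common_init() above: an IOVA
** space needs one u64 I/O PDIR entry per I/O page, and the resource
** bitmap needs one bit per PDIR entry.  It assumes 4KB I/O pages
** (IOVP_SHIFT == 12, matching the IOC_TCNFG setting above) and 8-byte
** PDIR entries; the names below are local to the sketch and can be
** compiled on their own to check the numbers.
*/
#include <stdio.h>
#include <stdint.h>

#define SKETCH_IOVP_SHIFT 12				/* assumed: 4KB I/O page */
#define SKETCH_IOVP_SIZE  (1UL << SKETCH_IOVP_SHIFT)

int main(void)
{
	uint32_t iova_space_size = 64 * 1024 * 1024;	/* example: 64MB IOVA space */

	/* one u64 PDIR entry per I/O page, as in sba_ioc_init() */
	unsigned long pdir_entries = iova_space_size / SKETCH_IOVP_SIZE;
	unsigned long pdir_size    = pdir_entries * sizeof(uint64_t);

	/* one bit per PDIR entry, converted to bytes, as in sba_common_init() */
	unsigned long res_size = pdir_entries >> 3;

	printf("IOVA space : %u MB\n", iova_space_size >> 20);
	printf("I/O PDIR   : %lu entries, %lu KB\n", pdir_entries, pdir_size >> 10);
	printf("res_map    : %lu bytes\n", res_size);
	return 0;
}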