📄 sba_iommu.c
	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() pdir %p size %x hint_shift_pdir %x hint_mask_pdir %lx\n",
		__FUNCTION__, ioc->pdir_base, ioc->pdir_size,
		ioc->hint_shift_pdir, ioc->hint_mask_pdir);

	ASSERT((((unsigned long) ioc->pdir_base) & PAGE_MASK) == (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	DBG_INIT(" base %p\n", ioc->pdir_base);

	/* build IMASK for IOC and Elroy */
	iova_space_mask = 0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);

	ioc->imask = iova_space_mask;

	DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
		__FUNCTION__, ioc->ibase, ioc->imask);

	/*
	** FIXME: Hint registers are programmed with default hint
	** values during boot, so hints should be sane even if we
	** can't reprogram them the way drivers want.
	*/
	WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);

	/*
	** Setting the upper bits makes checking for bypass addresses
	** a little faster later on.
	*/
	ioc->imask |= 0xFFFFFFFF00000000UL;

	/* Set I/O PDIR Page size to system page size */
	switch (PAGE_SHIFT) {
		case 12: tcnfg = 0; break;	/*  4K */
		case 13: tcnfg = 1; break;	/*  8K */
		case 14: tcnfg = 2; break;	/* 16K */
		case 16: tcnfg = 3; break;	/* 64K */
		default:
			panic(PFX "Unsupported system page size %d",
				1 << PAGE_SHIFT);
			break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	/*
	** Program the IOC's ibase and enable IOVA translation
	** Bit zero == enable bit.
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);

	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(ioc->ibase | (iov_order + PAGE_SHIFT), ioc->ioc_hpa + IOC_PCOM);

	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA.  Unfortunately we can't know ahead of time
	** whether GART support will actually be used; for now we
	** can just key on an AGP device found in the system.
	** We program the next pdir index after we stop w/ a key for
	** the GART code to handshake on.
	*/
	pci_for_each_dev(device)
		agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);

	if (agp_found && reserve_sba_gart) {
		DBG_INIT("%s: AGP device found, reserving half of IOVA for GART support\n",
			__FUNCTION__);
		ioc->pdir_size /= 2;
		((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
	}
#ifdef FULL_VALID_PDIR
	/*
	** Check to see if the spill page has been allocated; we don't need
	** more than one across multiple SBAs.
	*/
	if (!prefetch_spill_page) {
		char *spill_poison = "SBAIOMMU POISON";
		int poison_size = 16;
		void *poison_addr;

		prefetch_spill_page = (void *)__get_free_pages(GFP_KERNEL, get_order(IOVP_SIZE));
		if (!prefetch_spill_page)
			panic(PFX "Couldn't allocate PDIR spill page\n");

		poison_addr = prefetch_spill_page;
		for ( ; (u64)poison_addr < (u64)prefetch_spill_page + IOVP_SIZE;
		     poison_addr += poison_size)
			(void)memcpy(poison_addr, spill_poison, poison_size);

		prefetch_spill_page = (void *)virt_to_phys(prefetch_spill_page);

		DBG_INIT("%s() prefetch spill addr: %p\n", __FUNCTION__, prefetch_spill_page);
	}

	/*
	** Set all the PDIR entries valid w/ the spill page as the target
	*/
	for (index = 0; index < (ioc->pdir_size / sizeof(u64)); index++)
		((u64 *)ioc->pdir_base)[index] = (0x80000000000000FFULL | (u64)prefetch_spill_page);
#endif
}

static void __init
ioc_resource_init(struct ioc *ioc)
{
	spin_lock_init(&ioc->res_lock);

	/* resource map size dictated by pdir_size */
	ioc->res_size = ioc->pdir_size / sizeof(u64);	/* entries */
	ioc->res_size >>= 3;	/* convert bit count to byte count */
	DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);

	ioc->res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(ioc->res_size));
	if (!ioc->res_map)
		panic(PFX "Couldn't allocate resource map\n");

	memset(ioc->res_map, 0, ioc->res_size);
	/* next available IOVP - circular search */
	ioc->res_hint = (unsigned long *) ioc->res_map;

#ifdef ASSERT_PDIR_SANITY
	/* Mark first bit busy - ie no IOVA 0 */
	ioc->res_map[0] = 0x1;
	ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
#endif
#ifdef FULL_VALID_PDIR
	/* Mark the last resource used so we don't prefetch beyond IOVA space */
	ioc->res_map[ioc->res_size - 1] |= 0x80UL;	/* res_map is chars */
	ioc->pdir_base[(ioc->pdir_size / sizeof(u64)) - 1] = (0x80000000000000FFULL
							      | (u64)prefetch_spill_page);
#endif

	DBG_INIT("%s() res_map %x %p\n", __FUNCTION__,
		ioc->res_size, (void *) ioc->res_map);
}

static void __init
ioc_sac_init(struct ioc *ioc)
{
	struct pci_dev *sac = NULL;
	struct pci_controller *controller = NULL;

	/*
	 * pci_alloc_consistent() must return a DMA address which is
	 * SAC (single address cycle) addressable, so allocate a
	 * pseudo-device to enforce that.
	 */
	sac = kmalloc(sizeof(*sac), GFP_KERNEL);
	if (!sac)
		panic(PFX "Couldn't allocate struct pci_dev");
	memset(sac, 0, sizeof(*sac));

	controller = kmalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		panic(PFX "Couldn't allocate struct pci_controller");
	memset(controller, 0, sizeof(*controller));

	controller->iommu = ioc;
	sac->sysdata = controller;
	sac->dma_mask = 0xFFFFFFFFUL;
	ioc->sac_only_dev = sac;
}

static void __init
ioc_zx1_init(struct ioc *ioc)
{
	if (ioc->rev < 0x20)
		panic(PFX "IOC 2.0 or later required for IOMMU support\n");

	ioc->dma_mask = 0xFFFFFFFFFFUL;
}

typedef void (initfunc)(struct ioc *);

struct ioc_iommu {
	u32 func_id;
	char *name;
	initfunc *init;
};

static struct ioc_iommu ioc_iommu_info[] __initdata = {
	{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
	{ REO_IOC_ID, "REO" },
};

static struct ioc * __init
ioc_init(u64 hpa, void *handle)
{
	struct ioc *ioc;
	struct ioc_iommu *info;

	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return NULL;
	memset(ioc, 0, sizeof(*ioc));

	ioc->next = ioc_list;
	ioc_list = ioc;

	ioc->handle = handle;
	ioc->ioc_hpa = ioremap(hpa, 0x1000);

	ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
	ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
	ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL;	/* conservative */

	for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
		if (ioc->func_id == info->func_id) {
			ioc->name = info->name;
			if (info->init)
				(info->init)(ioc);
		}
	}

	if (!ioc->name) {
		ioc->name = kmalloc(24, GFP_KERNEL);
		if (ioc->name)
			sprintf(ioc->name, "Unknown (%04x:%04x)",
				ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
		else
			ioc->name = "Unknown";
	}

	ioc_iova_init(ioc);
	ioc_resource_init(ioc);
	ioc_sac_init(ioc);

	printk(KERN_INFO PFX
		"%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
		ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
		hpa, ioc->iov_size >> 20, ioc->ibase);

	return ioc;
}

/**************************************************************************
**
**   SBA initialization code (HW and SW)
**
**   o identify SBA chip itself
**   o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/

#ifdef CONFIG_PROC_FS
static int
sba_proc_info_one(char *buf, struct ioc *ioc)
{
	int total_pages = (int) (ioc->res_size << 3);	/* 8 bits per byte */
	unsigned long i = 0, avg = 0, min, max;

	sprintf(buf, "Hewlett Packard %s IOC rev %d.%d\n",
		ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
	sprintf(buf, "%sIO PDIR size    : %d bytes (%d entries)\n",
		buf,
		(int) ((ioc->res_size << 3) * sizeof(u64)),	/* 8 bits/byte */
		total_pages);

	sprintf(buf, "%sIO PDIR entries : %ld free  %ld used (%d%%)\n", buf,
		total_pages - ioc->used_pages, ioc->used_pages,
		(int) (ioc->used_pages * 100 / total_pages));

	sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n",
		buf, ioc->res_size, ioc->res_size << 3);	/* 8 bits per byte */

	min = max = ioc->avg_search[0];
	for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
		avg += ioc->avg_search[i];
		if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
		if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
	}
	avg /= SBA_SEARCH_SAMPLE;
	sprintf(buf, "%s  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
		buf, min, avg, max);

	sprintf(buf, "%spci_map_single(): %12ld calls  %12ld pages (avg %d/1000)\n",
		buf, ioc->msingle_calls, ioc->msingle_pages,
		(int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));
#ifdef ALLOW_IOV_BYPASS
	sprintf(buf, "%spci_map_single(): %12ld bypasses\n",
		buf, ioc->msingle_bypass);
#endif

	sprintf(buf, "%spci_unmap_single: %12ld calls  %12ld pages (avg %d/1000)\n",
		buf, ioc->usingle_calls, ioc->usingle_pages,
		(int) ((ioc->usingle_pages * 1000)/ioc->usingle_calls));
#ifdef ALLOW_IOV_BYPASS
	sprintf(buf, "%spci_unmap_single: %12ld bypasses\n",
		buf, ioc->usingle_bypass);
#endif

	sprintf(buf, "%spci_map_sg()    : %12ld calls  %12ld pages (avg %d/1000)\n",
		buf, ioc->msg_calls, ioc->msg_pages,
		(int) ((ioc->msg_pages * 1000)/ioc->msg_calls));
#ifdef ALLOW_IOV_BYPASS
	sprintf(buf, "%spci_map_sg()    : %12ld bypasses\n",
		buf, ioc->msg_bypass);
#endif

	sprintf(buf, "%spci_unmap_sg()  : %12ld calls  %12ld pages (avg %d/1000)\n",
		buf, ioc->usg_calls, ioc->usg_pages,
		(int) ((ioc->usg_pages * 1000)/ioc->usg_calls));

	return strlen(buf);
}

static int
sba_proc_info(char *buf, char **start, off_t offset, int len)
{
	struct ioc *ioc;
	char *base = buf;

	for (ioc = ioc_list; ioc; ioc = ioc->next) {
		buf += sba_proc_info_one(buf, ioc);
	}

	return strlen(base);
}

static int
sba_resource_map_one(char *buf, struct ioc *ioc)
{
	unsigned int *res_ptr = (unsigned int *)ioc->res_map;
	int i;

	buf[0] = '\0';
	for (i = 0; i < (ioc->res_size / sizeof(unsigned int)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
			strcat(buf, "\n   ");
		sprintf(buf, "%s %08x", buf, *res_ptr);
	}
	strcat(buf, "\n");

	return strlen(buf);
}

static int
sba_resource_map(char *buf, char **start, off_t offset, int len)
{
	struct ioc *ioc;
	char *base = buf;

	for (ioc = ioc_list; ioc; ioc = ioc->next) {
		buf += sba_resource_map_one(buf, ioc);
	}

	return strlen(base);
}
#endif

void
sba_enable_device(struct pci_dev *dev)
{
	acpi_handle handle, parent;
	acpi_status status;
	struct ioc *ioc;

	handle = PCI_CONTROLLER(dev)->acpi_handle;
	if (!handle)
		return;

	/*
	 * The IOC scope encloses PCI root bridges in the ACPI
	 * namespace, so work our way out until we find an IOC we
	 * claimed previously.
	 */
	do {
		for (ioc = ioc_list; ioc; ioc = ioc->next)
			if (ioc->handle == handle) {
				PCI_CONTROLLER(dev)->iommu = ioc;
				return;
			}

		status = acpi_get_parent(handle, &parent);
		handle = parent;
	} while (ACPI_SUCCESS(status));

	printk("No IOC for %s in ACPI\n", dev->slot_name);
}

static int __init
acpi_sba_ioc_add(struct acpi_device *device)
{
	struct ioc *ioc;
	acpi_status status;
	u64 hpa, length;

	/*
	 * Only SBA appears in ACPI namespace.  It encloses the PCI
	 * root bridges, and its CSR space includes the IOC function.
	 */
	status = acpi_hp_csr_space(device->handle, &hpa, &length);
	if (ACPI_FAILURE(status))
		return 1;

	ioc = ioc_init(hpa + ZX1_IOC_OFFSET, device->handle);
	if (!ioc)
		return 1;

	return 0;
}

static int __init
acpi_ioc_add(struct acpi_device *device)
{
	struct ioc *ioc;
	acpi_status status;
	u64 hpa, length;

	status = acpi_hp_csr_space(device->handle, &hpa, &length);
	if (ACPI_FAILURE(status))
		return 1;

	ioc = ioc_init(hpa, device->handle);
	if (!ioc)
		return 1;

	return 0;
}

static struct acpi_driver acpi_sba_ioc_driver = {
	name:	"IOC IOMMU Driver",
	ids:	"HWP0001",
	ops: {
		add:	acpi_sba_ioc_add,
	},
};

static struct acpi_driver acpi_ioc_driver = {
	name:	"IOC IOMMU Driver",
	ids:	"HWP0004",
	ops: {
		add:	acpi_ioc_add,
	},
};

void __init
ioc_acpi_init(void)
{
	acpi_bus_register_driver(&acpi_sba_ioc_driver);
	acpi_bus_register_driver(&acpi_ioc_driver);
}

void __init
sba_init(void)
{
	ioc_acpi_init();

#ifdef CONFIG_PROC_FS
	if (ioc_list) {
		struct proc_dir_entry *proc_mckinley_root;

		proc_mckinley_root = proc_mkdir("bus/mckinley", 0);
		create_proc_info_entry(ioc_list->name, 0, proc_mckinley_root,
				       sba_proc_info);
		create_proc_info_entry("bitmap", 0, proc_mckinley_root,
				       sba_resource_map);
	}
#endif
}

static int __init
nosbagart(char *str)
{
	reserve_sba_gart = 0;
	return 1;
}

__setup("nosbagart", nosbagart);

EXPORT_SYMBOL(sba_init);
EXPORT_SYMBOL(sba_map_single);
EXPORT_SYMBOL(sba_unmap_single);
EXPORT_SYMBOL(sba_map_sg);
EXPORT_SYMBOL(sba_unmap_sg);
EXPORT_SYMBOL(sba_dma_supported);
EXPORT_SYMBOL(sba_alloc_consistent);
EXPORT_SYMBOL(sba_free_consistent);
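
For reference, below is a minimal standalone sketch (not part of the driver) of the sizing arithmetic used above. It assumes one u64 I/O PDIR entry per I/O page and an I/O page size equal to the system page size, as the TCNFG switch in ioc_iova_init() implies, and it mirrors how ioc_resource_init() derives the resource-bitmap size from pdir_size (one bit per PDIR entry, then bits to bytes). The 4 KB page size and 1 GB IOV space are hypothetical example values, not taken from the hardware.

/*
 * Standalone sketch of the PDIR/bitmap sizing and TCNFG encoding.
 * Build with any C compiler; values are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

/* Same page-size encoding as the TCNFG switch in ioc_iova_init() */
static int tcnfg_for_page_shift(unsigned int page_shift)
{
	switch (page_shift) {
	case 12: return 0;	/*  4K */
	case 13: return 1;	/*  8K */
	case 14: return 2;	/* 16K */
	case 16: return 3;	/* 64K */
	default: return -1;	/* unsupported system page size */
	}
}

int main(void)
{
	unsigned int page_shift = 12;		/* hypothetical: 4 KB pages */
	uint64_t iov_size = 1ULL << 30;		/* hypothetical: 1 GB IOV space */

	uint64_t pdir_entries = iov_size >> page_shift;		/* one entry per I/O page */
	uint64_t pdir_size = pdir_entries * sizeof(uint64_t);	/* bytes of I/O PDIR */
	uint64_t res_size = pdir_entries >> 3;			/* one bit per entry -> bytes */

	printf("tcnfg        = %d\n", tcnfg_for_page_shift(page_shift));
	printf("pdir entries = %llu\n", (unsigned long long)pdir_entries);
	printf("pdir size    = %llu bytes\n", (unsigned long long)pdir_size);
	printf("res_map size = %llu bytes\n", (unsigned long long)res_size);
	return 0;
}

With these example values the sketch reports 262144 PDIR entries, a 2 MB PDIR, and a 32 KB resource bitmap, which is the same pdir_size -> res_size relationship ioc_resource_init() computes before rounding up to whole pages with __get_free_pages().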