iommu.c
		(__pa(iommu->ptab) + n_pte_pages * IOMMU_PAGE_SIZE * i);
		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
	}

	/* ensure that the STEs have updated */
	mb();

	/* setup interrupts for the iommu. */
	reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
		 reg & ~IOC_IO_ExcpStat_V);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
		 IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);

	virq = irq_create_mapping(NULL,
			IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
	BUG_ON(virq == NO_IRQ);

	ret = request_irq(virq, ioc_interrupt, IRQF_DISABLED,
			  iommu->name, iommu);
	BUG_ON(ret);

	/* set the IOC segment table origin register (and turn on the iommu) */
	reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
	out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
	in_be64(iommu->xlate_regs + IOC_IOST_Origin);

	/* turn on IO translation */
	reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
	out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
}

#if 0 /* Unused for now */
static struct iommu_window *find_window(struct cbe_iommu *iommu,
		unsigned long offset, unsigned long size)
{
	struct iommu_window *window;

	/* todo: check for overlapping (but not equal) windows */
	list_for_each_entry(window, &(iommu->windows), list) {
		if (window->offset == offset && window->size == size)
			return window;
	}

	return NULL;
}
#endif

static struct iommu_window * __init
cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
			unsigned long offset, unsigned long size,
			unsigned long pte_offset)
{
	struct iommu_window *window;
	const unsigned int *ioid;

	ioid = of_get_property(np, "ioid", NULL);
	if (ioid == NULL)
		printk(KERN_WARNING "iommu: missing ioid for %s using 0\n",
		       np->full_name);

	window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
	BUG_ON(window == NULL);

	window->offset = offset;
	window->size = size;
	window->ioid = ioid ? *ioid : 0;
	window->iommu = iommu;
	window->pte_offset = pte_offset;

	window->table.it_blocksize = 16;
	window->table.it_base = (unsigned long)iommu->ptab;
	window->table.it_index = iommu->nid;
	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) +
		window->pte_offset;
	window->table.it_size = size >> IOMMU_PAGE_SHIFT;

	iommu_init_table(&window->table, iommu->nid);

	pr_debug("\tioid      %d\n", window->ioid);
	pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
	pr_debug("\tbase      0x%016lx\n", window->table.it_base);
	pr_debug("\toffset    0x%lx\n", window->table.it_offset);
	pr_debug("\tsize      %ld\n", window->table.it_size);

	list_add(&window->list, &iommu->windows);

	if (offset != 0)
		return window;

	/* We need to map and reserve the first IOMMU page since it's used
	 * by the spider workaround. In theory, we only need to do that when
	 * running on spider but it doesn't really matter.
	 *
	 * This code also assumes that we have a window that starts at 0,
	 * which is the case on all spider based blades.
	 */
	__set_bit(0, window->table.it_map);
	tce_build_cell(&window->table, window->table.it_offset, 1,
		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE);
	window->table.it_hint = window->table.it_blocksize;

	return window;
}

static struct cbe_iommu *cell_iommu_for_node(int nid)
{
	int i;

	for (i = 0; i < cbe_nr_iommus; i++)
		if (iommus[i].nid == nid)
			return &iommus[i];
	return NULL;
}

static void cell_dma_dev_setup(struct device *dev)
{
	struct iommu_window *window;
	struct cbe_iommu *iommu;
	struct dev_archdata *archdata = &dev->archdata;

	/* If we run without iommu, no need to do anything */
	if (get_pci_dma_ops() == &dma_direct_ops)
		return;

	/* Current implementation uses the first window available in that
	 * node's iommu. We -might- do something smarter later though it may
	 * never be necessary.
	 */
	iommu = cell_iommu_for_node(archdata->numa_node);
	if (iommu == NULL || list_empty(&iommu->windows)) {
		printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
		       archdata->of_node ? archdata->of_node->full_name : "?",
		       archdata->numa_node);
		return;
	}
	window = list_entry(iommu->windows.next, struct iommu_window, list);

	archdata->dma_data = &window->table;
}

static void cell_pci_dma_dev_setup(struct pci_dev *dev)
{
	cell_dma_dev_setup(&dev->dev);
}

static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct device *dev = data;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	/* We use the PCI DMA ops */
	dev->archdata.dma_ops = get_pci_dma_ops();

	cell_dma_dev_setup(dev);

	return 0;
}

static struct notifier_block cell_of_bus_notifier = {
	.notifier_call = cell_of_bus_notify
};

static int __init cell_iommu_get_window(struct device_node *np,
					unsigned long *base,
					unsigned long *size)
{
	const void *dma_window;
	unsigned long index;

	/* Use ibm,dma-window if available, else hard code ! */
	dma_window = of_get_property(np, "ibm,dma-window", NULL);
	if (dma_window == NULL) {
		*base = 0;
		*size = 0x80000000u;
		return -ENODEV;
	}

	of_parse_dma_window(np, dma_window, &index, base, size);
	return 0;
}

static void __init cell_iommu_init_one(struct device_node *np,
				       unsigned long offset)
{
	struct cbe_iommu *iommu;
	unsigned long base, size;
	int nid, i;

	/* Get node ID */
	nid = of_node_to_nid(np);
	if (nid < 0) {
		printk(KERN_ERR "iommu: failed to get node for %s\n",
		       np->full_name);
		return;
	}
	pr_debug("iommu: setting up iommu for node %d (%s)\n",
		 nid, np->full_name);

	/* XXX todo: If we can have multiple windows on the same IOMMU, which
	 * isn't the case today, we probably want to check here whether the
	 * iommu for that node is already set up.
	 * However, there might be issues with getting the size right, so
	 * let's ignore that for now. We might want to completely get rid of
	 * the multiple window support since the cell iommu supports per-page
	 * ioids.
	 */
	if (cbe_nr_iommus >= NR_IOMMUS) {
		printk(KERN_ERR "iommu: too many IOMMUs detected ! (%s)\n",
		       np->full_name);
		return;
	}

	/* Init base fields */
	i = cbe_nr_iommus++;
	iommu = &iommus[i];
	iommu->stab = NULL;
	iommu->nid = nid;
	snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
	INIT_LIST_HEAD(&iommu->windows);

	/* Obtain a window for it */
	cell_iommu_get_window(np, &base, &size);

	pr_debug("\ttranslating window 0x%lx...0x%lx\n",
		 base, base + size - 1);

	/* Initialize the hardware */
	cell_iommu_setup_hardware(iommu, size);

	/* Setup the iommu_table */
	cell_iommu_setup_window(iommu, np, base, size,
				offset >> IOMMU_PAGE_SHIFT);
}

static void __init cell_disable_iommus(void)
{
	int node;
	unsigned long base, val;
	void __iomem *xregs, *cregs;

	/* Make sure IOC translation is disabled on all nodes */
	for_each_online_node(node) {
		if (cell_iommu_find_ioc(node, &base))
			continue;
		xregs = ioremap(base, IOC_Reg_Size);
		if (xregs == NULL)
			continue;
		cregs = xregs + IOC_IOCmd_Offset;

		pr_debug("iommu: cleaning up iommu on node %d\n", node);

		out_be64(xregs + IOC_IOST_Origin, 0);
		(void)in_be64(xregs + IOC_IOST_Origin);
		val = in_be64(cregs + IOC_IOCmd_Cfg);
		val &= ~IOC_IOCmd_Cfg_TE;
		out_be64(cregs + IOC_IOCmd_Cfg, val);
		(void)in_be64(cregs + IOC_IOCmd_Cfg);

		iounmap(xregs);
	}
}

static int __init cell_iommu_init_disabled(void)
{
	struct device_node *np = NULL;
	unsigned long base = 0, size;

	/* When no iommu is present, we use direct DMA ops */
	set_pci_dma_ops(&dma_direct_ops);

	/* First make sure all IOC translation is turned off */
	cell_disable_iommus();

	/* If we have no Axon, we set up the spider DMA magic offset */
	if (of_find_node_by_name(NULL, "axon") == NULL)
		dma_direct_offset = SPIDER_DMA_OFFSET;

	/* Now we need to check to see where the memory is mapped
	 * in PCI space. We assume that all busses use the same dma
	 * window, which is always the case so far on Cell, thus we
	 * pick up the first pci-internal node we can find and check
	 * the DMA window from there.
	 */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		if (cell_iommu_get_window(np, &base, &size) == 0)
			break;
	}

	if (np == NULL) {
		for_each_node_by_name(np, "pci-internal") {
			if (np->parent == NULL || np->parent->parent != NULL)
				continue;
			if (cell_iommu_get_window(np, &base, &size) == 0)
				break;
		}
	}
	of_node_put(np);

	/* If we found a DMA window, we check if it's big enough to enclose
	 * all of physical memory. If not, we force enable IOMMU.
	 */
	if (np && size < lmb_end_of_DRAM()) {
		printk(KERN_WARNING "iommu: force-enabled, dma window"
		       " (%ldMB) smaller than total memory (%ldMB)\n",
		       size >> 20, lmb_end_of_DRAM() >> 20);
		return -ENODEV;
	}

	dma_direct_offset += base;

	printk("iommu: disabled, direct DMA offset is 0x%lx\n",
	       dma_direct_offset);

	return 0;
}

static int __init cell_iommu_init(void)
{
	struct device_node *np;

	if (!machine_is(cell))
		return -ENODEV;

	/* If IOMMU is disabled or we have little enough RAM to not need
	 * to enable it, we set up a direct mapping.
	 *
	 * Note: should we make sure we have the IOMMU actually disabled ?
	 */
	if (iommu_is_off ||
	    (!iommu_force_on && lmb_end_of_DRAM() <= 0x80000000ull))
		if (cell_iommu_init_disabled() == 0)
			goto bail;

	/* Setup various ppc_md. callbacks */
	ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
	ppc_md.tce_build = tce_build_cell;
	ppc_md.tce_free = tce_free_cell;

	/* Create an iommu for each /axon node. */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, 0);
	}

	/* Create an iommu for each toplevel /pci-internal node for
	 * old hardware/firmware.
	 */
	for_each_node_by_name(np, "pci-internal") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
	}

	/* Setup default PCI iommu ops */
	set_pci_dma_ops(&dma_iommu_ops);

 bail:
	/* Register callbacks on OF platform device addition/removal
	 * to handle linking them to the right DMA operations.
	 */
	bus_register_notifier(&of_platform_bus_type, &cell_of_bus_notifier);

	return 0;
}
arch_initcall(cell_iommu_init);
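
A note on the window arithmetic in cell_iommu_setup_window() above: it converts a byte-addressed DMA window into IOMMU-page units for the iommu_table. Below is a minimal standalone sketch of that conversion, assuming 4 KiB IOMMU pages (IOMMU_PAGE_SHIFT = 12, which matches this kernel generation); the window base and size are made-up example values, not taken from real firmware.

/* Illustrative only: how a DMA window maps to iommu_table fields. */
#include <stdio.h>

#define IOMMU_PAGE_SHIFT 12UL	/* 4 KiB IOMMU pages (assumed) */

int main(void)
{
	unsigned long offset     = 0x80000000UL; /* hypothetical window base */
	unsigned long size       = 0x20000000UL; /* hypothetical 512 MB window */
	unsigned long pte_offset = 0;            /* no extra PTE skew */

	/* same arithmetic as cell_iommu_setup_window() */
	unsigned long it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
	unsigned long it_size   = size >> IOMMU_PAGE_SHIFT;

	printf("it_offset = 0x%lx\n", it_offset); /* first page index */
	printf("it_size   = %lu pages\n", it_size);
	return 0;
}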
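
Drivers never call into this file directly. Once cell_iommu_init() has installed dma_iommu_ops and cell_dma_dev_setup() has pointed dev->archdata.dma_data at a window's iommu_table, everything flows through the generic DMA API. A hedged sketch of a consumer follows, using only stock kernel calls of this era (including the single-argument dma_mapping_error() form of that generation); the function name and buffer size are illustrative.

/* Sketch of an ordinary PCI driver mapping a buffer through the
 * dma_iommu_ops that cell_iommu_init() installed.
 */
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>

static int example_map(struct pci_dev *pdev)	/* hypothetical helper */
{
	void *buf;
	dma_addr_t handle;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Lands in dma_iommu_ops, which walks the iommu_table that
	 * cell_dma_dev_setup() stored in dev->archdata.dma_data. */
	handle = dma_map_single(&pdev->dev, buf, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(handle)) {
		kfree(buf);
		return -EIO;
	}

	/* ... hand 'handle' to the device for DMA ... */

	dma_unmap_single(&pdev->dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
	kfree(buf);
	return 0;
}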
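
Finally, in the disabled path of cell_iommu_init_disabled(), DMA addresses degenerate to physical address plus a constant. A small sketch of that arithmetic, assuming SPIDER_DMA_OFFSET is 0x80000000 (an assumption made for illustration) and a made-up buffer address:

/* Illustrative only: bus address computation with the IOMMU off. */
#include <stdio.h>

#define SPIDER_DMA_OFFSET 0x80000000UL	/* assumed value for illustration */

int main(void)
{
	unsigned long dma_direct_offset = 0;
	unsigned long window_base = 0;      /* from cell_iommu_get_window() */
	unsigned long paddr = 0x01234000UL; /* example buffer physical addr */

	/* no Axon bridge found: apply the spider workaround offset */
	dma_direct_offset = SPIDER_DMA_OFFSET;
	dma_direct_offset += window_base;

	/* with translation disabled, bus address = paddr + constant */
	printf("bus address = 0x%lx\n", paddr + dma_direct_offset);
	return 0;
}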