📄 intel-iommu.c
"setup_dom0_devices-%x:%x:%x- context not 0\n", bus, dev, func); } } } #endif for ( bus = 0; bus < 256; bus++ ) { for ( dev = 0; dev < 32; dev++ ) { for ( func = 0; func < 8; func++ ) { l = read_pci_config(bus, dev, func, PCI_VENDOR_ID); /* some broken boards return 0 or ~0 if a slot is empty: */ if ( (l == 0xffffffff) || (l == 0x00000000) || (l == 0x0000ffff) || (l == 0xffff0000) ) continue; pdev = xmalloc(struct pci_dev); pdev->bus = bus; pdev->devfn = PCI_DEVFN(dev, func); list_add_tail(&pdev->list, &hd->pdev_list); drhd = acpi_find_matched_drhd_unit(pdev); ret = domain_context_mapping(dom0, drhd->iommu, pdev); if ( ret != 0 ) gdprintk(XENLOG_ERR VTDPREFIX, "domain_context_mapping failed\n"); } } } for_each_pdev ( dom0, pdev ) dprintk(XENLOG_INFO VTDPREFIX, "setup_dom0_devices: bdf = %x:%x:%x\n", pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));}void clear_fault_bit(struct iommu *iommu){ u64 val; val = dmar_readq( iommu->reg, cap_fault_reg_offset(dmar_readq(iommu->reg,DMAR_CAP_REG))+0x8); dmar_writeq( iommu->reg, cap_fault_reg_offset(dmar_readq(iommu->reg,DMAR_CAP_REG))+8, val); dmar_writel(iommu->reg, DMAR_FSTS_REG, DMA_FSTS_PFO);}static int init_vtd_hw(void){ struct acpi_drhd_unit *drhd; struct iommu *iommu; int ret; for_each_drhd_unit ( drhd ) { iommu = drhd->iommu; ret = iommu_set_root_entry(iommu); if ( ret ) { gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: set root entry failed\n"); return -EIO; } } return 0;}static int enable_vtd_translation(void){ struct acpi_drhd_unit *drhd; struct iommu *iommu; int vector = 0; for_each_drhd_unit ( drhd ) { iommu = drhd->iommu; vector = iommu_set_interrupt(iommu); dma_msi_data_init(iommu, vector); dma_msi_addr_init(iommu, cpu_physical_id(first_cpu(cpu_online_map))); iommu->vector = vector; clear_fault_bit(iommu); if ( iommu_enable_translation(iommu) ) return -EIO; } return 0;}static void setup_dom0_rmrr(void){ struct acpi_rmrr_unit *rmrr; struct pci_dev *pdev; int ret; for_each_rmrr_device ( rmrr, pdev ) ret = iommu_prepare_rmrr_dev(dom0, rmrr, pdev); if ( ret ) gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: mapping reserved region failed\n"); end_for_each_rmrr_device ( rmrr, pdev )}int iommu_setup(void){ struct hvm_iommu *hd = domain_hvm_iommu(dom0); struct acpi_drhd_unit *drhd; struct iommu *iommu; u64 i; if ( !vtd_enabled ) return 0; spin_lock_init(&domid_bitmap_lock); INIT_LIST_HEAD(&hd->pdev_list); /* start from scratch */ iommu_flush_all(); /* setup clflush size */ x86_clflush_size = ((cpuid_ebx(1) >> 8) & 0xff) * 8; /* Allocate IO page directory page for the domain. */ drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list); iommu = drhd->iommu; /* Allocate domain id bitmap, and set bit 0 as reserved */ domid_bitmap_size = cap_ndoms(iommu->cap); domid_bitmap = xmalloc_bytes(domid_bitmap_size / 8); if ( domid_bitmap == NULL ) goto error; memset(domid_bitmap, 0, domid_bitmap_size / 8); set_bit(0, domid_bitmap); /* * Set up 1:1 page table for dom0 except the critical segments * like Xen and tboot. 
*/ for ( i = 0; i < max_page; i++ ) { extern int xen_in_range(paddr_t start, paddr_t end); extern int tboot_in_range(paddr_t start, paddr_t end); if ( xen_in_range(i << PAGE_SHIFT_4K, (i + 1) << PAGE_SHIFT_4K) || tboot_in_range(i << PAGE_SHIFT_4K, (i + 1) << PAGE_SHIFT_4K) ) continue; iommu_map_page(dom0, i, i); } if ( init_vtd_hw() ) goto error; setup_dom0_devices(); setup_dom0_rmrr(); if ( enable_vtd_translation() ) goto error; return 0; error: printk("iommu_setup() failed\n"); for_each_drhd_unit ( drhd ) { iommu = drhd->iommu; free_iommu(iommu); } return -EIO;}/* * If the device isn't owned by dom0, it means it already * has been assigned to other domain, or it's not exist. */int device_assigned(u8 bus, u8 devfn){ struct pci_dev *pdev; for_each_pdev( dom0, pdev ) if ( (pdev->bus == bus ) && (pdev->devfn == devfn) ) return 0; return 1;}int assign_device(struct domain *d, u8 bus, u8 devfn){ struct acpi_rmrr_unit *rmrr; struct pci_dev *pdev; int ret = 0; if ( list_empty(&acpi_drhd_units) ) return ret; gdprintk(XENLOG_INFO VTDPREFIX, "assign_device: bus = %x dev = %x func = %x\n", bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); pdev_flr(bus, devfn); reassign_device_ownership(dom0, d, bus, devfn); /* Setup rmrr identify mapping */ for_each_rmrr_device( rmrr, pdev ) if ( pdev->bus == bus && pdev->devfn == devfn ) { /* FIXME: Because USB RMRR conflicts with guest bios region, * ignore USB RMRR temporarily. */ if ( is_usb_device(pdev) ) return 0; ret = iommu_prepare_rmrr_dev(d, rmrr, pdev); if ( ret ) { gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: mapping reserved region failed\n"); return ret; } } end_for_each_rmrr_device(rmrr, pdev) return ret;}void iommu_set_pgd(struct domain *d){ struct hvm_iommu *hd = domain_hvm_iommu(d); unsigned long p2m_table; if ( hd->pgd ) { gdprintk(XENLOG_INFO VTDPREFIX, "iommu_set_pgd_1: hd->pgd = %p\n", hd->pgd); hd->pgd = NULL; } p2m_table = mfn_x(pagetable_get_mfn(d->arch.phys_table));#if CONFIG_PAGING_LEVELS == 3 if ( !hd->pgd ) { int level = agaw_to_level(hd->agaw); struct dma_pte *pmd = NULL; struct dma_pte *pgd = NULL; struct dma_pte *pte = NULL; l3_pgentry_t *l3e; unsigned long flags; int i; spin_lock_irqsave(&hd->mapping_lock, flags); if ( !hd->pgd ) { pgd = (struct dma_pte *)alloc_xenheap_page(); if ( !pgd ) { spin_unlock_irqrestore(&hd->mapping_lock, flags); gdprintk(XENLOG_ERR VTDPREFIX, "Allocate pgd memory failed!\n"); return; } memset(pgd, 0, PAGE_SIZE); hd->pgd = pgd; } l3e = map_domain_page(p2m_table); switch ( level ) { case VTD_PAGE_TABLE_LEVEL_3: /* Weybridge */ /* We only support 8 entries for the PAE L3 p2m table */ for ( i = 0; i < 8 ; i++ ) { /* Don't create new L2 entry, use ones from p2m table */ pgd[i].val = l3e[i].l3 | _PAGE_PRESENT | _PAGE_RW; } break; case VTD_PAGE_TABLE_LEVEL_4: /* Stoakley */ /* We allocate one more page for the top vtd page table. 
            pmd = (struct dma_pte *)alloc_xenheap_page();
            if ( !pmd )
            {
                unmap_domain_page(l3e);
                spin_unlock_irqrestore(&hd->mapping_lock, flags);
                gdprintk(XENLOG_ERR VTDPREFIX,
                         "Allocate pmd memory failed!\n");
                return;
            }
            memset((u8 *)pmd, 0, PAGE_SIZE);

            pte = &pgd[0];
            dma_set_pte_addr(*pte, virt_to_maddr(pmd));
            dma_set_pte_readable(*pte);
            dma_set_pte_writable(*pte);

            for ( i = 0; i < 8; i++ )
            {
                /* Don't create new L2 entry, use ones from p2m table */
                pmd[i].val = l3e[i].l3 | _PAGE_PRESENT | _PAGE_RW;
            }
            break;

        default:
            gdprintk(XENLOG_ERR VTDPREFIX,
                     "iommu_set_pgd:Unsupported p2m table sharing level!\n");
            break;
        }

        unmap_domain_page(l3e);
        spin_unlock_irqrestore(&hd->mapping_lock, flags);
    }
#elif CONFIG_PAGING_LEVELS == 4
    if ( !hd->pgd )
    {
        int level = agaw_to_level(hd->agaw);
        l3_pgentry_t *l3e;
        mfn_t pgd_mfn;

        switch ( level )
        {
        case VTD_PAGE_TABLE_LEVEL_3:
            l3e = map_domain_page(p2m_table);
            if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 )
            {
                gdprintk(XENLOG_ERR VTDPREFIX,
                         "iommu_set_pgd: second level wasn't there\n");
                unmap_domain_page(l3e);
                return;
            }
            pgd_mfn = _mfn(l3e_get_pfn(*l3e));
            unmap_domain_page(l3e);
            hd->pgd = maddr_to_virt(pagetable_get_paddr(
                pagetable_from_mfn(pgd_mfn)));
            break;

        case VTD_PAGE_TABLE_LEVEL_4:
            pgd_mfn = _mfn(p2m_table);
            hd->pgd = maddr_to_virt(pagetable_get_paddr(
                pagetable_from_mfn(pgd_mfn)));
            break;

        default:
            gdprintk(XENLOG_ERR VTDPREFIX,
                     "iommu_set_pgd:Unsupported p2m table sharing level!\n");
            break;
        }
    }
#endif

    gdprintk(XENLOG_INFO VTDPREFIX,
             "iommu_set_pgd: hd->pgd = %p\n", hd->pgd);
}

u8 iommu_state[MAX_IOMMU_REGS * MAX_IOMMUS];

int iommu_suspend(void)
{
    struct acpi_drhd_unit *drhd;
    struct iommu *iommu;
    int i = 0;

    iommu_flush_all();

    for_each_drhd_unit ( drhd )
    {
        iommu = drhd->iommu;
        iommu_state[DMAR_RTADDR_REG * i] =
            (u64) dmar_readq(iommu->reg, DMAR_RTADDR_REG);
        iommu_state[DMAR_FECTL_REG * i] =
            (u32) dmar_readl(iommu->reg, DMAR_FECTL_REG);
        iommu_state[DMAR_FEDATA_REG * i] =
            (u32) dmar_readl(iommu->reg, DMAR_FEDATA_REG);
        iommu_state[DMAR_FEADDR_REG * i] =
            (u32) dmar_readl(iommu->reg, DMAR_FEADDR_REG);
        iommu_state[DMAR_FEUADDR_REG * i] =
            (u32) dmar_readl(iommu->reg, DMAR_FEUADDR_REG);
        iommu_state[DMAR_PLMBASE_REG * i] =
            (u32) dmar_readl(iommu->reg, DMAR_PLMBASE_REG);
        iommu_state[DMAR_PLMLIMIT_REG * i] =
            (u32) dmar_readl(iommu->reg, DMAR_PLMLIMIT_REG);
        iommu_state[DMAR_PHMBASE_REG * i] =
            (u64) dmar_readq(iommu->reg, DMAR_PHMBASE_REG);
        iommu_state[DMAR_PHMLIMIT_REG * i] =
            (u64) dmar_readq(iommu->reg, DMAR_PHMLIMIT_REG);
        i++;
    }

    return 0;
}

int iommu_resume(void)
{
    struct acpi_drhd_unit *drhd;
    struct iommu *iommu;
    int i = 0;

    iommu_flush_all();

    init_vtd_hw();
    for_each_drhd_unit ( drhd )
    {
        iommu = drhd->iommu;
        dmar_writeq(iommu->reg, DMAR_RTADDR_REG,
                    (u64) iommu_state[DMAR_RTADDR_REG * i]);
        dmar_writel(iommu->reg, DMAR_FECTL_REG,
                    (u32) iommu_state[DMAR_FECTL_REG * i]);
        dmar_writel(iommu->reg, DMAR_FEDATA_REG,
                    (u32) iommu_state[DMAR_FEDATA_REG * i]);
        dmar_writel(iommu->reg, DMAR_FEADDR_REG,
                    (u32) iommu_state[DMAR_FEADDR_REG * i]);
        dmar_writel(iommu->reg, DMAR_FEUADDR_REG,
                    (u32) iommu_state[DMAR_FEUADDR_REG * i]);
        dmar_writel(iommu->reg, DMAR_PLMBASE_REG,
                    (u32) iommu_state[DMAR_PLMBASE_REG * i]);
        dmar_writel(iommu->reg, DMAR_PLMLIMIT_REG,
                    (u32) iommu_state[DMAR_PLMLIMIT_REG * i]);
        dmar_writeq(iommu->reg, DMAR_PHMBASE_REG,
                    (u64) iommu_state[DMAR_PHMBASE_REG * i]);
        dmar_writeq(iommu->reg, DMAR_PHMLIMIT_REG,
                    (u64) iommu_state[DMAR_PHMLIMIT_REG * i]);

        if ( iommu_enable_translation(iommu) )
            return -EIO;
        i++;
    }

    return 0;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
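A minimal caller-side sketch, not part of intel-iommu.c: it only illustrates how the two entry points device_assigned() and assign_device() above fit together, given that device_assigned() returns 0 while dom0 still owns bus:devfn. The wrapper name try_assign_device() and the -EBUSY return value are assumptions for illustration.

/*
 * Hypothetical caller-side sketch (not from this file): attempt device
 * assignment only while the device is still owned by dom0.
 */
static int try_assign_device(struct domain *d, u8 bus, u8 devfn)
{
    if ( device_assigned(bus, devfn) )
    {
        /* Already assigned to another domain, or the device does not exist. */
        gdprintk(XENLOG_WARNING VTDPREFIX,
                 "device %x:%x.%x is not owned by dom0\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        return -EBUSY;
    }
    return assign_device(d, bus, devfn);
}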