intel-iommu.c
        gdprintk(XENLOG_ERR VTDPREFIX,
                 "domain_context_unmap_one-%x:%x:%x- context == NULL:return\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        return -ENOMEM;
    }

    if ( !context_present(*context) )
    {
        gdprintk(XENLOG_WARNING VTDPREFIX,
                 "domain_context_unmap_one-%x:%x:%x- "
                 "context NOT present:return\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        return 0;
    }

    gdprintk(XENLOG_INFO VTDPREFIX,
             "domain_context_unmap_one: bdf = %x:%x:%x\n",
             bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

    spin_lock_irqsave(&iommu->lock, flags);
    context_clear_present(*context);
    context_clear_entry(*context);
    iommu_flush_cache_entry(context);
    iommu_flush_context_global(iommu, 0);
    iommu_flush_iotlb_global(iommu, 0);
    spin_unlock_irqrestore(&iommu->lock, flags);

    return 0;
}

static int domain_context_unmap(
    struct domain *domain,
    struct iommu *iommu,
    struct pci_dev *pdev)
{
    int ret = 0;
    int dev, func, sec_bus, sub_bus;
    u32 type;

    type = pdev_type(pdev);
    switch ( type )
    {
    case DEV_TYPE_PCI_BRIDGE:
        sec_bus = read_pci_config_byte(
            pdev->bus, PCI_SLOT(pdev->devfn),
            PCI_FUNC(pdev->devfn), PCI_SECONDARY_BUS);
        sub_bus = read_pci_config_byte(
            pdev->bus, PCI_SLOT(pdev->devfn),
            PCI_FUNC(pdev->devfn), PCI_SUBORDINATE_BUS);
        gdprintk(XENLOG_INFO VTDPREFIX,
                 "domain_context_unmap:BRIDGE:%x:%x:%x "
                 "sec_bus=%x sub_bus=%x\n",
                 pdev->bus, PCI_SLOT(pdev->devfn),
                 PCI_FUNC(pdev->devfn), sec_bus, sub_bus);
        break;
    case DEV_TYPE_PCIe_ENDPOINT:
        gdprintk(XENLOG_INFO VTDPREFIX,
                 "domain_context_unmap:PCIe : bdf = %x:%x:%x\n",
                 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
        ret = domain_context_unmap_one(domain, iommu,
                                       (u8)(pdev->bus), (u8)(pdev->devfn));
        break;
    case DEV_TYPE_PCI:
        gdprintk(XENLOG_INFO VTDPREFIX,
                 "domain_context_unmap:PCI: bdf = %x:%x:%x\n",
                 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
        if ( pdev->bus == 0 )
            ret = domain_context_unmap_one(
                domain, iommu,
                (u8)(pdev->bus), (u8)(pdev->devfn));
        else
        {
            if ( bus2bridge[pdev->bus].bus != 0 )
                gdprintk(XENLOG_WARNING VTDPREFIX,
                         "domain_context_unmap:"
                         "bus2bridge[%d].bus != 0\n", pdev->bus);

            ret = domain_context_unmap_one(domain, iommu,
                                           (u8)(bus2bridge[pdev->bus].bus),
                                           (u8)(bus2bridge[pdev->bus].devfn));

            /* Unmap everything behind the PCI bridge */
            for ( dev = 0; dev < 32; dev++ )
            {
                for ( func = 0; func < 8; func++ )
                {
                    ret = domain_context_unmap_one(
                        domain, iommu,
                        pdev->bus, (u8)PCI_DEVFN(dev, func));
                    if ( ret )
                        return ret;
                }
            }
        }
        break;
    default:
        gdprintk(XENLOG_ERR VTDPREFIX,
                 "domain_context_unmap:unknown type: bdf = %x:%x:%x\n",
                 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
        ret = -EINVAL;
        break;
    }

    return ret;
}
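/*
 * reassign_device_ownership(): tear down the device's context mapping in
 * the source domain, move its pci_dev from the source domain's pdev list
 * to the target domain's (holding both iommu_list_locks), clear the
 * source's iommu_bitmap bit if no remaining source device sits behind the
 * same IOMMU, and finally establish the context mapping in the target
 * domain.
 */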
void reassign_device_ownership(
    struct domain *source,
    struct domain *target,
    u8 bus, u8 devfn)
{
    struct hvm_iommu *source_hd = domain_hvm_iommu(source);
    struct hvm_iommu *target_hd = domain_hvm_iommu(target);
    struct pci_dev *pdev, *pdev2;
    struct acpi_drhd_unit *drhd;
    struct iommu *iommu;
    int status;
    unsigned long flags;
    int found = 0;

    gdprintk(XENLOG_INFO VTDPREFIX,
             "reassign_device-%x:%x:%x- source = %d target = %d\n",
             bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
             source->domain_id, target->domain_id);

    for_each_pdev( source, pdev )
    {
        if ( (pdev->bus != bus) || (pdev->devfn != devfn) )
            continue;

        drhd = acpi_find_matched_drhd_unit(pdev);
        iommu = drhd->iommu;
        domain_context_unmap(source, iommu, pdev);

        /* Move pci device from the source domain to target domain. */
        spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
        spin_lock(&target_hd->iommu_list_lock);
        list_move(&pdev->list, &target_hd->pdev_list);
        spin_unlock(&target_hd->iommu_list_lock);
        spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);

        for_each_pdev ( source, pdev2 )
        {
            drhd = acpi_find_matched_drhd_unit(pdev2);
            if ( drhd->iommu == iommu )
            {
                found = 1;
                break;
            }
        }
        if ( !found )
            clear_bit(iommu->index, &source_hd->iommu_bitmap);

        status = domain_context_mapping(target, iommu, pdev);
        if ( status != 0 )
            gdprintk(XENLOG_ERR VTDPREFIX, "domain_context_mapping failed\n");

        break;
    }
}

void return_devices_to_dom0(struct domain *d)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct pci_dev *pdev;

    while ( !list_empty(&hd->pdev_list) )
    {
        pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list);
        dprintk(XENLOG_INFO VTDPREFIX,
                "return_devices_to_dom0: bdf = %x:%x:%x\n",
                pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
        pdev_flr(pdev->bus, pdev->devfn);
        reassign_device_ownership(d, dom0, pdev->bus, pdev->devfn);
    }

#ifdef VTD_DEBUG
    for_each_pdev ( dom0, pdev )
        dprintk(XENLOG_INFO VTDPREFIX,
                "return_devices_to_dom0:%x: bdf = %x:%x:%x\n",
                dom0->domain_id, pdev->bus,
                PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
#endif
}

void iommu_domain_teardown(struct domain *d)
{
    if ( list_empty(&acpi_drhd_units) )
        return;

    iommu_domid_release(d);

#if CONFIG_PAGING_LEVELS == 3
    {
        struct hvm_iommu *hd = domain_hvm_iommu(d);
        int level = agaw_to_level(hd->agaw);
        struct dma_pte *pgd = NULL;

        switch ( level )
        {
        case VTD_PAGE_TABLE_LEVEL_3:
            if ( hd->pgd )
                free_xenheap_page((void *)hd->pgd);
            break;
        case VTD_PAGE_TABLE_LEVEL_4:
            if ( hd->pgd )
            {
                pgd = hd->pgd;
                if ( pgd[0].val != 0 )
                    free_xenheap_page((void*)maddr_to_virt(
                        dma_pte_addr(pgd[0])));
                free_xenheap_page((void *)hd->pgd);
            }
            break;
        default:
            gdprintk(XENLOG_ERR VTDPREFIX,
                     "Unsupported p2m table sharing level!\n");
            break;
        }
    }
#endif
    return_devices_to_dom0(d);
}

static int domain_context_mapped(struct pci_dev *pdev)
{
    struct acpi_drhd_unit *drhd;
    struct iommu *iommu;
    int ret;

    for_each_drhd_unit ( drhd )
    {
        iommu = drhd->iommu;
        ret = device_context_mapped(iommu, pdev->bus, pdev->devfn);
        if ( ret )
            return ret;
    }

    return 0;
}

int iommu_map_page(struct domain *d, paddr_t gfn, paddr_t mfn)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct acpi_drhd_unit *drhd;
    struct iommu *iommu;
    struct dma_pte *pte = NULL;
    struct page_info *pg = NULL;

    drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
    iommu = drhd->iommu;

    /* do nothing if dom0 and iommu supports pass thru */
    if ( ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
        return 0;

    pg = addr_to_dma_page(d, gfn << PAGE_SHIFT_4K);
    if ( !pg )
        return -ENOMEM;
    pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
    pte += gfn & LEVEL_MASK;
    dma_set_pte_addr(*pte, mfn << PAGE_SHIFT_4K);
    dma_set_pte_prot(*pte, DMA_PTE_READ | DMA_PTE_WRITE);
    iommu_flush_cache_entry(pte);
    unmap_domain_page(pte);

    for_each_drhd_unit ( drhd )
    {
        iommu = drhd->iommu;
        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
            continue;

        if ( cap_caching_mode(iommu->cap) )
            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
                                  gfn << PAGE_SHIFT_4K, 1, 0);
        else if ( cap_rwbf(iommu->cap) )
            iommu_flush_write_buffer(iommu);
    }

    return 0;
}
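/*
 * The per-IOMMU flush pattern used above (skip units whose bit is not set
 * in the domain's iommu_bitmap, then do a page-selective IOTLB flush when
 * the unit reports caching mode, or drain the write buffer when it
 * requires RWBF) is repeated in iommu_map_page(), iommu_page_mapping()
 * and iommu_flush() below.  The block under "#if 0" is a minimal sketch
 * of how that pattern could be factored into one helper; the name
 * __iommu_flush_after_map() is hypothetical and the code is illustrative
 * only, not part of the original file.
 */
#if 0   /* illustrative sketch, not built */
static void __iommu_flush_after_map(struct domain *d, u64 addr,
                                    unsigned int pages)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct acpi_drhd_unit *drhd;
    struct iommu *iommu;

    for_each_drhd_unit ( drhd )
    {
        iommu = drhd->iommu;
        /* Only flush IOMMUs that actually serve a device of this domain. */
        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
            continue;

        if ( cap_caching_mode(iommu->cap) )
            /* Caching-mode units need a page-selective IOTLB invalidation
             * covering the range that was just (re)mapped. */
            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
                                  addr, pages, 0);
        else if ( cap_rwbf(iommu->cap) )
            /* Otherwise only the write buffer needs draining. */
            iommu_flush_write_buffer(iommu);
    }
}
#endif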
int iommu_unmap_page(struct domain *d, dma_addr_t gfn)
{
    struct acpi_drhd_unit *drhd;
    struct iommu *iommu;

    drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
    iommu = drhd->iommu;

    /* do nothing if dom0 and iommu supports pass thru */
    if ( ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
        return 0;

    dma_pte_clear_one(d, gfn << PAGE_SHIFT_4K);

    return 0;
}

int iommu_page_mapping(struct domain *domain, dma_addr_t iova,
                       void *hpa, size_t size, int prot)
{
    struct hvm_iommu *hd = domain_hvm_iommu(domain);
    struct acpi_drhd_unit *drhd;
    struct iommu *iommu;
    unsigned long start_pfn, end_pfn;
    struct dma_pte *pte = NULL;
    int index;
    struct page_info *pg = NULL;

    if ( (prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0 )
        return -EINVAL;

    iova = (iova >> PAGE_SHIFT_4K) << PAGE_SHIFT_4K;
    start_pfn = (unsigned long)(((unsigned long) hpa) >> PAGE_SHIFT_4K);
    end_pfn = (unsigned long)
        ((PAGE_ALIGN_4K(((unsigned long)hpa) + size)) >> PAGE_SHIFT_4K);
    index = 0;
    while ( start_pfn < end_pfn )
    {
        pg = addr_to_dma_page(domain, iova + PAGE_SIZE_4K * index);
        if ( !pg )
            return -ENOMEM;
        pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
        pte += start_pfn & LEVEL_MASK;
        dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
        dma_set_pte_prot(*pte, prot);
        iommu_flush_cache_entry(pte);
        unmap_domain_page(pte);
        start_pfn++;
        index++;
    }

    for_each_drhd_unit ( drhd )
    {
        iommu = drhd->iommu;
        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
            continue;

        if ( cap_caching_mode(iommu->cap) )
            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
                                  iova, index, 0);
        else if ( cap_rwbf(iommu->cap) )
            iommu_flush_write_buffer(iommu);
    }

    return 0;
}

int iommu_page_unmapping(struct domain *domain, dma_addr_t addr, size_t size)
{
    dma_pte_clear_range(domain, addr, addr + size);

    return 0;
}

void iommu_flush(struct domain *d, dma_addr_t gfn, u64 *p2m_entry)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct acpi_drhd_unit *drhd;
    struct iommu *iommu = NULL;
    struct dma_pte *pte = (struct dma_pte *) p2m_entry;

    for_each_drhd_unit ( drhd )
    {
        iommu = drhd->iommu;
        if ( !test_bit(iommu->index, &hd->iommu_bitmap) )
            continue;

        if ( cap_caching_mode(iommu->cap) )
            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
                                  gfn << PAGE_SHIFT_4K, 1, 0);
        else if ( cap_rwbf(iommu->cap) )
            iommu_flush_write_buffer(iommu);
    }

    iommu_flush_cache_entry(pte);
}

static int iommu_prepare_rmrr_dev(
    struct domain *d,
    struct acpi_rmrr_unit *rmrr,
    struct pci_dev *pdev)
{
    struct acpi_drhd_unit *drhd;
    unsigned long size;
    int ret;

    /* page table init */
    size = rmrr->end_address - rmrr->base_address + 1;
    ret = iommu_page_mapping(d, rmrr->base_address,
                             (void *)rmrr->base_address, size,
                             DMA_PTE_READ|DMA_PTE_WRITE);
    if ( ret )
        return ret;

    if ( domain_context_mapped(pdev) == 0 )
    {
        drhd = acpi_find_matched_drhd_unit(pdev);
        ret = domain_context_mapping(d, drhd->iommu, pdev);
        if ( !ret )
            return 0;
    }

    return ret;
}

void __init setup_dom0_devices(void)
{
    struct hvm_iommu *hd = domain_hvm_iommu(dom0);
    struct acpi_drhd_unit *drhd;
    struct pci_dev *pdev;
    int bus, dev, func, ret;
    u32 l;

#ifdef DEBUG_VTD_CONTEXT_ENTRY
    for ( bus = 0; bus < 256; bus++ )
    {
        for ( dev = 0; dev < 32; dev++ )
        {
            for ( func = 0; func < 8; func++ )
            {
                struct context_entry *context;
                struct pci_dev device;

                device.bus = bus;
                device.devfn = PCI_DEVFN(dev, func);
                drhd = acpi_find_matched_drhd_unit(&device);
                context = device_to_context_entry(drhd->iommu,
                                                  bus, PCI_DEVFN(dev, func));
                if ( (context->lo != 0) || (context->hi != 0) )
                    dprintk(XENLOG_INFO VTDPREFIX,