
iommu.c
Source: Xen virtual machine source package
Language: C
Page 1 of 4
        gdprintk(XENLOG_ERR VTDPREFIX,
                 "IOMMU: unsupported sagaw %lx\n", sagaw);
        xfree(iommu);
        return -ENODEV;
    }
    iommu->nr_pt_levels = agaw_to_level(agaw);

    if ( !ecap_coherent(iommu->ecap) )
        iommus_incoherent = 1;

    spin_lock_init(&iommu->lock);
    spin_lock_init(&iommu->register_lock);

    drhd->iommu = iommu;
    return 0;
}

static void iommu_free(struct acpi_drhd_unit *drhd)
{
    struct iommu *iommu = drhd->iommu;

    if ( iommu == NULL )
        return;

    if ( iommu->root_maddr != 0 )
    {
        free_pgtable_maddr(iommu->root_maddr);
        iommu->root_maddr = 0;
    }

    if ( iommu->reg )
        iounmap(iommu->reg);

    free_intel_iommu(iommu->intel);
    free_irq(iommu->vector);
    xfree(iommu);

    drhd->iommu = NULL;
}

#define guestwidth_to_adjustwidth(gaw) ({       \
    int agaw, r = (gaw - 12) % 9;               \
    agaw = (r == 0) ? gaw : (gaw + 9 - r);      \
    if ( agaw > 64 )                            \
        agaw = 64;                              \
    agaw; })

static int intel_iommu_domain_init(struct domain *d)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct iommu *iommu = NULL;
    u64 i;
    struct acpi_drhd_unit *drhd;

    drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
    iommu = drhd->iommu;

    hd->agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);

    if ( d->domain_id == 0 )
    {
        extern int xen_in_range(paddr_t start, paddr_t end);
        extern int tboot_in_range(paddr_t start, paddr_t end);

        /*
         * Set up 1:1 page table for dom0 except the critical segments
         * like Xen and tboot.
         */
        for ( i = 0; i < max_page; i++ )
        {
            if ( xen_in_range(i << PAGE_SHIFT_4K, (i + 1) << PAGE_SHIFT_4K) ||
                 tboot_in_range(i << PAGE_SHIFT_4K, (i + 1) << PAGE_SHIFT_4K) )
                continue;
            iommu_map_page(d, i, i);
        }

        setup_dom0_devices(d);
        setup_dom0_rmrr(d);

        iommu_flush_all();

        for_each_drhd_unit ( drhd )
        {
            iommu = drhd->iommu;
            if ( iommu_enable_translation(iommu) )
                return -EIO;
        }
    }

    return 0;
}
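/*
 * Illustrative aside: the guestwidth_to_adjustwidth() macro above rounds a
 * guest address width up to the nearest width a VT-d page table can express,
 * i.e. 12 bits of page offset plus a whole number of 9-bit table levels.
 * The hypothetical helper below (not part of iommu.c) restates the same
 * arithmetic as a plain function; e.g. 30 -> 30, 32 -> 39, 48 -> 48,
 * 60 -> 64 (capped).
 */
static inline int example_adjust_width(int gaw)
{
    int r = (gaw - 12) % 9;                    /* distance past a level step */
    int agaw = (r == 0) ? gaw : (gaw + 9 - r); /* round up to next step      */
    return (agaw > 64) ? 64 : agaw;            /* VT-d caps the width at 64  */
}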
static int domain_context_mapping_one(
    struct domain *domain,
    struct iommu *iommu,
    u8 bus, u8 devfn)
{
    struct hvm_iommu *hd = domain_hvm_iommu(domain);
    struct context_entry *context, *context_entries;
    unsigned long flags;
    u64 maddr, pgd_maddr;
    int agaw;

    maddr = bus_to_context_maddr(iommu, bus);
    context_entries = (struct context_entry *)map_vtd_domain_page(maddr);
    context = &context_entries[devfn];

    if ( context_present(*context) )
    {
        unmap_vtd_domain_page(context_entries);
        return 0;
    }

    spin_lock_irqsave(&iommu->lock, flags);

#ifdef CONTEXT_PASSTHRU
    if ( ecap_pass_thru(iommu->ecap) && (domain->domain_id == 0) )
        context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
    else
#endif
    {
        /* Ensure we have pagetables allocated down to leaf PTE. */
        if ( hd->pgd_maddr == 0 )
        {
            addr_to_dma_page_maddr(domain, 0, 1);
            if ( hd->pgd_maddr == 0 )
            {
            nomem:
                unmap_vtd_domain_page(context_entries);
                spin_unlock_irqrestore(&iommu->lock, flags);
                return -ENOMEM;
            }
        }

        /* Skip top levels of page tables for 2- and 3-level DRHDs. */
        pgd_maddr = hd->pgd_maddr;
        for ( agaw = level_to_agaw(4);
              agaw != level_to_agaw(iommu->nr_pt_levels);
              agaw-- )
        {
            struct dma_pte *p = map_vtd_domain_page(pgd_maddr);
            pgd_maddr = dma_pte_addr(*p);
            unmap_vtd_domain_page(p);
            if ( pgd_maddr == 0 )
                goto nomem;
        }

        context_set_address_root(*context, pgd_maddr);
        context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
    }

    /*
     * domain_id 0 is not valid on Intel's IOMMU, force domain_id to
     * be 1 based as required by Intel's IOMMU hw.
     */
    context_set_domain_id(context, domain);
    context_set_address_width(*context, agaw);
    context_set_fault_enable(*context);
    context_set_present(*context);
    iommu_flush_cache_entry(context);

    unmap_vtd_domain_page(context_entries);

    /* Context entry was previously non-present (with domid 0). */
    iommu_flush_context_device(iommu, 0, (((u16)bus) << 8) | devfn,
                               DMA_CCMD_MASK_NOBIT, 1);
    if ( iommu_flush_iotlb_dsi(iommu, 0, 1) )
        iommu_flush_write_buffer(iommu);

    set_bit(iommu->index, &hd->iommu_bitmap);
    spin_unlock_irqrestore(&iommu->lock, flags);
    return 0;
}
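/*
 * Note on domain_context_mapping_one() above: a domain's I/O page table is
 * built 4 levels deep, but a DRHD that only implements 2- or 3-level tables
 * must be handed a root that many levels further down.  The agaw loop above
 * therefore dereferences (4 - iommu->nr_pt_levels) top-level entries before
 * programming the context entry's address root.
 */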
#define PCI_BASE_CLASS_BRIDGE    0x06
#define PCI_CLASS_BRIDGE_PCI     0x0604

enum {
    DEV_TYPE_PCIe_ENDPOINT,
    DEV_TYPE_PCIe_BRIDGE,
    DEV_TYPE_PCI_BRIDGE,
    DEV_TYPE_PCI,
};

int pdev_type(u8 bus, u8 devfn)
{
    u16 class_device;
    u16 status, creg;
    int pos;
    u8 d = PCI_SLOT(devfn), f = PCI_FUNC(devfn);

    class_device = pci_conf_read16(bus, d, f, PCI_CLASS_DEVICE);
    if ( class_device == PCI_CLASS_BRIDGE_PCI )
    {
        pos = pci_find_next_cap(bus, devfn,
                                PCI_CAPABILITY_LIST, PCI_CAP_ID_EXP);
        if ( !pos )
            return DEV_TYPE_PCI_BRIDGE;
        creg = pci_conf_read16(bus, d, f, pos + PCI_EXP_FLAGS);
        return ((creg & PCI_EXP_FLAGS_TYPE) >> 4) == PCI_EXP_TYPE_PCI_BRIDGE ?
            DEV_TYPE_PCI_BRIDGE : DEV_TYPE_PCIe_BRIDGE;
    }

    status = pci_conf_read16(bus, d, f, PCI_STATUS);
    if ( !(status & PCI_STATUS_CAP_LIST) )
        return DEV_TYPE_PCI;

    if ( pci_find_next_cap(bus, devfn, PCI_CAPABILITY_LIST, PCI_CAP_ID_EXP) )
        return DEV_TYPE_PCIe_ENDPOINT;

    return DEV_TYPE_PCI;
}

#define MAX_BUSES 256
static struct { u8 map, bus, devfn; } bus2bridge[MAX_BUSES];

static int find_pcie_endpoint(u8 *bus, u8 *devfn, u8 *secbus)
{
    int cnt = 0;
    *secbus = *bus;

    if ( *bus == 0 )
        /* assume integrated PCI devices in RC have valid requester-id */
        return 1;

    if ( !bus2bridge[*bus].map )
        return 0;

    while ( bus2bridge[*bus].map )
    {
        *secbus = *bus;
        *devfn = bus2bridge[*bus].devfn;
        *bus = bus2bridge[*bus].bus;
        if ( cnt++ >= MAX_BUSES )
            return 0;
    }

    return 1;
}
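/*
 * Illustrative aside: how find_pcie_endpoint() collapses a legacy PCI device
 * onto the requester id the IOMMU actually sees.  With bus2bridge[] recording
 * (as domain_context_mapping() below does) that bus 6 sits behind bridge
 * 05:02.0 and bus 5 behind bridge 04:01.0, a device at 06:03.0 walks up to
 * bus/devfn = 04:01.0 with secbus = 5.  The hypothetical wrapper below is
 * not part of iommu.c; it only shows the intended call shape:
 */
static int example_map_legacy_device(struct domain *d,
                                     struct acpi_drhd_unit *drhd,
                                     u8 bus, u8 devfn)
{
    u8 secbus;

    if ( !find_pcie_endpoint(&bus, &devfn, &secbus) )
        return -ENODEV;              /* no bridge chain recorded for bus */

    /* bus/devfn now name the topmost bridge visible to the IOMMU. */
    return domain_context_mapping_one(d, drhd->iommu, bus, devfn);
}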
static int domain_context_mapping(struct domain *domain, u8 bus, u8 devfn)
{
    struct acpi_drhd_unit *drhd;
    int ret = 0;
    u16 sec_bus, sub_bus, ob, odf;
    u32 type;
    u8 secbus;

    drhd = acpi_find_matched_drhd_unit(bus, devfn);
    if ( !drhd )
        return -ENODEV;

    type = pdev_type(bus, devfn);
    switch ( type )
    {
    case DEV_TYPE_PCIe_BRIDGE:
    case DEV_TYPE_PCI_BRIDGE:
        sec_bus = pci_conf_read8(bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                                 PCI_SECONDARY_BUS);
        sub_bus = pci_conf_read8(bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                                 PCI_SUBORDINATE_BUS);
        /*dmar_scope_add_buses(&drhd->scope, sec_bus, sub_bus);*/

        if ( type == DEV_TYPE_PCIe_BRIDGE )
            break;

        for ( sub_bus &= 0xff; sec_bus <= sub_bus; sec_bus++ )
        {
            bus2bridge[sec_bus].map = 1;
            bus2bridge[sec_bus].bus = bus;
            bus2bridge[sec_bus].devfn = devfn;
        }
        break;

    case DEV_TYPE_PCIe_ENDPOINT:
        gdprintk(XENLOG_INFO VTDPREFIX,
                 "domain_context_mapping:PCIe: bdf = %x:%x.%x\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        ret = domain_context_mapping_one(domain, drhd->iommu, bus, devfn);
        break;

    case DEV_TYPE_PCI:
        gdprintk(XENLOG_INFO VTDPREFIX,
                 "domain_context_mapping:PCI:  bdf = %x:%x.%x\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

        ob = bus; odf = devfn;
        if ( !find_pcie_endpoint(&bus, &devfn, &secbus) )
        {
            gdprintk(XENLOG_WARNING VTDPREFIX,
                     "domain_context_mapping:invalid\n");
            break;
        }

        if ( ob != bus || odf != devfn )
            gdprintk(XENLOG_INFO VTDPREFIX,
                     "domain_context_mapping:map:  "
                     "bdf = %x:%x.%x -> %x:%x.%x\n",
                     ob, PCI_SLOT(odf), PCI_FUNC(odf),
                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

        ret = domain_context_mapping_one(domain, drhd->iommu, bus, devfn);
        if ( secbus != bus )
            /*
             * The source-id for transactions on non-PCIe buses seems
             * to originate from devfn=0 on the secondary bus behind
             * the bridge.  Map that id as well.  The id to use in
             * these scenarios is not particularly well documented
             * anywhere.
             */
            domain_context_mapping_one(domain, drhd->iommu, secbus, 0);
        break;

    default:
        gdprintk(XENLOG_ERR VTDPREFIX,
                 "domain_context_mapping:unknown type : bdf = %x:%x.%x\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        ret = -EINVAL;
        break;
    }

    return ret;
}

static int domain_context_unmap_one(
    struct domain *domain,
    struct iommu *iommu,
    u8 bus, u8 devfn)
{
    struct context_entry *context, *context_entries;
    unsigned long flags;
    u64 maddr;

    maddr = bus_to_context_maddr(iommu, bus);
    context_entries = (struct context_entry *)map_vtd_domain_page(maddr);
    context = &context_entries[devfn];

    if ( !context_present(*context) )
    {
        unmap_vtd_domain_page(context_entries);
        return 0;
    }

    spin_lock_irqsave(&iommu->lock, flags);
    context_clear_present(*context);
    context_clear_entry(*context);
    iommu_flush_cache_entry(context);
    iommu_flush_context_domain(iommu, domain_iommu_domid(domain), 0);
    iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0);
    unmap_vtd_domain_page(context_entries);
    spin_unlock_irqrestore(&iommu->lock, flags);
    return 0;
}

static int domain_context_unmap(struct domain *domain, u8 bus, u8 devfn)
{
    struct acpi_drhd_unit *drhd;
    u16 sec_bus, sub_bus;
    int ret = 0;
    u32 type;
    u8 secbus;

    drhd = acpi_find_matched_drhd_unit(bus, devfn);
    if ( !drhd )
        return -ENODEV;

    type = pdev_type(bus, devfn);
    switch ( type )
    {
    case DEV_TYPE_PCIe_BRIDGE:
    case DEV_TYPE_PCI_BRIDGE:
        sec_bus = pci_conf_read8(bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                                 PCI_SECONDARY_BUS);
        sub_bus = pci_conf_read8(bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                                 PCI_SUBORDINATE_BUS);
        /*dmar_scope_remove_buses(&drhd->scope, sec_bus, sub_bus);*/
        if ( type == DEV_TYPE_PCI_BRIDGE )
            ret = domain_context_unmap_one(domain, drhd->iommu, bus, devfn);
        break;

    case DEV_TYPE_PCIe_ENDPOINT:
        ret = domain_context_unmap_one(domain, drhd->iommu, bus, devfn);
        break;

    case DEV_TYPE_PCI:
        if ( find_pcie_endpoint(&bus, &devfn, &secbus) )
            ret = domain_context_unmap_one(domain, drhd->iommu, bus, devfn);
        if ( bus != secbus )
            domain_context_unmap_one(domain, drhd->iommu, secbus, 0);
        break;

    default:
        gdprintk(XENLOG_ERR VTDPREFIX,
                 "domain_context_unmap:unknown type: bdf = %x:%x:%x\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        ret = -EINVAL;
        break;
    }

    return ret;
}
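/*
 * Note: reassign_device_ownership() below is the core of device passthrough
 * reassignment.  It unmaps the device's context entry from the source domain,
 * maps it into the target, moves the pci_dev between the domains' device
 * lists, and finally clears the source domain's bit for this IOMMU if no
 * other device of the source domain remains behind it.
 */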
static int reassign_device_ownership(
    struct domain *source,
    struct domain *target,
    u8 bus, u8 devfn)
{
    struct hvm_iommu *source_hd = domain_hvm_iommu(source);
    struct pci_dev *pdev;
    struct acpi_drhd_unit *drhd;
    struct iommu *pdev_iommu;
    int ret, found = 0;

    if ( !(pdev = pci_lock_domain_pdev(source, bus, devfn)) )
        return -ENODEV;

    drhd = acpi_find_matched_drhd_unit(bus, devfn);
    pdev_iommu = drhd->iommu;
    domain_context_unmap(source, bus, devfn);

    ret = domain_context_mapping(target, bus, devfn);
    if ( ret )
    {
        spin_unlock(&pdev->lock);   /* drop the pdev lock taken above */
        return ret;
    }

    write_lock(&pcidevs_lock);
    list_move(&pdev->domain_list, &target->arch.pdev_list);
    write_unlock(&pcidevs_lock);
    pdev->domain = target;
    spin_unlock(&pdev->lock);

    read_lock(&pcidevs_lock);
    for_each_pdev ( source, pdev )
    {
        drhd = acpi_find_matched_drhd_unit(pdev->bus, pdev->devfn);
        if ( drhd->iommu == pdev_iommu )
        {
            found = 1;
            break;
        }
    }
    read_unlock(&pcidevs_lock);

    if ( !found )
        clear_bit(pdev_iommu->index, &source_hd->iommu_bitmap);

    return ret;
}

void iommu_domain_teardown(struct domain *d)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);

    if ( list_empty(&acpi_drhd_units) )
        return;

    iommu_free_pagetable(hd->pgd_maddr, agaw_to_level(hd->agaw));
    hd->pgd_maddr = 0;
    iommu_domid_release(d);
}

static int domain_context_mapped(u8 bus, u8 devfn)
{
    struct acpi_drhd_unit *drhd;

    for_each_drhd_unit ( drhd )
        if ( device_context_mapped(drhd->iommu, bus, devfn) )
            return 1;

    return 0;
}

int intel_iommu_map_page(
    struct domain *d, unsigned long gfn, unsigned long mfn)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct acpi_drhd_unit *drhd;
    struct iommu *iommu;
    struct dma_pte *page = NULL, *pte = NULL;
    u64 pg_maddr;
    int pte_present;

    drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
    iommu = drhd->iommu;

#ifdef CONTEXT_PASSTHRU
    /* do nothing if dom0 and iommu supports pass thru */
    if ( ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
        return 0;
#endif

    pg_maddr = addr_to_dma_page_maddr(d, (paddr_t)gfn << PAGE_SHIFT_4K, 1);
