
intel-iommu.c

xen 3.2.2 source code
Language: C
Page 1 of 5
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
 * Copyright (C) Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) Allen Kay <allen.m.kay@intel.com> - adapted to xen
 */

#include <xen/init.h>
#include <xen/irq.h>
#include <xen/spinlock.h>
#include <xen/sched.h>
#include <xen/xmalloc.h>
#include <xen/domain_page.h>
#include <asm/delay.h>
#include <asm/string.h>
#include <asm/mm.h>
#include <asm/iommu.h>
#include <asm/hvm/vmx/intel-iommu.h>
#include "dmar.h"
#include "pci-direct.h"
#include "pci_regs.h"
#include "msi.h"

#define domain_iommu_domid(d) ((d)->arch.hvm_domain.hvm_iommu.iommu_domid)

extern void print_iommu_regs(struct acpi_drhd_unit *drhd);
extern void print_vtd_entries(struct domain *d, int bus, int devfn,
                              unsigned long gmfn);
void pdev_flr(u8 bus, u8 devfn);

static spinlock_t domid_bitmap_lock;    /* protect domain id bitmap */
static int domid_bitmap_size;           /* domain id bitmap size in bit */
static void *domid_bitmap;              /* iommu domain id bitmap */

#define DID_FIELD_WIDTH 16
#define DID_HIGH_OFFSET 8
static void context_set_domain_id(struct context_entry *context,
                                  struct domain *d)
{
    unsigned long flags;
    domid_t iommu_domid = domain_iommu_domid(d);

    if ( iommu_domid == 0 )
    {
        spin_lock_irqsave(&domid_bitmap_lock, flags);
        iommu_domid = find_first_zero_bit(domid_bitmap, domid_bitmap_size);
        set_bit(iommu_domid, domid_bitmap);
        spin_unlock_irqrestore(&domid_bitmap_lock, flags);
        d->arch.hvm_domain.hvm_iommu.iommu_domid = iommu_domid;
    }

    context->hi &= (1 << DID_HIGH_OFFSET) - 1;
    context->hi |= iommu_domid << DID_HIGH_OFFSET;
}

static void iommu_domid_release(struct domain *d)
{
    domid_t iommu_domid = domain_iommu_domid(d);

    if ( iommu_domid != 0 )
    {
        d->arch.hvm_domain.hvm_iommu.iommu_domid = 0;
        clear_bit(iommu_domid, domid_bitmap);
    }
}

static unsigned int x86_clflush_size;
static int iommus_incoherent;
static void __iommu_flush_cache(void *addr, int size)
{
    int i;

    if ( !iommus_incoherent )
        return;

    for ( i = 0; i < size; i += x86_clflush_size )
        clflush((char*)addr + i);
}

void iommu_flush_cache_entry(void *addr)
{
    __iommu_flush_cache(addr, 8);
}

void iommu_flush_cache_page(void *addr)
{
    __iommu_flush_cache(addr, PAGE_SIZE_4K);
}

int nr_iommus;

/* context entry handling */
static struct context_entry * device_to_context_entry(struct iommu *iommu,
                                                      u8 bus, u8 devfn)
{
    struct root_entry *root;
    struct context_entry *context;
    unsigned long phy_addr;
    unsigned long flags;

    spin_lock_irqsave(&iommu->lock, flags);
    root = &iommu->root_entry[bus];
    if ( !root_present(*root) )
    {
        phy_addr = (unsigned long) alloc_xenheap_page();
        if ( !phy_addr )
        {
            spin_unlock_irqrestore(&iommu->lock, flags);
            return NULL;
        }
        memset((void *) phy_addr, 0, PAGE_SIZE);
        iommu_flush_cache_page((void *)phy_addr);
        phy_addr = virt_to_maddr((void *)phy_addr);
        set_root_value(*root, phy_addr);
        set_root_present(*root);
        iommu_flush_cache_entry(root);
    }
    phy_addr = (unsigned long) get_context_addr(*root);
    context = (struct context_entry *)maddr_to_virt(phy_addr);
    spin_unlock_irqrestore(&iommu->lock, flags);
    return &context[devfn];
}

static int device_context_mapped(struct iommu *iommu, u8 bus, u8 devfn)
{
    struct root_entry *root;
    struct context_entry *context;
    unsigned long phy_addr;
    int ret;
    unsigned long flags;

    spin_lock_irqsave(&iommu->lock, flags);
    root = &iommu->root_entry[bus];
    if ( !root_present(*root) )
    {
        ret = 0;
        goto out;
    }
    phy_addr = get_context_addr(*root);
    context = (struct context_entry *)maddr_to_virt(phy_addr);
    ret = context_present(context[devfn]);
 out:
    spin_unlock_irqrestore(&iommu->lock, flags);
    return ret;
}

static struct page_info *addr_to_dma_page(struct domain *domain, u64 addr)
{
    struct hvm_iommu *hd = domain_hvm_iommu(domain);
    int addr_width = agaw_to_width(hd->agaw);
    struct dma_pte *parent, *pte = NULL, *pgd;
    int level = agaw_to_level(hd->agaw);
    int offset;
    unsigned long flags;
    struct page_info *pg = NULL;
    u64 *vaddr = NULL;

    addr &= (((u64)1) << addr_width) - 1;
    spin_lock_irqsave(&hd->mapping_lock, flags);
    if ( !hd->pgd )
    {
        pgd = (struct dma_pte *)alloc_xenheap_page();
        if ( !pgd )
        {
            spin_unlock_irqrestore(&hd->mapping_lock, flags);
            return NULL;
        }
        memset(pgd, 0, PAGE_SIZE);
        hd->pgd = pgd;
    }

    parent = hd->pgd;
    while ( level > 1 )
    {
        offset = address_level_offset(addr, level);
        pte = &parent[offset];

        if ( dma_pte_addr(*pte) == 0 )
        {
            pg = alloc_domheap_page(NULL);
            vaddr = map_domain_page(page_to_mfn(pg));
            if ( !vaddr )
            {
                spin_unlock_irqrestore(&hd->mapping_lock, flags);
                return NULL;
            }
            memset(vaddr, 0, PAGE_SIZE);
            iommu_flush_cache_page(vaddr);

            dma_set_pte_addr(*pte, page_to_maddr(pg));

            /*
             * high level table always sets r/w, last level
             * page table control read/write
             */
            dma_set_pte_readable(*pte);
            dma_set_pte_writable(*pte);
            iommu_flush_cache_entry(pte);
        }
        else
        {
            pg = maddr_to_page(pte->val);
            vaddr = map_domain_page(page_to_mfn(pg));
            if ( !vaddr )
            {
                spin_unlock_irqrestore(&hd->mapping_lock, flags);
                return NULL;
            }
        }

        if ( parent != hd->pgd )
            unmap_domain_page(parent);

        if ( level == 2 && vaddr )
        {
            unmap_domain_page(vaddr);
            break;
        }

        parent = (struct dma_pte *)vaddr;
        vaddr = NULL;
        level--;
    }

    spin_unlock_irqrestore(&hd->mapping_lock, flags);
    return pg;
}

/* return address's page at specific level */
static struct page_info *dma_addr_level_page(struct domain *domain,
                                             u64 addr, int level)
{
    struct hvm_iommu *hd = domain_hvm_iommu(domain);
    struct dma_pte *parent, *pte = NULL;
    int total = agaw_to_level(hd->agaw);
    int offset;
    struct page_info *pg = NULL;

    parent = hd->pgd;
    while ( level <= total )
    {
        offset = address_level_offset(addr, total);
        pte = &parent[offset];
        if ( dma_pte_addr(*pte) == 0 )
        {
            if ( parent != hd->pgd )
                unmap_domain_page(parent);
            break;
        }

        pg = maddr_to_page(pte->val);
        if ( parent != hd->pgd )
            unmap_domain_page(parent);

        if ( level == total )
            return pg;

        parent = map_domain_page(page_to_mfn(pg));
        total--;
    }

    return NULL;
}

static void iommu_flush_write_buffer(struct iommu *iommu)
{
    u32 val;
    unsigned long flag;
    unsigned long start_time;

    if ( !cap_rwbf(iommu->cap) )
        return;
    val = iommu->gcmd | DMA_GCMD_WBF;

    spin_lock_irqsave(&iommu->register_lock, flag);
    dmar_writel(iommu->reg, DMAR_GCMD_REG, val);

    /* Make sure hardware complete it */
    start_time = jiffies;
    for ( ; ; )
    {
        val = dmar_readl(iommu->reg, DMAR_GSTS_REG);
        if ( !(val & DMA_GSTS_WBFS) )
            break;
        if ( time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT) )
            panic("DMAR hardware is malfunctional,"
                  " please disable IOMMU\n");
        cpu_relax();
    }
    spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determine if we need a write buffer flush */
static int __iommu_flush_context(
    struct iommu *iommu,
    u16 did, u16 source_id, u8 function_mask, u64 type,
    int non_present_entry_flush)
{
    u64 val = 0;
    unsigned long flag;
    unsigned long start_time;

    /*
     * In the non-present entry flush case, if hardware doesn't cache
     * non-present entry we do nothing and if hardware cache non-present
     * entry, we flush entries of domain 0 (the domain id is used to cache
     * any non-present entries)
     */
    if ( non_present_entry_flush )
    {
        if ( !cap_caching_mode(iommu->cap) )
            return 1;
        else
            did = 0;
    }

    /* use register invalidation */
    switch ( type )
    {
    case DMA_CCMD_GLOBAL_INVL:
        val = DMA_CCMD_GLOBAL_INVL;
        break;
    case DMA_CCMD_DOMAIN_INVL:
        val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
        break;
    case DMA_CCMD_DEVICE_INVL:
        val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
            |DMA_CCMD_SID(source_id)|DMA_CCMD_FM(function_mask);
        break;
    default:
        BUG();
    }
    val |= DMA_CCMD_ICC;

    spin_lock_irqsave(&iommu->register_lock, flag);
    dmar_writeq(iommu->reg, DMAR_CCMD_REG, val);

    /* Make sure hardware complete it */
    start_time = jiffies;
    for ( ; ; )
    {
        val = dmar_readq(iommu->reg, DMAR_CCMD_REG);
        if ( !(val & DMA_CCMD_ICC) )
            break;
        if ( time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT) )
            panic("DMAR hardware is malfunctional, please disable IOMMU\n");
        cpu_relax();
    }
    spin_unlock_irqrestore(&iommu->register_lock, flag);

    /* flush context entry will implictly flush write buffer */
    return 0;
}

static int inline iommu_flush_context_global(
    struct iommu *iommu, int non_present_entry_flush)
{
    return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
                                 non_present_entry_flush);
}

static int inline iommu_flush_context_domain(
    struct iommu *iommu, u16 did, int non_present_entry_flush)
{
    return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
                                 non_present_entry_flush);
}

static int inline iommu_flush_context_device(
    struct iommu *iommu, u16 did, u16 source_id,
    u8 function_mask, int non_present_entry_flush)
{
    return __iommu_flush_context(iommu, did, source_id, function_mask,
                                 DMA_CCMD_DEVICE_INVL,
                                 non_present_entry_flush);
}

/* return value determine if we need a write buffer flush */
static int __iommu_flush_iotlb(struct iommu *iommu, u16 did,
                               u64 addr, unsigned int size_order, u64 type,
                               int non_present_entry_flush)
{
    int tlb_offset = ecap_iotlb_offset(iommu->ecap);
    u64 val = 0, val_iva = 0;
    unsigned long flag;
    unsigned long start_time;

    /*
     * In the non-present entry flush case, if hardware doesn't cache
     * non-present entry we do nothing and if hardware cache non-present
     * entry, we flush entries of domain 0 (the domain id is used to cache
     * any non-present entries)
     */
    if ( non_present_entry_flush )
    {
        if ( !cap_caching_mode(iommu->cap) )
            return 1;
        else
            did = 0;
    }

    /* use register invalidation */
    switch ( type )
    {
    case DMA_TLB_GLOBAL_FLUSH:
        /* global flush doesn't need set IVA_REG */
        val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
        break;
    case DMA_TLB_DSI_FLUSH:
        val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
        break;
    case DMA_TLB_PSI_FLUSH:
        val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
        /* Note: always flush non-leaf currently */
        val_iva = size_order | addr;
        break;
    default:
        BUG();
    }
    /* Note: set drain read/write */
    if ( cap_read_drain(iommu->cap) )
        val |= DMA_TLB_READ_DRAIN;
    if ( cap_write_drain(iommu->cap) )
