⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 iommu_init.c

📁 xen虚拟机源代码安装包
💻 C
📖 第 1 页 / 共 2 页
字号:
/*
 * Copyright (C) 2007 Advanced Micro Devices, Inc.
 * Author: Leo Duran <leo.duran@amd.com>
 * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include <asm/amd-iommu.h>
#include <asm/msi.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
#include <asm-x86/fixmap.h>

/* Per-vector back-pointer so the MSI handlers can find their IOMMU. */
static struct amd_iommu *vector_to_iommu[NR_VECTORS];
/* Count of IOMMUs initialised so far; also indexes the fixmap MMIO slots. */
static int nr_amd_iommus;
static long amd_iommu_cmd_buffer_entries = IOMMU_CMD_BUFFER_DEFAULT_ENTRIES;
static long amd_iommu_event_log_entries = IOMMU_EVENT_LOG_DEFAULT_ENTRIES;

unsigned short ivrs_bdf_entries;
struct ivrs_mappings *ivrs_mappings;
struct list_head amd_iommu_head;
struct table_struct device_table;
extern void *int_remap_table;
extern spinlock_t int_remap_table_lock;

/*
 * Map this IOMMU's MMIO register window into a fixmap slot and clear it.
 * Returns 0 on success, -ENOMEM when the fixmap slots are exhausted.
 */
static int __init map_iommu_mmio_region(struct amd_iommu *iommu)
{
    unsigned long mfn;

    /*
     * NOTE(review): with slots indexed 0..MAX_AMD_IOMMUS-1 this check looks
     * off by one (">=" would reject the last over-allocation earlier) —
     * confirm MAX_AMD_IOMMUS semantics before changing.
     */
    if ( nr_amd_iommus > MAX_AMD_IOMMUS )
    {
        amd_iov_error("nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
        return -ENOMEM;
    }

    iommu->mmio_base = (void *)fix_to_virt(
        FIX_IOMMU_MMIO_BASE_0 + nr_amd_iommus * MMIO_PAGES_PER_IOMMU);
    mfn = (unsigned long)(iommu->mmio_base_phys >> PAGE_SHIFT);
    map_pages_to_xen((unsigned long)iommu->mmio_base, mfn,
                     MMIO_PAGES_PER_IOMMU, PAGE_HYPERVISOR_NOCACHE);

    memset(iommu->mmio_base, 0, IOMMU_MMIO_REGION_LENGTH);

    return 0;
}

/* Tear down the MMIO mapping established by map_iommu_mmio_region(). */
static void __init unmap_iommu_mmio_region(struct amd_iommu *iommu)
{
    if ( iommu->mmio_base )
    {
        iounmap(iommu->mmio_base);
        iommu->mmio_base = NULL;
    }
}

/*
 * Program the Device Table base address and size into the IOMMU's
 * MMIO Device Table Base Address register (low then high half).
 */
static void __init register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu)
{
    u64 addr_64, addr_lo, addr_hi;
    u32 entry;

    addr_64 = (u64)virt_to_maddr(iommu->dev_table.buffer);
    addr_lo = addr_64 & DMA_32BIT_MASK;
    addr_hi = addr_64 >> 32;

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_DEV_TABLE_BASE_LOW_MASK,
                         IOMMU_DEV_TABLE_BASE_LOW_SHIFT, &entry);
    /* Size field is encoded as (number of 4K pages) - 1. */
    set_field_in_reg_u32((iommu->dev_table.alloc_size / PAGE_SIZE) - 1,
                         entry, IOMMU_DEV_TABLE_SIZE_MASK,
                         IOMMU_DEV_TABLE_SIZE_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_LOW_OFFSET);

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_DEV_TABLE_BASE_HIGH_MASK,
                         IOMMU_DEV_TABLE_BASE_HIGH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_HIGH_OFFSET);
}

/*
 * Program the Command Buffer base address and its length (as a power of
 * two) into the IOMMU's Command Buffer Base Address register.
 */
static void __init register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu)
{
    u64 addr_64, addr_lo, addr_hi;
    u32 power_of2_entries;
    u32 entry;

    addr_64 = (u64)virt_to_maddr(iommu->cmd_buffer.buffer);
    addr_lo = addr_64 & DMA_32BIT_MASK;
    addr_hi = addr_64 >> 32;

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_CMD_BUFFER_BASE_LOW_MASK,
                         IOMMU_CMD_BUFFER_BASE_LOW_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_LOW_OFFSET);

    /* Length is log2 of the entry count: page order + entries-per-page order. */
    power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.alloc_size) +
        IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_CMD_BUFFER_BASE_HIGH_MASK,
                         IOMMU_CMD_BUFFER_BASE_HIGH_SHIFT, &entry);
    set_field_in_reg_u32(power_of2_entries, entry,
                         IOMMU_CMD_BUFFER_LENGTH_MASK,
                         IOMMU_CMD_BUFFER_LENGTH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_HIGH_OFFSET);
}

/*
 * Program the Event Log base address and its length (as a power of two)
 * into the IOMMU's Event Log Base Address register.
 */
static void __init register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu)
{
    u64 addr_64, addr_lo, addr_hi;
    u32 power_of2_entries;
    u32 entry;

    addr_64 = (u64)virt_to_maddr(iommu->event_log.buffer);
    addr_lo = addr_64 & DMA_32BIT_MASK;
    addr_hi = addr_64 >> 32;

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_EVENT_LOG_BASE_LOW_MASK,
                         IOMMU_EVENT_LOG_BASE_LOW_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_LOW_OFFSET);

    power_of2_entries = get_order_from_bytes(iommu->event_log.alloc_size) +
                        IOMMU_EVENT_LOG_POWER_OF2_ENTRIES_PER_PAGE;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_EVENT_LOG_BASE_HIGH_MASK,
                         IOMMU_EVENT_LOG_BASE_HIGH_SHIFT, &entry);
    set_field_in_reg_u32(power_of2_entries, entry,
                         IOMMU_EVENT_LOG_LENGTH_MASK,
                         IOMMU_EVENT_LOG_LENGTH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_HIGH_OFFSET);
}

/*
 * Enable/disable address translation in the IOMMU control register.
 * HT tunnel translation is only enabled when the hardware supports it.
 */
static void __init set_iommu_translation_control(struct amd_iommu *iommu,
                                                 int enable)
{
    u32 entry;

    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
    /*
     * Fix: both ternary arms previously read IOMMU_CONTROL_ENABLED, so the
     * disable path was a no-op; the second arm must be DISABLED (matching
     * the pattern in set_iommu_event_log_control()).
     */
    set_field_in_reg_u32(iommu->ht_tunnel_support ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_MASK,
                         IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT, &entry);
    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_TRANSLATION_ENABLE_MASK,
                         IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
}

/* Enable/disable command buffer processing in the IOMMU control register. */
static void __init set_iommu_command_buffer_control(struct amd_iommu *iommu,
                                                    int enable)
{
    u32 entry;

    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
    /* Fix: second ternary arm was IOMMU_CONTROL_ENABLED (disable no-op). */
    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_MASK,
                         IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
}

/*
 * Program the exclusion range (addresses the IOMMU passes through
 * untranslated): limit first, then base together with the allow-all and
 * enable bits, which live in the low base register.
 */
static void __init register_iommu_exclusion_range(struct amd_iommu *iommu)
{
    u64 addr_lo, addr_hi;
    u32 entry;

    addr_lo = iommu->exclusion_limit & DMA_32BIT_MASK;
    addr_hi = iommu->exclusion_limit >> 32;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_EXCLUSION_LIMIT_HIGH_MASK,
                         IOMMU_EXCLUSION_LIMIT_HIGH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_LIMIT_HIGH_OFFSET);

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_EXCLUSION_LIMIT_LOW_MASK,
                         IOMMU_EXCLUSION_LIMIT_LOW_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_LIMIT_LOW_OFFSET);

    addr_lo = iommu->exclusion_base & DMA_32BIT_MASK;
    addr_hi = iommu->exclusion_base >> 32;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_EXCLUSION_BASE_HIGH_MASK,
                         IOMMU_EXCLUSION_BASE_HIGH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_BASE_HIGH_OFFSET);

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_EXCLUSION_BASE_LOW_MASK,
                         IOMMU_EXCLUSION_BASE_LOW_SHIFT, &entry);
    set_field_in_reg_u32(iommu->exclusion_allow_all, entry,
                         IOMMU_EXCLUSION_ALLOW_ALL_MASK,
                         IOMMU_EXCLUSION_ALLOW_ALL_SHIFT, &entry);
    /* Enable bit written last so the range is complete before it arms. */
    set_field_in_reg_u32(iommu->exclusion_enable, entry,
                         IOMMU_EXCLUSION_RANGE_ENABLE_MASK,
                         IOMMU_EXCLUSION_RANGE_ENABLE_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_BASE_LOW_OFFSET);
}

/*
 * Enable/disable event logging plus its interrupt and the completion-wait
 * interrupt in the IOMMU control register.
 */
static void __init set_iommu_event_log_control(struct amd_iommu *iommu,
                                               int enable)
{
    u32 entry;

    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_EVENT_LOG_ENABLE_MASK,
                         IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);

    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_EVENT_LOG_INT_MASK,
                         IOMMU_CONTROL_EVENT_LOG_INT_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);

    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_COMP_WAIT_INT_MASK,
                         IOMMU_CONTROL_COMP_WAIT_INT_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
}

/*
 * Copy one entry out of the hardware event log into event[] and advance
 * the head pointer. Returns 0 on success, -EFAULT when the log is empty.
 */
static int amd_iommu_read_event_log(struct amd_iommu *iommu, u32 event[])
{
    u32 tail, head, *event_log;
    int i;

    BUG_ON( !iommu || !event );

    /* make sure there's an entry in the log */
    tail = get_field_from_reg_u32(
                readl(iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET),
                IOMMU_EVENT_LOG_TAIL_MASK,
                IOMMU_EVENT_LOG_TAIL_SHIFT);
    if ( tail != iommu->event_log_head )
    {
        /* read event log entry */
        event_log = (u32 *)(iommu->event_log.buffer +
                                        (iommu->event_log_head *
                                        IOMMU_EVENT_LOG_ENTRY_SIZE));
        for ( i = 0; i < IOMMU_EVENT_LOG_U32_PER_ENTRY; i++ )
            event[i] = event_log[i];

        /* wrap around at the end of the ring. */
        if ( ++iommu->event_log_head == iommu->event_log.entries )
            iommu->event_log_head = 0;

        /* update head pointer */
        set_field_in_reg_u32(iommu->event_log_head, 0,
                             IOMMU_EVENT_LOG_HEAD_MASK,
                             IOMMU_EVENT_LOG_HEAD_SHIFT, &head);
        writel(head, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
        return 0;
    }

    return -EFAULT;
}

/*
 * Write the MSI data word (vector + fixed delivery, edge trigger) into
 * the IOMMU's PCI MSI capability.
 */
static void amd_iommu_msi_data_init(struct amd_iommu *iommu)
{
    u32 msi_data;
    u8 bus = (iommu->bdf >> 8) & 0xff;
    u8 dev = PCI_SLOT(iommu->bdf & 0xff);
    u8 func = PCI_FUNC(iommu->bdf & 0xff);
    int vector = iommu->vector;

    msi_data = MSI_DATA_TRIGGER_EDGE |
        MSI_DATA_LEVEL_ASSERT |
        MSI_DATA_DELIVERY_FIXED |
        MSI_DATA_VECTOR(vector);

    pci_conf_write32(bus, dev, func,
        iommu->msi_cap + PCI_MSI_DATA_64, msi_data);
}

/*
 * Write the MSI address (physical destination = phy_cpu) into the
 * IOMMU's PCI MSI capability.
 */
static void amd_iommu_msi_addr_init(struct amd_iommu *iommu, int phy_cpu)
{
    int bus = (iommu->bdf >> 8) & 0xff;
    int dev = PCI_SLOT(iommu->bdf & 0xff);
    int func = PCI_FUNC(iommu->bdf & 0xff);

    u32 address_hi = 0;
    u32 address_lo = MSI_ADDR_HEADER |
            MSI_ADDR_DESTMODE_PHYS |
            MSI_ADDR_REDIRECTION_CPU |
            MSI_ADDR_DEST_ID(phy_cpu);

    pci_conf_write32(bus, dev, func,
        iommu->msi_cap + PCI_MSI_ADDRESS_LO, address_lo);
    pci_conf_write32(bus, dev, func,
        iommu->msi_cap + PCI_MSI_ADDRESS_HI, address_hi);
}

/*
 * Set or clear the MSI enable bit in the IOMMU's PCI MSI capability.
 * 'flag' is the enable bit value to OR in (0 disables).
 */
static void amd_iommu_msi_enable(struct amd_iommu *iommu, int flag)
{
    u16 control;
    int bus = (iommu->bdf >> 8) & 0xff;
    int dev = PCI_SLOT(iommu->bdf & 0xff);
    int func = PCI_FUNC(iommu->bdf & 0xff);

    control = pci_conf_read16(bus, dev, func,
        iommu->msi_cap + PCI_MSI_FLAGS);
    control &= ~(1);    /* clear MSI enable (bit 0) before applying flag */
    if ( flag )
        control |= flag;

    pci_conf_write16(bus, dev, func,
        iommu->msi_cap + PCI_MSI_FLAGS, control);
}

/* NOTE(review): source chunk ends here (page 1 of 2) — function body truncated. */
static void iommu_msi_unmask(unsigned int vector)
{
    unsigned long flags;
    struct amd_iommu *iommu = vector_to_iommu[vector];

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -