📄 iommu_acpi.c
字号:
/*
 * Copyright (C) 2007 Advanced Micro Devices, Inc.
 * Author: Leo Duran <leo.duran@amd.com>
 * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Parsing of the ACPI IVRS table's IVMD blocks for AMD-Vi (AMD IOMMU):
 * IVMD blocks describe memory ranges that must be excluded from IOMMU
 * translation or unity-mapped (identity-mapped) for particular devices.
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <asm/amd-iommu.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
#include <asm/hvm/svm/amd-iommu-acpi.h>

/* State owned by the AMD IOMMU initialisation code (defined elsewhere). */
extern unsigned long amd_iommu_page_entries;
extern unsigned short ivrs_bdf_entries;      /* number of ivrs_mappings[] slots */
extern struct ivrs_mappings *ivrs_mappings;  /* per-BDF device mapping table */
extern unsigned short last_bdf;

/*
 * Find the IOMMU whose own PCI BDF and PCI capability offset match the
 * (bdf, cap_offset) pair named by an IVMD type-2 ("IOMMU") block.
 * Returns NULL if no registered IOMMU matches.
 */
static struct amd_iommu * __init find_iommu_from_bdf_cap(
    u16 bdf, u8 cap_offset)
{
    struct amd_iommu *iommu;

    for_each_amd_iommu ( iommu )
        if ( (iommu->bdf == bdf) && (iommu->cap_offset == cap_offset) )
            return iommu;

    return NULL;
}

/*
 * Record [base, limit] as this IOMMU's hardware exclusion range,
 * widening any exclusion range that was already enabled so the stored
 * range covers both the old and the new one.
 */
static void __init reserve_iommu_exclusion_range(
    struct amd_iommu *iommu, uint64_t base, uint64_t limit)
{
    /* need to extend exclusion range? */
    if ( iommu->exclusion_enable )
    {
        if ( iommu->exclusion_base < base )
            base = iommu->exclusion_base;
        if ( iommu->exclusion_limit > limit )
            limit = iommu->exclusion_limit;
    }

    iommu->exclusion_enable = IOMMU_CONTROL_ENABLED;
    iommu->exclusion_base = base;
    iommu->exclusion_limit = limit;
}

/*
 * Same as reserve_iommu_exclusion_range(), but additionally marks the
 * exclusion range as applying to all devices behind this IOMMU.
 */
static void __init reserve_iommu_exclusion_range_all(
    struct amd_iommu *iommu,
    unsigned long base, unsigned long limit)
{
    reserve_iommu_exclusion_range(iommu, base, limit);
    iommu->exclusion_allow_all = IOMMU_CONTROL_ENABLED;
}

/*
 * Record a unity-mapped (identity-mapped) range for device 'bdf' in
 * ivrs_mappings[], merging with a previously recorded range so the stored
 * range is the hull of both.  iw/ir are the write/read permissions.
 */
static void __init reserve_unity_map_for_device(
    u16 bdf, unsigned long base,
    unsigned long length, u8 iw, u8 ir)
{
    unsigned long old_top, new_top;

    /* need to extend unity-mapped range? */
    if ( ivrs_mappings[bdf].unity_map_enable )
    {
        old_top = ivrs_mappings[bdf].addr_range_start +
                  ivrs_mappings[bdf].addr_range_length;
        new_top = base + length;
        if ( old_top > new_top )
            new_top = old_top;
        if ( ivrs_mappings[bdf].addr_range_start < base )
            base = ivrs_mappings[bdf].addr_range_start;
        length = new_top - base;
    }

    /*
     * extend r/w permissions and keep aggregate
     * NOTE(review): despite the comment, the assignments below OVERWRITE
     * any previously recorded permissions instead of OR-ing them in; a
     * second, narrower IVMD entry for the same BDF could therefore revoke
     * a permission granted earlier — confirm against the intended IVRS
     * semantics before relying on aggregation here.
     */
    ivrs_mappings[bdf].write_permission = iw;
    ivrs_mappings[bdf].read_permission = ir;
    ivrs_mappings[bdf].unity_map_enable = IOMMU_CONTROL_ENABLED;
    ivrs_mappings[bdf].addr_range_start = base;
    ivrs_mappings[bdf].addr_range_length = length;
}

/*
 * Handle an IVMD "all devices" block: unity-map the part of [base, limit]
 * that lies inside the IOMMU-translated address space for every BDF, and
 * program the part above it as a hardware exclusion range on every IOMMU.
 * Always returns 0.
 */
static int __init register_exclusion_range_for_all_devices(
    unsigned long base, unsigned long limit, u8 iw, u8 ir)
{
    unsigned long range_top, iommu_top, length;
    struct amd_iommu *iommu;
    u16 bdf;

    /* is part of exclusion range inside of IOMMU virtual address space? */
    /* note: 'limit' parameter is assumed to be page-aligned */
    range_top = limit + PAGE_SIZE;
    iommu_top = max_page * PAGE_SIZE;
    if ( base < iommu_top )
    {
        if ( range_top > iommu_top )
            range_top = iommu_top;
        length = range_top - base;
        /* reserve r/w unity-mapped page entries for devices */
        /* note: these entries are part of the exclusion range */
        for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
            reserve_unity_map_for_device(bdf, base, length, iw, ir);
        /* push 'base' just outside of virtual address space */
        base = iommu_top;
    }

    /* register IOMMU exclusion range settings */
    if ( limit >= iommu_top )
    {
        for_each_amd_iommu( iommu )
            reserve_iommu_exclusion_range_all(iommu, base, limit);
    }

    return 0;
}

/*
 * Handle an IVMD "select" block for a single device: as above, but the
 * unity map covers only 'bdf' (and its DTE requestor alias), and the
 * exclusion range is enabled in that device's DTE on its own IOMMU.
 * Returns 0 on success, -ENODEV if no IOMMU serves the device.
 */
static int __init register_exclusion_range_for_device(
    u16 bdf, unsigned long base, unsigned long limit, u8 iw, u8 ir)
{
    unsigned long range_top, iommu_top, length;
    struct amd_iommu *iommu;
    u16 bus, devfn, req;

    bus = bdf >> 8;
    devfn = bdf & 0xFF;
    iommu = find_iommu_for_device(bus, devfn);
    if ( !iommu )
    {
        amd_iov_error("IVMD Error: No IOMMU for Dev_Id 0x%x!\n", bdf);
        return -ENODEV;
    }
    /* requestor id the device's transactions actually carry (alias) */
    req = ivrs_mappings[bdf].dte_requestor_id;

    /* note: 'limit' parameter is assumed to be page-aligned */
    range_top = limit + PAGE_SIZE;
    iommu_top = max_page * PAGE_SIZE;
    if ( base < iommu_top )
    {
        if ( range_top > iommu_top )
            range_top = iommu_top;
        length = range_top - base;
        /* reserve unity-mapped page entries for device */
        /* note: these entries are part of the exclusion range */
        reserve_unity_map_for_device(bdf, base, length, iw, ir);
        reserve_unity_map_for_device(req, base, length, iw, ir);
        /* push 'base' just outside of virtual address space */
        base = iommu_top;
    }

    /* register IOMMU exclusion range settings for device */
    if ( limit >= iommu_top )
    {
        reserve_iommu_exclusion_range(iommu, base, limit);
        ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_ENABLED;
        ivrs_mappings[req].dte_allow_exclusion = IOMMU_CONTROL_ENABLED;
    }

    return 0;
}

/*
 * Handle an IVMD "IOMMU" block: apply the range to every device that is
 * served by the given IOMMU (plus each device's requestor alias), then
 * program the IOMMU-wide exclusion range.  Always returns 0.
 */
static int __init register_exclusion_range_for_iommu_devices(
    struct amd_iommu *iommu,
    unsigned long base, unsigned long limit, u8 iw, u8 ir)
{
    unsigned long range_top, iommu_top, length;
    u16 bus, devfn, bdf, req;

    /* is part of exclusion range inside of IOMMU virtual address space? */
    /* note: 'limit' parameter is assumed to be page-aligned */
    range_top = limit + PAGE_SIZE;
    iommu_top = max_page * PAGE_SIZE;
    if ( base < iommu_top )
    {
        if ( range_top > iommu_top )
            range_top = iommu_top;
        length = range_top - base;
        /* reserve r/w unity-mapped page entries for devices */
        /* note: these entries are part of the exclusion range */
        for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
        {
            bus = bdf >> 8;
            devfn = bdf & 0xFF;
            if ( iommu == find_iommu_for_device(bus, devfn) )
            {
                reserve_unity_map_for_device(bdf, base, length, iw, ir);
                req = ivrs_mappings[bdf].dte_requestor_id;
                reserve_unity_map_for_device(req, base, length, iw, ir);
            }
        }

        /* push 'base' just outside of virtual address space */
        base = iommu_top;
    }

    /* register IOMMU exclusion range settings */
    if ( limit >= iommu_top )
        reserve_iommu_exclusion_range_all(iommu, base, limit);

    return 0;
}

/*
 * IVMD type 1 (select): validate the block's device id and register the
 * range for that single device.  Returns 0 or -ENODEV.
 */
static int __init parse_ivmd_device_select(
    struct acpi_ivmd_block_header *ivmd_block,
    unsigned long base, unsigned long limit, u8 iw, u8 ir)
{
    u16 bdf;

    bdf = ivmd_block->header.dev_id;
    if ( bdf >= ivrs_bdf_entries )
    {
        amd_iov_error("IVMD Error: Invalid Dev_Id 0x%x\n", bdf);
        return -ENODEV;
    }

    return register_exclusion_range_for_device(bdf, base, limit, iw, ir);
}

/*
 * IVMD type 3 (range): validate [first_bdf, last_bdf] and register the
 * range for every device id in it, stopping at the first error.
 * Returns 0 or the first -ENODEV encountered.
 *
 * NOTE(review): the local 'last_bdf' shadows the file-scope extern of the
 * same name; harmless here, but worth renaming to avoid confusion.
 */
static int __init parse_ivmd_device_range(
    struct acpi_ivmd_block_header *ivmd_block,
    unsigned long base, unsigned long limit, u8 iw, u8 ir)
{
    u16 first_bdf, last_bdf, bdf;
    int error;

    first_bdf = ivmd_block->header.dev_id;
    if ( first_bdf >= ivrs_bdf_entries )
    {
        amd_iov_error(
            "IVMD Error: Invalid Range_First Dev_Id 0x%x\n", first_bdf);
        return -ENODEV;
    }

    last_bdf = ivmd_block->last_dev_id;
    if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
    {
        amd_iov_error(
            "IVMD Error: Invalid Range_Last Dev_Id 0x%x\n", last_bdf);
        return -ENODEV;
    }

    for ( bdf = first_bdf, error = 0; (bdf <= last_bdf) && !error; bdf++ )
        error = register_exclusion_range_for_device(
            bdf, base, limit, iw, ir);

    return error;
}

/*
 * IVMD type 2 (IOMMU): look up the target IOMMU by its BDF/capability
 * offset and register the range for all devices behind it.
 * Returns 0 or -ENODEV if the IOMMU is not found.
 */
static int __init parse_ivmd_device_iommu(
    struct acpi_ivmd_block_header *ivmd_block,
    unsigned long base, unsigned long limit, u8 iw, u8 ir)
{
    struct amd_iommu *iommu;

    /* find target IOMMU */
    iommu = find_iommu_from_bdf_cap(ivmd_block->header.dev_id,
                                    ivmd_block->cap_offset);
    if ( !iommu )
    {
        amd_iov_error("IVMD Error: No IOMMU for Dev_Id 0x%x Cap 0x%x\n",
                      ivmd_block->header.dev_id, ivmd_block->cap_offset);
        return -ENODEV;
    }

    return register_exclusion_range_for_iommu_devices(
        iommu, base, limit, iw, ir);
}

/*
 * Parse one IVMD block: validate its length, page-align the described
 * physical range, and derive the write/read permissions (iw/ir) from the
 * block's flags (exclusion range implies r/w; unity mapping carries
 * explicit IW/IR bits; anything else is rejected with -ENODEV).
 *
 * NOTE(review): this function is truncated at the end of the visible
 * chunk — the per-type dispatch on ivmd_block->header.type (and the
 * function's closing brace) lies beyond this excerpt and is left as-is.
 */
static int __init parse_ivmd_block(struct acpi_ivmd_block_header *ivmd_block)
{
    unsigned long start_addr, mem_length, base, limit;
    u8 iw, ir;

    if ( ivmd_block->header.length <
         sizeof(struct acpi_ivmd_block_header) )
    {
        amd_iov_error("IVMD Error: Invalid Block Length!\n");
        return -ENODEV;
    }

    start_addr = (unsigned long)ivmd_block->start_addr;
    mem_length = (unsigned long)ivmd_block->mem_length;
    base = start_addr & PAGE_MASK;
    limit = (start_addr + mem_length - 1) & PAGE_MASK;

    amd_iov_info("IVMD Block: Type 0x%x\n",ivmd_block->header.type);
    amd_iov_info(" Start_Addr_Phys 0x%lx\n", start_addr);
    amd_iov_info(" Mem_Length 0x%lx\n", mem_length);

    if ( get_field_from_byte(ivmd_block->header.flags,
                             AMD_IOMMU_ACPI_EXCLUSION_RANGE_MASK,
                             AMD_IOMMU_ACPI_EXCLUSION_RANGE_SHIFT) )
        /* exclusion range: treated as unity-mapped with full r/w */
        iw = ir = IOMMU_CONTROL_ENABLED;
    else if ( get_field_from_byte(ivmd_block->header.flags,
                                  AMD_IOMMU_ACPI_UNITY_MAPPING_MASK,
                                  AMD_IOMMU_ACPI_UNITY_MAPPING_SHIFT) )
    {
        /* unity mapping: permissions come from the IW/IR flag bits */
        iw = get_field_from_byte(ivmd_block->header.flags,
                                 AMD_IOMMU_ACPI_IW_PERMISSION_MASK,
                                 AMD_IOMMU_ACPI_IW_PERMISSION_SHIFT);
        ir = get_field_from_byte(ivmd_block->header.flags,
                                 AMD_IOMMU_ACPI_IR_PERMISSION_MASK,
                                 AMD_IOMMU_ACPI_IR_PERMISSION_SHIFT);
    }
    else
    {
        amd_iov_error("IVMD Error: Invalid Flag Field!\n");
        return -ENODEV;
    }
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -