📄 iommu_acpi.c
字号:
    /*
     * NOTE(review): tail of an enclosing IVMD-block parser whose start is
     * outside this view.  Dispatches on the IVMD entry type to the matching
     * exclusion-range handler; unknown types are rejected with -ENODEV.
     */
    switch( ivmd_block->header.type )
    {
    case AMD_IOMMU_ACPI_IVMD_ALL_TYPE:
        return register_exclusion_range_for_all_devices(
            base, limit, iw, ir);
    case AMD_IOMMU_ACPI_IVMD_ONE_TYPE:
        return parse_ivmd_device_select(ivmd_block,
                                        base, limit, iw, ir);
    case AMD_IOMMU_ACPI_IVMD_RANGE_TYPE:
        return parse_ivmd_device_range(ivmd_block,
                                       base, limit, iw, ir);
    case AMD_IOMMU_ACPI_IVMD_IOMMU_TYPE:
        return parse_ivmd_device_iommu(ivmd_block,
                                       base, limit, iw, ir);
    default:
        amd_iov_error("IVMD Error: Invalid Block Type!\n");
        return -ENODEV;
    }
}

/*
 * Validate a padding entry inside an IVHD block.
 *
 * Returns pad_length (the number of bytes the caller should skip) on
 * success, or 0 if the padding would run past the block's declared
 * length, which the caller treats as a parse failure.
 */
static u16 __init parse_ivhd_device_padding(
    u16 pad_length, u16 header_length, u16 block_length)
{
    /* Entry must fit within the remaining space of the IVHD block. */
    if ( header_length < (block_length + pad_length) )
    {
        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
        return 0;
    }

    return pad_length;
}

/*
 * Handle a "select" IVHD entry: apply the entry's flags to a single
 * device and record which IOMMU owns it.
 *
 * Returns the size of the consumed entry header, or 0 if the device id
 * is outside the ivrs_mappings[] table.
 */
static u16 __init parse_ivhd_device_select(
    union acpi_ivhd_device *ivhd_device,
    struct amd_iommu *iommu)
{
    u16 bdf;

    bdf = ivhd_device->header.dev_id;
    if ( bdf >= ivrs_bdf_entries )
    {
        amd_iov_error("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf);
        return 0;
    }

    /* override flags for device */
    ivrs_mappings[bdf].dte_sys_mgt_enable =
        get_field_from_byte(ivhd_device->header.flags,
                            AMD_IOMMU_ACPI_SYS_MGT_MASK,
                            AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
    ivrs_mappings[bdf].iommu = iommu;

    return sizeof(struct acpi_ivhd_device_header);
}

/*
 * Handle a "range" IVHD entry: apply the entry's flags to every device
 * from the start id through the id carried in the range-end trailer.
 *
 * Returns the size of the consumed range entry, or 0 on any validation
 * failure (entry overruns the block, missing range-end trailer, or an
 * id outside / not above the range start).
 */
static u16 __init parse_ivhd_device_range(
    union acpi_ivhd_device *ivhd_device,
    u16 header_length, u16 block_length, struct amd_iommu *iommu)
{
    u16 dev_length, first_bdf, last_bdf, bdf;
    u8 sys_mgt;

    dev_length = sizeof(struct acpi_ivhd_device_range);
    if ( header_length < (block_length + dev_length) )
    {
        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
        return 0;
    }

    /* A range entry must be terminated by a range-end trailer. */
    if ( ivhd_device->range.trailer.type !=
         AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
    {
        amd_iov_error("IVHD Error: "
                      "Invalid Range: End_Type 0x%x\n",
                      ivhd_device->range.trailer.type);
        return 0;
    }

    first_bdf = ivhd_device->header.dev_id;
    if ( first_bdf >= ivrs_bdf_entries )
    {
        amd_iov_error(
            "IVHD Error: Invalid Range: First Dev_Id 0x%x\n", first_bdf);
        return 0;
    }

    last_bdf = ivhd_device->range.trailer.dev_id;
    if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
    {
        amd_iov_error(
            "IVHD Error: Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
        return 0;
    }

    amd_iov_info(" Dev_Id Range: 0x%x -> 0x%x\n", first_bdf, last_bdf);

    /* override flags for range of devices */
    sys_mgt = get_field_from_byte(ivhd_device->header.flags,
                                  AMD_IOMMU_ACPI_SYS_MGT_MASK,
                                  AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
    for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
    {
        ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
        ivrs_mappings[bdf].iommu = iommu;
    }

    return dev_length;
}

/*
 * Handle an "alias" IVHD entry: requests for the device are issued
 * under the alias id, so record the alias as the device's requestor id
 * and apply the entry's flags to both the device and the alias.
 *
 * Returns the size of the consumed alias entry, or 0 on validation
 * failure (entry overruns the block, or either id is out of range).
 */
static u16 __init parse_ivhd_device_alias(
    union acpi_ivhd_device *ivhd_device,
    u16 header_length, u16 block_length, struct amd_iommu *iommu)
{
    u16 dev_length, alias_id, bdf;

    dev_length = sizeof(struct acpi_ivhd_device_alias);
    if ( header_length < (block_length + dev_length) )
    {
        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
        return 0;
    }

    bdf = ivhd_device->header.dev_id;
    if ( bdf >= ivrs_bdf_entries )
    {
        amd_iov_error("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf);
        return 0;
    }

    alias_id = ivhd_device->alias.dev_id;
    if ( alias_id >= ivrs_bdf_entries )
    {
        amd_iov_error("IVHD Error: Invalid Alias Dev_Id 0x%x\n", alias_id);
        return 0;
    }

    amd_iov_info(" Dev_Id Alias: 0x%x\n", alias_id);

    /* override requestor_id and flags for device */
    ivrs_mappings[bdf].dte_requestor_id = alias_id;
    ivrs_mappings[bdf].dte_sys_mgt_enable =
        get_field_from_byte(ivhd_device->header.flags,
                            AMD_IOMMU_ACPI_SYS_MGT_MASK,
                            AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
    ivrs_mappings[bdf].iommu = iommu;

    /* The alias id inherits the same flags and owning IOMMU. */
    ivrs_mappings[alias_id].dte_sys_mgt_enable =
        ivrs_mappings[bdf].dte_sys_mgt_enable;
    ivrs_mappings[alias_id].iommu = iommu;

    return dev_length;
}

/*
 * Handle an "alias range" IVHD entry: every device in the range shares
 * a single requestor alias.  The alias id, flags, and owning IOMMU are
 * applied to each device in the range and to the alias itself.
 *
 * Returns the size of the consumed entry, or 0 on validation failure.
 */
static u16 __init parse_ivhd_device_alias_range(
    union acpi_ivhd_device *ivhd_device,
    u16 header_length, u16 block_length, struct amd_iommu *iommu)
{
    u16 dev_length, first_bdf, last_bdf, alias_id, bdf;
    u8 sys_mgt;

    dev_length = sizeof(struct acpi_ivhd_device_alias_range);
    if ( header_length < (block_length + dev_length) )
    {
        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
        return 0;
    }

    /* An alias-range entry must be terminated by a range-end trailer. */
    if ( ivhd_device->alias_range.trailer.type !=
         AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
    {
        amd_iov_error("IVHD Error: "
                      "Invalid Range: End_Type 0x%x\n",
                      ivhd_device->alias_range.trailer.type);
        return 0;
    }

    first_bdf = ivhd_device->header.dev_id;
    if ( first_bdf >= ivrs_bdf_entries )
    {
        amd_iov_error(
            "IVHD Error: Invalid Range: First Dev_Id 0x%x\n", first_bdf);
        return 0;
    }

    last_bdf = ivhd_device->alias_range.trailer.dev_id;
    if ( last_bdf >= ivrs_bdf_entries || last_bdf <= first_bdf )
    {
        amd_iov_error(
            "IVHD Error: Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
        return 0;
    }

    alias_id = ivhd_device->alias_range.alias.dev_id;
    if ( alias_id >= ivrs_bdf_entries )
    {
        amd_iov_error("IVHD Error: Invalid Alias Dev_Id 0x%x\n", alias_id);
        return 0;
    }

    amd_iov_info(" Dev_Id Range: 0x%x -> 0x%x\n", first_bdf, last_bdf);
    amd_iov_info(" Dev_Id Alias: 0x%x\n", alias_id);

    /* override requestor_id and flags for range of devices */
    sys_mgt = get_field_from_byte(ivhd_device->header.flags,
                                  AMD_IOMMU_ACPI_SYS_MGT_MASK,
                                  AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
    for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
    {
        ivrs_mappings[bdf].dte_requestor_id = alias_id;
        ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
        ivrs_mappings[bdf].iommu = iommu;
    }

    /* The shared alias id inherits the same flags and owning IOMMU. */
    ivrs_mappings[alias_id].dte_sys_mgt_enable = sys_mgt;
    ivrs_mappings[alias_id].iommu = iommu;

    return dev_length;
}

/*
 * Handle an "extended" IVHD entry.  As visible here it behaves like a
 * select entry with a larger on-disk size: apply the entry's flags to a
 * single device and record its owning IOMMU.  (The extended payload
 * bytes are not consumed by this parser — presumably handled elsewhere
 * or ignored; confirm against the IVRS spec.)
 *
 * Returns the size of the consumed entry, or 0 on validation failure.
 */
static u16 __init parse_ivhd_device_extended(
    union acpi_ivhd_device *ivhd_device,
    u16 header_length, u16 block_length, struct amd_iommu *iommu)
{
    u16 dev_length, bdf;

    dev_length = sizeof(struct acpi_ivhd_device_extended);
    if ( header_length < (block_length + dev_length) )
    {
        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
        return 0;
    }

    bdf = ivhd_device->header.dev_id;
    if ( bdf >= ivrs_bdf_entries )
    {
        amd_iov_error("IVHD Error: Invalid Device_Entry Dev_Id 0x%x\n", bdf);
        return 0;
    }

    /* override flags for device */
    ivrs_mappings[bdf].dte_sys_mgt_enable =
        get_field_from_byte(ivhd_device->header.flags,
                            AMD_IOMMU_ACPI_SYS_MGT_MASK,
                            AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
    ivrs_mappings[bdf].iommu = iommu;

    return dev_length;
}

/*
 * Handle an "extended range" IVHD entry: like a range entry but with
 * the extended entry size.  Applies the entry's flags and owning IOMMU
 * to every device from the start id through the trailer's id.
 *
 * Returns the size of the consumed entry, or 0 on validation failure.
 */
static u16 __init parse_ivhd_device_extended_range(
    union acpi_ivhd_device *ivhd_device,
    u16 header_length, u16 block_length, struct amd_iommu *iommu)
{
    u16 dev_length, first_bdf, last_bdf, bdf;
    u8 sys_mgt;

    dev_length = sizeof(struct acpi_ivhd_device_extended_range);
    if ( header_length < (block_length + dev_length) )
    {
        amd_iov_error("IVHD Error: Invalid Device_Entry Length!\n");
        return 0;
    }

    /* An extended-range entry must be terminated by a range-end trailer. */
    if ( ivhd_device->extended_range.trailer.type !=
         AMD_IOMMU_ACPI_IVHD_DEV_RANGE_END )
    {
        amd_iov_error("IVHD Error: "
                      "Invalid Range: End_Type 0x%x\n",
                      ivhd_device->extended_range.trailer.type);
        return 0;
    }

    first_bdf = ivhd_device->header.dev_id;
    if ( first_bdf >= ivrs_bdf_entries )
    {
        amd_iov_error(
            "IVHD Error: Invalid Range: First Dev_Id 0x%x\n", first_bdf);
        return 0;
    }

    last_bdf = ivhd_device->extended_range.trailer.dev_id;
    if ( (last_bdf >= ivrs_bdf_entries) || (last_bdf <= first_bdf) )
    {
        amd_iov_error(
            "IVHD Error: Invalid Range: Last Dev_Id 0x%x\n", last_bdf);
        return 0;
    }

    amd_iov_info(" Dev_Id Range: 0x%x -> 0x%x\n", first_bdf, last_bdf);

    /* override flags for range of devices */
    sys_mgt = get_field_from_byte(ivhd_device->header.flags,
                                  AMD_IOMMU_ACPI_SYS_MGT_MASK,
                                  AMD_IOMMU_ACPI_SYS_MGT_SHIFT);
    for ( bdf = first_bdf; bdf <= last_bdf; bdf++ )
    {
        ivrs_mappings[bdf].dte_sys_mgt_enable = sys_mgt;
        ivrs_mappings[bdf].iommu = iommu;
    }

    return dev_length;
}

/*
 * NOTE(review): start of the IVHD block parser; the definition is
 * truncated here (cut mid-call) and continues outside this view.
 */
static int __init parse_ivhd_block(struct acpi_ivhd_block_header *ivhd_block)
{
    union acpi_ivhd_device *ivhd_device;
    u16 block_length, dev_length;
    struct amd_iommu *iommu;

    /* The block must at least hold its own fixed header. */
    if ( ivhd_block->header.length <
         sizeof(struct acpi_ivhd_block_header) )
    {
        amd_iov_error("IVHD Error: Invalid Block Length!\n");
        return -ENODEV;
    }

    iommu = find_iommu_from_bdf_cap(ivhd_block->header.dev_id,
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -