iommu_init.c
static void iommu_msi_unmask(unsigned int vector)
{
    unsigned long flags;
    struct amd_iommu *iommu = vector_to_iommu[vector];

    /* FIXME: do not support mask bits at the moment */
    if ( iommu->maskbit )
        return;

    spin_lock_irqsave(&iommu->lock, flags);
    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
    spin_unlock_irqrestore(&iommu->lock, flags);
}

static void iommu_msi_mask(unsigned int vector)
{
    unsigned long flags;
    struct amd_iommu *iommu = vector_to_iommu[vector];

    /* FIXME: do not support mask bits at the moment */
    if ( iommu->maskbit )
        return;

    spin_lock_irqsave(&iommu->lock, flags);
    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
    spin_unlock_irqrestore(&iommu->lock, flags);
}

static unsigned int iommu_msi_startup(unsigned int vector)
{
    iommu_msi_unmask(vector);
    return 0;
}

static void iommu_msi_end(unsigned int vector)
{
    iommu_msi_unmask(vector);
    ack_APIC_irq();
}

static void iommu_msi_set_affinity(unsigned int vector, cpumask_t dest)
{
    struct amd_iommu *iommu = vector_to_iommu[vector];
    amd_iommu_msi_addr_init(iommu, cpu_physical_id(first_cpu(dest)));
}

static struct hw_interrupt_type iommu_msi_type = {
    .typename = "AMD_IOV_MSI",
    .startup = iommu_msi_startup,
    .shutdown = iommu_msi_mask,
    .enable = iommu_msi_unmask,
    .disable = iommu_msi_mask,
    .ack = iommu_msi_mask,
    .end = iommu_msi_end,
    .set_affinity = iommu_msi_set_affinity,
};

/* Decode one event log entry (4 x u32) and report IO page faults. */
static void parse_event_log_entry(u32 entry[])
{
    u16 domain_id, device_id;
    u32 code;
    u64 *addr;
    char *event_str[] = {"ILLEGAL_DEV_TABLE_ENTRY",
                         "IO_PAGE_FALT",
                         "DEV_TABLE_HW_ERROR",
                         "PAGE_TABLE_HW_ERROR",
                         "ILLEGAL_COMMAND_ERROR",
                         "COMMAND_HW_ERROR",
                         "IOTLB_INV_TIMEOUT",
                         "INVALID_DEV_REQUEST"};

    code = get_field_from_reg_u32(entry[1],
                                  IOMMU_EVENT_CODE_MASK,
                                  IOMMU_EVENT_CODE_SHIFT);

    if ( (code > IOMMU_EVENT_INVALID_DEV_REQUEST) ||
         (code < IOMMU_EVENT_ILLEGAL_DEV_TABLE_ENTRY) )
    {
        amd_iov_error("Invalid event log entry!\n");
        return;
    }

    if ( code == IOMMU_EVENT_IO_PAGE_FALT )
    {
        device_id = get_field_from_reg_u32(entry[0],
                                           IOMMU_EVENT_DEVICE_ID_MASK,
                                           IOMMU_EVENT_DEVICE_ID_SHIFT);
        domain_id = get_field_from_reg_u32(entry[1],
                                           IOMMU_EVENT_DOMAIN_ID_MASK,
                                           IOMMU_EVENT_DOMAIN_ID_SHIFT);
        addr = (u64 *)(entry + 2);
        printk(XENLOG_ERR "AMD_IOV: "
               "%s: domain:%d, device id:0x%x, fault address:0x%"PRIx64"\n",
               event_str[code - 1], domain_id, device_id, *addr);
    }
}

/* MSI handler: pull one entry off the event log and decode it. */
static void amd_iommu_page_fault(int vector, void *dev_id,
                                 struct cpu_user_regs *regs)
{
    u32 event[4];
    unsigned long flags;
    int ret = 0;
    struct amd_iommu *iommu = dev_id;

    spin_lock_irqsave(&iommu->lock, flags);
    ret = amd_iommu_read_event_log(iommu, event);
    spin_unlock_irqrestore(&iommu->lock, flags);

    if ( ret != 0 )
        return;
    parse_event_log_entry(event);
}

/*
 * Allocate a vector for this IOMMU's MSI and install the event log
 * handler.  Returns the vector on success, 0 on failure.
 */
static int set_iommu_interrupt_handler(struct amd_iommu *iommu)
{
    int vector, ret;

    vector = assign_irq_vector(AUTO_ASSIGN);
    if ( !vector )
    {
        amd_iov_error("no vectors\n");
        return 0;
    }

    /* only use the vector as an array index once we know it is valid */
    vector_to_iommu[vector] = iommu;

    /* make irq == vector */
    irq_vector[vector] = vector;
    vector_irq[vector] = vector;

    irq_desc[vector].handler = &iommu_msi_type;
    ret = request_irq(vector, amd_iommu_page_fault, 0, "amd_iommu", iommu);
    if ( ret )
    {
        amd_iov_error("can't request irq\n");
        return 0;
    }

    return vector;
}

/* Program MMIO base registers, MSI and control bits to bring one IOMMU online. */
void __init enable_iommu(struct amd_iommu *iommu)
{
    unsigned long flags;

    spin_lock_irqsave(&iommu->lock, flags);

    if ( iommu->enabled )
    {
        spin_unlock_irqrestore(&iommu->lock, flags);
        return;
    }

    /* all IOMMUs share the single system-wide device table */
    iommu->dev_table.alloc_size = device_table.alloc_size;
    iommu->dev_table.entries = device_table.entries;
    iommu->dev_table.buffer = device_table.buffer;

    register_iommu_dev_table_in_mmio_space(iommu);
    register_iommu_cmd_buffer_in_mmio_space(iommu);
    register_iommu_event_log_in_mmio_space(iommu);
    register_iommu_exclusion_range(iommu);

    amd_iommu_msi_data_init(iommu);
    amd_iommu_msi_addr_init(iommu, cpu_physical_id(first_cpu(cpu_online_map)));
    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);

    set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
    set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
    set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);

    printk("AMD_IOV: IOMMU %d Enabled.\n", nr_amd_iommus);
    nr_amd_iommus++;

    iommu->enabled = 1;
    spin_unlock_irqrestore(&iommu->lock, flags);
}

static void __init deallocate_iommu_table_struct(
    struct table_struct *table)
{
    if ( table->buffer )
    {
        free_xenheap_pages(table->buffer,
                           get_order_from_bytes(table->alloc_size));
        table->buffer = NULL;
    }
}

static void __init deallocate_iommu_tables(struct amd_iommu *iommu)
{
    deallocate_iommu_table_struct(&iommu->cmd_buffer);
    deallocate_iommu_table_struct(&iommu->event_log);
}

static int __init allocate_iommu_table_struct(struct table_struct *table,
                                              const char *name)
{
    table->buffer = (void *)alloc_xenheap_pages(
        get_order_from_bytes(table->alloc_size));

    if ( !table->buffer )
    {
        amd_iov_error("Error allocating %s\n", name);
        return -ENOMEM;
    }

    memset(table->buffer, 0, table->alloc_size);
    return 0;
}

static int __init allocate_iommu_tables(struct amd_iommu *iommu)
{
    /* allocate 'command buffer' in power of 2 increments of 4K */
    iommu->cmd_buffer_tail = 0;
    iommu->cmd_buffer.alloc_size = PAGE_SIZE << get_order_from_bytes(
        PAGE_ALIGN(amd_iommu_cmd_buffer_entries * IOMMU_CMD_BUFFER_ENTRY_SIZE));
    iommu->cmd_buffer.entries =
        iommu->cmd_buffer.alloc_size / IOMMU_CMD_BUFFER_ENTRY_SIZE;

    if ( allocate_iommu_table_struct(&iommu->cmd_buffer, "Command Buffer") != 0 )
        goto error_out;

    /* allocate 'event log' in power of 2 increments of 4K */
    iommu->event_log_head = 0;
    iommu->event_log.alloc_size = PAGE_SIZE << get_order_from_bytes(
        PAGE_ALIGN(amd_iommu_event_log_entries * IOMMU_EVENT_LOG_ENTRY_SIZE));
    iommu->event_log.entries =
        iommu->event_log.alloc_size / IOMMU_EVENT_LOG_ENTRY_SIZE;

    if ( allocate_iommu_table_struct(&iommu->event_log, "Event Log") != 0 )
        goto error_out;

    return 0;

 error_out:
    deallocate_iommu_tables(iommu);
    return -ENOMEM;
}

int __init amd_iommu_init_one(struct amd_iommu *iommu)
{
    if ( allocate_iommu_tables(iommu) != 0 )
        goto error_out;

    if ( map_iommu_mmio_region(iommu) != 0 )
        goto error_out;

    if ( set_iommu_interrupt_handler(iommu) == 0 )
        goto error_out;

    enable_iommu(iommu);
    return 0;

 error_out:
    return -ENODEV;
}

void __init amd_iommu_init_cleanup(void)
{
    struct amd_iommu *iommu, *next;

    list_for_each_entry_safe ( iommu, next, &amd_iommu_head, list )
    {
        list_del(&iommu->list);
        if ( iommu->enabled )
        {
            deallocate_iommu_tables(iommu);
            unmap_iommu_mmio_region(iommu);
        }
        xfree(iommu);
    }
}

static int __init init_ivrs_mapping(void)
{
    int bdf;

    BUG_ON( !ivrs_bdf_entries );

    ivrs_mappings = xmalloc_array(struct ivrs_mappings, ivrs_bdf_entries);
    if ( ivrs_mappings == NULL )
    {
        amd_iov_error("Error allocating IVRS Mappings table\n");
        return -ENOMEM;
    }
    memset(ivrs_mappings, 0, ivrs_bdf_entries * sizeof(struct ivrs_mappings));

    /* assign default values for device entries */
    for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
    {
        ivrs_mappings[bdf].dte_requestor_id = bdf;
        ivrs_mappings[bdf].dte_sys_mgt_enable =
            IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED;
        ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_DISABLED;
        ivrs_mappings[bdf].unity_map_enable = IOMMU_CONTROL_DISABLED;
        ivrs_mappings[bdf].iommu = NULL;
    }
    return 0;
}

static int __init amd_iommu_setup_device_table(void)
{
    /* allocate 'device table' on a 4K boundary */
    device_table.alloc_size = PAGE_SIZE << get_order_from_bytes(
        PAGE_ALIGN(ivrs_bdf_entries * IOMMU_DEV_TABLE_ENTRY_SIZE));
    device_table.entries =
        device_table.alloc_size / IOMMU_DEV_TABLE_ENTRY_SIZE;

    return ( allocate_iommu_table_struct(&device_table, "Device Table") );
}

int __init amd_iommu_setup_shared_tables(void)
{
    BUG_ON( !ivrs_bdf_entries );

    if ( init_ivrs_mapping() != 0 )
        goto error_out;

    if ( amd_iommu_setup_device_table() != 0 )
        goto error_out;

    if ( amd_iommu_setup_intremap_table() != 0 )
        goto error_out;

    return 0;

 error_out:
    deallocate_intremap_table();
    deallocate_iommu_table_struct(&device_table);

    if ( ivrs_mappings )
    {
        xfree(ivrs_mappings);
        ivrs_mappings = NULL;
    }
    return -ENOMEM;
}
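
/*
 * Usage sketch (not part of iommu_init.c): how the entry points above are
 * typically sequenced at boot.  The caller name amd_iov_init_sketch and the
 * for_each_amd_iommu iterator (a walk over amd_iommu_head) are assumptions
 * for illustration; the real call site lives in the AMD IOMMU detection /
 * ACPI IVRS parsing code, not in this file.
 */
#if 0   /* illustrative only */
static int __init amd_iov_init_sketch(void)
{
    struct amd_iommu *iommu;

    /* shared structures first: IVRS mappings, device table, intremap table */
    if ( amd_iommu_setup_shared_tables() != 0 )
        return -ENODEV;

    /* then bring up each IOMMU discovered in the ACPI IVRS table */
    for_each_amd_iommu ( iommu )
        if ( amd_iommu_init_one(iommu) != 0 )
        {
            amd_iommu_init_cleanup();   /* unwind partially initialized units */
            return -ENODEV;
        }

    return 0;
}
#endif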