svm.c
            msr_content = vmcb->debugctlmsr;
            break;
        case MSR_IA32_LASTBRANCHFROMIP:
            msr_content = vmcb->lastbranchfromip;
            break;
        case MSR_IA32_LASTBRANCHTOIP:
            msr_content = vmcb->lastbranchtoip;
            break;
        case MSR_IA32_LASTINTFROMIP:
            msr_content = vmcb->lastintfromip;
            break;
        case MSR_IA32_LASTINTTOIP:
            msr_content = vmcb->lastinttoip;
            break;
        default:
            if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
                 rdmsr_safe(ecx, eax, edx) == 0 )
            {
                regs->eax = eax;
                regs->edx = edx;
                goto done;
            }
            svm_inject_exception(TRAP_gp_fault, 0, 0);
            return;
        }

        regs->eax = msr_content & 0xFFFFFFFF;
        regs->edx = msr_content >> 32;

 done:
        hvmtrace_msr_read(v, ecx, msr_content);
        HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
                    ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);

        inst_len = __get_instruction_length(v, INSTR_RDMSR, NULL);
    }
    else
    {
        msr_content = (u32)regs->eax | ((u64)regs->edx << 32);

        hvmtrace_msr_write(v, ecx, msr_content);

        switch ( ecx )
        {
        case MSR_IA32_TSC:
            hvm_set_guest_time(v, msr_content);
            pt_reset(v);
            break;
        case MSR_IA32_APICBASE:
            vlapic_msr_set(vcpu_vlapic(v), msr_content);
            break;
        case MSR_K8_VM_HSAVE_PA:
            svm_inject_exception(TRAP_gp_fault, 0, 0);
            break;
        case MSR_IA32_DEBUGCTLMSR:
            vmcb->debugctlmsr = msr_content;
            if ( !msr_content || !cpu_has_svm_lbrv )
                break;
            vmcb->lbr_control.fields.enable = 1;
            svm_disable_intercept_for_msr(v, MSR_IA32_DEBUGCTLMSR);
            svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHFROMIP);
            svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHTOIP);
            svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTFROMIP);
            svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTTOIP);
            break;
        case MSR_IA32_LASTBRANCHFROMIP:
            vmcb->lastbranchfromip = msr_content;
            break;
        case MSR_IA32_LASTBRANCHTOIP:
            vmcb->lastbranchtoip = msr_content;
            break;
        case MSR_IA32_LASTINTFROMIP:
            vmcb->lastintfromip = msr_content;
            break;
        case MSR_IA32_LASTINTTOIP:
            vmcb->lastinttoip = msr_content;
            break;
        default:
            switch ( long_mode_do_msr_write(regs) )
            {
            case HNDL_unhandled:
                wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
                break;
            case HNDL_exception_raised:
                return;
            case HNDL_done:
                break;
            }
            break;
        }

        inst_len = __get_instruction_length(v, INSTR_WRMSR, NULL);
    }

    __update_guest_eip(regs, inst_len);
}
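/*
 * A minimal sketch (hypothetical helpers, not part of svm.c) of the
 * EDX:EAX convention the RDMSR/WRMSR paths above rely on: a 64-bit MSR
 * value always travels as two 32-bit register halves.
 */
static uint64_t msr_fold(uint32_t eax, uint32_t edx)
{
    return (uint64_t)eax | ((uint64_t)edx << 32);   /* WRMSR direction */
}

static void msr_split(uint64_t msr_content, uint32_t *eax, uint32_t *edx)
{
    *eax = msr_content & 0xFFFFFFFF;                /* RDMSR: low half  */
    *edx = msr_content >> 32;                       /* RDMSR: high half */
}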
static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
                              struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct hvm_intack intack = hvm_vcpu_has_pending_irq(curr);
    unsigned int inst_len;

    inst_len = __get_instruction_length(curr, INSTR_HLT, NULL);
    __update_guest_eip(regs, inst_len);

    /* Check for pending exception or new interrupt. */
    if ( vmcb->eventinj.fields.v ||
         ((intack.source != hvm_intsrc_none) &&
          !svm_interrupt_blocked(current, intack)) )
    {
        HVMTRACE_1D(HLT, curr, /*int pending=*/ 1);
        return;
    }

    HVMTRACE_1D(HLT, curr, /*int pending=*/ 0);
    hvm_hlt(regs->eflags);
}

static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs)
{
    enum instruction_index list[] = { INSTR_INVD, INSTR_WBINVD };
    struct vcpu *curr = current;
    struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
    int inst_len;

    if ( !list_empty(&(domain_hvm_iommu(curr->domain)->pdev_list)) )
    {
        vmcb->general2_intercepts &= ~GENERAL2_INTERCEPT_WBINVD;
        wbinvd();
    }

    inst_len = __get_instruction_length_from_list(
        curr, list, ARRAY_SIZE(list), NULL, NULL);
    __update_guest_eip(regs, inst_len);
}

void svm_handle_invlpg(const short invlpga, struct cpu_user_regs *regs)
{
    struct vcpu *v = current;
    u8 opcode[MAX_INST_LEN], prefix, length = MAX_INST_LEN;
    unsigned long g_vaddr;
    int inst_len;

    if ( invlpga )
    {
        inst_len = __get_instruction_length(v, INSTR_INVLPGA, opcode);
        __update_guest_eip(regs, inst_len);

        /*
         * The address is implicit on this instruction. At the moment, we
         * don't use ecx (ASID) to identify individual guest pages.
         */
        g_vaddr = regs->eax;
    }
    else
    {
        /* What about multiple prefix codes? */
        inst_len = __get_instruction_length(v, INSTR_INVLPG, opcode);
        prefix = (is_prefix(opcode[0]) ? opcode[0] : 0);
        if ( inst_len <= 0 )
        {
            gdprintk(XENLOG_ERR, "Error getting invlpg instr len\n");
            goto crash;
        }

        inst_len--;
        length -= inst_len;

        /*
         * Decode the memory operand of the instruction, including ModRM,
         * SIB, and displacement, to get the effective address and length
         * in bytes. Assume the system is in either 32- or 64-bit mode.
         */
        g_vaddr = get_effective_addr_modrm64(regs, prefix, inst_len,
                                             &opcode[inst_len], &length);

        inst_len += length;
        __update_guest_eip(regs, inst_len);
    }

    HVMTRACE_3D(INVLPG, v, !!invlpga, g_vaddr, (invlpga ? regs->ecx : 0));

    paging_invlpg(v, g_vaddr);
    svm_asid_g_invlpg(v, g_vaddr);
    return;

 crash:
    domain_crash(v->domain);
}
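/*
 * A minimal sketch (hypothetical helper, not part of svm.c) of the wake-up
 * test svm_vmexit_do_hlt() performs above: the vCPU only truly halts when
 * no event is already queued for injection and no deliverable interrupt is
 * pending.
 */
static int hlt_would_sleep(struct vcpu *v, struct vmcb_struct *vmcb)
{
    struct hvm_intack intack = hvm_vcpu_has_pending_irq(v);

    if ( vmcb->eventinj.fields.v )            /* injection already queued */
        return 0;
    if ( (intack.source != hvm_intsrc_none) &&
         !svm_interrupt_blocked(v, intack) )  /* unblocked interrupt */
        return 0;
    return 1;                                 /* safe to sleep via hvm_hlt() */
}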
asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
{
    unsigned int exit_reason;
    struct vcpu *v = current;
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    eventinj_t eventinj;
    int inst_len, rc;

    /*
     * Before doing anything else, we need to sync up the VLAPIC's TPR with
     * SVM's vTPR. It's OK if the guest doesn't touch CR8 (e.g. 32-bit
     * Windows) because we update the vTPR on MMIO writes to the TPR.
     */
    vlapic_set_reg(vcpu_vlapic(v), APIC_TASKPRI,
                   (vmcb->vintr.fields.tpr & 0x0F) << 4);

    exit_reason = vmcb->exitcode;

    hvmtrace_vmexit(v, regs->eip, exit_reason);

    if ( unlikely(exit_reason == VMEXIT_INVALID) )
    {
        svm_dump_vmcb(__func__, vmcb);
        goto exit_and_crash;
    }

    perfc_incra(svmexits, exit_reason);

    hvm_maybe_deassert_evtchn_irq();

    /* Event delivery caused this intercept? Queue for redelivery. */
    eventinj = vmcb->exitintinfo;
    if ( unlikely(eventinj.fields.v) &&
         hvm_event_needs_reinjection(eventinj.fields.type,
                                     eventinj.fields.vector) )
        vmcb->eventinj = eventinj;
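    /*
     * For reference, the EXITINTINFO/EVENTINJ layout assumed by eventinj_t
     * (as described in the AMD64 Architecture Programmer's Manual, vol. 2;
     * summarized here, not quoted from svm.c):
     *   bits  7:0   vector
     *   bits 10:8   type (0=external intr, 2=NMI, 3=exception, 4=soft intr)
     *   bit    11   ev (error code valid, pushed from bits 63:32)
     *   bit    31   v  (valid)
     *   bits 63:32  errorcode
     */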
    switch ( exit_reason )
    {
    case VMEXIT_INTR:
        /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
        HVMTRACE_0D(INTR, v);
        break;

    case VMEXIT_NMI:
        /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
        HVMTRACE_0D(NMI, v);
        break;

    case VMEXIT_SMI:
        /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
        HVMTRACE_0D(SMI, v);
        break;

    case VMEXIT_EXCEPTION_DB:
        if ( !v->domain->debugger_attached )
            goto exit_and_crash;
        domain_pause_for_debugger();
        break;

    case VMEXIT_EXCEPTION_BP:
        if ( !v->domain->debugger_attached )
            goto exit_and_crash;
        /* AMD Vol2, 15.11: INT3, INTO, BOUND intercepts do not update RIP. */
        inst_len = __get_instruction_length(v, INSTR_INT3, NULL);
        if ( inst_len == 0 )
            break;
        __update_guest_eip(regs, inst_len);
        domain_pause_for_debugger();
        break;

    case VMEXIT_EXCEPTION_NM:
        svm_do_no_device_fault(vmcb);
        break;

    case VMEXIT_EXCEPTION_PF:
    {
        unsigned long va;
        va = vmcb->exitinfo2;
        regs->error_code = vmcb->exitinfo1;
        HVM_DBG_LOG(DBG_LEVEL_VMMU,
                    "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
                    (unsigned long)regs->eax, (unsigned long)regs->ebx,
                    (unsigned long)regs->ecx, (unsigned long)regs->edx,
                    (unsigned long)regs->esi, (unsigned long)regs->edi);

        if ( paging_fault(va, regs) )
        {
            HVMTRACE_2D(PF_XEN, v, va, regs->error_code);
            break;
        }

        svm_inject_exception(TRAP_page_fault, regs->error_code, va);
        break;
    }

    /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
    case VMEXIT_EXCEPTION_MC:
        HVMTRACE_0D(MCE, v);
        break;

    case VMEXIT_VINTR:
        vmcb->vintr.fields.irq = 0;
        vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_VINTR;
        break;

    case VMEXIT_INVD:
    case VMEXIT_WBINVD:
        svm_vmexit_do_invalidate_cache(regs);
        break;

    case VMEXIT_TASK_SWITCH:
    {
        enum hvm_task_switch_reason reason;
        int32_t errcode = -1;
        if ( (vmcb->exitinfo2 >> 36) & 1 )
            reason = TSW_iret;
        else if ( (vmcb->exitinfo2 >> 38) & 1 )
            reason = TSW_jmp;
        else
            reason = TSW_call_or_int;
        if ( (vmcb->exitinfo2 >> 44) & 1 )
            errcode = (uint32_t)vmcb->exitinfo2;

        /*
         * Some processors set the EXITINTINFO field when the task switch
         * is caused by a task gate in the IDT. In this case we will be
         * emulating the event injection, so we do not want the processor
         * to re-inject the original event!
         */
        vmcb->eventinj.bytes = 0;

        hvm_task_switch((uint16_t)vmcb->exitinfo1, reason, errcode);
        break;
    }

    case VMEXIT_CPUID:
        svm_vmexit_do_cpuid(vmcb, regs);
        break;

    case VMEXIT_HLT:
        svm_vmexit_do_hlt(vmcb, regs);
        break;

    case VMEXIT_INVLPG:
        svm_handle_invlpg(0, regs);
        break;

    case VMEXIT_INVLPGA:
        svm_handle_invlpg(1, regs);
        break;

    case VMEXIT_VMMCALL:
        inst_len = __get_instruction_length(v, INSTR_VMCALL, NULL);
        if ( inst_len == 0 )
            break;
        HVMTRACE_1D(VMMCALL, v, regs->eax);
        rc = hvm_do_hypercall(regs);
        if ( rc != HVM_HCALL_preempted )
        {
            __update_guest_eip(regs, inst_len);
            if ( rc == HVM_HCALL_invalidate )
                send_invalidate_req();
        }
        break;

    case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
        svm_cr_access(v, exit_reason - VMEXIT_CR0_READ,
                      TYPE_MOV_FROM_CR, regs);
        break;

    case VMEXIT_CR0_WRITE ... VMEXIT_CR15_WRITE:
        svm_cr_access(v, exit_reason - VMEXIT_CR0_WRITE,
                      TYPE_MOV_TO_CR, regs);
        break;

    case VMEXIT_DR0_READ ... VMEXIT_DR7_READ:
    case VMEXIT_DR0_WRITE ... VMEXIT_DR7_WRITE:
        svm_dr_access(v, regs);
        break;

    case VMEXIT_IOIO:
        svm_io_instruction(v);
        break;

    case VMEXIT_MSR:
        svm_do_msr_access(v, regs);
        break;

    case VMEXIT_SHUTDOWN:
        hvm_triple_fault();
        break;

    case VMEXIT_RDTSCP:
    case VMEXIT_MONITOR:
    case VMEXIT_MWAIT:
    case VMEXIT_VMRUN:
    case VMEXIT_VMLOAD:
    case VMEXIT_VMSAVE:
    case VMEXIT_STGI:
    case VMEXIT_CLGI:
    case VMEXIT_SKINIT:
        svm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
        break;

    case VMEXIT_NPF:
        perfc_incra(svmexits, VMEXIT_NPF_PERFC);
        regs->error_code = vmcb->exitinfo1;
        svm_do_nested_pgfault(vmcb->exitinfo2, regs);
        break;

    case VMEXIT_IRET:
        /*
         * IRET clears the NMI mask. However, because we clear the mask
         * /before/ executing IRET, we set the interrupt shadow to prevent
         * a pending NMI from being injected immediately. This will work
         * perfectly unless the IRET instruction faults: in that case we
         * may inject an NMI before the NMI handler's IRET instruction is
         * retired.
         */
        vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
        vmcb->interrupt_shadow = 1;
        break;

    default:
    exit_and_crash:
        gdprintk(XENLOG_ERR, "unexpected VMEXIT: exit reason = 0x%x, "
                 "exitinfo1 = %"PRIx64", exitinfo2 = %"PRIx64"\n",
                 exit_reason,
                 (u64)vmcb->exitinfo1, (u64)vmcb->exitinfo2);
        domain_crash(v->domain);
        break;
    }

    /* The exit may have updated the TPR: reflect this in the hardware vtpr */
    vmcb->vintr.fields.tpr =
        (vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0xFF) >> 4;
}

asmlinkage void svm_trace_vmentry(void)
{
    struct vcpu *v = current;

    /* This is the last C code before the VMRUN instruction. */
    hvmtrace_vmentry(v);
}
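/*
 * A minimal sketch (hypothetical helpers, not part of svm.c) of the
 * CR8/TPR mapping used at entry to and exit from svm_vmexit_handler():
 * the VMCB's 4-bit vTPR holds bits 7:4 of the 8-bit APIC TASKPRI register,
 * so the two conversions below round-trip the priority class.
 */
static uint32_t vtpr_to_taskpri(uint8_t vtpr)
{
    return (vtpr & 0x0F) << 4;      /* widen vTPR into TASKPRI[7:4] */
}

static uint8_t taskpri_to_vtpr(uint32_t taskpri)
{
    return (taskpri & 0xFF) >> 4;   /* keep only the priority class */
}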