/*
 * hvm.c — Xen HVM support (excerpt: page 1 of 5; full file is 2,304
 * lines).  This chunk begins mid-way through hvm_task_switch().
 */
prev_tr.base, &tss, sizeof(tss), PFEC_page_present);
    if ( rc == HVMCOPY_bad_gva_to_gfn )
        goto out;

    /* Read the incoming task's TSS image from guest memory. */
    rc = hvm_copy_from_guest_virt(
        &tss, tr.base, sizeof(tss), PFEC_page_present);
    if ( rc == HVMCOPY_bad_gva_to_gfn )
        goto out;

    /* Switch to the new task's page tables. */
    if ( hvm_set_cr3(tss.cr3) )
        goto out;

    /* Load general-purpose register state from the new TSS. */
    regs->eip = tss.eip;
    regs->eflags = tss.eflags | 2; /* EFLAGS bit 1 is architecturally always 1 */
    regs->eax = tss.eax;
    regs->ecx = tss.ecx;
    regs->edx = tss.edx;
    regs->ebx = tss.ebx;
    regs->esp = tss.esp;
    regs->ebp = tss.ebp;
    regs->esi = tss.esi;
    regs->edi = tss.edi;

    if ( (taskswitch_reason == TSW_call_or_int) )
    {
        /* Nested task: set NT and link the new TSS back to the old one. */
        regs->eflags |= X86_EFLAGS_NT;
        tss.back_link = prev_tr.sel;
    }

    /*
     * Load segment selectors from the new TSS.  A failing load injects
     * its own exception; we only note that one was raised.
     */
    exn_raised = 0;
    if ( hvm_load_segment_selector(v, x86_seg_es, tss.es) ||
         hvm_load_segment_selector(v, x86_seg_cs, tss.cs) ||
         hvm_load_segment_selector(v, x86_seg_ss, tss.ss) ||
         hvm_load_segment_selector(v, x86_seg_ds, tss.ds) ||
         hvm_load_segment_selector(v, x86_seg_fs, tss.fs) ||
         hvm_load_segment_selector(v, x86_seg_gs, tss.gs) ||
         hvm_load_segment_selector(v, x86_seg_ldtr, tss.ldt) )
        exn_raised = 1;

    /* Write the TSS image (e.g. updated back link) back to guest memory. */
    rc = hvm_copy_to_guest_virt(
        tr.base, &tss, sizeof(tss), PFEC_page_present);
    if ( rc == HVMCOPY_bad_gva_to_gfn )
        exn_raised = 1;

    /* New TSS has its T (trace) bit set: raise #DB unless a fault is pending. */
    if ( (tss.trace & 1) && !exn_raised )
        hvm_inject_exception(TRAP_debug, tss_sel & 0xfff8, 0);

    tr.attr.fields.type = 0xb; /* busy 32-bit tss */
    hvm_set_segment_register(v, x86_seg_tr, &tr);

    /* A task switch always sets CR0.TS. */
    v->arch.hvm_vcpu.guest_cr[0] |= X86_CR0_TS;
    hvm_update_guest_cr(v, 0);

    /* Bit 41 of a TSS descriptor is the busy (B) flag. */
    if ( (taskswitch_reason == TSW_iret) ||
         (taskswitch_reason == TSW_jmp) )
        clear_bit(41, optss_desc); /* clear B flag of old task */

    if ( taskswitch_reason != TSW_iret )
        set_bit(41, nptss_desc); /* set B flag of new task */

    if ( errcode >= 0 )
    {
        /* Push the exception error code onto the new task's stack. */
        struct segment_register reg;
        unsigned long linear_addr;
        regs->esp -= 4;
        hvm_get_segment_register(current, x86_seg_ss, &reg);
        /* Todo: do not ignore access faults here. */
        if ( hvm_virtual_to_linear_addr(x86_seg_ss, &reg, regs->esp, 4,
                                        hvm_access_write, 32, &linear_addr) )
            hvm_copy_to_guest_virt_nofault(linear_addr, &errcode, 4, 0);
    }

 out:
    hvm_unmap_entry(optss_desc);
    hvm_unmap_entry(nptss_desc);
}

/*
 * Flag bits for __hvm_copy(): bit 0 selects the direction, bit 1 selects
 * whether a failed virtual-address translation injects #PF into the
 * guest, and bit 2 selects whether 'addr' is a guest physical or a guest
 * virtual address.
 */
#define HVMCOPY_from_guest (0u<<0)
#define HVMCOPY_to_guest   (1u<<0)
#define HVMCOPY_no_fault   (0u<<1)
#define HVMCOPY_fault      (1u<<1)
#define HVMCOPY_phys       (0u<<2)
#define HVMCOPY_virt       (1u<<2)

/*
 * Copy 'size' bytes between 'buf' and guest memory at 'addr', one page
 * at a time.  'addr' is interpreted per HVMCOPY_phys/HVMCOPY_virt in
 * 'flags'; 'pfec' is the page-fault error code used when translating
 * virtual addresses.  Returns HVMCOPY_okay, or an error code naming the
 * translation step that failed.
 */
static enum hvm_copy_result __hvm_copy(
    void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec)
{
    struct vcpu *curr = current;
    unsigned long gfn, mfn;
    p2m_type_t p2mt;
    char *p;
    int count, todo = size;

    while ( todo > 0 )
    {
        /* Clamp this chunk so it does not cross a page boundary. */
        count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);

        if ( flags & HVMCOPY_virt )
        {
            gfn = paging_gva_to_gfn(curr, addr, &pfec);
            if ( gfn == INVALID_GFN )
            {
                /* Optionally forward the page fault to the guest. */
                if ( flags & HVMCOPY_fault )
                    hvm_inject_exception(TRAP_page_fault, pfec, addr);
                return HVMCOPY_bad_gva_to_gfn;
            }
        }
        else
        {
            gfn = addr >> PAGE_SHIFT;
        }

        mfn = mfn_x(gfn_to_mfn_current(gfn, &p2mt));

        if ( !p2m_is_ram(p2mt) )
            return HVMCOPY_bad_gfn_to_mfn;
        ASSERT(mfn_valid(mfn));

        p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);

        if ( flags & HVMCOPY_to_guest )
        {
            if ( p2mt == p2m_ram_ro )
            {
                /* Drop the write; rate-limit the warning to one per page. */
                static unsigned long lastpage;
                if ( xchg(&lastpage, gfn) != gfn )
                    gdprintk(XENLOG_DEBUG, "guest attempted write to read-only"
                             " memory page. gfn=%#lx, mfn=%#lx\n",
                             gfn, mfn);
            }
            else
            {
                memcpy(p, buf, count);
                paging_mark_dirty(curr->domain, mfn);
            }
        }
        else
        {
            memcpy(buf, p, count);
        }

        unmap_domain_page(p);

        addr += count;
        buf += count;
        todo -= count;
    }

    return HVMCOPY_okay;
}

/* Copy to a guest physical address. */
enum hvm_copy_result hvm_copy_to_guest_phys(
    paddr_t paddr, void *buf, int size)
{
    return __hvm_copy(buf, paddr, size,
                      HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_phys, 0);
}

/* Copy from a guest physical address. */
enum hvm_copy_result hvm_copy_from_guest_phys(
    void *buf, paddr_t paddr, int size)
{
    return __hvm_copy(buf, paddr, size,
                      HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_phys, 0);
}

/* Copy to a guest virtual address; injects #PF on translation failure. */
enum hvm_copy_result hvm_copy_to_guest_virt(
    unsigned long vaddr, void *buf, int size, uint32_t pfec)
{
    return __hvm_copy(buf, vaddr, size,
                      HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_virt,
                      PFEC_page_present | PFEC_write_access | pfec);
}

/* Copy from a guest virtual address; injects #PF on translation failure. */
enum hvm_copy_result hvm_copy_from_guest_virt(
    void *buf, unsigned long vaddr, int size, uint32_t pfec)
{
    return __hvm_copy(buf, vaddr, size,
                      HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
                      PFEC_page_present | pfec);
}

/*
 * Instruction fetch from a guest virtual address.  When NX is enabled
 * for the guest, mark the access as an instruction fetch in the PFEC so
 * NX pages fault correctly.
 */
enum hvm_copy_result hvm_fetch_from_guest_virt(
    void *buf, unsigned long vaddr, int size, uint32_t pfec)
{
    if ( hvm_nx_enabled(current) )
        pfec |= PFEC_insn_fetch;
    return __hvm_copy(buf, vaddr, size,
                      HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
                      PFEC_page_present | pfec);
}

/* As hvm_copy_to_guest_virt(), but never injects #PF into the guest. */
enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
    unsigned long vaddr, void *buf, int size, uint32_t pfec)
{
    return __hvm_copy(buf, vaddr, size,
                      HVMCOPY_to_guest | HVMCOPY_no_fault | HVMCOPY_virt,
                      PFEC_page_present | PFEC_write_access | pfec);
}

/* As hvm_copy_from_guest_virt(), but never injects #PF into the guest. */
enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
    void *buf, unsigned long vaddr, int size, uint32_t pfec)
{
    return __hvm_copy(buf, vaddr, size,
                      HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
                      PFEC_page_present | pfec);
}

/* As hvm_fetch_from_guest_virt(), but never injects #PF into the guest. */
enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
    void *buf, unsigned long vaddr, int size, uint32_t pfec)
{
    if ( hvm_nx_enabled(current) )
        pfec |= PFEC_insn_fetch;
    return __hvm_copy(buf, vaddr, size,
HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
                      PFEC_page_present | pfec);
}

#ifdef __x86_64__
/*
 * Per-CPU flag read by copy_to/from_user_hvm() below.
 * NOTE(review): set elsewhere in the file; when clear, the current
 * hypercall is presumably a compat (32-bit) one whose arguments may
 * live in the compat arg-xlat area — confirm against the hypercall
 * entry paths.
 */
DEFINE_PER_CPU(bool_t, hvm_64bit_hcall);
#endif

/*
 * copy_to_user() analogue for HVM guests: copy 'len' bytes from
 * hypervisor buffer 'from' to guest virtual address 'to'.  Returns 0 on
 * success or 'len' on failure (all-or-nothing), mimicking
 * copy_to_user()'s return convention.
 */
unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
{
    int rc;

#ifdef __x86_64__
    /* Compat-hypercall args in the arg-xlat area are directly mapped. */
    if ( !this_cpu(hvm_64bit_hcall) && is_compat_arg_xlat_range(to, len) )
    {
        memcpy(to, from, len);
        return 0;
    }
#endif

    rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from,
                                        len, 0);
    return rc ? len : 0; /* fake a copy_to_user() return code */
}

/*
 * copy_from_user() analogue for HVM guests: copy 'len' bytes from guest
 * virtual address 'from' into hypervisor buffer 'to'.  Returns 0 on
 * success or 'len' on failure.
 */
unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
{
    int rc;

#ifdef __x86_64__
    /* Compat-hypercall args in the arg-xlat area are directly mapped. */
    if ( !this_cpu(hvm_64bit_hcall) && is_compat_arg_xlat_range(from, len) )
    {
        memcpy(to, from, len);
        return 0;
    }
#endif

    rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len, 0);
    return rc ? len : 0; /* fake a copy_from_user() return code */
}

/* Mask with the bit for CPUID feature flag 'idx' set (within its word). */
#define bitmaskof(idx) (1U << ((idx) & 31))

/*
 * CPUID for HVM guests: hypervisor leaves take priority, then the
 * domain's configured CPUID policy, with VLAPIC fixups applied to
 * leaf 0x00000001.
 */
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
               unsigned int *ecx, unsigned int *edx)
{
    struct vcpu *v = current;

    if ( cpuid_hypervisor_leaves(input, eax, ebx, ecx, edx) )
        return;

    domain_cpuid(v->domain, input, *ecx, eax, ebx, ecx, edx);

    if ( input == 0x00000001 )
    {
        /* Fix up VLAPIC details. */
        *ebx &= 0x00FFFFFFu;          /* initial APIC ID lives in EBX[31:24] */
        *ebx |= (v->vcpu_id * 2) << 24;
        if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
            __clear_bit(X86_FEATURE_APIC & 31, edx);
    }
}

/* RDTSC intercept: return the guest's virtualised TSC in EDX:EAX. */
void hvm_rdtsc_intercept(struct cpu_user_regs *regs)
{
    uint64_t tsc;
    struct vcpu *v = current;

    tsc = hvm_get_guest_tsc(v);
    regs->eax = (uint32_t)tsc;
    regs->edx = (uint32_t)(tsc >> 32);
}

/*
 * RDMSR intercept.  MSRs common to VMX and SVM are handled here; any
 * other MSR is forwarded to hvm_funcs.msr_read_intercept().  Returns
 * X86EMUL_OKAY with the value in EDX:EAX, or X86EMUL_EXCEPTION after
 * injecting #GP(0).
 */
int hvm_msr_read_intercept(struct cpu_user_regs *regs)
{
    uint32_t ecx = regs->ecx;
    uint64_t msr_content = 0;
    struct vcpu *v = current;
    uint64_t *var_range_base, *fixed_range_base;
    int index, mtrr;
    uint32_t cpuid[4];

    var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
    fixed_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.fixed_ranges;

    /* MTRR MSRs exist only if the guest's CPUID advertises MTRR. */
    hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
    mtrr = !!(cpuid[3] & bitmaskof(X86_FEATURE_MTRR));

    switch ( ecx )
    {
    case MSR_IA32_TSC:
        msr_content = hvm_get_guest_tsc(v);
        break;

    case MSR_IA32_APICBASE:
        msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
        break;

    case MSR_IA32_MCG_CAP:
    case MSR_IA32_MCG_STATUS:
    case MSR_IA32_MC0_STATUS:
    case MSR_IA32_MC1_STATUS:
    case MSR_IA32_MC2_STATUS:
    case MSR_IA32_MC3_STATUS:
    case MSR_IA32_MC4_STATUS:
    case MSR_IA32_MC5_STATUS:
        /* No point in letting the guest see real MCEs */
        msr_content = 0;
        break;

    case MSR_IA32_CR_PAT:
        msr_content = v->arch.hvm_vcpu.pat_cr;
        break;

    case MSR_MTRRcap:
        if ( !mtrr )
            goto gp_fault;
        msr_content = v->arch.hvm_vcpu.mtrr.mtrr_cap;
        break;

    case MSR_MTRRdefType:
        if ( !mtrr )
            goto gp_fault;
        /* Bit 10 of MTRRdefType is the MTRR enable bit. */
        msr_content = v->arch.hvm_vcpu.mtrr.def_type
                      | (v->arch.hvm_vcpu.mtrr.enabled << 10);
        break;

    /* fixed_range_base[]: entry 0 = 64K, 1..2 = 16K, 3..10 = 4K ranges. */
    case MSR_MTRRfix64K_00000:
        if ( !mtrr )
            goto gp_fault;
        msr_content = fixed_range_base[0];
        break;

    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        if ( !mtrr )
            goto gp_fault;
        index = regs->ecx - MSR_MTRRfix16K_80000;
        msr_content = fixed_range_base[index + 1];
        break;

    case MSR_MTRRfix4K_C0000...MSR_MTRRfix4K_F8000:
        if ( !mtrr )
            goto gp_fault;
        index = regs->ecx - MSR_MTRRfix4K_C0000;
        msr_content = fixed_range_base[index + 3];
        break;

    case
MSR_IA32_MTRR_PHYSBASE0...MSR_IA32_MTRR_PHYSMASK7:
        if ( !mtrr )
            goto gp_fault;
        index = regs->ecx - MSR_IA32_MTRR_PHYSBASE0;
        msr_content = var_range_base[index];
        break;

    default:
        /* Anything else goes to the VMX/SVM-specific handler. */
        return hvm_funcs.msr_read_intercept(regs);
    }

    regs->eax = (uint32_t)msr_content;
    regs->edx = (uint32_t)(msr_content >> 32);
    return X86EMUL_OKAY;

 gp_fault:
    hvm_inject_exception(TRAP_gp_fault, 0, 0);
    return X86EMUL_EXCEPTION;
}

/*
 * WRMSR intercept.  MSRs common to VMX and SVM are handled here; any
 * other MSR is forwarded to hvm_funcs.msr_write_intercept().  The value
 * being written arrives in EDX:EAX.  Returns X86EMUL_OKAY, or
 * X86EMUL_EXCEPTION after injecting #GP(0).
 */
int hvm_msr_write_intercept(struct cpu_user_regs *regs)
{
    /* MTRR/PAT helpers implemented in the mtrr code. */
    extern bool_t mtrr_var_range_msr_set(
        struct mtrr_state *v, u32 msr, u64 msr_content);
    extern bool_t mtrr_fix_range_msr_set(
        struct mtrr_state *v, int row, u64 msr_content);
    extern bool_t mtrr_def_type_msr_set(struct mtrr_state *v, u64 msr_content);
    extern bool_t pat_msr_set(u64 *pat, u64 msr);
    uint32_t ecx = regs->ecx;
    uint64_t msr_content = (uint32_t)regs->eax | ((uint64_t)regs->edx << 32);
    struct vcpu *v = current;
    int index, mtrr;
    uint32_t cpuid[4];

    /* MTRR MSRs exist only if the guest's CPUID advertises MTRR. */
    hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
    mtrr = !!(cpuid[3] & bitmaskof(X86_FEATURE_MTRR));

    switch ( ecx )
    {
    case MSR_IA32_TSC:
        hvm_set_guest_tsc(v, msr_content);
        /* NOTE(review): presumably resyncs periodic timers to the new
         * TSC value — confirm against pt_reset()'s definition. */
        pt_reset(v);
        break;

    case MSR_IA32_APICBASE:
        vlapic_msr_set(vcpu_vlapic(v), msr_content);
        break;

    case MSR_IA32_CR_PAT:
        if ( !pat_msr_set(&v->arch.hvm_vcpu.pat_cr, msr_content) )
            goto gp_fault;
        break;

    case MSR_MTRRcap:
        /* MTRRcap is a read-only MSR: any write raises #GP. */
        if ( !mtrr )
            goto gp_fault;
        goto gp_fault;

    case MSR_MTRRdefType:
        if ( !mtrr )
            goto gp_fault;
        if ( !mtrr_def_type_msr_set(&v->arch.hvm_vcpu.mtrr, msr_content) )
            goto gp_fault;
        break;

    /* Fixed-range rows: 0 = 64K, 1..2 = 16K, 3..10 = 4K ranges. */
    case MSR_MTRRfix64K_00000:
        if ( !mtrr )
            goto gp_fault;
        if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr, 0, msr_content) )
            goto gp_fault;
        break;

    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        if ( !mtrr )
            goto gp_fault;
        index = regs->ecx - MSR_MTRRfix16K_80000 + 1;
        if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr, index,
                                     msr_content) )
            goto gp_fault;
        break;

    case MSR_MTRRfix4K_C0000...MSR_MTRRfix4K_F8000:
        if ( !mtrr )
            goto gp_fault;
        index = regs->ecx - MSR_MTRRfix4K_C0000 + 3;
        if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr, index,
                                     msr_content) )
            goto gp_fault;
        break;

    case MSR_IA32_MTRR_PHYSBASE0...MSR_IA32_MTRR_PHYSMASK7:
        if ( !mtrr )
            goto gp_fault;
        if ( !mtrr_var_range_msr_set(&v->arch.hvm_vcpu.mtrr, regs->ecx,
                                     msr_content) )
            goto gp_fault;
        break;

    default:
        /* Anything else goes to the VMX/SVM-specific handler. */
        return hvm_funcs.msr_write_intercept(regs);
    }

    return X86EMUL_OKAY;

 gp_fault:
    hvm_inject_exception(TRAP_gp_fault, 0, 0);
    return X86EMUL_EXCEPTION;
    /* NOTE(review): excerpt truncated here — the closing brace of this
     * function is on the next page of the extracted source. */
/* (end of excerpt — page 1 of 5; non-code viewer chrome removed) */