📄 vmx_fault.c
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_fault.c: handling VMX architecture-related VM exits
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Xiaoyan Feng (Fleming Feng)  <fleming.feng@intel.com>
 *  Xuefei Xu (Anthony Xu) <Anthony.xu@intel.com>
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>

#include <linux/efi.h>  /* FOR EFI_UNIMPLEMENTED */
#include <asm/sal.h>    /* FOR struct ia64_sal_retval */

#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/vlsapic.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/regionreg.h>
#include <asm/privop.h>
#include <asm/ia64_int.h>
#include <asm/debugger.h>
#include <asm/dom_fw.h>
#include <asm/vmx_vcpu.h>
#include <asm/kregs.h>
#include <asm/vmx.h>
#include <asm/vmmu.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx_phy_mode.h>
#include <xen/mm.h>
#include <asm/vmx_pal.h>
#include <asm/shadow.h>
#include <asm/sioemu.h>
#include <public/arch-ia64/sioemu.h>

/* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034

extern unsigned long handle_fpu_swa (int fp_fault, struct pt_regs *regs,
                                     unsigned long isr);

#define DOMN_PAL_REQUEST    0x110000
#define DOMN_SAL_REQUEST    0x110001

static const u16 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000,0x1400,0x1800,
    0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000,
    0x4400,0x4800,0x4c00,0x5000,0x5100,0x5200,0x5300,0x5400,0x5500,0x5600,
    0x5700,0x5800,0x5900,0x5a00,0x5b00,0x5c00,0x5d00,0x5e00,0x5f00,0x6000,
    0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00,
    0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400,
    0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00,
    0x7f00
};

void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
                              u64 vec, REGS *regs)
{
    u64 status, vector;
    VCPU *vcpu = current;
    u64 vpsr = VCPU(vcpu, vpsr);

    vector = vec2off[vec];

    switch (vec) {
    case 5:  // IA64_DATA_NESTED_TLB_VECTOR
        break;

    case 22: // IA64_INST_ACCESS_RIGHTS_VECTOR
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        if (vhpt_access_rights_fixup(vcpu, ifa, 0))
            return;
        break;

    case 25: // IA64_DISABLED_FPREG_VECTOR
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        if (FP_PSR(vcpu) & IA64_PSR_DFH) {
            FP_PSR(vcpu) = IA64_PSR_MFH;
            if (__ia64_per_cpu_var(fp_owner) != vcpu)
                __ia64_load_fpu(vcpu->arch._thread.fph);
        }
        if (!(VCPU(vcpu, vpsr) & IA64_PSR_DFH)) {
            regs->cr_ipsr &= ~IA64_PSR_DFH;
            return;
        }
        break;

    case 32: // IA64_FP_FAULT_VECTOR
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        // handle fpswa emulation
        // fp fault
        status = handle_fpu_swa(1, regs, isr);
        if (!status) {
            vcpu_increment_iip(vcpu);
            return;
        } else if (IA64_RETRY == status)
            return;
        break;

    case 33: // IA64_FP_TRAP_VECTOR
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        // fp trap
        status = handle_fpu_swa(0, regs, isr);
        if (!status)
            return;
        else if (IA64_RETRY == status) {
            vcpu_decrement_iip(vcpu);
            return;
        }
        break;

    case 29: // IA64_DEBUG_VECTOR
    case 35: // IA64_TAKEN_BRANCH_TRAP_VECTOR
    case 36: // IA64_SINGLE_STEP_TRAP_VECTOR
        if (vmx_guest_kernel_mode(regs)
            && current->domain->debugger_attached) {
            domain_pause_for_debugger();
            return;
        }
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        break;

    default:
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        break;
    }

    VCPU(vcpu, isr) = isr;
    VCPU(vcpu, iipa) = regs->cr_iip;
    if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
        VCPU(vcpu, iim) = iim;
    else
        set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);
    inject_guest_interruption(vcpu, vector);
    return;

 nested_fault:
    panic_domain(regs, "Guest nested fault vector=%lx!\n", vector);
}
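
/*
 * Editor's sketch (not from the original source): the vec2off[] table
 * above encodes the fixed IA-64 IVT layout -- vectors 0-19 are spaced
 * 0x400 apart, and vectors 20-67 are spaced 0x100 apart starting at
 * offset 0x5000.  The hypothetical helper below computes the same
 * mapping arithmetically; it is kept under #if 0 as illustration only.
 */
#if 0
static u16 vec_to_ivt_offset(unsigned int vec)
{
    /* matches vec2off[vec] for 0 <= vec < 68 */
    return (vec < 20) ? vec * 0x400 : 0x5000 + (vec - 20) * 0x100;
}
#endif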
IA64FAULT
vmx_ia64_handle_break(unsigned long ifa, struct pt_regs *regs,
                      unsigned long isr, unsigned long iim)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;

    perfc_incr(vmx_ia64_handle_break);

#ifdef CRASH_DEBUG
    if ((iim == 0 || iim == CDB_BREAK_NUM) && !vmx_user_mode(regs) &&
        IS_VMM_ADDRESS(regs->cr_iip)) {
        if (iim == 0)
            show_registers(regs);
        debugger_trap_fatal(0 /* don't care */, regs);
        regs_increment_iip(regs);
        return IA64_NO_FAULT;
    }
#endif

    if (!vmx_user_mode(regs)) {
        show_registers(regs);
        gdprintk(XENLOG_DEBUG, "%s:%d imm %lx\n", __func__, __LINE__, iim);
        ia64_fault(11 /* break fault */, isr, ifa, iim,
                   0 /* cr.itir */, 0, 0, 0, (unsigned long)regs);
    }

    if (ia64_psr(regs)->cpl == 0) {
        /* Allow hypercalls only when cpl == 0. */

        /* Only common hypercalls are handled by vmx_break_fault. */
        if (iim == d->arch.breakimm) {
            ia64_hypercall(regs);
            vcpu_increment_iip(v);
            return IA64_NO_FAULT;
        }
        /* Normal hypercalls are handled by vmx_break_fault, and the
           common ones returned above, so breakimm cannot reach here. */
        BUG_ON(iim == d->arch.breakimm);

        if (iim == DOMN_PAL_REQUEST) {
            pal_emul(v);
            vcpu_increment_iip(v);
            return IA64_NO_FAULT;
        } else if (iim == DOMN_SAL_REQUEST) {
            if (d->arch.is_sioemu)
                sioemu_sal_assist(v);
            else {
                sal_emul(v);
                vcpu_increment_iip(v);
            }
            return IA64_NO_FAULT;
        }
    }
    vmx_reflect_interruption(ifa, isr, iim, 11, regs);
    return IA64_NO_FAULT;
}

void save_banked_regs_to_vpd(VCPU *v, REGS *regs)
{
    unsigned long i = 0UL, *src, *dst, *sunat, *dunat;
    IA64_PSR vpsr;

    src = &regs->r16;
    sunat = &regs->eml_unat;
    vpsr.val = VCPU(v, vpsr);
    if (vpsr.bn) {
        dst = &VCPU(v, vgr[0]);
        dunat = &VCPU(v, vnat);
        __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;; \
                               dep %2 = %0, %2, 0, 16;; \
                               st8 [%3] = %2;;"
             ::"r"(i), "r"(*sunat), "r"(*dunat), "r"(dunat),
               "i"(IA64_PT_REGS_R16_SLOT) : "memory");
    } else {
        dst = &VCPU(v, vbgr[0]);
//        dunat = &VCPU(v, vbnat);
//        __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;;
//                               dep %2 = %0, %2, 16, 16;;
//                               st8 [%3] = %2;;"
//             ::"r"(i), "r"(*sunat), "r"(*dunat), "r"(dunat),
//               "i"(IA64_PT_REGS_R16_SLOT) : "memory");
    }
    for (i = 0; i < 16; i++)
        *dst++ = *src++;
}
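
/*
 * Editor's sketch (not from the original source): a plain-C rendering
 * of what the extr.u/dep/st8 sequence in save_banked_regs_to_vpd()
 * does.  It extracts the 16 UNAT bits covering r16-r31 (starting at
 * bit IA64_PT_REGS_R16_SLOT of the saved UNAT word) and deposits them
 * at bit 0 of the VPD NaT word.  The helper name is hypothetical;
 * kept under #if 0 as illustration only.
 */
#if 0
static unsigned long copy_banked_nat_bits(unsigned long src_unat,
                                          unsigned long dst_unat)
{
    /* extr.u: pull the 16 NaT bits starting at the r16 slot */
    unsigned long bits = (src_unat >> IA64_PT_REGS_R16_SLOT) & 0xffffUL;
    /* dep: deposit them into bits 0..15 of the destination word */
    return (dst_unat & ~0xffffUL) | bits;
}
#endif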
// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
void leave_hypervisor_tail(void)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;

    /* FIXME: can this happen ? */
    if (is_idle_domain(current->domain))
        return;

    // A softirq may generate an interrupt. So call softirq early.
    local_irq_enable();
    do_softirq();
    local_irq_disable();

    // FIXME: Will this work properly if doing an RFI???
    if (d->arch.is_sioemu) {
        if (local_events_need_delivery()) {
            sioemu_deliver_event();
        }
    } else if (v->vcpu_id == 0) {
        unsigned long callback_irq =
            d->arch.hvm_domain.params[HVM_PARAM_CALLBACK_IRQ];

        if (v->arch.arch_vmx.pal_init_pending) {
            /* inject INIT interruption to guest pal */
            v->arch.arch_vmx.pal_init_pending = 0;
            deliver_pal_init(v);
            return;
        }

        /*
         * val[63:56] == 1: val[55:0] is a delivery PCI INTx line:
         *                  Domain = val[47:32], Bus = val[31:16],
         *                  DevFn  = val[15: 8], IntX = val[ 1: 0]
         * val[63:56] == 0: val[55:0] is a delivery as GSI
         */
        if (callback_irq != 0 && local_events_need_delivery()) {
            /* change level for para-device callback irq */
            /* use level irq to send discrete event */
            if ((uint8_t)(callback_irq >> 56) == 1) {
                /* case of using PCI INTx line as callback irq */
                int pdev = (callback_irq >> 11) & 0x1f;
                int pintx = callback_irq & 3;
                viosapic_set_pci_irq(d, pdev, pintx, 1);
                viosapic_set_pci_irq(d, pdev, pintx, 0);
            } else {
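
/*
 * Editor's sketch (not from the original source, which is truncated
 * above): decoding of the HVM_PARAM_CALLBACK_IRQ value per the block
 * comment in leave_hypervisor_tail().  With val[63:56] == 1, the INTx
 * branch takes the device number from bits [15:11] (the device part
 * of DevFn) and the pin from val[1:0]; with val[63:56] == 0, val[55:0]
 * is a plain GSI.  Names are hypothetical; kept under #if 0 as
 * illustration only.
 */
#if 0
static void decode_callback_irq(unsigned long val,
                                int *is_pci_intx, int *pdev,
                                int *pintx, unsigned long *gsi)
{
    *is_pci_intx = ((val >> 56) & 0xff) == 1;
    if (*is_pci_intx) {
        *pdev  = (val >> 11) & 0x1f;      /* device part of DevFn */
        *pintx = val & 3;                 /* IntX in val[1:0] */
    } else {
        *gsi = val & ((1UL << 56) - 1);   /* GSI in val[55:0] */
    }
}
#endif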