📄 faults.c
	case 21: printk("Key Permission.\n"); break;
	case 22: printk("Instruction Access Rights.\n"); break;
	case 24:		/* General Exception */
		code = (isr >> 4) & 0xf;
		printk("General Exception: %s%s.\n", reason[code],
		       (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
				      " (data access)") : "");
		if (code == 8) {
#ifdef CONFIG_IA64_PRINT_HAZARDS
			printk("%s[%d]: possible hazard @ ip=%016lx "
			       "(pr = %016lx)\n", current->comm, current->pid,
			       regs->cr_iip + ia64_psr(regs)->ri, regs->pr);
#endif
			printk("ia64_fault: returning on hazard\n");
			return;
		}
		break;
	case 25: printk("Disabled FP-Register.\n"); break;
	case 26: printk("NaT consumption.\n"); break;
	case 29: printk("Debug.\n"); break;
	case 30: printk("Unaligned Reference.\n"); break;
	case 31: printk("Unsupported data reference.\n"); break;
	case 32: printk("Floating-Point Fault.\n"); break;
	case 33: printk("Floating-Point Trap.\n"); break;
	case 34: printk("Lower Privilege Transfer Trap.\n"); break;
	case 35: printk("Taken Branch Trap.\n"); break;
	case 36: printk("Single Step Trap.\n"); break;
	case 45: printk("IA-32 Exception.\n"); break;
	case 46: printk("IA-32 Intercept.\n"); break;
	case 47: printk("IA-32 Interrupt.\n"); break;
	default: printk("Fault %lu\n", vector); break;
	}
	show_registers(regs);
	panic("Fault in Xen.\n");
}

/* Also read in hyperprivop.S */
int first_break = 0;

void
ia64_handle_break(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
		  unsigned long iim)
{
	struct domain *d = current->domain;
	struct vcpu *v = current;
	IA64FAULT vector;

	/* FIXME: don't hardcode constant */
	if ((iim == 0x80001 || iim == 0x80002)
	    && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
		do_ssc(vcpu_get_gr(current, 36), regs);
	}
#ifdef CRASH_DEBUG
	else if ((iim == 0 || iim == CDB_BREAK_NUM) && !guest_mode(regs)) {
		if (iim == 0)
			show_registers(regs);
		debugger_trap_fatal(0 /* don't care */ , regs);
		regs_increment_iip(regs);
	}
#endif
	else if (iim == d->arch.breakimm
		 && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
		/* by default, do not continue */
		v->arch.hypercall_continuation = 0;

		if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
			if (!PSCBX(v, hypercall_continuation))
				vcpu_increment_iip(current);
		} else
			reflect_interruption(isr, regs, vector);
	} else if ((iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX
		   && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
		if (ia64_hyperprivop(iim, regs))
			vcpu_increment_iip(current);
	} else {
		if (iim == 0)
			die_if_kernel("bug check", regs, iim);
		PSCB(v, iim) = iim;
		reflect_interruption(isr, regs, IA64_BREAK_VECTOR);
	}
}

void
ia64_handle_privop(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
		   unsigned long itir)
{
	IA64FAULT vector;

	vector = priv_emulate(current, regs, isr);
	if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
		// Note: if a path results in a vector to reflect that requires
		// iha/itir (e.g. vcpu_force_data_miss), they must be set there
		/*
		 * IA64_GENEX_VECTOR may contain in the lowest byte an ISR.code
		 * see IA64_ILLOP_FAULT, ...
		 */
		if ((vector & ~0xffUL) == IA64_GENEX_VECTOR) {
			isr = vector & 0xffUL;
			vector = IA64_GENEX_VECTOR;
		}
		reflect_interruption(isr, regs, vector);
	}
}

void
ia64_handle_reflection(unsigned long ifa, struct pt_regs *regs,
		       unsigned long isr, unsigned long iim,
		       unsigned long vector)
{
	struct vcpu *v = current;
	unsigned long check_lazy_cover = 0;
	unsigned long psr = regs->cr_ipsr;
	unsigned long status;

	/* The following faults shouldn't be seen from Xen itself */
	BUG_ON(!(psr & IA64_PSR_CPL));

	switch (vector) {
	case 6:
		vector = IA64_INST_KEY_MISS_VECTOR;
		break;
	case 7:
		vector = IA64_DATA_KEY_MISS_VECTOR;
		break;
	case 8:
		vector = IA64_DIRTY_BIT_VECTOR;
		break;
	case 9:
		vector = IA64_INST_ACCESS_BIT_VECTOR;
		break;
	case 10:
		check_lazy_cover = 1;
		vector = IA64_DATA_ACCESS_BIT_VECTOR;
		break;
	case 20:
		check_lazy_cover = 1;
		vector = IA64_PAGE_NOT_PRESENT_VECTOR;
		break;
	case 21:
		vector = IA64_KEY_PERMISSION_VECTOR;
		break;
	case 22:
		vector = IA64_INST_ACCESS_RIGHTS_VECTOR;
		break;
	case 23:
		check_lazy_cover = 1;
		vector = IA64_DATA_ACCESS_RIGHTS_VECTOR;
		break;
	case 24:
		vector = IA64_GENEX_VECTOR;
		break;
	case 25:
		if (PSCB(v, hpsr_dfh)) {
			PSCB(v, hpsr_dfh) = 0;
			PSCB(v, hpsr_mfh) = 1;
			if (__ia64_per_cpu_var(fp_owner) != v)
				__ia64_load_fpu(v->arch._thread.fph);
		}
		if (!PSCB(v, vpsr_dfh)) {
			regs->cr_ipsr &= ~IA64_PSR_DFH;
			return;
		}
		vector = IA64_DISABLED_FPREG_VECTOR;
		break;
	case 26:
		if (((isr >> 4L) & 0xfL) == 1) {
			/* Fault is due to a register NaT consumption fault. */
			//regs->eml_unat = 0; FIXME: DO WE NEED THIS??
			vector = IA64_NAT_CONSUMPTION_VECTOR;
			break;
		}
#if 1
		// pass null pointer dereferences through with no error
		// but retain debug output for non-zero ifa
		if (!ifa) {
			vector = IA64_NAT_CONSUMPTION_VECTOR;
			break;
		}
#endif
#ifdef CONFIG_PRIVIFY
		/* Some privified operations are coded using reg+64 instead
		   of reg. */
		printk("*** NaT fault... attempting to handle as privop\n");
		printk("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
		       isr, ifa, regs->cr_iip, psr);
		//regs->eml_unat = 0; FIXME: DO WE NEED THIS???
		// certain NaT faults are higher priority than privop faults
		vector = priv_emulate(v, regs, isr);
		if (vector == IA64_NO_FAULT) {
			printk("*** Handled privop masquerading as NaT "
			       "fault\n");
			return;
		}
#endif
		vector = IA64_NAT_CONSUMPTION_VECTOR;
		break;
	case 27:
		//printk("*** Handled speculation vector, itc=%lx!\n",
		//       ia64_get_itc());
		PSCB(current, iim) = iim;
		vector = IA64_SPECULATION_VECTOR;
		break;
	case 29:
		vector = IA64_DEBUG_VECTOR;
		if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_DEBUG))
			return;
		break;
	case 30:
		// FIXME: Should we handle unaligned refs in Xen??
		vector = IA64_UNALIGNED_REF_VECTOR;
		break;
	case 32:
		status = handle_fpu_swa(1, regs, isr);
		if (!status) {
			vcpu_increment_iip(v);
			return;
		}
		// fetch code fail
		if (IA64_RETRY == status)
			return;
		printk("ia64_handle_reflection: handling FP fault\n");
		vector = IA64_FP_FAULT_VECTOR;
		break;
	case 33:
		status = handle_fpu_swa(0, regs, isr);
		if (!status)
			return;
		// fetch code fail
		if (IA64_RETRY == status)
			return;
		printk("ia64_handle_reflection: handling FP trap\n");
		vector = IA64_FP_TRAP_VECTOR;
		break;
	case 34:
		if (isr & (1UL << 4))
			printk("ia64_handle_reflection: handling "
			       "unimplemented instruction address %s\n",
			       (isr & (1UL << 32)) ? "fault" : "trap");
		vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR;
		break;
	case 35:
		vector = IA64_TAKEN_BRANCH_TRAP_VECTOR;
		if (debugger_kernel_event(regs,
					  XEN_IA64_DEBUG_ON_KERN_TBRANCH))
			return;
		break;
	case 36:
		vector = IA64_SINGLE_STEP_TRAP_VECTOR;
		if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_SSTEP))
			return;
		break;
	default:
		panic_domain(regs, "ia64_handle_reflection: "
			     "unhandled vector=0x%lx\n", vector);
		return;
	}
	if (check_lazy_cover && (isr & IA64_ISR_IR) &&
	    handle_lazy_cover(v, regs))
		return;
	PSCB(current, ifa) = ifa;
	PSCB(current, itir) = vcpu_get_itir_on_fault(v, ifa);
	reflect_interruption(isr, regs, vector);
}

void
ia64_shadow_fault(unsigned long ifa, unsigned long itir,
		  unsigned long isr, struct pt_regs *regs)
{
	struct vcpu *v = current;
	struct domain *d = current->domain;
	unsigned long gpfn;
	unsigned long pte = 0;
	struct vhpt_lf_entry *vlfe;

	/*
	 * v->arch.vhpt_pg_shift shouldn't be used here.
	 * Currently the dirty page logging bitmap is allocated based
	 * on PAGE_SIZE. This is part of the xen_domctl_shadow_op ABI.
	 * If we want to log dirty pages at a finer granularity when
	 * v->arch.vhpt_pg_shift < PAGE_SHIFT, we have to revise the
	 * ABI and update this function and the related tool stack
	 * (live relocation).
	 */
	unsigned long vhpt_pg_shift = PAGE_SHIFT;

	/*
	 * There are two jobs to do:
	 * - marking the page as dirty (the metaphysical address must be
	 *   extracted to do that),
	 * - reflecting or not the fault (the virtual Dirty bit must be
	 *   extracted to decide).
	 * Unfortunately, this information is not immediately available!
	 */

	/* Extract the metaphysical address.
	   Try to get it from the VHPT and the M2P as we need the flags. */
	vlfe = (struct vhpt_lf_entry *)ia64_thash(ifa);
	pte = vlfe->page_flags;
	if (vlfe->ti_tag == ia64_ttag(ifa)) {
		/* The VHPT entry is valid. */
		gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >>
					 vhpt_pg_shift);
		BUG_ON(gpfn == INVALID_M2P_ENTRY);
	} else {
		unsigned long itir, iha;
		IA64FAULT fault;

		/* The VHPT entry is not valid. */
		vlfe = NULL;

		/* FIXME: gives a chance to tpa, as the TC was valid. */

		fault = vcpu_translate(v, ifa, 1, &pte, &itir, &iha);

		/* Try again! */
		if (fault != IA64_NO_FAULT) {
			/* This will trigger a dtlb miss. */
			ia64_ptcl(ifa, vhpt_pg_shift << 2);
			return;
		}
		gpfn = ((pte & _PAGE_PPN_MASK) >> vhpt_pg_shift);
		if (pte & _PAGE_D)
			pte |= _PAGE_VIRT_D;
	}

	/* Set the dirty bit in the bitmap. */
	shadow_mark_page_dirty(d, gpfn);

	/*
	 * Update the local TC/VHPT and decide whether or not the fault
	 * should be reflected.
	 * SMP note: we almost ignore the other processors. The
	 * shadow_bitmap has been atomically updated. If the dirty fault
	 * happens on another processor, it will do its job.
	 */

	if (pte != 0) {
		/* We will know how to handle the fault. */

		if (pte & _PAGE_VIRT_D) {
			/* Rewrite the VHPT entry.
			   There is no race here because only the
			   cpu VHPT owner can write page_flags. */
			if (vlfe)
				vlfe->page_flags = pte | _PAGE_D;

			/* Purge the TC locally.
			   It will be reloaded from the VHPT iff the
			   VHPT entry is still valid. */
			ia64_ptcl(ifa, vhpt_pg_shift << 2);

			atomic64_inc(&d->arch.shadow_fault_count);
		} else {
			/* Reflect.
			   In this case there is no need to purge. */
			ia64_handle_reflection(ifa, regs, isr, 0, 8);
		}
	} else {
		/* We don't know whether or not the fault must be
		   reflected. The VHPT entry is not valid. */
		/* FIXME: in metaphysical mode, we could do an ITC now. */
		ia64_ptcl(ifa, vhpt_pg_shift << 2);
	}
}