vmmu.c
    gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
    vcpu_get_rr(vcpu, va, &rid);
    rid &= RR_RID_MASK;
    p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
    vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.dtrs[slot], pte, itir, va, rid);
    vcpu_quick_region_set(PSCBX(vcpu, dtr_regions), va);
    return IA64_NO_FAULT;
}

/* ptr.d emulation: invalidate any data TRs overlapping the range, then
 * purge the matching VTLB/VHPT entries. */
IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu, u64 ifa, u64 ps)
{
    int index;
    u64 va;

    va = PAGEALIGN(ifa, ps);
    while ((index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB)) >= 0) {
        vcpu->arch.dtrs[index].pte.p = 0;
    }
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}

/* ptr.i emulation: same as above, for instruction TRs. */
IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu, u64 ifa, u64 ps)
{
    int index;
    u64 va;

    va = PAGEALIGN(ifa, ps);
    while ((index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB)) >= 0) {
        vcpu->arch.itrs[index].pte.p = 0;
    }
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}

/* ptc.l emulation: purge the range from this VCPU's VTLB and VHPT. */
IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, u64 va, u64 ps)
{
    va = PAGEALIGN(va, ps);
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}

/* ptc.e emulation: purge everything. */
IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, u64 va)
{
    thash_purge_all(vcpu);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, u64 va, u64 ps)
{
    return vmx_vcpu_ptc_ga(vcpu, va, ps);
}

/*
IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
{
    vmx_vcpu_ptc_l(vcpu, va, ps);
    return IA64_NO_FAULT;
}
 */

struct ptc_ga_args {
    unsigned long vadr;
    unsigned long rid;
    unsigned long ps;
    struct vcpu *vcpu;
};

static void ptc_ga_remote_func(void *varg)
{
    u64 oldrid, moldrid, mpta, oldpsbits, vadr, flags;
    struct ptc_ga_args *args = (struct ptc_ga_args *)varg;
    VCPU *v = args->vcpu;
    int cpu = v->processor;

    vadr = args->vadr;

    /* Try again if VCPU has migrated. */
    if (cpu != current->processor)
        return;
    local_irq_save(flags);
    if (!spin_trylock(&per_cpu(schedule_data, cpu).schedule_lock))
        goto bail2;
    if (v->processor != cpu)
        goto bail1;
    /* Temporarily switch region 0 to the purged region's RID and page
     * size bits so the purge sees the target VCPU's translations. */
    oldrid = VMX(v, vrr[0]);
    VMX(v, vrr[0]) = args->rid;
    oldpsbits = VMX(v, psbits[0]);
    VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vadr)]);
    moldrid = ia64_get_rr(0x0);
    ia64_set_rr(0x0, vrrtomrr(v, args->rid));
    mpta = ia64_get_pta();
    /* Install the target VCPU's VHPT base with the walker disabled. */
    ia64_set_pta(v->arch.arch_vmx.mpta & ~1);
    ia64_srlz_d();
    vadr = PAGEALIGN(vadr, args->ps);
    thash_purge_entries_remote(v, vadr, args->ps);
    /* Restore region 0 and the machine PTA. */
    VMX(v, vrr[0]) = oldrid;
    VMX(v, psbits[0]) = oldpsbits;
    ia64_set_rr(0x0, moldrid);
    ia64_set_pta(mpta);
    ia64_dv_serialize_data();
    /* Success: clearing args->vcpu tells the caller to stop retrying. */
    args->vcpu = NULL;
bail1:
    spin_unlock(&per_cpu(schedule_data, cpu).schedule_lock);
bail2:
    local_irq_restore(flags);
}

IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
{
    struct domain *d = vcpu->domain;
    struct vcpu *v;
    struct ptc_ga_args args;
    int cpu;

    args.vadr = va;
    vcpu_get_rr(vcpu, va, &args.rid);
    args.ps = ps;
    for_each_vcpu(d, v) {
        if (!v->is_initialised)
            continue;

        if (v == vcpu) {
            vmx_vcpu_ptc_l(v, va, ps);
            continue;
        }

        /* args.vcpu doubles as a completion flag: the remote handler
         * clears it only after the purge has actually run, so retry
         * until it does (the VCPU may migrate under us). */
        args.vcpu = v;
        do {
            cpu = v->processor;
            if (cpu != current->processor) {
                spin_unlock_wait(&per_cpu(schedule_data, cpu).schedule_lock);
                /* Flush VHPT on remote processors. */
                smp_call_function_single(cpu, &ptc_ga_remote_func,
                                         &args, 0, 1);
            } else {
                ptc_ga_remote_func(&args);
            }
        } while (args.vcpu != NULL);
    }
    return IA64_NO_FAULT;
}
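/*
 * A minimal sketch of the retry protocol used by vmx_vcpu_ptc_ga() above,
 * with hypothetical helpers run_on(), target_cpu(), target_still_here()
 * and do_purge() standing in for smp_call_function_single() and the
 * processor/schedule_lock checks:
 *
 *     struct work { int done; };
 *
 *     static void remote(void *p)
 *     {
 *         struct work *w = p;
 *         if (!target_still_here())
 *             return;        // target migrated: leave w->done clear
 *         do_purge();
 *         w->done = 1;       // completion is signalled only on success
 *     }
 *
 *     struct work w = { .done = 0 };
 *     do {
 *         run_on(target_cpu(), remote, &w);   // synchronous cross-call
 *     } while (!w.done);                      // retry after a migration
 */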
u64 vmx_vcpu_thash(VCPU *vcpu, u64 vadr)
{
    PTA vpta;
    ia64_rr vrr;
    u64 pval;
    u64 vhpt_offset;
    u64 mask;

    vpta.val = vmx_vcpu_get_pta(vcpu);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    mask = (1UL << vpta.size) - 1;
    if (vpta.vf) {
        /* Long format: hash the virtual page number with the RID. */
        vadr = (vadr & 0x1fffffffffffffffUL) >> vrr.ps;
        vhpt_offset = vadr ^ vrr.rid;
        pval = (vpta.val & ~0x7fffUL) + ((vhpt_offset << 5) & mask);
    } else {
        /* Short format: linear table of 8-byte entries indexed by VPN. */
        vhpt_offset = ((vadr >> vrr.ps) << 3) & mask;
        pval = (vadr & VRN_MASK) |
               (vpta.val << 3 >> (vpta.size + 3) << vpta.size) |
               vhpt_offset;
    }
    return pval;
}

u64 vmx_vcpu_ttag(VCPU *vcpu, u64 vadr)
{
    ia64_rr vrr;
    PTA vpta;
    u64 pval;
    u64 rid;

    vpta.val = vmx_vcpu_get_pta(vcpu);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    if (vpta.vf) {
        vadr = (vadr & 0x1fffffffffffffffUL) >> vrr.ps;
        rid = vrr.rid;
        pval = vadr ^ (rid << 39);
    } else {
        /* The short format has no tags; return an invalid tag. */
        pval = 1;
    }
    return pval;
}

IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 vadr, u64 *padr)
{
    thash_data_t *data;
    ISR visr, pt_isr;
    REGS *regs;
    u64 vhpt_adr, madr;
    IA64_PSR vpsr;

    regs = vcpu_regs(vcpu);
    pt_isr.val = VMX(vcpu, cr_isr);
    visr.val = 0;
    visr.ei = pt_isr.ei;
    visr.ir = pt_isr.ir;
    vpsr.val = VCPU(vcpu, vpsr);
    visr.na = 1;

    /* First look in VTLB. */
    data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
    if (data) {
        if (data->p == 0) {
            vcpu_set_isr(vcpu, visr.val);
            data_page_not_present(vcpu, vadr);
            return IA64_FAULT;
        } else if (data->ma == VA_MATTR_NATPAGE) {
            vcpu_set_isr(vcpu, visr.val);
            dnat_page_consumption(vcpu, vadr);
            return IA64_FAULT;
        } else {
            *padr = thash_translate(data, vadr);
            return IA64_NO_FAULT;
        }
    }

    /* Look in mVHPT. */
    data = vhpt_lookup(vadr);
    if (data) {
        if (data->p == 0) {
            vcpu_set_isr(vcpu, visr.val);
            data_page_not_present(vcpu, vadr);
            return IA64_FAULT;
        } else if (data->ma == VA_MATTR_NATPAGE) {
            vcpu_set_isr(vcpu, visr.val);
            dnat_page_consumption(vcpu, vadr);
            return IA64_FAULT;
        } else {
            madr = thash_translate(data, vadr);
            *padr = __mpa_to_gpa(madr);
            return IA64_NO_FAULT;
        }
    }

    /* If VHPT is not enabled, inject fault. */
    if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
        if (vpsr.ic) {
            vcpu_set_isr(vcpu, visr.val);
            alt_dtlb(vcpu, vadr);
            return IA64_FAULT;
        } else {
            nested_dtlb(vcpu);
            return IA64_FAULT;
        }
    }

    /* Get gVHPT entry. */
    vhpt_adr = vmx_vcpu_thash(vcpu, vadr);
    data = vtlb_lookup(vcpu, vhpt_adr, DSIDE_TLB);
    if (data) {
        /* FIXME: we should read gadr from the entry! */
        if (vpsr.ic) {
            vcpu_set_isr(vcpu, visr.val);
            dtlb_fault(vcpu, vadr);
            return IA64_FAULT;
        } else {
            nested_dtlb(vcpu);
            return IA64_FAULT;
        }
    } else {
        if (vpsr.ic) {
            vcpu_set_isr(vcpu, visr.val);
            dvhpt_fault(vcpu, vadr);
            return IA64_FAULT;
        } else {
            nested_dtlb(vcpu);
            return IA64_FAULT;
        }
    }
}

u64 vmx_vcpu_tak(VCPU *vcpu, u64 vadr)
{
    thash_data_t *data;
    u64 key;

    if (unimplemented_gva(vcpu, vadr)) {
        key = 1;
        return key;
    }

    data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
    if (data) {
        if (data->p)
            return data->key << 8;
        else
            return 1;
    }

    data = vhpt_lookup(vadr);
    if (data) {
        if (data->p)
            return data->key << 8; /* FIXME: possible mangling/masking. */
        else
            return 1;
    }

    if (!vhpt_enabled(vcpu, vadr, NA_REF))
        return 1;

    /* FIXME: look in the guest VHPT. */
    return 1;
}
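For reference, here is a minimal user-space sketch of the short-format (vpta.vf == 0) hash computed by vmx_vcpu_thash() above. The PTA base, table size, and page size values are made up for illustration, and thash_short() is a hypothetical helper, not part of vmmu.c:

#include <stdio.h>
#include <stdint.h>

#define VRN_MASK 0xe000000000000000UL   /* bits 63:61, virtual region number */

/* Short-format VHPT: a linear table of 8-byte entries.  pta_size is the
 * log2 of the table size; rr_ps is the log2 page size of the region. */
static uint64_t thash_short(uint64_t vadr, uint64_t pta_val,
                            unsigned pta_size, unsigned rr_ps)
{
    uint64_t mask = (1UL << pta_size) - 1;
    uint64_t off = ((vadr >> rr_ps) << 3) & mask;   /* entry offset */

    /* Region bits of vadr, the table base aligned to its size, plus the
     * per-page offset -- the same expression as in vmx_vcpu_thash(). */
    return (vadr & VRN_MASK) |
           (pta_val << 3 >> (pta_size + 3) << pta_size) |
           off;
}

int main(void)
{
    /* Assumed values: 8KB table (pta_size = 13), 16KB pages (rr_ps = 14). */
    uint64_t va = 0x2000000000012345UL;     /* an address in region 1 */
    uint64_t entry = thash_short(va, 0x1000000UL, 13, 14);

    printf("VHPT entry for 0x%llx at 0x%llx\n",
           (unsigned long long)va, (unsigned long long)entry);
    return 0;
}

This prints the table slot the hardware walker (and the hypervisor) would probe for the given address; the long format (vpta.vf == 1) instead hashes the virtual page number against the region ID, as in the first branch of vmx_vcpu_thash().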