vmmu.c
        return IA64_FAULT;
    }
    pte &= ~PAGE_FLAGS_RV_MASK;
    vcpu_get_rr(vcpu, va, &rid);
    rid = rid & RR_RID_MASK;
    p_itr = (thash_data_t *)&vcpu->arch.itrs[slot];
    vmx_vcpu_set_tr(p_itr, pte, itir, va, rid);
    vcpu_quick_region_set(PSCBX(vcpu, itr_regions), va);
    return IA64_NO_FAULT;
}


IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int index;
#endif
    u64 gpfn;
    u64 ps, va, rid;
    thash_data_t *p_dtr;

    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
#ifdef VTLB_DEBUG
    index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
    if (index >= 0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu), "Tlb conflict!!");
        return IA64_FAULT;
    }
#endif

    if (slot >= NDTRS) {
        panic_domain(NULL, "bad itr.d slot (%ld)", slot);
        return IA64_FAULT;
    }
    pte &= ~PAGE_FLAGS_RV_MASK;

    /* This is a bad workaround.
       In Linux, region 7 uses a 16M page size and is identity mapped.
       The VHPT page size in Xen is 16K, so purging the VHPT while the
       guest inserts a 16M translation would iterate 1024 times, which
       makes Xen/IPF very slow.  Xen therefore does not purge the VHPT
       for 16M insertions. */
    if (ps != _PAGE_SIZE_16M)
        thash_purge_entries(vcpu, va, ps);
    gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
    if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn))
        pte |= VTLB_PTE_IO;
    vcpu_get_rr(vcpu, va, &rid);
    rid = rid & RR_RID_MASK;
    p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
    vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.dtrs[slot], pte, itir, va, rid);
    vcpu_quick_region_set(PSCBX(vcpu, dtr_regions), va);
    return IA64_NO_FAULT;
}


IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu, u64 ifa, u64 ps)
{
    int index;
    u64 va;

    va = PAGEALIGN(ifa, ps);
    while ((index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB)) >= 0) {
        vcpu->arch.dtrs[index].pte.p = 0;
    }
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu, u64 ifa, u64 ps)
{
    int index;
    u64 va;

    va = PAGEALIGN(ifa, ps);
    while ((index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB)) >= 0) {
        vcpu->arch.itrs[index].pte.p = 0;
    }
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, u64 va, u64 ps)
{
    va = PAGEALIGN(va, ps);
    thash_purge_entries(vcpu, va, ps);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, u64 va)
{
    thash_purge_all(vcpu);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, u64 va, u64 ps)
{
    return vmx_vcpu_ptc_ga(vcpu, va, ps);
}

/*
IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
{
    vmx_vcpu_ptc_l(vcpu, va, ps);
    return IA64_NO_FAULT;
}
 */

struct ptc_ga_args {
    unsigned long vadr;
    unsigned long rid;
    unsigned long ps;
    struct vcpu *vcpu;
};

static void ptc_ga_remote_func(void *varg)
{
    u64 oldrid, moldrid, mpta, oldpsbits, vadr, flags;
    struct ptc_ga_args *args = (struct ptc_ga_args *)varg;
    VCPU *v = args->vcpu;
    int cpu = v->processor;

    vadr = args->vadr;

    /* Try again if the VCPU has migrated. */
    if (cpu != current->processor)
        return;
    local_irq_save(flags);
    if (!spin_trylock(&per_cpu(schedule_data, cpu).schedule_lock))
        goto bail2;
    if (v->processor != cpu)
        goto bail1;
    oldrid = VMX(v, vrr[0]);
    VMX(v, vrr[0]) = args->rid;
    oldpsbits = VMX(v, psbits[0]);
    VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vadr)]);
    moldrid = ia64_get_rr(0x0);
    ia64_set_rr(0x0, vrrtomrr(v, args->rid));
    mpta = ia64_get_pta();
    ia64_set_pta(v->arch.arch_vmx.mpta & (~1));
    ia64_srlz_d();
    vadr = PAGEALIGN(vadr, args->ps);
    thash_purge_entries_remote(v, vadr, args->ps);
    VMX(v, vrr[0]) = oldrid;
    VMX(v, psbits[0]) = oldpsbits;
    ia64_set_rr(0x0, moldrid);
    ia64_set_pta(mpta);
    ia64_dv_serialize_data();
    args->vcpu = NULL;
bail1:
    spin_unlock(&per_cpu(schedule_data, cpu).schedule_lock);
bail2:
    local_irq_restore(flags);
}

IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
{
    struct domain *d = vcpu->domain;
    struct vcpu *v;
    struct ptc_ga_args args;
    int cpu;

    args.vadr = va;
    vcpu_get_rr(vcpu, va, &args.rid);
    args.ps = ps;
    for_each_vcpu (d, v) {
        if (!v->is_initialised)
            continue;

        if (v == vcpu) {
            vmx_vcpu_ptc_l(v, va, ps);
            continue;
        }

        args.vcpu = v;
        do {
            cpu = v->processor;
            if (cpu != current->processor) {
                spin_unlock_wait(&per_cpu(schedule_data, cpu).schedule_lock);
                /* Flush VHPT on remote processors. */
                smp_call_function_single(cpu, &ptc_ga_remote_func, &args, 0, 1);
            } else {
                ptc_ga_remote_func(&args);
            }
        } while (args.vcpu != NULL);
    }
    return IA64_NO_FAULT;
}

u64 vmx_vcpu_thash(VCPU *vcpu, u64 vadr)
{
    PTA vpta;
    ia64_rr vrr;
    u64 pval;
    u64 vhpt_offset;

    vpta.val = vmx_vcpu_get_pta(vcpu);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    if (vpta.vf) {
        pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.rrval,
                             vpta.val, 0, 0, 0, 0);
        pval = vpta.val & ~0xffff;
    } else {
        vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
        pval = (vadr & VRN_MASK) |
               (vpta.val << 3 >> (vpta.size + 3) << (vpta.size)) |
               vhpt_offset;
    }
    return pval;
}

u64 vmx_vcpu_ttag(VCPU *vcpu, u64 vadr)
{
    ia64_rr vrr;
    PTA vpta;
    u64 pval;

    vpta.val = vmx_vcpu_get_pta(vcpu);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    if (vpta.vf) {
        pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.rrval, 0, 0, 0, 0, 0);
    } else {
        pval = 1;
    }
    return pval;
}

IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 vadr, u64 *padr)
{
    thash_data_t *data;
    ISR visr, pt_isr;
    REGS *regs;
    u64 vhpt_adr, madr;
    IA64_PSR vpsr;

    regs = vcpu_regs(vcpu);
    pt_isr.val = VMX(vcpu, cr_isr);
    visr.val = 0;
    visr.ei = pt_isr.ei;
    visr.ir = pt_isr.ir;
    vpsr.val = VCPU(vcpu, vpsr);
    visr.na = 1;

    data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
    if (data) {
        if (data->p == 0) {
            vcpu_set_isr(vcpu, visr.val);
            data_page_not_present(vcpu, vadr);
            return IA64_FAULT;
        } else if (data->ma == VA_MATTR_NATPAGE) {
            vcpu_set_isr(vcpu, visr.val);
            dnat_page_consumption(vcpu, vadr);
            return IA64_FAULT;
        } else {
            *padr = ((data->ppn >> (data->ps - 12)) << data->ps) |
                    (vadr & (PSIZE(data->ps) - 1));
            return IA64_NO_FAULT;
        }
    }

    data = vhpt_lookup(vadr);
    if (data) {
        if (data->p == 0) {
            vcpu_set_isr(vcpu, visr.val);
            data_page_not_present(vcpu, vadr);
            return IA64_FAULT;
        } else if (data->ma == VA_MATTR_NATPAGE) {
            vcpu_set_isr(vcpu, visr.val);
            dnat_page_consumption(vcpu, vadr);
            return IA64_FAULT;
        } else {
            madr = (data->ppn >> (data->ps - 12) << data->ps) |
                   (vadr & (PSIZE(data->ps) - 1));
            *padr = __mpa_to_gpa(madr);
            return IA64_NO_FAULT;
        }
    } else {
        if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
            if (vpsr.ic) {
                vcpu_set_isr(vcpu, visr.val);
                alt_dtlb(vcpu, vadr);
                return IA64_FAULT;
            } else {
                nested_dtlb(vcpu);
                return IA64_FAULT;
            }
        } else {
            vhpt_adr = vmx_vcpu_thash(vcpu, vadr);
            data = vtlb_lookup(vcpu, vhpt_adr, DSIDE_TLB);
            if (data) {
                if (vpsr.ic) {
                    vcpu_set_isr(vcpu, visr.val);
                    dtlb_fault(vcpu, vadr);
                    return IA64_FAULT;
                } else {
                    nested_dtlb(vcpu);
                    return IA64_FAULT;
                }
            } else {
                if (vpsr.ic) {
                    vcpu_set_isr(vcpu, visr.val);
                    dvhpt_fault(vcpu, vadr);
                    return IA64_FAULT;
                } else {
                    nested_dtlb(vcpu);
                    return IA64_FAULT;
                }
            }
        }
    }
}

u64 vmx_vcpu_tak(VCPU *vcpu, u64 vadr)
{
    thash_data_t *data;
    u64 key;

    if (unimplemented_gva(vcpu, vadr)) {
        key = 1;
        return key;
    }

    /* FIXME: if psr.dt is set, look in the guest VHPT. */
    data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
    if (!data || !data->p)
        key = 1;
    else
        key = data->key << 8;
    return key;
}
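As a side note on the non-vf branch of vmx_vcpu_thash above, here is a minimal standalone sketch of the long-format VHPT hash it computes. The function name, parameter names, and the literal mask value below are illustrative only and are not part of vmmu.c (the real code reads these fields through the PTA and ia64_rr unions); the sketch just shows how the region bits, the PTA base, and the 8-byte entry offset derived from the virtual page number are combined.

#include <stdint.h>

/* Illustrative sketch only -- not part of vmmu.c.  vadr is the faulting
 * virtual address, ps the page size (log2) from the matching region
 * register, pta_val the raw PTA value, and pta_size its size field
 * (log2 of the VHPT table size in bytes).  The top three address bits
 * select the virtual region, hence the assumed mask below. */
#define SKETCH_VRN_MASK 0xe000000000000000UL

static uint64_t vhpt_long_format_hash(uint64_t vadr, uint64_t ps,
                                      uint64_t pta_val, uint64_t pta_size)
{
    /* Each VHPT entry is 8 bytes, so the VPN shifted left by 3 indexes the
     * table; the mask keeps the offset inside the 2^pta_size-byte window. */
    uint64_t vhpt_offset = ((vadr >> ps) << 3) & ((1UL << pta_size) - 1);

    /* Region bits come from the address itself, the table base from the
     * PTA with its top three bits and low pta_size bits cleared, and the
     * entry offset is OR-ed in last. */
    return (vadr & SKETCH_VRN_MASK)
         | ((pta_val << 3 >> (pta_size + 3)) << pta_size)
         | vhpt_offset;
}

For example, with 16K pages (ps = 14) and a 32K table (pta_size = 15), consecutive virtual pages map to consecutive 8-byte slots that wrap inside the 32K window, while the preserved region bits keep the result in the same virtual region as vadr.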