/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmmu.c: virtual memory management unit components.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */
#include <asm/vmx_vcpu.h>
#include <asm/vmx_pal_vsa.h>
#include <xen/sched-if.h>
#include <asm/vhpt.h>

static int default_vtlb_sz = DEFAULT_VTLB_SZ;
static int default_vhpt_sz = DEFAULT_VHPT_SZ;

static void __init parse_vtlb_size(char *s)
{
    int sz = parse_size_and_unit(s, NULL);

    if (sz > 0) {
        default_vtlb_sz = fls(sz - 1);
        /* minimum 16KB (for tag uniqueness) */
        if (default_vtlb_sz < 14)
            default_vtlb_sz = 14;
    }
}

static void __init parse_vhpt_size(char *s)
{
    int sz = parse_size_and_unit(s, NULL);

    if (sz > 0) {
        default_vhpt_sz = fls(sz - 1);
        default_vhpt_sz = canonicalize_vhpt_size(default_vhpt_sz);
    }
}

custom_param("vti_vtlb_size", parse_vtlb_size);
custom_param("vti_vhpt_size", parse_vhpt_size);

static int init_domain_vhpt(struct vcpu *v)
{
    int rc;
    u64 size = v->domain->arch.hvm_domain.params[HVM_PARAM_VHPT_SIZE];

    if (size == 0)
        size = default_vhpt_sz;
    else
        size = canonicalize_vhpt_size(size);

    rc = thash_alloc(&(v->arch.vhpt), size, "vhpt");
    v->arch.arch_vmx.mpta = v->arch.vhpt.pta.val;
    return rc;
}

static void free_domain_vhpt(struct vcpu *v)
{
    if (v->arch.vhpt.hash)
        thash_free(&(v->arch.vhpt));
}

int init_domain_tlb(struct vcpu *v)
{
    int rc;

    rc = init_domain_vhpt(v);
    if (rc)
        return rc;

    rc = thash_alloc(&(v->arch.vtlb), default_vtlb_sz, "vtlb");
    if (rc) {
        free_domain_vhpt(v);
        return rc;
    }

    return 0;
}

void free_domain_tlb(struct vcpu *v)
{
    if (v->arch.vtlb.hash)
        thash_free(&(v->arch.vtlb));

    free_domain_vhpt(v);
}

int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
{
    ia64_rr vrr;
    PTA vpta;
    IA64_PSR vpsr;

    vpsr.val = VCPU(vcpu, vpsr);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    vpta.val = vmx_vcpu_get_pta(vcpu);

    if (vrr.ve & vpta.ve) {
        switch (ref) {
        case DATA_REF:
        case NA_REF:
            return vpsr.dt;
        case INST_REF:
            return vpsr.dt && vpsr.it && vpsr.ic;
        case RSE_REF:
            return vpsr.dt && vpsr.rt;
        }
    }
    return 0;
}

int unimplemented_gva(VCPU *vcpu, u64 vadr)
{
#if 0
    int bit = vcpu->domain->arch.imp_va_msb;
    u64 ladr = (vadr << 3) >> (3 + bit);

    if (!ladr || ladr == (1U << (61 - bit)) - 1)
        return 0;
    else
        return 1;
#else
    return 0;
#endif
}
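
/*
 * Editorial sketch (not from the original file): the boot-parameter
 * parsing above stores sizes as log2 orders.  fls(sz - 1) returns the
 * 1-based index of the highest set bit of sz - 1, i.e. ceil(log2(sz)),
 * so any requested byte size is rounded up to the next power of two
 * before the 16KB (order 14) floor is applied.  The helpers below are
 * a hypothetical, standalone restatement in plain C, kept out of the
 * build with the file's own #if 0 idiom.
 */
#if 0
#include <assert.h>

static int demo_size_to_order(unsigned long sz)
{
    int order = 0;

    sz -= 1;                          /* mirrors fls(sz - 1) */
    while (sz) {                      /* count bits up to the highest set bit */
        order++;
        sz >>= 1;
    }
    return order < 14 ? 14 : order;   /* clamp to the 16KB minimum */
}

static void demo_size_to_order_check(void)
{
    assert(demo_size_to_order(16 * 1024) == 14);    /* exact power of two */
    assert(demo_size_to_order(100 * 1024) == 17);   /* rounds up to 128KB */
    assert(demo_size_to_order(4 * 1024) == 14);     /* clamped to minimum */
}
#endif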
/*
 * Fetch guest bundle code.
 * INPUT:
 *  gip: guest ip
 *  pbundle: used to return fetched bundle.
 */
unsigned long fetch_code(VCPU *vcpu, u64 gip, IA64_BUNDLE *pbundle)
{
    u64 gpip = 0;   // guest physical IP
    u64 *vpa;
    thash_data_t *tlb;
    u64 mfn, maddr;
    struct page_info *page;

 again:
    if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {   // I-side physical mode
        gpip = pa_clear_uc(gip);               // clear UC bit
    } else {
        tlb = vtlb_lookup(vcpu, gip, ISIDE_TLB);
//        if (tlb == NULL)
//            tlb = vtlb_lookup(vcpu, gip, DSIDE_TLB);
        if (tlb)
            gpip = thash_translate(tlb, gip);
    }
    if (gpip) {
        mfn = gmfn_to_mfn(vcpu->domain, gpip >> PAGE_SHIFT);
        if (mfn == INVALID_MFN)
            panic_domain(vcpu_regs(vcpu), "fetch_code: invalid memory\n");
        maddr = (mfn << PAGE_SHIFT) | (gpip & (PAGE_SIZE - 1));
    } else {
        tlb = vhpt_lookup(gip);
        if (tlb == NULL) {
            ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
            return IA64_RETRY;
        }
        maddr = thash_translate(tlb, gip);
        mfn = maddr >> PAGE_SHIFT;
    }

    page = mfn_to_page(mfn);
    if (get_page(page, vcpu->domain) == 0) {
        if (page_get_owner(page) != vcpu->domain) {
            // This page might be a page granted by another domain.
            panic_domain(NULL, "domain tries to execute foreign domain "
                         "page which might be mapped by grant table.\n");
        }
        goto again;
    }
    vpa = (u64 *)__va(maddr);

    pbundle->i64[0] = *vpa++;
    pbundle->i64[1] = *vpa;
    put_page(page);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int slot;
    u64 ps, va;

    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
    slot = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
    if (slot >= 0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu), "Tlb conflict!!");
        return IA64_FAULT;
    }
#endif //VTLB_DEBUG
    pte &= ~PAGE_FLAGS_RV_MASK;
    thash_purge_and_insert(vcpu, pte, itir, ifa, ISIDE_TLB);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int slot;
    u64 ps, va;

    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
    slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
    if (slot >= 0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu), "Tlb conflict!!");
        return IA64_FAULT;
    }
#endif //VTLB_DEBUG
    pte &= ~PAGE_FLAGS_RV_MASK;
    thash_purge_and_insert(vcpu, pte, itir, ifa, DSIDE_TLB);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int index;
#endif
    u64 ps, va, rid;
    thash_data_t *p_itr;

    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
#ifdef VTLB_DEBUG
    index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
    if (index >= 0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu), "Tlb conflict!!");
        return IA64_FAULT;
    }
    thash_purge_entries(vcpu, va, ps);
#endif

    if (slot >= NITRS) {
        panic_domain(NULL, "bad itr.i slot (%ld)", slot);
        return IA64_FAULT;
    }

    pte &= ~PAGE_FLAGS_RV_MASK;
    vcpu_get_rr(vcpu, va, &rid);
    rid = rid & RR_RID_MASK;
    p_itr = (thash_data_t *)&vcpu->arch.itrs[slot];
    vmx_vcpu_set_tr(p_itr, pte, itir, va, rid);
    vcpu_quick_region_set(PSCBX(vcpu, itr_regions), va);
    return IA64_NO_FAULT;
}
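
/*
 * Editorial sketch (not from the original file): the bookkeeping that
 * vcpu_quick_region_set() above presumably performs.  An ia64 virtual
 * address selects one of eight regions by its top three bits
 * (va[63:61]); keeping a per-vcpu bitmask of regions that contain TR
 * mappings lets later lookups and purges skip regions known to hold
 * none.  The names below are hypothetical, plain C, and kept out of
 * the build with the file's own #if 0 idiom.
 */
#if 0
static void demo_quick_region_set(unsigned char *regions, unsigned long va)
{
    *regions |= 1 << (va >> 61);          /* va[63:61] = region number */
}

static int demo_quick_region_check(unsigned char regions, unsigned long va)
{
    return (regions >> (va >> 61)) & 1;   /* any TR mapping in this region? */
}
#endif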
IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int index;
#endif
    u64 gpfn;
    u64 ps, va, rid;
    thash_data_t *p_dtr;

    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
#ifdef VTLB_DEBUG
    index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
    if (index >= 0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu), "Tlb conflict!!");
        return IA64_FAULT;
    }
#endif

    if (slot >= NDTRS) {
        panic_domain(NULL, "bad itr.d slot (%ld)", slot);
        return IA64_FAULT;
    }

    pte &= ~PAGE_FLAGS_RV_MASK;

    /* This is a bad workaround.
       In Linux, region 7 uses a 16MB page size and is identity mapped,
       while the VHPT page size in Xen is 16KB.  Purging the VHPT for a
       guest 16MB insert would iterate over it 1024 times, which makes
       Xen/IPF very slow, so Xen skips the VHPT purge in that case. */
    if (ps != _PAGE_SIZE_16M)
        thash_purge_entries(vcpu, va, ps);

    /* The source listing is cut off here; the remainder below is an
       editorial reconstruction that mirrors vmx_vcpu_itr_i.  The
       declared gpfn presumably feeds an IO-space check; __gpfn_is_io()
       and VTLB_PTE_IO are assumed from surrounding Xen/ia64 code
       rather than taken from this listing. */
    gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
    if (__gpfn_is_io(vcpu->domain, gpfn))
        pte |= VTLB_PTE_IO;
    vcpu_get_rr(vcpu, va, &rid);
    rid = rid & RR_RID_MASK;
    p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
    vmx_vcpu_set_tr(p_dtr, pte, itir, va, rid);
    vcpu_quick_region_set(PSCBX(vcpu, dtr_regions), va);
    return IA64_NO_FAULT;
}
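
/*
 * Editorial sketch (not from the original file): the cost the
 * workaround above avoids.  A VHPT purge walks the purged range in
 * VHPT-page-sized steps, so purging one 16MB region-7 mapping with a
 * 16KB VHPT page size takes 2^(24-14) = 1024 iterations per insert,
 * which is the slowdown the comment describes.  The helper below is
 * hypothetical, plain C, kept out of the build with #if 0.
 */
#if 0
#include <assert.h>

static unsigned long demo_purge_iterations(unsigned int ps_insert,
                                           unsigned int ps_vhpt)
{
    /* ps arguments are log2 page sizes: 24 for 16MB, 14 for 16KB. */
    return 1UL << (ps_insert - ps_vhpt);
}

static void demo_purge_iterations_check(void)
{
    assert(demo_purge_iterations(24, 14) == 1024);   /* 16MB / 16KB */
}
#endif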