/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmmu.c: virtual memory management unit components.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 */
#include <asm/vmx_vcpu.h>
#include <asm/vmx_pal_vsa.h>
#include <xen/sched-if.h>
#include <asm/vhpt.h>

static int default_vtlb_sz = DEFAULT_VTLB_SZ;
static int default_vhpt_sz = DEFAULT_VHPT_SZ;

static void __init parse_vtlb_size(char *s)
{
    int sz = parse_size_and_unit(s, NULL);

    if (sz > 0) {
        /* Store log2 of the requested size, rounded up to a power of two. */
        default_vtlb_sz = fls(sz - 1);
        /* minimum 16KB (for tag uniqueness) */
        if (default_vtlb_sz < 14)
            default_vtlb_sz = 14;
    }
}

static void __init parse_vhpt_size(char *s)
{
    int sz = parse_size_and_unit(s, NULL);

    if (sz > 0) {
        default_vhpt_sz = fls(sz - 1);
        default_vhpt_sz = canonicalize_vhpt_size(default_vhpt_sz);
    }
}

/* Boot command-line parameters (a size with an optional unit suffix),
 * e.g. "vti_vtlb_size=256K". */
custom_param("vti_vtlb_size", parse_vtlb_size);
custom_param("vti_vhpt_size", parse_vhpt_size);

/*
 * Get the machine page frame number in 16KB unit
 * Input:
 *  d:    the domain
 *  gpfn: guest page frame number
 */
static u64 get_mfn(struct domain *d, u64 gpfn)
{
//    struct domain *d;
    u64 xen_gppn, xen_mppn, mpfn;
/*
    if ( domid == DOMID_SELF ) {
        d = current->domain;
    }
    else {
        d = get_domain_by_id(domid);
    }
 */
    xen_gppn = arch_to_xen_ppn(gpfn);
    xen_mppn = gmfn_to_mfn(d, xen_gppn);
/*
    for (i=0; i<pages; i++) {
        if ( gmfn_to_mfn(d, gpfn+i) == INVALID_MFN ) {
            return INVALID_MFN;
        }
    }
*/
    mpfn = xen_to_arch_ppn(xen_mppn);
    mpfn = mpfn | (((1UL << (PAGE_SHIFT - ARCH_PAGE_SHIFT)) - 1) & gpfn);
    return mpfn;
}

static int init_domain_vhpt(struct vcpu *v)
{
    int rc;
    u64 size = v->domain->arch.hvm_domain.params[HVM_PARAM_VHPT_SIZE];

    if (size == 0)
        size = default_vhpt_sz;
    else
        size = canonicalize_vhpt_size(size);

    rc = thash_alloc(&(v->arch.vhpt), size, "vhpt");
    v->arch.arch_vmx.mpta = v->arch.vhpt.pta.val;
    return rc;
}

static void free_domain_vhpt(struct vcpu *v)
{
    if (v->arch.vhpt.hash)
        thash_free(&(v->arch.vhpt));
}

int init_domain_tlb(struct vcpu *v)
{
    int rc;

    rc = init_domain_vhpt(v);
    if (rc)
        return rc;

    rc = thash_alloc(&(v->arch.vtlb), default_vtlb_sz, "vtlb");
    if (rc) {
        free_domain_vhpt(v);
        return rc;
    }

    return 0;
}

void free_domain_tlb(struct vcpu *v)
{
    if (v->arch.vtlb.hash)
        thash_free(&(v->arch.vtlb));

    free_domain_vhpt(v);
}

/*
 * Insert guest TLB to machine TLB.
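 *  v:      vcpu on whose behalf the guest translation is inserted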
 *  tlb:    guest entry, in TLB format
 */
void machine_tlb_insert(struct vcpu *v, thash_data_t *tlb)
{
    u64 psr;
    thash_data_t mtlb;
    unsigned int cl = tlb->cl;
    unsigned long mtlb_ppn;

    mtlb.ifa = tlb->vadr;
    mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
    mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
    mtlb.ppn = get_mfn(v->domain, tlb->ppn);
    mtlb_ppn = mtlb.ppn;

#if 0
    if (mtlb_ppn == INVALID_MFN)
        panic_domain(vcpu_regs(v), "Machine tlb insert with invalid mfn number.\n");
#endif

    psr = ia64_clear_ic();
    if ( cl == ISIDE_TLB ) {
        ia64_itc(1, mtlb.ifa, mtlb.page_flags, IA64_ITIR_PS_KEY(mtlb.ps, 0));
    }
    else {
        ia64_itc(2, mtlb.ifa, mtlb.page_flags, IA64_ITIR_PS_KEY(mtlb.ps, 0));
    }
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}

/*
 * Purge machine tlb.
 * INPUT
 *  va: virtual address; only bits 0:60 are valid
 *  ps: page size; the purged range is (1 << ps) bytes
 */
void machine_tlb_purge(u64 va, u64 ps)
{
    ia64_ptcl(va, ps << 2);
}

int vhpt_enabled(VCPU *vcpu, uint64_t vadr, vhpt_ref_t ref)
{
    ia64_rr vrr;
    PTA vpta;
    IA64_PSR vpsr;

    vpsr.val = VCPU(vcpu, vpsr);
    vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    vpta.val = vmx_vcpu_get_pta(vcpu);

    if ( vrr.ve & vpta.ve ) {
        switch ( ref ) {
        case DATA_REF:
        case NA_REF:
            return vpsr.dt;
        case INST_REF:
            return vpsr.dt && vpsr.it && vpsr.ic;
        case RSE_REF:
            return vpsr.dt && vpsr.rt;
        }
    }
    return 0;
}

int unimplemented_gva(VCPU *vcpu, u64 vadr)
{
#if 0
    int bit = vcpu->domain->arch.imp_va_msb;
    u64 ladr = (vadr << 3) >> (3 + bit);

    if (!ladr || ladr == (1U << (61 - bit)) - 1) {
        return 0;
    } else {
        return 1;
    }
#else
    return 0;
#endif
}

/*
 * Fetch guest bundle code.
 * INPUT:
 *  gip: guest ip
 *  pbundle: used to return fetched bundle.
 */
unsigned long
fetch_code(VCPU *vcpu, u64 gip, IA64_BUNDLE *pbundle)
{
    u64 gpip = 0;   // guest physical IP
    u64 *vpa;
    thash_data_t *tlb;
    u64 mfn, maddr;
    struct page_info *page;

 again:
    if ( !(VCPU(vcpu, vpsr) & IA64_PSR_IT) ) {   // I-side physical mode
        gpip = gip;
    }
    else {
        tlb = vtlb_lookup(vcpu, gip, ISIDE_TLB);
//      if ( tlb == NULL )
//          tlb = vtlb_lookup(vcpu, gip, DSIDE_TLB);
        if (tlb)
            gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
                   (gip & (PSIZE(tlb->ps) - 1));
    }
    if (gpip) {
        mfn = gmfn_to_mfn(vcpu->domain, gpip >> PAGE_SHIFT);
        if (mfn == INVALID_MFN)
            panic_domain(vcpu_regs(vcpu), "fetch_code: invalid memory\n");
        maddr = (mfn << PAGE_SHIFT) | (gpip & (PAGE_SIZE - 1));
    } else {
        tlb = vhpt_lookup(gip);
        if (tlb == NULL) {
            ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
            return IA64_RETRY;
        }
        maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
                (gip & (PSIZE(tlb->ps) - 1));
        mfn = maddr >> PAGE_SHIFT;
    }

    page = mfn_to_page(mfn);
    if (get_page(page, vcpu->domain) == 0) {
        if (page_get_owner(page) != vcpu->domain) {
            // This page might be a page granted by another domain.
            panic_domain(NULL, "domain tries to execute foreign domain "
                         "page which might be mapped by grant table.\n");
        }
        goto again;
    }
    vpa = (u64 *)__va(maddr);

    pbundle->i64[0] = *vpa++;
    pbundle->i64[1] = *vpa;
    put_page(page);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int slot;
    u64 ps, va;

    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
    slot = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
    if (slot >= 0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu), "Tlb conflict!!");
        return IA64_FAULT;
    }
#endif //VTLB_DEBUG
    pte &= ~PAGE_FLAGS_RV_MASK;
    thash_purge_and_insert(vcpu, pte, itir, ifa, ISIDE_TLB);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int slot;
    u64 ps, va;

    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
    slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
    if (slot >= 0) {
        // generate MCA.
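        /*
         * An existing data translation register already overlaps this
         * range; architecturally this would raise a machine check, but
         * the domain is panicked here instead.
         */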
        panic_domain(vcpu_regs(vcpu), "Tlb conflict!!");
        return IA64_FAULT;
    }
#endif //VTLB_DEBUG
    pte &= ~PAGE_FLAGS_RV_MASK;
    thash_purge_and_insert(vcpu, pte, itir, ifa, DSIDE_TLB);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
#ifdef VTLB_DEBUG
    int index;
#endif
    u64 ps, va, rid;
    thash_data_t *p_itr;

    ps = itir_ps(itir);
    va = PAGEALIGN(ifa, ps);
#ifdef VTLB_DEBUG
    index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
    if (index >= 0) {
        // generate MCA.
        panic_domain(vcpu_regs(vcpu), "Tlb conflict!!");
        return IA64_FAULT;
    }
    thash_purge_entries(vcpu, va, ps);
#endif
    if (slot >= NITRS) {
        panic_domain(NULL, "bad itr.i slot (%ld)", slot);