📄 vmx_init.c
字号:
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_init.c: initialization work for vt specific domain
 * Copyright (c) 2005, Intel Corporation.
 *  Kun Tian (Kevin Tian) <kevin.tian@intel.com>
 *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *  Fred Yang <fred.yang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

/*
 * 05/08/16 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
 * Disable doubling mapping
 *
 * 05/03/23 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
 * Simplified design in first step:
 *  - One virtual environment
 *  - Domain is bound to one LP
 * Later to support guest SMP:
 *  - Need interface to handle VP scheduled to different LP
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <asm/pal.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/vmx_vcpu.h>
#include <xen/lib.h>
#include <asm/vmmu.h>
#include <public/xen.h>
#include <public/hvm/ioreq.h>
#include <public/event_channel.h>
#include <public/arch-ia64/hvm/memmap.h>
#include <asm/vmx_phy_mode.h>
#include <asm/processor.h>
#include <asm/vmx.h>
#include <xen/mm.h>
#include <asm/viosapic.h>
#include <xen/event.h>
#include <asm/vlsapic.h>
#include <asm/vhpt.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/patch.h>

/* Global flag to identify whether Intel vmx feature is on */
u32 vmx_enabled = 0;
static u64 buffer_size;             /* VM buffer size reported by PAL */
static u64 vp_env_info;             /* VP environment info from PAL_VP_ENV_INFO */
static u64 vm_buffer = 0;           /* Buffer required to bring up VMX feature */
u64 __vsa_base = 0;                 /* Run-time service base of VMX */

/*
 * Patch the VPS service stubs (sync read/write, resume normal/handler)
 * with their run-time entry points, once __vsa_base is known.  Each
 * patched bundle is flushed (ia64_fc) before the final i-serialization.
 */
void vmx_vps_patch(void)
{
    u64 addr;

    addr = (u64)&vmx_vps_sync_read;
    ia64_patch_imm64(addr, __vsa_base + PAL_VPS_SYNC_READ);
    ia64_fc((void *)addr);
    addr = (u64)&vmx_vps_sync_write;
    ia64_patch_imm64(addr, __vsa_base + PAL_VPS_SYNC_WRITE);
    ia64_fc((void *)addr);
    addr = (u64)&vmx_vps_resume_normal;
    ia64_patch_imm64(addr, __vsa_base + PAL_VPS_RESUME_NORMAL);
    ia64_fc((void *)addr);
    addr = (u64)&vmx_vps_resume_handler;
    ia64_patch_imm64(addr, __vsa_base + PAL_VPS_RESUME_HANDLER);
    ia64_fc((void *)addr);
    ia64_sync_i();
    ia64_srlz_i();
}

/*
 * Check whether the VT-i feature is enabled or not; on success sets
 * vmx_enabled and caches the required VM buffer size in buffer_size.
 */
void
identify_vmx_feature(void)
{
    pal_status_t ret;
    u64 avail = 1, status = 1, control = 1;

    vmx_enabled = 0;
    /* Check VT-i feature */
    ret = ia64_pal_proc_get_features(&avail, &status, &control);
    if (ret != PAL_STATUS_SUCCESS) {
        printk("Get proc features failed.\n");
        goto no_vti;
    }

    /* FIXME: do we need to check status field, to see whether
     * PSR.vm is actually enabled? If yes, another call to
     * ia64_pal_proc_set_features may be required then.
     */
    printk("avail:0x%lx, status:0x%lx,control:0x%lx, vm?0x%lx\n",
           avail, status, control, avail & PAL_PROC_VM_BIT);
    if (!(avail & PAL_PROC_VM_BIT)) {
        printk("No VT feature supported.\n");
        goto no_vti;
    }

    ret = ia64_pal_vp_env_info(&buffer_size, &vp_env_info);
    if (ret != PAL_STATUS_SUCCESS) {
        printk("Get vp environment info failed.\n");
        goto no_vti;
    }

    printk("vm buffer size: %ld\n", buffer_size);

    vmx_enabled = 1;
no_vti:
    return;
}

/*
 * ** This function must be called on every processor **
 *
 * Init virtual environment on current LP
 * vsa_base is the indicator whether it's first LP to be initialized
 * for current domain.
 *
 * @start:     current xenheap carve-out cursor (virtual address)
 * @end_in_pa: physical end of the usable region
 * Returns the (possibly advanced) cursor; on failure clears vmx_enabled
 * and returns @start unchanged.
 */
void *
vmx_init_env(void *start, unsigned long end_in_pa)
{
    u64 status, tmp_base;

    /* The shared VM buffer is carved out once, by the first LP. */
    if (!vm_buffer) {
        /* VM buffer must be 4K aligned and
         * must be pinned by both itr and dtr.
         */
#define VM_BUFFER_ALIGN         (4 * 1024)
#define VM_BUFFER_ALIGN_UP(x)   (((x) + (VM_BUFFER_ALIGN - 1)) & \
                                 ~(VM_BUFFER_ALIGN - 1))
        unsigned long s_vm_buffer = VM_BUFFER_ALIGN_UP((unsigned long)start);
        unsigned long e_vm_buffer = s_vm_buffer + buffer_size;
        if (__pa(e_vm_buffer) < end_in_pa) {
            /* Return the alignment slack before the buffer to the heap. */
            init_xenheap_pages(__pa(start), __pa(s_vm_buffer));
            start = (void *)e_vm_buffer;
            vm_buffer = virt_to_xenva(s_vm_buffer);
            printk("vm_buffer: 0x%lx\n", vm_buffer);
        } else {
            printk("Can't allocate vm_buffer "
                   "start 0x%p end_in_pa 0x%lx "
                   "buffer_size 0x%lx\n",
                   start, end_in_pa, buffer_size);
            vmx_enabled = 0;
            return start;
        }
    }

    /* First LP does the full initialization; later LPs just join. */
    status = ia64_pal_vp_init_env(__vsa_base ? VP_INIT_ENV
                                             : VP_INIT_ENV_INITALIZE,
                                  __pa(vm_buffer), vm_buffer, &tmp_base);
    if (status != PAL_STATUS_SUCCESS) {
        printk("ia64_pal_vp_init_env failed.\n");
        vmx_enabled = 0;
        return start;
    }

    if (!__vsa_base) {
        __vsa_base = tmp_base;
        vmx_vps_patch();
    } else
        /* All LPs must report the same VSA base. */
        ASSERT(tmp_base == __vsa_base);

    return start;
}

/* Decomposed view of CPUID register 3 (version information). */
typedef union {
    u64 value;
    struct {
        u64 number   : 8;
        u64 revision : 8;
        u64 model    : 8;
        u64 family   : 8;
        u64 archrev  : 8;
        u64 rv       : 24;
    };
} cpuid3_t;

/* Allocate vpd from domheap.  Returns NULL on allocation failure. */
static vpd_t *alloc_vpd(void)
{
    int i;
    cpuid3_t cpuid3;
    struct page_info *page;
    vpd_t *vpd;
    mapped_regs_t *mregs;

    page = alloc_domheap_pages(NULL, get_order(VPD_SIZE), 0);
    if (page == NULL) {
        printk("VPD allocation failed.\n");
        return NULL;
    }
    vpd = page_to_virt(page);

    printk(XENLOG_DEBUG "vpd base: 0x%p, vpd size:%ld\n",
           vpd, sizeof(vpd_t));
    memset(vpd, 0, VPD_SIZE);

    mregs = &vpd->vpd_low;

    /* CPUID init: mirror the host CPUID values into the guest. */
    for (i = 0; i < 5; i++)
        mregs->vcpuid[i] = ia64_get_cpuid(i);

    /* Limit the CPUID number to 5 */
    cpuid3.value = mregs->vcpuid[3];
    cpuid3.number = 4; /* 5 - 1 */
    mregs->vcpuid[3] = cpuid3.value;

    /* Enable virtualization acceleration for the listed operations. */
    mregs->vac.a_from_int_cr = 1;
    mregs->vac.a_to_int_cr = 1;
    mregs->vac.a_from_psr = 1;
    mregs->vac.a_from_cpuid = 1;
    mregs->vac.a_cover = 1;
    mregs->vac.a_bsw = 1;
    mregs->vac.a_int = 1;
    mregs->vdc.d_vmsw = 1;

    return vpd;
}

/* Free vpd to domheap.  Safe to call when privregs was never allocated. */
static void
free_vpd(struct vcpu *v)
{
    if (v->arch.privregs)
        free_domheap_pages(virt_to_page(v->arch.privregs),
                           get_order(VPD_SIZE));
}

// This is used for PAL_VP_CREATE and PAL_VPS_SET_PENDING_INTERRUPT
// so that we don't have to pin the vpd down with itr[].
void
__vmx_vpd_pin(struct vcpu *v)
{
    unsigned long privregs = (unsigned long)v->arch.privregs;
    u64 psr;

    privregs &= ~(IA64_GRANULE_SIZE - 1);

    // check overlapping with current stack
    if (privregs ==
        ((unsigned long)current & ~(IA64_GRANULE_SIZE - 1)))
        return;

    if (!VMX_DOMAIN(current)) {
        // check overlapping with vhpt
        // NB: mask fixed from ~(IA64_GRANULE_SHIFT - 1), which only
        // cleared the low bits and made the granule comparison bogus.
        if (privregs ==
            (vcpu_vhpt_maddr(current) & ~(IA64_GRANULE_SIZE - 1)))
            return;
    } else {
        // check overlapping with vhpt
        if (privregs ==
            ((unsigned long)current->arch.vhpt.hash &
             ~(IA64_GRANULE_SIZE - 1)))
            return;

        // check overlapping with privregs
        if (privregs ==
            ((unsigned long)current->arch.privregs &
             ~(IA64_GRANULE_SIZE - 1)))
            return;
    }

    /* Purge any stale dtr entry for the granule, then insert a fresh
     * pinned mapping with interrupt collection disabled. */
    psr = ia64_clear_ic();
    ia64_ptr(0x2 /*D*/, privregs, IA64_GRANULE_SIZE);
    ia64_srlz_d();
    ia64_itr(0x2 /*D*/, IA64_TR_MAPPED_REGS, privregs,
             pte_val(pfn_pte(__pa(privregs) >> PAGE_SHIFT, PAGE_KERNEL)),
             IA64_GRANULE_SHIFT);
    ia64_set_psr(psr);
    ia64_srlz_d();
}

/*
 * Undo __vmx_vpd_pin by re-installing the region-7 rr value, which
 * implicitly purges the temporary translation.
 */
void
__vmx_vpd_unpin(struct vcpu *v)
{
    if (!VMX_DOMAIN(current)) {
        int rc;
        rc = !set_one_rr(VRN7 << VRN_SHIFT, VCPU(current, rrs[VRN7]));
        BUG_ON(rc);
    } else {
        IA64FAULT fault;
        fault = vmx_vcpu_set_rr(current, VRN7 << VRN_SHIFT,
                                VMX(current, vrr[VRN7]));
        BUG_ON(fault != IA64_NO_FAULT);
    }
}

/*
 * Create a VP on initialized VMX environment.
 */
static void
vmx_create_vp(struct vcpu *v)
{
    u64 ret;
    vpd_t *vpd = (vpd_t *)v->arch.privregs;
    u64 ivt_base;
    extern char vmx_ia64_ivt;
    /* ia64_ivt is function pointer, so need this translation */
    ivt_base = (u64)&vmx_ia64_ivt;
    printk(XENLOG_DEBUG "ivt_base: 0x%lx\n", ivt_base);

    vmx_vpd_pin(v);
    ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
    vmx_vpd_unpin(v);

    if (ret != PAL_STATUS_SUCCESS) {
        panic_domain(vcpu_regs(v), "ia64_pal_vp_create failed.\n");
    }
}

/* Other non-context related tasks can be done in context switch */
void
vmx_save_state(struct vcpu *v)
{
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -