common.c
From the "Xen virtual machine source code package" · C code · 1,973 lines total · page 1/5
C
1,973 lines
/******************************************************************************
 * arch/x86/mm/shadow/common.c
 *
 * Shadow code that does not need to be multiply compiled.
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/perfc.h>
#include <xen/irq.h>
#include <xen/domain_page.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
#include <asm/event.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/flushtlb.h>
#include <asm/shadow.h>
#include <xen/numa.h>
#include "private.h"

/* Set up the shadow-specific parts of a domain struct at start of day.
 * Called for every domain from arch_domain_create() */
void shadow_domain_init(struct domain *d)
{
    int i;
    shadow_lock_init(d);
    /* Seed the per-order free lists used by the shadow page allocator. */
    for ( i = 0; i <= SHADOW_MAX_ORDER; i++ )
        INIT_LIST_HEAD(&d->arch.paging.shadow.freelists[i]);
    INIT_LIST_HEAD(&d->arch.paging.shadow.p2m_freelist);
    INIT_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);

    /* Use shadow pagetables for log-dirty support */
    paging_log_dirty_init(d, shadow_enable_log_dirty,
                          shadow_disable_log_dirty, shadow_clean_dirty_bitmap);

#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
    d->arch.paging.shadow.oos_active = 0;
#endif
}

/* Setup the shadow-specific parts of a vcpu struct. Note: The most important
 * job is to initialize the update_paging_modes() function pointer, which is
 * used to initialized the rest of resources. Therefore, it really does not
 * matter to have v->arch.paging.mode pointing to any mode, as long as it can
 * be compiled.
 */
void shadow_vcpu_init(struct vcpu *v)
{
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
    int i, j;

    /* Mark every out-of-sync slot, its snapshot, and its fixup entries
     * as empty (INVALID_MFN). */
    for ( i = 0; i < SHADOW_OOS_PAGES; i++ )
    {
        v->arch.paging.shadow.oos[i] = _mfn(INVALID_MFN);
        v->arch.paging.shadow.oos_snapshot[i] = _mfn(INVALID_MFN);
        for ( j = 0; j < SHADOW_OOS_FIXUPS; j++ )
            v->arch.paging.shadow.oos_fixup[i].smfn[j] = _mfn(INVALID_MFN);
    }
#endif

    /* Any valid mode will do here; update_paging_modes() fixes it up later. */
    v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
}

#if SHADOW_AUDIT
/* Runtime toggle for the (expensive) shadow audit checks. */
int shadow_audit_enable = 0;

/* Debug-key handler: flip shadow auditing on/off from the console. */
static void shadow_audit_key(unsigned char key)
{
    shadow_audit_enable = !shadow_audit_enable;
    printk("%s shadow_audit_enable=%d\n", __func__, shadow_audit_enable);
}

/* Register the 'O' debug key at boot. */
static int __init shadow_audit_key_init(void)
{
    register_keyhandler(
        'O', shadow_audit_key, "toggle shadow audits");
    return 0;
}
__initcall(shadow_audit_key_init);
#endif /* SHADOW_AUDIT */

/* Out-of-line wrapper so other translation units can query
 * shadow_mode_refcounts() for this domain. */
int _shadow_mode_refcounts(struct domain *d)
{
    return shadow_mode_refcounts(d);
}

/**************************************************************************/
/* x86 emulator support for the shadow code */

/* Return the cached segment register for @seg, fetching it from the
 * hardware-maintained state only on first access (lazy fill keyed by
 * the valid_seg_regs bitmap). */
struct segment_register *hvm_get_seg_reg(
    enum x86_segment seg, struct sh_emulate_ctxt
*sh_ctxt)
{
    struct segment_register *seg_reg = &sh_ctxt->seg_reg[seg];
    /* First use of this segment in the current emulation: fetch it. */
    if ( !__test_and_set_bit(seg, &sh_ctxt->valid_seg_regs) )
        hvm_get_segment_register(current, seg, seg_reg);
    return seg_reg;
}

/* Turn a (segment, offset) pair into a linear address for @bytes of
 * @access_type access.  Injects #GP and returns X86EMUL_EXCEPTION if
 * the access does not fit the segment. */
static int hvm_translate_linear_addr(
    enum x86_segment seg,
    unsigned long offset,
    unsigned int bytes,
    enum hvm_access_type access_type,
    struct sh_emulate_ctxt *sh_ctxt,
    unsigned long *paddr)
{
    struct segment_register *reg = hvm_get_seg_reg(seg, sh_ctxt);
    int okay;

    okay = hvm_virtual_to_linear_addr(
        seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, paddr);

    if ( !okay )
    {
        hvm_inject_exception(TRAP_gp_fault, 0, 0);
        return X86EMUL_EXCEPTION;
    }

    return 0;
}

/* Common HVM read path: translate to a linear address, then copy from
 * guest virtual memory (insn-fetch or data access as requested). */
static int
hvm_read(enum x86_segment seg,
         unsigned long offset,
         void *p_data,
         unsigned int bytes,
         enum hvm_access_type access_type,
         struct sh_emulate_ctxt *sh_ctxt)
{
    unsigned long addr;
    int rc;

    rc = hvm_translate_linear_addr(
        seg, offset, bytes, access_type, sh_ctxt, &addr);
    if ( rc )
        return rc;

    if ( access_type == hvm_access_insn_fetch )
        rc = hvm_fetch_from_guest_virt(p_data, addr, bytes, 0);
    else
        rc = hvm_copy_from_guest_virt(p_data, addr, bytes, 0);

    /* Map the HVMCOPY_* result onto the emulator's X86EMUL_* codes. */
    switch ( rc )
    {
    case HVMCOPY_okay:
        return X86EMUL_OKAY;
    case HVMCOPY_bad_gva_to_gfn:
        return X86EMUL_EXCEPTION;
    default:
        break;
    }

    return X86EMUL_UNHANDLEABLE;
}

/* x86_emulate read callback for HVM guests. */
static int
hvm_emulate_read(enum x86_segment seg,
                 unsigned long offset,
                 void *p_data,
                 unsigned int bytes,
                 struct x86_emulate_ctxt *ctxt)
{
    if ( !is_x86_user_segment(seg) )
        return X86EMUL_UNHANDLEABLE;
    return hvm_read(seg, offset, p_data, bytes, hvm_access_read,
                    container_of(ctxt, struct sh_emulate_ctxt, ctxt));
}

/* x86_emulate instruction-fetch callback: serves bytes out of the
 * prefetched insn_buf when the requested range is cached. */
static int
hvm_emulate_insn_fetch(enum x86_segment seg,
                       unsigned long offset,
                       void *p_data,
                       unsigned int bytes,
                       struct x86_emulate_ctxt *ctxt)
{
    struct sh_emulate_ctxt *sh_ctxt =
        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
    unsigned int insn_off = offset - sh_ctxt->insn_buf_eip;

    ASSERT(seg == x86_seg_cs);

    /* Fall back if requested bytes are not in the prefetch cache.
*/
    if ( unlikely((insn_off + bytes) > sh_ctxt->insn_buf_bytes) )
        return hvm_read(seg, offset, p_data, bytes,
                        hvm_access_insn_fetch, sh_ctxt);

    /* Hit the cache. Simple memcpy. */
    memcpy(p_data, &sh_ctxt->insn_buf[insn_off], bytes);
    return X86EMUL_OKAY;
}

/* x86_emulate write callback for HVM guests: translate, then defer to
 * the paging-mode-specific shadow emulated-write handler. */
static int
hvm_emulate_write(enum x86_segment seg,
                  unsigned long offset,
                  void *p_data,
                  unsigned int bytes,
                  struct x86_emulate_ctxt *ctxt)
{
    struct sh_emulate_ctxt *sh_ctxt =
        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
    struct vcpu *v = current;
    unsigned long addr;
    int rc;

    if ( !is_x86_user_segment(seg) )
        return X86EMUL_UNHANDLEABLE;

    /* How many emulations could we save if we unshadowed on stack writes? */
    if ( seg == x86_seg_ss )
        perfc_incr(shadow_fault_emulate_stack);

    rc = hvm_translate_linear_addr(
        seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
    if ( rc )
        return rc;

    return v->arch.paging.mode->shadow.x86_emulate_write(
        v, addr, p_data, bytes, sh_ctxt);
}

/* x86_emulate cmpxchg callback for HVM guests.  Widens the old/new
 * operands into long[2] scratch buffers so 8-byte compares work on
 * 32-bit builds via the cmpxchg8b hook. */
static int
hvm_emulate_cmpxchg(enum x86_segment seg,
                    unsigned long offset,
                    void *p_old,
                    void *p_new,
                    unsigned int bytes,
                    struct x86_emulate_ctxt *ctxt)
{
    struct sh_emulate_ctxt *sh_ctxt =
        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
    struct vcpu *v = current;
    unsigned long addr, old[2], new[2];
    int rc;

    if ( !is_x86_user_segment(seg) )
        return X86EMUL_UNHANDLEABLE;

    rc = hvm_translate_linear_addr(
        seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
    if ( rc )
        return rc;

    old[0] = new[0] = 0;
    memcpy(old, p_old, bytes);
    memcpy(new, p_new, bytes);

    if ( bytes <= sizeof(long) )
        return v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
            v, addr, old[0], new[0], bytes, sh_ctxt);

#ifdef __i386__
    if ( bytes == 8 )
        return v->arch.paging.mode->shadow.x86_emulate_cmpxchg8b(
            v, addr, old[0], old[1], new[0], new[1], sh_ctxt);
#endif

    return X86EMUL_UNHANDLEABLE;
}

/* Callback table handed to x86_emulate() when emulating for an HVM guest. */
static struct x86_emulate_ops hvm_shadow_emulator_ops = {
    .read       = hvm_emulate_read,
    .insn_fetch = hvm_emulate_insn_fetch,
    .write      = hvm_emulate_write,
    .cmpxchg    = hvm_emulate_cmpxchg,
};

/* x86_emulate read callback for PV guests: a plain copy_from_user,
 * propagating a read page fault on partial copies. */
static int
pv_emulate_read(enum
x86_segment seg,
                unsigned long offset,
                void *p_data,
                unsigned int bytes,
                struct x86_emulate_ctxt *ctxt)
{
    unsigned int rc;

    if ( !is_x86_user_segment(seg) )
        return X86EMUL_UNHANDLEABLE;

    /* copy_from_user() returns the number of bytes NOT copied, so
     * (offset + bytes - rc) is the first inaccessible address. */
    if ( (rc = copy_from_user(p_data, (void *)offset, bytes)) != 0 )
    {
        propagate_page_fault(offset + bytes - rc, 0); /* read fault */
        return X86EMUL_EXCEPTION;
    }

    return X86EMUL_OKAY;
}

/* x86_emulate write callback for PV guests: hand straight to the
 * paging-mode-specific shadow emulated-write handler (PV offsets are
 * already linear addresses, so no translation step). */
static int
pv_emulate_write(enum x86_segment seg,
                 unsigned long offset,
                 void *p_data,
                 unsigned int bytes,
                 struct x86_emulate_ctxt *ctxt)
{
    struct sh_emulate_ctxt *sh_ctxt =
        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
    struct vcpu *v = current;
    if ( !is_x86_user_segment(seg) )
        return X86EMUL_UNHANDLEABLE;
    return v->arch.paging.mode->shadow.x86_emulate_write(
        v, offset, p_data, bytes, sh_ctxt);
}

/* x86_emulate cmpxchg callback for PV guests; same operand-widening
 * scheme as the HVM variant. */
static int
pv_emulate_cmpxchg(enum x86_segment seg,
                   unsigned long offset,
                   void *p_old,
                   void *p_new,
                   unsigned int bytes,
                   struct x86_emulate_ctxt *ctxt)
{
    struct sh_emulate_ctxt *sh_ctxt =
        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
    unsigned long old[2], new[2];
    struct vcpu *v = current;
    if ( !is_x86_user_segment(seg) )
        return X86EMUL_UNHANDLEABLE;
    old[0] = new[0] = 0;
    memcpy(old, p_old, bytes);
    memcpy(new, p_new, bytes);
    if ( bytes <= sizeof(long) )
        return v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
            v, offset, old[0], new[0], bytes, sh_ctxt);
#ifdef __i386__
    if ( bytes == 8 )
        return v->arch.paging.mode->shadow.x86_emulate_cmpxchg8b(
            v, offset, old[0], old[1], new[0], new[1], sh_ctxt);
#endif
    return X86EMUL_UNHANDLEABLE;
}

/* Callback table handed to x86_emulate() when emulating for a PV guest.
 * Instruction fetches share the plain read path. */
static struct x86_emulate_ops pv_shadow_emulator_ops = {
    .read       = pv_emulate_read,
    .insn_fetch = pv_emulate_read,
    .write      = pv_emulate_write,
    .cmpxchg    = pv_emulate_cmpxchg,
};

/* Prepare @sh_ctxt for one emulation pass and return the PV or HVM
 * callback table appropriate for the current vcpu. */
struct x86_emulate_ops *shadow_init_emulation(
    struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs)
{
    struct segment_register *creg, *sreg;
    struct vcpu *v = current;
    unsigned long addr;

    sh_ctxt->ctxt.regs = regs;
    sh_ctxt->ctxt.force_writeback = 0;

    /* PV guests always run with native address/operand sizes. */
    if ( !is_hvm_vcpu(v) )
    {
        sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size
= BITS_PER_LONG; return &pv_shadow_emulator_ops; } /* Segment cache initialisation. Primed with CS. */ sh_ctxt->valid_seg_regs = 0; creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt); /* Work out the emulation mode. */ if ( hvm_long_mode_enabled(v) && creg->attr.fields.l ) { sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = 64; } else {
⌨️ Keyboard shortcuts
Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?