common.c
From the Xen 3.2.2 source tree · C code · 1,927 lines total · page 1 of 5
(code-viewer export header — not part of the original source file)
/******************************************************************************
 * arch/x86/mm/shadow/common.c
 *
 * Shadow code that does not need to be multiply compiled.
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/perfc.h>
#include <xen/irq.h>
#include <xen/domain_page.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
#include <asm/event.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/flushtlb.h>
#include <asm/shadow.h>
#include "private.h"

/* Set up the shadow-specific parts of a domain struct at start of day.
 * Called for every domain from arch_domain_create() */
void shadow_domain_init(struct domain *d)
{
    int i;
    shadow_lock_init(d);
    /* Start with empty free lists for every allocation order the shadow
     * pool serves, plus the p2m free list and the pinned-shadows list. */
    for ( i = 0; i <= SHADOW_MAX_ORDER; i++ )
        INIT_LIST_HEAD(&d->arch.paging.shadow.freelists[i]);
    INIT_LIST_HEAD(&d->arch.paging.shadow.p2m_freelist);
    INIT_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);

    /* Use shadow pagetables for log-dirty support */
    paging_log_dirty_init(d, shadow_enable_log_dirty,
                          shadow_disable_log_dirty,
                          shadow_clean_dirty_bitmap);
}

/* Set up the shadow-specific parts of a vcpu struct.  Note: the most
 * important job is to initialize the update_paging_modes() function
 * pointer, which is used to initialize the rest of the resources.
 * Therefore, it really does not matter to have v->arch.paging.mode
 * pointing to any mode, as long as it can be compiled. */
void shadow_vcpu_init(struct vcpu *v)
{
#if CONFIG_PAGING_LEVELS == 4
    v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
#elif CONFIG_PAGING_LEVELS == 3
    v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
#elif CONFIG_PAGING_LEVELS == 2
    v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,2,2);
#endif
}

#if SHADOW_AUDIT
/* Runtime switch for the shadow audit checks; toggled by the 'O'
 * debug key registered below. */
int shadow_audit_enable = 0;

/* Debug-key handler: flip shadow auditing on/off and log the new state. */
static void shadow_audit_key(unsigned char key)
{
    shadow_audit_enable = !shadow_audit_enable;
    printk("%s shadow_audit_enable=%d\n",
           __func__, shadow_audit_enable);
}

/* Register the 'O' debug key at boot time. */
static int __init shadow_audit_key_init(void)
{
    register_keyhandler(
        'O', shadow_audit_key,  "toggle shadow audits");
    return 0;
}
__initcall(shadow_audit_key_init);
#endif /* SHADOW_AUDIT */

/* Out-of-line wrapper around the shadow_mode_refcounts() predicate.
 * NOTE(review): presumably provided so code that cannot use the inline
 * form can still test the mode bit — confirm against callers. */
int _shadow_mode_refcounts(struct domain *d)
{
    return shadow_mode_refcounts(d);
}

/**************************************************************************/
/* x86 emulator support for the shadow code */

/* Return the emulation context's cached copy of segment register @seg,
 * reading it from the vcpu's state the first time it is asked for (the
 * valid_seg_regs bitmap records which cache entries are populated). */
struct segment_register *hvm_get_seg_reg(
    enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt)
{
    struct segment_register *seg_reg = &sh_ctxt->seg_reg[seg];
    if ( !__test_and_set_bit(seg, &sh_ctxt->valid_seg_regs) )
        hvm_get_segment_register(current, seg, seg_reg);
    return seg_reg;
}

/* Translate a seg:offset access of @bytes into a linear address in *paddr,
 * applying the segmentation checks appropriate to @access_type.  On
 * failure, injects #GP(0) into the guest and returns X86EMUL_EXCEPTION. */
static int hvm_translate_linear_addr(
    enum x86_segment seg,
    unsigned long offset,
    unsigned int bytes,
    enum hvm_access_type access_type,
    struct sh_emulate_ctxt *sh_ctxt,
    unsigned long *paddr)
{
    struct segment_register *reg = hvm_get_seg_reg(seg, sh_ctxt);
    int okay;

    okay = hvm_virtual_to_linear_addr(
        seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, paddr);

    if ( !okay )
    {
        hvm_inject_exception(TRAP_gp_fault, 0, 0);
        return X86EMUL_EXCEPTION;
    }

    return 0;
}

/* Read @bytes from guest seg:offset into *val, zero-extending (the
 * destination is cleared before the copy).  Uses the insn-fetch copy
 * routine for instruction fetches, the plain data-copy routine otherwise,
 * and maps the HVMCOPY_* result onto an X86EMUL_* return code. */
static int
hvm_read(enum x86_segment seg,
         unsigned long offset,
         unsigned long *val,
         unsigned int bytes,
         enum hvm_access_type access_type,
         struct sh_emulate_ctxt *sh_ctxt)
{
    unsigned long addr;
    int rc;

    rc = hvm_translate_linear_addr(
        seg, offset, bytes, access_type, sh_ctxt, &addr);
    if ( rc )
        return rc;

    *val = 0; /* Clear first so a partial-width read is zero-extended. */

    if ( access_type == hvm_access_insn_fetch )
        rc = hvm_fetch_from_guest_virt(val, addr, bytes);
    else
        rc = hvm_copy_from_guest_virt(val, addr, bytes);

    switch ( rc )
    {
    case HVMCOPY_okay:
        return X86EMUL_OKAY;
    case HVMCOPY_bad_gva_to_gfn:
        /* NOTE(review): presumably the copy helper has already raised the
         * pagefault in the guest — confirm in hvm_copy_from_guest_virt(). */
        return X86EMUL_EXCEPTION;
    default:
        break;
    }

    return X86EMUL_UNHANDLEABLE;
}

/* Emulator read callback (HVM): only user segments are handled. */
static int
hvm_emulate_read(enum x86_segment seg,
                 unsigned long offset,
                 unsigned long *val,
                 unsigned int bytes,
                 struct x86_emulate_ctxt *ctxt)
{
    if ( !is_x86_user_segment(seg) )
        return X86EMUL_UNHANDLEABLE;
    return hvm_read(seg, offset, val, bytes, hvm_access_read,
                    container_of(ctxt, struct sh_emulate_ctxt, ctxt));
}

/* Emulator insn-fetch callback (HVM): serve bytes from the context's
 * prefetch buffer when they fall inside it; otherwise go back to guest
 * memory via hvm_read(). */
static int
hvm_emulate_insn_fetch(enum x86_segment seg,
                       unsigned long offset,
                       unsigned long *val,
                       unsigned int bytes,
                       struct x86_emulate_ctxt *ctxt)
{
    struct sh_emulate_ctxt *sh_ctxt =
        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
    unsigned int insn_off = offset - sh_ctxt->insn_buf_eip;

    ASSERT(seg == x86_seg_cs);

    /* Fall back if requested bytes are not in the prefetch cache. */
    if ( unlikely((insn_off + bytes) > sh_ctxt->insn_buf_bytes) )
        return hvm_read(seg, offset, val, bytes,
                        hvm_access_insn_fetch, sh_ctxt);

    /* Hit the cache. Simple memcpy. */
    *val = 0; /* Zero-extend: copy may be narrower than *val. */
    memcpy(val, &sh_ctxt->insn_buf[insn_off], bytes);
    return X86EMUL_OKAY;
}

/* Emulator write callback (HVM): translate to a linear address and hand
 * off to the per-mode shadow write emulator. */
static int
hvm_emulate_write(enum x86_segment seg,
                  unsigned long offset,
                  unsigned long val,
                  unsigned int bytes,
                  struct x86_emulate_ctxt *ctxt)
{
    struct sh_emulate_ctxt *sh_ctxt =
        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
    struct vcpu *v = current;
    unsigned long addr;
    int rc;

    if ( !is_x86_user_segment(seg) )
        return X86EMUL_UNHANDLEABLE;

    /* How many emulations could we save if we unshadowed on stack writes? */
    if ( seg == x86_seg_ss )
        perfc_incr(shadow_fault_emulate_stack);

    rc = hvm_translate_linear_addr(
        seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
    if ( rc )
        return rc;

    return v->arch.paging.mode->shadow.x86_emulate_write(
        v, addr, &val, bytes, sh_ctxt);
}

/* Emulator cmpxchg callback (HVM): translate, then defer to the per-mode
 * shadow cmpxchg emulator. */
static int
hvm_emulate_cmpxchg(enum x86_segment seg,
                    unsigned long offset,
                    unsigned long old,
                    unsigned long new,
                    unsigned int bytes,
                    struct x86_emulate_ctxt *ctxt)
{
    struct sh_emulate_ctxt *sh_ctxt =
        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
    struct vcpu *v = current;
    unsigned long addr;
    int rc;

    if ( !is_x86_user_segment(seg) )
        return X86EMUL_UNHANDLEABLE;

    rc = hvm_translate_linear_addr(
        seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
    if ( rc )
        return rc;

    return v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
        v, addr, old, new, bytes, sh_ctxt);
}

/* Emulator cmpxchg8b callback (HVM): the access is always 8 bytes, hence
 * the hard-coded size in the translation call. */
static int
hvm_emulate_cmpxchg8b(enum x86_segment seg,
                      unsigned long offset,
                      unsigned long old_lo,
                      unsigned long old_hi,
                      unsigned long new_lo,
                      unsigned long new_hi,
                      struct x86_emulate_ctxt *ctxt)
{
    struct sh_emulate_ctxt *sh_ctxt =
        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
    struct vcpu *v = current;
    unsigned long addr;
    int rc;

    if ( !is_x86_user_segment(seg) )
        return X86EMUL_UNHANDLEABLE;

    rc = hvm_translate_linear_addr(
        seg, offset, 8, hvm_access_write, sh_ctxt, &addr);
    if ( rc )
        return rc;

    return v->arch.paging.mode->shadow.x86_emulate_cmpxchg8b(
        v, addr, old_lo, old_hi, new_lo, new_hi, sh_ctxt);
}

/* Callback table handed to the x86 emulator for HVM guests. */
static struct x86_emulate_ops hvm_shadow_emulator_ops = {
    .read       = hvm_emulate_read,
    .insn_fetch = hvm_emulate_insn_fetch,
    .write      = hvm_emulate_write,
    .cmpxchg    = hvm_emulate_cmpxchg,
    .cmpxchg8b  = hvm_emulate_cmpxchg8b,
};

/* Emulator read callback (PV): copy straight from guest virtual memory.
 * On a partial copy, propagates a read pagefault at the first byte that
 * could not be copied (offset + bytes - rc). */
static int
pv_emulate_read(enum x86_segment seg,
                unsigned long offset,
                unsigned long *val,
                unsigned int bytes,
                struct x86_emulate_ctxt *ctxt)
{
    unsigned int rc;

    if ( !is_x86_user_segment(seg) )
        return X86EMUL_UNHANDLEABLE;

    *val = 0; /* Zero-extend partial-width reads. */
    if ( (rc = copy_from_user((void *)val, (void *)offset, bytes)) != 0 )
    {
        propagate_page_fault(offset + bytes - rc, 0); /* read fault */
        return X86EMUL_EXCEPTION;
    }

    return X86EMUL_OKAY;
}

/* Emulator write callback (PV): defer to the per-mode shadow write
 * emulator using @offset directly as the linear address. */
static int
pv_emulate_write(enum x86_segment seg,
                 unsigned long offset,
                 unsigned long val,
                 unsigned int bytes,
                 struct x86_emulate_ctxt *ctxt)
{
    struct sh_emulate_ctxt *sh_ctxt =
        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
    struct vcpu *v = current;
    if ( !is_x86_user_segment(seg) )
        return X86EMUL_UNHANDLEABLE;
    return v->arch.paging.mode->shadow.x86_emulate_write(
        v, offset, &val, bytes, sh_ctxt);
}

/* Emulator cmpxchg callback (PV). */
static int
pv_emulate_cmpxchg(enum x86_segment seg,
                   unsigned long offset,
                   unsigned long old,
                   unsigned long new,
                   unsigned int bytes,
                   struct x86_emulate_ctxt *ctxt)
{
    struct sh_emulate_ctxt *sh_ctxt =
        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
    struct vcpu *v = current;
    if ( !is_x86_user_segment(seg) )
        return X86EMUL_UNHANDLEABLE;
    return v->arch.paging.mode->shadow.x86_emulate_cmpxchg(
        v, offset, old, new, bytes, sh_ctxt);
}

/* Emulator cmpxchg8b callback (PV). */
static int
pv_emulate_cmpxchg8b(enum x86_segment seg,
                     unsigned long offset,
                     unsigned long old_lo,
                     unsigned long old_hi,
                     unsigned long new_lo,
                     unsigned long new_hi,
                     struct x86_emulate_ctxt *ctxt)
{
    struct sh_emulate_ctxt *sh_ctxt =
        container_of(ctxt, struct sh_emulate_ctxt, ctxt);
    struct vcpu *v = current;
    if ( !is_x86_user_segment(seg) )
        return X86EMUL_UNHANDLEABLE;
    return v->arch.paging.mode->shadow.x86_emulate_cmpxchg8b(
        v, offset, old_lo, old_hi, new_lo, new_hi, sh_ctxt);
}

/* Callback table handed to the x86 emulator for PV guests.  Instruction
 * fetches use the plain read path (no prefetch cache for PV here). */
static struct x86_emulate_ops pv_shadow_emulator_ops = {
    .read       = pv_emulate_read,
    .insn_fetch = pv_emulate_read,
    .write      = pv_emulate_write,
    .cmpxchg    = pv_emulate_cmpxchg,
    .cmpxchg8b  = pv_emulate_cmpxchg8b,
};

/* Prepare an emulation context for the current vcpu and pick the matching
 * callback table (HVM vs PV). */
struct x86_emulate_ops *shadow_init_emulation(
    struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs)
{
    struct segment_register *creg, *sreg;
    struct vcpu *v = current;
    unsigned long addr;
    /* NOTE(review): the source excerpt ends here, mid-function (page 1 of
     * 5); the remainder of shadow_init_emulation() is on later pages. */
⌨️ Keyboard shortcuts (code-viewer chrome — not part of the source file)
Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?