private.h
来自「xen虚拟机源代码安装包」· C头文件 代码 · 共 847 行 · 第 1/2 页
H
847 行
/******************************************************************************
 * arch/x86/mm/shadow/private.h
 *
 * Shadow code that is private, and does not need to be multiply compiled.
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _XEN_SHADOW_PRIVATE_H
#define _XEN_SHADOW_PRIVATE_H

// In order to override the definition of mfn_to_page, we make sure page.h has
// been included...
#include <asm/page.h>
#include <xen/domain_page.h>
#include <asm/x86_emulate.h>
#include <asm/hvm/support.h>


/******************************************************************************
 * Levels of self-test and paranoia
 */

#define SHADOW_AUDIT_HASH           0x01  /* Check current hash bucket */
#define SHADOW_AUDIT_HASH_FULL      0x02  /* Check every hash bucket */
#define SHADOW_AUDIT_ENTRIES        0x04  /* Check this walk's shadows */
#define SHADOW_AUDIT_ENTRIES_FULL   0x08  /* Check every shadow */
#define SHADOW_AUDIT_ENTRIES_MFNS   0x10  /* Check gfn-mfn map in shadows */

#ifdef NDEBUG
/* Release builds: all auditing compiled out. */
#define SHADOW_AUDIT                   0
#define SHADOW_AUDIT_ENABLE            0
#else
/* Debug builds: 0x15 == HASH | ENTRIES | ENTRIES_MFNS, i.e. the cheaper
 * (non-FULL) variant of each audit, gated at run time by the
 * shadow_audit_enable flag below. */
#define SHADOW_AUDIT                0x15  /* Basic audit of all */
#define SHADOW_AUDIT_ENABLE         shadow_audit_enable
extern int shadow_audit_enable;
#endif

/******************************************************************************
 * Levels of optimization
 */

#define SHOPT_WRITABLE_HEURISTIC  0x01  /* Guess at RW PTEs via linear maps */
#define SHOPT_EARLY_UNSHADOW      0x02  /* Unshadow l1s on fork or exit */
#define SHOPT_FAST_FAULT_PATH     0x04  /* Fast-path MMIO and not-present */
#define SHOPT_PREFETCH            0x08  /* Shadow multiple entries per fault */
#define SHOPT_LINUX_L3_TOPLEVEL   0x10  /* Pin l3es on early 64bit linux */
#define SHOPT_SKIP_VERIFY         0x20  /* Skip PTE v'fy when safe to do so */
#define SHOPT_VIRTUAL_TLB         0x40  /* Cache guest v->p translations */
#define SHOPT_FAST_EMULATION      0x80  /* Fast write emulation */
#define SHOPT_OUT_OF_SYNC         0x100 /* Allow guest writes to L1 PTs */

/* 0x1ff == all nine optimization bits above enabled. */
#define SHADOW_OPTIMIZATIONS      0x1ff


/******************************************************************************
 * Debug and error-message output
 */

#define SHADOW_PRINTK(_f, _a...)                                     \
    debugtrace_printk("sh: %s(): " _f, __func__, ##_a)
#define SHADOW_ERROR(_f, _a...)                                      \
    printk("sh error: %s(): " _f, __func__, ##_a)
#define SHADOW_DEBUG(flag, _f, _a...)                                \
    do {                                                             \
        if (SHADOW_DEBUG_ ## flag)                                   \
            debugtrace_printk("shdebug: %s(): " _f, __func__, ##_a); \
    } while (0)

// The flags for use with SHADOW_DEBUG:
#define SHADOW_DEBUG_PROPAGATE         1
#define SHADOW_DEBUG_MAKE_SHADOW       1
#define SHADOW_DEBUG_DESTROY_SHADOW    1
#define SHADOW_DEBUG_A_AND_D           1
#define SHADOW_DEBUG_EMULATE           1
#define SHADOW_DEBUG_P2M               1
#define SHADOW_DEBUG_LOGDIRTY          0

/******************************************************************************
 * The shadow lock.
 *
 * This lock is per-domain.  It is intended to allow us to make atomic
 * updates to the software TLB that the shadow tables provide.
 *
 * Specifically, it protects:
 *   - all changes to shadow page table pages
 *   - the shadow hash table
 *   - the shadow page allocator
 *   - all changes to guest page table pages
 *   - all changes to the page_info->tlbflush_timestamp
 *   - the page_info->count fields on shadow pages
 *   - the shadow dirty bit array and count
 */

#ifndef CONFIG_SMP
#error shadow.h currently requires CONFIG_SMP
#endif

/* Initialise the lock as unowned.  "locker" holds the CPU number of the
 * current holder (-1 == nobody); "locker_function" is for diagnostics. */
#define shadow_lock_init(_d)                                   \
    do {                                                       \
        spin_lock_init(&(_d)->arch.paging.shadow.lock);        \
        (_d)->arch.paging.shadow.locker = -1;                  \
        (_d)->arch.paging.shadow.locker_function = "nobody";   \
    } while (0)

#define shadow_locked_by_me(_d)                                \
    (current->processor == (_d)->arch.paging.shadow.locker)

/* Not a recursive lock: re-taking it on the same CPU is a BUG(),
 * reporting the function that already holds it. */
#define shadow_lock(_d)                                                 \
    do {                                                                \
        if ( unlikely((_d)->arch.paging.shadow.locker == current->processor) )\
        {                                                               \
            printk("Error: shadow lock held by %s\n",                   \
                   (_d)->arch.paging.shadow.locker_function);           \
            BUG();                                                      \
        }                                                               \
        spin_lock(&(_d)->arch.paging.shadow.lock);                      \
        ASSERT((_d)->arch.paging.shadow.locker == -1);                  \
        (_d)->arch.paging.shadow.locker = current->processor;           \
        (_d)->arch.paging.shadow.locker_function = __func__;            \
    } while (0)

/* Release: clear the owner bookkeeping *before* dropping the spinlock. */
#define shadow_unlock(_d)                                               \
    do {                                                                \
        ASSERT((_d)->arch.paging.shadow.locker == current->processor);  \
        (_d)->arch.paging.shadow.locker = -1;                           \
        (_d)->arch.paging.shadow.locker_function = "nobody";            \
        spin_unlock(&(_d)->arch.paging.shadow.lock);                    \
    } while (0)


/******************************************************************************
 * Auditing routines
 */

#if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_FULL
extern void shadow_audit_tables(struct vcpu *v);
#else
#define shadow_audit_tables(_v) do {} while(0)
#endif

/******************************************************************************
 * Macro for dealing with the naming of the internal names of the
 * shadow code's external entry points.
 */

/* Two-level expansion so that 'name' and 'guest_levels' are themselves
 * macro-expanded before the '##' paste happens. */
#define SHADOW_INTERNAL_NAME_HIDDEN(name, guest_levels) \
    name ## __guest_ ## guest_levels
#define SHADOW_INTERNAL_NAME(name, guest_levels) \
    SHADOW_INTERNAL_NAME_HIDDEN(name, guest_levels)

/* Compile multi.h once per supported number of guest pagetable levels;
 * each pass generates the *__guest_N family of entry points. */
#define GUEST_LEVELS  2
#include "multi.h"
#undef GUEST_LEVELS

#define GUEST_LEVELS  3
#include "multi.h"
#undef GUEST_LEVELS

#if CONFIG_PAGING_LEVELS == 4
#define GUEST_LEVELS  4
#include "multi.h"
#undef GUEST_LEVELS
#endif /* CONFIG_PAGING_LEVELS == 4 */

/******************************************************************************
 * Page metadata for shadow pages.
 */

struct shadow_page_info
{
    union {
        /* When in use, guest page we're a shadow of */
        unsigned long backpointer;
        /* When free, order of the freelist we're on */
        unsigned int order;
    };
    union {
        /* When in use, next shadow in this hash chain */
        struct shadow_page_info *next_shadow;
        /* When free, TLB flush time when freed */
        u32 tlbflush_timestamp;
    };
    struct {
        unsigned int type:5;   /* What kind of shadow is this? */
        unsigned int pinned:1; /* Is the shadow pinned? */
        unsigned int count:26; /* Reference count */
        u32 mbz;               /* Must be zero: this is where the owner
                                * field lives in a non-shadow page */
    } __attribute__((packed));
    union {
        /* For unused shadow pages, a list of pages of this order;
         * for pinnable shadows, if pinned, a list of other pinned shadows
         * (see sh_type_is_pinnable() below for the definition of
         * "pinnable" shadow types). */
        struct list_head list;
        /* For non-pinnable shadows, a higher entry that points at us */
        paddr_t up;
    };
};

/* The structure above *must* be no larger than a struct page_info
 * from mm.h, since we'll be using the same space in the frametable.
 * Also, the mbz field must line up with the owner field of normal
 * pages, so they look properly like anonymous/xen pages.
 */
static inline void shadow_check_page_struct_offsets(void) {
    /* Compile-time layout checks only (never called at run time):
     * shadow_page_info must fit in a page_info slot in the frametable,
     * and mbz must overlay page_info's owner field. */
    BUILD_BUG_ON(sizeof (struct shadow_page_info) >
                 sizeof (struct page_info));
    BUILD_BUG_ON(offsetof(struct shadow_page_info, mbz) !=
                 offsetof(struct page_info, u.inuse._domain));
};

/* Shadow type codes */
#define SH_type_none           (0U) /* on the shadow free list */
#define SH_type_min_shadow     (1U)
#define SH_type_l1_32_shadow   (1U) /* shadowing a 32-bit L1 guest page */
#define SH_type_fl1_32_shadow  (2U) /* L1 shadow for a 32b 4M superpage */
#define SH_type_l2_32_shadow   (3U) /* shadowing a 32-bit L2 guest page */
#define SH_type_l1_pae_shadow  (4U) /* shadowing a pae L1 page */
#define SH_type_fl1_pae_shadow (5U) /* L1 shadow for pae 2M superpg */
#define SH_type_l2_pae_shadow  (6U) /* shadowing a pae L2-low page */
#define SH_type_l2h_pae_shadow (7U) /* shadowing a pae L2-high page */
#define SH_type_l1_64_shadow   (8U) /* shadowing a 64-bit L1 page */
#define SH_type_fl1_64_shadow  (9U) /* L1 shadow for 64-bit 2M superpg */
#define SH_type_l2_64_shadow  (10U) /* shadowing a 64-bit L2 page */
#define SH_type_l2h_64_shadow (11U) /* shadowing a compat PAE L2 high page */
#define SH_type_l3_64_shadow  (12U) /* shadowing a 64-bit L3 page */
#define SH_type_l4_64_shadow  (13U) /* shadowing a 64-bit L4 page */
#define SH_type_max_shadow    (13U)
#define SH_type_p2m_table     (14U) /* in use as the p2m table */
#define SH_type_monitor_table (15U) /* in use as a monitor table */
#define SH_type_oos_snapshot  (16U) /* in use as OOS snapshot */
#define SH_type_unused        (17U)

/*
 * What counts as a pinnable shadow?
*/static inline int sh_type_is_pinnable(struct vcpu *v, unsigned int t) { /* Top-level shadow types in each mode can be pinned, so that they * persist even when not currently in use in a guest CR3 */ if ( t == SH_type_l2_32_shadow || t == SH_type_l2_pae_shadow || t == SH_type_l2h_pae_shadow || t == SH_type_l4_64_shadow ) return 1;#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL) /* Early 64-bit linux used three levels of pagetables for the guest * and context switched by changing one l4 entry in a per-cpu l4 * page. When we're shadowing those kernels, we have to pin l3 * shadows so they don't just evaporate on every context switch. * For all other guests, we'd rather use the up-pointer field in l3s. */ if ( unlikely((v->domain->arch.paging.shadow.opt_flags & SHOPT_LINUX_L3_TOPLEVEL) && CONFIG_PAGING_LEVELS >= 4 && t == SH_type_l3_64_shadow) ) return 1;#endif /* Everything else is not pinnable, and can use the "up" pointer */ return 0;}/* * Definitions for the shadow_flags field in page_info. * These flags are stored on *guest* pages... * Bits 1-13 are encodings for the shadow types. 
 */

/* Mask covering exactly the shadow-type bits: SH_type_min_shadow (1)
 * through SH_type_max_shadow (13), i.e. bits 1-13. */
#define SHF_page_type_mask \
    (((1u << (SH_type_max_shadow + 1u)) - 1u) - \
     ((1u << SH_type_min_shadow) - 1u))

#define SHF_L1_32   (1u << SH_type_l1_32_shadow)
#define SHF_FL1_32  (1u << SH_type_fl1_32_shadow)
#define SHF_L2_32   (1u << SH_type_l2_32_shadow)
#define SHF_L1_PAE  (1u << SH_type_l1_pae_shadow)
#define SHF_FL1_PAE (1u << SH_type_fl1_pae_shadow)
#define SHF_L2_PAE  (1u << SH_type_l2_pae_shadow)
#define SHF_L2H_PAE (1u << SH_type_l2h_pae_shadow)
#define SHF_L1_64   (1u << SH_type_l1_64_shadow)
#define SHF_FL1_64  (1u << SH_type_fl1_64_shadow)
#define SHF_L2_64   (1u << SH_type_l2_64_shadow)
#define SHF_L2H_64  (1u << SH_type_l2h_64_shadow)
#define SHF_L3_64   (1u << SH_type_l3_64_shadow)
#define SHF_L4_64   (1u << SH_type_l4_64_shadow)

/* Convenience groupings: all shadow types of one paging mode. */
#define SHF_32  (SHF_L1_32|SHF_FL1_32|SHF_L2_32)
#define SHF_PAE (SHF_L1_PAE|SHF_FL1_PAE|SHF_L2_PAE|SHF_L2H_PAE)
#define SHF_64  (SHF_L1_64|SHF_FL1_64|SHF_L2_64|SHF_L2H_64|SHF_L3_64|SHF_L4_64)

#define SHF_L1_ANY  (SHF_L1_32|SHF_L1_PAE|SHF_L1_64)

#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Marks a guest L1 page table which is shadowed but not write-protected.
 * If set, then *only* L1 shadows (SHF_L1_*) are allowed.
 *
 * out_of_sync indicates that the shadow tables may not reflect the
 * guest tables.  If it is clear, then the shadow tables *must* reflect
 * the guest tables.
 *
 * oos_may_write indicates that a page may have writable mappings.
 *
 * Most of the time the flags are synonymous.  There is a short period of
 * time during resync that oos_may_write is clear but out_of_sync is not.
 * If a codepath is called during that time and is sensitive to oos issues,
 * it may need to use the second flag. */
#define SHF_out_of_sync (1u<<30)
#define SHF_oos_may_write (1u<<29)

#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */

/* Returns non-zero iff this guest pagetable page is currently shadowed
 * by more than one type of shadow at once. */
static inline int sh_page_has_multiple_shadows(struct page_info *pg)
{
    u32 shadows;
    if ( !(pg->count_info & PGC_page_table) )
        return 0;
    shadows = pg->shadow_flags & SHF_page_type_mask;
    /* More than one type bit set in shadow-flags? */
    return ( (shadows & ~(1UL << find_first_set_bit(shadows))) != 0 );
}

#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* The caller must verify this is reasonable to call; i.e., valid mfn,
 * domain is translated, &c */
static inline int page_is_out_of_sync(struct page_info *p)
{
    return (p->count_info & PGC_page_table)
        && (p->shadow_flags & SHF_out_of_sync);
}

static inline int mfn_is_out_of_sync(mfn_t gmfn)
{
    return page_is_out_of_sync(mfn_to_page(mfn_x(gmfn)));
}

static inline int page_oos_may_write(struct page_info *p)
{
    return (p->count_info & PGC_page_table)
        && (p->shadow_flags & SHF_oos_may_write);
}

static inline int mfn_oos_may_write(mfn_t gmfn)
{
    return page_oos_may_write(mfn_to_page(mfn_x(gmfn)));
}
#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */


/******************************************************************************
 * Various function declarations
 */

/* Hash table functions */
mfn_t shadow_hash_lookup(struct vcpu *v, unsigned long n, unsigned int t);
void  shadow_hash_insert(struct vcpu *v,
                         unsigned long n, unsigned int t, mfn_t smfn);
void  shadow_hash_delete(struct vcpu *v,
                         unsigned long n, unsigned int t, mfn_t smfn);

/* shadow promotion */
void shadow_promote(struct vcpu *v, mfn_t gmfn, u32 type);
void shadow_demote(struct vcpu *v, mfn_t gmfn, u32 type);

/* Shadow page allocation functions */
void  shadow_prealloc(struct domain *d, u32 shadow_type, unsigned int count);
mfn_t shadow_alloc(struct domain *d,
                   u32 shadow_type,
                   unsigned long backpointer);
void  shadow_free(struct domain *d, mfn_t smfn);

/* Install the xen mappings in various flavours of shadow */
void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn);

/* Update the shadows in response to a pagetable write from Xen */
int sh_validate_guest_entry(struct vcpu *v, mfn_t gmfn, void *entry, u32 size);

/* Update the shadows in response to a pagetable write from a HVM guest */
void sh_validate_guest_pt_write(struct vcpu *v, mfn_t gmfn,
                                void *entry, u32 size);

/* Remove all writeable mappings
 of a guest frame from the shadows.
 * Returns non-zero if we need to flush TLBs.
 * level and fault_addr describe how we found this to be a pagetable;
 * level==0 means we have some other reason for revoking write access. */
extern int sh_remove_write_access(struct vcpu *v, mfn_t readonly_mfn,
                                  unsigned int level,
                                  unsigned long fault_addr);

/* Functions that atomically write PT/P2M entries and update state */
void shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn,
                            l1_pgentry_t *p, mfn_t table_mfn,
                            l1_pgentry_t new, unsigned int level);
int shadow_write_guest_entry(struct vcpu *v, intpte_t *p,
                             intpte_t new, mfn_t gmfn);
int shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
                               intpte_t *old, intpte_t new, mfn_t gmfn);

#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Allow a shadowed page to go out of sync */
int sh_unsync(struct vcpu *v, mfn_t gmfn);

/* Pull an out-of-sync page back into sync. */
void sh_resync(struct vcpu *v, mfn_t gmfn);

void oos_fixup_add(struct vcpu *v, mfn_t gmfn, mfn_t smfn, unsigned long off);

int sh_remove_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
                                     mfn_t smfn, unsigned long offset);

/* Pull all out-of-sync shadows back into sync.  If skip != 0, we try
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?