/*
 * private.h
 *
 * NOTE(review): web-viewer chrome removed from this span; only the file
 * name was meaningful source content.
 */
/* NOTE(review): the opening of this comment was lost above this chunk; it
 * documents sh_resync_all()'s flags:
 * to avoid resyncing where we think we can get away with it. */
void sh_resync_all(struct vcpu *v, int skip, int this, int others,
                   int do_locking);

/* Resync out-of-sync shadows for this vcpu AND all other vcpus. */
static inline void
shadow_resync_all(struct vcpu *v, int do_locking)
{
    sh_resync_all(v,
                  0 /* skip */,
                  1 /* this */,
                  1 /* others */,
                  do_locking);
}

/* Resync out-of-sync shadows for this vcpu only. */
static inline void
shadow_resync_current_vcpu(struct vcpu *v, int do_locking)
{
    sh_resync_all(v,
                  0 /* skip */,
                  1 /* this */,
                  0 /* others */,
                  do_locking);
}

/* Resync the other vcpus' shadows, skipping this vcpu's own
 * (skip == 1, this == 0). */
static inline void
shadow_sync_other_vcpus(struct vcpu *v, int do_locking)
{
    sh_resync_all(v,
                  1 /* skip */,
                  0 /* this */,
                  1 /* others */,
                  do_locking);
}

void oos_audit_hash_is_present(struct domain *d, mfn_t gmfn);
mfn_t oos_snapshot_lookup(struct vcpu *v, mfn_t gmfn);

#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */

/******************************************************************************
 * Flags used in the return value of the shadow_set_lXe() functions...
 */
/* We actually wrote something new to the shadow */
#define SHADOW_SET_CHANGED            0x1
/* Caller should flush TLBs to clear the old entry */
#define SHADOW_SET_FLUSH              0x2
/* Something went wrong: the shadow entry was invalid or refcount failed */
#define SHADOW_SET_ERROR              0x4

/******************************************************************************
 * MFN/page-info handling
 */

// Override mfn_to_page from asm/page.h, which was #include'd above,
// in order to make it work with our mfn type.
#undef mfn_to_page
#define mfn_to_page(_m) (frame_table + mfn_x(_m))
#define mfn_to_shadow_page(_m) ((struct shadow_page_info *)mfn_to_page(_m))

// Override page_to_mfn from asm/page.h, which was #include'd above,
// in order to make it work with our mfn type.
#undef page_to_mfn
#define page_to_mfn(_pg) (_mfn((_pg) - frame_table))
#define shadow_page_to_mfn(_spg) (page_to_mfn((struct page_info *)_spg))

// Override mfn_valid from asm/page.h, which was #include'd above,
// in order to make it work with our mfn type.
#undef mfn_valid
#define mfn_valid(_mfn) (mfn_x(_mfn) < max_page)

/* Override pagetable_t <-> struct page_info conversions to work with mfn_t */
#undef pagetable_get_page
#define pagetable_get_page(x)   mfn_to_page(pagetable_get_mfn(x))
#undef pagetable_from_page
#define pagetable_from_page(pg) pagetable_from_mfn(page_to_mfn(pg))

/* Is this gmfn a page table, either by PGC_page_table (refcounted shadow
 * modes) or by its PGT type being one of the l1..l4 page-table types?
 * Returns 0 for invalid mfns. */
static inline int
sh_mfn_is_a_page_table(mfn_t gmfn)
{
    struct page_info *page = mfn_to_page(gmfn);
    struct domain *owner;
    unsigned long type_info;

    if ( !mfn_valid(gmfn) )
        return 0;

    owner = page_get_owner(page);
    if ( owner && shadow_mode_refcounts(owner)
         && (page->count_info & PGC_page_table) )
        return 1;

    /* Relies on PGT_l1..l4_page_table being the lowest non-zero values
     * of PGT_type_mask -- TODO confirm against the PGT_* definitions. */
    type_info = page->u.inuse.type_info & PGT_type_mask;
    return type_info && (type_info <= PGT_l4_page_table);
}

// Provide mfn_t-aware versions of common xen functions
static inline void *
sh_map_domain_page(mfn_t mfn)
{
    return map_domain_page(mfn_x(mfn));
}

static inline void
sh_unmap_domain_page(void *p)
{
    unmap_domain_page(p);
}

static inline void *
sh_map_domain_page_global(mfn_t mfn)
{
    return map_domain_page_global(mfn_x(mfn));
}

static inline void
sh_unmap_domain_page_global(void *p)
{
    unmap_domain_page_global(p);
}

/******************************************************************************
 * Log-dirty mode bitmap handling
 */

extern void sh_mark_dirty(struct domain *d, mfn_t gmfn);

static inline int
sh_mfn_is_dirty(struct domain *d, mfn_t gmfn)
/* Is this guest page dirty?  Call only in log-dirty mode. */
{
    unsigned long pfn;
    mfn_t mfn, *l4, *l3, *l2;
    unsigned long *l1;
    int rv;

    ASSERT(shadow_mode_log_dirty(d));
    ASSERT(mfn_valid(d->arch.paging.log_dirty.top));

    /* We /really/ mean PFN here, even for non-translated guests. */
    pfn = get_gpfn_from_mfn(mfn_x(gmfn));
    if ( unlikely(!VALID_M2P(pfn)) )
        return 0;

    if ( d->arch.paging.log_dirty.failed_allocs > 0 )
        /* If we have any failed allocations our dirty log is bogus.
         * Since we can't signal an error here, be conservative and
         * report "dirty" in this case.  (The only current caller,
         * _sh_propagate, leaves known-dirty pages writable, preventing
         * subsequent dirty-logging faults from them.)
         */
        return 1;

    /* Walk the four-level log-dirty tree, mapping each level just long
     * enough to read the next-level mfn.  A missing intermediate page
     * means the bit was never set, i.e. the page is clean. */
    l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
    mfn = l4[L4_LOGDIRTY_IDX(pfn)];
    unmap_domain_page(l4);
    if ( !mfn_valid(mfn) )
        return 0;

    l3 = map_domain_page(mfn_x(mfn));
    mfn = l3[L3_LOGDIRTY_IDX(pfn)];
    unmap_domain_page(l3);
    if ( !mfn_valid(mfn) )
        return 0;

    l2 = map_domain_page(mfn_x(mfn));
    mfn = l2[L2_LOGDIRTY_IDX(pfn)];
    unmap_domain_page(l2);
    if ( !mfn_valid(mfn) )
        return 0;

    /* Leaf level: the dirty bitmap itself. */
    l1 = map_domain_page(mfn_x(mfn));
    rv = test_bit(L1_LOGDIRTY_IDX(pfn), l1);
    unmap_domain_page(l1);

    return rv;
}

/**************************************************************************/
/* VRAM dirty tracking support */
struct sh_dirty_vram {
    unsigned long begin_pfn;
    unsigned long end_pfn;
    paddr_t *sl1ma;
    uint8_t *dirty_bitmap;
    s_time_t last_dirty;
};

/**************************************************************************/
/* Shadow-page refcounting. */

void sh_destroy_shadow(struct vcpu *v, mfn_t smfn);

/* Increase the refcount of a shadow page.  Arguments are the mfn to refcount,
 * and the physical address of the shadow entry that holds the ref (or zero
 * if the ref is held by something else).
 * Returns 0 for failure, 1 for success. */
static inline int sh_get_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
{
    u32 x, nx;
    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);

    ASSERT(mfn_valid(smfn));

    x = sp->count;
    nx = x + 1;

    /* Refuse to overflow the (narrow) count field -- caller must cope
     * with a refused reference. */
    if ( unlikely(nx >= 1U<<26) )
    {
        SHADOW_PRINTK("shadow ref overflow, gmfn=%" PRtype_info " smfn=%lx\n",
                       sp->backpointer, mfn_x(smfn));
        return 0;
    }

    /* Guarded by the shadow lock, so no need for atomic update */
    sp->count = nx;

    /* We remember the first shadow entry that points to each shadow. */
    if ( entry_pa != 0
         && !sh_type_is_pinnable(v, sp->type)
         && sp->up == 0 )
        sp->up = entry_pa;

    return 1;
}

/* Decrease the refcount of a shadow page.
   As for get_ref, takes the
 * physical address of the shadow entry that held this reference. */
static inline void sh_put_ref(struct vcpu *v, mfn_t smfn, paddr_t entry_pa)
{
    u32 x, nx;
    struct shadow_page_info *sp = mfn_to_shadow_page(smfn);

    ASSERT(mfn_valid(smfn));
    ASSERT(sp->mbz == 0);

    /* If this is the entry in the up-pointer, remove it */
    if ( entry_pa != 0
         && !sh_type_is_pinnable(v, sp->type)
         && sp->up == entry_pa )
        sp->up = 0;

    x = sp->count;
    nx = x - 1;

    /* Dropping a reference we never held is a fatal accounting bug. */
    if ( unlikely(x == 0) )
    {
        SHADOW_ERROR("shadow ref underflow, smfn=%lx oc=%08x t=%#x\n",
                     mfn_x(smfn), sp->count, sp->type);
        BUG();
    }

    /* Guarded by the shadow lock, so no need for atomic update */
    sp->count = nx;

    /* Last reference gone: tear the shadow down. */
    if ( unlikely(nx == 0) )
        sh_destroy_shadow(v, smfn);
}

/* Pin a shadow page: take an extra refcount, set the pin bit,
 * and put the shadow at the head of the list of pinned shadows.
 * Returns 0 for failure, 1 for success. */
static inline int sh_pin(struct vcpu *v, mfn_t smfn)
{
    struct shadow_page_info *sp;

    ASSERT(mfn_valid(smfn));
    sp = mfn_to_shadow_page(smfn);
    ASSERT(sh_type_is_pinnable(v, sp->type));
    if ( sp->pinned )
    {
        /* Already pinned: take it out of the pinned-list so it can go
         * at the front */
        list_del(&sp->list);
    }
    else
    {
        /* Not pinned: pin it!  The extra ref keeps the shadow alive while
         * it sits on the pinned list. */
        if ( !sh_get_ref(v, smfn, 0) )
            return 0;
        sp->pinned = 1;
    }
    /* Put it at the head of the list of pinned shadows */
    list_add(&sp->list, &v->domain->arch.paging.shadow.pinned_shadows);
    return 1;
}

/* Unpin a shadow page: unset the pin bit, take the shadow off the list
 * of pinned shadows, and release the extra ref.
 */
static inline void sh_unpin(struct vcpu *v, mfn_t smfn)
{
    struct shadow_page_info *sp;

    ASSERT(mfn_valid(smfn));
    sp = mfn_to_shadow_page(smfn);
    ASSERT(sh_type_is_pinnable(v, sp->type));
    if ( sp->pinned )
    {
        sp->pinned = 0;
        list_del(&sp->list);
        sp->up = 0; /* in case this stops being a pinnable type in future */
        /* Drop the ref taken by sh_pin(); may destroy the shadow. */
        sh_put_ref(v, smfn, 0);
    }
}

/**************************************************************************/
/* PTE-write emulation. */

/* State carried across the x86 emulator callbacks for one emulation. */
struct sh_emulate_ctxt {
    struct x86_emulate_ctxt ctxt;

    /* Cache of up to 31 bytes of instruction. */
    uint8_t insn_buf[31];
    uint8_t insn_buf_bytes;
    unsigned long insn_buf_eip;

    /* Cache of segment registers already gathered for this emulation. */
    unsigned int valid_seg_regs;
    struct segment_register seg_reg[6];

    /* MFNs being written to in write/cmpxchg callbacks */
    mfn_t mfn1, mfn2;

#if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY)
    /* Special case for avoiding having to verify writes: remember
     * whether the old value had its low bit (_PAGE_PRESENT) clear. */
    int low_bit_was_clear:1;
#endif
};

struct x86_emulate_ops *shadow_init_emulation(
    struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
void shadow_continue_emulation(
    struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
struct segment_register *hvm_get_seg_reg(
    enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt);

#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
/**************************************************************************/
/* Virtual TLB entries
 *
 * We keep a cache of virtual-to-physical translations that we have seen
 * since the last TLB flush.  This is safe to use for frame translations,
 * but callers need to re-check the actual guest tables if the lookup fails.
 *
 * Lookups and updates are protected by a per-vTLB (and hence per-vcpu)
 * lock.  This lock is held *only* while reading or writing the table,
 * so it is safe to take in any non-interrupt context.
 Most lookups * happen with v==current, so we expect contention to be low. */#define VTLB_ENTRIES 13struct shadow_vtlb {    unsigned long page_number;      /* Guest virtual address >> PAGE_SHIFT  */    unsigned long frame_number;     /* Guest physical address >> PAGE_SHIFT */    uint32_t pfec;     /* PF error code of the lookup that filled this                        * entry.  A pfec of zero means the slot is empty                        * (since that would require us to re-try anyway) */};/* Call whenever the guest flushes hit actual TLB */static inline void vtlb_flush(struct vcpu *v) {    spin_lock(&v->arch.paging.vtlb_lock);    memset(v->arch.paging.vtlb, 0, VTLB_ENTRIES * sizeof (struct shadow_vtlb));    spin_unlock(&v->arch.paging.vtlb_lock);}static inline int vtlb_hash(unsigned long page_number){    return page_number % VTLB_ENTRIES;}/* Put a translation into the vTLB, potentially clobbering an old one */static inline void vtlb_insert(struct vcpu *v, unsigned long page,                               unsigned long frame, uint32_t pfec){    struct shadow_vtlb entry =         { .page_number = page, .frame_number = frame, .pfec = pfec };    spin_lock(&v->arch.paging.vtlb_lock);    v->arch.paging.vtlb[vtlb_hash(page)] = entry;    spin_unlock(&v->arch.paging.vtlb_lock);}/* Look a translation up in the vTLB.  Returns INVALID_GFN if not found. 
*/static inline unsigned long vtlb_lookup(struct vcpu *v,                                        unsigned long va, uint32_t pfec){    unsigned long page_number = va >> PAGE_SHIFT;    unsigned long frame_number = INVALID_GFN;    int i = vtlb_hash(page_number);    spin_lock(&v->arch.paging.vtlb_lock);    if ( v->arch.paging.vtlb[i].pfec != 0         && v->arch.paging.vtlb[i].page_number == page_number          /* Any successful walk that had at least these pfec bits is OK */         && (v->arch.paging.vtlb[i].pfec & pfec) == pfec )    {        frame_number = v->arch.paging.vtlb[i].frame_number;    }    spin_unlock(&v->arch.paging.vtlb_lock);    return frame_number;}#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */#endif /* _XEN_SHADOW_PRIVATE_H *//* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * indent-tabs-mode: nil * End: */
