
📄 multi.c

📁 Xen virtual machine source code package
💻 C
📖 Page 1 of 5
    return (u32)((unsigned long)ptr & ~PAGE_MASK) / sizeof(guest_l1e_t);
}

static u32
shadow_l1_index(mfn_t *smfn, u32 guest_index)
{
#if (GUEST_PAGING_LEVELS == 2)
    *smfn = _mfn(mfn_x(*smfn) +
                 (guest_index / SHADOW_L1_PAGETABLE_ENTRIES));
    return (guest_index % SHADOW_L1_PAGETABLE_ENTRIES);
#else
    return guest_index;
#endif
}

static u32
shadow_l2_index(mfn_t *smfn, u32 guest_index)
{
#if (GUEST_PAGING_LEVELS == 2)
    // Because we use 2 shadow l2 entries for each guest entry, the number of
    // guest entries per shadow page is SHADOW_L2_PAGETABLE_ENTRIES/2
    //
    *smfn = _mfn(mfn_x(*smfn) +
                 (guest_index / (SHADOW_L2_PAGETABLE_ENTRIES / 2)));

    // We multiply by two to get the index of the first of the two entries
    // used to shadow the specified guest entry.
    return (guest_index % (SHADOW_L2_PAGETABLE_ENTRIES / 2)) * 2;
#else
    return guest_index;
#endif
}

#if GUEST_PAGING_LEVELS >= 4

static u32
shadow_l3_index(mfn_t *smfn, u32 guest_index)
{
    return guest_index;
}

static u32
shadow_l4_index(mfn_t *smfn, u32 guest_index)
{
    return guest_index;
}

#endif // GUEST_PAGING_LEVELS >= 4


/**************************************************************************/
/* Function which computes shadow entries from their corresponding guest
 * entries.  This is the "heart" of the shadow code. It operates using
 * level-1 shadow types, but handles all levels of entry.
 * Don't call it directly, but use the four wrappers below.
 */

static always_inline void
_sh_propagate(struct vcpu *v,
              guest_intpte_t guest_intpte,
              mfn_t target_mfn,
              void *shadow_entry_ptr,
              int level,
              fetch_type_t ft,
              p2m_type_t p2mt)
{
    guest_l1e_t guest_entry = { guest_intpte };
    shadow_l1e_t *sp = shadow_entry_ptr;
    struct domain *d = v->domain;
    gfn_t target_gfn = guest_l1e_get_gfn(guest_entry);
    u32 pass_thru_flags;
    u32 gflags, sflags;

    /* We don't shadow PAE l3s */
    ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);

    /* Check there's something for the shadows to map to */
    if ( !p2m_is_valid(p2mt) )
    {
        *sp = shadow_l1e_empty();
        goto done;
    }

    gflags = guest_l1e_get_flags(guest_entry);

    if ( unlikely(!(gflags & _PAGE_PRESENT)) )
    {
        /* If a guest l1 entry is not present, shadow with the magic
         * guest-not-present entry. */
        if ( level == 1 )
            *sp = sh_l1e_gnp();
        else
            *sp = shadow_l1e_empty();
        goto done;
    }

    if ( level == 1 && p2mt == p2m_mmio_dm )
    {
        /* Guest l1e maps emulated MMIO space */
        *sp = sh_l1e_mmio(target_gfn, gflags);
        if ( !d->arch.paging.shadow.has_fast_mmio_entries )
            d->arch.paging.shadow.has_fast_mmio_entries = 1;
        goto done;
    }

    // Must have a valid target_mfn unless this is a prefetch or an l1
    // pointing at MMIO space.  In the case of a prefetch, an invalid
    // mfn means that we can not usefully shadow anything, and so we
    // return early.
    //
    if ( !mfn_valid(target_mfn)
         && !(level == 1 && (!shadow_mode_refcounts(d)
                             || p2mt == p2m_mmio_direct)) )
    {
        ASSERT((ft == ft_prefetch));
        *sp = shadow_l1e_empty();
        goto done;
    }

    // Propagate bits from the guest to the shadow.
    // Some of these may be overwritten, below.
    // Since we know the guest's PRESENT bit is set, we also set the shadow's
    // SHADOW_PRESENT bit.
    //
    pass_thru_flags = (_PAGE_ACCESSED | _PAGE_USER |
                       _PAGE_RW | _PAGE_PRESENT);
    if ( guest_supports_nx(v) )
        pass_thru_flags |= _PAGE_NX_BIT;
    if ( !shadow_mode_refcounts(d) && !mfn_valid(target_mfn) )
        pass_thru_flags |= _PAGE_PAT | _PAGE_PCD | _PAGE_PWT;
    sflags = gflags & pass_thru_flags;

    /*
     * For HVM domains with direct access to MMIO areas, set the correct
     * caching attributes in the shadows to match what was asked for.
     */
    if ( (level == 1) && is_hvm_domain(d) && has_arch_pdevs(d) &&
         !is_xen_heap_mfn(mfn_x(target_mfn)) )
    {
        unsigned int type;
        if ( hvm_get_mem_pinned_cacheattr(d, gfn_x(target_gfn), &type) )
            sflags |= pat_type_2_pte_flags(type);
        else if ( d->arch.hvm_domain.is_in_uc_mode )
            sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
        else
            sflags |= get_pat_flags(v,
                                    gflags,
                                    gfn_to_paddr(target_gfn),
                                    ((paddr_t)mfn_x(target_mfn)) << PAGE_SHIFT);
    }

    // Set the A&D bits for higher level shadows.
    // Higher level entries do not, strictly speaking, have dirty bits, but
    // since we use shadow linear tables, each of these entries may, at some
    // point in time, also serve as a shadow L1 entry.
    // By setting both the A&D bits in each of these, we eliminate the burden
    // on the hardware to update these bits on initial accesses.
    //
    if ( (level > 1) && !((SHADOW_PAGING_LEVELS == 3) && (level == 3)) )
        sflags |= _PAGE_ACCESSED | _PAGE_DIRTY;

    // If the A or D bit has not yet been set in the guest, then we must
    // prevent the corresponding kind of access.
    //
    if ( unlikely(!(gflags & _PAGE_ACCESSED)) )
        sflags &= ~_PAGE_PRESENT;

    /* D bits exist in L1es and PSE L2es */
    if ( unlikely(((level == 1) ||
                   ((level == 2) &&
                    (gflags & _PAGE_PSE) &&
                    guest_supports_superpages(v)))
                  && !(gflags & _PAGE_DIRTY)) )
        sflags &= ~_PAGE_RW;

    // shadow_mode_log_dirty support
    //
    // Only allow the guest write access to a page a) on a demand fault,
    // or b) if the page is already marked as dirty.
    //
    // (We handle log-dirty entirely inside the shadow code, without using the
    // p2m_ram_logdirty p2m type: only HAP uses that.)
    if ( unlikely((level == 1) && shadow_mode_log_dirty(d)) )
    {
        if ( mfn_valid(target_mfn) ) {
            if ( ft & FETCH_TYPE_WRITE )
                paging_mark_dirty(d, mfn_x(target_mfn));
            else if ( !sh_mfn_is_dirty(d, target_mfn) )
                sflags &= ~_PAGE_RW;
        }
    }

    if ( unlikely((level == 1) && d->dirty_vram
            && d->dirty_vram->last_dirty == -1
            && gfn_x(target_gfn) >= d->dirty_vram->begin_pfn
            && gfn_x(target_gfn) < d->dirty_vram->end_pfn) )
    {
        if ( ft & FETCH_TYPE_WRITE )
            d->dirty_vram->last_dirty = NOW();
        else
            sflags &= ~_PAGE_RW;
    }

    /* Read-only memory */
    if ( p2mt == p2m_ram_ro )
        sflags &= ~_PAGE_RW;

    // protect guest page tables
    //
    if ( unlikely((level == 1)
                  && sh_mfn_is_a_page_table(target_mfn)
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC )
                  /* Unless the page is out of sync and the guest is
                     writing to it. */
                  && !(mfn_oos_may_write(target_mfn)
                       && (ft == ft_demand_write))
#endif /* OOS */
                  ) )
    {
        if ( shadow_mode_trap_reads(d) )
        {
            // if we are trapping both reads & writes, then mark this page
            // as not present...
            //
            sflags &= ~_PAGE_PRESENT;
        }
        else
        {
            // otherwise, just prevent any writes...
            //
            sflags &= ~_PAGE_RW;
        }
    }

    // PV guests in 64-bit mode use two different page tables for user vs
    // supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
    // It is always shadowed as present...
    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d)
         && !is_hvm_domain(d) )
    {
        sflags |= _PAGE_USER;
    }

    *sp = shadow_l1e_from_mfn(target_mfn, sflags);

 done:
    SHADOW_DEBUG(PROPAGATE,
                 "%s level %u guest %" SH_PRI_gpte " shadow %" SH_PRI_pte "\n",
                 fetch_type_names[ft], level, guest_entry.l1, sp->l1);
}

/* These four wrappers give us a little bit of type-safety back around
 * the use of void-* pointers and intpte types in _sh_propagate(), and
 * allow the compiler to optimize out some level checks. */

#if GUEST_PAGING_LEVELS >= 4
static void
l4e_propagate_from_guest(struct vcpu *v,
                         guest_l4e_t gl4e,
                         mfn_t sl3mfn,
                         shadow_l4e_t *sl4e,
                         fetch_type_t ft)
{
    _sh_propagate(v, gl4e.l4, sl3mfn, sl4e, 4, ft, p2m_ram_rw);
}

static void
l3e_propagate_from_guest(struct vcpu *v,
                         guest_l3e_t gl3e,
                         mfn_t sl2mfn,
                         shadow_l3e_t *sl3e,
                         fetch_type_t ft)
{
    _sh_propagate(v, gl3e.l3, sl2mfn, sl3e, 3, ft, p2m_ram_rw);
}
#endif // GUEST_PAGING_LEVELS >= 4

static void
l2e_propagate_from_guest(struct vcpu *v,
                         guest_l2e_t gl2e,
                         mfn_t sl1mfn,
                         shadow_l2e_t *sl2e,
                         fetch_type_t ft)
{
    _sh_propagate(v, gl2e.l2, sl1mfn, sl2e, 2, ft, p2m_ram_rw);
}

static void
l1e_propagate_from_guest(struct vcpu *v,
                         guest_l1e_t gl1e,
                         mfn_t gmfn,
                         shadow_l1e_t *sl1e,
                         fetch_type_t ft,
                         p2m_type_t p2mt)
{
    _sh_propagate(v, gl1e.l1, gmfn, sl1e, 1, ft, p2mt);
}


/**************************************************************************/
/* These functions update shadow entries (and do bookkeeping on the shadow
 * tables they are in).  It is intended that they are the only
 * functions which ever write (non-zero) data onto a shadow page.
 */

static inline void safe_write_entry(void *dst, void *src)
/* Copy one PTE safely when processors might be running on the
 * destination pagetable.   This does *not* give safety against
 * concurrent writes (that's what the shadow lock is for), just
 * stops the hardware picking up partially written entries. */
{
    volatile unsigned long *d = dst;
    unsigned long *s = src;
    ASSERT(!((unsigned long) d & (sizeof (shadow_l1e_t) - 1)));
#if CONFIG_PAGING_LEVELS == 3
    /* In PAE mode, pagetable entries are larger
     * than machine words, so won't get written atomically.  We need to make
     * sure any other cpu running on these shadows doesn't see a
     * half-written entry.  Do this by marking the entry not-present first,
     * then writing the high word before the low word. */
    BUILD_BUG_ON(sizeof (shadow_l1e_t) != 2 * sizeof (unsigned long));
    d[0] = 0;
    d[1] = s[1];
    d[0] = s[0];
#else
    /* In 64-bit, sizeof(pte) == sizeof(ulong) == 1 word,
     * which will be an atomic write, since the entry is aligned. */
    BUILD_BUG_ON(sizeof (shadow_l1e_t) != sizeof (unsigned long));
    *d = *s;
#endif
}

static inline void
shadow_write_entries(void *d, void *s, int entries, mfn_t mfn)
/* This function does the actual writes to shadow pages.
 * It must not be called directly, since it doesn't do the bookkeeping
 * that shadow_set_l*e() functions do. */
{
    shadow_l1e_t *dst = d;
    shadow_l1e_t *src = s;
    void *map = NULL;
    int i;

    /* Because we mirror access rights at all levels in the shadow, an
     * l2 (or higher) entry with the RW bit cleared will leave us with
     * no write access through the linear map.
     * We detect that by writing to the shadow with copy_to_user() and
     * using map_domain_page() to get a writeable mapping if we need to. */
    if ( __copy_to_user(d, d, sizeof (unsigned long)) != 0 )
    {
        perfc_incr(shadow_linear_map_failed);
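
As a reading aid for shadow_l1_index() and shadow_l2_index() above, here is a small standalone sketch that simply replays their quotient/remainder arithmetic for a 2-level guest. It is not part of multi.c; the entry counts are assumptions used only for illustration (1024 entries per guest page-table page, 512 entries per PAE-style shadow page).

/* Standalone sketch (not part of multi.c): replay the guest-index ->
 * shadow-index arithmetic of shadow_l1_index()/shadow_l2_index() for
 * GUEST_PAGING_LEVELS == 2.  The entry counts are assumed values. */
#include <stdio.h>

#define SHADOW_L1_PAGETABLE_ENTRIES 512   /* assumed PAE shadow geometry */
#define SHADOW_L2_PAGETABLE_ENTRIES 512
#define GUEST_PAGETABLE_ENTRIES     1024  /* assumed 2-level guest geometry */

int main(void)
{
    unsigned int guest_index;

    for ( guest_index = 0; guest_index < GUEST_PAGETABLE_ENTRIES;
          guest_index += 300 )
    {
        /* shadow_l1_index(): one guest l1 page is backed by consecutive
         * shadow l1 pages, so the guest index selects a shadow page
         * (the quotient) and a slot within it (the remainder). */
        unsigned int l1_page = guest_index / SHADOW_L1_PAGETABLE_ENTRIES;
        unsigned int l1_slot = guest_index % SHADOW_L1_PAGETABLE_ENTRIES;

        /* shadow_l2_index(): each guest l2 entry is shadowed by *two*
         * shadow l2 entries, so only SHADOW_L2_PAGETABLE_ENTRIES/2 guest
         * entries fit per shadow page, and the slot is doubled to point
         * at the first entry of the pair. */
        unsigned int l2_page = guest_index / (SHADOW_L2_PAGETABLE_ENTRIES / 2);
        unsigned int l2_slot =
            (guest_index % (SHADOW_L2_PAGETABLE_ENTRIES / 2)) * 2;

        printf("guest index %4u -> shadow l1 page +%u slot %3u, "
               "shadow l2 page +%u slot %3u\n",
               guest_index, l1_page, l1_slot, l2_page, l2_slot);
    }
    return 0;
}

Under these assumed sizes, each guest l1 page spans two consecutive shadow l1 pages, and each guest l2 entry occupies a pair of adjacent shadow l2 slots, which is exactly what the quotient/remainder split in the helpers encodes.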
