
📄 multi.c

📁 xen 3.2.2 source
💻 C
📖 Page 1 of 5
    /* We don't shadow PAE l3s */
    ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);

    /* Check there's something for the shadows to map to */
    if ( !p2m_is_valid(p2mt) )
    {
        *sp = shadow_l1e_empty();
        goto done;
    }

    gflags = guest_l1e_get_flags(guest_entry);

    if ( unlikely(!(gflags & _PAGE_PRESENT)) )
    {
        /* If a guest l1 entry is not present, shadow with the magic
         * guest-not-present entry. */
        if ( level == 1 )
            *sp = sh_l1e_gnp();
        else
            *sp = shadow_l1e_empty();
        goto done;
    }

    if ( level == 1 && p2mt == p2m_mmio_dm )
    {
        /* Guest l1e maps emulated MMIO space */
        *sp = sh_l1e_mmio(target_gfn, gflags);
        if ( !d->arch.paging.shadow.has_fast_mmio_entries )
            d->arch.paging.shadow.has_fast_mmio_entries = 1;
        goto done;
    }

    // Must have a valid target_mfn unless this is a prefetch or an l1
    // pointing at MMIO space.  In the case of a prefetch, an invalid
    // mfn means that we can not usefully shadow anything, and so we
    // return early.
    //
    if ( !mfn_valid(target_mfn)
         && !(level == 1 && (!shadow_mode_refcounts(d)
                             || p2mt == p2m_mmio_direct)) )
    {
        ASSERT((ft == ft_prefetch));
        *sp = shadow_l1e_empty();
        goto done;
    }

    // Propagate bits from the guest to the shadow.
    // Some of these may be overwritten, below.
    // Since we know the guest's PRESENT bit is set, we also set the shadow's
    // SHADOW_PRESENT bit.
    //
    pass_thru_flags = (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_USER |
                       _PAGE_RW | _PAGE_PRESENT);
    if ( guest_supports_nx(v) )
        pass_thru_flags |= _PAGE_NX_BIT;
    if ( !shadow_mode_refcounts(d) && !mfn_valid(target_mfn) )
        pass_thru_flags |= _PAGE_PAT | _PAGE_PCD | _PAGE_PWT;
    sflags = gflags & pass_thru_flags;

    /*
     * For HVM domains with direct access to MMIO areas, set the correct
     * caching attributes in the shadows to match what was asked for.
     */
    if ( (level == 1) && is_hvm_domain(d) &&
         !list_empty(&(domain_hvm_iommu(d)->pdev_list)) &&
         !is_xen_heap_mfn(mfn_x(target_mfn)) )
    {
        unsigned int type;
        if ( hvm_get_mem_pinned_cacheattr(d, gfn_x(target_gfn), &type) )
            sflags |= pat_type_2_pte_flags(type);
        else if ( d->arch.hvm_domain.is_in_uc_mode )
            sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
        else
            sflags |= get_pat_flags(v,
                                    gflags,
                                    gfn_to_paddr(target_gfn),
                                    ((paddr_t)mfn_x(target_mfn)) << PAGE_SHIFT);
    }

    // Set the A&D bits for higher level shadows.
    // Higher level entries do not, strictly speaking, have dirty bits, but
    // since we use shadow linear tables, each of these entries may, at some
    // point in time, also serve as a shadow L1 entry.
    // By setting both the A&D bits in each of these, we eliminate the burden
    // on the hardware to update these bits on initial accesses.
    //
    if ( (level > 1) && !((SHADOW_PAGING_LEVELS == 3) && (level == 3)) )
        sflags |= _PAGE_ACCESSED | _PAGE_DIRTY;

    // If the A or D bit has not yet been set in the guest, then we must
    // prevent the corresponding kind of access.
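    // (The resulting access then faults into the shadow code, which can set
    //  the guest's A/D bit and re-propagate this entry.)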
    //
    if ( unlikely(!(gflags & _PAGE_ACCESSED)) )
        sflags &= ~_PAGE_PRESENT;

    /* D bits exist in L1es and PSE L2es */
    if ( unlikely(((level == 1) ||
                   ((level == 2) &&
                    (gflags & _PAGE_PSE) &&
                    guest_supports_superpages(v)))
                  && !(gflags & _PAGE_DIRTY)) )
        sflags &= ~_PAGE_RW;

    // shadow_mode_log_dirty support
    //
    // Only allow the guest write access to a page a) on a demand fault,
    // or b) if the page is already marked as dirty.
    //
    // (We handle log-dirty entirely inside the shadow code, without using the
    // p2m_ram_logdirty p2m type: only HAP uses that.)
    if ( unlikely((level == 1) && shadow_mode_log_dirty(d)) )
    {
        if ( mfn_valid(target_mfn) ) {
            if ( ft & FETCH_TYPE_WRITE )
                paging_mark_dirty(d, mfn_x(target_mfn));
            else if ( !sh_mfn_is_dirty(d, target_mfn) )
                sflags &= ~_PAGE_RW;
        }
    }

    /* Read-only memory */
    if ( p2mt == p2m_ram_ro )
        sflags &= ~_PAGE_RW;

    // protect guest page tables
    //
    if ( unlikely((level == 1) && sh_mfn_is_a_page_table(target_mfn)) )
    {
        if ( shadow_mode_trap_reads(d) )
        {
            // if we are trapping both reads & writes, then mark this page
            // as not present...
            //
            sflags &= ~_PAGE_PRESENT;
        }
        else
        {
            // otherwise, just prevent any writes...
            //
            sflags &= ~_PAGE_RW;
        }
    }

    // PV guests in 64-bit mode use two different page tables for user vs
    // supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
    // It is always shadowed as present...
    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d)
         && !is_hvm_domain(d) )
    {
        sflags |= _PAGE_USER;
    }

    *sp = shadow_l1e_from_mfn(target_mfn, sflags);

 done:
    SHADOW_DEBUG(PROPAGATE,
                 "%s level %u guest %" SH_PRI_gpte " shadow %" SH_PRI_pte "\n",
                 fetch_type_names[ft], level, guest_entry.l1, sp->l1);
}

/* These four wrappers give us a little bit of type-safety back around
 * the use of void-* pointers and intpte types in _sh_propagate(), and
 * allow the compiler to optimize out some level checks.
 */
#if GUEST_PAGING_LEVELS >= 4
static void
l4e_propagate_from_guest(struct vcpu *v,
                         guest_l4e_t gl4e,
                         mfn_t sl3mfn,
                         shadow_l4e_t *sl4e,
                         fetch_type_t ft)
{
    _sh_propagate(v, gl4e.l4, sl3mfn, sl4e, 4, ft, p2m_ram_rw);
}

static void
l3e_propagate_from_guest(struct vcpu *v,
                         guest_l3e_t gl3e,
                         mfn_t sl2mfn,
                         shadow_l3e_t *sl3e,
                         fetch_type_t ft)
{
    _sh_propagate(v, gl3e.l3, sl2mfn, sl3e, 3, ft, p2m_ram_rw);
}
#endif // GUEST_PAGING_LEVELS >= 4

static void
l2e_propagate_from_guest(struct vcpu *v,
                         guest_l2e_t gl2e,
                         mfn_t sl1mfn,
                         shadow_l2e_t *sl2e,
                         fetch_type_t ft)
{
    _sh_propagate(v, gl2e.l2, sl1mfn, sl2e, 2, ft, p2m_ram_rw);
}

static void
l1e_propagate_from_guest(struct vcpu *v,
                         guest_l1e_t gl1e,
                         mfn_t gmfn,
                         shadow_l1e_t *sl1e,
                         fetch_type_t ft,
                         p2m_type_t p2mt)
{
    _sh_propagate(v, gl1e.l1, gmfn, sl1e, 1, ft, p2mt);
}

/**************************************************************************/
/* These functions update shadow entries (and do bookkeeping on the shadow
 * tables they are in).  It is intended that they are the only
 * functions which ever write (non-zero) data onto a shadow page.
 */

static inline void safe_write_entry(void *dst, void *src)
/* Copy one PTE safely when processors might be running on the
 * destination pagetable.   This does *not* give safety against
 * concurrent writes (that's what the shadow lock is for), just
 * stops the hardware picking up partially written entries. */
{
    volatile unsigned long *d = dst;
    unsigned long *s = src;
    ASSERT(!((unsigned long) d & (sizeof (shadow_l1e_t) - 1)));
#if CONFIG_PAGING_LEVELS == 3
    /* In PAE mode, pagetable entries are larger
     * than machine words, so won't get written atomically.  We need to make
     * sure any other cpu running on these shadows doesn't see a
     * half-written entry.  Do this by marking the entry not-present first,
     * then writing the high word before the low word. */
    BUILD_BUG_ON(sizeof (shadow_l1e_t) != 2 * sizeof (unsigned long));
    d[0] = 0;
    d[1] = s[1];
    d[0] = s[0];
#else
    /* In 32-bit and 64-bit, sizeof(pte) == sizeof(ulong) == 1 word,
     * which will be an atomic write, since the entry is aligned. */
    BUILD_BUG_ON(sizeof (shadow_l1e_t) != sizeof (unsigned long));
    *d = *s;
#endif
}

static inline void
shadow_write_entries(void *d, void *s, int entries, mfn_t mfn)
/* This function does the actual writes to shadow pages.
 * It must not be called directly, since it doesn't do the bookkeeping
 * that shadow_set_l*e() functions do. */
{
    shadow_l1e_t *dst = d;
    shadow_l1e_t *src = s;
    void *map = NULL;
    int i;

    /* Because we mirror access rights at all levels in the shadow, an
     * l2 (or higher) entry with the RW bit cleared will leave us with
     * no write access through the linear map.
     * We detect that by writing to the shadow with copy_to_user() and
     * using map_domain_page() to get a writeable mapping if we need to.
 */
    if ( __copy_to_user(d, d, sizeof (unsigned long)) != 0 )
    {
        perfc_incr(shadow_linear_map_failed);
        map = sh_map_domain_page(mfn);
        ASSERT(map != NULL);
        dst = map + ((unsigned long)dst & (PAGE_SIZE - 1));
    }

    for ( i = 0; i < entries; i++ )
        safe_write_entry(dst++, src++);

    if ( map != NULL ) sh_unmap_domain_page(map);
}

static inline int
perms_strictly_increased(u32 old_flags, u32 new_flags)
/* Given the flags of two entries, are the new flags a strict
 * increase in rights over the old ones? */
{
    u32 of = old_flags & (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_NX);
    u32 nf = new_flags & (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_NX);
    /* Flip the NX bit, since it's the only one that decreases rights;
     * we calculate as if it were an "X" bit. */
    of ^= _PAGE_NX_BIT;
    nf ^= _PAGE_NX_BIT;
    /* If the changed bits are all set in the new flags, then rights strictly
     * increased between old and new. */
    return ((of | (of ^ nf)) == nf);
}

static int inline
shadow_get_page_from_l1e(shadow_l1e_t sl1e, struct domain *d)
{
    int res;
    mfn_t mfn;
    struct domain *owner;

    ASSERT(!sh_l1e_is_magic(sl1e));

    if ( !shadow_mode_refcounts(d) )
        return 1;

    res = get_page_from_l1e(sl1e, d);

    // If a privileged domain is attempting to install a map of a page it does
    // not own, we let it succeed anyway.
    //
    if ( unlikely(!res) &&
         IS_PRIV(d) &&
         !shadow_mode_translate(d) &&
         mfn_valid(mfn = shadow_l1e_get_mfn(sl1e)) &&
         (owner = page_get_owner(mfn_to_page(mfn))) &&
         (d != owner) )
    {
        res = get_page_from_l1e(sl1e, owner);
        SHADOW_PRINTK("privileged domain %d installs map of mfn %05lx "
                      "which is owned by domain %d: %s\n",
                      d->domain_id, mfn_x(mfn), owner->domain_id,
                      res ? "success" : "failed");
    }

    if ( unlikely(!res) )
    {
        perfc_incr(shadow_get_page_fail);
        SHADOW_PRINTK("failed: l1e=" SH_PRI_pte "\n");
    }

    return res;
}

static void inline
shadow_put_page_from_l1e(shadow_l1e_t sl1e, struct domain *d)
{
    if ( !shadow_mode_refcounts(d) )
        return;

    put_page_from_l1e(sl1e, d);
}

#if GUEST_PAGING_LEVELS >= 4
static int shadow_set_l4e(struct vcpu *v,
                          shadow_l4e_t *sl4e,
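(The listing is truncated here; shadow_set_l4e() continues on the next page.)

As an aside on perms_strictly_increased() above: the snippet below is a standalone sketch, not part of multi.c, that copies the predicate using made-up flag values (the real definitions live in Xen's headers) to show why the NX bit is flipped before the comparison. NX is the only flag whose presence removes a right, so it is treated as an inverted "executable" bit.

/* Standalone illustration of the perms_strictly_increased() check.
 * Flag values are illustrative only, not Xen's real ones. */
#include <stdio.h>
#include <stdint.h>

#define X_PAGE_PRESENT 0x01u
#define X_PAGE_RW      0x02u
#define X_PAGE_USER    0x04u
#define X_PAGE_NX      0x80u

static int strictly_increased(uint32_t old_flags, uint32_t new_flags)
{
    uint32_t of = old_flags & (X_PAGE_PRESENT|X_PAGE_RW|X_PAGE_USER|X_PAGE_NX);
    uint32_t nf = new_flags & (X_PAGE_PRESENT|X_PAGE_RW|X_PAGE_USER|X_PAGE_NX);
    /* NX is the only bit whose presence removes a right, so flip it and
     * reason about an "executable" bit instead. */
    of ^= X_PAGE_NX;
    nf ^= X_PAGE_NX;
    /* Every bit that changed must be set in the new flags. */
    return (of | (of ^ nf)) == nf;
}

int main(void)
{
    /* Read-only -> read-write: a strict increase in rights, prints 1. */
    printf("%d\n", strictly_increased(X_PAGE_PRESENT,
                                      X_PAGE_PRESENT | X_PAGE_RW));
    /* Gains RW but also gains NX (loses execute): not an increase, prints 0. */
    printf("%d\n", strictly_increased(X_PAGE_PRESENT,
                                      X_PAGE_PRESENT | X_PAGE_RW | X_PAGE_NX));
    return 0;
}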
