
📄 multi.c

📁 Xen virtual machine source code package
💻 C
📖 Page 1 of 5
    shadow_l1e_t old_sl1e;
#if SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC
    mfn_t new_gmfn = shadow_l1e_get_mfn(new_sl1e);
#endif
    ASSERT(sl1e != NULL);

#if SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC
    if ( mfn_valid(new_gmfn) && mfn_oos_may_write(new_gmfn)
         && ((shadow_l1e_get_flags(new_sl1e) & (_PAGE_RW|_PAGE_PRESENT))
             == (_PAGE_RW|_PAGE_PRESENT)) )
        oos_fixup_add(v, new_gmfn, sl1mfn, pgentry_ptr_to_slot(sl1e));
#endif

    old_sl1e = *sl1e;

    if ( old_sl1e.l1 == new_sl1e.l1 ) return 0; /* Nothing to do */

    if ( (shadow_l1e_get_flags(new_sl1e) & _PAGE_PRESENT)
         && !sh_l1e_is_magic(new_sl1e) )
    {
        /* About to install a new reference */
        if ( shadow_mode_refcounts(d) ) {
            if ( shadow_get_page_from_l1e(new_sl1e, d) == 0 )
            {
                /* Doesn't look like a pagetable. */
                flags |= SHADOW_SET_ERROR;
                new_sl1e = shadow_l1e_empty();
            }
            else
            {
                shadow_vram_get_l1e(new_sl1e, sl1e, sl1mfn, d);
            }
        }
    }

    /* Write the new entry */
    shadow_write_entries(sl1e, &new_sl1e, 1, sl1mfn);
    flags |= SHADOW_SET_CHANGED;

    if ( (shadow_l1e_get_flags(old_sl1e) & _PAGE_PRESENT)
         && !sh_l1e_is_magic(old_sl1e) )
    {
        /* We lost a reference to an old mfn. */
        /* N.B. Unlike higher-level sets, never need an extra flush
         * when writing an l1e.  Because it points to the same guest frame
         * as the guest l1e did, it's the guest's responsibility to
         * trigger a flush later. */
        if ( shadow_mode_refcounts(d) )
        {
            shadow_vram_put_l1e(old_sl1e, sl1e, sl1mfn, d);
            shadow_put_page_from_l1e(old_sl1e, d);
        }
    }
    return flags;
}
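/* Illustration (not part of multi.c): the function above (the tail of
 * shadow_set_l1e) takes a reference on the incoming l1e before writing
 * it, and only drops the reference held by the old l1e after the write.
 * Below is a minimal standalone sketch of that ordering with toy,
 * hypothetical names (get_ref/put_ref, set_entry); the real refcounting
 * is done by shadow_get_page_from_l1e()/shadow_put_page_from_l1e(). */
#if 0 /* standalone demo, no Xen dependencies */
#include <stdio.h>

static int refcount[16];                 /* toy per-frame refcounts */

static int  get_ref(int frame) { return ++refcount[frame]; }
static void put_ref(int frame) { --refcount[frame]; }

/* Install new_frame in *slot: take the new reference first, so a frame
 * that stays mapped is never observed with a zero count mid-update. */
static void set_entry(int *slot, int new_frame)
{
    int old_frame = *slot;
    if ( old_frame == new_frame )
        return;                          /* nothing to do */
    get_ref(new_frame);                  /* new reference first... */
    *slot = new_frame;                   /* ...then the write... */
    put_ref(old_frame);                  /* ...then drop the old one */
}

int main(void)
{
    int slot = 3;
    get_ref(3);                          /* slot initially references 3 */
    set_entry(&slot, 7);
    printf("frame 3 refs=%d, frame 7 refs=%d\n", refcount[3], refcount[7]);
    return 0;
}
#endif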
/**************************************************************************/
/* Macros to walk pagetables.  These take the shadow of a pagetable and
 * walk every "interesting" entry.  That is, they don't touch Xen mappings,
 * and for 32-bit l2s shadowed onto PAE or 64-bit, they only touch every
 * second entry (since pairs of entries are managed together). For multi-page
 * shadows they walk all pages.
 *
 * Arguments are an MFN, the variable to point to each entry, a variable
 * to indicate that we are done (we will shortcut to the end of the scan
 * when _done != 0), a variable to indicate that we should avoid Xen mappings,
 * and the code.
 *
 * WARNING: These macros have side-effects.  They change the values of both
 * the pointer and the MFN.
 */

static inline void increment_ptr_to_guest_entry(void *ptr)
{
    if ( ptr )
    {
        guest_l1e_t **entry = ptr;
        (*entry)++;
    }
}

/* All kinds of l1: touch all entries */
#define _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)        \
do {                                                                    \
    int _i;                                                             \
    shadow_l1e_t *_sp = sh_map_domain_page((_sl1mfn));                  \
    ASSERT(mfn_to_shadow_page(_sl1mfn)->type == SH_type_l1_shadow       \
           || mfn_to_shadow_page(_sl1mfn)->type == SH_type_fl1_shadow); \
    for ( _i = 0; _i < SHADOW_L1_PAGETABLE_ENTRIES; _i++ )              \
    {                                                                   \
        (_sl1e) = _sp + _i;                                             \
        if ( shadow_l1e_get_flags(*(_sl1e)) & _PAGE_PRESENT )           \
            {_code}                                                     \
        if ( _done ) break;                                             \
        increment_ptr_to_guest_entry(_gl1p);                            \
    }                                                                   \
    sh_unmap_domain_page(_sp);                                          \
} while (0)

/* 32-bit l1, on PAE or 64-bit shadows: need to walk both pages of shadow */
#if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
#define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done,  _code)        \
do {                                                                    \
    int __done = 0;                                                     \
    _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p,                          \
                         ({ (__done = _done); }), _code);               \
    _sl1mfn = _mfn(mfn_x(_sl1mfn) + 1);                                 \
    if ( !__done )                                                      \
        _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p,                      \
                             ({ (__done = _done); }), _code);           \
} while (0)
#else /* Everything else; l1 shadows are only one page */
#define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)         \
       _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)
#endif


#if GUEST_PAGING_LEVELS == 2

/* 32-bit l2 on PAE/64: four pages, touch every second entry, and avoid Xen */
#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)     \
do {                                                                      \
    int _i, _j, __done = 0;                                               \
    int _xen = !shadow_mode_external(_dom);                               \
    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow);    \
    for ( _j = 0; _j < 4 && !__done; _j++ )                               \
    {                                                                     \
        shadow_l2e_t *_sp = sh_map_domain_page(_sl2mfn);                  \
        for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i += 2 )         \
            if ( (!(_xen))                                                \
                 || ((_j * SHADOW_L2_PAGETABLE_ENTRIES) + _i)             \
                 < (HYPERVISOR_VIRT_START >> SHADOW_L2_PAGETABLE_SHIFT) ) \
            {                                                             \
                (_sl2e) = _sp + _i;                                       \
                if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT )     \
                    {_code}                                               \
                if ( (__done = (_done)) ) break;                          \
                increment_ptr_to_guest_entry(_gl2p);                      \
            }                                                             \
        sh_unmap_domain_page(_sp);                                        \
        _sl2mfn = _mfn(mfn_x(_sl2mfn) + 1);                               \
    }                                                                     \
} while (0)
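/* Illustration (not part of multi.c): a sketch of the index arithmetic
 * behind the "four pages, every second entry" walk above.  A 32-bit
 * guest l2 holds 1024 4-byte entries; its PAE/64-bit shadow uses 8-byte
 * entries, 512 per page, so one guest l2 page is shadowed by four pages
 * and each guest l2e corresponds to a pair of adjacent shadow l2es.
 * The helper below is hypothetical and exists only for illustration. */
#if 0 /* standalone demo, no Xen dependencies */
#include <stdio.h>

static void guest_l2_index_to_shadow(int guest_idx, int *page, int *slot)
{
    int flat = guest_idx * 2;   /* each guest l2e -> pair of shadow l2es */
    *page = flat / 512;         /* which of the four shadow pages */
    *slot = flat % 512;         /* first slot of the pair in that page */
}

int main(void)
{
    int page, slot;
    guest_l2_index_to_shadow(700, &page, &slot);
    printf("guest l2e 700 -> shadow page %d, slots %d and %d\n",
           page, slot, slot + 1);  /* -> page 2, slots 376 and 377 */
    return 0;
}
#endif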
#elif GUEST_PAGING_LEVELS == 3

/* PAE: if it's an l2h, don't touch Xen mappings */
#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)      \
do {                                                                       \
    int _i;                                                                \
    int _xen = !shadow_mode_external(_dom);                                \
    shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn));                     \
    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_pae_shadow      \
           || mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_pae_shadow);\
    for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                 \
        if ( (!(_xen))                                                     \
             || mfn_to_shadow_page(_sl2mfn)->type != SH_type_l2h_pae_shadow\
             || ((_i + (3 * SHADOW_L2_PAGETABLE_ENTRIES))                  \
                 < (HYPERVISOR_VIRT_START >> SHADOW_L2_PAGETABLE_SHIFT)) ) \
        {                                                                  \
            (_sl2e) = _sp + _i;                                            \
            if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT )          \
                {_code}                                                    \
            if ( _done ) break;                                            \
            increment_ptr_to_guest_entry(_gl2p);                           \
        }                                                                  \
    sh_unmap_domain_page(_sp);                                             \
} while (0)
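/* Illustration (not part of multi.c): the walkers above are classic
 * statement macros -- do { ... } while (0) bodies that splice a caller
 * supplied {_code} block into the loop and shortcut the scan through a
 * _done flag.  A minimal standalone sketch of the same pattern follows;
 * FOREACH_SLOT and the demo names are hypothetical, not Xen's. */
#if 0 /* standalone demo of the macro pattern, no Xen dependencies */
#include <stdio.h>

#define FOREACH_SLOT(_table, _n, _slot, _done, _code)                   \
do {                                                                    \
    int _i;                                                             \
    for ( _i = 0; _i < (_n); _i++ )                                     \
    {                                                                   \
        (_slot) = &(_table)[_i];                                        \
        {_code}                                                         \
        if ( _done ) break;   /* caller-controlled early exit */        \
    }                                                                   \
} while (0)

int main(void)
{
    int table[8] = { 3, 0, 7, 5, 0, 9, 1, 4 };
    int *slot, done = 0;
    /* The side-effect warning applies here too: the macro reassigns
     * slot on every iteration, just as the Xen walkers move _sl1e and
     * advance the MFN. */
    FOREACH_SLOT(table, 8, slot, done,
    {
        if ( *slot == 5 )
            done = 1;         /* stop the scan, like _done in Xen */
        else
            printf("%d ", *slot);
    });
    printf("\n");             /* prints: 3 0 7 */
    return 0;
}
#endif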
#else

/* 64-bit l2: touch all entries except for PAE compat guests. */
#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)       \
do {                                                                        \
    int _i;                                                                 \
    int _xen = !shadow_mode_external(_dom);                                 \
    shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn));                      \
    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_64_shadow ||     \
           mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_64_shadow);     \
    for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                  \
    {                                                                       \
        if ( (!(_xen))                                                      \
             || !is_pv_32on64_domain(_dom)                                  \
             || mfn_to_shadow_page(_sl2mfn)->type != SH_type_l2h_64_shadow  \
             || (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) )           \
        {                                                                   \
            (_sl2e) = _sp + _i;                                             \
            if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT )           \
                {_code}                                                     \
            if ( _done ) break;                                             \
            increment_ptr_to_guest_entry(_gl2p);                            \
        }                                                                   \
    }                                                                       \
    sh_unmap_domain_page(_sp);                                              \
} while (0)

#endif /* different kinds of l2 */


#if GUEST_PAGING_LEVELS == 4

/* 64-bit l3: touch all entries */
#define SHADOW_FOREACH_L3E(_sl3mfn, _sl3e, _gl3p, _done, _code)         \
do {                                                                    \
    int _i;                                                             \
    shadow_l3e_t *_sp = sh_map_domain_page((_sl3mfn));                  \
    ASSERT(mfn_to_shadow_page(_sl3mfn)->type == SH_type_l3_64_shadow);  \
    for ( _i = 0; _i < SHADOW_L3_PAGETABLE_ENTRIES; _i++ )              \
    {                                                                   \
        (_sl3e) = _sp + _i;                                             \
        if ( shadow_l3e_get_flags(*(_sl3e)) & _PAGE_PRESENT )           \
            {_code}                                                     \
        if ( _done ) break;                                             \
        increment_ptr_to_guest_entry(_gl3p);                            \
    }                                                                   \
    sh_unmap_domain_page(_sp);                                          \
} while (0)

/* 64-bit l4: avoid Xen mappings */
#define SHADOW_FOREACH_L4E(_sl4mfn, _sl4e, _gl4p, _done, _dom, _code)   \
do {                                                                    \
    shadow_l4e_t *_sp = sh_map_domain_page((_sl4mfn));                  \
    int _xen = !shadow_mode_external(_dom);                             \
    int _i;                                                             \
    ASSERT(mfn_to_shadow_page(_sl4mfn)->type == SH_type_l4_64_shadow);  \
    for ( _i = 0; _i < SHADOW_L4_PAGETABLE_ENTRIES; _i++ )              \
    {                                                                   \
        if ( (!(_xen)) || is_guest_l4_slot(_dom, _i) )                  \
        {                                                               \
            (_sl4e) = _sp + _i;                                         \
            if ( shadow_l4e_get_flags(*(_sl4e)) & _PAGE_PRESENT )       \
                {_code}                                                 \
            if ( _done ) break;                                         \
        }                                                               \
        increment_ptr_to_guest_entry(_gl4p);                            \
    }                                                                   \
    sh_unmap_domain_page(_sp);                                          \
} while (0)

#endif
/**************************************************************************/
/* Functions to install Xen mappings and linear mappings in shadow pages */

// XXX -- this function should probably be moved to shadow-common.c, but that
//        probably wants to wait until the shadow types have been moved from
//        shadow-types.h to shadow-private.h
//
#if CONFIG_PAGING_LEVELS == 4 && GUEST_PAGING_LEVELS == 4
void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn)
{
    struct domain *d = v->domain;
    shadow_l4e_t *sl4e;

    sl4e = sh_map_domain_page(sl4mfn);
    ASSERT(sl4e != NULL);
    ASSERT(sizeof (l4_pgentry_t) == sizeof (shadow_l4e_t));

    /* Copy the common Xen mappings from the idle domain */
    memcpy(&sl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));

    /* Install the per-domain mappings for this domain */
    sl4e[shadow_l4_table_offset(PERDOMAIN_VIRT_START)] =
        shadow_l4e_from_mfn(page_to_mfn(virt_to_page(d->arch.mm_perdomain_l3)),
                            __PAGE_HYPERVISOR);

    /* Shadow linear mapping for 4-level shadows.  N.B. for 3-level
     * shadows on 64-bit xen, this linear mapping is later replaced by the
     * monitor pagetable structure, which is built in make_monitor_table
     * and maintained by sh_update_linear_entries. */
    sl4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
        shadow_l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR);

    /* Self linear mapping. */
    if ( shadow_mode_translate(v->domain) && !shadow_mode_external(v->domain) )
    {
        // linear tables may not be used with translated PV guests
        sl4e[shadow_l4_table_offset(LINEAR_PT_VIRT_START)] =
            shadow_l4e_empty();
    }
    else
    {
        sl4e[shadow_l4_table_offset(LINEAR_PT_VIRT_START)] =
            shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR);
    }

    if ( shadow_mode_translate(v->domain) )
    {
        /* install domain-specific P2M table */
        sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
            shadow_l4e_from_mfn(pagetable_get_mfn(d->arch.phys_table),
                                __PAGE_HYPERVISOR);
    }

    sh_unmap_domain_page(sl4e);
}
#endif
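/* Illustration (not part of multi.c): the "self linear mapping" above is
 * the classic recursive-pagetable trick -- a top-level slot that points
 * back at the top-level table itself, which makes every pagetable entry
 * visible as ordinary data at a fixed virtual address.  Below is a
 * standalone sketch of the 4-level address arithmetic; RECURSIVE_SLOT
 * and pte_linear_addr are hypothetical names, not Xen's. */
#if 0 /* standalone demo, assumes 4-level x86-64 paging with 48-bit VAs */
#include <stdio.h>
#include <stdint.h>

#define RECURSIVE_SLOT 0x101ULL  /* arbitrary l4 slot used recursively */

static uint64_t sign_extend_48(uint64_t va)
{
    return (va & (1ULL << 47)) ? (va | 0xffff000000000000ULL) : va;
}

/* With l4[RECURSIVE_SLOT] pointing at the l4 page itself, the hardware
 * walk for this address ends one level early, so the l1e (PTE) mapping
 * 'va' appears as data at the returned address. */
static uint64_t pte_linear_addr(uint64_t va)
{
    uint64_t idx = (va >> 12) & ((1ULL << 36) - 1); /* l4:l3:l2:l1 bits */
    return sign_extend_48((RECURSIVE_SLOT << 39) | (idx << 3));
}

int main(void)
{
    uint64_t va = 0x00007f1234567000ULL;
    printf("l1e for %#llx visible at %#llx\n",
           (unsigned long long)va, (unsigned long long)pte_linear_addr(va));
    return 0;
}
#endif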
#if CONFIG_PAGING_LEVELS >= 3 && GUEST_PAGING_LEVELS >= 3
// For 3-on-3 PV guests, we need to make sure the xen mappings are in
// place, which means that we need to populate the l2h entry in the l3
// table.

static void sh_install_xen_entries_in_l2h(struct vcpu *v, mfn_t sl2hmfn)
{
    struct domain *d = v->domain;
    shadow_l2e_t *sl2e;
#if CONFIG_PAGING_LEVELS == 3
    int i;
#else
    if ( !is_pv_32on64_vcpu(v) )
        return;
#endif

    sl2e = sh_map_domain_page(sl2hmfn);
    ASSERT(sl2e != NULL);
    ASSERT(sizeof (l2_pgentry_t) == sizeof (shadow_l2e_t));

#if CONFIG_PAGING_LEVELS == 3

    /* Copy the common Xen mappings from the idle domain */
    memcpy(&sl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
           &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
           L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));

    /* Install the per-domain mappings for this domain */
    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
        sl2e[shadow_l2_table_offset(PERDOMAIN_VIRT_START) + i] =
            shadow_l2e_from_mfn(
                page_to_mfn(virt_to_page(d->arch.mm_perdomain_pt) + i),
                __PAGE_HYPERVISOR);

    /* We don't set up a linear mapping here because we can't until this
     * l2h is installed in an l3e.  sh_update_linear_entries() handles
     * the linear mappings when CR3 (and so the fourth l3e) is loaded.
     * We zero them here, just as a safety measure.
     */
    for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ )
        sl2e[shadow_l2_table_offset(LINEAR_PT_VIRT_START) + i] =
            shadow_l2e_empty();
    for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ )
        sl2e[shadow_l2_table_offset(SH_LINEAR_PT_VIRT_START) + i] =
            shadow_l2e_empty();

    if ( shadow_mode_translate(d) )
    {
        /* Install the domain-specific p2m table *
