
multi.c (xen 3.2.2 source, C)
#else /* 64-bit l2: touch all entries except for PAE compat guests. */
#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)       \
do {                                                                        \
    int _i;                                                                 \
    int _xen = !shadow_mode_external(_dom);                                 \
    shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                         \
    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_64_shadow ||     \
           mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_64_shadow);     \
    for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                  \
    {                                                                       \
        if ( (!(_xen))                                                      \
             || !is_pv_32on64_domain(_dom)                                  \
             || mfn_to_shadow_page(_sl2mfn)->type != SH_type_l2h_64_shadow  \
             || (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) )           \
        {                                                                   \
            (_sl2e) = _sp + _i;                                             \
            if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT )           \
                {_code}                                                     \
            if ( _done ) break;                                             \
            increment_ptr_to_guest_entry(_gl2p);                            \
        }                                                                   \
    }                                                                       \
    unmap_shadow_page(_sp);                                                 \
} while (0)
#endif /* different kinds of l2 */

#if GUEST_PAGING_LEVELS == 4

/* 64-bit l3: touch all entries */
#define SHADOW_FOREACH_L3E(_sl3mfn, _sl3e, _gl3p, _done, _code)         \
do {                                                                    \
    int _i;                                                             \
    shadow_l3e_t *_sp = map_shadow_page((_sl3mfn));                     \
    ASSERT(mfn_to_shadow_page(_sl3mfn)->type == SH_type_l3_64_shadow);  \
    for ( _i = 0; _i < SHADOW_L3_PAGETABLE_ENTRIES; _i++ )              \
    {                                                                   \
        (_sl3e) = _sp + _i;                                             \
        if ( shadow_l3e_get_flags(*(_sl3e)) & _PAGE_PRESENT )           \
            {_code}                                                     \
        if ( _done ) break;                                             \
        increment_ptr_to_guest_entry(_gl3p);                            \
    }                                                                   \
    unmap_shadow_page(_sp);                                             \
} while (0)

/* 64-bit l4: avoid Xen mappings */
#define SHADOW_FOREACH_L4E(_sl4mfn, _sl4e, _gl4p, _done, _dom, _code)   \
do {                                                                    \
    shadow_l4e_t *_sp = map_shadow_page((_sl4mfn));                     \
    int _xen = !shadow_mode_external(_dom);                             \
    int _i;                                                             \
    ASSERT(mfn_to_shadow_page(_sl4mfn)->type == SH_type_l4_64_shadow);  \
    for ( _i = 0; _i < SHADOW_L4_PAGETABLE_ENTRIES; _i++ )              \
    {                                                                   \
        if ( (!(_xen)) || is_guest_l4_slot(_dom, _i) )                  \
        {                                                               \
            (_sl4e) = _sp + _i;                                         \
            if ( shadow_l4e_get_flags(*(_sl4e)) & _PAGE_PRESENT )       \
                {_code}                                                 \
            if ( _done ) break;                                         \
        }                                                               \
        increment_ptr_to_guest_entry(_gl4p);                            \
    }                                                                   \
    unmap_shadow_page(_sp);                                             \
} while (0)

#endif
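/* Illustrative sketch, not part of the original file: the
 * SHADOW_FOREACH_L*E iterators above are the walkers used by the
 * destroy/validate code later in this file.  A hypothetical caller that
 * clears every present entry of an l3 shadow might look like this
 * (shadow_set_l3e() is the setter this file uses for l3 entries):
 *
 *     shadow_l3e_t *sl3e;
 *     guest_l3e_t *gl3p = ...;   // guest-side cursor, advanced by
 *                                // increment_ptr_to_guest_entry()
 *     SHADOW_FOREACH_L3E(sl3mfn, sl3e, &gl3p, 0,
 *     {
 *         (void) shadow_set_l3e(v, sl3e, shadow_l3e_empty(), sl3mfn);
 *     });
 *
 * _code executes only for entries with _PAGE_PRESENT set; the _done
 * expression is re-evaluated after each entry and ends the walk early
 * once it becomes nonzero (callers pass 0 to walk the whole table).
 */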
/**************************************************************************/
/* Functions to install Xen mappings and linear mappings in shadow pages */

// XXX -- this function should probably be moved to shadow-common.c, but that
//        probably wants to wait until the shadow types have been moved from
//        shadow-types.h to shadow-private.h
//
#if CONFIG_PAGING_LEVELS == 4 && GUEST_PAGING_LEVELS == 4
void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn)
{
    struct domain *d = v->domain;
    shadow_l4e_t *sl4e;

    sl4e = sh_map_domain_page(sl4mfn);
    ASSERT(sl4e != NULL);
    ASSERT(sizeof (l4_pgentry_t) == sizeof (shadow_l4e_t));

    /* Copy the common Xen mappings from the idle domain */
    memcpy(&sl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));

    /* Install the per-domain mappings for this domain */
    sl4e[shadow_l4_table_offset(PERDOMAIN_VIRT_START)] =
        shadow_l4e_from_mfn(page_to_mfn(virt_to_page(d->arch.mm_perdomain_l3)),
                            __PAGE_HYPERVISOR);

    /* Linear mapping */
    sl4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
        shadow_l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR);

    if ( shadow_mode_translate(v->domain) && !shadow_mode_external(v->domain) )
    {
        // linear tables may not be used with translated PV guests
        sl4e[shadow_l4_table_offset(LINEAR_PT_VIRT_START)] =
            shadow_l4e_empty();
    }
    else
    {
        sl4e[shadow_l4_table_offset(LINEAR_PT_VIRT_START)] =
            shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR);
    }

    if ( shadow_mode_translate(v->domain) )
    {
        /* install domain-specific P2M table */
        sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
            shadow_l4e_from_mfn(pagetable_get_mfn(d->arch.phys_table),
                                __PAGE_HYPERVISOR);
    }

    if ( is_pv_32on64_domain(v->domain) )
    {
        /* install compat arg xlat entry */
        sl4e[shadow_l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
            shadow_l4e_from_mfn(
                    page_to_mfn(virt_to_page(d->arch.mm_arg_xlat_l3)),
                    __PAGE_HYPERVISOR);
    }

    sh_unmap_domain_page(sl4e);
}
#endif
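/* Added commentary, not in the original file: the "linear mapping"
 * slots installed above are recursive page-table entries.  Making the
 * SH_LINEAR_PT_VIRT_START l4 slot point back at sl4mfn itself causes
 * the shadow tables to map themselves: one step down that slot reaches
 * the shadow l3 pages, two steps reach the l2 pages, and so on, so Xen
 * can read or write any shadow entry through a fixed virtual window
 * without an explicit map/unmap each time.  LINEAR_PT_VIRT_START does
 * the same for the guest's own top level (gl4mfn); it is left empty
 * for translated PV guests, whose guest tables hold frame numbers that
 * Xen must not dereference directly.
 */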
#if CONFIG_PAGING_LEVELS >= 3 && GUEST_PAGING_LEVELS >= 3
// For 3-on-3 PV guests, we need to make sure the xen mappings are in
// place, which means that we need to populate the l2h entry in the l3
// table.
static void sh_install_xen_entries_in_l2h(struct vcpu *v, mfn_t sl2hmfn)
{
    struct domain *d = v->domain;
    shadow_l2e_t *sl2e;
#if CONFIG_PAGING_LEVELS == 3
    int i;
#else
    if ( !is_pv_32on64_vcpu(v) )
        return;
#endif

    sl2e = sh_map_domain_page(sl2hmfn);
    ASSERT(sl2e != NULL);
    ASSERT(sizeof (l2_pgentry_t) == sizeof (shadow_l2e_t));

#if CONFIG_PAGING_LEVELS == 3

    /* Copy the common Xen mappings from the idle domain */
    memcpy(&sl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
           &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
           L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));

    /* Install the per-domain mappings for this domain */
    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
        sl2e[shadow_l2_table_offset(PERDOMAIN_VIRT_START) + i] =
            shadow_l2e_from_mfn(
                page_to_mfn(virt_to_page(d->arch.mm_perdomain_pt) + i),
                __PAGE_HYPERVISOR);

    /* We don't set up a linear mapping here because we can't until this
     * l2h is installed in an l3e.  sh_update_linear_entries() handles
     * the linear mappings when CR3 (and so the fourth l3e) is loaded.
     * We zero them here, just as a safety measure.
     */
    for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ )
        sl2e[shadow_l2_table_offset(LINEAR_PT_VIRT_START) + i] =
            shadow_l2e_empty();
    for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ )
        sl2e[shadow_l2_table_offset(SH_LINEAR_PT_VIRT_START) + i] =
            shadow_l2e_empty();

    if ( shadow_mode_translate(d) )
    {
        /* Install the domain-specific p2m table */
        l3_pgentry_t *p2m;
        ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
        p2m = sh_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
        for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
        {
            sl2e[shadow_l2_table_offset(RO_MPT_VIRT_START) + i] =
                (l3e_get_flags(p2m[i]) & _PAGE_PRESENT)
                ? shadow_l2e_from_mfn(_mfn(l3e_get_pfn(p2m[i])),
                                      __PAGE_HYPERVISOR)
                : shadow_l2e_empty();
        }
        sh_unmap_domain_page(p2m);
    }

#else

    /* Copy the common Xen mappings from the idle domain */
    memcpy(
        &sl2e[COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d)],
        &compat_idle_pg_table_l2[l2_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
        COMPAT_L2_PAGETABLE_XEN_SLOTS(d) * sizeof(*sl2e));

#endif

    sh_unmap_domain_page(sl2e);
}
#endif


#if CONFIG_PAGING_LEVELS == 2 && GUEST_PAGING_LEVELS == 2
void sh_install_xen_entries_in_l2(struct vcpu *v, mfn_t gl2mfn, mfn_t sl2mfn)
{
    struct domain *d = v->domain;
    shadow_l2e_t *sl2e;
    int i;

    sl2e = sh_map_domain_page(sl2mfn);
    ASSERT(sl2e != NULL);
    ASSERT(sizeof (l2_pgentry_t) == sizeof (shadow_l2e_t));

    /* Copy the common Xen mappings from the idle domain */
    memcpy(&sl2e[L2_PAGETABLE_FIRST_XEN_SLOT],
           &idle_pg_table[L2_PAGETABLE_FIRST_XEN_SLOT],
           L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));

    /* Install the per-domain mappings for this domain */
    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
        sl2e[shadow_l2_table_offset(PERDOMAIN_VIRT_START) + i] =
            shadow_l2e_from_mfn(
                page_to_mfn(virt_to_page(d->arch.mm_perdomain_pt) + i),
                __PAGE_HYPERVISOR);

    /* Linear mapping */
    sl2e[shadow_l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
        shadow_l2e_from_mfn(sl2mfn, __PAGE_HYPERVISOR);

    if ( shadow_mode_translate(v->domain) && !shadow_mode_external(v->domain) )
    {
        // linear tables may not be used with translated PV guests
        sl2e[shadow_l2_table_offset(LINEAR_PT_VIRT_START)] =
            shadow_l2e_empty();
    }
    else
    {
        sl2e[shadow_l2_table_offset(LINEAR_PT_VIRT_START)] =
            shadow_l2e_from_mfn(gl2mfn, __PAGE_HYPERVISOR);
    }

    if ( shadow_mode_translate(d) )
    {
        /* install domain-specific P2M table */
        sl2e[shadow_l2_table_offset(RO_MPT_VIRT_START)] =
            shadow_l2e_from_mfn(pagetable_get_mfn(d->arch.phys_table),
                                __PAGE_HYPERVISOR);
    }

    sh_unmap_domain_page(sl2e);
}
#endif
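/* Illustrative note, not from the original file: the
 * shadow_lNe_from_mfn() constructors used throughout these installers
 * just pack a machine frame number and permission flags into a
 * PTE-sized value; a rough, hypothetical equivalent for l2 would be:
 *
 *     static inline shadow_l2e_t sketch_l2e_from_mfn(mfn_t m, u32 flags)
 *     {
 *         shadow_l2e_t e;
 *         e.l2 = ((u64)mfn_x(m) << PAGE_SHIFT) | flags;  // frame | perms
 *         return e;
 *     }
 *
 * __PAGE_HYPERVISOR supplies present + writable without _PAGE_USER, so
 * each slot written above is a supervisor-only mapping visible to Xen
 * but not to the guest.
 */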
/**************************************************************************/
/* Create a shadow of a given guest page.
 */
static mfn_t
sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
{
    mfn_t smfn = shadow_alloc(v->domain, shadow_type, mfn_x(gmfn));
    SHADOW_DEBUG(MAKE_SHADOW, "(%05lx, %u)=>%05lx\n",
                  mfn_x(gmfn), shadow_type, mfn_x(smfn));

    if ( shadow_type != SH_type_l2_32_shadow
         && shadow_type != SH_type_l2_pae_shadow
         && shadow_type != SH_type_l2h_pae_shadow
         && shadow_type != SH_type_l4_64_shadow )
        /* Lower-level shadow, not yet linked from a higher level */
        mfn_to_shadow_page(smfn)->up = 0;

#if GUEST_PAGING_LEVELS == 4
#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
    if ( shadow_type == SH_type_l4_64_shadow &&
         unlikely(v->domain->arch.paging.shadow.opt_flags & SHOPT_LINUX_L3_TOPLEVEL) )
    {
        /* We're shadowing a new l4, but we've been assuming the guest uses
         * only one l4 per vcpu and context switches using an l4 entry.
         * Count the number of active l4 shadows.  If there are enough
         * of them, decide that this isn't an old linux guest, and stop
         * pinning l3es.  This is not very quick but it doesn't happen
         * very often. */
        struct list_head *l, *t;
        struct shadow_page_info *sp;
        struct vcpu *v2;
        int l4count = 0, vcpus = 0;
        list_for_each(l, &v->domain->arch.paging.shadow.pinned_shadows)
        {
            sp = list_entry(l, struct shadow_page_info, list);
            if ( sp->type == SH_type_l4_64_shadow )
                l4count++;
        }
        for_each_vcpu ( v->domain, v2 )
            vcpus++;
        if ( l4count > 2 * vcpus )
        {
            /* Unpin all the pinned l3 tables, and don't pin any more. */
            list_for_each_safe(l, t, &v->domain->arch.paging.shadow.pinned_shadows)
            {
                sp = list_entry(l, struct shadow_page_info, list);
                if ( sp->type == SH_type_l3_64_shadow )
                    sh_unpin(v, shadow_page_to_mfn(sp));
            }
            v->domain->arch.paging.shadow.opt_flags &= ~SHOPT_LINUX_L3_TOPLEVEL;
        }
    }
#endif
#endif

    // Create the Xen mappings...
    if ( !shadow_mode_external(v->domain) )
    {
        switch (shadow_type)
        {
#if CONFIG_PAGING_LEVELS == 4 && GUEST_PAGING_LEVELS == 4
        case SH_type_l4_shadow:
            sh_install_xen_entries_in_l4(v, gmfn, smfn); break;
#endif
#if CONFIG_PAGING_LEVELS >= 3 && GUEST_PAGING_LEVELS >= 3
        case SH_type_l2h_shadow:
            sh_install_xen_entries_in_l2h(v, smfn); break;
#endif
#if CONFIG_PAGING_LEVELS == 2 && GUEST_PAGING_LEVELS == 2
        case SH_type_l2_shadow:
            sh_install_xen_entries_in_l2(v, gmfn, smfn); break;
#endif
        default: /* Do nothing */ break;
        }
