hap.c

来自「xen 3.2.2 源码」· C语言 代码 · 共 754 行 · 第 1/2 页

C
754
字号
/****************************************************************************** * arch/x86/mm/hap/hap.c * * hardware assisted paging * Copyright (c) 2007 Advanced Micro Devices (Wei Huang) * Parts of this code are Copyright (c) 2007 by XenSource Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */#include <xen/config.h>#include <xen/types.h>#include <xen/mm.h>#include <xen/trace.h>#include <xen/sched.h>#include <xen/perfc.h>#include <xen/irq.h>#include <xen/domain_page.h>#include <xen/guest_access.h>#include <xen/keyhandler.h>#include <asm/event.h>#include <asm/page.h>#include <asm/current.h>#include <asm/flushtlb.h>#include <asm/shared.h>#include <asm/hap.h>#include <asm/paging.h>#include <asm/domain.h>#include "private.h"/* Override macros from asm/page.h to make them work with mfn_t */#undef mfn_to_page#define mfn_to_page(_m) (frame_table + mfn_x(_m))#undef mfn_valid#define mfn_valid(_mfn) (mfn_x(_mfn) < max_page)#undef page_to_mfn#define page_to_mfn(_pg) (_mfn((_pg) - frame_table))/************************************************//*            HAP LOG DIRTY SUPPORT             *//************************************************//* hap code to call when log_dirty is enable. return 0 if no problem found. 
*/int hap_enable_log_dirty(struct domain *d){    /* turn on PG_log_dirty bit in paging mode */    hap_lock(d);    d->arch.paging.mode |= PG_log_dirty;    hap_unlock(d);    /* set l1e entries of P2M table to be read-only. */    p2m_change_type_global(d, p2m_ram_rw, p2m_ram_logdirty);    flush_tlb_mask(d->domain_dirty_cpumask);    return 0;}int hap_disable_log_dirty(struct domain *d){    hap_lock(d);    d->arch.paging.mode &= ~PG_log_dirty;    hap_unlock(d);    /* set l1e entries of P2M table with normal mode */    p2m_change_type_global(d, p2m_ram_logdirty, p2m_ram_rw);    return 0;}void hap_clean_dirty_bitmap(struct domain *d){    /* set l1e entries of P2M table to be read-only. */    p2m_change_type_global(d, p2m_ram_rw, p2m_ram_logdirty);    flush_tlb_mask(d->domain_dirty_cpumask);}/************************************************//*             HAP SUPPORT FUNCTIONS            *//************************************************/static struct page_info *hap_alloc(struct domain *d){    struct page_info *pg = NULL;    void *p;    ASSERT(hap_locked_by_me(d));    if ( unlikely(list_empty(&d->arch.paging.hap.freelist)) )        return NULL;    pg = list_entry(d->arch.paging.hap.freelist.next, struct page_info, list);    list_del(&pg->list);    d->arch.paging.hap.free_pages--;    p = hap_map_domain_page(page_to_mfn(pg));    ASSERT(p != NULL);    clear_page(p);    hap_unmap_domain_page(p);    return pg;}static void hap_free(struct domain *d, mfn_t mfn){    struct page_info *pg = mfn_to_page(mfn);    ASSERT(hap_locked_by_me(d));    d->arch.paging.hap.free_pages++;    list_add_tail(&pg->list, &d->arch.paging.hap.freelist);}static struct page_info *hap_alloc_p2m_page(struct domain *d){    struct page_info *pg;    hap_lock(d);    pg = hap_alloc(d);#if CONFIG_PAGING_LEVELS == 3    /* Under PAE mode, top-level P2M table should be allocated below 4GB space     * because the size of h_cr3 is only 32-bit. 
We use alloc_domheap_pages to     * force this requirement, and exchange the guaranteed 32-bit-clean     * page for the one we just hap_alloc()ed. */    if ( d->arch.paging.hap.p2m_pages == 0         && mfn_x(page_to_mfn(pg)) >= (1UL << (32 - PAGE_SHIFT)) )    {        free_domheap_page(pg);        pg = alloc_domheap_pages(NULL, 0, MEMF_bits(32));        if ( likely(pg != NULL) )        {            void *p = hap_map_domain_page(page_to_mfn(pg));            clear_page(p);            hap_unmap_domain_page(p);        }    }#endif    if ( likely(pg != NULL) )    {        d->arch.paging.hap.total_pages--;        d->arch.paging.hap.p2m_pages++;        page_set_owner(pg, d);        pg->count_info = 1;    }    hap_unlock(d);    return pg;}void hap_free_p2m_page(struct domain *d, struct page_info *pg){    hap_lock(d);    ASSERT(page_get_owner(pg) == d);    /* Should have just the one ref we gave it in alloc_p2m_page() */    if ( (pg->count_info & PGC_count_mask) != 1 )        HAP_ERROR("Odd p2m page count c=%#x t=%"PRtype_info"\n",                  pg->count_info, pg->u.inuse.type_info);    pg->count_info = 0;    /* Free should not decrement domain's total allocation, since     * these pages were allocated without an owner. */    page_set_owner(pg, NULL);    free_domheap_page(pg);    d->arch.paging.hap.p2m_pages--;    ASSERT(d->arch.paging.hap.p2m_pages >= 0);    hap_unlock(d);}/* Return the size of the pool, rounded up to the nearest MB */static unsigned inthap_get_allocation(struct domain *d){    unsigned int pg = d->arch.paging.hap.total_pages;    return ((pg >> (20 - PAGE_SHIFT))            + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));}/* Set the pool of pages to the required number of pages. * Returns 0 for success, non-zero for failure. 
*/static unsigned inthap_set_allocation(struct domain *d, unsigned int pages, int *preempted){    struct page_info *pg;    ASSERT(hap_locked_by_me(d));    while ( d->arch.paging.hap.total_pages != pages )    {        if ( d->arch.paging.hap.total_pages < pages )        {            /* Need to allocate more memory from domheap */            pg = alloc_domheap_page(NULL);            if ( pg == NULL )            {                HAP_PRINTK("failed to allocate hap pages.\n");                return -ENOMEM;            }            d->arch.paging.hap.free_pages++;            d->arch.paging.hap.total_pages++;            list_add_tail(&pg->list, &d->arch.paging.hap.freelist);        }        else if ( d->arch.paging.hap.total_pages > pages )        {            /* Need to return memory to domheap */            ASSERT(!list_empty(&d->arch.paging.hap.freelist));            pg = list_entry(d->arch.paging.hap.freelist.next,                            struct page_info, list);            list_del(&pg->list);            d->arch.paging.hap.free_pages--;            d->arch.paging.hap.total_pages--;            pg->count_info = 0;            free_domheap_page(pg);        }        /* Check to see if we need to yield and try again */        if ( preempted && hypercall_preempt_check() )        {            *preempted = 1;            return 0;        }    }    return 0;}#if CONFIG_PAGING_LEVELS == 4static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn){    struct domain *d = v->domain;    l4_pgentry_t *l4e;    l4e = hap_map_domain_page(l4mfn);    ASSERT(l4e != NULL);    /* Copy the common Xen mappings from the idle domain */    memcpy(&l4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));    /* Install the per-domain mappings for this domain */    l4e[l4_table_offset(PERDOMAIN_VIRT_START)] =        l4e_from_pfn(mfn_x(page_to_mfn(virt_to_page(d->arch.mm_perdomain_l3))),       
                     __PAGE_HYPERVISOR);

    /* Install a linear mapping */
    l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
        l4e_from_pfn(mfn_x(l4mfn), __PAGE_HYPERVISOR);

    /* Install the domain-specific P2M table */
    l4e[l4_table_offset(RO_MPT_VIRT_START)] =
        l4e_from_pfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)),
                     __PAGE_HYPERVISOR);

    hap_unmap_domain_page(l4e);
}
#endif /* CONFIG_PAGING_LEVELS == 4 */

#if CONFIG_PAGING_LEVELS == 3
/* Fill in the Xen-private slots of the high (4th) L2 page of a PAE HAP
 * monitor table: idle-domain mappings, per-domain mappings, and L2
 * entries pointing at the domain's P2M pages for the RO_MPT window.
 * The linear-map slots are deliberately left empty here. */
static void hap_install_xen_entries_in_l2h(struct vcpu *v, mfn_t l2hmfn)
{
    struct domain *d = v->domain;
    l2_pgentry_t *l2e;
    l3_pgentry_t *p2m;
    int i;

    l2e = hap_map_domain_page(l2hmfn);
    ASSERT(l2e != NULL);

    /* Copy the common Xen mappings from the idle domain */
    memcpy(&l2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
           &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
           L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));

    /* Install the per-domain mappings for this domain */
    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
        l2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
            l2e_from_pfn(
                mfn_x(page_to_mfn(virt_to_page(d->arch.mm_perdomain_pt) + i)),
                __PAGE_HYPERVISOR);

    /* No linear mapping; will be set up by monitor-table constructor. */
    for ( i = 0; i < 4; i++ )
        l2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
            l2e_empty();

    /* Install the domain-specific p2m table: map each present L3 entry of
     * the P2M as an L2 entry of the RO_MPT window (covers
     * MACHPHYS_MBYTES>>1 slots). */
    ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
    p2m = hap_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
    for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
    {
        l2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
            (l3e_get_flags(p2m[i]) & _PAGE_PRESENT)
            ? l2e_from_pfn(mfn_x(_mfn(l3e_get_pfn(p2m[i]))),
                           __PAGE_HYPERVISOR)
            : l2e_empty();
    }
    hap_unmap_domain_page(p2m);
    hap_unmap_domain_page(l2e);
}
#endif

#if CONFIG_PAGING_LEVELS == 2
/* Fill in the Xen-private slots of a 2-level HAP monitor L2 table:
 * idle-domain mappings, per-domain mappings, a linear map of the L2
 * itself, and the domain's P2M table in the RO_MPT slot. */
static void hap_install_xen_entries_in_l2(struct vcpu *v, mfn_t l2mfn)
{
    struct domain *d = v->domain;
    l2_pgentry_t *l2e;
    int i;

    l2e = hap_map_domain_page(l2mfn);
    ASSERT(l2e != NULL);

    /* Copy the common Xen mappings from the idle domain */
    memcpy(&l2e[L2_PAGETABLE_FIRST_XEN_SLOT],
           &idle_pg_table[L2_PAGETABLE_FIRST_XEN_SLOT],
           L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));

    /* Install the per-domain mappings for this domain */
    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
        l2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
            l2e_from_pfn(
                mfn_x(page_to_mfn(virt_to_page(d->arch.mm_perdomain_pt) + i)),
                __PAGE_HYPERVISOR);

    /* Install the linear mapping */
    l2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
        l2e_from_pfn(mfn_x(l2mfn), __PAGE_HYPERVISOR);

    /* Install the domain-specific P2M table */
    l2e[l2_table_offset(RO_MPT_VIRT_START)] =
        l2e_from_pfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)),
                            __PAGE_HYPERVISOR);

    hap_unmap_domain_page(l2e);
}
#endif

/* Build a monitor page table for this vcpu from pages of the HAP pool.
 * NOTE(review): this function continues beyond the visible chunk; the
 * remainder (PAE/2-level paths and the oom label) is on the next page. */
static mfn_t hap_make_monitor_table(struct vcpu *v)
{
    struct domain *d = v->domain;
    struct page_info *pg;

    ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);

#if CONFIG_PAGING_LEVELS == 4
    {
        mfn_t m4mfn;
        if ( (pg = hap_alloc(d)) == NULL )
            goto oom;
        m4mfn = page_to_mfn(pg);
        hap_install_xen_entries_in_l4(v, m4mfn);
        return m4mfn;
    }
#elif CONFIG_PAGING_LEVELS == 3
    {
        mfn_t m3mfn, m2mfn;
        l3_pgentry_t *l3e;
        l2_pgentry_t *l2e;
        int i;

        if ( (pg = hap_alloc(d)) == NULL )
            goto oom;
        m3mfn = page_to_mfn(pg);

        /* Install a monitor l2 table in slot 3 of the l3 table.
* This is used for all Xen entries, including linear maps         */        if ( (pg = hap_alloc(d)) == NULL )

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?