/*
 * p2m.c — Xen physical-to-machine (p2m) table management.
 *
 * NOTE(review): extracted from the Xen hypervisor source package
 * ("xen virtual machine source install package"); the full file is
 * 1,188 lines and this is part 1 of 3. Code-viewer chrome from the
 * original extraction has been replaced by this header.
 */
        }
        /* (continuation of a gfn->mfn page-table walk that begins above
         * this view: the L3 entry has been checked, descend to L2/L1.) */
        mfn = _mfn(l3e_get_pfn(*l3e));
        unmap_domain_page(l3e);
    }

    l2e = map_domain_page(mfn_x(mfn));
    l2e += l2_table_offset(addr);
    if ( (l2e_get_flags(*l2e) & _PAGE_PRESENT) == 0 )
    {
        unmap_domain_page(l2e);
        return _mfn(INVALID_MFN);
    }
    else if ( (l2e_get_flags(*l2e) & _PAGE_PSE) )
    {
        /* 2MB superpage mapping: the L2 entry covers the whole range, so
         * the target frame is the superpage base plus the L1 offset. */
        mfn = _mfn(l2e_get_pfn(*l2e) + l1_table_offset(addr));
        *t = p2m_flags_to_type(l2e_get_flags(*l2e));
        unmap_domain_page(l2e);

        ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
        return (p2m_is_valid(*t)) ? mfn : _mfn(INVALID_MFN);
    }

    mfn = _mfn(l2e_get_pfn(*l2e));
    unmap_domain_page(l2e);

    l1e = map_domain_page(mfn_x(mfn));
    l1e += l1_table_offset(addr);
    if ( (l1e_get_flags(*l1e) & _PAGE_PRESENT) == 0 )
    {
        unmap_domain_page(l1e);
        return _mfn(INVALID_MFN);
    }

    mfn = _mfn(l1e_get_pfn(*l1e));
    *t = p2m_flags_to_type(l1e_get_flags(*l1e));
    unmap_domain_page(l1e);

    ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
    /* Only return the mfn for entry types the p2m considers valid. */
    return (p2m_is_valid(*t)) ? mfn : _mfn(INVALID_MFN);
}

/* Read the current domain's p2m table (through the linear mapping).
 *
 * Faster than the full walk above: probes the p2m pages that are mapped
 * read-only into the current address space at RO_MPT_VIRT_START.  Because
 * the p2m is sparse, both probes go through __copy_from_user() so a
 * missing page yields a fault-safe failure rather than an oops.
 *
 * Returns the mfn for @gfn and stores the entry type in *t; returns
 * INVALID_MFN (with *t = p2m_mmio_dm) for unmapped gfns. */
static mfn_t p2m_gfn_to_mfn_current(unsigned long gfn, p2m_type_t *t)
{
    mfn_t mfn = _mfn(INVALID_MFN);
    p2m_type_t p2mt = p2m_mmio_dm;
    paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
    /* XXX This is for compatibility with the old model, where anything not
     * XXX marked as RAM was considered to be emulated MMIO space.
     * XXX Once we start explicitly registering MMIO regions in the p2m
     * XXX we will return p2m_invalid for unmapped gfns */

    if ( gfn <= current->domain->arch.p2m->max_mapped_pfn )
    {
        l1_pgentry_t l1e = l1e_empty();
        l2_pgentry_t l2e = l2e_empty();
        int ret;

        ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START)
               / sizeof(l1_pgentry_t));

        /* First probe the L2 entry via the linear pagetable, to detect a
         * 2MB superpage mapping without touching the (possibly absent)
         * L1 page. */
        ret = __copy_from_user(&l2e,
                               &__linear_l1_table[l1_linear_offset(RO_MPT_VIRT_START) + l2_linear_offset(addr)],
                               sizeof(l2e));

        if ( (ret == 0) && (l2e_get_flags(l2e) & _PAGE_PRESENT) &&
             (l2e_get_flags(l2e) & _PAGE_PSE) )
        {
            p2mt = p2m_flags_to_type(l2e_get_flags(l2e));
            ASSERT(l2e_get_pfn(l2e) != INVALID_MFN || !p2m_is_ram(p2mt));
            if ( p2m_is_valid(p2mt) )
                mfn = _mfn(l2e_get_pfn(l2e) + l1_table_offset(addr));
            else
                p2mt = p2m_mmio_dm;
        }
        else
        {
            /* Need to __copy_from_user because the p2m is sparse and this
             * part might not exist */
            ret = __copy_from_user(&l1e,
                                   &phys_to_machine_mapping[gfn],
                                   sizeof(l1e));

            if ( ret == 0 ) {
                p2mt = p2m_flags_to_type(l1e_get_flags(l1e));
                ASSERT(l1e_get_pfn(l1e) != INVALID_MFN || !p2m_is_ram(p2mt));
                if ( p2m_is_valid(p2mt) )
                    mfn = _mfn(l1e_get_pfn(l1e));
                else
                    /* XXX see above */
                    p2mt = p2m_mmio_dm;
            }
        }
    }

    *t = p2mt;
    return mfn;
}

/* Init the datastructures for later use by the p2m code.
 *
 * Allocates and zeroes the per-domain p2m_domain structure and installs
 * the implementation hooks (set/get/change-type).  On Intel hardware
 * with HAP enabled the hooks are then overridden for EPT by
 * ept_p2m_init().  Returns 0 on success, -ENOMEM on allocation failure. */
int p2m_init(struct domain *d)
{
    struct p2m_domain *p2m;

    p2m = xmalloc(struct p2m_domain);
    if ( p2m == NULL )
        return -ENOMEM;

    d->arch.p2m = p2m;

    memset(p2m, 0, sizeof(*p2m));
    p2m_lock_init(p2m);
    INIT_LIST_HEAD(&p2m->pages);

    p2m->set_entry = p2m_set_entry;
    p2m->get_entry = p2m_gfn_to_mfn;
    p2m->get_entry_current = p2m_gfn_to_mfn_current;
    p2m->change_entry_type_global = p2m_change_type_global;

    if ( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled &&
         (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
        ept_p2m_init(d);

    return 0;
}

/* Globally convert all p2m entries of type @ot to type @nt, under the
 * p2m lock, via the implementation hook installed in p2m_init(). */
void p2m_change_entry_type_global(struct domain *d,
                                  p2m_type_t ot, p2m_type_t nt)
{
    struct p2m_domain *p2m = d->arch.p2m;

    p2m_lock(p2m);
    p2m->change_entry_type_global(d, ot, nt);
    p2m_unlock(p2m);
}

/* Set p2m entries for a range of 2^page_order frames starting at @gfn.
 *
 * The range is carved into the largest naturally-aligned chunks the
 * implementation supports: order 9 (512 frames, a 2MB superpage) when
 * gfn, mfn and the remaining count are all 512-frame aligned, otherwise
 * single 4kB entries.  Returns the last set_entry result.
 * NOTE(review): an intermediate failure is not reported if a later
 * chunk succeeds — rc is simply overwritten each iteration. */
static
int set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
                     unsigned int page_order, p2m_type_t p2mt)
{
    unsigned long todo = 1ul << page_order;
    unsigned int order;
    int rc = 0;

    while ( todo )
    {
        order = (((gfn | mfn_x(mfn) | todo) & ((1ul << 9) - 1)) == 0) ? 9 : 0;
        rc = d->arch.p2m->set_entry(d, gfn, mfn, order, p2mt);
        gfn += 1ul << order;
        /* Don't advance an INVALID_MFN — it is a sentinel, not a frame. */
        if ( mfn_x(mfn) != INVALID_MFN )
            mfn = _mfn(mfn_x(mfn) + (1ul << order));
        todo -= 1ul << order;
    }

    return rc;
}

// Allocate a new p2m table for a domain.
//
// The structure of the p2m table is that of a pagetable for xen (i.e. it is
// controlled by CONFIG_PAGING_LEVELS).
//
// The alloc_page and free_page functions will be used to get memory to
// build the p2m, and to release it again at the end of day.
//
// Returns 0 for success or -errno.
//
int p2m_alloc_table(struct domain *d,
                    struct page_info * (*alloc_page)(struct domain *d),
                    void (*free_page)(struct domain *d, struct page_info *pg))
{
    mfn_t mfn = _mfn(INVALID_MFN);
    struct list_head *entry;
    struct page_info *page, *p2m_top;
    unsigned int page_count = 0;
    unsigned long gfn = -1UL;
    struct p2m_domain *p2m = d->arch.p2m;

    p2m_lock(p2m);

    if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
    {
        P2M_ERROR("p2m already allocated for this domain\n");
        p2m_unlock(p2m);
        return -EINVAL;
    }

    P2M_PRINTK("allocating p2m table\n");

    p2m->alloc_page = alloc_page;
    p2m->free_page = free_page;

    /* Allocate the top-level page and mark it as a validated L4/L3
     * pagetable (depending on CONFIG_PAGING_LEVELS). */
    p2m_top = p2m->alloc_page(d);
    if ( p2m_top == NULL )
    {
        p2m_unlock(p2m);
        return -ENOMEM;
    }
    list_add_tail(&p2m_top->list, &p2m->pages);

    p2m_top->count_info = 1;
    p2m_top->u.inuse.type_info =
#if CONFIG_PAGING_LEVELS == 4
        PGT_l4_page_table
#else
        PGT_l3_page_table
#endif
        | 1 | PGT_validated;

    d->arch.phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));

    P2M_PRINTK("populating p2m table\n");

    /* Initialise physmap tables for slot zero. Other code assumes this. */
    if ( !set_p2m_entry(d, 0, _mfn(INVALID_MFN), 0,
                        p2m_invalid) )
        goto error;

    /* Copy all existing mappings from the page list and m2p */
    for ( entry = d->page_list.next;
          entry != &d->page_list;
          entry = entry->next )
    {
        page = list_entry(entry, struct page_info, list);
        mfn = page_to_mfn(page);
        gfn = get_gpfn_from_mfn(mfn_x(mfn));
        page_count++;
        /* Skip frames whose m2p entry is the 0x55.. debug poison pattern
         * or INVALID_M2P_ENTRY — they have no guest-physical mapping. */
        if (
#ifdef __x86_64__
            (gfn != 0x5555555555555555L)
#else
            (gfn != 0x55555555L)
#endif
             && gfn != INVALID_M2P_ENTRY
            && !set_p2m_entry(d, gfn, mfn, 0, p2m_ram_rw) )
            goto error;
    }

    P2M_PRINTK("p2m table initialised (%u pages)\n", page_count);
    p2m_unlock(p2m);
    return 0;

 error:
    P2M_PRINTK("failed to initialize p2m table, gfn=%05lx, mfn=%"
               PRI_mfn "\n", gfn, mfn_x(mfn));
    p2m_unlock(p2m);
    return -ENOMEM;
}

void p2m_teardown(struct domain *d)
/* Return all the p2m pages to Xen.
 * We know we don't have any extra mappings to these pages */
{
    struct list_head *entry, *n;
    struct page_info *pg;
    struct p2m_domain *p2m = d->arch.p2m;

    p2m_lock(p2m);
    d->arch.phys_table = pagetable_null();

    /* _safe variant: free_page() invalidates the entry being walked. */
    list_for_each_safe(entry, n, &p2m->pages)
    {
        pg = list_entry(entry, struct page_info, list);
        list_del(entry);
        p2m->free_page(d, pg);
    }
    p2m_unlock(p2m);
}

/* Free the p2m_domain structure itself (pages were returned by
 * p2m_teardown()). */
void p2m_final_teardown(struct domain *d)
{
    xfree(d->arch.p2m);
    d->arch.p2m = NULL;
}

#if P2M_AUDIT
/* Consistency audit: cross-check the m2p table, the p2m table and (for
 * the current domain) the linear-map lookup against each other, logging
 * and counting mismatches.  Debug-only (gated on P2M_AUDIT). */
static void audit_p2m(struct domain *d)
{
    struct list_head *entry;
    struct page_info *page;
    struct domain *od;
    unsigned long mfn, gfn, m2pfn, lp2mfn = 0;
    mfn_t p2mfn;
    unsigned long orphans_d = 0, orphans_i = 0, mpbad = 0, pmbad = 0;
    int test_linear;
    p2m_type_t type;

    if ( !paging_mode_translate(d) )
        return;

    //P2M_PRINTK("p2m audit starts\n");

    /* The linear-map check only works for the currently-running domain. */
    test_linear = ( (d == current->domain)
                    && !pagetable_is_null(current->arch.monitor_table) );
    if ( test_linear )
        flush_tlb_local();

    /* Audit part one: walk the domain's page allocation list, checking
     * the m2p entries. */
    for ( entry = d->page_list.next;
          entry != &d->page_list;
          entry = entry->next )
    {
        page = list_entry(entry, struct page_info, list);
        mfn = mfn_x(page_to_mfn(page));

        // P2M_PRINTK("auditing guest page, mfn=%#lx\n", mfn);

        od = page_get_owner(page);

        if ( od != d )
        {
            P2M_PRINTK("wrong owner %#lx -> %p(%u) != %p(%u)\n",
                       mfn, od, (od?od->domain_id:-1), d, d->domain_id);
            continue;
        }

        gfn = get_gpfn_from_mfn(mfn);
        if ( gfn == INVALID_M2P_ENTRY )
        {
            orphans_i++;
            //P2M_PRINTK("orphaned guest page: mfn=%#lx has invalid gfn\n",
            //               mfn);
            continue;
        }

        if ( gfn == 0x55555555 )
        {
            orphans_d++;
            //P2M_PRINTK("orphaned guest page: mfn=%#lx has debug gfn\n",
            //               mfn);
            continue;
        }

        p2mfn = gfn_to_mfn_foreign(d, gfn, &type);
        if ( mfn_x(p2mfn) != mfn )
        {
            mpbad++;
            P2M_PRINTK("map mismatch mfn %#lx -> gfn %#lx -> mfn %#lx"
                       " (-> gfn %#lx)\n",
                       mfn, gfn, mfn_x(p2mfn),
                       (mfn_valid(p2mfn)
                        ? get_gpfn_from_mfn(mfn_x(p2mfn))
                        : -1u));
            /* This m2p entry is stale: the domain has another frame in
             * this physical slot.  No great disaster, but for neatness,
             * blow away the m2p entry. */
            set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
        }

        if ( test_linear && (gfn <= d->arch.p2m->max_mapped_pfn) )
        {
            lp2mfn = mfn_x(gfn_to_mfn_current(gfn, &type));
            if ( lp2mfn != mfn_x(p2mfn) )
            {
                P2M_PRINTK("linear mismatch gfn %#lx -> mfn %#lx "
                           "(!= mfn %#lx)\n", gfn, lp2mfn, mfn_x(p2mfn));
            }
        }

        // P2M_PRINTK("OK: mfn=%#lx, gfn=%#lx, p2mfn=%#lx, lp2mfn=%#lx\n",
        //                mfn, gfn, p2mfn, lp2mfn);
    }

    /* Audit part two: walk the domain's p2m table, checking the entries. */
    if ( pagetable_get_pfn(d->arch.phys_table) != 0 )
    {
        l2_pgentry_t *l2e;
        l1_pgentry_t *l1e;
        int i1, i2;

#if CONFIG_PAGING_LEVELS == 4
        l4_pgentry_t *l4e;
        l3_pgentry_t *l3e;
        int i3, i4;
        l4e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
#else /* CONFIG_PAGING_LEVELS == 3 */
        l3_pgentry_t *l3e;
        int i3;
        l3e = map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
#endif

        gfn = 0;
#if CONFIG_PAGING_LEVELS >= 4
        for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
        {
            if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
            {
                gfn += 1 << (L4_PAGETABLE_SHIFT - PAGE_SHIFT);
                continue;
            }
            l3e = map_domain_page(mfn_x(_mfn(l4e_get_pfn(l4e[i4]))));
#endif
            /* (function continues past this view — truncated mid-loop) */
            for ( i3 = 0;
                  i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
/* (audit_p2m continues in part 2 of 3 of this file.) */