common.c

From the "xen virtual machine source code installation package" · C code · 1,973 lines total · page 1/5

        } else {
            /* Merge with successor block? */
            if ( ((sp+mask)->type != PGT_none) ||
                 ((sp+mask)->order != order) )
                break;
            list_del(&(sp+mask)->list);
        }
    }

    sp->order = order;
    list_add_tail(&sp->list, &d->arch.paging.shadow.freelists[order]);
}

/* Divert some memory from the pool to be used by the p2m mapping.
 * This action is irreversible: the p2m mapping only ever grows.
 * That's OK because the p2m table only exists for translated domains,
 * and those domains can't ever turn off shadow mode.
 * Also, we only ever allocate a max-order chunk, so as to preserve
 * the invariant that shadow_prealloc() always works.
 * Returns 0 iff it can't get a chunk (the caller should then
 * free up some pages in domheap and call sh_set_allocation);
 * returns non-zero on success.
 */
static int
sh_alloc_p2m_pages(struct domain *d)
{
    struct page_info *pg;
    u32 i;
    unsigned int order = shadow_max_order(d);

    ASSERT(shadow_locked_by_me(d));

    if ( d->arch.paging.shadow.total_pages
         < (shadow_min_acceptable_pages(d) + (1 << order)) )
        return 0; /* Not enough shadow memory: need to increase it first */

    pg = mfn_to_page(shadow_alloc(d, SH_type_p2m_table, 0));
    d->arch.paging.shadow.p2m_pages += (1 << order);
    d->arch.paging.shadow.total_pages -= (1 << order);
    for ( i = 0; i < (1U << order); i++ )
    {
        /* Unlike shadow pages, mark p2m pages as owned by the domain.
         * Marking the domain as the owner would normally allow the guest to
         * create mappings of these pages, but these p2m pages will never be
         * in the domain's guest-physical address space, and so that is not
         * believed to be a concern.
         */
        page_set_owner(&pg[i], d);
        pg[i].count_info = 1;
        list_add_tail(&pg[i].list, &d->arch.paging.shadow.p2m_freelist);
    }
    return 1;
}

// Returns 0 if no memory is available...
static struct page_info *
shadow_alloc_p2m_page(struct domain *d)
{
    struct list_head *entry;
    struct page_info *pg;
    mfn_t mfn;
    void *p;

    shadow_lock(d);

    if ( list_empty(&d->arch.paging.shadow.p2m_freelist) &&
         !sh_alloc_p2m_pages(d) )
    {
        shadow_unlock(d);
        return NULL;
    }
    entry = d->arch.paging.shadow.p2m_freelist.next;
    list_del(entry);

    shadow_unlock(d);

    pg = list_entry(entry, struct page_info, list);
    mfn = page_to_mfn(pg);
    p = sh_map_domain_page(mfn);
    clear_page(p);
    sh_unmap_domain_page(p);

    return pg;
}

static void
shadow_free_p2m_page(struct domain *d, struct page_info *pg)
{
    ASSERT(page_get_owner(pg) == d);
    /* Should have just the one ref we gave it in alloc_p2m_page() */
    if ( (pg->count_info & PGC_count_mask) != 1 )
    {
        SHADOW_ERROR("Odd p2m page count c=%#x t=%"PRtype_info"\n",
                     pg->count_info, pg->u.inuse.type_info);
    }
    pg->count_info = 0;
    /* Free should not decrement domain's total allocation, since
     * these pages were allocated without an owner. */
    page_set_owner(pg, NULL);
#if defined(__x86_64__)
    spin_lock_init(&pg->lock);
#endif
    free_domheap_pages(pg, 0);
    d->arch.paging.shadow.p2m_pages--;
    perfc_decr(shadow_alloc_count);
}
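/* Illustrative sketch, added in this edit and not part of the original
 * file: how a caller pairs the two routines above.  The page returned
 * by shadow_alloc_p2m_page() comes back zeroed, owned by the domain,
 * and holding the single reference (count_info == 1) that
 * shadow_free_p2m_page() asserts on release.  The round-trip below is
 * hypothetical, not a real Xen call site. */
static int sh_p2m_page_roundtrip(struct domain *d)
{
    struct page_info *pg = shadow_alloc_p2m_page(d);
    if ( pg == NULL )
        return 0; /* freelist was empty and sh_alloc_p2m_pages() failed */
    /* A real caller would now install page_to_mfn(pg) in the p2m;
     * freeing is normally reserved for teardown or error paths. */
    shadow_free_p2m_page(d, pg);
    return 1;
}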
#if CONFIG_PAGING_LEVELS == 3
static void p2m_install_entry_in_monitors(struct domain *d,
                                          l3_pgentry_t *l3e)
/* Special case, only used for external-mode domains on PAE hosts:
 * update the mapping of the p2m table.  Once again, this is trivial in
 * other paging modes (one top-level entry points to the top-level p2m,
 * no maintenance needed), but PAE makes life difficult by needing a
 * copy of the eight l3es of the p2m table in eight l2h slots in the
 * monitor table.  This function makes fresh copies when a p2m l3e
 * changes. */
{
    l2_pgentry_t *ml2e;
    struct vcpu *v;
    unsigned int index;

    index = ((unsigned long)l3e & ~PAGE_MASK) / sizeof(l3_pgentry_t);
    ASSERT(index < MACHPHYS_MBYTES>>1);

    for_each_vcpu(d, v)
    {
        if ( pagetable_get_pfn(v->arch.monitor_table) == 0 )
            continue;
        ASSERT(shadow_mode_external(v->domain));

        SHADOW_DEBUG(P2M, "d=%u v=%u index=%u mfn=%#lx\n",
                     d->domain_id, v->vcpu_id, index, l3e_get_pfn(*l3e));

        if ( v == current ) /* OK to use linear map of monitor_table */
            ml2e = __linear_l2_table + l2_linear_offset(RO_MPT_VIRT_START);
        else
        {
            l3_pgentry_t *ml3e;
            ml3e = sh_map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
            ASSERT(l3e_get_flags(ml3e[3]) & _PAGE_PRESENT);
            ml2e = sh_map_domain_page(_mfn(l3e_get_pfn(ml3e[3])));
            ml2e += l2_table_offset(RO_MPT_VIRT_START);
            sh_unmap_domain_page(ml3e);
        }
        ml2e[index] = l2e_from_pfn(l3e_get_pfn(*l3e), __PAGE_HYPERVISOR);
        if ( v != current )
            sh_unmap_domain_page(ml2e);
    }
}
#endif
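/* Worked example, added in this edit and not part of the original file:
 * the index computed above is just the l3e's byte offset within its
 * page divided by the entry size.  With 8-byte PAE entries, an l3e at
 * page offset 0x18 gives index = 0x18 / sizeof(l3_pgentry_t) = 3, so
 * the fresh copy lands in ml2e[3] of the l2h range that maps
 * RO_MPT_VIRT_START in each vcpu's monitor table. */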
/* Set the pool of shadow pages to the required number of pages.
 * Input will be rounded up to at least shadow_min_acceptable_pages(),
 * plus space for the p2m table.
 * Returns 0 for success, non-zero for failure. */
static unsigned int sh_set_allocation(struct domain *d,
                                      unsigned int pages,
                                      int *preempted)
{
    struct shadow_page_info *sp;
    unsigned int lower_bound;
    unsigned int j, order = shadow_max_order(d);

    ASSERT(shadow_locked_by_me(d));

    /* Don't allocate less than the minimum acceptable, plus one page per
     * megabyte of RAM (for the p2m table) */
    lower_bound = shadow_min_acceptable_pages(d) + (d->tot_pages / 256);
    if ( pages > 0 && pages < lower_bound )
        pages = lower_bound;
    /* Round up to largest block size */
    pages = (pages + ((1<<SHADOW_MAX_ORDER)-1)) & ~((1<<SHADOW_MAX_ORDER)-1);

    SHADOW_PRINTK("current %i target %i\n",
                  d->arch.paging.shadow.total_pages, pages);

    while ( d->arch.paging.shadow.total_pages != pages )
    {
        if ( d->arch.paging.shadow.total_pages < pages )
        {
            /* Need to allocate more memory from domheap */
            sp = (struct shadow_page_info *)
                alloc_domheap_pages(NULL, order, MEMF_node(domain_to_node(d)));
            if ( sp == NULL )
            {
                SHADOW_PRINTK("failed to allocate shadow pages.\n");
                return -ENOMEM;
            }
            d->arch.paging.shadow.free_pages += 1 << order;
            d->arch.paging.shadow.total_pages += 1 << order;
            for ( j = 0; j < 1U << order; j++ )
            {
                sp[j].type = 0;
                sp[j].pinned = 0;
                sp[j].count = 0;
                sp[j].mbz = 0;
                sp[j].tlbflush_timestamp = 0; /* Not in any TLB */
            }
            sp->order = order;
            list_add_tail(&sp->list, &d->arch.paging.shadow.freelists[order]);
        }
        else if ( d->arch.paging.shadow.total_pages > pages )
        {
            /* Need to return memory to domheap */
            _shadow_prealloc(d, order, 1);
            ASSERT(!list_empty(&d->arch.paging.shadow.freelists[order]));
            sp = list_entry(d->arch.paging.shadow.freelists[order].next,
                            struct shadow_page_info, list);
            list_del(&sp->list);
#if defined(__x86_64__)
            /*
             * Re-instate lock field which we overwrite with shadow_page_info.
             * This was safe, since the lock is only used on guest pages.
             */
            for ( j = 0; j < 1U << order; j++ )
                spin_lock_init(&((struct page_info *)sp)[j].lock);
#endif
            d->arch.paging.shadow.free_pages -= 1 << order;
            d->arch.paging.shadow.total_pages -= 1 << order;
            free_domheap_pages((struct page_info *)sp, order);
        }

        /* Check to see if we need to yield and try again */
        if ( preempted && hypercall_preempt_check() )
        {
            *preempted = 1;
            return 0;
        }
    }

    return 0;
}

/* Return the size of the shadow pool, rounded up to the nearest MB */
static unsigned int shadow_get_allocation(struct domain *d)
{
    unsigned int pg = d->arch.paging.shadow.total_pages;
    return ((pg >> (20 - PAGE_SHIFT))
            + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
}

/**************************************************************************/
/* Hash table for storing the guest->shadow mappings.
 * The table itself is an array of pointers to shadows; the shadows are then
 * threaded on a singly-linked list of shadows with the same hash value */

#define SHADOW_HASH_BUCKETS 251
/* Other possibly useful primes are 509, 1021, 2039, 4093, 8191, 16381 */

/* Hash function that takes a gfn or mfn, plus another byte of type info */
typedef u32 key_t;
static inline key_t sh_hash(unsigned long n, unsigned int t)
{
    unsigned char *p = (unsigned char *)&n;
    key_t k = t;
    int i;
    for ( i = 0; i < sizeof(n) ; i++ ) k = (u32)p[i] + (k<<6) + (k<<16) - k;
    return k % SHADOW_HASH_BUCKETS;
}
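/* Added note, not part of the original file: the byte loop in sh_hash()
 * is the classic "sdbm" string hash.  Each step
 *     k = p[i] + (k<<6) + (k<<16) - k
 * equals k * 65599 + p[i], since (1<<6) + (1<<16) - 1 == 65599.  The
 * sketch below is a hypothetical equivalent using the multiplicative
 * form; like sh_hash() itself, its value depends on the host byte
 * order of n. */
static inline key_t sh_hash_sdbm_form(unsigned long n, unsigned int t)
{
    unsigned char *p = (unsigned char *)&n;
    key_t k = t;
    unsigned int i;
    for ( i = 0; i < sizeof(n); i++ )
        k = k * 65599u + p[i];       /* same value as the shift form */
    return k % SHADOW_HASH_BUCKETS;  /* 251 buckets; a prime spreads keys */
}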
#if SHADOW_AUDIT & (SHADOW_AUDIT_HASH|SHADOW_AUDIT_HASH_FULL)

/* Before we get to the mechanism, define a pair of audit functions
 * that sanity-check the contents of the hash table. */
static void sh_hash_audit_bucket(struct domain *d, int bucket)
/* Audit one bucket of the hash table */
{
    struct shadow_page_info *sp, *x;

    if ( !(SHADOW_AUDIT_ENABLE) )
        return;

    sp = d->arch.paging.shadow.hash_table[bucket];
    while ( sp )
    {
        /* Not a shadow? */
        BUG_ON( sp->mbz != 0 );
        /* Bogus type? */
        BUG_ON( sp->type == 0 );
        BUG_ON( sp->type > SH_type_max_shadow );
        /* Wrong bucket? */
        BUG_ON( sh_hash(sp->backpointer, sp->type) != bucket );
        /* Duplicate entry? */
        for ( x = sp->next_shadow; x; x = x->next_shadow )
            BUG_ON( x->backpointer == sp->backpointer && x->type == sp->type );
        /* Follow the backpointer to the guest pagetable */
        if ( sp->type != SH_type_fl1_32_shadow
             && sp->type != SH_type_fl1_pae_shadow
             && sp->type != SH_type_fl1_64_shadow )
        {
            struct page_info *gpg = mfn_to_page(_mfn(sp->backpointer));
            /* Bad shadow flags on guest page? */
            BUG_ON( !(gpg->shadow_flags & (1<<sp->type)) );
            /* Bad type count on guest page? */
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
            if ( sp->type == SH_type_l1_32_shadow
                 || sp->type == SH_type_l1_pae_shadow
                 || sp->type == SH_type_l1_64_shadow )
            {
                if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page
                     && (gpg->u.inuse.type_info & PGT_count_mask) != 0 )
                {
                    if ( !page_is_out_of_sync(gpg) )
                    {
                        SHADOW_ERROR("MFN %#lx shadowed (by %#"PRI_mfn")"
                                     " and not OOS but has typecount %#lx\n",
                                     sp->backpointer,
                                     mfn_x(shadow_page_to_mfn(sp)),
                                     gpg->u.inuse.type_info);
                        BUG();
                    }
                }
            }
            else /* Not an l1 */
#endif
            if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page
                 && (gpg->u.inuse.type_info & PGT_count_mask) != 0 )
            {
                SHADOW_ERROR("MFN %#lx shadowed (by %#"PRI_mfn")"
                             " but has typecount %#lx\n",
                             sp->backpointer, mfn_x(shadow_page_to_mfn(sp)),
                             gpg->u.inuse.type_info);
                BUG();
            }
        }
        /* That entry was OK; on we go */
        sp = sp->next_shadow;
    }
}

#else
#define sh_hash_audit_bucket(_d, _b) do {} while(0)
#endif /* Hashtable bucket audit */

#if SHADOW_AUDIT & SHADOW_AUDIT_HASH_FULL

static void sh_hash_audit(struct domain *d)
/* Full audit: audit every bucket in the table */
{
    int i;

    if ( !(SHADOW_AUDIT_ENABLE) )
        return;

    for ( i = 0; i < SHADOW_HASH_BUCKETS; i++ )
    {
        sh_hash_audit_bucket(d, i);
    }
}

#else
#define sh_hash_audit(_d) do {} while(0)
#endif /* Hashtable full audit */

/* Allocate and initialise the table itself.
 * Returns 0 for success, 1 for error. */
static int shadow_hash_alloc(struct domain *d)
{
    struct shadow_page_info **table;

    ASSERT(shadow_locked_by_me(d));
    ASSERT(!d->arch.paging.shadow.hash_table);

    table = xmalloc_array(struct shadow_page_info *, SHADOW_HASH_BUCKETS);
    if ( !table ) return 1;
    memset(table, 0,
           SHADOW_HASH_BUCKETS * sizeof (struct shadow_page_info *));
    d->arch.paging.shadow.hash_table = table;
    return 0;
}

/* Tear down the hash table and return all memory to Xen.
 * This function does not care whether the table is populated. */
static void shadow_hash_teardown(struct domain *d)
{
    ASSERT(shadow_locked_by_me(d));
    ASSERT(d->arch.paging.shadow.hash_table);

    xfree(d->arch.paging.shadow.hash_table);
    d->arch.paging.shadow.hash_table = NULL;
}

mfn_t shadow_hash_lookup(struct vcpu *v, unsigned long n, unsigned int t)
/* Find an entry in the hash table.  Returns the MFN of the shadow,
 * or INVALID_MFN if it doesn't exist */
{
    struct domain *d = v->domain;
    struct shadow_page_info *sp, *prev;
    key_t key;

    ASSERT(shadow_locked_by_me(d));
