
mm.c

Package: Xen virtual machine source code package
Language: C
Page 1 of 5
}

// caller must get_page(mfn_to_page(mfn)) before call.
// caller must call set_gpfn_from_mfn() before call if necessary.
// because set_gpfn_from_mfn() result must be visible before pte xchg
// caller must use memory barrier. NOTE: xchg has acquire semantics.
// flags: ASSIGN_xxx
static void
assign_domain_page_replace(struct domain *d, unsigned long mpaddr,
                           unsigned long mfn, unsigned long flags)
{
    struct mm_struct *mm = &d->arch.mm;
    volatile pte_t* pte;
    pte_t old_pte;
    pte_t npte;
    unsigned long prot = flags_to_prot(flags);

    pte = lookup_alloc_domain_pte(d, mpaddr);

    // update pte
    npte = pfn_pte(mfn, __pgprot(prot));
    old_pte = ptep_xchg(mm, mpaddr, pte, npte);
    if (pte_mem(old_pte)) {
        unsigned long old_mfn = pte_pfn(old_pte);

        // The mfn == old_mfn case can happen when a domain maps a granted
        // page twice at the same pseudo-physical address.
        // It's nonsense, but allowed.
        // __gnttab_map_grant_ref()
        //   => create_host_mapping()
        //      => assign_domain_page_replace()
        if (mfn != old_mfn) {
            domain_put_page(d, mpaddr, pte, old_pte, 1);
        }
    }
    perfc_incr(assign_domain_page_replace);
}

// caller must get_page(new_page) before call.
// Only steal_page() calls this function.
static int
assign_domain_page_cmpxchg_rel(struct domain* d, unsigned long mpaddr,
                               struct page_info* old_page,
                               struct page_info* new_page,
                               unsigned long flags, int clear_PGC_allocate)
{
    struct mm_struct *mm = &d->arch.mm;
    volatile pte_t* pte;
    unsigned long old_mfn;
    unsigned long old_prot;
    pte_t old_pte;
    unsigned long new_mfn;
    unsigned long new_prot;
    pte_t new_pte;
    pte_t ret_pte;

    BUG_ON((flags & ASSIGN_pgc_allocated) == 0);
    pte = lookup_alloc_domain_pte(d, mpaddr);

 again:
    old_prot = pte_val(*pte) & ~_PAGE_PPN_MASK;
    old_mfn = page_to_mfn(old_page);
    old_pte = pfn_pte(old_mfn, __pgprot(old_prot));
    if (!pte_present(old_pte)) {
        gdprintk(XENLOG_INFO,
                "%s: old_pte 0x%lx old_prot 0x%lx old_mfn 0x%lx\n",
                __func__, pte_val(old_pte), old_prot, old_mfn);
        return -EINVAL;
    }

    new_prot = flags_to_prot(flags);
    new_mfn = page_to_mfn(new_page);
    new_pte = pfn_pte(new_mfn, __pgprot(new_prot));

    // update pte
    ret_pte = ptep_cmpxchg_rel(mm, mpaddr, pte, old_pte, new_pte);
    if (unlikely(pte_val(old_pte) != pte_val(ret_pte))) {
        // Same pfn but different flag bits: a racing update touched only
        // the attribute bits, so rebuild the expected pte and retry.
        if (pte_pfn(old_pte) == pte_pfn(ret_pte)) {
            goto again;
        }

        gdprintk(XENLOG_INFO,
                "%s: old_pte 0x%lx old_prot 0x%lx old_mfn 0x%lx "
                "ret_pte 0x%lx ret_mfn 0x%lx\n",
                __func__,
                pte_val(old_pte), old_prot, old_mfn,
                pte_val(ret_pte), pte_pfn(ret_pte));
        return -EINVAL;
    }

    BUG_ON(!pte_mem(old_pte));
    BUG_ON(!pte_pgc_allocated(old_pte));
    BUG_ON(page_get_owner(old_page) != d);
    BUG_ON(get_gpfn_from_mfn(old_mfn) != (mpaddr >> PAGE_SHIFT));
    BUG_ON(old_mfn == new_mfn);

    set_gpfn_from_mfn(old_mfn, INVALID_M2P_ENTRY);

    if (likely(clear_PGC_allocate)) {
        if (!test_and_clear_bit(_PGC_allocated, &old_page->count_info))
            BUG();
    } else {
        int ret;
        // adjust count_info for domain_page_flush_and_put().
        // This is the slow path.
        BUG_ON(!test_bit(_PGC_allocated, &old_page->count_info));
        BUG_ON(d == NULL);
        ret = get_page(old_page, d);
        BUG_ON(ret == 0);
    }

    domain_page_flush_and_put(d, mpaddr, pte, old_pte, old_page);
    perfc_incr(assign_domain_pge_cmpxchg_rel);
    return 0;
}

// Remove the p2m entry at mpaddr and release the mapped page.
// If mfn == INVALID_MFN the entry is cleared unconditionally;
// otherwise it is cleared only while it still maps mfn.
static void
zap_domain_page_one(struct domain *d, unsigned long mpaddr,
                    int clear_PGC_allocate, unsigned long mfn)
{
    struct mm_struct *mm = &d->arch.mm;
    volatile pte_t *pte;
    pte_t old_pte;
    struct page_info *page;

    pte = lookup_noalloc_domain_pte_none(d, mpaddr);
    if (pte == NULL)
        return;
    if (pte_none(*pte))
        return;

    if (mfn == INVALID_MFN) {
        // clear pte
        old_pte = ptep_get_and_clear(mm, mpaddr, pte);
        mfn = pte_pfn(old_pte);
    } else {
        unsigned long old_arflags;
        pte_t new_pte;
        pte_t ret_pte;

    again:
        // memory_exchange() calls guest_physmap_remove_page() with
        // a stolen page, i.e. page owner = NULL.
        BUG_ON(page_get_owner(mfn_to_page(mfn)) != d &&
               page_get_owner(mfn_to_page(mfn)) != NULL);
        old_arflags = pte_val(*pte) & ~_PAGE_PPN_MASK;
        old_pte = pfn_pte(mfn, __pgprot(old_arflags));
        new_pte = __pte(0);

        // update pte
        ret_pte = ptep_cmpxchg_rel(mm, mpaddr, pte, old_pte, new_pte);
        if (unlikely(pte_val(old_pte) != pte_val(ret_pte))) {
            if (pte_pfn(old_pte) == pte_pfn(ret_pte)) {
                goto again;
            }

            gdprintk(XENLOG_INFO, "%s: old_pte 0x%lx old_arflags 0x%lx mfn 0x%lx "
                    "ret_pte 0x%lx ret_mfn 0x%lx\n",
                    __func__,
                    pte_val(old_pte), old_arflags, mfn,
                    pte_val(ret_pte), pte_pfn(ret_pte));
            return;
        }
        BUG_ON(mfn != pte_pfn(ret_pte));
    }

    page = mfn_to_page(mfn);
    BUG_ON((page->count_info & PGC_count_mask) == 0);

    BUG_ON(clear_PGC_allocate && (page_get_owner(page) == NULL));
    domain_put_page(d, mpaddr, pte, old_pte, clear_PGC_allocate);
    perfc_incr(zap_domain_page_one);
}

unsigned long
dom0vp_zap_physmap(struct domain *d, unsigned long gpfn,
                   unsigned int extent_order)
{
    if (extent_order != 0) {
        //XXX
        return -ENOSYS;
    }

    zap_domain_page_one(d, gpfn << PAGE_SHIFT, 1, INVALID_MFN);
    perfc_incr(dom0vp_zap_physmap);
    return 0;
}

// Map a page of domain domid (given as an mfn, or as a gmfn when
// is_gmfn != 0) into d's physmap at gpfn.
static unsigned long
__dom0vp_add_physmap(struct domain* d, unsigned long gpfn,
                     unsigned long mfn_or_gmfn,
                     unsigned long flags, domid_t domid, int is_gmfn)
{
    int error = -EINVAL;
    struct domain* rd;
    unsigned long mfn;

    /* Not allowed by a domain.  */
    if (flags & (ASSIGN_nocache | ASSIGN_pgc_allocated))
        return -EINVAL;

    rd = rcu_lock_domain_by_id(domid);
    if (unlikely(rd == NULL)) {
        switch (domid) {
        case DOMID_XEN:
            rd = dom_xen;
            break;
        case DOMID_IO:
            rd = dom_io;
            break;
        default:
            gdprintk(XENLOG_INFO, "d 0x%p domid %d "
                    "gpfn 0x%lx mfn_or_gmfn 0x%lx flags 0x%lx domid %d\n",
                    d, d->domain_id, gpfn, mfn_or_gmfn, flags, domid);
            return -ESRCH;
        }
        BUG_ON(rd == NULL);
        rcu_lock_domain(rd);
    }

    if (unlikely(rd == d))
        goto out1;
    /*
     * DOMID_XEN and DOMID_IO don't have their own p2m table.
     * It can be considered that their p2m conversion is p == m.
     */
    if (likely(is_gmfn && domid != DOMID_XEN && domid != DOMID_IO))
        mfn = gmfn_to_mfn(rd, mfn_or_gmfn);
    else
        mfn = mfn_or_gmfn;
    if (unlikely(!mfn_valid(mfn) || get_page(mfn_to_page(mfn), rd) == 0))
        goto out1;

    error = 0;
    BUG_ON(page_get_owner(mfn_to_page(mfn)) == d &&
           get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY);
    assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, flags);
    // don't update the p2m table because this page belongs to rd, not d.
    perfc_incr(dom0vp_add_physmap);
out1:
    rcu_unlock_domain(rd);
    return error;
}

unsigned long
dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn,
                   unsigned long flags, domid_t domid)
{
    return __dom0vp_add_physmap(d, gpfn, mfn, flags, domid, 0);
}

unsigned long
dom0vp_add_physmap_with_gmfn(struct domain* d, unsigned long gpfn,
                             unsigned long gmfn, unsigned long flags,
                             domid_t domid)
{
    return __dom0vp_add_physmap(d, gpfn, gmfn, flags, domid, 1);
}

#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
#define P2M_PFN_ROUNDUP(x)      (((x) + PTRS_PER_PTE - 1) & \
                                 ~(PTRS_PER_PTE - 1))
#define P2M_PFN_ROUNDDOWN(x)    ((x) & ~(PTRS_PER_PTE - 1))
#define P2M_NUM_PFN(x)          (((x) + PTRS_PER_PTE - 1) / PTRS_PER_PTE)
#define MD_END(md)              ((md)->phys_addr + \
                                 ((md)->num_pages << EFI_PAGE_SHIFT))

static struct page_info* p2m_pte_zero_page = NULL;

/* This must be called before dom0 p2m table allocation */
void __init
expose_p2m_init(void)
{
    pte_t* pte;

    /*
     * Initialise our DOMID_P2M domain.
     * This domain owns m2p table pages.
     */
    dom_p2m = domain_create(DOMID_P2M, DOMCRF_dummy, 0);
    BUG_ON(dom_p2m == NULL);
    dom_p2m->max_pages = ~0U;

    pte = pte_alloc_one_kernel(NULL, 0);
    BUG_ON(pte == NULL);
    smp_mb(); // make contents of the page visible.
    p2m_pte_zero_page = virt_to_page(pte);
}

// allocate pgd, pmd of dest_dom if necessary
static int
allocate_pgd_pmd(struct domain* dest_dom, unsigned long dest_gpfn,
                 struct domain* src_dom,
                 unsigned long src_gpfn, unsigned long num_src_gpfn)
{
    unsigned long i = 0;

    BUG_ON((src_gpfn % PTRS_PER_PTE) != 0);
    BUG_ON((num_src_gpfn % PTRS_PER_PTE) != 0);

    while (i < num_src_gpfn) {
        volatile pte_t* src_pte;
        volatile pte_t* dest_pte;

        src_pte = lookup_noalloc_domain_pte(src_dom,
                                            (src_gpfn + i) << PAGE_SHIFT);
        if (src_pte == NULL) {
            i++;
            continue;
        }

        dest_pte = lookup_alloc_domain_pte(dest_dom,
                                           (dest_gpfn << PAGE_SHIFT) +
                                           i * sizeof(pte_t));
        if (dest_pte == NULL) {
            gdprintk(XENLOG_INFO, "%s failed to allocate pte page\n",
                     __func__);
            return -ENOMEM;
        }

        // skip to next pte page
        i = P2M_PFN_ROUNDDOWN(i + PTRS_PER_PTE);
    }
    return 0;
}

static int
expose_p2m_page(struct domain* d, unsigned long mpaddr, struct page_info* page)
{
    int ret = get_page(page, dom_p2m);
    BUG_ON(ret != 1);
    return __assign_domain_page(d, mpaddr, page_to_maddr(page),
                                ASSIGN_readonly);
}

// expose pte page
static int
expose_p2m_range(struct domain* dest_dom, unsigned long dest_gpfn,
                 struct domain* src_dom,
                 unsigned long src_gpfn, unsigned long num_src_gpfn)
{
    unsigned long i = 0;

    BUG_ON((src_gpfn % PTRS_PER_PTE) != 0);
    BUG_ON((num_src_gpfn % PTRS_PER_PTE) != 0);

    while (i < num_src_gpfn) {
        volatile pte_t* pte;

        pte = lookup_noalloc_domain_pte(src_dom, (src_gpfn + i) << PAGE_SHIFT);
        if (pte == NULL) {
            i++;
            continue;
        }

        if (expose_p2m_page(dest_dom,
                            (dest_gpfn << PAGE_SHIFT) + i * sizeof(pte_t),
                            virt_to_page(pte)) < 0) {
            gdprintk(XENLOG_INFO, "%s failed to assign page\n", __func__);
            return -EAGAIN;
        }

        // skip to next pte page
        i = P2M_PFN_ROUNDDOWN(i + PTRS_PER_PTE);
    }
    return 0;
}

// expose p2m_pte_zero_page
static int
expose_zero_page(struct domain* dest_dom, unsigned long dest_gpfn,
                 unsigned long num_src_gpfn)
{
    unsigned long i;

    for (i = 0; i < P2M_NUM_PFN(num_src_gpfn); i++) {
        volatile pte_t* pte;

        pte = lookup_noalloc_domain_pte(dest_dom,
                                        (dest_gpfn + i) << PAGE_SHIFT);
        if (pte == NULL || pte_present(*pte))
            continue;

        if (expose_p2m_page(dest_dom, (dest_gpfn + i) << PAGE_SHIFT,
                            p2m_pte_zero_page) < 0) {
            gdprintk(XENLOG_INFO, "%s failed to assign zero-pte page\n",
                     __func__);
            return -EAGAIN;
        }
    }
    return 0;
}

// Map src_dom's p2m pte pages read-only into dest_dom's physmap,
// backing unpopulated slots with the shared zero-pte page.
static int
expose_p2m(struct domain* dest_dom, unsigned long dest_gpfn,
           struct domain* src_dom,
           unsigned long src_gpfn, unsigned long num_src_gpfn)
{
    if (allocate_pgd_pmd(dest_dom, dest_gpfn,
                         src_dom, src_gpfn, num_src_gpfn))
        return -ENOMEM;

    if (expose_p2m_range(dest_dom, dest_gpfn,
                         src_dom, src_gpfn, num_src_gpfn))
        return -EAGAIN;

    if (expose_zero_page(dest_dom, dest_gpfn, num_src_gpfn))
        return -EAGAIN;

    return 0;
}

// Undo expose_p2m(): zap the exposed pte-page mappings.
static void
unexpose_p2m(struct domain* dest_dom,
             unsigned long dest_gpfn, unsigned long num_dest_gpfn)
{
    unsigned long i;

    for (i = 0; i < num_dest_gpfn; i++) {
        zap_domain_page_one(dest_dom, (dest_gpfn + i) << PAGE_SHIFT,
                            0, INVALID_MFN);
    }
}

// It is possible to optimize the loop, but this isn't performance critical.
unsigned long
dom0vp_expose_p2m(struct domain* d,
                  unsigned long conv_start_gpfn,
                  unsigned long assign_start_gpfn,
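
The most delicate pattern on this page is the compare-and-exchange retry loop shared by assign_domain_page_cmpxchg_rel() and zap_domain_page_one(): rebuild the expected PTE from the flag bits currently in the entry, attempt ptep_cmpxchg_rel(), retry when the exchange fails but the PFN still matches (only attribute bits raced), and bail out when the PFN itself changed. The standalone sketch below models that control flow with C11 atomics; the PTE layout, FLAG_MASK, and zap_pte() are simplified, hypothetical stand-ins for illustration, not the IA64 PTE format or Xen's actual primitives.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Toy PTE layout (hypothetical): low 12 bits are flag bits, the rest
 * is the PFN. This models the retry idiom only, nothing more. */
#define FLAG_MASK   0xfffUL
#define PTE_PFN(p)  ((p) >> 12)

static _Atomic uint64_t pte;  /* one page-table entry, updated concurrently */

/* Clear the entry, but only while it still maps old_pfn. Tolerates
 * concurrent updates that touch only the flag bits. */
static int zap_pte(uint64_t old_pfn)
{
    uint64_t expected;

again:
    /* Re-read the current flag bits and rebuild the PTE we expect. */
    expected = (old_pfn << 12) | (atomic_load(&pte) & FLAG_MASK);

    if (!atomic_compare_exchange_strong(&pte, &expected, 0)) {
        /* expected now holds the value actually found. Same PFN means
         * only the flag bits raced: retry. A different PFN means the
         * entry was remapped under us: give up, as the code above does. */
        if (PTE_PFN(expected) == old_pfn)
            goto again;
        return -1;
    }
    return 0;
}

int main(void)
{
    atomic_store(&pte, (0x1234UL << 12) | 0x3); /* pfn 0x1234, flags 0x3 */
    printf("zap: %d\n", zap_pte(0x1234));       /* 0: entry cleared */
    printf("zap again: %d\n", zap_pte(0x1234)); /* -1: pfn no longer matches */
    return 0;
}

Built with any C11 compiler (e.g. cc -std=c11), the first call succeeds and the second fails because the PFN no longer matches. The release semantics of ptep_cmpxchg_rel (modeled here by a plain sequentially consistent compare-exchange) matter for the ordering requirement stated in the comments above: the set_gpfn_from_mfn() update must be visible before the new PTE is.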
