/* mm.c - from the xen 3.2.2 source tree */
    mm_p2m_teardown(d);
}

unsigned long
domain_get_maximum_gpfn(struct domain *d)
{
    return (d->arch.convmem_end + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

// stolen from share_xen_page_with_guest() in xen/arch/x86/mm.c
void
share_xen_page_with_guest(struct page_info *page,
                          struct domain *d, int readonly)
{
    if ( page_get_owner(page) == d )
        return;

#if 1
    if (readonly) {
        printk("%s:%d readonly is not supported yet\n", __func__, __LINE__);
    }
#endif

    // alloc_xenheap_pages() doesn't initialize page owner.
    //BUG_ON(page_get_owner(page) != NULL);

    spin_lock(&d->page_alloc_lock);

#ifndef __ia64__
    /* The incremented type count pins as writable or read-only. */
    page->u.inuse.type_info  = (readonly ? PGT_none : PGT_writable_page);
    page->u.inuse.type_info |= PGT_validated | 1;
#endif

    page_set_owner(page, d);
    wmb(); /* install valid domain ptr before updating refcnt. */
    ASSERT(page->count_info == 0);

    /* Only add to the allocation list if the domain isn't dying. */
    if ( !d->is_dying )
    {
        page->count_info |= PGC_allocated | 1;
        if ( unlikely(d->xenheap_pages++ == 0) )
            get_knownalive_domain(d);
        list_add_tail(&page->list, &d->xenpage_list);
    }

    // grant_table_destroy() releases these pages.
    // but it doesn't clear their m2p entry. So there might remain stale
    // entries. such a stale entry is cleared here.
    set_gpfn_from_mfn(page_to_mfn(page), INVALID_M2P_ENTRY);

    spin_unlock(&d->page_alloc_lock);
}

void
share_xen_page_with_privileged_guests(struct page_info *page, int readonly)
{
    share_xen_page_with_guest(page, dom_xen, readonly);
}

unsigned long
gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
{
    unsigned long pte;

    pte = lookup_domain_mpa(d, gpfn << PAGE_SHIFT, NULL);
    if (!pte) {
        panic("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
    }
    return ((pte & _PFN_MASK) >> PAGE_SHIFT);
}
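/*
 * Illustrative sketch (not from the original file): translate_domain_pte()
 * below combines the PTE a guest tried to insert with the p2m lookup of the
 * metaphysical address it targets. Assuming itir_mask() yields the
 * frame-number bits for the insertion page size ps (so its complement is the
 * in-page offset mask), the metaphysical address is recombined roughly like
 * this:
 */
#if 0   /* illustration only; example_mpaddr is a hypothetical helper */
static u64
example_mpaddr(u64 pteval, u64 address, unsigned int ps)
{
    u64 offset_mask = (1UL << ps) - 1;      /* == ~itir_mask(itir) */

    /* frame bits come from the guest PTE, offset bits from the address */
    return ((pteval & _PAGE_PPN_MASK) & ~offset_mask) |
           (address & offset_mask);
}
#endif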
// given a domain virtual address, pte and pagesize, extract the metaphysical
// address, convert the pte for a physical address for (possibly different)
// Xen PAGE_SIZE and return modified pte.  (NOTE: TLB insert should use
// current->arch.vhpt_pg_shift!)
u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* itir,
                         struct p2m_entry* entry)
{
    struct domain *d = current->domain;
    ia64_itir_t _itir = {.itir = itir__};
    u64 mask, mpaddr, pteval2;
    u64 arflags;
    u64 arflags2;
    u64 maflags2;

    pteval &= ((1UL << 53) - 1);	// ignore [63:53] bits

    // FIXME address had better be pre-validated on insert
    mask = ~itir_mask(_itir.itir);
    mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);

    if (_itir.ps > PAGE_SHIFT)
        _itir.ps = PAGE_SHIFT;

    ((ia64_itir_t*)itir)->itir = _itir.itir;	/* Copy the whole register. */
    ((ia64_itir_t*)itir)->ps = _itir.ps;	/* Overwrite ps part! */

    pteval2 = lookup_domain_mpa(d, mpaddr, entry);
    if (_itir.ps < PAGE_SHIFT)
        pteval2 |= mpaddr & ~PAGE_MASK & ~((1L << _itir.ps) - 1);

    /* Check access rights.  */
    arflags  = pteval  & _PAGE_AR_MASK;
    arflags2 = pteval2 & _PAGE_AR_MASK;
    if (arflags != _PAGE_AR_R && arflags2 == _PAGE_AR_R) {
#if 0
        dprintk(XENLOG_WARNING,
                "%s:%d "
                "pteval 0x%lx arflag 0x%lx address 0x%lx itir 0x%lx "
                "pteval2 0x%lx arflags2 0x%lx mpaddr 0x%lx\n",
                __func__, __LINE__,
                pteval, arflags, address, itir__,
                pteval2, arflags2, mpaddr);
#endif
        pteval = (pteval & ~_PAGE_AR_MASK) | _PAGE_AR_R;
    }

    /* Check memory attribute. The switch is on the *requested* memory
       attribute.  */
    maflags2 = pteval2 & _PAGE_MA_MASK;
    switch (pteval & _PAGE_MA_MASK) {
    case _PAGE_MA_NAT:
        /* NaT pages are always accepted!  */
        break;
    case _PAGE_MA_UC:
    case _PAGE_MA_UCE:
    case _PAGE_MA_WC:
        if (maflags2 == _PAGE_MA_WB) {
            /* Don't let domains WB-map uncached addresses.
               This can happen when domU tries to touch i/o
               port space.  Also prevents possible address
               aliasing issues.  */
            if (!(mpaddr - IO_PORTS_PADDR < IO_PORTS_SIZE)) {
                u64 ucwb;

                /*
                 * If dom0 page has both UC & WB attributes
                 * don't warn about attempted UC access.
                 */
                ucwb = efi_mem_attribute(mpaddr, PAGE_SIZE);
                ucwb &= EFI_MEMORY_UC | EFI_MEMORY_WB;
                ucwb ^= EFI_MEMORY_UC | EFI_MEMORY_WB;

                if (d != dom0 || ucwb != 0)
                    gdprintk(XENLOG_WARNING, "Warning: UC"
                             " to WB for mpaddr=%lx\n",
                             mpaddr);
            }
            pteval = (pteval & ~_PAGE_MA_MASK) | _PAGE_MA_WB;
        }
        break;
    case _PAGE_MA_WB:
        if (maflags2 != _PAGE_MA_WB) {
            /* Forbid non-coherent access to coherent memory. */
            panic_domain(NULL, "try to use WB mem attr on "
                         "UC page, mpaddr=%lx\n", mpaddr);
        }
        break;
    default:
        panic_domain(NULL, "try to use unknown mem attribute\n");
    }

    /* If shadow mode is enabled, virtualize dirty bit.  */
    if (shadow_mode_enabled(d) && (pteval & _PAGE_D)) {
        u64 mp_page = mpaddr >> PAGE_SHIFT;
        pteval |= _PAGE_VIRT_D;

        /* If the page is not already dirty, don't set the dirty bit! */
        if (mp_page < d->arch.shadow_bitmap_size * 8
            && !test_bit(mp_page, d->arch.shadow_bitmap))
            pteval &= ~_PAGE_D;
    }

    /* Ignore non-addr bits of pteval2 and force PL0->1
       (PL3 is unaffected) */
    return (pteval & ~(_PAGE_PPN_MASK | _PAGE_PL_MASK)) |
           (pteval2 & _PAGE_PPN_MASK) |
           (vcpu_pl_adjust(pteval, 7) & _PAGE_PL_MASK);
}

// given a current domain metaphysical address, return the physical address
unsigned long translate_domain_mpaddr(unsigned long mpaddr,
                                      struct p2m_entry* entry)
{
    unsigned long pteval;

    pteval = lookup_domain_mpa(current->domain, mpaddr, entry);
    return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
}

//XXX !xxx_present() should be used instead of !xxx_none()?
// pud, pmd, pte pages are zero-cleared when they are allocated.
// Their area must be visible before population, so the
// cmpxchg must have release semantics.
static volatile pte_t*
lookup_alloc_domain_pte(struct domain* d, unsigned long mpaddr)
{
    struct mm_struct *mm = &d->arch.mm;
    volatile pgd_t *pgd;
    volatile pud_t *pud;
    volatile pmd_t *pmd;

    BUG_ON(mm->pgd == NULL);

    pgd = pgd_offset(mm, mpaddr);
 again_pgd:
    if (unlikely(pgd_none(*pgd))) { // acquire semantics
        pud_t *old_pud = NULL;
        pud = pud_alloc_one(mm, mpaddr);
        if (unlikely(!pgd_cmpxchg_rel(mm, pgd, old_pud, pud))) {
            pud_free(pud);
            goto again_pgd;
        }
    }

    pud = pud_offset(pgd, mpaddr);
 again_pud:
    if (unlikely(pud_none(*pud))) { // acquire semantics
        pmd_t* old_pmd = NULL;
        pmd = pmd_alloc_one(mm, mpaddr);
        if (unlikely(!pud_cmpxchg_rel(mm, pud, old_pmd, pmd))) {
            pmd_free(pmd);
            goto again_pud;
        }
    }

    pmd = pmd_offset(pud, mpaddr);
 again_pmd:
    if (unlikely(pmd_none(*pmd))) { // acquire semantics
        pte_t* old_pte = NULL;
        pte_t* pte = pte_alloc_one_kernel(mm, mpaddr);
        if (unlikely(!pmd_cmpxchg_kernel_rel(mm, pmd, old_pte, pte))) {
            pte_free_kernel(pte);
            goto again_pmd;
        }
    }

    return pte_offset_map(pmd, mpaddr);
}
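/*
 * Illustrative sketch (not part of the original file): a typical caller
 * extends the p2m with lookup_alloc_domain_pte() and then publishes the
 * mapping with a store that has release semantics, so the zeroed
 * intermediate tables above become visible before the new PTE does.
 * The helper name and the protection bits below are assumptions chosen for
 * illustration; compare __assign_new_domain_page() further down.
 */
#if 0   /* illustration only */
static void
example_assign(struct domain *d, unsigned long mpaddr, unsigned long maddr)
{
    volatile pte_t *pte = lookup_alloc_domain_pte(d, mpaddr);

    /* release store: pairs with the acquire reads of the walkers above */
    set_pte_rel(pte, pfn_pte(maddr >> PAGE_SHIFT,
                             __pgprot(__DIRTY_BITS | _PAGE_PL_PRIV |
                                      _PAGE_AR_RWX)));
}
#endif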
//XXX xxx_none() should be used instead of !xxx_present()?
volatile pte_t*
lookup_noalloc_domain_pte(struct domain* d, unsigned long mpaddr)
{
    struct mm_struct *mm = &d->arch.mm;
    volatile pgd_t *pgd;
    volatile pud_t *pud;
    volatile pmd_t *pmd;

    BUG_ON(mm->pgd == NULL);
    pgd = pgd_offset(mm, mpaddr);
    if (unlikely(!pgd_present(*pgd))) // acquire semantics
        return NULL;

    pud = pud_offset(pgd, mpaddr);
    if (unlikely(!pud_present(*pud))) // acquire semantics
        return NULL;

    pmd = pmd_offset(pud, mpaddr);
    if (unlikely(!pmd_present(*pmd))) // acquire semantics
        return NULL;

    return pte_offset_map(pmd, mpaddr);
}

static volatile pte_t*
lookup_noalloc_domain_pte_none(struct domain* d, unsigned long mpaddr)
{
    struct mm_struct *mm = &d->arch.mm;
    volatile pgd_t *pgd;
    volatile pud_t *pud;
    volatile pmd_t *pmd;

    BUG_ON(mm->pgd == NULL);
    pgd = pgd_offset(mm, mpaddr);
    if (unlikely(pgd_none(*pgd))) // acquire semantics
        return NULL;

    pud = pud_offset(pgd, mpaddr);
    if (unlikely(pud_none(*pud))) // acquire semantics
        return NULL;

    pmd = pmd_offset(pud, mpaddr);
    if (unlikely(pmd_none(*pmd))) // acquire semantics
        return NULL;

    return pte_offset_map(pmd, mpaddr);
}

unsigned long
____lookup_domain_mpa(struct domain *d, unsigned long mpaddr)
{
    volatile pte_t *pte;

    pte = lookup_noalloc_domain_pte(d, mpaddr);
    if (pte == NULL)
        return INVALID_MFN;

    if (pte_present(*pte))
        return (pte->pte & _PFN_MASK);
    else if (VMX_DOMAIN(d->vcpu[0]))
        return GPFN_INV_MASK;

    return INVALID_MFN;
}
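/*
 * Note on the two walkers above: lookup_noalloc_domain_pte() treats an entry
 * that is merely not "present" as missing, while the _none() variant only
 * stops at entries that were never populated at all.
 * ____lookup_domain_mpa() returns the machine address bits (_PFN_MASK) of a
 * present mapping, INVALID_MFN when nothing is mapped, and GPFN_INV_MASK for
 * a non-present entry of a VMX domain.
 */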
unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr,
                                struct p2m_entry* entry)
{
    volatile pte_t *pte = lookup_noalloc_domain_pte(d, mpaddr);

    if (pte != NULL) {
        pte_t tmp_pte = *pte;   // pte is volatile. copy the value.
        if (pte_present(tmp_pte)) {
            if (entry != NULL)
                p2m_entry_set(entry, pte, tmp_pte);
            return pte_val(tmp_pte);
        } else if (VMX_DOMAIN(d->vcpu[0]))
            return GPFN_INV_MASK;
    }

    if (mpaddr < d->arch.convmem_end && !d->is_dying) {
        gdprintk(XENLOG_WARNING, "vcpu %d iip 0x%016lx: non-allocated mpa "
                 "d %"PRId16" 0x%lx (< 0x%lx)\n",
                 current->vcpu_id, PSCB(current, iip),
                 d->domain_id, mpaddr, d->arch.convmem_end);
    } else if (mpaddr - IO_PORTS_PADDR < IO_PORTS_SIZE) {
        /* Log I/O port probing, but complain less loudly about it */
        gdprintk(XENLOG_INFO, "vcpu %d iip 0x%016lx: bad I/O port access "
                 "d %"PRId16" 0x%lx\n",
                 current->vcpu_id, PSCB(current, iip), d->domain_id,
                 IO_SPACE_SPARSE_DECODING(mpaddr - IO_PORTS_PADDR));
    } else {
        gdprintk(XENLOG_WARNING, "vcpu %d iip 0x%016lx: bad mpa "
                 "d %"PRId16" 0x%lx (=> 0x%lx)\n",
                 current->vcpu_id, PSCB(current, iip),
                 d->domain_id, mpaddr, d->arch.convmem_end);
    }

    debugger_event(XEN_IA64_DEBUG_ON_BAD_MPA);

    if (entry != NULL)
        p2m_entry_set(entry, NULL, __pte(0));
    //XXX This is a workaround until emulation of memory accesses to a
    //    region where memory or a device is attached is implemented.
    return pte_val(pfn_pte(0, __pgprot(__DIRTY_BITS | _PAGE_PL_PRIV |
                                       _PAGE_AR_RWX)));
}

// FIXME: ONLY USE FOR DOMAIN PAGE_SIZE == PAGE_SIZE
#if 1
void *domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
{
    unsigned long pte = lookup_domain_mpa(d, mpaddr, NULL);
    unsigned long imva;

    pte &= _PAGE_PPN_MASK;
    imva = (unsigned long) __va(pte);
    imva |= mpaddr & ~PAGE_MASK;
    return (void*)imva;
}
#else
void *domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
{
    unsigned long imva = __gpa_to_mpa(d, mpaddr);

    return (void *)__va(imva);
}
#endif

unsigned long
paddr_to_maddr(unsigned long paddr)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    u64 pa;

    pa = ____lookup_domain_mpa(d, paddr);
    if (pa == INVALID_MFN) {
        printk("%s: called with bad memory address: 0x%lx - iip=%lx\n",
               __func__, paddr, vcpu_regs(v)->cr_iip);
        return 0;
    }

    return (pa & _PFN_MASK) | (paddr & ~PAGE_MASK);
}

/* Allocate a new page for domain and map it to the specified metaphysical
   address.  */
static struct page_info *
__assign_new_domain_page(struct domain *d, unsigned long mpaddr,
                         volatile pte_t* pte)
{
    struct page_info *p;
    unsigned long maddr;

    BUG_ON(!pte_none(*pte));

    p = alloc_domheap_page(d);
    if (unlikely(!p)) {
        printk("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
        return(p);
    }

    // zero out pages for security reasons
    clear_page(page_to_virt(p));
    maddr = page_to_maddr (p);
    if (unlikely(maddr > __get_cpu_var(vhpt_paddr)
                 && maddr < __get_cpu_var(vhpt_pend))) {
        /* FIXME: how can this happen ?
           vhpt is allocated by alloc_domheap_page.  */
        printk("assign_new_domain_page: reassigned vhpt page %lx!!\n",
               maddr);
    }
    set_gpfn_from_mfn(page_to_mfn(p), mpaddr >> PAGE_SHIFT);
    // clear_page() and set_gpfn_from_mfn() become visible before set_pte_rel()
    // because set_pte_rel() has release semantics
    set_pte_rel(pte,
                pfn_pte(maddr >> PAGE_SHIFT,
                        __pgprot(_PAGE_PGC_ALLOCATED | __DIRTY_BITS |
                                 _PAGE_PL_PRIV | _PAGE_AR_RWX)));

    smp_mb();
    return p;
}
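/*
 * Illustrative sketch (not part of the original file): the ordering contract
 * of __assign_new_domain_page() is that clear_page() and the m2p update
 * become visible before the PTE itself (release store); a trailing smp_mb()
 * then fences the update before the function returns. A hypothetical caller
 * could look roughly like this (the wrapper name is an assumption):
 */
#if 0   /* illustration only */
struct page_info *
example_new_page(struct domain *d, unsigned long mpaddr)
{
    volatile pte_t *pte = lookup_alloc_domain_pte(d, mpaddr);

    if (!pte_none(*pte))
        return NULL;            /* already populated */
    return __assign_new_domain_page(d, mpaddr, pte);
}
#endif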
