/* mm.c (excerpt) */
        l4e_from_page(virt_to_page(d->arch.mm_arg_xlat_l3),
                      __PAGE_HYPERVISOR);

    return 1;

 fail:
    MEM_LOG("Failure in alloc_l4_table: entry %d", i);
    while ( i-- > 0 )
        if ( is_guest_l4_slot(d, i) )
            put_page_from_l4e(pl4e[i], pfn);

    return 0;
}
#else
#define alloc_l4_table(page) (0)
#endif

static void free_l1_table(struct page_info *page)
{
    struct domain *d = page_get_owner(page);
    unsigned long pfn = page_to_mfn(page);
    l1_pgentry_t *pl1e;
    int i;

    pl1e = map_domain_page(pfn);

    for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
        if ( is_guest_l1_slot(i) )
            put_page_from_l1e(pl1e[i], d);

    unmap_domain_page(pl1e);
}

static void free_l2_table(struct page_info *page)
{
#ifdef CONFIG_COMPAT
    struct domain *d = page_get_owner(page);
#endif
    unsigned long pfn = page_to_mfn(page);
    l2_pgentry_t *pl2e;
    int i;

    pl2e = map_domain_page(pfn);

    for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
        if ( is_guest_l2_slot(d, page->u.inuse.type_info, i) )
            put_page_from_l2e(pl2e[i], pfn);

    unmap_domain_page(pl2e);

    page->u.inuse.type_info &= ~PGT_pae_xen_l2;
}

#if CONFIG_PAGING_LEVELS >= 3
static void free_l3_table(struct page_info *page)
{
    struct domain *d = page_get_owner(page);
    unsigned long pfn = page_to_mfn(page);
    l3_pgentry_t *pl3e;
    int i;

    pl3e = map_domain_page(pfn);

    for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
        if ( is_guest_l3_slot(i) )
        {
            put_page_from_l3e(pl3e[i], pfn);
            unadjust_guest_l3e(pl3e[i], d);
        }

    unmap_domain_page(pl3e);
}
#endif

#if CONFIG_PAGING_LEVELS >= 4
static void free_l4_table(struct page_info *page)
{
    struct domain *d = page_get_owner(page);
    unsigned long pfn = page_to_mfn(page);
    l4_pgentry_t *pl4e = page_to_virt(page);
    int i;

    for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
        if ( is_guest_l4_slot(d, i) )
            put_page_from_l4e(pl4e[i], pfn);
}
#endif

/* How to write an entry to the guest pagetables.
 * Returns 0 for failure (pointer not valid), 1 for success. */
static inline int update_intpte(intpte_t *p,
                                intpte_t old,
                                intpte_t new,
                                unsigned long mfn,
                                struct vcpu *v)
{
    int rv = 1;
#ifndef PTE_UPDATE_WITH_CMPXCHG
    rv = paging_write_guest_entry(v, p, new, _mfn(mfn));
#else
    {
        intpte_t t = old;
        for ( ; ; )
        {
            rv = paging_cmpxchg_guest_entry(v, p, &t, new, _mfn(mfn));
            if ( unlikely(rv == 0) )
            {
                MEM_LOG("Failed to update %" PRIpte " -> %" PRIpte
                        ": saw %" PRIpte, old, new, t);
                break;
            }

            if ( t == old )
                break;

            /* Allowed to change in Accessed/Dirty flags only. */
            BUG_ON((t ^ old) & ~(intpte_t)(_PAGE_ACCESSED|_PAGE_DIRTY));
            old = t;
        }
    }
#endif
    return rv;
}

/* Macro that wraps the appropriate type-changes around update_intpte().
 * Arguments are: type, ptr, old, new, mfn, vcpu */
#define UPDATE_ENTRY(_t,_p,_o,_n,_m,_v)                             \
    update_intpte(&_t ## e_get_intpte(*(_p)),                       \
                  _t ## e_get_intpte(_o), _t ## e_get_intpte(_n),   \
                  (_m), (_v))
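/*
 * Illustrative sketch, not part of mm.c: under PTE_UPDATE_WITH_CMPXCHG,
 * update_intpte() above retries its compare-and-exchange because the MMU
 * may concurrently set the Accessed/Dirty bits in the very word being
 * updated; any other concurrent change is a bug. The standalone program
 * below reproduces that retry rule with GCC's __sync builtin. All demo_*
 * names and flag values are hypothetical stand-ins, and it returns failure
 * where the hypervisor would BUG_ON().
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_ACCESSED 0x20u
#define DEMO_PAGE_DIRTY    0x40u

typedef uint64_t demo_pte_t;

static int demo_update(demo_pte_t *p, demo_pte_t old, demo_pte_t new)
{
    for ( ; ; )
    {
        /* Returns the value seen in *p; swaps only if it equalled 'old'. */
        demo_pte_t t = __sync_val_compare_and_swap(p, old, new);
        if ( t == old )
            return 1;                          /* swap happened */
        if ( (t ^ old) & ~(demo_pte_t)(DEMO_PAGE_ACCESSED|DEMO_PAGE_DIRTY) )
            return 0;                          /* real race: give up */
        old = t;                               /* fold in A/D bits, retry */
    }
}

int main(void)
{
    demo_pte_t pte = 0x1000;
    pte |= DEMO_PAGE_ACCESSED;     /* simulate the MMU setting A mid-update */
    assert(demo_update(&pte, 0x1000, 0x2000));
    printf("pte=%#llx\n", (unsigned long long)pte);
    return 0;
}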
/* Update the L1 entry at pl1e to new value nl1e. */
static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e,
                        unsigned long gl1mfn)
{
    l1_pgentry_t ol1e;
    struct vcpu *curr = current;
    struct domain *d = curr->domain;
    unsigned long mfn;

    if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
        return 0;

    if ( unlikely(paging_mode_refcounts(d)) )
        return UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr);

    if ( l1e_get_flags(nl1e) & _PAGE_PRESENT )
    {
        /* Translate foreign guest addresses. */
        mfn = gmfn_to_mfn(FOREIGNDOM, l1e_get_pfn(nl1e));
        if ( unlikely(mfn == INVALID_MFN) )
            return 0;
        ASSERT((mfn & ~(PADDR_MASK >> PAGE_SHIFT)) == 0);
        nl1e = l1e_from_pfn(mfn, l1e_get_flags(nl1e));

        if ( unlikely(l1e_get_flags(nl1e) & l1_disallow_mask(d)) )
        {
            MEM_LOG("Bad L1 flags %x",
                    l1e_get_flags(nl1e) & l1_disallow_mask(d));
            return 0;
        }

        /* Fast path for identical mapping, r/w and presence. */
        if ( !l1e_has_changed(ol1e, nl1e, _PAGE_RW | _PAGE_PRESENT) )
        {
            adjust_guest_l1e(nl1e, d);
            return UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr);
        }

        if ( unlikely(!get_page_from_l1e(nl1e, FOREIGNDOM)) )
            return 0;

        adjust_guest_l1e(nl1e, d);
        if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr)) )
        {
            put_page_from_l1e(nl1e, d);
            return 0;
        }
    }
    else
    {
        if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr)) )
            return 0;
    }

    put_page_from_l1e(ol1e, d);
    return 1;
}

/* Update the L2 entry at pl2e to new value nl2e. pl2e is within frame pfn. */
static int mod_l2_entry(l2_pgentry_t *pl2e,
                        l2_pgentry_t nl2e,
                        unsigned long pfn,
                        unsigned long type)
{
    l2_pgentry_t ol2e;
    struct vcpu *curr = current;
    struct domain *d = curr->domain;

    if ( unlikely(!is_guest_l2_slot(d, type, pgentry_ptr_to_slot(pl2e))) )
    {
        MEM_LOG("Illegal L2 update attempt in Xen-private area %p", pl2e);
        return 0;
    }

    if ( unlikely(__copy_from_user(&ol2e, pl2e, sizeof(ol2e)) != 0) )
        return 0;

    if ( l2e_get_flags(nl2e) & _PAGE_PRESENT )
    {
        if ( unlikely(l2e_get_flags(nl2e) & L2_DISALLOW_MASK) )
        {
            MEM_LOG("Bad L2 flags %x",
                    l2e_get_flags(nl2e) & L2_DISALLOW_MASK);
            return 0;
        }

        /* Fast path for identical mapping and presence. */
        if ( !l2e_has_changed(ol2e, nl2e, _PAGE_PRESENT) )
        {
            adjust_guest_l2e(nl2e, d);
            return UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr);
        }

        if ( unlikely(!get_page_from_l2e(nl2e, pfn, d)) )
            return 0;

        adjust_guest_l2e(nl2e, d);
        if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr)) )
        {
            put_page_from_l2e(nl2e, pfn);
            return 0;
        }
    }
    else if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, curr)) )
    {
        return 0;
    }

    put_page_from_l2e(ol2e, pfn);
    return 1;
}
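/*
 * Illustrative sketch, not part of mm.c: mod_l1_entry() and mod_l2_entry()
 * above share one ordering rule -- reference the new target frame before
 * publishing the entry, drop the old frame's reference only after the
 * write has succeeded, and undo the new reference if the write failed, so
 * a mapped frame is never left without a reference. The fragment below
 * restates that rule over a hypothetical refcounted handle; every demo_*
 * name is an assumption for illustration.
 */
#include <limits.h>
#include <stdbool.h>

struct demo_frame { int refs; };

static bool demo_get(struct demo_frame *f)
{
    if ( f->refs == INT_MAX )       /* refuse on overflow, cf. get_page() */
        return false;
    f->refs++;
    return true;
}

static void demo_put(struct demo_frame *f)
{
    f->refs--;
}

/* Stand-in for UPDATE_ENTRY(); unlike the real thing it cannot fail. */
static bool demo_write_entry(struct demo_frame **slot, struct demo_frame *new)
{
    *slot = new;
    return true;
}

static bool demo_mod_entry(struct demo_frame **slot, struct demo_frame *new)
{
    struct demo_frame *old = *slot;

    if ( !demo_get(new) )                /* 1: reference the new frame */
        return false;
    if ( !demo_write_entry(slot, new) )  /* 2: publish the new entry */
    {
        demo_put(new);                   /*    write failed: undo step 1 */
        return false;
    }
    demo_put(old);                       /* 3: old frame is now unmapped */
    return true;
}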
#if CONFIG_PAGING_LEVELS >= 3

/* Update the L3 entry at pl3e to new value nl3e. pl3e is within frame pfn. */
static int mod_l3_entry(l3_pgentry_t *pl3e,
                        l3_pgentry_t nl3e,
                        unsigned long pfn)
{
    l3_pgentry_t ol3e;
    struct vcpu *curr = current;
    struct domain *d = curr->domain;
    int okay;

    if ( unlikely(!is_guest_l3_slot(pgentry_ptr_to_slot(pl3e))) )
    {
        MEM_LOG("Illegal L3 update attempt in Xen-private area %p", pl3e);
        return 0;
    }

#if defined(CONFIG_X86_PAE) || defined(CONFIG_COMPAT)
    /*
     * Disallow updates to final L3 slot. It contains Xen mappings, and it
     * would be a pain to ensure they remain continuously valid throughout.
     */
    if ( is_pv_32bit_domain(d) && (pgentry_ptr_to_slot(pl3e) >= 3) )
        return 0;
#endif

    if ( unlikely(__copy_from_user(&ol3e, pl3e, sizeof(ol3e)) != 0) )
        return 0;

    if ( l3e_get_flags(nl3e) & _PAGE_PRESENT )
    {
        if ( unlikely(l3e_get_flags(nl3e) & l3_disallow_mask(d)) )
        {
            MEM_LOG("Bad L3 flags %x",
                    l3e_get_flags(nl3e) & l3_disallow_mask(d));
            return 0;
        }

        /* Fast path for identical mapping and presence. */
        if ( !l3e_has_changed(ol3e, nl3e, _PAGE_PRESENT) )
        {
            adjust_guest_l3e(nl3e, d);
            return UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr);
        }

        if ( unlikely(!get_page_from_l3e(nl3e, pfn, d)) )
            return 0;

        adjust_guest_l3e(nl3e, d);
        if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr)) )
        {
            put_page_from_l3e(nl3e, pfn);
            return 0;
        }
    }
    else if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e, pfn, curr)) )
    {
        return 0;
    }

    okay = create_pae_xen_mappings(d, pl3e);
    BUG_ON(!okay);

    pae_flush_pgd(pfn, pgentry_ptr_to_slot(pl3e), nl3e);

    put_page_from_l3e(ol3e, pfn);
    return 1;
}

#endif

#if CONFIG_PAGING_LEVELS >= 4

/* Update the L4 entry at pl4e to new value nl4e. pl4e is within frame pfn. */
static int mod_l4_entry(l4_pgentry_t *pl4e,
                        l4_pgentry_t nl4e,
                        unsigned long pfn)
{
    struct vcpu *curr = current;
    struct domain *d = curr->domain;
    l4_pgentry_t ol4e;

    if ( unlikely(!is_guest_l4_slot(d, pgentry_ptr_to_slot(pl4e))) )
    {
        MEM_LOG("Illegal L4 update attempt in Xen-private area %p", pl4e);
        return 0;
    }

    if ( unlikely(__copy_from_user(&ol4e, pl4e, sizeof(ol4e)) != 0) )
        return 0;

    if ( l4e_get_flags(nl4e) & _PAGE_PRESENT )
    {
        if ( unlikely(l4e_get_flags(nl4e) & L4_DISALLOW_MASK) )
        {
            MEM_LOG("Bad L4 flags %x",
                    l4e_get_flags(nl4e) & L4_DISALLOW_MASK);
            return 0;
        }

        /* Fast path for identical mapping and presence. */
        if ( !l4e_has_changed(ol4e, nl4e, _PAGE_PRESENT) )
        {
            adjust_guest_l4e(nl4e, d);
            return UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr);
        }

        if ( unlikely(!get_page_from_l4e(nl4e, pfn, d)) )
            return 0;

        adjust_guest_l4e(nl4e, d);
        if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr)) )
        {
            put_page_from_l4e(nl4e, pfn);
            return 0;
        }
    }
    else if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e, pfn, curr)) )
    {
        return 0;
    }

    put_page_from_l4e(ol4e, pfn);
    return 1;
}

#endif

void put_page(struct page_info *page)
{
    u32 nx, x, y = page->count_info;

    do {
        x  = y;
        nx = x - 1;
    }
    while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );

    if ( unlikely((nx & PGC_count_mask) == 0) )
    {
        cleanup_page_cacheattr(page);
        free_domheap_page(page);
    }
}

int get_page(struct page_info *page, struct domain *domain)
{
    u32 x, nx, y = page->count_info;
    u32 d, nd = page->u.inuse._domain;
    u32 _domain = pickle_domptr(domain);

    do {
        x  = y;
        nx = x + 1;
        d  = nd;
        if ( unlikely((x & PGC_count_mask) == 0) ||  /* Not allocated? */
             unlikely((nx & PGC_count_mask) == 0) || /* Count overflow? */
             unlikely(d != _domain) )                /* Wrong owner? */
        {
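/*
 * Illustrative sketch, not part of mm.c: put_page() above (and get_page(),
 * which the excerpt cuts off mid-body) adjust count_info with a lock-free
 * cmpxchg loop -- sample the word, compute the new value, and retry until
 * no other CPU has changed it in between. The fragment below shows the
 * same loop shape on a bare counter; DEMO_COUNT_MASK and demo_put_ref()
 * are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_COUNT_MASK 0x7fffffffu     /* low bits hold the refcount */

/* Returns true when the caller dropped the final reference. */
static bool demo_put_ref(uint32_t *count_info)
{
    uint32_t x, nx, y = *count_info;

    do {
        x  = y;
        nx = x - 1;
        /* Retry if another CPU changed the word since it was sampled. */
    } while ( (y = __sync_val_compare_and_swap(count_info, x, nx)) != x );

    return (nx & DEMO_COUNT_MASK) == 0; /* caller would free the page here */
}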