mm.c
From the "xen 3.2.2 source" · C code · 618 lines total · page 1 of 2
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2005, 2006
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Jimi Xenidis <jimix@watson.ibm.com>
 *          Ryan Harper <ryanh@us.ibm.com>
 */

#include <xen/config.h>
#include <xen/mm.h>
#include <xen/paging.h>
#include <xen/kernel.h>
#include <xen/sched.h>
#include <xen/perfc.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/platform.h>
#include <asm/string.h>
#include <public/arch-powerpc.h>

#ifdef VERBOSE
#define MEM_LOG(_f, _a...)                                \
    printk("DOM%u: (file=mm.c, line=%d) " _f "\n",        \
           current->domain->domain_id , __LINE__ , ## _a )
#else
#define MEM_LOG(_f, _a...) ((void)0)
#endif

/* Frame table and its size in pages. */
struct page_info *frame_table;
unsigned long max_page;
unsigned long total_pages;

/* Machine-to-phys mapping, used by all domains. */
unsigned long *machine_phys_mapping;

void __init init_frametable(void)
{
    unsigned long p;
    unsigned long nr_pages;
    int i;

    nr_pages = PFN_UP(max_page * sizeof(struct page_info));

    p = alloc_boot_pages(nr_pages, 1);
    if (p == 0)
        panic("Not enough memory for frame table\n");

    frame_table = (struct page_info *)(p << PAGE_SHIFT);
    for (i = 0; i < nr_pages; i += 1)
        clear_page((void *)((p + i) << PAGE_SHIFT));
}

/* Array of PFNs, indexed by MFN. */
void __init init_machine_to_phys_table(void)
{
    unsigned long p;
    unsigned long nr_pages;
    int i;

    nr_pages = PFN_UP(max_page * sizeof(unsigned long));

    p = alloc_boot_pages(nr_pages, 1);
    if (p == 0)
        panic("Not enough memory for machine phys mapping table\n");

    machine_phys_mapping = (unsigned long *)(p << PAGE_SHIFT);
    for (i = 0; i < nr_pages; i += 1)
        clear_page((void *)((p + i) << PAGE_SHIFT));
}

void share_xen_page_with_guest(
    struct page_info *page, struct domain *d, int readonly)
{
    if ( page_get_owner(page) == d )
        return;

    /* This causes us to leak pages in the domain and results in
     * zombie domains; I think we are missing a piece.  Until we find
     * it, the following code is disabled. */
    set_gpfn_from_mfn(page_to_mfn(page), INVALID_M2P_ENTRY);

    spin_lock(&d->page_alloc_lock);

    /* The incremented type count pins as writable or read-only. */
    page->u.inuse.type_info  = (readonly ? PGT_none : PGT_writable_page);
    page->u.inuse.type_info |= PGT_validated | 1;

    page_set_owner(page, d);
    wmb(); /* install valid domain ptr before updating refcnt. */
    ASSERT(page->count_info == 0);

    /* Only add to the allocation list if the domain isn't dying. */
    if ( !d->is_dying )
    {
        page->count_info |= PGC_allocated | 1;
        if ( unlikely(d->xenheap_pages++ == 0) )
            get_knownalive_domain(d);
        list_add_tail(&page->list, &d->xenpage_list);
    }

    spin_unlock(&d->page_alloc_lock);
}

void share_xen_page_with_privileged_guests(
    struct page_info *page, int readonly)
{
    unimplemented();
}

static ulong foreign_to_mfn(struct domain *d, ulong pfn)
{
    pfn -= 1UL << cpu_foreign_map_order();

    BUG_ON(pfn >= d->arch.foreign_mfn_count);

    return d->arch.foreign_mfns[pfn];
}

static int set_foreign(struct domain *d, ulong pfn, ulong mfn)
{
    pfn -= 1UL << cpu_foreign_map_order();

    BUG_ON(pfn >= d->arch.foreign_mfn_count);
    d->arch.foreign_mfns[pfn] = mfn;

    return 0;
}

static int create_grant_va_mapping(
    unsigned long va, unsigned long frame, struct vcpu *v)
{
    if (v->domain->domain_id != 0) {
        printk("only Dom0 can map a grant entry\n");
        BUG();
        return GNTST_permission_denied;
    }
    set_foreign(v->domain, va >> PAGE_SHIFT, frame);
    return GNTST_okay;
}

static int destroy_grant_va_mapping(
    unsigned long addr, unsigned long frame, struct domain *d)
{
    if (d->domain_id != 0) {
        printk("only Dom0 can map a grant entry\n");
        BUG();
        return GNTST_permission_denied;
    }
    set_foreign(d, addr >> PAGE_SHIFT, ~0UL);
    return GNTST_okay;
}

int create_grant_host_mapping(
    unsigned long addr, unsigned long frame, unsigned int flags,
    unsigned int cache_flags)
{
    if (flags & GNTMAP_application_map) {
        printk("%s: GNTMAP_application_map not supported\n", __func__);
        BUG();
        return GNTST_general_error;
    }
    if (flags & GNTMAP_contains_pte) {
        printk("%s: GNTMAP_contains_pte not supported\n", __func__);
        BUG();
        return GNTST_general_error;
    }
    if (cache_flags) {
        printk("%s: cache_flags not supported\n", __func__);
        BUG();
        return GNTST_general_error;
    }
    return create_grant_va_mapping(addr, frame, current);
}

int replace_grant_host_mapping(
    unsigned long addr, unsigned long frame, unsigned long new_addr,
    unsigned int flags)
{
    if (new_addr) {
        printk("%s: new_addr not supported\n", __func__);
        BUG();
        return GNTST_general_error;
    }

    if (flags & GNTMAP_contains_pte) {
        printk("%s: GNTMAP_contains_pte not supported\n", __func__);
        BUG();
        return GNTST_general_error;
    }

    /* may have to force the removal here */
    return destroy_grant_va_mapping(addr, frame, current->domain);
}

int steal_page(struct domain *d, struct page_info *page, unsigned int memflags)
{
    panic("%s called\n", __func__);
    return 1;
}

void put_page_type(struct page_info *page)
{
    unsigned long nx, x, y = page->u.inuse.type_info;

    do {
        x  = y;
        nx = x - 1;

        ASSERT((x & PGT_count_mask) != 0);

        /*
         * The page should always be validated while a reference is held. The
         * exception is during domain destruction, when we forcibly invalidate
         * page-table pages if we detect a referential loop.
         * See domain.c:relinquish_list().
         */
        ASSERT((x & PGT_validated) || page_get_owner(page)->is_dying);

        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            /* Record TLB information for flush later. */
            page->tlbflush_timestamp = tlbflush_current_time();
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
}

int get_page_type(struct page_info *page, unsigned long type)
{
    unsigned long nx, x, y = page->u.inuse.type_info;

    ASSERT(!(type & ~PGT_type_mask));

 again:
    do {
        x  = y;
        nx = x + 1;
        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
            return 0;
        }
        else if ( unlikely((x & PGT_count_mask) == 0) )
        {
            if ( (x & PGT_type_mask) != type )
            {
                /*
                 * On a type change we check whether to flush stale TLB
                 * entries.  This may be unnecessary (e.g., the page was
                 * GDT/LDT) but those circumstances should be very rare.
                 */
                cpumask_t mask = page_get_owner(page)->domain_dirty_cpumask;
                tlbflush_filter(mask, page->tlbflush_timestamp);

                if ( unlikely(!cpus_empty(mask)) )
                {
                    perfc_incr(need_flush_tlb_flush);
                    flush_tlb_mask(mask);
                }

                /* We lose the existing type, back pointer, and validity. */
                nx &= ~(PGT_type_mask | PGT_validated);
                nx |= type;

                /* No special validation needed for writable pages. */
                /* Page tables and GDT/LDT need to be scanned for validity. */
                if ( type == PGT_writable_page )
                    nx |= PGT_validated;
            }
        }
        else if ( unlikely((x & PGT_type_mask) != type) )
        {
            return 0;
        }
        else if ( unlikely(!(x & PGT_validated)) )
        {
            /* Someone else is updating validation of this page.  Wait... */
            while ( (y = page->u.inuse.type_info) == x )
                cpu_relax();
            goto again;
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );

    if ( unlikely(!(nx & PGT_validated)) )
    {
        /* No one else is updating simultaneously. */
        __set_bit(_PGT_validated, &page->u.inuse.type_info);
    }

    return 1;
}
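The sketch below is not part of mm.c; it is a minimal illustration of how callers elsewhere in Xen typically pair the type-count primitives above. The function name pin_frame_writable and its 0/1 return convention are hypothetical, and it assumes the caller already holds a general reference on the page (e.g., obtained via get_page()).

/* Illustrative sketch only -- not from mm.c.  pin_frame_writable() is a
 * hypothetical caller showing the usual get_page_type()/put_page_type()
 * pairing around a use of the frame. */
static int pin_frame_writable(struct page_info *page)
{
    /* Take a type reference, pinning the frame as PGT_writable_page.
     * This fails if the page currently holds a different, still
     * referenced type, or if validation fails. */
    if ( !get_page_type(page, PGT_writable_page) )
        return 0;

    /* ... the frame may safely be treated as writable memory here ... */

    /* Drop the type reference.  When the count reaches zero,
     * put_page_type() records a TLB-flush timestamp so that a later
     * type change can flush only CPUs with possibly stale entries. */
    put_page_type(page);
    return 1;
}

The 0/1 return mirrors the failure/success convention of get_page_type() itself.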