mmu.c

From the Linux kernel source code · C code · 1,499 lines total · page 1 of 3

C
1,499
Font size
	/*
	 * NOTE(review): this excerpt begins mid-function.  The lines below are
	 * the tail of mmu_page_add_parent_pte(), whose signature lies before
	 * this chunk; body kept verbatim.  It records @parent_pte as a parent
	 * of a shadow page, inline for the first parent and via a pte-chain
	 * list once the page is multiply mapped.
	 */
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!page->multimapped) {
		u64 *old = page->parent_pte;

		if (!old) {
			/* First parent: store it inline, no chain needed. */
			page->parent_pte = parent_pte;
			return;
		}
		/*
		 * Second parent: promote from the single inline pointer to a
		 * chain, moving the old pointer into the first chain slot.
		 */
		page->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&page->parent_ptes);
		hlist_add_head(&pte_chain->link, &page->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	/* Reuse a free slot in an existing chain element if one exists. */
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;	/* element full (slots fill front-to-back) */
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	/* All chain elements full: prepend a fresh one. */
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &page->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

/*
 * Remove @parent_pte from @page's parent list: the inverse of
 * mmu_page_add_parent_pte().  BUGs if the pte is not actually a parent.
 */
static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!page->multimapped) {
		/* Single inline parent: must be the one being removed. */
		BUG_ON(page->parent_pte != parent_pte);
		page->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;	/* slots are packed; NULL ends element */
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			/* Shift later entries down to keep the slots packed. */
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
				&& pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				/* Element now empty: free it, and if the whole
				 * chain is gone, drop back to inline mode. */
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&page->parent_ptes)) {
					page->multimapped = 0;
					page->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();	/* parent_pte was not found in the chain */
}

/*
 * Find a non-metaphysical shadow page for @gfn in the mmu page hash.
 * Returns NULL if no such page is currently shadowed.
 */
static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
						gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: found role %x\n",
				 __FUNCTION__, page->role.word);
			return page;
		}
	return NULL;
}

/*
 * Get the shadow page for (@gfn, role), where the role is built from
 * @level, @metaphysical, @hugepage_access and the current root level.
 * An existing matching page just gains @parent_pte as a parent; otherwise
 * a new page is allocated, hashed, and (if not metaphysical) the guest
 * frame is write-protected so guest pte writes can be intercepted.
 * Returns NULL if allocation fails.
 */
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned hugepage_access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.hugepage_access = hugepage_access;
	if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
		/*
		 * 32-bit guest tables have fewer index bits per level than the
		 * 64-bit shadow (PT32_PT_BITS vs PT64_PT_BITS), so one guest
		 * table maps to several shadow pages; the quadrant extracted
		 * from @gaddr disambiguates which one this is.
		 */
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && page->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, page, parent_pte);
			pgprintk("%s: found\n", __FUNCTION__);
			return page;
		}
	page = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!page)
		return page;
	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
	page->gfn = gfn;
	page->role = role;
	hlist_add_head(&page->hash_link, bucket);
	if (!metaphysical)
		rmap_write_protect(vcpu, gfn);
	return page;
}

/*
 * Clear every pte in @page's shadow table.  Leaf tables drop their rmap
 * entries; non-leaf tables unlink themselves as a parent of each child
 * shadow page.  Remote TLBs are flushed afterwards in both cases.
 */
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *page)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = page->spt;

	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (pt[i] & PT_PRESENT_MASK)
				rmap_remove(&pt[i]);
			pt[i] = 0;
		}
		kvm_flush_remote_tlbs(kvm);
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		pt[i] = 0;
		if (!(ent & PT_PRESENT_MASK))
			continue;
		ent &= PT64_BASE_ADDR_MASK;
		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
	}
	kvm_flush_remote_tlbs(kvm);
}

/* Detach one parent pte from @page (thin wrapper kept for symmetry). */
static void kvm_mmu_put_page(struct kvm_mmu_page *page,
			     u64 *parent_pte)
{
	mmu_page_remove_parent_pte(page, parent_pte);
}

/*
 * Tear down shadow page @page: detach and zero every parent pte, unlink
 * all children, then either free the page or — if it is still in use as
 * a root (root_count != 0) — keep it on the active list for later reuse.
 */
static void kvm_mmu_zap_page(struct kvm *kvm,
			     struct kvm_mmu_page *page)
{
	u64 *parent_pte;

	while (page->multimapped || page->parent_pte) {
		if (!page->multimapped)
			parent_pte = page->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(page->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(page, parent_pte);
		set_shadow_pte(parent_pte, 0);
	}
	kvm_mmu_page_unlink_children(kvm, page);
	if (!page->root_count) {
		hlist_del(&page->hash_link);
		kvm_mmu_free_page(kvm, page);
	} else
		list_move(&page->link, &kvm->active_mmu_pages);
}

/*
 * Zap every non-metaphysical shadow page for @gfn, removing the write
 * protection on that guest frame.  Returns 1 if anything was zapped.
 */
static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	/* _safe variant: kvm_mmu_zap_page() may unhash the current entry. */
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
				 page->role.word);
			kvm_mmu_zap_page(vcpu->kvm, page);
			r = 1;
		}
	return r;
}

/* Zap shadow pages for @gfn until none remain. */
static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mmu_page *page;

	while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n",
			 __FUNCTION__, gfn, page->role.word);
		kvm_mmu_zap_page(vcpu->kvm, page);
	}
}

/*
 * Mark, in the shadow page containing @pte, the memory slot that @gpa
 * belongs to (slot_bitmap is consulted when a slot goes away).
 */
static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
	struct kvm_mmu_page *page_head = page_header(__pa(pte));

	__set_bit(slot, &page_head->slot_bitmap);
}

/*
 * Like gpa_to_hpa() but never returns an error hpa: unmapped gpas are
 * redirected to the shared bad page, preserving the page offset.
 */
hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	hpa_t hpa = gpa_to_hpa(vcpu, gpa);

	return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK): hpa;
}

/*
 * Translate a guest physical address to a host physical address, or
 * return @gpa tagged with HPA_ERR_MASK if the frame is not mapped.
 */
hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	struct page *page;

	ASSERT((gpa & HPA_ERR_MASK) == 0);
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	if (!page)
		return gpa | HPA_ERR_MASK;
	return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
		| (gpa & (PAGE_SIZE-1));
}

/*
 * Guest virtual -> host physical, via the mmu's gva_to_gpa callback.
 * Returns UNMAPPED_GVA when the guest mapping does not exist.
 */
hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return UNMAPPED_GVA;
	return gpa_to_hpa(vcpu, gpa);
}

/* Guest virtual -> host struct page, or NULL if the gva is unmapped. */
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;
	return pfn_to_page(gpa_to_hpa(vcpu, gpa) >> PAGE_SHIFT);
}

/* Nothing to do on cr3 switch when the guest has paging disabled. */
static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

/*
 * Install a writable identity-style mapping v -> p in the shadow tables
 * for a non-paging guest, allocating intermediate (metaphysical) shadow
 * pages as needed.  Returns 0 on success, -ENOMEM on allocation failure.
 */
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
{
	int level = PT32E_ROOT_LEVEL;
	hpa_t table_addr = vcpu->mmu.root_hpa;

	/* Walk down one level per iteration until the pte level is reached. */
	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;
		u64 pte;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			pte = table[index];
			if (is_present_pte(pte) && is_writeble_pte(pte))
				return 0;	/* already mapped writable */
			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
			page_header_update_slot(vcpu->kvm, table, v);
			table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
								PT_USER_MASK;
			rmap_add(vcpu, &table[index]);
			return 0;
		}

		if (table[index] == 0) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			/* metaphysical page: keyed by a pseudo gfn derived
			 * from the directory-aligned address. */
			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, 0, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				return -ENOMEM;
			}

			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}

/*
 * Drop the vcpu's references on its shadow root page(s) and invalidate
 * root_hpa.  Handles both the single 64-bit root and the four PAE roots.
 */
static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *page;

	if (!VALID_PAGE(vcpu->mmu.root_hpa))
		return;
#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		page = page_header(root);
		--page->root_count;
		vcpu->mmu.root_hpa = INVALID_PAGE;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			page = page_header(root);
			--page->root_count;
		}
		vcpu->mmu.pae_root[i] = INVALID_PAGE;
	}
	vcpu->mmu.root_hpa = INVALID_PAGE;
}

/*
 * Allocate (or find) the shadow root page(s) for the current guest cr3
 * and pin them via root_count.  64-bit mode uses a single root; PAE uses
 * four pae_root entries, taking root gfns from the cached pdptrs.
 */
static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *page;

	root_gfn = vcpu->cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		page = kvm_mmu_get_page(vcpu, root_gfn, 0,
					PT64_ROOT_LEVEL, 0, 0, NULL);
		root = __pa(page->spt);
		++page->root_count;
		vcpu->mmu.root_hpa = root;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->pdptrs[i])) {
				/* guest pdpte not present: no root here */
				vcpu->mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->mmu.root_level == 0)
			root_gfn = 0;
		/* metaphysical root when the guest is not paging */
		page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
					PT32_ROOT_LEVEL, !is_paging(vcpu),
					0, NULL);
		root = __pa(page->spt);
		++page->root_count;
		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
}

/* With guest paging off, guest virtual == guest physical. */
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

/*
 * Page-fault handler for non-paging guests: top up the per-vcpu memory
 * caches, translate the faulting address and map it.  Returns 1 when the
 * gpa has no backing frame (fault must be handled elsewhere / reflected).
 */
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
			       u32 error_code)
{
	gpa_t addr = gva;
	hpa_t paddr;
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));

	paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK);

	if (is_error_hpa(paddr))
		return 1;

	return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

/* Wire up the mmu context callbacks for a non-paging guest. */
static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

/* cr3 switch with paging on: discard the old shadow roots. */
static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

/*
 * Instantiate the guest page-table walker template twice, once for
 * 64-bit/PAE guest ptes and once for 32-bit guest ptes; paging_tmpl.h
 * generates pagingNN_page_fault / pagingNN_gva_to_gpa from PTTYPE.
 */
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

/* Shared context setup for PAE and long-mode paging guests. */
static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

/* NOTE(review): truncated here — the body continues past this excerpt. */
static int paging64_init_context(struct kvm_vcpu *vcpu)
{

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?