/*
 * mm.c — excerpt from the Xen hypervisor source tree (xen/arch/x86/mm.c).
 * Viewer metadata: C source, 2,038 lines total, this is page 1 of 5.
 */
l1e_write(fix_pae_highmem_pl1e-cpu, l1e_from_pfn(mfn, __PAGE_HYPERVISOR)); /* First check the previous high mapping can't be in the TLB. * (i.e. have we loaded CR3 since we last did this?) */ if ( unlikely(this_cpu(make_cr3_timestamp) == this_cpu(tlbflush_time)) ) flush_tlb_one_local(fix_to_virt(FIX_PAE_HIGHMEM_0 + cpu)); highmem_l3tab = (l3_pgentry_t *)fix_to_virt(FIX_PAE_HIGHMEM_0 + cpu); lowmem_l3tab = cache->table[cache->inuse_idx]; memcpy(lowmem_l3tab, highmem_l3tab, sizeof(cache->table[0])); l1e_write(fix_pae_highmem_pl1e-cpu, l1e_empty()); this_cpu(make_cr3_timestamp) = this_cpu(tlbflush_time); v->arch.cr3 = __pa(lowmem_l3tab); spin_unlock(&cache->lock);}#else /* !defined(__i386__) */void make_cr3(struct vcpu *v, unsigned long mfn){ v->arch.cr3 = mfn << PAGE_SHIFT;}#endif /* !defined(__i386__) */void write_ptbase(struct vcpu *v){ write_cr3(v->arch.cr3);}/* * Should be called after CR3 is updated. * * Uses values found in vcpu->arch.(guest_table and guest_table_user), and * for HVM guests, arch.monitor_table and hvm's guest CR3. * * Update ref counts to shadow tables appropriately. */void update_cr3(struct vcpu *v){ unsigned long cr3_mfn=0; if ( paging_mode_enabled(v->domain) ) { paging_update_cr3(v); return; }#if CONFIG_PAGING_LEVELS == 4 if ( !(v->arch.flags & TF_kernel_mode) ) cr3_mfn = pagetable_get_pfn(v->arch.guest_table_user); else#endif cr3_mfn = pagetable_get_pfn(v->arch.guest_table); make_cr3(v, cr3_mfn);}static void invalidate_shadow_ldt(struct vcpu *v){ int i; unsigned long pfn; struct page_info *page; if ( v->arch.shadow_ldt_mapcnt == 0 ) return; v->arch.shadow_ldt_mapcnt = 0; for ( i = 16; i < 32; i++ ) { pfn = l1e_get_pfn(v->arch.perdomain_ptes[i]); if ( pfn == 0 ) continue; l1e_write(&v->arch.perdomain_ptes[i], l1e_empty()); page = mfn_to_page(pfn); ASSERT_PAGE_IS_TYPE(page, PGT_seg_desc_page); ASSERT_PAGE_IS_DOMAIN(page, v->domain); put_page_and_type(page); } /* Dispose of the (now possibly invalid) mappings from the TLB. 
*/ if ( v == current ) queue_deferred_ops(v->domain, DOP_FLUSH_TLB | DOP_RELOAD_LDT); else flush_tlb_mask(v->domain->domain_dirty_cpumask);}static int alloc_segdesc_page(struct page_info *page){ struct desc_struct *descs; int i; descs = map_domain_page(page_to_mfn(page)); for ( i = 0; i < 512; i++ ) if ( unlikely(!check_descriptor(page_get_owner(page), &descs[i])) ) goto fail; unmap_domain_page(descs); return 1; fail: unmap_domain_page(descs); return 0;}/* Map shadow page at offset @off. */int map_ldt_shadow_page(unsigned int off){ struct vcpu *v = current; struct domain *d = v->domain; unsigned long gmfn, mfn; l1_pgentry_t l1e, nl1e; unsigned long gva = v->arch.guest_context.ldt_base + (off << PAGE_SHIFT); int okay; BUG_ON(unlikely(in_irq())); guest_get_eff_kern_l1e(v, gva, &l1e); if ( unlikely(!(l1e_get_flags(l1e) & _PAGE_PRESENT)) ) return 0; gmfn = l1e_get_pfn(l1e); mfn = gmfn_to_mfn(d, gmfn); if ( unlikely(!mfn_valid(mfn)) ) return 0; okay = get_page_and_type(mfn_to_page(mfn), d, PGT_seg_desc_page); if ( unlikely(!okay) ) return 0; nl1e = l1e_from_pfn(mfn, l1e_get_flags(l1e) | _PAGE_RW); l1e_write(&v->arch.perdomain_ptes[off + 16], nl1e); v->arch.shadow_ldt_mapcnt++; return 1;}static int get_page_from_pagenr(unsigned long page_nr, struct domain *d){ struct page_info *page = mfn_to_page(page_nr); if ( unlikely(!mfn_valid(page_nr)) || unlikely(!get_page(page, d)) ) { MEM_LOG("Could not get page ref for pfn %lx", page_nr); return 0; } return 1;}static int get_page_and_type_from_pagenr(unsigned long page_nr, unsigned long type, struct domain *d){ struct page_info *page = mfn_to_page(page_nr); if ( unlikely(!get_page_from_pagenr(page_nr, d)) ) return 0; if ( unlikely(!get_page_type(page, type)) ) { put_page(page); return 0; } return 1;}/* * We allow root tables to map each other (a.k.a. linear page tables). It * needs some special care with reference counts and access permissions: * 1. 
The mapping entry must be read-only, or the guest may get write access * to its own PTEs. * 2. We must only bump the reference counts for an *already validated* * L2 table, or we can end up in a deadlock in get_page_type() by waiting * on a validation that is required to complete that validation. * 3. We only need to increment the reference counts for the mapped page * frame if it is mapped by a different root table. This is sufficient and * also necessary to allow validation of a root table mapping itself. */#define define_get_linear_pagetable(level) \static int \get_##level##_linear_pagetable( \ level##_pgentry_t pde, unsigned long pde_pfn, struct domain *d) \{ \ unsigned long x, y; \ struct page_info *page; \ unsigned long pfn; \ \ if ( (level##e_get_flags(pde) & _PAGE_RW) ) \ { \ MEM_LOG("Attempt to create linear p.t. with write perms"); \ return 0; \ } \ \ if ( (pfn = level##e_get_pfn(pde)) != pde_pfn ) \ { \ /* Make sure the mapped frame belongs to the correct domain. */ \ if ( unlikely(!get_page_from_pagenr(pfn, d)) ) \ return 0; \ \ /* \ * Ensure that the mapped frame is an already-validated page table. \ * If so, atomically increment the count (checking for overflow). 
\ */ \ page = mfn_to_page(pfn); \ y = page->u.inuse.type_info; \ do { \ x = y; \ if ( unlikely((x & PGT_count_mask) == PGT_count_mask) || \ unlikely((x & (PGT_type_mask|PGT_validated)) != \ (PGT_##level##_page_table|PGT_validated)) ) \ { \ put_page(page); \ return 0; \ } \ } \ while ( (y = cmpxchg(&page->u.inuse.type_info, x, x + 1)) != x ); \ } \ \ return 1; \}int is_iomem_page(unsigned long mfn){ return (!mfn_valid(mfn) || (page_get_owner(mfn_to_page(mfn)) == dom_io));}intget_page_from_l1e( l1_pgentry_t l1e, struct domain *d){ unsigned long mfn = l1e_get_pfn(l1e); struct page_info *page = mfn_to_page(mfn); uint32_t l1f = l1e_get_flags(l1e); struct vcpu *curr = current; struct domain *owner; int okay; if ( !(l1f & _PAGE_PRESENT) ) return 1; if ( unlikely(l1f & l1_disallow_mask(d)) ) { MEM_LOG("Bad L1 flags %x", l1f & l1_disallow_mask(d)); return 0; } if ( is_iomem_page(mfn) ) { /* DOMID_IO reverts to caller for privilege checks. */ if ( d == dom_io ) d = curr->domain; if ( !iomem_access_permitted(d, mfn, mfn) ) { if ( mfn != (PADDR_MASK >> PAGE_SHIFT) ) /* INVALID_MFN? */ MEM_LOG("Non-privileged (%u) attempt to map I/O space %08lx", d->domain_id, mfn); return 0; } return 1; } /* * Let privileged domains transfer the right to map their target * domain's pages. This is used to allow stub-domain pvfb export to dom0, * until pvfb supports granted mappings. At that time this minor hack * can go away. */ owner = page_get_owner(page); if ( unlikely(d != owner) && (owner != NULL) && (d != curr->domain) && IS_PRIV_FOR(d, owner) ) d = owner; /* Foreign mappings into guests in shadow external mode don't * contribute to writeable mapping refcounts. (This allows the * qemu-dm helper process in dom0 to map the domain's memory without * messing up the count of "real" writable mappings.) */ okay = (((l1f & _PAGE_RW) && !(unlikely(paging_mode_external(d) && (d != curr->domain)))) ? 
get_page_and_type(page, d, PGT_writable_page) : get_page(page, d)); if ( !okay ) { MEM_LOG("Error getting mfn %lx (pfn %lx) from L1 entry %" PRIpte " for dom%d", mfn, get_gpfn_from_mfn(mfn), l1e_get_intpte(l1e), d->domain_id); } else if ( pte_flags_to_cacheattr(l1f) != ((page->count_info >> PGC_cacheattr_base) & 7) ) { uint32_t x, nx, y = page->count_info; uint32_t cacheattr = pte_flags_to_cacheattr(l1f); if ( is_xen_heap_page(page) ) { if ( (l1f & _PAGE_RW) && !(unlikely(paging_mode_external(d) && (d != curr->domain))) ) put_page_type(page); put_page(page); MEM_LOG("Attempt to change cache attributes of Xen heap page"); return 0; } while ( ((y >> PGC_cacheattr_base) & 7) != cacheattr ) { x = y; nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base); y = cmpxchg(&page->count_info, x, nx); }#ifdef __x86_64__ map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1, PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));#endif } return okay;}/* NB. Virtual address 'l2e' maps to a machine address within frame 'pfn'. 
*/define_get_linear_pagetable(l2);static intget_page_from_l2e( l2_pgentry_t l2e, unsigned long pfn, struct domain *d){ int rc; if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ) return 1; if ( unlikely((l2e_get_flags(l2e) & L2_DISALLOW_MASK)) ) { MEM_LOG("Bad L2 flags %x", l2e_get_flags(l2e) & L2_DISALLOW_MASK); return 0; } rc = get_page_and_type_from_pagenr(l2e_get_pfn(l2e), PGT_l1_page_table, d); if ( unlikely(!rc) ) rc = get_l2_linear_pagetable(l2e, pfn, d); return rc;}#if CONFIG_PAGING_LEVELS >= 3define_get_linear_pagetable(l3);static intget_page_from_l3e( l3_pgentry_t l3e, unsigned long pfn, struct domain *d){ int rc; if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ) return 1; if ( unlikely((l3e_get_flags(l3e) & l3_disallow_mask(d))) ) { MEM_LOG("Bad L3 flags %x", l3e_get_flags(l3e) & l3_disallow_mask(d)); return 0; } rc = get_page_and_type_from_pagenr(l3e_get_pfn(l3e), PGT_l2_page_table, d); if ( unlikely(!rc) ) rc = get_l3_linear_pagetable(l3e, pfn, d); return rc;}#endif /* 3 level */#if CONFIG_PAGING_LEVELS >= 4define_get_linear_pagetable(l4);static intget_page_from_l4e( l4_pgentry_t l4e, unsigned long pfn, struct domain *d){ int rc; if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) ) return 1; if ( unlikely((l4e_get_flags(l4e) & L4_DISALLOW_MASK)) ) { MEM_LOG("Bad L4 flags %x", l4e_get_flags(l4e) & L4_DISALLOW_MASK); return 0; } rc = get_page_and_type_from_pagenr(l4e_get_pfn(l4e), PGT_l3_page_table, d); if ( unlikely(!rc) ) rc = get_l4_linear_pagetable(l4e, pfn, d); return rc;}#endif /* 4 level */
/* End of excerpt (page 1 of 5). Viewer shortcut-key help removed:
 * copy Ctrl+C, search Ctrl+F, fullscreen F11, font size Ctrl+= / Ctrl+-,
 * show shortcuts '?'. */