multi.c
    }
    ASSERT(mfn_valid(gw->l2mfn));
    /* This mfn is a pagetable: make sure the guest can't write to it. */
    if ( shadow_op && sh_remove_write_access(v, gw->l2mfn, 2, va) != 0 )
        flush_tlb_mask(d->domain_dirty_cpumask);
    /* Get the l2e */
    l2p = sh_map_domain_page(gw->l2mfn);
    gw->l2e = l2p[guest_l2_table_offset(va)];

#else /* 32-bit only... */

    /* Get l2e from the top level table */
    gw->l2mfn = pagetable_get_mfn(v->arch.guest_table);
    l2p = ((guest_l2e_t *)v->arch.paging.shadow.guest_vtable);
    gw->l2e = l2p[guest_l2_table_offset(va)];

#endif /* All levels... */

    gflags = guest_l2e_get_flags(gw->l2e) ^ _PAGE_NX_BIT;
    rc |= ((gflags & mflags) ^ mflags);
    if ( rc & _PAGE_PRESENT )
        goto out;

    pse = (guest_supports_superpages(v) &&
           (guest_l2e_get_flags(gw->l2e) & _PAGE_PSE));

    if ( pse )
    {
        /* Special case: this guest VA is in a PSE superpage, so there's
         * no guest l1e.  We make one up so that the propagation code
         * can generate a shadow l1 table.  Start with the gfn of the
         * first 4k-page of the superpage. */
        gfn_t start = guest_l2e_get_gfn(gw->l2e);
        /* Grant full access in the l1e, since all the guest entry's
         * access controls are enforced in the shadow l2e. */
        int flags = (_PAGE_PRESENT|_PAGE_USER|_PAGE_RW|
                     _PAGE_ACCESSED|_PAGE_DIRTY);
        /* PSE level 2 entries use bit 12 for PAT; propagate it to bit 7
         * of the level 1. */
        if ( (guest_l2e_get_flags(gw->l2e) & _PAGE_PSE_PAT) )
            flags |= _PAGE_PAT;
        /* Copy the cache-control bits to the l1 as well, because we
         * can't represent PAT in the (non-PSE) shadow l2e. :(
         * This could cause problems if a guest ever maps an area of
         * memory with superpages using more than one caching mode. */
        flags |= guest_l2e_get_flags(gw->l2e) & (_PAGE_PWT|_PAGE_PCD);
        /* Increment the pfn by the right number of 4k pages.
         * The ~0x1 is to mask out the PAT bit mentioned above. */
        start = _gfn((gfn_x(start) & ~0x1) + guest_l1_table_offset(va));
        gw->l1e = guest_l1e_from_gfn(start, flags);
        gw->l1mfn = _mfn(INVALID_MFN);
    }
    else
    {
        /* Not a superpage: carry on and find the l1e. */
        gw->l1mfn = gfn_to_mfn(d, guest_l2e_get_gfn(gw->l2e), &p2mt);
        if ( !p2m_is_ram(p2mt) )
        {
            rc |= _PAGE_PRESENT;
            goto out;
        }
        ASSERT(mfn_valid(gw->l1mfn));
        /* This mfn is a pagetable: make sure the guest can't write to it. */
        if ( shadow_op && sh_remove_write_access(v, gw->l1mfn, 1, va) != 0 )
            flush_tlb_mask(d->domain_dirty_cpumask);
        l1p = sh_map_domain_page(gw->l1mfn);
        gw->l1e = l1p[guest_l1_table_offset(va)];
        gflags = guest_l1e_get_flags(gw->l1e) ^ _PAGE_NX_BIT;
        rc |= ((gflags & mflags) ^ mflags);
    }
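    /* In the PSE case above, for example (illustrative values, not from
     * the original source): a PSE l2e whose gfn field is 0x12401 has the
     * PAT bit set (bit 0 of the gfn, i.e. bit 12 of the entry), so the
     * ~0x1 mask yields a base gfn of 0x12400; a va whose l1 offset is
     * 0x31 then gets a synthetic l1e for gfn 0x12400 + 0x31 = 0x12431,
     * with _PAGE_PAT set in its flags. */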
    /* Go back and set accessed and dirty bits only if the walk was a
     * success.  Although the PRMs say higher-level _PAGE_ACCESSED bits
     * get set whenever a lower-level PT is used, at least some hardware
     * walkers behave this way. */
    if ( rc == 0 )
    {
#if GUEST_PAGING_LEVELS == 4 /* 64-bit only... */
        if ( set_ad_bits(l4p + guest_l4_table_offset(va), &gw->l4e, 0) )
            paging_mark_dirty(d, mfn_x(gw->l4mfn));
        if ( set_ad_bits(l3p + guest_l3_table_offset(va), &gw->l3e, 0) )
            paging_mark_dirty(d, mfn_x(gw->l3mfn));
#endif
        if ( set_ad_bits(l2p + guest_l2_table_offset(va), &gw->l2e,
                         (pse && (pfec & PFEC_write_access))) )
            paging_mark_dirty(d, mfn_x(gw->l2mfn));
        if ( !pse )
        {
            if ( set_ad_bits(l1p + guest_l1_table_offset(va), &gw->l1e,
                             (pfec & PFEC_write_access)) )
                paging_mark_dirty(d, mfn_x(gw->l1mfn));
        }
    }

 out:
#if GUEST_PAGING_LEVELS == 4
    if ( l3p ) sh_unmap_domain_page(l3p);
#endif
#if GUEST_PAGING_LEVELS >= 3
    if ( l2p ) sh_unmap_domain_page(l2p);
#endif
    if ( l1p ) sh_unmap_domain_page(l1p);

    return rc;
}

/* Given a walk_t, translate the gw->va into the guest's notion of the
 * corresponding frame number. */
static inline gfn_t
guest_walk_to_gfn(walk_t *gw)
{
    if ( !(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT) )
        return _gfn(INVALID_GFN);
    return guest_l1e_get_gfn(gw->l1e);
}

/* Given a walk_t, translate the gw->va into the guest's notion of the
 * corresponding physical address. */
static inline paddr_t
guest_walk_to_gpa(walk_t *gw)
{
    if ( !(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT) )
        return 0;
    return guest_l1e_get_paddr(gw->l1e) + (gw->va & ~PAGE_MASK);
}
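/* For example (illustrative values): if the walked l1e maps frame 0x1a2b3
 * and gw->va is 0xb8001234, guest_walk_to_gpa() returns
 * (0x1a2b3 << PAGE_SHIFT) + 0x234 = 0x1a2b3234, i.e. the frame's base
 * address plus the offset of va within its 4k page. */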
#if 0 /* Keep for debugging */
/* Pretty-print the contents of a guest-walk */
static inline void print_gw(walk_t *gw)
{
    SHADOW_PRINTK("GUEST WALK TO %#lx:\n", gw->va);
#if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
    SHADOW_PRINTK("   l4mfn=%" PRI_mfn "\n", mfn_x(gw->l4mfn));
    SHADOW_PRINTK("   l4e=%" SH_PRI_gpte "\n", gw->l4e.l4);
    SHADOW_PRINTK("   l3mfn=%" PRI_mfn "\n", mfn_x(gw->l3mfn));
#endif /* PAE or 64... */
    SHADOW_PRINTK("   l3e=%" SH_PRI_gpte "\n", gw->l3e.l3);
#endif /* All levels... */
    SHADOW_PRINTK("   l2mfn=%" PRI_mfn "\n", mfn_x(gw->l2mfn));
    SHADOW_PRINTK("   l2e=%" SH_PRI_gpte "\n", gw->l2e.l2);
    SHADOW_PRINTK("   l1mfn=%" PRI_mfn "\n", mfn_x(gw->l1mfn));
    SHADOW_PRINTK("   l1e=%" SH_PRI_gpte "\n", gw->l1e.l1);
}
#endif /* 0 */

#if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES
/* Lightweight audit: pass all the shadows associated with this guest walk
 * through the audit mechanisms */
static void sh_audit_gw(struct vcpu *v, walk_t *gw)
{
    mfn_t smfn;

    if ( !(SHADOW_AUDIT_ENABLE) )
        return;

#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
    if ( mfn_valid(gw->l4mfn)
         && mfn_valid((smfn = get_shadow_status(v, gw->l4mfn,
                                                SH_type_l4_shadow))) )
        (void) sh_audit_l4_table(v, smfn, _mfn(INVALID_MFN));
    if ( mfn_valid(gw->l3mfn)
         && mfn_valid((smfn = get_shadow_status(v, gw->l3mfn,
                                                SH_type_l3_shadow))) )
        (void) sh_audit_l3_table(v, smfn, _mfn(INVALID_MFN));
#endif /* PAE or 64... */
    if ( mfn_valid(gw->l2mfn) )
    {
        if ( mfn_valid((smfn = get_shadow_status(v, gw->l2mfn,
                                                 SH_type_l2_shadow))) )
            (void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
#if GUEST_PAGING_LEVELS == 3
        if ( mfn_valid((smfn = get_shadow_status(v, gw->l2mfn,
                                                 SH_type_l2h_shadow))) )
            (void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
#endif
    }
    if ( mfn_valid(gw->l1mfn)
         && mfn_valid((smfn = get_shadow_status(v, gw->l1mfn,
                                                SH_type_l1_shadow))) )
        (void) sh_audit_l1_table(v, smfn, _mfn(INVALID_MFN));
    else if ( (guest_l2e_get_flags(gw->l2e) & _PAGE_PRESENT)
              && (guest_l2e_get_flags(gw->l2e) & _PAGE_PSE)
              && mfn_valid(
              (smfn = get_fl1_shadow_status(v, guest_l2e_get_gfn(gw->l2e)))) )
        (void) sh_audit_fl1_table(v, smfn, _mfn(INVALID_MFN));
}

#else
#define sh_audit_gw(_v, _gw) do {} while(0)
#endif /* audit code */


#if (CONFIG_PAGING_LEVELS == GUEST_PAGING_LEVELS) && (CONFIG_PAGING_LEVELS == SHADOW_PAGING_LEVELS)
void *
sh_guest_map_l1e(struct vcpu *v, unsigned long addr,
                 unsigned long *gl1mfn)
{
    void *pl1e = NULL;
    walk_t gw;

    ASSERT(shadow_mode_translate(v->domain));

    // XXX -- this is expensive, but it's easy to cobble together...
    // FIXME!

    shadow_lock(v->domain);
    if ( guest_walk_tables(v, addr, &gw, PFEC_page_present, 1) == 0
         && mfn_valid(gw.l1mfn) )
    {
        if ( gl1mfn )
            *gl1mfn = mfn_x(gw.l1mfn);
        pl1e = map_domain_page(mfn_x(gw.l1mfn)) +
            (guest_l1_table_offset(addr) * sizeof(guest_l1e_t));
    }
    shadow_unlock(v->domain);

    return pl1e;
}

void
sh_guest_get_eff_l1e(struct vcpu *v, unsigned long addr, void *eff_l1e)
{
    walk_t gw;

    ASSERT(shadow_mode_translate(v->domain));

    // XXX -- this is expensive, but it's easy to cobble together...
    // FIXME!

    shadow_lock(v->domain);
    (void) guest_walk_tables(v, addr, &gw, PFEC_page_present, 1);
    *(guest_l1e_t *)eff_l1e = gw.l1e;
    shadow_unlock(v->domain);
}
#endif /* CONFIG==SHADOW==GUEST */


/**************************************************************************/
/* Functions to compute the correct index into a shadow page, given an
 * index into the guest page (as returned by guest_index()).
 * This is trivial when the shadow and guest use the same sized PTEs, but
 * gets more interesting when those sizes are mismatched (e.g. 32-bit guest,
 * PAE- or 64-bit shadows).
 *
 * These functions also increment the shadow mfn, when necessary.  When PTE
 * sizes are mismatched, it takes 2 shadow L1 pages for a single guest L1
 * page.  In this case, we allocate 2 contiguous pages for the shadow L1, and
 * use simple pointer arithmetic on a pointer to the guest L1e to figure out
 * which shadow page we really want.  Similarly, when PTE sizes are
 * mismatched, we shadow a guest L2 page with 4 shadow L2 pages.  (The easiest
 * way to see this is: a 32-bit guest L2 page maps 4GB of virtual address
 * space, while a PAE- or 64-bit shadow L2 page maps 1GB of virtual address
 * space.)
 *
 * For PAE guests, for every 32-bytes of guest L3 page table, we use 64-bytes
 * of shadow (to store both the shadow, and the info that would normally be
 * stored in page_info fields).  This arrangement allows the shadow and the
 * "page_info" fields to always be stored in the same page (in fact, in
 * the same cache line), avoiding an extra call to map_domain_page().
 */
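/* Worked example (illustrative, 32-bit guest with PAE/64-bit shadows):
 * a guest L1 page holds 1024 4-byte entries but a shadow L1 page only 512
 * 8-byte ones, so guest L1 index 612 lands in the second of the two
 * contiguous shadow L1 pages, at shadow index 612 % 512 = 100.  Each guest
 * L2 entry (mapping 4MB) needs two shadow L2 entries (2MB each), so guest
 * L2 index 300 lands in the second of the four shadow L2 pages, at shadow
 * index (300 % 256) * 2 = 88. */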
static inline u32
guest_index(void *ptr)
{
    return (u32)((unsigned long)ptr & ~PAGE_MASK) / sizeof(guest_l1e_t);
}

static u32
shadow_l1_index(mfn_t *smfn, u32 guest_index)
{
#if (GUEST_PAGING_LEVELS == 2) && (SHADOW_PAGING_LEVELS != 2)
    *smfn = _mfn(mfn_x(*smfn) +
                 (guest_index / SHADOW_L1_PAGETABLE_ENTRIES));
    return (guest_index % SHADOW_L1_PAGETABLE_ENTRIES);
#else
    return guest_index;
#endif
}

static u32
shadow_l2_index(mfn_t *smfn, u32 guest_index)
{
#if (GUEST_PAGING_LEVELS == 2) && (SHADOW_PAGING_LEVELS != 2)
    // Because we use 2 shadow l2 entries for each guest entry, the number of
    // guest entries per shadow page is SHADOW_L2_PAGETABLE_ENTRIES/2
    //
    *smfn = _mfn(mfn_x(*smfn) +
                 (guest_index / (SHADOW_L2_PAGETABLE_ENTRIES / 2)));

    // We multiply by two to get the index of the first of the two entries
    // used to shadow the specified guest entry.
    return (guest_index % (SHADOW_L2_PAGETABLE_ENTRIES / 2)) * 2;
#else
    return guest_index;
#endif
}

#if GUEST_PAGING_LEVELS >= 4

static u32
shadow_l3_index(mfn_t *smfn, u32 guest_index)
{
    return guest_index;
}

static u32
shadow_l4_index(mfn_t *smfn, u32 guest_index)
{
    return guest_index;
}

#endif // GUEST_PAGING_LEVELS >= 4

extern u32 get_pat_flags(struct vcpu *v,
                         u32 gl1e_flags,
                         paddr_t gpaddr,
                         paddr_t spaddr);

unsigned char pat_type_2_pte_flags(unsigned char pat_type);


/**************************************************************************/
/* Function which computes shadow entries from their corresponding guest
 * entries.  This is the "heart" of the shadow code.  It operates using
 * level-1 shadow types, but handles all levels of entry.
 * Don't call it directly, but use the four wrappers below. */

static always_inline void
_sh_propagate(struct vcpu *v,
              guest_intpte_t guest_intpte,
              mfn_t target_mfn,
              void *shadow_entry_ptr,
              int level,
              fetch_type_t ft,
              p2m_type_t p2mt)
{
    guest_l1e_t guest_entry = { guest_intpte };
    shadow_l1e_t *sp = shadow_entry_ptr;
    struct domain *d = v->domain;
    gfn_t target_gfn = guest_l1e_get_gfn(guest_entry);
    u32 pass_thru_flags;
    u32 gflags, sflags;
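    /* The incoming guest_intpte is wrapped in a guest_l1e_t so that the
     * same l1e accessors can be applied whichever level is being
     * propagated (as the comment above notes, the routine operates using
     * level-1 types but handles all levels); target_gfn is the guest
     * frame number that the entry refers to. */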