📄 multi.c
static int shadow_set_l4e(struct vcpu *v,
                          shadow_l4e_t *sl4e,
                          shadow_l4e_t new_sl4e,
                          mfn_t sl4mfn)
{
    int flags = 0, ok;
    shadow_l4e_t old_sl4e;
    paddr_t paddr;
    ASSERT(sl4e != NULL);
    old_sl4e = *sl4e;

    if ( old_sl4e.l4 == new_sl4e.l4 ) return 0; /* Nothing to do */

    paddr = ((((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT)
             | (((unsigned long)sl4e) & ~PAGE_MASK));

    if ( shadow_l4e_get_flags(new_sl4e) & _PAGE_PRESENT )
    {
        /* About to install a new reference */
        mfn_t sl3mfn = shadow_l4e_get_mfn(new_sl4e);
        ok = sh_get_ref(v, sl3mfn, paddr);
        /* Are we pinning l3 shadows to handle weird Linux behaviour? */
        if ( sh_type_is_pinnable(v, SH_type_l3_64_shadow) )
            ok |= sh_pin(v, sl3mfn);
        if ( !ok )
        {
            domain_crash(v->domain);
            return SHADOW_SET_ERROR;
        }
    }

    /* Write the new entry */
    shadow_write_entries(sl4e, &new_sl4e, 1, sl4mfn);
    flags |= SHADOW_SET_CHANGED;

    if ( shadow_l4e_get_flags(old_sl4e) & _PAGE_PRESENT )
    {
        /* We lost a reference to an old mfn. */
        mfn_t osl3mfn = shadow_l4e_get_mfn(old_sl4e);
        if ( (mfn_x(osl3mfn) != mfn_x(shadow_l4e_get_mfn(new_sl4e)))
             || !perms_strictly_increased(shadow_l4e_get_flags(old_sl4e),
                                          shadow_l4e_get_flags(new_sl4e)) )
        {
            flags |= SHADOW_SET_FLUSH;
        }
        sh_put_ref(v, osl3mfn, paddr);
    }
    return flags;
}

static int shadow_set_l3e(struct vcpu *v,
                          shadow_l3e_t *sl3e,
                          shadow_l3e_t new_sl3e,
                          mfn_t sl3mfn)
{
    int flags = 0;
    shadow_l3e_t old_sl3e;
    paddr_t paddr;
    ASSERT(sl3e != NULL);
    old_sl3e = *sl3e;

    if ( old_sl3e.l3 == new_sl3e.l3 ) return 0; /* Nothing to do */

    paddr = ((((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT)
             | (((unsigned long)sl3e) & ~PAGE_MASK));

    if ( shadow_l3e_get_flags(new_sl3e) & _PAGE_PRESENT )
        /* About to install a new reference */
        if ( !sh_get_ref(v, shadow_l3e_get_mfn(new_sl3e), paddr) )
        {
            domain_crash(v->domain);
            return SHADOW_SET_ERROR;
        }

    /* Write the new entry */
    shadow_write_entries(sl3e, &new_sl3e, 1, sl3mfn);
    flags |= SHADOW_SET_CHANGED;

    if ( shadow_l3e_get_flags(old_sl3e) & _PAGE_PRESENT )
    {
        /* We lost a reference to an old mfn. */
        mfn_t osl2mfn = shadow_l3e_get_mfn(old_sl3e);
        if ( (mfn_x(osl2mfn) != mfn_x(shadow_l3e_get_mfn(new_sl3e)))
             || !perms_strictly_increased(shadow_l3e_get_flags(old_sl3e),
                                          shadow_l3e_get_flags(new_sl3e)) )
        {
            flags |= SHADOW_SET_FLUSH;
        }
        sh_put_ref(v, osl2mfn, paddr);
    }
    return flags;
}
#endif /* GUEST_PAGING_LEVELS >= 4 */
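/* Illustrative sketch, not part of the original file: the "paddr" passed
 * to sh_get_ref()/sh_put_ref() above is not the address of any data.  It
 * packs the location of the referencing entry: the shadow table's MFN in
 * the high bits and the entry's byte offset within that page in the low
 * bits.  A hypothetical helper (sh_decode_up_pointer is not a real Xen
 * function) that decodes it would look like this: */
static inline void sh_decode_up_pointer(paddr_t paddr,
                                        mfn_t *smfn, unsigned int *offset)
{
    *smfn = _mfn(paddr >> PAGE_SHIFT);  /* which shadow page holds the entry */
    *offset = paddr & ~PAGE_MASK;       /* where the entry sits in that page */
}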
static int shadow_set_l2e(struct vcpu *v,
                          shadow_l2e_t *sl2e,
                          shadow_l2e_t new_sl2e,
                          mfn_t sl2mfn)
{
    int flags = 0;
    shadow_l2e_t old_sl2e;
    paddr_t paddr;

#if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
    /* In 2-on-3 we work with pairs of l2es pointing at two-page
     * shadows.  Reference counting and up-pointers track from the first
     * page of the shadow to the first l2e, so make sure that we're
     * working with those:
     * Align the pointer down so it's pointing at the first of the pair */
    sl2e = (shadow_l2e_t *)((unsigned long)sl2e & ~(sizeof(shadow_l2e_t)));
    /* Align the mfn of the shadow entry too */
    new_sl2e.l2 &= ~(1<<PAGE_SHIFT);
#endif

    ASSERT(sl2e != NULL);
    old_sl2e = *sl2e;

    if ( old_sl2e.l2 == new_sl2e.l2 ) return 0; /* Nothing to do */

    paddr = ((((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT)
             | (((unsigned long)sl2e) & ~PAGE_MASK));

    if ( shadow_l2e_get_flags(new_sl2e) & _PAGE_PRESENT )
        /* About to install a new reference */
        if ( !sh_get_ref(v, shadow_l2e_get_mfn(new_sl2e), paddr) )
        {
            domain_crash(v->domain);
            return SHADOW_SET_ERROR;
        }

    /* Write the new entry */
#if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
    {
        shadow_l2e_t pair[2] = { new_sl2e, new_sl2e };
        /* The l1 shadow is two pages long and needs to be pointed to by
         * two adjacent l2es.  The pair have the same flags, but point
         * at odd and even MFNs */
        ASSERT(!(pair[0].l2 & (1<<PAGE_SHIFT)));
        pair[1].l2 |= (1<<PAGE_SHIFT);
        shadow_write_entries(sl2e, &pair, 2, sl2mfn);
    }
#else /* normal case */
    shadow_write_entries(sl2e, &new_sl2e, 1, sl2mfn);
#endif
    flags |= SHADOW_SET_CHANGED;

    if ( shadow_l2e_get_flags(old_sl2e) & _PAGE_PRESENT )
    {
        /* We lost a reference to an old mfn. */
        mfn_t osl1mfn = shadow_l2e_get_mfn(old_sl2e);
        if ( (mfn_x(osl1mfn) != mfn_x(shadow_l2e_get_mfn(new_sl2e)))
             || !perms_strictly_increased(shadow_l2e_get_flags(old_sl2e),
                                          shadow_l2e_get_flags(new_sl2e)) )
        {
            flags |= SHADOW_SET_FLUSH;
        }
        sh_put_ref(v, osl1mfn, paddr);
    }
    return flags;
}

static int shadow_set_l1e(struct vcpu *v,
                          shadow_l1e_t *sl1e,
                          shadow_l1e_t new_sl1e,
                          mfn_t sl1mfn)
{
    int flags = 0;
    struct domain *d = v->domain;
    shadow_l1e_t old_sl1e;
    ASSERT(sl1e != NULL);

    old_sl1e = *sl1e;

    if ( old_sl1e.l1 == new_sl1e.l1 ) return 0; /* Nothing to do */

    if ( (shadow_l1e_get_flags(new_sl1e) & _PAGE_PRESENT)
         && !sh_l1e_is_magic(new_sl1e) )
    {
        /* About to install a new reference */
        if ( shadow_mode_refcounts(d) )
        {
            if ( shadow_get_page_from_l1e(new_sl1e, d) == 0 )
            {
                /* Doesn't look like a pagetable. */
                flags |= SHADOW_SET_ERROR;
                new_sl1e = shadow_l1e_empty();
            }
        }
    }

    /* Write the new entry */
    shadow_write_entries(sl1e, &new_sl1e, 1, sl1mfn);
    flags |= SHADOW_SET_CHANGED;

    if ( (shadow_l1e_get_flags(old_sl1e) & _PAGE_PRESENT)
         && !sh_l1e_is_magic(old_sl1e) )
    {
        /* We lost a reference to an old mfn. */
        /* N.B. Unlike higher-level sets, never need an extra flush
         * when writing an l1e.  Because it points to the same guest frame
         * as the guest l1e did, it's the guest's responsibility to
         * trigger a flush later. */
        if ( shadow_mode_refcounts(d) )
        {
            shadow_put_page_from_l1e(old_sl1e, d);
        }
    }
    return flags;
}
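/* Illustrative sketch, not part of the original file: how a caller might
 * consume the SHADOW_SET_* flags returned by the shadow_set_lNe()
 * functions above.  flush_tlb_mask() and domain_dirty_cpumask follow the
 * Xen idiom of this era; treat the exact spelling as an assumption, and
 * example_install_sl2e() is a hypothetical name. */
static void example_install_sl2e(struct vcpu *v, shadow_l2e_t *sl2e,
                                 shadow_l2e_t new_sl2e, mfn_t sl2mfn)
{
    int r = shadow_set_l2e(v, sl2e, new_sl2e, sl2mfn);
    if ( r & SHADOW_SET_ERROR )
        return; /* shadow_set_l2e() already called domain_crash() */
    if ( r & SHADOW_SET_FLUSH )
        /* The old entry was present and the new one does not strictly
         * widen its permissions, so stale translations may be cached. */
        flush_tlb_mask(v->domain->domain_dirty_cpumask);
}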
/**************************************************************************/
/* Macros to walk pagetables.  These take the shadow of a pagetable and
 * walk every "interesting" entry.  That is, they don't touch Xen mappings,
 * and for 32-bit l2s shadowed onto PAE or 64-bit, they only touch every
 * second entry (since pairs of entries are managed together).  For multi-page
 * shadows they walk all pages.
 *
 * Arguments are an MFN, the variable to point to each entry, a variable
 * to indicate that we are done (we will shortcut to the end of the scan
 * when _done != 0), a variable to indicate that we should avoid Xen mappings,
 * and the code.
 *
 * WARNING: These macros have side-effects.  They change the values of both
 * the pointer and the MFN. */

static inline void increment_ptr_to_guest_entry(void *ptr)
{
    if ( ptr )
    {
        guest_l1e_t **entry = ptr;
        (*entry)++;
    }
}

/* All kinds of l1: touch all entries */
#define _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)        \
do {                                                                    \
    int _i;                                                             \
    shadow_l1e_t *_sp = map_shadow_page((_sl1mfn));                     \
    ASSERT(mfn_to_shadow_page(_sl1mfn)->type == SH_type_l1_shadow       \
           || mfn_to_shadow_page(_sl1mfn)->type == SH_type_fl1_shadow); \
    for ( _i = 0; _i < SHADOW_L1_PAGETABLE_ENTRIES; _i++ )              \
    {                                                                   \
        (_sl1e) = _sp + _i;                                             \
        if ( shadow_l1e_get_flags(*(_sl1e)) & _PAGE_PRESENT )           \
            {_code}                                                     \
        if ( _done ) break;                                             \
        increment_ptr_to_guest_entry(_gl1p);                            \
    }                                                                   \
    unmap_shadow_page(_sp);                                             \
} while (0)

/* 32-bit l1, on PAE or 64-bit shadows: need to walk both pages of shadow */
#if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
#define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)         \
do {                                                                    \
    int __done = 0;                                                     \
    _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p,                          \
                        ({ (__done = _done); }), _code);                \
    _sl1mfn = _mfn(mfn_x(_sl1mfn) + 1);                                 \
    if ( !__done )                                                      \
        _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p,                      \
                            ({ (__done = _done); }), _code);            \
} while (0)
#else /* Everything else; l1 shadows are only one page */
#define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)         \
        _SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)
#endif
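/* Illustrative sketch, not part of the original file (modelled on how this
 * file's shadow-teardown code uses the walker): drop the reference held by
 * every present, non-magic entry of the l1 shadow at sl1mfn.  Passing 0
 * for _gl1p and _done means "no guest pointer to advance, never stop
 * early"; example_drain_l1_shadow() is a hypothetical name. */
static void example_drain_l1_shadow(struct vcpu *v, mfn_t sl1mfn)
{
    struct domain *d = v->domain;
    shadow_l1e_t *sl1e;
    SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, 0, {
        /* _code only runs for entries with _PAGE_PRESENT set */
        if ( !sh_l1e_is_magic(*sl1e) && shadow_mode_refcounts(d) )
            shadow_put_page_from_l1e(*sl1e, d);
    });
}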
#if GUEST_PAGING_LEVELS == 2 && SHADOW_PAGING_LEVELS > 2
/* 32-bit l2 on PAE/64: four pages, touch every second entry, and avoid Xen */
#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)        \
do {                                                                         \
    int _i, _j, __done = 0;                                                  \
    int _xen = !shadow_mode_external(_dom);                                  \
    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow);       \
    for ( _j = 0; _j < 4 && !__done; _j++ )                                  \
    {                                                                        \
        shadow_l2e_t *_sp = map_shadow_page(_sl2mfn);                        \
        for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i += 2 )            \
            if ( (!(_xen))                                                   \
                 || ((_j * SHADOW_L2_PAGETABLE_ENTRIES) + _i)                \
                    < (HYPERVISOR_VIRT_START >> SHADOW_L2_PAGETABLE_SHIFT) ) \
            {                                                                \
                (_sl2e) = _sp + _i;                                          \
                if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT )        \
                    {_code}                                                  \
                if ( (__done = (_done)) ) break;                             \
                increment_ptr_to_guest_entry(_gl2p);                         \
            }                                                                \
        unmap_shadow_page(_sp);                                              \
        _sl2mfn = _mfn(mfn_x(_sl2mfn) + 1);                                  \
    }                                                                        \
} while (0)

#elif GUEST_PAGING_LEVELS == 2
/* 32-bit on 32-bit: avoid Xen entries */
#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)        \
do {                                                                         \
    int _i;                                                                  \
    int _xen = !shadow_mode_external(_dom);                                  \
    shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                          \
    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_32_shadow);       \
    for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                   \
        if ( (!(_xen))                                                       \
             ||                                                              \
             (_i < (HYPERVISOR_VIRT_START >> SHADOW_L2_PAGETABLE_SHIFT)) )   \
        {                                                                    \
            (_sl2e) = _sp + _i;                                              \
            if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT )            \
                {_code}                                                      \
            if ( _done ) break;                                              \
            increment_ptr_to_guest_entry(_gl2p);                             \
        }                                                                    \
    unmap_shadow_page(_sp);                                                  \
} while (0)

#elif GUEST_PAGING_LEVELS == 3
/* PAE: if it's an l2h, don't touch Xen mappings */
#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code)        \
do {                                                                         \
    int _i;                                                                  \
    int _xen = !shadow_mode_external(_dom);                                  \
    shadow_l2e_t *_sp = map_shadow_page((_sl2mfn));                          \
    ASSERT(mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2_pae_shadow        \
           || mfn_to_shadow_page(_sl2mfn)->type == SH_type_l2h_pae_shadow);  \
    for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                   \
        if ( (!(_xen))                                                       \
             || mfn_to_shadow_page(_sl2mfn)->type != SH_type_l2h_pae_shadow  \
             || ((_i + (3 * SHADOW_L2_PAGETABLE_ENTRIES))                    \
                 < (HYPERVISOR_VIRT_START >> SHADOW_L2_PAGETABLE_SHIFT)) )   \
        {                                                                    \
            (_sl2e) = _sp + _i;                                              \
            if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT )            \
                {_code}                                                      \
            if ( _done ) break;                                              \
            increment_ptr_to_guest_entry(_gl2p);                             \
        }                                                                    \
    unmap_shadow_page(_sp);                                                  \
} while (0)
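/* Illustrative sketch, not part of the original file: walk an l2 shadow
 * and drop the reference each present entry holds on its l1 shadow, the
 * same pattern this file's destroy path uses.  Note the extra domain
 * argument, which the walker uses to decide whether Xen's own mappings
 * must be skipped; example_drain_l2_shadow() is a hypothetical name. */
static void example_drain_l2_shadow(struct vcpu *v, mfn_t sl2mfn)
{
    shadow_l2e_t *sl2e;
    SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
        /* Rebuild the same up-pointer encoding shadow_set_l2e() used */
        sh_put_ref(v, shadow_l2e_get_mfn(*sl2e),
                   (((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT)
                   | ((unsigned long)sl2e & ~PAGE_MASK));
    });
}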