mm.c
        {
            struct page_info *page = mfn_to_page(m2p_start_mfn + i);
            share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
        }
    }

    for ( v  = RDWR_COMPAT_MPT_VIRT_START;
          v != RDWR_COMPAT_MPT_VIRT_END;
          v += 1 << L2_PAGETABLE_SHIFT )
    {
        l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[
            l3_table_offset(v)];
        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
            continue;
        l2e = l3e_to_l2e(l3e)[l2_table_offset(v)];
        if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
            continue;
        m2p_start_mfn = l2e_get_pfn(l2e);

        for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
        {
            struct page_info *page = mfn_to_page(m2p_start_mfn + i);
            share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
        }
    }
}

long subarch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    struct xen_machphys_mfn_list xmml;
    l3_pgentry_t l3e;
    l2_pgentry_t l2e;
    unsigned long v;
    xen_pfn_t mfn;
    unsigned int i;
    long rc = 0;

    switch ( op )
    {
    case XENMEM_machphys_mfn_list:
        if ( copy_from_guest(&xmml, arg, 1) )
            return -EFAULT;

        for ( i = 0, v = RDWR_MPT_VIRT_START;
              (i != xmml.max_extents) && (v != RDWR_MPT_VIRT_END);
              i++, v += 1 << 21 )
        {
            l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[
                l3_table_offset(v)];
            if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
                break;
            l2e = l3e_to_l2e(l3e)[l2_table_offset(v)];
            if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
                break;
            mfn = l2e_get_pfn(l2e) + l1_table_offset(v);
            if ( copy_to_guest_offset(xmml.extent_start, i, &mfn, 1) )
                return -EFAULT;
        }

        xmml.nr_extents = i;
        if ( copy_to_guest(arg, &xmml, 1) )
            return -EFAULT;

        break;

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}

long do_stack_switch(unsigned long ss, unsigned long esp)
{
    fixup_guest_stack_selector(current->domain, ss);
    current->arch.guest_context.kernel_ss = ss;
    current->arch.guest_context.kernel_sp = esp;
    return 0;
}

long do_set_segment_base(unsigned int which, unsigned long base)
{
    struct vcpu *v = current;
    long ret = 0;

    switch ( which )
    {
    case SEGBASE_FS:
        if ( wrmsr_safe(MSR_FS_BASE, base, base>>32) )
            ret = -EFAULT;
        else
            v->arch.guest_context.fs_base = base;
        break;

    case SEGBASE_GS_USER:
        if ( wrmsr_safe(MSR_SHADOW_GS_BASE, base, base>>32) )
            ret = -EFAULT;
        else
            v->arch.guest_context.gs_base_user = base;
        break;

    case SEGBASE_GS_KERNEL:
        if ( wrmsr_safe(MSR_GS_BASE, base, base>>32) )
            ret = -EFAULT;
        else
            v->arch.guest_context.gs_base_kernel = base;
        break;

    case SEGBASE_GS_USER_SEL:
        __asm__ __volatile__ (
            "     swapgs              \n"
            "1:   movl %k0,%%gs       \n"
            "    "safe_swapgs"        \n"
            ".section .fixup,\"ax\"   \n"
            "2:   xorl %k0,%k0        \n"
            "     jmp  1b             \n"
            ".previous                \n"
            ".section __ex_table,\"a\"\n"
            "    .align 8             \n"
            "    .quad 1b,2b          \n"
            ".previous                "
            : : "r" (base&0xffff) );
        break;

    default:
        ret = -EINVAL;
        break;
    }

    return ret;
}


/* Returns TRUE if given descriptor is valid for GDT or LDT. */
int check_descriptor(const struct domain *dom, struct desc_struct *d)
{
    u32 a = d->a, b = d->b;
    u16 cs;
    unsigned int dpl;

    /* A not-present descriptor will always fault, so is safe. */
    if ( !(b & _SEGMENT_P) )
        goto good;

    /* Check and fix up the DPL. */
    dpl = (b >> 13) & 3;
    __fixup_guest_selector(dom, dpl);
    b = (b & ~_SEGMENT_DPL) | (dpl << 13);

    /* All code and data segments are okay. No base/limit checking. */
    if ( (b & _SEGMENT_S) )
    {
        if ( is_pv_32bit_domain(dom) )
        {
            unsigned long base, limit;

            if ( b & _SEGMENT_L )
                goto bad;

            /*
             * Older PAE Linux guests use segments which are limited to
             * 0xf6800000. Extend these to allow access to the larger
             * read-only M2P table available in 32on64 mode.
             */
            base = (b & (0xff << 24)) | ((b & 0xff) << 16) | (a >> 16);

            limit = (b & 0xf0000) | (a & 0xffff);
            limit++; /* We add one because limit is inclusive. */

            if ( (b & _SEGMENT_G) )
                limit <<= 12;

            if ( (base == 0) && (limit > HYPERVISOR_COMPAT_VIRT_START(dom)) )
            {
                a |= 0x0000ffff;
                b |= 0x000f0000;
            }
        }

        goto good;
    }

    /* Invalid type 0 is harmless. It is used for 2nd half of a call gate. */
    if ( (b & _SEGMENT_TYPE) == 0x000 )
        goto good;

    /* Everything but a call gate is discarded here. */
    if ( (b & _SEGMENT_TYPE) != 0xc00 )
        goto bad;

    /* Validate the target code selector. */
    cs = a >> 16;
    if ( !guest_gate_selector_okay(dom, cs) )
        goto bad;

    /*
     * Force DPL to zero, causing a GP fault with its error code indicating
     * the gate in use, allowing emulation. This is necessary because with
     * native guests (kernel in ring 3) call gates cannot be used directly
     * to transition from user to kernel mode (and whether a gate is used
     * to enter the kernel can only be determined when the gate is being
     * used), and with compat guests call gates cannot be used at all as
     * there are only 64-bit ones.
     * Store the original DPL in the selector's RPL field.
     */
    b &= ~_SEGMENT_DPL;
    cs = (cs & ~3) | dpl;
    a = (a & 0xffffU) | (cs << 16);

    /* Reserved bits must be zero. */
    if ( b & (is_pv_32bit_domain(dom) ? 0xe0 : 0xff) )
        goto bad;

 good:
    d->a = a;
    d->b = b;
    return 1;
 bad:
    return 0;
}

void domain_set_alloc_bitsize(struct domain *d)
{
    if ( !is_pv_32on64_domain(d) ||
         (MACH2PHYS_COMPAT_NR_ENTRIES(d) >= max_page) ||
         d->arch.physaddr_bitsize > 0 )
        return;
    d->arch.physaddr_bitsize =
        /* 2^n entries can be contained in guest's p2m mapping space */
        fls(MACH2PHYS_COMPAT_NR_ENTRIES(d)) - 1
        /* 2^n pages -> 2^(n+PAGE_SHIFT) bits */
        + PAGE_SHIFT;
}

unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits)
{
    if ( (d == NULL) || (d->arch.physaddr_bitsize == 0) )
        return bits;
    return min(d->arch.physaddr_bitsize, bits);
}

#include "compat/mm.c"

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
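How subarch_memory_op()'s XENMEM_machphys_mfn_list case gets exercised: a PV guest passes a xen_machphys_mfn_list through the memory_op hypercall and reads back one MFN per 2MB slot of the read-only M2P table. Below is a minimal guest-kernel sketch, assuming the Linux-style Xen interface headers and hypercall wrappers; the 32-entry buffer size and the function name query_m2p_mfns are arbitrary choices for illustration.

/*
 * Hedged guest-side sketch of XENMEM_machphys_mfn_list.
 * Assumes a Linux PV guest with the usual Xen interface headers.
 */
#include <linux/kernel.h>
#include <xen/interface/memory.h>   /* struct xen_machphys_mfn_list    */
#include <asm/xen/hypercall.h>      /* HYPERVISOR_memory_op()          */

static int query_m2p_mfns(void)
{
    xen_pfn_t mfns[32];                  /* illustrative buffer size   */
    struct xen_machphys_mfn_list xmml = {
        .max_extents = ARRAY_SIZE(mfns),
    };
    unsigned int i;
    int rc;

    set_xen_guest_handle(xmml.extent_start, mfns);

    /* Lands in subarch_memory_op() above inside the hypervisor. */
    rc = HYPERVISOR_memory_op(XENMEM_machphys_mfn_list, &xmml);
    if ( rc )
        return rc;

    /* nr_extents = number of 2MB M2P slots the hypervisor filled in. */
    for ( i = 0; i < xmml.nr_extents; i++ )
        pr_info("M2P slot %u backed by MFN %#lx\n",
                i, (unsigned long)mfns[i]);
    return 0;
}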
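do_stack_switch() and do_set_segment_base() back the stack_switch and set_segment_base hypercalls. A 64-bit PV guest kernel runs deprivileged, so it cannot execute WRMSR on MSR_FS_BASE itself and must ask Xen. A one-function guest-side sketch, assuming the Linux-style HYPERVISOR_set_segment_base() wrapper and SEGBASE_* constants; pv_set_fs_base is a made-up name for illustration.

/* Hedged sketch: route an FS base update through the hypervisor. */
#include <asm/xen/hypercall.h>      /* HYPERVISOR_set_segment_base()   */
#include <asm/xen/interface_64.h>   /* SEGBASE_FS                      */

static int pv_set_fs_base(unsigned long base)
{
    /* Handled by the SEGBASE_FS case above; a non-canonical base makes
     * wrmsr_safe() fault in Xen and returns -EFAULT to us. */
    return HYPERVISOR_set_segment_base(SEGBASE_FS, base);
}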
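The base/limit assembly in check_descriptor() is easier to follow with concrete numbers. The standalone program below decodes two descriptor words the same way the 32on64 branch does, reproducing the 0xf6800000 ceiling from the "older PAE Linux guests" comment; the flag bits chosen for word b are assumptions describing an ordinary present, DPL-3, page-granular data segment.

/* Standalone worked example of check_descriptor()'s base/limit math. */
#include <stdio.h>

#define SEG_G 0x00800000u  /* granularity bit, _SEGMENT_G's position in word b */

int main(void)
{
    /*
     * Descriptor words for a flat data segment: base 0, limit field
     * 0xf67ff (assumed flags: present, DPL 3, writable data, D/B=1, G=1).
     */
    unsigned int a = 0x000067ff;   /* base[15:0]=0, limit[15:0]=0x67ff  */
    unsigned int b = 0x00cff200;   /* flags, limit[19:16]=0xf           */
    unsigned long base, limit;

    base  = (b & (0xffu << 24)) | ((b & 0xffu) << 16) | (a >> 16);
    limit = (b & 0xf0000u) | (a & 0xffffu);
    limit++;                       /* the stored limit is inclusive     */
    if ( b & SEG_G )
        limit <<= 12;              /* page granularity -> byte count    */

    /* Prints base=0 limit=0xf6800000, the old PAE Linux ceiling. */
    printf("base=%#lx limit=%#lx\n", base, limit);
    return 0;
}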
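Finally, the arithmetic in domain_set_alloc_bitsize(): the number of M2P entries a compat guest can map bounds the machine addresses its pages may occupy. A standalone sketch of the computation follows; the 2^20-entry capacity is an assumed figure, and fls_demo() is a portable stand-in for the hypervisor's fls() helper.

/* Standalone worked example of the physaddr_bitsize computation. */
#include <stdio.h>

#define PAGE_SHIFT 12

/* 1-based index of the highest set bit (stand-in for Xen's fls()). */
static int fls_demo(unsigned long x)
{
    int r = 0;
    while ( x != 0 ) { r++; x >>= 1; }
    return r;
}

int main(void)
{
    /* Assume the guest's p2m mapping space holds 2^20 entries. */
    unsigned long nr_entries = 1UL << 20;

    /* 2^20 pages -> 20 bits of page number -> 32 bits of address. */
    int bitsize = fls_demo(nr_entries) - 1 + PAGE_SHIFT;

    printf("physaddr_bitsize = %d\n", bitsize);   /* prints 32 */
    return 0;
}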