
entry.s

Source package: Xen virtual machine source code
Page 1 of 2 (listing is truncated; it continues on the next page)
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 *
 * Calling back to a guest OS:
 * ===========================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find
 * out the privilege level of the return code-selector. That code
 * would just be a hassle to write, and would need to account for running
 * off the end of the GDT/LDT, for example. For all callbacks we check
 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from
 * that we're safe as we don't allow a guest OS to install ring-0 privileges
 * into the GDT/LDT. It's up to the guest OS to ensure all returns via the
 * IDT are to ring 1. If not, we load incorrect SS/ESP values from the TSS
 * (for ring 1 rather than the correct ring) and bad things are bound to
 * ensue -- IRET is likely to fault, and we may end up killing the domain
 * (no harm can come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *   return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, original values for DS/ES/FS/GS are placed on the stack rather
 * than in DS/ES/FS/GS themselves. Why? It saves us loading them, only to
 * have them saved/restored in the guest OS. Furthermore, if we load them we
 * may cause a fault if they are invalid, which is a hassle to deal with. We
 * avoid that problem if we don't load them :-) This property allows us to
 * use the failsafe callback as a fallback: if we ever fault on loading
 * DS/ES/FS/GS on return to ring != 0, we can simply package it up as a
 * return via the failsafe callback, and let the guest OS sort it out
 * (perhaps by killing an application process). Note that we also do this
 * for any faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - creating a callback stack frame (due to a bad ring-1 stack).
 *  - faulting IRET on entry to the failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
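
/*
 * [Editor's illustration -- not part of the original file.] A minimal C
 * sketch of the two frame layouts described above, fields in ascending
 * address order. Struct and field names are hypothetical; only the layout
 * comes from the comment:
 *
 *   struct event_frame {              // ordinary callback frame
 *       uint32_t eip, cs, eflags;     // always pushed
 *       uint32_t esp, ss;             // only for an inter-privilege return
 *   };
 *
 *   struct failsafe_frame {           // special failsafe callback frame
 *       uint32_t ds, es, fs, gs;      // original selectors live on the
 *       uint32_t eip, cs, eflags;     //   stack, not in DS/ES/FS/GS
 *       uint32_t esp, ss;             // only for an inter-privilege return
 *   };
 */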
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

#define GET_GUEST_REGS(reg)                     \
        movl $~(STACK_SIZE-1),reg;              \
        andl %esp,reg;                          \
        orl  $(STACK_SIZE-CPUINFO_sizeof),reg;

#define GET_CURRENT(reg)         \
        movl $STACK_SIZE-4, reg; \
        orl  %esp, reg;          \
        andl $~3,reg;            \
        movl (reg),reg;

        ALIGN
restore_all_guest:
        ASSERT_INTERRUPTS_DISABLED
        testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
        popl  %ebx
        popl  %ecx
        popl  %edx
        popl  %esi
        popl  %edi
        popl  %ebp
        popl  %eax
        leal  4(%esp),%esp
        jnz   .Lrestore_iret_guest
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        testb $2,UREGS_cs-UREGS_eip(%esp)
        jnz   .Lrestore_sregs_guest
        call  restore_ring0_guest
        jmp   .Lrestore_iret_guest
#endif
.Lrestore_sregs_guest:
.Lft1:  mov  UREGS_ds-UREGS_eip(%esp),%ds
.Lft2:  mov  UREGS_es-UREGS_eip(%esp),%es
.Lft3:  mov  UREGS_fs-UREGS_eip(%esp),%fs
.Lft4:  mov  UREGS_gs-UREGS_eip(%esp),%gs
.Lrestore_iret_guest:
.Lft5:  iret
.section .fixup,"ax"
.Lfx1:  sti
        SAVE_ALL_GPRS
        mov   UREGS_error_code(%esp),%esi
        pushfl                         # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                     # CS
        movl  $.Ldf1,%eax
        pushl %eax                     # EIP
        pushl %esi                     # error_code/entry_vector
        jmp   handle_exception
.Ldf1:  GET_CURRENT(%ebx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%ebx)
        leal  VCPU_trap_bounce(%ebx),%edx
        movl  VCPU_failsafe_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  VCPU_failsafe_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movb  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%ebx)
        jnc   1f
        orb   $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
1:      call  create_bounce_frame
        xorl  %eax,%eax
        movl  %eax,UREGS_ds(%esp)
        movl  %eax,UREGS_es(%esp)
        movl  %eax,UREGS_fs(%esp)
        movl  %eax,UREGS_gs(%esp)
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .long .Lft1,.Lfx1
        .long .Lft2,.Lfx1
        .long .Lft3,.Lfx1
        .long .Lft4,.Lfx1
        .long .Lft5,.Lfx1
.previous
.section __ex_table,"a"
        .long .Ldf1,failsafe_callback
.previous

        ALIGN
restore_all_xen:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
        iret
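
/*
 * [Editor's illustration -- not part of the original file.] What the
 * GET_CURRENT() macro above computes, rewritten in C. The pointer to the
 * current struct vcpu is kept in the last 32-bit word of the
 * STACK_SIZE-aligned per-CPU stack, so it can be recovered from %esp alone:
 *
 *   struct vcpu *get_current(unsigned long esp)
 *   {
 *       // (esp | (STACK_SIZE-4)) & ~3 points at the stack's last word.
 *       unsigned long last_word = (esp | (STACK_SIZE - 4)) & ~3UL;
 *       return *(struct vcpu **)last_word;     // final movl (reg),reg
 *   }
 */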
        ALIGN
ENTRY(hypercall)
        subl $4,%esp
        FIXUP_RING0_GUEST_STACK
        SAVE_ALL(1f,1f)
1:      sti
        GET_CURRENT(%ebx)
        cmpl  $NR_hypercalls,%eax
        jae   bad_hypercall
        PERFC_INCR(PERFC_hypercalls, %eax, %ebx)
#ifndef NDEBUG
        /* Create shadow parameters and corrupt those not used by this call. */
        pushl %eax
        pushl UREGS_eip+4(%esp)
        pushl 28(%esp) # EBP
        pushl 28(%esp) # EDI
        pushl 28(%esp) # ESI
        pushl 28(%esp) # EDX
        pushl 28(%esp) # ECX
        pushl 28(%esp) # EBX
        movzb hypercall_args_table(,%eax,1),%ecx
        leal  (%esp,%ecx,4),%edi
        subl  $6,%ecx
        negl  %ecx
        movl  %eax,%esi
        movl  $0xDEADBEEF,%eax
        rep   stosl
        movl  %esi,%eax
#define SHADOW_BYTES 32 /* 6 shadow parameters + EIP + hypercall # */
#else
        /*
         * We need shadow parameters even on non-debug builds. We depend on the
         * original versions not being clobbered (needed to create a hypercall
         * continuation). But that isn't guaranteed by the function-call ABI.
         */
        pushl 20(%esp) # EBP
        pushl 20(%esp) # EDI
        pushl 20(%esp) # ESI
        pushl 20(%esp) # EDX
        pushl 20(%esp) # ECX
        pushl 20(%esp) # EBX
#define SHADOW_BYTES 24 /* 6 shadow parameters */
#endif
        cmpb  $0,tb_init_done
        je    1f
        call  trace_hypercall
        /* Now restore all the registers that trace_hypercall clobbered */
        movl  UREGS_eax+SHADOW_BYTES(%esp),%eax /* Hypercall # */
#undef SHADOW_BYTES
1:      call *hypercall_table(,%eax,4)
        addl  $24,%esp     # Discard the shadow parameters
#ifndef NDEBUG
        /* Deliberately corrupt real parameter regs used by this hypercall. */
        popl  %ecx         # Shadow EIP
        cmpl  %ecx,UREGS_eip+4(%esp)
        popl  %ecx         # Shadow hypercall index
        jne   skip_clobber # If EIP has changed then don't clobber
        movzb hypercall_args_table(,%ecx,1),%ecx
        movl  %esp,%edi
        movl  %eax,%esi
        movl  $0xDEADBEEF,%eax
        rep   stosl
        movl  %esi,%eax
skip_clobber:
#endif
        movl %eax,UREGS_eax(%esp)       # save the return value
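
/*
 * [Editor's illustration -- not part of the original file.] The dispatch
 * above sketched in C, with descriptive names. Guest arguments arrive in
 * EBX/ECX/EDX/ESI/EDI/EBP; shadow copies are pushed so the handler, a
 * normal cdecl function, may clobber its stack arguments while the saved
 * originals in the UREGS frame stay intact for building a hypercall
 * continuation:
 *
 *   if (regs->eax >= NR_hypercalls)
 *       regs->eax = -ENOSYS;                  // bad_hypercall path
 *   else
 *       regs->eax = hypercall_table[regs->eax](regs->ebx, regs->ecx,
 *                                              regs->edx, regs->esi,
 *                                              regs->edi, regs->ebp);
 */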
test_all_events:
        xorl %ecx,%ecx
        notl %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl VCPU_processor(%ebx),%eax
        shl  $IRQSTAT_shift,%eax
        test %ecx,irq_stat(%eax,1)
        jnz  process_softirqs
        testb $1,VCPU_mce_pending(%ebx)
        jnz  process_mce
        testb $1,VCPU_nmi_pending(%ebx)
        jnz  process_nmi
test_guest_events:
        movl VCPU_vcpu_info(%ebx),%eax
        testb $0xFF,VCPUINFO_upcall_mask(%eax)
        jnz  restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%eax)
        jz   restore_all_guest
/*process_guest_events:*/
        sti
        leal VCPU_trap_bounce(%ebx),%edx
        movl VCPU_event_addr(%ebx),%eax
        movl %eax,TRAPBOUNCE_eip(%edx)
        movl VCPU_event_sel(%ebx),%eax
        movw %ax,TRAPBOUNCE_cs(%edx)
        movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call create_bounce_frame
        jmp  test_all_events

        ALIGN
process_softirqs:
        sti
        call do_softirq
        jmp  test_all_events

        ALIGN
/* %ebx: struct vcpu */
process_mce:
        cmpw $VCPU_TRAP_MCE,VCPU_trap_priority(%ebx)
        jae  test_guest_events
        sti
        movb $0,VCPU_mce_pending(%ebx)
        call set_guest_machinecheck_trapbounce
        test %eax,%eax
        jz   test_all_events
        movw VCPU_trap_priority(%ebx),%dx           # safe priority for the
        movw %dx,VCPU_old_trap_priority(%ebx)       # iret hypercall
        movw $VCPU_TRAP_MCE,VCPU_trap_priority(%ebx)
        jmp  process_trap

        ALIGN
/* %ebx: struct vcpu */
process_nmi:
        cmpw $VCPU_TRAP_NMI,VCPU_trap_priority(%ebx)
        jae  test_guest_events
        sti
        movb $0,VCPU_nmi_pending(%ebx)
        call set_guest_nmi_trapbounce
        test %eax,%eax
        jz   test_all_events
        movw VCPU_trap_priority(%ebx),%dx           # safe priority for the
        movw %dx,VCPU_old_trap_priority(%ebx)       # iret hypercall
        movw $VCPU_TRAP_NMI,VCPU_trap_priority(%ebx)
        /* FALLTHROUGH */
process_trap:
        leal VCPU_trap_bounce(%ebx),%edx
        call create_bounce_frame
        jmp  test_all_events

bad_hypercall:
        movl $-ENOSYS,UREGS_eax(%esp)
        jmp  test_all_events
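
/*
 * [Editor's illustration -- not part of the original file.] The guest-event
 * test above sketched in C, with descriptive names. An event upcall is
 * bounced into the guest only when one is pending and upcalls are not
 * masked:
 *
 *   if (!vcpu_info->upcall_mask && vcpu_info->upcall_pending) {
 *       tb->eip   = vcpu->event_addr;         // guest's event callback
 *       tb->cs    = vcpu->event_sel;
 *       tb->flags = TBF_INTERRUPT;            // delivery masks upcalls
 *       create_bounce_frame();
 *   } else {
 *       restore_all_guest();
 *   }
 */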
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                        */
/* %edx == trap_bounce, %ebx == struct vcpu                              */
/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp.     */
create_bounce_frame:
        ASSERT_INTERRUPTS_ENABLED
        movl UREGS_eflags+4(%esp),%ecx
        movb UREGS_cs+4(%esp),%cl
        testl $(2|X86_EFLAGS_VM),%ecx
        jz   ring1 /* jump if returning to an existing ring-1 activation */
        movl VCPU_kernel_sp(%ebx),%esi
.Lft6:  mov  VCPU_kernel_ss(%ebx),%gs
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz   .Lnvm86_1
        subl $16,%esi       /* push ES/DS/FS/GS (VM86 stack frame) */
        movl UREGS_es+4(%esp),%eax
.Lft7:  movl %eax,%gs:(%esi)
        movl UREGS_ds+4(%esp),%eax
.Lft8:  movl %eax,%gs:4(%esi)
        movl UREGS_fs+4(%esp),%eax
.Lft9:  movl %eax,%gs:8(%esi)
        movl UREGS_gs+4(%esp),%eax
.Lft10: movl %eax,%gs:12(%esi)
.Lnvm86_1:
        subl $8,%esi        /* push SS/ESP (inter-priv iret) */
        movl UREGS_esp+4(%esp),%eax
.Lft11: movl %eax,%gs:(%esi)
        movl UREGS_ss+4(%esp),%eax
.Lft12: movl %eax,%gs:4(%esi)
        jmp  1f
ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl UREGS_esp+4(%esp),%esi
.Lft13: mov  UREGS_ss+4(%esp),%gs
1:      /* Construct a stack frame: EFLAGS, CS/EIP */
        movb TRAPBOUNCE_flags(%edx),%cl
        subl $12,%esi
        movl UREGS_eip+4(%esp),%eax
.Lft14: movl %eax,%gs:(%esi)
        movl VCPU_vcpu_info(%ebx),%eax
        pushl VCPUINFO_upcall_mask(%eax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                        # TBF_INTERRUPT -> set upcall mask
        orb  %ch,VCPUINFO_upcall_mask(%eax)
        popl %eax
        shll $16,%eax                    # Bits 16-23: saved_upcall_mask
        movw UREGS_cs+4(%esp),%ax        # Bits  0-15: CS
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        testw $2,%ax
        jnz  .Lft15
        and  $~3,%ax                     # RPL 1 -> RPL 0
#endif
.Lft15: movl %eax,%gs:4(%esi)
        test $0x00FF0000,%eax            # Bits 16-23: saved_upcall_mask
        setz %ch                         # %ch == !saved_upcall_mask
        movl UREGS_eflags+4(%esp),%eax
        andl $~X86_EFLAGS_IF,%eax
        shlb $1,%ch                      # Bit 9 (EFLAGS.IF)
        orb  %ch,%ah                     # Fold EFLAGS.IF into %eax
.Lft16: movl %eax,%gs:8(%esi)
        test $TBF_EXCEPTION_ERRCODE,%cl
        jz   1f
        subl $4,%esi                     # push error_code onto guest frame
        movl TRAPBOUNCE_error_code(%edx),%eax
.Lft17: movl %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        jz   2f
        subl $16,%esi                # add DS/ES/FS/GS to failsafe stack frame
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz   .Lnvm86_2
        xorl %eax,%eax               # VM86: we write zero selector values
.Lft18: movl %eax,%gs:(%esi)
.Lft19: movl %eax,%gs:4(%esi)
.Lft20: movl %eax,%gs:8(%esi)
.Lft21: movl %eax,%gs:12(%esi)
        jmp  2f
.Lnvm86_2:
        movl UREGS_ds+4(%esp),%eax   # non-VM86: write real selector values
.Lft22: movl %eax,%gs:(%esi)
        movl UREGS_es+4(%esp),%eax
.Lft23: movl %eax,%gs:4(%esi)
        movl UREGS_fs+4(%esp),%eax
.Lft24: movl %eax,%gs:8(%esi)
        movl UREGS_gs+4(%esp),%eax
.Lft25: movl %eax,%gs:12(%esi)
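
/*
 * [Editor's illustration -- not part of the original file.] How the CS and
 * EFLAGS slots written above are assembled, sketched in C. The guest's
 * virtual interrupt flag is its event upcall mask, so the pre-bounce mask
 * is stashed in bits 16-23 of the CS slot and reflected into the IF bit of
 * the pushed EFLAGS:
 *
 *   uint8_t saved_mask = vcpu_info->upcall_mask;
 *   if (tb->flags & TBF_INTERRUPT)
 *       vcpu_info->upcall_mask |= 1;          // delivery masks upcalls
 *   uint32_t cs_slot     = (saved_mask << 16) | regs->cs;
 *   uint32_t eflags_slot = (regs->eflags & ~X86_EFLAGS_IF)
 *                        | (saved_mask ? 0 : X86_EFLAGS_IF);
 */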
