
📄 entry.s

📁 Xen virtual machine source package
📖 Page 1 of 2
/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2005, K A Fraser
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

#define GET_GUEST_REGS(reg)                     \
        movq $~(STACK_SIZE-1),reg;              \
        andq %rsp,reg;                          \
        orq  $(STACK_SIZE-CPUINFO_sizeof),reg;

#define GET_CURRENT(reg)         \
        movq $STACK_SIZE-8, reg; \
        orq  %rsp, reg;          \
        andq $~7,reg;            \
        movq (reg),reg;

        ALIGN
/* %rbx: struct vcpu */
switch_to_kernel:
        leaq  VCPU_trap_bounce(%rbx),%rdx
        /* TB_eip = (32-bit syscall && syscall32_addr) ?
         *          syscall32_addr : syscall_addr */
        xor   %eax,%eax
        cmpw  $FLAT_USER_CS32,UREGS_cs(%rsp)
        cmoveq VCPU_syscall32_addr(%rbx),%rax
        testq %rax,%rax
        cmovzq VCPU_syscall_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        /* TB_flags = VGCF_syscall_disables_events ? TBF_INTERRUPT : 0 */
        btl   $_VGCF_syscall_disables_events,VCPU_guest_context_flags(%rbx)
        setc  %cl
        leal  (,%rcx,TBF_INTERRUPT),%ecx
        movb  %cl,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        andl  $~X86_EFLAGS_DF,UREGS_eflags(%rsp)
        jmp   test_all_events

/* %rbx: struct vcpu, interrupts disabled */
restore_all_guest:
        ASSERT_INTERRUPTS_DISABLED
        RESTORE_ALL
        testw $TRAP_syscall,4(%rsp)
        jz    iret_exit_to_guest

        addq  $8,%rsp
        popq  %rcx                    # RIP
        popq  %r11                    # CS
        cmpw  $FLAT_USER_CS32,%r11
        popq  %r11                    # RFLAGS
        popq  %rsp                    # RSP
        je    1f
        sysretq
1:      sysretl

        ALIGN
/* No special register assumptions. */
iret_exit_to_guest:
        addq  $8,%rsp
.Lft0:  iretq

.section .fixup,"ax"
.Lfx0:  sti
        SAVE_ALL
        movq  UREGS_error_code(%rsp),%rsi
        movq  %rsp,%rax
        andq  $~0xf,%rsp
        pushq $__HYPERVISOR_DS         # SS
        pushq %rax                     # RSP
        pushfq                         # RFLAGS
        pushq $__HYPERVISOR_CS         # CS
        leaq  .Ldf0(%rip),%rax
        pushq %rax                     # RIP
        pushq %rsi                     # error_code/entry_vector
        jmp   handle_exception
.Ldf0:  GET_CURRENT(%rbx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%rbx)
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_failsafe_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
        jnc   1f
        orb   $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
1:      call  create_bounce_frame
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .quad .Lft0,.Lfx0
.previous
.section __ex_table,"a"
        .quad .Ldf0,failsafe_callback
.previous

        ALIGN
/* No special register assumptions. */
restore_all_xen:
        RESTORE_ALL
        addq  $8,%rsp
        iretq
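/*
 * Exit-frame layout consumed by the SYSRET path in restore_all_guest
 * above (a sketch inferred from the pops; offsets are relative to %rsp
 * immediately after RESTORE_ALL):
 *    0(%rsp)  error_code / entry_vector (TRAP_syscall tested at 4(%rsp))
 *    8(%rsp)  RIP    -> popped into %rcx for SYSRET
 *   16(%rsp)  CS     (FLAT_USER_CS32 here selects the 32-bit sysretl)
 *   24(%rsp)  RFLAGS -> popped into %r11 for SYSRET
 *   32(%rsp)  RSP    -> popped directly into %rsp
 *   40(%rsp)  SS
 */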
/*
 * When entering SYSCALL from kernel mode:
 *  %rax                             = hypercall vector
 *  %rdi, %rsi, %rdx, %r10, %r8, %r9 = hypercall arguments
 *  %rcx                             = SYSCALL-saved %rip
 *  NB. We must move %r10 to %rcx for C function-calling ABI.
 *
 * When entering SYSCALL from user mode:
 *  Vector directly to the registered arch.syscall_addr.
 *
 * Initial work is done by per-CPU stack trampolines. At this point %rsp
 * has been initialised to point at the correct Xen stack, and %rsp, %rflags
 * and %cs have been saved. All other registers are still to be saved onto
 * the stack, starting with %rip, and an appropriate %ss must be saved into
 * the space left by the trampoline.
 */
        ALIGN
ENTRY(syscall_enter)
        sti
        movl  $FLAT_KERNEL_SS,24(%rsp)
        pushq %rcx
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        movq  24(%rsp),%r11 /* Re-load user RFLAGS into %r11 before SAVE_ALL */
        SAVE_ALL
        GET_CURRENT(%rbx)
        movq  VCPU_domain(%rbx),%rcx
        testb $1,DOMAIN_is_32bit_pv(%rcx)
        jnz   compat_syscall
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jz    switch_to_kernel

/*hypercall:*/
        movq  %r10,%rcx
        cmpq  $NR_hypercalls,%rax
        jae   bad_hypercall
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        pushq %rdi; pushq %rsi; pushq %rdx; pushq %rcx; pushq %r8 ; pushq %r9
        leaq  hypercall_args_table(%rip),%r10
        movq  $6,%rcx
        sub   (%r10,%rax,1),%cl
        movq  %rsp,%rdi
        movl  $0xDEADBEEF,%eax
        rep   stosq
        popq  %r9 ; popq  %r8 ; popq  %rcx; popq  %rdx; popq  %rsi; popq  %rdi
        movq  UREGS_rax(%rsp),%rax
        pushq %rax
        pushq UREGS_rip+8(%rsp)
#define SHADOW_BYTES 16 /* Shadow EIP + shadow hypercall # */
#else
#define SHADOW_BYTES 0  /* No on-stack shadow state */
#endif
        cmpb  $0,tb_init_done(%rip)
        je    1f
        call  trace_hypercall
        /* Now restore all the registers that trace_hypercall clobbered */
        movq  UREGS_rax+SHADOW_BYTES(%rsp),%rax   /* Hypercall #  */
        movq  UREGS_rdi+SHADOW_BYTES(%rsp),%rdi   /* Arg 1        */
        movq  UREGS_rsi+SHADOW_BYTES(%rsp),%rsi   /* Arg 2        */
        movq  UREGS_rdx+SHADOW_BYTES(%rsp),%rdx   /* Arg 3        */
        movq  UREGS_r10+SHADOW_BYTES(%rsp),%rcx   /* Arg 4        */
        movq  UREGS_r8+SHADOW_BYTES(%rsp),%r8     /* Arg 5        */
        movq  UREGS_r9+SHADOW_BYTES(%rsp),%r9     /* Arg 6        */
#undef SHADOW_BYTES
1:      leaq  hypercall_table(%rip),%r10
        PERFC_INCR(PERFC_hypercalls, %rax, %rbx)
        callq *(%r10,%rax,8)
#ifndef NDEBUG
        /* Deliberately corrupt parameter regs used by this hypercall. */
        popq  %r10         # Shadow RIP
        cmpq  %r10,UREGS_rip+8(%rsp)
        popq  %rcx         # Shadow hypercall index
        jne   skip_clobber /* If RIP has changed then don't clobber. */
        leaq  hypercall_args_table(%rip),%r10
        movb  (%r10,%rcx,1),%cl
        movl  $0xDEADBEEF,%r10d
        cmpb  $1,%cl; jb skip_clobber; movq %r10,UREGS_rdi(%rsp)
        cmpb  $2,%cl; jb skip_clobber; movq %r10,UREGS_rsi(%rsp)
        cmpb  $3,%cl; jb skip_clobber; movq %r10,UREGS_rdx(%rsp)
        cmpb  $4,%cl; jb skip_clobber; movq %r10,UREGS_r10(%rsp)
        cmpb  $5,%cl; jb skip_clobber; movq %r10,UREGS_r8(%rsp)
        cmpb  $6,%cl; jb skip_clobber; movq %r10,UREGS_r9(%rsp)
skip_clobber:
#endif
        movq  %rax,UREGS_rax(%rsp)       # save the return value
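/*
 * Roughly equivalent C for the dispatch above (a sketch only; argument
 * registers follow the convention documented before syscall_enter, and
 * %r10 is moved into %rcx beforehand to match the C calling ABI):
 *
 *   if ( rax >= NR_hypercalls )
 *       regs->rax = -ENOSYS;
 *   else
 *       regs->rax = hypercall_table[rax](rdi, rsi, rdx, r10, r8, r9);
 */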
/* %rbx: struct vcpu */
test_all_events:
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl  VCPU_processor(%rbx),%eax
        shl   $IRQSTAT_shift,%rax
        leaq  irq_stat(%rip),%rcx
        testl $~0,(%rcx,%rax,1)
        jnz   process_softirqs
        testb $1,VCPU_mce_pending(%rbx)
        jnz   process_mce
        testb $1,VCPU_nmi_pending(%rbx)
        jnz   process_nmi
test_guest_events:
        movq  VCPU_vcpu_info(%rbx),%rax
        testb $0xFF,VCPUINFO_upcall_mask(%rax)
        jnz   restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%rax)
        jz    restore_all_guest
/*process_guest_events:*/
        sti
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_event_addr(%rbx),%rax
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
        call  create_bounce_frame
        jmp   test_all_events

        ALIGN
/* %rbx: struct vcpu */
process_softirqs:
        sti
        call do_softirq
        jmp  test_all_events

        ALIGN
/* %rbx: struct vcpu */
process_mce:
        cmpw $VCPU_TRAP_MCE,VCPU_trap_priority(%rbx)
        jae  test_guest_events
        sti
        movb $0,VCPU_mce_pending(%rbx)
        call set_guest_machinecheck_trapbounce
        test %eax,%eax
        jz   test_all_events
        movw VCPU_trap_priority(%rbx),%dx           # safe priority for the
        movw %dx,VCPU_old_trap_priority(%rbx)       # iret hypercall
        movw $VCPU_TRAP_MCE,VCPU_trap_priority(%rbx)
        jmp  process_trap

        ALIGN
/* %rbx: struct vcpu */
process_nmi:
        cmpw $VCPU_TRAP_NMI,VCPU_trap_priority(%rbx)
        jae  test_guest_events
        sti
        movb $0,VCPU_nmi_pending(%rbx)
        call set_guest_nmi_trapbounce
        test %eax,%eax
        jz   test_all_events
        movw VCPU_trap_priority(%rbx),%dx           # safe priority for the
        movw %dx,VCPU_old_trap_priority(%rbx)       # iret hypercall
        movw $VCPU_TRAP_NMI,VCPU_trap_priority(%rbx)
        /* FALLTHROUGH */
process_trap:
        leaq VCPU_trap_bounce(%rbx),%rdx
        call create_bounce_frame
        jmp  test_all_events

bad_hypercall:
        movq $-ENOSYS,UREGS_rax(%rsp)
        jmp  test_all_events
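/*
 * Summary of the exit path above: test_all_events runs with interrupts
 * disabled and delivers pending work in strict priority order -- softirqs
 * first, then machine-check and NMI traps (each gated by
 * VCPU_trap_priority so a lower-priority trap never interrupts the
 * delivery of a higher-priority one), and finally event-channel upcalls,
 * unless the guest has masked them via VCPUINFO_upcall_mask. Every
 * delivery bounces through create_bounce_frame and re-runs the tests.
 */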
ENTRY(sysenter_entry)
        sti
        pushq $FLAT_USER_SS
        pushq $0
        pushfq
        .globl sysenter_eflags_saved
sysenter_eflags_saved:
        pushq $0
        pushq $0
        pushq $0
        movl  $TRAP_syscall,4(%rsp)
        SAVE_ALL
        GET_CURRENT(%rbx)
        cmpb  $0,VCPU_sysenter_disables_events(%rbx)
        movq  $0,UREGS_rip(%rsp) /* null rip */
        movl  $3,UREGS_cs(%rsp)  /* ring 3 null cs */
        movq  VCPU_sysenter_addr(%rbx),%rax
        setne %cl
        leaq  VCPU_trap_bounce(%rbx),%rdx
        testq %rax,%rax
        leal  (,%rcx,TBF_INTERRUPT),%ecx
        jz    2f
1:      movq  VCPU_domain(%rbx),%rdi
        movq  %rax,TRAPBOUNCE_eip(%rdx)
        movb  %cl,TRAPBOUNCE_flags(%rdx)
        testb $1,DOMAIN_is_32bit_pv(%rdi)
        jnz   compat_sysenter
        call  create_bounce_frame
        jmp   test_all_events
2:      movl  %eax,TRAPBOUNCE_error_code(%rdx)
        movq  VCPU_gp_fault_addr(%rbx),%rax
        movb  $(TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE|TBF_INTERRUPT),%cl
        movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        jmp   1b

ENTRY(int80_direct_trap)
        pushq $0
        SAVE_ALL
        GET_CURRENT(%rbx)

        /* Check that the callback is non-null. */
        leaq  VCPU_int80_bounce(%rbx),%rdx
        cmpb  $0,TRAPBOUNCE_flags(%rdx)
        jz    int80_slow_path

        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jnz   compat_int80_direct_trap

        call  create_bounce_frame
        jmp   test_all_events

int80_slow_path:
        /*
         * Setup entry vector and error code as if this was a GPF caused by an
         * IDT entry with DPL==0.
         */
        movl  $((0x80 << 3) | 0x2),UREGS_error_code(%rsp)
        movl  $TRAP_gp_fault,UREGS_entry_vector(%rsp)
        /* A GPF wouldn't have incremented the instruction pointer. */
        subq  $2,UREGS_rip(%rsp)
        jmp   handle_exception_saved

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK:                     */
/*   { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS }   */
/* %rdx: trap_bounce, %rbx: struct vcpu                                  */
/* On return only %rbx and %rdx are guaranteed non-clobbered.            */
create_bounce_frame:
        ASSERT_INTERRUPTS_ENABLED
        testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
        jnz   1f
        /* Push new frame at registered guest-OS stack base. */
        pushq %rdx
        movq  %rbx,%rdi
        call  toggle_guest_mode
        popq  %rdx
        movq  VCPU_kernel_sp(%rbx),%rsi
        jmp   2f
1:      /* In kernel context already: push new frame at existing %rsp. */
        movq  UREGS_rsp+8(%rsp),%rsi
        andb  $0xfc,UREGS_cs+8(%rsp)    # Indicate kernel context to guest.
2:      andq  $~0xf,%rsi                # Stack frames are 16-byte aligned.
        movq  $HYPERVISOR_VIRT_START,%rax
        cmpq  %rax,%rsi
        jb    1f                        # In +ve address space? Then okay.
        movq  $HYPERVISOR_VIRT_END+60,%rax
        cmpq  %rax,%rsi
        jb    domain_crash_synchronous  # Above Xen private area? Then okay.
1:      movb  TRAPBOUNCE_flags(%rdx),%cl
        subq  $40,%rsi
        movq  UREGS_ss+8(%rsp),%rax
.Lft2:  movq  %rax,32(%rsi)             # SS
        movq  UREGS_rsp+8(%rsp),%rax
.Lft3:  movq  %rax,24(%rsi)             # RSP
        movq  VCPU_vcpu_info(%rbx),%rax
        pushq VCPUINFO_upcall_mask(%rax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                       # TBF_INTERRUPT -> set upcall mask
        orb   %ch,VCPUINFO_upcall_mask(%rax)
        popq  %rax
        shlq  $32,%rax                  # Bits 32-39: saved_upcall_mask
        movw  UREGS_cs+8(%rsp),%ax      # Bits  0-15: CS
.Lft4:  movq  %rax,8(%rsi)              # CS / saved_upcall_mask
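/*
 * Guest-stack frame being assembled above (a sketch; offsets are
 * relative to %rsi after the "subq $40,%rsi", following the
 * { RIP, CS, RFLAGS, RSP, SS } order named in the routine header.
 * The RFLAGS and RIP slots are filled by the remainder of
 * create_bounce_frame, which continues on page 2 of this listing):
 *    0(%rsi)  RIP
 *    8(%rsi)  CS, with saved_upcall_mask in bits 32-39
 *   16(%rsi)  RFLAGS
 *   24(%rsi)  RSP
 *   32(%rsi)  SS
 */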
