📄 entry.s
        shrq  $32,%rax
        testb $0xFF,%al                 # Bits 0-7: saved_upcall_mask
        setz  %ch                       # %ch == !saved_upcall_mask
        movl  UREGS_eflags+8(%rsp),%eax
        andl  $~X86_EFLAGS_IF,%eax
        addb  %ch,%ch                   # Bit 9 (EFLAGS.IF)
        orb   %ch,%ah                   # Fold EFLAGS.IF into %eax
.Lft5:  movq  %rax,16(%rsi)             # RFLAGS
        movq  UREGS_rip+8(%rsp),%rax
.Lft6:  movq  %rax,(%rsi)               # RIP
        testb $TBF_EXCEPTION_ERRCODE,%cl
        jz    1f
        subq  $8,%rsi
        movl  TRAPBOUNCE_error_code(%rdx),%eax
.Lft7:  movq  %rax,(%rsi)               # ERROR CODE
1:      testb $TBF_FAILSAFE,%cl
        jz    2f
        subq  $32,%rsi
        movl  %gs,%eax
.Lft8:  movq  %rax,24(%rsi)             # GS
        movl  %fs,%eax
.Lft9:  movq  %rax,16(%rsi)             # FS
        movl  %es,%eax
.Lft10: movq  %rax,8(%rsi)              # ES
        movl  %ds,%eax
.Lft11: movq  %rax,(%rsi)               # DS
2:      subq  $16,%rsi
        movq  UREGS_r11+8(%rsp),%rax
.Lft12: movq  %rax,8(%rsi)              # R11
        movq  UREGS_rcx+8(%rsp),%rax
.Lft13: movq  %rax,(%rsi)               # RCX
        /* Rewrite our stack frame and return to guest-OS mode. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        /* Also clear AC: alignment checks shouldn't trigger in kernel mode. */
        movl  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
        andl  $~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|\
                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
        movq  $FLAT_KERNEL_SS,UREGS_ss+8(%rsp)
        movq  %rsi,UREGS_rsp+8(%rsp)
        movq  $FLAT_KERNEL_CS,UREGS_cs+8(%rsp)
        movq  TRAPBOUNCE_eip(%rdx),%rax
        testq %rax,%rax
        jz    domain_crash_synchronous
        movq  %rax,UREGS_rip+8(%rsp)
        ret
        .section __ex_table,"a"
        .quad  .Lft2,domain_crash_synchronous ,  .Lft3,domain_crash_synchronous
        .quad  .Lft4,domain_crash_synchronous ,  .Lft5,domain_crash_synchronous
        .quad  .Lft6,domain_crash_synchronous ,  .Lft7,domain_crash_synchronous
        .quad  .Lft8,domain_crash_synchronous ,  .Lft9,domain_crash_synchronous
        .quad .Lft10,domain_crash_synchronous , .Lft11,domain_crash_synchronous
        .quad .Lft12,domain_crash_synchronous , .Lft13,domain_crash_synchronous
        .previous

domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S\n"

ENTRY(domain_crash_synchronous)
        # Get out of the guest-save area of the stack.
        GET_GUEST_REGS(%rax)
        movq  %rax,%rsp
        # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
        movq  CPUINFO_current_vcpu(%rax),%rax
        movq  VCPU_domain(%rax),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        setz  %al
        leal  (%rax,%rax,2),%eax
        orb   %al,UREGS_cs(%rsp)
        # printk(domain_crash_synchronous_string)
        leaq  domain_crash_synchronous_string(%rip),%rdi
        xorl  %eax,%eax
        call  printk
        jmp   __domain_crash_synchronous

        ALIGN
/* No special register assumptions. */
ENTRY(ret_from_intr)
        GET_CURRENT(%rbx)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jz    test_all_events
        jmp   compat_test_all_events

        ALIGN
/* No special register assumptions. */
ENTRY(handle_exception)
        SAVE_ALL
handle_exception_saved:
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
        jz    exception_with_ints_disabled
        sti
1:      movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        GET_CURRENT(%rbx)
        PERFC_INCR(PERFC_exceptions, %rax, %rbx)
        callq *(%rdx,%rax,8)
        testb $3,UREGS_cs(%rsp)
        jz    restore_all_xen
        leaq  VCPU_trap_bounce(%rbx),%rdx
        movq  VCPU_domain(%rbx),%rax
        testb $1,DOMAIN_is_32bit_pv(%rax)
        jnz   compat_post_handle_exception
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
        jz    test_all_events
        call  create_bounce_frame
        movb  $0,TRAPBOUNCE_flags(%rdx)
        jmp   test_all_events

/* No special register assumptions. */
exception_with_ints_disabled:
        testb $3,UREGS_cs(%rsp)          # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        movq  %rsp,%rdi
        call  search_pre_exception_table
        testq %rax,%rax                  # no fixup code for faulting EIP?
        jz    1b
        movq  %rax,UREGS_rip(%rsp)
        subq  $8,UREGS_rsp(%rsp)         # add ec/ev to previous stack frame
        testb $15,UREGS_rsp(%rsp)        # return %rsp is now aligned?
        jz    1f                         # then there is a pad quadword already
        movq  %rsp,%rsi
        subq  $8,%rsp
        movq  %rsp,%rdi
        movq  $UREGS_kernel_sizeof/8,%rcx
        rep;  movsq                      # make room for ec/ev
1:      movq  UREGS_error_code(%rsp),%rax # ec/ev
        movq  %rax,UREGS_kernel_sizeof(%rsp)
        jmp   restore_all_xen            # return to fixup code

/* No special register assumptions. */
FATAL_exception_with_ints_disabled:
        movl  UREGS_entry_vector(%rsp),%edi
        movq  %rsp,%rsi
        call  fatal_trap
        ud2

ENTRY(divide_error)
        pushq $0
        movl  $TRAP_divide_error,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_error)
        pushq $0
        movl  $TRAP_copro_error,4(%rsp)
        jmp   handle_exception

ENTRY(simd_coprocessor_error)
        pushq $0
        movl  $TRAP_simd_error,4(%rsp)
        jmp   handle_exception

ENTRY(device_not_available)
        pushq $0
        movl  $TRAP_no_device,4(%rsp)
        jmp   handle_exception

ENTRY(debug)
        pushq $0
        movl  $TRAP_debug,4(%rsp)
        jmp   handle_exception

ENTRY(int3)
        pushq $0
        movl  $TRAP_int3,4(%rsp)
        jmp   handle_exception

ENTRY(overflow)
        pushq $0
        movl  $TRAP_overflow,4(%rsp)
        jmp   handle_exception

ENTRY(bounds)
        pushq $0
        movl  $TRAP_bounds,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_op)
        pushq $0
        movl  $TRAP_invalid_op,4(%rsp)
        jmp   handle_exception

ENTRY(coprocessor_segment_overrun)
        pushq $0
        movl  $TRAP_copro_seg,4(%rsp)
        jmp   handle_exception

ENTRY(invalid_TSS)
        movl  $TRAP_invalid_tss,4(%rsp)
        jmp   handle_exception

ENTRY(segment_not_present)
        movl  $TRAP_no_segment,4(%rsp)
        jmp   handle_exception

ENTRY(stack_segment)
        movl  $TRAP_stack_error,4(%rsp)
        jmp   handle_exception

ENTRY(general_protection)
        movl  $TRAP_gp_fault,4(%rsp)
        jmp   handle_exception

ENTRY(alignment_check)
        movl  $TRAP_alignment_check,4(%rsp)
        jmp   handle_exception

ENTRY(page_fault)
        movl  $TRAP_page_fault,4(%rsp)
        jmp   handle_exception

ENTRY(spurious_interrupt_bug)
        pushq $0
        movl  $TRAP_spurious_int,4(%rsp)
        jmp   handle_exception

ENTRY(double_fault)
        SAVE_ALL
        movq  %rsp,%rdi
        call  do_double_fault
        ud2

ENTRY(early_page_fault)
        SAVE_ALL
        movq  %rsp,%rdi
        call  do_early_page_fault
        jmp   restore_all_xen

handle_ist_exception:
        SAVE_ALL
        testb $3,UREGS_cs(%rsp)
        jz    1f
        /* Interrupted guest context. Copy the context to stack bottom. */
        GET_GUEST_REGS(%rdi)
        movq  %rsp,%rsi
        movl  $UREGS_kernel_sizeof/8,%ecx
        movq  %rdi,%rsp
        rep   movsq
1:      movq  %rsp,%rdi
        movl  UREGS_entry_vector(%rsp),%eax
        leaq  exception_table(%rip),%rdx
        callq *(%rdx,%rax,8)
        jmp   ret_from_intr

ENTRY(nmi)
        pushq $0
        movl  $TRAP_nmi,4(%rsp)
        jmp   handle_ist_exception

ENTRY(machine_check)
        pushq $0
        movl  $TRAP_machine_check,4(%rsp)
        jmp   handle_ist_exception

.data

ENTRY(exception_table)
        .quad do_divide_error
        .quad do_debug
        .quad do_nmi
        .quad do_int3
        .quad do_overflow
        .quad do_bounds
        .quad do_invalid_op
        .quad do_device_not_available
        .quad 0 # double_fault
        .quad do_coprocessor_segment_overrun
        .quad do_invalid_TSS
        .quad do_segment_not_present
        .quad do_stack_segment
        .quad do_general_protection
        .quad do_page_fault
        .quad do_spurious_interrupt_bug
        .quad do_coprocessor_error
        .quad do_alignment_check
        .quad do_machine_check
        .quad do_simd_coprocessor_error

ENTRY(hypercall_table)
        .quad do_set_trap_table     /*  0 */
        .quad do_mmu_update
        .quad do_set_gdt
        .quad do_stack_switch
        .quad do_set_callbacks
        .quad do_fpu_taskswitch     /*  5 */
        .quad do_sched_op_compat
        .quad do_platform_op
        .quad do_set_debugreg
        .quad do_get_debugreg
        .quad do_update_descriptor  /* 10 */
        .quad do_ni_hypercall
        .quad do_memory_op
        .quad do_multicall
        .quad do_update_va_mapping
        .quad do_set_timer_op       /* 15 */
        .quad do_event_channel_op_compat
        .quad do_xen_version
        .quad do_console_io
        .quad do_physdev_op_compat
        .quad do_grant_table_op     /* 20 */
        .quad do_vm_assist
        .quad do_update_va_mapping_otherdomain
        .quad do_iret
        .quad do_vcpu_op
        .quad do_set_segment_base   /* 25 */
        .quad do_mmuext_op
        .quad do_xsm_op
        .quad do_nmi_op
        .quad do_sched_op
        .quad do_callback_op        /* 30 */
        .quad do_xenoprof_op
        .quad do_event_channel_op
        .quad do_physdev_op
        .quad do_hvm_op
        .quad do_sysctl             /* 35 */
        .quad do_domctl
        .quad do_kexec_op
        .rept __HYPERVISOR_arch_0-((.-hypercall_table)/8)
        .quad do_ni_hypercall
        .endr
        .quad do_mca                /* 48 */
        .rept NR_hypercalls-((.-hypercall_table)/8)
        .quad do_ni_hypercall
        .endr

ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table    */  /*  0 */
        .byte 4 /* do_mmu_update        */
        .byte 2 /* do_set_gdt           */
        .byte 2 /* do_stack_switch      */
        .byte 3 /* do_set_callbacks     */
        .byte 1 /* do_fpu_taskswitch    */  /*  5 */
        .byte 2 /* do_sched_op_compat   */
        .byte 1 /* do_platform_op       */
        .byte 2 /* do_set_debugreg      */
        .byte 1 /* do_get_debugreg      */
        .byte 2 /* do_update_descriptor */  /* 10 */
        .byte 0 /* do_ni_hypercall      */
        .byte 2 /* do_memory_op         */
        .byte 2 /* do_multicall         */
        .byte 3 /* do_update_va_mapping */
        .byte 1 /* do_set_timer_op      */  /* 15 */
        .byte 1 /* do_event_channel_op_compat */
        .byte 2 /* do_xen_version       */
        .byte 3 /* do_console_io        */
        .byte 1 /* do_physdev_op_compat */
        .byte 3 /* do_grant_table_op    */  /* 20 */
        .byte 2 /* do_vm_assist         */
        .byte 4 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret              */
        .byte 3 /* do_vcpu_op           */
        .byte 2 /* do_set_segment_base  */  /* 25 */
        .byte 4 /* do_mmuext_op         */
        .byte 1 /* do_xsm_op            */
        .byte 2 /* do_nmi_op            */
        .byte 2 /* do_sched_op          */
        .byte 2 /* do_callback_op       */  /* 30 */
        .byte 2 /* do_xenoprof_op       */
        .byte 2 /* do_event_channel_op  */
        .byte 2 /* do_physdev_op        */
        .byte 2 /* do_hvm_op            */
        .byte 1 /* do_sysctl            */  /* 35 */
        .byte 1 /* do_domctl            */
        .byte 2 /* do_kexec             */
        .byte 1 /* do_xsm_op            */
        .rept __HYPERVISOR_arch_0-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall      */
        .endr
        .byte 1 /* do_mca               */  /* 48 */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall      */
        .endr
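
Note on the dispatch above: in handle_exception and handle_ist_exception, the instruction "callq *(%rdx,%rax,8)" simply indexes exception_table (an array of 8-byte handler pointers) by the saved trap vector and calls the corresponding C handler with a pointer to the register frame. The following standalone C sketch mirrors that pattern only for illustration; the struct, stub handlers, and table below are invented stand-ins, not Xen's actual cpu_user_regs or do_* declarations.

#include <stdio.h>

/* Illustrative stand-in for the saved register frame; only the field
 * the dispatcher needs (the trap vector) is modelled here. */
struct fake_regs {
    unsigned int entry_vector;          /* plays the role of UREGS_entry_vector */
};

typedef void (*exc_handler_t)(struct fake_regs *regs);

/* Stub handlers standing in for Xen's do_* C functions. */
static void stub_divide_error(struct fake_regs *r) { printf("#DE, vector %u\n", r->entry_vector); }
static void stub_page_fault(struct fake_regs *r)   { printf("#PF, vector %u\n", r->entry_vector); }

/* Mirrors ENTRY(exception_table): one pointer (.quad) per trap vector. */
static exc_handler_t exc_table[20] = {
    [0]  = stub_divide_error,
    [14] = stub_page_fault,
};

int main(void)
{
    struct fake_regs regs = { .entry_vector = 14 };
    /* Equivalent of "callq *(%rdx,%rax,8)":
     * %rdx = table base, %rax = vector, scale 8 = sizeof(pointer). */
    exc_table[regs.entry_vector](&regs);
    return 0;
}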