/*
 * entry.S -- Xen x86_32 hypervisor entry points: exception and NMI/MCE
 * handling plus the hypercall dispatch tables.  This listing starts
 * mid-file, in what appears to be the tail of create_bounce_frame.
 */
2:      testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz    .Lnvm86_3
        xorl  %eax,%eax      /* zero DS-GS, just as a real CPU would */
        movl  %eax,UREGS_ds+4(%esp)
        movl  %eax,UREGS_es+4(%esp)
        movl  %eax,UREGS_fs+4(%esp)
        movl  %eax,UREGS_gs+4(%esp)
.Lnvm86_3:
        /* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl  $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+4(%esp)
        mov   %gs,UREGS_ss+4(%esp)
        movl  %esi,UREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        /* Null selectors (0-3) are not allowed. */
        testl $~3,%eax
        jz    domain_crash_synchronous
        movl  %eax,UREGS_cs+4(%esp)
        movl  TRAPBOUNCE_eip(%edx),%eax
        movl  %eax,UREGS_eip+4(%esp)
        ret

.section __ex_table,"a"
        .long .Lft6,domain_crash_synchronous ,  .Lft7,domain_crash_synchronous
        .long .Lft8,domain_crash_synchronous ,  .Lft9,domain_crash_synchronous
        .long .Lft10,domain_crash_synchronous , .Lft11,domain_crash_synchronous
        .long .Lft12,domain_crash_synchronous , .Lft13,domain_crash_synchronous
        .long .Lft14,domain_crash_synchronous , .Lft15,domain_crash_synchronous
        .long .Lft16,domain_crash_synchronous , .Lft17,domain_crash_synchronous
        .long .Lft18,domain_crash_synchronous , .Lft19,domain_crash_synchronous
        .long .Lft20,domain_crash_synchronous , .Lft21,domain_crash_synchronous
        .long .Lft22,domain_crash_synchronous , .Lft23,domain_crash_synchronous
        .long .Lft24,domain_crash_synchronous , .Lft25,domain_crash_synchronous
.previous

domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S (%lx)\n"

domain_crash_synchronous:
        pushl $domain_crash_synchronous_string
        call  printk
        jmp   __domain_crash_synchronous

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   test_all_events
        jmp   restore_all_xen

ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
        ALIGN
handle_exception:
        FIXUP_RING0_GUEST_STACK
        SAVE_ALL(1f,2f)
        .text 1
        /* Exception within Xen: make sure we have valid %ds,%es. */
1:      mov   %ecx,%ds
        mov   %ecx,%es
        jmp   2f
        .previous
2:      testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
        jz    exception_with_ints_disabled
        sti                             # re-enable interrupts
1:      xorl  %eax,%eax
        movw  UREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx                      # push the cpu_user_regs pointer
        GET_CURRENT(%ebx)
        PERFC_INCR(PERFC_exceptions, %eax, %ebx)
        call  *exception_table(,%eax,4)
        addl  $4,%esp
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        leal  VCPU_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz    test_all_events
        call  create_bounce_frame
        movb  $0,TRAPBOUNCE_flags(%edx)
        jmp   test_all_events

exception_with_ints_disabled:
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        pushl %esp
        call  search_pre_exception_table
        addl  $4,%esp
        testl %eax,%eax                 # no fixup code for faulting EIP?
        jz    1b
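        /*
         * Annotation: a pre-exception fixup exists.  Point the saved EIP
         * at it, then shift the whole register frame down one word so a
         * copy of error_code/entry_vector sits just above the frame; when
         * restore_all_xen "returns", the fixup code thus finds that word
         * on top of its stack.
         */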
        movl  %eax,UREGS_eip(%esp)
        movl  %esp,%esi
        subl  $4,%esp
        movl  %esp,%edi
        movl  $UREGS_kernel_sizeof/4,%ecx
        rep;  movsl                     # make room for error_code/entry_vector
        movl  UREGS_error_code(%esp),%eax # error_code/entry_vector
        movl  %eax,UREGS_kernel_sizeof(%esp)
        jmp   restore_all_xen           # return to fixup code

FATAL_exception_with_ints_disabled:
        xorl  %esi,%esi
        movw  UREGS_entry_vector(%esp),%si
        movl  %esp,%edx
        pushl %edx                      # push the cpu_user_regs pointer
        pushl %esi                      # push the trapnr (entry vector)
        call  fatal_trap
        ud2

ENTRY(coprocessor_error)
        pushl $TRAP_copro_error<<16
        jmp   handle_exception

ENTRY(simd_coprocessor_error)
        pushl $TRAP_simd_error<<16
        jmp   handle_exception

ENTRY(device_not_available)
        pushl $TRAP_no_device<<16
        jmp   handle_exception

ENTRY(debug)
        pushl $TRAP_debug<<16
        jmp   handle_exception

ENTRY(int3)
        pushl $TRAP_int3<<16
        jmp   handle_exception

ENTRY(overflow)
        pushl $TRAP_overflow<<16
        jmp   handle_exception

ENTRY(bounds)
        pushl $TRAP_bounds<<16
        jmp   handle_exception

ENTRY(invalid_op)
        pushl $TRAP_invalid_op<<16
        jmp   handle_exception

ENTRY(coprocessor_segment_overrun)
        pushl $TRAP_copro_seg<<16
        jmp   handle_exception

ENTRY(invalid_TSS)
        movw  $TRAP_invalid_tss,2(%esp)
        jmp   handle_exception

ENTRY(segment_not_present)
        movw  $TRAP_no_segment,2(%esp)
        jmp   handle_exception

ENTRY(stack_segment)
        movw  $TRAP_stack_error,2(%esp)
        jmp   handle_exception

ENTRY(general_protection)
        movw  $TRAP_gp_fault,2(%esp)
        jmp   handle_exception

ENTRY(alignment_check)
        movw  $TRAP_alignment_check,2(%esp)
        jmp   handle_exception

ENTRY(page_fault)
        movw  $TRAP_page_fault,2(%esp)
        jmp   handle_exception

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
        jmp   handle_exception

ENTRY(early_page_fault)
        SAVE_ALL(1f,1f)
1:      movl  %esp,%eax
        pushl %eax
        call  do_early_page_fault
        addl  $4,%esp
        jmp   restore_all_xen

handle_nmi_mce:
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        # NMI/MCE entry protocol is incompatible with guest kernel in ring 0.
        addl  $4,%esp
        iret
#else
        # Save state but do not trash the segment registers!
        SAVE_ALL(.Lnmi_mce_xen,.Lnmi_mce_common)
.Lnmi_mce_common:
        xorl  %eax,%eax
        movw  UREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx
        call  *exception_table(,%eax,4)
        addl  $4,%esp
        /*
         * NB. We may return to Xen context with polluted %ds/%es. But in such
         * cases we have put guest DS/ES on the guest stack frame, which will
         * be detected by SAVE_ALL(), or we have rolled back restore_guest.
         */
        jmp   ret_from_intr
.Lnmi_mce_xen:
        /* Check the outer (guest) context for %ds/%es state validity. */
        GET_GUEST_REGS(%ebx)
        testl $X86_EFLAGS_VM,%ss:UREGS_eflags(%ebx)
        mov   %ds,%eax
        mov   %es,%edx
        jnz   .Lnmi_mce_vm86
        /* We may have interrupted Xen while messing with %ds/%es... */
        cmpw  %ax,%cx
        mov   %ecx,%ds             /* Ensure %ds is valid */
        cmove UREGS_ds(%ebx),%eax  /* Grab guest DS if it wasn't in %ds */
        cmpw  %dx,%cx
        movl  %eax,UREGS_ds(%ebx)  /* Ensure guest frame contains guest DS */
        cmove UREGS_es(%ebx),%edx  /* Grab guest ES if it wasn't in %es */
        mov   %ecx,%es             /* Ensure %es is valid */
        movl  $.Lrestore_sregs_guest,%ecx
        movl  %edx,UREGS_es(%ebx)  /* Ensure guest frame contains guest ES */
        cmpl  %ecx,UREGS_eip(%esp)
        jbe   .Lnmi_mce_common
        cmpl  $.Lrestore_iret_guest,UREGS_eip(%esp)
        ja    .Lnmi_mce_common
        /* Roll outer context restore_guest back to restoring %ds/%es. */
        movl  %ecx,UREGS_eip(%esp)
        jmp   .Lnmi_mce_common
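        /*
         * Annotation on the branchless sequence above: %ecx is expected to
         * hold __HYPERVISOR_DS on entry (loaded by SAVE_ALL, which is not
         * shown in this listing).  If the live %ds/%es already equal it,
         * the guest selectors must already be on the frame, so the cmoves
         * re-read them from there; otherwise the live registers still hold
         * the guest values and those are written back to the frame.  Either
         * way the guest frame ends up with the guest's DS/ES and the live
         * registers are left valid for Xen.
         */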
.Lnmi_mce_vm86:
        /* vm86 is easy: the CPU saved %ds/%es so we can safely stomp them. */
        mov   %ecx,%ds
        mov   %ecx,%es
        jmp   .Lnmi_mce_common
#endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */

ENTRY(nmi)
        pushl $TRAP_nmi<<16
        jmp   handle_nmi_mce

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
        jmp   handle_nmi_mce

ENTRY(setup_vm86_frame)
        mov   %ecx,%ds
        mov   %ecx,%es
        # Copies the entire stack frame forwards by 16 bytes.
        .macro copy_vm86_words count=18
        .if \count
        pushl ((\count-1)*4)(%esp)
        popl  ((\count-1)*4)+16(%esp)
        copy_vm86_words "(\count-1)"
        .endif
        .endm
        copy_vm86_words
        addl  $16,%esp
        ret

.data

ENTRY(exception_table)
        .long do_divide_error
        .long do_debug
        .long do_nmi
        .long do_int3
        .long do_overflow
        .long do_bounds
        .long do_invalid_op
        .long do_device_not_available
        .long 0 # double fault
        .long do_coprocessor_segment_overrun
        .long do_invalid_TSS
        .long do_segment_not_present
        .long do_stack_segment
        .long do_general_protection
        .long do_page_fault
        .long do_spurious_interrupt_bug
        .long do_coprocessor_error
        .long do_alignment_check
        .long do_machine_check
        .long do_simd_coprocessor_error

ENTRY(hypercall_table)
        .long do_set_trap_table     /*  0 */
        .long do_mmu_update
        .long do_set_gdt
        .long do_stack_switch
        .long do_set_callbacks
        .long do_fpu_taskswitch     /*  5 */
        .long do_sched_op_compat
        .long do_platform_op
        .long do_set_debugreg
        .long do_get_debugreg
        .long do_update_descriptor  /* 10 */
        .long do_ni_hypercall
        .long do_memory_op
        .long do_multicall
        .long do_update_va_mapping
        .long do_set_timer_op       /* 15 */
        .long do_event_channel_op_compat
        .long do_xen_version
        .long do_console_io
        .long do_physdev_op_compat
        .long do_grant_table_op     /* 20 */
        .long do_vm_assist
        .long do_update_va_mapping_otherdomain
        .long do_iret
        .long do_vcpu_op
        .long do_ni_hypercall       /* 25 */
        .long do_mmuext_op
        .long do_xsm_op
        .long do_nmi_op
        .long do_sched_op
        .long do_callback_op        /* 30 */
        .long do_xenoprof_op
        .long do_event_channel_op
        .long do_physdev_op
        .long do_hvm_op
        .long do_sysctl             /* 35 */
        .long do_domctl
        .long do_kexec_op
        .rept __HYPERVISOR_arch_0-((.-hypercall_table)/4)
        .long do_ni_hypercall
        .endr
        .long do_mca                /* 48 */
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long do_ni_hypercall
        .endr

ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table */    /*  0 */
        .byte 4 /* do_mmu_update */
        .byte 2 /* do_set_gdt */
        .byte 2 /* do_stack_switch */
        .byte 4 /* do_set_callbacks */
        .byte 1 /* do_fpu_taskswitch */    /*  5 */
        .byte 2 /* do_sched_op_compat */
        .byte 1 /* do_platform_op */
        .byte 2 /* do_set_debugreg */
        .byte 1 /* do_get_debugreg */
        .byte 4 /* do_update_descriptor */ /* 10 */
        .byte 0 /* do_ni_hypercall */
        .byte 2 /* do_memory_op */
        .byte 2 /* do_multicall */
        .byte 4 /* do_update_va_mapping */
        .byte 2 /* do_set_timer_op */      /* 15 */
        .byte 1 /* do_event_channel_op_compat */
        .byte 2 /* do_xen_version */
        .byte 3 /* do_console_io */
        .byte 1 /* do_physdev_op_compat */
        .byte 3 /* do_grant_table_op */    /* 20 */
        .byte 2 /* do_vm_assist */
        .byte 5 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret */
        .byte 3 /* do_vcpu_op */
        .byte 0 /* do_ni_hypercall */      /* 25 */
        .byte 4 /* do_mmuext_op */
        .byte 1 /* do_xsm_op */
        .byte 2 /* do_nmi_op */
        .byte 2 /* do_sched_op */
        .byte 2 /* do_callback_op */       /* 30 */
        .byte 2 /* do_xenoprof_op */
        .byte 2 /* do_event_channel_op */
        .byte 2 /* do_physdev_op */
        .byte 2 /* do_hvm_op */
        .byte 1 /* do_sysctl */            /* 35 */
        .byte 1 /* do_domctl */
        .byte 2 /* do_kexec_op */
        .rept __HYPERVISOR_arch_0-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall */
        .endr
        .byte 1 /* do_mca */               /* 48 */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall */
        .endr
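        /*
         * Annotation: hypercall_table is indexed by hypercall number, and
         * the matching byte in hypercall_args_table records how many
         * arguments that handler takes (apparently consulted when a
         * preempted hypercall must be restarted or continued).  Both tables
         * are padded out to NR_hypercalls with do_ni_hypercall /
         * zero-argument entries, with do_mca placed at slot 48
         * (__HYPERVISOR_arch_0) per the .rept arithmetic above.
         */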