📄 entry_32.s

📁 Linux kernel source code
💻 S
📖 Page 1 of 2
	/* since we are on a wrong stack, we cant make it a C code :( */ \
	PER_CPU(gdt_page, %ebx); \
	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
	addl %esp, %eax; \
	pushl $__KERNEL_DS; \
	CFI_ADJUST_CFA_OFFSET 4; \
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4; \
	lss (%esp), %esp; \
	CFI_ADJUST_CFA_OFFSET -8;

#define UNWIND_ESPFIX_STACK \
	movl %ss, %eax; \
	/* see if on espfix stack */ \
	cmpw $__ESPFIX_SS, %ax; \
	jne 27f; \
	movl $__KERNEL_DS, %eax; \
	movl %eax, %ds; \
	movl %eax, %es; \
	/* switch to normal stack */ \
	FIXUP_ESPFIX_STACK; \
27:;

/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.data
ENTRY(interrupt)
.text

ENTRY(irq_entries_start)
	RING0_INT_FRAME
vector=0
.rept NR_IRQS
	ALIGN
 .if vector
	CFI_ADJUST_CFA_OFFSET -4
 .endif
1:	pushl $~(vector)
	CFI_ADJUST_CFA_OFFSET 4
	jmp common_interrupt
 .previous
	.long 1b
 .text
vector=vector+1
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	ALIGN
common_interrupt:
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)
	CFI_ENDPROC

#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call smp_##name;		\
	jmp ret_from_intr;		\
	CFI_ENDPROC;			\
ENDPROC(name)

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"

KPROBE_ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	/* the function address is in %fs's slot on the stack */
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	pushl %fs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET fs, 0*/
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_REGISTER es, ecx*/
	movl PT_FS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	mov  %ecx, PT_FS(%esp)
	/*CFI_REL_OFFSET fs, ES*/
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_CR0_INTO_EAX
	testl $0x4, %eax		# EM (math emulation bit)
	jne device_not_available_emulate
	preempt_stop(CLBR_ANY)
	call math_state_restore
	jmp ret_from_exception
device_not_available_emulate:
	pushl $0			# temporary storage for ORIG_EIP
	CFI_ADJUST_CFA_OFFSET 4
	call math_emulate
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp ret_from_exception
	CFI_ENDPROC
END(device_not_available)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label)		\
	cmpw $__KERNEL_CS,4(%esp);		\
	jne ok;					\
label:						\
	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
	CFI_DEF_CFA esp, 0;			\
	CFI_UNDEFINED eip;			\
	pushfl;					\
	CFI_ADJUST_CFA_OFFSET 4;		\
	pushl $__KERNEL_CS;			\
	CFI_ADJUST_CFA_OFFSET 4;		\
	pushl $sysenter_past_esp;		\
	CFI_ADJUST_CFA_OFFSET 4;		\
	CFI_REL_OFFSET eip, 0

KPROBE_ENTRY(debug)
	RING0_INT_FRAME
	cmpl $sysenter_entry,(%esp)
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
KPROBE_ENTRY(nmi)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	je nmi_espfix_stack
	cmpl $sysenter_entry,(%esp)
	je nmi_stack_fixup
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	jae nmi_stack_correct
	cmpl $sysenter_entry,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx		# zero error code
	movl %esp,%eax		# pt_regs pointer
	call do_nmi
	jmp restore_nocheck_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK(12,nmi_stack_correct, 1)
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK(24,nmi_stack_correct, 1)
	jmp nmi_stack_correct

nmi_espfix_stack:
	/* We have a RING0_INT_FRAME here.
	 *
	 * create the pointer to lss back
	 */
	pushl %ss
	CFI_ADJUST_CFA_OFFSET 4
	pushl %esp
	CFI_ADJUST_CFA_OFFSET 4
	addw $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	.endr
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
1:	INTERRUPT_RETURN
	CFI_ENDPROC
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
KPROBE_END(nmi)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
1:	iret
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif

KPROBE_ENTRY(int3)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx		# zero error code
	movl %esp,%eax		# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_overflow
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(overflow)

ENTRY(bounds)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_bounds
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_invalid_op
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_segment_overrun
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl $do_invalid_TSS
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl $do_segment_not_present
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl $do_stack_segment
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	RING0_EC_FRAME
	pushl $do_general_protection
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
KPROBE_END(general_protection)

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl $do_alignment_check
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl $0			# no error code
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_divide_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl machine_check_vector
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_spurious_interrupt_bug
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(spurious_interrupt_bug)

ENTRY(kernel_thread_helper)
	pushl $0		# fake return address for unwinder
	CFI_STARTPROC
	movl %edx,%eax
	push %edx
	CFI_ADJUST_CFA_OFFSET 4
	call *%ebx
	push %eax
	CFI_ADJUST_CFA_OFFSET 4
	call do_exit
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	CFI_STARTPROC
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF
	/* Check to see if we got the event in the critical
	   region in xen_iret_direct, after we've reenabled
	   events and checked for pending events.  This simulates
	   iret instruction's behaviour where it delivers a
	   pending interrupt when enabling interrupts. */
	movl PT_EIP(%esp),%eax
	cmpl $xen_iret_start_crit,%eax
	jb   1f
	cmpl $xen_iret_end_crit,%eax
	jae  1f
	call xen_iret_crit_fixup
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
	jmp  ret_from_intr
	CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	testl %eax,%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	lea 16(%esp),%esp
	CFI_ADJUST_CFA_OFFSET -16
	jz 5f
	addl $16,%esp
	jmp iret_exc		# EAX != 0 => Category 2 (Bad IRET)
5:	pushl $0		# EAX == 0 => Category 1 (Bad segment)
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	jmp ret_from_exception
	CFI_ENDPROC

.section .fixup,"ax"
6:	xorl %eax,%eax
	movl %eax,4(%esp)
	jmp 1b
7:	xorl %eax,%eax
	movl %eax,8(%esp)
	jmp 2b
8:	xorl %eax,%eax
	movl %eax,12(%esp)
	jmp 3b
9:	xorl %eax,%eax
	movl %eax,16(%esp)
	jmp 4b
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,6b
	.long 2b,7b
	.long 3b,8b
	.long 4b,9b
.previous
ENDPROC(xen_failsafe_callback)

#endif	/* CONFIG_XEN */

.section .rodata,"a"
#include "syscall_table_32.S"

syscall_table_size=(.-sys_call_table)
