
entry_32.s (Linux kernel source code)
Page 1 of 2
/*
 *  linux/arch/i386/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 * 	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *       C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - orig_eax
 *	2C(%esp) - %eip
 *	30(%esp) - %cs
 *	34(%esp) - %eflags
 *	38(%esp) - %oldesp
 *	3C(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include "irq_vectors.h"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#define nr_syscalls ((syscall_table_size)/4)

CF_MASK		= 0x00000001
TF_MASK		= 0x00000100
IF_MASK		= 0x00000200
DF_MASK		= 0x00000400
NT_MASK		= 0x00004000
VM_MASK		= 0x00020000

#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel		restore_nocheck
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $IF_MASK,PT_EFLAGS(%esp)     # interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm
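
/*
 * --- Annotation, not part of the original source ---
 * The PT_* offsets used throughout this file (PT_EBX, PT_EAX,
 * PT_EFLAGS, ...) are generated by asm-offsets.c from struct pt_regs
 * and correspond to the hex table in the header comment above, e.g.
 * PT_EAX = 0x18 and PT_EFLAGS = 0x34.  A line such as
 *
 *	movl PT_EAX(%esp), %eax
 *
 * therefore reloads the saved user-mode %eax from the pt_regs frame,
 * and the "testl $IF_MASK,PT_EFLAGS(%esp)" in TRACE_IRQS_IRET checks
 * the saved IF flag of the interrupted context.
 */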
#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

#define SAVE_ALL \
	cld; \
	pushl %fs; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET fs, 0;*/\
	pushl %es; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET es, 0;*/\
	pushl %ds; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET ds, 0;*/\
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET eax, 0;\
	pushl %ebp; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebp, 0;\
	pushl %edi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edi, 0;\
	pushl %esi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET esi, 0;\
	pushl %edx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edx, 0;\
	pushl %ecx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ecx, 0;\
	pushl %ebx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebx, 0;\
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es; \
	movl $(__KERNEL_PERCPU), %edx; \
	movl %edx, %fs

#define RESTORE_INT_REGS \
	popl %ebx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebx;\
	popl %ecx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ecx;\
	popl %edx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edx;\
	popl %esi;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE esi;\
	popl %edi;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edi;\
	popl %ebp;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebp;\
	popl %eax;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE eax

#define RESTORE_REGS	\
	RESTORE_INT_REGS; \
1:	popl %ds;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE ds;*/\
2:	popl %es;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE es;*/\
3:	popl %fs;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE fs;*/\
.pushsection .fixup,"ax";	\
4:	movl $0,(%esp);	\
	jmp 1b;		\
5:	movl $0,(%esp);	\
	jmp 2b;		\
6:	movl $0,(%esp);	\
	jmp 3b;		\
.section __ex_table,"a";\
	.align 4;	\
	.long 1b,4b;	\
	.long 2b,5b;	\
	.long 3b,6b;	\
.popsection
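
/*
 * --- Annotation, not part of the original source ---
 * SAVE_ALL pushes the segment and general-purpose registers in the
 * reverse of the pt_regs layout shown in the header comment, so that
 * together with the words the CPU pushed on entry (and the orig_eax
 * push in each stub) the frame at %esp is a complete struct pt_regs.
 * In RESTORE_REGS, labels 1:/2:/3: mark the segment-register pops;
 * if one of them faults (e.g. on a stale selector), the __ex_table
 * entries divert to 4:/5:/6: in .fixup, which overwrite the bad
 * value on the stack with a null selector and retry the pop rather
 * than let the fault take down the kernel.
 */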
#define RING0_INT_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 3*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_EC_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 4*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_PTREGS_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
	CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
	CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
	CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
	CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
	CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC
END(ret_from_fork)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $IF_MASK,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif
	CFI_ENDPROC

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(sysenter_entry)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp:
	/*
	 * No need to follow this irqs on/off section: the syscall
	 * disabled irqs and here we enable it straight after entry:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
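
/*
 * --- Annotation, not part of the original source ---
 * %ebp at this point is the user stack pointer handed over by the
 * vsyscall page, and the potential sixth argument sits at (%ebp).
 * The highest address from which a 4-byte load stays entirely in
 * user space is __PAGE_OFFSET-4, so "cmpl $__PAGE_OFFSET-3,%ebp"
 * followed by "jae syscall_fault" rejects any pointer whose load
 * would reach into kernel memory, while the __ex_table entry above
 * recovers from a load that faults on an unmapped but otherwise
 * legal user address.
 */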
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov  PT_FS(%esp), %fs
	ENABLE_INTERRUPTS_SYSEXIT
	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
	jmp 1b
.section __ex_table,"a"
	.align 4
	.long 1b,2b
.popsection
ENDPROC(sysenter_entry)
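
/*
 * --- Annotation, not part of the original source ---
 * On the fast exit path above, SYSEXIT resumes userspace with %eip
 * taken from %edx and %esp taken from %ecx, which is why PT_EIP and
 * PT_OLDESP are loaded into exactly those registers right before
 * ENABLE_INTERRUPTS_SYSEXIT.  This is also the point of the "if
 * something modifies registers it must also disable sysexit"
 * comment: unlike IRET, SYSEXIT consumes the return state from
 * registers instead of re-reading the pt_regs frame, so a tracer
 * that edits the saved registers has to force the slower IRET path.
 */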
	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	testl $TF_MASK,PT_EFLAGS(%esp)	# If tracing set singlestep flag on exit
	jz no_singlestep
	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

restore_all:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	TRACE_IRQS_IRET
restore_nocheck_notrace:
	RESTORE_REGS
	addl $4, %esp			# skip orig_eax/error_code
	CFI_ADJUST_CFA_OFFSET -4
1:	INTERRUPT_RETURN
.section .fixup,"ax"
iret_exc:
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# allright, normal return

#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif

	/* If returning to userspace with 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy. */
	movl PT_OLDESP(%esp), %eax
	movl %esp, %edx
	call patch_espfix_desc
	pushl $__ESPFIX_SS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	DISABLE_INTERRUPTS(CLBR_EAX)
	TRACE_IRQS_OFF
	lss (%esp), %esp
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
ENDPROC(system_call)

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $VM_MASK, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
END(work_pending)

	# perform syscall exit tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	xorl %edx,%edx
	call do_syscall_trace
	cmpl $0, %eax
	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
					# so must skip actual syscall
	movl PT_ORIG_EAX(%esp), %eax
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax
	movl $1, %edx
	call do_syscall_trace
	jmp resume_userspace
END(syscall_exit_work)
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_badsys)
	CFI_ENDPROC

#define FIXUP_ESPFIX_STACK \
