/*
 * entry.S — Linux 2.5.1, arch/i386
 * (transcribed from a source-listing site: "complete Linux 2.5.1 sources",
 *  788 lines total, page 1 of 2 — the remainder follows in part 2)
 */
/*
 *  linux/arch/i386/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'ret_from_system_call':
 * 	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *       C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - orig_eax
 *	28(%esp) - %eip
 *	2C(%esp) - %cs
 *	30(%esp) - %eflags
 *	34(%esp) - %oldesp
 *	38(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */

#include <linux/config.h>
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/irq_vectors.h>

/* Byte offsets into the saved-register frame built by SAVE_ALL;
 * they must match the "Stack layout" table above exactly. */
EBX		= 0x00
ECX		= 0x04
EDX		= 0x08
ESI		= 0x0C
EDI		= 0x10
EBP		= 0x14
EAX		= 0x18
DS		= 0x1C
ES		= 0x20
ORIG_EAX	= 0x24
EIP		= 0x28
CS		= 0x2C
EFLAGS		= 0x30
OLDESP		= 0x34
OLDSS		= 0x38

/* EFLAGS bit masks used when inspecting the saved flags word. */
CF_MASK		= 0x00000001
IF_MASK		= 0x00000200
NT_MASK		= 0x00004000
VM_MASK		= 0x00020000

/*
 * These are offsets into the irq_stat structure
 * There is one per cpu and it is aligned to 32
 * byte boundry (we put that here as a shift count)
 */
irq_array_shift	= CONFIG_X86_L1_CACHE_SHIFT
local_irq_count	= 4
local_bh_count	= 8

#ifdef CONFIG_SMP
/* Compute this CPU's index into the per-cpu irq_stat array:
 * %eax = cpu number << irq_array_shift.  Clobbers %eax (and %ebx
 * in the GET_CURRENT_CPU_IDX form). */
#define GET_CPU_IDX \
		movl TI_CPU(%ebx), %eax;  \
		shll $irq_array_shift, %eax
#define GET_CURRENT_CPU_IDX \
		GET_THREAD_INFO(%ebx); \
		GET_CPU_IDX
#define CPU_IDX (,%eax)
#else
/* UP: the array has a single element, so the index macros are empty. */
#define GET_CPU_IDX
#define GET_CURRENT_CPU_IDX GET_THREAD_INFO(%ebx)
#define CPU_IDX
#endif

#ifdef CONFIG_PREEMPT
/* Kernel preemption: interrupts must stay off across the need_resched
 * sampling, and the per-thread preempt count is kept in thread_info. */
#define preempt_stop cli
#define INC_PRE_COUNT(reg) incl TI_PRE_COUNT(reg);
#define DEC_PRE_COUNT(reg) decl TI_PRE_COUNT(reg);
#else
#define preempt_stop
#define INC_PRE_COUNT(reg)
#define DEC_PRE_COUNT(reg)
/* Without CONFIG_PREEMPT a return-to-kernel just restores registers. */
#define resume_kernel restore_all
#endif

/* Build the pt_regs frame described in the header comment and switch
 * %ds/%es to the kernel data segment.  Note the pushes run from %es
 * down to %ebx so the offsets above come out right. */
#define SAVE_ALL \
	cld; \
	pushl %es; \
	pushl %ds; \
	pushl %eax; \
	pushl %ebp; \
	pushl %edi; \
	pushl %esi; \
	pushl %edx; \
	pushl %ecx; \
	pushl %ebx; \
	movl $(__KERNEL_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es;

/* Undo SAVE_ALL and return to the caller with iret.  The segment pops
 * and the iret itself can fault on a bad user selector/stack; the
 * .fixup/__ex_table entries map those faults (labels 1,2,3) to their
 * recovery code (labels 4,5,6).  A faulting iret is fatal for the
 * task: signal 11 via do_exit, after making %ds/%es sane from %ss. */
#define RESTORE_ALL	\
	popl %ebx;	\
	popl %ecx;	\
	popl %edx;	\
	popl %esi;	\
	popl %edi;	\
	popl %ebp;	\
	popl %eax;	\
1:	popl %ds;	\
2:	popl %es;	\
	addl $4, %esp;	\
3:	iret;		\
.section .fixup,"ax";	\
4:	movl $0,(%esp);	\
	jmp 1b;		\
5:	movl $0,(%esp);	\
	jmp 2b;		\
6:	pushl %ss;	\
	popl %ds;	\
	pushl %ss;	\
	popl %es;	\
	pushl $11;	\
	call do_exit;	\
.previous;		\
.section __ex_table,"a";\
	.align 4;	\
	.long 1b,4b;	\
	.long 2b,5b;	\
	.long 3b,6b;	\
.previous

/* iBCS2 lcall 0x7,0 entry point: dispatch to the execution domain's
 * lcall7 handler.  A call gate pushes no eflags, so the saved slots
 * come out rotated and must be shuffled into pt_regs order below. */
ENTRY(lcall7)
	pushfl			# We get a different stack layout with call
				# gates, which has to be cleaned up later..
	pushl %eax
	SAVE_ALL
	movl EIP(%esp), %eax	# due to call gates, this is eflags, not eip..
	movl CS(%esp), %edx	# this is eip..
	movl EFLAGS(%esp), %ecx	# and this is cs..
	movl %eax,EFLAGS(%esp)	#
	movl %edx,EIP(%esp)	# Now we move them to their "normal" places
	movl %ecx,CS(%esp)	#
	movl %esp, %ebx
	pushl %ebx
	andl $-8192, %ebx	# GET_THREAD_INFO (thread_info is at the
				# base of the 8K stack, hence the mask)
	movl TI_EXEC_DOMAIN(%ebx), %edx	# Get the execution domain
	movl 4(%edx), %edx	# Get the lcall7 handler for the domain
	pushl $0x7
	call *%edx
	addl $4, %esp
	popl %eax
	jmp resume_userspace

/* Solaris/x86 lcall 0x27,0 entry point — identical to lcall7 except
 * for the gate number passed to the handler. */
ENTRY(lcall27)
	pushfl			# We get a different stack layout with call
				# gates, which has to be cleaned up later..
	pushl %eax
	SAVE_ALL
	movl EIP(%esp), %eax	# due to call gates, this is eflags, not eip..
	movl CS(%esp), %edx	# this is eip..
	movl EFLAGS(%esp), %ecx	# and this is cs..
	movl %eax,EFLAGS(%esp)	#
	movl %edx,EIP(%esp)	# Now we move them to their "normal" places
	movl %ecx,CS(%esp)	#
	movl %esp, %ebx
	pushl %ebx
	andl $-8192, %ebx	# GET_THREAD_INFO
	movl TI_EXEC_DOMAIN(%ebx), %edx	# Get the execution domain
	movl 4(%edx), %edx	# Get the lcall7 handler for the domain
	pushl $0x27
	call *%edx
	addl $4, %esp
	popl %eax
	jmp resume_userspace

/* First code run by a child task after fork/clone: finish the context
 * switch bookkeeping (SMP/preempt only), then take the normal
 * syscall-exit path with %ebx = thread_info as that path expects. */
ENTRY(ret_from_fork)
#if CONFIG_SMP || CONFIG_PREEMPT
	call SYMBOL_NAME(schedule_tail)
#endif
	GET_THREAD_INFO(%ebx)
	jmp syscall_exit

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_intr:
	preempt_stop
	DEC_PRE_COUNT(%ebx)
ret_from_exception:
	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb CS(%esp), %al		# low byte now holds saved CS; RPL
					# bits + VM flag tested together
	testl $(VM_MASK | 3), %eax
	jz resume_kernel		# returning to kernel or vm86-space
ENTRY(resume_userspace)
 	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_FLAGS(%ebx), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all

#ifdef CONFIG_PREEMPT
/* Preemptive return to kernel mode: reschedule only when the preempt
 * count is zero, TIF_NEED_RESCHED is set, and we are not inside an
 * interrupt or bottom-half handler on this CPU. */
ENTRY(resume_kernel)
	cmpl $0,TI_PRE_COUNT(%ebx)
	jnz restore_all
	movl TI_FLAGS(%ebx), %ecx
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	movl SYMBOL_NAME(irq_stat)+local_bh_count CPU_IDX, %ecx
	addl SYMBOL_NAME(irq_stat)+local_irq_count CPU_IDX, %ecx
	jnz restore_all			# in irq/bh context: no preemption
	movl $PREEMPT_ACTIVE,TI_PRE_COUNT(%ebx)
	sti
	call SYMBOL_NAME(schedule)
	movl $0,TI_PRE_COUNT(%ebx)
 	jmp restore_all
#endif

	# system call handler stub
	ALIGN
ENTRY(system_call)
	pushl %eax			# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebx)
	cmpl $(NR_syscalls), %eax
	jae syscall_badsys
					# system call tracing in operation
	testb $_TIF_SYSCALL_TRACE,TI_FLAGS(%ebx)
	jnz syscall_trace_entry
syscall_call:
	call *SYMBOL_NAME(sys_call_table)(,%eax,4)
	movl %eax,EAX(%esp)		# store the return value
syscall_exit:
	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_FLAGS(%ebx), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work
restore_all:
	RESTORE_ALL

	# perform work that needs to be done immediately before resumption
	ALIGN
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call SYMBOL_NAME(schedule)
	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_FLAGS(%ebx), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched
work_notifysig:				# deal with pending signals and
					# notify-resume requests
	testl $(VM_MASK),EFLAGS(%esp)
	movl %esp, %eax			# pt_regs pointer for do_notify_resume
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call SYMBOL_NAME(do_notify_resume)
	jmp restore_all

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags across the call
	call SYMBOL_NAME(save_v86_state)
	popl %ecx
	movl %eax, %esp			# save_v86_state returned the real
					# kernel-stack pt_regs
	xorl %edx, %edx
	call SYMBOL_NAME(do_notify_resume)
	jmp restore_all

	# perform syscall entry tracing (ptrace)
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,EAX(%esp)		# default return if tracer kills
					# the syscall
	movl %esp, %eax
	xorl %edx,%edx
	call SYMBOL_NAME(do_syscall_trace)
	movl ORIG_EAX(%esp), %eax	# reload: the tracer may have
					# changed the syscall number
	cmpl $(NR_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $_TIF_SYSCALL_TRACE, %cl
	jz work_pending
	sti				# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax
	movl $1, %edx			# second arg: this is the exit hook
	call SYMBOL_NAME(do_syscall_trace)
	jmp resume_userspace

	ALIGN
syscall_badsys:
	movl $-ENOSYS,EAX(%esp)
	jmp resume_userspace

/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.data
ENTRY(interrupt)
.text

/* Emit NR_IRQS tiny stubs; each pushes (vector - 256) as orig_eax —
 * negative, so it can never be mistaken for a syscall number — and
 * jumps to the common handler.  The .data interleaving collects the
 * stub addresses into the interrupt[] table declared above. */
vector=0
ENTRY(irq_entries_start)
.rept NR_IRQS
	ALIGN
1:	pushl $vector-256
	jmp common_interrupt
.data
	.long 1b
.text
vector=vector+1
.endr

	ALIGN
common_interrupt:
	SAVE_ALL
	GET_THREAD_INFO(%ebx)
	INC_PRE_COUNT(%ebx)
	call SYMBOL_NAME(do_IRQ)
	jmp ret_from_intr

/* Build a dedicated entry point for an architecture-defined vector;
 * the handler called is smp_<name> (token pasting via the old
 * /**/ trick, since this predates ## in kernel asm macros). */
#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	pushl $nr-256;			\
	SAVE_ALL			\
	GET_THREAD_INFO(%ebx);		\
	INC_PRE_COUNT(%ebx)		\
	call SYMBOL_NAME(smp_/**/name);	\
	jmp ret_from_intr;

/*
 * The following vectors are part of the Linux architecture, there
 * is no hardware IRQ pin equivalent for them, they are triggered
 * through the ICC by us (IPIs)
 */
#ifdef CONFIG_SMP
BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
#endif

/*
 * every pentium local APIC has two 'local interrupts', with a
 * soft-definable vector attached to both interrupts, one of
 * which is a timer interrupt, the other one is error counter
 * overflow. Linux uses the local APIC timer interrupt to get
 * a much simpler SMP time architecture:
 */
#ifdef CONFIG_X86_LOCAL_APIC
/* NOTE(review): the body of this #ifdef is on page 2 of the listing;
 * it is not visible in this chunk. */

/* (end of page 1 of the listing — the APIC interrupt stubs and the rest
 * of entry.S continue on page 2) */