
entry.S — linux-2.6.15.6, x86-64 low-level entry code (excerpt, page 1 of 2)
	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz  retint_careful
retint_swapgs:
	swapgs
retint_restore_args:
	cli
	RESTORE_ARGS 0,8,0
iret_label:
	iretq

	.section __ex_table,"a"
	.quad iret_label,bad_iret
	.previous
	.section .fixup,"ax"
	/* force a signal here? this matches i386 behaviour */
	/* running with kernel gs */
bad_iret:
	movq $-9999,%rdi	/* better code? */
	jmp do_exit
	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	sti
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call  schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	cli
	jmp retint_check

retint_signal:
	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
	jz    retint_swapgs
	sti
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	cli
	movl $_TIF_NEED_RESCHED,%edi
	GET_THREAD_INFO(%rcx)
	jmp retint_check

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx: threadinfo. interrupts off. */
	.p2align
retint_kernel:
	cmpl $0,threadinfo_preempt_count(%rcx)
	jnz  retint_restore_args
	bt  $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC

/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $\num-256
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
#endif

#ifdef CONFIG_X86_LOCAL_APIC
ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
#endif

/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym
	SAVE_ALL
	cld
	movl $1,%ebx
	movl  $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js    1f
	swapgs
	xorl  %ebx,%ebx
1:	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	call \sym
	cli
	.endm
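
/*
 * Illustrative sketch, not part of the original file: the zeroentry and
 * errorentry macros above funnel every exception into error_entry with the
 * handler address in %rax. Expanding "zeroentry do_divide_error" by hand
 * gives roughly the body below (guarded out so it never assembles).
 */
#if 0
	INTR_FRAME
	pushq $0			/* fake error code in the orig_rax slot */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax			/* save the real rax in the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	leaq  do_divide_error(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
#endif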
/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
ENTRY(error_entry)
	_frame RDI
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq  $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je  error_kernelspace
error_swapgs:
	swapgs
error_sti:
	movq %rdi,RDI(%rsp)
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	cli
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne  retint_kernel
	movl  threadinfo_flags(%rcx),%edx
	movl  $_TIF_WORK_MASK,%edi
	andl  %edi,%edx
	jnz  retint_careful
	swapgs
	RESTORE_ARGS 0,8,0
	iretq
	CFI_ENDPROC

error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B-stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq iret_label(%rip),%rbp
	cmpq %rbp,RIP(%rsp)
	je   error_swapgs
	movl %ebp,%ebp	/* zero extend */
	cmpq %rbp,RIP(%rsp)
	je   error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je   error_swapgs
	jmp  error_sti

	/* Reload gs selector with exception handling */
	/* edi:  new selector */
ENTRY(load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	cli
	swapgs
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	swapgs
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernel gs */
bad_gs:
	swapgs			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp  2b
	.previous

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for a reschedule here, so within the
	 * x86_64 port you can rely on kernel_thread() not rescheduling
	 * the child before returning; this avoids the need for hacks,
	 * for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
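
/*
 * Illustrative sketch, not part of the original file: per the C interface
 * quoted above, in-kernel callers start a thread running fn(arg). Following
 * the documented register convention, a hypothetical caller that spawns
 * my_fn with no argument and SIGCHLD as the only extra flag would look like
 * this (my_fn is an assumption, guarded out so it never assembles):
 */
#if 0
	leaq  my_fn(%rip),%rdi		/* rdi: fn */
	xorl  %esi,%esi			/* rsi: arg (none here) */
	movq  $SIGCHLD,%rdx		/* rdx: flags, OR-ed with kernel_thread_flags above */
	call  kernel_thread		/* pid (or -errno) returned in %rax */
#endif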
child_rip:
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	xorl %edi, %edi
	call do_exit

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, fake frame on the stack
 */
ENTRY(execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
	.previous .text

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error

ENTRY(device_not_available)
	zeroentry math_state_restore

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug
	jmp paranoid_exit
	CFI_ENDPROC
	.previous .text

	/* runs on exception stack */
ENTRY(nmi)
	INTR_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi
	/*
	 * "Paranoid" exit path from exception stack.
	 * Paranoid because this is used by NMIs and cannot take
	 * any kernel state for granted.
	 * We don't do kernel preemption checks here, because only
	 * NMI should be common and it does not enable IRQs and
	 * cannot get reschedule ticks.
	 */
	/* ebx:	no swapgs flag */
paranoid_exit:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore
	testl $3,CS(%rsp)
	jnz   paranoid_userspace
paranoid_swapgs:
	swapgs
paranoid_restore:
	RESTORE_ALL 8
	iretq
paranoid_userspace:
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule
	movl %ebx,%edx			/* arg3: thread flags */
	sti
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	cli
	jmp paranoid_userspace
paranoid_schedule:
	sti
	call schedule
	cli
	jmp paranoid_userspace
	CFI_ENDPROC

KPROBE_ENTRY(int3)
	zeroentry do_int3
	.previous .text

ENTRY(overflow)
	zeroentry do_overflow

ENTRY(bounds)
	zeroentry do_bounds

ENTRY(invalid_op)
	zeroentry do_invalid_op

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun

ENTRY(reserved)
	zeroentry do_reserved

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit
	CFI_ENDPROC

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS

ENTRY(segment_not_present)
	errorentry do_segment_not_present

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit
	CFI_ENDPROC

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
	.previous .text

ENTRY(alignment_check)
	errorentry do_alignment_check

ENTRY(divide_error)
	zeroentry do_divide_error

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit
	CFI_ENDPROC
#endif

ENTRY(call_debug)
	zeroentry do_call_debug
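
/*
 * Illustrative note, not part of the original file: call_softirq below runs
 * __do_softirq on the per-CPU interrupt stack. The incl/cmove pair relies on
 * pda_irqcount starting at -1 (an assumption about the PDA layout of this
 * kernel generation): the increment reaches 0 and sets ZF only when we are
 * not already on the interrupt stack, so cmove switches %rsp to
 * pda_irqstackptr exactly once, and nested invocations keep the stack they
 * are already on. The old stack pointer is preserved in %r15 across the call.
 */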
ENTRY(call_softirq)
	CFI_STARTPROC
	movq %gs:pda_irqstackptr,%rax
	pushq %r15
	CFI_ADJUST_CFA_OFFSET 8
	movq %rsp,%r15
	CFI_DEF_CFA_REGISTER	r15
	incl %gs:pda_irqcount
	cmove %rax,%rsp
	call __do_softirq
	movq %r15,%rsp
	CFI_DEF_CFA_REGISTER	rsp
	decl %gs:pda_irqcount
	popq %r15
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
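
/*
 * Illustrative sketch, not part of the original file: load_gs_index, defined
 * earlier in this listing, is the exception-safe way to load a new selector
 * into %gs (a bad selector faults at gs_change and is fixed up via bad_gs).
 * A hypothetical caller passes the selector in %edi; the value here is made
 * up (guarded out so it never assembles):
 */
#if 0
	movl  $0x33,%edi		/* hypothetical new %gs selector */
	call  load_gs_index
#endif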
