
📄 entry_64.s

📁 linux kernel source code
	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	TRACE_IRQS_ON
	sti
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call  schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	cli
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
	jz    retint_swapgs
	TRACE_IRQS_ON
	sti
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	cli
	TRACE_IRQS_OFF
	movl $_TIF_NEED_RESCHED,%edi
	GET_THREAD_INFO(%rcx)
	jmp retint_check

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	 threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,threadinfo_preempt_count(%rcx)
	jnz  retint_restore_args
	bt  $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)

ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl  $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js    1f
	swapgs
	xorl  %ebx,%ebx
1:
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	cli
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm

	/*
	 * "Paranoid" exit path from exception stack.
	 * Paranoid because this is used by NMIs and cannot take
	 * any kernel state for granted.
	 * We don't do kernel preemption checks here, because only
	 * NMI should be common and it does not enable IRQs and
	 * cannot get reschedule ticks.
	 *
	 * "trace" is 0 for the NMI handler only, because irq-tracing
	 * is fundamentally NMI-unsafe. (we cannot change the soft and
	 * hard flags at once, atomically)
	 */
	.macro paranoidexit trace=1
	/* ebx:	no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz   paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	swapgs
paranoid_restore\trace:
	RESTORE_ALL 8
	iretq
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	sti
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	cli
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	sti
	call schedule
	cli
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
	CFI_ENDPROC
	.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq  $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER	rax,rsi
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je  error_kernelspace
error_swapgs:
	swapgs
error_sti:
	movq %rdi,RDI(%rsp)
	CFI_REL_OFFSET	rdi,RDI
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	cli
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne  retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl  threadinfo_flags(%rcx),%edx
	movl  $_TIF_WORK_MASK,%edi
	andl  %edi,%edx
	jnz  retint_careful
	jmp retint_swapgs
	CFI_ENDPROC

error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report an truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq iret_label(%rip),%rbp
	cmpq %rbp,RIP(%rsp)
	je   error_swapgs
	movl %ebp,%ebp	/* zero extend */
	cmpq %rbp,RIP(%rsp)
	je   error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je   error_swapgs
	jmp  error_sti
KPROBE_END(error_entry)

	/* Reload gs selector with exception handling */
	/* edi:  new selector */
ENTRY(load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	cli
	swapgs
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	swapgs
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	swapgs			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp  2b
	.previous

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth to check for reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning, this avoids the need
	 * of hacks for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	 extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fallback into:
 *	extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

ENTRY(reserved)
	zeroentry do_reserved
END(reserved)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push  %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET   -8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)
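
The block comment above ENTRY(kernel_thread) documents the C-callable interface this assembly implements: extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags), with rdi, rsi and rdx carrying fn, arg and flags. As a minimal sketch only, not part of entry_64.s, here is how in-kernel code of this vintage could call that interface; the function names, the printk messages and the CLONE_FS | CLONE_FILES flag choice are illustrative assumptions, and the header that declares kernel_thread() varies between kernel versions.

	#include <linux/kernel.h>
	#include <linux/sched.h>	/* kernel_thread() declaration assumed here */

	/* Thread body with the int (*fn)(void *) signature kernel_thread() expects.
	 * Its return value ends up in %eax and is passed to do_exit() by child_rip. */
	static int example_thread_fn(void *arg)
	{
		printk(KERN_INFO "example kernel thread running, arg=%p\n", arg);
		return 0;
	}

	static void start_example_thread(void)
	{
		/* fn, arg, flags correspond to rdi, rsi, rdx in ENTRY(kernel_thread) */
		long pid = kernel_thread(example_thread_fn, NULL,
					 CLONE_FS | CLONE_FILES);
		if (pid < 0)
			printk(KERN_ERR "kernel_thread failed: %ld\n", pid);
	}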
