
📄 entry.s

📁 linux kernel source code
📖 Page 1 of 3
/*
 * arch/ia64/kernel/entry.S
 *
 * Kernel entry points.
 *
 * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999, 2002-2003
 *	Asit Mallick <Asit.K.Mallick@intel.com>
 * 	Don Dugger <Don.Dugger@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 */
/*
 * ia64_switch_to now places correct virtual mapping in in TR2 for
 * kernel stack. This allows us to handle interrupts without changing
 * to physical mode.
 *
 * Jonathan Nicklin	<nicklin@missioncriticallinux.com>
 * Patrick O'Rourke	<orourke@missioncriticallinux.com>
 * 11/07/2000
 */
/*
 * Global (preserved) predicate usage on syscall entry/exit path:
 *
 *	pKStk:		See entry.h.
 *	pUStk:		See entry.h.
 *	pSys:		See entry.h.
 *	pNonSys:	!pSys
 */

#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

#include "minstate.h"

	/*
	 * execve() is special because in case of success, we need to
	 * setup a null register window frame.
	 */
ENTRY(ia64_execve)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,4,0
	mov loc0=rp
	.body
	mov out0=in0			// filename
	;;				// stop bit between alloc and call
	mov out1=in1			// argv
	mov out2=in2			// envp
	add out3=16,sp			// regs
	br.call.sptk.many rp=sys_execve
.ret0:
#ifdef CONFIG_IA32_SUPPORT
	/*
	 * Check if we're returning to ia32 mode. If so, we need to restore ia32 registers
	 * from pt_regs.
	 */
	adds r16=PT(CR_IPSR)+16,sp
	;;
	ld8 r16=[r16]
#endif
	cmp4.ge p6,p7=r8,r0
	mov ar.pfs=loc1			// restore ar.pfs
	sxt4 r8=r8			// return 64-bit result
	;;
	stf.spill [sp]=f0
(p6)	cmp.ne pKStk,pUStk=r0,r0	// a successful execve() lands us in user-mode...
	mov rp=loc0
(p6)	mov ar.pfs=r0			// clear ar.pfs on success
(p7)	br.ret.sptk.many rp

	/*
	 * In theory, we'd have to zap this state only to prevent leaking of
	 * security sensitive state (e.g., if current->mm->dumpable is zero).  However,
	 * this executes in less than 20 cycles even on Itanium, so it's not worth
	 * optimizing for...).
	 */
	mov ar.unat=0; 		mov ar.lc=0
	mov r4=0;		mov f2=f0;		mov b1=r0
	mov r5=0;		mov f3=f0;		mov b2=r0
	mov r6=0;		mov f4=f0;		mov b3=r0
	mov r7=0;		mov f5=f0;		mov b4=r0
	ldf.fill f12=[sp];	mov f13=f0;		mov b5=r0
	ldf.fill f14=[sp];	ldf.fill f15=[sp];	mov f16=f0
	ldf.fill f17=[sp];	ldf.fill f18=[sp];	mov f19=f0
	ldf.fill f20=[sp];	ldf.fill f21=[sp];	mov f22=f0
	ldf.fill f23=[sp];	ldf.fill f24=[sp];	mov f25=f0
	ldf.fill f26=[sp];	ldf.fill f27=[sp];	mov f28=f0
	ldf.fill f29=[sp];	ldf.fill f30=[sp];	mov f31=f0
#ifdef CONFIG_IA32_SUPPORT
	tbit.nz p6,p0=r16, IA64_PSR_IS_BIT
	movl loc0=ia64_ret_from_ia32_execve
	;;
(p6)	mov rp=loc0
#endif
	br.ret.sptk.many rp
END(ia64_execve)

/*
 * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr,
 *	      u64 tls)
 */
GLOBAL_ENTRY(sys_clone2)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc r16=ar.pfs,8,2,6,0
	DO_SAVE_SWITCH_STACK
	adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
	mov loc0=rp
	mov loc1=r16				// save ar.pfs across do_fork
	.body
	mov out1=in1
	mov out3=in2
	tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
	mov out4=in3	// parent_tidptr: valid only w/CLONE_PARENT_SETTID
	;;
(p6)	st8 [r2]=in5				// store TLS in r16 for copy_thread()
	mov out5=in4	// child_tidptr:  valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
	mov out0=in0				// out0 = clone_flags
	br.call.sptk.many rp=do_fork
.ret1:	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone2)

/*
 * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls)
 *	Deprecated.  Use sys_clone2() instead.
 */
GLOBAL_ENTRY(sys_clone)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc r16=ar.pfs,8,2,6,0
	DO_SAVE_SWITCH_STACK
	adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
	mov loc0=rp
	mov loc1=r16				// save ar.pfs across do_fork
	.body
	mov out1=in1
	mov out3=16				// stacksize (compensates for 16-byte scratch area)
	tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
	mov out4=in2	// parent_tidptr: valid only w/CLONE_PARENT_SETTID
	;;
(p6)	st8 [r2]=in4				// store TLS in r13 (tp)
	mov out5=in3	// child_tidptr:  valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
	mov out0=in0				// out0 = clone_flags
	br.call.sptk.many rp=do_fork
.ret2:	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone)

/*
 * prev_task <- ia64_switch_to(struct task_struct *next)
 *	With Ingo's new scheduler, interrupts are disabled when this routine gets
 *	called.  The code starting at .map relies on this.  The rest of the code
 *	doesn't care about the interrupt masking status.
 */
GLOBAL_ENTRY(ia64_switch_to)
	.prologue
	alloc r16=ar.pfs,1,0,0,0
	DO_SAVE_SWITCH_STACK
	.body

	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
	movl r25=init_task
	mov r27=IA64_KR(CURRENT_STACK)
	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
	dep r20=0,in0,61,3		// physical address of "next"
	;;
	st8 [r22]=sp			// save kernel stack pointer of old task
	shr.u r26=r20,IA64_GRANULE_SHIFT
	cmp.eq p7,p6=r25,in0
	;;
	/*
	 * If we've already mapped this task's page, we can skip doing it again.
	 */
(p6)	cmp.eq p7,p6=r26,r27
(p6)	br.cond.dpnt .map
	;;
.done:
	ld8 sp=[r21]			// load kernel stack pointer of new task
	mov IA64_KR(CURRENT)=in0	// update "current" application register
	mov r8=r13			// return pointer to previously running task
	mov r13=in0			// set "current" pointer
	;;
	DO_LOAD_SWITCH_STACK

#ifdef CONFIG_SMP
	sync.i				// ensure "fc"s done by this CPU are visible on other CPUs
#endif
	br.ret.sptk.many rp		// boogie on out in new context

.map:
	rsm psr.ic			// interrupts (psr.i) are already disabled here
	movl r25=PAGE_KERNEL
	;;
	srlz.d
	or r23=r25,r20			// construct PA | page properties
	mov r25=IA64_GRANULE_SHIFT<<2
	;;
	mov cr.itir=r25
	mov cr.ifa=in0			// VA of next task...
	;;
	mov r25=IA64_TR_CURRENT_STACK
	mov IA64_KR(CURRENT_STACK)=r26	// remember last page we mapped...
	;;
	itr.d dtr[r25]=r23		// wire in new mapping...
	ssm psr.ic			// reenable the psr.ic bit
	;;
	srlz.d
	br.cond.sptk .done
END(ia64_switch_to)

/*
 * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
 * means that we may get an interrupt with "sp" pointing to the new kernel stack while
 * ar.bspstore is still pointing to the old kernel backing store area.  Since ar.rsc,
 * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a
 * problem.  Also, we don't need to specify unwind information for preserved registers
 * that are not modified in save_switch_stack as the right unwind information is already
 * specified at the call-site of save_switch_stack.
 */

/*
 * save_switch_stack:
 *	- r16 holds ar.pfs
 *	- b7 holds address to return to
 *	- rp (b0) holds return address to save
 */
GLOBAL_ENTRY(save_switch_stack)
	.prologue
	.altrp b7
	flushrs			// flush dirty regs to backing store (must be first in insn group)
	.save @priunat,r17
	mov r17=ar.unat		// preserve caller's
	.body
#ifdef CONFIG_ITANIUM
	adds r2=16+128,sp
	adds r3=16+64,sp
	adds r14=SW(R4)+16,sp
	;;
	st8.spill [r14]=r4,16		// spill r4
	lfetch.fault.excl.nt1 [r3],128
	;;
	lfetch.fault.excl.nt1 [r2],128
	lfetch.fault.excl.nt1 [r3],128
	;;
	lfetch.fault.excl [r2]
	lfetch.fault.excl [r3]
	adds r15=SW(R5)+16,sp
#else
	add r2=16+3*128,sp
	add r3=16,sp
	add r14=SW(R4)+16,sp
	;;
	st8.spill [r14]=r4,SW(R6)-SW(R4)	// spill r4 and prefetch offset 0x1c0
	lfetch.fault.excl.nt1 [r3],128	//		prefetch offset 0x010
	;;
	lfetch.fault.excl.nt1 [r3],128	//		prefetch offset 0x090
	lfetch.fault.excl.nt1 [r2],128	//		prefetch offset 0x190
	;;
	lfetch.fault.excl.nt1 [r3]	//		prefetch offset 0x110
	lfetch.fault.excl.nt1 [r2]	//		prefetch offset 0x210
	adds r15=SW(R5)+16,sp
#endif
	;;
	st8.spill [r15]=r5,SW(R7)-SW(R5)	// spill r5
	mov.m ar.rsc=0			// put RSE in mode: enforced lazy, little endian, pl 0
	add r2=SW(F2)+16,sp		// r2 = &sw->f2
	;;
	st8.spill [r14]=r6,SW(B0)-SW(R6)	// spill r6
	mov.m r18=ar.fpsr		// preserve fpsr
	add r3=SW(F3)+16,sp		// r3 = &sw->f3
	;;
	stf.spill [r2]=f2,32
	mov.m r19=ar.rnat
	mov r21=b0

	stf.spill [r3]=f3,32
	st8.spill [r15]=r7,SW(B2)-SW(R7)	// spill r7
	mov r22=b1
	;;
	// since we're done with the spills, read and save ar.unat:
	mov.m r29=ar.unat
	mov.m r20=ar.bspstore
	mov r23=b2
	stf.spill [r2]=f4,32
	stf.spill [r3]=f5,32
	mov r24=b3
	;;
	st8 [r14]=r21,SW(B1)-SW(B0)		// save b0
	st8 [r15]=r23,SW(B3)-SW(B2)		// save b2
	mov r25=b4
	mov r26=b5
	;;
	st8 [r14]=r22,SW(B4)-SW(B1)		// save b1
	st8 [r15]=r24,SW(AR_PFS)-SW(B3)		// save b3
	mov r21=ar.lc		// I-unit
	stf.spill [r2]=f12,32
	stf.spill [r3]=f13,32
	;;
	st8 [r14]=r25,SW(B5)-SW(B4)		// save b4
	st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS)	// save ar.pfs
	stf.spill [r2]=f14,32
	stf.spill [r3]=f15,32
	;;
	st8 [r14]=r26				// save b5
	st8 [r15]=r21				// save ar.lc
	stf.spill [r2]=f16,32
	stf.spill [r3]=f17,32
	;;
	stf.spill [r2]=f18,32
	stf.spill [r3]=f19,32
	;;
	stf.spill [r2]=f20,32
	stf.spill [r3]=f21,32
	;;
	stf.spill [r2]=f22,32
	stf.spill [r3]=f23,32
	;;
	stf.spill [r2]=f24,32
	stf.spill [r3]=f25,32
	;;
	stf.spill [r2]=f26,32
	stf.spill [r3]=f27,32
	;;
	stf.spill [r2]=f28,32
	stf.spill [r3]=f29,32
	;;
	stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
	stf.spill [r3]=f31,SW(PR)-SW(F31)
	add r14=SW(CALLER_UNAT)+16,sp
	;;
	st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT)	// save ar.unat
	st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat
	mov r21=pr
	;;
	st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat
	st8 [r3]=r21				// save predicate registers
	;;
	st8 [r2]=r20				// save ar.bspstore
	st8 [r14]=r18				// save fpsr
	mov ar.rsc=3		// put RSE back into eager mode, pl 0
	br.cond.sptk.many b7
END(save_switch_stack)

/*
 * load_switch_stack:
 *	- "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
 *	- b7 holds address to return to
 *	- must not touch r8-r11
 */
ENTRY(load_switch_stack)
	.prologue
	.altrp b7

	.body
	lfetch.fault.nt1 [sp]
	adds r2=SW(AR_BSPSTORE)+16,sp
	adds r3=SW(AR_UNAT)+16,sp
	mov ar.rsc=0						// put RSE into enforced lazy mode
	adds r14=SW(CALLER_UNAT)+16,sp
	adds r15=SW(AR_FPSR)+16,sp
	;;
	ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE))	// bspstore
	ld8 r29=[r3],(SW(B1)-SW(AR_UNAT))	// unat
	;;
	ld8 r21=[r2],16		// restore b0
	ld8 r22=[r3],16		// restore b1
	;;
	ld8 r23=[r2],16		// restore b2
	ld8 r24=[r3],16		// restore b3
	;;
	ld8 r25=[r2],16		// restore b4
	ld8 r26=[r3],16		// restore b5
	;;
	ld8 r16=[r2],(SW(PR)-SW(AR_PFS))	// ar.pfs
	ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC))	// ar.lc
	;;
	ld8 r28=[r2]		// restore pr
	ld8 r30=[r3]		// restore rnat
	;;
	ld8 r18=[r14],16	// restore caller's unat
	ld8 r19=[r15],24	// restore fpsr
	;;
	ldf.fill f2=[r14],32
	ldf.fill f3=[r15],32
	;;
	ldf.fill f4=[r14],32
	ldf.fill f5=[r15],32
	;;
	ldf.fill f12=[r14],32
	ldf.fill f13=[r15],32
	;;
	ldf.fill f14=[r14],32
	ldf.fill f15=[r15],32
	;;
	ldf.fill f16=[r14],32
	ldf.fill f17=[r15],32
	;;
	ldf.fill f18=[r14],32
	ldf.fill f19=[r15],32
	mov b0=r21
	;;
	ldf.fill f20=[r14],32
	ldf.fill f21=[r15],32
	mov b1=r22
	;;
	ldf.fill f22=[r14],32
	ldf.fill f23=[r15],32
	mov b2=r23
	;;
	mov ar.bspstore=r27
	mov ar.unat=r29		// establish unat holding the NaT bits for r4-r7
	mov b3=r24
	;;
	ldf.fill f24=[r14],32
	ldf.fill f25=[r15],32
	mov b4=r25
	;;
	ldf.fill f26=[r14],32
	ldf.fill f27=[r15],32
	mov b5=r26
	;;
	ldf.fill f28=[r14],32
	ldf.fill f29=[r15],32
	mov ar.pfs=r16
	;;
	ldf.fill f30=[r14],32
	ldf.fill f31=[r15],24
	mov ar.lc=r17
	;;
	ld8.fill r4=[r14],16
	ld8.fill r5=[r15],16
	mov pr=r28,-1
	;;
	ld8.fill r6=[r14],16
	ld8.fill r7=[r15],16

	mov ar.unat=r18				// restore caller's unat
	mov ar.rnat=r30				// must restore after bspstore but before rsc!
	mov ar.fpsr=r19				// restore fpsr
	mov ar.rsc=3				// put RSE back into eager mode, pl 0
	br.cond.sptk.many b7
END(load_switch_stack)

GLOBAL_ENTRY(prefetch_stack)
	add r14 = -IA64_SWITCH_STACK_SIZE, sp
	add r15 = IA64_TASK_THREAD_KSP_OFFSET, in0
	;;
	ld8 r16 = [r15]				// load next's stack pointer
	lfetch.fault.excl [r14], 128
	;;
	lfetch.fault.excl [r14], 128
	lfetch.fault [r16], 128
	;;
	lfetch.fault.excl [r14], 128
	lfetch.fault [r16], 128
	;;
	lfetch.fault.excl [r14], 128
	lfetch.fault [r16], 128
	;;
	lfetch.fault.excl [r14], 128
	lfetch.fault [r16], 128
	;;
	lfetch.fault [r16], 128
	br.ret.sptk.many rp
END(prefetch_stack)

GLOBAL_ENTRY(kernel_execve)
	mov r15=__NR_execve			// put syscall number in place
	break __BREAK_SYSCALL
	br.ret.sptk.many rp
END(kernel_execve)

GLOBAL_ENTRY(clone)
	mov r15=__NR_clone			// put syscall number in place
	break __BREAK_SYSCALL
	br.ret.sptk.many rp
END(clone)

	/*
	 * Invoke a system call, but do some tracing before and after the call.
	 * We MUST preserve the current register frame throughout this routine
	 * because some system calls (such as ia64_execve) directly
	 * manipulate ar.pfs.
	 */
GLOBAL_ENTRY(ia64_trace_syscall)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * We need to preserve the scratch registers f6-f11 in case the system
	 * call is sigreturn.
	 */
	adds r16=PT(F6)+16,sp
	adds r17=PT(F7)+16,sp
	;;
 	stf.spill [r16]=f6,32
 	stf.spill [r17]=f7,32
	;;
 	stf.spill [r16]=f8,32
 	stf.spill [r17]=f9,32
	;;
 	stf.spill [r16]=f10
 	stf.spill [r17]=f11
	br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
	adds r16=PT(F6)+16,sp
