
📄 entry.S

📁 From the uploaded linux-jx2410 source tree
💻 Assembly source (.S)
📖 Page 1 of 3
/*
 * ia64/kernel/entry.S
 *
 * Kernel entry points.
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com>
 */
/*
 * ia64_switch_to now places correct virtual mapping in TR2 for the
 * kernel stack. This allows us to handle interrupts without changing
 * to physical mode.
 *
 * Jonathan Nicklin	<nicklin@missioncriticallinux.com>
 * Patrick O'Rourke	<orourke@missioncriticallinux.com>
 * 11/07/2000
 */
/*
 * Global (preserved) predicate usage on syscall entry/exit path:
 *
 *	pKern:		See entry.h.
 *	pUser:		See entry.h.
 *	pSys:		See entry.h.
 *	pNonSys:	!pSys
 */

#include <linux/config.h>

#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/processor.h>
#include <asm/unistd.h>
#include <asm/asmmacro.h>
#include <asm/pgtable.h>

#include "minstate.h"

	/*
	 * execve() is special because in case of success, we need to
	 * setup a null register window frame.
	 */
ENTRY(ia64_execve)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(3)
	alloc loc1=ar.pfs,3,2,4,0
	mov loc0=rp
	.body
	mov out0=in0			// filename
	;;				// stop bit between alloc and call
	mov out1=in1			// argv
	mov out2=in2			// envp
	add out3=16,sp			// regs
	br.call.sptk.many rp=sys_execve
.ret0:	cmp4.ge p6,p7=r8,r0
	mov ar.pfs=loc1			// restore ar.pfs
	sxt4 r8=r8			// return 64-bit result
	;;
	stf.spill [sp]=f0
(p6)	cmp.ne pKern,pUser=r0,r0	// a successful execve() lands us in user-mode...
	mov rp=loc0
(p6)	mov ar.pfs=r0			// clear ar.pfs on success
(p7)	br.ret.sptk.many rp
	/*
	 * In theory, we'd have to zap this state only to prevent leaking of
	 * security sensitive state (e.g., if current->mm->dumpable is zero).  However,
	 * this executes in less than 20 cycles even on Itanium, so it's not worth
	 * optimizing for...
	 */
	mov r4=0;		mov f2=f0;		mov b1=r0
	mov r5=0;		mov f3=f0;		mov b2=r0
	mov r6=0;		mov f4=f0;		mov b3=r0
	mov r7=0;		mov f5=f0;		mov b4=r0
	mov ar.unat=0;		mov f10=f0;		mov b5=r0
	ldf.fill f11=[sp];	ldf.fill f12=[sp];	mov f13=f0
	ldf.fill f14=[sp];	ldf.fill f15=[sp];	mov f16=f0
	ldf.fill f17=[sp];	ldf.fill f18=[sp];	mov f19=f0
	ldf.fill f20=[sp];	ldf.fill f21=[sp];	mov f22=f0
	ldf.fill f23=[sp];	ldf.fill f24=[sp];	mov f25=f0
	ldf.fill f26=[sp];	ldf.fill f27=[sp];	mov f28=f0
	ldf.fill f29=[sp];	ldf.fill f30=[sp];	mov f31=f0
	mov ar.lc=0
	br.ret.sptk.many rp
END(ia64_execve)
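The C-level contract behind this stub: it marshals filename/argv/envp plus a pointer to the saved pt_regs into sys_execve(), sign-extends the result to 64 bits, and on success wipes the preserved registers before returning to user mode. A minimal user-space sketch of the same execve() contract (not kernel code; the /bin/echo path is just an example):

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char *argv[] = { "/bin/echo", "hello", NULL };
		char *envp[] = { NULL };

		execve("/bin/echo", argv, envp);	/* returns only on failure */
		perror("execve");			/* e.g. ENOENT if the path is wrong */
		return 1;
	}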
GLOBAL_ENTRY(sys_clone2)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
	alloc r16=ar.pfs,3,2,4,0
	DO_SAVE_SWITCH_STACK
	mov loc0=rp
	mov loc1=r16				// save ar.pfs across do_fork
	.body
	mov out1=in1
	mov out3=in2
	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
	mov out0=in0				// out0 = clone_flags
	br.call.sptk.many rp=do_fork
.ret1:	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone2)

GLOBAL_ENTRY(sys_clone)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
	alloc r16=ar.pfs,2,2,4,0
	DO_SAVE_SWITCH_STACK
	mov loc0=rp
	mov loc1=r16				// save ar.pfs across do_fork
	.body
	mov out1=in1
	mov out3=0
	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
	mov out0=in0				// out0 = clone_flags
	br.call.sptk.many rp=do_fork
.ret2:	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone)
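Both wrappers above funnel into do_fork(): out0 carries clone_flags, out1 forwards the new stack pointer, out2 points at the saved pt_regs above the switch stack, and sys_clone2 additionally passes a user-stack size in out3 where sys_clone passes 0. A minimal user-space sketch of the clone() contract these stubs service, using glibc's clone() wrapper (the child_fn/child_stack names are illustrative; the wrapper hides the arch-specific stack arguments):

	#define _GNU_SOURCE
	#include <sched.h>
	#include <signal.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/types.h>
	#include <sys/wait.h>

	static char child_stack[64 * 1024];

	static int child_fn(void *arg)
	{
		printf("child says: %s\n", (char *)arg);
		return 0;
	}

	int main(void)
	{
		/* CLONE_VM shares the address space; SIGCHLD lets the parent wait. */
		pid_t pid = clone(child_fn, child_stack + sizeof(child_stack),
				  CLONE_VM | SIGCHLD, "hello");
		if (pid < 0) {
			perror("clone");
			exit(1);
		}
		waitpid(pid, NULL, 0);
		return 0;
	}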
/*
 * prev_task <- ia64_switch_to(struct task_struct *next)
 */
GLOBAL_ENTRY(ia64_switch_to)
	.prologue
	alloc r16=ar.pfs,1,0,0,0
	DO_SAVE_SWITCH_STACK
	.body
	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
	mov r27=IA64_KR(CURRENT_STACK)
	dep r20=0,in0,61,3		// physical address of "current"
	;;
	st8 [r22]=sp			// save kernel stack pointer of old task
	shr.u r26=r20,IA64_GRANULE_SHIFT
	shr.u r17=r20,KERNEL_TR_PAGE_SHIFT
	;;
	cmp.ne p6,p7=KERNEL_TR_PAGE_NUM,r17
	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
	;;
	/*
	 * If we've already mapped this task's page, we can skip doing it again.
	 */
(p6)	cmp.eq p7,p6=r26,r27
(p6)	br.cond.dpnt .map
	;;
.done:
(p6)	ssm psr.ic			// if we had to map, re-enable the psr.ic bit FIRST!!!
	;;
(p6)	srlz.d
	ld8 sp=[r21]			// load kernel stack pointer of new task
	mov IA64_KR(CURRENT)=r20	// update "current" application register
	mov r8=r13			// return pointer to previously running task
	mov r13=in0			// set "current" pointer
	;;
(p6)	ssm psr.i			// re-enable psr.i AFTER the ic bit is serialized
	DO_LOAD_SWITCH_STACK
#ifdef CONFIG_SMP
	sync.i				// ensure "fc"s done by this CPU are visible on other CPUs
#endif
	br.ret.sptk.many rp		// boogie on out in new context

.map:
	rsm psr.i | psr.ic
	movl r25=PAGE_KERNEL
	;;
	srlz.d
	or r23=r25,r20			// construct PA | page properties
	mov r25=IA64_GRANULE_SHIFT<<2
	;;
	mov cr.itir=r25
	mov cr.ifa=in0			// VA of next task...
	;;
	mov r25=IA64_TR_CURRENT_STACK
	mov IA64_KR(CURRENT_STACK)=r26	// remember last page we mapped...
	;;
	itr.d dtr[r25]=r23		// wire in new mapping...
	br.cond.sptk .done
END(ia64_switch_to)

/*
 * Note that interrupts are enabled during save_switch_stack and
 * load_switch_stack.  This means that we may get an interrupt with
 * "sp" pointing to the new kernel stack while ar.bspstore is still
 * pointing to the old kernel backing store area.  Since ar.rsc,
 * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts,
 * this is not a problem.  Also, we don't need to specify unwind
 * information for preserved registers that are not modified in
 * save_switch_stack as the right unwind information is already
 * specified at the call-site of save_switch_stack.
 */

/*
 * save_switch_stack:
 *	- r16 holds ar.pfs
 *	- b7 holds address to return to
 *	- rp (b0) holds return address to save
 */
GLOBAL_ENTRY(save_switch_stack)
	.prologue
	.altrp b7
	flushrs			// flush dirty regs to backing store (must be first in insn group)
	.save @priunat,r17
	mov r17=ar.unat		// preserve caller's
	.body
	adds r3=80,sp
	;;
	lfetch.fault.excl.nt1 [r3],128
	mov ar.rsc=0		// put RSE in mode: enforced lazy, little endian, pl 0
	adds r2=16+128,sp
	;;
	lfetch.fault.excl.nt1 [r2],128
	lfetch.fault.excl.nt1 [r3],128
	adds r14=SW(R4)+16,sp
	;;
	lfetch.fault.excl [r2]
	lfetch.fault.excl [r3]
	adds r15=SW(R5)+16,sp
	;;
	mov r18=ar.fpsr		// preserve fpsr
	mov r19=ar.rnat
	add r2=SW(F2)+16,sp	// r2 = &sw->f2
.mem.offset 0,0; st8.spill [r14]=r4,16		// spill r4
.mem.offset 8,0; st8.spill [r15]=r5,16		// spill r5
	add r3=SW(F3)+16,sp	// r3 = &sw->f3
	;;
	stf.spill [r2]=f2,32
	stf.spill [r3]=f3,32
	mov r21=b0
.mem.offset 0,0; st8.spill [r14]=r6,16		// spill r6
.mem.offset 8,0; st8.spill [r15]=r7,16		// spill r7
	mov r22=b1
	;;
	// since we're done with the spills, read and save ar.unat:
	mov r29=ar.unat		// M-unit
	mov r20=ar.bspstore	// M-unit
	mov r23=b2
	stf.spill [r2]=f4,32
	stf.spill [r3]=f5,32
	mov r24=b3
	;;
	st8 [r14]=r21,16	// save b0
	st8 [r15]=r22,16	// save b1
	mov r25=b4
	stf.spill [r2]=f10,32
	stf.spill [r3]=f11,32
	mov r26=b5
	;;
	st8 [r14]=r23,16	// save b2
	st8 [r15]=r24,16	// save b3
	mov r21=ar.lc		// I-unit
	stf.spill [r2]=f12,32
	stf.spill [r3]=f13,32
	;;
	st8 [r14]=r25,16	// save b4
	st8 [r15]=r26,16	// save b5
	stf.spill [r2]=f14,32
	stf.spill [r3]=f15,32
	;;
	st8 [r14]=r16		// save ar.pfs
	st8 [r15]=r21		// save ar.lc
	stf.spill [r2]=f16,32
	stf.spill [r3]=f17,32
	;;
	stf.spill [r2]=f18,32
	stf.spill [r3]=f19,32
	;;
	stf.spill [r2]=f20,32
	stf.spill [r3]=f21,32
	;;
	stf.spill [r2]=f22,32
	stf.spill [r3]=f23,32
	;;
	stf.spill [r2]=f24,32
	stf.spill [r3]=f25,32
	add r14=SW(CALLER_UNAT)+16,sp
	;;
	stf.spill [r2]=f26,32
	stf.spill [r3]=f27,32
	add r15=SW(AR_FPSR)+16,sp
	;;
	stf.spill [r2]=f28,32
	stf.spill [r3]=f29,32
	st8 [r14]=r17		// save caller_unat
	st8 [r15]=r18		// save fpsr
	mov r21=pr
	;;
	stf.spill [r2]=f30,(SW(AR_UNAT)-SW(F30))
	stf.spill [r3]=f31,(SW(AR_RNAT)-SW(F31))
	;;
	st8 [r2]=r29,16		// save ar.unat
	st8 [r3]=r19,16		// save ar.rnat
	;;
	st8 [r2]=r20		// save ar.bspstore
	st8 [r3]=r21		// save predicate registers
	mov ar.rsc=3		// put RSE back into eager mode, pl 0
	br.cond.sptk.many b7
END(save_switch_stack)
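save_switch_stack and load_switch_stack (below) spill and refill the callee-preserved state (r4-r7, f2-f5, f10-f31, b0-b5, the predicates, and several ar.* registers) into a struct switch_stack, which is what lets ia64_switch_to resume the next task exactly where it blocked. As a loose user-space analogy only (my comparison, not the kernel's mechanism), setjmp/longjmp shows the same save-then-restore shape, with the jmp_buf standing in for struct switch_stack:

	#include <setjmp.h>
	#include <stdio.h>

	static jmp_buf ctx;	/* stands in for struct switch_stack */

	int main(void)
	{
		if (setjmp(ctx) == 0) {		/* save: roughly save_switch_stack */
			puts("context saved, jumping");
			longjmp(ctx, 1);	/* restore: roughly load_switch_stack */
		}
		puts("context restored");
		return 0;
	}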
/*
 * load_switch_stack:
 *	- "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
 *	- b7 holds address to return to
 *	- must not touch r8-r11
 */
ENTRY(load_switch_stack)
	.prologue
	.altrp b7
	.body
	lfetch.fault.nt1 [sp]
	adds r2=SW(AR_BSPSTORE)+16,sp
	adds r3=SW(AR_UNAT)+16,sp
	mov ar.rsc=0						// put RSE into enforced lazy mode
	adds r14=SW(CALLER_UNAT)+16,sp
	adds r15=SW(AR_FPSR)+16,sp
	;;
	ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE))	// bspstore
	ld8 r29=[r3],(SW(B1)-SW(AR_UNAT))	// unat
	;;
	ld8 r21=[r2],16		// restore b0
	ld8 r22=[r3],16		// restore b1
	;;
	ld8 r23=[r2],16		// restore b2
	ld8 r24=[r3],16		// restore b3
	;;
	ld8 r25=[r2],16		// restore b4
	ld8 r26=[r3],16		// restore b5
	;;
	ld8 r16=[r2],(SW(PR)-SW(AR_PFS))	// ar.pfs
	ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC))	// ar.lc
	;;
	ld8 r28=[r2]		// restore pr
	ld8 r30=[r3]		// restore rnat
	;;
	ld8 r18=[r14],16	// restore caller's unat
	ld8 r19=[r15],24	// restore fpsr
	;;
	ldf.fill f2=[r14],32
	ldf.fill f3=[r15],32
	;;
	ldf.fill f4=[r14],32
	ldf.fill f5=[r15],32
	;;
	ldf.fill f10=[r14],32
	ldf.fill f11=[r15],32
	;;
	ldf.fill f12=[r14],32
	ldf.fill f13=[r15],32
	;;
	ldf.fill f14=[r14],32
	ldf.fill f15=[r15],32
	;;
	ldf.fill f16=[r14],32
	ldf.fill f17=[r15],32
	;;
	ldf.fill f18=[r14],32
	ldf.fill f19=[r15],32
	mov b0=r21
	;;
	ldf.fill f20=[r14],32
	ldf.fill f21=[r15],32
	mov b1=r22
	;;
	ldf.fill f22=[r14],32
	ldf.fill f23=[r15],32
	mov b2=r23
	;;
	mov ar.bspstore=r27
	mov ar.unat=r29		// establish unat holding the NaT bits for r4-r7
	mov b3=r24
	;;
	ldf.fill f24=[r14],32
	ldf.fill f25=[r15],32
	mov b4=r25
	;;
	ldf.fill f26=[r14],32
	ldf.fill f27=[r15],32
	mov b5=r26
	;;
	ldf.fill f28=[r14],32
	ldf.fill f29=[r15],32
	mov ar.pfs=r16
	;;
	ldf.fill f30=[r14],32
	ldf.fill f31=[r15],24
	mov ar.lc=r17
	;;
	ld8.fill r4=[r14],16
	ld8.fill r5=[r15],16
