
entry.S

Linux kernel source code (compressed archive): the source code accompanying the book <<Linux内核>> (The Linux Kernel).
Page 1 of 3
/*
 * ia64/kernel/entry.S
 *
 * Kernel entry points.
 *
 * Copyright (C) 1998-2000 Hewlett-Packard Co
 * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com>
 */
/*
 * ia64_switch_to now places correct virtual mapping in TR2 for
 * kernel stack. This allows us to handle interrupts without changing
 * to physical mode.
 *
 * ar.k4 is now used to hold last virtual map address
 *
 * Jonathan Nickin	<nicklin@missioncriticallinux.com>
 * Patrick O'Rourke	<orourke@missioncriticallinux.com>
 * 11/07/2000
 */
/*
 * Global (preserved) predicate usage on syscall entry/exit path:
 *
 *	pKern:		See entry.h.
 *	pSys:		See entry.h.
 *	pNonSys:	!pSys
 *	p2:		(Alias of pKern!) True if any signals are pending.
 */

#include <linux/config.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/offsets.h>
#include <asm/processor.h>
#include <asm/unistd.h>
#include <asm/asmmacro.h>
#include <asm/pgtable.h>

#include "entry.h"

	.text
	.psr abi64
	.psr lsb
	.lsb

	/*
	 * execve() is special because in case of success, we need to
	 * setup a null register window frame.
	 */
ENTRY(ia64_execve)
	UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(3))
	alloc loc1=ar.pfs,3,2,4,0
	mov loc0=rp
	UNW(.body)
	mov out0=in0			// filename
	;;				// stop bit between alloc and call
	mov out1=in1			// argv
	mov out2=in2			// envp
	add out3=16,sp			// regs
	br.call.sptk.few rp=sys_execve
.ret0:	cmp4.ge p6,p0=r8,r0
	mov ar.pfs=loc1			// restore ar.pfs
	;;
(p6)	mov ar.pfs=r0			// clear ar.pfs in case of success
	sxt4 r8=r8			// return 64-bit result
	mov rp=loc0
	br.ret.sptk.few rp
END(ia64_execve)

GLOBAL_ENTRY(sys_clone2)
	UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2))
	alloc r16=ar.pfs,3,2,4,0
	DO_SAVE_SWITCH_STACK
	mov loc0=rp
	mov loc1=r16				// save ar.pfs across do_fork
	UNW(.body)
	mov out1=in1
	mov out3=in2
	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
	mov out0=in0				// out0 = clone_flags
	br.call.sptk.few rp=do_fork
.ret1:	UNW(.restore sp)
	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone2)

GLOBAL_ENTRY(sys_clone)
	UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2))
	alloc r16=ar.pfs,2,2,4,0
	DO_SAVE_SWITCH_STACK
	mov loc0=rp
	mov loc1=r16				// save ar.pfs across do_fork
	UNW(.body)
	mov out1=in1
	mov out3=0
	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
	mov out0=in0				// out0 = clone_flags
	br.call.sptk.few rp=do_fork
.ret2:	UNW(.restore sp)
	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone)

#define KSTACK_TR	2

/*
 * prev_task <- ia64_switch_to(struct task_struct *next)
 */
GLOBAL_ENTRY(ia64_switch_to)
	UNW(.prologue)
	alloc r16=ar.pfs,1,0,0,0
	DO_SAVE_SWITCH_STACK
	UNW(.body)
	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
	mov r27=ar.k4
	dep r20=0,in0,61,3		// physical address of "current"
	;;
	st8 [r22]=sp			// save kernel stack pointer of old task
	shr.u r26=r20,_PAGE_SIZE_256M
	;;
	cmp.eq p7,p6=r26,r0		// check < 256M
	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
	;;
	/*
	 * If we've already mapped this task's page, we can skip doing it
	 * again.
	 */
(p6)	cmp.eq p7,p6=r26,r27
(p6)	br.cond.dpnt.few .map
	;;
.done:
	ld8 sp=[r21]			// load kernel stack pointer of new task
(p6)	ssm psr.ic			// if we had to map, re-enable the psr.ic bit FIRST!!!
	;;
(p6)	srlz.d
	mov ar.k6=r20			// copy "current" into ar.k6
	mov r8=r13			// return pointer to previously running task
	mov r13=in0			// set "current" pointer
	;;
(p6)	ssm psr.i			// re-enable psr.i AFTER the ic bit is serialized
	DO_LOAD_SWITCH_STACK( )

#ifdef CONFIG_SMP
	sync.i				// ensure "fc"s done by this CPU are visible on other CPUs
#endif
	br.ret.sptk.few rp		// boogie on out in new context

.map:
	rsm psr.i | psr.ic
	movl r25=__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RWX
	;;
	srlz.d
	or r23=r25,r20			// construct PA | page properties
	mov r25=_PAGE_SIZE_256M<<2
	;;
	mov cr.itir=r25
	mov cr.ifa=in0			// VA of next task...
	;;
	mov r25=KSTACK_TR		// use tr entry #2...
	mov ar.k4=r26			// remember last page we mapped...
	;;
	itr.d dtr[r25]=r23		// wire in new mapping...
	br.cond.sptk.many .done
	;;
END(ia64_switch_to)

#ifndef CONFIG_IA64_NEW_UNWIND
	/*
	 * Like save_switch_stack, but also save the stack frame that is active
	 * at the time this function is called.
	 */
ENTRY(save_switch_stack_with_current_frame)
	UNW(.prologue)
	alloc r16=ar.pfs,0,0,0,0		// pass ar.pfs to save_switch_stack
	DO_SAVE_SWITCH_STACK
	br.ret.sptk.few rp
END(save_switch_stack_with_current_frame)
#endif /* !CONFIG_IA64_NEW_UNWIND */

/*
 * Note that interrupts are enabled during save_switch_stack and
 * load_switch_stack.  This means that we may get an interrupt with
 * "sp" pointing to the new kernel stack while ar.bspstore is still
 * pointing to the old kernel backing store area.  Since ar.rsc,
 * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts,
 * this is not a problem.  Also, we don't need to specify unwind
 * information for preserved registers that are not modified in
 * save_switch_stack as the right unwind information is already
 * specified at the call-site of save_switch_stack.
 */

/*
 * save_switch_stack:
 *	- r16 holds ar.pfs
 *	- b7 holds address to return to
 *	- rp (b0) holds return address to save
 */
GLOBAL_ENTRY(save_switch_stack)
	UNW(.prologue)
	UNW(.altrp b7)
	flushrs			// flush dirty regs to backing store (must be first in insn group)
	mov r17=ar.unat		// preserve caller's
	adds r2=16,sp		// r2 = &sw->caller_unat
	;;
	mov r18=ar.fpsr		// preserve fpsr
	mov ar.rsc=r0		// put RSE in mode: enforced lazy, little endian, pl 0
	;;
	mov r19=ar.rnat
	adds r3=24,sp		// r3 = &sw->ar_fpsr
	;;
	.savesp ar.unat,SW(CALLER_UNAT)
	st8 [r2]=r17,16
	.savesp ar.fpsr,SW(AR_FPSR)
	st8 [r3]=r18,24
	;;
	UNW(.body)
	stf.spill [r2]=f2,32
	stf.spill [r3]=f3,32
	mov r21=b0
	;;
	stf.spill [r2]=f4,32
	stf.spill [r3]=f5,32
	;;
	stf.spill [r2]=f10,32
	stf.spill [r3]=f11,32
	mov r22=b1
	;;
	stf.spill [r2]=f12,32
	stf.spill [r3]=f13,32
	mov r23=b2
	;;
	stf.spill [r2]=f14,32
	stf.spill [r3]=f15,32
	mov r24=b3
	;;
	stf.spill [r2]=f16,32
	stf.spill [r3]=f17,32
	mov r25=b4
	;;
	stf.spill [r2]=f18,32
	stf.spill [r3]=f19,32
	mov r26=b5
	;;
	stf.spill [r2]=f20,32
	stf.spill [r3]=f21,32
	mov r17=ar.lc				// I-unit
	;;
	stf.spill [r2]=f22,32
	stf.spill [r3]=f23,32
	;;
	stf.spill [r2]=f24,32
	stf.spill [r3]=f25,32
	;;
	stf.spill [r2]=f26,32
	stf.spill [r3]=f27,32
	;;
	stf.spill [r2]=f28,32
	stf.spill [r3]=f29,32
	;;
	stf.spill [r2]=f30,32
	stf.spill [r3]=f31,24
	;;
.mem.offset 0,0;	st8.spill [r2]=r4,16
.mem.offset 8,0;	st8.spill [r3]=r5,16
	;;
.mem.offset 0,0;	st8.spill [r2]=r6,16
.mem.offset 8,0;	st8.spill [r3]=r7,16
	;;
	st8 [r2]=r21,16		// save b0
	st8 [r3]=r22,16		// save b1
	/* since we're done with the spills, read and save ar.unat: */
	mov r18=ar.unat		// M-unit
	mov r20=ar.bspstore	// M-unit
	;;
	st8 [r2]=r23,16		// save b2
	st8 [r3]=r24,16		// save b3
	;;
	st8 [r2]=r25,16		// save b4
	st8 [r3]=r26,16		// save b5
	;;
	st8 [r2]=r16,16		// save ar.pfs
	st8 [r3]=r17,16		// save ar.lc
	mov r21=pr
	;;
	st8 [r2]=r18,16		// save ar.unat
	st8 [r3]=r19,16		// save ar.rnat
	mov b7=r28
	;;
	st8 [r2]=r20		// save ar.bspstore
	st8 [r3]=r21		// save predicate registers
	mov ar.rsc=3		// put RSE back into eager mode, pl 0
	br.cond.sptk.few b7
END(save_switch_stack)

/*
 * load_switch_stack:
 *	- b7 holds address to return to
 */
ENTRY(load_switch_stack)
	UNW(.prologue)
	UNW(.altrp b7)
	invala			// invalidate ALAT
	UNW(.body)
	adds r2=IA64_SWITCH_STACK_B0_OFFSET+16,sp	// get pointer to switch_stack.b0
	mov ar.rsc=r0		// put RSE into enforced lazy mode
	adds r3=IA64_SWITCH_STACK_B0_OFFSET+24,sp	// get pointer to switch_stack.b1
	;;
	ld8 r21=[r2],16		// restore b0
	ld8 r22=[r3],16		// restore b1
	;;
	ld8 r23=[r2],16		// restore b2
	ld8 r24=[r3],16		// restore b3
	;;
	ld8 r25=[r2],16		// restore b4
	ld8 r26=[r3],16		// restore b5
	;;
	ld8 r16=[r2],16		// restore ar.pfs
	ld8 r17=[r3],16		// restore ar.lc
	;;
	ld8 r18=[r2],16		// restore ar.unat
	ld8 r19=[r3],16		// restore ar.rnat
	mov b0=r21
	;;
	ld8 r20=[r2]		// restore ar.bspstore
	ld8 r21=[r3]		// restore predicate registers
	mov ar.pfs=r16
	;;
	mov ar.bspstore=r20
	;;
	loadrs			// invalidate stacked regs outside current frame
	adds r2=16-IA64_SWITCH_STACK_SIZE,r2	// get pointer to switch_stack.caller_unat
	;;			// stop bit for rnat dependency
	mov ar.rnat=r19
	mov ar.unat=r18		// establish unat holding the NaT bits for r4-r7
	adds r3=16-IA64_SWITCH_STACK_SIZE,r3	// get pointer to switch_stack.ar_fpsr
	;;
	ld8 r18=[r2],16		// restore caller's unat
	ld8 r19=[r3],24		// restore fpsr
	mov ar.lc=r17
	;;
	ldf.fill f2=[r2],32
	ldf.fill f3=[r3],32
	mov pr=r21,-1
	;;
	ldf.fill f4=[r2],32
	ldf.fill f5=[r3],32
	;;
	ldf.fill f10=[r2],32
	ldf.fill f11=[r3],32
	mov b1=r22
	;;
	ldf.fill f12=[r2],32
	ldf.fill f13=[r3],32
	mov b2=r23
	;;
	ldf.fill f14=[r2],32
	ldf.fill f15=[r3],32
	mov b3=r24
	;;
	ldf.fill f16=[r2],32
	ldf.fill f17=[r3],32
	mov b4=r25
	;;
	ldf.fill f18=[r2],32
	ldf.fill f19=[r3],32
	mov b5=r26
	;;
	ldf.fill f20=[r2],32
	ldf.fill f21=[r3],32
	;;
	ldf.fill f22=[r2],32
	ldf.fill f23=[r3],32
	;;
	ldf.fill f24=[r2],32
	ldf.fill f25=[r3],32
	;;
	ldf.fill f26=[r2],32
	ldf.fill f27=[r3],32
	;;
	ldf.fill f28=[r2],32
	ldf.fill f29=[r3],32
	;;
	ldf.fill f30=[r2],32
	ldf.fill f31=[r3],24
	;;
	ld8.fill r4=[r2],16
	ld8.fill r5=[r3],16
	;;
	ld8.fill r6=[r2],16
	ld8.fill r7=[r3],16
	mov ar.unat=r18				// restore caller's unat
	mov ar.fpsr=r19				// restore fpsr
	mov ar.rsc=3				// put RSE back into eager mode, pl 0
	br.cond.sptk.few b7
END(load_switch_stack)

GLOBAL_ENTRY(__ia64_syscall)
	.regstk 6,0,0,0
	mov r15=in5				// put syscall number in place
	break __BREAK_SYSCALL
	movl r2=errno
	cmp.eq p6,p7=-1,r10
	;;
(p6)	st4 [r2]=r8
(p6)	mov r8=-1
	br.ret.sptk.few rp
END(__ia64_syscall)

	//
	// We invoke syscall_trace through this intermediate function to
	// ensure that the syscall input arguments are not clobbered.  We
	// also use it to preserve b6, which contains the syscall entry point.
	//
GLOBAL_ENTRY(invoke_syscall_trace)
#ifdef CONFIG_IA64_NEW_UNWIND
	UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
	alloc loc1=ar.pfs,8,3,0,0
	mov loc0=rp
	UNW(.body)
	mov loc2=b6
	;;
	br.call.sptk.few rp=syscall_trace
.ret3:	mov rp=loc0
	mov ar.pfs=loc1
	mov b6=loc2
	br.ret.sptk.few rp
#else /* !CONFIG_IA64_NEW_UNWIND */
	UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
	alloc loc1=ar.pfs,8,3,0,0
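
The __ia64_syscall stub above follows the usual libc error convention: after break __BREAK_SYSCALL, a value of -1 in r10 marks a failed call, in which case the error code left in r8 is stored into errno and the return value is forced to -1. Below is a minimal C sketch of how a caller might declare and use the stub; the six-argument prototype is inferred from ".regstk 6,0,0,0" and "mov r15=in5" (the syscall number is passed last), and my_getpid() is a purely hypothetical wrapper used only for illustration, not something defined in this file.

/*
 * Minimal sketch, not part of entry.S: C-side view of the __ia64_syscall
 * stub.  The prototype is inferred from the assembly above; my_getpid()
 * is a hypothetical example wrapper.
 */
#include <asm/unistd.h>		/* syscall numbers, e.g. __NR_getpid */

extern long __ia64_syscall (long a0, long a1, long a2, long a3, long a4, long nr);

long
my_getpid (void)
{
	/* On failure the stub stores the error code into errno and returns -1. */
	return __ia64_syscall(0, 0, 0, 0, 0, __NR_getpid);
}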
