entry.s — from the Xen virtual machine source package (listing page 1 of 4)

/*
 * ia64/kernel/entry.S
 *
 * Kernel entry points.
 *
 * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999, 2002-2003
 *	Asit Mallick <Asit.K.Mallick@intel.com>
 * 	Don Dugger <Don.Dugger@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 */
/*
 * ia64_switch_to now places correct virtual mapping in TR2 for
 * kernel stack. This allows us to handle interrupts without changing
 * to physical mode.
 *
 * Jonathan Nicklin	<nicklin@missioncriticallinux.com>
 * Patrick O'Rourke	<orourke@missioncriticallinux.com>
 * 11/07/2000
 */
/*
 * Global (preserved) predicate usage on syscall entry/exit path:
 *
 *	pKStk:		See entry.h.
 *	pUStk:		See entry.h.
 *	pSys:		See entry.h.
 *	pNonSys:	!pSys
 */

#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/cache.h>
#ifdef XEN
#include <xen/errno.h>
#else
#include <asm/errno.h>
#endif
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

#include "minstate.h"

#ifndef XEN
	/*
	 * execve() is special because in case of success, we need to
	 * setup a null register window frame.
	 */
ENTRY(ia64_execve)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,4,0
	mov loc0=rp
	.body
	mov out0=in0			// filename
	;;				// stop bit between alloc and call
	mov out1=in1			// argv
	mov out2=in2			// envp
	add out3=16,sp			// regs
	br.call.sptk.many rp=sys_execve
.ret0:
#ifdef CONFIG_IA32_SUPPORT
	/*
	 * Check if we're returning to ia32 mode. If so, we need to restore ia32 registers
	 * from pt_regs.
	 */
	adds r16=PT(CR_IPSR)+16,sp
	;;
	ld8 r16=[r16]
#endif
	cmp4.ge p6,p7=r8,r0
	mov ar.pfs=loc1			// restore ar.pfs
	sxt4 r8=r8			// return 64-bit result
	;;
	stf.spill [sp]=f0
(p6)	cmp.ne pKStk,pUStk=r0,r0	// a successful execve() lands us in user-mode...
	mov rp=loc0
(p6)	mov ar.pfs=r0			// clear ar.pfs on success
(p7)	br.ret.sptk.many rp

	/*
	 * In theory, we'd have to zap this state only to prevent leaking of
	 * security sensitive state (e.g., if current->mm->dumpable is zero).  However,
	 * this executes in less than 20 cycles even on Itanium, so it's not worth
	 * optimizing for...).
	 */
	mov ar.unat=0; 		mov ar.lc=0
	mov r4=0;		mov f2=f0;		mov b1=r0
	mov r5=0;		mov f3=f0;		mov b2=r0
	mov r6=0;		mov f4=f0;		mov b3=r0
	mov r7=0;		mov f5=f0;		mov b4=r0
	ldf.fill f12=[sp];	mov f13=f0;		mov b5=r0
	ldf.fill f14=[sp];	ldf.fill f15=[sp];	mov f16=f0
	ldf.fill f17=[sp];	ldf.fill f18=[sp];	mov f19=f0
	ldf.fill f20=[sp];	ldf.fill f21=[sp];	mov f22=f0
	ldf.fill f23=[sp];	ldf.fill f24=[sp];	mov f25=f0
	ldf.fill f26=[sp];	ldf.fill f27=[sp];	mov f28=f0
	ldf.fill f29=[sp];	ldf.fill f30=[sp];	mov f31=f0
#ifdef CONFIG_IA32_SUPPORT
	tbit.nz p6,p0=r16, IA64_PSR_IS_BIT
	movl loc0=ia64_ret_from_ia32_execve
	;;
(p6)	mov rp=loc0
#endif
	br.ret.sptk.many rp
END(ia64_execve)
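
/*
 * Editor's note (not part of the original source): a rough C-level sketch of
 * the wrapper above.  sys_execve() is called with a pointer to the pt_regs
 * saved just above the 16-byte scratch area (out3 = sp+16); when it returns a
 * non-negative value (predicate p6), the sequence above scrubs the preserved
 * state (r4-r7, f2-f5, f12-f31, b1-b5, ar.unat, ar.lc) instead of restoring
 * it, so no kernel values leak into the freshly exec'd user image.  The helper
 * name below is illustrative only:
 *
 *	ret = sys_execve(filename, argv, envp, &regs);
 *	if (ret >= 0)
 *		scrub_preserved_state();	// hypothetical helper
 *	return sign_extended(ret);		// the sxt4 above
 */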

/*
 * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr,
 *	      u64 tls)
 */
GLOBAL_ENTRY(sys_clone2)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc r16=ar.pfs,8,2,6,0
	DO_SAVE_SWITCH_STACK
	adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
	mov loc0=rp
	mov loc1=r16				// save ar.pfs across do_fork
	.body
	mov out1=in1
	mov out3=in2
	tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
	mov out4=in3	// parent_tidptr: valid only w/CLONE_PARENT_SETTID
	;;
(p6)	st8 [r2]=in5				// store TLS in r16 for copy_thread()
	mov out5=in4	// child_tidptr:  valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
	mov out0=in0				// out0 = clone_flags
	br.call.sptk.many rp=do_fork
.ret1:	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone2)

/*
 * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls)
 *	Deprecated.  Use sys_clone2() instead.
 */
GLOBAL_ENTRY(sys_clone)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc r16=ar.pfs,8,2,6,0
	DO_SAVE_SWITCH_STACK
	adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
	mov loc0=rp
	mov loc1=r16				// save ar.pfs across do_fork
	.body
	mov out1=in1
	mov out3=16				// stacksize (compensates for 16-byte scratch area)
	tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
	mov out4=in2	// parent_tidptr: valid only w/CLONE_PARENT_SETTID
	;;
(p6)	st8 [r2]=in4				// store TLS in r13 (tp)
	mov out5=in3	// child_tidptr:  valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
	mov out0=in0				// out0 = clone_flags
	br.call.sptk.many rp=do_fork
.ret2:	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone)
#endif
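
/*
 * Editor's note (not part of the original source): both wrappers above funnel
 * into do_fork().  Assuming the 2.6-era prototype
 *
 *	do_fork(clone_flags, stack_start, regs, stack_size,
 *		parent_tidptr, child_tidptr)
 *
 * the out registers line up as out0=flags, out1=ustack_base, out2=&pt_regs
 * (just above the switch stack), out3=ustack_size (sys_clone2) or 16
 * (sys_clone, covering only the scratch area), out4=parent_tidptr and
 * out5=child_tidptr.  When CLONE_SETTLS is set, the TLS argument is stashed
 * in the saved r16 slot of pt_regs so copy_thread() can later install it as
 * the child's thread pointer (r13).
 */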

/*
 * prev_task <- ia64_switch_to(struct task_struct *next)
 *	With Ingo's new scheduler, interrupts are disabled when this routine gets
 *	called.  The code starting at .map relies on this.  The rest of the code
 *	doesn't care about the interrupt masking status.
 */
GLOBAL_ENTRY(ia64_switch_to)
	.prologue
	alloc r16=ar.pfs,1,0,0,0
	DO_SAVE_SWITCH_STACK
	.body

	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
#ifdef XEN
	movl r24=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_STACK_OFFSET;;
	ld8 r27=[r24]
	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
	dep r20=0,in0,60,4		// physical address of "next"
#else
	movl r25=init_task
	mov r27=IA64_KR(CURRENT_STACK)
	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
	dep r20=0,in0,61,3		// physical address of "next"
#endif
	;;
	st8 [r22]=sp			// save kernel stack pointer of old task
	shr.u r26=r20,IA64_GRANULE_SHIFT
#ifdef XEN
	;;
	/*
	 * If we've already mapped this task's page, we can skip doing it again.
	 */
	cmp.eq p7,p6=r26,r27
(p6)	br.cond.dpnt .map
#else
	cmp.eq p7,p6=r25,in0
	;;
	/*
	 * If we've already mapped this task's page, we can skip doing it again.
	 */
(p6)	cmp.eq p7,p6=r26,r27
(p6)	br.cond.dpnt .map
#endif
	;;
.done:
(p6)	ssm psr.ic			// if we had to map, reenable the psr.ic bit FIRST!!!
	;;
(p6)	srlz.d
	ld8 sp=[r21]			// load kernel stack pointer of new task
#ifdef XEN
	add r25=IA64_KR_CURRENT_OFFSET-IA64_KR_CURRENT_STACK_OFFSET,r24
	;;
	st8 [r25]=in0			// update "current" application register
	;;
	bsw.0
	;;
	mov r8=r13			// return pointer to previously running task
	mov r13=in0			// set "current" pointer
	mov r21=in0
	;;
	bsw.1
	;;
#else
	mov IA64_KR(CURRENT)=in0	// update "current" application register
	mov r8=r13			// return pointer to previously running task
	mov r13=in0			// set "current" pointer
#endif
	DO_LOAD_SWITCH_STACK
#ifdef CONFIG_SMP
	sync.i				// ensure "fc"s done by this CPU are visible on other CPUs
#endif
	br.ret.sptk.many rp		// boogie on out in new context

.map:
	rsm psr.ic			// interrupts (psr.i) are already disabled here
	movl r25=PAGE_KERNEL
#ifdef XEN
	movl r27=IA64_GRANULE_SHIFT << 2
#endif
	;;
	srlz.d
	or r23=r25,r20			// construct PA | page properties
#ifdef XEN
	ptr.d in0,r27			// to purge dtr[IA64_TR_VHPT]
#else
	movl r27=IA64_GRANULE_SHIFT << 2
#endif
	;;
	mov cr.itir=r27
	mov cr.ifa=in0			// VA of next task...
#ifdef XEN
	srlz.d
#endif
	;;
	mov r25=IA64_TR_CURRENT_STACK
#ifdef XEN
	st8 [r24]=r26			// remember last page we mapped...
#else
	mov IA64_KR(CURRENT_STACK)=r26	// remember last page we mapped...
#endif
	;;
	itr.d dtr[r25]=r23		// wire in new mapping...
	br.cond.sptk .done
END(ia64_switch_to)
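
/*
 * Editor's note (not part of the original source): in outline, ia64_switch_to
 * above does roughly the following (names are illustrative pseudocode):
 *
 *	prev->thread.ksp = sp;			// park the old kernel stack
 *	if (granule_of(next) != last_mapped_granule)
 *		goto map;			// need to re-pin the stack mapping
 *	sp = next->thread.ksp;
 *	current = next;				// r13 and the CURRENT kernel register
 *	return prev;				// handed back in r8
 *
 * The .map path runs with psr.ic cleared: it purges any overlapping
 * translation (the Xen variant explicitly purges dtr[IA64_TR_VHPT]), loads
 * cr.itir/cr.ifa with the granule size and the new task's virtual address,
 * wires the mapping into dtr[IA64_TR_CURRENT_STACK], and branches back to
 * .done, where psr.ic is re-enabled before the new stack pointer is loaded.
 */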

/*
 * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
 * means that we may get an interrupt with "sp" pointing to the new kernel stack while
 * ar.bspstore is still pointing to the old kernel backing store area.  Since ar.rsc,
 * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a
 * problem.  Also, we don't need to specify unwind information for preserved registers
 * that are not modified in save_switch_stack as the right unwind information is already
 * specified at the call-site of save_switch_stack.
 */

/*
 * save_switch_stack:
 *	- r16 holds ar.pfs
 *	- b7 holds address to return to
 *	- rp (b0) holds return address to save
 */
GLOBAL_ENTRY(save_switch_stack)
	.prologue
	.altrp b7
	flushrs			// flush dirty regs to backing store (must be first in insn group)
	.save @priunat,r17
	mov r17=ar.unat		// preserve caller's
	.body
#ifdef CONFIG_ITANIUM
	adds r2=16+128,sp
	adds r3=16+64,sp
	adds r14=SW(R4)+16,sp
	;;
	st8.spill [r14]=r4,16		// spill r4
	lfetch.fault.excl.nt1 [r3],128
	;;
	lfetch.fault.excl.nt1 [r2],128
	lfetch.fault.excl.nt1 [r3],128
	;;
	lfetch.fault.excl [r2]
	lfetch.fault.excl [r3]
	adds r15=SW(R5)+16,sp
#else
	add r2=16+3*128,sp
	add r3=16,sp
	add r14=SW(R4)+16,sp
	;;
	st8.spill [r14]=r4,SW(R6)-SW(R4)	// spill r4 and prefetch offset 0x1c0
	lfetch.fault.excl.nt1 [r3],128	//		prefetch offset 0x010
	;;
	lfetch.fault.excl.nt1 [r3],128	//		prefetch offset 0x090
	lfetch.fault.excl.nt1 [r2],128	//		prefetch offset 0x190
	;;
	lfetch.fault.excl.nt1 [r3]	//		prefetch offset 0x110
	lfetch.fault.excl.nt1 [r2]	//		prefetch offset 0x210
	adds r15=SW(R5)+16,sp
#endif
	;;
	st8.spill [r15]=r5,SW(R7)-SW(R5)	// spill r5
	mov.m ar.rsc=0			// put RSE in mode: enforced lazy, little endian, pl 0
	add r2=SW(F2)+16,sp		// r2 = &sw->f2
	;;
	st8.spill [r14]=r6,SW(B0)-SW(R6)	// spill r6
	mov.m r18=ar.fpsr		// preserve fpsr
	add r3=SW(F3)+16,sp		// r3 = &sw->f3
	;;
	stf.spill [r2]=f2,32
	mov.m r19=ar.rnat
	mov r21=b0
	stf.spill [r3]=f3,32
	st8.spill [r15]=r7,SW(B2)-SW(R7)	// spill r7
	mov r22=b1
	;;
	// since we're done with the spills, read and save ar.unat:
	mov.m r29=ar.unat
	mov.m r20=ar.bspstore
	mov r23=b2
	stf.spill [r2]=f4,32
	stf.spill [r3]=f5,32
	mov r24=b3
	;;
	st8 [r14]=r21,SW(B1)-SW(B0)		// save b0
	st8 [r15]=r23,SW(B3)-SW(B2)		// save b2
	mov r25=b4
	mov r26=b5
	;;
	st8 [r14]=r22,SW(B4)-SW(B1)		// save b1
	st8 [r15]=r24,SW(AR_PFS)-SW(B3)		// save b3
	mov r21=ar.lc		// I-unit
	stf.spill [r2]=f12,32
	stf.spill [r3]=f13,32
	;;
	st8 [r14]=r25,SW(B5)-SW(B4)		// save b4
	st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS)	// save ar.pfs
	stf.spill [r2]=f14,32
	stf.spill [r3]=f15,32
	;;
	st8 [r14]=r26				// save b5
	st8 [r15]=r21				// save ar.lc
	stf.spill [r2]=f16,32
	stf.spill [r3]=f17,32
	;;
	stf.spill [r2]=f18,32
	stf.spill [r3]=f19,32
	;;
	stf.spill [r2]=f20,32
	stf.spill [r3]=f21,32
	;;
	stf.spill [r2]=f22,32
	stf.spill [r3]=f23,32
	;;
	stf.spill [r2]=f24,32
	stf.spill [r3]=f25,32
	;;
	stf.spill [r2]=f26,32
	stf.spill [r3]=f27,32
	;;
	stf.spill [r2]=f28,32
	stf.spill [r3]=f29,32
	;;
	stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
	stf.spill [r3]=f31,SW(PR)-SW(F31)
	add r14=SW(CALLER_UNAT)+16,sp
	;;
	st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT)	// save ar.unat
	st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat
	mov r21=pr
	;;
	st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat
	st8 [r3]=r21				// save predicate registers
	;;
	st8 [r2]=r20				// save ar.bspstore
	st8 [r14]=r18				// save fpsr
	mov ar.rsc=3		// put RSE back into eager mode, pl 0
	br.cond.sptk.many b7
END(save_switch_stack)
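
/*
 * Editor's note (not part of the original source): the SW(FIELD) constants
 * used above and below are struct switch_stack field offsets generated at
 * build time, so an operand such as SW(B0)-SW(R6) simply advances the store
 * pointer from the r6 slot to the b0 slot.  save_switch_stack above and
 * load_switch_stack below save/restore the callee-preserved context kept in
 * that structure at sp+16: r4-r7, f2-f5, f12-f31, b0-b5, ar.pfs, ar.lc,
 * ar.unat, ar.rnat, ar.bspstore, ar.fpsr and the predicate registers.
 */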

/*
 * load_switch_stack:
 *	- "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
 *	- b7 holds address to return to
 *	- must not touch r8-r11
 */
#ifdef XEN
GLOBAL_ENTRY(load_switch_stack)
#else
ENTRY(load_switch_stack)
#endif
	.prologue
	.altrp b7
	.body
	lfetch.fault.nt1 [sp]
	adds r2=SW(AR_BSPSTORE)+16,sp
	adds r3=SW(AR_UNAT)+16,sp
	mov ar.rsc=0						// put RSE into enforced lazy mode
	adds r14=SW(CALLER_UNAT)+16,sp
	adds r15=SW(AR_FPSR)+16,sp
	;;
	ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE))	// bspstore
	ld8 r29=[r3],(SW(B1)-SW(AR_UNAT))	// unat
	;;
	ld8 r21=[r2],16		// restore b0
	ld8 r22=[r3],16		// restore b1
	;;
	ld8 r23=[r2],16		// restore b2
	ld8 r24=[r3],16		// restore b3
	;;
	ld8 r25=[r2],16		// restore b4
	ld8 r26=[r3],16		// restore b5
	;;
	ld8 r16=[r2],(SW(PR)-SW(AR_PFS))	// ar.pfs
	ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC))	// ar.lc
	;;
	ld8 r28=[r2]		// restore pr
	ld8 r30=[r3]		// restore rnat
	;;
	ld8 r18=[r14],16	// restore caller's unat
