⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 entry.s

📁 是关于linux2.5.1的完全源码
💻 S
📖 第 1 页 / 共 3 页
字号:
	// --- tail of load_switch_stack (GLOBAL_ENTRY is before this chunk) ---
	mov pr=r28,-1
	;;
	ld8.fill r6=[r14],16
	ld8.fill r7=[r15],16
	mov ar.unat=r18				// restore caller's unat
	mov ar.rnat=r30				// must restore after bspstore but before rsc!
	mov ar.fpsr=r19				// restore fpsr
	mov ar.rsc=3				// put RSE back into eager mode, pl 0
	br.cond.sptk.many b7
END(load_switch_stack)

	/*
	 * Issue a system call via "break": the syscall number comes in via
	 * in5 and is moved to r15.  If the kernel indicates failure
	 * (r10 == -1), the error code in r8 is stored into errno and -1 is
	 * returned instead.
	 */
GLOBAL_ENTRY(__ia64_syscall)
	.regstk 6,0,0,0
	mov r15=in5				// put syscall number in place
	break __BREAK_SYSCALL
	movl r2=errno
	cmp.eq p6,p7=-1,r10			// p6 <- syscall failed (r10 == -1)
	;;
(p6)	st4 [r2]=r8				// on failure: errno = r8
(p6)	mov r8=-1				// ...and return -1
	br.ret.sptk.many rp
END(__ia64_syscall)

	/*
	 * We invoke syscall_trace through this intermediate function to
	 * ensure that the syscall input arguments are not clobbered.  We
	 * also use it to preserve b6, which contains the syscall entry point.
	 */
GLOBAL_ENTRY(invoke_syscall_trace)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,3,0,0		// preserve the 8 inputs; loc0-loc2 scratch
	mov loc0=rp				// save return address across the call
	.body
	mov loc2=b6				// save syscall entry point across the call
	;;
	br.call.sptk.many rp=syscall_trace
.ret3:	mov rp=loc0
	mov ar.pfs=loc1
	mov b6=loc2
	br.ret.sptk.many rp
END(invoke_syscall_trace)

	/*
	 * Invoke a system call, but do some tracing before and after the call.
	 * We MUST preserve the current register frame throughout this routine
	 * because some system calls (such as ia64_execve) directly
	 * manipulate ar.pfs.
	 *
	 * Input:
	 *	r15 = syscall number
	 *	b6  = syscall entry point
	 */
	.global ia64_strace_leave_kernel
GLOBAL_ENTRY(ia64_trace_syscall)
	PT_REGS_UNWIND_INFO(0)
	br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch syscall args
.ret6:	br.call.sptk.many rp=b6			// do the syscall
strace_check_retval:
	cmp.lt p6,p0=r8,r0			// syscall failed?
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
	mov r10=0
(p6)	br.cond.sptk strace_error		// syscall failed ->
	;;					// avoid RAW on r10
strace_save_retval:
.mem.offset 0,0;	st8.spill [r2]=r8	// store return value in slot for r8
.mem.offset 8,0;	st8.spill [r3]=r10	// clear error indication in slot for r10
ia64_strace_leave_kernel:
	br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch return value
.rety:	br.cond.sptk ia64_leave_kernel
strace_error:
	ld8 r3=[r2]				// load pt_regs.r8
	sub r9=0,r8				// negate return value to get errno value
	;;
	cmp.ne p6,p0=r3,r0			// is pt_regs.r8!=0?
	adds r3=16,r2				// r3=&pt_regs.r10
	;;
(p6)	mov r10=-1				// flag error in pt_regs.r10
(p6)	mov r8=r9				// ...and deposit errno in r8 slot
	br.cond.sptk strace_save_retval
END(ia64_trace_syscall)

GLOBAL_ENTRY(ia64_ret_from_clone)
	PT_REGS_UNWIND_INFO(0)
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	/*
	 * We need to call schedule_tail() to complete the scheduling process.
	 * Called by ia64_switch_to after do_fork()->copy_thread().  r8 contains the
	 * address of the previously executing task.
	 */
	br.call.sptk.many rp=ia64_invoke_schedule_tail
.ret8:
#endif
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13	// r2 = &current_thread_info()->flags
	;;
	ld4 r2=[r2]
	;;
	mov r8=0				// child's syscall return value is 0
	tbit.nz p6,p0=r2,TIF_SYSCALL_TRACE	// being traced?
(p6)	br.cond.spnt strace_check_retval
	;;					// added stop bits to prevent r8 dependency
END(ia64_ret_from_clone)
	// fall through
GLOBAL_ENTRY(ia64_ret_from_syscall)
	PT_REGS_UNWIND_INFO(0)
	cmp.ge p6,p7=r8,r0			// syscall executed successfully?
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
	;;
	.mem.offset 0,0
(p6)	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
	.mem.offset 8,0
(p6)	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
(p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
END(ia64_ret_from_syscall)
	// fall through
	/*
	 * Return to the interrupted context: restore the pt_regs state saved
	 * on the kernel stack (r12), check for pending work when returning to
	 * user level, switch the register backing store back, and rfi.
	 */
GLOBAL_ENTRY(ia64_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
	// work.need_resched etc. mustn't get changed by this CPU before it returns to userspace:
(pUser)	cmp.eq.unc p6,p0=r0,r0			// p6 <- pUser
(pUser)	rsm psr.i				// disable interrupts while checking for work
	;;
(pUser)	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
.work_processed:
(p6)	ld4 r18=[r17]				// load current_thread_info()->flags
	adds r2=PT(R8)+16,r12
	adds r3=PT(R9)+16,r12
	;;
	// start restoring the state saved on the kernel stack (struct pt_regs):
	ld8.fill r8=[r2],16
	ld8.fill r9=[r3],16
(p6)	and r19=TIF_WORK_MASK,r18		// any work other than TIF_SYSCALL_TRACE?
	;;
	ld8.fill r10=[r2],16
	ld8.fill r11=[r3],16
(p6)	cmp4.ne.unc p6,p0=r19, r0		// any special work pending?
	;;
	ld8.fill r16=[r2],16
	ld8.fill r17=[r3],16
(p6)	br.cond.spnt .work_pending
	;;
	ld8.fill r18=[r2],16
	ld8.fill r19=[r3],16
	;;
	ld8.fill r20=[r2],16
	ld8.fill r21=[r3],16
	;;
	ld8.fill r22=[r2],16
	ld8.fill r23=[r3],16
	;;
	ld8.fill r24=[r2],16
	ld8.fill r25=[r3],16
	;;
	ld8.fill r26=[r2],16
	ld8.fill r27=[r3],16
	;;
	ld8.fill r28=[r2],16
	ld8.fill r29=[r3],16
	;;
	ld8.fill r30=[r2],16
	ld8.fill r31=[r3],16
	;;
	rsm psr.i | psr.ic	// initiate turning off of interrupt and interruption collection
	invala			// invalidate ALAT
	;;
	ld8 r1=[r2],16		// ar.ccv
	ld8 r13=[r3],16		// ar.fpsr
	;;
	ld8 r14=[r2],16		// b0
	ld8 r15=[r3],16+8	// b7
	;;
	ldf.fill f6=[r2],32
	ldf.fill f7=[r3],32
	;;
	ldf.fill f8=[r2],32
	ldf.fill f9=[r3],32
	;;
	mov ar.ccv=r1
	mov ar.fpsr=r13
	mov b0=r14
	;;
	srlz.i			// ensure interruption collection is off
	mov b7=r15
	;;
	bsw.0			// switch back to bank 0
	;;
	adds r16=16,r12
	adds r17=24,r12
	;;
	ld8 rCRIPSR=[r16],16	// load cr.ipsr
	ld8 rCRIIP=[r17],16	// load cr.iip
	;;
	ld8 rCRIFS=[r16],16	// load cr.ifs
	ld8 rARUNAT=[r17],16	// load ar.unat
	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
	;;
	ld8 rARPFS=[r16],16	// load ar.pfs
	ld8 rARRSC=[r17],16	// load ar.rsc
	;;
	ld8 rARRNAT=[r16],16	// load ar.rnat (may be garbage)
	ld8 rARBSPSTORE=[r17],16 // load ar.bspstore (may be garbage)
	;;
	ld8 rARPR=[r16],16	// load predicates
	ld8 rB6=[r17],16	// load b6
	;;
	ld8 r19=[r16],16	// load ar.rsc value for "loadrs"
	ld8.fill r1=[r17],16	// load r1
	;;
	ld8.fill r2=[r16],16
	ld8.fill r3=[r17],16
	;;
	ld8.fill r12=[r16],16
	ld8.fill r13=[r17],16
	;;
	ld8.fill r14=[r16]
	ld8.fill r15=[r17]
	shr.u r18=r19,16	// get byte size of existing "dirty" partition
	;;
	mov r16=ar.bsp		// get existing backing store pointer
	movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
	;;
	ld4 r17=[r17]		// r17 = cpu_data->phys_stacked_size_p8
(pKern)	br.cond.dpnt skip_rbs_switch
	/*
	 * Restore user backing store.
	 *
	 * NOTE: alloc, loadrs, and cover can't be predicated.
	 */
(pNonSys) br.cond.dpnt dont_preserve_current_frame
	cover				// add current frame into dirty partition
	;;
	mov r19=ar.bsp			// get new backing store pointer
	sub r16=r16,r18			// krbs = old bsp - size of dirty partition
	cmp.ne p9,p0=r0,r0		// clear p9 to skip restore of cr.ifs
	;;
	sub r19=r19,r16			// calculate total byte size of dirty partition
	add r18=64,r18			// don't force in0-in7 into memory...
	;;
	shl r19=r19,16			// shift size of dirty partition into loadrs position
	;;
dont_preserve_current_frame:
	/*
	 * To prevent leaking bits between the kernel and user-space,
	 * we must clear the stacked registers in the "invalid" partition here.
	 * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
	 * 5 registers/cycle on McKinley).
	 */
#	define pRecurse	p6
#	define pReturn	p7
#ifdef CONFIG_ITANIUM
#	define Nregs	10
#else
#	define Nregs	14
#endif
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	shr.u loc1=r18,9		// RNaTslots <= dirtySize / (64*8) + 1
	sub r17=r17,r18			// r17 = (physStackedSize + 8) - dirtySize
	;;
#if 1
	.align 32		// see comment below about gas bug...
#endif
	mov ar.rsc=r19			// load ar.rsc to be used for "loadrs"
	shladd in0=loc1,3,r17
	mov in1=0
#if 0
	// gas-2.12.90 is unable to generate a stop bit after .align, which is bad,
	// because alloc must be at the beginning of an insn-group.
	.align 32
#else
	nop 0
	nop 0
	nop 0
#endif
	;;
rse_clear_invalid:
#ifdef CONFIG_ITANIUM
	// cycle 0
 { .mii
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
}{ .mfb
	add out1=1,in1			// increment recursion count
	nop.f 0
	nop.b 0				// can't do br.call here because of alloc (WAW on CFM)
	;;
}{ .mfi	// cycle 1
	mov loc1=0
	nop.f 0
	mov loc2=0
}{ .mib
	mov loc3=0
	mov loc4=0
(pRecurse) br.call.sptk.many b6=rse_clear_invalid
}{ .mfi	// cycle 2
	mov loc5=0
	nop.f 0
	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
}{ .mib
	mov loc6=0
	mov loc7=0
(pReturn) br.ret.sptk.many b6
}
#else /* !CONFIG_ITANIUM */
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
	add out1=1,in1			// increment recursion count
	mov loc1=0
	mov loc2=0
	;;
	mov loc3=0
	mov loc4=0
	mov loc9=0
	mov loc5=0
	mov loc6=0
(pRecurse) br.call.sptk.many b6=rse_clear_invalid
	;;
	mov loc7=0
	mov loc8=0
	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
	mov loc10=0
	mov loc11=0
(pReturn) br.ret.sptk.many b6
#endif /* !CONFIG_ITANIUM */
#	undef pRecurse
#	undef pReturn
	;;
	alloc r17=ar.pfs,0,0,0,0	// drop current register frame
	;;
	loadrs
	;;
skip_rbs_switch:
	mov b6=rB6
	mov ar.pfs=rARPFS
(pUser)	mov ar.bspstore=rARBSPSTORE
(p9)	mov cr.ifs=rCRIFS
	mov cr.ipsr=rCRIPSR
	mov cr.iip=rCRIIP
	;;
(pUser)	mov ar.rnat=rARRNAT	// must happen with RSE in lazy mode
	mov ar.rsc=rARRSC
	mov ar.unat=rARUNAT
	mov pr=rARPR,-1
	rfi

.work_pending:
	tbit.z p6,p0=r18,TIF_NEED_RESCHED		// current_thread_info()->need_resched==0?
(p6)	br.cond.sptk.few .notify
#if __GNUC__ < 3
	br.call.spnt.many rp=invoke_schedule
#else
	br.call.spnt.many rp=schedule
#endif
.ret9:	cmp.eq p6,p0=r0,r0				// p6 <- 1
	rsm psr.i					// disable interrupts again before re-check
	;;
	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
	br.cond.sptk.many .work_processed		// re-check
.notify:
	br.call.spnt.many rp=notify_resume_user
.ret10:	cmp.ne p6,p0=r0,r0				// p6 <- 0
	br.cond.sptk.many .work_processed		// don't re-check
END(ia64_leave_kernel)

ENTRY(handle_syscall_error)
	/*
	 * Some system calls (e.g., ptrace, mmap) can return arbitrary
	 * values which could lead us to mistake a negative return
	 * value as a failed syscall.  Those syscall must deposit
	 * a non-zero value in pt_regs.r8 to indicate an error.
	 * If pt_regs.r8 is zero, we assume that the call completed
	 * successfully.
	 */
	PT_REGS_UNWIND_INFO(0)
	ld8 r3=[r2]		// load pt_regs.r8
	sub r9=0,r8		// negate return value to get errno
	;;
	mov r10=-1		// return -1 in pt_regs.r10 to indicate error
	cmp.eq p6,p7=r3,r0	// is pt_regs.r8==0?
	adds r3=16,r2		// r3=&pt_regs.r10
	;;
(p6)	mov r9=r8		// pt_regs.r8==0: not really an error
(p6)	mov r10=0
	;;
.mem.offset 0,0; st8.spill [r2]=r9	// store errno in pt_regs.r8 and set unat bit
.mem.offset 8,0; st8.spill [r3]=r10	// store error indication in pt_regs.r10 and set unat bit
	br.cond.sptk ia64_leave_kernel
END(handle_syscall_error)

#ifdef CONFIG_SMP
	/*
	 * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
	 * in case a system call gets restarted.
	 */
GLOBAL_ENTRY(ia64_invoke_schedule_tail)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,1,0
	mov loc0=rp
	mov out0=r8				// Address of previous task
	;;
	br.call.sptk.many rp=schedule_tail
.ret11:	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(ia64_invoke_schedule_tail)
#endif /* CONFIG_SMP */

#if __GNUC__ < 3
	/*
	 * Invoke schedule() while preserving in0-in7, which may be needed
	 * in case a system call gets restarted.  Note that declaring schedule()

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -