entry.s

From: xen virtual machine source code package
Page 1 of 4
	ld8 r19=[r15],24	// restore fpsr
	;;
	ldf.fill f2=[r14],32
	ldf.fill f3=[r15],32
	;;
	ldf.fill f4=[r14],32
	ldf.fill f5=[r15],32
	;;
	ldf.fill f12=[r14],32
	ldf.fill f13=[r15],32
	;;
	ldf.fill f14=[r14],32
	ldf.fill f15=[r15],32
	;;
	ldf.fill f16=[r14],32
	ldf.fill f17=[r15],32
	;;
	ldf.fill f18=[r14],32
	ldf.fill f19=[r15],32
	mov b0=r21
	;;
	ldf.fill f20=[r14],32
	ldf.fill f21=[r15],32
	mov b1=r22
	;;
	ldf.fill f22=[r14],32
	ldf.fill f23=[r15],32
	mov b2=r23
	;;
	mov ar.bspstore=r27
	mov ar.unat=r29		// establish unat holding the NaT bits for r4-r7
	mov b3=r24
	;;
	ldf.fill f24=[r14],32
	ldf.fill f25=[r15],32
	mov b4=r25
	;;
	ldf.fill f26=[r14],32
	ldf.fill f27=[r15],32
	mov b5=r26
	;;
	ldf.fill f28=[r14],32
	ldf.fill f29=[r15],32
	mov ar.pfs=r16
	;;
	ldf.fill f30=[r14],32
	ldf.fill f31=[r15],24
	mov ar.lc=r17
	;;
	ld8.fill r4=[r14],16
	ld8.fill r5=[r15],16
	mov pr=r28,-1
	;;
	ld8.fill r6=[r14],16
	ld8.fill r7=[r15],16
	mov ar.unat=r18				// restore caller's unat
	mov ar.rnat=r30				// must restore after bspstore but before rsc!
	mov ar.fpsr=r19				// restore fpsr
	mov ar.rsc=3				// put RSE back into eager mode, pl 0
	br.cond.sptk.many b7
END(load_switch_stack)

#ifndef XEN
GLOBAL_ENTRY(execve)
	mov r15=__NR_execve			// put syscall number in place
	break __BREAK_SYSCALL
	br.ret.sptk.many rp
END(execve)

GLOBAL_ENTRY(clone)
	mov r15=__NR_clone			// put syscall number in place
	break __BREAK_SYSCALL
	br.ret.sptk.many rp
END(clone)

	/*
	 * Invoke a system call, but do some tracing before and after the call.
	 * We MUST preserve the current register frame throughout this routine
	 * because some system calls (such as ia64_execve) directly
	 * manipulate ar.pfs.
	 */
GLOBAL_ENTRY(ia64_trace_syscall)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * We need to preserve the scratch registers f6-f11 in case the system
	 * call is sigreturn.
	 */
	adds r16=PT(F6)+16,sp
	adds r17=PT(F7)+16,sp
	;;
	stf.spill [r16]=f6,32
	stf.spill [r17]=f7,32
	;;
	stf.spill [r16]=f8,32
	stf.spill [r17]=f9,32
	;;
	stf.spill [r16]=f10
	stf.spill [r17]=f11
	br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
	adds r16=PT(F6)+16,sp
	adds r17=PT(F7)+16,sp
	;;
	ldf.fill f6=[r16],32
	ldf.fill f7=[r17],32
	;;
	ldf.fill f8=[r16],32
	ldf.fill f9=[r17],32
	;;
	ldf.fill f10=[r16]
	ldf.fill f11=[r17]
	// the syscall number may have changed, so re-load it and re-calculate the
	// syscall entry-point:
	adds r15=PT(R15)+16,sp			// r15 = &pt_regs.r15 (syscall #)
	;;
	ld8 r15=[r15]
	mov r3=NR_syscalls - 1
	;;
	adds r15=-1024,r15
	movl r16=sys_call_table
	;;
	shladd r20=r15,3,r16			// r20 = sys_call_table + 8*(syscall-1024)
	cmp.leu p6,p7=r15,r3
	;;
(p6)	ld8 r20=[r20]				// load address of syscall entry point
(p7)	movl r20=sys_ni_syscall
	;;
	mov b6=r20
	br.call.sptk.many rp=b6			// do the syscall
.strace_check_retval:
	cmp.lt p6,p0=r8,r0			// syscall failed?
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
	mov r10=0
(p6)	br.cond.sptk strace_error		// syscall failed ->
	;;					// avoid RAW on r10
.strace_save_retval:
.mem.offset 0,0; st8.spill [r2]=r8		// store return value in slot for r8
.mem.offset 8,0; st8.spill [r3]=r10		// clear error indication in slot for r10
	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
.ret3:	br.cond.sptk .work_pending_syscall_end

strace_error:
	ld8 r3=[r2]				// load pt_regs.r8
	sub r9=0,r8				// negate return value to get errno value
	;;
	cmp.ne p6,p0=r3,r0			// is pt_regs.r8!=0?
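	// A failed syscall is reported to user level as r10 = -1 with the positive
	// errno value in r8; the predicated fixup below applies that convention.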
	adds r3=16,r2				// r3=&pt_regs.r10
	;;
(p6)	mov r10=-1
(p6)	mov r8=r9
	br.cond.sptk .strace_save_retval
END(ia64_trace_syscall)

	/*
	 * When traced and returning from sigreturn, we invoke syscall_trace but then
	 * go straight to ia64_leave_kernel rather than ia64_leave_syscall.
	 */
GLOBAL_ENTRY(ia64_strace_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
{	/*
	 * Some versions of gas generate bad unwind info if the first instruction of a
	 * procedure doesn't go into the first slot of a bundle.  This is a workaround.
	 */
	nop.m 0
	nop.i 0
	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
}
.ret4:	br.cond.sptk ia64_leave_kernel
END(ia64_strace_leave_kernel)
#endif

GLOBAL_ENTRY(ia64_ret_from_clone)
	PT_REGS_UNWIND_INFO(0)
{	/*
	 * Some versions of gas generate bad unwind info if the first instruction of a
	 * procedure doesn't go into the first slot of a bundle.  This is a workaround.
	 */
	nop.m 0
	nop.i 0
	/*
	 * We need to call schedule_tail() to complete the scheduling process.
	 * Called by ia64_switch_to() after do_fork()->copy_thread().  r8 contains the
	 * address of the previously executing task.
	 */
	br.call.sptk.many rp=ia64_invoke_schedule_tail
}
#ifdef XEN
	// new domains are cloned but not exec'ed so switch to user mode here
	cmp.ne pKStk,pUStk=r0,r0
	adds r16 = IA64_VCPU_FLAGS_OFFSET, r13
	;;
	ld8 r16 = [r16]				// arch.arch_vmx.flags
	;;
	cmp.eq p6,p0 = r16, r0
(p6)	br.cond.spnt ia64_leave_kernel		// !VMX_DOMAIN
	;;
	adds r16 = PT(CR_IFS)+16, r12
	;;
	ld8 r16 = [r16]
	cmp.eq pNonSys,pSys=r0,r0		// pSys=0,pNonSys=1
	;;
	cmp.eq p6,p7 = 0x6, r16
(p7)	br.cond.sptk ia64_leave_hypervisor	// VMX_DOMAIN
	;;
	/*
	 * cr.ifs.v==0 && cr.ifm(ar.pfm)==6 means that HYPERVISOR_suspend
	 * has been called. (i.e. HVM with PV driver is restored here)
	 * We need to allocate a dummy RSE stack frame to resume.
	 */
	alloc r32=ar.pfs, 0, 0, 6, 0
	cmp.eq pSys,pNonSys=r0,r0		// pSys=1,pNonSys=0
	;;
	bsw.0
	;;
	mov r21=r13				// set current
	;;
	bsw.1
	;;
	mov r8=r0
	br.cond.sptk.many ia64_leave_hypercall
#else
.ret8:	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 r2=[r2]
	;;
	mov r8=0
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2
	;;
	cmp.ne p6,p0=r2,r0
(p6)	br.cond.spnt .strace_check_retval
#endif
	;;					// added stop bits to prevent r8 dependency
END(ia64_ret_from_clone)
	// fall through
GLOBAL_ENTRY(ia64_ret_from_syscall)
	PT_REGS_UNWIND_INFO(0)
	cmp.ge p6,p7=r8,r0			// syscall executed successfully?
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	mov r10=r0				// clear error indication in r10
#ifndef XEN
(p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
#endif
END(ia64_ret_from_syscall)
	// fall through
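
The retval handling above (.strace_check_retval together with the non-XEN handle_syscall_error path) implements the ia64 syscall return convention: a negative value from the handler means failure, reported to user level as r10 = -1 with the positive errno in r8, while success leaves r10 = 0 and the value in r8. A minimal C sketch of that fixup follows; the struct and function names are invented for illustration and are not the kernel's own.

	#include <stdint.h>

	/* Illustrative stand-in for the saved r8/r10 slots, not the kernel's pt_regs. */
	struct syscall_regs {
		int64_t r8;	/* return value, or positive errno on failure */
		int64_t r10;	/* 0 on success, -1 on failure */
	};

	/* Sketch of the fixup performed by .strace_check_retval / strace_error. */
	void store_syscall_result(struct syscall_regs *regs, int64_t ret)
	{
		if (ret < 0) {			/* cmp.lt p6,p0=r8,r0: syscall failed? */
			regs->r8  = -ret;	/* negate to get the errno value */
			regs->r10 = -1;		/* error indication for user level */
		} else {
			regs->r8  = ret;	/* store the return value */
			regs->r10 = 0;		/* clear the error indication */
		}
	}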

/*
 * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
 *	need to switch to bank 0 and doesn't restore the scratch registers.
 *	To avoid leaking kernel bits, the scratch registers are set to
 *	the following known-to-be-safe values:
 *
 *		  r1: restored (global pointer)
 *		  r2: cleared
 *		  r3: 1 (when returning to user-level)
 *	      r8-r11: restored (syscall return value(s))
 *		 r12: restored (user-level stack pointer)
 *		 r13: restored (user-level thread pointer)
 *		 r14: set to __kernel_syscall_via_epc
 *		 r15: restored (syscall #)
 *	     r16-r17: cleared
 *		 r18: user-level b6
 *		 r19: cleared
 *		 r20: user-level ar.fpsr
 *		 r21: user-level b0
 *		 r22: cleared
 *		 r23: user-level ar.bspstore
 *		 r24: user-level ar.rnat
 *		 r25: user-level ar.unat
 *		 r26: user-level ar.pfs
 *		 r27: user-level ar.rsc
 *		 r28: user-level ip
 *		 r29: user-level psr
 *		 r30: user-level cfm
 *		 r31: user-level pr
 *	      f6-f11: cleared
 *		  pr: restored (user-level pr)
 *		  b0: restored (user-level rp)
 *	          b6: restored
 *		  b7: set to __kernel_syscall_via_epc
 *	     ar.unat: restored (user-level ar.unat)
 *	      ar.pfs: restored (user-level ar.pfs)
 *	      ar.rsc: restored (user-level ar.rsc)
 *	     ar.rnat: restored (user-level ar.rnat)
 *	 ar.bspstore: restored (user-level ar.bspstore)
 *	     ar.fpsr: restored (user-level ar.fpsr)
 *	      ar.ccv: cleared
 *	      ar.csd: cleared
 *	      ar.ssd: cleared
 */
ENTRY(ia64_leave_syscall)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
	 * user- or fsys-mode, hence we disable interrupts early on.
	 *
	 * p6 controls whether current_thread_info()->flags needs to be checked for
	 * extra work.  We always check for extra work when returning to user-level.
	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
	 * is 0.  After extra work processing has been completed, execution
	 * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
	 * needs to be redone.
	 */
#ifdef CONFIG_PREEMPT
	rsm psr.i				// disable interrupts
	cmp.eq pLvSys,p0=r0,r0			// pLvSys=1: leave from syscall
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
	.pred.rel.mutex pUStk,pKStk
(pKStk) ld4 r21=[r20]			// r21 <- preempt_count
(pUStk)	mov r21=0			// r21 <- 0
	;;
	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
#else /* !CONFIG_PREEMPT */
(pUStk)	rsm psr.i
	cmp.eq pLvSys,p0=r0,r0		// pLvSys=1: leave from syscall
(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
#endif
.work_processed_syscall:
	adds r2=PT(LOADRS)+16,r12
	adds r3=PT(AR_BSPSTORE)+16,r12
#ifdef XEN
	;;
#else
	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
(p6)	ld4 r31=[r18]				// load current_thread_info()->flags
#endif
	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
	nop.i 0
	;;
#ifndef XEN
	mov r16=ar.bsp				// M2  get existing backing store pointer
#endif
	ld8 r18=[r2],PT(R9)-PT(B6)		// load b6
#ifndef XEN
(p6)	and r15=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
#endif
	;;
	ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)	// load ar.bspstore (may be garbage)
#ifndef XEN
(p6)	cmp4.ne.unc p6,p0=r15, r0		// any special work pending?
(p6)	br.cond.spnt .work_pending_syscall
#endif
	;;
	// start restoring the state saved on the kernel stack (struct pt_regs):
	ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
	ld8 r11=[r3],PT(CR_IIP)-PT(R11)
(pNonSys) break 0		//      bug check: we shouldn't be here if pNonSys is TRUE!
	;;
	invala			// M0|1 invalidate ALAT
	rsm psr.i | psr.ic	// M2   turn off interrupts and interruption collection
#ifndef XEN
	cmp.eq p9,p0=r0,r0	// A    set p9 to indicate that we should restore cr.ifs
#endif
	ld8 r29=[r2],16		// M0|1 load cr.ipsr
	ld8 r28=[r3],16		// M0|1 load cr.iip
	mov r22=r0		// A    clear r22
	;;
	ld8 r30=[r2],16		// M0|1 load cr.ifs
	ld8 r25=[r3],16		// M0|1 load ar.unat
(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
	;;
	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
(pKStk)	mov r22=psr			// M2   read PSR now that interrupts are disabled
	nop 0
	;;
	ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
	ld8 r27=[r3],PT(PR)-PT(AR_RSC)	// M0|1 load ar.rsc
	mov f6=f0			// F    clear f6
	;;
	ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)	// M0|1 load ar.rnat (may be garbage)
	ld8 r31=[r3],PT(R1)-PT(PR)		// M0|1 load predicates
	mov f7=f0				// F    clear f7
	;;
	ld8 r20=[r2],PT(R12)-PT(AR_FPSR)	// M0|1 load ar.fpsr
	ld8.fill r1=[r3],16			// M0|1 load r1
(pUStk) mov r17=1				// A
	;;
(pUStk) st1 [r14]=r17				// M2|3
	ld8.fill r13=[r3],16			// M0|1
	mov f8=f0				// F    clear f8
	;;
	ld8.fill r12=[r2]			// M0|1 restore r12 (sp)
#ifdef XEN
	ld8.fill r2=[r3]			// M0|1
#else
	ld8.fill r15=[r3]			// M0|1 restore r15
#endif
	mov b6=r18				// I0   restore b6
#ifdef XEN
	movl r17=THIS_CPU(ia64_phys_stacked_size_p8)    // A
#else
	addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A
#endif
	mov f9=f0					// F    clear f9
(pKStk) br.cond.dpnt.many skip_rbs_switch		// B

	srlz.d				// M0   ensure interruption collection is off (for cover)
	shr.u r18=r19,16		// I0|1 get byte size of existing "dirty" partition
#ifndef XEN
	cover				// B    add current frame into dirty partition & set cr.ifs
#endif
	;;
(pUStk) ld4 r17=[r17]			// M0|1 r17 = cpu_data->phys_stacked_size_p8
	mov r19=ar.bsp			// M2   get new backing store pointer
	mov f10=f0			// F    clear f10
	nop.m 0
#ifdef XEN
	mov r14=r0
#else
	movl r14=__kernel_syscall_via_epc // X
#endif
	;;
	mov.m ar.csd=r0			// M2   clear ar.csd
	mov.m ar.ccv=r0			// M2   clear ar.ccv
	mov b7=r14			// I0   clear b7 (hint with __kernel_syscall_via_epc)

	mov.m ar.ssd=r0			// M2   clear ar.ssd
	mov f11=f0			// F    clear f11
	br.cond.sptk.many rbs_switch	// B
END(ia64_leave_syscall)

#ifdef CONFIG_IA32_SUPPORT
GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
	PT_REGS_UNWIND_INFO(0)
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
	;;
	.mem.offset 0,0
	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
	.mem.offset 8,0
	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
END(ia64_ret_from_ia32_execve)
	// fall through
#endif /* CONFIG_IA32_SUPPORT */

GLOBAL_ENTRY(ia64_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
	 * user- or fsys-mode, hence we disable interrupts early on.
	 *
	 * p6 controls whether current_thread_info()->flags needs to be checked for
	 * extra work.  We always check for extra work when returning to user-level.
	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
	 * is 0.  After extra work processing has been completed, execution
	 * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
	 * needs to be redone.
	 */
#ifdef CONFIG_PREEMPT
	rsm psr.i				// disable interrupts
	cmp.eq p0,pLvSys=r0,r0			// pLvSys=0: leave from kernel
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
	.pred.rel.mutex pUStk,pKStk
(pKStk)	ld4 r21=[r20]			// r21 <- preempt_count
(pUStk)	mov r21=0			// r21 <- 0
	;;
	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
#else

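For readers tracking the exit-path comments above: with CONFIG_PREEMPT the extra-work check (predicate p6) is taken both when returning to user level and when returning to kernel level with a zero preempt_count; without CONFIG_PREEMPT only user-level returns are checked. A rough C rendering of that decision follows; the function and parameter names are hypothetical and exist only for illustration.

	#include <stdbool.h>

	/* Mirrors "p6 <- pUStk || (preempt_count == 0)" from the CONFIG_PREEMPT
	 * prologue of ia64_leave_syscall/ia64_leave_kernel. */
	bool need_extra_work_check(bool returning_to_user, int preempt_count)
	{
	#ifdef CONFIG_PREEMPT
		return returning_to_user || preempt_count == 0;
	#else
		/* Without CONFIG_PREEMPT, only user-level returns are checked (p6 <- pUStk). */
		(void)preempt_count;
		return returning_to_user;
	#endif
	}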