⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 head.s

📁 xen虚拟机源代码安装包
💻 S
📖 第 1 页 / 共 3 页
字号:
	// Tail of __ia64_init_fpu (entry is above this chunk): finish
	// initializing the high FP register file f97-f127 to canonical
	// zero.  sp points at a zeroed spill area; the bundles interleave
	// loads (M0/M1), setf.s (M2/M3) and moves (F) to fill all issue
	// ports per cycle.
	ldfps	 f97,f98=[sp]		// M0
	ldfps	 f99,f100=[sp]		// M1
	mov      f101=f0		// F
	setf.s	 f102=r0		// M2
	setf.s	 f103=r0		// M3
	mov      f104=f0		// F
	ldfps	 f105,f106=[sp]		// M0
	ldfps	 f107,f108=[sp]		// M1
	mov      f109=f0		// F
	setf.s	 f110=r0		// M2
	setf.s	 f111=r0		// M3
	mov      f112=f0		// F
	ldfps	 f113,f114=[sp]		// M0
	ldfps	 f115,f116=[sp]		// M1
	mov      f117=f0		// F
	setf.s	 f118=r0		// M2
	setf.s	 f119=r0		// M3
	mov      f120=f0		// F
	ldfps	 f121,f122=[sp]		// M0
	ldfps	 f123,f124=[sp]		// M1
	mov      f125=f0		// F
	setf.s	 f126=r0		// M2
	setf.s	 f127=r0		// M3
	br.ret.sptk.many rp		// F
END(__ia64_init_fpu)

/*
 * Switch execution mode from virtual to physical
 *
 * Inputs:
 *	r16 = new psr to establish
 * Output:
 *	r19 = old virtual address of ar.bsp
 *	r20 = old virtual address of sp
 *
 * Note: RSE must already be in enforced lazy mode
 */
GLOBAL_ENTRY(ia64_switch_mode_phys)
 {
	alloc r2=ar.pfs,0,0,0,0
	rsm psr.i | psr.ic		// disable interrupts and interrupt collection
	mov r15=ip			// r15 = current IP, base for computing resume address
 }
	;;
 {
	flushrs				// must be first insn in group
	srlz.i
 }
	;;
	mov cr.ipsr=r16			// set new PSR (takes effect at the rfi below)
	add r3=1f-ia64_switch_mode_phys,r15	// r3 = virtual address of label 1 below
	mov r19=ar.bsp			// return old ar.bsp to caller
	mov r20=sp			// return old sp to caller
	mov r14=rp			// get return address into a general register
	;;

	// going to physical mode, use tpa to translate virt->phys
	tpa r17=r19
	tpa r3=r3
	tpa sp=sp
	tpa r14=r14
	;;

	mov r18=ar.rnat			// save ar.rnat
	mov ar.bspstore=r17		// this steps on ar.rnat
	mov cr.iip=r3			// resume at label 1 (physical address)
	mov cr.ifs=r0
	;;
	mov ar.rnat=r18			// restore ar.rnat
	rfi				// must be last insn in group
	;;
1:	mov rp=r14
	br.ret.sptk.many rp
END(ia64_switch_mode_phys)

/*
 * Switch execution mode from physical to virtual
 *
 * Inputs:
 *	r16 = new psr to establish
 *	r19 = new bspstore to establish
 *	r20 = new sp to establish
 *
 * Note: RSE must already be in enforced lazy mode
 */
GLOBAL_ENTRY(ia64_switch_mode_virt)
 {
	alloc r2=ar.pfs,0,0,0,0
	rsm psr.i | psr.ic		// disable interrupts and interrupt collection
	mov r15=ip
 }
	;;
 {
	flushrs				// must be first insn in group
	srlz.i
 }
	;;
	mov cr.ipsr=r16			// set new PSR
	// (continuation of ia64_switch_mode_virt; cr.ipsr was just loaded
	// with the new PSR on the preceding line)
	add r3=1f-ia64_switch_mode_virt,r15	// r3 = offset of resume label 1
	mov r14=rp			// get return address into a general register
	;;

	// going to virtual
	//   - for code addresses, set upper bits of addr to KERNEL_START
	//   - for stack addresses, copy from input argument
	movl r18=KERNEL_START
	dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
	dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
	mov sp=r20			// new stack pointer from input argument
	;;
	or r3=r3,r18			// r3  = KERNEL_START | page offset (virtual resume addr)
	or r14=r14,r18			// r14 = virtual return address
	;;

	mov r18=ar.rnat			// save ar.rnat
	mov ar.bspstore=r19		// this steps on ar.rnat
	mov cr.iip=r3			// resume at label 1 (virtual address)
	mov cr.ifs=r0
	;;
	mov ar.rnat=r18			// restore ar.rnat
	rfi				// must be last insn in group
	;;
1:	mov rp=r14
	br.ret.sptk.many rp
END(ia64_switch_mode_virt)

/*
 * Busy-wait delay loop.
 * In: r32 = iteration count (loaded into the loop-count register ar.lc).
 * ar.lc is preserved via r2 (declared in unwind info with .save ar.lc,r2).
 */
GLOBAL_ENTRY(ia64_delay_loop)
	.prologue
{	nop 0			// work around GAS unwind info generation bug...
	.save ar.lc,r2
	mov r2=ar.lc
	.body
	;;
	mov ar.lc=r32
}
	;;
	// force loop to be 32-byte aligned (GAS bug means we cannot use .align
	// inside function body without corrupting unwind info).
{	nop 0
}
1:	br.cloop.sptk.few 1b	// spin: decrement ar.lc until it reaches zero
	;;
	mov ar.lc=r2		// restore caller's ar.lc
	br.ret.sptk.many rp
END(ia64_delay_loop)

#ifndef XEN
/*
 * Return a CPU-local timestamp in nano-seconds.  This timestamp is
 * NOT synchronized across CPUs; its return value must never be
 * compared against the values returned on another CPU.  The usage in
 * kernel/sched.c ensures that.
 *
 * The return-value of sched_clock() is NOT supposed to wrap-around.
 * If it did, it would cause some scheduling hiccups (at the worst).
 * Fortunately, with a 64-bit cycle-counter ticking at 100GHz, even
 * that would happen only once every 5+ years.
 *
 * The code below basically calculates:
 *
 *   (ia64_get_itc() * local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT
 *
 * except that the multiplication and the shift are done with 128-bit
 * intermediate precision so that we can produce a full 64-bit result.
 */
GLOBAL_ENTRY(sched_clock)
#ifdef XEN
	movl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET
#else
	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
#endif
	mov.m r9=ar.itc		// fetch cycle-counter				(35 cyc)
	;;
	ldf8 f8=[r8]		// f8 = local_cpu_data->nsec_per_cyc
	;;
	setf.sig f9=r9		// certain to stall, so issue it _after_ ldf8...
	;;
	xmpy.lu f10=f9,f8	// calculate low 64 bits of 128-bit product	(4 cyc)
	xmpy.hu f11=f9,f8	// calculate high 64 bits of 128-bit product
	;;
	getf.sig r8=f10		//						(5 cyc)
	getf.sig r9=f11
	;;
	// r8 = (itc * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT, full 64 bits
	shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
	br.ret.sptk.many rp
END(sched_clock)

/*
 * First code run by a newly-created kernel thread: call
 * kernel_thread_helper(out0=r9, out1=r11), then pass its return
 * value (r8) to sys_exit().  Neither call is expected to return.
 */
GLOBAL_ENTRY(start_kernel_thread)
	.prologue
	.save rp, r0				// this is the end of the call-chain
	.body
	alloc r2 = ar.pfs, 0, 0, 2, 0
	mov out0 = r9
	mov out1 = r11;;
	br.call.sptk.many rp = kernel_thread_helper;;
	mov out0 = r8
	br.call.sptk.many rp = sys_exit;;
1:	br.sptk.few 1b				// not reached
END(start_kernel_thread)
#endif /* XEN */

#ifdef CONFIG_IA64_BRL_EMU

/*
 *  Assembly routines used by brl_emu.c to set preserved register state.
 *  Each ia64_set_<reg> takes its new value in r32.
 */

#define SET_REG(reg)				\
 GLOBAL_ENTRY(ia64_set_##reg);			\
	alloc r16=ar.pfs,1,0,0,0;		\
	mov reg=r32;				\
	;;					\
	br.ret.sptk.many rp;			\
 END(ia64_set_##reg)

SET_REG(b1);
SET_REG(b2);
SET_REG(b3);
SET_REG(b4);
SET_REG(b5);

#endif /* CONFIG_IA64_BRL_EMU */

#ifdef CONFIG_SMP
	/*
	 * This routine handles spinlock contention.  It uses a non-standard calling
	 * convention to avoid converting leaf routines into interior routines.  Because
	 * of this special convention, there are several restrictions:
	 *
	 * - do not use gp relative variables, this code is called from the kernel
	 *   and from modules, r1 is undefined.
	 * - do not use stacked registers, the caller owns them.
	 * - do not use the scratch stack space, the caller owns it.
	 * - do not use any registers other than the ones listed below
	 *
	 * Inputs:
	 *   ar.pfs - saved CFM of caller
	 *   ar.ccv - 0 (and available for use)
	 *   r27    - flags from spin_lock_irqsave or 0.  Must be preserved.
	 *   r28    - available for use.
	 /* (continuation of the spinlock-contention calling-convention notes)
	 *   r29    - available for use.
	 *   r30    - available for use.
	 *   r31    - address of lock, available for use.
	 *   b6     - return address
	 *   p14    - available for use.
	 *   p15    - used to track flag status.
	 *
	 * If you patch this code to use more registers, do not forget to update
	 * the clobber lists for spin_lock() in include/asm-ia64/spinlock.h.
	 */

#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)

// Variant for old GCC (< 3.3): spins until the lock word reads zero,
// then branches back to the caller (b6), which retries the acquire.
GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4)
	.prologue
	.save ar.pfs, r0	// this code effectively has a zero frame size
	.save rp, r28
	.body
	nop 0
	tbit.nz p15,p0=r27,IA64_PSR_I_BIT	// p15 = caller had interrupts enabled
	.restore sp		// pop existing prologue after next insn
	mov b6 = r28
	.prologue
	.save ar.pfs, r0
	.altrp b6
	.body
	;;
(p15)	ssm psr.i		// reenable interrupts if they were on
				// DavidM says that srlz.d is slow and is not required in this case
.wait:
	// exponential backoff, kdb, lockmeter etc. go in here
	hint @pause
	ld4 r30=[r31]		// don't use ld4.bias; if it's contended, we won't write the word
	nop 0
	;;
	cmp4.ne p14,p0=r30,r0
(p14)	br.cond.sptk.few .wait
(p15)	rsm psr.i		// disable interrupts if we reenabled them
	br.cond.sptk.few b6	// lock is now free, try to acquire
	.global ia64_spinlock_contention_pre3_4_end	// for kernprof
ia64_spinlock_contention_pre3_4_end:
END(ia64_spinlock_contention_pre3_4)

#else

// Variant for GCC >= 3.3: spins, then acquires the lock itself with
// cmpxchg4.acq and returns to the caller with the lock held.
GLOBAL_ENTRY(ia64_spinlock_contention)
	.prologue
	.altrp b6
	.body
	tbit.nz p15,p0=r27,IA64_PSR_I_BIT	// p15 = caller had interrupts enabled
	;;
.wait:
(p15)	ssm psr.i		// reenable interrupts if they were on
				// DavidM says that srlz.d is slow and is not required in this case
.wait2:
	// exponential backoff, kdb, lockmeter etc.
	// (continuation of ia64_spinlock_contention: backoff/instrumentation
	// hooks go in here)
	hint @pause
	ld4 r30=[r31]		// don't use ld4.bias; if it's contended, we won't write the word
	;;
	cmp4.ne p14,p0=r30,r0
	mov r30 = 1		// value to store when attempting the acquire
(p14)	br.cond.sptk.few .wait2
(p15)	rsm psr.i		// disable interrupts if we reenabled them
	;;
	cmpxchg4.acq r30=[r31], r30, ar.ccv	// try to take the lock (ar.ccv == 0)
	;;
	cmp4.ne p14,p0=r0,r30
(p14)	br.cond.sptk.few .wait	// someone beat us to it; spin again
	br.ret.sptk.many b6	// lock is now taken
END(ia64_spinlock_contention)

#endif

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Transfer control to SAL for CPU removal.
 * In: in0 = pointer to the saved-register area (virtual); it is
 * translated to physical (r25) and used by the RESTORE_* macros after
 * the TLB purge.  Runs with interrupts and translation disabled.
 */
GLOBAL_ENTRY(ia64_jump_to_sal)
	alloc r16=ar.pfs,1,0,0,0;;
	rsm psr.i  | psr.ic
{	flushrs
	srlz.i
}
	tpa r25=in0		// r25 = physical address of saved-register area
	movl r18=tlb_purge_done;;
	DATA_VA_TO_PA(r18);;
	mov b1=r18 	// Return location
	movl r18=ia64_do_tlb_purge;;
	DATA_VA_TO_PA(r18);;
	mov b2=r18 	// doing tlb_flush work
	mov ar.rsc=0  // Put RSE  in enforced lazy, LE mode
	movl r17=1f;;
	DATA_VA_TO_PA(r17);;
	mov cr.iip=r17		// resume at label 1 in physical mode
	movl r16=SAL_PSR_BITS_TO_SET;;
	mov cr.ipsr=r16
	mov cr.ifs=r0;;
	rfi;;
1:
	/*
	 * Invalidate all TLB data/inst
	 */
	br.sptk.many b2;; // jump to tlb purge code

tlb_purge_done:
	// Restore everything from the save area at r25 (advanced by the
	// RESTORE_REG macros / post-increment loads), using r17 as scratch.
	RESTORE_REGION_REGS(r25, r17,r18,r19);;
	RESTORE_REG(b0, r25, r17);;
	RESTORE_REG(b1, r25, r17);;
	RESTORE_REG(b2, r25, r17);;
	RESTORE_REG(b3, r25, r17);;
	RESTORE_REG(b4, r25, r17);;
	RESTORE_REG(b5, r25, r17);;
	ld8 r1=[r25],0x08;;	// gp
	ld8 r12=[r25],0x08;;	// sp
	ld8 r13=[r25],0x08;;	// tp
	RESTORE_REG(ar.fpsr, r25, r17);;
	RESTORE_REG(ar.pfs, r25, r17);;
	RESTORE_REG(ar.rnat, r25, r17);;
	RESTORE_REG(ar.unat, r25, r17);;
	RESTORE_REG(ar.bspstore, r25, r17);;
	RESTORE_REG(cr.dcr, r25, r17);;
	RESTORE_REG(cr.iva, r25, r17);;
	RESTORE_REG(cr.pta, r25, r17);;
#ifdef XEN
	dv_serialize_instruction
#endif
	RESTORE_REG(cr.itv, r25, r17);;
	RESTORE_REG(cr.pmv, r25, r17);;
	RESTORE_REG(cr.cmcv, r25, r17);;
	RESTORE_REG(cr.lrr0, r25, r17);;
	RESTORE_REG(cr.lrr1, r25, r17);;
	ld8 r4=[r25],0x08;;	// preserved r4-r7
	ld8 r5=[r25],0x08;;
	ld8 r6=[r25],0x08;;
	ld8 r7=[r25],0x08;;
	ld8 r17=[r25],0x08;;
	mov pr=r17,-1;;		// restore all predicate registers
	RESTORE_REG(ar.lc, r25, r17);;
	/*
	 * Now Restore floating point regs
	 */
	ldf.fill.nta f2=[r25],16;;
	ldf.fill.nta f3=[r25],16;;
	ldf.fill.nta f4=[r25],16;;
	ldf.fill.nta f5=[r25],16;;
	// (continuation of ia64_jump_to_sal: restore the remaining
	// preserved FP registers f16-f31 from the save area)
	ldf.fill.nta f16=[r25],16;;
	ldf.fill.nta f17=[r25],16;;
	ldf.fill.nta f18=[r25],16;;
	ldf.fill.nta f19=[r25],16;;
	ldf.fill.nta f20=[r25],16;;
	ldf.fill.nta f21=[r25],16;;
	ldf.fill.nta f22=[r25],16;;
	ldf.fill.nta f23=[r25],16;;
	ldf.fill.nta f24=[r25],16;;
	ldf.fill.nta f25=[r25],16;;
	ldf.fill.nta f26=[r25],16;;
	ldf.fill.nta f27=[r25],16;;
	ldf.fill.nta f28=[r25],16;;
	ldf.fill.nta f29=[r25],16;;
	ldf.fill.nta f30=[r25],16;;
	ldf.fill.nta f31=[r25],16;;
	/*
	 * Now that we have done all the register restores
	 * we are now ready for the big DIVE to SAL Land
	 */
	ssm psr.ic;;		// re-enable interrupt collection
	srlz.d;;
	br.ret.sptk.many b0;;	// return via restored b0
END(ia64_jump_to_sal)
#endif /* CONFIG_HOTPLUG_CPU */

#endif /* CONFIG_SMP */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -