
ivt.s

Youlong 2410 Linux 2.6.8 kernel source code
Page 1 of 4

	 * Output:	r17:	physical address of L3 PTE of faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Clobbered:	b0, r18, r19, r21, psr.dt (cleared)
	 */
	rsm psr.dt				// switch to using physical data addressing
	mov r19=IA64_KR(PT_BASE)		// get the page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	;;
	shr.u r17=r16,61			// get the region number into r17
	;;
	cmp.eq p6,p7=5,r17			// is faulting address in region 5?
	shr.u r18=r16,PGDIR_SHIFT		// get bits 33-63 of faulting address
	;;
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place
	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
	shr.u r18=r16,PMD_SHIFT			// shift L2 index into position
	;;
	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
	;;
(p7)	ld8 r17=[r17]				// fetch the L2 entry (may be 0)
	shr.u r19=r16,PAGE_SHIFT		// shift L3 index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was L2 entry NULL?
	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
(p6)	br.cond.spnt page_fault
	mov b0=r30
	br.sptk.many b0				// return to continuation point
END(nested_dtlb_miss)

	.org ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(ikey_miss)
	DBG_FAULT(6)
	FAULT(6)
END(ikey_miss)

	//-----------------------------------------------------------------------------------
	// call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
ENTRY(page_fault)
	ssm psr.dt
	;;
	srlz.i
	;;
	SAVE_MIN_WITH_COVER
	alloc r15=ar.pfs,0,0,3,0
	mov out0=cr.ifa
	mov out1=cr.isr
	adds r3=8,r2				// set up second base pointer
	;;
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	movl r14=ia64_leave_kernel
	;;
	SAVE_REST
	mov rp=r14
	;;
	adds out2=16,r12			// out2 = pointer to pt_regs
	br.call.sptk.many b6=ia64_do_page_fault	// ignore return address
END(page_fault)

	.org ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(dkey_miss)
	DBG_FAULT(7)
	FAULT(7)
END(dkey_miss)

	.org ia64_ivt+0x2000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(dirty_bit)
	DBG_FAULT(8)
	/*
	 * What we do here is to simply turn on the dirty bit in the PTE.  We need to
	 * update both the page-table and the TLB entry.  To efficiently access the PTE,
	 * we address it through the virtual page table.  Most likely, the TLB entry for
	 * the relevant virtual page table page is still present in the TLB so we can
	 * normally do this without additional TLB misses.  In case the necessary virtual
	 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
	 * up the physical address of the L3 PTE and then continue at label 1 below.
	 */
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
	mov r31=pr				// save pr
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.d r25				// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data
	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.d r18				// install updated PTE
#endif
	mov pr=r31,-1				// restore pr
	rfi
END(dirty_bit)

	.org ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(iaccess_bit)
	DBG_FAULT(9)
	// Like Entry 8, except for instruction access
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	mov r31=pr				// save predicates
#ifdef CONFIG_ITANIUM
	/*
	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
	 */
	mov r17=cr.ipsr
	;;
	mov r18=cr.iip
	tbit.z p6,p0=r17,IA64_PSR_IS_BIT	// IA64 instruction set?
	;;
(p6)	mov r16=r18				// if so, use cr.iip instead of cr.ifa
#endif /* CONFIG_ITANIUM */
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.i r25				// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data
	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else /* !CONFIG_SMP */
	;;
1:	ld8 r18=[r17]
	;;
	or r18=_PAGE_A,r18			// set the accessed bit
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.i r18				// install updated PTE
#endif /* !CONFIG_SMP */
	mov pr=r31,-1
	rfi
END(iaccess_bit)

	.org ia64_ivt+0x2800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(daccess_bit)
	DBG_FAULT(10)
	// Like Entry 8, except for data access
	mov r16=cr.ifa				// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	thash r17=r16				// compute virtual address of L3 PTE
	mov r31=pr
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	;;
	cmpxchg8.acq r26=[r17],r25,ar.ccv
	mov r24=PAGE_SHIFT<<2
	;;
	cmp.eq p6,p7=r26,r18
	;;
(p6)	itc.d r25				// install updated PTE
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data
	;;
	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_A,r18			// set the accessed bit
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.d r18				// install updated PTE
#endif
	mov b0=r29				// restore b0
	mov pr=r31,-1
	rfi
END(daccess_bit)

	.org ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(break_fault)
	/*
	 * The streamlined system call entry/exit paths only save/restore the initial part
	 * of pt_regs.  This implies that the callers of system-calls must adhere to the
	 * normal procedure calling conventions.
	 *
	 *   Registers to be saved & restored:
	 *	CR registers: cr.ipsr, cr.iip, cr.ifs
	 *	AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
	 *	others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
	 *   Registers to be restored only:
	 *	r8-r11: output value from the system call.
	 *
	 * During system call exit, scratch registers (including r15) are modified/cleared
	 * to prevent leaking bits from kernel to user level.
	 */
	DBG_FAULT(11)
	mov r16=IA64_KR(CURRENT)		// r16 = current task; 12 cycle read lat.
	mov r17=cr.iim
	mov r18=__IA64_BREAK_SYSCALL
	mov r21=ar.fpsr
	mov r29=cr.ipsr
	mov r19=b6
	mov r25=ar.unat
	mov r27=ar.rsc
	mov r26=ar.pfs
	mov r28=cr.iip
	mov r31=pr				// prepare to save predicates
	mov r20=r1
	;;
	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
	cmp.eq p0,p7=r18,r17			// is this a system call? (p7 <- false, if so)
(p7)	br.cond.spnt non_syscall
	;;
	ld1 r17=[r16]				// load current->thread.on_ustack flag
	st1 [r16]=r0				// clear current->thread.on_ustack flag
	add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16	// set r1 for MINSTATE_START_SAVE_MIN_VIRT
	;;
	invala

	/* adjust return address so we skip over the break instruction: */
	extr.u r8=r29,41,2			// extract ei field from cr.ipsr
	;;
	cmp.eq p6,p7=2,r8			// isr.ei==2?
	mov r2=r1				// setup r2 for ia64_syscall_setup
	;;
(p6)	mov r8=0				// clear ei to 0
(p6)	adds r28=16,r28				// switch cr.iip to next bundle cr.ipsr.ei wrapped
(p7)	adds r8=1,r8				// increment ei to next slot
	;;
	cmp.eq pKStk,pUStk=r0,r17		// are we in kernel mode already?
	dep r29=r8,r29,41,2			// insert new ei into cr.ipsr
	;;

	// switch from user to kernel RBS:
	MINSTATE_START_SAVE_MIN_VIRT
	br.call.sptk.many b7=ia64_syscall_setup
	;;
	MINSTATE_END_SAVE_MIN_VIRT		// switch to bank 1
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i				// restore psr.i
	;;
	mov r3=NR_syscalls - 1
	movl r16=sys_call_table
	adds r15=-1024,r15			// r15 contains the syscall number---subtract 1024
	movl r2=ia64_ret_from_syscall
	;;
	shladd r20=r15,3,r16			// r20 = sys_call_table + 8*(syscall-1024)
	cmp.leu p6,p7=r15,r3			// (syscall > 0 && syscall < 1024 + NR_syscalls) ?
	mov rp=r2				// set the real return addr
	;;
(p6)	ld8 r20=[r20]				// load address of syscall entry point
(p7)	movl r20=sys_ni_syscall
	add r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 r2=[r2]				// r2 = current_thread_info()->flags
	;;
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
	;;
	cmp.eq p8,p0=r2,r0
	mov b6=r20
	;;
(p8)	br.call.sptk.many b6=b6			// ignore this return addr
	br.cond.sptk ia64_trace_syscall
	// NOT REACHED
END(break_fault)

	.org ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(interrupt)
	DBG_FAULT(12)
	mov r31=pr		// prepare to save predicates
	;;
	SAVE_MIN_WITH_COVER	// uses r31; defines r2 and r3
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	adds r3=8,r2		// set up second base pointer for SAVE_REST
	srlz.i			// ensure everybody knows psr.ic is back on
	;;
	SAVE_REST
	;;
	alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
	mov out0=cr.ivr		// pass cr.ivr as first arg
	add out1=16,sp		// pass pointer to pt_regs as second arg
	;;
	srlz.d			// make sure we see the effect of cr.ivr
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.call.sptk.many b6=ia64_handle_irq
END(interrupt)

	.org ia64_ivt+0x3400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
	DBG_FAULT(13)
	FAULT(13)

	.org ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
	DBG_FAULT(14)
	FAULT(14)
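
The nested_dtlb_miss walker at the top of this page indexes three levels of page tables with different slices of the faulting address: the L1 (pgd) index from the bits above PGDIR_SHIFT, the L2 (pmd) index from the bits above PMD_SHIFT, and the L3 (pte) index from the bits above PAGE_SHIFT, with region 5 rooted at swapper_pg_dir and the other regions at the per-task page-table base. The C sketch below is only an illustration of that walk, not kernel code: the shift values are placeholder examples, the region-number handling is omitted, and entries are treated as directly dereferenceable pointers, whereas the assembly walks physical addresses with psr.dt cleared.

#include <stddef.h>
#include <stdint.h>

#define PAGE_SHIFT   14                        /* example only: 16KB pages */
#define PMD_SHIFT    (PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PGDIR_SHIFT  (PMD_SHIFT + (PAGE_SHIFT - 3))
#define PTRS_PER_PTE (1UL << (PAGE_SHIFT - 3)) /* 8-byte entries per page */

typedef uint64_t pte_t;

/* Returns the address of the L3 PTE for 'addr', or NULL if an intermediate
 * entry is missing (the assembly branches to page_fault in that case). */
static pte_t *walk_page_table(uint64_t *pgd_base, uint64_t addr)
{
	uint64_t *pgd = pgd_base + ((addr >> PGDIR_SHIFT) & (PTRS_PER_PTE - 1));
	if (*pgd == 0)
		return NULL;                   /* L1 entry NULL -> page_fault */

	uint64_t *pmd = (uint64_t *)(uintptr_t)*pgd
			+ ((addr >> PMD_SHIFT) & (PTRS_PER_PTE - 1));
	if (*pmd == 0)
		return NULL;                   /* L2 entry NULL -> page_fault */

	return (pte_t *)(uintptr_t)*pmd
			+ ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}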
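
On SMP, the dirty-bit and access-bit handlers cannot simply read-modify-write the PTE, because another CPU may update it concurrently; they set the bits with cmpxchg8.acq and only install the new value into the TLB (itc.d / itc.i) if the compare-exchange still saw the value they originally read. A minimal C sketch of that idea, using a GCC compare-and-swap builtin and placeholder bit positions for _PAGE_D/_PAGE_A (not the real ia64 definitions):

#include <stdbool.h>
#include <stdint.h>

#define _PAGE_A (1UL << 5)   /* accessed bit (placeholder position) */
#define _PAGE_D (1UL << 6)   /* dirty bit (placeholder position) */

/* Returns true if this CPU's update won the race and the new value should be
 * installed into the TLB (the assembly does that with "itc.d"/"itc.i"). */
static bool pte_set_dirty_accessed(volatile uint64_t *pte)
{
	uint64_t old_pte = *pte;                      /* ld8 r18=[r17]                  */
	uint64_t new_pte = old_pte | _PAGE_D | _PAGE_A; /* or r25=_PAGE_D|_PAGE_A,r18   */

	/* cmpxchg8.acq r26=[r17],r25,ar.ccv */
	uint64_t seen = __sync_val_compare_and_swap(pte, old_pte, new_pte);

	return seen == old_pte;                       /* cmp.eq p6,p7=r26,r18           */
}

If the CAS loses the race, the handler leaves the other CPU's value in place; the assembly then re-reads the PTE and purges the stale translation with ptc.l if needed.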
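
The tail of break_fault turns the syscall number in r15 into a handler address: ia64 system calls are numbered from 1024, so the code subtracts 1024, does an unsigned bounds check against NR_syscalls - 1 (cmp.leu), and loads the entry from sys_call_table, falling back to sys_ni_syscall for anything out of range. The self-contained C sketch below shows only that dispatch decision; the table contents and NR_syscalls value are stand-ins for illustration.

#include <stdint.h>
#include <stdio.h>

typedef long (*syscall_fn_t)(void);

static long sys_ni_syscall(void) { return -1; }  /* "not implemented" fallback */
static long sys_demo(void)       { return 0; }   /* stand-in syscall body */

#define NR_syscalls 4                            /* placeholder count */
static syscall_fn_t sys_call_table[NR_syscalls] = {
	sys_demo, sys_demo, sys_demo, sys_demo,
};

static syscall_fn_t pick_syscall_handler(uint64_t r15)
{
	uint64_t num = r15 - 1024;        /* adds r15=-1024,r15 */

	/* cmp.leu p6,p7=r15,r3 with r3 = NR_syscalls - 1: the comparison is
	 * unsigned, so a number below 1024 wraps around and also fails. */
	if (num <= NR_syscalls - 1)
		return sys_call_table[num];   /* (p6) ld8 r20=[r20]           */
	return sys_ni_syscall;                /* (p7) movl r20=sys_ni_syscall */
}

int main(void)
{
	printf("%ld\n", pick_syscall_handler(1025)());  /* in range     -> 0  */
	printf("%ld\n", pick_syscall_handler(9999)());  /* out of range -> -1 */
	return 0;
}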
