entry.s

From "Linux Kernel 2.6.9 for OMAP1710" · .S source · 1,027 lines total · page 1 of 2

S
1,027
字号
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */	stw	r12,THREAD+THREAD_SPEFSCR(r2)#endif /* CONFIG_SPE */	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */	beq+	1f	andc	r11,r11,r0	MTMSRD(r11)	isync1:	stw	r11,_MSR(r1)	mfcr	r10	stw	r10,_CCR(r1)	stw	r1,KSP(r3)	/* Set old stack pointer */#ifdef CONFIG_SMP	/* We need a sync somewhere here to make sure that if the	 * previous task gets rescheduled on another CPU, it sees all	 * stores it has performed on this one.	 */	sync#endif /* CONFIG_SMP */	tophys(r0,r4)	CLR_TOP32(r0)	mtspr	SPRG3,r0	/* Update current THREAD phys addr */	lwz	r1,KSP(r4)	/* Load new stack pointer */	/* save the old current 'last' for return value */	mr	r3,r2	addi	r2,r4,-THREAD	/* Update current */#ifdef CONFIG_ALTIVECBEGIN_FTR_SECTION	lwz	r0,THREAD+THREAD_VRSAVE(r2)	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)#endif /* CONFIG_ALTIVEC */#ifdef CONFIG_SPE	lwz	r0,THREAD+THREAD_SPEFSCR(r2)	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */#endif /* CONFIG_SPE */	lwz	r0,_CCR(r1)	mtcrf	0xFF,r0	/* r3-r12 are destroyed -- Cort */	REST_NVGPRS(r1)	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */	mtlr	r4	addi	r1,r1,INT_FRAME_SIZE	blr	.globl	sigreturn_exitsigreturn_exit:	subi	r1,r3,STACK_FRAME_OVERHEAD	rlwinm	r12,r1,0,0,18	/* current_thread_info() */	lwz	r9,TI_FLAGS(r12)	andi.	r0,r9,_TIF_SYSCALL_TRACE	bnel-	do_syscall_trace	/* fall through */	.globl	ret_from_except_fullret_from_except_full:	REST_NVGPRS(r1)	/* fall through */	.globl	ret_from_exceptret_from_except:	/* Hard-disable interrupts so that current_thread_info()->flags	 * can't change between when we test it and when we return	 * from the interrupt. */	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	SYNC			/* Some chip revs have problems here... */	MTMSRD(r10)		/* disable interrupts */	lwz	r3,_MSR(r1)	/* Returning to user mode? */	andi.	
r0,r3,MSR_PR	beq	resume_kerneluser_exc_return:		/* r10 contains MSR_KERNEL here */	/* Check current_thread_info()->flags */	rlwinm	r9,r1,0,0,18	lwz	r9,TI_FLAGS(r9)	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)	bne	do_workrestore_user:#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)	/* Check whether this process has its own DBCR0 value */	lwz	r0,PTRACE(r2)	andi.	r0,r0,PT_PTRACED	bnel-	load_dbcr0#endif#ifdef CONFIG_PREEMPT	b	restore/* N.B. the only way to get here is from the beq following ret_from_except. */resume_kernel:	/* check current_thread_info->preempt_count */	rlwinm	r9,r1,0,0,18	lwz	r0,TI_PREEMPT(r9)	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */	bne	restore	lwz	r0,TI_FLAGS(r9)	andi.	r0,r0,_TIF_NEED_RESCHED	beq+	restore	andi.	r0,r3,MSR_EE	/* interrupts off? */	beq	restore		/* don't schedule if so */1:	lis	r0,PREEMPT_ACTIVE@h	stw	r0,TI_PREEMPT(r9)	ori	r10,r10,MSR_EE	SYNC	MTMSRD(r10)		/* hard-enable interrupts */	bl	schedule	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	SYNC	MTMSRD(r10)		/* disable interrupts */	rlwinm	r9,r1,0,0,18	li	r0,0	stw	r0,TI_PREEMPT(r9)	lwz	r3,TI_FLAGS(r9)	andi.	r0,r3,_TIF_NEED_RESCHED	bne-	1b#elseresume_kernel:#endif /* CONFIG_PREEMPT */	/* interrupts are hard-disabled at this point */restore:	lwz	r0,GPR0(r1)	lwz	r2,GPR2(r1)	REST_4GPRS(3, r1)	REST_2GPRS(7, r1)	lwz	r10,_XER(r1)	lwz	r11,_CTR(r1)	mtspr	XER,r10	mtctr	r11	PPC405_ERR77(0,r1)	stwcx.	r0,0,r1			/* to clear the reservation */#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))	lwz	r9,_MSR(r1)	andi.	r10,r9,MSR_RI		/* check if this exception occurred */	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */	lwz	r10,_CCR(r1)	lwz	r11,_LINK(r1)	mtcrf	0xFF,r10	mtlr	r11	/*	 * Once we put values in SRR0 and SRR1, we are in a state	 * where exceptions are not recoverable, since taking an	 * exception will trash SRR0 and SRR1.  Therefore we clear the	 * MSR:RI bit to indicate this.  
If we do take an exception,	 * we can't return to the point of the exception but we	 * can restart the exception exit path at the label	 * exc_exit_restart below.  -- paulus	 */	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)	SYNC	MTMSRD(r10)		/* clear the RI bit */	.globl exc_exit_restartexc_exit_restart:	lwz	r9,_MSR(r1)	lwz	r12,_NIP(r1)	FIX_SRR1(r9,r10)	mtspr	SRR0,r12	mtspr	SRR1,r9	REST_4GPRS(9, r1)	lwz	r1,GPR1(r1)	.globl exc_exit_restart_endexc_exit_restart_end:	SYNC	RFI#else /* !(CONFIG_4xx || CONFIG_BOOKE) */	/*	 * This is a bit different on 4xx/Book-E because it doesn't have	 * the RI bit in the MSR.	 * The TLB miss handler checks if we have interrupted	 * the exception exit path and restarts it if so	 * (well maybe one day it will... :).	 */	lwz	r11,_LINK(r1)	mtlr	r11	lwz	r10,_CCR(r1)	mtcrf	0xff,r10	REST_2GPRS(9, r1)	.globl exc_exit_restartexc_exit_restart:	lwz	r11,_NIP(r1)	lwz	r12,_MSR(r1)exc_exit_start:	mtspr	SRR0,r11	mtspr	SRR1,r12	REST_2GPRS(11, r1)	lwz	r1,GPR1(r1)	.globl exc_exit_restart_endexc_exit_restart_end:	PPC405_ERR77_SYNC	rfi	b	.			/* prevent prefetch past rfi *//* * Returning from a critical interrupt in user mode doesn't need * to be any different from a normal exception.  For a critical * interrupt in the kernel, we just return (without checking for * preemption) since the interrupt may have happened at some crucial * place (e.g. inside the TLB miss handler), and because we will be * running with r1 pointing into critical_stack, not the current * process's kernel stack (and therefore current_thread_info() will * give the wrong answer). * We have to restore various SPRs that may have been in use at the * time of the critical interrupt. * * Note that SPRG6 is used for machine check on CONFIG_BOOKE parts and * thus not saved in the critical handler */	.globl	ret_from_crit_excret_from_crit_exc:	REST_NVGPRS(r1)	lwz	r3,_MSR(r1)	andi.	
r3,r3,MSR_PR	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	bne	user_exc_return	lwz	r0,GPR0(r1)	lwz	r2,GPR2(r1)	REST_4GPRS(3, r1)	REST_2GPRS(7, r1)	lwz	r10,_XER(r1)	lwz	r11,_CTR(r1)	mtspr	XER,r10	mtctr	r11	PPC405_ERR77(0,r1)	stwcx.	r0,0,r1			/* to clear the reservation */	lwz	r11,_LINK(r1)	mtlr	r11	lwz	r10,_CCR(r1)	mtcrf	0xff,r10#ifdef CONFIG_40x	/* avoid any possible TLB misses here by turning off MSR.DR, we	 * assume the instructions here are mapped by a pinned TLB entry */	li	r10,MSR_IR	mtmsr	r10	isync	tophys(r1, r1)#endif	lwz	r9,_DEAR(r1)	lwz	r10,_ESR(r1)	mtspr	SPRN_DEAR,r9	mtspr	SPRN_ESR,r10	lwz	r11,_NIP(r1)	lwz	r12,_MSR(r1)	mtspr	CSRR0,r11	mtspr	CSRR1,r12	lwz	r9,GPR9(r1)	lwz	r12,GPR12(r1)	BOOKE_SAVE_COR	BOOKE_LOAD_COR	lwz	r10,crit_sprg0@l(COR)	mtspr	SPRN_SPRG0,r10	lwz	r10,crit_sprg1@l(COR)	mtspr	SPRN_SPRG1,r10	lwz	r10,crit_sprg4@l(COR)	mtspr	SPRN_SPRG4,r10	lwz	r10,crit_sprg5@l(COR)	mtspr	SPRN_SPRG5,r10#ifdef CONFIG_40x	lwz	r10,crit_sprg6@l(COR)	mtspr	SPRN_SPRG6,r10#endif	lwz	r10,crit_sprg7@l(COR)	mtspr	SPRN_SPRG7,r10	lwz	r10,crit_srr0@l(COR)	mtspr	SRR0,r10	lwz	r10,crit_srr1@l(COR)	mtspr	SRR1,r10	lwz	r10,crit_pid@l(COR)	mtspr	SPRN_PID,r10	lwz	r10,GPR10(r1)	lwz	r11,GPR11(r1)	lwz	r1,GPR1(r1)	BOOKE_REST_COR	PPC405_ERR77_SYNC	rfci	b	.		/* prevent prefetch past rfci */#ifdef CONFIG_BOOKE/* * Return from a machine check interrupt, similar to a critical * interrupt. */	.globl	ret_from_mcheck_excret_from_mcheck_exc:	REST_NVGPRS(r1)	lwz	r3,_MSR(r1)	andi.	r3,r3,MSR_PR	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	bne	user_exc_return	lwz	r0,GPR0(r1)	lwz	r2,GPR2(r1)	REST_4GPRS(3, r1)	REST_2GPRS(7, r1)	lwz	r10,_XER(r1)	lwz	r11,_CTR(r1)	mtspr	XER,r10	mtctr	r11	stwcx.	
r0,0,r1			/* to clear the reservation */	lwz	r11,_LINK(r1)	mtlr	r11	lwz	r10,_CCR(r1)	mtcrf	0xff,r10	lwz	r9,_DEAR(r1)	lwz	r10,_ESR(r1)	mtspr	SPRN_DEAR,r9	mtspr	SPRN_ESR,r10	lwz	r11,_NIP(r1)	lwz	r12,_MSR(r1)	mtspr	MCSRR0,r11	mtspr	MCSRR1,r12	lwz	r9,GPR9(r1)	lwz	r12,GPR12(r1)	mtspr	SPRG6W,r8	lis	r8,mcheck_save@ha	lwz	r10,mcheck_sprg0@l(r8)	mtspr	SPRN_SPRG0,r10	lwz	r10,mcheck_sprg1@l(r8)	mtspr	SPRN_SPRG1,r10	lwz	r10,mcheck_sprg4@l(r8)	mtspr	SPRN_SPRG4,r10	lwz	r10,mcheck_sprg5@l(r8)	mtspr	SPRN_SPRG5,r10	lwz	r10,mcheck_sprg7@l(r8)	mtspr	SPRN_SPRG7,r10	lwz	r10,mcheck_srr0@l(r8)	mtspr	SRR0,r10	lwz	r10,mcheck_srr1@l(r8)	mtspr	SRR1,r10	lwz	r10,mcheck_csrr0@l(r8)	mtspr	CSRR0,r10	lwz	r10,mcheck_csrr1@l(r8)	mtspr	CSRR1,r10	lwz	r10,mcheck_pid@l(r8)	mtspr	SPRN_PID,r10	lwz	r10,GPR10(r1)	lwz	r11,GPR11(r1)	lwz	r1,GPR1(r1)	mfspr	r8,SPRG6R	RFMCI#endif /* CONFIG_BOOKE *//* * Load the DBCR0 value for a task that is being ptraced, * having first saved away the global DBCR0. */load_dbcr0:	mfmsr	r0		/* first disable debug exceptions */	rlwinm	r0,r0,0,~MSR_DE	mtmsr	r0	isync	mfspr	r10,SPRN_DBCR0	lis	r11,global_dbcr0@ha	addi	r11,r11,global_dbcr0@l	lwz	r0,THREAD+THREAD_DBCR0(r2)	stw	r10,0(r11)	mtspr	SPRN_DBCR0,r0	lwz	r10,4(r11)	addi	r10,r10,1	stw	r10,4(r11)	li	r11,-1	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */	blr	.comm	global_dbcr0,8#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */do_work:			/* r10 contains MSR_KERNEL here */	andi.	r0,r9,_TIF_NEED_RESCHED	beq	do_user_signaldo_resched:			/* r10 contains MSR_KERNEL here */	ori	r10,r10,MSR_EE	SYNC	MTMSRD(r10)		/* hard-enable interrupts */	bl	schedulerecheck:	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	SYNC	MTMSRD(r10)		/* disable interrupts */	rlwinm	r9,r1,0,0,18	lwz	r9,TI_FLAGS(r9)	andi.	r0,r9,_TIF_NEED_RESCHED	bne-	do_resched	andi.	
r0,r9,_TIF_SIGPENDING	beq	restore_userdo_user_signal:			/* r10 contains MSR_KERNEL here */	ori	r10,r10,MSR_EE	SYNC	MTMSRD(r10)		/* hard-enable interrupts */	/* save r13-r31 in the exception frame, if not already done */	lwz	r3,TRAP(r1)	andi.	r0,r3,1	beq	2f	SAVE_NVGPRS(r1)	rlwinm	r3,r3,0,0,30	stw	r3,TRAP(r1)2:	li	r3,0	addi	r4,r1,STACK_FRAME_OVERHEAD	bl	do_signal	REST_NVGPRS(r1)	b	recheck/* * We come here when we are at the end of handling an exception * that occurred at a place where taking an exception will lose * state information, such as the contents of SRR0 and SRR1. */nonrecoverable:	lis	r10,exc_exit_restart_end@ha	addi	r10,r10,exc_exit_restart_end@l	cmplw	r12,r10	bge	3f	lis	r11,exc_exit_restart@ha	addi	r11,r11,exc_exit_restart@l	cmplw	r12,r11	blt	3f	lis	r10,ee_restarts@ha	lwz	r12,ee_restarts@l(r10)	addi	r12,r12,1	stw	r12,ee_restarts@l(r10)	mr	r12,r11		/* restart at exc_exit_restart */	blr3:	/* OK, we can't recover, kill this process */	/* but the 601 doesn't implement the RI bit, so assume it's OK */BEGIN_FTR_SECTION	blrEND_FTR_SECTION_IFSET(CPU_FTR_601)	lwz	r3,TRAP(r1)	andi.	r0,r3,1	beq	4f	SAVE_NVGPRS(r1)	rlwinm	r3,r3,0,0,30	stw	r3,TRAP(r1)4:	addi	r3,r1,STACK_FRAME_OVERHEAD	bl	nonrecoverable_exception	/* shouldn't return */	b	4b	.comm	ee_restarts,4/* * PROM code for specific machines follows.  Put it * here so it's easy to add arch-specific sections later. * -- Cort */#ifdef CONFIG_PPC_OF/* * On CHRP, the Run-Time Abstraction Services (RTAS) have to be * called with the MMU off. 
*/_GLOBAL(enter_rtas)	stwu	r1,-INT_FRAME_SIZE(r1)	mflr	r0	stw	r0,INT_FRAME_SIZE+4(r1)	lis	r4,rtas_data@ha	lwz	r4,rtas_data@l(r4)	lis	r6,1f@ha	/* physical return address for rtas */	addi	r6,r6,1f@l	tophys(r6,r6)	tophys(r7,r1)	lis	r8,rtas_entry@ha	lwz	r8,rtas_entry@l(r8)	mfmsr	r9	stw	r9,8(r1)	LOAD_MSR_KERNEL(r0,MSR_KERNEL)	SYNC			/* disable interrupts so SRR0/1 */	MTMSRD(r0)		/* don't get trashed */	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)	mtlr	r6	CLR_TOP32(r7)	mtspr	SPRG2,r7	mtspr	SRR0,r8	mtspr	SRR1,r9	RFI1:	tophys(r9,r1)	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */	lwz	r9,8(r9)	/* original msr value */	FIX_SRR1(r9,r0)	addi	r1,r1,INT_FRAME_SIZE	li	r0,0	mtspr	SPRG2,r0	mtspr	SRR0,r8	mtspr	SRR1,r9	RFI			/* return to caller */	.globl	machine_check_in_rtasmachine_check_in_rtas:	twi	31,0,0	/* XXX load up BATs and panic */#endif /* CONFIG_PPC_OF */

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?