entry_32.s
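/*
 * Context switch: save the old task's non-volatile state in its stack
 * frame, switch kernel stacks, then restore the new task's state.  As
 * the code below shows, r3 arrives holding the old task's THREAD, r4
 * the new task's THREAD, and the old 'current' is handed back in r3 as
 * the 'last' value.  A rough C-level sketch of the call (hypothetical
 * prototype, for orientation only, not the kernel's declaration):
 *
 *	struct task_struct *_switch(struct thread_struct *prev,
 *				    struct thread_struct *next);
 */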
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */
#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */
	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0	/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0	/* restore SPEFSCR reg */
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.comm	fee_restarts,4

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	sigreturn_exit
sigreturn_exit:
	subi	r1,r3,STACK_FRAME_OVERHEAD
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
	beq+	ret_from_except_full
	bl	do_syscall_trace_leave
	/* fall through */

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
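	/*
	 * A note on the current_thread_info() idiom used below: on ppc32
	 * the thread_info sits at the base of the kernel stack, so
	 * clearing the low THREAD_SHIFT bits of r1 yields its address.
	 * A C-level sketch, assuming THREAD_SIZE == (1 << THREAD_SHIFT):
	 *
	 *	ti = (struct thread_info *)
	 *		((unsigned long)sp & ~(THREAD_SIZE - 1));
	 *
	 * which is what "rlwinm r9,r1,0,0,(31-THREAD_SHIFT)" computes.
	 */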
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */
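/*
 * Book-E parts bank SRR0/SRR1 per exception level, as the uses of the
 * macro below show: critical interrupts save state in CSRR0/CSRR1 and
 * return with rfci, debug exceptions use DSRR0/DSRR1 and rfdi, and
 * machine checks use MCSRR0/MCSRR1 and rfmci.  The common exit code is
 * therefore a macro parameterised on the SRR pair and return opcode.
 */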
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we     \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

#ifdef CONFIG_BOOKE
	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.comm	global_dbcr0,8
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
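/*
 * The range check below appears to follow the same convention as the
 * fixup in fast_exception_return: r12 holds the NIP of the interrupted
 * context.  If it lies inside [exc_exit_restart, exc_exit_restart_end)
 * the exit sequence is simply restarted from exc_exit_restart (and the
 * ee_restarts counter bumped); any other address is unrecoverable and
 * the process is killed via nonrecoverable_exception.
 */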
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.comm	ee_restarts,4

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOADADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG2,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG2,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */
#endif /* CONFIG_PPC_RTAS */
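/*
 * Note on enter_rtas above: RTAS runs with the MMU off, so the RFI
 * loads SRR1 with MSR_KERNEL & ~(MSR_IR|MSR_DR), and the return
 * address (r6) and the saved stack pointer (r7, stashed in SPRG2) are
 * converted to physical addresses with tophys() beforehand.
 */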