📄 entry_64.s
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack.  */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align 7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	4f

	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACA+LPPACAANYINT(r13)
	cmpdi	r3,0
	beq+	4f			/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACAPROCENABLED(r13)	/* ensure we are soft-disabled */
	ori	r10,r10,MSR_EE
	mtmsrd	r10			/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite	/* loop back and handle more */

4:	stb	r5,PACAPROCENABLED(r13)
#endif
	ld	r3,_MSR(r1)
	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	andi.	r0,r3,MSR_PR

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	beq	1f
	REST_GPR(13, r1)
1:
	ld	r3,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r3
	mtlr	r0
	ld	r3,_XER(r1)
	mtspr	SPRN_XER,r3

	REST_8GPRS(5, r1)

	stdcx.	r0,0,r1		/* to clear the reservation */

	mfmsr	r0
	li	r2, MSR_RI
	andc	r0,r0,r2
	mtmsrd	r0,1

	ld	r0,_MSR(r1)
	mtspr	SPRN_SRR1,r0

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
#ifdef CONFIG_PPC_ISERIES
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
#else
	andi.	r0,r3,MSR_EE
#endif
	crandc	eq,cr1*4+eq,eq
	bne	restore
	/* here we are preempting the current task */
1:
#ifdef CONFIG_PPC_ISERIES
	li	r0,1
	stb	r0,PACAPROCENABLED(r13)
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)

	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	mfmsr	r6
	andi.	r0,r6,MSR_EE
1:	tdnei	r0,0
.section __bug_table,"a"
	.llong	1b,__LINE__ + 0x1000000, 1f, 2f
.previous
.section .rodata,"a"
1:	.asciz	__FILE__
2:	.asciz	"enter_rtas"
.previous

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	SET_REG_TO_LABEL(r4,.rtas_return_loc)
	SET_REG_TO_CONST(r9,KERNELBASE)
	sub	r4,r4,r9
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	SET_REG_TO_LABEL(r4,rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3		/* Get PACA */
	SET_REG_TO_CONST(r5, KERNELBASE)
	sub	r4,r4,r5		/* RELOC the PACA base pointer */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	LOADADDR(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_PPC_MULTIPLATFORM

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

	/* Switch MSR to 32 bits mode
	 */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Just make sure that r1 top 32 bits didn't get
	 * corrupt by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#endif /* CONFIG_PPC_MULTIPLATFORM */
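
A note on the _switch epilogue near the top of this listing: `addi r3,r3,-THREAD` turns the old thread_struct pointer back into a pointer to its enclosing task_struct, since THREAD is the asm-offsets constant for offsetof(struct task_struct, thread). The standalone C sketch below shows the same pointer arithmetic; the toy struct layouts are purely illustrative stand-ins, not the real kernel definitions.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for task_struct/thread_struct, only to show the arithmetic. */
struct thread_struct { unsigned long ksp; };
struct task_struct {
	long state;
	struct thread_struct thread;	/* at offset "THREAD" inside task_struct */
};

int main(void)
{
	struct task_struct task = { .state = 0, .thread = { .ksp = 0x1000 } };
	struct thread_struct *thr = &task.thread;

	/* Equivalent of "addi r3,r3,-THREAD": subtract the offset of .thread
	 * to recover the task_struct that contains this thread_struct. */
	struct task_struct *recovered =
		(struct task_struct *)((char *)thr - offsetof(struct task_struct, thread));

	assert(recovered == &task);
	printf("thread %p -> task %p\n", (void *)thr, (void *)recovered);
	return 0;
}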
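
Similarly, the `rldicl r9,r10,48,1` / `rotldi r9,r9,16` pair in ret_from_except_lite clears only MSR_EE: rotating the MSR left by 48 bits moves the EE bit (bit 48, i.e. 0x8000) into the most-significant position, the rldicl mask drops that bit, and the final rotate by 16 restores the original layout. The small user-space C program below checks that equivalence; rotl64 is a local helper written for this sketch and the sample MSR value is arbitrary.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Rotate a 64-bit value left by n bits (0 < n < 64). */
static uint64_t rotl64(uint64_t x, unsigned int n)
{
	return (x << n) | (x >> (64 - n));
}

int main(void)
{
	const uint64_t MSR_EE = 1ULL << 15;	/* external interrupt enable */
	uint64_t msr = 0x8000000000009032ULL;	/* arbitrary sample MSR value */

	/* rldicl r9,r10,48,1: rotate left 48, then clear the new MSB */
	uint64_t r9 = rotl64(msr, 48) & ~(1ULL << 63);
	/* rotldi r9,r9,16: rotate the remaining 16 bits, restoring the layout */
	r9 = rotl64(r9, 16);

	assert(r9 == (msr & ~MSR_EE));		/* net effect: only MSR_EE cleared */
	printf("%#018llx -> %#018llx\n",
	       (unsigned long long)msr, (unsigned long long)r9);
	return 0;
}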