head.s
        add     r8,r9,r10
        mfspr   r9,SIAR
        std     r9,0(r8)
        mfspr   r9,SDAR
        std     r9,8(r8)
        addi    r10,r7,PACAPMC1
        addi    r7,r7,PACAPMCC1
        b       7f

        /* PMC Trace User */
9:      LOADADDR(r11, perfmon_base)
#if 0
        addi    r8,r11,32
        ld      r12,24(r11)
        subi    r12,r12,1
8:      ldarx   r10,0,r8
        addi    r9,r10,16
        and     r9,r9,r12
        stdcx.  r9,0,r8
        bne-    8b
        ld      r9,16(r11)              /* profile buffer */
        add     r8,r9,r10
        mfspr   r9,SIAR
        std     r9,0(r8)
        mfspr   r9,SDAR
        std     r9,8(r8)
        addi    r10,r13,THREAD+THREAD_PMC1
        addi    r7,r13,THREAD+THREAD_PMCC1
#endif
        b       7f

        /* PMC Timeslice */
10:     addi    r10,r7,PACAPMC1
        addi    r7,r7,PACAPMCC1
        b       7f

        /* Accumulate counter values for kernel traces */
7:      ld      r9,0(r7)
        mfspr   r8,PMC1
        add     r9,r9,r8
        std     r9,0(r7)
        ld      r9,8(r7)
        mfspr   r8,PMC2
        add     r9,r9,r8
        std     r9,8(r7)
        ld      r9,16(r7)
        mfspr   r8,PMC3
        add     r9,r9,r8
        std     r9,16(r7)
        ld      r9,24(r7)
        mfspr   r8,PMC4
        add     r9,r9,r8
        std     r9,24(r7)
        ld      r9,32(r7)
        mfspr   r8,PMC5
        add     r9,r9,r8
        std     r9,32(r7)
        ld      r9,40(r7)
        mfspr   r8,PMC6
        add     r9,r9,r8
        std     r9,40(r7)
        ld      r9,48(r7)
        mfspr   r8,PMC7
        add     r9,r9,r8
        std     r9,48(r7)
        ld      r9,56(r7)
        mfspr   r8,PMC8
        add     r9,r9,r8
        std     r9,56(r7)

        /* Reset all counters for kernel traces */
        ld      r9,0(r10)
        mtspr   PMC1,r9
        ld      r9,8(r10)
        mtspr   PMC2,r9
        ld      r9,16(r10)
        mtspr   PMC3,r9
        ld      r9,24(r10)
        mtspr   PMC4,r9
        ld      r9,32(r10)
        mtspr   PMC5,r9
        ld      r9,40(r10)
        mtspr   PMC6,r9
        ld      r9,48(r10)
        mtspr   PMC7,r9
        ld      r9,56(r10)
        mtspr   PMC8,r9
        ld      r9,64(r10)
        mtspr   MMCR0,r9
        ld      r9,72(r10)
        mtspr   MMCR1,r9
        ld      r9,80(r10)
        mtspr   MMCRA,r9

        blr
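
/*
 * A rough C sketch of the accumulate/reset sequence at label 7 above
 * (not from this file).  pmcc[] stands for the eight 64-bit accumulators
 * r7 points at, init[] for the reset block r10 points at (eight counter
 * reload values followed by MMCR0, MMCR1 and MMCRA); read_pmc(),
 * write_pmc() and the write_mmcr*() helpers are hypothetical SPR
 * accessors:
 *
 *      for (int i = 0; i < 8; i++)
 *              pmcc[i] += read_pmc(i);         // accumulate PMC1..PMC8
 *      for (int i = 0; i < 8; i++)
 *              write_pmc(i, init[i]);          // reload the counters
 *      write_mmcr0(init[8]);                   // then the control regs
 *      write_mmcr1(init[9]);
 *      write_mmcra(init[10]);
 */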
_GLOBAL(do_hash_page_ISI)
        li      r4,0
_GLOBAL(do_hash_page_DSI)
        rlwimi  r4,r23,32-13,30,30      /* Insert MSR_PR as _PAGE_USER */
        ori     r4,r4,1                 /* add _PAGE_PRESENT */
        mflr    r21                     /* Save LR in r21 */

#ifdef DO_SOFT_DISABLE
        /*
         * We hard enable here (but first soft disable) so that the hash_page
         * code can spin on the hash_table_lock without problem on a shared
         * processor.
         */
        li      r0,0
        stb     r0,PACAPROCENABLED(r20) /* Soft Disabled */

        mfmsr   r0
        ori     r0,r0,MSR_EE+MSR_RI
        mtmsrd  r0                      /* Hard Enable, RI on */
#endif

        /*
         * r3 contains the faulting address
         * r4 contains the required access permissions
         * r5 contains the trap number
         *
         * at return r3 = 0 for success
         */
        bl      .hash_page              /* build HPTE if possible */

#ifdef DO_SOFT_DISABLE
        /*
         * Now go back to hard disabled.
         */
        mfmsr   r0
        li      r4,0
        ori     r4,r4,MSR_EE+MSR_RI
        andc    r0,r0,r4
        mtmsrd  r0                      /* Hard Disable, RI off */

        ld      r0,SOFTE(r1)
        cmpdi   0,r0,0                  /* See if we will soft enable in */
                                        /* save_remaining_regs */
        beq     5f
        CHECKANYINT(r4,r5)
        bne-    HardwareInterrupt_entry /* Convert this DSI into an External */
                                        /* to process interrupts which occurred */
                                        /* during hash_page */
5:      stb     r0,PACAPROCENABLED(r20) /* Restore soft enable/disable status */
#endif

        or.     r3,r3,r3                /* Check return code */
        beq     fast_exception_return   /* Return from exception on success */
        mtlr    r21                     /* restore LR */
        blr                             /* Return to DSI or ISI on failure */

/*
 * r20 points to the PACA, r21 to the exception frame,
 * r23 contains the saved CR.
 * r20 - r23, SRR0 and SRR1 are saved in the exception frame.
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(do_stab_bolted)
        stw     r23,EX_CCR(r21)         /* save CR in exc. frame */

        /* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60) */
        mfspr   r21,DAR
        rldicl  r20,r21,36,32           /* Permits a full 32b of ESID */
        rldicr  r20,r20,15,48
        rldicl  r21,r21,4,60
        or      r20,r20,r21

        li      r21,9                   /* VSID_RANDOMIZER */
        sldi    r21,r21,32
        oris    r21,r21,58231
        ori     r21,r21,39831

        mulld   r20,r20,r21
        clrldi  r20,r20,28              /* r20 = vsid */

        mfsprg  r21,3
        ld      r21,PACASTABVIRT(r21)

        /* Hash to the primary group */
        mfspr   r22,DAR
        rldicl  r22,r22,36,59
        rldicr  r22,r22,7,56
        or      r21,r21,r22             /* r21 = first ste of the group */

        /* Search the primary group for a free entry */
        li      r22,0
1:      ld      r23,0(r21)              /* Test valid bit of the current ste */
        rldicl  r23,r23,57,63
        cmpwi   r23,0
        bne     2f
        ld      r23,8(r21)              /* Get the current vsid part of the ste */
        rldimi  r23,r20,12,0            /* Insert the new vsid value */
        std     r23,8(r21)              /* Put new entry back into the stab */
        eieio                           /* Order vsid update */
        ld      r23,0(r21)              /* Get the esid part of the ste */
        mfspr   r20,DAR                 /* Get the new esid */
        rldicl  r20,r20,36,28           /* Permits a full 36b of ESID */
        rldimi  r23,r20,28,0            /* Insert the new esid value */
        ori     r23,r23,144             /* Turn on valid and kp */
        std     r23,0(r21)              /* Put new entry back into the stab */
        sync                            /* Order the update */
        b       3f
2:      addi    r22,r22,1
        addi    r21,r21,16
        cmpldi  r22,7
        ble     1b

        /* Stick to only searching the primary group for now. */
        /* At least for now, we use a very simple random castout scheme */
        /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
        mftb    r22
        andi.   r22,r22,7
        ori     r22,r22,1
        sldi    r22,r22,4

        /* r21 currently points to an ste one past the group of interest */
        /* make it point to the randomly selected entry */
        subi    r21,r21,128
        or      r21,r21,r22             /* r21 is the entry to invalidate */

        isync                           /* mark the entry invalid */
        ld      r23,0(r21)
        li      r22,-129
        and     r23,r23,r22
        std     r23,0(r21)
        sync

        ld      r23,8(r21)
        rldimi  r23,r20,12,0
        std     r23,8(r21)
        eieio

        ld      r23,0(r21)              /* Get the esid part of the ste */
        mr      r22,r23
        mfspr   r20,DAR                 /* Get the new esid */
        rldicl  r20,r20,36,28           /* Permits a full 36b of ESID */
        rldimi  r23,r20,28,0            /* Insert the new esid value */
        ori     r23,r23,144             /* Turn on valid and kp */
        std     r23,0(r21)              /* Put new entry back into the stab */

        rldicl  r22,r22,36,28
        rldicr  r22,r22,28,35
        slbie   r22
        sync

3:      /* All done -- return from exception. */
        mfsprg  r20,3                   /* Load the PACA pointer */
        ld      r21,PACAEXCSP(r20)      /* Get the exception frame pointer */
        addi    r21,r21,EXC_FRAME_SIZE
        lwz     r23,EX_CCR(r21)         /* get saved CR */
        /* note that this is almost identical to maskable_exception_exit */
        mtcr    r23                     /* restore CR */

        mfmsr   r22
        li      r23,MSR_RI
        andc    r22,r22,r23
        mtmsrd  r22,1

        ld      r22,EX_SRR0(r21)        /* Get SRR0 from exc. frame */
        ld      r23,EX_SRR1(r21)        /* Get SRR1 from exc. frame */
        mtspr   SRR0,r22
        mtspr   SRR1,r23
        ld      r22,EX_R22(r21)         /* restore r22 and r23 */
        ld      r23,EX_R23(r21)
        mfspr   r20,SPRG2
        mfspr   r21,SPRG1
        rfid
_TRACEBACK(do_stab_bolted)
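
/*
 * A rough C sketch of the DAR-to-VSID computation above, which
 * do_slb_bolted below repeats (not from this file; ea_to_vsid is a
 * hypothetical name).  Note the formula comment in the code says
 * "& 0x1fff", but the rldicl actually keeps a full 32 bits of ESID:
 *
 *      #define VSID_RANDOMIZER 0x9e3779b97UL   // 9<<32 | 58231<<16 | 39831
 *      #define VSID_MASK       0xfffffffffUL   // low 36 bits (clrldi ..,28)
 *
 *      unsigned long ea_to_vsid(unsigned long ea)
 *      {
 *              unsigned long ordinal = (((ea >> 28) & 0xffffffffUL) << 15)
 *                                      | (ea >> 60);
 *              return (ordinal * VSID_RANDOMIZER) & VSID_MASK;
 *      }
 */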
/*
 * r20 points to the PACA, r21 to the exception frame,
 * r23 contains the saved CR.
 * r20 - r23, SRR0 and SRR1 are saved in the exception frame.
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(do_slb_bolted)
        stw     r23,EX_CCR(r21)         /* save CR in exc. frame */

        /* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60) */
        mfspr   r21,DAR
        rldicl  r20,r21,36,32           /* Permits a full 32b of ESID */
        rldicr  r20,r20,15,48
        rldicl  r21,r21,4,60
        or      r20,r20,r21

        li      r21,9                   /* VSID_RANDOMIZER */
        sldi    r21,r21,32
        oris    r21,r21,58231
        ori     r21,r21,39831

        mulld   r20,r20,r21
        clrldi  r20,r20,28              /* r20 = vsid */

        /* Search the SLB for a free entry */
        li      r22,1
1:      slbmfee r23,r22
        rldicl  r23,r23,37,63
        cmpwi   r23,0
        beq     4f                      /* Found an invalid entry */

        addi    r22,r22,1
        cmpldi  r22,64
        blt     1b

        /* No free entry - just take the next entry, round-robin */
        /* XXX we should get the number of SLB entries from the naca */
SLB_NUM_ENTRIES = 64
2:      mfspr   r21,SPRG3
        ld      r22,PACASTABRR(r21)
        addi    r23,r22,1
        cmpdi   r23,SLB_NUM_ENTRIES
        blt     3f
        li      r23,1
3:      std     r23,PACASTABRR(r21)

        /* r20 = vsid, r22 = entry */

        /*
         * Never cast out the segment for our kernel stack. Since we
         * don't invalidate the ERAT we could have a valid translation
         * for the kernel stack during the first part of exception exit
         * which gets invalidated due to a tlbie from another cpu at a
         * non recoverable point (after setting srr0/1) - Anton
         */
        slbmfee r23,r22
        srdi    r23,r23,28

        /*
         * This is incorrect (r1 is not the kernel stack) if we entered
         * from userspace but there is no critical window from userspace
         * so this should be OK. Also if we cast out the userspace stack
         * segment while in userspace we will fault it straight back in.
         */
        srdi    r21,r1,28
        cmpd    r21,r23
        beq-    2b

4:      /* Put together the vsid portion of the entry. */
        li      r21,0
        rldimi  r21,r20,12,0
        ori     r20,r21,1024
#ifndef CONFIG_PPC_ISERIES
        ori     r20,r20,256             /* map kernel region with large ptes */
#endif

        /* Invalidate the old entry */
        slbmfee r21,r22
        lis     r23,-2049
        ori     r23,r23,65535
        and     r21,r21,r23
        slbie   r21

        /* Put together the esid portion of the entry. */
        mfspr   r21,DAR                 /* Get the new esid */
        rldicl  r21,r21,36,28           /* Permits a full 36b of ESID */
        li      r23,0
        rldimi  r23,r21,28,0            /* Insert esid */
        oris    r21,r23,2048            /* valid bit */
        rldimi  r21,r22,0,52            /* Insert entry */

        isync
        slbmte  r20,r21
        isync

        /* All done -- return from exception. */
        mfsprg  r20,3                   /* Load the PACA pointer */
        ld      r21,PACAEXCSP(r20)      /* Get the exception frame pointer */
        addi    r21,r21,EXC_FRAME_SIZE
        lwz     r23,EX_CCR(r21)         /* get saved CR */
        /* note that this is almost identical to maskable_exception_exit */
        mtcr    r23                     /* restore CR */

        mfmsr   r22
        li      r23,MSR_RI
        andc    r22,r22,r23
        mtmsrd  r22,1

        ld      r22,EX_SRR0(r21)        /* Get SRR0 from exc. frame */
        ld      r23,EX_SRR1(r21)        /* Get SRR1 from exc. frame */
        mtspr   SRR0,r22
        mtspr   SRR1,r23
        ld      r22,EX_R22(r21)         /* restore r22 and r23 */
        ld      r23,EX_R23(r21)
        mfspr   r20,SPRG2
        mfspr   r21,SPRG1
        rfid
_TRACEBACK(do_slb_bolted)
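
/*
 * A rough C sketch of do_slb_bolted's victim selection above (not from
 * this file).  paca->stab_rr stands for the field PACASTABRR addresses;
 * slb_entry_is_valid() and slb_entry_esid() are hypothetical wrappers
 * around slbmfee:
 *
 *      int pick_slb_victim(struct paca *paca, unsigned long kstack_esid)
 *      {
 *              // Prefer an invalid entry; entry 0 is bolted, skip it.
 *              for (int i = 1; i < SLB_NUM_ENTRIES; i++)
 *                      if (!slb_entry_is_valid(i))
 *                              return i;
 *              // Otherwise go round-robin, but never evict the entry
 *              // that maps the current kernel stack.
 *              int victim;
 *              do {
 *                      victim = paca->stab_rr;
 *                      paca->stab_rr = (victim + 1 < SLB_NUM_ENTRIES)
 *                                      ? victim + 1 : 1;
 *              } while (slb_entry_esid(victim) == kstack_esid);
 *              return victim;
 *      }
 */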
_GLOBAL(do_stab_SI)
        mflr    r21                     /* Save LR in r21 */

        /*
         * r3 contains the faulting address
         * r4 contains the required access permissions
         *
         * at return r3 = 0 for success
         */
        bl      .ste_allocate           /* build STE if possible */
        or.     r3,r3,r3                /* Check return code */
        beq     fast_exception_return   /* Return from exception on success */
        mtlr    r21                     /* restore LR */
        blr                             /* Return to DSI or ISI on failure */

/*
 * This code finishes saving the registers to the exception frame.
 * Address translation is already on.
 */
_GLOBAL(save_remaining_regs)
        /*
         * Save the rest of the registers into the pt_regs structure
         */
        std     r22,_NIP(r1)
        std     r23,_MSR(r1)
        std     r6,TRAP(r1)
        ld      r6,GPR6(r1)
        SAVE_2GPRS(14, r1)
        SAVE_4GPRS(16, r1)
        SAVE_8GPRS(24, r1)

        /*
         * Clear the RESULT field
         */
        li      r22,0
        std     r22,RESULT(r1)

        /*
         * Test if from user state; result will be tested later
         */
        andi.   r23,r23,MSR_PR          /* Set CR for later branch */

        /*
         * Indicate that r1 contains the kernel stack and
         * get the Kernel TOC and CURRENT pointers from the paca
         */
        std     r22,PACAKSAVE(r13)      /* r1 is now kernel sp */
        ld      r2,PACATOC(r13)         /* Get Kernel TOC pointer */

        /*
         * If from user state, update THREAD.regs
         */
        beq     2f                      /* Modify THREAD.regs if from user */
        addi    r24,r1,STACK_FRAME_OVERHEAD
        ld      r22,PACACURRENT(r13)
        std     r24,THREAD+PT_REGS(r22)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        mfspr   r24,SPRN_VRSAVE         /* save vrsave register value */
        std     r24,THREAD+THREAD_VRSAVE(r22)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
2:
        SET_REG_TO_CONST(r22, MSR_KERNEL)

#ifdef DO_SOFT_DISABLE
        stb     r20,PACAPROCENABLED(r13)        /* possibly soft enable */
        ori     r22,r22,MSR_EE          /* always hard enable */
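
/*
 * A rough sketch of the DO_SOFT_DISABLE scheme used above (not from this
 * file): the kernel leaves external interrupts hard-enabled in the MSR
 * wherever it safely can and tracks the logical state in the per-cpu
 * PACAPROCENABLED byte instead; an interrupt event that arrives while
 * that byte is zero is only noted, and is processed once the code soft
 * enables again (see the CHECKANYINT path in do_hash_page_DSI above).
 * In effect:
 *
 *      paca->proc_enabled = soft_enabled;      // what the kernel believes
 *      msr |= MSR_EE;                          // hardware stays enabled
 */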