head.S
From "Linux Kernel 2.6.9 for OMAP1710" · assembly (.S) source · 1,711 lines total · page 1 of 3
	rlwinm.	r2,r2,0,31,31		/* Check for little endian access */
	rlwimi	r2,r2,1,30,30		/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	DAR,r1			/* Set fault address */
	mfmsr	r0			/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3			/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess

/*
 * Handle TLB miss for DATA Load operation on 603/603e
 */
	. = 0x1100
DataLoadTLBMiss:
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,DMISS
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRG3
	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
	mfspr	r1,SRR1			/* and MSR_PR bit from SRR1 */
	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r3,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r3		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r3,r3,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r3,0(r2)		/* update PTE (accessed bit) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r3,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r2,r3,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r1,r1,r2		/* writable if _RW and _DIRTY */
	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r3,r3,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe14		/* clear out reserved bits and M */
	andc	r1,r3,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
	mtspr	SPRN_RPA,r1
	mfspr	r3,DMISS
	tlbld	r3
	mfspr	r3,SRR1			/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
DataAddressInvalid:
	mfspr	r3,SRR1
	rlwinm	r1,r3,9,6,6		/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	DSISR,r1
	mtctr	r0			/* Restore CTR */
	andi.	r2,r3,0xFFFF		/* Clear upper bits of SRR1 */
	mtspr	SRR1,r2
	mfspr	r1,DMISS		/* Get failing address */
	rlwinm.	r2,r2,0,31,31		/* Check for little endian access */
	beq	20f			/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	DAR,r1			/* Set fault address */
	mfmsr	r0			/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3			/* Restore CR0 */
	mtmsr	r0
	b	DataAccess
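/*
 * Illustrative note (added for clarity, not part of the original
 * source): the "Convert linux-style PTE" sequence above derives the
 * 603's PP protection bits from the Linux PTE flags.  A rough C
 * sketch of the policy, with the encoding (0 = supervisor rw,
 * 2 = user rw, 3 = user ro) taken from the comments in the code:
 *
 *	unsigned int pp;
 *	if (!(pte & _PAGE_USER))
 *		pp = 0;			// kernel page: supervisor rw
 *	else if ((pte & _PAGE_RW) && (pte & _PAGE_DIRTY))
 *		pp = 2;			// user page, read/write
 *	else
 *		pp = 3;			// user page, read-only
 *
 * A writable but still-clean user page therefore maps read-only here;
 * the first store faults into the store handler below, which sets
 * _PAGE_DIRTY before granting write access.
 */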
/*
 * Handle TLB miss for DATA Store on 603/603e
 */
	. = 0x1200
DataStoreTLBMiss:
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,DMISS
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRG3
	li	r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT	/* access flags */
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
	mfspr	r1,SRR1			/* and MSR_PR bit from SRR1 */
	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r3,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r3		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r3,0(r2)		/* update PTE (accessed/dirty bits) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
	li	r1,0xe15		/* clear out reserved bits and M */
	andc	r1,r3,r1		/* PP = user? 2: 0 */
	mtspr	SPRN_RPA,r1
	mfspr	r3,DMISS
	tlbld	r3
	mfspr	r3,SRR1			/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi

#ifndef CONFIG_ALTIVEC
#define AltivecAssistException	UnknownException
#endif

	EXCEPTION(0x1300, Trap_13, InstructionBreakpoint, EXC_XFER_EE)
	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
	EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE)
#ifdef CONFIG_POWER4
	EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1700, Trap_17, AltivecAssistException, EXC_XFER_EE)
	EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD)
#else /* !CONFIG_POWER4 */
	EXCEPTION(0x1600, Trap_16, AltivecAssistException, EXC_XFER_EE)
	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
	EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE)
#endif /* CONFIG_POWER4 */
	EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
	EXCEPTION(0x2100, Trap_21, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2200, Trap_22, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2300, Trap_23, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2400, Trap_24, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2500, Trap_25, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2600, Trap_26, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2700, Trap_27, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2800, Trap_28, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2900, Trap_29, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2a00, Trap_2a, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2b00, Trap_2b, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2c00, Trap_2c, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2d00, Trap_2d, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2e00, Trap_2e, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2f00, MOLTrampoline, UnknownException, EXC_XFER_EE_LITE)

	.globl	mol_trampoline
	.set	mol_trampoline, i0x2f00

	. = 0x3000
AltiVecUnavailable:
	EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
	bne	load_up_altivec		/* if from user, just load it up */
#endif /* CONFIG_ALTIVEC */
	EXC_XFER_EE_LITE(0xf20, AltivecUnavailException)

#ifdef CONFIG_PPC64BRIDGE
DataAccess:
	EXCEPTION_PROLOG
	b	DataAccessCont

InstructionAccess:
	EXCEPTION_PROLOG
	b	InstructionAccessCont

DataSegment:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mfspr	r4,DAR
	stw	r4,_DAR(r11)
	EXC_XFER_STD(0x380, UnknownException)

InstructionSegment:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0x480, UnknownException)
#endif /* CONFIG_PPC64BRIDGE */
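/*
 * Illustrative note (added, not in the original source) on the lazy
 * FP switching implemented below for UP: a single global,
 * last_task_used_math, tracks whose state is live in the FPU.  In
 * rough C (the helper names are hypothetical; only
 * last_task_used_math appears in this file):
 *
 *	if (last_task_used_math)
 *		save_fp_state(&last_task_used_math->thread);
 *	restore_fp_state(&current->thread);
 *	last_task_used_math = current;
 *
 * On SMP the state is instead saved eagerly in switch_to() via
 * giveup_fpu, so the #ifndef CONFIG_SMP blocks below are skipped.
 */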
/*
 * This task wants to use the FPU now.
 * On UP, disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Load up this task's FP registers from its thread_struct,
 * enable the FPU for the current task and return to the task.
 */
load_up_fpu:
	mfmsr	r5
	ori	r5,r5,MSR_FP
#ifdef CONFIG_PPC64BRIDGE
	clrldi	r5,r5,1			/* turn off 64-bit mode */
#endif /* CONFIG_PPC64BRIDGE */
	SYNC
	MTMSRD(r5)			/* enable use of fpu now */
	isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 */
#ifndef CONFIG_SMP
	tophys(r6,0)			/* get __pa constant */
	addis	r3,r6,last_task_used_math@ha
	lwz	r4,last_task_used_math@l(r3)
	cmpwi	0,r4,0
	beq	1f
	add	r4,r4,r6
	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
	SAVE_32FPRS(0, r4)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR-4(r4)
	lwz	r5,PT_REGS(r4)
	add	r5,r5,r6
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r10,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r10		/* disable FP for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
	mfspr	r5,SPRG3		/* current task's THREAD (phys) */
	lwz	r4,THREAD_FPEXC_MODE(r5)
	ori	r9,r9,MSR_FP		/* enable FP for current */
	or	r9,r9,r4
	lfd	fr0,THREAD_FPSCR-4(r5)
	mtfsf	0xff,fr0
	REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	sub	r4,r4,r6
	stw	r4,last_task_used_math@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	/* we haven't used ctr or xer or lr */
	/* fall through to fast_exception_return */

	.globl	fast_exception_return
fast_exception_return:
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SRR1,r9
	mtspr	SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4			/* restart at exc_exit_restart */
	b	2b

	.comm	fee_restarts,4

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r10,MSR_KERNEL
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except

/*
 * FP unavailable trap from kernel - print a message, but let
 * the task use FP in the kernel until it returns to user mode.
 */
KernelFP:
	lwz	r3,_MSR(r1)
	ori	r3,r3,MSR_FP
	stw	r3,_MSR(r1)		/* enable use of FP after return */
	lis	r3,86f@h
	ori	r3,r3,86f@l
	mr	r4,r2			/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
86:	.string	"floating point used in kernel (task=%p, pc=%x)\n"
	.align	4,0
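/*
 * Illustrative note (added, not in the original source) on the
 * non-recoverable path ("1:") of fast_exception_return above, in
 * rough C (helper names here are hypothetical):
 *
 *	if (exc_exit_restart <= srr0 && srr0 < exc_exit_restart_end) {
 *		fee_restarts++;
 *		srr0 = exc_exit_restart;   // replay the interrupted exit
 *	} else {
 *		regs->trap = -1;           // flag as non-recoverable
 *		nonrecoverable_exception(regs);
 *	}
 *
 * The 601 has no MSR_RI bit, so the feature section at "3:" simply
 * assumes the interrupt is recoverable on that CPU.
 */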
#ifdef CONFIG_ALTIVEC
/* Note that the AltiVec support is closely modeled after the FP
 * support.  Changes to one are likely to be applicable to the
 * other!
 */
load_up_altivec:
/*
 * Disable AltiVec for the task which had AltiVec previously,
 * and save its AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 * On SMP we know the AltiVec units are free, since we give it up every
 * switch.  -- Kumar
 */
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync
/*
 * For SMP, we don't do lazy AltiVec switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 */
#ifndef CONFIG_SMP
	tophys(r6,0)
	addis	r3,r6,last_task_used_altivec@ha
	lwz	r4,last_task_used_altivec@l(r3)
	cmpwi	0,r4,0
	beq	1f
	add	r4,r4,r6
	addi	r4,r4,THREAD		/* want THREAD of last_task_used_altivec */
	SAVE_32VR(0,r10,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	lwz	r5,PT_REGS(r4)
	add	r5,r5,r6
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_VEC@h
	andc	r4,r4,r10		/* disable altivec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of AltiVec after return */
	oris	r9,r9,MSR_VEC@h
	mfspr	r5,SPRG3		/* current task's THREAD (phys) */
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VR(0,r10,r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	sub	r4,r4,r6
	stw	r4,last_task_used_altivec@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	/* we haven't used ctr or xer or lr */
	b	fast_exception_return

/*
 * AltiVec unavailable trap from kernel - print a message, but let
 * the task use AltiVec in the kernel until it returns to user mode.
 */
KernelAltiVec:
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_VEC@h
	stw	r3,_MSR(r1)		/* enable use of AltiVec after return */
	lis	r3,87f@h
	ori	r3,r3,87f@l
	mr	r4,r2			/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
87:	.string	"AltiVec used in kernel (task=%p, pc=%x) \n"
	.align	4,0

/*
 * giveup_altivec(tsk)
 * Disable AltiVec for the task given as the argument,
 * and save the AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 */
	.globl	giveup_altivec
giveup_altivec:
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync
	cmpwi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpwi	0,r5,0
	SAVE_32VR(0, r4, r3)
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3		/* disable AltiVec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_altivec@ha
	stw	r5,last_task_used_altivec@l(r4)
#endif /* CONFIG_SMP */
	blr
#endif /* CONFIG_ALTIVEC */

/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 */
	.globl	giveup_fpu
giveup_fpu:
	mfmsr	r5
	ori	r5,r5,MSR_FP
	SYNC_601
	ISYNC_601
	MTMSRD(r5)			/* enable use of fpu now */
	SYNC_601
	isync
	cmpwi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpwi	0,r5,0
	SAVE_32FPRS(0, r3)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR-4(r3)
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r3,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r3		/* disable FP for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_math@ha
	stw	r5,last_task_used_math@l(r4)
#endif /* CONFIG_SMP */
	blr
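/*
 * Illustrative note (added, not in the original source):
 * giveup_fpu/giveup_altivec above are the eager half of the lazy
 * switching scheme.  Rough C equivalent for the FP case (function
 * names here are hypothetical):
 *
 *	void giveup_fpu_sketch(struct task_struct *tsk)
 *	{
 *		enable_fp_in_msr();
 *		if (!tsk)
 *			return;
 *		save_fp_state(&tsk->thread);
 *		if (tsk->thread.regs)
 *			tsk->thread.regs->msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
 *		last_task_used_math = NULL;	// UP only
 *	}
 */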
/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address 0.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	li	r3,0			/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
copy_and_flush:
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_LINE_SIZE/4
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr

#ifdef CONFIG_APUS
/*
 * On APUS the physical base address of the kernel is not known at compile
 * time, which means the __pa/__va constants used are incorrect. In the
 * __init section is recorded the virtual addresses of instructions using
 * these constants, so all that has to be done is fix these before
 * continuing the kernel boot.
 *
 * r4 = The physical address of the kernel base.
 */
fix_mem_constants:
	mr	r10,r4
	addis	r10,r10,-KERNELBASE@h	/* virt_to_phys constant */
	neg	r11,r10			/* phys_to_virt constant */

	lis	r12,__vtop_table_begin@h
	ori	r12,r12,__vtop_table_begin@l
	add	r12,r12,r10		/* table begin phys address */
	lis	r13,__vtop_table_end@h
	ori	r13,r13,__vtop_table_end@l
	add	r13,r13,r10		/* table end phys address */
	subi	r12,r12,4
	subi	r13,r13,4
1:	lwzu	r14,4(r12)		/* virt address of instruction */
	add	r14,r14,r10		/* phys address of instruction */
	lwz	r15,0(r14)		/* instruction, now insert top */
	rlwimi	r15,r10,16,16,31	/* half of vp const in low half */
	stw	r15,0(r14)		/* of instruction and restore. */
	dcbst	r0,r14			/* write it to memory */
	sync
	icbi	r0,r14			/* flush the icache line */
	cmpw	r12,r13
	bne	1b
	sync				/* additional sync needed on g4 */
	isync

/*
 * Map the memory where the exception handlers will
 * be copied to when hash constants have been patched.
 */
#ifdef CONFIG_APUS_FAST_EXCEPT
	lis	r8,0xfff0
#else
	lis	r8,0