head.S
From "Low-level Driver Development" · assembly (.S) source · 2,012 lines total · page 1 of 4
	.align	7
_GLOBAL(do_stab_bolted_pSeries)
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)

/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
system_reset_fwnmi:
	HMT_MEDIUM
	mtspr	SPRG1,r13		/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)

	.globl machine_check_fwnmi
machine_check_fwnmi:
	HMT_MEDIUM
	mtspr	SPRG1,r13		/* save r13 */
	RUNLATCH_ON(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

#ifdef CONFIG_PPC_ISERIES
/***  ISeries-LPAR interrupt handlers ***/

	STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)

	.globl data_access_iSeries
data_access_iSeries:
	mtspr	SPRG1,r13
BEGIN_FTR_SECTION
	mtspr	SPRG2,r12
	mfspr	r13,DAR
	mfspr	r12,DSISR
	srdi	r13,r13,60
	rlwimi	r13,r12,16,0x20
	mfcr	r12
	cmpwi	r13,0x2c
	beq	.do_stab_bolted_iSeries
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
	EXCEPTION_PROLOG_ISERIES_2
	b	data_access_common

.do_stab_bolted_iSeries:
	mtcrf	0x80,r12
	mfspr	r12,SPRG2
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	EXCEPTION_PROLOG_ISERIES_2
	b	.do_stab_bolted

	.globl data_access_slb_iSeries
data_access_slb_iSeries:
	mtspr	SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mfspr	r3,DAR
	b	.do_slb_miss

	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)

	.globl instruction_access_slb_iSeries
instruction_access_slb_iSeries:
	mtspr	SPRG1,r13		/* save r13 */
	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
	std	r3,PACA_EXSLB+EX_R3(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	ld	r3,PACALPPACA+LPPACASRR0(r13)
	b	.do_slb_miss

	MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
	MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
	STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
	STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)

	.globl system_call_iSeries
system_call_iSeries:
	mr	r9,r13
	mfspr	r13,SPRG3
	EXCEPTION_PROLOG_ISERIES_2
	b	system_call_common

	STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
	STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
	STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)

	.globl system_reset_iSeries
system_reset_iSeries:
	mfspr	r13,SPRG3		/* Get paca address */
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
	lhz	r24,PACAPACAINDEX(r13)	/* Get processor # */
	cmpwi	0,r24,0			/* Are we processor 0? */
	beq	.__start_initialization_iSeries	/* Start up the first processor */
	mfspr	r4,SPRN_CTRLF
	li	r5,CTRL_RUNLATCH	/* Turn off the run light */
	andc	r4,r4,r5
	mtspr	SPRN_CTRLT,r4

1:
	HMT_LOW
#ifdef CONFIG_SMP
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor
					 * should start */
	sync
	LOADADDR(r3,current_set)
	sldi	r28,r24,3		/* get current_set[cpu#] */
	ldx	r3,r3,r28
	addi	r1,r3,THREAD_SIZE
	subi	r1,r1,STACK_FRAME_OVERHEAD

	cmpwi	0,r23,0
	beq	iSeries_secondary_smp_loop	/* Loop until told to go */
	bne	.__secondary_start		/* Loop until told to go */
iSeries_secondary_smp_loop:
	/* Let the Hypervisor know we are alive */
	/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
	lis	r3,0x8002
	rldicr	r3,r3,32,15		/* r0 = (r3 << 32) & 0xffff000000000000 */
#else /* CONFIG_SMP */
	/* Yield the processor.  This is required for non-SMP kernels
	   which are running on multi-threaded machines.
	 */
	lis	r3,0x8000
	rldicr	r3,r3,32,15		/* r3 = (r3 << 32) & 0xffff000000000000 */
	addi	r3,r3,18		/* r3 = 0x8000000000000012 which is "yield" */
	li	r4,0			/* "yield timed" */
	li	r5,-1			/* "yield forever" */
#endif /* CONFIG_SMP */
	li	r0,-1			/* r0=-1 indicates a Hypervisor call */
	sc				/* Invoke the hypervisor via a system call */
	mfspr	r13,SPRG3		/* Put r13 back ???? */
	b	1b			/* If SMP not configured, secondaries
					 * loop forever */

	.globl decrementer_iSeries_masked
decrementer_iSeries_masked:
	li	r11,1
	stb	r11,PACALPPACA+LPPACADECRINT(r13)
	lwz	r12,PACADEFAULTDECR(r13)
	mtspr	SPRN_DEC,r12
	/* fall through */

	.globl hardware_interrupt_iSeries_masked
hardware_interrupt_iSeries_masked:
	mtcrf	0x80,r9			/* Restore regs */
	ld	r11,PACALPPACA+LPPACASRR0(r13)
	ld	r12,PACALPPACA+LPPACASRR1(r13)
	mtspr	SRR0,r11
	mtspr	SRR1,r12
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.			/* prevent speculative execution */
#endif /* CONFIG_PPC_ISERIES */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,DAR
	mfspr	r12,DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	SAVE_4GPRS(3,r1)
	SAVE_2GPRS(7,r1)
	SAVE_10GPRS(12,r1)
	SAVE_10GPRS(22,r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exception_return:
	ld	r12,_MSR(r1)
	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer
	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

	mfmsr	r10
	clrrdi	r10,r10,2		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1

	mtspr	SRR1,r12
	mtspr	SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.			/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	RUNLATCH_ON(r10)		/* It wont fit in the 0x300 handler */
	mfspr	r10,DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
hardware_interrupt_entry:
	DISABLE_INTS
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	.load_up_fpu		/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE

/*
 * load_up_fpu(unused, unused, tsk)
 * Disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 * On SMP we know the fpu is free, since we give it up every
 * switch (ie, no lazy save of the FP registers).
 * On entry: r13 == 'current' && last_task_used_math != 'current'
 */
_STATIC(load_up_fpu)
	mfmsr	r5			/* grab the current MSR */
	ori	r5,r5,MSR_FP
	mtmsrd	r5			/* enable use of fpu now */
	isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 *
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_math@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save FP state to last_task_used_math's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32FPRS(0, r4)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR(r4)
	/* Disable FP for last_task_used_math */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r6,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	ld	r4,THREAD_FPEXC_MODE(r5)
	ori	r12,r12,MSR_FP
	or	r12,r12,r4
	std	r12,_MSR(r1)
	lfd	fr0,THREAD_FPSCR(r5)
	mtfsf	0xff,fr0
	REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_math to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bne	.load_up_altivec	/* if from user, just load it up */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except

#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync
/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altvec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code. Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	ld	r3,last_task_used_altivec@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Save VMX state to last_task_used_altivec's THREAD struct */
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VEC@h
	andc	r4,r4,r6
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpdi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_math to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */

/*
 * Hash table stuff
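Editor's note (not part of head.S): the comment block above load_up_fpu describes non-SMP lazy FPU switching, where last_task_used_math keeps its registers live in the FPU until another task takes an FP-unavailable trap. The C sketch below models only that bookkeeping under assumed, hypothetical structures (task, fp_state, the simulated hw_fp register file and MSR_FP value); the real kernel does this in the assembly above and in giveup_fpu() at context-switch time.

#include <stdint.h>
#include <stdio.h>

#define MSR_FP (1ULL << 13)              /* illustrative bit value only */

struct fp_state {
    double   fpr[32];                     /* floating-point registers   */
    uint64_t fpscr;                       /* FP status/control register */
};

struct task {
    struct fp_state fp;                   /* per-task saved FP state    */
    uint64_t        msr;                  /* saved MSR (pt_regs->msr)   */
    const char     *name;
};

static struct fp_state hw_fp;             /* stands in for the real FPU  */
static struct task *last_task_used_math;  /* current lazy owner, or NULL */

/* Models the FP-unavailable trap taken while 'next' ran in user mode. */
static void load_up_fpu_sketch(struct task *next)
{
    struct task *prev = last_task_used_math;

    if (prev && prev != next) {
        prev->fp = hw_fp;                 /* save old owner's registers   */
        prev->msr &= ~MSR_FP;             /* it must trap to get FP back  */
    }

    hw_fp = next->fp;                     /* restore new owner's state    */
    next->msr |= MSR_FP;                  /* FP stays enabled on return   */
    last_task_used_math = next;
}

int main(void)
{
    struct task a = { .name = "A" }, b = { .name = "B" };

    load_up_fpu_sketch(&a);               /* A traps and takes the FPU    */
    hw_fp.fpr[0] = 3.14;                  /* A uses the FPU               */
    load_up_fpu_sketch(&b);               /* B traps; A's state is saved  */

    printf("owner=%s  A.fpr[0]=%g  A has FP=%d\n",
           last_task_used_math->name, a.fp.fpr[0], (int)!!(a.msr & MSR_FP));
    return 0;
}

On SMP this lazy hand-off is not used: as the source comments say, the FPU is given up on every switch via giveup_fpu in switch_to, so the trap path never has to chase state saved on another CPU.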