exceptions.S
    . = 0xd00
ex_trace:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0xd00 /* exception vector for GDB stub */
    bctr

    . = 0xe00
ex_fp:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_program_continued
    li r0, 0xe00 /* exception vector for GDB stub */
    bctr

    . = 0xf00
ex_perfmon:
    GET_STACK r13 SPRN_SRR1
    EXCEPTION_HEAD r13 ex_perfmon_continued
    bctr

    .align 3
    .globl exception_vectors_end
exception_vectors_end:
    /* put some stuff here so we see the next symbol */
    .long 0xdeadbeef
    .long 0xdeadbeef

.macro FAST_RESUME
    LOAD_C_STATE r1 /* restore most C volatiles */

    ld r0, UREGS_ctr(r1)
    mtctr r0

    /* clear MSR:RI/EE to set SRR0/SRR1 */
    li r0, 0
    mtmsrd r0, 1

    ld r0, UREGS_pc(r1)
    mtspr SPRN_HSRR0, r0
    ld r0, UREGS_msr(r1)
    mtspr SPRN_HSRR1, r0

    ld r0, UREGS_srr0(r1)
    mtspr SPRN_SRR0, r0
    ld r0, UREGS_srr1(r1)
    mtspr SPRN_SRR1, r0

    ld r13, UREGS_r13(r1)
    ld r0, UREGS_r0(r1)
    ld r1, UREGS_r1(r1)
    HRFID
    b . /* prevent speculative icache fetch */
.endm

/* Not a whole lot just yet */
ex_machcheck_continued:

/* We enter with the exception number in r0. The EXCEPTION_SAVE_STATE macro
 * clobbers r0 though, so we have to move it around a little bit. Not ideal,
 * but hopefully program exception is not performance-critical... Maybe there's
 * a better way, but this works for now. */
ex_program_continued:
    SAVE_GPRS r14, r31, r1 /* save all the non-volatiles */

    /* save these for debug; not needed for restore */
    mfspr r14, SPRN_HID4
    std r14, UREGS_hid4(r1)
    mfdar r14
    std r14, UREGS_dar(r1)
    mfdsisr r14
    stw r14, UREGS_dsisr(r1)

    mr r14, r0
    EXCEPTION_SAVE_STATE r1
    mr r4, r14
    LOADADDR r12, program_exception
    mr r3, r1 /* pass pointer to cpu_user_regs */
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12

    /* reload state and rfid */
    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    LOAD_GPRS r14, r31, r1
    FAST_RESUME

ex_external_continued:
    EXCEPTION_SAVE_STATE r1
    LOADADDR r12, do_external
    mr r3, r1 /* pass pointer to cpu_user_regs */
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12

    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    b fast_resume

ex_hcall_continued:
    /* We have to save the non-volatiles here in case of a blocking hcall
     * (which will end up in context_switch()). */
    SAVE_GPRS r14, r31, r1
    EXCEPTION_SAVE_STATE r1
    LOADADDR r12, do_hcall
    mr r3, r1 /* pass pointer to cpu_user_regs */
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12 /* call hcall handler */

    /* test for pending softirqs, and loop until there are no more. */
    mfmsr r14
    ori r14, r14, MSR_EE
    xori r15, r14, MSR_EE

hcall_test_all_events:
    mtmsrd r15, 1 /* disable interrupts */
    ld r3, PAREA_vcpu(r13)
    lwz r3, VCPU_processor(r3)
    LOADADDR r4, irq_stat
    sldi r3, r3, IRQSTAT_shift
    add r4, r3, r4
    ld r5, IRQSTAT_pending(r4)
    cmpldi r5, 0
    beq hcall_out /* no more softirqs; exit loop */

    LOADADDR r6, do_softirq
    mtmsrd r14, 1 /* enable interrupts */
    CALL_CFUNC r6 /* process softirqs */
    b hcall_test_all_events /* look for more */

hcall_out:
    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    LOAD_GPRS r14, r15, r1 /* we clobbered r14/r15 */
    b fast_resume
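/* For reference, the softirq polling loop above (and test_all_events below)
 * amounts to roughly the following C logic.  This is an illustrative sketch
 * only: local_irq_disable()/local_irq_enable() and softirq_pending() are
 * assumed helper names, not taken from this file -- the assembly open-codes
 * the irq_stat lookup via the IRQSTAT_* offsets instead.
 *
 *     for ( ; ; )
 *     {
 *         local_irq_disable();                  // mtmsrd r15 (MSR_EE clear)
 *         if ( !softirq_pending(v->processor) ) // IRQSTAT_pending test
 *             break;                            // leave with interrupts off
 *         local_irq_enable();                   // mtmsrd r14 (MSR_EE set)
 *         do_softirq();                         // process the pending work
 *     }
 */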
ex_dec_continued:
    EXCEPTION_SAVE_STATE r1
    LOADADDR r12, do_dec
    mr r3, r1 /* pass pointer to cpu_user_regs */
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12

    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    b fast_resume

ex_perfmon_continued:
    EXCEPTION_SAVE_STATE r1
    LOADADDR r12, do_perfmon
    mr r3, r1 /* pass pointer to cpu_user_regs */
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12

    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    b fast_resume

ex_hdec_continued:
    /* When we get an HDEC, we (almost?) always context_switch, so we need to
     * save the non-volatiles. */
    SAVE_GPRS r14, r31, r1
    H_EXCEPTION_SAVE_STATE r1

    LOADADDR r12, do_timer
    mr r3, r1
    subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
    CALL_CFUNC r12

    /* if we are resuming into hypervisor, don't handle softirqs */
    ld r10, (UREGS_msr + STACK_FRAME_OVERHEAD)(r1)
    rldicl. r11, r10, 4, 63 /* test SRR1:HV */
    bne hdec_out

    /* test for pending softirqs, and loop until there are no more. */
    mfmsr r14
    ori r14, r14, MSR_EE
    xori r15, r14, MSR_EE

test_all_events:
    mtmsrd r15, 1 /* disable interrupts */
    ld r3, PAREA_vcpu(r13)
    lwz r3, VCPU_processor(r3)
    LOADADDR r4, irq_stat
    sldi r3, r3, IRQSTAT_shift
    add r4, r3, r4
    ld r5, IRQSTAT_pending(r4)
    cmpldi r5, 0
    beq hdec_out /* no more softirqs; exit loop */

    LOADADDR r6, do_softirq
    mtmsrd r14, 1 /* enable interrupts */
    CALL_CFUNC r6 /* process softirqs */
    b test_all_events /* look for more */

hdec_out:
    addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
    LOAD_GPRS r14, r15, r1 /* we clobbered r14/r15 in the loop */

/* r1 points to the to-be-restored cpu_user_regs. These could be mid-hypervisor
 * stack (returning into elsewhere in Xen) or at the top of the stack
 * (restoring the domain). */
_GLOBAL(full_resume)
    /* disable MSR:EE, since we could have come from do_softirq() */
    mfmsr r7
    ori r7, r7, MSR_EE
    xori r7, r7, MSR_EE
    mtmsrd r7, 1

    LOAD_GPRS r14, r31, r1 /* restore all non-volatiles */

fast_resume:
    ld r10, UREGS_msr(r1)
    rldicl. r11, r10, 4, 63 /* test SRR1:HV */
    bne 1f /* returning to hypervisor */

    /* check for pending irqs */
    mr r3, r1
    subi r1, r1, STACK_FRAME_OVERHEAD
    bl .deliver_ee
    addi r1, r1, STACK_FRAME_OVERHEAD

    /* if we took a DEC in hypervisor mode, we don't want to reload the DEC
     * until we return to the domain. MSR_EE is clear, so the domain will take
     * any impending DEC. */
    ld r3, PAREA_vcpu(r13)
    lwz r0, VCPU_dec(r3)
    mtdec r0

1:  FAST_RESUME
    /* not reached */
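/* In C-like terms, the domain-bound tail of the resume path above does
 * approximately the following (illustrative sketch only; deliver_ee() is the
 * C routine actually called above, while the MSR:HV test and the decrementer
 * reload are open-coded -- set_dec() and the field names are assumed here):
 *
 *     if ( !(regs->msr & MSR_HV) )   // returning to a domain, not to Xen
 *     {
 *         deliver_ee(regs);          // deliver any pending external interrupt
 *         set_dec(v->dec);           // reload the domain's saved decrementer
 *     }
 *     FAST_RESUME;                   // restore volatiles and HRFID out
 */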
/* move all of the below somewhere else */

_GLOBAL(papr_hcall_jump)
    mtctr r4
    bctr /* return to caller via LR */

/* XXX don't need to load all the registers */
_GLOBAL(xen_hvcall_jump)
    mtctr r4
    ld r10, (UREGS_gprs + GPR_WIDTH * 11)(r3)
    ld r9, (UREGS_gprs + GPR_WIDTH * 10)(r3)
    ld r8, (UREGS_gprs + GPR_WIDTH * 9)(r3)
    ld r7, (UREGS_gprs + GPR_WIDTH * 8)(r3)
    ld r6, (UREGS_gprs + GPR_WIDTH * 7)(r3)
    ld r5, (UREGS_gprs + GPR_WIDTH * 6)(r3)
    ld r4, (UREGS_gprs + GPR_WIDTH * 5)(r3)
    ld r3, (UREGS_gprs + GPR_WIDTH * 4)(r3)
    bctr

_GLOBAL(_reset_stack_and_jump)
    ld r2, 8(r3)
    ld r3, 0(r3)
    mtctr r3
    mr r1, r4
    bctr

_GLOBAL(sleep)
    mfmsr r3
    ori r4, r3, MSR_EE
    oris r4, r4, MSR_POW@h
    sync
    mtmsrd r4
    isync
    mtmsrd r3
    blr

/* The primary processor issues a firmware call to spin us up at this
 * address, passing our CPU number in r3. We only need a function
 * entry point instead of a descriptor since this is never called from
 * C code. */
    .globl spin_start
spin_start:
    /* We discovered by experiment that the ERAT must be flushed early. */
    isync
    slbia
    isync

    /* Do a cache flush for our text, in case the loader didn't */
    LOADADDR(r9, _start)
    LOADADDR(r8, _etext)
4:  dcbf r0, r9
    icbi r0, r9
    addi r9, r9, 0x20 /* up to a 4-way set per line */
    cmpld cr0, r9, r8
    blt 4b
    sync
    isync

    /* Write our processor number as an acknowledgment that we're alive. */
    LOADADDR(r14, __spin_ack)
    stw r3, 0(r14)
    sync

    /* If NR_CPUS is too small, we should just spin forever. */
    LOADADDR(r15, NR_CPUS)
    cmpd r3, r15
    blt 2f
    b .

    /* Find our index in the array of processor_area struct pointers. */
2:  LOADADDR(r14, global_cpu_table)
    mulli r15, r3, 8
    add r14, r14, r15

    /* Spin until the pointer for our processor goes valid. */
1:  ld r15, 0(r14)
    cmpldi r15, 0
    beq 1b

    /* Dereference the pointer and load our stack pointer. */
    isync
    ld r1, PAREA_stack(r15)
    li r14, STACK_FRAME_OVERHEAD
    sub r1, r1, r14

    /* Load up the TOC and entry point for the C function to be called. */
    LOADADDR(r14, secondary_cpu_init)
    ld r2, 8(r14)
    ld r11, 0(r14)
    mtctr r11

    /* Warning: why do we need this synchronizing instruction on 970FX? */
    isync

    /* Jump into C code now. */
    bctrl
    nop
    b .
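/* The spin_start sequence above is, in effect, the following handshake with
 * the boot processor, shown as an illustrative C-like sketch.  Only
 * __spin_ack, global_cpu_table, NR_CPUS, STACK_FRAME_OVERHEAD and
 * secondary_cpu_init appear in this file; set_sp() and the stack member
 * name are assumed for illustration.
 *
 *     void spin_start(int cpu)                     // cpu number arrives in r3
 *     {
 *         __spin_ack = cpu;                        // tell the boot CPU we're alive
 *         if ( cpu >= NR_CPUS )
 *             for ( ; ; ) ;                        // no slot for us: spin forever
 *         while ( global_cpu_table[cpu] == NULL )  // wait for our processor_area
 *             ;
 *         set_sp(global_cpu_table[cpu]->stack - STACK_FRAME_OVERHEAD);
 *         secondary_cpu_init(...);                 // not expected to return
 *     }
 */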