cpu_asm.s
来自「RTEMS (Real-Time Executive for Multiproc」· S 代码 · 共 1,090 行 · 第 1/2 页
S
1,090 行
/* tail of _CPU_Context_restore (body begins before this view): NOP fills the
 * delay slot of the function's final jump.
 */
        NOP
ENDFRAME(_CPU_Context_restore)

/* RTEMS globals referenced by the interrupt/exception path below. */
ASM_EXTERN(_ISR_Nest_level, SZ_INT)
ASM_EXTERN(_Thread_Dispatch_disable_level,SZ_INT)
ASM_EXTERN(_Context_Switch_necessary,SZ_INT)
ASM_EXTERN(_ISR_Signals_to_thread_executing,SZ_INT)
ASM_EXTERN(_Thread_Executing,SZ_INT)

.extern _Thread_Dispatch
.extern _ISR_Vector_table

/* void _DBG_Handler()
 *
 * This routine services the (at least) MIPS1 debug vector,
 * only used by the hardware debugging features.  This code,
 * while optional, is best located here because it is intrinsically
 * associated with exceptions in general and thus tied pretty
 * closely to _ISR_Handler.
 *
 * It simply trampolines to _ISR_Handler; the NOP fills the jump's
 * delay slot (noreorder is in effect).
 */
FRAME(_DBG_Handler,sp,0,ra)
        .set noreorder
        la      k0,_ISR_Handler
        j       k0
        NOP
        .set reorder
ENDFRAME(_DBG_Handler)

/* void __ISR_Handler()
 *
 * This routine provides the RTEMS interrupt management.
 *
 * void _ISR_Handler()
 *
 *
 * This discussion ignores a lot of the ugly details in a real
 * implementation such as saving enough registers/state to be
 * able to do something real.  Keep in mind that the goal is
 * to invoke a user's ISR handler which is written in C and
 * uses a certain set of registers.
 *
 * Also note that the exact order is to a large extent flexible.
 * Hardware will dictate a sequence for a certain subset of
 * _ISR_Handler while requirements for setting
 *
 * At entry to "common" _ISR_Handler, the vector number must be
 * available.  On some CPUs the hardware puts either the vector
 * number or the offset into the vector table for this ISR in a
 * known place.  If the hardware does not give us this information,
 * then the assembly portion of RTEMS for this port will contain
 * a set of distinct interrupt entry points which somehow place
 * the vector number in a known place (which is safe if another
 * interrupt nests this one) and branches to _ISR_Handler.
 *
 */
FRAME(_ISR_Handler,sp,0,ra)
        .set noreorder

        /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */
        /* wastes a lot of stack space for context?? */

        /* Push an exception frame and save the caller-saved register set.
         * The mflo/mfhi reads are interleaved with the stores to cover the
         * multiply-unit result latency.
         */
        ADDIU   sp,sp,-EXCP_STACK_SIZE

        STREG   ra, R_RA*R_SZ(sp)  /* store ra on the stack */
        STREG   v0, R_V0*R_SZ(sp)
        STREG   v1, R_V1*R_SZ(sp)
        STREG   a0, R_A0*R_SZ(sp)
        STREG   a1, R_A1*R_SZ(sp)
        STREG   a2, R_A2*R_SZ(sp)
        STREG   a3, R_A3*R_SZ(sp)
        STREG   t0, R_T0*R_SZ(sp)
        STREG   t1, R_T1*R_SZ(sp)
        STREG   t2, R_T2*R_SZ(sp)
        STREG   t3, R_T3*R_SZ(sp)
        STREG   t4, R_T4*R_SZ(sp)
        STREG   t5, R_T5*R_SZ(sp)
        STREG   t6, R_T6*R_SZ(sp)
        STREG   t7, R_T7*R_SZ(sp)
        mflo    t0
        STREG   t8, R_T8*R_SZ(sp)
        STREG   t0, R_MDLO*R_SZ(sp)
        STREG   t9, R_T9*R_SZ(sp)
        mfhi    t0
        STREG   gp, R_GP*R_SZ(sp)
        STREG   t0, R_MDHI*R_SZ(sp)
        STREG   fp, R_FP*R_SZ(sp)

        /* AT is the assembler temporary; noat keeps the assembler from
         * silently using it while we save it.
         */
        .set noat
        STREG   AT, R_AT*R_SZ(sp)
        .set at

        /* Capture the CP0 state that identifies where/why we trapped. */
        MFC0    t0,C0_SR
        MFC0    t1,C0_EPC
        STREG   t0,R_SR*R_SZ(sp)
        STREG   t1,R_EPC*R_SZ(sp)

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t2, _Thread_Executing
        NOP
        sw      t2, 0x8001FFF0
#endif

        /* determine if an interrupt generated this exception */
        MFC0    t0,C0_CAUSE
        NOP                             /* CP0 read hazard slot */
        and     t1,t0,CAUSE_EXCMASK
        beq     t1, 0, _ISR_Handler_1   /* ExcCode == 0 means interrupt */

_ISR_Handler_Exception:

        /* If we return from the exception, it is assumed nothing
         * bad is going on and we can continue to run normally.
         * But we want to save the entire CPU context so exception
         * handlers can look at it and change it.
         *
         * NOTE: This is the path the debugger stub will take.
         */

        /* already got t0 = cause in the interrupt test above */
        STREG   t0,R_CAUSE*R_SZ(sp)

        STREG   sp, R_SP*R_SZ(sp)

        STREG   s0,R_S0*R_SZ(sp)    /* save s0 - s7 */
        STREG   s1,R_S1*R_SZ(sp)
        STREG   s2,R_S2*R_SZ(sp)
        STREG   s3,R_S3*R_SZ(sp)
        STREG   s4,R_S4*R_SZ(sp)
        STREG   s5,R_S5*R_SZ(sp)
        STREG   s6,R_S6*R_SZ(sp)
        STREG   s7,R_S7*R_SZ(sp)

        /* CP0 special registers */

#if __mips == 1
        MFC0    t0,C0_TAR
#endif
        MFC0    t1,C0_BADVADDR

#if __mips == 1
        STREG   t0,R_TAR*R_SZ(sp)
#else
        NOP
#endif
        STREG   t1,R_BADVADDR*R_SZ(sp)

#if ( CPU_HARDWARE_FP == TRUE )
        /* Save FPU state only when coprocessor 1 is enabled (SR_CU1 set). */
        MFC0    t0,C0_SR                /* FPU is enabled, save state */
        NOP                             /* CP0 read hazard slot */
        srl     t0,t0,16
        andi    t0,t0,(SR_CU1 >> 16)
        beqz    t0, 1f
        NOP

        la      a1,R_F0*R_SZ(sp)
        jal     _CPU_Context_save_fp_from_exception
        NOP
        MFC1    t0,C1_REVISION
        MFC1    t1,C1_STATUS
        STREG   t0,R_FEIR*R_SZ(sp)
        STREG   t1,R_FCSR*R_SZ(sp)

1:
#endif

        /* Hand the full exception frame to the C-level decoder. */
        move    a0,sp
        jal     mips_vector_exceptions
        NOP

        /*
        ** note, if the exception vector returns, rely on it to have
        ** adjusted EPC so we will return to some correct address.  If
        ** this is not done, we might get stuck in an infinite loop because
        ** we'll return to the instruction where the exception occurred and
        ** it could throw again.
        **
        ** It is expected the only code using the exception processing is
        ** either the gdb stub or some user code which is either going to
        ** panic or do something useful.  Regardless, it is up to each
        ** exception routine to properly adjust EPC, so the code below
        ** may be helpful for doing just that.
        */

/* *********************************************************************
** this code follows the R3000's exception return logic, but is not
** needed because the gdb stub does it for us.  It might be useful
** for something else at some point...
**
*       compute the address of the instruction we'll return to
*
        LDREG   t1, R_CAUSE*R_SZ(sp)
        LDREG   t0, R_EPC*R_SZ(sp)

*       first see if the exception happened in the delay slot
*
        li      t3,CAUSE_BD
        AND     t4,t1,t3
        beqz    t4,excnodelay
        NOP

*       it did, now see if the branch occurred or not
*
        li      t3,CAUSE_BT
        AND     t4,t1,t3
        beqz    t4,excnobranch
        NOP

*       branch was taken, we resume at the branch target
*
        LDREG   t0, R_TAR*R_SZ(sp)
        j       excreturn
        NOP

excnobranch:
        ADDU    t0,R_SZ

excnodelay:
        ADDU    t0,R_SZ

excreturn:
        STREG   t0, R_EPC*R_SZ(sp)
        NOP
********************************************************************* */

        /* if we're returning into mips_break, move to the next instruction */
        /* NOTE: the addu below executes in the bnez delay slot, so t0 is
         * advanced unconditionally; it is only stored back when EPC == mips_break.
         */
        LDREG   t0,R_EPC*R_SZ(sp)
        la      t1,mips_break
        xor     t2,t0,t1
        bnez    t2,3f

        addu    t0,R_SZ
        STREG   t0,R_EPC*R_SZ(sp)
        NOP
3:

#if ( CPU_HARDWARE_FP == TRUE )
        /* Mirror of the save path above: restore FPU state if CU1 is enabled. */
        MFC0    t0,C0_SR               /* FPU is enabled, restore state */
        NOP                            /* CP0 read hazard slot */
        srl     t0,t0,16
        andi    t0,t0,(SR_CU1 >> 16)
        beqz    t0, 2f
        NOP

        la      a1,R_F0*R_SZ(sp)
        jal     _CPU_Context_restore_fp_from_exception
        NOP
        LDREG   t0,R_FEIR*R_SZ(sp)
        LDREG   t1,R_FCSR*R_SZ(sp)
        MTC1    t0,C1_REVISION
        MTC1    t1,C1_STATUS
2:
#endif
        LDREG   s0,R_S0*R_SZ(sp)    /* restore s0 - s7 */
        LDREG   s1,R_S1*R_SZ(sp)
        LDREG   s2,R_S2*R_SZ(sp)
        LDREG   s3,R_S3*R_SZ(sp)
        LDREG   s4,R_S4*R_SZ(sp)
        LDREG   s5,R_S5*R_SZ(sp)
        LDREG   s6,R_S6*R_SZ(sp)
        LDREG   s7,R_S7*R_SZ(sp)

        /* do NOT restore the sp as this could mess up the world */
        /* do NOT restore the cause as this could mess up the world */

        /*
        ** Jump all the way out.  If there's a pending interrupt, just
        ** let it be serviced later.  Since we're probably using the
        ** gdb stub, we've already disrupted the ISR service timing
        ** anyhow.  We oughtn't mix exception and interrupt processing
        ** in the same exception call in case the exception stuff
        ** might interfere with the dispatching & timer ticks.
        */
        j       _ISR_Handler_exit
        NOP

_ISR_Handler_1:

        /* Interrupt path: make sure at least one external interrupt is
         * both pending (Cause.IP) and enabled (SR.IM); otherwise bail.
         */
        MFC0    t1,C0_SR
        and     t0,CAUSE_IPMASK
        and     t0,t1

        /* external interrupt not enabled, ignore */
        /* but if it's not an exception or an interrupt, */
        /* then where did it come from??? */
        beq     t0,zero,_ISR_Handler_exit

        /*
         *  save some or all context on stack
         *  may need to save some special interrupt information for exit
         *
         *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
         *    if ( _ISR_Nest_level == 0 )
         *      switch to software interrupt stack
         *  #endif
         */

        /*
         *  _ISR_Nest_level++;
         */
        LDREG   t0,_ISR_Nest_level
        NOP
        ADD     t0,t0,1
        STREG   t0,_ISR_Nest_level
        /*
         *  _Thread_Dispatch_disable_level++;
         */
        LDREG   t1,_Thread_Dispatch_disable_level
        NOP
        ADD     t1,t1,1
        STREG   t1,_Thread_Dispatch_disable_level

        /*
         *  Call the CPU model or BSP specific routine to decode the
         *  interrupt source and actually vector to device ISR handlers.
         */

#ifdef INSTRUMENT_ISR_VECTORING
        NOP
        li      t1, 1
        sw      t1, 0x8001e000
#endif

        move    a0,sp
        jal     mips_vector_isr_handlers
        NOP

#ifdef INSTRUMENT_ISR_VECTORING
        li      t1, 0
        sw      t1, 0x8001e000
        NOP
#endif

        /*
         *  --_ISR_Nest_level;
         */
        LDREG   t2,_ISR_Nest_level
        NOP
        ADD     t2,t2,-1
        STREG   t2,_ISR_Nest_level
        /*
         *  --_Thread_Dispatch_disable_level;
         */
        LDREG   t1,_Thread_Dispatch_disable_level
        NOP
        ADD     t1,t1,-1
        STREG   t1,_Thread_Dispatch_disable_level
        /*
         *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
         *    goto the label "exit interrupt (simple case)"
         */
        or      t0,t2,t1
        bne     t0,zero,_ISR_Handler_exit
        NOP

        /*
         *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
         *    restore stack
         *  #endif
         *
         *  if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing )
         *    goto the label "exit interrupt (simple case)"
         */
        LDREG   t0,_Context_Switch_necessary
        LDREG   t1,_ISR_Signals_to_thread_executing
        NOP
        or      t0,t0,t1
        beq     t0,zero,_ISR_Handler_exit
        NOP

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0,0x8001FFF4
#endif

/*
** Turn on interrupts before entering Thread_Dispatch which
** will run for a while, thus allowing new interrupts to
** be serviced.  Observe the Thread_Dispatch_disable_level interlock
** that prevents recursive entry into Thread_Dispatch.
*/

        MFC0    t0, C0_SR
#if __mips == 3
        li      t1,SR_EXL | SR_IE
#elif __mips == 1
        li      t1,SR_IEC
#endif
        or      t0, t1
        MTC0    t0, C0_SR
        NOP

        /* save off our stack frame so the context switcher can get to it */
        la      t0,__exceptionStackFrame
        STREG   sp,(t0)

        jal     _Thread_Dispatch
        NOP

        /* and make sure it's clear in case we didn't dispatch.  if we did, it's
        ** already cleared
        */
        la      t0,__exceptionStackFrame
        STREG   zero,(t0)
        NOP

/*
** turn interrupts back off while we restore context so
** a badly timed interrupt won't accidentally mess things up
*/
        MFC0    t0, C0_SR
#if __mips == 3
        li      t1,SR_EXL | SR_IE
#elif __mips == 1
        /* ints off, current & prev kernel mode on (kernel mode enabled is bit clear..argh!) */
        li      t1,SR_IEC | SR_KUP | SR_KUC
#endif
        not     t1
        and     t0, t1

#if __mips == 1
        /* disabled 7/29, gregm, this task's context was saved previously in an interrupt,
        ** so we'll just restore the task's previous interrupt enables.
        **
        ** make sure previous int enable is on  because we're returning from an interrupt
        ** which means interrupts have to be enabled

        li      t1,SR_IEP
        or      t0,t1
        */
#endif
        MTC0    t0, C0_SR
        NOP

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0,0x8001FFF8
#endif

        /*
         *  prepare to get out of interrupt
         *  return from interrupt  (maybe to _ISR_Dispatch)
         *
         *  LABEL "exit interrupt (simple case):"
         *  prepare to get out of interrupt
         *  return from interrupt
         */

_ISR_Handler_exit:
/*
** Skip the SR restore because it's a global register.  _CPU_Context_switch_restore
** adjusts it according to each task's configuration.  If we didn't dispatch, the
** SR value isn't changed, so all we need to do is return.
**
*/
        /* restore context from stack */

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0, 0x8001FFFC
#endif

        /* t8 shuttles MDLO/MDHI back before its own value is reloaded below. */
        LDREG   t8, R_MDLO*R_SZ(sp)
        LDREG   t0, R_T0*R_SZ(sp)
        mtlo    t8
        LDREG   t8, R_MDHI*R_SZ(sp)
        LDREG   t1, R_T1*R_SZ(sp)
        mthi    t8
        LDREG   t2, R_T2*R_SZ(sp)
        LDREG   t3, R_T3*R_SZ(sp)
        LDREG   t4, R_T4*R_SZ(sp)
        LDREG   t5, R_T5*R_SZ(sp)
        LDREG   t6, R_T6*R_SZ(sp)
        LDREG   t7, R_T7*R_SZ(sp)
        LDREG   t8, R_T8*R_SZ(sp)
        LDREG   t9, R_T9*R_SZ(sp)
        LDREG   gp, R_GP*R_SZ(sp)
        LDREG   fp, R_FP*R_SZ(sp)
        LDREG   ra, R_RA*R_SZ(sp)
        LDREG   a0, R_A0*R_SZ(sp)
        LDREG   a1, R_A1*R_SZ(sp)
        LDREG   a2, R_A2*R_SZ(sp)
        LDREG   a3, R_A3*R_SZ(sp)
        LDREG   v1, R_V1*R_SZ(sp)
        LDREG   v0, R_V0*R_SZ(sp)

        /* k1 carries the return address; kernel temporaries survive without
         * being part of the saved frame.
         */
        LDREG   k1, R_EPC*R_SZ(sp)

        .set noat
        LDREG   AT, R_AT*R_SZ(sp)
        .set at

        ADDIU   sp,sp,EXCP_STACK_SIZE

        /* MIPS1 return-from-exception sequence: rfe executes in the delay
         * slot of the jump, restoring the pre-exception interrupt/mode state.
         */
        j       k1
        rfe
        NOP

        .set reorder
ENDFRAME(_ISR_Handler)

FRAME(mips_break,sp,0,ra)
        .set noreorder
        break   0x0     /* this statement must be first in this function, assumed so by mips-stub.c */
        NOP
        j       ra
        NOP
        .set reorder
ENDFRAME(mips_break)
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?