/*
 * excep_s.s — low-level exception handling (from the MIPS YAMON monitor sources).
 *
 * [Viewer header removed: "S source · 2,069 lines · page 1/4".
 *  This chunk is page 1 of 4; the file continues beyond this excerpt.]
 */
/*
 * Tail of the YAMON vector handler (entry is above this excerpt):
 * hand the saved context to the C-level handler, then fall into the
 * restore path.  Conventions in this file: k1 = pointer to the saved
 * exception context, k0 = exception vector offset.
 */
        addiu   sp, k1, -4*4            /* scratch frame just below the context */

        /* If exception handler returns, we jump to restore code */
        b       EXCEP_exc_handler_ret
        move    a0, k1                  /* delay slot: a0 = context to restore */

rerun_ejtag:
        /* Re-enter the EJTAG handling path; '1b' is a local label
         * defined earlier in this handler (outside this excerpt) --
         * TODO confirm against the full source. */
        la      a0, ejtag_handler_arg
        lw      a1, 0(a0)
        b       1b
        add     a1, 1                   /* count up exc_return_flag argument */

/************************************************************************
 *
 *  central entry for cacheerr exceptions
 *
 *  this code always runs in KSEG1
 */
exc_handler_cacheerr:
        /* Store unmodified C0_Config register in uncached memory.
         * This value is put into exc_context in excep.c. */
        la      k0, EXCEP_C0_Config_cacheerr
        li      k1, KSEG1BASE
        or      k1, k0                  /* uncached (KSEG1) alias of the save slot */
        mfc0    k0, C0_Config
        sw      k0, 0(k1)

        /* Change C0_Config to run uncached: force the K0 cache
         * attribute field to 'uncached' (set all bits, then flip the
         * ones that differ from K_CacheAttrU). */
        or      k0, M_ConfigK0
        xor     k0, M_ConfigK0 ^ (K_CacheAttrU << S_ConfigK0)
        MTC0( k0, C0_Config )

        /* Set up k0 again and jump to exc_handler */
        b       exc_handler
        li      k0, SYS_CACHEERR_RAM_VECTOR_OFS  /* delay slot: vector offset */

/************************************************************************
 *
 *  central entry for general exceptions
 */
exc_handler_general:
        /* Store exception vector offset */
        la      k1, exc_handler_args
        sw      k0, 0(k1)

        /**** Test for FPU emulator ****/
        la      k1, FPUEMUL_status
        lb      k1, 0(k1)
        beqz    k1, exc_default         /* emulator inactive -> common path */
        nop
        la      k1, exc_default         /* k1 = resume point for the emulator */
        j       FPUEMUL_handler
        nop

/************************************************************************
 *
 *  central entry common to most exceptions
 */
exc_handler:
        /* Store exception vector offset */
        la      k1, exc_handler_args
        sw      k0, 0(k1)

exc_default:
        /**** Store context ****/
        /* NOTE(review): presumably resumes at exc_handler_after_gpr
         * once GPRs are saved -- confirm in the _ss store routine. */
        j       EXCEP_store_gpr_but_k0k1_ss
        nop
exc_handler_after_gpr:
        jal     store_cp1_gpr_cp0_control
        nop

        /* Set up arguments for exception_sr() */
        /* Clear 'EXCEP_return_flag' */
        la      a1, exc_handler_args
        li      a3, 0
exc_rerun:
        sw      a3, 4(a1)               /* a3 = EXCEP_return_flag (re-entry count) */
        move    a2, k1                  /* a2 = context pointer */
        lw      a1, 0(a1)               /* a1 = exception vector offset */

        /* Set a0 to the cause code field of the CAUSE register */
        MFC0( t0, C0_Cause )
        li      t1, M_CauseExcCode
        and     a0, t0, t1
        srl     a0, S_CauseExcCode

        /* call low level exception handler */
        jal     exception_sr
        /* It is imperative that k1 survives */
        addiu   sp, k1, -4*4            /* delay slot: sp just below the context */

        /* If exception handler returns, we jump to restore code */
        b       EXCEP_exc_handler_ret
        move    a0, k1                  /* delay slot: a0 = context to restore */

        /* A registered exception handler is
allowed to call
         * EXCEP_return (see below) in case it does not want to
         * handle the exception.
         * The address of EXCEP_return was returned when the
         * exception handler was registered.
         * We need to flag whether this is the initial handling
         * of an exception or such a return from a registered
         * exception handler. This is done using the
         * EXCEP_return_flag variable.
         */
EXCEP_return :
        /* EJTAG contexts re-enter through a dedicated path */
        la      a2, ejtag_context
        beq     a2, k1, rerun_ejtag
        nop

        /* Set 'EXCEP_return_flag' */
        la      a1, exc_handler_args
        lw      a3, 4(a1)
        b       exc_rerun
        add     a3, 1                   /* count up exc_return_flag argument */

END( EXCEP_yamon_vector_handler )

LEAF( EXCEP_save_context )
        /* entry parameters
        **
        ** a0   pointer to context
        **
        ** Saves the CPU context into *a0 and returns 0.  The v0 slot
        ** of the saved context is pre-set to 1, so restoring this
        ** context later resumes at the caller "returning" 1
        ** (setjmp/longjmp style).
        */

        /**** Store current stack pointer for reuse by appl_if ****/
        la      v0, EXCEP_shell_sp
        sw      sp, 0(v0)

        /**** Store context ****/
        li      v0, 1                   /* return value after context restore */
        la      k0, sys_64bit
        lb      k0, 0(k0)
        bne     k0, zero, 64f           /* 64-bit CPU -> sd/ld path */
        move    k1, a0                  /* delay slot: k1 = context pointer */

        /* 32-bit save: park ra in the REG31 slot so it survives the jals */
        sw      $31, GDB_REG32_OFS(GDB_FR_REG31)(k1)
        jal     EXCEP_store_gpr_but_k0k1ra
        nop
        jal     store_cp1_gpr_cp0_control
        nop
        /* restore ra and insert as CP0_EPC */
        lw      ra, GDB_REG32_OFS(GDB_FR_REG31)(k1)
        b       32f
        sw      ra, GDB_REG32_OFS(GDB_FR_EPC)(k1)

64:
SET_MIPS3()
        /* 64-bit save: same sequence using doubleword accesses */
        sd      $31, GDB_REG64_OFS(GDB_FR_REG31)(k1)
        jal     EXCEP_store_gpr_but_k0k1ra
        nop
        jal     store_cp1_gpr_cp0_control
        nop
        /* restore ra and insert as CP0_EPC */
        ld      ra, GDB_REG64_OFS(GDB_FR_REG31)(k1)
        sd      ra, GDB_REG64_OFS(GDB_FR_EPC)(k1)
SET_MIPS0()

32:
        /* In the saved Status image: set EXL, clear ERL, so that an
         * eret from this context resumes via EPC. */
        lw      v0, GDB_REG32_OFS(GDB_FR_STATUS)(k1)
        or      v0, M_StatusERL | M_StatusEXL
        xor     v0, M_StatusERL
        sw      v0, GDB_REG32_OFS(GDB_FR_STATUS)(k1)

        jr      ra
        li      v0, 0                   /* return value after context save */
END( EXCEP_save_context )

LEAF( EXCEP_get_context_ptr )
        /* Retrieves pointer to current exception context.
        */
        /* k1 is expected to hold the active context pointer; return
         * (in v0) whichever known context it matches. */
        la      v0, exc_context
        beq     v0, k1, 2f
        nop
        la      v0, ejtag_context
1:
        bne     v0, k1, 1b              /* Stay here if k1 does not point to a known context */
        nop
2:
        jr      ra
        nop
END( EXCEP_get_context_ptr )

LEAF( EXCEP_exc_handler_ret_ss )
        /* Same functionality as EXCEP_exc_handler_ret()
         * except that for CPUs implementing MIPS32/MIPS64
         * Release 2, we first make sure to use shadowset 0.
         */
        la      t0, excep_cp0_regs_mask
        lw      t0, 0(t0)
        sll     t0, 31-6                /* Release 2 flag (bit 6) -> sign bit */
        bgez    t0, EXCEP_exc_handler_ret   /* not Release 2: no shadow sets */
        nop

        /* Clear ERL, BEV, but first set KSU=0 (kernel mode), so that
         * we stay in Kernel mode independent of EXL.
         * We then clear SRSCtl and perform an ERET in order to start
         * using shadowset 0.
         */
        MFC0( t0, C0_Status )
        li      t1, (M_StatusKSU | M_StatusIE)
        or      t0, t1
        xor     t0, t1                  /* clear KSU and IE */
        MTC0( t0, C0_Status )
        li      t1, (M_StatusERL | M_StatusBEV)
        or      t0, t1
        xor     t0, t1                  /* clear ERL and BEV */
        MTC0( t0, C0_Status )
        MTC0_SEL_OPCODE( R_zero, R_C0_SRSCtl, R_C0_SelSRSCtl )
        /* Copy a0 to previous shadow set (now 0) so that it survives eret */
        WRPGPR( R_a0, R_a0 )
        la      t0, EXCEP_exc_handler_ret
        MTC0( t0, C0_EPC)
SET_MIPS3()
        eret
SET_MIPS0()
END( EXCEP_exc_handler_ret_ss )

LEAF( EXCEP_exc_handler_ret )
        /* Restore context and return from exception.
         *
         * a0 = Pointer to context to be restored.
         * restore_control_gpr_but_k1() will perform jr k1 rather than jr ra.
         */

        /* Determine ejtag return based on context pointer */
        la      k0, ejtag_context
        beq     a0, k0, return_from_ejtag
        nop

        /* Normal return from exception */
        la      k1, restore_done        /* k1 = resume point after restore */
        b       restore_control_gpr_but_k1
        move    k0, a0                  /* delay slot: k0 = context to restore */
restore_done:
SET_MIPS3()
        eret
SET_MIPS0()

return_from_ejtag:
        /* EJTAG return from exception */
        la      k1, restore_done_ejtag
        b       restore_control_gpr_but_k1
        move    k0, a0
restore_done_ejtag:
        mfc0    k1, C0_DESAVE           /* "restore" k1 */
        DERET
        nop
END( EXCEP_exc_handler_ret )

LEAF( EXCEP_exc_handler_jump )
        /* Restore context and jump to function given by a0.
* a0 = Pointer to function taking over (assume 32 bit)
         *
         * Function is of type t_EXCEP_esr defined as :
         * typedef void (*t_EXCEP_esr)(void);
         *
         * This entry is invoked only for jumping to a handler
         * registered by EXCEP_register_esr(x,TRUE,x,x).
         * The a0 function is responsible for handling the
         * exception and possibly issue an eret (or deret
         * in the case of an EJTAG exception).
         *
         * At this point in the exception processing only GPR
         * registers have been modified, so there is no need to
         * restore any control or floating point registers.
         *
         * Caution: restoring control registers at this point
         * may cause an exception -> infinite exception loop.
         *
         */
        /* a0 holds the target address. Move this to k1, since
         * EXCEP_restore_gpr_but_k1() will perform a jr k1 */

        /* Set context to be restored */
        move    k0, k1

        /* Determine ejtag jump based on context pointer */
        la      k1, ejtag_context
        bne     k1, k0, EXCEP_restore_gpr_but_k1   /* branch normally taken */
        move    k1, a0                  /* delay slot: k1 = target function */

        /* jump in ejtag context - we need to restore ALL registers,
         * so stash the target address in memory first */
        la      k1, excep_tmp
        sw      a0, 0(k1)               /* a0 is type (*t_EXCEP_esr)() === UINT32 */
        la      k1, ejtag_jump_restore_done
        b       restore_control_gpr_but_k1
        nop
ejtag_jump_restore_done:
        /* we have only k1 left - "restore" return address */
        la      k1, excep_tmp
        lw      k1, 0(k1)
        jr      k1
        mfc0    k1, C0_DESAVE           /* delay slot: restore k1 */
END( EXCEP_exc_handler_jump )

LEAF(EXCEP_store_gpr_but_k0k1ra)
        /* Store all GPRs except k0/k1 ($26/$27) and ra ($31) into the
         * context pointed to by k1.  Access width follows sys_64bit. */

        /**** Store context ****/
        la      k0, sys_64bit
        lb      k0, 0(k0)
        bne     k0, zero, store_gpr_64bit
        nop

        /* Store 32 bit CPU Registers */
        sw      $0,  GDB_REG32_OFS(GDB_FR_REG0 )(k1)
        sw      $1,  GDB_REG32_OFS(GDB_FR_REG1 )(k1)
        sw      $2,  GDB_REG32_OFS(GDB_FR_REG2 )(k1)
        sw      $3,  GDB_REG32_OFS(GDB_FR_REG3 )(k1)
        sw      $4,  GDB_REG32_OFS(GDB_FR_REG4 )(k1)
        sw      $5,  GDB_REG32_OFS(GDB_FR_REG5 )(k1)
        sw      $6,  GDB_REG32_OFS(GDB_FR_REG6 )(k1)
        sw      $7,  GDB_REG32_OFS(GDB_FR_REG7 )(k1)
        sw      $8,  GDB_REG32_OFS(GDB_FR_REG8 )(k1)
        sw      $9,  GDB_REG32_OFS(GDB_FR_REG9 )(k1)
        sw      $10, GDB_REG32_OFS(GDB_FR_REG10)(k1)
        sw      $11, GDB_REG32_OFS(GDB_FR_REG11)(k1)
        sw      $12, GDB_REG32_OFS(GDB_FR_REG12)(k1)
        sw      $13, GDB_REG32_OFS(GDB_FR_REG13)(k1)
        sw      $14, GDB_REG32_OFS(GDB_FR_REG14)(k1)
        sw      $15, GDB_REG32_OFS(GDB_FR_REG15)(k1)
        sw      $16, GDB_REG32_OFS(GDB_FR_REG16)(k1)
        sw      $17, GDB_REG32_OFS(GDB_FR_REG17)(k1)
        sw      $18, GDB_REG32_OFS(GDB_FR_REG18)(k1)
        sw      $19, GDB_REG32_OFS(GDB_FR_REG19)(k1)
        sw      $20, GDB_REG32_OFS(GDB_FR_REG20)(k1)
        sw      $21, GDB_REG32_OFS(GDB_FR_REG21)(k1)
        sw      $22, GDB_REG32_OFS(GDB_FR_REG22)(k1)
        sw      $23, GDB_REG32_OFS(GDB_FR_REG23)(k1)
        sw      $24, GDB_REG32_OFS(GDB_FR_REG24)(k1)
        sw      $25, GDB_REG32_OFS(GDB_FR_REG25)(k1)
        /* Not k0, k1 = $26, $27 */
        sw      $28, GDB_REG32_OFS(GDB_FR_REG28)(k1)
        sw      $29, GDB_REG32_OFS(GDB_FR_REG29)(k1)
        sw      $30, GDB_REG32_OFS(GDB_FR_REG30)(k1)
        /* Not ra = $31 (saved separately) */
        jr      ra
        nop

store_gpr_64bit :
        /* Store 64 bit CPU Registers */
SET_MIPS3()
        sd      $0,  GDB_REG64_OFS(GDB_FR_REG0 )(k1)
        sd      $1,  GDB_REG64_OFS(GDB_FR_REG1 )(k1)
        sd      $2,  GDB_REG64_OFS(GDB_FR_REG2 )(k1)
        sd      $3,  GDB_REG64_OFS(GDB_FR_REG3 )(k1)
        sd      $4,  GDB_REG64_OFS(GDB_FR_REG4 )(k1)
        sd      $5,  GDB_REG64_OFS(GDB_FR_REG5 )(k1)
        sd      $6,  GDB_REG64_OFS(GDB_FR_REG6 )(k1)
        sd      $7,  GDB_REG64_OFS(GDB_FR_REG7 )(k1)
        sd      $8,  GDB_REG64_OFS(GDB_FR_REG8 )(k1)
        sd      $9,  GDB_REG64_OFS(GDB_FR_REG9 )(k1)
        sd      $10, GDB_REG64_OFS(GDB_FR_REG10)(k1)
        sd      $11, GDB_REG64_OFS(GDB_FR_REG11)(k1)
        sd      $12, GDB_REG64_OFS(GDB_FR_REG12)(k1)
        sd      $13, GDB_REG64_OFS(GDB_FR_REG13)(k1)
        sd      $14, GDB_REG64_OFS(GDB_FR_REG14)(k1)
        sd      $15, GDB_REG64_OFS(GDB_FR_REG15)(k1)
        sd      $16, GDB_REG64_OFS(GDB_FR_REG16)(k1)
        sd      $17, GDB_REG64_OFS(GDB_FR_REG17)(k1)
        sd      $18, GDB_REG64_OFS(GDB_FR_REG18)(k1)
        sd      $19, GDB_REG64_OFS(GDB_FR_REG19)(k1)
        sd      $20, GDB_REG64_OFS(GDB_FR_REG20)(k1)
        sd      $21, GDB_REG64_OFS(GDB_FR_REG21)(k1)
        sd      $22, GDB_REG64_OFS(GDB_FR_REG22)(k1)
        sd      $23, GDB_REG64_OFS(GDB_FR_REG23)(k1)
        sd      $24, GDB_REG64_OFS(GDB_FR_REG24)(k1)
        sd      $25, GDB_REG64_OFS(GDB_FR_REG25)(k1)
        /* Not k0, k1 = $26, $27 */
        sd      $28, GDB_REG64_OFS(GDB_FR_REG28)(k1)
        sd      $29, GDB_REG64_OFS(GDB_FR_REG29)(k1)
        sd      $30, GDB_REG64_OFS(GDB_FR_REG30)(k1)
        /* Not ra = $31 (saved separately)
        */
SET_MIPS0()
        jr      ra
        nop
END(EXCEP_store_gpr_but_k0k1ra)

LEAF( sys_store_control_regs )
        /* Store the CP0 registers that are available for the CPU in use.
         * All stores go into the GDB-layout context pointed to by a0.
         *
         * We skip the following registers, since YAMON doesn't depend
         * on them and GDB doesn't request them :
         *
         *   LLAddr, Xcontext, TraceControl, TraceControl2,
         *   UserTraceData, TraceBPC, PerfCnt, ErrCtl, CacheErr,
         *   TagLo, DataLo, TagHi, DataHi, DESAVE
         */

        /* Get availability of regs :
         *
         * Bit 0 = 32(1) or 64(0) bit CPU
         * Bit 1 = FPU
         * Bit 2 = TLB
         * Bit 3 = Watch registers
         * Bit 4 = MIPS32/MIPS64
         * ----- Following only apply to MIPS32/MIPS64
         * Bit 5 = EJTAG (MIPS32/64 only)
         * Bit 6 = MIPS32/MIPS64 Release 2
         * Bit 7 = SRSMap
         * Bit 8 = PageGrain
         */
        la      t0, excep_cp0_regs_mask
        lw      t0, 0(t0)

        /* 32 bit or 64 bit: shift bit 0 into the sign position;
         * bit 0 clear (non-negative after shift) means 64-bit CPU */
        sll     t1, t0, 31-0
        bgez    t1, store_control_64bit
        nop

        /* Status register (always available) */
        MFC0( t1, C0_Status)
        sw      t1, GDB_REG32_OFS(GDB_FR_STATUS)(a0)
        sw      t1, GDB_REG32_OFS(GDB_FR_CP0_REG12)(a0)

        /* Check for FPU */
        sll     t2, t0, 31-1
        bgez    t2, 1f                  /* no FPU present */
        nop
        /* Check that FPU is enabled (Status.CU1; t1 still holds Status) */
        sll     t1, 31-S_StatusCU1
        bgez    t1, 1f
        nop
        /* Store FPU control registers (FIR, FCSR) */
        cfc1    t1, $0
        cfc1    t2, $31
        sw      t1, GDB_REG32_OFS(GDB_FR_FIR)(a0)
        sw      t2, GDB_REG32_OFS(GDB_FR_FSR)(a0)
1:
        /* Registers available for all MIPS CPUs */
        mflo    t1
        sw      t1, GDB_REG32_OFS(GDB_FR_LO)(a0)
        mfhi    t1
        sw      t1, GDB_REG32_OFS(GDB_FR_HI)(a0)
        MFC0( t1, C0_BadVAddr )
        sw      t1, GDB_REG32_OFS(GDB_FR_BADVADDR)(a0)
        sw      t1, GDB_REG32_OFS(GDB_FR_CP0_REG8)(a0)
        MFC0( t1, C0_Count )
        sw      t1, GDB_REG32_OFS(GDB_FR_CP0_REG9)(a0)
        MFC0( t1, C0_Compare )
        sw      t1, GDB_REG32_OFS(GDB_FR_CP0_REG11)(a0)
        MFC0( t1, C0_Cause )
        sw      t1, GDB_REG32_OFS(GDB_FR_CAUSE)(a0)
        sw      t1, GDB_REG32_OFS(GDB_FR_CP0_REG13)(a0)
        MFC0( t1, C0_EPC )
        sw      t1, GDB_REG32_OFS(GDB_FR_EPC)(a0)
        sw      t1, GDB_REG32_OFS(GDB_FR_CP0_REG14)(a0)
        MFC0( t1, C0_PRId )
        sw      t1, GDB_REG32_OFS(GDB_FR_CP0_PRID)(a0)
        MFC0( t1, C0_ErrorEPC )
        sw      t1, GDB_REG32_OFS(GDB_FR_CP0_ERROREPC)(a0)
        MFC0_SEL_OPCODE( R_t1, R_C0_Config, R_C0_SelConfig )
        sw      t1, GDB_REG32_OFS(GDB_FR_CP0_CONFIG)(a0)

        /* Check for TLB */
        sll     t1, t0, 31-2
        bgez    t1, 1f
        nop
        /* TLB registers */
        MFC0( t1, C0_Index )
        sw      t1, GDB_REG32_OFS(GDB_FR_CP0_INDEX)(a0)
        MFC0( t1, C0_Random )
        sw      t1, GDB_REG32_OFS(GDB_FR_CP0_RANDOM)(a0)
        MFC0( t1, C0_EntryLo0 )
        sw      t1, GDB_REG32_OFS(GDB_FR_CP0_ENTRYLO0)(a0)
        /* NOTE(review): sys_store_control_regs continues beyond this
         * excerpt (end of page 1 of 4). */
/* [Viewer UI text removed (keyboard-shortcut help panel).
 *  The source file continues in the next chunk (page 2 of 4).] */