📄 frv.c
字号:
/* NOTE(review): tail of the preceding GR halfword accessor; its header is
   before the visible span.  Returns the low 16 bits of GR 'gr'.  */
  return GET_H_GR(gr) & 0xffff;
}

/* Set the low halfword of general register GR, preserving the high
   halfword.  */
void
frvbf_h_gr_lo_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff0000) | (newval & 0xffff);
  SET_H_GR (gr, value);
}

/* Cover fns to access the tbr bits.  */

/* Assemble the TBR value from its fields: TBA in bits 31:12, TT in
   bits 11:4.  Bits 3:0 read as zero.  */
USI
spr_tbr_get_handler (SIM_CPU *current_cpu)
{
  int tbr = ((GET_H_TBR_TBA () & 0xfffff) << 12)
    | ((GET_H_TBR_TT () & 0xff) << 4);
  return tbr;
}

/* Scatter NEWVAL into the TBA and TT fields of the TBR.  */
void
spr_tbr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int tbr = newval;
  SET_H_TBR_TBA ((tbr >> 12) & 0xfffff);
  SET_H_TBR_TT ((tbr >> 4) & 0xff);
}

/* Cover fns to access the bpsr bits.  */

/* Assemble the BPSR value: BS in bit 12, BET in bit 0.  */
USI
spr_bpsr_get_handler (SIM_CPU *current_cpu)
{
  int bpsr = ((GET_H_BPSR_BS () & 0x1) << 12)
    | ((GET_H_BPSR_BET () & 0x1));
  return bpsr;
}

/* Scatter NEWVAL into the BS and BET fields of the BPSR.  */
void
spr_bpsr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int bpsr = newval;
  SET_H_BPSR_BS ((bpsr >> 12) & 1);
  SET_H_BPSR_BET ((bpsr) & 1);
}

/* Cover fns to access the psr bits.  */

/* Assemble the PSR value from its component fields (IMPLE 31:28,
   VER 27:24, ICE 16, NEM 14, CM 13, BE 12, ESR 11, EF 8, EM 7,
   PIL 6:3, S 2, PS 1, ET 0).  */
USI
spr_psr_get_handler (SIM_CPU *current_cpu)
{
  int psr = ((GET_H_PSR_IMPLE () & 0xf) << 28)
    | ((GET_H_PSR_VER () & 0xf) << 24)
    | ((GET_H_PSR_ICE () & 0x1) << 16)
    | ((GET_H_PSR_NEM () & 0x1) << 14)
    | ((GET_H_PSR_CM () & 0x1) << 13)
    | ((GET_H_PSR_BE () & 0x1) << 12)
    | ((GET_H_PSR_ESR () & 0x1) << 11)
    | ((GET_H_PSR_EF () & 0x1) << 8)
    | ((GET_H_PSR_EM () & 0x1) << 7)
    | ((GET_H_PSR_PIL () & 0xf) << 3)
    | ((GET_H_PSR_S () & 0x1) << 2)
    | ((GET_H_PSR_PS () & 0x1) << 1)
    | ((GET_H_PSR_ET () & 0x1));
  return psr;
}

/* Scatter NEWVAL into the individual PSR fields.  */
void
spr_psr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  /* The handler for PSR.S references the value of PSR.ESR, so set
     PSR.S first.  (Order matters: frvbf_h_psr_s_set_handler may swap
     the supervisor/user register context, which tests PSR.ESR.)  */
  SET_H_PSR_S ((newval >> 2) & 1);

  SET_H_PSR_IMPLE ((newval >> 28) & 0xf);
  SET_H_PSR_VER ((newval >> 24) & 0xf);
  SET_H_PSR_ICE ((newval >> 16) & 1);
  SET_H_PSR_NEM ((newval >> 14) & 1);
  SET_H_PSR_CM ((newval >> 13) & 1);
  SET_H_PSR_BE ((newval >> 12) & 1);
  SET_H_PSR_ESR ((newval >> 11) & 1);
  SET_H_PSR_EF ((newval >> 8) & 1);
  SET_H_PSR_EM ((newval >> 7) & 1);
  SET_H_PSR_PIL ((newval >> 3) & 0xf);
  SET_H_PSR_PS ((newval >> 1) & 1);
  SET_H_PSR_ET ((newval) & 1);
}

/* Set PSR.S, switching the supervisor/user register context when the
   mode actually changes.  */
void
frvbf_h_psr_s_set_handler (SIM_CPU *current_cpu, BI newval)
{
  /* If switching from user to supervisor mode, or vice-versa, then
     switch the supervisor/user context.  */
  int psr_s = GET_H_PSR_S ();
  if (psr_s != (newval & 1))
    {
      frvbf_switch_supervisor_user_context (current_cpu);
      CPU (h_psr_s) = newval & 1;
    }
}

/* Cover fns to access the ccr bits.  */

/* Assemble the CCR value: ICC3..ICC0 in nibbles 7..4, FCC3..FCC0 in
   nibbles 3..0.  */
USI
spr_ccr_get_handler (SIM_CPU *current_cpu)
{
  int ccr = ((GET_H_ICCR (H_ICCR_ICC3) & 0xf) << 28)
    | ((GET_H_ICCR (H_ICCR_ICC2) & 0xf) << 24)
    | ((GET_H_ICCR (H_ICCR_ICC1) & 0xf) << 20)
    | ((GET_H_ICCR (H_ICCR_ICC0) & 0xf) << 16)
    | ((GET_H_FCCR (H_FCCR_FCC3) & 0xf) << 12)
    | ((GET_H_FCCR (H_FCCR_FCC2) & 0xf) << 8)
    | ((GET_H_FCCR (H_FCCR_FCC1) & 0xf) << 4)
    | ((GET_H_FCCR (H_FCCR_FCC0) & 0xf));
  return ccr;
}

/* Scatter NEWVAL into the ICC and FCC fields.  */
void
spr_ccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int ccr = newval;  /* NOTE(review): unused local, kept as in original.  */

  SET_H_ICCR (H_ICCR_ICC3, (newval >> 28) & 0xf);
  SET_H_ICCR (H_ICCR_ICC2, (newval >> 24) & 0xf);
  SET_H_ICCR (H_ICCR_ICC1, (newval >> 20) & 0xf);
  SET_H_ICCR (H_ICCR_ICC0, (newval >> 16) & 0xf);
  SET_H_FCCR (H_FCCR_FCC3, (newval >> 12) & 0xf);
  SET_H_FCCR (H_FCCR_FCC2, (newval >> 8) & 0xf);
  SET_H_FCCR (H_FCCR_FCC1, (newval >> 4) & 0xf);
  SET_H_FCCR (H_FCCR_FCC0, (newval) & 0xf);
}

/* Update ICC after a right shift of VALUE by SHIFT bits.  */
QI
frvbf_set_icc_for_shift_right (SIM_CPU *current_cpu, SI value, SI shift,
			       QI icc)
{
  /* Set the C flag of the given icc to the logical OR of the bits
     shifted out.  */
  int mask = (1 << shift) - 1;
  if ((value & mask) != 0)
    return icc | 0x1;

  return icc & 0xe;
}

/* Update ICC after a left shift of VALUE by SHIFT bits.  */
QI
frvbf_set_icc_for_shift_left (SIM_CPU *current_cpu, SI value, SI shift,
			      QI icc)
{
  /* Set the V flag of the given icc to the logical OR of the bits
     shifted out.  */
  int mask = ((1 << shift) - 1) << (32 - shift);
  if ((value & mask) != 0)
    return icc | 0x2;

  return icc & 0xd;
}

/* Cover fns to access the cccr bits.  */

/* Assemble the CCCR value: CC7..CC0 as 2-bit fields in bits 15:0.  */
USI
spr_cccr_get_handler (SIM_CPU *current_cpu)
{
  int cccr = ((GET_H_CCCR (H_CCCR_CC7) & 0x3) << 14)
    | ((GET_H_CCCR (H_CCCR_CC6) & 0x3) << 12)
    | ((GET_H_CCCR (H_CCCR_CC5) & 0x3) << 10)
    | ((GET_H_CCCR (H_CCCR_CC4) & 0x3) << 8)
    | ((GET_H_CCCR (H_CCCR_CC3) & 0x3) << 6)
    | ((GET_H_CCCR (H_CCCR_CC2) & 0x3) << 4)
    | ((GET_H_CCCR (H_CCCR_CC1) & 0x3) << 2)
    | ((GET_H_CCCR (H_CCCR_CC0) & 0x3));
  return cccr;
}

/* Scatter NEWVAL into the CC7..CC0 fields.  */
void
spr_cccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int cccr = newval;  /* NOTE(review): unused local, kept as in original.  */

  SET_H_CCCR (H_CCCR_CC7, (newval >> 14) & 0x3);
  SET_H_CCCR (H_CCCR_CC6, (newval >> 12) & 0x3);
  SET_H_CCCR (H_CCCR_CC5, (newval >> 10) & 0x3);
  SET_H_CCCR (H_CCCR_CC4, (newval >> 8) & 0x3);
  SET_H_CCCR (H_CCCR_CC3, (newval >> 6) & 0x3);
  SET_H_CCCR (H_CCCR_CC2, (newval >> 4) & 0x3);
  SET_H_CCCR (H_CCCR_CC1, (newval >> 2) & 0x3);
  SET_H_CCCR (H_CCCR_CC0, (newval) & 0x3);
}

/* Cover fns to access the sr bits.  */

/* Read SPR (one of SR0-SR3), which aliases GR4-GR7 when PSR.ESR is
   clear.  */
USI
spr_sr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be
     GR4-7, otherwise the correct mapping of USG4-7 or SGR4-7 will be
     in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    return GET_H_GR (4 + (spr - H_SPR_SR0));

  return CPU (h_spr[spr]);
}

/* Write SPR (one of SR0-SR3); see spr_sr_get_handler for the aliasing
   rule.  */
void
spr_sr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be
     GR4-7, otherwise the correct mapping of USG4-7 or SGR4-7 will be
     in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    SET_H_GR (4 + (spr - H_SPR_SR0), newval);
  else
    CPU (h_spr[spr]) = newval;
}

/* Switch SR0-SR4 with GR4-GR7 if PSR.ESR is set.  */
void
frvbf_switch_supervisor_user_context (SIM_CPU *current_cpu)
{
  if (GET_H_PSR_ESR ())
    {
      /* We need to be in supervisor mode to swap the registers.  Access
	 the PSR.S directly in order to avoid recursive context
	 switches.  */
      int i;
      int save_psr_s = CPU (h_psr_s);
      CPU (h_psr_s) = 1;
      for (i = 0; i < 4; ++i)
	{
	  int gr = i + 4;
	  int spr = i + H_SPR_SR0;
	  SI tmp = GET_H_SPR (spr);
	  SET_H_SPR (spr, GET_H_GR (gr));
	  SET_H_GR (gr, tmp);
	}
      CPU (h_psr_s) = save_psr_s;
    }
}

/* Handle load/store of quad registers.  */

/* Load 4 consecutive words at ADDRESS into GR TARG_IX..TARG_IX+3.  */
void
frvbf_load_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
	{
	  value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
	  address += 4;
	}
      sim_queue_fn_xi_write (current_cpu, frvbf_h_gr_quad_set_handler,
			     targ_ix, value);
    }
}

/* Store GR SRC_IX..SRC_IX+3 to 4 consecutive words at ADDRESS.  */
void
frvbf_store_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    {
      /* GR0 is always 0.  */
      if (src_ix == 0)
	value[i] = 0;
      else
	value[i] = GET_H_GR (src_ix + i);
    }
  hsr0 = GET_HSR0 ();
  /* Route through the cache-flushing write when the data cache is
     enabled (HSR0.DCE).  */
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

/* Load 4 consecutive words at ADDRESS into FR TARG_IX..TARG_IX+3.  */
void
frvbf_load_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
	{
	  value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
	  address += 4;
	}
      sim_queue_fn_xi_write (current_cpu, frvbf_h_fr_quad_set_handler,
			     targ_ix, value);
    }
}

/* Store FR SRC_IX..SRC_IX+3 to 4 consecutive words at ADDRESS.  */
void
frvbf_store_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_fr_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_FR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

/* Load 4 consecutive words at ADDRESS into CPR TARG_IX..TARG_IX+3.  */
void
frvbf_load_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
	{
	  value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
	  address += 4;
	}
      sim_queue_fn_xi_write (current_cpu, frvbf_h_cpr_quad_set_handler,
			     targ_ix, value);
    }
}

/* Store CPR SRC_IX..SRC_IX+3 to 4 consecutive words at ADDRESS.  */
void
frvbf_store_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_CPR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

/* Perform ARG1 / ARG2 (signed), queueing the result into GR
   TARGET_INDEX and raising overflow / divide-by-zero exceptions as
   required.  NON_EXCEPTING selects the non-excepting insn variant.  */
void
frvbf_signed_integer_divide (SIM_CPU *current_cpu, SI arg1, SI arg2,
			     int target_index, int non_excepting)
{
  enum frv_dtt dtt = FRV_DTT_NO_EXCEPTION;
  if (arg1 == 0x80000000 && arg2 == -1)
    {
      /* 0x80000000/(-1) must result in 0x7fffffff when ISR.EDE is set
	 otherwise it may result in 0x7fffffff (sparc compatibility) or
	 0x80000000 (C language compatibility).  */
      USI isr;
      dtt = FRV_DTT_OVERFLOW;

      isr = GET_ISR ();
      if (GET_ISR_EDE (isr))
	sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
			       0x7fffffff);
      else
	sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
			       0x80000000);
      frvbf_force_update (current_cpu); /* Force update of target register.  */
    }
  else if (arg2 == 0)
    dtt = FRV_DTT_DIVISION_BY_ZERO;
  else
    sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
			   arg1 / arg2);

  /* Check for exceptions.  */
  if (dtt != FRV_DTT_NO_EXCEPTION)
    dtt = frvbf_division_exception (current_cpu, dtt, target_index,
				    non_excepting);
  if (non_excepting && dtt == FRV_DTT_NO_EXCEPTION)
    {
      /* Non excepting instruction.  Clear the NE flag for the target
	 register.  */
      SI NE_flags[2];
      GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
      CLEAR_NE_FLAG (NE_flags, target_index);
      SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
    }
}

/* Perform ARG1 / ARG2 (unsigned), queueing the result into GR
   TARGET_INDEX and raising divide-by-zero as required.  */
void
frvbf_unsigned_integer_divide (SIM_CPU *current_cpu, USI arg1, USI arg2,
			       int target_index, int non_excepting)
{
  if (arg2 == 0)
    frvbf_division_exception (current_cpu, FRV_DTT_DIVISION_BY_ZERO,
			      target_index, non_excepting);
  else
    {
      sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
			     arg1 / arg2);
      if (non_excepting)
	{
	  /* Non excepting instruction.  Clear the NE flag for the
	     target register.  */
	  SI NE_flags[2];
	  GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
	  CLEAR_NE_FLAG (NE_flags, target_index);
	  SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
	}
    }
}

/* Clear accumulators.  ACC_IX selects one accumulator; A != 0 with
   ACC_IX == 0 means clear all implemented accumulators.  The set of
   implemented accumulators depends on the machine variant.  */
void
frvbf_clear_accumulators (SIM_CPU *current_cpu, SI acc_ix, int A)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  /* Per-variant mask of implemented accumulator indices.  */
  int acc_mask =
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500) ? 7 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550) ? 7 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450) ? 11 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400) ? 3 :
    63;
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);

  ps->mclracc_acc = acc_ix;
  ps->mclracc_A = A;
  if (A == 0 || acc_ix != 0) /* Clear 1 accumulator?  */
    {
      /* This instruction is a nop if the referenced accumulator is not
	 implemented.  */
      if ((acc_ix & acc_mask) == acc_ix)
	sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, acc_ix, 0);
    }
  else
    {
      /* Clear all implemented accumulators.  */
      int i;
      for (i = 0; i <= acc_mask; ++i)
	if ((i & acc_mask) == i)
	  sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, i, 0);
    }
}

/* Functions to aid insn semantics.  */

/* Compute the result of the SCAN and SCANI insns after the shift and
   xor: the bit position (from the MSB) of the first set bit, or 63 if
   VALUE is zero.  */
SI
frvbf_scan_result (SIM_CPU *current_cpu, SI value)
{
  SI i;
  SI mask;

  if (value == 0)
    return 63;

  /* Find the position of the first non-zero bit.
     The loop will terminate since there is guaranteed to be at least
     one non-zero bit.  */
  mask = 1 << (sizeof (mask) * 8 - 1);
  for (i = 0; (value & mask) == 0; ++i)
    value <<= 1;

  return i;
}

/* Compute the result of the cut insns: extract 32 bits starting at bit
   CUT_POINT from the 64-bit concatenation REG1:REG2.  */
SI
frvbf_cut (SIM_CPU *current_cpu, SI reg1, SI reg2, SI cut_point)
{
  SI result;
  if (cut_point < 32)
    {
      result = reg1 << cut_point;
      /* NOTE(review): for cut_point == 0 the masked shift below is a
	 shift by 32; the mask is 0 so the value is unused, but the
	 shift itself is formally undefined in C — presumably benign on
	 the supported hosts.  */
      result |= (reg2 >> (32 - cut_point)) & ((1 << cut_point) - 1);
    }
  else
    result = reg2 << (cut_point - 32);

  return result;
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -