/* r4k_cp0.c -- CP0 (coprocessor 0) emulation; recovered from a web-viewer dump */
/* ---- tail of Em_MoveFromC0: the function's opening lines are outside this
 * chunk; only comments are added below, code tokens are unchanged ---- */

/* CP0 reads are privileged: from user mode raise coprocessor-unusable. */
if (!IS_KERNEL_MODE(curEmp)) {
    Em_EXCEPTION(cpuNum,EXC_CPU,0);
    ReenterTC(&EMP[cpuNum]);
}
if (co_reg == C0_COUNT) {
    /* There is one thing which could change this */
    /* somebody wrote to C0_COUNT */
    /* COUNT is maintained lazily: fold in cycles elapsed since the last
     * sync point, truncate to the architected 32-bit width, resync. */
    EMP[cpuNum].CP0[C0_COUNT] +=
        (EmbraCpuCycleCount(cpuNum) - EMP[cpuNum].timerCycleCount)/
        COUNTER_FREQUENCY_DIVIDER;
    EMP[cpuNum].CP0[C0_COUNT] &= 0xffffffff;
    EMP[cpuNum].timerCycleCount = EmbraCpuCycleCount(cpuNum);
} else if (co_reg == C0_RAND) {
    /* RANDOM is modeled as a cycle-count-derived index in the
     * non-wired range [TLBWIRED, numTlbEntries). */
    unsigned numRandEntries = EMP[cpuNum].numTlbEntries -
        EMP[cpuNum].CP0[C0_TLBWIRED];
    EMP[cpuNum].CP0[C0_RAND] = EMP[cpuNum].CP0[C0_TLBWIRED] +
        ((uint32)EmbraCpuCycleCount(cpuNum)) % numRandEntries;
}
if (rs(instr) == mfc_op) {
    /* 32-bit move from CP0: sign-extend into the GPR. */
    EMP[cpuNum].R[gp_reg] = (Reg32_s) EMP[cpuNum].CP0[co_reg];
} else if (rs(instr) == dmfc_op) {
    /* 64-bit move from CP0: copy unchanged. */
    EMP[cpuNum].R[gp_reg] = EMP[cpuNum].CP0[co_reg];
} else {
    ASSERT(0);
}
return NORMAL_CODE;
}

/* Per-CP0-register write-control table (read-only flag, size, must-be-zero
 * mask); contents come from the CP0_REG_CONTROL_ARRAY initializer. */
static Cp0RegControl cp0RegCtl[NUM_CP0_REGS] = CP0_REG_CONTROL_ARRAY;

/*****************************************************************
 * MoveToC0
 *
 * Emulate mtc0/dmtc0: write GPR rt(instr) into CP0 register
 * rd(instr), with per-register side effects (TLBHI ASID switch,
 * SR mode/interrupt recheck, COUNT/COMPARE timer re-arm, CAUSE
 * software-interrupt bits).  Returns NORMAL_CODE.
 ****************************************************************/
uint
Em_MoveToC0(int cpuNum, Inst instr, VA nextPC )
{
    unsigned gp_reg = rt(instr);
    unsigned co_reg = rd(instr);
    Reg val;
    int bd = IN_BD( EMP[cpuNum].PC );     /* executing in a branch delay slot? */
    int mode = CURRENT_MODE(&EMP[cpuNum]);

    /* CP0 writes are privileged; fault out of user mode. */
    if (!IS_KERNEL_MODE(curEmp)) {
        Em_EXCEPTION(cpuNum,EXC_CPU,0);
        ReenterTC(&EMP[cpuNum]);
    }
    /* ASSERT( gp_reg < 32 && co_reg < 32 );*/
    if (cp0RegCtl[co_reg].read_only) {
        /* Writes to read-only registers are silently dropped. */
        return NORMAL_CODE;
    }
    if (cp0RegCtl[co_reg].size == -1) {
        CPUWarning("WriteC0Reg write to invalid reg %d\n",co_reg);
    }
    val = EMP[cpuNum].R[gp_reg];
    if (rs(instr) == mtc_op) {
        val = (Reg32_s) val;       /* 32-bit move: sign-extend the source GPR */
    } else if (rs(instr) == dmtc_op) {
        /* val = val; */           /* 64-bit move: use the GPR as-is */
    } else {
        ASSERT(0);
    }
    if ((cp0RegCtl[co_reg].zero_mask & val) != 0) {
        /* Trying to write must be zero bits, force them to zero. */
        val &= ~cp0RegCtl[co_reg].zero_mask;
        /* Print a warning message for the programmer. Special
         * case 32bit signed-extention bits that set high zero bits
         * of 64 bit registers (IRIX 6.2 does this). */
    }
    switch( co_reg ) {
    case C0_TLBHI: {
        uint old_asid = GET_ASID(EMP[cpuNum].CP0[C0_TLBHI]);
        uint new_asid = GET_ASID(val);
        /* This is an MMU context switch */
        qc_mmu_switch( cpuNum, old_asid, new_asid, 0 );
        EMP[cpuNum].CP0[C0_TLBHI] = val & TLBHI_FILLMASK;
    }
        break;
    case C0_SR: {
        int modeChange = 0;        /* NOTE(review): unused */
        /* Check for interrupts both before and after status register */
        /* changes this lets us catch the intr whether we are raising or */
        /* lowering intrs */
        /* This should never succeed. Either we delivered the
           interupt via a callback or it was masked */
        if( Update_And_Check_Interrupts( cpuNum,0 ) ) {
            /* Take interrupt before mtc0 */
#ifdef DEBUG_EMBRA_INTR
            char* typeOfInt = "";
            int hwIntrBits =(EMP[cpuNum].CP0[C0_CAUSE] & CAUSE_IPMASK)>>CAUSE_IPSHIFT;
            /* XXX - Hand Copied from kern/ml/SIMMP.c */
            if( hwIntrBits & 0x10 ) { typeOfInt = "CLOCK"; }
            if( hwIntrBits & 0x04 ) { typeOfInt = "DISK or ETHER"; }
            if( hwIntrBits & 0x20 ) { typeOfInt = "IPI"; }
            CPUError("EMBRA mtc0 problem %lld %d EXC %d HWBits 0x%x %-5s PC 0x%x\n",
                     EmbraCpuCycleCount(cpuNum), cpuNum, EXC_INT>>2,
                     hwIntrBits, typeOfInt, EMP[cpuNum].CP0[C0_EPC]);
#endif
            ReenterTC(&EMP[cpuNum]);
            /* NOT REACHED */
        }
        if (embraprintsr) {
            /* Log transitions of the interrupt-mask (IM, bits 15:8) or
             * interrupt-enable (IEC, bit 0) fields for debugging. */
            if ((((EMP[cpuNum].CP0[co_reg] & 0xff00) >> 8) !=
                 ((EMP[cpuNum].R[gp_reg] & 0xff00) >> 8)) ||
                ((EMP[cpuNum].CP0[co_reg] & 0x1) !=
                 (EMP[cpuNum].R[gp_reg] & 0x1))) {
                LogEntry("IMASK mtSR", cpuNum,
                         "\tIM %x IEC %x -> NIM %x NIEC %x SR %x PC %x RA %x\n",
                         (EMP[cpuNum].CP0[co_reg] & 0xff00) >> 8,
                         (EMP[cpuNum].CP0[co_reg] & 0x1),
                         (EMP[cpuNum].R[gp_reg] & 0xff00) >> 8,
                         (EMP[cpuNum].R[gp_reg] & 0x1),
                         EMP[cpuNum].R[gp_reg],
                         EMP[cpuNum].PC, EMP[cpuNum].R[31]);
            }
        }
        EMP[cpuNum].CP0[co_reg] = val;
        UpdateCPUMode(&EMP[cpuNum]);
        /* When the status register changes see if there are any pending */
        /* interrupts at this (possibly new) level */
        if( Update_And_Check_Interrupts( cpuNum,
                (bd?nextPC:EMP[cpuNum].PC+INST_SIZE))){
            /*
             * We took interrupt after mtc0. Skip it when we return.
             */
            ReenterTC(&EMP[cpuNum]);
            /* NOT REACHED */
        }
    }
        break;
    case C0_COUNT:
        /* Writing COUNT resyncs the lazy counter and re-arms the timer. */
        EMP[cpuNum].CP0[co_reg] = val;
        EMP[cpuNum].CP0[co_reg] &= 0xffffffff;
        EMP[cpuNum].timerCycleCount = EmbraCpuCycleCount(cpuNum);
        if (EventCallbackActive(&(timerCallbackHdr[cpuNum]))) {
            EventCallbackRemove(&(timerCallbackHdr[cpuNum]));
        }
        EmbraSetTimerCallback(cpuNum);
        break;
    case C0_COMPARE:
        /* Writing COMPARE clears the pending timer interrupt (IP8 in
         * CAUSE) and re-arms the timer callback. */
        EMP[cpuNum].CP0[co_reg] = val;
        EMP[cpuNum].CP0[co_reg] &= 0xffffffff;
        EMP[cpuNum].CP0[C0_CAUSE] &= ~CAUSE_IP8;
        if (EventCallbackActive(&(timerCallbackHdr[cpuNum]))) {
            EventCallbackRemove(&(timerCallbackHdr[cpuNum]));
        }
        EmbraSetTimerCallback(cpuNum);
        break;
    case C0_CAUSE:
        /* Only CAUSE_SW1 and CAUSE_SW2 are writable bits */
        EMP[cpuNum].CP0[C0_CAUSE] &= ~(CAUSE_SW2|CAUSE_SW1);
        EMP[cpuNum].CP0[C0_CAUSE] |= (val & (CAUSE_SW2|CAUSE_SW1));
        /* When the status register changes see if there are any pending */
        /* interrupts at this (possibly new) level */
        if( Update_And_Check_Interrupts( cpuNum,
                (bd?nextPC:EMP[cpuNum].PC+INST_SIZE))){
            /*
             * We took interrupt after mtc0. Skip it when we return.
             */
            ReenterTC(&EMP[cpuNum]);
            /* NOT REACHED */
        }
        break;
    default:
        EMP[cpuNum].CP0[co_reg] = val;
    }
    return NORMAL_CODE;
}

/****************************************************************
 * EmbraTimerCallback
 *
 * This is the routine called back on the event that
 * the C0_COMPARE == C0_COUNT, it is set when the
 * COMPARE register is written. Raise IP(7) in the
 * C0_CAUSE register to signal a timer interrupt is
 * pending. Note: IP(7) is cleared when when the
 * C0_COMPARE register is written. use DEV_IEC_MAGICERR
 * as it corresponds to hw interrupt bit #5 which raises
 * ip7 in the cause register.
****************************************************************/static void EmbraTimerCallback(int cpuNum, EventCallbackHdr *ECBhdr, void *empty) { EMP[cpuNum].CP0[C0_CAUSE] |= CAUSE_IP8; if (Update_And_Check_Interrupts(cpuNum,0)){ }}/***************************************************************** * EmbraSetTimerCallback * * Each processor has its own r4kTimerInfo and r4k_timerHdr * associated with it. When the Compare register is written to * we check current cycle count, and, projecting into the future the * diff btwn the new Compare reg value and the current cycle count, a * callback is set to fire in that calculate amt of time. It is tricky * only when they decide to write to the count reg, which is permited on * system initialization or to synchronize processors. Since we do not * update the Count register continually, our cycle count and the Count * register value could then be out-of-synch. The r4kTimerInfo variable * is used to track this discrepency. ****************************************************************/static void EmbraSetTimerCallback(int cpuNum){ SimTime timeInFuture; EMP[cpuNum].CP0[C0_COUNT] += (EmbraCpuCycleCount(cpuNum) - EMP[cpuNum].timerCycleCount)/ COUNTER_FREQUENCY_DIVIDER; EMP[cpuNum].CP0[C0_COUNT] &= 0xffffffff; EMP[cpuNum].timerCycleCount = EmbraCpuCycleCount(cpuNum); if (EMP[cpuNum].CP0[C0_COMPARE] >= EMP[cpuNum].CP0[C0_COUNT]) { timeInFuture = (EMP[cpuNum].CP0[C0_COMPARE] - EMP[cpuNum].CP0[C0_COUNT]); timeInFuture *= COUNTER_FREQUENCY_DIVIDER; } else { timeInFuture = 0x100000000LL - (EMP[cpuNum].CP0[C0_COUNT] - EMP[cpuNum].CP0[C0_COMPARE]); timeInFuture *= COUNTER_FREQUENCY_DIVIDER; } EventDoCallback(cpuNum, EmbraTimerCallback, &(timerCallbackHdr[cpuNum]), NULL, timeInFuture);}/* The kernel sets the cop1 usable flag to 0 when it first starts a *//* new context */ /* to limit the saving of FPU registers to those procs who use them. 
The *//* first time a process uses a FPU instruction it needs to trap into the *//* kernel for it to set this bit. *//* Called from callout in callout.s */void Em_RaiseC1Unusable( int cpuNum ) { ASSERT (curEmp->myNum == cpuNum); /* Insure that we got here properly */ ASSERT( !(EMP[cpuNum].CP0[C0_SR] & SR_CU1 ) ); /* I think its lousy to set the cause register before calling */ /* EXCEPTION, but it does simplify parameter passing */ Em_EXCEPTION(cpuNum, EXC_CPU, 1); ReenterTC(&EMP[cpuNum]); /* NOT REACHED */}/***************************************************************** * CacheOP ****************************************************************/uintEm_CacheOP(int cpuNum, Inst instr){ int mode = CURRENT_MODE(&EMP[cpuNum]); if (!IS_KERNEL_MODE(curEmp)) { static int haveWarned = FALSE; if (!haveWarned) { CPUWarning("Embra: CPU %d hit cache op @ 0x%llx not in kernel mode\n", cpuNum, (Reg64) curEmp->PC); haveWarned = TRUE; } Em_EXCEPTION(cpuNum,EXC_CPU,0); ReenterTC(&EMP[cpuNum]); } /* NOP for now */ return 0;}/***************************************************************** * UpdateCPUMode * * This function is shared between mipsy and embra * Update with care. Do not remove the embra specific * stuff. 
*****/
static void
UpdateCPUMode(CPUState *P)
{
    /* Recompute the processor's privilege mode, MMU selection and
     * addressing width from the C0_SR status register after a write. */
    StatusReg statusReg;
    statusReg.ts_data = P->CP0[C0_SR];
    /* Default is kernel: ERL or EXL set forces kernel regardless of KSU. */
    P->cpuMode = KERNEL_MODE;
    if (!statusReg.s32.ts_erl && !statusReg.s32.ts_exl) {
        ASSERT(statusReg.s32.ts_ksu != 3);   /* KSU == 3 is reserved */
        if (statusReg.s32.ts_ksu == 2) {
            P->cpuMode = USER_MODE;
        } else if (statusReg.s32.ts_ksu == 1) {
            P->cpuMode = SUPERVISOR_MODE;
        }
    }
#if defined(SIM_MIPS32)
    /* 64-bit addressing (UX/SX/KX) is unimplemented in the 32-bit build:
     * warn once, clear the bits, and write SR back. */
    if ((statusReg.s32.ts_ux) ||
        (statusReg.s32.ts_sx) ||
        (statusReg.s32.ts_kx)) {
        static int warned_64 = 0;
        if (!warned_64) {
            CPUWarning("Embra: 64-bit mode not implemented, ignoring SR write\n");
            warned_64 = 1;
        }
        statusReg.s32.ts_ux = 0;
        statusReg.s32.ts_sx = 0;
        statusReg.s32.ts_kx = 0;
        P->CP0[C0_SR] = statusReg.ts_data;
    }
#endif
    /* Don't allow reverse endian either. */
    if (statusReg.s32.ts_re) {
        static int warned_re = 0;
        if (!warned_re) {
            CPUWarning("Embra: reverse-endian mode not implemented, ignore SR write\n");
            warned_re = 1;
        }
        statusReg.s32.ts_re = 0;
    }
    if (DEBUG_INTR()) {
        LogEntry("CPUMode" , P->myNum,
                 " going to %d PC=0x%llx SR=%x erl=%d exl=%d ksu=%d\n",
                 P->cpuMode, (uint64) P->PC, P->CP0[C0_SR],
                 statusReg.s32.ts_erl,statusReg.s32.ts_exl,
                 statusReg.s32.ts_ksu);
    }
    P->notFRbit = (statusReg.s32.ts_fr == 0);   /* FR=0: 16 even FP regs */
    /* Select the MMU and addressing width for the new mode. */
    switch (P->cpuMode) {
    case KERNEL_MODE:
        P->mmu = P->kernelMMU;
        P->is32bitMode = (statusReg.s32.ts_kx == 0);
        break;
    case SUPERVISOR_MODE:
        P->mmu = P->userMMU;
        P->is32bitMode = (statusReg.s32.ts_sx == 0);
#if defined(SIM_MIPS32)
        /* this assert is incorrect for 64bit mode */
        ASSERT(IS_SUPERV_SEG(P->PC));
#endif
        break;
    case USER_MODE:
        P->mmu = P->userMMU;
        P->is32bitMode = (statusReg.s32.ts_ux == 0);
        ASSERT( IS_KUSEG(P->PC));
        break;
    default:
        ASSERT(0);
    }
}