r4k_cp0.c
                                      vAddr, BDOOR_LOAD_WORD, &buffer);
         returnFlag = rval ? BUSERROR : SUCCESS;
         break;
      case ld_op: /* XXX backward compatibility XXX */
      case ldc2_op:
         rval = ((MagicFunction)dat)(cpuNum, vAddr, BDOOR_LOAD_DOUBLE,
                                     &buffer);
         returnFlag = rval ? BUSERROR : SUCCESS;
         break;
      case sb_op:
         ((unsigned char *)&buffer)[0] = 0xff & EMP[cpuNum].R[rt(instr)];
         rval = ((MagicFunction)dat)(cpuNum, vAddr, BDOOR_STORE_BYTE,
                                     &buffer);
         returnFlag = rval ? BUSERROR : SUCCESS;
         break;
      case sh_op:
         ((unsigned short *)&buffer)[0] = 0xffff & EMP[cpuNum].R[rt(instr)];
         rval = ((MagicFunction)dat)(cpuNum, vAddr, BDOOR_STORE_HALF,
                                     &buffer);
         returnFlag = rval ? BUSERROR : SUCCESS;
         break;
      case sw_op:
         ((uint *)&buffer)[0] = EMP[cpuNum].R[rt(instr)];
         rval = ((MagicFunction)dat)(cpuNum, vAddr, BDOOR_STORE_WORD,
                                     &buffer);
         returnFlag = rval ? BUSERROR : SUCCESS;
         break;
#if defined(SIM_MIPS64)
      case sd_op:
         ((Reg64 *)&buffer)[0] = EMP[cpuNum].R[rt(instr)];
         rval = ((MagicFunction)dat)(cpuNum, vAddr, BDOOR_STORE_DOUBLE,
                                     &buffer);
         returnFlag = rval ? BUSERROR : SUCCESS;
         break;
#else
      case sd_op: /* XXX backward compatibility XXX */
#endif
      case sdc2_op:
         ((uint *)&buffer)[0] = EMP[cpuNum].R[rt(instr)];
         ((uint *)&buffer)[1] = EMP[cpuNum].R[rt(instr)+1];
         rval = ((MagicFunction)dat)(cpuNum, vAddr, BDOOR_STORE_DOUBLE,
                                     &buffer);
         returnFlag = rval ? BUSERROR : SUCCESS;
         break;
      default:
         CPUError("Data addressed functions are only triggered from [ls][bhwd]\n");
      }

      if (returnFlag == SUCCESS) {
         *pAddr = (PA) &buffer;
      } else if (returnFlag == BUSERROR) {
         *pAddr = NULL;
         if (LATCH_BADVADDR(sr_reg))
            EMP[cpuNum].CP0[C0_BADVADDR] = vAddr;
         Em_EXCEPTION(cpuNum, EXC_DBE, 0);
         return EXCEPTION_CODE;
      } else {
         CPUError("Unknown result type: 0x%x\n", returnFlag);
      }
   }
}
#ifdef NOTDEF
   /* Log all backdoor writes since you're totally paranoid now */
   LogEntry("BACKDOOR", cpuNum, "0x%x 0x%x\n", vAddr, *pAddr);
#endif
   return BACKDOOR_CODE;

firewall:
   {
      /* first check if line became incoherent during low-level recovery */
      if (SimMagic_IsIncoherent(*pAddr)) {
         if (LATCH_BADVADDR(sr_reg))
            EMP[cpuNum].CP0[C0_BADVADDR] = vAddr;
         Em_EXCEPTION(cpuNum, (act==ACT_IREAD) ? EXC_IBE : EXC_DBE, 0);
         return EXCEPTION_CODE;
      }
      /* come here if access is fine but firewall needs to be checked */
      if (writing && !CPUVec.CheckFirewall(cpuNum, *pAddr)) {
         if (LATCH_BADVADDR(sr_reg))
            EMP[cpuNum].CP0[C0_BADVADDR] = vAddr;
         Em_EXCEPTION(cpuNum, EXC_DBE, 0);
         CPUWarning("Fwall violation: "
                    "cpu %d vaddr 0x%x paddr 0x%x\n",
                    cpuNum, vAddr, *pAddr);
         return EXCEPTION_CODE;
      }
      /* if qc changes because of R4000 this might need changes!! */
      /* if address is k0seg, then add phys->mem mapping to mmu so
       * we don't have to translate again. */
      /* DISABLED: we now preemptively put all the k0seg translations
       * in in qc_renew, then zero out the ones with annotations
       * in EmbraInstallMemAnnotation. So we don't need to worry
       * about putting more here (and could disable annotations if
       * we accidentally did so). */
#ifdef notdef
      if ((vAddr > K0BASE && vAddr < K0BASE + K0SIZE) &&
          (EMP[cpuNum].mmu[PAGE_NUMBER(vAddr)] == 0)) {
         PA pa = (PA)PHYS_TO_MEMADDR(M_FROM_CPU(cpuNum),
                                     FORM_ADDR(PAGE_NUMBER(*pAddr), 0));
         EMP[cpuNum].mmu[PAGE_NUMBER(vAddr)] =
            (embra.emode == EMBRA_PAGE && CheckFirewall(cpuNum, *pAddr)) ?
            MMU_PROT_WRITE(pa) : MMU_PROT_READ(pa);
      }
#endif
      return NORMAL_CODE;
   }
}
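/*
 * The cases above all follow one pattern: decode the access size from
 * the opcode, hand the registered magic function a bounce buffer, and
 * map a nonzero return onto the bus-error path. The disabled sketch
 * below illustrates that dispatch shape in isolation; ToyMagicFunction,
 * toy_backdoor_access and the TOY_* opcodes are hypothetical stand-ins
 * for illustration, not SimOS interfaces.
 */
#ifdef NOTDEF
#include <stdio.h>
#include <string.h>

typedef int (*ToyMagicFunction)(int cpuNum, unsigned vAddr,
                                int op, void *buffer);

enum { TOY_LOAD_WORD, TOY_STORE_WORD };

/* Hypothetical handler backing a magic "counter" register. */
static int
toy_backdoor_access(int cpuNum, unsigned vAddr, int op, void *buffer)
{
   static unsigned counter;
   (void)cpuNum; (void)vAddr;
   switch (op) {
   case TOY_LOAD_WORD:      /* read: expose the counter value */
      memcpy(buffer, &counter, sizeof counter);
      return 0;             /* 0 == success, as in the cases above */
   case TOY_STORE_WORD:     /* write: update the counter */
      memcpy(&counter, buffer, sizeof counter);
      return 0;
   default:
      return 1;             /* nonzero selects the bus-error path */
   }
}

int
main(void)
{
   ToyMagicFunction fn = toy_backdoor_access;
   unsigned buffer = 42;

   /* A store followed by a load, mirroring the sw_op/lw_op cases. */
   if (fn(0, 0x1000, TOY_STORE_WORD, &buffer) == 0 &&
       fn(0, 0x1000, TOY_LOAD_WORD, &buffer) == 0)
      printf("backdoor word: %u\n", buffer);   /* prints 42 */
   return 0;
}
#endif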
/*****************************************************************
 * Is_Tlb_Writable -
 * See if the given (VPN, ASID) pair holds a writable (dirty bit is
 * set) mapping in the TLB.
 *****************************************************************/
int
Em_Is_Tlb_Writable( int cpuNum, VA vAddr, ASID asid )
{
   int tlb_index;

   if( IS_KSEG0(vAddr) )   /* KSEG1 also unmapped */
      return 1;

   /* Check the TLB hash table */
   tlb_index = Tlb_Lookup( cpuNum, GET_REGION(vAddr),
                           CONVERT_TO_VPN2(PAGE_NUMBER(vAddr)), asid );
   if( tlb_index ) {
      tlb_index--;
      if (IS_LO_0(PAGE_NUMBER(vAddr)))
         return( IS_DIRTY(EMP[cpuNum].tlbEntry[tlb_index].Lo0) );
      else
         return( IS_DIRTY(EMP[cpuNum].tlbEntry[tlb_index].Lo1) );
   }
   return 0;
}

/*****************************************************************
 * Is_Tlb_Readable -
 * See if the given (VPN, ASID) pair holds a valid mapping in the
 * TLB.
 *****************************************************************/
int
Em_Is_Tlb_Readable( int cpuNum, VA vAddr, ASID asid )
{
   int tlb_index;

   if( IS_KSEG0(vAddr) )
      return 1;

   /* Check the TLB hash table */
   tlb_index = Tlb_Lookup( cpuNum, GET_REGION(vAddr),
                           CONVERT_TO_VPN2(PAGE_NUMBER(vAddr)), asid );
   if( tlb_index ) {
      tlb_index--;
      if (IS_LO_0(PAGE_NUMBER(vAddr)))
         return( IS_VALID(EMP[cpuNum].tlbEntry[tlb_index].Lo0) );
      else
         return( IS_VALID(EMP[cpuNum].tlbEntry[tlb_index].Lo1) );
   }
   return 0;
}

/*****************************************************************
 * ProbeTLB -
 * Probe the TLB for a matching entry. The Index register is loaded
 * with the index of the TLB entry whose contents match the
 * contents of the EntryHi register. If no TLB entry matches, the
 * high-order bit of the Index register is set.
 *
 * Multiple matches are not possible (they would have caused a TLB
 * shutdown error).
 *****************************************************************/
uint
Em_ProbeTLB(int cpuNum)
{
   int tlb_idx;

   ASSERT (curEmp->myNum == cpuNum);
   if (!IS_KERNEL_MODE(curEmp)) {
      EMP[cpuNum].CP0[C0_CAUSE] =
         CAUSE_SET_CE( EMP[cpuNum].CP0[C0_CAUSE], 0 );
      Em_EXCEPTION(cpuNum, EXC_CPU, 0);   /* Coprocessor Unusable */
      ReenterTC(curEmp);
   }

   tlb_idx = Tlb_Lookup( cpuNum,
                         GET_REGION( EMP[cpuNum].CP0[C0_TLBHI] ),
                         GET_VPN2( EMP[cpuNum].CP0[C0_TLBHI] ),
                         GET_ASID( EMP[cpuNum].CP0[C0_TLBHI] ) );
   if( tlb_idx ) {
      /* We matched */
      EMP[cpuNum].CP0[C0_INX] = (tlb_idx - 1) << TLBINX_INXSHIFT;
      return NORMAL_CODE;
   }

   /* Maintain the inclusion property of the mmu: since there was no
    * entry with that vpn2 and asid, we need to take out both pages
    * (even and odd) if present in the mmu. tlb_idx is 0 on this path,
    * so the probed address must come from EntryHi, not tlbEntry[]. */
   qc_erase_etlb(cpuNum, EMP[cpuNum].CP0[C0_TLBHI]);

   /* Probe Miss */
   EMP[cpuNum].CP0[C0_INX] = (Reg32_s)0x80000000;
   return NORMAL_CODE;
}
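/*
 * Em_Is_Tlb_Writable and Em_Is_Tlb_Readable both rely on the R4000's
 * paired-page TLB layout: one entry maps two consecutive virtual
 * pages under a single VPN2 tag, and the low bit of the page number
 * selects EntryLo0 (even page) or EntryLo1 (odd page). The disabled
 * sketch below shows just that selection; ToyTlbEntry, the toy_*
 * helpers and the 4 KB page size are illustrative assumptions, not
 * the simulator's real macros.
 */
#ifdef NOTDEF
#include <stdio.h>

#define TOY_PAGE_SHIFT 12                /* assume 4 KB pages */

typedef struct {
   unsigned vpn2;                        /* tag: page number >> 1 */
   unsigned lo0, lo1;                    /* per-page V/D bits, PFN, ... */
} ToyTlbEntry;

static unsigned
toy_page_number(unsigned va)
{
   return va >> TOY_PAGE_SHIFT;
}

/* Mirrors IS_LO_0(): even page numbers use Lo0, odd ones use Lo1. */
static unsigned
toy_select_lo(const ToyTlbEntry *e, unsigned va)
{
   return (toy_page_number(va) & 1) ? e->lo1 : e->lo0;
}

int
main(void)
{
   ToyTlbEntry e = { 0x2, 0x1 /* valid */, 0x3 /* valid+dirty */ };

   /* 0x4000 is page 4 (even -> lo0); 0x5000 is page 5 (odd -> lo1). */
   printf("0x4000 -> lo=0x%x\n", toy_select_lo(&e, 0x4000));
   printf("0x5000 -> lo=0x%x\n", toy_select_lo(&e, 0x5000));
   return 0;
}
#endif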
/*****************************************************************
 * ReadTLBEntry -
 * The EntryHi and EntryLo registers are loaded with the contents
 * of the TLB entry pointed at by the contents of the TLB Index
 * register. The results are unspecified if the contents of the
 * Index register are greater than the number of TLB entries.
 *****************************************************************/
uint
Em_ReadTLBEntry(int cpuNum)
{
   IndexReg index;

   ASSERT (curEmp->myNum == cpuNum);
   if (!IS_KERNEL_MODE(&EMP[cpuNum])) {
      EMP[cpuNum].CP0[C0_CAUSE] =
         CAUSE_SET_CE( EMP[cpuNum].CP0[C0_CAUSE], 0 );
      Em_EXCEPTION(cpuNum, EXC_CPU, 0);   /* Coprocessor Unusable */
      ReenterTC(curEmp);
      return EXCEPTION_CODE;
   }

   /* I'm going to assume that the index register is in the correct
    * range. */
   index = EMP[cpuNum].CP0[C0_INX];

   /* Even though it's not the intent of the instruction, this
    * resets the MMU context */
   qc_mmu_switch( cpuNum, CURRENT_ASID(cpuNum),
                  GET_ASID(EMP[cpuNum].tlbEntry[GET_IDX(index)].Hi), 0 );

   EMP[cpuNum].CP0[C0_PGMASK] = EMP[cpuNum].tlbEntry[GET_IDX(index)].PgMsk;
#if defined(SIM_MIPS32)
   ASSERT(EMP[cpuNum].tlbEntry[GET_IDX(index)].PgMsk == 0);
#endif
   EMP[cpuNum].CP0[C0_TLBHI] = EMP[cpuNum].tlbEntry[GET_IDX(index)].Hi
      & ~(Reg)EMP[cpuNum].tlbEntry[GET_IDX(index)].PgMsk
      & ~TLBHI_G;
   EMP[cpuNum].CP0[C0_TLBLO_0] = EMP[cpuNum].tlbEntry[GET_IDX(index)].Lo0;
   EMP[cpuNum].CP0[C0_TLBLO_1] = EMP[cpuNum].tlbEntry[GET_IDX(index)].Lo1;
   if (IS_GLOBAL_HI(EMP[cpuNum].tlbEntry[GET_IDX(index)].Hi)) {
      EMP[cpuNum].CP0[C0_TLBLO_0] |= TLBLO_G;
      EMP[cpuNum].CP0[C0_TLBLO_1] |= TLBLO_G;
   } else {
      EMP[cpuNum].CP0[C0_TLBLO_0] &= ~TLBLO_G;
      EMP[cpuNum].CP0[C0_TLBLO_1] &= ~TLBLO_G;
   }
   return NORMAL_CODE;
}

/*****************************************************************
 * Insert_TLB_HT
 * Insert a TLB entry into the TLB hash table. This is supposed to
 * be called after the entry has been inserted into the TLB Hi & Lo
 * structures.
 *****************************************************************/
static void
Insert_TLB_HT( int cpuNum, int idx )
{
   int hashNum;

   /* The hardware matching is independent of the valid bit; the
    * valid bit's sole purpose is to indicate which exception vector
    * to take. Thus we decide whether to add an entry based on its
    * virtual address. */
   if (IS_UNMAPPED_TLBHI(EMP[cpuNum].tlbEntry[idx].Hi))
      return;

   /* Insert all global entries under ASID 0 */
   if( IS_GLOBAL_HI( EMP[cpuNum].tlbEntry[idx].Hi ) ) {
      hashNum = TLBHash( GET_VPN2( EMP[cpuNum].tlbEntry[idx].Hi ),
                         GET_REGION( EMP[cpuNum].tlbEntry[idx].Hi ), 0 );
   } else {
      hashNum = TLBHash( GET_VPN2( EMP[cpuNum].tlbEntry[idx].Hi ),
                         GET_REGION( EMP[cpuNum].tlbEntry[idx].Hi ),
                         GET_ASID( EMP[cpuNum].tlbEntry[idx].Hi ) );
#if defined(SIM_MIPS32)
      if( GET_ASID( EMP[cpuNum].tlbEntry[idx].Hi ) == 0 )
         CPUWarning("Non-global ASID 0 entry written to TLB. %d 0x%x\n",
                    EmbraCpuCycleCount(cpuNum), EMP[cpuNum].PC);
#endif
   }
   List_Insert( &EMP[cpuNum].indexList[idx].links,
                LIST_ATFRONT(&EMP[cpuNum].tlbIndexHeaders[hashNum]) );
   EMP[cpuNum].indexList[idx].onList = 1;
}
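/*
 * Insert_TLB_HT files every global entry under ASID 0, which lets a
 * lookup find globals without knowing the ASID they were written
 * with: probe the (vpn2, asid) bucket first, then fall back to
 * (vpn2, 0). The disabled sketch below shows that convention with a
 * toy direct-mapped table; toy_hash/toy_insert/toy_lookup and the
 * table shape are assumptions for illustration (the real code chains
 * TLB indices on per-bucket lists), though the "index+1, 0 on miss"
 * return matches how Tlb_Lookup results are used above.
 */
#ifdef NOTDEF
#include <stdio.h>

#define TOY_BUCKETS 64

static int toyTable[TOY_BUCKETS];        /* 0 = empty, else idx+1 */

static unsigned
toy_hash(unsigned vpn2, unsigned asid)
{
   return (vpn2 ^ (asid << 3)) % TOY_BUCKETS;
}

static void
toy_insert(unsigned vpn2, unsigned asid, int isGlobal, int idx)
{
   /* File globals under ASID 0, like Insert_TLB_HT. */
   toyTable[toy_hash(vpn2, isGlobal ? 0 : asid)] = idx + 1;
}

static int
toy_lookup(unsigned vpn2, unsigned asid)
{
   int hit = toyTable[toy_hash(vpn2, asid)];
   if (!hit && asid != 0)                /* miss: fall back to globals */
      hit = toyTable[toy_hash(vpn2, 0)];
   return hit;                           /* idx+1, or 0 on a miss */
}

int
main(void)
{
   toy_insert(0x10, 5, 0, 7);            /* private entry for ASID 5 */
   toy_insert(0x20, 9, 1, 12);           /* global entry */

   printf("(0x10,5) -> %d\n", toy_lookup(0x10, 5));  /* 8:  idx 7  */
   printf("(0x20,1) -> %d\n", toy_lookup(0x20, 1));  /* 13: idx 12 */
   return 0;
}
#endif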
/*****************************************************************
 * Do_TLB_Write
 * Write the indexed TLB entry. The TLB entry pointed at by the
 * contents of the TLB Index register is loaded with the contents
 * of the EntryHi and EntryLo registers.
 * Update the QC array.
 *****************************************************************/
static uint
Do_TLB_Write(int cpuNum, int idx )
{
   Reg frameMask;

   if (!IS_KERNEL_MODE(&EMP[cpuNum])) {
      EMP[cpuNum].CP0[C0_CAUSE] =
         CAUSE_SET_CE( EMP[cpuNum].CP0[C0_CAUSE], 0 );
      Em_EXCEPTION(cpuNum, EXC_CPU, 0);   /* Coprocessor Unusable */
      ReenterTC(curEmp);
   }
#ifdef DEBUG_CP0
   CPUPrint("%d TLB Idx:%d Hi:0x%08x Lo0:0x%08x Lo1:0x%08x\n",
            cpuNum, idx,
            EMP[cpuNum].CP0[C0_TLBHI],
            EMP[cpuNum].CP0[C0_TLBLO_0],
            EMP[cpuNum].CP0[C0_TLBLO_1] );
#endif
   ASSERT( CURRENT_ASID(cpuNum) == GET_ASID(EMP[cpuNum].CP0[C0_TLBHI]) ||
           GET_ASID(EMP[cpuNum].CP0[C0_TLBHI]) == 0 );

   /* Remove the entry from the quick check. It is possible that e.g.
    * (VPN, ASID) (0x1000, 4) is being used, and we get a request to
    * remove (0x1000, 2). Since that entry is already out of our qc,
    * we don't want to modify it. */
   if( GET_ASID( EMP[cpuNum].tlbEntry[idx].Hi ) == CURRENT_ASID(cpuNum) ||
       IS_GLOBAL_HI( EMP[cpuNum].tlbEntry[idx].Hi ) ) {
      /* even page */
      /* Only remove QC state if we are removing an entry that is our
       * ASID or is global. This could simply remove the cache info. */
      Em_Tlb_Remove(cpuNum, idx);
      /*
       * being anal sometimes pays off.
       */
#ifndef EMBRA_USE_QC64
      if (embra.emode == EMBRA_CACHE) {
         uint base = TLBHI2ADDR(EMP[cpuNum].tlbEntry[idx].Hi);
         if (!IS_KSEG0(base)) {
            int i;
            ASSERT (!(PAGE_NUMBER(base)&1));
            ASSERT (!EMP[cpuNum].mmu[PAGE_NUMBER(base)]);
            ASSERT (!EMP[cpuNum].mmu[PAGE_NUMBER(base)+1]);
            for (i = 0; i < 2*LINES_PER_PAGE; i++) {
               ASSERT (!EMP[cpuNum].qc_v[ADDR2SLINE(base)+i]);
            }
         }
      }
#endif
   }

   /* Remove the old entry from the hash list */
   if( EMP[cpuNum].indexList[idx].onList ) {
      List_Remove( &EMP[cpuNum].indexList[idx].links );
      EMP[cpuNum].indexList[idx].onList = 0;
   }

   if (IS_R10000(&EMP[cpuNum])) {
      frameMask = (EMP[cpuNum].CP0[C0_FRAMEMASK] << TLBFRAMESHIFT);
   } else {
      frameMask = 0;
   }

   /* Actually write the TLB data structure */
   EMP[cpuNum].tlbEntry[idx].PgMsk = EMP[cpuNum].CP0[C0_PGMASK];
   EMP[cpuNum].tlbEntry[idx].Hi =
      EMP[cpuNum].CP0[C0_TLBHI] & ~(Reg)EMP[cpuNum].CP0[C0_PGMASK];
   EMP[cpuNum].tlbEntry[idx].Lo0 =
      (EMP[cpuNum].CP0[C0_TLBLO_0] & ~TLBLO_G) & ~frameMask;
   EMP[cpuNum].tlbEntry[idx].Lo1 =
      (EMP[cpuNum].CP0[C0_TLBLO_1] & ~TLBLO_G) & ~frameMask;
   EMP[cpuNum].tlbEntrySize[idx] =
      ComputeTlbEntrySize( EMP[cpuNum].tlbEntry[idx].PgMsk );
   if (IS_GLOBAL_LO(EMP[cpuNum].CP0[C0_TLBLO_0]) &&
       IS_GLOBAL_LO(EMP[cpuNum].CP0[C0_TLBLO_1]))
      EMP[cpuNum].tlbEntry[idx].Hi |= TLBHI_G;

   /* Add the new entry, if necessary */
   Insert_TLB_HT( cpuNum, idx );

   /* Update the QC only if entry is valid, and either our asid or global */