/* qc.c */
}

void qc_tlb_replace_page( int cpuNum, int idx)
{
   qc_tlb_inval_page( cpuNum, idx);
}

/* Invalidates the vQC entry and destroys the backmap from the pQC. */
/* It does not change cache information in the pQC. */
void qc_cache_inval_page( int cpuNum, int idx)
{
   PLN pline;
   EntryHi hi  = EMP[cpuNum].tlbEntry[idx].Hi & (~TLBHI_G);
   EntryLo lo0 = EMP[cpuNum].tlbEntry[idx].Lo0;
   EntryLo lo1 = EMP[cpuNum].tlbEntry[idx].Lo1;

   if (!embra.useVQC)
      return;   /* physarray does not contain a backmap */
#ifdef DISABLE_QC
   return;   /* EXP */
#endif
   ASSERT( embra.emode == EMBRA_CACHE );

   if( ! IS_KSEG0( hi ) ) {
      bzero( &EMP[cpuNum].qc_v[TLBENT2SLINE(hi)], LINES_PER_PAGE );
      /* If we want, we can destroy the backmap from physical to */
      /* virtual address in phys_info. This should aid performance, */
      /* because now some needless virtual QC clobberings won't */
      /* happen, but it should not impact correctness. */
      if( IS_VALID(lo0) ) {
         for( pline = TLBENT2SLINE(lo0);
              pline < (TLBENT2SLINE(lo0) + LINES_PER_PAGE);
              pline++ ) {
            /* Note that some of the backpointers from this physical page */
            /* may still be valid because they may point to K0 addresses. */
            /* There is no need to zero these. */
            /* They can also point to other mapped pages (this kind of */
            /* aliasing is especially common with K2). */
            /* Therefore only destroy the backpointers to the virtual */
            /* page which was just unmapped. Leave the cache info. */
            if( TLBHI2ADDR(PQC_VLINEADDR(EMP[cpuNum].qc_p[pline])) ==
                TLBHI2ADDR(hi) ) {
               ASSERT(EMP[cpuNum].qc_v[PQC_VLINE(EMP[cpuNum].qc_p[pline])] ==
                      MEM_INVALID);
               /* This zeroes the vline, but leaves the cache information. */
               EMP[cpuNum].qc_p[pline] = PQC_STATUS(EMP[cpuNum].qc_p[pline]);
            }
         }
      }

      bzero( &EMP[cpuNum].qc_v[TLBENT2SLINE(hi)+LINES_PER_PAGE],
             LINES_PER_PAGE );
      /* Same treatment for the odd page of the TLB pair (lo1): only */
      /* destroy backpointers to the virtual page just unmapped, and */
      /* leave the cache information intact. */
      if( IS_VALID(lo1) ) {
         for( pline = TLBENT2SLINE(lo1);
              pline < (TLBENT2SLINE(lo1) + LINES_PER_PAGE);
              pline++ ) {
            if( TLBHI2ADDR(PQC_VLINEADDR(EMP[cpuNum].qc_p[pline])) ==
                TLBHI2ADDR(hi) ) {
               ASSERT(EMP[cpuNum].qc_v[PQC_VLINE(EMP[cpuNum].qc_p[pline])] ==
                      MEM_INVALID);
               /* This zeroes the vline, but leaves the cache information. */
               EMP[cpuNum].qc_p[pline] = PQC_STATUS(EMP[cpuNum].qc_p[pline]);
            }
         }
      }
   }
}
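/* ------------------------------------------------------------------- */
/* Illustrative sketch (not part of the original file): the code above  */
/* drops a pQC entry's virtual backpointer while keeping its cache      */
/* state by writing qc_p[pline] = PQC_STATUS(qc_p[pline]).  The block   */
/* below shows one plausible bit packing that makes this idiom work.    */
/* The SKETCH_* names and the 2-bit status layout are assumptions; the  */
/* real PQC_* macro definitions are not visible in this excerpt.        */
/* ------------------------------------------------------------------- */
#if 0
#include <assert.h>

typedef unsigned int sketch_pqc_t;

#define SKETCH_PQC_DIRTY        0x1u
#define SKETCH_PQC_SHARED       0x2u
#define SKETCH_PQC_STATUS_MASK  0x3u

#define SKETCH_PQC_SET_DIRTY(vline)   ((sketch_pqc_t)((vline) << 2) | SKETCH_PQC_DIRTY)
#define SKETCH_PQC_SET_SHARED(vline)  ((sketch_pqc_t)((vline) << 2) | SKETCH_PQC_SHARED)
#define SKETCH_PQC_VLINE(e)           ((e) >> 2)
#define SKETCH_PQC_STATUS(e)          ((e) & SKETCH_PQC_STATUS_MASK)

static void sketch_pqc_demo(void)
{
   sketch_pqc_t e = SKETCH_PQC_SET_DIRTY(0x1234);

   /* Unmapping the virtual page: drop the backpointer, keep the status. */
   e = SKETCH_PQC_STATUS(e);

   assert(SKETCH_PQC_VLINE(e) == 0);                 /* backmap destroyed */
   assert(SKETCH_PQC_STATUS(e) == SKETCH_PQC_DIRTY); /* cache info kept   */
}
#endif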
/* Always invalidate the previous virtual line. That way, physical */
/* entries are aliased only once. */
void set_qc_state( int cpuNum, VLN vline, PLN pline, int new_state )
{
   PA pAddr = SLINE2ADDR(pline);

#ifdef LOG_QC_UPDATE
   CPUPrint("LOG: %lld qc_p/pa update pAddr = %x newstate %x\n",
            EMP[cpuNum].cycleCount, pline, new_state);
#endif
   ASSERT(!CPUVec.CheckFirewall); /* this function doesn't support the firewall yet */

   /* WATCHPOINTS */
   if (annWatchpoints == TRUE) {
      if (AnnFMRangeCheck(vline << log2SCACHE_LINE_SIZE,
                          ANNFM_LD_TYPE | ANNFM_ST_TYPE)) {
         new_state = MEM_INVALID;
#if 0
         LogEntry("DEBUG", cpuNum, "Leaving cache line 0x%x empty\n", vline);
#endif
      }
   }

   if (new_state == MEM_D_EXCLUSIVE &&
       TCcoherence_is_code(PHYS_TO_MEMADDR(M_FROM_CPU(cpuNum), pAddr))) {
      new_state = MEM_D_SHARED;
   }

   if (embra.useVQC) {
      VLN old_vline = PQC_VLINE(EMP[cpuNum].qc_p[pline]);
      if( old_vline ) {
         EMP[cpuNum].qc_v[old_vline] = MEM_INVALID;
      }
      EMP[cpuNum].qc_v[vline] = new_state;
      switch( new_state ) {
      case MEM_INVALID:
         EMP[cpuNum].qc_p[pline] = 0;
         break;
      case MEM_D_EXCLUSIVE:
         EMP[cpuNum].qc_p[pline] = PQC_SET_DIRTY( vline );
         break;
      case MEM_I_EXCLUSIVE:
         ASSERT (0);
         break;
      case MEM_I_SHARED:
      case MEM_D_SHARED:
         EMP[cpuNum].qc_p[pline] = PQC_SET_SHARED( vline );
         break;
      }
      if (new_state != MEM_INVALID) {
         MA mmuEntry = EMP[cpuNum].mmu[PAGE_NUMBER(SLINE2ADDR(vline))];
         ASSERT (mmuEntry);
         if (ADDR2SLINE(MEMADDR_TO_PHYS(M_FROM_CPU(cpuNum),
                                        MMU_PROT_READ(mmuEntry))) !=
             (pline & ~(LINES_PER_PAGE-1))) {
            CPUError("EMBRA: set_qc_state error vLine=%x pLine=%x mmuEntry=%x \n",
                     vline, pline, mmuEntry);
         }
      }
   } else {
      /* !embra.useVQC */
      switch( new_state ) {
      case MEM_INVALID:
         EMP[cpuNum].pa_p[pline] = PA_SET_INV;
         break;
      case MEM_D_EXCLUSIVE:
      case MEM_I_EXCLUSIVE:
         EMP[cpuNum].pa_p[pline] = PA_SET_DIRTY;
         break;
      case MEM_I_SHARED:
      case MEM_D_SHARED:
         EMP[cpuNum].pa_p[pline] = PA_SET_SHARED;
         break;
      }
   }
}

/* XXX - Caller must check ASID and/or global bit */
void qc_cache_reload_page( int cpuNum, VA vLine, EntryLo lo0, EntryLo lo1)
{
   PA pLine;

   if( IS_VALID(lo0) ) {
      for( pLine = ADDR2SLINE(TLBLO2ADDR(lo0));
           pLine < ADDR2SLINE(TLBLO2ADDR(lo0)) + LINES_PER_PAGE;
           pLine++, vLine++ ) {
         uint tag = EMP[cpuNum].cache_tag[SCACHE_INDEXOF(SLINE2ADDR(pLine))];
         if( CACHE_VALID( tag ) && CACHE_PLINE( tag ) == pLine ) {
            if( CACHE_EXCL( tag ) && IS_DIRTY(lo0) ) {
               set_qc_state(cpuNum, vLine, pLine, MEM_D_EXCLUSIVE);
            } else {
               /* Don't know I/D */
               set_qc_state(cpuNum, vLine, pLine, MEM_D_SHARED);
            }
         }
      }
   } else {
      vLine += LINES_PER_PAGE;
   }

   if( IS_VALID(lo1) ) {
      for( pLine = ADDR2SLINE(TLBLO2ADDR(lo1));
           pLine < ADDR2SLINE(TLBLO2ADDR(lo1)) + LINES_PER_PAGE;
           pLine++, vLine++ ) {
         uint tag = EMP[cpuNum].cache_tag[SCACHE_INDEXOF(SLINE2ADDR(pLine))];
         if( CACHE_VALID( tag ) && CACHE_PLINE( tag ) == pLine ) {
            if( CACHE_EXCL( tag ) && IS_DIRTY(lo1) ) {
               set_qc_state(cpuNum, vLine, pLine, MEM_D_EXCLUSIVE);
            } else {
               /* Don't know I/D */
               set_qc_state(cpuNum, vLine, pLine, MEM_D_SHARED);
            }
         }
      }
   }
}
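/* ------------------------------------------------------------------- */
/* Illustrative sketch (not part of the original file): what the qc_v   */
/* states maintained above buy us.  A translated store can complete     */
/* without calling out to the memory system only when its line is       */
/* already MEM_D_EXCLUSIVE; any other state forces the slow path,       */
/* which updates qc_v/qc_p via set_qc_state() and retries.  The         */
/* sketch_* names, table size, and line size are assumptions for        */
/* illustration only.                                                   */
/* ------------------------------------------------------------------- */
#if 0
enum sketch_state {
   SKETCH_MEM_INVALID = 0,
   SKETCH_MEM_D_SHARED,
   SKETCH_MEM_D_EXCLUSIVE
};

#define SKETCH_LOG2_LINE_SIZE  7              /* 128-byte lines (assumed) */
#define SKETCH_NUM_VLINES      (1u << 20)     /* table size (assumed)     */

static unsigned char sketch_qc_v[SKETCH_NUM_VLINES];  /* one byte per vline */

/* Fast-path check for a store to vAddr: hit iff held dirty-exclusive. */
static int sketch_store_hits_fast_path(unsigned long vAddr)
{
   unsigned long vline =
      (vAddr >> SKETCH_LOG2_LINE_SIZE) & (SKETCH_NUM_VLINES - 1);
   return sketch_qc_v[vline] == SKETCH_MEM_D_EXCLUSIVE;
}
#endif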
static void qcMapOnePage(int cpuNum, VA vAddr, EntryLo lo)
{
   if (!EMBRA_IS_PADDR(M_FROM_CPU(cpuNum), TLBLO2ADDR(lo))) {
      /* BAD mapping -- set the mmu entry to 0 so mem_ref will be
       * called and it will bus error */
      SetMMUEntry(&EMP[cpuNum], vAddr, 0);
      CPUWarning("bad physical address (0x%x) being mapped into tlb\n",
                 TLBLO2ADDR(lo));
   } else if (IS_VALID(lo)) {
      PA pAddr = REMAP_PHYS(TLBLO2ADDR(lo), cpuNum);
      MA mAddr = PHYS_TO_MEMADDR(M_FROM_CPU(cpuNum), pAddr);
      ASSERT( EMBRA_IS_MEMADDR(M_FROM_CPU(cpuNum), mAddr));
      if ( embra.emode == EMBRA_PAGE &&
           CPUVec.CheckFirewall &&
           SimMagic_IsIncoherent(pAddr)) {
         SetMMUEntry(&EMP[cpuNum], vAddr, 0);
      } else if (embra.emode == EMBRA_PAGE &&
                 annWatchpoints &&
                 AnnFMRangeCheck(vAddr, ANNFM_LD_TYPE|ANNFM_ST_TYPE)) {
         SetMMUEntry(&EMP[cpuNum], vAddr, 0);
      } else if ( embra.emode == EMBRA_PAGE &&
                  IS_DIRTY(lo) &&
                  !TCcoherence_is_code(PHYS_TO_MEMADDR(M_FROM_CPU(cpuNum),
                                                       pAddr)) &&
                  !(CPUVec.CheckFirewall &&
                    !CPUVec.CheckFirewall(cpuNum, pAddr)) ) {
         /* OK to write this page directly in TC without calling out
          * to mem_ref */
         SetMMUEntry(&EMP[cpuNum], vAddr, MMU_PROT_WRITE(mAddr));
      } else {
         SetMMUEntry(&EMP[cpuNum], vAddr, MMU_PROT_READ(mAddr));
      }
   }
}

void qc_map_page( int cpuNum, int idx)
{
   EntryHi hi  = EMP[cpuNum].tlbEntry[idx].Hi;
   EntryLo lo0 = EMP[cpuNum].tlbEntry[idx].Lo0;
   EntryLo lo1 = EMP[cpuNum].tlbEntry[idx].Lo1;
   VA vAddr0 = TLB_ENT2VPN(hi)*DEFAULT_PAGESZ;
   VA vAddr1 = (TLB_ENT2VPN(hi)|1)*DEFAULT_PAGESZ;

   ASSERT( vAddr0 != vAddr1);
   if( !IS_KSEG0( hi ) && (IS_VALID(lo0) || IS_VALID(lo1))) {
      if( EMP[cpuNum].kernelMMU[TLB_ENT2VPN(hi)] != 0 ) {
         qc_erase_etlb( cpuNum, hi );
      }
   }
   qcMapOnePage(cpuNum, vAddr0, lo0);
   qcMapOnePage(cpuNum, vAddr1, lo1);
   if (embra.emode == EMBRA_CACHE) {
      qc_cache_reload_page( cpuNum, ADDR2SLINE(TLBHI2ADDR(hi)), lo0, lo1);
   }
}

void qc_flush_etlb(int cpuNum)
{
}

void qc_erase_etlb(int cpuNum, EntryHi hi)
{
}

K0A non_excepting_tv( int cpuNum, VA vAddr)
{
   MA mAddr;

   if (IS_BACKDOOR(vAddr)) {
      return vAddr;
   }
   ASSERT(cpuNum >= 0 && cpuNum < TOTAL_CPUS);
   if (!EMP[cpuNum].mmu) {
      CPUWarning("\n\nEMBRA: non_excepting_tv called before init\n\n");
      return 0;
   }
   mAddr = EMP[cpuNum].kernelMMU[PAGE_NUMBER(vAddr)];
   if( mAddr ) {
      mAddr += PAGE_OFFSET( vAddr );
      /* Only needed for page mode, but instead of checking, it's */
      /* cheaper to just do it */
      return MEMADDR_TO_K0(M_FROM_CPU(cpuNum), MMU2ADDR(mAddr));
   }
   return 0;
}
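/* ------------------------------------------------------------------- */
/* Illustrative sketch (not part of the original file): the array-      */
/* indexed translation that non_excepting_tv() performs, reduced to     */
/* its core.  A flat table maps page number -> host base address; a     */
/* null entry means "no mapping", reported by returning 0 rather than   */
/* by raising an exception.  The sketch_* names, 4KB page size, and     */
/* table size are assumptions for illustration only.                    */
/* ------------------------------------------------------------------- */
#if 0
#include <stddef.h>

#define SKETCH_PAGE_SHIFT      12                    /* 4KB pages (assumed) */
#define SKETCH_PAGE_SIZE       (1u << SKETCH_PAGE_SHIFT)
#define SKETCH_PAGE_NUMBER(v)  ((v) >> SKETCH_PAGE_SHIFT)
#define SKETCH_PAGE_OFFSET(v)  ((v) & (SKETCH_PAGE_SIZE - 1))

static char *sketch_mmu[1u << 20];  /* page number -> host base (assumed size) */

/* Translate vAddr, or return NULL without faulting. */
static char *sketch_non_excepting_tv(unsigned long vAddr)
{
   char *base = sketch_mmu[SKETCH_PAGE_NUMBER(vAddr)];
   return base ? base + SKETCH_PAGE_OFFSET(vAddr) : NULL;
}
#endif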
/* This function accesses true TLB state */
void qc_mmu_switch( int cpuNum, unsigned old_asid, unsigned new_asid,
                    uint forceSelfSwitch )
{
   static int saved_asid[SIM_MAXCPUS];
   int numTlbEntries;
   int i;

   /* Don't cxt switch to 0. We leave the old asid in the */
   /* quick_ASID variable, fooling the rest of the code into */
   /* thinking that the previous ASID is running. This is not */
   /* hardware consistent, but it should be semantically correct */
   if (forceSelfSwitch) {
      /* Normally switching to self is a nop. However we may need
       * to force it to bring the qc into consistency with a state
       * change in the outside world (firewall, incoherent line,
       * change in the amount of physical memory, etc). */
      ASSERT(old_asid == new_asid);
      ASSERT(old_asid == CURRENT_ASID(cpuNum));
      goto force_self;
   }

   /* Optimization - don't cxt switch to self (that is solid) */
   if( old_asid == new_asid )
      return;

#ifndef TORNADO
   /*
    * Unlike IRIX, Tornado uses ASID 0 for non-global entries
    */
   /* Switches to 0 are nops. We leave the old asid in the quick_ASID */
   /* variable, fooling the rest of the code into thinking that the */
   /* previous ASID is running. This is not hardware consistent, but */
   /* it should be semantically correct */
   /* Ex. 6,0,6 stays at 6 */
   /* Ex. 6,0,6,0,5 looks like 6,5 */
   if( new_asid == 0 ) {
      /* Switch to 0 is a nop */
      ASSERT( CURRENT_ASID(cpuNum) != 0 );
      saved_asid[cpuNum] = CURRENT_ASID(cpuNum); /* old_asid; */
      return;
   } else {
      /* Switch out of 0 */
      if( old_asid == 0 ) {
         if( new_asid == saved_asid[cpuNum] ) {
            /* Switched back to ourselves, do nothing */
            return;
         } else {
            /* We've gone (for example) 5,0,8 */
            old_asid = saved_asid[cpuNum];
            /* and fall through */
         }
      }
   }
   ASSERT( new_asid != old_asid );
   ASSERT( new_asid != 0 );
#endif /* TORNADO */

   quick_ASID[cpuNum] = new_asid;

force_self:
   qc_flush_etlb(cpuNum);
   numTlbEntries = EMP[cpuNum].numTlbEntries;
   for(i = 0; i < numTlbEntries; i++ ) {
      /* If the entry (1) is valid, (2) has the previous asid, and (3) */
      /* does not have the global bit set, then invalidate that page in */
      /* the quick check array */
      int global = IS_GLOBAL_HI( EMP[cpuNum].tlbEntry[i].Hi );
      if( GET_ASID( EMP[cpuNum].tlbEntry[i].Hi ) == old_asid && !global ) {
         int hi = EMP[cpuNum].tlbEntry[i].Hi;
         ASSERT (TLBENT2SLINE(hi) == TLBENT2SLINE(hi & ~TLBHI_G));
         if( embra.emode == EMBRA_CACHE ) {
            qc_cache_inval_page( cpuNum, i );
         }
         qc_tlb_inval_page( cpuNum, i );
      }
   }
   for(i = 0; i < numTlbEntries; i++ ) {
      /* Now restore old mappings. Global mappings are never erased */
      if( GET_ASID( EMP[cpuNum].tlbEntry[i].Hi ) == CURRENT_ASID(cpuNum) ) {
         qc_map_page( cpuNum, i );
      }
   }
}

void qc_CheckForDuplicate(CPUState *P, int index)
{
   int j;
   Reg hi = P->tlbEntry[index].Hi & ~TLBHI_G;
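/* ------------------------------------------------------------------- */
/* Illustrative sketch (not part of the original file): a standalone    */
/* model of the ASID-switch filtering above, reproducing the comment's  */
/* examples ("6,0,6 stays at 6"; "6,0,6,0,5 looks like 6,5").  Switches */
/* to ASID 0 are deferred; a real switch (and QC flush) happens only    */
/* when control does not return to the saved ASID.  All sketch_* names  */
/* are hypothetical, for illustration only.                             */
/* ------------------------------------------------------------------- */
#if 0
#include <assert.h>

static int sketch_quick   = 6;  /* ASID the QC structures reflect */
static int sketch_saved   = 0;  /* ASID stashed on a switch to 0  */
static int sketch_in_zero = 0;  /* hardware is currently in ASID 0 */
static int sketch_flushes = 0;  /* count of real (costly) switches */

static void sketch_switch(int new_asid)
{
   if (new_asid == 0) {               /* switch to 0 is a nop */
      sketch_saved   = sketch_quick;
      sketch_in_zero = 1;
      return;
   }
   if (sketch_in_zero) {              /* switch out of 0 */
      sketch_in_zero = 0;
      if (new_asid == sketch_saved)   /* e.g. 6,0,6: back to ourselves */
         return;
   }
   if (new_asid == sketch_quick)      /* switch to self is a nop */
      return;
   sketch_quick = new_asid;           /* real switch: QC must be flushed */
   sketch_flushes++;
}

static void sketch_asid_demo(void)
{
   sketch_switch(0); sketch_switch(6);   /* 6,0,6 : no flush  */
   assert(sketch_flushes == 0 && sketch_quick == 6);
   sketch_switch(0); sketch_switch(5);   /* 6,0,5 : one flush */
   assert(sketch_flushes == 1 && sketch_quick == 5);
}
#endif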