/* cache.c */
        type |= E_UPGRADE;
    } else {
        CACHE_INC( cpuNum, CURRENT_MODE(&EMP[cpuNum]), i_miss, d_miss, state );
    }

    if (VQC_INST(state)) {
        ASSERT( !(type & E_UPGRADE) );
        L2_IMISS_EVENT( EmbraCpuCycleCount(cpuNum), cpuNum, vAddr, pAddr,
                        miss_handling_time, type | E_I );
    } else {
        VA pc = CLEAR_BD(EMP[cpuNum].PC);
        L2_DMISS_EVENT( EmbraCpuCycleCount(cpuNum), cpuNum, pc, vAddr, pAddr,
                        miss_handling_time, type | E_D, 0 );
    }

    /* Charge stall time accounting */
    CACHE_INC_BY( cpuNum, CURRENT_MODE(&EMP[cpuNum]), i_stall_cyc, d_stall_cyc,
                  state, miss_handling_time );
    }

    /* Update vQC and cache tags */
    Cache_CommitRef( cpuNum, pAddr, vAddr, pcPAddr, state, smht_flags );

    if( cx_me ) {
        lastMissState[cpuNum] = state;
        lastInstrCount[cpuNum] = iCount;
        EMP[cpuNum].jumpPC = (uint)continue_run_without_chaining;
        ReenterTC_CX(&EMP[cpuNum]);
    }
    lastMissState[cpuNum] = MEM_INVALID;
}
#endif /* OLD VERSION_EB */

static void
Cache_CommitRef( int cpuNum, PA pAddr, VA vAddr, PA pcPAddr,
                 EmVQCMemState state, int flags )
{
    uint line_no = SCACHE_INDEXOF( pAddr );
    PLN pline = ADDR2SLINE( pAddr );
/*
    int mode = CURRENT_MODE(&EMP[cpuNum]);
    Dir_Entry new_dir_entry;
*/ /* not used XXX */
    int miss_handling_time = (flags & SMHT_UPGRADE ? UPGRADE_TIME : MEM_CYCLE_TIME);
    uint type = E_L2;

    if( flags & SMHT_UPGRADE ) {
        CACHE_SINC( cpuNum, CURRENT_MODE(&EMP[cpuNum]), upgrades );
        type |= E_UPGRADE;
    } else {
        CACHE_INC( cpuNum, CURRENT_MODE(&EMP[cpuNum]), i_miss, d_miss, state );
    }

    if (VQC_SHARED(state)) {
        type |= E_READ;
    } else {
        type |= E_WRITE;
    }

    /* Like Mipsy: if this sc is going to fail, just let it fail before
     * consulting the caches, and don't let it grab any line or directory
     * entry. */
    if( (flags & SMHT_SC) && EMP[cpuNum].LLAddr == 0 ) {
        if (interest(pAddr)) {
            LogEntry("sc_failed", cpuNum, "pAddr=%08x flags=%x \n", pAddr, flags);
        }
        return;
    }

    /* Make sure that if this is an upgrade, the line is actually in the
     * cache.  If not, warn and demote it to a miss (ed). */
    if (flags & SMHT_UPGRADE) {
        PA line1 = CACHE_PLINE(EMP[cpuNum].cache_tag[line_no]);
        PA line2 = ADDR2SLINE(pAddr);
        if (line1 != line2 || !CACHE_VALID(EMP[cpuNum].cache_tag[line_no])) {
            LogEntry("EMBRA", cpuNum,
                     "WARNING Upgrade not in cache pAddr=0x%08x cached=0x%08x\n",
                     pAddr, SLINE2ADDR(line1));
            flags &= ~SMHT_UPGRADE;
            type &= ~E_UPGRADE;
        }
    }

    /* If there is a previous entry in this line, which is not this line,
     * kick it out. */
    if( CACHE_VALID( EMP[cpuNum].cache_tag[line_no] ) ) {
        /* Kick out previous line. */
        /* If this is an upgrade this could be redundant work. */
        if (!(flags & SMHT_UPGRADE)) {
            uint type = E_L2;
            PA oldAddr = SLINE2ADDR(CACHE_PLINE(EMP[cpuNum].cache_tag[line_no]));
            if (CACHE_EXCL( EMP[cpuNum].cache_tag[line_no] )) {
                type |= E_WRITEBACK;
            } else {
                type |= E_FLUSH_CLEAN;
            }
            if (IS_KSEG0(pAddr) || IS_KSEG1(pAddr) || IS_KSEG2(pAddr)) {
                type |= E_KERN_REPLACED;
            } else {
                type |= E_USER_REPLACED;
            }
            if (!oldAddr) {
                LogEntry("Ed's cautious", cpuNum, "Transition on line 0\n");
            }
            L2_LINE_TRANS_EVENT(cpuNum, oldAddr, type, vAddr, 0,
                                IS_KUSEG(EMP[cpuNum].PC));
        }
        if (embra.useVQC) {
            set_qc_state(cpuNum,
                         PQC_VLINE(EMP[cpuNum].qc_p[CACHE_PLINE(EMP[cpuNum].cache_tag[line_no])]),
                         CACHE_PLINE( EMP[cpuNum].cache_tag[line_no] ),
                         MEM_INVALID );
        } else { /* !embra.useVQC */
            set_qc_state( cpuNum, 0,
                          CACHE_PLINE( EMP[cpuNum].cache_tag[line_no] ),
                          MEM_INVALID );
        }
        /* If we are evicting something (which is not us) with an LL
         * outstanding, smash the LL register. */
        if( CACHE_PLINE( EMP[cpuNum].cache_tag[line_no] ) ==
                ADDR2SLINE(EMP[cpuNum].LLAddr) &&
            pline != CACHE_PLINE( EMP[cpuNum].cache_tag[line_no] ) ) {
            EMP[cpuNum].LLAddr = 0;
        }
    }

    /*
     * Transitions for the insertion of the line in the cache.
     */
    if (VQC_INST(state)) {
        ASSERT( !(type & E_UPGRADE) );
        L2_IMISS_EVENT( EmbraCpuCycleCount(cpuNum), cpuNum, vAddr, pAddr,
                        miss_handling_time, type | E_I );
    } else {
        VA pc = CLEAR_BD(EMP[cpuNum].PC);
        L2_DMISS_EVENT( EmbraCpuCycleCount(cpuNum), cpuNum, pc, vAddr, pAddr,
                        miss_handling_time, type | E_D, 0 );
    }

    /* Set the qc entry and the cache tags. */
    /* If we have an intra-cpu conflict then:
     *   1. The PC line is present in the cache.
     *   2. A l/s is executed, misses the cache, and evicts the PC line.
     *   3. The l/s completes even though the PC line is not in the cache.
     *   4. If the l/s is in a delay slot then the l/s owns the line;
     *      otherwise it is an I-miss and the PC owns the line.
     * However, for performance we don't emit an icache check after every
     * l/s.  Therefore, just count the I-miss and assign the qc to the
     * proper party. */
    /* XXX - This means that UP MUST return the address so the QC does not
     * rewind (that would infinite loop). */
    if( flags & SMHT_DOUBLECOUNT ) {
        /* The data miss was registered earlier; this is the I-miss after
         * the current data reference instruction. */
        CACHE_INC( cpuNum, CURRENT_MODE(&EMP[cpuNum]), i_miss, d_miss,
                   MEM_I_SHARED );
        /* Register our data access with the directory. */
        Directory_NoLock_Modify(cpuNum, pAddr, vAddr, state);
        /* Now do the conflict that will occur after the instruction is
         * executed.  Set the data QC to inaccessible. */
        set_qc_state( cpuNum, ADDR2SLINE(vAddr), pline, MEM_INVALID );
        /* Register our instruction access with the directory. */
        Directory_NoLock_Modify(cpuNum, pcPAddr, CLEAR_BD(EMP[cpuNum].PC),
                                MEM_I_SHARED);
        /* Set the instr QC to accessible. */
        set_qc_state( cpuNum, ADDR2SLINE(CLEAR_BD(EMP[cpuNum].PC)),
                      ADDR2SLINE(pcPAddr), MEM_I_SHARED );
        EMP[cpuNum].cache_tag[line_no] = CACHE_SET_SHARED( ADDR2SLINE(pcPAddr) );
    } else {
        /* Register our access with the directory. */
        Directory_NoLock_Modify(cpuNum, pAddr, vAddr, state);
        /* Set the new line's quick check to the proper, accessible state. */
        set_qc_state( cpuNum, ADDR2SLINE(vAddr), pline, state );
        /* Update Embra tags. */
        if( VQC_SHARED( state ) ) {
            EMP[cpuNum].cache_tag[line_no] = CACHE_SET_SHARED( pline );
        } else {
            ASSERT( VQC_EXCL( state ) );
            EMP[cpuNum].cache_tag[line_no] = CACHE_SET_EXCL( pline );
        }
    }
}
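#if 0
/* Illustrative sketch, compiled out: the LL/SC discipline modeled above.
 * Cache_CommitRef fails a store-conditional up front when LLAddr is 0, and
 * the eviction path smashes LLAddr when the reserved line is kicked out.
 * Everything below (types, names, the 7-bit line shift) is an assumption
 * made up for this example, not Embra's implementation; extract the block
 * into its own file to run it. */
#include <assert.h>

struct ex_cpu { unsigned llAddr; };        /* 0 means no LL outstanding */

static void ex_ll(struct ex_cpu *c, unsigned pa)
{
    c->llAddr = pa;                        /* LL sets the reservation */
}

static void ex_evict(struct ex_cpu *c, unsigned pline)
{
    if ((c->llAddr >> 7) == pline)         /* losing the reserved line... */
        c->llAddr = 0;                     /* ...smashes the reservation */
}

static int ex_sc(struct ex_cpu *c, unsigned pa)
{
    int ok = (c->llAddr == pa);            /* SC succeeds only if still reserved */
    c->llAddr = 0;                         /* SC always ends the reservation */
    return ok;
}

int main(void)
{
    struct ex_cpu c = { 0 };
    ex_ll(&c, 0x1000);
    assert(ex_sc(&c, 0x1000));             /* undisturbed: SC succeeds */
    ex_ll(&c, 0x1000);
    ex_evict(&c, 0x1000 >> 7);             /* reserved line evicted in between */
    assert(!ex_sc(&c, 0x1000));            /* SC fails, as in the fast path above */
    return 0;
}
#endif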
/* Called via backdoor to ensure that cache state is consistent. */
void
cache_consistency_check( int cpuNum )
{
    int i, j;
    /* User, k0, k2 */
    int virt_qc_lines[3];
    int virt_qc_lines_backmapped[3];
    int virt_qc_lines_in_cache[3];
    int phys_qc_lines = 0;
    int phys_qc_lines_in_cache = 0;
    int num_repeated_plines = 0;
    int invalid_cache_tags = 0;
    extern void qc_consistency_check(void);

#define LINE_TO_INDEX(_line) \
    ( ((_line) < (ADDR2SLINE(K0BASE))) ? 0 : \
      ((_line) >= ADDR2SLINE(K2BASE)) ? 2 : 1 )

    qc_consistency_check();

    for( i = 0; i < 3; i++ ) {
        virt_qc_lines[i] = 0;
        virt_qc_lines_backmapped[i] = 0;
        virt_qc_lines_in_cache[i] = 0;
    }

    for( i = 0; i < ADDR2SLINE(0xffffffff); i++ ) {
        if( EMP[cpuNum].qc_v[i] != MEM_INVALID ) {
            PLN pline;
            K0A k0a = non_excepting_tv(cpuNum, SLINE2ADDR(i) );
            if( !k0a ) {
                /* A line can be in the cache and not be mapped, but if it
                 * is in the virt QC, it has to be both mapped and in the
                 * cache. */
                CPUPut("Line 0x%x not mapped\n", i);
                continue;
            }
            pline = ADDR2SLINE(K0_TO_PHYS_REMAP(k0a, cpuNum));
            virt_qc_lines[LINE_TO_INDEX(i)]++;
            if( PQC_VLINE(EMP[cpuNum].qc_p[pline]) == i ) {
                virt_qc_lines_backmapped[LINE_TO_INDEX(i)]++;
            }
            for( j = 0; j < LINES_PER_CACHE; j++ ) {
                if ( CACHE_PLINE( EMP[cpuNum].cache_tag[j] ) == pline ) {
                    virt_qc_lines_in_cache[LINE_TO_INDEX(i)]++;
                    break;
                }
            }
        }
    }

    CPUPut("VIRT (lines/bkmap/cache) KU/%d/%d/%d K0/%d/%d/%d K2/%d/%d/%d\n",
           virt_qc_lines[0], virt_qc_lines_backmapped[0], virt_qc_lines_in_cache[0],
           virt_qc_lines[1], virt_qc_lines_backmapped[1], virt_qc_lines_in_cache[1],
           virt_qc_lines[2], virt_qc_lines_backmapped[2], virt_qc_lines_in_cache[2] );

    for( i = 0; i < ADDR2SLINE(MEM_SIZE(M_FROM_CPU(cpuNum))); i++ ) {
        if( PQC_VALID(EMP[cpuNum].qc_p[i]) ) {
            phys_qc_lines++;
            for( j = 0; j < LINES_PER_CACHE; j++ ) {
                if ( CACHE_PLINE( EMP[cpuNum].cache_tag[j] ) == i ) {
                    phys_qc_lines_in_cache++;
                    break;
                }
            }
        }
    }

    CPUPut("PHYS (lines/cache) %d/%d\n", phys_qc_lines, phys_qc_lines_in_cache);

    for( i = 0; i < LINES_PER_CACHE; i++ ) {
        if( CACHE_INVALID( EMP[cpuNum].cache_tag[i] ) )
            invalid_cache_tags++;
        for( j = 0; j < LINES_PER_CACHE; j++ ) {
            if( CACHE_PLINE( EMP[cpuNum].cache_tag[i] ) ==
                    CACHE_PLINE( EMP[cpuNum].cache_tag[j] ) &&
                CACHE_VALID( EMP[cpuNum].cache_tag[i] ) &&
                i != j )
                num_repeated_plines++;
        }
    }
    CPUPut("%d repeated lines %d invalid tags, %d lines in cache\n",
           num_repeated_plines, invalid_cache_tags, LINES_PER_CACHE );
}
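#if 0
/* Illustrative sketch, compiled out: what LINE_TO_INDEX above is doing.  It
 * buckets a virtual line into user space (kuseg), kseg0/kseg1, or kseg2
 * using the standard MIPS32 segment bases.  The bases are architectural
 * facts; the 7-bit line shift and all ex_* names are assumptions for this
 * example only. */
#include <assert.h>

#define EX_LINE_SHIFT 7
#define EX_K0BASE     0x80000000u            /* start of kseg0 */
#define EX_K2BASE     0xc0000000u            /* start of kseg2 */
#define EX_VLINE(va)  ((va) >> EX_LINE_SHIFT)

/* 0 = user (kuseg), 1 = kseg0/kseg1, 2 = kseg2 */
static int ex_line_to_index(unsigned vline)
{
    if (vline < EX_VLINE(EX_K0BASE))
        return 0;
    if (vline >= EX_VLINE(EX_K2BASE))
        return 2;
    return 1;
}

int main(void)
{
    assert(ex_line_to_index(EX_VLINE(0x00400000u)) == 0);  /* user text */
    assert(ex_line_to_index(EX_VLINE(0x80001000u)) == 1);  /* kseg0 */
    assert(ex_line_to_index(EX_VLINE(0xc0001000u)) == 2);  /* kseg2 */
    return 0;
}
#endif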
int
cache_verify_excl( int cpuNum, PLN pline )
{
    int j;
    if( !PQC_VLINE(EMP[cpuNum].qc_p[pline]) ) {
        return PQC_DIRTY(EMP[cpuNum].qc_p[pline]);
    }
    for( j = 0; j < LINES_PER_CACHE; j++ ) {
        if ( CACHE_PLINE( EMP[cpuNum].cache_tag[j] ) == pline ) {
            return CACHE_EXCL( EMP[cpuNum].cache_tag[j] );
        }
    }
    return 0;
}

int
cache_verify_shared( int cpuNum, PLN pline )
{
    int j;
    if( !PQC_VLINE(EMP[cpuNum].qc_p[pline]) ) {
        return PQC_SHARED(EMP[cpuNum].qc_p[pline]);
    }
    for( j = 0; j < LINES_PER_CACHE; j++ ) {
        if ( CACHE_PLINE( EMP[cpuNum].cache_tag[j] ) == pline ) {
            return CACHE_SHARED( EMP[cpuNum].cache_tag[j] );
        }
    }
    return 0;
}
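#if 0
/* Illustrative sketch, compiled out: the line/index arithmetic that the tag
 * scans above and MPCache_Ref below rely on.  In a direct-mapped cache the
 * physical line number is the address divided by the line size, and the tag
 * slot is that line number modulo the number of lines.  The real
 * SCACHE_INDEXOF/ADDR2SLINE macros are defined elsewhere in Embra; the
 * constants here (128-byte lines, 16K lines) are assumptions. */
#include <assert.h>

#define EX_LINE_SHIFT      7                    /* 128-byte lines (assumed) */
#define EX_LINES_PER_CACHE (1u << 14)           /* 16K lines (assumed) */
#define EX_ADDR2SLINE(pa)  ((unsigned)(pa) >> EX_LINE_SHIFT)
#define EX_INDEXOF(pa)     (EX_ADDR2SLINE(pa) & (EX_LINES_PER_CACHE - 1))

int main(void)
{
    unsigned pa  = 0x0123456cu;
    unsigned pa2 = pa + (EX_LINES_PER_CACHE << EX_LINE_SHIFT); /* one cache size apart */

    /* Addresses on the same line share both line number and index. */
    assert(EX_ADDR2SLINE(pa) == EX_ADDR2SLINE(pa + 4));
    /* Lines one cache size apart conflict: same index, different line,
     * so installing one evicts the other (the "kick out" paths above). */
    assert(EX_INDEXOF(pa) == EX_INDEXOF(pa2));
    assert(EX_ADDR2SLINE(pa) != EX_ADDR2SLINE(pa2));
    return 0;
}
#endif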
/* XXXX - This is broken */
/* This is called on a QC miss.  It drives all cache state transitions. */
void
MPCache_Ref( int cpuNum, PA pAddr, VA vAddr, EmVQCMemState state )
{
    uint line_no = SCACHE_INDEXOF( pAddr );
    register unsigned pline;
    Dir_Entry new_dir_entry;
    int miss_handling_time;

    ASSERT( embra.parallel );
    pline = ADDR2SLINE(pAddr);

    /* We lock the directory entry, and then make ALL quick check and
     * cache tag modifications, including interventions (already done)
     * and updates to our own QC. */
    new_dir_entry = Directory_Lock(cpuNum, pAddr, vAddr, state);

    /* Set the new line's quick check to the proper, accessible state.
     * Note that this invalidates phys_info[pline].virt_line.
     * Since the new entry could be the same as the old (like on a TLB
     * fault), the invalidate happens first.
     * Also note that since at this point we don't know whether we have a
     * TLB or a cache miss, we can't yet stall the processor. */
    set_qc_state( cpuNum, ADDR2SLINE(vAddr), pline, state );

    if( pline == CACHE_PLINE( EMP[cpuNum].cache_tag[line_no] ) && pline ) {
        /* If permissions match, then this is a real cache hit and a qc
         * miss. */
        /* By the above test cache_tag is valid, so we only need to check
         * excl. */
        VASSERT( !VQC_SHARED( state ) ||
                 CACHE_VALID( EMP[cpuNum].cache_tag[line_no] ) ||
                 !pline,
                 ("State %d, line_no %d\n", state, line_no) );
        if( VQC_SHARED( state ) ||
            ( VQC_EXCL( state ) &&
              CACHE_EXCL( EMP[cpuNum].cache_tag[line_no] ) ) ) {
            /* Real cache hit, vQC miss. */
            Directory_Free(cpuNum, pAddr, new_dir_entry);
            return;
        } else {
            /* Upgrade */
            CACHE_SINC( cpuNum, CURRENT_MODE(&EMP[cpuNum]), upgrades );
            /* Remove xfer time. */
            miss_handling_time = MEM_CYCLE_TIME - SCACHE_LINE_SIZE/8;
            goto accountingDone;
        }
    }
    CACHE_INC( cpuNum, CURRENT_MODE(&EMP[cpuNum]), i_miss, d_miss, state );
    miss_handling_time = MEM_CYCLE_TIME;

accountingDone:
    EMP[cpuNum].cycleCountdown -= miss_handling_time;
    CACHE_INC_BY(cpuNum, CURRENT_MODE(&EMP[cpuNum]), i_stall_cyc, d_stall_cyc,
                 state, miss_handling_time );

    /* If there is a previous entry in this line, kick it out. */
    if( CACHE_VALID( EMP[cpuNum].cache_tag[line_no] ) ) {
        /* Kick out previous line.
         * We know that pline_tag[line_no] != pline. */
        if (embra.useVQC) {
            set_qc_state( cpuNum,
                          PQC_VLINE( EMP[cpuNum].qc_p[ CACHE_PLINE( EMP[cpuNum].cache_tag[line_no]) ] ),
                          CACHE_PLINE( EMP[cpuNum].cache_tag[line_no] ),
                          MEM_INVALID );
        } else { /* !embra.useVQC */
            set_qc_state( cpuNum, 0,
                          CACHE_PLINE( EMP[cpuNum].cache_tag[line_no] ),
                          MEM_INVALID );
        }
    }

    /* Update Embra tags. */
    if( VQC_SHARED( state ) ) {
        EMP[cpuNum].cache_tag[line_no] = CACHE_SET_SHARED( pline );
    } else {
        ASSERT( VQC_EXCL( state ) );
        EMP[cpuNum].cache_tag[line_no] = CACHE_SET_EXCL( pline );
    }

    /* Free up the directory entry. */
    Directory_Free(cpuNum, pAddr, new_dir_entry);
}
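#if 0
/* Illustrative sketch, compiled out: the decision MPCache_Ref makes once it
 * holds the directory entry.  A matching tag satisfies any read, and
 * satisfies a write only if the line is held exclusive; a write to a shared
 * line is an upgrade, which pays latency but transfers no data (hence the
 * SCACHE_LINE_SIZE/8 deduction above).  The enums and names here are
 * assumptions for the example, not Embra's types. */
#include <assert.h>

enum ex_req  { EX_READ, EX_WRITE };
enum ex_tag  { EX_INVALID, EX_SHARED, EX_EXCL };
enum ex_kind { EX_HIT, EX_UPGRADE, EX_MISS };

static enum ex_kind ex_classify(enum ex_tag tag, enum ex_req req, int tag_match)
{
    if (!tag_match || tag == EX_INVALID)
        return EX_MISS;                      /* fetch the whole line */
    if (req == EX_READ || tag == EX_EXCL)
        return EX_HIT;                       /* real hit; only the QC missed */
    return EX_UPGRADE;                       /* write to a shared line */
}

int main(void)
{
    assert(ex_classify(EX_SHARED, EX_READ,  1) == EX_HIT);
    assert(ex_classify(EX_EXCL,   EX_WRITE, 1) == EX_HIT);
    assert(ex_classify(EX_SHARED, EX_WRITE, 1) == EX_UPGRADE);
    assert(ex_classify(EX_SHARED, EX_READ,  0) == EX_MISS);
    return 0;
}
#endif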