📄 scache.c
字号:
/*
 * NOTE(review): this chunk begins inside the second-level-cache
 * miss-completion handler whose head lies above this excerpt.  The code
 * below reports the completed miss to the memstat event macros, drains
 * every first-level MHT entry that was merged into this SMHT entry,
 * frees the SMHT slot, and unstalls the CPU.
 */
         stall = CPUVec.CycleCount(cpuNum) - STATS_VALUE(cpuNum, stallStart);
      }
      /* Prefetch completion: report as upgrade-prefetch or plain prefetch */
      if (type & E_UPGRADE) {
         PREFETCH_NOUPGRADE_EVENT(cpuNum,CURRENT_PC(cpuNum),smht->pfvAddr,
                                  smht->pAddr,smht->scacheLRU,stall);
      } else {
         PREFETCH_NOMISS_EVENT(cpuNum,CURRENT_PC(cpuNum),smht->pfvAddr,
                               smht->pAddr,smht->scacheLRU,stall);
      }
      reported = 1;
   } else {
      /* Wake each first-level MHT entry waiting on this L2 miss */
      for (i = 0; i < smht->numMHTwait; i++) {
         int mhtind = smht->mhtInd[i];
         MHT *mht = &(CACHE[cacheNum].MHT[mhtind]);
         ASSERT (mhtind < MHT_SIZE);
         if (mht->cmd & SC_IGET) { /* Sending to Icache */
            if (!reported) {
               uint type = E_L2 | E_I | E_READ;
               if (result & MEMSYS_RESULT_CACHE) {
                  type |= E_FOUND_IN_CACHE;
               }
               if (result & MEMSYS_RESULT_REMOTE_HOME) {
                  type |= E_REMOTE;
               }
               L2_IMISS_EVENT(CPUVec.CycleCount(cpuNum), cpuNum, mht->vAddr,
                              smht->pAddr,
                              (CPUVec.CycleCount(cpuNum) - mht->startTime),
                              type);
               reported = 1;
            } else {
               /* Multiple mht entries for L2 miss, only reported first to
                * memstat */
            }
            mht->status = status;
            /* WARNING:--- This does not work properly with SYNCs --- */
            MHTReqDone(cpuNum, mht, mode, status);
         } else { /* Sending to Dcache */
            if (!reported) {
               uint type = E_L2 | E_D;
               /* Classify read vs. write, and whether the write caused
                * invalidations of other sharers */
               if (mode == MEMSYS_EXCLUSIVE) {
                  if (result & MEMSYS_RESULT_INVALIDATE) {
                     type |= E_WRITE | E_CAUSED_INVAL;
                  } else {
                     type |= E_WRITE;
                  }
               } else {
                  type |= E_READ;
               }
               if ((smht->memsyscmd & MEMSYS_CMDMASK) == MEMSYS_UPGRADE) {
                  type |= E_UPGRADE;
               }
               if (mht->cmd == SC_DSCUGETX) {
                  type |= E_SC_UPGRADE;
               }
               /* Exactly one of cache / memory must have supplied the data */
               ASSERT((result & MEMSYS_RESULT_CACHE) ||
                      (result & MEMSYS_RESULT_MEMORY));
               ASSERT(!((result & MEMSYS_RESULT_CACHE) &&
                        (result &MEMSYS_RESULT_MEMORY)));
               if (result & MEMSYS_RESULT_CACHE) {
                  type |= E_FOUND_IN_CACHE;
               }
               if (result & MEMSYS_RESULT_REMOTE_HOME) {
                  type |= E_REMOTE;
               }
               L2_DMISS_EVENT(CPUVec.CycleCount(cpuNum), cpuNum, smht->PC,
                              mht->vAddr, smht->pAddr,
                              CPUVec.CycleCount(cpuNum) - mht->startTime,
                              type, smht->scacheLRU);
               reported = 1;
            } else {
               /* Multiple mht entries for L2 miss, only reported first to
                * memstat */
            }
            if ((smht->memsyscmd & MEMSYS_CMDMASK) == MEMSYS_UPGRADE) {
               MHTReqDone(cpuNum, mht, mode, status);
            } else {
               mht->status = status;
               MHTReqDone(cpuNum, mht, mode, status);
            }
         }
      }
   }
   if (!reported) {
      CPUWarning("No mht entries for 2cache miss, not reported to memstat\n");
   }
   /* Release the SMHT slot (pointer arithmetic recovers the entry index) */
   FreeSMHT(cpuNum, smht - SCACHE[scacheNum].SMHT);
   /* --- WARNING: This was put in because SYNCs mean that a request  ---
    * --- arrival does not necessarily wake up the processor          --- */
   CPUVec.Unstall(cpuNum);
}

/*****************************************************************
 * AllocSMHT
 *
 * Allocate an entry in the miss handling table for the
 * specified second level miss. This routine is responsible for
 * checking for conflict and merge miss as well.
 *
 * Returns SMHTSUCCESS (new entry allocated, index in *mhtind),
 * SMHTMERGE (an outstanding miss to the same line can satisfy this
 * one), SMHTCONFLICT (index collision, or a GETX arriving while only
 * a GET is outstanding), or SMHTFULL (no free entries).
 *****************************************************************/
static SMHTStatus
AllocSMHT(int cpuNum, MCMD cmd, PA pAddr, int lru, int *mhtind)
{
   int scacheNum = GET_SCACHE_NUM(cpuNum);
   int entry;
   uint ind = SCACHE_INDEXOF(pAddr); /* /SCACHE_LINE_SIZE) % SCACHE_INDEX; */
   if (SCACHE[scacheNum].SMHTnumInuse == 0) {
      entry = 0; /* Special case for speed: allocate when SMHT is empty */
   } else {
      int i;
      entry = -1;
      /* Scan all entries: remember the first free slot, and check every
       * in-use slot for an index collision with this miss */
      for (i = 0; i < SMHT_SIZE; i++) {
         if (!SCACHE[scacheNum].SMHT[i].inuse) {
            if (entry == -1) entry = i;
         } else {
            if (SCACHE[scacheNum].SMHT[i].ind == ind) {
               *mhtind = i;
               if (!SameCacheLine(SCACHE[scacheNum].SMHT[i].pAddr, pAddr,
                                  SCACHE_LINE_SIZE)) {
                  /* Currently we only support one miss per cache index */
                  return SMHTCONFLICT;
               }
               /* Except we call them merges if two misses happen to the same
                * line */
               if (((SCACHE[scacheNum].SMHT[i].memsyscmd & MEMSYS_CMDMASK) ==
                    MEMSYS_GET) &&
                   ((cmd & MEMSYS_CMDMASK) == MEMSYS_GETX)) {
                  /* Call it a conflict if we need a GETX and only a
                   * GET is outstanding. */
                  /* --- THIS IS NOT RIGHT (MAH), if its the same cache   ---
                   * --- line it should MERGE and issue as an UPGRADE when---
                   * --- the SHARED line comes back                       ---
                   * Currently, we only buffer writes when the request
                   * outstanding can satisfy the write when it returns.
                   */
                  return SMHTCONFLICT;
               }
               return SMHTMERGE;
            }
         }
      }
      if (entry == -1) {
         /* NOTE(review): the loop above only exits normally with
          * i == SMHT_SIZE, so the else branch appears unreachable */
         if (i == SMHT_SIZE) { /* SMHT is full */
            ASSERT(SCACHE[scacheNum].SMHTnumInuse == SMHT_SIZE);
            return SMHTFULL;
         } else {
            entry = i;
         }
      }
   }
   /* Claim the slot and record everything needed to finish the miss */
   SCACHE[scacheNum].SMHTnumInuse++;
   SCACHE[scacheNum].SMHT[entry].inuse = 1;
   SCACHE[scacheNum].SMHT[entry].memsyscmd = cmd;
   SCACHE[scacheNum].SMHT[entry].startTime = CPUVec.CycleCount(cpuNum);
   SCACHE[scacheNum].SMHT[entry].pAddr = pAddr;
   /* RPB -- technically, this is not quite right.  Consider the case
      where the write buffer is active and a request is NAKed */
   SCACHE[scacheNum].SMHT[entry].PC = CURRENT_PC(cpuNum);
   SCACHE[scacheNum].SMHT[entry].ind = ind;
   SCACHE[scacheNum].SMHT[entry].scacheLRU = lru;
   SCACHE[scacheNum].SMHT[entry].numMHTwait = 0;
   SCACHE[scacheNum].SMHT[entry].pfvAddr = 0;
   {
      /* Remember the state the victim way was in before this miss */
      PA tag = SCACHE[scacheNum].set[ind].tags[lru];
      int formerState;
      if (tag & INVALID_TAG) {
         formerState = SCACHE_INVALID;
      } else if (tag & EXCLUSIVE_TAG) {
         formerState = SCACHE_DIRTY_EXCLUSIVE;
      } else {
         formerState = SCACHE_SHARED;
      }
      SCACHE[scacheNum].SMHT[entry].formerState = formerState;
   }
   *mhtind = entry;
   return SMHTSUCCESS;
}

/*****************************************************************
 * FreeSMHT - Delete a MHT entry
 *
 *****************************************************************/
static void
FreeSMHT(int cpuNum, int entryNum)
{
   int scacheNum = GET_SCACHE_NUM(cpuNum);
   ASSERT(SCACHE[scacheNum].SMHT[entryNum].inuse);
   SCACHE[scacheNum].SMHT[entryNum].inuse = 0;
   SCACHE[scacheNum].SMHTnumInuse--;
   ASSERT(SCACHE[scacheNum].SMHTnumInuse >= 0);
}

/*****************************************************************
 * EmptySMHT - Check whether SMHT is empty.
* *****************************************************************/intEmptySMHT(int cpuNum){ int scacheNum = GET_SCACHE_NUM(cpuNum); int entryNum; for (entryNum = 0; entryNum < SMHT_SIZE; entryNum++) { if (SCACHE[scacheNum].SMHT[entryNum].inuse) return FALSE; } return TRUE;}#ifdef CC_CHECKER/* extern void MustBeInDirectory(int cpu, uint); */static intCacheChecker(PA paddr, int inCpu, int isExclusive){ int cpu; int ind; bool foundit=0; if (paddr != (PA) -1) { PA tag = isExclusive ? SCACHE_TAG(paddr):SCACHE_TAG_EX(paddr); ind = SCACHE_INDEXOF(paddr) /* /SCACHE_LINE_SIZE) % SCACHE_INDEX; */#ifdef DIR_CHECKER MustBeInDirectory(inCpu, paddr);#endif for (cpu = 0; cpu < TOTAL_CPUS; cpu++) { if (cpu == inCpu) continue; foundit=0; if (isExclusive) { for(way=0;way<SCACHE_ASSOC;++way) { foundit |= ((SCACHE[cpu].set[ind].tags[way] & ~EXCLUSIVE_TAG) == tag); } if (!foundit) CPUError("Cache check failed at paddr 0x%x\n", paddr); } else { for(way=0;way<SCACHE_ASSOC;++way ) { foundit |= ((SCACHE[cpu].set[ind].tags[way]) == tag); } if (!foundit) CPUError("Cache check failed at paddr 0x%x\n", paddr); } } } return 0;}#endif static void SCacheLRUTouch(uint *lruword, int set){ switch( SCACHE_ASSOC ) { case 1: /* Direct-mapped */ break; case 2: /* 2-way */ *lruword = set; break; case 4: { /* 4-way */ unsigned char *lru; lru = (unsigned char *)lruword; if (lru[0] == set) { /* Hit on most recently used - do nothing */ } else if (lru[1] == set) { lru[1] = lru[0]; lru[0] = set; } else if (lru[2] == set) { lru[2] = lru[1]; lru[1] = lru[0]; lru[0] = set; } else if (lru[3] == set) { lru[3] = lru[2]; lru[2] = lru[1]; lru[1] = lru[0]; lru[0] = set; } else { CPUWarning("LRU screw-up in SCacheLRUTouch\n"); ASSERT(0); } break; } default: ASSERT(0); }}static int SCacheLRU(uint lruword){ switch( SCACHE_ASSOC ) { case 1: return 0; case 2: return (!lruword); case 4: return ((char *)&lruword)[3]; default: ASSERT(0); } return( 0 ); /*compiler happy */}static void SCacheLRUInit(uint *lruword){ if 
(SCACHE_ASSOC == 4) { *lruword = 0x00010203; /* Need to have all sets here or LRU algorithm crashes */ } else { *lruword = 0; }}static void SCacheLRUMake(uint *lruword, int set){ unsigned char* lru; unsigned char t1,t2; switch( SCACHE_ASSOC ) { case 1: break; case 2: *lruword = !set; break; case 4: lru = (unsigned char*) &lruword; if (lru[3] != set) { t1 = lru[2]; lru[2] = lru[3]; lru[3] = set; if (t1 != set) { t2 = lru[1]; lru[1] = t1; if (t2 != set) lru[0] = t2; } } break; default: ASSERT( 0 ); }}/* ====================================================================== Utility routines - These routines need to be here, even though they may be specific to one memory model or the other ======================================================================*/intSameCacheLine(PA addr1, PA addr2, unsigned cacheLineSize){ /* Now, match only if SAME cache line */ return((addr1 & ~(PA)(cacheLineSize-1)) == (addr2 & ~(PA)(cacheLineSize-1)));}PAQuadWordAlign(PA offset){ if (sizeof(PA) == 8) { return (offset & ~(0xFLL)); } else { return (offset & ~(0xF)); }}#ifdef DATA_HANDLING/* write all dirty cache lines back to memory */void FlushEntireCache(int cpuNum, int leaveShared){#ifndef SOLO int scacheNum = GET_SCACHE_NUM(cpuNum); int numSets = SCACHE_SIZE/SCACHE_LINE_SIZE/SCACHE_ASSOC; int setNum, w; struct SCacheSet* set; int nDirty = 0; for (setNum = 0; setNum < numSets; setNum++) { set = &SCACHE[scacheNum].set[setNum]; for(w=0;w<SCACHE_ASSOC; ++w) { if (set->tags[w] & EXCLUSIVE_TAG) { PA pAddr = STAG_TO_PA(set->tags[w], setNum); byte *memAddr = PHYS_TO_MEMADDR(M_FROM_CPU(cpuNum), pAddr); nDirty++; SCacheFlush(cpuNum, TRUE, leaveShared, pAddr, SCACHE_LINE_SIZE, memAddr); } else if (!(set->tags[w] & INVALID_TAG) && !leaveShared) { PA pAddr = STAG_TO_PA(set->tags[w], setNum);#ifndef SOLO ASSERT (IS_VALID_PA(M_FROM_CPU(cpuNum), pAddr));#endif SCacheFlush(cpuNum, FALSE, FALSE, pAddr, SCACHE_LINE_SIZE, NULL); } } } CPUPrint("FlushEntireCache: %d dirtly lines found in cache on cpu 
%d.\n", nDirty, cpuNum);#endif}#endif
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -