pcache.c
                (uint64) socc[0], (uint64) socc[1], (uint64) socc[2],
                (uint64) socc[3], (uint64) socc[4]);
   }
#endif
   /* You could print periodic memsys stats here if they existed */
}

/*****************************************************************
 * MemRefDumpStats
 *****************************************************************/
void MemRefDumpStats(int cpuNum)
{
   int cacheNum  = GET_CACHE_NUM(cpuNum);
   int scacheNum = GET_SCACHE_NUM(cpuNum);
   int j;
   Cache  *C  = &(CACHE[cacheNum]);
   SCache *SC = &(SCACHE[scacheNum]);

#ifdef SOLO
   CPUPrint("C%d libc Wait Time: %lld\n", cpuNum,
            STATS_VALUE(cpuNum, libcWaitTime));
#endif
   CPUPrint("C%d I Misses: %lld I Refs: %lld\n", cacheNum,
            (uint64)C->stats.ICache.ReadMisses,
            (uint64)STATS_VALUE(cpuNum, iReads));
   CPUPrint("C%d I Inval %lld LinesInval %lld\n", cacheNum,
            (uint64)C->stats.ICache.Inval,
            (uint64)C->stats.ICache.LinesInval);
   CPUPrint("C%d D Read Misses: %lld Reads: %lld\n", cacheNum,
            (uint64)C->stats.DCache.ReadMisses,
            (uint64)STATS_VALUE(cpuNum, dReads));
   CPUPrint("C%d D Write Misses: %lld Writes: %lld\n", cacheNum,
            (uint64)C->stats.DCache.WriteMisses,
            (uint64)STATS_VALUE(cpuNum, dWrites));
   CPUPrint("C%d D Upgrades %lld Writebacks %lld \n", cacheNum,
            (uint64)C->stats.DCache.UpgradeMisses,
            (uint64)C->stats.DCache.Writebacks);
   CPUPrint("C%d D Inval %lld LinesInval %lld Dwngrd %lld LinesWback %lld\n",
            cacheNum,
            (uint64)C->stats.DCache.Inval,
            (uint64)C->stats.DCache.LinesInval,
            (uint64)C->stats.DCache.Downgrade,
            (uint64)C->stats.DCache.LinesWriteback);
   CPUPrint("C%d IGet Retries %lld\n", cacheNum,
            (uint64)C->stats.ICache.RetriedIGets);
   CPUPrint("C%d DGet Retries %lld\n", cacheNum,
            (uint64)C->stats.DCache.RetriedDGets);
   CPUPrint("C%d DGetX Retries %lld\n", cacheNum,
            (uint64)C->stats.DCache.RetriedDGetXs);

   for (j = 0; j < MHT_SIZE+1; j++) {
      CPUPrint("C%d MHT%d %lld\n", cacheNum, j,
               (uint64)C->stats.mhtOccupancyHist[j]);
   }

   CPUPrint("C%d S IMisses %lld IRefs: %lld\n", cacheNum,
            (uint64)SC->stats.IgetMisses, (uint64)SC->stats.Igets);
   CPUPrint("C%d S DMisses: GetMisses %lld GetRefs %lld\n", cacheNum,
            (uint64)SC->stats.DgetMisses, (uint64)SC->stats.Dgets);
   CPUPrint("C%d S DMisses: GetXMisses %lld GetXRefs %lld\n", cacheNum,
            (uint64)SC->stats.DgetXMisses, (uint64)SC->stats.DgetXs);
   CPUPrint("C%d S NAKs: %lld Merges: %lld\n", cacheNum,
            (uint64)SC->stats.NAKs, (uint64)SC->stats.MergeMisses);
   CPUPrint("C%d S UpgMisses: %lld UpgRefs: %lld (%.2f%%) Wbacks %lld Replace %lld\n",
            cacheNum,
            (uint64)SC->stats.DupgradeMisses,
            (uint64)SC->stats.Dupgrades,
            (SC->stats.Dupgrades > 0 ?
             (100.0 * ((double)SC->stats.DupgradeMisses /
                       (double)SC->stats.Dupgrades)) : (double)-1.0),
            (uint64)SC->stats.Writebacks,
            (uint64)SC->stats.Replacements);
   CPUPrint("C%d S Inval %lld LinesInval %lld Dwngrd %lld LinesWback %lld\n",
            cacheNum,
            (uint64)SC->stats.Inval,
            (uint64)SC->stats.LinesInval,
            (uint64)SC->stats.Downgrade,
            (uint64)SC->stats.LinesWriteback);

   for (j = 0; j < SMHT_SIZE+1; j++) {
      CPUPrint("C%d SMHT%d %lld\n", cacheNum, j,
               (uint64)SC->stats.smhtOccupancyHist[j]);
   }

   CPUPrint("C%d S IGetMHTRetries %lld DGetMHTRetries %lld DGetXMHTRetries %lld DUGetXMHTRetries %lld\n",
            cacheNum,
            (uint64)SC->stats.IGetMHTRetries,
            (uint64)SC->stats.DGetMHTRetries,
            (uint64)SC->stats.DGetXMHTRetries,
            (uint64)SC->stats.DUGetXMHTRetries);
   CPUPrint("C%d S Prefetches %lld\n", cacheNum,
            (uint64)SC->stats.Prefetches);

#ifdef HWBCOPY
   if (SimConfigGetBool("Hwbcopy.Streaming")) {
      for (j = 1; j <= SimConfigGetInt("Hwbcopy.StreamDepth"); j++) {
         CPUPrint("C%d S StreamGets[%d] %lld\n", cacheNum, j,
                  (uint64)SC->stats.StreamGets[j]);
         CPUPrint("C%d S StreamGetXs[%d] %lld\n", cacheNum, j,
                  (uint64)SC->stats.StreamGetXs[j]);
      }
   }
#endif

   if (cpuNum == (TOTAL_CPUS-1)) {
      memsysVec.MemsysDumpStats();
   }
}

/*****************************************************************
 * InitPCaches
 * Initialize the primary caches.
 *****************************************************************/
void InitPCaches(void)
{
   int i, j, k;
   int log2ICACHE_ASSOC = GetLog2(ICACHE_ASSOC);
   int log2DCACHE_ASSOC = GetLog2(DCACHE_ASSOC);

   if (ICACHE_LINE_SIZE > SCACHE_LINE_SIZE) {
      CPUError("ICache.LineSize must be smaller than SCacheLineSize\n");
   }
   if (DCACHE_LINE_SIZE > SCACHE_LINE_SIZE) {
      CPUError("DCache.LineSize must be smaller than SCacheLineSize\n");
   }
   if (machines.DCacheAssoc != DCACHE_ASSOC) {
      CPUError("DCache.Assoc !=%i (not implemented) \n", DCACHE_ASSOC);
   }
   if (machines.ICacheAssoc != ICACHE_ASSOC) {
      CPUError("ICache.Assoc !=%i (not implemented) \n", ICACHE_ASSOC);
   }
#ifdef DATA_HANDLING
   if (DCACHE_HIT_ALWAYS || ICACHE_HIT_ALWAYS) {
      CPUError("ICache.AHit and DCache.AMiss are not supported with data handling!\n");
   }
#endif
   if (DCACHE_MISS_ALWAYS || ICACHE_MISS_ALWAYS) {
      if (SCACHE_HIT_TIME != 0) {
         CPUError("ICacheAMiss || DCacheAMiss set. SCacheHitTime must be set to 0!\n");
      }
   }
   if (ICACHE_HIT_ALWAYS && ICACHE_MISS_ALWAYS) {
      CPUError("Make up your mind: ICache.HitAlways OR ICache.MissAlways\n");
   }
   if (DCACHE_HIT_ALWAYS && DCACHE_MISS_ALWAYS) {
      CPUError("Make up your mind: DCache.HitAlways OR DCache.MissAlways\n");
   }

   /* Set up variables for the cache macros... it's better to compute
    * these once here than every time we do a lookup.
    */
   iTagShift    = log2ICACHE_SIZE - log2ICACHE_ASSOC;
   iAddrDivisor = ICACHE_SIZE/ICACHE_ASSOC;
   iIndexMask   = ICACHE_INDEX - 1;
   dTagShift    = log2DCACHE_SIZE - log2DCACHE_ASSOC;
   dAddrDivisor = DCACHE_SIZE/DCACHE_ASSOC;
   dIndexMask   = DCACHE_INDEX - 1;

   CACHE = (Cache *)calloc(TOTAL_CPUS, sizeof(Cache));

   CPUPrint("ICACHE: Assoc = %d Size = %#x Line = %d\n",
            ICACHE_ASSOC, ICACHE_SIZE, ICACHE_LINE_SIZE);
   CPUPrint("DCACHE: Assoc = %d Size = %#x Line = %d\n",
            DCACHE_ASSOC, DCACHE_SIZE, DCACHE_LINE_SIZE);
   CPUPrint("SIMOS running with a write buffer size %d!\n",
            WRITE_BUFFER_SIZE);

   for (j = 0; j < TOTAL_CPUS; j++) {
      /* Allocate and initialize the ICaches */
      CACHE[j].ICache.set =
         (struct ICacheSet *)calloc(ICACHE_INDEX, sizeof(struct ICacheSet));
      CACHE[j].DCache.set =
         (struct DCacheSet *)calloc(DCACHE_INDEX, sizeof(struct DCacheSet));
      for (i = 0; i < ICACHE_INDEX; i++) {
         for (k = 0; k < ICACHE_ASSOC; k++) {
            CACHE[j].ICache.set[i].tags[k] = INVALID_TAG;
         }
         ICACHE_INIT_LRU(CACHE[j].ICache.set[i].LRU);
      }
      /* Initialize the DCaches */
      for (i = 0; i < DCACHE_INDEX; i++) {
         for (k = 0; k < DCACHE_ASSOC; k++) {
            CACHE[j].DCache.set[i].tags[k] = INVALID_TAG;
         }
         DCACHE_INIT_LRU(CACHE[j].DCache.set[i].LRU);
      }
      /* Initialize the Miss Handling Tables */
      for (i = 0; i < MHT_SIZE; i++) {
         CACHE[j].MHT[i].inuse = FALSE;
         CACHE[j].MHT[i].writeBuffer =
            MemAlign(sizeof(uint64), sizeof(*CACHE[j].MHT[i].writeBuffer));
         for (k = 0; k < 128; k++) {
            CACHE[j].MHT[i].writeBuffer->mask[k] = 0;
         }
      }
      CACHE[j].MHTnumInuse = 0;
      CACHE[j].activeWriteBuffers = 0;
   }

   useWriteBuffer = (WRITE_BUFFER_SIZE > 0);

   if ((TOTAL_CPUS == 1) && !UPGRADES_ON_UP) {
      /* ***
       * Upgrades are not needed on a UP. But some protocols (such as
       * flash) require them so they are configurable.
       */
      noUpgrades = TRUE;
   }
}

/*****************************************************************
 * MemRefPrefetch
 *
 *****************************************************************/
Result
MemRefPrefetch(int cpuNum, VA vAddr, PA pAddr, int hint)
{
   PFResult pfRet = PF_SUCCESS;

   switch (hint) {
   case 0:
   case 4:
   case 6:
      if (!STATS_VALUE(cpuNum, prefMHTStallStart)) {
         STATS_INC(cpuNum, prefStats.prefs, 1);
      } else {
         STATS_ADD_INTERVAL(cpuNum, prefStats.prefMHTStallTime,
                            prefMHTStallStart);
      }
      pfRet = SCachePrefetch(cpuNum, vAddr, pAddr, MEMSYS_GET);
      {
         switch (pfRet) {
         case PF_SUCCESS:
            /* prefetch was immediately available from the memory system */
            STATS_INC(cpuNum, prefStats.prefL1Hits, 1);
            break;
         case PF_RESIDENT:
            STATS_INC(cpuNum, prefStats.prefL2Hits, 1);
            break;
         case PF_MERGE:
            STATS_INC(cpuNum, prefStats.prefMerges, 1);
            break;
         case PF_STALL:
            STATS_INC(cpuNum, prefStats.prefStalls, 1);
            break;
         case PF_FAILURE:
            STATS_INC(cpuNum, prefStats.prefMHTStall, 1);
            STATS_SET(cpuNum, prefMHTStallStart, CPUVec.CycleCount(cpuNum));
            return FAILURE;
         case PF_UPGRADE:
            STATS_INC(cpuNum, prefStats.prefUpgrades, 1);
            break;
         default:
            CPUWarning("Bad PREF return value\n");
         }
      }
      break;
   case 1:
   case 5:
   case 7:
      if (!STATS_VALUE(cpuNum, prefMHTStallStart)) {
         STATS_INC(cpuNum, prefStats.prefXs, 1);
      } else {
         STATS_ADD_INTERVAL(cpuNum, prefStats.prefXMHTStallTime,
                            prefMHTStallStart);
      }
      pfRet = SCachePrefetch(cpuNum, vAddr, pAddr, MEMSYS_GETX);
      {
         switch (pfRet) {
         case PF_SUCCESS:
            /* prefetch was immediately available from the memory system */
            STATS_INC(cpuNum, prefStats.prefXL1Hits, 1);
            break;
         case PF_RESIDENT:
            STATS_INC(cpuNum, prefStats.prefXL2Hits, 1);
            break;
         case PF_MERGE:
            STATS_INC(cpuNum, prefStats.prefXMerges, 1);
            break;
         case PF_STALL:
            /* normal case: prefetch out in memsys */
            STATS_INC(cpuNum, prefStats.prefXStalls, 1);
            break;
         case PF_FAILURE:
            STATS_INC(cpuNum, prefStats.prefXMHTStall, 1);
            STATS_SET(cpuNum, prefMHTStallStart, CPUVec.CycleCount(cpuNum));
            return FAILURE;
         case PF_UPGRADE:
            STATS_INC(cpuNum, prefStats.prefXUpgrades, 1);
            break;
         default:
            CPUWarning("Bad PREF return value\n");
         }
      }
      break;
   default:
      CPUWarning("Unknown hint in PREF at %#x\n", vAddr);
      break;
   }
   return SUCCESS;
}

/*****************************************************************
 * Implementation of supported cache ops
 *****************************************************************/
Result MemRefIndexHitWBInval(int cpuNum, VA vAddr, PA pAddr)
{
   int ret;
   int wasDirty;
   PA realPA;
   byte data[128];
   Result res = SUCCESS;

   ret = CacheExtractIndex(cpuNum, pAddr, SCACHE_LINE_SIZE,
                           &wasDirty, &realPA, data);
   if (ret) {
      /* Issue bogus command (get won't occur because of the -1),
         but with an associated writeback */
      if (wasDirty) {
         res = memsysVec.MemsysCmd(cpuNum, MEMSYS_GET, 0LL, -1,
                                   realPA, TRUE, data);
      } else {
         res = memsysVec.MemsysCmd(cpuNum, MEMSYS_GET, 0LL, -1,
                                   realPA, FALSE, data);
      }
   }
   return res;
}

Result MemRefHitWBInval(int cpuNum, VA vAddr, PA pAddr)
{
   int ret;
   int wasDirty;
   byte data[128];
   Result res = SUCCESS;

   ret = CacheExtract(cpuNum, pAddr, SCACHE_LINE_SIZE, &wasDirty, data);
   if (ret) {
      /* If line found */
      /* Issue bogus command (get won't occur because of the -1),
         but with an associated writeback */
      if (wasDirty) {
         res = memsysVec.MemsysCmd(cpuNum, MEMSYS_GET, 0LL, -1,
                                   pAddr, TRUE, data);
      } else {
         res = memsysVec.MemsysCmd(cpuNum, MEMSYS_GET, 0LL, -1,
                                   pAddr, FALSE, data);
      }
   }
   return res;
}

/*****************************************************************
 * MemRefReadInst
 *
 * Main entry into instruction cache from CPU
 * Return values:
 *    SUCCESS  - Request hit in the cache, instruction returned.
 *    STALL    - Request missed in the cache, sent to the 2nd-level
 *               or memsys.
 *    FAILURE  - Request missed but couldn't be issued to 2nd-level
 *               or memory system. Caller must retry.
 *    BUSERROR - Request suffered a bus error.
 *
 * Simulate a two-way set assoc physically index ICache.
 * XXX - Should make this virtually index, physically tagged.
 *****************************************************************/
Result MemRefReadInst(int cpuNum, VA vAddr, PA pAddr, Inst *inst)
{
   int cacheNum = GET_CACHE_NUM(cpuNum);
   int ind = ICACHE_INDEXOF(pAddr);
   PA tag = ICACHE_TAG(pAddr);
   struct ICacheSet *set = &CACHE[cacheNum].ICache.set[ind];
   Result ret;
   int lineindex, setindex;

   if (ICACHE_HIT_ALWAYS) {
      setindex = 0;
      set->data[setindex] =
         (byte *)CPUVec.PAToHostAddr(cpuNum,
                                     pAddr & ~(PA)(ICACHE_LINE_SIZE-1),
                                     vAddr);
      lineindex = pAddr & (ICACHE_LINE_SIZE-1);
      *inst = *(Inst *)(set->data[setindex] + lineindex);
      QUICK_ICACHE_SET(cpuNum, pAddr, set, setindex);
      return SUCCESS;
   }
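
/*
 * Illustration only, NOT part of pcache.c: a minimal, hypothetical
 * CPU-side wrapper around MemRefPrefetch() above. It restates the
 * hint dispatch visible in that function: hints 0, 4 and 6 issue a
 * shared (MEMSYS_GET) prefetch, hints 1, 5 and 7 an exclusive
 * (MEMSYS_GETX) prefetch, and a FAILURE return corresponds to the
 * PF_FAILURE path (miss handling table full), so the prefetch should
 * be retried later or dropped. The wrapper name IssuePrefHint and the
 * willWrite parameter are assumptions, not SimOS code.
 */
#if 0   /* illustrative sketch only */
static void
IssuePrefHint(int cpuNum, VA vAddr, PA pAddr, int willWrite)
{
   /* 0/4/6 -> shared prefetch, 1/5/7 -> exclusive prefetch */
   int hint = willWrite ? 1 : 0;

   if (MemRefPrefetch(cpuNum, vAddr, pAddr, hint) == FAILURE) {
      /* MHT was full (PF_FAILURE above); a CPU model would typically
         retry the PREF on a later cycle or simply drop it. */
   }
}
#endif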
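
/*
 * Illustration only, NOT part of pcache.c: a hypothetical fetch helper
 * showing how a CPU model could act on the MemRefReadInst() return
 * values documented in its header comment (SUCCESS / STALL / FAILURE /
 * BUSERROR). Only that return-value contract comes from the source;
 * the helper name FetchInstruction and the per-case policies are
 * assumptions.
 */
#if 0   /* illustrative sketch only */
static Result
FetchInstruction(int cpuNum, VA pc, PA ppc, Inst *inst)
{
   Result r = MemRefReadInst(cpuNum, pc, ppc, inst);

   switch (r) {
   case SUCCESS:
      /* ICache hit: *inst already holds the instruction word. */
      break;
   case STALL:
      /* Miss was sent to the 2nd-level cache or memory system; the
         CPU model stalls until the fill completes. */
      break;
   case FAILURE:
      /* Miss could not be issued to the 2nd-level or memory system;
         the caller must retry the fetch on a later cycle. */
      break;
   case BUSERROR:
      /* The reference suffered a bus error; raise the exception. */
      break;
   }
   return r;
}
#endif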