// miss_queue.cc — excerpt (part 1 of 2) of the M5 cache-model miss/writeback
// queue implementation; the original file is 738 lines long.
/*
 * Copyright (c) 2003, 2004, 2005
 * The Regents of The University of Michigan
 * All Rights Reserved
 *
 * This code is part of the M5 simulator, developed by Nathan Binkert,
 * Erik Hallnor, Steve Raasch, and Steve Reinhardt, with contributions
 * from Ron Dreslinski, Dave Greene, Lisa Hsu, Kevin Lim, Ali Saidi,
 * and Andrew Schultz.
 *
 * Permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any
 * purpose, so long as the copyright notice above, this grant of
 * permission, and the disclaimer below appear in all copies made; and
 * so long as the name of The University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION FROM THE
 * UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY PURPOSE, AND
 * WITHOUT WARRANTY BY THE UNIVERSITY OF MICHIGAN OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE. THE REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE
 * LIABLE FOR ANY DAMAGES, INCLUDING DIRECT, SPECIAL, INDIRECT,
 * INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM
 * ARISING OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGES.
 */

/**
 * @file
 * Miss and writeback queue definitions.
 */

#include "cpu/exec_context.hh"
#include "cpu/smt.hh" // for maxThreadsPerCPU
#include "mem/cache/base_cache.hh"
#include "mem/cache/miss/miss_queue.hh"
#include "mem/cache/prefetch/base_prefetcher.hh"

using namespace std;

// simple constructor
/**
 * @todo Remove the +16 from the write buffer constructor once we handle
 * stalling on writebacks due to compression writes.
 */
// NOTE(review): the @todo above says "+16", but the code below pads the
// write buffer by numMSHRs+1000 — confirm which value is intended.
MissQueue::MissQueue(int numMSHRs, int numTargets, int write_buffers,
                     bool write_allocate, bool prefetch_miss)
    // mq: the miss (MSHR) queue, with 4 reserve entries;
    // wb: the writeback queue, heavily over-padded (see @todo above).
    : mq(numMSHRs, 4), wb(write_buffers, numMSHRs+1000), numMSHR(numMSHRs),
      numTarget(numTargets), writeBuffers(write_buffers),
      writeAllocate(write_allocate), order(0), prefetchMiss(prefetch_miss)
{
    // No MSHR has been denied a target yet.
    noTargetMSHR = NULL;
}

/**
 * Register statistics under the given cache name. Builds per-command
 * vectors (MSHR hits, misses, miss latency, uncacheable accesses and
 * latency) plus demand/overall formula stats derived from them; miss
 * rates divide by the owning cache's access counts.
 * @param name The name of the parent cache, used as the stat prefix.
 */
void
MissQueue::regStats(const string &name)
{
    using namespace Stats;

    writebacks
        .init(maxThreadsPerCPU)
        .name(name + ".writebacks")
        .desc("number of writebacks")
        .flags(total)
        ;

    // MSHR hit statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd = (MemCmdEnum)access_idx;
        const string &cstr = cmd.toString();
        mshr_hits[access_idx]
            .init(maxThreadsPerCPU)
            .name(name + "." + cstr + "_mshr_hits")
            .desc("number of " + cstr + " MSHR hits")
            .flags(total | nozero | nonan)
            ;
    }

    // Demand = reads + writes; overall additionally folds in prefetches.
    demandMshrHits
        .name(name + ".demand_mshr_hits")
        .desc("number of demand (read+write) MSHR hits")
        .flags(total)
        ;
    demandMshrHits = mshr_hits[Read] + mshr_hits[Write];

    overallMshrHits
        .name(name + ".overall_mshr_hits")
        .desc("number of overall MSHR hits")
        .flags(total)
        ;
    overallMshrHits = demandMshrHits + mshr_hits[Soft_Prefetch] +
        mshr_hits[Hard_Prefetch];

    // MSHR miss statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd = (MemCmdEnum)access_idx;
        const string &cstr = cmd.toString();
        mshr_misses[access_idx]
            .init(maxThreadsPerCPU)
            .name(name + "." + cstr + "_mshr_misses")
            .desc("number of " + cstr + " MSHR misses")
            .flags(total | nozero | nonan)
            ;
    }

    demandMshrMisses
        .name(name + ".demand_mshr_misses")
        .desc("number of demand (read+write) MSHR misses")
        .flags(total)
        ;
    demandMshrMisses = mshr_misses[Read] + mshr_misses[Write];

    overallMshrMisses
        .name(name + ".overall_mshr_misses")
        .desc("number of overall MSHR misses")
        .flags(total)
        ;
    overallMshrMisses = demandMshrMisses + mshr_misses[Soft_Prefetch] +
        mshr_misses[Hard_Prefetch];

    // MSHR miss latency statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd = (MemCmdEnum)access_idx;
        const string &cstr = cmd.toString();
        mshr_miss_latency[access_idx]
            .init(maxThreadsPerCPU)
            .name(name + "." + cstr + "_mshr_miss_latency")
            .desc("number of " + cstr + " MSHR miss cycles")
            .flags(total | nozero | nonan)
            ;
    }

    demandMshrMissLatency
        .name(name + ".demand_mshr_miss_latency")
        .desc("number of demand (read+write) MSHR miss cycles")
        .flags(total)
        ;
    demandMshrMissLatency = mshr_miss_latency[Read] + mshr_miss_latency[Write];

    overallMshrMissLatency
        .name(name + ".overall_mshr_miss_latency")
        .desc("number of overall MSHR miss cycles")
        .flags(total)
        ;
    overallMshrMissLatency = demandMshrMissLatency +
        mshr_miss_latency[Soft_Prefetch] + mshr_miss_latency[Hard_Prefetch];

    // MSHR uncacheable statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd = (MemCmdEnum)access_idx;
        const string &cstr = cmd.toString();
        mshr_uncacheable[access_idx]
            .init(maxThreadsPerCPU)
            .name(name + "." + cstr + "_mshr_uncacheable")
            .desc("number of " + cstr + " MSHR uncacheable")
            .flags(total | nozero | nonan)
            ;
    }

    overallMshrUncacheable
        .name(name + ".overall_mshr_uncacheable_misses")
        .desc("number of overall MSHR uncacheable misses")
        .flags(total)
        ;
    overallMshrUncacheable = mshr_uncacheable[Read] + mshr_uncacheable[Write]
        + mshr_uncacheable[Soft_Prefetch] + mshr_uncacheable[Hard_Prefetch];

    // MSHR uncacheable latency statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd = (MemCmdEnum)access_idx;
        const string &cstr = cmd.toString();
        mshr_uncacheable_lat[access_idx]
            .init(maxThreadsPerCPU)
            .name(name + "." + cstr + "_mshr_uncacheable_latency")
            .desc("number of " + cstr + " MSHR uncacheable cycles")
            .flags(total | nozero | nonan)
            ;
    }

    overallMshrUncacheableLatency
        .name(name + ".overall_mshr_uncacheable_latency")
        .desc("number of overall MSHR uncacheable cycles")
        .flags(total)
        ;
    overallMshrUncacheableLatency = mshr_uncacheable_lat[Read]
        + mshr_uncacheable_lat[Write] + mshr_uncacheable_lat[Soft_Prefetch]
        + mshr_uncacheable_lat[Hard_Prefetch];

#if 0
    // MSHR access formulas (disabled)
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd = (MemCmdEnum)access_idx;
        const string &cstr = cmd.toString();
        mshrAccesses[access_idx]
            .name(name + "." + cstr + "_mshr_accesses")
            .desc("number of " + cstr + " mshr accesses(hits+misses)")
            .flags(total | nozero | nonan)
            ;
        mshrAccesses[access_idx] =
            mshr_hits[access_idx] + mshr_misses[access_idx]
            + mshr_uncacheable[access_idx];
    }

    demandMshrAccesses
        .name(name + ".demand_mshr_accesses")
        .desc("number of demand (read+write) mshr accesses")
        .flags(total | nozero | nonan)
        ;
    demandMshrAccesses = demandMshrHits + demandMshrMisses;

    overallMshrAccesses
        .name(name + ".overall_mshr_accesses")
        .desc("number of overall (read+write) mshr accesses")
        .flags(total | nozero | nonan)
        ;
    overallMshrAccesses = overallMshrHits + overallMshrMisses
        + overallMshrUncacheable;
#endif

    // MSHR miss rate formulas (relative to the cache's access counts)
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd = (MemCmdEnum)access_idx;
        const string &cstr = cmd.toString();
        mshrMissRate[access_idx]
            .name(name + "." + cstr + "_mshr_miss_rate")
            .desc("mshr miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;
        mshrMissRate[access_idx] =
            mshr_misses[access_idx] / cache->accesses[access_idx];
    }

    demandMshrMissRate
        .name(name + ".demand_mshr_miss_rate")
        .desc("mshr miss rate for demand accesses")
        .flags(total)
        ;
    demandMshrMissRate = demandMshrMisses / cache->demandAccesses;

    overallMshrMissRate
        .name(name + ".overall_mshr_miss_rate")
        .desc("mshr miss rate for overall accesses")
        .flags(total)
        ;
    overallMshrMissRate = overallMshrMisses / cache->overallAccesses;

    // mshrMiss latency formulas (average cycles per miss)
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd = (MemCmdEnum)access_idx;
        const string &cstr = cmd.toString();
        avgMshrMissLatency[access_idx]
            .name(name + "." + cstr + "_avg_mshr_miss_latency")
            .desc("average " + cstr + " mshr miss latency")
            .flags(total | nozero | nonan)
            ;
        avgMshrMissLatency[access_idx] =
            mshr_miss_latency[access_idx] / mshr_misses[access_idx];
    }

    demandAvgMshrMissLatency
        .name(name + ".demand_avg_mshr_miss_latency")
        .desc("average overall mshr miss latency")
        .flags(total)
        ;
    demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;

    overallAvgMshrMissLatency
        .name(name + ".overall_avg_mshr_miss_latency")
        .desc("average overall mshr miss latency")
        .flags(total)
        ;
    overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;

    // mshrUncacheable latency formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd = (MemCmdEnum)access_idx;
        const string &cstr = cmd.toString();
        avgMshrUncacheableLatency[access_idx]
            .name(name + "." + cstr + "_avg_mshr_uncacheable_latency")
            .desc("average " + cstr + " mshr uncacheable latency")
            .flags(total | nozero | nonan)
            ;
        avgMshrUncacheableLatency[access_idx] =
            mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
    }

    overallAvgMshrUncacheableLatency
        .name(name + ".overall_avg_mshr_uncacheable_latency")
        .desc("average overall mshr uncacheable latency")
        .flags(total)
        ;
    overallAvgMshrUncacheableLatency =
        overallMshrUncacheableLatency / overallMshrUncacheable;

    mshr_cap_events
        .init(maxThreadsPerCPU)
        .name(name + ".mshr_cap_events")
        .desc("number of times MSHR cap was activated")
        .flags(total)
        ;

    // software prefetching stats
    soft_prefetch_mshr_full
        .init(maxThreadsPerCPU)
        .name(name + ".soft_prefetch_mshr_full")
        .desc("number of mshr full events for SW prefetching instrutions")
        .flags(total)
        ;

    mshr_no_allocate_misses
        .name(name +".no_allocate_misses")
        .desc("Number of misses that were no-allocate")
        ;
}

/**
 * Attach this miss queue to its owning cache and cache the block size.
 * @param _cache The parent cache (also supplies access counts for the
 * miss-rate formulas registered above).
 */
void
MissQueue::setCache(BaseCache *_cache)
{
    cache = _cache;
    blkSize = cache->getBlockSize();
}

/**
 * Attach the prefetcher to use.
 * @param _prefetcher The prefetcher.
 */
void
MissQueue::setPrefetcher(BasePrefetcher *_prefetcher)
{
    prefetcher = _prefetcher;
}

/**
 * Allocate an MSHR for a miss: cacheable requests are tagged as cache
 * line fills, the cache is blocked when the MSHR queue fills, and the
 * bus is requested unless this is a hardware prefetch.
 * @param req The request that missed.
 * @param size The number of bytes to fetch.
 * @param time The time the miss occurred.
 * @return A pointer to the newly allocated MSHR.
 */
MSHR*
MissQueue::allocateMiss(MemReqPtr &req, int
size, Tick time)
{
    MSHR* mshr = mq.allocate(req, size);
    // Stamp allocation order so misses can be serviced oldest-first.
    mshr->order = order++;
    if (!req->isUncacheable() ){//&& !req->isNoAllocate()) {
        // Mark this as a cache line fill
        mshr->req->flags |= CACHE_LINE_FILL;
    }
    if (mq.isFull()) {
        // Out of MSHRs: block the cache until one is freed.
        cache->setBlocked(Blocked_NoMSHRs);
    }
    if (req->cmd != Hard_Prefetch) {
        // If we need to request the bus (not on HW prefetch), do so
        cache->setMasterRequest(Request_MSHR, time);
    }
    return mshr;
}

/**
 * Allocate a write-buffer entry for a write/writeback.
 * @param req The write request.
 * @param size The number of bytes to write (note: wb.allocate is passed
 * req->size, not this parameter).
 * @param time The time the write was issued.
 */
MSHR*
MissQueue::allocateWrite(MemReqPtr &req, int size, Tick time)
{
    MSHR* mshr = wb.allocate(req,req->size);
    mshr->order = order++;
// (End of excerpt — viewer UI text removed. The remainder of
// MissQueue::allocateWrite and the rest of the file continue in part 2
// of the original 738-line source.)