cache_impl.hh
From "M5, a powerful multiprocessor system simulator, widely used in research on processor architecture and performance" · HH code · 1,524 lines total · page 1 of 4
HH
1,524 行
/*
 * Copyright (c) 2002, 2003, 2004, 2005
 * The Regents of The University of Michigan
 * All Rights Reserved
 *
 * This code is part of the M5 simulator.
 *
 * Permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any
 * purpose, so long as the copyright notice above, this grant of
 * permission, and the disclaimer below appear in all copies made; and
 * so long as the name of The University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION FROM THE
 * UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY PURPOSE, AND
 * WITHOUT WARRANTY BY THE UNIVERSITY OF MICHIGAN OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE. THE REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE
 * LIABLE FOR ANY DAMAGES, INCLUDING DIRECT, SPECIAL, INDIRECT,
 * INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM
 * ARISING OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGES.
 *
 * Authors: Erik G. Hallnor
 *          David A. Greene
 *          Nathan L. Binkert
 *          Steven K. Reinhardt
 *          Ronald G. Dreslinski
 */

/**
 * @file
 * Cache definitions.
 */

#include "sim/host.hh"
#include "base/misc.hh"
#include "base/range_ops.hh"
#include "mem/cache/cache.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "sim/sim_exit.hh" // for SimExitEvent

/**
 * Construct the cache: allocate the temporary block (with its own
 * data buffer), create and cross-wire the CPU-side and memory-side
 * ports, and register this cache with its tag store and prefetcher.
 */
template<class TagStore>
Cache<TagStore>::Cache(const Params *p, TagStore *tags, BasePrefetcher *pf)
    : BaseCache(p), prefetchAccess(p->prefetch_access),
      tags(tags), prefetcher(pf), doFastWrites(true),
      prefetchMiss(p->prefetch_miss)
{
    // tempBlock: a scratch block with its own blkSize data buffer
    // (its use is not visible in this chunk — presumably a staging
    // block for fills; confirm against the rest of the file).
    tempBlock = new BlkType();
    tempBlock->data = new uint8_t[blkSize];

    cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
                                  "CpuSidePort",
                                  p->cpu_side_filter_ranges);
    memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
                                  "MemSidePort",
                                  p->mem_side_filter_ranges);
    // Each port keeps a pointer to its peer so requests/responses can
    // be forwarded through the cache.
    cpuSidePort->setOtherPort(memSidePort);
    memSidePort->setOtherPort(cpuSidePort);

    tags->setCache(this);
    prefetcher->setCache(this);
}

/** Register statistics for the cache, its tag store, and its prefetcher. */
template<class TagStore>
void
Cache<TagStore>::regStats()
{
    BaseCache::regStats();
    tags->regStats(name());
    prefetcher->regStats(name());
}

/**
 * Look up a port by name.  "" or "cpu_side" returns the CPU-side
 * port, "mem_side" the memory-side port.  "functional" allocates a
 * NEW CpuSidePort on every call (with no address filter ranges);
 * such ports are the only ones deletePortRefs() will delete.
 */
template<class TagStore>
Port *
Cache<TagStore>::getPort(const std::string &if_name, int idx)
{
    if (if_name == "" || if_name == "cpu_side") {
        return cpuSidePort;
    } else if (if_name == "mem_side") {
        return memSidePort;
    } else if (if_name == "functional") {
        CpuSidePort *funcPort =
            new CpuSidePort(name() + "-cpu_side_funcport", this,
                            "CpuSideFuncPort",
                            std::vector<Range<Addr> >());
        funcPort->setOtherPort(memSidePort);
        return funcPort;
    } else {
        panic("Port name %s unrecognized\n", if_name);
    }
}

/**
 * Delete a functional port obtained from getPort("functional").
 * Deleting either of the two permanent ports is a fatal error.
 */
template<class TagStore>
void
Cache<TagStore>::deletePortRefs(Port *p)
{
    if (cpuSidePort == p || memSidePort == p)
        panic("Can only delete functional ports\n");

    delete p;
}

/**
 * Perform an atomic (compare-and-)swap on the block for a SwapReq
 * packet: the packet's write data is exchanged with the block's
 * current contents; for conditional swaps the write only happens if
 * the block currently holds the packet's extra-data condition value
 * (64- or 32-bit comparisons only).
 */
template<class TagStore>
void
Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
{
    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = tags->extractBlkOffset(pkt->getAddr());
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem)
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
}

/**
 * Satisfy a CPU-side request from the given (hit) block: swaps go
 * through cmpAndSwap(), writes mark the block dirty and copy data in,
 * reads copy data out (whole-block coherent reads additionally either
 * invalidate our copy on ReadEx or downgrade it to shared on Read),
 * and upgrades simply invalidate our copy.
 */
template<class TagStore>
void
Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
{
    assert(blk);
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(pkt->needsExclusive() ? blk->isWritable() : blk->isValid());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        cmpAndSwap(blk, pkt);
    } else if (pkt->isWrite()) {
        blk->status |= BlkDirty;
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
    } else if (pkt->isRead()) {
        if (pkt->isLocked()) {
            blk->trackLoadLocked(pkt);
        }
        pkt->setDataFromBlock(blk->data, blkSize);
        if (pkt->getSize() == blkSize) {
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsExclusive()) {
                // on ReadExReq we give up our copy
                tags->invalidateBlk(blk);
            } else {
                // on ReadReq we create shareable copies here and in
                // the requester
                pkt->assertShared();
                blk->status &= ~BlkWritable;
            }
        }
    } else {
        // Not a read or write... must be an upgrade.  it's OK
        // to just ack those as long as we have an exclusive
        // copy at this level.
        assert(pkt->cmd == MemCmd::UpgradeReq);
        tags->invalidateBlk(blk);
    }
}

/////////////////////////////////////////////////////
//
// MSHR helper functions
//
/////////////////////////////////////////////////////

/** Mark the given MSHR as in service (delegates to the base class). */
template<class TagStore>
void
Cache<TagStore>::markInService(MSHR *mshr)
{
    markInServiceInternal(mshr);
#if 0
    // Dead code kept from an earlier revision: cleared the pending
    // prefetch bus request when a hardware prefetch went in service.
    if (mshr->originalCmd == MemCmd::HardPFReq) {
        DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
                name());
        //Also clear pending if need be
        if (!prefetcher->havePending()) {
            deassertMemSideBusRequest(Request_PF);
        }
    }
#endif
}

/**
 * Squash all outstanding MSHR requests belonging to the given thread,
 * and unblock the cache if squashing freed the resource (no-target
 * MSHR or a full MSHR queue) that was causing the blockage.
 */
template<class TagStore>
void
Cache<TagStore>::squash(int threadNum)
{
    bool unblock = false;
    BlockedCause cause = NUM_BLOCKED_CAUSES;

    if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
        noTargetMSHR = NULL;
        unblock = true;
        cause = Blocked_NoTargets;
    }
    if (mshrQueue.isFull()) {
        // the queue may no longer be full after the squash below
        unblock = true;
        cause = Blocked_NoMSHRs;
    }
    mshrQueue.squash(threadNum);
    if (unblock && !mshrQueue.isFull()) {
        clearBlocked(cause);
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests
// coming in from the CPU side
//
/////////////////////////////////////////////////////

/**
 * Perform a cache lookup for the packet and, on a usable hit, satisfy
 * the request in place via satisfyCpuSideRequest().
 *
 * @param pkt the request packet
 * @param blk [out] set to the block found by the tag lookup (NULL on
 *            miss or for uncacheable requests)
 * @param lat [out] access latency (set by the tag lookup; hitLatency
 *            for uncacheable requests)
 * @return true if the request was satisfied at this level
 *         (including the special Writeback and failed-store-conditional
 *         cases below), false if it must be handled as a miss.
 */
template<class TagStore>
bool
Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk, int &lat)
{
    // Uncacheable requests bypass the tags entirely and are always
    // treated as misses.
    if (pkt->req->isUncacheable()) {
        blk = NULL;
        lat = hitLatency;
        return false;
    }

    blk = tags->findBlock(pkt->getAddr(), lat);

    if (prefetchAccess) {
        //We are determining prefetches on access stream, call prefetcher
        prefetcher->handleMiss(pkt, curTick);
    }

    DPRINTF(Cache, "%s %x %s\n", pkt->cmdString(), pkt->getAddr(),
            (blk) ? "hit" : "miss");

    if (blk != NULL) {

        if (blk->isPrefetch()) {
            //Signal that this was a hit under prefetch (no need for
            //use prefetch (only can get here if true)
            DPRINTF(HWPrefetch, "Hit a block that was prefetched\n");
            blk->status &= ~BlkHWPrefetched;
            if (prefetchMiss) {
                //If we are using the miss stream, signal the
                //prefetcher otherwise the access stream would have
                //already signaled this hit
                prefetcher->handleMiss(pkt, curTick);
            }
        }

        // A hit is only usable if the block's permissions match the
        // request: exclusive requests need a writable copy.
        if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
            // OK to satisfy access
            hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
            satisfyCpuSideRequest(pkt, blk);
            return true;
        }
    }

    // Can't satisfy access normally... either no block (blk == NULL)
    // or have block but need exclusive & only have shared.

    // Writeback handling is special case.  We can write the block
    // into the cache without having a writeable copy (or any copy at
    // all).
    if (pkt->cmd == MemCmd::Writeback) {
        PacketList writebacks;
        assert(blkSize == pkt->getSize());
        if (blk == NULL) {
            // need to do a replacement
            blk = allocateBlock(pkt->getAddr(), writebacks);
            if (blk == NULL) {
                // no replaceable block available, give up.
                // writeback will be forwarded to next level.
                incMissCount(pkt);
                return false;
            }
            blk->status = BlkValid | BlkReadable;
        }
        std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
        blk->status |= BlkDirty;
        // copy writebacks from replacement to write buffer
        while (!writebacks.empty()) {
            PacketPtr wbPkt = writebacks.front();
            allocateWriteBuffer(wbPkt, curTick + hitLatency, true);
            writebacks.pop_front();
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
        return true;
    }

    incMissCount(pkt);

    if (blk == NULL && pkt->isLocked() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}

/**
 * SenderState record used when forwarding a response: saves the
 * packet's previous senderState and source so restore() can put them
 * back (restoring the source as the new destination) on the way back.
 */
class ForwardResponseRecord : public Packet::SenderState
{
    Packet::SenderState *prevSenderState;
    int prevSrc;
#ifndef NDEBUG
    // Debug-only: remember which cache created this record so
    // restore() can assert it is unwound by the same cache.
    BaseCache *cache;
#endif
  public:
    ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
        : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
#ifndef NDEBUG
        , cache(_cache)
#endif
    {}
    void restore(Packet *pkt, BaseCache *_cache)
    {
        assert(_cache == cache);
        pkt->senderState = prevSenderState;
        pkt->setDest(prevSrc);
    }
};

// NOTE(review): timingAccess() continues beyond this chunk (page 1 of
// 4 of the scraped file); only its opening is visible here.
template<class TagStore>
bool
Cache<TagStore>::timingAccess(PacketPtr pkt)
{
//@todo Add back in MemDebug Calls
//    MemDebug::cacheAccess(pkt);

    // we charge hitLatency for doing just about anything here
    Tick time = curTick + hitLatency;
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?