cache_tags_impl.hh
Source: "Linux-based C++ processor simulation platform (with processor pipeline)" — .hh file, 755 lines total, page 1 of 2.
/* * Copyright (c) 2003, 2004, 2005 * The Regents of The University of Michigan * All Rights Reserved * * This code is part of the M5 simulator, developed by Nathan Binkert, * Erik Hallnor, Steve Raasch, and Steve Reinhardt, with contributions * from Ron Dreslinski, Dave Greene, Lisa Hsu, Kevin Lim, Ali Saidi, * and Andrew Schultz. * * Permission is granted to use, copy, create derivative works and * redistribute this software and such derivative works for any * purpose, so long as the copyright notice above, this grant of * permission, and the disclaimer below appear in all copies made; and * so long as the name of The University of Michigan is not used in * any advertising or publicity pertaining to the use or distribution * of this software without specific, written prior authorization. * * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION FROM THE * UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY PURPOSE, AND * WITHOUT WARRANTY BY THE UNIVERSITY OF MICHIGAN OF ANY KIND, EITHER * EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE. THE REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE * LIABLE FOR ANY DAMAGES, INCLUDING DIRECT, SPECIAL, INDIRECT, * INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM * ARISING OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF SUCH * DAMAGES. *//** @file * Definitions of Cache TagStore template policy. 
*/
#include "mem/cache/base_cache.hh"
#include "mem/cache/miss/mshr.hh"
#include "mem/cache/tags/cache_tags.hh"
#include "mem/cache/prefetch/base_prefetcher.hh"
#include "base/trace.hh" // for DPRINTF

using namespace std;

/**
 * Construct the tag-store wrapper around a concrete tag store.
 * The owning BaseCache is not known yet; it is attached later through
 * setCache().
 * @param _ct The concrete tag store being wrapped.
 * @param comp_latency (De)compression latency; charged below as
 *        comp_latency/4 per compressed access.
 * @param do_fast_writes Fast-write policy flag (stored here, used elsewhere).
 * @param store_compressed Whether blocks are stored compressed.
 * @param adaptive_compression Whether compression is applied adaptively.
 * @param prefetch_miss Whether the prefetcher observes the miss stream
 *        (as opposed to the access stream).
 */
template <class Tags, class Compression>
CacheTags<Tags,Compression>::CacheTags(Tags *_ct, int comp_latency,
                                       bool do_fast_writes,
                                       bool store_compressed,
                                       bool adaptive_compression,
                                       bool prefetch_miss)
    : ct(_ct), compLatency(comp_latency), doFastWrites(do_fast_writes),
      storeCompressed(store_compressed),
      adaptiveCompression(adaptive_compression),
      prefetchMiss(prefetch_miss), blkSize(ct->getBlockSize())
{
    cache = NULL; // attached later via setCache()
}

/** Register statistics by forwarding to the wrapped tag store. */
template <class Tags, class Compression>
void
CacheTags<Tags,Compression>::regStats(const string &name)
{
    using namespace Stats;
    ct->regStats(name);
}

/**
 * Attach the owning cache and record the bus parameters used for
 * fill-latency modelling (busWidth bytes per busRatio ticks).
 */
template <class Tags, class Compression>
void
CacheTags<Tags,Compression>::setCache(BaseCache *_cache, int bus_width, int bus_ratio)
{
    cache = _cache;
    busWidth = bus_width;
    busRatio = bus_ratio;
    ct->setCache(cache);
    objName = cache->name();
}

/** Attach the prefetcher that is notified of misses / hits-under-prefetch. */
template <class Tags, class Compression>
void
CacheTags<Tags,Compression>::setPrefetcher(BasePrefetcher *_prefetcher)
{
    prefetcher = _prefetcher;
}

/**
 * Look the request up in the tag store and, on a hit with sufficient
 * permissions, satisfy it (copying data in/out when the cache models data).
 * @param req The access; req->offset is set here from its paddr.
 * @param lat Output access latency (forced to 0 when update == false).
 * @param writebacks Output list for writebacks generated by fixCopy().
 * @param update Whether this access should update replacement state.
 * @return The hit block, or NULL on a miss / permission violation.
 */
template <class Tags, class Compression>
typename CacheTags<Tags,Compression>::BlkType*
CacheTags<Tags,Compression>::handleAccess(MemReqPtr &req, int & lat,
                                          MemReqList & writebacks, bool update)
{
    MemCmd cmd = req->cmd;
    // Set the block offset here
    req->offset = ct->extractBlkOffset(req->paddr);
    BlkType *blk = NULL;
    if (update) {
        blk = ct->findBlock(req, lat);
    } else {
        // Probe only: do not touch replacement state, charge no latency.
        blk = ct->findBlock(req->paddr, req->asid);
        lat = 0;
    }
    if (blk != NULL) {
        // Hit
        if (blk->isPrefetch()) {
            // Signal that this was a hit under prefetch (we can only get
            // here if the block was hardware-prefetched).
            DPRINTF(HWPrefetch, "%s:Hit a block that was prefetched\n",
                    cache->name());
            blk->status &= ~BlkHWPrefetched;
            if (prefetchMiss) {
                // If we are using the miss stream, signal the prefetcher;
                // otherwise the access stream would have already signaled
                // this hit.
// handleAccess (continuation): finish the hit-under-prefetch notification,
// then satisfy the request if the block's permissions allow it.
                prefetcher->handleMiss(req, curTick);
            }
        }
        if ((cmd.isWrite() && blk->isWritable()) ||
            (cmd.isRead() && blk->isValid())) {
            // We are satisfying the request
            req->flags |= SATISFIED;
            if (cmd.isWrite()){
                blk->status |= BlkDirty;
                ct->fixCopy(req, writebacks);
            }
            if (blk->isCompressed()) {
                // If the data is compressed, need to increase the latency
                lat += (compLatency/4);
            }
            if (cache->doData()) {
                bool write_data = false;
                assert(verifyData(blk));
                if (cmd.isWrite()){
                    write_data = true;
                    assert(req->offset < blkSize);
                    assert(req->size <= blkSize);
                    assert(req->offset+req->size <= blkSize);
                    memcpy(blk->data + req->offset, req->data, req->size);
                } else {
                    assert(req->offset < blkSize);
                    assert(req->size <= blkSize);
                    assert(req->offset + req->size <=blkSize);
                    memcpy(req->data, blk->data + req->offset, req->size);
                }
                if (write_data ||
                    (adaptiveCompression && blk->isCompressed())) {
                    // If we wrote data, need to update the internal block
                    // data.
                    updateData(blk, writebacks,
                               !(adaptiveCompression && blk->isReferenced()));
                }
            }
        } else {
            // permission violation, treat it as a miss
            blk = NULL;
        }
    }
    return blk;
}

/**
 * Fill a block from a response and optionally satisfy a single target
 * request against the freshly filled block.
 * @param blk The block the tag store maps req to (may be NULL);
 *        doReplacement() allocates/evicts as needed.
 * @param req The fill response.
 * @param new_state New coherence/permission state for the block.
 * @param writebacks Output list for evictions / deferred copies.
 * @param target Optional request to satisfy from the new block.
 * @return The filled block, or NULL if the target invalidated it.
 */
template <class Tags, class Compression>
typename CacheTags<Tags,Compression>::BlkType*
CacheTags<Tags,Compression>::handleFill(BlkType *blk, MemReqPtr &req,
                                        CacheBlk::State new_state,
                                        MemReqList & writebacks,
                                        MemReqPtr target)
{
#ifndef NDEBUG
    // Callers must pass the block the tag store currently maps req to.
    BlkType *tmp_blk = ct->findBlock(req->paddr, req->asid);
    assert(tmp_blk == blk);
#endif
    blk = doReplacement(blk, req, new_state, writebacks);
    if (cache->doData()) {
        if (req->cmd.isRead()) {
            memcpy(blk->data, req->data, blkSize);
        }
    }
    // Model serialization of the fill over the bus: the first busWidth
    // bytes arrive with the response; each further busWidth chunk costs
    // busRatio ticks.  NOTE(review): integer division drops a partial
    // trailing chunk — presumably sizes are multiples of busWidth; confirm.
    int bus_transactions = (req->size > busWidth) ?
        (req->size - busWidth) / busWidth : 0;
    blk->whenReady = curTick + (busRatio * bus_transactions) +
        (req->isCompressed() ?
// handleFill (continuation): finish the whenReady computation, then
// satisfy the optional target against the newly filled block.
        compLatency/4 : 0);
    // Respond to target, if any
    if (target) {
        MemCmd cmd = target->cmd;
        target->flags |= SATISFIED;
        if (cmd == Invalidate) {
            // The target kills the block we just filled.
            invalidateBlk(blk);
            blk = NULL;
        }
        if (cmd == Copy) {
            // Copies are deferred: queue the request as a writeback.
            writebacks.push_back(target);
        } else {
            if (blk && (cmd.isWrite() ? blk->isWritable() : blk->isValid())) {
                assert(cmd.isWrite() || cmd.isRead());
                if (cmd.isWrite()) {
                    blk->status |= BlkDirty;
                    ct->fixCopy(req, writebacks);
                    if (cache->doData()) {
                        assert(target->offset + target->size <= blkSize);
                        memcpy(blk->data + target->offset,
                               target->data, target->size);
                    }
                } else {
                    if (cache->doData()) {
                        assert(target->offset + target->size <= blkSize);
                        memcpy(target->data,
                               blk->data + target->offset, target->size);
                    }
                }
            }
        }
    }
    if (blk && cache->doData()) {
        // Need to write the data into the block.
        // NOTE(review): "!adaptiveCompression || true" is always true —
        // looks like a leftover; presumably it was meant to mirror the
        // referenced-block policy used in handleAccess.  Confirm against
        // the upstream M5 sources before changing.
        updateData(blk, writebacks, !adaptiveCompression || true);
    }
    return blk;
}

/**
 * Fill a block from an MSHR's response and satisfy as many of the MSHR's
 * queued targets as the block's permissions allow, modelling
 * critical-word-first response timing.
 * @param blk The block the tag store maps the MSHR's request to (may be
 *        NULL); doReplacement() allocates/evicts as needed.
 * @param mshr The miss-status holding register whose targets are serviced.
 * @param new_state New coherence/permission state for the block.
 * @param writebacks Output list for evictions / deferred copies.
 * @return The filled block, or NULL if a target invalidated it.
 */
template <class Tags, class Compression>
typename CacheTags<Tags,Compression>::BlkType*
CacheTags<Tags,Compression>::handleFill(BlkType *blk, MSHR * mshr,
                                        CacheBlk::State new_state,
                                        MemReqList & writebacks)
{
#ifndef NDEBUG
    BlkType *tmp_blk = ct->findBlock(mshr->req->paddr, mshr->req->asid);
    assert(tmp_blk == blk);
#endif
    MemReqPtr req = mshr->req;
    blk = doReplacement(blk, req, new_state, writebacks);
    if (cache->doData()) {
        if (req->cmd.isRead()) {
            memcpy(blk->data, req->data, blkSize);
        }
    }
    // Same bus-serialization model as the target-based handleFill above.
    int bus_transactions = (req->size > busWidth) ?
        (req->size - busWidth) / busWidth : 0;
    blk->whenReady = curTick + (busRatio * bus_transactions) +
        (req->isCompressed() ? compLatency/4 : 0);
    // respond to MSHR targets, if any
    // Compressed responses are not usable until fully ready (whenReady);
    // uncompressed ones can start responding immediately.
    Tick response_base = ((!req->isCompressed()) ?
// handleFill/MSHR (continuation): compute per-target completion times
// (critical word first) and service the queued targets in order.
        curTick : blk->whenReady) + ct->getHitLatency();
    // First offset for critical word first calculations
    int initial_offset = 0;
    if (mshr->hasTargets()) {
        initial_offset = mshr->getTarget()->offset;
    }
    while (mshr->hasTargets()) {
        MemReqPtr target = mshr->getTarget();
        MemCmd cmd = target->cmd;
        target->flags |= SATISFIED;
        // How many bytes past the first request is this one
        int transfer_offset = target->offset - initial_offset;
        if (transfer_offset < 0) {
            // Wrapped around the block (critical word first).
            transfer_offset += blkSize;
        }
        // Convert byte offset to cycle offset
        int tgt_latency = (!target->isCompressed()) ?
            (transfer_offset/busWidth) * busRatio : 0;
        Tick completion_time = response_base + tgt_latency;
        if (cmd == Invalidate) {
            // Mark the blk as invalid now, if it hasn't been already
            if (blk) {
                invalidateBlk(blk);
                blk = NULL;
            }
            // Also get rid of the invalidate
            mshr->popTarget();
            DPRINTF(Cache, "Popping off a Invalidate for blk_addr: %x\n",
                    req->paddr & (((ULL(1))<<48)-1));
            continue;
        }
        if (cmd == Copy) {
            // Defer the copy as a writeback and stop servicing targets.
            writebacks.push_back(target);
            break;
        }
        if (blk && (cmd.isWrite() ?
// handleFill/MSHR (continuation): satisfy or abandon the current target,
// finish the fill, then begin pseudoFill (which continues on page 2).
            blk->isWritable() : blk->isValid())) {
            assert(cmd.isWrite() || cmd.isRead());
            if (cmd.isWrite()) {
                blk->status |= BlkDirty;
                ct->fixCopy(req, writebacks);
                if (cache->doData()) {
                    assert(target->offset + target->size <= blkSize);
                    memcpy(blk->data + target->offset,
                           target->data, target->size);
                }
            } else {
                if (cache->doData()) {
                    assert(target->offset + target->size <= blkSize);
                    memcpy(target->data,
                           blk->data + target->offset, target->size);
                }
            }
        } else {
            // Invalid access, need to do another request
            // can occur if block is invalidated, or not correct
            // permissions
            break;
        }
        cache->respondToMiss(target, completion_time);
        mshr->popTarget();
    }
    if (blk && cache->doData()) {
        // Need to write the data into the block.
        // NOTE(review): "!adaptiveCompression || true" is a tautology —
        // presumably a leftover; confirm intent against upstream M5 before
        // changing.
        updateData(blk, writebacks, !adaptiveCompression || true);
    }
    return blk;
}

/**
 * Allocate (fill) a block for an MSHR's request when no matching block is
 * present yet, then respond to the MSHR's targets.
 * (The remainder of this definition is on the next page of this source.)
 */
template <class Tags, class Compression>
typename CacheTags<Tags,Compression>::BlkType*
CacheTags<Tags,Compression>::pseudoFill(MSHR * mshr, CacheBlk::State new_state,
                                        MemReqList & writebacks)
{
    MemReqPtr req = mshr->req;
    // pseudoFill must only be used when the block is not yet present.
    assert(ct->findBlock(req->paddr, req->asid) == NULL);
    BlkType *blk = doReplacement(NULL, req, new_state, writebacks);
    if (cache->doData()) {
        if (req->cmd.isRead()) {
            memcpy(blk->data, req->data, blkSize);
        }
    }
    // No bus transfer is modelled here: the block is ready immediately.
    blk->whenReady = curTick;
    // respond to MSHR targets, if any
    Tick response_base = curTick + ct->getHitLatency();
[Code-viewer UI residue (keyboard-shortcut help: copy Ctrl+C, search Ctrl+F, fullscreen F11, font size Ctrl+= / Ctrl+-) — not part of the source file. The file continues on page 2 of 2.]