// cache_impl.hh
// From "M5, a powerful multiprocessor system simulator; widely used as a
// basis for research on processor architecture and performance."
// (Code-viewer banner, translated: HH source, 1,524 lines total, page 1 of 4.)
// NOTE(review): this chunk begins in the middle of
// Cache<TagStore>::snoopTiming(); the function's opening -- and the
// scopes that the two closing braces below terminate (presumably a
// search of the writeback buffer that bound wb_pkt and mshr) -- lie
// outside the visible portion of the file.  Tokens reproduced unchanged.
            // Supply the snooped data ourselves from wb_pkt and
            // inhibit memory from responding.
            pkt->assertMemInhibit();
            if (!pkt->needsExclusive()) {
                pkt->assertShared();
            } else {
                // if we're not asserting the shared line, we need to
                // invalidate our copy.  we'll do that below as long as
                // the packet's invalidate flag is set...
                assert(pkt->isInvalidate());
            }

            doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(),
                                   false, false);

            if (pkt->isInvalidate()) {
                // Invalidation trumps our writeback... discard here
                markInService(mshr);
            }

            // If this was a shared writeback, there may still be
            // other shared copies above that require invalidation.
            // We could be more selective and return here if the
            // request is non-exclusive or if the writeback is
            // exclusive.
            break;
        }
    }

    handleSnoop(pkt, blk, true, false, false);
}

// Handle a snoop in atomic mode.  Uncacheable addresses and
// writebacks can never hit in this cache, so they are ignored; any
// other snoop is applied against the matching block (if one exists).
// The returned latency is always hitLatency.
template<class TagStore>
Tick
Cache<TagStore>::snoopAtomic(PacketPtr pkt)
{
    if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
        // Can't get a hit on an uncacheable address
        // Revisit this for multi level coherence
        return hitLatency;
    }

    BlkType *blk = tags->findBlock(pkt->getAddr());
    handleSnoop(pkt, blk, false, false, false);
    return hitLatency;
}

// Arbitrate between the MSHR (miss) queue and the write buffer and
// return the next entry to service, or NULL if there is nothing to
// send.  Misses are favored unless the write buffer is full with no
// entry in service; in either direction, an earlier conflicting entry
// (same address/size region) in the other queue is serviced first so
// ordering is preserved.  When both queues are empty, a hardware
// prefetch may be issued if an MSHR slot is available.
template<class TagStore>
MSHR *
Cache<TagStore>::getNextMSHR()
{
    // Check both MSHR queue and write buffer for potential requests
    MSHR *miss_mshr  = mshrQueue.getNextMSHR();
    MSHR *write_mshr = writeBuffer.getNextMSHR();

    // Now figure out which one to send... some cases are easy
    if (miss_mshr && !write_mshr) {
        return miss_mshr;
    }
    if (write_mshr && !miss_mshr) {
        return write_mshr;
    }

    if (miss_mshr && write_mshr) {
        // We have one of each... normally we favor the miss request
        // unless the write buffer is full
        if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
            // Write buffer is full, so we'd like to issue a write;
            // need to search MSHR queue for conflicting earlier miss.
            MSHR *conflict_mshr =
                mshrQueue.findPending(write_mshr->addr, write_mshr->size);

            if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
                // Service misses in order until conflict is cleared.
                return conflict_mshr;
            }

            // No conflicts; issue write
            return write_mshr;
        }

        // Write buffer isn't full, but need to check it for
        // conflicting earlier writeback
        MSHR *conflict_mshr =
            writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are
            // doing a write and we didn't have permissions
            // then subsequently saw a writeback (owned got evicted)
            // We need to make sure to perform the writeback first
            // To preserve the dirty data, then we can issue the write

            // should we return write_mshr here instead?  I.e. do we
            // have to flush writes in order?  I don't think so... not
            // for Alpha anyway.  Maybe for x86?
            return conflict_mshr;
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests.  Try a prefetch.
    assert(!miss_mshr && !write_mshr);
    if (!mshrQueue.isFull()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            // Update statistic on number of prefetches issued
            // (hwpf_mshr_misses)
            mshr_misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
            // Don't request bus, since we already have it
            return allocateMissBuffer(pkt, curTick, false);
        }
    }

    return NULL;
}

// Build the memory-side packet for the next ready MSHR, or return
// NULL if no MSHR is ready.  A simple forward reuses the target
// packet as-is; otherwise getBusPacket() constructs a cache-block
// request (mshr->isCacheFill records whether one was built).  When no
// block request applies but a response is still expected, the target
// packet is copied so the original remains available for response
// handling.  The owning MSHR is attached via pkt->senderState.
template<class TagStore>
PacketPtr
Cache<TagStore>::getTimingPacket()
{
    MSHR *mshr = getNextMSHR();

    if (mshr == NULL) {
        return NULL;
    }

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;
    PacketPtr pkt = NULL;

    if (mshr->isSimpleForward()) {
        // no response expected, just forward packet as it is
        assert(tags->findBlock(mshr->addr) == NULL);
        pkt = tgt_pkt;
    } else {
        BlkType *blk = tags->findBlock(mshr->addr);
        pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());

        mshr->isCacheFill = (pkt != NULL);

        if (pkt == NULL) {
            // not a cache block request, but a response is expected
            assert(!mshr->isSimpleForward());
            // make copy of current packet to forward, keep current
            // copy for response handling
            pkt = new Packet(tgt_pkt);
            pkt->allocate();
            if (pkt->isWrite()) {
                pkt->setData(tgt_pkt->getPtr<uint8_t>());
            }
        }
    }

    assert(pkt != NULL);
    pkt->senderState = mshr;
    return pkt;
}


///////////////
//
// CpuSidePort
//
///////////////

// Report the address ranges reachable through this port: those of the
// memory-side peer, minus any filtered ranges.  The CPU-side port
// never snoops.
template<class TagStore>
void
Cache<TagStore>::CpuSidePort::
getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
{
    // CPU side port doesn't snoop; it's a target only.
    bool dummy;
    otherPort->getPeerAddressRanges(resp, dummy);
    FilterRangeList(filterRanges, resp);
    snoop = false;
}

// Receive a timing-mode packet from the CPU side.  While the cache is
// blocked, plain requests are refused and a retry is scheduled --
// but responses and mem-inhibited packets must still be accepted.
template<class TagStore>
bool
Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
{
    // illegal to block responses... can lead to deadlock
    if (pkt->isRequest() && !pkt->memInhibitAsserted() && blocked) {
        DPRINTF(Cache,"Scheduling a retry while blocked\n");
        mustSendRetry = true;
        return false;
    }

    myCache()->timingAccess(pkt);
    return true;
}

// Atomic-mode access from the CPU side: delegate to the cache and
// return its latency.
template<class TagStore>
Tick
Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
{
    return myCache()->atomicAccess(pkt);
}

// Functional (debug) access from the CPU side.
template<class TagStore>
void
Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
{
    myCache()->functionalAccess(pkt, this, otherPort);
}

template<class TagStore>
Cache<TagStore>::CpuSidePort::CpuSidePort(const std::string &_name,
                                          Cache<TagStore> *_cache,
                                          const std::string &_label,
                                          std::vector<Range<Addr> > filterRanges)
    : BaseCache::CachePort(_name, _cache, _label, filterRanges)
{
}


///////////////
//
// MemSidePort
//
///////////////

// Report the address ranges reachable through this port (those of the
// CPU-side peer, minus filtered ranges).  The memory-side port always
// snoops.
template<class TagStore>
void
Cache<TagStore>::MemSidePort::
getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
{
    otherPort->getPeerAddressRanges(resp, snoop);
    FilterRangeList(filterRanges, resp);
    // Memory-side port always snoops, so unconditionally set flag for
    // caller.
    snoop = true;
}

// Receive a timing-mode packet from the memory side: responses go to
// handleResponse(), everything else is treated as a snoop.  Requests
// are refused (with a scheduled retry) while the cache is blocked;
// nacked packets are not yet supported.
template<class TagStore>
bool
Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
{
    // this needs to be fixed so that the cache updates the mshr and sends the
    // packet back out on the link, but it probably won't happen so until this
    // gets fixed, just panic when it does
    if (pkt->wasNacked())
        panic("Need to implement cache resending nacked packets!\n");

    if (pkt->isRequest() && blocked) {
        DPRINTF(Cache,"Scheduling a retry while blocked\n");
        mustSendRetry = true;
        return false;
    }

    if (pkt->isResponse()) {
        myCache()->handleResponse(pkt);
    } else {
        myCache()->snoopTiming(pkt);
    }
    return true;
}

// Atomic-mode packets arriving on the memory side can only be snoops.
template<class TagStore>
Tick
Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
{
    // in atomic mode, responses go back to the sender via the
    // function return from sendAtomic(), not via a separate
    // sendAtomic() from the responder.  Thus we should never see a
    // response packet in recvAtomic() (anywhere, not just here).
    assert(!pkt->isResponse());
    return myCache()->snoopAtomic(pkt);
}

// Functional (debug) access from the memory side.
template<class TagStore>
void
Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
{
    myCache()->functionalAccess(pkt, this, otherPort);
}

// Try to send one packet out the memory-side port.  Queued responses
// take precedence; otherwise the cache supplies the next request or
// writeback via getTimingPacket().  On a failed send we wait for a
// retry (deleting the packet unless it was a simple forward of the
// target packet); on success the MSHR is marked in service.  Finally,
// if not waiting on a retry, the send event is rescheduled for the
// next ready time, or the drain event is signalled when idle.
template<class TagStore>
void
Cache<TagStore>::MemSidePort::sendPacket()
{
    // if we have responses that are ready, they take precedence
    if (deferredPacketReady()) {
        bool success = sendTiming(transmitList.front().pkt);

        if (success) {
            //send successful, remove packet
            transmitList.pop_front();
        }

        waitingOnRetry = !success;
    } else {
        // check for non-response packets (requests & writebacks)
        PacketPtr pkt = myCache()->getTimingPacket();
        if (pkt == NULL) {
            // can happen if e.g. we attempt a writeback and fail, but
            // before the retry, the writeback is eliminated because
            // we snoop another cache's ReadEx.
            waitingOnRetry = false;
        } else {
            MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);

            bool success = sendTiming(pkt);
            waitingOnRetry = !success;
            if (waitingOnRetry) {
                DPRINTF(CachePort, "now waiting on a retry\n");
                if (!mshr->isSimpleForward()) {
                    delete pkt;
                }
            } else {
                myCache()->markInService(mshr);
            }
        }
    }

    // tried to send packet... if it was successful (no retry), see if
    // we need to rerequest bus or not
    if (!waitingOnRetry) {
        Tick nextReady = std::min(deferredPacketReadyTime(),
                                  myCache()->nextMSHRReadyTime());
        // @TODO: need to factor in prefetch requests here somehow
        if (nextReady != MaxTick) {
            DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
            sendEvent->schedule(std::max(nextReady, curTick + 1));
        } else {
            // no more to send right now: if we're draining,
            // we may be done
            if (drainEvent) {
                drainEvent->process();
                drainEvent = NULL;
            }
        }
    }
}

// The peer has granted a retry: resend the pending packet.
template<class TagStore>
void
Cache<TagStore>::MemSidePort::recvRetry()
{
    assert(waitingOnRetry);
    sendPacket();
}

// Scheduled send event fired: attempt a send (never while a retry is
// outstanding).
template<class TagStore>
void
Cache<TagStore>::MemSidePort::processSendEvent()
{
    assert(!waitingOnRetry);
    sendPacket();
}

template<class TagStore>
Cache<TagStore>::MemSidePort::MemSidePort(const std::string &_name,
                                          Cache<TagStore> *_cache,
                                          const std::string &_label,
                                          std::vector<Range<Addr> > filterRanges)
    : BaseCache::CachePort(_name, _cache, _label, filterRanges)
{
    // override default send event from SimpleTimingPort
    delete sendEvent;
    sendEvent = new SendEvent(this);
}
// (Code-viewer chrome, translated -- keyboard shortcuts of the hosting
// page, not part of the original source: copy code Ctrl+C, search code
// Ctrl+F, full-screen F11, increase font Ctrl+=, decrease font Ctrl+-,
// show shortcuts ?)