cache_impl.hh

来自「M5,一个功能强大的多处理器系统模拟器.很多针对处理器架构,性能的研究都使用它作」· HH 代码 · 共 1,524 行 · 第 1/4 页

HH
1,524
字号
    if (pkt->isResponse()) {
        // must be cache-to-cache response from upper to lower level
        ForwardResponseRecord *rec =
            dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
        assert(rec != NULL);
        rec->restore(pkt, this);
        delete rec;
        memSidePort->respond(pkt, time);
        return true;
    }

    assert(pkt->isRequest());

    if (pkt->memInhibitAsserted()) {
        // Another cache (closer to memory) has asserted mem-inhibit and
        // will supply the data, so this cache must not respond.
        DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
                pkt->getAddr());
        assert(!pkt->req->isUncacheable());
        // Special tweak for multilevel coherence: snoop downward here
        // on invalidates since there may be other caches below here
        // that have shared copies.  Not necessary if we know that
        // supplier had exclusive copy to begin with.
        if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
            Packet *snoopPkt = new Packet(pkt, true);  // clear flags
            snoopPkt->setExpressSnoop();
            snoopPkt->assertMemInhibit();
            memSidePort->sendTiming(snoopPkt);
            // main memory will delete snoopPkt
        }
        return true;
    }

    if (pkt->req->isUncacheable()) {
        // writes go in write buffer, reads use MSHR
        if (pkt->isWrite() && !pkt->isRead()) {
            allocateWriteBuffer(pkt, time, true);
        } else {
            allocateUncachedReadBuffer(pkt, time, true);
        }
        assert(pkt->needsResponse()); // else we should delete it here??
        return true;
    }

    // Cacheable request: probe the tags.  access() sets 'blk' and may
    // adjust 'lat'; returns true on a hit that it satisfied in place.
    int lat = hitLatency;
    BlkType *blk = NULL;
    bool satisfied = access(pkt, blk, lat);

#if 0
    /** @todo make the fast write alloc (wh64) work with coherence. */

    PacketList writebacks;

    // If this is a block size write/hint (WH64) allocate the block here
    // if the coherence protocol allows it.
    if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
        (pkt->cmd == MemCmd::WriteReq
         || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
        // not outstanding misses, can do this
        MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
        if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
            if (outstanding_miss) {
                warn("WriteInv doing a fastallocate"
                     "with an outstanding miss to the same address\n");
            }
            blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
                                   writebacks);
            ++fastWrites;
        }
    }

    // copy writebacks to write buffer
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        allocateWriteBuffer(wbPkt, time, true);
        writebacks.pop_front();
    }
#endif

    bool needsResponse = pkt->needsResponse();

    if (satisfied) {
        // Hit: either send a response back up after 'lat' cycles, or,
        // if no response is expected, we own the packet and delete it.
        if (needsResponse) {
            pkt->makeTimingResponse();
            cpuSidePort->respond(pkt, curTick+lat);
        } else {
            delete pkt;
        }
    } else {
        // miss
        if (prefetchMiss)
            prefetcher->handleMiss(pkt, time);

        // Align to the containing block and look for an outstanding
        // miss to the same block.
        Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
        MSHR *mshr = mshrQueue.findMatch(blk_addr);

        if (mshr) {
            // MSHR hit
            //@todo remove hw_pf here
            mshr_hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
            if (mshr->threadNum != 0/*pkt->req->getThreadNum()*/) {
                mshr->threadNum = -1;
            }
            mshr->allocateTarget(pkt, time, order++);
            if (mshr->getNumTargets() == numTarget) {
                // This MSHR is full; block the cache until a response
                // to it frees a target slot.
                noTargetMSHR = mshr;
                setBlocked(Blocked_NoTargets);
                // need to be careful with this... if this mshr isn't
                // ready yet (i.e. time > curTick), we don't want to
                // move it ahead of mshrs that are ready
                // mshrQueue.moveToFront(mshr);
            }
        } else {
            // no MSHR
            mshr_misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
            // always mark as cache fill for now... if we implement
            // no-write-allocate or bypass accesses this will have to
            // be changed.
            if (pkt->cmd == MemCmd::Writeback) {
                allocateWriteBuffer(pkt, time, true);
            } else {
                if (blk && blk->isValid()) {
                    // If we have a write miss to a valid block, we
                    // need to mark the block non-readable.  Otherwise
                    // if we allow reads while there's an outstanding
                    // write miss, the read could return stale data
                    // out of the cache block... a more aggressive
                    // system could detect the overlap (if any) and
                    // forward data out of the MSHRs, but we don't do
                    // that yet.  Note that we do need to leave the
                    // block valid so that it stays in the cache, in
                    // case we get an upgrade response (and hence no
                    // new data) when the write miss completes.
                    // As long as CPUs do proper store/load forwarding
                    // internally, and have a sufficiently weak memory
                    // model, this is probably unnecessary, but at some
                    // point it must have seemed like we needed it...
                    assert(pkt->needsExclusive() && !blk->isWritable());
                    blk->status &= ~BlkReadable;
                }
                allocateMissBuffer(pkt, time, true);
            }
        }
    }

    return true;
}


// See comment in cache.hh.
// Build the packet to send toward memory for a CPU-side request that
// missed here.  Returns NULL when the original packet should simply be
// forwarded (uncacheable accesses, and Writeback/UpgradeReq with no
// valid local block); otherwise returns a newly allocated broadcast
// packet (UpgradeReq when we hold a shared copy and need exclusive,
// ReadExReq/ReadReq when the block is invalid).  Caller owns the
// returned packet.
template<class TagStore>
PacketPtr
Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
                              bool needsExclusive)
{
    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable()) {
        assert(blk == NULL);
        return NULL;
    }

    if (!blkValid && (cpu_pkt->cmd == MemCmd::Writeback ||
                      cpu_pkt->cmd == MemCmd::UpgradeReq)) {
        // Writebacks that weren't allocated in access() and upgrades
        // from upper-level caches that missed completely just go
        // through.
        return NULL;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    if (blkValid && useUpgrades) {
        // only reason to be here is that blk is shared
        // (read-only) and we need exclusive
        assert(needsExclusive && !blk->isWritable());
        cmd = MemCmd::UpgradeReq;
    } else {
        // block is invalid
        cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, Packet::Broadcast, blkSize);

    pkt->allocate();
    return pkt;
}


// Handle an access in atomic mode: perform the access immediately and
// return the total latency (hit latency plus any latency accumulated
// from atomic accesses sent toward memory).
template<class TagStore>
Tick
Cache<TagStore>::atomicAccess(PacketPtr pkt)
{
    int lat = hitLatency;

    // @TODO: make this a parameter
    bool last_level_cache = false;

    if (pkt->memInhibitAsserted()) {
        // An upper-level cache is supplying the data; we only need to
        // honor any invalidation and keep forwarding downward.
        assert(!pkt->req->isUncacheable());
        // have to invalidate ourselves and any lower caches even if
        // upper cache will be responding
        if (pkt->isInvalidate()) {
            BlkType *blk = tags->findBlock(pkt->getAddr());
            if (blk && blk->isValid()) {
                tags->invalidateBlk(blk);
                DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
                        pkt->cmdString(), pkt->getAddr());
            }
            if (!last_level_cache) {
                DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
                        pkt->cmdString(), pkt->getAddr());
                lat += memSidePort->sendAtomic(pkt);
            }
        } else {
            DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
                    pkt->cmdString(), pkt->getAddr());
        }

        return lat;
    }

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    BlkType *blk = NULL;

    if (!access(pkt, blk, lat)) {
        // MISS
        PacketPtr busPkt = getBusPacket(pkt, blk, pkt->needsExclusive());

        bool isCacheFill = (busPkt != NULL);

        if (busPkt == NULL) {
            // just forwarding the same request to the next level
            // no local cache operation involved
            busPkt = pkt;
        }

        DPRINTF(Cache, "Sending an atomic %s for %x\n",
                busPkt->cmdString(), busPkt->getAddr());

#if TRACING_ON
        CacheBlk::State old_state = blk ? blk->status : 0;
#endif

        lat += memSidePort->sendAtomic(busPkt);

        // NOTE(review): old_state is only declared under TRACING_ON but is
        // referenced in the DPRINTF below; presumably DPRINTF compiles away
        // when TRACING_ON is unset -- confirm against the build config.
        DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
                busPkt->cmdString(), busPkt->getAddr(), old_state);

        bool is_error = busPkt->isError();
        assert(!busPkt->wasNacked());

        if (is_error && pkt->needsResponse()) {
            pkt->makeAtomicResponse();
            pkt->copyError(busPkt);
        } else if (isCacheFill && !is_error) {
            // Fill the block locally, answer the CPU-side request from
            // it, and push any resulting victim writebacks downward.
            PacketList writebacks;
            blk = handleFill(busPkt, blk, writebacks);
            satisfyCpuSideRequest(pkt, blk);
            delete busPkt;

            // Handle writebacks if needed
            while (!writebacks.empty()){
                PacketPtr wbPkt = writebacks.front();
                memSidePort->sendAtomic(wbPkt);
                writebacks.pop_front();
                delete wbPkt;
            }
        }
    }

    // We now have the block one way or another (hit or completed miss)

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat;
}


// Handle a functional (debug, zero-time) access.  Checks, in order, the
// local tagged block, the incoming port's queues, the MSHRs, the write
// buffer, and the other side's port queues; if none of them completes
// the packet it is forwarded out the other side.
template<class TagStore>
void
Cache<TagStore>::functionalAccess(PacketPtr pkt,
                                  CachePort *incomingPort,
                                  CachePort *otherSidePort)
{
    Addr blk_addr = pkt->getAddr() & ~(blkSize - 1);
    BlkType *blk = tags->findBlock(pkt->getAddr());

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);
    bool done =
        (blk && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data))
        || incomingPort->checkFunctional(pkt)
        || mshrQueue.checkFunctional(pkt, blk_addr)
        || writeBuffer.checkFunctional(pkt, blk_addr)
        || otherSidePort->checkFunctional(pkt);

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (!done) {
        otherSidePort->sendFunctional(pkt);
    }
}


/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////


// Handle a response arriving from the memory side.  The originating
// MSHR is recovered from the packet's senderState; stats are updated
// from its initial target, and on a cache fill the block is filled via
// handleFill().  (Definition continues beyond this page.)
template<class TagStore>
void
Cache<TagStore>::handleResponse(PacketPtr pkt)
{
    Tick time = curTick + hitLatency;
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
    bool is_error = pkt->isError();

    assert(mshr);

    if (pkt->wasNacked()) {
        //pkt->reinitFromRequest();
        warn("NACKs from devices not connected to the same bus "
             "not implemented\n");
        return;
    }

    if (is_error) {
        DPRINTF(Cache, "Cache received packet with error for address %x, "
                "cmd: %s\n", pkt->getAddr(), pkt->cmdString());
    }

    DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());

    MSHRQueue *mq = mshr->queue;
    bool wasFull = mq->isFull();

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = NULL;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    BlkType *blk = tags->findBlock(pkt->getAddr());
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick - initial_tgt->recvTime;
    PacketList writebacks;

    if (pkt->req->isUncacheable()) {
        mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->getThreadNum()*/] +=
            miss_latency;
    } else {
        mshr_miss_latency[stats_cmd_idx][0/*pkt->req->getThreadNum()*/] +=
            miss_latency;
    }

    if (mshr->isCacheFill && !is_error) {
        DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
                pkt->getAddr());
        // give mshr a chance to do some dirty work
        mshr->handleFill(pkt, blk);

        blk = handleFill(pkt, blk, writebacks);
        assert(blk != NULL);
    }

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?