cache_impl.hh

来自「M5，一个功能强大的多处理器系统模拟器。很多针对处理器架构、性能的研究都使用它作为研究工具」· HH 代码 · 共 1,524 行 · 第 1/4 页

HH
1,524
字号
    // First offset for critical word first calculations    int initial_offset = 0;    if (mshr->hasTargets()) {        initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);    }    while (mshr->hasTargets()) {        MSHR::Target *target = mshr->getTarget();        if (target->isCpuSide()) {            Tick completion_time;            if (blk != NULL) {                satisfyCpuSideRequest(target->pkt, blk);                // How many bytes past the first request is this one                int transfer_offset =                    target->pkt->getOffset(blkSize) - initial_offset;                if (transfer_offset < 0) {                    transfer_offset += blkSize;                }                // If critical word (no offset) return first word time                completion_time = tags->getHitLatency() +                    (transfer_offset ? pkt->finishTime : pkt->firstWordTime);                assert(!target->pkt->req->isUncacheable());                missLatency[target->pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=                    completion_time - target->recvTime;            } else {                // not a cache fill, just forwarding response                completion_time = tags->getHitLatency() + pkt->finishTime;                if (pkt->isRead() && !is_error) {                    target->pkt->setData(pkt->getPtr<uint8_t>());                }            }            target->pkt->makeTimingResponse();            // if this packet is an error copy that to the new packet            if (is_error)                target->pkt->copyError(pkt);            if (pkt->isInvalidate()) {                // If intermediate cache got ReadRespWithInvalidate,                // propagate that.  Response should not have                // isInvalidate() set otherwise.                
assert(target->pkt->cmd == MemCmd::ReadResp);                assert(pkt->cmd == MemCmd::ReadRespWithInvalidate);                target->pkt->cmd = MemCmd::ReadRespWithInvalidate;            }            cpuSidePort->respond(target->pkt, completion_time);        } else {            // I don't believe that a snoop can be in an error state            assert(!is_error);            // response to snoop request            DPRINTF(Cache, "processing deferred snoop...\n");            handleSnoop(target->pkt, blk, true, true,                        mshr->pendingInvalidate || pkt->isInvalidate());        }        mshr->popTarget();    }    if (pkt->isInvalidate()) {        tags->invalidateBlk(blk);    }    if (mshr->promoteDeferredTargets()) {        MSHRQueue *mq = mshr->queue;        mq->markPending(mshr);        requestMemSideBus((RequestCause)mq->index, pkt->finishTime);    } else {        mq->deallocate(mshr);        if (wasFull && !mq->isFull()) {            clearBlocked((BlockedCause)mq->index);        }    }    // copy writebacks to write buffer    while (!writebacks.empty()) {        PacketPtr wbPkt = writebacks.front();        allocateWriteBuffer(wbPkt, time, true);        writebacks.pop_front();    }    // if we used temp block, clear it out    if (blk == tempBlock) {        if (blk->isDirty()) {            allocateWriteBuffer(writebackBlk(blk), time, true);        }        tags->invalidateBlk(blk);    }    delete pkt;}template<class TagStore>PacketPtrCache<TagStore>::writebackBlk(BlkType *blk){    assert(blk && blk->isValid() && blk->isDirty());    writebacks[0/*pkt->req->getThreadNum()*/]++;    Request *writebackReq =        new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);    PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback, -1);    writeback->allocate();    std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);    blk->status &= ~BlkDirty;    return writeback;}template<class TagStore>typename 
Cache<TagStore>::BlkType*Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks){    BlkType *blk = tags->findReplacement(addr, writebacks);    if (blk->isValid()) {        Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);        MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);        if (repl_mshr) {            // must be an outstanding upgrade request on block            // we're about to replace...            assert(!blk->isWritable());            assert(repl_mshr->needsExclusive());            // too hard to replace block with transient state            return NULL;        } else {            DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",                    repl_addr, addr,                    blk->isDirty() ? "writeback" : "clean");            if (blk->isDirty()) {                // Save writeback packet for handling by caller                writebacks.push_back(writebackBlk(blk));            }        }    }    // Set tag for new block.  Caller is responsible for setting status.    blk->tag = tags->extractTag(addr);    return blk;}// Note that the reason we return a list of writebacks rather than// inserting them directly in the write buffer is that this function// is called by both atomic and timing-mode accesses, and in atomic// mode we don't mess with the write buffer (we just perform the// writebacks atomically once the original request is complete).template<class TagStore>typename Cache<TagStore>::BlkType*Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,                            PacketList &writebacks){    Addr addr = pkt->getAddr();#if TRACING_ON    CacheBlk::State old_state = blk ? blk->status : 0;#endif    if (blk == NULL) {        // better have read new data...        assert(pkt->hasData());        // need to do a replacement        blk = allocateBlock(addr, writebacks);        if (blk == NULL) {            // No replaceable block... 
just use temporary storage to            // complete the current request and then get rid of it            assert(!tempBlock->isValid());            blk = tempBlock;            tempBlock->set = tags->extractSet(addr);            DPRINTF(Cache, "using temp block for %x\n", addr);        }    } else {        // existing block... probably an upgrade        assert(blk->tag == tags->extractTag(addr));        // either we're getting new data or the block should already be valid        assert(pkt->hasData() || blk->isValid());    }    if (!pkt->sharedAsserted()) {        blk->status = BlkValid | BlkReadable | BlkWritable;    } else {        assert(!pkt->needsExclusive());        blk->status = BlkValid | BlkReadable;    }    DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",            addr, old_state, blk->status);    // if we got new data, copy it in    if (pkt->isRead()) {        std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);    }    blk->whenReady = pkt->finishTime;    return blk;}///////////////////////////////////////////////////////// Snoop path: requests coming in from the memory side///////////////////////////////////////////////////////template<class TagStore>voidCache<TagStore>::doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,                       bool already_copied, bool pending_inval){    // timing-mode snoop responses require a new packet, unless we    // already made a copy...    PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt, true);    if (!req_pkt->isInvalidate()) {        // note that we're ignoring the shared flag on req_pkt... 
it's        // basically irrelevant, as we'll always assert shared unless        // it's an exclusive request, in which case the shared line        // should never be asserted1        pkt->assertShared();    }    pkt->allocate();    pkt->makeTimingResponse();    if (pkt->isRead()) {        pkt->setDataFromBlock(blk_data, blkSize);    }    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {        // Assume we defer a response to a read from a far-away cache        // A, then later defer a ReadExcl from a cache B on the same        // bus as us.  We'll assert MemInhibit in both cases, but in        // the latter case MemInhibit will keep the invalidation from        // reaching cache A.  This special response tells cache A that        // it gets the block to satisfy its read, but must immediately        // invalidate it.        pkt->cmd = MemCmd::ReadRespWithInvalidate;    }    memSidePort->respond(pkt, curTick + hitLatency);}template<class TagStore>voidCache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,                             bool is_timing, bool is_deferred,                             bool pending_inval){    // deferred snoops can only happen in timing mode    assert(!(is_deferred && !is_timing));    // pending_inval only makes sense on deferred snoops    assert(!(pending_inval && !is_deferred));    assert(pkt->isRequest());    // first propagate snoop upward to see if anyone above us wants to    // handle it.  
save & restore packet src since it will get    // rewritten to be relative to cpu-side bus (if any)    bool alreadyResponded = pkt->memInhibitAsserted();    if (is_timing) {        Packet *snoopPkt = new Packet(pkt, true);  // clear flags        snoopPkt->setExpressSnoop();        snoopPkt->senderState = new ForwardResponseRecord(pkt, this);        cpuSidePort->sendTiming(snoopPkt);        if (snoopPkt->memInhibitAsserted()) {            // cache-to-cache response from some upper cache            assert(!alreadyResponded);            pkt->assertMemInhibit();        } else {            delete snoopPkt->senderState;        }        if (snoopPkt->sharedAsserted()) {            pkt->assertShared();        }        delete snoopPkt;    } else {        int origSrc = pkt->getSrc();        cpuSidePort->sendAtomic(pkt);        if (!alreadyResponded && pkt->memInhibitAsserted()) {            // cache-to-cache response from some upper cache:            // forward response to original requester            assert(pkt->isResponse());        }        pkt->setSrc(origSrc);    }    if (!blk || !blk->isValid()) {        return;    }    // we may end up modifying both the block state and the packet (if    // we respond in atomic mode), so just figure out what to do now    // and then do it later    bool respond = blk->isDirty() && pkt->needsResponse();    bool have_exclusive = blk->isWritable();    bool invalidate = pkt->isInvalidate();    if (pkt->isRead() && !pkt->isInvalidate()) {        assert(!pkt->needsExclusive());        pkt->assertShared();        int bits_to_clear = BlkWritable;        const bool haveOwnershipState = true; // for now        if (!haveOwnershipState) {            // if we don't support pure ownership (dirty && !writable),            // have to clear dirty bit here, assume memory snarfs data            // on cache-to-cache xfer            bits_to_clear |= BlkDirty;        }        blk->status &= ~bits_to_clear;    }    DPRINTF(Cache, "snooped a %s request for 
addr %x, %snew state is %i\n",            pkt->cmdString(), blockAlign(pkt->getAddr()),            respond ? "responding, " : "", invalidate ? 0 : blk->status);    if (respond) {        assert(!pkt->memInhibitAsserted());        pkt->assertMemInhibit();        if (have_exclusive) {            pkt->setSupplyExclusive();        }        if (is_timing) {            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);        } else {            pkt->makeAtomicResponse();            pkt->setDataFromBlock(blk->data, blkSize);        }    }    // Do this last in case it deallocates block data or something    // like that    if (invalidate) {        tags->invalidateBlk(blk);    }}template<class TagStore>voidCache<TagStore>::snoopTiming(PacketPtr pkt){    // Note that some deferred snoops don't have requests, since the    // original access may have already completed    if ((pkt->req && pkt->req->isUncacheable()) ||        pkt->cmd == MemCmd::Writeback) {        //Can't get a hit on an uncacheable address        //Revisit this for multi level coherence        return;    }    BlkType *blk = tags->findBlock(pkt->getAddr());    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));    MSHR *mshr = mshrQueue.findMatch(blk_addr);    // Let the MSHR itself track the snoop and decide whether we want    // to go ahead and do the regular cache snoop    if (mshr && mshr->handleSnoop(pkt, order++)) {        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",                blk_addr);        if (mshr->getNumTargets() > numTarget)            warn("allocating bonus target for snoop"); //handle later        return;    }    //We also need to check the writeback buffers and handle those    std::vector<MSHR *> writebacks;    if (writeBuffer.findMatches(blk_addr, writebacks)) {        DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",                pkt->getAddr());        //Look through writebacks for any non-uncachable writes, use that        for (int i = 0; i 
< writebacks.size(); i++) {            mshr = writebacks[i];            assert(!mshr->isUncacheable());            assert(mshr->getNumTargets() == 1);            PacketPtr wb_pkt = mshr->getTarget()->pkt;            assert(wb_pkt->cmd == MemCmd::Writeback);            assert(!pkt->memInhibitAsserted());

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?