📄 l2cache.c
   * If the outgoing buffer is full (indicated by pending_writeback),
   * stall until there is a free slot in the outgoing buffer.
   */
  if (captr->pending_writeback)
    return 0;
  else
    {
      Cache_start_send_to_bus(captr, req);
      return 1;
    }

    case COHE_REPLY:
      YS__errmsg(captr->nodeid,
                 "L2 cache should never receive COHE_REPLY.");

    default:
      break;
    }

  YS__errmsg(captr->nodeid, "Invalid case in L2ProcessDataReq()");
  return 0;
}


/*===========================================================================
 * L2ProcessTagRequest: simulates access requests to the L2 cache tag array.
 *   progress = 0: initial state
 *   progress = 1: hit, send it to the data array
 *   progress = 2: miss, send it down to memory
 * Returns 0 on failure; 1 on success.
 */
static int L2ProcessTagRequest(CACHE * captr, REQ * req)
{
  MSHR_Response mshr_status;
  cline_t      *cline;

  if (cparam.L2_perfect)
    req->progress = 1;

  if (req->req_type == WBACK)
    {
      if (AddToPipe(captr->data_pipe[L2ReqDATAPIPE], req))
        {
          captr->num_in_pipes++;
          req->progress = 0;
          return 1;
        }
      else
        return 0;
    }

  if (req->prcr_req_type == FLUSHC || req->prcr_req_type == PURGEC)
    return L2ProcessFlushPurge(captr, req);

  if (req->progress == 0)
    {
      Cache_search(captr, req->vaddr, req->paddr, &cline);
      mshr_status = L2Cache_check_mshr(captr, req);

      switch (mshr_status)
        {
        case NOMSHR:
          if (cline->state == PR_CL || cline->state == PR_DY)
            req->req_type = REPLY_EXCL;
          else
            req->req_type = REPLY_SH;

          if (req->prefetch == 2)
            captr->pstats->sl2.unnecessary++;
          else if (req->prefetch == 8)
            captr->pstats->l2p.unnecessary++;

          req->progress = 1;
          break;

        case MSHR_COAL:
          if (req->prefetch == 2)
            captr->pstats->sl2.unnecessary++;
          return 1;                      /* success if coalesced */

        case MSHR_NEW:
          if (req->prefetch == 2)
            captr->pstats->sl2.issued++;
          else if (req->prefetch == 8)
            captr->pstats->l2p.issued++;

          if (cparam.L2_prefetch && req->prefetch == 0)
            L2Cache_start_prefetch(captr, req);

          req->progress = 2;    /* this indicates that the cache needs to send something out */
          break;

        case MSHR_USELESS_FETCH:
          /*
           * A prefetch to a line that already has an outstanding fetch.
           * Drop this prefetch even without discriminate prefetch on.
           */
          if (req->prefetch == 2)
            captr->pstats->sl2.unnecessary++;
          else if (req->prefetch == 8)
            captr->pstats->l2p.unnecessary++;
          YS__PoolReturnObj(&YS__ReqPool, req);
          return 1;

        case MSHR_STALL_WAR:
        case MSHR_STALL_COHE:
        case MSHR_STALL_COAL:
        case MSHR_STALL_RELEASE:
        case NOMSHR_STALL:
        case NOMSHR_STALL_COHE:
        case MSHR_STALL_FLUSH:
          return 0;

        default:
          YS__errmsg(captr->nodeid,
                     "Default case in L2 mshr_misscache %d", mshr_status);
        }
    }

  /*
   * Going to the data RAM in this case. Note that this isn't considered
   * progress because no resource has been booked, nor has any visible
   * state been changed (although LRU bits may have been). NOTE: hits
   * must be sent to the data array before being sent to the L1 cache.
   */
  if (req->progress == 1)
    {
      if (AddToPipe(captr->data_pipe[L2ReqDATAPIPE], req))
        {
          captr->num_in_pipes++;
          req->progress = 0;
          if ((req->hit_type != L1IHIT) && (req->hit_type != L1DHIT))
            req->hit_type = L2HIT;
          return 1;
        }
      else
        return 0;
    }

  return Cache_start_send_to_bus(captr, req);
}


/*===========================================================================
 * Simulate cache flush/purge operations. Unlike the write-through L1 cache,
 * the L2 cache may generate write-back transactions for a flush. Since the
 * L1 cache has been checked before this function is called, we don't need
 * invalidation requests to the L1 cache.
 */
static int L2ProcessFlushPurge(CACHE *captr, REQ *req)
{
  int      i;
  cline_t *cline;
  REQ     *ireq = NULL;

  if (cparam.L2_perfect)
    return 1;

  if (req->progress == 0)
    {
      /*
       * First, try to merge with an MSHR. Then, search the cache.
       */
      req->wrb_req  = 0;
      req->invl_req = 0;

      if (captr->mshr_count > 0)
        {
          unsigned blocknum = ADDR2BNUM2(req->paddr);

          for (i = 0; i < captr->max_mshrs; i++)
            {
              if (captr->mshrs[i].valid &&
                  blocknum == captr->mshrs[i].blocknum)
                {
                  captr->mshrs[i].pend_opers = req->prcr_req_type;
                  req->hit_type = MEMHIT;
                  Cache_global_perform(captr, req, 1);
                  return 1;
                }
            }
        }

      if (Cache_search(captr, req->vaddr, req->paddr, &cline) == 0)
        {
          if (cline->state == PR_DY && req->prcr_req_type == FLUSHC)
            {
              /*
               * Need a write-back when flushing dirty private data.
               */
              req->wrb_req = Cache_make_req(captr, cline, REPLY_EXCL);
              req->wrb_req->type   = WRITEBACK;
              req->wrb_req->parent = 0;    /* not associated with any cohe */
            }

          if (req->prcr_req_type == FLUSHC && cparam.L1D_writeback)
            {
              req->invl_req = Cache_make_req(captr, cline, INVALIDATE);
              req->invl_req->prcr_req_type = FLUSHC;
              req->invl_req->type          = COHE;
              req->invl_req->l2mshr        = (MSHR *)cline;
              req->invl_req->wrb_req       = req->wrb_req;
              req->invl_req->wb_count      = 0;
              req->invl_req->parent        = req;
            }

          ireq = Cache_make_req(captr, cline, INVALIDATE);
          ireq->type   = COHE;
          ireq->parent = req;

          if (cline->pref == 2)
            captr->pstats->sl2.useless++;
          else if (cline->pref == 8)
            captr->pstats->l2p.useless++;

          cline->state  = INVALID;
          cline->tag    = -1;
          req->hit_type = L2HIT;
        }
      else
        {
          req->hit_type = MEMHIT;
        }

      req->progress = 1;
    }

  if (((req->invl_req) && (lqueue_full(&(captr->l1d_partner->cohe_queue)))) ||
      ((ireq) && (lqueue_full(&(captr->l1i_partner->cohe_queue)))))
    {
      if (ireq)
        YS__PoolReturnObj(&YS__ReqPool, ireq);
      return 0;
    }

  if (ireq)
    {
      lqueue_add(&(captr->l1i_partner->cohe_queue), ireq, captr->nodeid);
      captr->l1i_partner->inq_empty = 0;
    }

  if (req->invl_req)
    {
      lqueue_add(&(captr->l1d_partner->cohe_queue), req->invl_req,
                 captr->nodeid);
      captr->l1d_partner->inq_empty = 0;
      return 1;
    }

  if (req->wrb_req)
    {
      if (!AddToPipe(captr->data_pipe[L2ReqDATAPIPE], req->wrb_req))
        return 0;
      req->wrb_req->progress = 0;
      captr->num_in_pipes++;
    }

  Cache_global_perform(captr, req, 1);
  return 1;
}


int L2ProcessTagFlushReply(CACHE *captr, REQ *req)
{
  if (req->wrb_req == 0 && req->wb_count)
    {
      req->wrb_req = (REQ *) YS__PoolGetObj(&YS__ReqPool);
      req->wrb_req->paddr     = req->parent->paddr & block_mask2;
      req->wrb_req->vaddr     = req->parent->vaddr & block_mask2;
      req->wrb_req->req_type  = REPLY_EXCL;
      req->wrb_req->type      = WRITEBACK;
      req->wrb_req->node      = captr->nodeid;
      req->wrb_req->src_proc  = captr->procid;
      req->wrb_req->ifetch    = 0;
      req->wrb_req->dest_proc = AddrMap_lookup(req->node, req->paddr);
      req->wrb_req->parent    = 0;
      /* carry the memory attributes over to the write-back request */
      req->wrb_req->memattributes = req->memattributes;
    }

  if (req->wrb_req)
    {
      if (!AddToPipe(captr->data_pipe[L2ReqDATAPIPE], req->wrb_req))
        return 0;
      req->wrb_req->progress = 0;
      captr->num_in_pipes++;
    }

  Cache_global_perform(captr, req->parent, 1);

  if (!req->cohe_count)
    YS__PoolReturnObj(&YS__ReqPool, req);

  return 1;
}


/*===========================================================================
 * L2Cache_check_mshr: This function is called for all incoming REQUESTs,
 * to check whether the incoming transaction matches an outstanding
 * transaction in an MSHR, whether it hits in the cache, and whether it
 * needs an MSHR.
 */
static MSHR_Response L2Cache_check_mshr(CACHE * captr, REQ * req)
{
  MSHR_Response response;
  MSHR         *pmshr;
  cline_t      *cline;
  int           misscache, i;

  /*
   * First, determine whether the incoming transaction matches any MSHR.
   */
  pmshr = 0;
  if (captr->mshr_count > 0)
    {
      unsigned blocknum = ADDR2BNUM2(req->paddr);

      for (i = 0; i < captr->max_mshrs; i++)
        {
          if (captr->mshrs[i].valid &&
              blocknum == captr->mshrs[i].blocknum)
            {
              pmshr = &(captr->mshrs[i]);
              break;
            }
        }
    }

  /*
   * Determine if the desired line is available in the cache.
   */
  misscache = Cache_search(captr, req->vaddr, req->paddr, &cline);

  /*
   * If it's an L2 prefetch, drop it no matter what, as long as it matches
   * an MSHR or a cache line.
   */
  if ((req->prefetch == 8) && (pmshr || misscache == 0))
    return MSHR_USELESS_FETCH;

  if (pmshr == 0)
    {                             /* the line is not present in any MSHR */
      if (misscache == 0)
        {                         /* the line is in the cache */
          if (Cache_hit_update(captr, cline, req))
            {                     /* update LRU ages */
              if (cparam.L2_prefetch & 2)
                L2Cache_start_prefetch(captr, req);
            }

          if (req->prcr_req_type == READ)
            return NOMSHR;
          else
            {                     /* it's a write or rmw */
              if (cline->state != SH_CL)
                {
                  /*
                   * The access is either a write or an rmw, and the cache
                   * line is in a private state.
                   */
                  if (cline->state == PR_CL)
                    cline->state = PR_DY;
                  return NOMSHR;
                }
              /* Otherwise (shared state), we will need an upgrade. */
            }
        }

      /*
       * The line is either not in the L2 cache, or the request is a write
       * and the line is in the L2 cache but in shared state.
       */
      if (captr->mshr_count == captr->max_mshrs)
        {
          /*
           * No MSHR is available; the value "NOMSHR_STALL" is returned.
           */
          captr->pstats->l2stall.full++;
          return NOMSHR_STALL;
        }

      /*
       * Find the first free MSHR.
       */
      for (i = 0; i < captr->max_mshrs; i++)
        {
          if (captr->mshrs[i].valid != 1)
            {
              pmshr = &(captr->mshrs[i]);
              break;
            }
        }

      req->l2mshr         = pmshr;
      pmshr->valid        = 1;
      pmshr->mainreq      = req;
      pmshr->setnum       = cline->index >> captr->idx_shift;
      pmshr->blocknum     = req->paddr >> captr->block_shift;
      pmshr->counter      = 0;
      pmshr->pending_cohe = 0;
      pmshr->stall_WAR    = 0;
      pmshr->demand       = -1.0;
      pmshr->cline        = cline;
      pmshr->misscache    = misscache;
      pmshr->pend_opers   = 0;
      pmshr->only_prefs   = (req->prefetch & 10) ? 1 : 0;
      pmshr->has_writes   = (req->prcr_req_type == READ) ? 0 : 1;
      pmshr->releasing    = 0;

      captr->mshr_count++;
      captr->reqmshr_count++;
      captr->reqs_at_mshr_count++;

      if (misscache == 0)
        {                 /* line present in cache -- upgrade-type access */
          /*
           * In the case of upgrades, the line is locked into the cache by
           * setting "mshr_out"; this guarantees that the line is not
           * victimized on a later "REPLY" before the upgrade reply returns.
           * In all cases where the line is present in the cache, the
           * "hit_update" function is called to update the ages of the lines
           * in the set (for LRU replacement).
           */
          req->req_type   = UPGRADE;
          cline->mshr_out = 1;
        }
      else
        {
          if ((req->prcr_req_type == WRITE) || (req->prcr_req_type == RMW))
            req->req_type = READ_OWN;
          else
            req->req_type = READ_SH;
        }

      return MSHR_NEW;
    }

  /*
   * Matches in an MSHR -- the REQUEST must either be merged, dropped,
   * forwarded around the cache, or stalled.
   */

  /*
   * If there is a pending flush/purge operation, or a pending coherence
   * request, or the MSHR is being released, stall.
   */
  if (pmshr->pend_opers)
    {
      captr->pstats->l2stall.flush++;
      return MSHR_STALL_FLUSH;
    }
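The listing is cut off inside L2Cache_check_mshr(), but the no-MSHR-match path above reduces to a small decision rule: a read hit, or a write/rmw hit on a private line, completes without an MSHR; a write/rmw hit on a shared line allocates an MSHR and issues an UPGRADE; a genuine miss allocates an MSHR and issues READ_OWN for writes/rmws or READ_SH for reads. The sketch below restates that rule as a self-contained program. The enum values and the classify_no_mshr_match() helper are local stand-ins invented for illustration; they are not the simulator's own declarations.

#include <stdio.h>

/* Local stand-ins for the cache-line states and request types used above. */
typedef enum { INVALID, SH_CL, PR_CL, PR_DY } line_state_t;
typedef enum { READ, WRITE, RMW } prcr_req_t;
typedef enum { REQ_NONE, READ_SH, READ_OWN, UPGRADE } bus_req_t;

/*
 * Classify a request that matched no MSHR:
 *  - read hit, or write/rmw hit on a private line -> no MSHR, no bus request
 *  - write/rmw hit on a shared line               -> UPGRADE
 *  - miss with write/rmw                          -> READ_OWN
 *  - miss with read                               -> READ_SH
 */
static bus_req_t classify_no_mshr_match(int hit, line_state_t state,
                                        prcr_req_t type)
{
  if (hit)
    {
      if (type == READ || state != SH_CL)
        return REQ_NONE;      /* serviced without allocating an MSHR        */
      return UPGRADE;         /* write to a shared line: lock it in cache   */
    }
  return (type == WRITE || type == RMW) ? READ_OWN : READ_SH;
}

int main(void)
{
  printf("write hit on shared line -> %d (UPGRADE)\n",
         classify_no_mshr_match(1, SH_CL, WRITE));
  printf("write miss               -> %d (READ_OWN)\n",
         classify_no_mshr_match(0, INVALID, WRITE));
  printf("read miss                -> %d (READ_SH)\n",
         classify_no_mshr_match(0, INVALID, READ));
  return 0;
}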
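For reference, the MSHR fields touched in this excerpt imply roughly the following bookkeeping record. The field list is taken directly from the assignments in L2Cache_check_mshr() and L2ProcessFlushPurge(); the types and the brief comments are guesses, since the real declaration lives in a header that is not part of this listing.

/* Sketch of the MSHR record implied by the assignments above (types guessed). */
typedef struct mshr_sketch
{
  int       valid;          /* set to 1 when the slot is allocated               */
  void     *mainreq;        /* request that allocated the MSHR (REQ *)           */
  unsigned  setnum;         /* cline->index >> idx_shift                         */
  unsigned  blocknum;       /* req->paddr >> block_shift                         */
  int       counter;        /* initialized to 0 at allocation                    */
  int       pending_cohe;   /* initialized to 0 at allocation                    */
  int       stall_WAR;      /* initialized to 0 at allocation                    */
  double    demand;         /* initialized to -1.0 at allocation                 */
  void     *cline;          /* cache line associated with the miss (cline_t *)   */
  int       misscache;      /* Cache_search result recorded at allocation time   */
  int       pend_opers;     /* pending flush/purge type merged into this MSHR    */
  int       only_prefs;     /* 1 if the allocating access was a prefetch (2 or 8)*/
  int       has_writes;     /* 1 if the allocating access was not a READ         */
  int       releasing;      /* initialized to 0 at allocation                    */
} mshr_sketch_t;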