/* l1d_cache.c */
captr->pstats->l1dp.unnecessary++; else if (req->prefetch == 1) captr->pstats->sl1.unnecessary++; else if (req->prefetch == 2) captr->pstats->sl2.unnecessary++; /* * send the request back to the processor, as it's done. * REQs also get freed up there. */ if (req->prefetch == 4) YS__PoolReturnObj(&YS__ReqPool, req); else Cache_global_perform(captr, req, 1); return 1; case MSHR_COAL: /* Coalesced into an MSHR. success. */ return 1; case MSHR_NEW: /* need to send down some sort of miss or upgrd */ if (req->prefetch == 4) captr->pstats->l1dp.issued++; else if (req->prefetch == 1) captr->pstats->sl1.issued++; req->progress = 1; if (cparam.L1D_prefetch && !req->prefetch && req->hit_type != L1DHIT) L1DCache_start_prefetch(captr, req); break; case MSHR_USELESS_FETCH: /* * a prefetch to a line that already has an outstanding fetch. * Drop this prefetch even without discriminate prefetch on. */ if (req->prefetch == 4) captr->pstats->l1dp.unnecessary++; else if (req->prefetch == 1) captr->pstats->sl1.unnecessary++; else if (req->prefetch == 2) captr->pstats->sl2.unnecessary++; req->hit_type = L1DHIT; if (req->prefetch == 4) YS__PoolReturnObj(&YS__ReqPool, req); else Cache_global_perform(captr, req, 1); return 1; case NOMSHR_FWD: /* * means REQUEST is a non-write-allocate write miss/hit or * an L2 prefetch. No prefetch stats here, since that will * be done in L2 cache. */ if (req->hit_type == L1DHIT) /* A write hits L1 cache. */ Cache_global_perform(captr, req, 0); req->progress = 1; break; case MSHR_STALL_WAR: /* a write wants to merge with a read MSHR*/ case MSHR_STALL_COAL: /* too many requests coalesced to MSHR */ case MSHR_STALL_FLUSH: /* pending flush/purge requests */ case NOMSHR_STALL: /* No MSHRs available for this request */ if (req->prefetch && DISCRIMINATE_PREFETCH) { /* just drop the prefetch here. 
*/
        if (req->prefetch == 1)
          captr->pstats->sl1.dropped++;
        else if (req->prefetch == 2)
          captr->pstats->sl2.dropped++;
        else if (req->prefetch == 4)
          captr->pstats->l1dp.dropped++;
        YS__PoolReturnObj(&YS__ReqPool, req);
        return 1;
      }
      return 0;

    default:
      YS__errmsg(captr->nodeid, "Default case in L1D mshr_hittype: %d",
                 mshr_status);
    }  /* switch on mshr_status */
   }
  }

  /*
   * we'll send down an upgrade, miss, or non-allocating access
   * successful if there's space in the output port; otherwise, return 0
   * and allow retry.
   */
  if (cparam.L1D_writeback)
    lq = &(captr->l2_partner->request_queue);
  else
    lq = &(PID2WBUF(0, captr->procid)->inqueue);

  if (!lqueue_full(lq))
  {
    lqueue_add(lq, req, captr->nodeid);
    if (cparam.L1D_writeback)
      captr->l2_partner->inq_empty = 0;
    return 1;
  }

  return 0;
}

/*=============================================================================
 * L1DCache_check_mshr: check a new L1 data-cache REQUEST against the MSHRs
 * and the cache array, and decide how the caller must handle it.
 *
 * Returns an MSHR_Response:
 *   NOMSHR             -- read hit; no MSHR involvement needed
 *   NOMSHR_FWD         -- forward below without taking an L1 MSHR
 *   MSHR_NEW           -- a fresh MSHR was allocated; send a miss/upgrade
 *   MSHR_COAL          -- coalesced into an existing MSHR
 *   MSHR_USELESS_FETCH -- redundant prefetch; caller drops it
 *   MSHR_STALL_WAR / MSHR_STALL_COAL / MSHR_STALL_FLUSH / NOMSHR_STALL
 *                      -- request cannot proceed yet; caller must retry
 *
 * NOTE(review): req->prefetch encoding appears to be 1 = software L1
 * prefetch, 2 = software L2 prefetch, 4 = L1D hardware prefetch (judging
 * by the sl1/sl2/l1dp stat buckets used below) -- confirm against the
 * REQ declaration.
 */
static MSHR_Response L1DCache_check_mshr(CACHE *captr, REQ *req)
{
  MSHR    *pmshr;
  cline_t *cline;
  int      misscache, i;

  /*
   * First determines whether the incoming transaction matches any MSHR
   * (linear scan over the valid MSHRs by block number).
   */
  pmshr = 0;
  if (captr->mshr_count > 0)
  {
    unsigned blocknum = ADDR2BNUM1D(req->paddr);
    for (i = 0; i < captr->max_mshrs; i++)
    {
      if ((captr->mshrs[i].valid == 1) &&
          (blocknum == captr->mshrs[i].blocknum))
      {
        pmshr = &(captr->mshrs[i]);
        break;
      }
    }
  }

  /*
   * Determine if the requested line is available in the cache.
   * (misscache == 0 means hit; cline receives the matching/target line.)
   */
  misscache = Cache_search(captr, req->vaddr, req->paddr, &cline);

  /*
   * If it's an L1 prefetch, we will drop it no matter what as long as it
   * matches an MSHR or a cache line.
   */
  if ((req->prefetch == 4) && (pmshr || misscache == 0))
    return MSHR_USELESS_FETCH;

  if (pmshr == 0)
  {
    /* the line is not present in any MSHR */
    if (misscache == 0)  /* hit */
    {
      if (Cache_hit_update(captr, cline, req))
      {
        /* update LRU ages; bit 1 of L1D_prefetch enables prefetch-on-hit */
        if (cparam.L1D_prefetch & 2)
          L1DCache_start_prefetch(captr, req);
      }

      if (req->prcr_req_type == READ)
      {
        req->hit_type = L1DHIT;
        return NOMSHR;
      }
      else
      {
        /* it's a write or rmw */
        if (cline->state != SH_CL)
        {
          /*
           * The access is either a write or a rmw; and the cache line
           * in private state. We need forward the write to L2 cache
           * through write buffer. But it doesn't need anything here.
           */
          if (req->prefetch)
            return MSHR_USELESS_FETCH;
          if (cline->state == PR_CL)
            cline->state = PR_DY;  /* private-clean -> private-dirty */
          req->hit_type = L1DHIT;
          if (cparam.L1D_writeback)
            return NOMSHR;
          else
            return NOMSHR_FWD;
        }
        else
        {
          /* the line is in shared state */
          if (req->prefetch == 2)
          {
            /*
             * It is an L2 write prefetch.
             * Nothing to do with L1 cache.
             */
            return NOMSHR_FWD;
          }
          /* Otherwise, it will allocate an L1 MSHR (upgrade). */
        }
      }
    }

    /* The line is either not in l1 cache, or in l1 cache but the
       cache line is in share state. */
    if ((req->prcr_req_type == WRITE && !misscache) || req->prefetch == 2)
    {
      /*
       * A write missing in L1 cache or an L2 prefetch goes to the next
       * level of cache without taking an MSHR here.
       */
      return NOMSHR_FWD;
    }

    /**** Otherwise, we need to allocate an MSHR for this request ****/

    if (captr->mshr_count == captr->max_mshrs)
    {
      /*
       * None are available, the value "NOMSHR_STALL" is returned.
       */
      captr->pstats->l1dstall.full++;
      return NOMSHR_STALL;
    }

    /*
     * Find the first free MSHR.
     */
    for (i = 0; i < captr->max_mshrs; i++)
    {
      if (captr->mshrs[i].valid != 1)
      {
        pmshr = &(captr->mshrs[i]);
        break;
      }
    }

    /*
     * Initialize the new MSHR for this request.
     * NOTE(review): on a miss, cline is presumably the victim/target line
     * chosen by Cache_search -- confirm its contract before relying on
     * cline->index here.
     */
    req->l1mshr = pmshr;
    pmshr->valid = 1;
    pmshr->mainreq = req;
    pmshr->setnum = cline->index >> captr->set_shift;
    pmshr->blocknum = req->paddr >> captr->block_shift;
    pmshr->counter = 0;                              /* no coalesced REQs yet */
    pmshr->pending_cohe = 0;
    pmshr->stall_WAR = 0;
    pmshr->demand = -1.0;                            /* no demand access yet */
    pmshr->only_prefs = (req->prefetch) ? 1 : 0;
    pmshr->has_writes = 0;
    pmshr->cline = cline;
    pmshr->misscache = misscache;
    pmshr->pend_opers = 0;

    captr->mshr_count++;
    captr->reqmshr_count++;
    captr->reqs_at_mshr_count++;
    return MSHR_NEW;
  }

  /* Matches an MSHR. The REQUEST must be merged, dropped, forwarded or
     stalled. */

  /*
   * If there are pending flush/purge operations, stall.
   */
  if (pmshr->pend_opers)
  {
    captr->pstats->l1dstall.flush++;
    return MSHR_STALL_FLUSH;
  }

  /*
   * Now, how does the MSHR handle prefetch matches? At the first level
   * cache, prefetches should either be dropped, forwarded around cache, or
   * stalled. There is never any need to coalesce at L1, since one fetch is
   * sufficient. At the L2, though, prefetches cannot be dropped, as they
   * might already have many requests coalesced into them and waiting for
   * them at the L1 cache.
   */
  if (req->prefetch)
  {
    if (req->prcr_req_type == READ)
    {
      /*
       * If a read prefetch wants to coalesce at L1 cache, drop it.
       */
      return MSHR_USELESS_FETCH;
    }
    else
    {
      /* it's a write prefetch */
      if (req->prefetch == 1)
      {
        /*
         * If an L1 write prefetch wants to coalesce at L1 cache,
         * send down as a NOMSHR_FWD, after transforming to an L2
         * write prefetch. (may cause upgrade)
         */
        if (pmshr->mainreq->prcr_req_type != READ)
          return MSHR_USELESS_FETCH;
        /* else: falls through to the WAR handling below */
      }
      else if (req->prefetch == 2)
        return NOMSHR_FWD;
    }
  }

  /*
   * Now we need to consider the possibility of a "WAR" stall. This is a
   * case where an exclusive-mode request wants to merge with a
   * shared-mode MSHR. Even if this is a read request to an MSHR with an
   * outstanding WAR, this request should be stalled, as otherwise the read
   * would be processed out-of-order with respect to the stalled write
   */
  if (pmshr->stall_WAR)
  {
    captr->pstats->l1dstall.war++;
    return MSHR_STALL_WAR;
  }

  if (req->prcr_req_type != READ && pmshr->mainreq->prcr_req_type == READ)
  {
    /*
     * Write after read -- stall system. Note: if the access is a prefetch
     * that is going to be dropped with DISCRIMINATE prefetching, there is
     * no reason to count this in stats or start considering this an "old"
     * WAR.
     */
    if (!req->prefetch || !DISCRIMINATE_PREFETCH)
    {
      captr->pstats->l1dstall.war++;
      pmshr->stall_WAR = 1;
    }
    return MSHR_STALL_WAR;
  }

  /*
   * Too many requests coalesced with MSHR
   */
  if (pmshr->counter == MAX_COALS-1)
  {
    captr->pstats->l1dstall.coal++;
    return MSHR_STALL_COAL;
  }

  /*
   * No problems with coalescing the request, so coalesce it.
   */
  pmshr->coal_req[pmshr->counter++] = req;

  if (pmshr->only_prefs && !req->prefetch)
  {
    /*
     * Demand access coalesces with late prefetch accesses: the prefetch
     * was useful, but did not complete early enough to hide the miss.
     */
    if (pmshr->mainreq->prefetch == 1)
    {
      captr->pstats->sl1.late++;
      captr->pstats->sl1.useful++;
    }
    else if (pmshr->mainreq->prefetch == 4)
    {
      captr->pstats->l1dp.late++;
      captr->pstats->l1dp.useful++;
    }
    pmshr->only_prefs = 0;
    pmshr->demand = YS__Simtime;
  }

  return MSHR_COAL;
}

/*=============================================================================
 * L1ProcessTagReply: simulates access replies to the L1 cache.
 * This function always returns 1 because it won't fail for a write-through
 * cache -- no write-back to lower level, no invalidation to higher level.
*/static int L1DProcessTagReply(CACHE * captr, REQ * req){ int misscache; HIT_TYPE hit_type; MISS_TYPE ccdres; cline_t *cline; MSHR *pmshr; if (cparam.L1D_perfect) YS__errmsg(captr->nodeid, "Perfect L1 D-cache shouldn't receive REPLY"); if (req->progress == 0) { /* no progress made so far */ pmshr = req->l1mshr; if (pmshr == 0) YS__errmsg(captr->nodeid, "L1D Cache %i received a reply for a nonexistent MSHR\n",
/* (web code-viewer UI residue removed: keyboard-shortcut help text) */