l1d_cache.c
/*
 * Copyright (c) 2002 The Board of Trustees of the University of Illinois and
 *                    William Marsh Rice University
 * Copyright (c) 2002 The University of Utah
 * Copyright (c) 2002 The University of Notre Dame du Lac
 *
 * All rights reserved.
 *
 * Based on RSIM 1.0, developed by:
 *   Professor Sarita Adve's RSIM research group
 *   University of Illinois at Urbana-Champaign and William Marsh Rice University
 *   http://www.cs.uiuc.edu/rsim and http://www.ece.rice.edu/~rsim/dist.html
 * ML-RSIM/URSIM extensions by:
 *   The Impulse Research Group, University of Utah
 *   http://www.cs.utah.edu/impulse
 *   Lambert Schaelicke, University of Utah and University of Notre Dame du Lac
 *   http://www.cse.nd.edu/~lambert
 *   Mike Parker, University of Utah
 *   http://www.cs.utah.edu/~map
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal with the Software without restriction, including without
 * limitation the rights to use, copy, modify, merge, publish, distribute,
 * sublicense, and/or sell copies of the Software, and to permit persons to
 * whom the Software is furnished to do so, subject to the following
 * conditions:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimers.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimers in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of Professor Sarita Adve's RSIM research group,
 *    the University of Illinois at Urbana-Champaign, William Marsh Rice
 *    University, nor the names of its contributors may be used to endorse
 *    or promote products derived from this Software without specific prior
 *    written permission.
 * 4. Neither the names of the ML-RSIM project, the URSIM project, the
 *    Impulse research group, the University of Utah, the University of
 *    Notre Dame du Lac, nor the names of its contributors may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS WITH THE SOFTWARE.
 */

#include <stdio.h>

#include "Processor/proc_config.h"
#include "Processor/memunit.h"
#include "Processor/capconf.h"
#include "Processor/simio.h"
#include "Processor/tlb.h"
#include "sim_main/simsys.h"
#include "Caches/system.h"
#include "Caches/req.h"
#include "Caches/pipeline.h"
#include "Caches/cache.h"
#include "Caches/ubuf.h"
#include "Caches/syscontrol.h"

/*
 * Macros which specify which pipeline each type of access belongs to.
 * Note: we have added a separate COHE tag pipe in order to avoid deadlock.
 * Such a port may be considered excessive in a real system; thus, it may
 * be advisable to reserve a certain number of MSHR-like buffers for incoming
 * COHE messages and simply withhold processing of additional COHEs until
 * space opens up in one of these buffers.
 */
#define L1ReqTAGPIPE   0
#define L1ReplyTAGPIPE 1
#define L1CoheTAGPIPE  2
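/*
 * A minimal sketch of the alternative suggested in the note above: reserve a
 * small pool of MSHR-like buffers for incoming COHE messages and withhold
 * further COHEs until one frees up. The type and helpers below (CoheBufPool,
 * CoheBufPool_accept, CoheBufPool_release) are illustrative names only and
 * are not used elsewhere in this file; the simulator keeps the separate COHE
 * tag pipe instead.
 */
typedef struct {
  int total;                       /* buffers reserved for COHE messages     */
  int used;                        /* buffers currently holding a COHE       */
} CoheBufPool;

static int CoheBufPool_accept(CoheBufPool *pool)
{
  /* Admit a new COHE only if one of the reserved buffers is free. */
  if (pool->used < pool->total) {
    pool->used++;
    return 1;
  }
  return 0;                        /* caller leaves the COHE on its port     */
}

static void CoheBufPool_release(CoheBufPool *pool)
{
  pool->used--;                    /* called once the COHE has been handled  */
}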
/*
 * Functions that actually process transactions.
 */
static int           L1DProcessTagRequest   (CACHE*, REQ*);
static int           L1DProcessTagReply     (CACHE*, REQ*);
static int           L1DProcessTagCohe      (CACHE*, REQ*);
static void          L1DProcessFlushPurge   (CACHE*, REQ*);
static MSHR_Response L1DCache_check_mshr    (CACHE*, REQ*);
static void          L1DCache_start_prefetch(CACHE*, REQ*);
static int           L1DCache_uncoalesce_mshr(CACHE*, MSHR*);
extern int           L2ProcessL1Reply       (CACHE*, REQ*);


/*=============================================================================
 * L1DCacheInSim: function that brings new transactions from the ports into
 * the pipelines associated with the various cache parts. An operation can
 * be removed from its port as soon as pipeline space opens up for it.
 * Called whenever there may be something on an input port (indicated by
 * "inq_empty").
 */
void L1DCacheInSim(int gid)
{
  CACHE *captr = L1DCaches[gid];
  int    nothing_on_ports = 1;
  REQ   *req;

  /*---------------------------------------------------------------------------
   * Process replies first so that the resources they hold (cache line,
   * mshr, etc.) can be freed and made available for other transactions.
   */
  while (req = lqueue_head(&captr->reply_queue)) {
    if (AddToPipe(captr->tag_pipe[L1ReplyTAGPIPE], req)) {
      req->progress = 0;
      lqueue_remove(&(captr->reply_queue));
      captr->num_in_pipes++;
    }
    else {
      nothing_on_ports = 0;
      break;
    }
  }

  /*---------------------------------------------------------------------------
   * The request queue needs some special treatment because it is the
   * communication channel between the processor and the memory system.
   */
  while (req = lqueue_head(&captr->request_queue)) {
    if (IsSysControl_local(req)) {
      if (SysControl_local_request(gid, req)) {
        req->progress = 0;
        L1DQ_FULL[gid] = lqueue_full(&(captr->request_queue));
        continue;
      }
      else {
        nothing_on_ports = 0;
        break;
      }
    }
    else if (tlb_uncached(req->memattributes)) {
      /* UBuffer_add_request removes the request from the queue and
         frees the memory unit upon success */
      if (UBuffer_add_request(req)) {
        L1DQ_FULL[gid] = lqueue_full(&(captr->request_queue));
        continue;
      }
      else {
        nothing_on_ports = 0;
        break;
      }
    }
    else if (AddToPipe(captr->tag_pipe[L1ReqTAGPIPE], req)) {
      /*
       * It has been committed; free the corresponding mem unit if this
       * request originally came from the processor module.
       */
      if (req->prefetch != 4)
        FreeAMemUnit(req->d.proc_data.proc_id, req->d.proc_data.inst_tag);

      L1DQ_FULL[gid] = 0;
      captr->num_in_pipes++;
      req->progress = 0;
      lqueue_remove(&(captr->request_queue));
    }
    else {                       /* couldn't add it to the pipe */
      nothing_on_ports = 0;
      break;
    }
  }

  /*---------------------------------------------------------------------------
   * Coherency check queue.
   */
  while (req = lqueue_head(&captr->cohe_queue)) {
    if (AddToPipe(captr->tag_pipe[L1CoheTAGPIPE], req)) {
      req->progress = 0;
      lqueue_remove(&(captr->cohe_queue));
      captr->num_in_pipes++;
    }
    else {
      nothing_on_ports = 0;
      break;
    }
  }

  if (nothing_on_ports)          /* nothing available for processing */
    captr->inq_empty = 1;        /* all inqs are apparently empty    */
}
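/*
 * The reply, request, and cohe loops above share the same core handshake; a
 * condensed sketch of just that common path follows (DrainQueue is an
 * illustrative name, not part of the simulator): take the head request and
 * try to reserve a slot in the given tag pipe; on success, reset its progress
 * and pop it from the queue, otherwise leave it on the port and report that
 * the port still has work pending.
 */
static int DrainQueue(CACHE *captr, LinkQueue *lq, int pipenum)
{
  REQ *req;

  while ((req = lqueue_head(lq))) {
    if (!AddToPipe(captr->tag_pipe[pipenum], req))
      return 0;                  /* pipe full: port is not empty             */

    req->progress = 0;
    lqueue_remove(lq);
    captr->num_in_pipes++;
  }
  return 1;                      /* queue fully drained                      */
}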
/*=============================================================================
 * L1DCacheOutSim: initiates actual processing of the various REQ types.
 * Called each cycle if there is something in the pipes to be processed.
 */
void L1DCacheOutSim(int gid)
{
  CACHE    *captr = L1DCaches[gid];
  REQ      *req;
  int       pipenum, ctr, index;
  Pipeline *pipe;

  /*---------------------------------------------------------------------------
   * Always process replies first because they are going to release
   * resources and unstall stalled execution.
   */
  pipe = captr->tag_pipe[L1ReplyTAGPIPE];
  for (ctr = 0; ctr < pipe->ports; ctr++) {
    GetPipeElt(req, pipe);
    if (req == 0)
      break;

    if (L1DProcessTagReply(captr, req)) {
      ClearPipeElt(pipe);
      captr->num_in_pipes--;
    }
  }

  /*-------------------------------------------------------------------------*/

  pipe = captr->tag_pipe[L1ReqTAGPIPE];
  for (ctr = 0; ctr < pipe->ports; ctr++) {
    GetPipeElt(req, pipe);
    if (req == 0)
      break;

    if (L1DProcessTagRequest(captr, req)) {
      ClearPipeElt(pipe);
      captr->num_in_pipes--;
    }
  }

  /*---------------------------------------------------------------------------
   * Cohe requests are sent by the L2 cache, which has already sent the cohe
   * response back to the requester. Since they are not on the critical path,
   * they are processed last.
   */
  pipe = captr->tag_pipe[L1CoheTAGPIPE];
  for (ctr = 0; ctr < pipe->ports; ctr++) {
    GetPipeElt(req, pipe);
    if (req == 0)
      break;

    if (L1DProcessTagCohe(captr, req)) {
      ClearPipeElt(pipe);
      captr->num_in_pipes--;
    }
  }
}


/*=============================================================================
 * Simulates flush or purge operations on the caches. In the current
 * simulator, each such operation flushes/purges data throughout the cache
 * hierarchy, including L1 and L2. From the processor's or system interface's
 * point of view, each flush/purge operation applies to one L2 cache line.
 * So for each flush/purge, the L1 cache has to check the physical address
 * range containing the specified L2 cache line, which possibly spans several
 * L1 cache lines (if the L1 cache line is smaller than the L2 cache line).
 * In a real system, this would probably take (L2 cache line size / L1 cache
 * line size) accesses of the L1 tag array, but for simplicity we just assume
 * it can be done in one cycle. Since flushes/purges are unlikely to happen
 * often in real applications, this assumption should not affect system
 * performance significantly.
 */
static void L1DProcessFlushPurge(CACHE *captr, REQ *req)
{
  int      paddr = req->paddr & block_mask2;
  int      eaddr = paddr + ARCH_linesz2;
  int      vaddr = req->vaddr & block_mask2;
  int      pbnum;
  int      i;
  cline_t *cline;

  for (; paddr < eaddr; paddr += ARCH_linesz1i, vaddr += ARCH_linesz1i) {
    if (captr->mshr_count > 0) {
      /*
       * First check if there are any outstanding requests for this cache
       * line. If yes, mark the MSHR so that it will perform the flush/purge
       * after all coalesced requests have been served. This also stops the
       * MSHR from coalescing more requests.
       */
      pbnum = paddr >> captr->block_shift;
      for (i = 0; i < captr->max_mshrs; i++) {
        if (captr->mshrs[i].valid && pbnum == captr->mshrs[i].blocknum) {
          captr->mshrs[i].pend_opers = req->prcr_req_type;
          break;
        }
      }

      if (i != captr->max_mshrs) {
        req->hit_type = L1DHIT;
        continue;
      }
    }

    /*
     * Invalidate the line if it is present in the cache. We don't need to
     * write data back to L2 because this is a write-through cache.
     */
    if (Cache_search(captr, vaddr, paddr, &cline) == 0) {
      if (cline->pref == 1)
        captr->pstats->sl1.useless++;
      else if (cline->pref == 4)
        captr->pstats->l1dp.useless++;

      cline->state = INVALID;
      cline->tag   = -1;
    }
  }
}
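/*
 * A small illustrative helper (not used by the simulator) that makes the
 * tag-access count discussed above concrete: one flush/purge touches
 * (L2 line size / L1 line size) L1 lines, which is how many times the loop
 * in L1DProcessFlushPurge iterates. With an assumed 128-byte L2 line and a
 * 32-byte L1 line, that is 128 / 32 = 4 tag-array lookups per operation,
 * whereas the simulator charges a single cycle.
 */
static int L1DFlushTagLookups(int l2_linesz, int l1_linesz)
{
  return l2_linesz / l1_linesz;  /* e.g. 128 / 32 = 4                        */
}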
/*=============================================================================
 * L1DProcessTagRequest: simulates request accesses to the L1 cache.
 * We use "req->progress" to record the processing progress of a request:
 *   progress = 0: initial value
 *   progress = 1: has been processed, but is stuck because the output queue
 *                 is full. Just try to send it out when it comes around again.
 * Returns 0 on failure; 1 on success.
 */
static int L1DProcessTagRequest(CACHE *captr, REQ *req)
{
  int           status;
  MSHR_Response mshr_status;
  LinkQueue    *lq;

  if (cparam.L1D_perfect) {
    req->hit_type = L1DHIT;
    Cache_global_perform(captr, req, 1);
    return 1;
  }

  if (req->progress == 0) {
    if (req->prcr_req_type == FLUSHC) {
      if (!cparam.L1D_writeback)
        L1DProcessFlushPurge(captr, req);
      req->progress = 1;
    }
    else if (req->prcr_req_type == PURGEC) {
      L1DProcessFlushPurge(captr, req);
      req->progress = 1;
    }
    else {                       /* READ, WRITE, or RMW */
      mshr_status = L1DCache_check_mshr(captr, req);

      switch (mshr_status) {
      case NOMSHR:               /* a hit with no associated forward request */
        if (req->prefetch == 4)