⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 l1i_cache.c

📁 ml-rsim 多处理器模拟器 支持类bsd操作系统
💻 C
📖 第 1 页 / 共 2 页
字号:
/* * Copyright (c) 2002 The Board of Trustees of the University of Illinois and *                    William Marsh Rice University * Copyright (c) 2002 The University of Utah * Copyright (c) 2002 The University of Notre Dame du Lac * * All rights reserved. * * Based on RSIM 1.0, developed by: *   Professor Sarita Adve's RSIM research group *   University of Illinois at Urbana-Champaign and     William Marsh Rice University *   http://www.cs.uiuc.edu/rsim and http://www.ece.rice.edu/~rsim/dist.html * ML-RSIM/URSIM extensions by: *   The Impulse Research Group, University of Utah *   http://www.cs.utah.edu/impulse *   Lambert Schaelicke, University of Utah and University of Notre Dame du Lac *   http://www.cse.nd.edu/~lambert *   Mike Parker, University of Utah *   http://www.cs.utah.edu/~map * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal with the Software without restriction, including without * limitation the rights to use, copy, modify, merge, publish, distribute, * sublicense, and/or sell copies of the Software, and to permit persons to * whom the Software is furnished to do so, subject to the following * conditions: * * 1. Redistributions of source code must retain the above copyright notice, *    this list of conditions and the following disclaimers.  * 2. Redistributions in binary form must reproduce the above copyright *    notice, this list of conditions and the following disclaimers in the *    documentation and/or other materials provided with the distribution. * 3. Neither the names of Professor Sarita Adve's RSIM research group, *    the University of Illinois at Urbana-Champaign, William Marsh Rice *    University, nor the names of its contributors may be used to endorse *    or promote products derived from this Software without specific prior *    written permission.  * 4. 
Neither the names of the ML-RSIM project, the URSIM project, the *    Impulse research group, the University of Utah, the University of *    Notre Dame du Lac, nor the names of its contributors may be used to *    endorse or promote products derived from this software without specific *    prior written permission.  * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS WITH THE SOFTWARE.  */#include <stdio.h>#include "Processor/proc_config.h"#include "Processor/memunit.h"#include "Processor/capconf.h"#include "Processor/simio.h"#include "Processor/tlb.h"#include "Processor/predecode.h"#include "sim_main/simsys.h"#include "Caches/system.h"#include "Caches/req.h"#include "Caches/pipeline.h"#include "Caches/cache.h"#include "Caches/ubuf.h"#include "Caches/syscontrol.h"/*  * Macros which specify which pipeline each type of access belongs to.   * Note: we have added a separate COHE tag pipe in order to avoid deadlock. * Such a port may be considered excessive in a real system; thus, it may  * be advisable to reserve a certain number of MSHR-like buffers for incoming * COHE messages and simply withhold processing of additional COHEs until  * space opens up in one of these buffers.  
*/#define L1ReqTAGPIPE 		0#define L1ReplyTAGPIPE 		1#define L1CoheTAGPIPE 		2    /*  * Functions that actually process transactions  */static int           L1IProcessTagRequest     (CACHE*, REQ*);static int           L1IProcessTagReply       (CACHE*, REQ*);static int           L1IProcessTagCohe        (CACHE*, REQ*);static void          L1IProcessFlushPurge     (CACHE*, REQ*);static MSHR_Response L1ICache_check_mshr      (CACHE*, REQ*);static void          L1ICache_start_prefetch  (CACHE*, REQ*);static int           L1ICache_uncoalesce_mshr (CACHE*, MSHR*);extern int           L2ProcessL1Reply         (CACHE*, REQ*);/*============================================================================= * L1CacheInSim: function that brings new transactions from the ports into * the pipelines associated with the various cache parts. An operation can * be removed from its port as soon as pipeline space opens up for it. * Called whenever there may be something on an input port (indicated by * "inq_empty"). */void L1ICacheInSim(int gid){  CACHE *captr = L1ICaches[gid];  int    nothing_on_ports = 1;  REQ   *req;  /*---------------------------------------------------------------------------   * Process replies first so that their holding resources (cache line,   * mshr, etc.) can be freed and available for other transactions.   */  while (req = lqueue_head(&captr->reply_queue))    {      if (AddToPipe(captr->tag_pipe[L1ReplyTAGPIPE], req))	{	  req->progress = 0;	  lqueue_remove(&(captr->reply_queue));	  captr->num_in_pipes++;	}      else	{	  nothing_on_ports = 0;	  break;	}    }    /*---------------------------------------------------------------------------   * Request queue needs some special treatment because it's the   * communication channel between the processor and memory system.    
*/  while (req = lqueue_head(&captr->request_queue))    {      if (AddToPipe(captr->tag_pipe[L1ReqTAGPIPE], req))	{	  L1IQ_FULL[gid] = 0;	  captr->num_in_pipes++;	  req->progress = 0;	  lqueue_remove(&(captr->request_queue));	}      else	{ /* couldn't add it to pipe */	  nothing_on_ports = 0;	  break;	}    }  /*---------------------------------------------------------------------------   * Coherency check queue.   */  while (req = lqueue_head(&captr->cohe_queue))    {      if (AddToPipe(captr->tag_pipe[L1CoheTAGPIPE], req))	{	  req->progress = 0;	  lqueue_remove(&(captr->cohe_queue));	  captr->num_in_pipes++;	}      else	{	  nothing_on_ports = 0;	  break;	}    }  if (nothing_on_ports)        /* nothing available for processing */    captr->inq_empty = 1;      /* All inqs are apparently empty */}/*============================================================================= * L1CacheOutSim: initiates actual processing of various REQ types. * Called each cycle if there is something in pipes to be processed */void L1ICacheOutSim(int gid){  CACHE    *captr  = L1ICaches[gid];  REQ      *req;  int       pipenum, ctr, index;  Pipeline *pipe;    /*---------------------------------------------------------------------------   * Always processes replies first because they are going to release the   * resources and unstall stalled execution.   
*/  pipe = captr->tag_pipe[L1ReplyTAGPIPE];  for (ctr = 0; ctr < pipe->ports; ctr++)    {      GetPipeElt(req, pipe);      if (req == 0)	break;      if (L1IProcessTagReply(captr, req))	{	  ClearPipeElt(pipe);	  captr->num_in_pipes--;	}    }    /*-------------------------------------------------------------------------*/    pipe = captr->tag_pipe[L1ReqTAGPIPE];  for (ctr = 0; ctr < pipe->ports; ctr++)    {      GetPipeElt(req, pipe);      if (req == 0)	break;      if (L1IProcessTagRequest(captr, req))	{	  ClearPipeElt(pipe);	  captr->num_in_pipes--;	}    }  /*---------------------------------------------------------------------------   * Cohe requests are sent by L2 cache which has already sent cohe response   * back to the requester. Since it's not in the critical path, it's   * processed last.   */  pipe = captr->tag_pipe[L1CoheTAGPIPE];  for (ctr = 0; ctr < pipe->ports; ctr++)    {      GetPipeElt(req, pipe);      if (req == 0)	break;      if (L1IProcessTagCohe(captr, req))	{	  ClearPipeElt(pipe);	  captr->num_in_pipes--;	}    }}/*============================================================================= * Simulates flush or purge operations to caches. In current simulator, each * such operation flushs/purges data in cache hierarchy, including L1 and L2. * In the processor's or sytem interface's point of view, each flush/purge * operation applies to one L2 cache line.  So for each flush/purge, L1 cache * has to check the physical address range contains the specified L2 cache  * line, which possible spans several L1 cache lines (if L1 cache line is * smaller than L2 cache line). In real world, this probably needs * (L2 cache line size / L1 cache line size) times access of L1 tag array. * But for simplicity, we just assume it could be done in one cycle. Since  * flush/purge doesn't likely happen a lot in real applications, the  * assumption won't affect the system performance significantly.  
*/
static void L1IProcessFlushPurge(CACHE * captr, REQ * req)
{
  int      paddr = req->paddr & block_mask2;   /* start of the L2 line */
  int      eaddr = paddr + ARCH_linesz2;       /* one past its end     */
  int      vaddr = req->vaddr & block_mask2;
  int      pbnum;
  int      n;
  cline_t *cline;

  /* Walk the L2 line one L1 line at a time. */
  for (; paddr < eaddr; paddr += ARCH_linesz1i, vaddr += ARCH_linesz1i) {
    if (captr->mshr_count > 0) {
      /*
       * First check whether any outstanding request targets this cache
       * line.  If so, mark the MSHR so it performs the flush/purge after
       * all coalesced requests have been served; the mark also stops the
       * MSHR from coalescing further requests.
       */
      int deferred = 0;

      pbnum = paddr >> captr->block_shift;
      for (n = 0; n < captr->max_mshrs; n++) {
        if (captr->mshrs[n].valid && pbnum == captr->mshrs[n].blocknum) {
          captr->mshrs[n].pend_opers = req->prcr_req_type;
          deferred = 1;
          break;
        }
      }

      if (deferred) {
        req->hit_type = L1IHIT;
        continue;
      }
    }

    /*
     * Invalidate the line if it is present in the cache.  No writeback to
     * L2 is needed because this is an instruction cache.
     */
    if (Cache_search(captr, vaddr, paddr, &cline) == 0) {
      if (cline->pref == 1)
        captr->pstats->sl1.useless++;
      else if (cline->pref == 4)
        captr->pstats->l1ip.useless++;
      cline->state = INVALID;
      cline->tag   = -1;
    }
  }
}

/*=============================================================================
 * L1IProcessTagRequest: simulates request accesses to the L1 cache.
 * "req->progress" records the processing progress of a request:
 *   progress = 0: initial value
 *   progress = 1: has been processed, but stuck because the output queue
 *                 is full; just try to send it out if it comes again.
 * Returns 0 on failure; 1 on success.
*/
static int L1IProcessTagRequest(CACHE * captr, REQ * req)
{
  int            status;
  MSHR_Response  mshr_status;
  LinkQueue      *lq;
  cline_t        *cline;

  /* Perfect-I-cache mode: every fetch hits; hand the instruction block to
   * the processor immediately, predecode it, and complete the request. */
  if (cparam.L1I_perfect)
    {
      Cache_search(captr, req->vaddr, req->paddr, &cline);
      req->d.proc_instruction.instructions = captr->data +
	((cline->index * captr->linesz + (req->paddr & captr->block_mask)) /
	 SIZE_OF_SPARC_INSTRUCTION) * SIZEOF_INSTR;
      PredecodeBlock(req->paddr & block_mask1i, req->node,
		     captr->data + cline->index *
		     captr->linesz / SIZE_OF_SPARC_INSTRUCTION *
		     SIZEOF_INSTR,
		     captr->linesz / SIZE_OF_SPARC_INSTRUCTION);
      req->hit_type = L1IHIT;
      Cache_global_perform(captr, req, 1);
      return 1;
    }

  /* progress == 0: first pass — classify the request and update MSHRs.
   * A later pass (progress == 1) only retries the send to L2 below. */
  if (req->progress == 0)
    {
      if (req->prcr_req_type == FLUSHC)
	{
	  L1IProcessFlushPurge(captr, req);
	  req->progress = 1;
	}
      else if (req->prcr_req_type == PURGEC)
	{
	  L1IProcessFlushPurge(captr, req);
	  req->progress = 1;
	}
      else
	{         /* IFetch */
	  mshr_status = L1ICache_check_mshr(captr, req);
	  switch (mshr_status)
	    {
	    case NOMSHR: /* a hit with no associated forward request */
	      if (req->prefetch == 4)
		captr->pstats->l1ip.unnecessary++;
	      else if (req->prefetch == 1)
		captr->pstats->sl1.unnecessary++;
	      else if (req->prefetch == 2)
		captr->pstats->sl2.unnecessary++;
	      /*
	       * send the request back to the processor, as it's done.
	       * REQs also get freed up there.
	       */
	      if (req->prefetch == 4)
		YS__PoolReturnObj(&YS__ReqPool, req);
	      else
		Cache_global_perform(captr, req, 1);
	      return 1;

	    case MSHR_COAL: /* Coalesced into an MSHR. success. */
	      return 1;

	    case MSHR_NEW: /* need to send down some sort of miss or upgrd */
	      if (req->prefetch == 4)
		captr->pstats->l1ip.issued++;
	      else if (req->prefetch == 1)
		captr->pstats->sl1.issued++;
	      req->progress = 1;
	      /* On a demand miss, optionally kick off a hardware prefetch. */
	      if (cparam.L1I_prefetch && !req->prefetch &&
		  req->hit_type != L1IHIT)
		L1ICache_start_prefetch(captr, req);
	      break;

	    case MSHR_USELESS_FETCH:
	      /*
	       * a prefetch to a line that already has an outstanding fetch.
	       * Drop this prefetch even without discriminate prefetch on.
	       */
	      if (req->prefetch == 4)
		captr->pstats->l1ip.unnecessary++;
	      else if (req->prefetch == 1)
		captr->pstats->sl1.unnecessary++;
	      else if (req->prefetch == 2)
		captr->pstats->sl2.unnecessary++;
	      req->hit_type = L1IHIT;
	      if (req->prefetch == 4)
		YS__PoolReturnObj(&YS__ReqPool, req);
	      else
		Cache_global_perform(captr, req, 1);
	      return 1;

	    case NOMSHR_FWD:
	      /*
	       * means REQUEST is a non-write-allocate write miss/hit or
	       * an L2 prefetch. No prefetch stats here, since that will
	       * be done in L2 cache.
	       */
	      if (req->hit_type == L1IHIT)        /* A write hits L1 cache. */
		Cache_global_perform(captr, req, 0);
	      req->progress = 1;
	      break;

	    case MSHR_STALL_COAL: /* too many requests coalesced to  MSHR   */
	    case MSHR_STALL_FLUSH:   /* pending flush/purge requests        */
	    case NOMSHR_STALL:       /* No MSHRs available for this request */
	      if (req->prefetch && DISCRIMINATE_PREFETCH)
		{
		  /* just drop the prefetch here. */
		  if (req->prefetch == 1)
		    captr->pstats->sl1.dropped++;
		  else if (req->prefetch == 2)
		    captr->pstats->sl2.dropped++;
		  else if (req->prefetch == 4)
		    captr->pstats->l1ip.dropped++;
		  YS__PoolReturnObj(&YS__ReqPool, req);
		  return 1;
		}
	      return 0;   /* stall: leave the REQ in the pipe for a retry */

	    default:
	      YS__errmsg(captr->nodeid,
			 "Default case in L1I mshr_hittype: %d", mshr_status);
	    }
	}
    }

  /*
   * we'll send down an upgrade, miss, or non-allocating access
   * successful if there's space in the output port; otherwise, return 0
   * and allow retry.
   */
  lq = &(captr->l2_partner->request_queue);
  if (!lqueue_full(lq))
    {
      lqueue_add(lq, req, captr->nodeid);
      captr->l2_partner->inq_empty = 0;
      return 1;
    }
  return 0;
}

/*=============================================================================
 * Check MSHR for a new L1 REQUEST.
 */
static MSHR_Response L1ICache_check_mshr(CACHE *captr, REQ *req)
{
  MSHR    *pmshr;
  cline_t *cline;
  int      misscache, i;

  /*
   * First determines whether the incoming transaction matches any MSHR

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -