ms_ldst.c
/*
 * Copyright (C) 1996-1998 by the Board of Trustees
 * of Leland Stanford Junior University.
 *
 * This file is part of the SimOS distribution.
 * See LICENSE file for terms of the license.
 *
 */

/*
 * ms_ldst - Process loads and stores for the MXS simulator
 *
 * Jim Bennett
 * 1993, 1994, 1995
 */

#include <sys/types.h>
#include <string.h>
#include <stdlib.h>
#include "ms.h"
#include "common_cache.h"
#include "simutil.h"
#include "memsys.h"
#ifdef MIPSY_MXS
#include "hw_events.h"
#include "cpu_state.h"
#include "memref.h"
#endif

/* These are needed to make the DCACHE_TAG and DCACHE_INDEXOF macros work */
#define dIndexMask (DCACHE_INDEX - 1)
#define dTagShift  (log2DCACHE_SIZE - 1)

static int ms_lsop (struct s_cpu_state *st, struct s_lsq *ls,
                    struct s_ldst_buffer *ldst_ent, int update_inst);
static int ms_lsop_uncached (struct s_cpu_state *st, struct s_lsq *ls);
static int ms_cache (struct s_cpu_state *st, struct s_lsq *ls,
                    struct s_ldst_buffer *entry);
static int ldst_buffer_conflict (struct s_cpu_state *st,
                    struct s_ldst_buffer *ldst_ent);
static struct s_ldst_buffer *ldst_buffer_alloc (struct s_cpu_state *st,
                    int paddr, int lstype);
static void ldst_buffer_free (struct s_cpu_state *st,
                    struct s_ldst_buffer *entry);
static void ldst_buffer_write (struct s_cpu_state *st,
                    struct s_ldst_buffer *entry, int paddr,
                    char *data, int size);
static int ldst_buffer_updateRead (struct s_cpu_state *st,
                    struct s_ldst_buffer *entry, int paddr,
                    char *data, int size);
static int recurse_unstall (struct s_cpu_state *st, int br_node);
static void set_new_excuse (struct s_cpu_state *st, int reg, int excuse);

#ifdef MIPSY_MXS
#define WouldFault(st, lstype, addr) (0)
#else
#define WouldFault(st, lstype, addr) \
    (is_misaligned (lstype, addr) || \
     (!((((addr) >= pmap.text_base) && ((addr) < pmap.max_text)) || \
        (((addr) >= pmap.data_base) && ((addr) < pmap.data_top)) )))
#endif

/*
 * ldst_init - Initialize the load/store unit data structures.  This
 *      routine should be called after each exception.
 */
void
ldst_init (struct s_cpu_state *st)
{
    int i;

    for (i = 0; i < LDST_BUFFER_SIZE; i++) {
        st->ldst_buffer[i].next = &st->ldst_buffer[i+1];
        st->ldst_buffer[i].prev = &st->ldst_buffer[i-1];
        st->ldst_buffer[i].missTag = (void *) -1;
        st->ldst_buffer[i].ls = &st->lsq[i];
        st->lsq[i].ldst_ent = &st->ldst_buffer[i];
    }
    st->ldst_buffer[0].prev = NULL;
    st->ldst_buffer[LDST_BUFFER_SIZE-1].next = NULL;

    /* All entries go on the free list to start */
    st->ldst_head = NULL;
    st->ldst_tail = NULL;
    st->ldst_nextReserved = NULL;
    st->ldst_nextAvail = &st->ldst_buffer[0];
}
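/*
 * Illustrative sketch, not part of the simulator (kept inside #if 0 so it
 * does not affect the build): ldst_init() leaves the LDST_BUFFER_SIZE
 * entries chained into a doubly linked free list, with ldst_head/ldst_tail
 * empty and ldst_nextAvail pointing at entry 0.  The hypothetical helper
 * below just walks that chain; immediately after ldst_init() it would
 * count LDST_BUFFER_SIZE entries.
 */
#if 0
static int
ldst_free_count (struct s_cpu_state *st)
{
    struct s_ldst_buffer *e;
    int n = 0;

    /* Walk the next pointers set up by ldst_init() */
    for (e = st->ldst_nextAvail; e != NULL; e = e->next)
        n++;
    return (n);
}
#endif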
/*
 * ms_ldst_dep - Update load/store dependencies
 */
void
ms_ldst_dep (struct s_cpu_state *st)
{
#ifdef ONE_PHASE_LS
    /* For a one phase load/store, update the IWIN_LDST_DEP */
    /* flag of instructions at the head of the chain.       */
    int inum;

    inum = st->iwin_head_ldst;
    if (inum >= 0) {
        /* Disallow speculative stores. */
        if ((st->iwin_flags [inum] & (IWIN_STORE | IWIN_SPEC)) ==
                (IWIN_STORE | IWIN_SPEC))
            return;
        st->iwin_flags [inum] &= ~IWIN_LDST_DEP;
        CheckInstAvail (st, inum);
        if (!(st->iwin_flags [inum] & IWIN_STORE)) {
            inum = st->iwin_ldst [inum];
            while (inum >= 0) {
                if (st->iwin_flags [inum] & IWIN_STORE)
                    break;
                st->iwin_flags [inum] &= ~IWIN_LDST_DEP;
                CheckInstAvail (st, inum);
                inum = st->iwin_ldst [inum];
            }
        }
    }
#else
    /* For a two phase load/store, check for instructions  */
    /* that have completed phase one and are ready to go.  */
    /* Ready = addr valid + predecessors addr valid        */
    /*         and ready to issue.                          */
    int inum;

    for (inum = st->iwin_head_ldst; inum >= 0; inum = st->iwin_ldst [inum]) {
        /* Can't issue or move around a load or store */
        /* until we know the address it will use.     */
        if (!(st->iwin_flags [inum] & IWIN_ADDR_VALID))
            break;
        st->iwin_flags [inum] &= ~IWIN_LDST_DEP;
        CheckInstAvail (st, inum);

        /* If this instruction didn't issue, then done for */
        /* this cycle, try again next time.                */
        if (!(st->iwin_flags [inum] & IWIN_AVAIL)) {
            st->iwin_flags [inum] |= IWIN_LDST_DEP;
            break;
        }
    }
#endif
}

/*
 * ms_lsq - Add a load or store request to the load/store buffer
 *
 *      Set the status of the entry according to whether it is
 *      cached/uncached, load/store, ready, conflicting, etc.
 */
void
ms_lsq (struct s_cpu_state *st, int addr, int reg, int reg2,
        int lstype, int inum)
{
    struct s_ldst_buffer *entry;
    struct s_ldst_buffer *store_entry;
    struct s_lsq *ls;

    entry = ldst_buffer_alloc (st, st->iwin_paddr [inum], lstype);
    if (entry == NULL) {
        st->stall_type = ST_LSQ_FULL;
        st->stall_issue = 1;
        return;
    }
    st->inum2ldst[inum] = entry;
    st->iwin_flags[inum] |= IWIN_LDSTBUF;

    ls = entry->ls;
    ls->status = 0;
    ls->lstype = lstype;
    ls->addr = addr;
    ls->paddr = entry->paddr;
    ls->reg = reg;
    ls->reg2 = reg2;
    ls->inum = inum;

    if (st->iwin_flags [inum] & IWIN_UNCACHED)
        ls->status |= LS_ST_UNCACHED;
    else {
        /* If it is a cached operation, must check for */
        /* conflicts with other cached operations.     */
        if (!IsPrefetch(lstype) && ldst_buffer_conflict (st, entry))
            ls->status |= LS_ST_CONFLICT;
    }

    /* If it's a store, do the operation once in order to  */
    /* get the valid bytes information set.  Then check    */
    /* if the value was really available.                  */
    if (IsStore(lstype)) {
        int reg_ix;
        REGSTAT *rs;

        IncStat (ST_NSTORES);
        if (!IsSC(lstype))
            ms_lsop (st, ls, entry, 0);
        reg_ix = st->iwin[inum].r3 >> 1;
        rs = &st->reg_rstat [reg_ix];
        if (rs->reg_status & REG_IN_WIN)
            ls->status |= LS_ST_PEND;
    }

    /* If it's a load, check if it's also waiting for the  */
    /* value from a store.                                 */
    else if (IsLoad(lstype)) {
        IncStat (ST_NLOADS);
        if (entry->loadhit) {
#ifdef LOAD_BYPASS
            store_entry = entry->loadhit;
            if (store_entry->ls->status & LS_ST_PEND)
                ls->status |= LS_ST_PEND;
#else
            ls->status |= LS_ST_PEND;
#endif
        }
    }
}
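/*
 * Note on the store path (summary of the code above and of ms_memory
 * below): a store is run through ms_lsop() once at enqueue time, with
 * update_inst = 0, purely to record which bytes of the entry are valid.
 * If the register supplying the store data is still in the instruction
 * window (REG_IN_WIN), the entry is marked LS_ST_PEND; ms_memory()
 * clears that flag and re-runs ms_lsop() once the value is available,
 * which in turn lets any load waiting on this store proceed.
 */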
/*
 * ms_memory - Memory operations.  All of the memory operations,
 *      whether cached or uncached, have been stored in the load
 *      store buffer.  This routine checks each cycle which
 *      operations can be forwarded to the cache.
 */
void
ms_memory (struct s_cpu_state *st)
{
    int cache_ops, check_loads, exit_loop;
    struct s_ldst_buffer *store_entry;
    struct s_ldst_buffer *entry, *next_entry;
    struct s_lsq *ls;

    next_entry = 0;     /* Silence bogus compiler error message */

    /* First update the pending flags for the store ops */
    check_loads = 0;
    for (entry = st->ldst_head; (entry); entry = next_entry) {
        next_entry = entry->next;
        ls = entry->ls;
        if (IsStore(ls->lstype) && (ls->status & LS_ST_PEND)) {
            int reg_ix;
            REGSTAT *rs;

            reg_ix = st->iwin[ls->inum].r3 >> 1;
            rs = &st->reg_rstat [reg_ix];
            if ((rs->reg_status & REG_IN_WIN) == 0) {
                /* Turn off LS_ST_CACHED so    */
                /* it will be retried in cache */
                ls->status &= ~(LS_ST_PEND | LS_ST_CACHED);

                /* Call ms_lsop to write the   */
                /* correct value in the load   */
                /* store buffer.               */
                if (!IsSC(ls->lstype))
                    ms_lsop (st, ls, ls->ldst_ent, 0);
                check_loads = 1;
            }
        }
    }

    /* Then if a store became available this cycle, check  */
    /* for waiting loads as well.                          */
#ifdef LOAD_BYPASS
    if (check_loads) {
        for (entry = st->ldst_head; (entry); entry = next_entry) {
            next_entry = entry->next;
            ls = entry->ls;
            if (IsLoad(ls->lstype) && (ls->status & LS_ST_PEND)) {
                store_entry = entry->loadhit;
                if ((store_entry->ls->status & LS_ST_PEND) == 0)
                    /* Turn off LS_ST_CACHED so    */
                    /* it will be retried in cache */
                    ls->status &= ~(LS_ST_PEND | LS_ST_CACHED);
            }
        }
    }
#endif

    /* Update the conflict status of entries */
    for (entry = st->ldst_head; (entry); entry = next_entry) {
        next_entry = entry->next;
        ls = entry->ls;
        if (ls->status & LS_ST_CONFLICT) {
            if (!ldst_buffer_conflict (st, entry))
                ls->status &= ~LS_ST_CONFLICT;
        }
    }

    /* The status is up to date, try to send requests  */
    /* to the cache.                                   */
    cache_ops = 0;
    exit_loop = 0;
    for (entry = st->ldst_head; (entry) && (!exit_loop); entry = next_entry) {
        next_entry = entry->next;
        ls = entry->ls;
        if (ls->status & LS_ST_DONE)
            continue;

        /* Send it to the cache if it isn't uncached     */
        /* and it's never been accepted by the cache     */
        /* and it doesn't conflict with prior operations */
        if ((ls->status & (LS_ST_UNCACHED | LS_ST_CACHED |
                           LS_ST_CONFLICT)) == 0) {
            if (ms_cache (st, ls, entry) >= 0) {
                /* The cache has accepted it. */
                cache_ops++;
                if (cache_ops >= CACHE_WIDTH)
                    exit_loop = 1;
            }
        }

        /* Uncached operations are handled when the data is  */
        /* available and they are no longer speculative, in   */
        /* the imprecise model.  In the precise model, they   */
        /* are handled at graduation.                         */
        else if ((ls->status & (LS_ST_UNCACHED | LS_ST_PEND)) ==
                 LS_ST_UNCACHED) {
#ifdef PRECISE
            if (st->iwin_headgrad == ls->inum)
#else
            if ((st->iwin_flags[ls->inum] & IWIN_SPEC) == 0)
#endif /* !PRECISE */
            {
                entry->dataPtr = (char *) ls->paddr;    /* Mark as active */
                if (ms_lsop_uncached (st, ls) == 0) {
                    ls->status |= LS_ST_DONE;
                }
                exit_loop = 1;      /* One uncached per cycle */
            }
        }

        /* If this request was rejected by the cache, */
        /* stick here until it is accepted            */
        if (ls->status & LS_ST_FAILED)
            exit_loop = 1;
    }

#ifndef PRECISE
    ldst_retire_stores (st);
#endif /* !PRECISE */
}

/*
 * ldst_buffer_conflict - Check starting at the given entry
 *      in the load/store buffer if there is a potential cache
 *      conflict between this operation and all prior entries
 *      in the buffer.  Return TRUE if so.
 */
static int
ldst_buffer_conflict (struct s_cpu_state *st, struct s_ldst_buffer *entry)
{
    struct s_ldst_buffer *prev_entry;
    struct s_lsq *ls;
    int matched;
    int new_paddr, new_mask, new_isload;
    struct s_ldst_buffer *new_loadhit;
    int cacheindex, mask;
    int i, cmask;
    int cache_tags [DCACHE_ASSOC];
    int next_tag, old_tag;

    prev_entry = 0;     /* Silence bogus compiler error message */

    ls = entry->ls;
    new_paddr = ls->paddr;
    new_mask = (IsDouble(ls->lstype) ? ~7 : ~3);
    new_isload = IsLoad (ls->lstype);
    new_loadhit = entry->loadhit;
    cache_tags [0] = DCACHE_TAG(new_paddr);
    next_tag = 1;

    matched = 0;
    for (entry = entry->prev; (entry); entry = prev_entry) {
        prev_entry = entry->prev;
        ls = entry->ls;
        if (ls->status & LS_ST_UNCACHED)
            continue;               /* Prior request uncached, so */
                                    /* ignore it                  */

        /*
         * This previous request does not conflict if:
         *  1) Was to a different cache index
         *  2) Can't possibly cause a conflict miss replacement
         *  3) Was to a different word of the line
         *  4) Both requests are loads
         *  5) The new request is a load that hits in the
         *     store buffer
         */
        cacheindex = DCACHE_INDEXOF(ls->paddr);
        mask = (IsDouble(ls->lstype) ? ~7 : ~3);

        if (cacheindex != DCACHE_INDEXOF(new_paddr))
            continue;

        /* Potential for conflict miss; report match. */
        old_tag = DCACHE_TAG(ls->paddr);
        for (i = 0; i < next_tag; i++)
            if (old_tag == cache_tags[i])
                break;
        if (i >= next_tag) {
            if (next_tag >= DCACHE_ASSOC) {
                matched = 1;
                IncStat(ST_LDSTDEP_CONFLICT);
                break;
            }
            cache_tags [next_tag] = old_tag;
            next_tag++;
            continue;
        }

        if (old_tag != DCACHE_TAG(new_paddr))
            continue;
        cmask = mask | new_mask;
        if ((ls->paddr & cmask) != (new_paddr & cmask))
            continue;               /* Different word of same line */
        if (new_isload && (IsLoad (ls->lstype) || (new_loadhit)))
            continue;

        matched = 1;
        IncStat(ST_LDSTDEP_FAIL);
        break;
    }
    return (matched);
}
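/*
 * Worked example of the rules checked by ldst_buffer_conflict()
 * (illustration only):
 *
 *   - Two queued loads to the same word: both are loads (rule 4),
 *     so no conflict is reported.
 *   - A load behind an older store to the same line but a different
 *     word: rule 3 applies, so no conflict.
 *   - A load behind an older store to the same word that it does not
 *     hit in the store buffer: a match is reported and the
 *     ST_LDSTDEP_FAIL statistic is incremented.
 *   - A new access whose set is already referenced by older queued
 *     operations with so many distinct tags that, together with the new
 *     access's own tag, they exceed DCACHE_ASSOC: a conflict-miss
 *     replacement is possible, so a match is reported and
 *     ST_LDSTDEP_CONFLICT is incremented.
 */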
/*
 * ms_cache - Items are pulled from the load/store queue one
 *      at a time and processed by this routine.
 *
 *      Return -1 if the operation could not be performed
 */
static int
ms_cache (struct s_cpu_state *st, struct s_lsq *ls, struct s_ldst_buffer *entry)
{
    char *dataPtr = NULL;
    int flavor;
    int ret;
    WorkDecls;

    IncStat(ST_CACHE);

    /* Screen out errors due to speculative loads */
    if (WouldFault(st, ls->lstype, ls->addr)) {
        if (IsLoad (ls->lstype))
            st->reg_rstat[ls->reg >> 1].reg_status |= REG_ERROR;
        st->iwin_flags[ls->inum] |= IWIN_FAULT;
        CheckSquash (st, &st->iwin[ls->inum]);
        ls->status |= LS_ST_CACHED;