
📄 issue.cc

📁 A C++-based processor simulation platform for Linux, featuring a processor pipeline
📖 Page 1 of 4
/*
 * Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005
 * The Regents of The University of Michigan
 * All Rights Reserved
 *
 * This code is part of the M5 simulator, developed by Nathan Binkert,
 * Erik Hallnor, Steve Raasch, and Steve Reinhardt, with contributions
 * from Ron Dreslinski, Dave Greene, Lisa Hsu, Kevin Lim, Ali Saidi,
 * and Andrew Schultz.
 *
 * Permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any
 * purpose, so long as the copyright notice above, this grant of
 * permission, and the disclaimer below appear in all copies made; and
 * so long as the name of The University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION FROM THE
 * UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY PURPOSE, AND
 * WITHOUT WARRANTY BY THE UNIVERSITY OF MICHIGAN OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE. THE REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE
 * LIABLE FOR ANY DAMAGES, INCLUDING DIRECT, SPECIAL, INDIRECT,
 * INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM
 * ARISING OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGES.
 */

#include <string>
#include <iostream>
#include <iomanip>
#include <algorithm>
#include <sstream>

#include "base/cprintf.hh"
#include "encumbered/cpu/full/cpu.hh"
#include "encumbered/cpu/full/dep_link.hh"
#include "encumbered/cpu/full/dyn_inst.hh"
#include "encumbered/cpu/full/iq/iqueue.hh"
#include "encumbered/cpu/full/issue.hh"
#include "encumbered/cpu/full/readyq.hh"
#include "encumbered/cpu/full/storebuffer.hh"
#include "encumbered/cpu/full/thread.hh"
#include "encumbered/cpu/full/writeback.hh"
#include "mem/cache/cache.hh"
#include "mem/functional/memory_control.hh"
#include "mem/mem_interface.hh"
#include "sim/eventq.hh"

using namespace std;

//
//  Make sure you disable this for "normal" operation!!!
//
#define REMOVE_WP_LOADS 0

//
//  Local declarations
//
InstSeqNum issue_break = 0;

void
issue_breakpoint()
{
    ccprintf(cerr, "Reached issue breakpoint @ %d\n", curTick);
}

struct issue_info
{
    InstSeqNum seq;
    unsigned   clust;
    string     inst;

    issue_info(InstSeqNum s, unsigned c, string &i) {
        seq = s;
        clust = c;
        inst = i;
    }
};

bool
operator<(const issue_info &l, const issue_info &r)
{
    return l.seq < r.seq;
}

static const char *unissued_names[Num_OpClasses + 2];

#define UNISSUED_CACHE_BLOCKED (Num_OpClasses)
#define UNISSUED_TOO_YOUNG     (Num_OpClasses + 1)

//#define DEBUG_ISSUE

#define ISSUE_OLDEST 0

#define DEBUG_FLOSS 1

#define DUMP_LOADS 0
#define DUMP_ISSUE 0
#define DUMP_REFRESH 0

/*
 *  LSQ_REFRESH() - memory access dependence checker/scheduler
 */

/*
 * load/store queue (LSQ): holds loads and stores in program order, indicating
 * status of load/store access:
 *
 *   - issued: address computation complete, memory access in progress
 *   - completed: memory access has completed, stored value available
 *   - squashed: memory access was squashed, ignore this entry
 *
 * loads may execute when:
 *   1) register operands are ready, and
 *   2) memory operands are ready (no earlier unresolved store)
 *
 * loads are serviced by:
 *   1) previous store at same address in LSQ (hit latency), or
 *   2) data cache (hit latency + miss latency)
 *
 * stores may execute when:
 *   1) register operands are ready
 *
 * stores are serviced by:
 *   1) depositing store value into the load/store queue
 *   2) writing store value to the store buffer (plus tag check) at commit
 *   3) writing store buffer entry to data cache when cache is free
 *
 * NOTE: the load/store queue can bypass a store value to a load in the same
 *   cycle the store executes (using a bypass network), thus stores complete
 *   in effective zero time after their effective address is known
 */
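
//
//  Illustrative walk-through (editorial sketch, not part of the original
//  M5 source): suppose the LSQ holds, oldest first,
//
//      st A (addr & data known) | st ? (addr unknown) | ld A (regs ready)
//
//  Rule (2) above blocks the load: the middle store's address is still
//  unresolved, so it might overlap address A.  Once that store resolves
//  to some other address B, the load may issue and is serviced by the
//  older store to A at hit latency via forwarding (case 1 above).
//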

struct StoreInfoRecord {
    Addr addr;
    bool addr_known;
    bool data_known;
};

//
//  Memory Address Disambiguation
//
//  This function walks the LSQ, taking note of stores, and looks for load
//  instructions that might conflict with earlier stores.
//
//  NOTE: This function is the *only* place where loads get placed on the
//        ready list. All other instructions are enqueued when their last
//        operand is marked as ready. This means that a load can be "ops-ready"
//        but *not* on the ready list.
//
//  We walk the LSQ from oldest to youngest looking for stores, and when we
//  encounter a load, we walk the list of earlier stores from youngest to
//  oldest looking for the youngest store that matches the load.
//
void
FullCPU::lsq_refresh()
{
    load_store_queue *ls_queue = (load_store_queue *)LSQ;

    // The list of all "earlier" stores
    list<StoreInfoRecord> store_list[SMT_MAX_THREADS];
    typedef list<StoreInfoRecord>::reverse_iterator r_it;

    // mark threads that have no chance of issuing anything else, for
    // various reasons:
    // - any store under DISAMBIG_CONSERVATIVE
    // - memory barrier under any model
    bool thread_done[SMT_MAX_THREADS];
    unsigned num_threads_done = 0;

    for (int i = 0; i < number_of_threads; ++i)
        thread_done[i] = false;

#if DUMP_REFRESH
    cout << "@" << curTick << ":\n";
#endif

    //
    //  Scan the LSQ:
    //
    BaseIQ::iterator lsq = ls_queue->head();
    for (; lsq.notnull(); lsq = lsq.next()) {
        //  Skip any LSQ entries that have been squashed
        if (lsq->squashed)
            continue;

        unsigned thread = lsq->thread_number();
        if (thread_done[thread])
            continue;

        DynInst *inst = lsq->inst;

        if (inst->isMemBarrier()) {
            // Memory barrier: total blocking condition.  We're done
            // with this thread.
            thread_done[thread] = true;
            if (++num_threads_done == number_of_threads)
                break; // out of lsq loop
        } else if (inst->isStore()) {
            // Stores...

            // If it's ready to go, put it on the ready list.
            // (This used to be done unconditionally in writeback, but
            // was moved here so we could suppress it when a memory
            // barrier is pending.)
            if (!lsq->queued && lsq->ops_ready()) {
                ls_queue->ready_list_enqueue(lsq);
            }

            if (disambig_mode == DISAMBIG_CONSERVATIVE) {
                // force stores to go completely in order.
                thread_done[thread] = true;
                if (++num_threads_done == number_of_threads)
                    break; // out of lsq loop
            } else {
                // Store (and not conservative disambiguation).  Just
                // record it for now.  Note that under normal
                // disambiguation, an unknown-address store doesn't
                // end the thread, since a younger store with known
                // address & data could forward data to an even
                // younger load.
                StoreInfoRecord sir;
                sir.addr = inst->eff_addr;
                // Under oracle disambiguation, we treat all addresses as
                // known even if the pipeline hasn't calculated them yet.
                sir.addr_known = (STORE_ADDR_READY(lsq)
                                  || disambig_mode == DISAMBIG_ORACLE);
                sir.data_known = lsq->ops_ready();
                store_list[thread].push_back(sir);
            }
        } else {
            // must be a load, or it wouldn't be in LSQ
            assert(inst->isLoad());

            //
            //  If this is a cacheable load that has calculated its
            //  effective address, but is not yet on the ready-list,
            //  check it against earlier stores:
            //     (1) Earlier stores that haven't calculated their address
            //           --> can't issue the load
            //     (2) Earlier stores that *have* calculated their address
            //          (a) If the address matches:
            //              --> if the store data is available: issue the load
            //              --> if the store data is *not* available:
            //                  the load must wait
            //          (b) If the address doesn't match, issue the load
            //
            //  Uncacheable loads can't be issued until they become
            //  non-speculative, so we ignore them here.  They will be
            //  added to the ready list in commit.
            if (!lsq->queued && lsq->ops_ready()
                && !(lsq->inst->mem_req_flags & UNCACHEABLE)) {
                bool enqueue = true;

                //
                //  Check for conflicts with earlier stores
                //
                //  We walk *backwards* through the list of stores so that
                //  we match up this load with the most recent store
                //
                for (r_it store = store_list[thread].rbegin();
                     store != store_list[thread].rend();
                     ++store)
                {
                    if (store->addr_known) {
                        //
                        //  Store *HAS* calculated its effective address
                        //
                        if (store->addr == inst->eff_addr) {
                            //
                            //  Load & Store *ADDRESSES MATCH*
                            //    --> this is the most-recent store
                            //
                            if (store->data_known) {
                                //
                                // FIXME! We disregard the size of the
                                // operations and forward from the store
                                // to the load
                                //
                            }
                            else {
                                //
                                //  The store doesn't have its data yet..
                                //  --> we need that data... this load must
                                //      wait
                                enqueue = false;
                            }

                            //  this is the only store we really care about,
                            //  so we don't want to look any earlier
                            break;
                        } else {
                            //
                            //  Load & Store addresses do NOT match
                            //
                        }
                    } else {
                        //
                        //  Store has not calculated its effective address
                        //
                        //  we can't issue this load, since we don't know if
                        //  this store conflicts
                        enqueue = false;
                        break;
                    }
                }

                if (enqueue) {
                    ls_queue->ready_list_enqueue(lsq);
#if DUMP_REFRESH
                    cout << "  " << lsq->seq << endl;
#endif
                } else {
                    ++lsq_blocked_loads[thread];
                }
            }
        }
    }
}
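
//
//  Recap of the disambiguation modes used above (editorial note; the
//  names match the code, the descriptions paraphrase its behavior):
//
//    DISAMBIG_CONSERVATIVE -- any store marks its thread done, forcing
//        stores and all younger memory ops to proceed strictly in order.
//    normal disambiguation -- a load waits only on an earlier same-thread
//        store whose address is unknown, or whose address matches the
//        load's but whose data is not yet available.
//    DISAMBIG_ORACLE -- every store address is treated as known even
//        before the pipeline computes it, so loads never stall on
//        unresolved store addresses.
//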

//
/* Try to issue the LSQ portion of a load.  Returns flag indicating
 * whether instruction was actually issued.  If load is issued,
 * completion latency (if known) is returned via latency pointer
 * argument, and caller is responsible for scheduling writeback event.
 * If latency is unknown, returned latency value is set to -1, and a
 * future callback from the memory system will schedule the writeback.
 */
bool
FullCPU::issue_load(BaseIQ::iterator lsq, int *latency)
{
    int load_lat;
    bool addr_is_valid;
    DynInst *inst;
    int thread_number;
    bool issued;

    inst = lsq->inst;
    thread_number = lsq->thread_number();

    addr_is_valid = inst->xc->validDataAddr(inst->eff_addr) &&
        inst->fault == No_Fault;

#if FULL_SYSTEM
    if (inst->xc->misspeculating() &&
        inst->xc->memctrl->badaddr(inst->phys_eff_addr))
        addr_is_valid = false;
#endif

    /* reset lsq->mem_result value */
    lsq->mem_result = MA_HIT;

    /* Assume it's issued for now.  If necessary, unset later */
    issued = true;

    /* for loads, determine cache access latency:
     * first scan LSQ to see if a store forward is
     * possible, if not, access the data cache */
    load_lat = 0;
    for (BaseIQ::iterator earlier_lsq = lsq.prev();
         earlier_lsq.notnull(); earlier_lsq = earlier_lsq.prev()) {
        /* FIXME: not dealing with partials! */
        if (earlier_lsq->inst->isStore() &&
            earlier_lsq->inst->eff_addr == inst->eff_addr &&
            earlier_lsq->inst->asid == inst->asid) {
            /* hit in the LSQ */
            lsq_forw_loads[thread_number]++;
            load_lat = cycles(1);
            break;
        }
    }

    /* was the value store-forwarded from the LSQ? */
    if (!load_lat) {
        if (!inst->spec_mode && !addr_is_valid)
            sim_invalid_addrs++;

        /* no! go to the data cache if addr is valid */
        if (addr_is_valid) {
#if DUMP_LOADS
            if (!inst->spec_mode) {
                cerr << "T" << inst->thread_number << " : Load from 0x" << hex
                     << inst->eff_addr << " issued" << endl;
            }
#endif
            //
            //  Prepare memory request...
            //
            MemReqPtr req = new MemReq();
            req->thread_num = thread_number;
            req->asid = inst->asid;
            req->cmd = Read;
            req->vaddr = inst->eff_addr;
            req->paddr = inst->phys_eff_addr;
            req->flags = inst->mem_req_flags;
            req->size = 1;
            req->time = curTick;
            req->data = new uint8_t[1];
            req->xc = inst->xc;
            req->pc = inst->PC;

            WritebackEvent *wb_event
                = new WritebackEvent(this, lsq->rob_entry, req);
            req->completionEvent = wb_event;

            // so we can invalidate on a squash
            lsq->rob_entry->wb_event = wb_event;

            BaseIQ::CacheMissEvent *cm_event
                = IQ[0]->new_cm_event(lsq->rob_entry);
            lsq->rob_entry->cache_event_ptr = cm_event;

#if REMOVE_WP_LOADS
            //
            //  Treat spec-mode loads as if they are hits...
            //   --> don't actually send them to the cache!
            //
            if (inst->spec_mode) {
                lsq->mem_result = MA_HIT;
                wb_event->schedule(curTick + cycles(3));
                if (cm_event != 0)
                    cm_event->schedule(curTick + cycles(3));
            } else {
#endif
            lsq->mem_result = dcacheInterface->access(req);
#if REMOVE_WP_LOADS
            }
#endif

            //
            //  We schedule this event ourselves... not required to be
            //  part of the cache...
            //
            if (cm_event != NULL) {
                cm_event->annotate(lsq->mem_result);
                Tick latency = dcacheInterface->getHitLatency();
                assert(latency >= clock);
                cm_event->schedule(curTick + latency);
            }

            // negative load latency prevents issue()
            // from scheduling a writeback event... this
            // will come from the memory system
            // instead.
            load_lat = -1;
        } else {
            load_lat = *latency;
            if (inst->xc->misspeculating()) {
                // invalid addr is misspeculation, just use op latency
                inv_addr_loads[thread_number]++;
            } else if (inst->fault == No_Fault) {
                // invalid addr is a bad address, panic
                panic("invalid addr 0x%x accessed and not misspeculating",
                      inst->eff_addr);
            }
        }
    }

    *latency = load_lat;  // return a latency value back to lsq_issue()

    lsq->rob_entry->mem_result = lsq->mem_result;

    return issued;
}
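
//
//  Latency contract for issue_load() (editorial summary of the code
//  above): on return, *latency holds
//      cycles(1) -- value was forwarded from an earlier store in the
//                   LSQ; the caller schedules the writeback itself.
//      -1        -- the load was sent to the data cache; the attached
//                   WritebackEvent fires when the memory system is done.
//      unchanged -- invalid address on a misspeculated path; the FU op
//                   latency passed in by the caller is reused.
//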

//
/* Try to issue the LSQ portion of a prefetch instruction.  Currently,
 * this always succeeds, as we just throw away the prefetch if it
 * can't be issued immediately.  Instruction is also marked as
 * completed.  Returned latency should be ignored.
 */
bool
FullCPU::issue_prefetch(BaseIQ::iterator rs, int *latency)
{
    bool addr_is_valid;
    int pf_size = 0;
    DynInst *inst;
    int thread_number;

    /* Prefetches always issue & complete right away: they are just
     * discarded (treated like no-ops) if there aren't enough
     * resources for them.
     */

    /* reset rs->mem_result value */
    rs->mem_result = MA_HIT;
    rs->rob_entry->completed = true;

    if (softwarePrefetchPolicy == SWP_DISABLE) {
        // prefetching disabled: nothing to do
        return true;
    }

    if (softwarePrefetchPolicy == SWP_SQUASH) {
        cout << "# " << rs->seq << endl;
        rs->dump();
        fatal("The swp instruction should be squashed earlier.\n");
    }

    inst = rs->inst;
    thread_number = rs->thread_number();

    addr_is_valid = (inst->eff_addr != MemReq::inval_addr
                     && inst->xc->validDataAddr(inst->eff_addr)
                     && inst->fault == No_Fault);

#if FULL_SYSTEM
    if (inst->xc->misspeculating() &&
        inst->xc->memctrl->badaddr(inst->phys_eff_addr))
        addr_is_valid = false;
#endif

    /* go to the data cache if addr is valid */
    if (addr_is_valid) {
        MemReqPtr req = new MemReq();
        req->thread_num = thread_number;
        req->asid = inst->asid;
        req->cmd = Soft_Prefetch;
        req->vaddr = inst->eff_addr;
        req->paddr = inst->phys_eff_addr;
        req->flags = inst->mem_req_flags;
        req->size = pf_size;
        req->completionEvent = NULL;
        req->time = curTick;
        req->data = new uint8_t[pf_size];
        req->xc = inst->xc;
        req->pc = inst->PC;

        dcacheInterface->access(req);
    } else {
        /* invalid addr */
        inv_addr_swpfs[thread_number]++;
    }

    //  We "return" the original FU op-latency through the "latency" pointer
    return true;
}
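
//
//  softwarePrefetchPolicy recap (editorial note; values as handled above):
//      SWP_DISABLE -- the prefetch is dropped and the instruction
//                     completes as a no-op.
//      SWP_SQUASH  -- must never reach issue; hitting it here is fatal.
//      otherwise   -- a Soft_Prefetch MemReq is sent to the data cache
//                     with no completion event (fire-and-forget).
//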

//
//  Issue an instruction from the Store-Ready Queue
//
bool
FullCPU::sb_issue(StoreBuffer::iterator i, unsigned pool_num)
{
    /* wrport should be available */
    int fu_lat = FUPools[pool_num]->getUnit(MemWriteOp);
