⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 commit.cc

📁 linux下基于c++的处理器仿真平台。具有处理器流水线
💻 CC
📖 第 1 页 / 共 2 页
字号:
/* * Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005 * The Regents of The University of Michigan * All Rights Reserved * * This code is part of the M5 simulator, developed by Nathan Binkert, * Erik Hallnor, Steve Raasch, and Steve Reinhardt, with contributions * from Ron Dreslinski, Dave Greene, Lisa Hsu, Kevin Lim, Ali Saidi, * and Andrew Schultz. * * Permission is granted to use, copy, create derivative works and * redistribute this software and such derivative works for any * purpose, so long as the copyright notice above, this grant of * permission, and the disclaimer below appear in all copies made; and * so long as the name of The University of Michigan is not used in * any advertising or publicity pertaining to the use or distribution * of this software without specific, written prior authorization. * * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION FROM THE * UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY PURPOSE, AND * WITHOUT WARRANTY BY THE UNIVERSITY OF MICHIGAN OF ANY KIND, EITHER * EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE. THE REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE * LIABLE FOR ANY DAMAGES, INCLUDING DIRECT, SPECIAL, INDIRECT, * INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM * ARISING OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF SUCH * DAMAGES. 
*/

#include "base/cprintf.hh"
#include "base/statistics.hh"
#include "cpu/exetrace.hh"
#include "cpu/smt.hh"
#include "encumbered/cpu/full/bpred.hh"
#include "encumbered/cpu/full/floss_reasons.hh"
#include "encumbered/cpu/full/cpu.hh"
#include "encumbered/cpu/full/iq/iqueue.hh"
#include "encumbered/cpu/full/rob_station.hh"
#include "encumbered/cpu/full/storebuffer.hh"
#include "encumbered/cpu/full/thread.hh"
#include "encumbered/cpu/full/writeback.hh"
#include "mem/mem_interface.hh"
#include "sim/sim_events.hh"
#include "sim/stats.hh"

using namespace std;

/*======================================================================*/
/*
 *  IQ_COMMIT() - instruction retirement pipeline stage
 *
 */

/*  Number of cycles we are allowed to go without committing an instruction  */
#define CRASH_COUNT 100000

//
//  One-shot event that decrements the CPU's per-thread fetch_fault_count
//  when it fires.  It schedules itself at construction time for the given
//  tick and is auto-deleted after processing (AutoDelete flag), so callers
//  simply `new` it and forget it.
//
class FaultHandlerDelayEvent : public Event
{
    FullCPU *cpu;       // CPU whose counter we will decrement
    int thread;         // thread whose fetch_fault_count is decremented

  public:
    FaultHandlerDelayEvent(FullCPU *_cpu, int _thread, Tick tick)
        : Event(&mainEventQueue), cpu(_cpu), thread(_thread)
    {
        // self-scheduling, self-deleting: no external bookkeeping needed
        setFlags(AutoDelete);
        schedule(tick);
    }

    ~FaultHandlerDelayEvent() {}

    virtual void process()
    {
        cpu->fetch_fault_count[thread]--;
    }
};

/* this function commits the results of the oldest completed entries from the
   IQ and LSQ to the architected reg file, stores in the LSQ will commit
   their store data to the data cache at this point as well */
void
FullCPU::commit()
{
    // Cycles since the last committed instruction, across calls; used for
    // the deadlock watchdog below.
    static int crash_counter = 0;

    unsigned committed = 0;                          // insts committed this cycle
    unsigned committed_thread[SMT_MAX_THREADS];      // per-thread commit counts
    int finished_thread[SMT_MAX_THREADS];            // threads we are done with
    int num_finished_threads;

    // "Why did commit stop" bookkeeping, tracked both per-thread and for
    // the overall oldest uncommittable instruction.
    CommitEndCause reason;
    // NOTE(review): stray double semicolon below — harmless, left untouched.
    CommitEndCause reason_overall = COMMIT_CAUSE_NOT_SET;;
    CommitEndCause reason_thread[SMT_MAX_THREADS];
    int detail;
    int detail_overall = 0;
    int detail_thread[SMT_MAX_THREADS];

    // in case there are NO unfinished instructions
    // (InstSeqNum(-1) wraps to the maximum sequence number, so any real
    // instruction compares older than this initial value)
    InstSeqNum seq_overall = InstSeqNum(-1);
    InstSeqNum seq_thread[SMT_MAX_THREADS];
    unsigned blame = 0;   // thread numbers
    unsigned blame_overall = 0;

    //
    //  Deadlock watchdog: this code causes the simulator to halt if it
    //  doesn't commit an instruction in CRASH_COUNT cycles
    //
    if (++crash_counter > CRASH_COUNT) {
        ccprintf(cerr, "DEADLOCK!\n");
        dumpIQ();
        LSQ->dump();
        ROB.dump();
        panic("We stopped committing instructions!!!");
    }

    //
    //  Determine which threads we don't need to worry about
    //
    num_finished_threads = 0;
    int num_inactive_threads = 0;
    for (int i = 0; i < number_of_threads; i++) {
        // if thread has no instructions in ROB then we can skip it
        if (!thread_info[i].active || ROB.num_thread(i) == 0) {
            finished_thread[i] = true;
            num_finished_threads++;

            if (!thread_info[i].active
                || execContexts[i]->status() != ExecContext::Active) {
                num_inactive_threads++;
            }
        } else {
            finished_thread[i] = false;
        }
    }

    if (num_finished_threads == number_of_threads) {
        // If we're not committing because all the threads are
        // inactive, don't consider this a microarchitectural
        // deadlock... it can happen e.g. in an MP where there is only
        // one runnable thread.
        if (num_inactive_threads == number_of_threads) {
            crash_counter = 0;
        }
        return;
    }

    //
    //  Initialize & allocate per-thread data structs...
    //
    //  FIXME:  we don't really want to do all this allocation every cycle
    //
    ROBStation **commit_list[SMT_MAX_THREADS];   // per-thread commit candidates
    unsigned clist_num[SMT_MAX_THREADS];         // # of candidates listed
    unsigned clist_idx[SMT_MAX_THREADS];         // next candidate to consume
    bool list_done[SMT_MAX_THREADS];
    unsigned num_list_done = num_finished_threads;
    unsigned completed[SMT_MAX_THREADS];
    unsigned total_completed = 0;

    for (int i = 0; i < number_of_threads; ++i) {
        clist_num[i] = 0;
        list_done[i] = finished_thread[i];
        clist_idx[i] = 0;
        reason_thread[i] = COMMIT_CAUSE_NOT_SET;
        detail_thread[i] = 0;
        seq_thread[i] = 0;
        completed[i] = 0;
        committed_thread[i] = 0;

        if (!finished_thread[i]) {
            // allocate storage for the max number of insts we could
            // commit in a cycle
            commit_list[i] = new ROBStation *[commit_width];
        } else {
            commit_list[i] = 0;
        }
    }

    unsigned num_eligible = 0;

    //
    //  put commitable instructions into the lists...
    //
    //  We walk the ROB, filling each per-thread list
    //
    //  We also keep track of the first non-commitable inst for each thread
    //  and overall, and also squash instructions we encounter along the way
    //
    bool done = false;
    for (ROBStation *rs = ROB.head(); (rs != NULL) && !done;
         rs = ROB.next(rs))
    {
        unsigned thread = rs->thread_number;

        //
        //  count the number of instruction ready to commit
        //
        if (!finished_thread[thread] && rs->completed) {
            ++completed[thread];
            ++total_completed;
        }

        reason = COMMIT_CAUSE_NOT_SET;

        //
        //  ignore instructions from threads that we're done listing...
        //
        if (!list_done[thread]) {
            //
            //  If we're still looking at a thread and run across a squashed
            //  instruction, then blow it away...
            //
            if (rs->squashed) {
                if (ptrace)
                    ptrace->deleteInst(rs->inst);
                remove_ROB_element(rs);

                // go look at next instruction
                continue;
            }

            //
            //  Add potentially-eligible instructions to the list
            //  because things change as we commit, we'll double-check each
            //  instruction as it comes up...
            //
            if (eligible_to_commit(rs, &reason)) {
                commit_list[thread][clist_num[thread]++] = rs;
                ++num_eligible;

                // hit per-thread commit bandwidth: stop listing this thread
                if (clist_num[thread] == commit_width) {
                    list_done[thread] = true;
                    ++num_list_done;

                    if (num_list_done == number_of_threads)
                        done = true;
                }
            } else {
                DynInst *inst = rs->inst;
                //
                //  An ineligible instruction means that we're done
                //  looking at this thread... determine the commit-end
                //  cause for _this_thread_ in case we need it later
                //
                if (rs->completed) {
                    //
                    //  For completed but not eligible instructions,
                    //  we'll use the "reason" determined by the
                    //  eligible_to_commit() function
                    //
                    reason_thread[thread] = reason;
                } else if (inst->isLoad() && !rs->eaCompPending
                           && rs->issued) {
                    // It's a load that's been issued from the LSQ,
                    // so it's a memory stall... (not necessarily a miss,
                    // despite the name)
                    reason_thread[thread] = COMMIT_DMISS;
                    detail_thread[thread] = rs->mem_result;
                } else if (inst->isMemBarrier()) {
                    reason_thread[thread] = COMMIT_MEMBAR;
                } else {
                    // anything else: blame it on the function unit
                    reason_thread[thread] = COMMIT_FU;
                    detail_thread[thread] = inst->opClass();
                }

                if (clist_num[thread] == 0) {
                    // Special checks when the oldest instruction on
                    // the thread is not committable: it might be an
                    // instruction that has to wait until this point
                    // to issue.
                    // Uncached loads must wait to guarantee they're
                    // non-speculative.  Memory barriers must wait to
                    // guarantee that all previous loads have
                    // completed.  (MBs also must wait for the store
                    // buffer to empty to guarantee all previous
                    // stores have completed as well.)
                    bool uc_load = (inst->isLoad()
                                    && (inst->mem_req_flags & UNCACHEABLE));
                    bool ready_mb = (inst->isMemBarrier()
                                     && storebuffer->count(thread) == 0);

                    if ((uc_load || ready_mb)
                        && rs->lsq_entry.notnull()
                        && !rs->lsq_entry->queued
                        && rs->lsq_entry->ops_ready()) {
                        LSQ->ready_list_enqueue(rs->lsq_entry);
                    }
                }

                //
                //  Make a note of the first uncommitable inst...
                //
                //  NOTE(review): seq_overall is initialized to
                //  InstSeqNum(-1) above, so the (seq_overall == 0) arm
                //  looks unreachable; the (seq_overall > rs->seq) arm
                //  carries the logic since -1 wraps to the max value.
                //  Verify before cleaning up.
                //
                if ((seq_overall == 0) || (seq_overall > rs->seq)) {
                    seq_overall = rs->seq;
                    reason_overall = reason_thread[thread];
                    blame_overall  = thread;
                    detail_overall = detail_thread[thread];
                }

                list_done[thread] = true;
                ++num_list_done;

                // nothing listable at all: this thread is fully finished
                if (clist_num[thread] == 0) {
                    finished_thread[thread] = true;
                    ++num_finished_threads;
                }

                if (num_list_done == number_of_threads)
                    done = true;
            }
        }
    }

    //
    //  We'll blame the oldest uncommitted instruction by default
    //
    reason = reason_overall;
    blame  = blame_overall;
    detail = detail_overall;

    if (num_eligible == 0) {
        //
        //  Nothing can commit this cycle: assign blame for the stall and
        //  bail out early.
        //
        switch(reason) {
          case COMMIT_BW:
          case COMMIT_NO_INSN:
          case COMMIT_STOREBUF:
          case COMMIT_MEMBAR:
            break;
          case COMMIT_FU:
            floss_state.commit_fu[0][0] = OpClass(detail);
            break;
          case COMMIT_DMISS:
            floss_state.commit_mem_result[0] = MemAccessResult(detail);
            break;
          case COMMIT_CAUSE_NOT_SET:
            done = true;  // dummy
            break;
          default:
            fatal("commit causes screwed up");
        }
        floss_state.commit_end_cause[0] = reason;

        //
        //  De-allocate memory
        //
        for (int i = 0; i < number_of_threads; ++i)
            if (commit_list[i])
                delete [] commit_list[i];

        return;
    }

    //
    //
    //
    if (prioritized_commit) {
        //
        //  Choose the order of threads to commit
        //
        fatal("prioritized commit isn't implemented, yet...");
    }

    //
    //  Prepare to enter the commit loop...
    //
    //  ... do commit-model specific tasks
    //

    //
    //  Choose the thread we're commiting by looking at the oldest
    //  eligible instruction
    //
    unsigned pt_thread = 0;
    if (commit_model == COMMIT_MODEL_PERTHREAD)
        pt_thread = oldest_inst(commit_list, clist_num, clist_idx);

    //
    //  Increment the RR value, looking for a thread we can commit from
    //
    if (commit_model == COMMIT_MODEL_RR) {
        do {
            rr_commit_last_thread
                = (rr_commit_last_thread +1) % number_of_threads;
        } while (finished_thread[rr_commit_last_thread]);

        //
        //  Mark all remaining threads as done...
        //
        for (int i = 0; i < number_of_threads; ++i)
            finished_thread[i] = true;

        num_finished_threads = number_of_threads - 1;
        finished_thread[rr_commit_last_thread] = false;
    }

    //
    //  Main commit loop
    //
    done = false;
    do {
        ROBStation *rs = 0;
        unsigned thread = 0;

        //
        //  Choose the instruction to commit
        //
        switch(commit_model) {
          case COMMIT_MODEL_SMT:
            thread = oldest_inst(commit_list, clist_num, clist_idx);
            rs = commit_list[thread][clist_idx[thread]++];
            break;

          case COMMIT_MODEL_SSCALAR:
            thread = oldest_inst(commit_list, clist_num, clist_idx);
            rs = commit_list[thread][clist_idx[thread]++];

            // if this inst is younger than the oldest non-commitable
            // inst, we are done.
            if (rs->seq > seq_overall) {
                rs = 0;
                done = true;
                reason_thread[thread] = COMMIT_NO_INSN;
            }
            break;

          case COMMIT_MODEL_PERTHREAD:
            thread = pt_thread;
            if (clist_num[thread] - clist_idx[thread]) {
                rs = commit_list[thread][clist_idx[thread]++];
            } else {
                rs = 0;
                done = true;
                reason_thread[thread] = COMMIT_NO_INSN;
            }
            break;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -