
commit.cc

A C++-based processor simulation platform for Linux that models the processor pipeline. This file, commit.cc, implements the commit (retirement) stage of the FullCPU pipeline model.
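Before reading the source below, it may help to see the core idea in isolation. The following is a minimal, self-contained sketch of what a commit stage does each cycle: retire completed instructions in program order from the head of the reorder buffer (ROB), up to a fixed bandwidth, squashing mis-speculated entries without counting them. The names here (RobEntry, commit_width) are illustrative stand-ins, not the platform's actual types.

    #include <deque>
    #include <cstdio>

    struct RobEntry {
        unsigned seq;        // program-order sequence number
        bool completed;      // has the instruction finished executing?
        bool mispeculated;   // fetched down a wrong path?
    };

    int main() {
        const unsigned commit_width = 4;   // max retirements per cycle
        std::deque<RobEntry> rob = {
            {1, true, false}, {2, true, false},
            {3, false, false}, {4, true, false}
        };

        unsigned committed = 0;
        // Retirement is in-order: commit stalls at the first incomplete entry.
        while (committed < commit_width && !rob.empty() &&
               rob.front().completed) {
            RobEntry e = rob.front();
            rob.pop_front();
            if (e.mispeculated)
                continue;                  // squash; doesn't count as committed
            ++committed;
            std::printf("committed seq %u\n", e.seq);
        }
        std::printf("%u committed this cycle\n", committed);  // prints 2
        return 0;
    }

In-order retirement is what makes exceptions precise: nothing becomes architecturally visible until every older instruction has completed. The real code below adds the multithreaded (SMT) dimension on top of this loop: per-thread commit lists, a thread-selection policy, and bookkeeping that assigns "blame" for each cycle's lost commit bandwidth.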
          case COMMIT_MODEL_RR:
            thread = rr_commit_last_thread;
            if (clist_num[thread] - clist_idx[thread])
                rs = commit_list[thread][clist_idx[thread]++];
            else
                reason_thread[thread] = COMMIT_NO_INSN;
            break;

          default:
            fatal("commit model screwed up");
            break;
        };

        //
        //  If we have an instruction to commit, do it...
        //
        if (rs) {
            --num_eligible;

            if (eligible_to_commit(rs, &reason)) {
                if (rs->inst->spec_mode == 0) {
                    commit_one_inst(rs);
                    ++committed;
                    ++committed_thread[thread];
                    crash_counter = 0;
                } else {
                    //
                    //  It is possible for completed, mis-speculated
                    //  instructions to arrive here between the time
                    //  a mispredicted branch is written-back and the time
                    //  the recovery event occurs.  In this case, a mis-
                    //  speculated instruction would not have been
                    //  squashed...  if this happens, squash it now...
                    //  --> this doesn't count as a committed instruction
                    //
                    rs->squash();
                    if (ptrace)
                        ptrace->deleteInst(rs->inst);
                    remove_ROB_element(rs);
                }

                //
                //  Check ending conditions
                //
                if (committed == commit_width) {
                    reason = COMMIT_BW;
                    blame = thread;
                    done = true;
                } else if (num_eligible == 0) {
                    reason = COMMIT_NO_INSN;
                    blame = thread;
                    done = true;
                }
            } else {
                //  We can't commit this instruction... reason is set in
                //  eligible_to_commit(), so just set thread
                blame = thread;

                // we're done with this thread
                clist_idx[thread] = clist_num[thread];
            }
        } else {
            //  use the default blame info...
            finished_thread[thread] = true;
            ++num_finished_threads;
            if (num_finished_threads == number_of_threads)
                done = true;
        }

        //
        //  Check to see if we've examined all eligible instructions
        //  in this thread...
        //
        if (clist_idx[thread] == clist_num[thread]) {
            finished_thread[thread] = true;
            ++num_finished_threads;
            if (num_finished_threads == number_of_threads) {
                done = true;
            }
        }
    } while (!done);

    //
    //  Assign blame
    //
    switch (reason) {
      case COMMIT_BW:
        if (total_completed > commit_width) {
            //  we want to count the number of instructions that could
            //  have committed if we hadn't run out of bandwidth
            ++commit_eligible_samples;
            for (int t = 0; t < number_of_threads; ++t) {
                assert(completed[t] >= committed_thread[t]);
                unsigned uncommitted = completed[t] - committed_thread[t];
                commit_eligible[t] += uncommitted;
                commit_bwlimit_stat[t].sample(uncommitted);
            }
        }
        break;

      case COMMIT_NO_INSN:
      case COMMIT_STOREBUF:
      case COMMIT_MEMBAR:
        break;

      case COMMIT_FU:
        floss_state.commit_fu[0][0] = OpClass(detail);
        break;

      case COMMIT_DMISS:
        floss_state.commit_mem_result[0] = MemAccessResult(detail);
        break;

      case COMMIT_CAUSE_NOT_SET:
        done = true;  // dummy
        break;

      default:
        fatal("commit causes screwed up");
    }

    floss_state.commit_end_cause[0] = reason;

    //
    //  De-allocate memory
    //
    for (int i = 0; i < number_of_threads; ++i)
        if (commit_list[i])
            delete [] commit_list[i];

    n_committed_dist.sample(committed);

    if (floss_state.commit_end_cause[0] == COMMIT_CAUSE_NOT_SET) {
        // very rarely we can have a queue-full fetch problem even when
        // we committed the full B/W of instructions, or all of the
        // entries in the IQ... maybe because LSQ is full??
        floss_state.commit_end_cause[0] =
            (committed == commit_width) ? COMMIT_BW : COMMIT_NO_INSN;

        // we arbitrarily attribute these to thread 0; should be factored
        // out when interpreting results
        floss_state.commit_end_thread = 0;
    }
}

bool
FullCPU::eligible_to_commit(ROBStation *rs, enum CommitEndCause *reason)
{
    bool storebuf_stall = false;

    // To be ready to commit:
    //  - ROB entry must be complete
    //  - for loads/stores, LSQ entry must be complete
    //  - for stores, a store buffer entry must be available
    //  - for "leading" thread of reg-file-checking redundant pair,
    //      reg check buffer entry must be available
    if (!rs->completed)
        return false;

    if (rs->inst->isStore()) {
        storebuf_stall = storebuffer->full();
        if (*reason == COMMIT_CAUSE_NOT_SET && storebuf_stall)
            *reason = COMMIT_STOREBUF;
    }

    //
    //  If everything is OK for committing this instruction...
    //
    if (!storebuf_stall)
        return true;

    return false;
}

unsigned
FullCPU::oldest_inst(ROBStation ***clist, unsigned *cnum, unsigned *cidx)
{
    unsigned rv = 0;
    InstSeqNum oldest_seq = 0;
    bool not_set = true;

    for (int t = 0; t < number_of_threads; ++t) {
        //
        //  Look at this thread if:
        //    (1)  The thread has a commit list
        //    (2)  There are still instructions in the list
        //
        if (clist[t] != 0 && cnum[t] > 0 && cidx[t] < cnum[t]
            && (oldest_seq > clist[t][cidx[t]]->seq || not_set))
        {
            rv = t;
            oldest_seq = clist[t][cidx[t]]->seq;
            not_set = false;
        }
    }

    return rv;
}

void
FullCPU::commit_one_inst(ROBStation *rs)
{
    DynInst *inst = rs->inst;
    bool store_inst = false;
    unsigned thread = rs->thread_number;

    //
    // Stores: commit to store buffer if an entry is available.
    // Skip stores that faulted and write prefetches that didn't
    // translate to a valid physical address.
    //
    if (inst->isStore() && inst->fault == No_Fault &&
        !(inst->isDataPrefetch() &&
          inst->phys_eff_addr == MemReq::inval_addr)) {
        assert(inst->phys_eff_addr != MemReq::inval_addr);

        if (inst->isCopy()) {
            storebuffer->addCopy(thread, inst->asid,
                                 dcacheInterface->getBlockSize(), inst->xc,
                                 inst->eff_addr, inst->phys_eff_addr,
                                 inst->copySrcEffAddr,
                                 inst->copySrcPhysEffAddr,
                                 inst->mem_req_flags,
                                 inst->PC, rs->seq,
                                 inst->fetch_seq, rs->queue_num);
        } else {
            storebuffer->add(thread, inst->asid, inst->store_size,
                             inst->store_data,
                             inst->xc,
                             inst->eff_addr, inst->phys_eff_addr,
                             inst->mem_req_flags,
                             inst->PC, rs->seq,
                             inst->fetch_seq, rs->queue_num);
        }

        // remember to remove LSQ entry
        store_inst = true;

        //  check for bogus store size
        assert(inst->store_size <= 64);
    }

    if (rs->inst->isWriteBarrier()) {
        storebuffer->addWriteBarrier(thread);
    }

    // Faulting instruction: we are holding off dispatch of the fault
    // handler waiting for this to commit.  Notify dispatch that we've
    // committed the instruction so it can continue.
    if (inst->fault != No_Fault) {
        assert(fetch_fault_count[thread] == 1);
        new FaultHandlerDelayEvent(this, thread,
                                   curTick + cycles(fault_handler_delay));
    }

    // if we're committing a branch, update predictor state...
    // if we're using leading-thread prediction, put the
    // outcome in the queue too
    if (rs->inst->isControl()) {
        branch_pred->update(thread, inst->PC, inst->Next_PC,
                            inst->Next_PC != inst->PC + sizeof(MachInst),
                            /* pred taken? */
                            inst->Pred_PC != inst->PC + sizeof(MachInst),
                            /* correct pred? */
                            inst->Pred_PC == inst->Next_PC,
                            rs->inst->staticInst, &inst->dir_update);
    }

    thread_info[thread].commit_counter++;

    // track last committed PC for sampling stats
    commitPC[thread] = inst->PC;

    traceFunctions(inst->PC);

    update_com_inst_stats(inst);

    // invalidate ROB operation instance
    rs->tag++;

    if (DTRACE(Pipeline)) {
        string s;
        inst->dump(s);
        DPRINTF(Pipeline, "Commit %s\n", s);
    }

    //
    //  Special Handling: When instruction commits
    //  before branch recovery is done...
    //
    //  We need to tell the event handler not to try
    //  to update the now non-existent ROB entry.
    //
    //  Note that we're OK if there is no event here,
    //  as long as there is _some_ event pending
    //
    if (rs->inst->recover_inst) {
        assert((rs->recovery_event != NULL) ||
               thread_info[thread].recovery_event_pending);

        if (rs->recovery_event) {
            //  This recovery event will still happen...
            //  we just have to tell it that it doesn't need to worry
            //  about updating this ROB entry
            rs->recovery_event->invalidate_branch_entry();
            rs->recovery_event = 0;  // to make remove_ROB_entry() happy
        }
    }

    //
    //  Store Instructions: Remove LSQ portion of store
    //
    if (store_inst)
        LSQ->squash(rs->lsq_entry);

    if (ptrace) {
        ptrace->moveInst(rs->inst, PipeTrace::Commit, 0, 0, 0);
        ptrace->deleteInst(rs->inst);
    }

    // update head entry of IQ
    remove_ROB_element(rs);

    //
    // check for instruction-count-based events
    //
    /**
     * @todo com_inst is used as a Stat && in other ways, like here.
     * Needs fix in case com_inst becomes binned...
     */
    comInstEventQueue[thread]->serviceEvents(com_inst[thread]);
    comLoadEventQueue[thread]->serviceEvents(com_loads[thread]);
}

void
FullCPU::update_com_inst_stats(DynInst *inst)
{
    unsigned thread = inst->thread_number;

    //
    //  Pick off the software prefetches
    //
#ifdef TARGET_ALPHA
    if (inst->isDataPrefetch()) {
        stat_com_swp[thread]++;
    } else {
        com_inst[thread]++;
        stat_com_inst[thread]++;
    }
#else
    com_inst[thread]++;
    stat_com_inst[thread]++;
#endif

    //
    //  Control Instructions
    //
    if (inst->isControl())
        stat_com_branches[thread]++;

    //
    //  Memory references
    //
    if (inst->isMemRef()) {
        stat_com_refs[thread]++;

        if (inst->isLoad()) {
            com_loads[thread]++;
            stat_com_loads[thread]++;
        }
    }

    if (inst->isMemBarrier()) {
        stat_com_membars[thread]++;
    }
}

// register commit-stage statistics
void
FullCPU::commitRegStats()
{
    using namespace Stats;

    n_committed_dist
        .init(0, commit_width, 1)
        .name(name() + ".COM:committed_per_cycle")
        .desc("Number of insts committed each cycle")
        .flags(pdf)
        ;

    //
    //  Commit-Eligible instructions...
    //
    //  -> The number of instructions eligible to commit in those
    //  cycles where we reached our commit BW limit (less the number
    //  actually committed)
    //
    //  -> The average value is computed over ALL CYCLES... not just
    //  the BW limited cycles
    //
    //  -> The standard deviation is computed only over cycles where
    //  we reached the BW limit
    //
    commit_eligible
        .init(number_of_threads)
        .name(name() + ".COM:bw_limited")
        .desc("number of insts not committed due to BW limits")
        .flags(total)
        ;

    commit_eligible_samples
        .name(name() + ".COM:bw_lim_events")
        .desc("number of cycles where commit BW limit reached")
        ;

    commit_bwlimit_stat
        .init(number_of_threads)
        .name(name() + ".COM:bw_lim_stdev")
        .desc("standard deviation of bw_lim_avg value")
        .precision(4)
        .flags(total)
        ;
}

void
FullCPU::commitRegFormulas()
{
    using namespace Stats;

    bw_lim_avg
        .name(name() + ".COM:bw_lim_avg")
        .desc("Avg number not committed in cycles BW limited")
        .precision(4)
        .flags(total)
        ;
    bw_lim_avg = commit_eligible / commit_eligible_samples;

    bw_lim_rate
        .name(name() + ".COM:bw_lim_rate")
        .desc("Average number not committed due to BW (over all cycles)")
        .precision(4)
        .flags(total)
        ;
    bw_lim_rate = commit_eligible / numCycles;
}
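The oldest-first policy implemented by FullCPU::oldest_inst() above is how single-threaded in-order commit generalizes to SMT: among the threads that still have unexamined entries in their commit lists, pick the one whose next instruction carries the lowest (oldest) global sequence number. Below is a standalone sketch of just that selection logic, using simplified stand-in types (std::vector of sequence numbers instead of the simulator's ROBStation arrays); it is an illustration, not the simulator's code.

    #include <cstdio>
    #include <vector>

    using InstSeqNum = unsigned long long;

    unsigned oldest_thread(const std::vector<std::vector<InstSeqNum>> &clist,
                           const std::vector<unsigned> &cidx)
    {
        unsigned rv = 0;
        InstSeqNum oldest_seq = 0;
        bool not_set = true;

        for (unsigned t = 0; t < clist.size(); ++t) {
            // consider a thread only if its list still has entries left
            if (cidx[t] < clist[t].size() &&
                (not_set || clist[t][cidx[t]] < oldest_seq)) {
                rv = t;
                oldest_seq = clist[t][cidx[t]];
                not_set = false;
            }
        }
        return rv;   // thread 0 wins ties, matching the original's scan order
    }

    int main() {
        // thread 0's next instruction is seq 17, thread 1's is seq 9
        std::vector<std::vector<InstSeqNum>> clist = {{17, 21}, {9, 30}};
        std::vector<unsigned> cidx = {0, 0};
        std::printf("commit from thread %u\n",
                    oldest_thread(clist, cidx));   // prints: thread 1
        return 0;
    }

Because sequence numbers are assigned at fetch in program order, "lowest sequence number" means "fetched earliest", so this policy approximates a global in-order commit across all hardware threads; the COMMIT_MODEL_RR case at the top of the listing trades that fairness for simple round-robin thread rotation instead.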
