⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 profile.c

📁 这个是LINUX下的GDB调度工具的源码
💻 C
📖 第 1 页 / 共 4 页
字号:
	}
      else
	{
	  SET_H_GR (q->regnum,
		    CACHE_RETURN_DATA (cache, slot, q->address, SI, 4));
	}
      break;
    case 8:
      /* 8-byte loads: write a register double (FR pair or GR pair).  */
      if (q->regtype == REGTYPE_FR)
	{
	  SET_H_FR_DOUBLE (q->regnum,
			   CACHE_RETURN_DATA (cache, slot, q->address, DF, 8));
	}
      else
	{
	  SET_H_GR_DOUBLE (q->regnum,
			   CACHE_RETURN_DATA (cache, slot, q->address, DI, 8));
	}
      break;
    case 16:
      /* 16-byte loads: hand the raw buffer address to the quad-register
	 set handlers.  */
      if (q->regtype == REGTYPE_FR)
	frvbf_h_fr_quad_set_handler (current_cpu, q->regnum,
				     CACHE_RETURN_DATA_ADDRESS (cache, slot,
								q->address,
								16));
      else
	frvbf_h_gr_quad_set_handler (current_cpu, q->regnum,
				     CACHE_RETURN_DATA_ADDRESS (cache, slot,
								q->address,
								16));
      break;
    default:
      /* No other load sizes are queued; anything else is a simulator bug.  */
      abort ();
    }
}

/* Return 1 if the queued cache request Q has completed, 0 otherwise.
   Loads complete once the data has arrived in either cache pipeline's
   return buffer, at which point the data is copied into the target
   register(s).  Flushes complete once the cache reports the data flushed.
   All other request kinds are considered complete as soon as they have
   been submitted.  An inactive request, or one whose submission delay
   (Q->cycles) has not yet elapsed, is never complete.  */
static int
request_complete (SIM_CPU *cpu, CACHE_QUEUE_ELEMENT *q)
{
  FRV_CACHE* cache;
  if (! q->active || q->cycles > 0)
    return 0;

  cache = CPU_DATA_CACHE (cpu);
  switch (q->request)
    {
    case cache_load:
      /* For loads, we must wait until the data is returned from the cache.
	 Check both pipelines (0 and 1).  */
      if (frv_cache_data_in_buffer (cache, 0, q->address, q->reqno))
	{
	  copy_load_data (cpu, cache, 0, q);
	  return 1;
	}
      if (frv_cache_data_in_buffer (cache, 1, q->address, q->reqno))
	{
	  copy_load_data (cpu, cache, 1, q);
	  return 1;
	}
      break;
    case cache_flush:
      /* We must wait until the data is flushed.  */
      if (frv_cache_data_flushed (cache, 0, q->address, q->reqno))
	return 1;
      if (frv_cache_data_flushed (cache, 1, q->address, q->reqno))
	return 1;
      break;
    default:
      /* All other requests are complete once they've been made.  */
      return 1;
    }
  return 0;
}

/* Run the insn and data caches through the given number of cycles, taking
   note of load requests which are fulfilled as a result.  */
static void
run_caches (SIM_CPU *cpu, int cycles)
{
  FRV_CACHE* data_cache = CPU_DATA_CACHE (cpu);
  FRV_CACHE* insn_cache = CPU_INSN_CACHE (cpu);
  int i;
  /* For each cycle, run the caches, noting which requests have been fulfilled
     and submitting new requests on their designated cycles.  */
  for (i = 0; i < cycles; ++i)
    {
      int j;
      /* Run the caches through 1 cycle.  */
      frv_cache_run (data_cache, 1);
      frv_cache_run (insn_cache, 1);

      /* Note whether prefetched insn data has been loaded yet; once it is
	 in the buffer, clear the pending request number.  */
      for (j = LS; j < FRV_CACHE_PIPELINES; ++j)
	{
	  if (frv_insn_fetch_buffer[j].reqno != NO_REQNO
	      && frv_cache_data_in_buffer (insn_cache, j,
					   frv_insn_fetch_buffer[j].address,
					   frv_insn_fetch_buffer[j].reqno))
	    frv_insn_fetch_buffer[j].reqno = NO_REQNO;
	}

      /* Check to see which requests have been satisfied and which should
	 be submitted now.  */
      for (j = 0; j < cache_queue.ix; ++j)
	{
	  CACHE_QUEUE_ELEMENT *q = & cache_queue.q[j];
	  if (! q->active)
	    continue;

	  /* If a load has been satisfied, complete the operation and remove it
	     from the queue.  Back up J since removal shifts the queue down.  */
	  if (request_complete (cpu, q))
	    {
	      remove_cache_queue_element (cpu, j);
	      --j;
	      continue;
	    }

	  /* Decrease the cycle count of each queued request.
	     Submit a request for each queued request whose cycle count has
	     become zero.  */
	  --q->cycles;
	  if (q->cycles == 0)
	    submit_cache_request (q);
	}
    }
}

/* Apply the per-register busy-time adjustments accumulated for this VLIW
   insn to the FR and ACC busy counters, then implicitly discard them
   (they are reset by frvbf_model_insn_before).  */
static void
apply_latency_adjustments (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* update the latencies of the registers.  */
  int *fr  = ps->fr_busy;
  int *acc = ps->acc_busy;
  for (i = 0; i < 64; ++i)
    {
      if (ps->fr_busy_adjust[i] > 0)
	*fr -= ps->fr_busy_adjust[i]; /* OK if it goes negative.  */
      if (ps->acc_busy_adjust[i] > 0)
	*acc -= ps->acc_busy_adjust[i]; /* OK if it goes negative.  */
      ++fr;
      ++acc;
    }
}

/* Account for the number of cycles which have just passed in the latency of
   various system elements.  Works for negative cycles too so that latency
   can be extended in the case of insn fetch latency.
   If negative or zero, then no adjustment is necessary.  */
static void
update_latencies (SIM_CPU *cpu, int cycles)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* update the latencies of the registers.  */
  int *fdiv;
  int *fsqrt;
  int *idiv;
  int *flt;
  int *media;
  int *ccr;
  int *gr  = ps->gr_busy;
  int *fr  = ps->fr_busy;
  int *acc = ps->acc_busy;
  int *spr;
  /* This loop handles GR, FR and ACC registers.  */
  for (i = 0; i < 64; ++i)
    {
      if (*gr <= cycles)
	{
	  *gr = 0;
	  reset_gr_flags (cpu, i);
	}
      else
	*gr -= cycles;
      /* If the busy drops to 0, then mark the register as
	 "not in use".  */
      if (*fr <= cycles)
	{
	  int *fr_lat = ps->fr_latency + i;
	  *fr = 0;
	  ps->fr_busy_adjust[i] = 0;
	  /* Only clear flags if this register has no target latency.  */
	  if (*fr_lat == 0)
	    reset_fr_flags (cpu, i);
	}
      else
	*fr -= cycles;
      /* If the busy drops to 0, then mark the register as
	 "not in use".  */
      if (*acc <= cycles)
	{
	  int *acc_lat = ps->acc_latency + i;
	  *acc = 0;
	  ps->acc_busy_adjust[i] = 0;
	  /* Only clear flags if this register has no target latency.  */
	  if (*acc_lat == 0)
	    reset_acc_flags (cpu, i);
	}
      else
	*acc -= cycles;
      ++gr;
      ++fr;
      ++acc;
    }
  /* This loop handles CCR registers.  */
  ccr = ps->ccr_busy;
  for (i = 0; i < 8; ++i)
    {
      if (*ccr <= cycles)
	{
	  *ccr = 0;
	  reset_cc_flags (cpu, i);
	}
      else
	*ccr -= cycles;
      ++ccr;
    }
  /* This loop handles SPR registers.  */
  spr = ps->spr_busy;
  for (i = 0; i < 4096; ++i)
    {
      if (*spr <= cycles)
	*spr = 0;
      else
	*spr -= cycles;
      ++spr;
    }
  /* This loop handles resources.  */
  idiv = ps->idiv_busy;
  fdiv = ps->fdiv_busy;
  fsqrt = ps->fsqrt_busy;
  for (i = 0; i < 2; ++i)
    {
      *idiv = (*idiv <= cycles) ? 0 : (*idiv - cycles);
      *fdiv = (*fdiv <= cycles) ? 0 : (*fdiv - cycles);
      *fsqrt = (*fsqrt <= cycles) ? 0 : (*fsqrt - cycles);
      ++idiv;
      ++fdiv;
      ++fsqrt;
    }
  /* Float and media units can occur in 4 slots on some machines.  */
  flt = ps->float_busy;
  media = ps->media_busy;
  for (i = 0; i < 4; ++i)
    {
      *flt = (*flt <= cycles) ? 0 : (*flt - cycles);
      *media = (*media <= cycles) ? 0 : (*media - cycles);
      ++flt;
      ++media;
    }
}

/* Print information about the wait for the given number of cycles.
   Only emits trace output when insn tracing is enabled and CYCLES > 0.  */
void
frv_model_trace_wait_cycles (SIM_CPU *cpu, int cycles, const char *hazard_name)
{
  if (TRACE_INSN_P (cpu) && cycles > 0)
    {
      SIM_DESC sd = CPU_STATE (cpu);
      trace_printf (sd, cpu, "**** %s wait %d cycles ***\n",
		    hazard_name, cycles);
    }
}

/* Trace the current VLIW wait using the global hazard_name buffer
   (declared elsewhere in this file).  */
void
trace_vliw_wait_cycles (SIM_CPU *cpu)
{
  if (TRACE_INSN_P (cpu))
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      frv_model_trace_wait_cycles (cpu, ps->vliw_wait, hazard_name);
    }
}

/* Wait for the given number of cycles: age all latencies, run the caches
   for the same number of cycles, and add to the total cycle count.  */
void
frv_model_advance_cycles (SIM_CPU *cpu, int cycles)
{
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  update_latencies (cpu, cycles);
  run_caches (cpu, cycles);
  PROFILE_MODEL_TOTAL_CYCLES (p) += cycles;
}

/* Stall for any pending VLIW resource wait, and reduce the pending
   load-stall count by the cycles just waited (clamped at zero).  */
void
handle_resource_wait (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  if (ps->vliw_wait != 0)
    frv_model_advance_cycles (cpu, ps->vliw_wait);
  if (ps->vliw_load_stall > ps->vliw_wait)
    ps->vliw_load_stall -= ps->vliw_wait;
  else
    ps->vliw_load_stall = 0;
}

/* Account for the number of cycles until these resources will be available
   again.  Transfers each non-zero pending target latency into the
   corresponding busy counter, then clears the latency.  */
static void
update_target_latencies (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* update the latencies of the registers.  */
  int *ccr_lat;
  int *gr_lat  = ps->gr_latency;
  int *fr_lat  = ps->fr_latency;
  int *acc_lat = ps->acc_latency;
  int *spr_lat;
  int *ccr;
  int *gr = ps->gr_busy;
  int  *fr = ps->fr_busy;
  int  *acc = ps->acc_busy;
  int *spr;
  /* This loop handles GR, FR and ACC registers.  */
  for (i = 0; i < 64; ++i)
    {
      if (*gr_lat)
	{
	  *gr = *gr_lat;
	  *gr_lat = 0;
	}
      if (*fr_lat)
	{
	  *fr = *fr_lat;
	  *fr_lat = 0;
	}
      if (*acc_lat)
	{
	  *acc = *acc_lat;
	  *acc_lat = 0;
	}
      ++gr; ++gr_lat;
      ++fr; ++fr_lat;
      ++acc; ++acc_lat;
    }
  /* This loop handles CCR registers.  */
  ccr = ps->ccr_busy;
  ccr_lat = ps->ccr_latency;
  for (i = 0; i < 8; ++i)
    {
      if (*ccr_lat)
	{
	  *ccr = *ccr_lat;
	  *ccr_lat = 0;
	}
      ++ccr; ++ccr_lat;
    }
  /* This loop handles SPR registers.  */
  spr = ps->spr_busy;
  spr_lat = ps->spr_latency;
  for (i = 0; i < 4096; ++i)
    {
      if (*spr_lat)
	{
	  *spr = *spr_lat;
	  *spr_lat = 0;
	}
      ++spr; ++spr_lat;
    }
}

/* Run the caches until all pending cache flushes for the CPU's current
   load address are complete, advancing one cycle at a time.  */
static void
wait_for_flush (SIM_CPU *cpu)
{
  SI address = CPU_LOAD_ADDRESS (cpu);
  int wait = 0;
  while (flush_pending_for_address (cpu, address))
    {
      frv_model_advance_cycles (cpu, 1);
      ++wait;
    }
  if (TRACE_INSN_P (cpu) && wait)
    {
      /* NOTE(review): %p expects a void * but ADDRESS is an SI integer —
	 undefined behavior per ISO C; should be a fixed-width integer
	 conversion (e.g. 0x%x with a cast).  Also assumes hazard_name is
	 large enough — TODO confirm its declared size.  */
      sprintf (hazard_name, "Data cache flush address %p:", address);
      frv_model_trace_wait_cycles (cpu, wait, hazard_name);
    }
}

/* Initialize cycle counting for an insn.
   FIRST_P is non-zero if this is the first insn in a set of parallel
   insns.  */
void
frvbf_model_insn_before (SIM_CPU *cpu, int first_p)
{
  SIM_DESC sd = CPU_STATE (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->vliw_wait = 0;
  ps->post_wait = 0;
  memset (ps->fr_busy_adjust, 0, sizeof (ps->fr_busy_adjust));
  memset (ps->acc_busy_adjust, 0, sizeof (ps->acc_busy_adjust));
  if (first_p)
    {
      /* Starting a new VLIW insn: reset its per-VLIW accounting.  */
      ps->vliw_insns++;
      ps->vliw_cycles = 0;
      ps->vliw_branch_taken = 0;
      ps->vliw_load_stall = 0;
    }
  /* Dispatch to the machine-specific model hook.  */
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      fr400_model_insn_before (cpu, first_p);
      break;
    case bfd_mach_fr500:
      fr500_model_insn_before (cpu, first_p);
      break;
    case bfd_mach_fr550:
      fr550_model_insn_before (cpu, first_p);
      break;
    default:
      break;
    }
  if (first_p)
    wait_for_flush (cpu);
}

/* Record the cycles computed for an insn.
   LAST_P is non-zero if this is the last insn in a set of parallel insns,
   and we update the total cycle count.
   CYCLES is the cycle count of the insn.  */
void
frvbf_model_insn_after (SIM_CPU *cpu, int last_p, int cycles)
{
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  SIM_DESC sd = CPU_STATE (cpu);
  PROFILE_MODEL_CUR_INSN_CYCLES (p) = cycles;
  /* The number of cycles for a VLIW insn is the maximum number of cycles
     used by any individual insn within it.  */
  if (cycles > ps->vliw_cycles)
    ps->vliw_cycles = cycles;
  if (last_p)
    {
      /*  This is the last insn in a VLIW insn.  */
      struct frv_interrupt_timer *timer = & frv_interrupt_state.timer;
      activate_cache_requests (cpu); /* before advancing cycles.  */
      apply_latency_adjustments (cpu); /* must go first.  */
      update_target_latencies (cpu); /* must go next.  */
      frv_model_advance_cycles (cpu, ps->vliw_cycles);
      PROFILE_MODEL_LOAD_STALL_CYCLES (p) += ps->vliw_load_stall;
      /* Check the interrupt timer.  cycles contains the total cycle count.  */
      if (timer->enabled)
	{
	  cycles = PROFILE_MODEL_TOTAL_CYCLES (p);
	  if (timer->current % timer->value
	      + (cycles - timer->current) >= timer->value)
	    frv_queue_external_interrupt (cpu, timer->interrupt);
	  timer->current = cycles;
	}
      ps->past_first_p = 0; /* Next one will be the first in a new VLIW.  */
      ps->branch_address = -1;
    }
  else
    ps->past_first_p = 1;
  /* Dispatch to the machine-specific model hook.  */
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      fr400_model_insn_after (cpu, last_p, cycles);
      break;
    case bfd_mach_fr500:
      fr500_model_insn_after (cpu, last_p, cycles);
      break;
    case bfd_mach_fr550:
      fr550_model_insn_after (cpu, last_p, cycles);
      break;
    default:
      break;
    }
}

/* NOTE(review): declared USI but falls off the end without a return;
   using the return value is undefined behavior — presumably callers
   ignore it, but this should be confirmed and the function made void
   (or given an explicit return) in a behavior-changing fix.  */
USI
frvbf_model_branch (SIM_CPU *current_cpu, PCADDR target, int hint)
{
  /* Record the hint and branch address for use in profiling.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
  ps->branch_hint = hint;
  ps->branch_address = target;
}

/* Top up the latency of the given GR by the given number of cycles.
   A negative OUT_GR means "no register" and is ignored; the latency is
   only raised, never lowered.  */
void
update_GR_latency (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;
      if (gr[out_GR] < cycles)
	gr[out_GR] = cycles;
    }
}

/* Decrease the busy count of the given GR by the given number of cycles.
   A negative IN_GR means "no register" and is ignored.  */
void
decrease_GR_busy (SIM_CPU *cpu, INT in_GR, int cycles)
{
  if (in_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_busy;
      gr[in_GR] -= cycles;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -