
📄 reorg.c

📁 GCC compiler source code
💻 C
📖 Page 1 of 5
   PANNUL_P points to a non-zero value if we already know that we need
   to annul INSN.  If this routine determines that annulling is needed,
   it may set that value non-zero.

   PNEW_THREAD points to a location that is to receive the place at which
   execution should continue.  */

static rtx
steal_delay_list_from_target (insn, condition, seq, delay_list,
                              sets, needed, other_needed,
                              slots_to_fill, pslots_filled, pannul_p,
                              pnew_thread)
     rtx insn, condition;
     rtx seq;
     rtx delay_list;
     struct resources *sets, *needed, *other_needed;
     int slots_to_fill;
     int *pslots_filled;
     int *pannul_p;
     rtx *pnew_thread;
{
  rtx temp;
  int slots_remaining = slots_to_fill - *pslots_filled;
  int total_slots_filled = *pslots_filled;
  rtx new_delay_list = 0;
  int must_annul = *pannul_p;
  int i;

  /* We can't do anything if there are more delay slots in SEQ than we
     can handle, or if we don't know that it will be a taken branch.
     We know that it will be a taken branch if it is either an unconditional
     branch or a conditional branch with a stricter branch condition.

     Also, exit if the branch has more than one set, since then it is computing
     other results that can't be ignored, e.g. the HPPA mov&branch instruction.
     ??? It may be possible to move other sets into INSN in addition to
     moving the instructions in the delay slots.  */

  if (XVECLEN (seq, 0) - 1 > slots_remaining
      || ! condition_dominates_p (condition, XVECEXP (seq, 0, 0))
      || ! single_set (XVECEXP (seq, 0, 0)))
    return delay_list;

  for (i = 1; i < XVECLEN (seq, 0); i++)
    {
      rtx trial = XVECEXP (seq, 0, i);
      int flags;

      if (insn_references_resource_p (trial, sets, 0)
          || insn_sets_resource_p (trial, needed, 0)
          || insn_sets_resource_p (trial, sets, 0)
#ifdef HAVE_cc0
          /* If TRIAL sets CC0, we can't copy it, so we can't steal this
             delay list.  */
          || find_reg_note (trial, REG_CC_USER, NULL_RTX)
#endif
          /* If TRIAL is from the fallthrough code of an annulled branch insn
             in SEQ, we cannot use it.  */
          || (INSN_ANNULLED_BRANCH_P (XVECEXP (seq, 0, 0))
              && ! INSN_FROM_TARGET_P (trial)))
        return delay_list;

      /* If this insn was already done (usually in a previous delay slot),
         pretend we put it in our delay slot.  */
      if (redundant_insn (trial, insn, new_delay_list))
        continue;

      /* We will end up re-vectoring this branch, so compute flags
         based on jumping to the new label.  */
      flags = get_jump_flags (insn, JUMP_LABEL (XVECEXP (seq, 0, 0)));

      if (! must_annul
          && ((condition == const_true_rtx
               || (! insn_sets_resource_p (trial, other_needed, 0)
                   && ! may_trap_p (PATTERN (trial)))))
          ? eligible_for_delay (insn, total_slots_filled, trial, flags)
          : (must_annul = 1,
             eligible_for_annul_false (insn, total_slots_filled, trial, flags)))
        {
          temp = copy_rtx (trial);
          INSN_FROM_TARGET_P (temp) = 1;
          new_delay_list = add_to_delay_list (temp, new_delay_list);
          total_slots_filled++;

          if (--slots_remaining == 0)
            break;
        }
      else
        return delay_list;
    }

  /* Show the place to which we will be branching.  */
  *pnew_thread = next_active_insn (JUMP_LABEL (XVECEXP (seq, 0, 0)));

  /* Add any new insns to the delay list and update the count of the
     number of slots filled.  */
  *pslots_filled = total_slots_filled;
  *pannul_p = must_annul;

  if (delay_list == 0)
    return new_delay_list;

  for (temp = new_delay_list; temp; temp = XEXP (temp, 1))
    delay_list = add_to_delay_list (XEXP (temp, 0), delay_list);

  return delay_list;
}
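/* Aside -- not part of reorg.c.  The delay lists handled above are INSN_LIST
   chains: element 0 of each node is an insn and element 1 is the next node,
   which is why the closing loop advances with XEXP (temp, 1).  A minimal,
   self-contained model of that final merge loop, using a plain struct in
   place of rtl (the ordering policy of the real add_to_delay_list is
   ignored here):  */

#include <stdlib.h>

struct insn_list { int insn; struct insn_list *next; };

struct insn_list *
merge_delay_lists (const struct insn_list *from, struct insn_list *to)
{
  for (; from; from = from->next)                    /* like "temp = XEXP (temp, 1)" */
    {
      struct insn_list *node = malloc (sizeof *node);   /* error check omitted */
      node->insn = from->insn;                       /* like "XEXP (temp, 0)" */
      node->next = to;
      to = node;
    }
  return to;
}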
/* Similar to steal_delay_list_from_target except that SEQ is on the
   fallthrough path of INSN.  Here we only do something if the delay insn
   of SEQ is an unconditional branch.  In that case we steal its delay slot
   for INSN since unconditional branches are much easier to fill.  */

static rtx
steal_delay_list_from_fallthrough (insn, condition, seq,
                                   delay_list, sets, needed, other_needed,
                                   slots_to_fill, pslots_filled, pannul_p)
     rtx insn, condition;
     rtx seq;
     rtx delay_list;
     struct resources *sets, *needed, *other_needed;
     int slots_to_fill;
     int *pslots_filled;
     int *pannul_p;
{
  int i;
  int flags;

  flags = get_jump_flags (insn, JUMP_LABEL (insn));

  /* We can't do anything if SEQ's delay insn isn't an
     unconditional branch.  */

  if (! simplejump_p (XVECEXP (seq, 0, 0))
      && GET_CODE (PATTERN (XVECEXP (seq, 0, 0))) != RETURN)
    return delay_list;

  for (i = 1; i < XVECLEN (seq, 0); i++)
    {
      rtx trial = XVECEXP (seq, 0, i);

      /* If TRIAL sets CC0, stealing it will move it too far from the use
         of CC0.  */
      if (insn_references_resource_p (trial, sets, 0)
          || insn_sets_resource_p (trial, needed, 0)
          || insn_sets_resource_p (trial, sets, 0)
#ifdef HAVE_cc0
          || sets_cc0_p (PATTERN (trial))
#endif
          )
        break;

      /* If this insn was already done, we don't need it.  */
      if (redundant_insn (trial, insn, delay_list))
        {
          delete_from_delay_slot (trial);
          continue;
        }

      if (! *pannul_p
          && ((condition == const_true_rtx
               || (! insn_sets_resource_p (trial, other_needed, 0)
                   && ! may_trap_p (PATTERN (trial)))))
          ? eligible_for_delay (insn, *pslots_filled, trial, flags)
          : (*pannul_p = 1,
             eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
        {
          delete_from_delay_slot (trial);
          delay_list = add_to_delay_list (trial, delay_list);

          if (++(*pslots_filled) == slots_to_fill)
            break;
        }
      else
        break;
    }

  return delay_list;
}
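/* Aside -- not part of reorg.c.  Both stealing routines above decide
   eligibility with a single conditional expression whose false arm commits
   to annulling through the comma operator; since "&&" binds tighter than
   "?:", the test means "if annulling is still avoidable and the insn is
   safe, use the plain eligibility check, otherwise set the annul flag and
   use the annulled-form check".  A self-contained sketch of that control
   flow; ok_plain and ok_annulled are made-up stand-ins for
   eligible_for_delay and eligible_for_annul_*:  */

#include <stdio.h>

static int ok_plain (int insn)    { return insn > 0; }
static int ok_annulled (int insn) { return insn != 0; }

int
main (void)
{
  int must_annul = 0;
  int trial = 3;                        /* toy "insn" */

  if ((! must_annul && trial % 2 == 0)  /* stand-in for the may_trap_p test */
      ? ok_plain (trial)
      : (must_annul = 1, ok_annulled (trial)))
    printf ("slot filled, must_annul = %d\n", must_annul);
  else
    printf ("slot not filled\n");
  return 0;
}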
/* Try merging insns starting at THREAD which match exactly the insns in
   INSN's delay list.

   If all insns were matched and the insn was previously annulling, the
   annul bit will be cleared.

   For each insn that is merged, if the branch is or will be non-annulling,
   we delete the merged insn.  */

static void
try_merge_delay_insns (insn, thread)
     rtx insn, thread;
{
  rtx trial, next_trial;
  rtx delay_insn = XVECEXP (PATTERN (insn), 0, 0);
  int annul_p = INSN_ANNULLED_BRANCH_P (delay_insn);
  int slot_number = 1;
  int num_slots = XVECLEN (PATTERN (insn), 0);
  rtx next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
  struct resources set, needed;
  rtx merged_insns = 0;
  int i;
  int flags;

  flags = get_jump_flags (delay_insn, JUMP_LABEL (delay_insn));

  CLEAR_RESOURCE (&needed);
  CLEAR_RESOURCE (&set);

  /* If this is not an annulling branch, take into account anything needed in
     NEXT_TO_MATCH.  This prevents two increments from being incorrectly
     folded into one.  If we are annulling, this would be the correct
     thing to do.  (The alternative, looking at things set in NEXT_TO_MATCH
     will essentially disable this optimization.  This method is somewhat of
     a kludge, but I don't see a better way.)  */

  if (! annul_p)
    mark_referenced_resources (next_to_match, &needed, 1);

  for (trial = thread; !stop_search_p (trial, 1); trial = next_trial)
    {
      rtx pat = PATTERN (trial);
      rtx oldtrial = trial;

      next_trial = next_nonnote_insn (trial);

      /* TRIAL must be a CALL_INSN or INSN.  Skip USE and CLOBBER.  */
      if (GET_CODE (trial) == INSN
          && (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER))
        continue;

      if (GET_CODE (next_to_match) == GET_CODE (trial)
#ifdef HAVE_cc0
          /* We can't share an insn that sets cc0.  */
          && ! sets_cc0_p (pat)
#endif
          && ! insn_references_resource_p (trial, &set, 1)
          && ! insn_sets_resource_p (trial, &set, 1)
          && ! insn_sets_resource_p (trial, &needed, 1)
          && (trial = try_split (pat, trial, 0)) != 0
          /* Update next_trial, in case try_split succeeded.  */
          && (next_trial = next_nonnote_insn (trial))
          /* Likewise THREAD.  */
          && (thread = oldtrial == thread ? trial : thread)
          && rtx_equal_p (PATTERN (next_to_match), PATTERN (trial))
          /* Have to test this condition if annul condition is different
             from (and less restrictive than) non-annulling one.  */
          && eligible_for_delay (delay_insn, slot_number - 1, trial, flags))
        {
          if (! annul_p)
            {
              update_block (trial, thread);
              if (trial == thread)
                thread = next_active_insn (thread);

              delete_insn (trial);
              INSN_FROM_TARGET_P (next_to_match) = 0;
            }
          else
            merged_insns = gen_rtx (INSN_LIST, VOIDmode, trial, merged_insns);

          if (++slot_number == num_slots)
            break;

          next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
          if (! annul_p)
            mark_referenced_resources (next_to_match, &needed, 1);
        }

      mark_set_resources (trial, &set, 0, 1);
      mark_referenced_resources (trial, &needed, 1);
    }

  /* See if we stopped on a filled insn.  If we did, try to see if its
     delay slots match.  */
  if (slot_number != num_slots
      && trial && GET_CODE (trial) == INSN
      && GET_CODE (PATTERN (trial)) == SEQUENCE
      && ! INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (trial), 0, 0)))
    {
      rtx pat = PATTERN (trial);
      rtx filled_insn = XVECEXP (pat, 0, 0);

      /* Account for resources set/needed by the filled insn.  */
      mark_set_resources (filled_insn, &set, 0, 1);
      mark_referenced_resources (filled_insn, &needed, 1);

      for (i = 1; i < XVECLEN (pat, 0); i++)
        {
          rtx dtrial = XVECEXP (pat, 0, i);

          if (! insn_references_resource_p (dtrial, &set, 1)
              && ! insn_sets_resource_p (dtrial, &set, 1)
              && ! insn_sets_resource_p (dtrial, &needed, 1)
#ifdef HAVE_cc0
              && ! sets_cc0_p (PATTERN (dtrial))
#endif
              && rtx_equal_p (PATTERN (next_to_match), PATTERN (dtrial))
              && eligible_for_delay (delay_insn, slot_number - 1, dtrial, flags))
            {
              if (! annul_p)
                {
                  update_block (dtrial, thread);
                  delete_from_delay_slot (dtrial);
                  INSN_FROM_TARGET_P (next_to_match) = 0;
                }
              else
                merged_insns = gen_rtx (INSN_LIST, SImode, dtrial,
                                        merged_insns);

              if (++slot_number == num_slots)
                break;

              next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
            }
        }
    }

  /* If all insns in the delay slot have been matched and we were previously
     annulling the branch, we need not do so any more.  In that case delete
     all the merged insns.  Also clear the INSN_FROM_TARGET_P bit of each insn
     in the delay list so that we know that it isn't only being used at the
     target.  */
  if (slot_number == num_slots && annul_p)
    {
      for (; merged_insns; merged_insns = XEXP (merged_insns, 1))
        {
          if (GET_MODE (merged_insns) == SImode)
            {
              update_block (XEXP (merged_insns, 0), thread);
              delete_from_delay_slot (XEXP (merged_insns, 0));
            }
          else
            {
              update_block (XEXP (merged_insns, 0), thread);
              delete_insn (XEXP (merged_insns, 0));
            }
        }

      INSN_ANNULLED_BRANCH_P (delay_insn) = 0;

      for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i)) = 0;
    }
}
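/* Aside -- not part of reorg.c.  try_merge_delay_insns records each merged
   insn in an INSN_LIST node and reuses the node's machine mode as a tag:
   SImode marks a match found inside another insn's delay slots, VOIDmode a
   match found in the ordinary instruction stream, so the final cleanup loop
   knows whether to call delete_from_delay_slot or delete_insn.  A
   self-contained model of that tagged list; the names below are
   illustrative only:  */

#include <stdio.h>
#include <stdlib.h>

enum where { IN_STREAM, IN_DELAY_SLOT };   /* plays the role of the mode tag */

struct merged { int insn; enum where tag; struct merged *next; };

struct merged *
record_merge (struct merged *list, int insn, enum where tag)
{
  struct merged *node = malloc (sizeof *node);   /* error check omitted */
  node->insn = insn;
  node->tag = tag;
  node->next = list;
  return node;                                   /* new node becomes the head */
}

void
show_cleanup (const struct merged *list)
{
  for (; list; list = list->next)
    if (list->tag == IN_DELAY_SLOT)
      printf ("remove insn %d from its delay slot\n", list->insn);
    else
      printf ("delete insn %d from the stream\n", list->insn);
}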
/* See if INSN is redundant with an insn in front of TARGET.  Often this
   is called when INSN is a candidate for a delay slot of TARGET.

   DELAY_LIST are insns that will be placed in delay slots of TARGET in
   front of INSN.  Often INSN will be redundant with an insn in a delay slot
   of some previous insn.  This happens when we have a series of branches to
   the same label; in that case the first insn at the target might want to go
   into each of the delay slots.

   If we are not careful, this routine can take up a significant fraction
   of the total compilation time (4%), but only wins rarely.  Hence we
   speed this routine up by making two passes.  The first pass goes back
   until it hits a label and sees if it finds an insn with an identical
   pattern.  Only in this (relatively rare) event does it check for
   data conflicts.

   We do not split insns we encounter.  This could cause us not to find a
   redundant insn, but the cost of splitting seems greater than the possible
   gain in rare cases.  */

static rtx
redundant_insn (insn, target, delay_list)
     rtx insn;
     rtx target;
     rtx delay_list;
{
  rtx target_main = target;
  rtx ipat = PATTERN (insn);
  rtx trial, pat;
  struct resources needed, set;
  int i;

  /* If INSN has any REG_UNUSED notes, it can't match anything since we
     are allowed to not actually assign to such a register.  */
  if (find_reg_note (insn, REG_UNUSED, NULL_RTX) != 0)
    return 0;

  /* Scan backwards looking for a match.  */
  for (trial = PREV_INSN (target); trial; trial = PREV_INSN (trial))
    {
      if (GET_CODE (trial) == CODE_LABEL)
        return 0;

      if (GET_RTX_CLASS (GET_CODE (trial)) != 'i')
        continue;

      pat = PATTERN (trial);
      if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
        continue;

      if (GET_CODE (pat) == SEQUENCE)
        {
          /* Stop for a CALL and its delay slots because it is difficult to
             track its resource needs correctly.  */
          if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL_INSN)
            return 0;

          /* Stop for an INSN or JUMP_
