⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 sh.c

📁 GCC编译器源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
}

/* Find the last barrier from insn FROM which is close enough to hold the
   constant pool.  If we can't find one, then create one near the end of
   the range.  */

static rtx
find_barrier (num_mova, mova, from)
     int num_mova;
     rtx mova, from;
{
  /* Running distance (in bytes) from the first SImode / HImode constant
     seen to the current scan position.  */
  int count_si = 0;
  int count_hi = 0;
  /* Accumulated size of HImode / SImode constants found so far; counting
     of count_* only starts once the first constant of that mode is seen.  */
  int found_hi = 0;
  int found_si = 0;
  int leading_mova = num_mova;
  rtx barrier_before_mova, found_barrier = 0, good_barrier = 0;
  int si_limit;
  int hi_limit;

  /* For HImode: range is 510, add 4 because pc counts from address of
     second instruction after this one, subtract 2 for the jump instruction
     that we may need to emit before the table, subtract 2 for the instruction
     that fills the jump delay slot (in very rare cases, reorg will take an
     instruction from after the constant pool or will leave the delay slot
     empty).  This gives 510.
     For SImode: range is 1020, add 4 because pc counts from address of
     second instruction after this one, subtract 2 in case pc is 2 byte
     aligned, subtract 2 for the jump instruction that we may need to emit
     before the table, subtract 2 for the instruction that fills the jump
     delay slot.  This gives 1018.  */

  /* The branch will always be shortened now that the reference address for
     forward branches is the successor address, thus we need no longer make
     adjustments to the [sh]i_limit for -O0.  */

  si_limit = 1018;
  hi_limit = 510;

  while (from && count_si < si_limit && count_hi < hi_limit)
    {
      int inc = 0;

      /* The instructions created by fixup_addr_diff_vecs have no valid length
	 info yet.  They should be considered to have zero at this point.  */
      if (INSN_UID (from) < max_uid_before_fixup_addr_diff_vecs)
	inc = get_attr_length (from);

      if (GET_CODE (from) == BARRIER)
	{
	  found_barrier = from;

	  /* If we are at the end of the function, or in front of an
	     alignment instruction, we need not insert an extra alignment.
	     We prefer this kind of barrier.  */
	  if (cache_align_p (next_real_insn (found_barrier)))
	    good_barrier = from;
	}

      /* A load of a constant that must be moved into the constant pool:
	 account for its size against the appropriate range limit.  */
      if (broken_move (from))
	{
	  rtx pat, src, dst;
	  enum machine_mode mode;

	  pat = PATTERN (from);
	  if (GET_CODE (pat) == PARALLEL)
	    pat = XVECEXP (pat, 0, 0);
	  src = SET_SRC (pat);
	  dst = SET_DEST (pat);
	  mode = GET_MODE (dst);

	  /* We must explicitly check the mode, because sometimes the
	     front end will generate code to load unsigned constants into
	     HImode targets without properly sign extending them.  */
	  if (mode == HImode || (mode == SImode && hi_const (src)))
	    {
	      found_hi += 2;
	      /* We put the short constants before the long constants, so
		 we must count the length of short constants in the range
		 for the long constants.  */
	      /* ??? This isn't optimal, but is easy to do.  */
	      si_limit -= 2;
	    }
	  else
	    {
	      if (found_si > count_si)
		count_si = found_si;
	      found_si += GET_MODE_SIZE (mode);
	      if (num_mova)
		si_limit -= GET_MODE_SIZE (mode);
	    }
	}

      /* NOTE(review): this UNSPEC with index 1 presumably matches the mova
	 pattern (pc-relative address load) — confirm against sh.md.  */
      if (GET_CODE (from) == INSN
	  && GET_CODE (PATTERN (from)) == SET
	  && GET_CODE (SET_SRC (PATTERN (from))) == UNSPEC
	  && XINT (SET_SRC (PATTERN (from)), 1) == 1)
	{
	  if (! num_mova++)
	    {
	      /* First mova seen: remember it and the best barrier found
		 before it, in case its label reference goes out of range.  */
	      leading_mova = 0;
	      mova = from;
	      barrier_before_mova = good_barrier ? good_barrier : found_barrier;
	    }
	  if (found_si > count_si)
	    count_si = found_si;
	}
      else if (GET_CODE (from) == JUMP_INSN
	       && (GET_CODE (PATTERN (from)) == ADDR_VEC
		   || GET_CODE (PATTERN (from)) == ADDR_DIFF_VEC))
	{
	  if (num_mova)
	    num_mova--;
	  if (cache_align_p (NEXT_INSN (next_nonnote_insn (from))))
	    {
	      /* We have just passed the barrier in front of the
		 ADDR_DIFF_VEC.  Since the ADDR_DIFF_VEC is accessed
		 as data, just like our pool constants, this is a good
		 opportunity to accommodate what we have gathered so far.
		 If we waited any longer, we could end up at a barrier in
		 front of code, which gives worse cache usage for separated
		 instruction / data caches.  */
	      good_barrier = found_barrier;
	      break;
	    }
	}

      /* Distances only accumulate once the first constant of the
	 corresponding mode has been seen.  */
      if (found_si)
	count_si += inc;
      if (found_hi)
	count_hi += inc;
      from = NEXT_INSN (from);
    }

  if (num_mova)
    if (leading_mova)
      {
	/* Try as we might, the leading mova is out of range.  Change
	   it into a load (which will become a pcload) and retry.  */
	SET_SRC (PATTERN (mova)) = XVECEXP (SET_SRC (PATTERN (mova)), 0, 0);
	INSN_CODE (mova) = -1;
        return find_barrier (0, 0, mova);
      }
    else
      {
	/* Insert the constant pool table before the mova instruction,
	   to prevent the mova label reference from going out of range.  */
	from = mova;
	good_barrier = found_barrier = barrier_before_mova;
      }

  if (found_barrier)
    {
      /* We have before prepared barriers to come in pairs, with an
	 alignment instruction in-between.  We want to use the first
	 barrier, so that the alignment applies to the code.
	 If we are compiling for SH3 or newer, there are some exceptions
	 when the second barrier and the alignment doesn't exist yet, so
	 we have to add it.  */
      if (good_barrier)
	found_barrier = good_barrier;
      else if (! TARGET_SMALLCODE)
	{
	  found_barrier
	    = emit_insn_before (gen_align_log (GEN_INT (CACHE_LOG)),
				found_barrier);
	  found_barrier = emit_barrier_before (found_barrier);
	}
    }
  else
    {
      /* We didn't find a barrier in time to dump our stuff,
	 so we'll make one.  */
      rtx label = gen_label_rtx ();

      /* If we exceeded the range, then we must back up over the last
	 instruction we looked at.  Otherwise, we just need to undo the
	 NEXT_INSN at the end of the loop.  */
      if (count_hi > hi_limit || count_si > si_limit)
	from = PREV_INSN (PREV_INSN (from));
      else
	from = PREV_INSN (from);

      /* Walk back to be just before any jump or label.
	 Putting it before a label reduces the number of times the branch
	 around the constant pool table will be hit.  Putting it before
	 a jump makes it more likely that the bra delay slot will be
	 filled.  */
      while (GET_CODE (from) == JUMP_INSN || GET_CODE (from) == NOTE
	     || GET_CODE (from) == CODE_LABEL)
	from = PREV_INSN (from);

      /* Emit "bra label; barrier; label:" so that execution skips over
	 the pool that will be dumped at the new barrier.  */
      from = emit_jump_insn_after (gen_jump (label), from);
      JUMP_LABEL (from) = label;
      LABEL_NUSES (label) = 1;
      found_barrier = emit_barrier_after (from);
      emit_label_after (label, found_barrier);
      if (! TARGET_SMALLCODE)
	{
	  emit_barrier_after (found_barrier);
	  emit_insn_after (gen_align_log (GEN_INT (CACHE_LOG)), found_barrier);
	}
    }

  return found_barrier;
}

/* If the instruction INSN is implemented by a special function, and we can
   positively find the register that is used to call the sfunc, and this
   register is not used anywhere else in this instruction - except as the
   destination of a set, return this register; else, return 0.  */

rtx
sfunc_uses_reg (insn)
     rtx insn;
{
  int i;
  rtx pattern, part, reg_part, reg;

  if (GET_CODE (insn) != INSN)
    return 0;
  pattern = PATTERN (insn);
  if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
    return 0;

  /* Find the (last) SImode USE in the PARALLEL; that names the register
     used to call the sfunc.  Element 0 is skipped — it is the operation
     itself, not a USE.  */
  for (reg_part = 0, i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
    {
      part = XVECEXP (pattern, 0, i);
      if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == SImode)
	reg_part = part;
    }
  if (! reg_part)
    return 0;
  reg = XEXP (reg_part, 0);
  /* Reject the register if it is mentioned anywhere else in the pattern,
     except as the destination of a SET.  */
  for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
    {
      part = XVECEXP (pattern, 0, i);
      if (part == reg_part)
	continue;
      if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
				  && GET_CODE (SET_DEST (part)) == REG)
				 ? SET_SRC (part) : part)))
	return 0;
    }
  return reg;
}

/* See if the only way in which INSN uses REG is by calling it, or by
   setting it while calling it.  Set *SET to a SET rtx if the register
   is set by INSN.
   Returns nonzero if INSN has a non-call use of REG, zero otherwise.  */

static int
noncall_uses_reg (reg, insn, set)
     rtx reg;
     rtx insn;
     rtx *set;
{
  rtx pattern, reg2;

  *set = NULL_RTX;

  /* An sfunc call through REG counts as a call use.  */
  reg2 = sfunc_uses_reg (insn);
  if (reg2 && REGNO (reg2) == REGNO (reg))
    {
      pattern = single_set (insn);
      if (pattern
	  && GET_CODE (SET_DEST (pattern)) == REG
	  && REGNO (reg) == REGNO (SET_DEST (pattern)))
	*set = pattern;
      return 0;
    }
  if (GET_CODE (insn) != CALL_INSN)
    {
      /* We don't use rtx_equal_p because we don't care if the mode is
	 different.  */
      pattern = single_set (insn);
      if (pattern
	  && GET_CODE (SET_DEST (pattern)) == REG
	  && REGNO (reg) == REGNO (SET_DEST (pattern)))
	{
	  rtx par, part;
	  int i;

	  *set = pattern;
	  /* A non-SET element of a PARALLEL that mentions REG is a use.  */
	  par = PATTERN (insn);
	  if (GET_CODE (par) == PARALLEL)
	    for (i = XVECLEN (par, 0) - 1; i >= 0; i--)
	      {
		part = XVECEXP (par, 0, i);
		if (GET_CODE (part) != SET && reg_mentioned_p (reg, part))
		  return 1;
	      }
	  return reg_mentioned_p (reg, SET_SRC (pattern));
	}

      return 1;
    }

  /* INSN is a CALL_INSN from here on.  */
  pattern = PATTERN (insn);

  if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
	if (reg_mentioned_p (reg, XVECEXP (pattern, 0, i)))
	  return 1;
      pattern = XVECEXP (pattern, 0, 0);
    }

  if (GET_CODE (pattern) == SET)
    {
      if (reg_mentioned_p (reg, SET_DEST (pattern)))
	{
	  /* We don't use rtx_equal_p, because we don't care if the
             mode is different.  */
	  if (GET_CODE (SET_DEST (pattern)) != REG
	      || REGNO (reg) != REGNO (SET_DEST (pattern)))
	    return 1;

	  *set = pattern;
	}

      pattern = SET_SRC (pattern);
    }

  /* The remaining pattern must be the call itself, through a MEM whose
     address is exactly REG.  */
  if (GET_CODE (pattern) != CALL
      || GET_CODE (XEXP (pattern, 0)) != MEM
      || ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
    return 1;

  return 0;
}

/* Given X, a pattern of an insn or a part of it, return a mask of used
   general registers.  Bits 0..15 mean that the respective registers
   are used as inputs in the instruction.  Bits 16..31 mean that the
   registers 0..15, respectively, are used as outputs, or are clobbered.
   IS_DEST should be set to 16 if X is the destination of a SET, else to 0.  */

int
regs_used (x, is_dest)
     rtx x; int is_dest;
{
  enum rtx_code code;
  char *fmt;
  int i, used = 0;

  if (! x)
    return used;
  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      if (REGNO (x) < 16)
	return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
		<< (REGNO (x) + is_dest));
      return 0;
    case SUBREG:
      {
	rtx y = SUBREG_REG (x);

	if (GET_CODE (y) != REG)
	  break;
	if (REGNO (y) < 16)
	  return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
		  << (REGNO (y) + SUBREG_WORD (x) + is_dest));
	return 0;
      }
    case SET:
      /* Sources are inputs, the destination is an output.  */
      return regs_used (SET_SRC (x), 0) | regs_used (SET_DEST (x), 16);
    case RETURN:
      /* If there was a return value, it must have been indicated with USE.  */
      return 0x00ffff00;
    case CLOBBER:
      /* NOTE(review): 1 looks inconsistent with the 16 used for SET
	 destinations above — verify against upstream sh.c before relying
	 on the output bits for clobbers.  */
      is_dest = 1;
      break;
    case MEM:
      /* A MEM inside a destination is still an address computation,
	 i.e. an input.  */
      is_dest = 0;
      break;
    case CALL:
      /* Caller-saved registers: r4-r7 argument regs and r8-r15 — per the
	 mask; see the bit layout described above.  */
      used |= 0x00ff00f0;
      break;
    }

  fmt = GET_RTX_FORMAT (code);

  /* Recurse over all sub-expressions of X.  */
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  register int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    used |= regs_used (XVECEXP (x, i, j), is_dest);
	}
      else if (fmt[i] == 'e')
	used |= regs_used (XEXP (x, i), is_dest);
    }
  return used;
}

/* Create an instruction that prevents redirection of a conditional branch
   to the destination of the JUMP with address ADDR.
   If the branch needs to be implemented as an indirect jump, try to find
   a scratch register for it.
   If NEED_BLOCK is 0, don't do anything unless we need a scratch register.
   If any preceding insn that doesn't fit into a delay slot is good enough,
   pass 1.  Pass 2 if a definite blocking insn is needed.
   -1 is used internally to avoid deep recursion.
   If a blocking instruction is made or recognized, return it.

   NOTE(review): this function is truncated at the end of this chunk
   (page 1 of 5 of the original file); the remainder is not visible here.  */

static rtx
gen_block_redirect (jump, addr, need_block)
     rtx jump;
     int addr, need_block;
{
  int dead = 0;
  rtx prev = prev_nonnote_insn (jump);
  rtx dest;

  /* First, check if we already have an instruction that satisfies our need.  */
  if (prev && GET_CODE (prev) == INSN && ! INSN_DELETED_P (prev))
    {
      if (INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
	return prev;
      if (GET_CODE (PATTERN (prev)) == USE
	  || GET_CODE (PATTERN (prev)) == CLOBBER
	  || get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
	prev = jump;
      else if ((need_block &= ~1) < 0)
	return prev;
      else if (recog_memoized (prev) == CODE_FOR_block_branch_redirect)
	need_block = 0;
    }
  /* We can't use JUMP_LABEL here because it might be undefined
     when not optimizing.  */
  dest = XEXP (SET_SRC (PATTERN (jump)), 0);
  /* If the branch is out of range, try to find a scratch register for it.  */
  if (optimize
      && (insn_addresses[INSN_UID (dest)] - addr + 4092U > 4092 + 4098))
    {
      rtx scan;
      /* Don't look for the stack pointer as a scratch register,
	 it would cause trouble if an interrupt occurred.  */
      unsigned try = 0x7fff, used;
      int jump_left = flag_expensive_optimizations + 1;

      /* It is likely that the most recent eligible instruction is wanted for
	 the delay slot.  Therefore, find out which registers it uses, and
	 try to avoid using them.  */

      for (scan = jump; scan = PREV_INSN (scan); )
	{
	  enum rtx_code code;

	  if (INSN_DELETED_P (scan))
	    continue;
	  code = GET_CODE (scan);
	  if (code == CODE_LABEL || code == JUMP_INSN)
	    break;
	  if (code == INSN
	      && GET_CODE (PATTERN (scan)) != USE
	      && GET_CODE (PATTERN (scan)) != CLOBBER
	      && get_attr_in_delay_slot (scan) == IN_DELAY_SLOT_YES)
	    {
	      try &= ~regs_used (PATTERN (scan), 0);
	      break;
	    }
	}
      for (used = dead = 0, scan = 

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -