
alpha.c

gcc-2.95.3: the most commonly used C compiler on Linux
Language: C
Page 1 of 5
     int extra_offset;
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (ref) != MEM)
    abort ();

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      if (! memory_address_p (GET_MODE (ref), base))
	abort ();
    }
  else
    {
      base = XEXP (ref, 0);
    }

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset + extra_offset);
}

/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static void
alpha_set_memflags_1 (x, in_struct_p, volatile_p, unchanging_p)
     rtx x;
     int in_struct_p, volatile_p, unchanging_p;
{
  int i;

  switch (GET_CODE (x))
    {
    case SEQUENCE:
    case PARALLEL:
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	alpha_set_memflags_1 (XVECEXP (x, 0, i), in_struct_p, volatile_p,
			      unchanging_p);
      break;

    case INSN:
      alpha_set_memflags_1 (PATTERN (x), in_struct_p, volatile_p,
			    unchanging_p);
      break;

    case SET:
      alpha_set_memflags_1 (SET_DEST (x), in_struct_p, volatile_p,
			    unchanging_p);
      alpha_set_memflags_1 (SET_SRC (x), in_struct_p, volatile_p,
			    unchanging_p);
      break;

    case MEM:
      MEM_IN_STRUCT_P (x) = in_struct_p;
      MEM_VOLATILE_P (x) = volatile_p;
      RTX_UNCHANGING_P (x) = unchanging_p;

      /* Sadly, we cannot use alias sets because the extra aliasing
	 produced by the AND interferes.  Given that two-byte quantities
	 are the only thing we would be able to differentiate anyway,
	 there does not seem to be any point in convoluting the early
	 out of the alias check.  */
      /* MEM_ALIAS_SET (x) = alias_set; */
      break;

    default:
      break;
    }
}

/* Given INSN, which is either an INSN or a SEQUENCE generated to
   perform a memory operation, look for any MEMs in either a SET_DEST or
   a SET_SRC and copy the in-struct, unchanging, and volatile flags from
   REF into each of the MEMs found.  If REF is not a MEM, don't do
   anything.  */

void
alpha_set_memflags (insn, ref)
     rtx insn;
     rtx ref;
{
  int in_struct_p, volatile_p, unchanging_p;

  if (GET_CODE (ref) != MEM)
    return;

  in_struct_p = MEM_IN_STRUCT_P (ref);
  volatile_p = MEM_VOLATILE_P (ref);
  unchanging_p = RTX_UNCHANGING_P (ref);

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (! in_struct_p && ! volatile_p && ! unchanging_p)
    return;

  alpha_set_memflags_1 (insn, in_struct_p, volatile_p, unchanging_p);
}

/* Try to output insns to set TARGET equal to the constant C if it can be
   done in less than N insns.  Do all computations in MODE.  Returns the
   place where the output has been placed if it can be done and the insns
   have been emitted.  If it would take more than N insns, zero is returned
   and no insns are emitted.  */

rtx
alpha_emit_set_const (target, mode, c, n)
     rtx target;
     enum machine_mode mode;
     HOST_WIDE_INT c;
     int n;
{
  rtx pat;
  int i;

  /* Try 1 insn, then 2, then up to N.  */
  for (i = 1; i <= n; i++)
    if ((pat = alpha_emit_set_const_1 (target, mode, c, i)) != 0)
      return pat;

  return 0;
}

/* Internal routine for the above to check for N or below insns.  */

static rtx
alpha_emit_set_const_1 (target, mode, c, n)
     rtx target;
     enum machine_mode mode;
     HOST_WIDE_INT c;
     int n;
{
  HOST_WIDE_INT new = c;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && rtx_equal_function_value_matters
       ? 0 : target);
  rtx temp;

#if HOST_BITS_PER_WIDE_INT == 64
  /* We are only called for SImode and DImode.  If this is SImode, ensure
     that we are sign extended to a full word.  This does not make any sense
     when cross-compiling on a narrow machine.  */
  if (mode == SImode)
    c = (c & 0xffffffff) - 2 * (c & 0x80000000);
#endif

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */

  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = (c & 0xffff) - 2 * (c & 0x8000);
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high
	= ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
	 positive, we must adjust it to do two ldah insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
	{
	  extra = 0x4000;
	  tmp1 -= 0x40000000;
	  high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
	}

      if (c == low || (low == 0 && extra == 0))
	{
	  /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
	     but that meant that we can't handle INT_MIN on 32-bit machines
	     (like NT/Alpha), because we recurse indefinitely through
	     emit_move_insn to gen_movdi.  So instead, since we know exactly
	     what we want, create it explicitly.  */

	  if (target == NULL)
	    target = gen_reg_rtx (mode);
	  emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
	  return target;
	}
      else if (n >= 2 + (extra != 0))
	{
	  temp = copy_to_suggested_reg (GEN_INT (low), subtarget, mode);

	  if (extra != 0)
	    temp = expand_binop (mode, add_optab, temp, GEN_INT (extra << 16),
				 subtarget, 0, OPTAB_WIDEN);

	  return expand_binop (mode, add_optab, temp, GEN_INT (high << 16),
			       target, 0, OPTAB_WIDEN);
	}
    }

  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1
      || (mode == SImode && ! rtx_equal_function_value_matters))
    return 0;

#if HOST_BITS_PER_WIDE_INT == 64
  /* First, see if we can load a value into the target that is the same as
     the constant except that all bytes that are 0 are changed to be 0xff.
     If we can, then we can do a ZAPNOT to obtain the desired constant.  */

  for (i = 0; i < 64; i += 8)
    if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure
     that we are sign extended to a full word.  */

  if (mode == SImode)
    new = (new & 0xffffffff) - 2 * (new & 0x80000000);

  if (new != c
      && (temp = alpha_emit_set_const (subtarget, mode, new, n - 1)) != 0)
    return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
			 target, 0, OPTAB_WIDEN);
#endif
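  /* Note on the AND mask above: in every byte where C is non-zero, NEW
     equals C, so C | ~NEW is all ones there and the AND keeps the byte;
     in every byte where C is zero, NEW is 0xff and C | ~NEW is zero, so
     the AND clears the byte again.  A byte-granular mask of 0x00/0xff
     bytes is exactly what the Alpha zapnot insn implements, which is why
     loading NEW plus one more insn reproduces C.  */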
  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once for each
     increasing number of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First try complementing.  */

      if ((temp = alpha_emit_set_const (subtarget, mode, ~ c, i)) != 0)
	return expand_unop (mode, one_cmpl_optab, temp, target, 0);

      /* Next try to form a constant and do a left shift.  We can do this
	 if some low-order bits are zero; the exact_log2 call below tells
	 us that information.  The bits we are shifting out could be any
	 value, but here we'll just try the 0- and sign-extended forms of
	 the constant.  To try to increase the chance of having the same
	 constant in more than one insn, start at the highest number of
	 bits to shift, but try all possibilities in case a ZAPNOT will
	 be useful.  */

      if ((bits = exact_log2 (c & - c)) > 0)
	for (; bits > 0; bits--)
	  if ((temp = (alpha_emit_set_const
		       (subtarget, mode,
			(unsigned HOST_WIDE_INT) (c >> bits), i))) != 0
	      || ((temp = (alpha_emit_set_const
			   (subtarget, mode,
			    ((unsigned HOST_WIDE_INT) c) >> bits, i)))
		  != 0))
	    return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
				 target, 0, OPTAB_WIDEN);

      /* Now try high-order zero bits.  Here we try the shifted-in bits as
	 all zero and all ones.  Be careful to avoid shifting outside the
	 mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
	 confuse the recursive call and set all of the high 32 bits.  */

      if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
		   - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64))) > 0)
	for (; bits > 0; bits--)
	  if ((temp = alpha_emit_set_const (subtarget, mode,
					    c << bits, i)) != 0
	      || ((temp = (alpha_emit_set_const
			   (subtarget, mode,
			    ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
			    i)))
		  != 0))
	    return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
				 target, 1, OPTAB_WIDEN);

      /* Now try high-order 1 bits.  We get that with a sign-extension.
	 But one bit isn't enough here.  Be careful to avoid shifting outside
	 the mode and to avoid shifting outside the host wide int size.  */

      if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
		   - floor_log2 (~ c) - 2)) > 0)
	for (; bits > 0; bits--)
	  if ((temp = alpha_emit_set_const (subtarget, mode,
					    c << bits, i)) != 0
	      || ((temp = (alpha_emit_set_const
			   (subtarget, mode,
			    ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
			    i)))
		  != 0))
	    return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
				 target, 0, OPTAB_WIDEN);
    }

  return 0;
}
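/* An illustrative sketch, not part of the original alpha.c: it repeats the
   low/high/extra split used in the sign-extended 32-bit path of
   alpha_emit_set_const_1 above and checks the identity that path relies on,
   namely C == (HIGH << 16) + (EXTRA << 16) + LOW, which is what the emitted
   ldah/lda-style additions recompute.  The function name is made up for
   this example and is referenced nowhere else.  */

#if HOST_BITS_PER_WIDE_INT == 64
static void
alpha_split_low_high_example (c)
     HOST_WIDE_INT c;
{
  /* Assume C is already a sign-extended 32-bit value, as in the guarded
     path above.  */
  HOST_WIDE_INT low = (c & 0xffff) - 2 * (c & 0x8000);
  HOST_WIDE_INT tmp1 = c - low;
  HOST_WIDE_INT high
    = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
  HOST_WIDE_INT extra = 0;

  /* E.g. for c = 0x7fff8000, LOW is -0x8000 and the plain HIGH would also
     be -0x8000 (negative although C is positive); the adjustment below
     gives EXTRA = 0x4000 and HIGH = 0x4000, so two "high" additions are
     emitted instead of one.  */
  if ((high & 0x8000) != 0 && c >= 0)
    {
      extra = 0x4000;
      tmp1 -= 0x40000000;
      high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
    }

  if ((high << 16) + (extra << 16) + low != c)
    abort ();
}
#endif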
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

rtx
alpha_emit_set_long_const (target, c1, c2)
     rtx target;
     HOST_WIDE_INT c1, c2;
{
  HOST_WIDE_INT d1, d2, d3, d4;

  /* Decompose the entire word.  */
#if HOST_BITS_PER_WIDE_INT >= 64
  if (c2 != -(c1 < 0))
    abort ();
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  if (c1 != d4)
    abort ();
#else
  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  if (c1 != d2)
    abort ();
  c2 += (d2 < 0);
  d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
  c2 -= d3;
  d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  if (c2 != d4)
    abort ();
#endif

  /* Construct the high word.  */
  if (d4)
    {
      emit_move_insn (target, GEN_INT (d4));
      if (d3)
	emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
    }
  else
    emit_move_insn (target, GEN_INT (d3));

  /* Shift it into place.  */
  emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));

  /* Add in the low bits.  */
  if (d2)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  if (d1)
    emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));

  return target;
}
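/* An illustrative sketch, not part of the original alpha.c: it mirrors the
   decomposition in alpha_emit_set_long_const above and checks the identity
   behind it, namely C == (((D4 + D3) << 32) + D2) + D1, which is exactly
   what the emitted move/add, shift-by-32, add, add sequence recomputes at
   run time.  The function name is made up for this example and is
   referenced nowhere else.  */

#if HOST_BITS_PER_WIDE_INT >= 64
static void
alpha_long_const_pieces_example (c)
     HOST_WIDE_INT c;
{
  HOST_WIDE_INT c1 = c, d1, d2, d3, d4;

  d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d1;
  d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c1 = (c1 - d2) >> 32;
  d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  c1 -= d3;
  d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;

  /* Same sanity check as above: the four pieces must account for every
     bit of the original constant.  */
  if (c1 != d4)
    abort ();

  if ((((d4 + d3) << 32) + d2) + d1 != c)
    abort ();
}
#endif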
/* Generate the comparison for a conditional branch.  */

rtx
alpha_emit_conditional_branch (code)
     enum rtx_code code;
{
  enum rtx_code cmp_code, branch_code;
  enum machine_mode cmp_mode, branch_mode = VOIDmode;
  rtx op0 = alpha_compare_op0, op1 = alpha_compare_op1;
  rtx tem;

  /* The general case: fold the comparison code to the types of compares
     that we have, choosing the branch as necessary.  */
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
      /* We have these compares: */
      cmp_code = code, branch_code = NE;
      break;

    case NE:
      /* This must be reversed.  */
      cmp_code = EQ, branch_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* For FP, we swap them, for INT, we reverse them.  */
      if (alpha_compare_fp_p)
	{
	  cmp_code = swap_condition (code);
	  branch_code = NE;
	  tem = op0, op0 = op1, op1 = tem;
	}
      else
	{
	  cmp_code = reverse_condition (code);
	  branch_code = EQ;
	}
      break;

    default:
      abort ();
    }

  if (alpha_compare_fp_p)
    {
      cmp_mode = DFmode;
      if (flag_fast_math)
	{
	  /* When we are not as concerned about non-finite values, and we
	     are comparing against zero, we can branch directly.  */
	  if (op1 == CONST0_RTX (DFmode))
	    cmp_code = NIL, branch_code = code;
	  else if (op0 == CONST0_RTX (DFmode))
	    {
	      /* Undo the swap we probably did just above.  */
	      tem = op0, op0 = op1, op1 = tem;
	      branch_code = swap_condition (cmp_code);
	      cmp_code = NIL;
	    }
	}
      else
	{
	  /* ??? We mark the branch mode to be CCmode to prevent the
	     compare and branch from being combined, since the compare
	     insn follows IEEE rules that the branch does not.  */
	  branch_mode = CCmode;
	}
    }
  else
    {
      cmp_mode = DImode;

      /* The following optimizations are only for signed compares.  */
      if (code != LEU && code != LTU && code != GEU && code != GTU)
	{
	  /* Whee.  Compare and branch against 0 directly.  */
	  if (op1 == const0_rtx)
	    cmp_code = NIL, branch_code = code;

	  /* We want to use cmpcc/bcc when we can, since there is a zero
	     delay bypass between logicals and br/cmov on EV5.  But we don't
	     want to force valid immediate constants into registers
	     needlessly.  */
	  else if (GET_CODE (op1) == CONST_INT)
	    {
	      HOST_WIDE_INT v = INTVAL (op1), n = -v;

	      if (! CONST_OK_FOR_LETTER_P (v, 'I')
		  && (CONST_OK_FOR_LETTER_P (n, 'K')
		      || CONST_OK_FOR_LETTER_P (n, 'L')))
		{
		  cmp_code = PLUS, branch_code = code;
		  op1 = GEN_INT (n);
		}
	    }
	}
    }

  /* Force op0 into a register.  */
  if (GET_CODE (op0) != REG)
    op0 = force_reg (cmp_mode, op0);

  /* Emit an initial compare instruction, if necessary.  */
  tem = op0;
  if (cmp_code != NIL)
    {
      tem = gen_reg_rtx (cmp_mode);
      emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
    }

  /* Return the branch comparison.  */
  return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
}

/* Rewrite a comparison against zero CMP of the form
   (CODE (cc0) (const_int 0)) so it can be written validly in
   a conditional move (if_then_else CMP ...).
   If both of the operands that set cc0 are non-zero we must emit
   an insn to perform the compare (it can't be done within
   the conditional move).  */

rtx
