
📄 alpha.c

📁 Mac OS X 10.4.9 for x86 Source Code: gcc implementation source code
💻 C
📖 Page 1 of 5
    case MINUS:
      if (float_mode_p)
        *total = cost_data->fp_add;
      else if (GET_CODE (XEXP (x, 0)) == MULT
               && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
        {
          *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
                    + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
          return true;
        }
      return false;

    case MULT:
      if (float_mode_p)
        *total = cost_data->fp_mult;
      else if (mode == DImode)
        *total = cost_data->int_mult_di;
      else
        *total = cost_data->int_mult_si;
      return false;

    case ASHIFT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && INTVAL (XEXP (x, 1)) <= 3)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      *total = cost_data->int_shift;
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
        *total = cost_data->fp_add;
      else
        *total = cost_data->int_cmov;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (!float_mode_p)
        *total = cost_data->int_div;
      else if (mode == SFmode)
        *total = cost_data->fp_div_sf;
      else
        *total = cost_data->fp_div_df;
      return false;

    case MEM:
      *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
      return true;

    case NEG:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ABS:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
          return false;
        }
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
      *total = cost_data->fp_add;
      return false;

    default:
      return false;
    }
}

/* REF is an alignable memory location.  Place an aligned SImode
   reference into *PALIGNED_MEM and the number of bits to shift into
   *PBITNUM.  SCRATCH is a free register for use in reloading out
   of range stack slots.  */

void
get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (ref) != MEM)
    abort ();

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      if (! memory_address_p (GET_MODE (ref), base))
        abort ();
    }
  else
    {
      base = XEXP (ref, 0);
    }

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  *paligned_mem
    = widen_memory_access (ref, SImode, (offset & ~3) - offset);

  if (WORDS_BIG_ENDIAN)
    *pbitnum = GEN_INT (32 - (GET_MODE_BITSIZE (GET_MODE (ref))
                              + (offset & 3) * 8));
  else
    *pbitnum = GEN_INT ((offset & 3) * 8);
}

/* Similar, but just get the address.  Handle the two reload cases.
   Add EXTRA_OFFSET to the address we return.  */

rtx
get_unaligned_address (rtx ref, int extra_offset)
{
  rtx base;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (ref) != MEM)
    abort ();

  if (reload_in_progress
      && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
    {
      base = find_replacement (&XEXP (ref, 0));

      if (! memory_address_p (GET_MODE (ref), base))
        abort ();
    }
  else
    {
      base = XEXP (ref, 0);
    }

  if (GET_CODE (base) == PLUS)
    offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);

  return plus_constant (base, offset + extra_offset);
}
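/* A minimal standalone sketch (not part of alpha.c; the names below
   are hypothetical) of the offset arithmetic get_aligned_mem performs
   above, for the little-endian (!WORDS_BIG_ENDIAN) case: given a byte
   offset, derive the containing aligned 32-bit word and the bit
   position of the addressed byte within it.  */

static void
example_aligned_decompose (long byte_offset, long *pword_offset,
                           int *pbitnum)
{
  /* Round down to the start of the containing SImode word; the code
     above expresses the same thing as the relative adjustment
     (offset & ~3) - offset passed to widen_memory_access.  */
  *pword_offset = byte_offset & ~3L;

  /* Little-endian bit number of the byte within that word, matching
     GEN_INT ((offset & 3) * 8) above.  */
  *pbitnum = (int) ((byte_offset & 3) * 8);
}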
/* On the Alpha, all (non-symbolic) constants except zero go into
   a floating-point register via memory.  Note that we cannot
   return anything that is not a subset of CLASS, and that some
   symbolic constants cannot be dropped to memory.  */

enum reg_class
alpha_preferred_reload_class(rtx x, enum reg_class class)
{
  /* Zero is present in any register class.  */
  if (x == CONST0_RTX (GET_MODE (x)))
    return class;

  /* These sorts of constants we can easily drop to memory.  */
  if (GET_CODE (x) == CONST_INT
      || GET_CODE (x) == CONST_DOUBLE
      || GET_CODE (x) == CONST_VECTOR)
    {
      if (class == FLOAT_REGS)
        return NO_REGS;
      if (class == ALL_REGS)
        return GENERAL_REGS;
      return class;
    }

  /* All other kinds of constants should not (and in the case of HIGH
     cannot) be dropped to memory -- instead we use a GENERAL_REGS
     secondary reload.  */
  if (CONSTANT_P (x))
    return (class == ALL_REGS ? GENERAL_REGS : class);

  return class;
}

/* Loading and storing HImode or QImode values to and from memory
   usually requires a scratch register.  The exceptions are loading
   QImode and HImode from an aligned address to a general register
   unless byte instructions are permitted.

   We also cannot load an unaligned address or a paradoxical SUBREG
   into an FP register.

   We also cannot do integral arithmetic into FP regs, as might result
   from register elimination into a DImode fp register.  */

enum reg_class
secondary_reload_class (enum reg_class class, enum machine_mode mode,
                        rtx x, int in)
{
  if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
    {
      if (GET_CODE (x) == MEM
          || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
          || (GET_CODE (x) == SUBREG
              && (GET_CODE (SUBREG_REG (x)) == MEM
                  || (GET_CODE (SUBREG_REG (x)) == REG
                      && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
        {
          if (!in || !aligned_memory_operand(x, mode))
            return GENERAL_REGS;
        }
    }

  if (class == FLOAT_REGS)
    {
      if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
        return GENERAL_REGS;

      if (GET_CODE (x) == SUBREG
          && (GET_MODE_SIZE (GET_MODE (x))
              > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
        return GENERAL_REGS;

      if (in && INTEGRAL_MODE_P (mode)
          && ! (memory_operand (x, mode) || x == const0_rtx))
        return GENERAL_REGS;
    }

  return NO_REGS;
}

/* Subfunction of the following function.  Update the flags of any MEM
   found in part of X.  */

static int
alpha_set_memflags_1 (rtx *xp, void *data)
{
  rtx x = *xp, orig = (rtx) data;

  if (GET_CODE (x) != MEM)
    return 0;

  MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
  MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
  MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  MEM_READONLY_P (x) = MEM_READONLY_P (orig);

  /* Sadly, we cannot use alias sets because the extra aliasing
     produced by the AND interferes.  Given that two-byte quantities
     are the only thing we would be able to differentiate anyway,
     there does not seem to be any point in convoluting the early
     out of the alias check.  */

  return -1;
}
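/* A standalone sketch (toy types and names, not GCC code) of the
   traversal contract alpha_set_memflags_1 above is written against: a
   for_each_rtx-style walker visits a node, then its children.  A
   callback return of 0 continues into the children, -1 skips the
   current node's children but keeps walking the rest of the tree, and
   any other nonzero value aborts the whole walk and is returned.  */

struct toy_node
{
  struct toy_node *kid[2];   /* stand-in for sub-rtxes */
  int is_mem;                /* stand-in for GET_CODE (x) == MEM */
  int flags;                 /* stand-in for the MEM flags */
};

static int
toy_walk (struct toy_node *n, int (*fn) (struct toy_node *, void *),
          void *data)
{
  int i, r;

  if (n == 0)
    return 0;

  r = fn (n, data);
  if (r == -1)
    return 0;           /* Skip this node's children, keep walking.  */
  if (r != 0)
    return r;           /* Any other nonzero result stops the walk.  */

  for (i = 0; i < 2; i++)
    if ((r = toy_walk (n->kid[i], fn, data)) != 0)
      return r;

  return 0;
}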
/* Given INSN, which is an INSN list or the PATTERN of a single insn
   generated to perform a memory operation, look for any MEMs in either
   a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
   volatile flags from REF into each of the MEMs found.  If REF is not
   a MEM, don't do anything.  */

void
alpha_set_memflags (rtx insn, rtx ref)
{
  rtx *base_ptr;

  if (GET_CODE (ref) != MEM)
    return;

  /* This is only called from alpha.md, after having had something
     generated from one of the insn patterns.  So if everything is
     zero, the pattern is already up-to-date.  */
  if (!MEM_VOLATILE_P (ref)
      && !MEM_IN_STRUCT_P (ref)
      && !MEM_SCALAR_P (ref)
      && !MEM_NOTRAP_P (ref)
      && !MEM_READONLY_P (ref))
    return;

  if (INSN_P (insn))
    base_ptr = &PATTERN (insn);
  else
    base_ptr = &insn;
  for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
}

static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
                                 int, bool);
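/* A standalone sketch (not part of alpha.c; hypothetical names) of the
   decomposition alpha_emit_set_const_1 below relies on: split a
   sign-extended 32-bit constant C into C == (high << 16) + low, where
   both halves are signed 16-bit values as the Alpha ldah/lda pair
   requires.  The XOR/subtract idiom sign-extends a 16-bit field.  */

static void
example_split_constant (long c, long *phigh, long *plow)
{
  /* Sign-extend bits 0..15: values >= 0x8000 become negative.  */
  long low = ((c & 0xffff) ^ 0x8000) - 0x8000;

  /* The remainder has zero low bits; sign-extend bits 16..31.  */
  long high = ((((c - low) >> 16) & 0xffff) ^ 0x8000) - 0x8000;

  *phigh = high;
  *plow = low;

  /* (high << 16) + low now rebuilds C, except when C >= 0 but HIGH
     came out negative -- the case the code below patches with an
     extra 0x4000 ldah.  */
}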
/* Internal routine for alpha_emit_set_const to check for N or below insns.
   If NO_OUTPUT is true, then we only check to see if N insns are possible,
   and return pc_rtx if successful.  */

static rtx
alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
                        HOST_WIDE_INT c, int n, bool no_output)
{
  HOST_WIDE_INT new;
  int i, bits;
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
  rtx temp, insn;

  /* If this is a sign-extended 32-bit constant, we can do this in at most
     three insns, so do it if we have enough insns left.  We always have
     a sign-extended 32-bit constant when compiling on a narrow machine.  */

  if (HOST_BITS_PER_WIDE_INT != 64
      || c >> 31 == -1 || c >> 31 == 0)
    {
      HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT tmp1 = c - low;
      HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT extra = 0;

      /* If HIGH will be interpreted as negative but the constant is
         positive, we must adjust it to do two ldha insns.  */

      if ((high & 0x8000) != 0 && c >= 0)
        {
          extra = 0x4000;
          tmp1 -= 0x40000000;
          high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
        }

      if (c == low || (low == 0 && extra == 0))
        {
          /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
             but that meant that we can't handle INT_MIN on 32-bit machines
             (like NT/Alpha), because we recurse indefinitely through
             emit_move_insn to gen_movdi.  So instead, since we know exactly
             what we want, create it explicitly.  */

          if (no_output)
            return pc_rtx;
          if (target == NULL)
            target = gen_reg_rtx (mode);
          emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
          return target;
        }
      else if (n >= 2 + (extra != 0))
        {
          if (no_output)
            return pc_rtx;
          if (no_new_pseudos)
            {
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
              temp = target;
            }
          else
            temp = copy_to_suggested_reg (GEN_INT (high << 16),
                                          subtarget, mode);

          /* As of 2002-02-23, addsi3 is only available when not optimizing.
             This means that if we go through expand_binop, we'll try to
             generate extensions, etc, which will require new pseudos, which
             will fail during some split phases.  The SImode add patterns
             still exist, but are not named.  So build the insns by hand.  */

          if (extra != 0)
            {
              if (! subtarget)
                subtarget = gen_reg_rtx (mode);
              insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
              insn = gen_rtx_SET (VOIDmode, subtarget, insn);
              emit_insn (insn);
              temp = subtarget;
            }

          if (target == NULL)
            target = gen_reg_rtx (mode);
          insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
          insn = gen_rtx_SET (VOIDmode, target, insn);
          emit_insn (insn);
          return target;
        }
    }

  /* If we couldn't do it that way, try some other methods.  But if we have
     no instructions left, don't bother.  Likewise, if this is SImode and
     we can't make pseudos, we can't do anything since the expand_binop
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1 || (mode == SImode && no_new_pseudos))
    return 0;

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First, see if minus some low bits, we've an easy load of
         high bits.  */

      new = ((c & 0xffff) ^ 0x8000) - 0x8000;
      if (new != 0)
        {
          temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
          if (temp)
            {
              if (no_output)
                return temp;
              return expand_binop (mode, add_optab, temp, GEN_INT (new),
                                   target, 0, OPTAB_WIDEN);
            }
        }

      /* Next try complementing.  */
      temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
      if (temp)
        {
          if (no_output)
            return temp;
          return expand_unop (mode, one_cmpl_optab, temp, target, 0);
        }

      /* Next try to form a constant and do a left shift.  We can do this
         if some low-order bits are zero; the exact_log2 call below tells
         us that information.  The bits we are shifting out could be any
         value, but here we'll just try the 0- and sign-extended forms of
         the constant.  To try to increase the chance of having the same
         constant in more than one insn, start at the highest number of
         bits to shift, but try all possibilities in case a ZAPNOT will
         be useful.  */

      bits = exact_log2 (c & -c);
      if (bits > 0)
        for (; bits > 0; bits--)
          {
            new = c >> bits;
            temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
            if (!temp && c < 0)
              {
                new = (unsigned HOST_WIDE_INT)c >> bits;
                temp = alpha_emit_set_const (subtarget, mode, new,
                                             i, no_output);
              }
            if (temp)
              {
                if (no_output)
                  return temp;
                return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
                                     target, 0, OPTAB_WIDEN);
              }
          }

      /* Now try high-order zero bits.  Here we try the shifted-in bits as
         all zero and all ones.  Be careful to avoid shifting outside the
         mode and to avoid shifting outside the host wide int size.  */
      /* On narrow hosts, don't shift a 1 into the high bit, since we'll
         confuse the recursive call and set all of the high 32 bits.  */
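/* A standalone sketch (not part of alpha.c; hypothetical name) of the
   bit trick driving the shift loop above: C & -C isolates the lowest
   set bit of C, so its base-2 log -- what exact_log2 (c & -c) computes
   -- is the number of trailing zero bits, i.e. how far C can be
   shifted right and later rebuilt with a single left shift.  */

static int
example_trailing_zeros (long c)
{
  long lowbit = c & -c;   /* e.g. c = 0x3f00 -> lowbit = 0x0100 */
  int bits = 0;

  if (lowbit == 0)
    return -1;            /* c == 0 has no set bit, like exact_log2 (0) */

  while ((lowbit & 1) == 0)
    {
      lowbit >>= 1;
      bits++;             /* for 0x0100 this counts up to 8 */
    }
  return bits;
}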
