alpha.c
GCC compiler source code (C)
Page 1 of 5
     and expand_unop calls will widen and try to make pseudos.  */

  if (n == 1
      || (mode == SImode && ! rtx_equal_function_value_matters))
    return 0;

#if HOST_BITS_PER_WIDE_INT == 64
  /* First, see if we can load a value into the target that is the same as the
     constant except that all bytes that are 0 are changed to be 0xff.  If we
     can, then we can do a ZAPNOT to obtain the desired constant.  */

  for (i = 0; i < 64; i += 8)
    if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
      new |= (HOST_WIDE_INT) 0xff << i;

  /* We are only called for SImode and DImode.  If this is SImode, ensure that
     we are sign extended to a full word.  */

  if (mode == SImode)
    new = (new & 0xffffffff) - 2 * (new & 0x80000000);

  if (new != c
      && (temp = alpha_emit_set_const (subtarget, mode, new, n - 1)) != 0)
    return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
                         target, 0, OPTAB_WIDEN);
#endif

  /* Next, see if we can load a related constant and then shift and possibly
     negate it to get the constant we want.  Try this once each increasing
     numbers of insns.  */

  for (i = 1; i < n; i++)
    {
      /* First try complementing.  */
      if ((temp = alpha_emit_set_const (subtarget, mode, ~ c, i)) != 0)
        return expand_unop (mode, one_cmpl_optab, temp, target, 0);

      /* Next try to form a constant and do a left shift.  We can do this
         if some low-order bits are zero; the exact_log2 call below tells
         us that information.  The bits we are shifting out could be any
         value, but here we'll just try the 0- and sign-extended forms of
         the constant.  To try to increase the chance of having the same
         constant in more than one insn, start at the highest number of
         bits to shift, but try all possibilities in case a ZAPNOT will
         be useful.  */

      if ((bits = exact_log2 (c & - c)) > 0)
        for (; bits > 0; bits--)
          if ((temp = (alpha_emit_set_const
                       (subtarget, mode,
                        (unsigned HOST_WIDE_INT) c >> bits, i))) != 0
              || ((temp = (alpha_emit_set_const
                           (subtarget, mode,
                            ((unsigned HOST_WIDE_INT) c) >> bits, i)))
                  != 0))
            return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
                                 target, 0, OPTAB_WIDEN);

      /* Now try high-order zero bits.  Here we try the shifted-in bits as
         all zero and all ones.  Be careful to avoid shifting outside the
         mode and to avoid shifting outside the host wide int size.  */

      if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
                   - floor_log2 (c) - 1)) > 0)
        for (; bits > 0; bits--)
          if ((temp = alpha_emit_set_const (subtarget, mode,
                                            c << bits, i)) != 0
              || ((temp = (alpha_emit_set_const
                           (subtarget, mode,
                            ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
                            i)))
                  != 0))
            return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
                                 target, 1, OPTAB_WIDEN);

      /* Now try high-order 1 bits.  We get that with a sign-extension.
         But one bit isn't enough here.  Be careful to avoid shifting outside
         the mode and to avoid shifting outside the host wide int size.  */
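
      /* [Illustrative note, not part of the original source.]  A concrete
         instance of the trick tried just below: with a 64-bit HOST_WIDE_INT
         and c = 0xffffffff80001234 (33 leading one bits), floor_log2 (~c)
         is 30, so bits starts at 64 - 30 - 2 = 32.  The shifted constant
         c << 32 == 0x8000123400000000 is handed back to alpha_emit_set_const
         as a candidate, and an arithmetic right shift by 32 then
         sign-extends it back to exactly c.  */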
      if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
                   - floor_log2 (~ c) - 2)) > 0)
        for (; bits > 0; bits--)
          if ((temp = alpha_emit_set_const (subtarget, mode,
                                            c << bits, i)) != 0
              || ((temp = (alpha_emit_set_const
                           (subtarget, mode,
                            ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
                            i)))
                  != 0))
            return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
                                 target, 0, OPTAB_WIDEN);
    }

  return 0;
}

#if HOST_BITS_PER_WIDE_INT == 64
/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
   fall back to a straight forward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with alpha_emit_set_const.  */

rtx
alpha_emit_set_long_const (target, c)
     rtx target;
     HOST_WIDE_INT c;
{
  /* Use a pseudo if highly optimizing and still generating RTL.  */
  rtx subtarget
    = (flag_expensive_optimizations && rtx_equal_function_value_matters
       ? 0 : target);
  HOST_WIDE_INT d1, d2, d3, d4;
  rtx r1, r2;

  /* Decompose the entire word */
  d1 = ((c & 0xffff) ^ 0x8000) - 0x8000;
  c -= d1;
  d2 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c = (c - d2) >> 32;
  d3 = ((c & 0xffff) ^ 0x8000) - 0x8000;
  c -= d3;
  d4 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
  if (c - d4 != 0)
    abort ();

  /* Construct the high word */
  if (d3 == 0)
    r1 = copy_to_suggested_reg (GEN_INT (d4), subtarget, DImode);
  else if (d4 == 0)
    r1 = copy_to_suggested_reg (GEN_INT (d3), subtarget, DImode);
  else
    r1 = expand_binop (DImode, add_optab, GEN_INT (d3), GEN_INT (d4),
                       subtarget, 0, OPTAB_WIDEN);

  /* Shift it into place */
  r2 = expand_binop (DImode, ashl_optab, r1, GEN_INT (32),
                     subtarget, 0, OPTAB_WIDEN);

  if (subtarget == 0 && d1 == d3 && d2 == d4)
    r1 = expand_binop (DImode, add_optab, r1, r2, subtarget, 0, OPTAB_WIDEN);
  else
    {
      r1 = r2;

      /* Add in the low word */
      if (d2 != 0)
        r1 = expand_binop (DImode, add_optab, r1, GEN_INT (d2),
                           subtarget, 0, OPTAB_WIDEN);
      if (d1 != 0)
        r1 = expand_binop (DImode, add_optab, r1, GEN_INT (d1),
                           subtarget, 0, OPTAB_WIDEN);
    }

  if (subtarget == 0)
    r1 = copy_to_suggested_reg (r1, target, DImode);

  return r1;
}
#endif /* HOST_BITS_PER_WIDE_INT == 64 */

/* Rewrite a comparison against zero CMP of the form
   (CODE (cc0) (const_int 0)) so it can be written validly in
   a conditional move (if_then_else CMP ...).
   If both of the operands that set cc0 are non-zero we must emit
   an insn to perform the compare (it can't be done within
   the conditional move).  */

rtx
alpha_emit_conditional_move (cmp, mode)
     rtx cmp;
     enum machine_mode mode;
{
  enum rtx_code code = GET_CODE (cmp);
  enum rtx_code cmov_code = NE;
  rtx op0 = alpha_compare_op0;
  rtx op1 = alpha_compare_op1;
  enum machine_mode cmp_mode
    = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
  enum machine_mode cmp_op_mode = alpha_compare_fp_p ? DFmode : DImode;
  rtx tem;

  if (alpha_compare_fp_p != FLOAT_MODE_P (mode))
    return 0;

  /* We may be able to use a conditional move directly.
     This avoids emitting spurious compares.  */

  if (signed_comparison_operator (cmp, cmp_op_mode)
      && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
    return gen_rtx (code, VOIDmode, op0, op1);

  /* We can't put the comparison inside a conditional move;
     emit a compare instruction and put that inside the
     conditional move.  Make sure we emit only comparisons we have;
     swap or reverse as necessary.  */
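
  /* [Illustrative note, not part of the original source.]  For example, a
     request for (GT op0 op1) is rewritten below as (LT op1 op0), since the
     Alpha only has the "less than" family of compares (cmpeq, cmplt, cmple
     and their unsigned and floating-point counterparts); an NE request
     becomes an EQ compare, and the conditional move then tests whether that
     EQ result is zero (cmov_code == EQ).  */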

  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
      /* We have these compares: */
      break;

    case NE:
      /* This must be reversed. */
      code = reverse_condition (code);
      cmov_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* These must be swapped.  Make sure the new first operand is in
         a register.  */
      code = swap_condition (code);
      tem = op0, op0 = op1, op1 = tem;
      op0 = force_reg (cmp_mode, op0);
      break;

    default:
      abort ();
    }

  tem = gen_reg_rtx (cmp_op_mode);
  emit_move_insn (tem, gen_rtx (code, cmp_op_mode, op0, op1));

  return gen_rtx (cmov_code, VOIDmode, tem, CONST0_RTX (cmp_op_mode));
}

/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

int
alpha_adjust_cost (insn, link, dep_insn, cost)
     rtx insn;
     rtx link;
     rtx dep_insn;
     int cost;
{
  rtx set;

  /* If the dependence is an anti-dependence, there is no cost.  For an
     output dependence, there is sometimes a cost, but it doesn't seem
     worth handling those few cases.  */

  if (REG_NOTE_KIND (link) != 0)
    return 0;

  /* EV5 costs are as given in alpha.md; exceptions are given here.  */
  if (alpha_cpu == PROCESSOR_EV5)
    {
      /* And the lord DEC saith:  "A special bypass provides an effective
         latency of 0 cycles for an ICMP or ILOG insn producing the test
         operand of an IBR or CMOV insn."  */

      if (recog_memoized (dep_insn) >= 0
          && (get_attr_type (dep_insn) == TYPE_ICMP
              || get_attr_type (dep_insn) == TYPE_ILOG)
          && recog_memoized (insn) >= 0
          && (get_attr_type (insn) == TYPE_IBR
              || (get_attr_type (insn) == TYPE_CMOV
                  && !((set = single_set (dep_insn)) != 0
                       && GET_CODE (PATTERN (insn)) == SET
                       && GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE
                       && (rtx_equal_p (SET_DEST (set),
                                        XEXP (SET_SRC (PATTERN (insn)), 1))
                           || rtx_equal_p (SET_DEST (set),
                                           XEXP (SET_SRC (PATTERN (insn)), 2)))))))
        return 1;

      return cost;
    }

  /* If INSN is a store insn and DEP_INSN is setting the data being stored,
     we can sometimes lower the cost.  */

  if (recog_memoized (insn) >= 0 && get_attr_type (insn) == TYPE_ST
      && (set = single_set (dep_insn)) != 0
      && GET_CODE (PATTERN (insn)) == SET
      && rtx_equal_p (SET_DEST (set), SET_SRC (PATTERN (insn))))
    switch (get_attr_type (dep_insn))
      {
      case TYPE_LD:
        /* No savings here.  */
        return cost;

      case TYPE_IMULL:
      case TYPE_IMULQ:
        /* In these cases, we save one cycle.  */
        return cost - 2;

      default:
        /* In all other cases, we save two cycles.  */
        return MAX (0, cost - 4);
      }

  /* Another case that needs adjustment is an arithmetic or logical
     operation.  Its cost is usually one cycle, but we default it to
     two in the MD file.  The only case that it is actually two is
     for the address in loads and stores.  */

  if (recog_memoized (dep_insn) >= 0
      && (get_attr_type (dep_insn) == TYPE_IADD
          || get_attr_type (dep_insn) == TYPE_ILOG))
    switch (get_attr_type (insn))
      {
      case TYPE_LD:
      case TYPE_ST:
        return cost;

      default:
        return 2;
      }

  /* The final case is when a compare feeds into an integer branch.  The cost
     is only one cycle in that case.  */

  if (recog_memoized (dep_insn) >= 0
      && get_attr_type (dep_insn) == TYPE_ICMP
      && recog_memoized (insn) >= 0
      && get_attr_type (insn) == TYPE_IBR)
    return 2;
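
  /* [Illustrative note, not part of the original source.]  Tracing one
     dependence through the tests above: a cmpult (TYPE_ICMP) whose result
     feeds a conditional branch (TYPE_IBR) falls through to the test just
     above when alpha_cpu is not PROCESSOR_EV5 and gets cost 2 instead of
     the md default; on EV5 the same producer/consumer pair is caught by
     the zero-latency bypass check earlier and returns 1.  */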

  /* Otherwise, return the default cost.  */

  return cost;
}

/* Print an operand.  Recognize special options, documented below.  */

void
print_operand (file, x, code)
    FILE *file;
    rtx x;
    char code;
{
  int i;

  switch (code)
    {
    case '&':
      /* Generates fp-rounding mode suffix: nothing for normal, 'c' for
         chopped, 'm' for minus-infinity, and 'd' for dynamic rounding
         mode.  alpha_fprm controls which suffix is generated.  */
      switch (alpha_fprm)
        {
        case ALPHA_FPRM_NORM:
          break;
        case ALPHA_FPRM_MINF:
          fputc ('m', file);
          break;
        case ALPHA_FPRM_CHOP:
          fputc ('c', file);
          break;
        case ALPHA_FPRM_DYN:
          fputc ('d', file);
          break;
        }
      break;

    case '\'':
      /* Generates trap-mode suffix for instructions that accept the su
         suffix only (cmpt et al).  */
      if (alpha_tp == ALPHA_TP_INSN)
        fputs ("su", file);
      break;

    case ')':
      /* Generates trap-mode suffix for instructions that accept the u, su,
         and sui suffix.  This is the bulk of the IEEE floating point
         instructions (addt et al).  */
      switch (alpha_fptm)
        {
        case ALPHA_FPTM_N:
          break;
        case ALPHA_FPTM_U:
          fputc ('u', file);
          break;
        case ALPHA_FPTM_SU:
          fputs ("su", file);
          break;
        case ALPHA_FPTM_SUI:
          fputs ("sui", file);
          break;
        }
      break;

    case '+':
      /* Generates trap-mode suffix for instructions that accept the sui
         suffix (cvtqt and cvtqs).  */
      switch (alpha_fptm)
        {
        case ALPHA_FPTM_N: case ALPHA_FPTM_U:
        case ALPHA_FPTM_SU:	/* cvtqt/cvtqs can't cause underflow */
          break;
        case ALPHA_FPTM_SUI:
          fputs ("sui", file);
          break;
        }
      break;

    case ',':
      /* Generates single precision instruction suffix.  */
      fprintf (file, "%c", (TARGET_FLOAT_VAX ? 'f' : 's'));
      break;

    case '-':
      /* Generates double precision instruction suffix.  */
      fprintf (file, "%c", (TARGET_FLOAT_VAX ? 'g' : 't'));
      break;

    case 'r':
      /* If this operand is the constant zero, write it as "$31".  */
      if (GET_CODE (x) == REG)
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$31");
      else
        output_operand_lossage ("invalid %%r value");
      break;

    case 'R':
      /* Similar, but for floating-point.  */
      if (GET_CODE (x) == REG)
        fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fprintf (file, "$f31");
      else
        output_operand_lossage ("invalid %%R value");
      break;

    case 'N':
      /* Write the 1's complement of a constant.  */
      if (GET_CODE (x) != CONST_INT)
        output_operand_lossage ("invalid %%N value");
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
      break;

    case 'P':
      /* Write 1 << C, for a constant C.  */
      if (GET_CODE (x) != CONST_INT)
        output_operand_lossage ("invalid %%P value");
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
      break;

    case 'h':
      /* Write the high-order 16 bits of a constant, sign-extended.  */
      if (GET_CODE (x) != CONST_INT)
        output_operand_lossage ("invalid %%h value");
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
      break;

    case 'L':
      /* Write the low-order 16 bits of a constant, sign-extended.  */
      if (GET_CODE (x) != CONST_INT)
        output_operand_lossage ("invalid %%L value");
      fprintf (file, HOST_WIDE_INT_PRINT_DEC,
               (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
      break;

    case 'm':
      /* Write mask for ZAP insn.  */
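
      /* [Illustrative note, not part of the original source.]  The mask
         built below has bit I set for every byte I of the value that is
         non-zero; e.g. a value of 0x00ff00ff yields mask 0x5 (bytes 0 and
         2 are non-zero), which is the byte-mask operand the zap/zapnot
         instructions take.  */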
      if (GET_CODE (x) == CONST_DOUBLE)
        {
          HOST_WIDE_INT mask = 0;
          HOST_WIDE_INT value;

          value = CONST_DOUBLE_LOW (x);
          for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
               i++, value >>= 8)
            if (value & 0xff)
              mask |= (1 << i);
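
/* [Illustrative sketch, not part of alpha.c; the listing breaks off above,
   partway through the ZAP-mask loop.]  As a self-contained illustration of
   the constant-building scheme used by alpha_emit_set_long_const earlier on
   this page, the decomposition step can be written as the hypothetical
   helper below (standard C, assuming 64-bit two's-complement arithmetic and
   an arithmetic right shift of negative values, just as the GCC code assumes
   for HOST_WIDE_INT).  It only computes the four addends; the real function
   goes on to emit the lda/ldah/sll/add RTL that combines them.  */

#include <assert.h>
#include <stdint.h>

/* Split C into d[0]..d[3] so that

     c == ((d[3] + d[2]) << 32) + d[1] + d[0],

   where d[0] and d[2] are sign-extended 16-bit values (lda material) and
   d[1] and d[3] are sign-extended 32-bit values whose low 16 bits are zero
   (ldah material), mirroring d1..d4 in alpha_emit_set_long_const.  */

static void
alpha_split_long_const (int64_t c, int64_t d[4])
{
  d[0] = ((c & 0xffff) ^ 0x8000) - 0x8000;
  c -= d[0];
  d[1] = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
  c = (c - d[1]) >> 32;
  d[2] = ((c & 0xffff) ^ 0x8000) - 0x8000;
  c -= d[2];
  d[3] = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
  assert (c - d[3] == 0);
}

/* For example, c = 0x123456789abcdef0 splits into
     d[0] = -0x2110, d[1] = -0x65430000, d[2] = 0x5679, d[3] = 0x12340000,
   and ((0x12340000 + 0x5679) << 32) - 0x65430000 - 0x2110 does reconstruct
   0x123456789abcdef0.  */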
