
📄 ia64.c

📁 Compiler software for programming under Linux
💻 C
📖 Page 1 of 5
static rtx
spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
{
  if (GET_CODE (in) == SUBREG
      && GET_MODE (SUBREG_REG (in)) == TImode
      && GET_CODE (SUBREG_REG (in)) == REG)
    {
      rtx memt = assign_stack_temp (TImode, 16, 0);
      emit_move_insn (memt, SUBREG_REG (in));
      return adjust_address (memt, mode, 0);
    }
  else if (force && GET_CODE (in) == REG)
    {
      rtx memx = assign_stack_temp (mode, 16, 0);
      emit_move_insn (memx, in);
      return memx;
    }
  else
    return in;
}
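/* The spill helper above forces a value through a stack slot so the
   same bits can be re-read in a different machine mode.  A minimal
   standalone C sketch of that reinterpret-through-memory idea follows;
   xf_to_di_pair is an illustrative name, not part of ia64.c, and the
   snippet compiles on its own, separate from this file.  */

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reinterpret the first 16 bytes of a long double through memory as
   two 64-bit halves, mirroring the pair of DImode loads that
   ia64_expand_movxf_movrf below emits for an XFmode value destined
   for general registers.  */
static void
xf_to_di_pair (long double x, uint64_t out[2])
{
  unsigned char buf[16] = { 0 };
  size_t n = sizeof (x) < 16 ? sizeof (x) : 16;

  /* Copying through memory changes only the interpretation of the
     bits; a direct cast would convert the value instead.  */
  memcpy (buf, &x, n);
  memcpy (&out[0], buf, 8);      /* low 64 bits, no word-swapping */
  memcpy (&out[1], buf + 8, 8);  /* high 64 bits */
}

int
main (void)
{
  uint64_t w[2];

  xf_to_di_pair (1.0L, w);
  printf ("%016llx %016llx\n",
          (unsigned long long) w[1], (unsigned long long) w[0]);
  return 0;
}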
/* Expand the movxf or movrf pattern (MODE says which) with the given
   OPERANDS, returning true if the pattern should then invoke
   DONE.  */

bool
ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
{
  rtx op0 = operands[0];

  if (GET_CODE (op0) == SUBREG)
    op0 = SUBREG_REG (op0);

  /* We must support XFmode loads into general registers for stdarg/vararg,
     unprototyped calls, and a rare case where a long double is passed as
     an argument after a float HFA fills the FP registers.  We split them into
     DImode loads for convenience.  We also need to support XFmode stores
     for the last case.  This case does not happen for stdarg/vararg routines,
     because we do a block store to memory of unnamed arguments.  */

  if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
    {
      rtx out[2];

      /* We're hoping to transform everything that deals with XFmode
         quantities and GR registers early in the compiler.  */
      gcc_assert (!no_new_pseudos);

      /* Struct to register can just use TImode instead.  */
      if ((GET_CODE (operands[1]) == SUBREG
           && GET_MODE (SUBREG_REG (operands[1])) == TImode)
          || (GET_CODE (operands[1]) == REG
              && GR_REGNO_P (REGNO (operands[1]))))
        {
          rtx op1 = operands[1];

          if (GET_CODE (op1) == SUBREG)
            op1 = SUBREG_REG (op1);
          else
            op1 = gen_rtx_REG (TImode, REGNO (op1));

          emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
          return true;
        }

      if (GET_CODE (operands[1]) == CONST_DOUBLE)
        {
          /* Don't word-swap when reading in the constant.  */
          emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
                          operand_subword (operands[1], WORDS_BIG_ENDIAN,
                                           0, mode));
          emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
                          operand_subword (operands[1], !WORDS_BIG_ENDIAN,
                                           0, mode));
          return true;
        }

      /* If the quantity is in a register not known to be GR, spill it.  */
      if (register_operand (operands[1], mode))
        operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);

      gcc_assert (GET_CODE (operands[1]) == MEM);

      /* Don't word-swap when reading in the value.  */
      out[0] = gen_rtx_REG (DImode, REGNO (op0));
      out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);

      emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
      emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
      return true;
    }

  if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
    {
      /* We're hoping to transform everything that deals with XFmode
         quantities and GR registers early in the compiler.  */
      gcc_assert (!no_new_pseudos);

      /* Op0 can't be a GR_REG here, as that case is handled above.
         If op0 is a register, then we spill op1, so that we now have a
         MEM operand.  This requires creating an XFmode subreg of a TImode reg
         to force the spill.  */
      if (register_operand (operands[0], mode))
        {
          rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
          op1 = gen_rtx_SUBREG (mode, op1, 0);
          operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
        }
      else
        {
          rtx in[2];

          gcc_assert (GET_CODE (operands[0]) == MEM);

          /* Don't word-swap when writing out the value.  */
          in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
          in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);

          emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
          emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
          return true;
        }
    }

  if (!reload_in_progress && !reload_completed)
    {
      operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);

      if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
        {
          rtx memt, memx, in = operands[1];
          if (CONSTANT_P (in))
            in = validize_mem (force_const_mem (mode, in));
          if (GET_CODE (in) == MEM)
            memt = adjust_address (in, TImode, 0);
          else
            {
              memt = assign_stack_temp (TImode, 16, 0);
              memx = adjust_address (memt, mode, 0);
              emit_move_insn (memx, in);
            }
          emit_move_insn (op0, memt);
          return true;
        }

      if (!ia64_move_ok (operands[0], operands[1]))
        operands[1] = force_reg (mode, operands[1]);
    }

  return false;
}

/* Emit comparison instruction if necessary, returning the expression
   that holds the compare result in the proper mode.  */

static GTY(()) rtx cmptf_libfunc;

rtx
ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
{
  rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
  rtx cmp;

  /* If we have a BImode input, then we already have a compare result, and
     do not need to emit another comparison.  */
  if (GET_MODE (op0) == BImode)
    {
      gcc_assert ((code == NE || code == EQ) && op1 == const0_rtx);
      cmp = op0;
    }
  /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
     magic number as its third argument, that indicates what to do.
     The return value is an integer to be compared against zero.  */
  else if (GET_MODE (op0) == TFmode)
    {
      enum qfcmp_magic {
        QCMP_INV = 1,   /* Raise FP_INVALID on SNaN as a side effect.  */
        QCMP_UNORD = 2,
        QCMP_EQ = 4,
        QCMP_LT = 8,
        QCMP_GT = 16
      } magic;
      enum rtx_code ncode;
      rtx ret, insns;

      gcc_assert (cmptf_libfunc && GET_MODE (op1) == TFmode);
      switch (code)
        {
          /* 1 = equal, 0 = not equal.  Equality operators do
             not raise FP_INVALID when given an SNaN operand.  */
        case EQ:        magic = QCMP_EQ;                  ncode = NE; break;
        case NE:        magic = QCMP_EQ;                  ncode = EQ; break;
          /* isunordered() from C99.  */
        case UNORDERED: magic = QCMP_UNORD;               ncode = NE; break;
        case ORDERED:   magic = QCMP_UNORD;               ncode = EQ; break;
          /* Relational operators raise FP_INVALID when given
             an SNaN operand.  */
        case LT:        magic = QCMP_LT        |QCMP_INV; ncode = NE; break;
        case LE:        magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
        case GT:        magic = QCMP_GT        |QCMP_INV; ncode = NE; break;
        case GE:        magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
          /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
             Expanders for buneq etc. would have to be added to ia64.md
             for this to be useful.  */
        default: gcc_unreachable ();
        }

      start_sequence ();

      ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
                                     op0, TFmode, op1, TFmode,
                                     GEN_INT (magic), DImode);
      cmp = gen_reg_rtx (BImode);
      emit_insn (gen_rtx_SET (VOIDmode, cmp,
                              gen_rtx_fmt_ee (ncode, BImode,
                                              ret, const0_rtx)));

      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, cmp, cmp,
                          gen_rtx_fmt_ee (code, BImode, op0, op1));
      code = NE;
    }
  else
    {
      cmp = gen_reg_rtx (BImode);
      emit_insn (gen_rtx_SET (VOIDmode, cmp,
                              gen_rtx_fmt_ee (code, BImode, op0, op1)));
      code = NE;
    }

  return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
}
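/* For reference: the _U_Qfcmp contract used above is a bitmask
   dispatch; the library returns nonzero iff any relation selected by
   the magic argument holds.  The standalone sketch below emulates
   those bit semantics for illustration only; qfcmp_emulated and the
   QCMP_DEMO_* names are made up, this is not HP-UX's implementation,
   and the QCMP_INV exception side effect is ignored.  */

#include <assert.h>
#include <math.h>

enum qfcmp_magic_demo
{
  QCMP_DEMO_INV   = 1,   /* exception side effect only; ignored here */
  QCMP_DEMO_UNORD = 2,
  QCMP_DEMO_EQ    = 4,
  QCMP_DEMO_LT    = 8,
  QCMP_DEMO_GT    = 16
};

static int
qfcmp_emulated (long double a, long double b, int magic)
{
  int r = 0;

  if ((magic & QCMP_DEMO_UNORD) && (isnan (a) || isnan (b)))
    r = 1;
  if ((magic & QCMP_DEMO_EQ) && a == b)
    r = 1;
  if ((magic & QCMP_DEMO_LT) && a < b)
    r = 1;
  if ((magic & QCMP_DEMO_GT) && a > b)
    r = 1;
  return r;
}

int
main (void)
{
  /* LE is encoded as QCMP_LT|QCMP_EQ|QCMP_INV and then tested with NE
     against zero, exactly as the switch in ia64_expand_compare sets
     it up.  */
  int le_magic = QCMP_DEMO_LT | QCMP_DEMO_EQ | QCMP_DEMO_INV;

  assert (qfcmp_emulated (1.0L, 2.0L, le_magic) != 0);
  assert (qfcmp_emulated (2.0L, 2.0L, le_magic) != 0);
  assert (qfcmp_emulated (3.0L, 2.0L, le_magic) == 0);
  return 0;
}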
/* Generate an integral vector comparison.  Return true if the condition has
   been reversed, and so the sense of the comparison should be inverted.  */

static bool
ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
                            rtx dest, rtx op0, rtx op1)
{
  bool negate = false;
  rtx x;

  /* Canonicalize the comparison to EQ, GT, GTU.  */
  switch (code)
    {
    case EQ:
    case GT:
    case GTU:
      break;

    case NE:
    case LE:
    case LEU:
      code = reverse_condition (code);
      negate = true;
      break;

    case GE:
    case GEU:
      code = reverse_condition (code);
      negate = true;
      /* FALLTHRU */

    case LT:
    case LTU:
      code = swap_condition (code);
      x = op0, op0 = op1, op1 = x;
      break;

    default:
      gcc_unreachable ();
    }

  /* Unsigned parallel compare is not supported by the hardware.  Play some
     tricks to turn this into a signed comparison against 0.  */
  if (code == GTU)
    {
      switch (mode)
        {
        case V2SImode:
          {
            rtx t1, t2, mask;

            /* Perform a parallel modulo subtraction.  */
            t1 = gen_reg_rtx (V2SImode);
            emit_insn (gen_subv2si3 (t1, op0, op1));

            /* Extract the original sign bit of op0.  */
            mask = GEN_INT (-0x80000000);
            mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
            mask = force_reg (V2SImode, mask);

            t2 = gen_reg_rtx (V2SImode);
            emit_insn (gen_andv2si3 (t2, op0, mask));

            /* XOR it back into the result of the subtraction.  This results
               in the sign bit set iff we saw unsigned underflow.  */
            x = gen_reg_rtx (V2SImode);
            emit_insn (gen_xorv2si3 (x, t1, t2));

            code = GT;
            op0 = x;
            op1 = CONST0_RTX (mode);
          }
          break;

        case V8QImode:
        case V4HImode:
          /* Perform a parallel unsigned saturating subtraction.  */
          x = gen_reg_rtx (mode);
          emit_insn (gen_rtx_SET (VOIDmode, x,
                                  gen_rtx_US_MINUS (mode, op0, op1)));

          code = EQ;
          op0 = x;
          op1 = CONST0_RTX (mode);
          negate = !negate;
          break;

        default:
          gcc_unreachable ();
        }
    }

  x = gen_rtx_fmt_ee (code, mode, op0, op1);
  emit_insn (gen_rtx_SET (VOIDmode, dest, x));

  return negate;
}
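/* The V8QImode/V4HImode branch above relies on an identity worth
   spelling out: unsigned saturating subtraction a - b is zero exactly
   when a <= b, so "a >u b" becomes an EQ-against-zero test with the
   sense inverted (hence code = EQ and the flipped NEGATE).  Below is
   a standalone scalar sketch that checks the identity exhaustively
   for 8-bit operands; us_minus_demo is an illustrative name.  */

#include <assert.h>

static unsigned char
us_minus_demo (unsigned char a, unsigned char b)
{
  /* Scalar analogue of the US_MINUS rtx: subtract with saturation
     at zero.  */
  return a > b ? (unsigned char) (a - b) : 0;
}

int
main (void)
{
  unsigned a, b;

  for (a = 0; a < 256; a++)
    for (b = 0; b < 256; b++)
      assert ((us_minus_demo ((unsigned char) a, (unsigned char) b) != 0)
              == (a > b));
  return 0;
}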
/* Emit an integral vector conditional move.  */

void
ia64_expand_vecint_cmov (rtx operands[])
{
  enum machine_mode mode = GET_MODE (operands[0]);
  enum rtx_code code = GET_CODE (operands[3]);
  bool negate;
  rtx cmp, x, ot, of;

  cmp = gen_reg_rtx (mode);
  negate = ia64_expand_vecint_compare (code, mode, cmp,
                                       operands[4], operands[5]);

  ot = operands[1+negate];
  of = operands[2-negate];

  if (ot == CONST0_RTX (mode))
    {
      if (of == CONST0_RTX (mode))
        {
          emit_move_insn (operands[0], ot);
          return;
        }

      x = gen_rtx_NOT (mode, cmp);
      x = gen_rtx_AND (mode, x, of);
      emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
    }
  else if (of == CONST0_RTX (mode))
    {
      x = gen_rtx_AND (mode, cmp, ot);
      emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
    }
  else
    {
      rtx t, f;

      t = gen_reg_rtx (mode);
      x = gen_rtx_AND (mode, cmp, operands[1+negate]);
      emit_insn (gen_rtx_SET (VOIDmode, t, x));

      f = gen_reg_rtx (mode);
      x = gen_rtx_NOT (mode, cmp);
      x = gen_rtx_AND (mode, x, operands[2-negate]);
      emit_insn (gen_rtx_SET (VOIDmode, f, x));

      x = gen_rtx_IOR (mode, t, f);
      emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
    }
}
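/* ia64_expand_vecint_cmov above builds a conditional move from plain
   bitwise operations on an all-ones-or-all-zeros compare mask:
   (cmp AND t) IOR (NOT cmp AND f).  Below is a standalone scalar
   model of that select sequence; mask_select is an illustrative
   name.  */

#include <assert.h>
#include <stdint.h>

static uint32_t
mask_select (uint32_t mask, uint32_t t, uint32_t f)
{
  /* With mask all ones this yields t, with mask all zeros it yields
     f, matching the AND/NOT-AND/IOR rtxes emitted above.  */
  return (mask & t) | (~mask & f);
}

int
main (void)
{
  assert (mask_select (0xFFFFFFFFu, 7, 9) == 7);  /* comparison true */
  assert (mask_select (0x00000000u, 7, 9) == 9);  /* comparison false */
  return 0;
}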
/* Emit an integral vector min or max operation.  Return true if all done.  */

bool
ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
                           rtx operands[])
{
  rtx xops[6];

  /* These four combinations are supported directly.  */
  if (mode == V8QImode && (code == UMIN || code == UMAX))
    return false;
  if (mode == V4HImode && (code == SMIN || code == SMAX))
    return false;

  /* This combination can be implemented with only saturating subtraction.  */
  if (mode == V4HImode && code == UMAX)
    {
      rtx x, tmp = gen_reg_rtx (mode);

      x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
      emit_insn (gen_rtx_SET (VOIDmode, tmp, x));

      emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
      return true;
    }

  /* Everything else implemented via vector comparisons.  */
  xops[0] = operands[0];
  xops[4] = xops[1] = operands[1];
  xops[5] = xops[2] = operands[2];

  switch (code)
    {
    case UMIN:
      code = LTU;
      break;
    case UMAX:
      code = GTU;
      break;
    case SMIN:
      code = LT;
      break;
    case SMAX:
      code = GT;
      break;
    default:
      gcc_unreachable ();
    }
  xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);

  ia64_expand_vecint_cmov (xops);
  return true;
}

/* Emit an integral vector widening sum operation.  */

void
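/* The V4HImode UMAX shortcut in ia64_expand_vecint_minmax above uses
   the identity max (a, b) == sat_sub (a, b) + b for unsigned
   saturating subtraction: if a > b the addition restores a, otherwise
   it is just b.  Below is a standalone scalar sketch that spot-checks
   the identity for 16-bit operands; sat_sub_demo is an illustrative
   name.  */

#include <assert.h>
#include <stdint.h>

static uint16_t
sat_sub_demo (uint16_t a, uint16_t b)
{
  /* Scalar analogue of the US_MINUS rtx used above.  */
  return a > b ? (uint16_t) (a - b) : 0;
}

int
main (void)
{
  uint32_t a, b;

  /* US_MINUS followed by addv4hi3 reproduces the unsigned maximum.  */
  for (a = 0; a < 65536; a += 251)
    for (b = 0; b < 65536; b += 257)
      assert ((uint32_t) (sat_sub_demo ((uint16_t) a, (uint16_t) b)
                          + (uint16_t) b)
              == (a > b ? a : b));
  return 0;
}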