
📄 ia64.c

📁 Mac OS X 10.4.9 for x86 Source Code (gcc implementation source)
💻 C
📖 Page 1 of 5
     do not need to emit another comparison.  */
  if (GET_MODE (op0) == BImode)
    {
      if ((code == NE || code == EQ) && op1 == const0_rtx)
        cmp = op0;
      else
        abort ();
    }
  /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
     magic number as its third argument, that indicates what to do.
     The return value is an integer to be compared against zero.  */
  else if (GET_MODE (op0) == TFmode)
    {
      enum qfcmp_magic {
        QCMP_INV = 1,	/* Raise FP_INVALID on SNaN as a side effect.  */
        QCMP_UNORD = 2,
        QCMP_EQ = 4,
        QCMP_LT = 8,
        QCMP_GT = 16
      } magic;
      enum rtx_code ncode;
      rtx ret, insns;

      if (!cmptf_libfunc || GET_MODE (op1) != TFmode)
        abort ();
      switch (code)
        {
          /* 1 = equal, 0 = not equal.  Equality operators do
             not raise FP_INVALID when given an SNaN operand.  */
        case EQ:        magic = QCMP_EQ;                  ncode = NE; break;
        case NE:        magic = QCMP_EQ;                  ncode = EQ; break;
          /* isunordered() from C99.  */
        case UNORDERED: magic = QCMP_UNORD;               ncode = NE; break;
        case ORDERED:   magic = QCMP_UNORD;               ncode = EQ; break;
          /* Relational operators raise FP_INVALID when given
             an SNaN operand.  */
        case LT:        magic = QCMP_LT        |QCMP_INV; ncode = NE; break;
        case LE:        magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
        case GT:        magic = QCMP_GT        |QCMP_INV; ncode = NE; break;
        case GE:        magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
          /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
             Expanders for buneq etc. would have to be added to ia64.md
             for this to be useful.  */
        default: abort ();
        }

      start_sequence ();

      ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
                                     op0, TFmode, op1, TFmode,
                                     GEN_INT (magic), DImode);
      cmp = gen_reg_rtx (BImode);
      emit_insn (gen_rtx_SET (VOIDmode, cmp,
                              gen_rtx_fmt_ee (ncode, BImode,
                                              ret, const0_rtx)));

      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, cmp, cmp,
                          gen_rtx_fmt_ee (code, BImode, op0, op1));
      code = NE;
    }
  else
    {
      cmp = gen_reg_rtx (BImode);
      emit_insn (gen_rtx_SET (VOIDmode, cmp,
                              gen_rtx_fmt_ee (code, BImode, op0, op1)));
      code = NE;
    }

  return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
}
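/* [Editor's note: the sketch below is not part of the original ia64.c.  The
   _U_Qfcmp protocol above encodes the requested relation as a bit mask
   (QCMP_*) and returns an integer that the caller tests against zero with
   NCODE.  A minimal sketch of how such a routine could decode that mask is
   shown here; the helper name and the use of long double and a GCC builtin
   are illustrative assumptions, not HP's actual library implementation.  */
#if 0  /* Illustrative sketch only.  */
static int
qfcmp_sketch (long double a, long double b, int magic)
{
  /* QCMP_UNORD/EQ/LT/GT select which relations count as "true";
     QCMP_INV only controls whether an SNaN operand raises FP_INVALID,
     which this sketch does not model.  */
  if (__builtin_isunordered (a, b))
    return (magic & 2 /* QCMP_UNORD */) != 0;
  if ((magic & 4 /* QCMP_EQ */) && a == b)
    return 1;
  if ((magic & 8 /* QCMP_LT */) && a < b)
    return 1;
  if ((magic & 16 /* QCMP_GT */) && a > b)
    return 1;
  return 0;
}
#endif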
/* Generate an integral vector comparison.  */

static bool
ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
                            rtx dest, rtx op0, rtx op1)
{
  bool negate = false;
  rtx x;

  switch (code)
    {
    case EQ:
    case GT:
      break;

    case NE:
      code = EQ;
      negate = true;
      break;

    case LE:
      code = GT;
      negate = true;
      break;

    case GE:
      negate = true;
      /* FALLTHRU */

    case LT:
      x = op0;
      op0 = op1;
      op1 = x;
      code = GT;
      break;

    case GTU:
    case GEU:
    case LTU:
    case LEU:
      {
        rtx w0h, w0l, w1h, w1l, ch, cl;
        enum machine_mode wmode;
        rtx (*unpack_l) (rtx, rtx, rtx);
        rtx (*unpack_h) (rtx, rtx, rtx);
        rtx (*pack) (rtx, rtx, rtx);

        /* We don't have native unsigned comparisons, but we can generate
           them better than generic code can.  */

        if (mode == V2SImode)
          abort ();
        else if (mode == V8QImode)
          {
            wmode = V4HImode;
            pack = gen_pack2_sss;
            unpack_l = gen_unpack1_l;
            unpack_h = gen_unpack1_h;
          }
        else if (mode == V4HImode)
          {
            wmode = V2SImode;
            pack = gen_pack4_sss;
            unpack_l = gen_unpack2_l;
            unpack_h = gen_unpack2_h;
          }
        else
          abort ();

        /* Unpack into wider vectors, zero extending the elements.  */

        w0l = gen_reg_rtx (wmode);
        w0h = gen_reg_rtx (wmode);
        w1l = gen_reg_rtx (wmode);
        w1h = gen_reg_rtx (wmode);
        emit_insn (unpack_l (gen_lowpart (mode, w0l), op0, CONST0_RTX (mode)));
        emit_insn (unpack_h (gen_lowpart (mode, w0h), op0, CONST0_RTX (mode)));
        emit_insn (unpack_l (gen_lowpart (mode, w1l), op1, CONST0_RTX (mode)));
        emit_insn (unpack_h (gen_lowpart (mode, w1h), op1, CONST0_RTX (mode)));

        /* Compare in the wider mode.  */

        cl = gen_reg_rtx (wmode);
        ch = gen_reg_rtx (wmode);
        code = signed_condition (code);
        ia64_expand_vecint_compare (code, wmode, cl, w0l, w1l);
        negate = ia64_expand_vecint_compare (code, wmode, ch, w0h, w1h);

        /* Repack into a single narrower vector.  */

        emit_insn (pack (dest, cl, ch));
      }
      return negate;

    default:
      abort ();
    }

  x = gen_rtx_fmt_ee (code, mode, op0, op1);
  emit_insn (gen_rtx_SET (VOIDmode, dest, x));

  return negate;
}

static void
ia64_expand_vcondu_v2si (enum rtx_code code, rtx operands[])
{
  rtx dl, dh, bl, bh, op1l, op1h, op2l, op2h, op4l, op4h, op5l, op5h, x;

  /* In this case, we extract the two SImode quantities and generate
     normal comparisons for each of them.  */

  op1l = gen_lowpart (SImode, operands[1]);
  op2l = gen_lowpart (SImode, operands[2]);
  op4l = gen_lowpart (SImode, operands[4]);
  op5l = gen_lowpart (SImode, operands[5]);

  op1h = gen_reg_rtx (SImode);
  op2h = gen_reg_rtx (SImode);
  op4h = gen_reg_rtx (SImode);
  op5h = gen_reg_rtx (SImode);

  emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op1h),
                          gen_lowpart (DImode, operands[1]), GEN_INT (32)));
  emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op2h),
                          gen_lowpart (DImode, operands[2]), GEN_INT (32)));
  emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op4h),
                          gen_lowpart (DImode, operands[4]), GEN_INT (32)));
  emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op5h),
                          gen_lowpart (DImode, operands[5]), GEN_INT (32)));

  bl = gen_reg_rtx (BImode);
  x = gen_rtx_fmt_ee (code, BImode, op4l, op5l);
  emit_insn (gen_rtx_SET (VOIDmode, bl, x));

  bh = gen_reg_rtx (BImode);
  x = gen_rtx_fmt_ee (code, BImode, op4h, op5h);
  emit_insn (gen_rtx_SET (VOIDmode, bh, x));

  /* With the results of the comparisons, emit conditional moves.  */

  dl = gen_reg_rtx (SImode);
  x = gen_rtx_IF_THEN_ELSE (SImode, bl, op1l, op2l);
  emit_insn (gen_rtx_SET (VOIDmode, dl, x));

  dh = gen_reg_rtx (SImode);
  x = gen_rtx_IF_THEN_ELSE (SImode, bh, op1h, op2h);
  emit_insn (gen_rtx_SET (VOIDmode, dh, x));

  /* Merge the two partial results back into a vector.  */

  x = gen_rtx_VEC_CONCAT (V2SImode, dl, dh);
  emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
}
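/* [Editor's note: the sketch below is not part of the original ia64.c.  The
   GTU/GEU/LTU/LEU case of ia64_expand_vecint_compare above has no native
   unsigned vector compare, so it zero-extends both operands into a wider
   element mode and compares them signed: every zero-extended value is
   non-negative, so the signed ordering agrees with the unsigned one.  A
   scalar sketch of the same idea, assuming 8-bit elements widened to 16:  */
#if 0  /* Illustrative sketch only.  */
#include <stdint.h>

static int
gtu_via_widening (uint8_t a, uint8_t b)
{
  int16_t wa = (int16_t) a;  /* Zero extension: wa and wb are always >= 0.  */
  int16_t wb = (int16_t) b;
  return wa > wb;            /* Signed GT now yields the unsigned GT result.  */
}
#endif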
/* Emit an integral vector conditional move.  */

void
ia64_expand_vecint_cmov (rtx operands[])
{
  enum machine_mode mode = GET_MODE (operands[0]);
  enum rtx_code code = GET_CODE (operands[3]);
  bool negate;
  rtx cmp, x, ot, of;

  /* Since we don't have unsigned V2SImode comparisons, it's more efficient
     to special-case them entirely.  */
  if (mode == V2SImode
      && (code == GTU || code == GEU || code == LEU || code == LTU))
    {
      ia64_expand_vcondu_v2si (code, operands);
      return;
    }

  cmp = gen_reg_rtx (mode);
  negate = ia64_expand_vecint_compare (code, mode, cmp,
                                       operands[4], operands[5]);

  ot = operands[1+negate];
  of = operands[2-negate];

  if (ot == CONST0_RTX (mode))
    {
      if (of == CONST0_RTX (mode))
        {
          emit_move_insn (operands[0], ot);
          return;
        }

      x = gen_rtx_NOT (mode, cmp);
      x = gen_rtx_AND (mode, x, of);
      emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
    }
  else if (of == CONST0_RTX (mode))
    {
      x = gen_rtx_AND (mode, cmp, ot);
      emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
    }
  else
    {
      rtx t, f;

      t = gen_reg_rtx (mode);
      x = gen_rtx_AND (mode, cmp, operands[1+negate]);
      emit_insn (gen_rtx_SET (VOIDmode, t, x));

      f = gen_reg_rtx (mode);
      x = gen_rtx_NOT (mode, cmp);
      x = gen_rtx_AND (mode, x, operands[2-negate]);
      emit_insn (gen_rtx_SET (VOIDmode, f, x));

      x = gen_rtx_IOR (mode, t, f);
      emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
    }
}

/* Emit an integral vector min or max operation.  Return true if all done.  */

bool
ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
                           rtx operands[])
{
  rtx xops[6];	/* ia64_expand_vecint_cmov consumes six operands.  */

  /* These four combinations are supported directly.  */
  if (mode == V8QImode && (code == UMIN || code == UMAX))
    return false;
  if (mode == V4HImode && (code == SMIN || code == SMAX))
    return false;

  /* Everything else implemented via vector comparisons.  */
  xops[0] = operands[0];
  xops[4] = xops[1] = operands[1];
  xops[5] = xops[2] = operands[2];

  switch (code)
    {
    case UMIN:
      code = LTU;
      break;
    case UMAX:
      code = GTU;
      break;
    case SMIN:
      code = LT;
      break;
    case SMAX:
      code = GT;
      break;
    default:
      abort ();
    }
  xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);

  ia64_expand_vecint_cmov (xops);
  return true;
}
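/* [Editor's note: the sketch below is not part of the original ia64.c.
   ia64_expand_vecint_cmov above relies on the vector comparison leaving each
   element as an all-zeros or all-ones mask, so the select reduces to
   AND / AND-NOT / OR (with the true and false operands swapped whenever the
   compare had to be negated).  A scalar sketch of that mask-based select:  */
#if 0  /* Illustrative sketch only.  */
#include <stdint.h>

static uint16_t
select_via_mask (uint16_t mask, uint16_t on_true, uint16_t on_false)
{
  /* MASK is assumed to be 0x0000 or 0xffff, as produced per element by the
     vector comparison.  */
  return (uint16_t) ((mask & on_true) | (~mask & on_false));
}
#endif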
/* Emit the appropriate sequence for a call.  */

void
ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
                  int sibcall_p)
{
  rtx insn, b0;

  addr = XEXP (addr, 0);
  addr = convert_memory_address (DImode, addr);
  b0 = gen_rtx_REG (DImode, R_BR (0));

  /* ??? Should do this for functions known to bind local too.  */
  if (TARGET_NO_PIC || TARGET_AUTO_PIC)
    {
      if (sibcall_p)
        insn = gen_sibcall_nogp (addr);
      else if (! retval)
        insn = gen_call_nogp (addr, b0);
      else
        insn = gen_call_value_nogp (retval, addr, b0);
      insn = emit_call_insn (insn);
    }
  else
    {
      if (sibcall_p)
        insn = gen_sibcall_gp (addr);
      else if (! retval)
        insn = gen_call_gp (addr, b0);
      else
        insn = gen_call_value_gp (retval, addr, b0);
      insn = emit_call_insn (insn);

      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
    }

  if (sibcall_p)
    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
}

void
ia64_reload_gp (void)
{
  rtx tmp;

  if (current_frame_info.reg_save_gp)
    tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
  else
    {
      HOST_WIDE_INT offset;

      offset = (current_frame_info.spill_cfa_off
                + current_frame_info.spill_size);
      if (frame_pointer_needed)
        {
          tmp = hard_frame_pointer_rtx;
          offset = -offset;
        }
      else
        {
          tmp = stack_pointer_rtx;
          offset = current_frame_info.total_size - offset;
        }

      if (CONST_OK_FOR_I (offset))
        emit_insn (gen_adddi3 (pic_offset_table_rtx,
                               tmp, GEN_INT (offset)));
      else
        {
          emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
          emit_insn (gen_adddi3 (pic_offset_table_rtx,
                                 pic_offset_table_rtx, tmp));
        }

      tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
    }

  emit_move_insn (pic_offset_table_rtx, tmp);
}

void
ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
                 rtx scratch_b, int noreturn_p, int sibcall_p)
{
  rtx insn;
  bool is_desc = false;

  /* If we find we're calling through a register, then we're actually
     calling through a descriptor, so load up the values.  */
  if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
    {
      rtx tmp;
      bool addr_dead_p;

      /* ??? We are currently constrained to *not* use peep2, because
         we can legitimately change the global lifetime of the GP
         (in the form of killing where previously live).  This is
         because a call through a descriptor doesn't use the previous
         value of the GP, while a direct call does, and we do not
         commit to either form until the split here.

         That said, this means that we lack precise life info for
         whether ADDR is dead after this call.  This is not terribly
         important, since we can fix things up essentially for free
