ia64.c
From the GCC compiler for Linux (C source, page 1 of 5)
{
  return ((GET_MODE (op) == mode || mode == VOIDmode)
          && GET_CODE (op) == REG
          && REGNO (op) == AR_CCV_REGNUM);
}

/* Return 1 if this is the ar.pfs register.  */

int
ar_pfs_reg_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  return ((GET_MODE (op) == mode || mode == VOIDmode)
          && GET_CODE (op) == REG
          && REGNO (op) == AR_PFS_REGNUM);
}

/* Like general_operand, but don't allow (mem (addressof)).  */

int
general_tfmode_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (! general_operand (op, mode))
    return 0;
  if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == ADDRESSOF)
    return 0;
  return 1;
}

/* Similarly.  */

int
destination_tfmode_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (! destination_operand (op, mode))
    return 0;
  if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == ADDRESSOF)
    return 0;
  return 1;
}

/* Similarly.  */

int
tfreg_or_fp01_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == SUBREG)
    return 0;
  return fr_reg_or_fp01_operand (op, mode);
}

/* Return 1 if OP is valid as a base register in a reg + offset address.  */

int
basereg_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  /* ??? Should I copy the flag_omit_frame_pointer and cse_not_expected
     checks from pa.c basereg_operand as well?  Seems to be OK without them
     in test runs.  */

  return (register_operand (op, mode) &&
          REG_POINTER ((GET_CODE (op) == SUBREG) ? SUBREG_REG (op) : op));
}

/* Return 1 if the operands of a move are ok.  */

int
ia64_move_ok (dst, src)
     rtx dst, src;
{
  /* If we're under init_recog_no_volatile, we'll not be able to use
     memory_operand.  So check the code directly and don't worry about
     the validity of the underlying address, which should have been
     checked elsewhere anyway.  */

  if (GET_CODE (dst) != MEM)
    return 1;
  if (GET_CODE (src) == MEM)
    return 0;
  if (register_operand (src, VOIDmode))
    return 1;

  /* Otherwise, this must be a constant, and then it must be either 0,
     0.0, or 1.0.  */
  if (INTEGRAL_MODE_P (GET_MODE (dst)))
    return src == const0_rtx;
  else
    return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
}

/* Return 0 if we are doing C++ code.  This optimization fails with
   C++ because of GNAT c++/6685.  */

int
addp4_optimize_ok (op1, op2)
     rtx op1, op2;
{
  if (!strcmp (lang_hooks.name, "GNU C++"))
    return 0;

  return (basereg_operand (op1, GET_MODE (op1)) !=
          basereg_operand (op2, GET_MODE (op2)));
}

/* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
   Return the length of the field, or <= 0 on failure.  */

int
ia64_depz_field_mask (rop, rshift)
     rtx rop, rshift;
{
  unsigned HOST_WIDE_INT op = INTVAL (rop);
  unsigned HOST_WIDE_INT shift = INTVAL (rshift);

  /* Get rid of the zero bits we're shifting in.  */
  op >>= shift;

  /* We must now have a solid block of 1's at bit 0.  */
  return exact_log2 (op + 1);
}
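/* Illustrative aside (not part of ia64.c): the function above works
   because MASK >> SHIFT is a solid run of low-order 1's exactly when
   (MASK >> SHIFT) + 1 is a power of two, and the log2 of that power of
   two is the field length.  A minimal standalone sketch of the same
   check, with a hypothetical helper name: */

#include <assert.h>

/* Return the dep.z field length for MASK at SHIFT, or -1 if MASK >> SHIFT
   is not a contiguous run of 1's starting at bit 0.  */
static int
depz_field_len (unsigned long long mask, unsigned shift)
{
  unsigned long long op = mask >> shift;  /* drop the shifted-in zeros */
  int len = 0;

  if (op == 0 || (op & (op + 1)) != 0)    /* op + 1 must be a power of two */
    return -1;
  while (op >>= 1)                        /* len + 1 == log2 (op + 1) */
    len++;
  return len + 1;
}

static void
depz_field_len_check (void)
{
  assert (depz_field_len (0xff0, 4) == 8);    /* 8 ones shifted left by 4 */
  assert (depz_field_len (0xf0f0, 4) == -1);  /* 0xf0f isn't contiguous */
}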
/* Expand a symbolic constant load.  */
/* ??? Should generalize this, so that we can also support 32 bit pointers.  */

void
ia64_expand_load_address (dest, src, scratch)
      rtx dest, src, scratch;
{
  rtx temp;

  /* The destination could be a MEM during initial rtl generation,
     which isn't a valid destination for the PIC load address patterns.  */
  if (! register_operand (dest, DImode))
    {
      if (! scratch || ! register_operand (scratch, DImode))
        temp = gen_reg_rtx (DImode);
      else
        temp = scratch;
    }
  else
    temp = dest;

  if (tls_symbolic_operand (src, Pmode))
    abort ();

  if (TARGET_AUTO_PIC)
    emit_insn (gen_load_gprel64 (temp, src));
  else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FLAG (src))
    emit_insn (gen_load_fptr (temp, src));
  else if ((GET_MODE (src) == Pmode || GET_MODE (src) == ptr_mode)
           && sdata_symbolic_operand (src, VOIDmode))
    emit_insn (gen_load_gprel (temp, src));
  else if (GET_CODE (src) == CONST
           && GET_CODE (XEXP (src, 0)) == PLUS
           && GET_CODE (XEXP (XEXP (src, 0), 1)) == CONST_INT
           && (INTVAL (XEXP (XEXP (src, 0), 1)) & 0x1fff) != 0)
    {
      rtx subtarget = no_new_pseudos ? temp : gen_reg_rtx (DImode);
      rtx sym = XEXP (XEXP (src, 0), 0);
      HOST_WIDE_INT ofs, hi, lo;

      /* Split the offset into a sign extended 14-bit low part
         and a complementary high part.  */
      ofs = INTVAL (XEXP (XEXP (src, 0), 1));
      lo = ((ofs & 0x3fff) ^ 0x2000) - 0x2000;
      hi = ofs - lo;

      if (! scratch)
        scratch = no_new_pseudos ? subtarget : gen_reg_rtx (DImode);

      ia64_expand_load_address (subtarget, plus_constant (sym, hi), scratch);
      emit_insn (gen_adddi3 (temp, subtarget, GEN_INT (lo)));
    }
  else
    {
      rtx insn;

      if (! scratch)
        scratch = no_new_pseudos ? temp : gen_reg_rtx (DImode);

      insn = emit_insn (gen_load_symptr (temp, src, scratch));
#ifdef POINTERS_EXTEND_UNSIGNED
      if (GET_MODE (temp) != GET_MODE (src))
        src = convert_memory_address (GET_MODE (temp), src);
#endif
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, src, REG_NOTES (insn));
    }

  if (temp != dest)
    {
      if (GET_MODE (dest) != GET_MODE (temp))
        temp = convert_to_mode (GET_MODE (dest), temp, 0);
      emit_move_insn (dest, temp);
    }
}
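/* Illustrative aside (not part of ia64.c): the lo/hi split above sign
   extends the low 14 bits of OFS (xor with 0x2000 then subtracting
   0x2000 turns bit 13 into a sign bit), so LO lands in [-8192, 8191],
   HI = OFS - LO is a multiple of 0x4000, and HI + LO == OFS by
   construction.  A standalone sketch with hypothetical names: */

#include <assert.h>

static void
split14 (long long ofs, long long *hi, long long *lo)
{
  *lo = ((ofs & 0x3fff) ^ 0x2000) - 0x2000;  /* sign extend low 14 bits */
  *hi = ofs - *lo;                           /* complementary high part */
}

static void
split14_check (void)
{
  long long hi, lo;

  /* 0x12345 has low 14 bits 0x2345; bit 13 is set, so the sign-extended
     value is 0x2345 - 0x4000 = -0x1cbb, and hi absorbs the rest.  */
  split14 (0x12345, &hi, &lo);
  assert (lo == -0x1cbb && hi == 0x14000);  /* hi is 0x4000-aligned */
  assert (hi + lo == 0x12345);
}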
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr ()
{
  if (!gen_tls_tga)
    {
      gen_tls_tga = init_one_libfunc ("__tls_get_addr");
    }
  return gen_tls_tga;
}

static GTY(()) rtx thread_pointer_rtx;

static rtx
gen_thread_pointer ()
{
  if (!thread_pointer_rtx)
    {
      thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
      RTX_UNCHANGING_P (thread_pointer_rtx) = 1;
    }
  return thread_pointer_rtx;
}

rtx
ia64_expand_move (op0, op1)
     rtx op0, op1;
{
  enum machine_mode mode = GET_MODE (op0);

  if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
    op1 = force_reg (mode, op1);

  if (mode == Pmode || mode == ptr_mode)
    {
      enum tls_model tls_kind;
      if ((tls_kind = tls_symbolic_operand (op1, Pmode)))
        {
          rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
          rtx orig_op0 = op0;

          switch (tls_kind)
            {
            case TLS_MODEL_GLOBAL_DYNAMIC:
              start_sequence ();

              tga_op1 = gen_reg_rtx (Pmode);
              emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
              tga_op1 = gen_rtx_MEM (Pmode, tga_op1);
              RTX_UNCHANGING_P (tga_op1) = 1;

              tga_op2 = gen_reg_rtx (Pmode);
              emit_insn (gen_load_ltoff_dtprel (tga_op2, op1));
              tga_op2 = gen_rtx_MEM (Pmode, tga_op2);
              RTX_UNCHANGING_P (tga_op2) = 1;

              tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
                                                 LCT_CONST, Pmode, 2, tga_op1,
                                                 Pmode, tga_op2, Pmode);

              insns = get_insns ();
              end_sequence ();

              if (GET_MODE (op0) != Pmode)
                op0 = tga_ret;
              emit_libcall_block (insns, op0, tga_ret, op1);
              break;

            case TLS_MODEL_LOCAL_DYNAMIC:
              /* ??? This isn't the completely proper way to do local-dynamic.
                 If the call to __tls_get_addr is used only by a single symbol,
                 then we should (somehow) move the dtprel to the second arg
                 to avoid the extra add.  */
              start_sequence ();

              tga_op1 = gen_reg_rtx (Pmode);
              emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
              tga_op1 = gen_rtx_MEM (Pmode, tga_op1);
              RTX_UNCHANGING_P (tga_op1) = 1;

              tga_op2 = const0_rtx;

              tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
                                                 LCT_CONST, Pmode, 2, tga_op1,
                                                 Pmode, tga_op2, Pmode);

              insns = get_insns ();
              end_sequence ();

              tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                        UNSPEC_LD_BASE);
              tmp = gen_reg_rtx (Pmode);
              emit_libcall_block (insns, tmp, tga_ret, tga_eqv);

              if (!register_operand (op0, Pmode))
                op0 = gen_reg_rtx (Pmode);
              if (TARGET_TLS64)
                {
                  emit_insn (gen_load_dtprel (op0, op1));
                  emit_insn (gen_adddi3 (op0, tmp, op0));
                }
              else
                emit_insn (gen_add_dtprel (op0, tmp, op1));
              break;

            case TLS_MODEL_INITIAL_EXEC:
              tmp = gen_reg_rtx (Pmode);
              emit_insn (gen_load_ltoff_tprel (tmp, op1));
              tmp = gen_rtx_MEM (Pmode, tmp);
              RTX_UNCHANGING_P (tmp) = 1;
              tmp = force_reg (Pmode, tmp);

              if (!register_operand (op0, Pmode))
                op0 = gen_reg_rtx (Pmode);
              emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
              break;

            case TLS_MODEL_LOCAL_EXEC:
              if (!register_operand (op0, Pmode))
                op0 = gen_reg_rtx (Pmode);
              if (TARGET_TLS64)
                {
                  emit_insn (gen_load_tprel (op0, op1));
                  emit_insn (gen_adddi3 (op0, gen_thread_pointer (), op0));
                }
              else
                emit_insn (gen_add_tprel (op0, gen_thread_pointer (), op1));
              break;

            default:
              abort ();
            }

          if (orig_op0 == op0)
            return NULL_RTX;
          if (GET_MODE (orig_op0) == Pmode)
            return op0;
          return gen_lowpart (GET_MODE (orig_op0), op0);
        }
      else if (!TARGET_NO_PIC &&
               (symbolic_operand (op1, Pmode) ||
                symbolic_operand (op1, ptr_mode)))
        {
          /* Before optimization starts, delay committing to any particular
             type of PIC address load.  If this function gets deferred, we
             may acquire information that changes the value of the
             sdata_symbolic_operand predicate.

             But don't delay for function pointers.  Loading a function
             address actually loads the address of the descriptor, not the
             function.  If we represent these as SYMBOL_REFs, then they get
             cse'd with calls, and we end up with calls to the descriptor
             address instead of calls to the function address.  Functions
             are not candidates for sdata anyway.

             Don't delay for LABEL_REF because the splitter loses REG_LABEL
             notes.  Don't delay for pool addresses on general principles;
             they'll never become non-local behind our back.  */

          if (rtx_equal_function_value_matters
              && GET_CODE (op1) != LABEL_REF
              && ! (GET_CODE (op1) == SYMBOL_REF
                    && (SYMBOL_REF_FLAG (op1)
                        || CONSTANT_POOL_ADDRESS_P (op1)
                        || STRING_POOL_ADDRESS_P (op1))))
            {
              if (GET_MODE (op1) == DImode)
                emit_insn (gen_movdi_symbolic (op0, op1));
              else
                emit_insn (gen_movsi_symbolic (op0, op1));
            }
          else
            ia64_expand_load_address (op0, op1, NULL_RTX);
          return NULL_RTX;
        }
    }

  return op1;
}
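/* Illustrative aside (not part of ia64.c): every TLS model expanded above
   ultimately computes "thread pointer (register r13 on IA-64, per
   gen_thread_pointer) plus an offset"; the models differ only in how that
   offset is obtained (a __tls_get_addr call for the dynamic models, a
   static tprel for the exec models).  A user-level view of the feature
   this machinery implements, using GNU C's __thread: */

#include <pthread.h>
#include <stdio.h>

static __thread int counter;    /* each thread gets its own copy */

static void *
bump (void *arg)
{
  counter++;                    /* access compiled via one of the TLS models */
  printf ("thread %s: counter=%d at %p\n",
          (const char *) arg, counter, (void *) &counter);
  return NULL;
}

int
main (void)
{
  pthread_t t1, t2;

  pthread_create (&t1, NULL, bump, "A");
  pthread_create (&t2, NULL, bump, "B");
  pthread_join (t1, NULL);
  pthread_join (t2, NULL);
  return 0;     /* each thread prints counter=1, at a distinct address */
}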
/* Split a post-reload TImode reference into two DImode components.  */

rtx
ia64_split_timode (out, in, scratch)
     rtx out[2];
     rtx in, scratch;
{
  switch (GET_CODE (in))
    {
    case REG:
      out[0] = gen_rtx_REG (DImode, REGNO (in));
      out[1] = gen_rtx_REG (DImode, REGNO (in) + 1);
      return NULL_RTX;

    case MEM:
      {
        rtx base = XEXP (in, 0);

        switch (GET_CODE (base))
          {
          case REG:
            out[0] = adjust_address (in, DImode, 0);
            break;
          case POST_MODIFY:
            base = XEXP (base, 0);
            out[0] = adjust_address (in, DImode, 0);
            break;

          /* Since we're changing the mode, we need to change to POST_MODIFY
             as well to preserve the size of the increment.  Either that or
             do the update in two steps, but we've already got this scratch
             register handy so let's use it.  */
          case POST_INC:
            base = XEXP (base, 0);
            out[0]
              = change_address (in, DImode,
                                gen_rtx_POST_MODIFY
                                (Pmode, base, plus_constant (base, 16)));
            break;
          case POST_DEC:
            base = XEXP (base, 0);
            out[0]
              = change_address (in, DImode,
                                gen_rtx_POST_MODIFY
                                (Pmode, base, plus_constant (base, -16)));
            break;
          default:
            abort ();
          }

        if (scratch == NULL_RTX)
          abort ();
        out[1] = change_address (in, DImode, scratch);
        return gen_adddi3 (scratch, base, GEN_INT (8));
      }

    case CONST_INT:
    case CONST_DOUBLE:
      split_double (in, &out[0], &out[1]);
      return NULL_RTX;

    default:
      abort ();
    }
}

/* ??? Fixing GR->FR TFmode moves during reload is hard.  You need to go
   through memory plus an extra GR scratch register.  Except that you can
   either get the first from SECONDARY_MEMORY_NEEDED or the second from
   SECONDARY_RELOAD_CLASS, but not both.

   We got into problems in the first place by allowing a construct like
   (subreg:TF (reg:TI)), which we got from a union containing a long double.
   This solution attempts to prevent this situation from occurring.  When
   we see something like the above, we spill the inner register to memory.  */

rtx
spill_tfmode_operand (in, force)
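/* Illustrative aside (not part of ia64.c): for a REG, ia64_split_timode
   above pairs REGNO and REGNO + 1; for a constant, split_double produces
   the low and high words.  A minimal sketch of that word pairing for a
   little-endian 64-bit target, assuming GCC's unsigned __int128 extension
   and a hypothetical helper name: */

#include <assert.h>
#include <stdint.h>

/* Split a 128-bit value into its low and high 64-bit words, the pairing
   split_double produces when !WORDS_BIG_ENDIAN.  */
static void
split_ti (unsigned __int128 x, uint64_t out[2])
{
  out[0] = (uint64_t) x;          /* low word: first DImode piece */
  out[1] = (uint64_t) (x >> 64);  /* high word: second DImode piece */
}

static void
split_ti_check (void)
{
  uint64_t out[2];

  split_ti (((unsigned __int128) 0xdeadbeefULL << 64) | 0x1234ULL, out);
  assert (out[0] == 0x1234ULL && out[1] == 0xdeadbeefULL);
}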
