📄 alpha.c

📁 gcc-2.95.3: the most commonly used C compiler on Linux
💻 C
📖 Page 1 of 5
alpha_emit_conditional_move (cmp, mode)
     rtx cmp;
     enum machine_mode mode;
{
  enum rtx_code code = GET_CODE (cmp);
  enum rtx_code cmov_code = NE;
  rtx op0 = alpha_compare_op0;
  rtx op1 = alpha_compare_op1;
  enum machine_mode cmp_mode
    = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
  enum machine_mode cmp_op_mode = alpha_compare_fp_p ? DFmode : DImode;
  enum machine_mode cmov_mode = VOIDmode;
  rtx tem;

  if (alpha_compare_fp_p != FLOAT_MODE_P (mode))
    return 0;

  /* We may be able to use a conditional move directly.
     This avoids emitting spurious compares.  */
  if (signed_comparison_operator (cmp, cmp_op_mode)
      && (!alpha_compare_fp_p || flag_fast_math)
      && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
    return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);

  /* We can't put the comparison inside a conditional move;
     emit a compare instruction and put that inside the
     conditional move.  Make sure we emit only comparisons we have;
     swap or reverse as necessary.  */
  switch (code)
    {
    case EQ:  case LE:  case LT:  case LEU:  case LTU:
      /* We have these compares: */
      break;

    case NE:
      /* This must be reversed.  */
      code = reverse_condition (code);
      cmov_code = EQ;
      break;

    case GE:  case GT:  case GEU:  case GTU:
      /* These must be swapped.  Make sure the new first operand is in
         a register.  */
      code = swap_condition (code);
      tem = op0, op0 = op1, op1 = tem;
      op0 = force_reg (cmp_mode, op0);
      break;

    default:
      abort ();
    }

  /* ??? We mark the branch mode to be CCmode to prevent the compare
     and cmov from being combined, since the compare insn follows IEEE
     rules that the cmov does not.  */
  if (alpha_compare_fp_p && !flag_fast_math)
    cmov_mode = CCmode;

  tem = gen_reg_rtx (cmp_op_mode);
  emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
  return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
}
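/* Illustrative sketch only, not part of alpha.c: the sort of source-level
   construct the routine above helps expand.  On Alpha, a conditional
   assignment like the one below can be compiled into a compare (cmplt or
   cmptlt) followed by a conditional move (cmovxx or fcmovxx) instead of a
   conditional branch.  The function name is invented for this example.  */
static double
example_clamp_below (double x, double lo)
{
  return x < lo ? lo : x;
}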
/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
   unaligned data:

           unsigned:                       signed:
   word:   ldq_u  r1,X(r11)                ldq_u  r1,X(r11)
           ldq_u  r2,X+1(r11)              ldq_u  r2,X+1(r11)
           lda    r3,X(r11)                lda    r3,X+2(r11)
           extwl  r1,r3,r1                 extql  r1,r3,r1
           extwh  r2,r3,r2                 extqh  r2,r3,r2
           or     r1,r2,r1                 or     r1,r2,r1
                                           sra    r1,48,r1

   long:   ldq_u  r1,X(r11)                ldq_u  r1,X(r11)
           ldq_u  r2,X+3(r11)              ldq_u  r2,X+3(r11)
           lda    r3,X(r11)                lda    r3,X(r11)
           extll  r1,r3,r1                 extll  r1,r3,r1
           extlh  r2,r3,r2                 extlh  r2,r3,r2
           or     r1,r2,r1                 addl   r1,r2,r1

   quad:   ldq_u  r1,X(r11)
           ldq_u  r2,X+7(r11)
           lda    r3,X(r11)
           extql  r1,r3,r1
           extqh  r2,r3,r2
           or     r1,r2,r1
*/

void
alpha_expand_unaligned_load (tgt, mem, size, ofs, sign)
     rtx tgt, mem;
     HOST_WIDE_INT size, ofs;
     int sign;
{
  rtx meml, memh, addr, extl, exth;
  enum machine_mode mode;

  meml = gen_reg_rtx (DImode);
  memh = gen_reg_rtx (DImode);
  addr = gen_reg_rtx (DImode);
  extl = gen_reg_rtx (DImode);
  exth = gen_reg_rtx (DImode);

  emit_move_insn (meml,
                  change_address (mem, DImode,
                                  gen_rtx_AND (DImode,
                                               plus_constant (XEXP (mem, 0),
                                                              ofs),
                                               GEN_INT (-8))));

  emit_move_insn (memh,
                  change_address (mem, DImode,
                                  gen_rtx_AND (DImode,
                                               plus_constant (XEXP (mem, 0),
                                                              ofs + size - 1),
                                               GEN_INT (-8))));

  if (sign && size == 2)
    {
      emit_move_insn (addr, plus_constant (XEXP (mem, 0), ofs+2));

      emit_insn (gen_extxl (extl, meml, GEN_INT (64), addr));
      emit_insn (gen_extqh (exth, memh, addr));

      /* We must use tgt here for the target.  Alpha-vms port fails if we use
         addr for the target, because addr is marked as a pointer and combine
         knows that pointers are always sign-extended 32 bit values.  */
      addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
      addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
                           addr, 1, OPTAB_WIDEN);
    }
  else
    {
      emit_move_insn (addr, plus_constant (XEXP (mem, 0), ofs));
      emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
      switch (size)
        {
        case 2:
          emit_insn (gen_extwh (exth, memh, addr));
          mode = HImode;
          break;

        case 4:
          emit_insn (gen_extlh (exth, memh, addr));
          mode = SImode;
          break;

        case 8:
          emit_insn (gen_extqh (exth, memh, addr));
          mode = DImode;
          break;

        default:
          abort();
        }

      addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
                           gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
                           sign, OPTAB_WIDEN);
    }

  if (addr != tgt)
    emit_move_insn (tgt, gen_lowpart(GET_MODE (tgt), addr));
}
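/* Illustrative sketch only, not part of alpha.c: the computation the
   extl/exth/ior sequence above performs, written in plain C for a 4-byte
   unaligned load.  Assumptions: little-endian target, 64-bit unsigned long,
   and, exactly like ldq_u, the two aligned quadword reads may touch a few
   bytes outside the 4-byte field.  The function name is invented for this
   example.  */
static unsigned int
example_unaligned_load_32 (const unsigned char *p)
{
  unsigned long a = (unsigned long) p;
  const unsigned long *ql = (const unsigned long *) (a & ~7UL);        /* like the low ldq_u */
  const unsigned long *qh = (const unsigned long *) ((a + 3) & ~7UL);  /* like the high ldq_u */
  unsigned long sh = (a & 7) * 8;
  unsigned long lo = *ql >> sh;                   /* like extll */
  unsigned long hi = sh ? *qh << (64 - sh) : 0;   /* like extlh */
  return (unsigned int) (lo | hi);
}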
/* Similarly, use ins and msk instructions to perform unaligned stores.  */

void
alpha_expand_unaligned_store (dst, src, size, ofs)
     rtx dst, src;
     HOST_WIDE_INT size, ofs;
{
  rtx dstl, dsth, addr, insl, insh, meml, memh;

  dstl = gen_reg_rtx (DImode);
  dsth = gen_reg_rtx (DImode);
  insl = gen_reg_rtx (DImode);
  insh = gen_reg_rtx (DImode);

  meml = change_address (dst, DImode,
                         gen_rtx_AND (DImode,
                                      plus_constant (XEXP (dst, 0), ofs),
                                      GEN_INT (-8)));
  memh = change_address (dst, DImode,
                         gen_rtx_AND (DImode,
                                      plus_constant (XEXP (dst, 0),
                                                     ofs+size-1),
                                      GEN_INT (-8)));

  emit_move_insn (dsth, memh);
  emit_move_insn (dstl, meml);
  addr = copy_addr_to_reg (plus_constant (XEXP (dst, 0), ofs));

  if (src != const0_rtx)
    {
      emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
                            GEN_INT (size*8), addr));

      switch (size)
        {
        case 2:
          emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
          break;
        case 4:
          emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
          break;
        case 8:
          emit_insn (gen_insql (insl, src, addr));
          break;
        }
    }

  emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));

  switch (size)
    {
    case 2:
      emit_insn (gen_mskxl (dstl, dstl, GEN_INT (0xffff), addr));
      break;
    case 4:
      emit_insn (gen_mskxl (dstl, dstl, GEN_INT (0xffffffff), addr));
      break;
    case 8:
      {
#if HOST_BITS_PER_WIDE_INT == 32
        rtx msk = immed_double_const (0xffffffff, 0xffffffff, DImode);
#else
        rtx msk = immed_double_const (0xffffffffffffffff, 0, DImode);
#endif
        emit_insn (gen_mskxl (dstl, dstl, msk, addr));
      }
      break;
    }

  if (src != const0_rtx)
    {
      dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
      dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
    }

  /* Must store high before low for degenerate case of aligned.  */
  emit_move_insn (memh, dsth);
  emit_move_insn (meml, dstl);
}
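/* Illustrative sketch only, not part of alpha.c: the read-modify-write that
   the ins/msk sequence above performs for a 4-byte unaligned store, in
   plain C.  Same assumptions as the load sketch (little-endian, 64-bit
   unsigned long).  Note the high quadword is written before the low one,
   as in the code above, so that the fully aligned case, where both
   quadwords are the same memory, still ends with the correct value.  The
   function name is invented for this example.  */
static void
example_unaligned_store_32 (unsigned char *p, unsigned int val)
{
  unsigned long a = (unsigned long) p;
  unsigned long *ql = (unsigned long *) (a & ~7UL);
  unsigned long *qh = (unsigned long *) ((a + 3) & ~7UL);
  unsigned long sh = (a & 7) * 8;
  unsigned long mask_lo = 0xffffffffUL << sh;                  /* bytes mskll clears */
  unsigned long mask_hi = sh ? 0xffffffffUL >> (64 - sh) : 0;  /* bytes msklh clears */
  unsigned long ins_lo = (unsigned long) val << sh;            /* like insll */
  unsigned long ins_hi = sh ? (unsigned long) val >> (64 - sh) : 0;
  unsigned long hi = (*qh & ~mask_hi) | ins_hi;
  unsigned long lo = (*ql & ~mask_lo) | ins_lo;
  *qh = hi;  /* store high before low */
  *ql = lo;
}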
/* The block move code tries to maximize speed by separating loads and
   stores at the expense of register pressure: we load all of the data
   before we store it back out.  There are two secondary effects worth
   mentioning, that this speeds copying to/from aligned and unaligned
   buffers, and that it makes the code significantly easier to write.  */

#define MAX_MOVE_WORDS	8

/* Load an integral number of consecutive unaligned quadwords.  */

static void
alpha_expand_unaligned_load_words (out_regs, smem, words, ofs)
     rtx *out_regs;
     rtx smem;
     HOST_WIDE_INT words, ofs;
{
  rtx const im8 = GEN_INT (-8);
  rtx const i64 = GEN_INT (64);
  rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
  rtx sreg, areg;
  HOST_WIDE_INT i;

  /* Generate all the tmp registers we need.  */
  for (i = 0; i < words; ++i)
    {
      data_regs[i] = out_regs[i];
      ext_tmps[i] = gen_reg_rtx (DImode);
    }
  data_regs[words] = gen_reg_rtx (DImode);

  if (ofs != 0)
    smem = change_address (smem, GET_MODE (smem),
                           plus_constant (XEXP (smem, 0), ofs));

  /* Load up all of the source data.  */
  for (i = 0; i < words; ++i)
    {
      emit_move_insn (data_regs[i],
                      change_address (smem, DImode,
                                      gen_rtx_AND (DImode,
                                                   plus_constant (XEXP(smem,0),
                                                                  8*i),
                                                   im8)));
    }
  emit_move_insn (data_regs[words],
                  change_address (smem, DImode,
                                  gen_rtx_AND (DImode,
                                               plus_constant (XEXP(smem,0),
                                                              8*words - 1),
                                               im8)));

  /* Extract the half-word fragments.  Unfortunately DEC decided to make
     extxh with offset zero a noop instead of zeroing the register, so
     we must take care of that edge condition ourselves with cmov.  */
  sreg = copy_addr_to_reg (XEXP (smem, 0));
  areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
                       1, OPTAB_WIDEN);
  for (i = 0; i < words; ++i)
    {
      emit_insn (gen_extxl (data_regs[i], data_regs[i], i64, sreg));

      emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
      emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
                              gen_rtx_IF_THEN_ELSE (DImode,
                                                    gen_rtx_EQ (DImode, areg,
                                                                const0_rtx),
                                                    const0_rtx, ext_tmps[i])));
    }

  /* Merge the half-words into whole words.  */
  for (i = 0; i < words; ++i)
    {
      out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
                                  ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
    }
}

/* Store an integral number of consecutive unaligned quadwords.  DATA_REGS
   may be NULL to store zeros.  */

static void
alpha_expand_unaligned_store_words (data_regs, dmem, words, ofs)
     rtx *data_regs;
     rtx dmem;
     HOST_WIDE_INT words, ofs;
{
  rtx const im8 = GEN_INT (-8);
  rtx const i64 = GEN_INT (64);
#if HOST_BITS_PER_WIDE_INT == 32
  rtx const im1 = immed_double_const (0xffffffff, 0xffffffff, DImode);
#else
  rtx const im1 = immed_double_const (0xffffffffffffffff, 0, DImode);
#endif
  rtx ins_tmps[MAX_MOVE_WORDS];
  rtx st_tmp_1, st_tmp_2, dreg;
  rtx st_addr_1, st_addr_2;
  HOST_WIDE_INT i;

  /* Generate all the tmp registers we need.  */
  if (data_regs != NULL)
    for (i = 0; i < words; ++i)
      ins_tmps[i] = gen_reg_rtx(DImode);
  st_tmp_1 = gen_reg_rtx(DImode);
  st_tmp_2 = gen_reg_rtx(DImode);

  if (ofs != 0)
    dmem = change_address (dmem, GET_MODE (dmem),
                           plus_constant (XEXP (dmem, 0), ofs));

  st_addr_2 = change_address (dmem, DImode,
                              gen_rtx_AND (DImode,
                                           plus_constant (XEXP(dmem,0),
                                                          words*8 - 1),
                                           im8));
  st_addr_1 = change_address (dmem, DImode,
                              gen_rtx_AND (DImode,
                                           XEXP (dmem, 0),
                                           im8));

  /* Load up the destination end bits.  */
  emit_move_insn (st_tmp_2, st_addr_2);
  emit_move_insn (st_tmp_1, st_addr_1);

  /* Shift the input data into place.  */
  dreg = copy_addr_to_reg (XEXP (dmem, 0));
  if (data_regs != NULL)
    {
      for (i = words-1; i >= 0; --i)
        {
          emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
          emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
        }
      for (i = words-1; i > 0; --i)
        {
          ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
                                        ins_tmps[i-1], ins_tmps[i-1], 1,
                                        OPTAB_WIDEN);
        }
    }

  /* Split and merge the ends with the destination data.  */
  emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
  emit_insn (gen_mskxl (st_tmp_1, st_tmp_1, im1, dreg));

  if (data_regs != NULL)
    {
      st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
                               st_tmp_2, 1, OPTAB_WIDEN);
      st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
                               st_tmp_1, 1, OPTAB_WIDEN);
    }

  /* Store it all.  */
  emit_move_insn (st_addr_2, st_tmp_2);
  for (i = words-1; i > 0; --i)
    {
      emit_move_insn (change_address (dmem, DImode,
                                      gen_rtx_AND (DImode,
                                                   plus_constant(XEXP (dmem,0),
                                                                 i*8),
                                                   im8)),
                      data_regs ? ins_tmps[i-1] : const0_rtx);
    }
  emit_move_insn (st_addr_1, st_tmp_1);
}
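/* Illustrative sketch only, not part of alpha.c: the per-word value the
   extxl/extqh/cmov/ior loop above computes, in plain C.  `q` points at the
   words+1 aligned quadwords covering the unaligned source, and `byte_ofs`
   is the source address modulo 8.  The `sh ? ... : 0` mirrors the cmov
   that works around extqh with offset zero being a noop.  Assumptions as
   in the earlier sketches; the function name is invented for this
   example.  */
static void
example_merge_unaligned_words (unsigned long *out, const unsigned long *q,
                               long words, unsigned long byte_ofs)
{
  unsigned long sh = byte_ofs * 8;
  long i;
  for (i = 0; i < words; ++i)
    out[i] = (q[i] >> sh) | (sh ? q[i + 1] << (64 - sh) : 0);
}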
/* Expand string/block move operations.

   operands[0] is the pointer to the destination.
   operands[1] is the pointer to the source.
   operands[2] is the number of bytes to move.
   operands[3] is the alignment.  */

int
alpha_expand_block_move (operands)
     rtx operands[];
{
  rtx bytes_rtx = operands[2];
  rtx align_rtx = operands[3];
  HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  HOST_WIDE_INT bytes = orig_bytes;
  HOST_WIDE_INT src_align = INTVAL (align_rtx);
  HOST_WIDE_INT dst_align = src_align;
  rtx orig_src = operands[1];
  rtx orig_dst = operands[0];
  rtx data_regs[2*MAX_MOVE_WORDS+16];
  rtx tmp;
  int i, words, ofs, nregs = 0;

  if (bytes <= 0)
    return 1;
  if (bytes > MAX_MOVE_WORDS*8)
    return 0;

  /* Look for additional alignment information from recorded register info.  */

  tmp = XEXP (orig_src, 0);
  if (GET_CODE (tmp) == REG)
    {
      if (REGNO_POINTER_ALIGN (REGNO (tmp)) > src_align)
        src_align = REGNO_POINTER_ALIGN (REGNO (tmp));
    }
  else if (GET_CODE (tmp) == PLUS
           && GET_CODE (XEXP (tmp, 0)) == REG
           && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
    {
      HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > src_align)
        {
          if (a >= 8 && c % 8 == 0)
            src_align = 8;
          else if (a >= 4 && c % 4 == 0)
            src_align = 4;
          else if (a >= 2 && c % 2 == 0)
            src_align = 2;
        }
    }

  tmp = XEXP (orig_dst, 0);
  if (GET_CODE (tmp) == REG)
    {
      if (REGNO_POINTER_ALIGN (REGNO (tmp)) > dst_align)
        dst_align = REGNO_POINTER_ALIGN (REGNO (tmp));
    }
  else if (GET_CODE (tmp) == PLUS
           && GET_CODE (XEXP (tmp, 0)) == REG
           && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
    {
      HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
      int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));

      if (a > dst_align)
        {
          if (a >= 8 && c % 8 == 0)
            dst_align = 8;
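/* Illustrative sketch only, not part of alpha.c: the alignment refinement
   performed just above, in isolation.  Given a pointer known to be `a`
   bytes aligned plus a constant offset `c`, the sum is aligned to the
   largest of 8, 4, or 2 that divides both; otherwise no alignment is
   assumed.  The function name is invented for this example.  */
static int
example_refined_align (int a, long c)
{
  if (a >= 8 && c % 8 == 0)
    return 8;
  if (a >= 4 && c % 4 == 0)
    return 4;
  if (a >= 2 && c % 2 == 0)
    return 2;
  return 1;
}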
