
📄 arm.c

📁 GCC compiler under Linux
💻 C
📖 Page 1 of 5
    }

  switch (code)
    {
    case SET:
      /* See if we can do this by sign_extending a constant that is known
         to be negative.  This is a good way of doing it, since the shift
         may well merge into a subsequent insn.  */
      if (set_sign_bit_copies > 1)
        {
          if (const_ok_for_arm
              (temp1 = ARM_SIGN_EXTEND (remainder
                                        << (set_sign_bit_copies - 1))))
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
                  emit_insn (gen_rtx_SET (VOIDmode, new_src,
                                          GEN_INT (temp1)));
                  emit_insn (gen_ashrsi3 (target, new_src,
                                          GEN_INT (set_sign_bit_copies - 1)));
                }
              return 2;
            }
          /* For an inverted constant, we will need to set the low bits,
             these will be shifted out of harm's way.  */
          temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
          if (const_ok_for_arm (~temp1))
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
                  emit_insn (gen_rtx_SET (VOIDmode, new_src,
                                          GEN_INT (temp1)));
                  emit_insn (gen_ashrsi3 (target, new_src,
                                          GEN_INT (set_sign_bit_copies - 1)));
                }
              return 2;
            }
        }

      /* See if we can generate this by setting the bottom (or the top)
         16 bits, and then shifting these into the other half of the
         word.  We only look for the simplest cases, to do more would cost
         too much.  Be careful, however, not to generate this when the
         alternative would take fewer insns.  */
      if (val & 0xffff0000)
        {
          temp1 = remainder & 0xffff0000;
          temp2 = remainder & 0x0000ffff;

          /* Overlaps outside this range are best done using other methods.  */
          for (i = 9; i < 24; i++)
            {
              if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
                  && !const_ok_for_arm (temp2))
                {
                  rtx new_src = (subtargets
                                 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
                                 : target);
                  insns = arm_gen_constant (code, mode, temp2, new_src,
                                            source, subtargets, generate);
                  source = new_src;
                  if (generate)
                    emit_insn (gen_rtx_SET
                               (VOIDmode, target,
                                gen_rtx_IOR (mode,
                                             gen_rtx_ASHIFT (mode, source,
                                                             GEN_INT (i)),
                                             source)));
                  return insns + 1;
                }
            }

          /* Don't duplicate cases already considered.  */
          for (i = 17; i < 24; i++)
            {
              if (((temp1 | (temp1 >> i)) == remainder)
                  && !const_ok_for_arm (temp1))
                {
                  rtx new_src = (subtargets
                                 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
                                 : target);
                  insns = arm_gen_constant (code, mode, temp1, new_src,
                                            source, subtargets, generate);
                  source = new_src;
                  if (generate)
                    emit_insn
                      (gen_rtx_SET (VOIDmode, target,
                                    gen_rtx_IOR
                                    (mode,
                                     gen_rtx_LSHIFTRT (mode, source,
                                                       GEN_INT (i)),
                                     source)));
                  return insns + 1;
                }
            }
        }
      break;

    case IOR:
    case XOR:
      /* If we have IOR or XOR, and the constant can be loaded in a
         single instruction, and we can find a temporary to put it in,
         then this can be done in two instructions instead of 3-4.  */
      if (subtargets
          /* TARGET can't be NULL if SUBTARGETS is 0 */
          || (reload_completed && !reg_mentioned_p (target, source)))
        {
          if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
            {
              if (generate)
                {
                  rtx sub = subtargets ? gen_reg_rtx (mode) : target;

                  emit_insn (gen_rtx_SET (VOIDmode, sub, GEN_INT (val)));
                  emit_insn (gen_rtx_SET (VOIDmode, target,
                                          gen_rtx (code, mode, source, sub)));
                }
              return 2;
            }
        }

      if (code == XOR)
        break;

      if (set_sign_bit_copies > 8
          && (val & (-1 << (32 - set_sign_bit_copies))) == val)
        {
          if (generate)
            {
              rtx sub = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (set_sign_bit_copies);

              emit_insn (gen_rtx_SET (VOIDmode, sub,
                                      gen_rtx_NOT (mode,
                                                   gen_rtx_ASHIFT (mode,
                                                                   source,
                                                                   shift))));
              emit_insn (gen_rtx_SET (VOIDmode, target,
                                      gen_rtx_NOT (mode,
                                                   gen_rtx_LSHIFTRT (mode, sub,
                                                                     shift))));
            }
          return 2;
        }

      if (set_zero_bit_copies > 8
          && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
        {
          if (generate)
            {
              rtx sub = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (set_zero_bit_copies);

              emit_insn (gen_rtx_SET (VOIDmode, sub,
                                      gen_rtx_NOT (mode,
                                                   gen_rtx_LSHIFTRT (mode,
                                                                     source,
                                                                     shift))));
              emit_insn (gen_rtx_SET (VOIDmode, target,
                                      gen_rtx_NOT (mode,
                                                   gen_rtx_ASHIFT (mode, sub,
                                                                   shift))));
            }
          return 2;
        }

      if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
        {
          if (generate)
            {
              rtx sub = subtargets ? gen_reg_rtx (mode) : target;
              emit_insn (gen_rtx_SET (VOIDmode, sub,
                                      gen_rtx_NOT (mode, source)));
              source = sub;
              if (subtargets)
                sub = gen_reg_rtx (mode);
              emit_insn (gen_rtx_SET (VOIDmode, sub,
                                      gen_rtx_AND (mode, source,
                                                   GEN_INT (temp1))));
              emit_insn (gen_rtx_SET (VOIDmode, target,
                                      gen_rtx_NOT (mode, sub)));
            }
          return 3;
        }
      break;

    case AND:
      /* See if two shifts will do 2 or more insn's worth of work.  */
      if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
        {
          HOST_WIDE_INT shift_mask = ((0xffffffff
                                       << (32 - clear_sign_bit_copies))
                                      & 0xffffffff);

          if ((remainder | shift_mask) != 0xffffffff)
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
                  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
                                            new_src, source, subtargets, 1);
                  source = new_src;
                }
              else
                {
                  rtx targ = subtargets ? NULL_RTX : target;
                  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
                                            targ, source, subtargets, 0);
                }
            }

          if (generate)
            {
              rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (clear_sign_bit_copies);

              emit_insn (gen_ashlsi3 (new_src, source, shift));
              emit_insn (gen_lshrsi3 (target, new_src, shift));
            }

          return insns + 2;
        }

      if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
        {
          HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;

          if ((remainder | shift_mask) != 0xffffffff)
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
                  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
                                            new_src, source, subtargets, 1);
                  source = new_src;
                }
              else
                {
                  rtx targ = subtargets ? NULL_RTX : target;
                  insns = arm_gen_constant (AND, mode, remainder | shift_mask,
                                            targ, source, subtargets, 0);
                }
            }

          if (generate)
            {
              rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (clear_zero_bit_copies);

              emit_insn (gen_lshrsi3 (new_src, source, shift));
              emit_insn (gen_ashlsi3 (target, new_src, shift));
            }

          return insns + 2;
        }
      break;

    default:
      break;
    }

  for (i = 0; i < 32; i++)
    if (remainder & (1 << i))
      num_bits_set++;

  if (code == AND || (can_invert && num_bits_set > 16))
    remainder = (~remainder) & 0xffffffff;
  else if (code == PLUS && num_bits_set > 16)
    remainder = (-remainder) & 0xffffffff;
  else
    {
      can_invert = 0;
      can_negate = 0;
    }

  /* Now try and find a way of doing the job in either two or three
     instructions.
     We start by looking for the largest block of zeros that are aligned on
     a 2-bit boundary, we then fill up the temps, wrapping around to the
     top of the word when we drop off the bottom.
     In the worst case this code should produce no more than four insns.  */
  {
    int best_start = 0;
    int best_consecutive_zeros = 0;

    for (i = 0; i < 32; i += 2)
      {
        int consecutive_zeros = 0;

        if (!(remainder & (3 << i)))
          {
            while ((i < 32) && !(remainder & (3 << i)))
              {
                consecutive_zeros += 2;
                i += 2;
              }
            if (consecutive_zeros > best_consecutive_zeros)
              {
                best_consecutive_zeros = consecutive_zeros;
                best_start = i - consecutive_zeros;
              }
            i -= 2;
          }
      }

    /* So long as it won't require any more insns to do so, it's
       desirable to emit a small constant (in bits 0...9) in the last
       insn.  This way there is more chance that it can be combined with
       a later addressing insn to form a pre-indexed load or store
       operation.  Consider:

               *((volatile int *)0xe0000100) = 1;
               *((volatile int *)0xe0000110) = 2;

       We want this to wind up as:

                mov rA, #0xe0000000
                mov rB, #1
                str rB, [rA, #0x100]
                mov rB, #2
                str rB, [rA, #0x110]

       rather than having to synthesize both large constants from scratch.

       Therefore, we calculate how many insns would be required to emit
       the constant starting from `best_start', and also starting from
       zero (ie with bit 31 first to be output).  If `best_start' doesn't
       yield a shorter sequence, we may as well use zero.  */
    if (best_start != 0
        && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
        && (count_insns_for_constant (remainder, 0) <=
            count_insns_for_constant (remainder, best_start)))
      best_start = 0;

    /* Now start emitting the insns.  */
    i = best_start;
    do
      {
        int end;

        if (i <= 0)
          i += 32;
        if (remainder & (3 << (i - 2)))
          {
            end = i - 8;
            if (end < 0)
              end += 32;
            temp1 = remainder & ((0x0ff << end)
                                 | ((i < end) ? (0xff >> (32 - end)) : 0));
            remainder &= ~temp1;

            if (generate)
              {
                rtx new_src, temp1_rtx;

                if (code == SET || code == MINUS)
                  {
                    new_src = (subtargets ? gen_reg_rtx (mode) : target);
                    if (can_invert && code != MINUS)
                      temp1 = ~temp1;
                  }
                else
                  {
                    if (remainder && subtargets)
                      new_src = gen_reg_rtx (mode);
                    else
                      new_src = target;
                    if (can_invert)
                      temp1 = ~temp1;
                    else if (can_negate)
                      temp1 = -temp1;
                  }

                temp1 = trunc_int_for_mode (temp1, mode);
                temp1_rtx = GEN_INT (temp1);

                if (code == SET)
                  ;
                else if (code == MINUS)
                  temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
                else
                  temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);

                emit_insn (gen_rtx_SET (VOIDmode, new_src, temp1_rtx));
                source = new_src;
              }

            if (code == SET)
              {
                can_invert = 0;
                code = PLUS;
              }
            else if (code == MINUS)
              code = PLUS;

            insns++;
            i -= 6;
          }
        i -= 2;
      }
    while (remainder);
  }

  return insns;
}

/* Canonicalize a comparison so that we are more likely to recognize it.
   This can be done for a few constant compares, where we can make the
   immediate value easier to load.  */

enum rtx_code
arm_canonicalize_comparison (code, op1)
     enum rtx_code code;
     rtx * op1;
{
  unsigned HOST_WIDE_INT i = INTVAL (*op1);

  switch (code)
    {
    case EQ:
    case NE:
      return code;

    case GT:
    case LE:
      if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
          && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
        {
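
The listing above is only reached for constants that fail the single-instruction test: a classic ARM data-processing immediate must be an 8-bit value rotated right by an even number of bit positions, which is the property const_ok_for_arm checks. Below is a minimal standalone sketch of that encoding test for illustration only; is_arm_immediate is a hypothetical helper name and is not GCC's implementation.

/* Illustrative sketch -- not part of arm.c.  Returns 1 if VAL can be
   encoded as an ARM data-processing immediate: an 8-bit constant
   rotated right by an even amount.  */
#include <stdint.h>
#include <stdio.h>

static int
is_arm_immediate (uint32_t val)
{
  for (int rot = 0; rot < 32; rot += 2)
    {
      /* Undo a rotate-right of ROT by rotating left; if the result
         fits in 8 bits, VAL is encodable with that rotation.  */
      uint32_t unrotated = rot ? ((val << rot) | (val >> (32 - rot))) : val;
      if (unrotated <= 0xff)
        return 1;
    }
  return 0;
}

int
main (void)
{
  /* 0xe0000000 encodes directly (0xe rotated), 0xe0000100 does not --
     which is why the comment in the code above prefers to leave the
     small 0x100 part to the str addressing offset rather than
     synthesizing both large constants from scratch.  */
  printf ("%d %d\n", is_arm_immediate (0xe0000000u),
          is_arm_immediate (0xe0000100u));
  return 0;
}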
