
📄 mcore.c

📁 Mac OS X 10.4.9 for x86 Source Code (gcc implementation source code)
💻 C
📖 Page 1 of 5
    case IOR:
      *total = COSTS_N_INSNS (mcore_ior_cost (x));
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
    case FLOAT:
    case FIX:
      *total = COSTS_N_INSNS (100);
      return true;

    default:
      return false;
    }
}

/* Check to see if a comparison against a constant can be made more efficient
   by incrementing/decrementing the constant to get one that is more efficient
   to load.  */

int
mcore_modify_comparison (enum rtx_code code)
{
  rtx op1 = arch_compare_op1;

  if (GET_CODE (op1) == CONST_INT)
    {
      int val = INTVAL (op1);

      switch (code)
        {
        case LE:
          if (CONST_OK_FOR_J (val + 1))
            {
              arch_compare_op1 = GEN_INT (val + 1);
              return 1;
            }
          break;

        default:
          break;
        }
    }

  return 0;
}

/* Prepare the operands for a comparison.  */

rtx
mcore_gen_compare_reg (enum rtx_code code)
{
  rtx op0 = arch_compare_op0;
  rtx op1 = arch_compare_op1;
  rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  switch (code)
    {
    case EQ:    /* Use inverted condition, cmpne.  */
      code = NE;
      /* Drop through.  */

    case NE:    /* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
      break;

    case LE:    /* Use inverted condition, reversed cmplt.  */
      code = GT;
      /* Drop through.  */

    case GT:    /* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
        op1 = force_reg (SImode, op1);
      break;

    case GE:    /* Use inverted condition, cmplt.  */
      code = LT;
      /* Drop through.  */

    case LT:    /* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT &&
          /* covered by btsti x,31.  */
          INTVAL (op1) != 0 &&
          ! CONST_OK_FOR_J (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
      break;

    case GTU:   /* Use inverted condition, cmple.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) == 0)
        {
          /* Unsigned > 0 is the same as != 0, but we need
             to invert the condition, so we want to set
             code = EQ.  This cannot be done however, as the
             mcore does not support such a test.  Instead we
             cope with this case in the "bgtu" pattern itself
             so we should never reach this point.  */
          /* code = EQ; */
          abort ();
          break;
        }
      code = LEU;
      /* Drop through.  */

    case LEU:   /* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    case LTU:   /* Use inverted condition, cmphs.  */
      code = GEU;
      /* Drop through.  */

    case GEU:   /* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
                          gen_rtx_fmt_ee (code, CCmode, op0, op1)));

  return cc_reg;
}

int
mcore_symbolic_address_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
               || GET_CODE (XEXP (x, 0)) == LABEL_REF)
              && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}

int
mcore_call_address_operand (rtx x, enum machine_mode mode)
{
  return register_operand (x, mode) || CONSTANT_P (x);
}
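/* [Editor's illustration -- not part of mcore.c.]  A minimal standalone
   sketch of the rewrite that mcore_modify_comparison performs above:
   "x <= N" is the same test as "x < N + 1", so when N + 1 fits the
   cmplti J-immediate range (1-32, per the comment in
   mcore_gen_compare_reg), the comparison can stay in immediate form
   instead of forcing the constant into a register.  The range check
   below is an assumption taken from that comment; the real predicate is
   the CONST_OK_FOR_J macro in the MCore backend headers.  */

#include <stdio.h>

#define DEMO_CONST_OK_FOR_J(v) ((v) >= 1 && (v) <= 32)

/* Return the constant to use with a strict less-than compare,
   or -1 if the rewrite does not help.  */
static int
demo_modify_le_comparison (int val)
{
  if (DEMO_CONST_OK_FOR_J (val + 1))
    return val + 1;     /* use "x < val + 1" (cmplti immediate)  */

  return -1;            /* keep "x <= val"; constant needs a register  */
}

int
main (void)
{
  printf ("x <= 31  ->  x < %d\n", demo_modify_le_comparison (31));  /* 32, fits  */
  printf ("x <= 40  ->  %d\n", demo_modify_le_comparison (40));      /* -1, no help  */
  return 0;
}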
/* Functions to output assembly code for a function call.  */

char *
mcore_output_call (rtx operands[], int index)
{
  static char buffer[20];
  rtx addr = operands [index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
        {
          if (mcore_current_function_name == 0)
            abort ();

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              "unknown", 1);
        }

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
        {
          if (mcore_current_function_name == 0)
            abort ();

          if (GET_CODE (addr) != SYMBOL_REF)
            abort ();

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              XSTR (addr, 0), 0);
        }

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}

/* Can we load a constant with a single instruction ?  */

static int
const_ok_for_mcore (int value)
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if ((value & (value - 1)) == 0)
    return 1;

  /* Try exact power of two - 1.  */
  if ((value & (value + 1)) == 0)
    return 1;

  return 0;
}

/* Can we load a constant inline with up to 2 instructions ?  */

int
mcore_const_ok_for_inline (long value)
{
  int x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not ?  */

int
mcore_const_trick_uses_not (long value)
{
  int x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}
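/* [Editor's illustration -- not part of mcore.c.]  const_ok_for_mcore
   above accepts exactly three shapes of constant: small values 0..127,
   exact powers of two, and values one less than a power of two
   (presumably matching the movi, bgeni and bmaski instructions).  The
   standalone program below mirrors that classification on a few sample
   values so the three bit tests can be seen in isolation.  */

#include <stdio.h>

static int
demo_const_ok_for_mcore (int value)
{
  if (value >= 0 && value <= 127)
    return 1;                   /* small positive immediate  */

  if ((value & (value - 1)) == 0)
    return 1;                   /* exact power of two  */

  if ((value & (value + 1)) == 0)
    return 1;                   /* power of two minus one  */

  return 0;
}

int
main (void)
{
  int samples[] = { 100, 4096, 0xffff, 300 };
  int i;

  for (i = 0; i < (int) (sizeof samples / sizeof samples[0]); i++)
    printf ("%#10x  ->  %s\n", samples[i],
            demo_const_ok_for_mcore (samples[i])
            ? "single instruction" : "needs try_constant_tricks");

  return 0;
}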
/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by a subi
   4: single insn followed by an addi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */

static int
try_constant_tricks (long value, int * x, int * y)
{
  int i;
  unsigned bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;   /* Do the usual thing.  */

  if (TARGET_HARDLIT)
    {
      if (const_ok_for_mcore (~value))
        {
          *x = ~value;
          return 2;
        }

      for (i = 1; i <= 32; i++)
        {
          if (const_ok_for_mcore (value - i))
            {
              *x = value - i;
              *y = i;

              return 3;
            }

          if (const_ok_for_mcore (value + i))
            {
              *x = value + i;
              *y = i;

              return 4;
            }
        }

      bit = 0x80000000L;

      for (i = 0; i <= 31; i++)
        {
          if (const_ok_for_mcore (i - value))
            {
              *x = i - value;
              *y = i;

              return 5;
            }

          if (const_ok_for_mcore (value & ~bit))
            {
              *y = bit;
              *x = value & ~bit;

              return 6;
            }

          if (const_ok_for_mcore (value | bit))
            {
              *y = ~bit;
              *x = value | bit;

              return 7;
            }

          bit >>= 1;
        }

      shf = value;
      rot = value;

      for (i = 1; i < 31; i++)
        {
          int c;

          /* MCore has rotate left.  */
          c = rot << 31;
          rot >>= 1;
          rot &= 0x7FFFFFFF;
          rot |= c;   /* Simulate rotate.  */

          if (const_ok_for_mcore (rot))
            {
              *y = i;
              *x = rot;

              return 8;
            }

          if (shf & 1)
            shf = 0;    /* Can't use logical shift, low order bit is one.  */

          shf >>= 1;

          if (shf != 0 && const_ok_for_mcore (shf))
            {
              *y = i;
              *x = shf;

              return 9;
            }
        }

      if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
        {
          *x = value / 3;

          return 10;
        }

      if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
        {
          *x = value / 5;

          return 11;
        }
    }

  return 0;
}

/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
   can ignore subregs by extracting the actual register.  BRC  */

int
mcore_is_dead (rtx first, rtx reg)
{
  rtx insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == JUMP_INSN)
        return 0;       /* We lose track, assume it is alive.  */

      else if (GET_CODE (insn) == CALL_INSN)
        {
          /* Calls might use it for target or register parms.  */
          if (reg_referenced_p (reg, PATTERN (insn))
              || find_reg_fusage (insn, USE, reg))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
      else if (GET_CODE (insn) == INSN)
        {
          if (reg_referenced_p (reg, PATTERN (insn)))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}

/* Count the number of ones in mask.  */

int
mcore_num_ones (int mask)
{
  /* A trick to count set bits recently posted on comp.compilers.  */
  mask =  (mask >> 1  & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}

/* Count the number of zeros in mask.  */

int
mcore_num_zeros (int mask)
{
  return 32 - mcore_num_ones (mask);
}

/* Determine byte being masked.  */

int
mcore_byte_offset (unsigned int mask)
{
  if (mask == 0x00ffffffL)
    return 0;
  else if (mask == 0xff00ffffL)
    return 1;
  else if (mask == 0xffff00ffL)
    return 2;
  else if (mask == 0xffffff00L)
    return 3;

  return -1;
}

/* Determine halfword being masked.  */

int
mcore_halfword_offset (unsigned int mask)
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000L)
    return 1;

  return -1;
}

/* Output a series of bseti's corresponding to mask.  */

const char *
mcore_output_bseti (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
        {
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bseti\t%0,%1", out_operands);
        }

      mask >>= 1;
    }

  return "";
}

/* Output a series of bclri's corresponding to mask.  */

const char *
mcore_output_bclri (rtx dst, int mask)
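/* [Editor's illustration -- not part of mcore.c.]  The comp.compilers
   trick in mcore_num_ones above is a parallel bit count: each step sums
   adjacent groups of bits in place, doubling the group width from 1 bit
   up to 32.  The standalone program below runs the same steps on
   unsigned values and checks them against a naive loop.  */

#include <assert.h>
#include <stdio.h>

static int
demo_parallel_popcount (unsigned int mask)
{
  mask = ((mask >> 1) & 0x55555555u) + (mask & 0x55555555u);
  mask = ((mask >> 2) & 0x33333333u) + (mask & 0x33333333u);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0fu;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}

static int
demo_naive_popcount (unsigned int mask)
{
  int n = 0;

  for (; mask != 0; mask >>= 1)
    n += mask & 1;

  return n;
}

int
main (void)
{
  unsigned int samples[] = { 0x00000000u, 0xff00ffffu, 0x12345678u, 0xffffffffu };
  int i;

  for (i = 0; i < (int) (sizeof samples / sizeof samples[0]); i++)
    {
      assert (demo_parallel_popcount (samples[i]) == demo_naive_popcount (samples[i]));
      printf ("%#010x has %d bits set\n",
              samples[i], demo_parallel_popcount (samples[i]));
    }

  return 0;
}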
