⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 mips.c

📁 GCC编译器源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
  int eqne_p;			/* true when the internal test is EQ/NE
				   (implemented below via XOR).  */
  int invert;			/* true => invert the final 0/1 result.  */
  rtx reg;
  rtx reg2;

  test = map_test_to_internal_test (test_code);
  if (test == ITEST_MAX)
    abort ();

  /* Look up how this comparison is synthesized on MIPS.  */
  p_info = &info[ (int)test ];
  eqne_p = (p_info->test_code == XOR);

  mode = GET_MODE (cmp0);
  if (mode == VOIDmode)
    mode = GET_MODE (cmp1);

  /* Eliminate simple branches.  A null RESULT means the caller wants a
     branch, not a value in a register.  */
  branch_p = (result == (rtx)0);
  if (branch_p)
    {
      if (GET_CODE (cmp0) == REG || GET_CODE (cmp0) == SUBREG)
	{
	  /* Comparisons against zero are simple branches.  */
	  if (GET_CODE (cmp1) == CONST_INT && INTVAL (cmp1) == 0)
	    return (rtx)0;

	  /* Test for beq/bne.  */
	  if (eqne_p)
	    return (rtx)0;
	}

      /* Allocate a pseudo to calculate the value in.  */
      result = gen_reg_rtx (mode);
    }

  /* Make sure we can handle any constants given to us.  */
  if (GET_CODE (cmp0) == CONST_INT)
    cmp0 = force_reg (mode, cmp0);

  if (GET_CODE (cmp1) == CONST_INT)
    {
      HOST_WIDE_INT value = INTVAL (cmp1);

      /* Force CMP1 into a register when it is out of the immediate
	 range for this test, or when adding CONST_ADD would overflow
	 (see the matching overflow check further below).  */
      if (value < p_info->const_low
	  || value > p_info->const_high
	  /* ??? Why?  And why wasn't the similar code below modified too?  */
	  || (TARGET_64BIT
	      && HOST_BITS_PER_WIDE_INT < 64
	      && p_info->const_add != 0
	      && ((p_info->unsignedp
		   ? ((unsigned HOST_WIDE_INT) (value + p_info->const_add)
		      > INTVAL (cmp1))
		   : (value + p_info->const_add) > INTVAL (cmp1))
		  != (p_info->const_add > 0))))
	cmp1 = force_reg (mode, cmp1);
    }

  /* See if we need to invert the result.  */
  invert = (GET_CODE (cmp1) == CONST_INT)
		? p_info->invert_const
		: p_info->invert_reg;

  /* When the caller can handle inversion itself (it passed P_INVERT),
     hand the flag back instead of emitting an XOR below.  */
  if (p_invert != (int *)0)
    {
      *p_invert = invert;
      invert = FALSE;
    }

  /* Comparison to constants, may involve adding 1 to change a LT into LE.
     Comparison between two registers, may involve switching operands.  */
  if (GET_CODE (cmp1) == CONST_INT)
    {
      if (p_info->const_add != 0)
	{
	  HOST_WIDE_INT new = INTVAL (cmp1) + p_info->const_add;

	  /* If modification of cmp1 caused overflow,
	     we would get the wrong answer if we follow the usual path;
	     thus, x > 0xffffffffU would turn into x > 0U.  */
	  if ((p_info->unsignedp
	       ? (unsigned HOST_WIDE_INT) new > INTVAL (cmp1)
	       : new > INTVAL (cmp1))
	      != (p_info->const_add > 0))
	    {
	      /* This test is always true, but if INVERT is true then
		 the result of the test needs to be inverted so 0 should
		 be returned instead.  */
	      emit_move_insn (result, invert ? const0_rtx : const_true_rtx);
	      return result;
	    }
	  else
	    cmp1 = GEN_INT (new);
	}
    }
  else if (p_info->reverse_regs)
    {
      /* The canonical MIPS comparison needs the operands swapped
	 (e.g. GT becomes LT with reversed operands).  */
      rtx temp = cmp0;
      cmp0 = cmp1;
      cmp1 = temp;
    }

  /* x != 0 needs no comparison instruction: the value itself is
     tested against zero below.  */
  if (test == ITEST_NE && GET_CODE (cmp1) == CONST_INT && INTVAL (cmp1) == 0)
    reg = cmp0;
  else
    {
      /* Need a scratch if the comparison result must be post-processed
	 (EQ/NE fixup or inversion) before landing in RESULT.  */
      reg = (invert || eqne_p) ? gen_reg_rtx (mode) : result;
      convert_move (reg, gen_rtx (p_info->test_code, mode, cmp0, cmp1), 0);
    }

  if (test == ITEST_NE)
    {
      /* x != y is computed as (x ^ y) >u 0.  */
      convert_move (result, gen_rtx (GTU, mode, reg, const0_rtx), 0);
      invert = FALSE;
    }

  else if (test == ITEST_EQ)
    {
      /* x == y is computed as (x ^ y) <u 1.  */
      reg2 = (invert) ? gen_reg_rtx (mode) : result;
      convert_move (reg2, gen_rtx (LTU, mode, reg, const1_rtx), 0);
      reg = reg2;
    }

  /* Flip the low bit to invert a 0/1 truth value.  */
  if (invert)
    convert_move (result, gen_rtx (XOR, mode, reg, const1_rtx), 0);

  return result;
}

/* Emit the common code for doing conditional branches.

   operand[0] is the label to jump to.
   The comparison operands are saved away by cmp{si,di,sf,df}.  
*/

void
gen_conditional_branch (operands, test_code)
     rtx operands[];		/* operands[0] = branch target label */
     enum rtx_code test_code;	/* comparison to branch on */
{
  enum cmp_type type = branch_type;
  rtx cmp0 = branch_cmp[0];	/* operands stashed by the cmp* expanders */
  rtx cmp1 = branch_cmp[1];
  enum machine_mode mode;
  rtx reg;
  int invert;
  rtx label1, label2;

  switch (type)
    {
    default:
      abort_with_insn (gen_rtx (test_code, VOIDmode, cmp0, cmp1), "bad test");

    case CMP_SI:
    case CMP_DI:
      mode = type == CMP_SI ? SImode : DImode;
      invert = FALSE;

      /* Try to reduce the comparison to a register tested against
	 zero; a null return means the branch is already simple.  */
      reg = gen_int_relational (test_code, NULL_RTX, cmp0, cmp1, &invert);
      if (reg)
	{
	  cmp0 = reg;
	  cmp1 = const0_rtx;
	  test_code = NE;
	}
      else if (GET_CODE (cmp1) == CONST_INT && INTVAL (cmp1) != 0)
	{
	  /* We don't want to build a comparison against a non-zero
	     constant.  */
	  cmp1 = force_reg (mode, cmp1);
	}
      break;

    case CMP_SF:
    case CMP_DF:
      /* Before ISA 4 the FP condition lives in the fixed FPSW register;
	 later ISAs have allocatable condition-code registers.  */
      if (mips_isa < 4)
	reg = gen_rtx (REG, CCmode, FPSW_REGNUM);
      else
	reg = gen_reg_rtx (CCmode);

      /* For cmp0 != cmp1, build cmp0 == cmp1, and test for result ==
	 0 in the instruction built below.  The MIPS FPU handles
	 inequality testing by testing for equality and looking for a
	 false result.  */
      emit_insn (gen_rtx (SET, VOIDmode,
			  reg,
			  gen_rtx (test_code == NE ? EQ : test_code,
				   CCmode, cmp0, cmp1)));

      test_code = test_code == NE ? EQ : NE;
      mode = CCmode;
      cmp0 = reg;
      cmp1 = const0_rtx;
      invert = FALSE;
      break;
    }

  /* Generate the branch.  If INVERT is set, swap the arms of the
     IF_THEN_ELSE so the fall-through and the jump trade places.  */
  label1 = gen_rtx (LABEL_REF, VOIDmode, operands[0]);
  label2 = pc_rtx;

  if (invert)
    {
      label2 = label1;
      label1 = pc_rtx;
    }

  emit_jump_insn (gen_rtx (SET, VOIDmode,
			   pc_rtx,
			   gen_rtx (IF_THEN_ELSE, VOIDmode,
				    gen_rtx (test_code, mode, cmp0, cmp1),
				    label1,
				    label2)));
}

/* Emit the common code for conditional moves.  OPERANDS is the array
   of operands passed to the conditional move define_expand.  
*/

void
gen_conditional_move (operands)
     rtx *operands;		/* [0]=dest, [1]=condition, [2]/[3]=arms */
{
  rtx op0 = branch_cmp[0];	/* comparison operands stashed earlier */
  rtx op1 = branch_cmp[1];
  enum machine_mode mode = GET_MODE (branch_cmp[0]);
  enum rtx_code cmp_code = GET_CODE (operands[1]);
  enum rtx_code move_code = NE;	/* how the movcc tests the cmp result */
  enum machine_mode op_mode = GET_MODE (operands[0]);
  enum machine_mode cmp_mode;
  rtx cmp_reg;

  if (GET_MODE_CLASS (mode) != MODE_FLOAT)
    {
      /* Canonicalize the integer comparison to XOR, LT or LTU,
	 swapping operands and/or flipping MOVE_CODE to EQ so the
	 final conditional move still reads the right way.  */
      switch (cmp_code)
	{
	case EQ:		/* x == y  =>  (x ^ y) tested for EQ 0 */
	  cmp_code = XOR;
	  move_code = EQ;
	  break;
	case NE:		/* x != y  =>  (x ^ y) tested for NE 0 */
	  cmp_code = XOR;
	  break;
	case LT:		/* already canonical */
	  break;
	case GE:		/* x >= y  =>  !(x < y) */
	  cmp_code = LT;
	  move_code = EQ;
	  break;
	case GT:		/* x > y  =>  y < x */
	  cmp_code = LT;
	  op0 = force_reg (mode, branch_cmp[1]);
	  op1 = branch_cmp[0];
	  break;
	case LE:		/* x <= y  =>  !(y < x) */
	  cmp_code = LT;
	  op0 = force_reg (mode, branch_cmp[1]);
	  op1 = branch_cmp[0];
	  move_code = EQ;
	  break;
	case LTU:		/* unsigned variants of the above */
	  break;
	case GEU:
	  cmp_code = LTU;
	  move_code = EQ;
	  break;
	case GTU:
	  cmp_code = LTU;
	  op0 = force_reg (mode, branch_cmp[1]);
	  op1 = branch_cmp[0];
	  break;
	case LEU:
	  cmp_code = LTU;
	  op0 = force_reg (mode, branch_cmp[1]);
	  op1 = branch_cmp[0];
	  move_code = EQ;
	  break;
	default:
	  abort ();
	}
    }
  else
    {
      /* Floating point: only NE needs rewriting, as EQ with an
	 inverted move test (cf. gen_conditional_branch).  */
      if (cmp_code == NE)
	{
	  cmp_code = EQ;
	  move_code = EQ;
	}
    }
	  
  /* Integer comparisons produce their result in an integer register;
     FP comparisons set a CCmode condition register.  */
  if (mode == SImode || mode == DImode)
    cmp_mode = mode;
  else if (mode == SFmode || mode == DFmode)
    cmp_mode = CCmode;
  else
    abort ();

  /* Emit the comparison, then the conditional move proper.  */
  cmp_reg = gen_reg_rtx (cmp_mode);
  emit_insn (gen_rtx (SET, cmp_mode,
		      cmp_reg,
		      gen_rtx (cmp_code, cmp_mode, op0, op1)));

  emit_insn (gen_rtx (SET, op_mode,
		      operands[0],
		      gen_rtx (IF_THEN_ELSE, op_mode,
			       gen_rtx (move_code, VOIDmode,
					cmp_reg,
					CONST0_RTX (SImode)),
			       operands[2],
			       operands[3])));
}

/* Write a loop to move a constant number of bytes.
   Generate load/stores as follows:

   do {
     temp1 = src[0];
     temp2 = src[1];
     ...
     temp<last> = src[MAX_MOVE_REGS-1];
     dest[0] = temp1;
     dest[1] = temp2;
     ...
dest[MAX_MOVE_REGS-1] = temp<last>;
     src += MAX_MOVE_REGS;
     dest += MAX_MOVE_REGS;
   } while (src != final);

   This way, no NOP's are needed, and only MAX_MOVE_REGS+3 temp
   registers are needed.

   Aligned moves move MAX_MOVE_REGS*4 bytes every (2*MAX_MOVE_REGS)+3
   cycles, unaligned moves move MAX_MOVE_REGS*4 bytes every
   (4*MAX_MOVE_REGS)+3 cycles, assuming no cache misses.  */

#define MAX_MOVE_REGS 4
#define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)

static void
block_move_loop (dest_reg, src_reg, bytes, align, orig_dest, orig_src)
     rtx dest_reg;		/* register holding destination address */
     rtx src_reg;		/* register holding source address */
     int bytes;			/* # bytes to move */
     int align;			/* alignment */
     rtx orig_dest;		/* original dest for change_address */
     rtx orig_src;		/* original source for making a reg note */
{
  rtx dest_mem		= change_address (orig_dest, BLKmode, dest_reg);
  rtx src_mem		= change_address (orig_src, BLKmode, src_reg);
  rtx align_rtx		= GEN_INT (align);
  rtx label;			/* top of the copy loop */
  rtx final_src;		/* source address at which to stop */
  rtx bytes_rtx;
  int leftover;			/* tail bytes not covered by the loop */

  /* Caller guarantees at least two full loop iterations' worth.  */
  if (bytes < 2*MAX_MOVE_BYTES)
    abort ();

  leftover = bytes % MAX_MOVE_BYTES;
  bytes -= leftover;

  label = gen_label_rtx ();
  final_src = gen_reg_rtx (Pmode);
  bytes_rtx = GEN_INT (bytes);

  /* Compute FINAL_SRC = SRC_REG + BYTES.  NOTE(review): 0x7fff looks
     like the 16-bit signed immediate limit of addiu — larger counts
     need a separate move first; confirm against the MIPS ISA.  */
  if (bytes > 0x7fff)
    {
      if (TARGET_LONG64)
	{
	  emit_insn (gen_movdi (final_src, bytes_rtx));
	  emit_insn (gen_adddi3 (final_src, final_src, src_reg));
	}
      else
	{
	  emit_insn (gen_movsi (final_src, bytes_rtx));
	  emit_insn (gen_addsi3 (final_src, final_src, src_reg));
	}
    }
  else
    {
      if (TARGET_LONG64)
	emit_insn (gen_adddi3 (final_src, src_reg, bytes_rtx));
      else
	emit_insn (gen_addsi3 (final_src, src_reg, bytes_rtx));
    }

  /* Loop body: move MAX_MOVE_BYTES, bump both pointers, and branch
     back until SRC_REG reaches FINAL_SRC.  */
  emit_label (label);

  bytes_rtx = GEN_INT (MAX_MOVE_BYTES);
  emit_insn (gen_movstrsi_internal (dest_mem, src_mem, bytes_rtx, align_rtx));

  if (TARGET_LONG64)
    {
      emit_insn (gen_adddi3 (src_reg, src_reg, bytes_rtx));
      emit_insn (gen_adddi3 (dest_reg, dest_reg, bytes_rtx));
      emit_insn (gen_cmpdi (src_reg, final_src));
    }
  else
    {
      emit_insn (gen_addsi3 (src_reg, src_reg, bytes_rtx));
      emit_insn (gen_addsi3 (dest_reg, dest_reg, bytes_rtx));
      emit_insn (gen_cmpsi (src_reg, final_src));
    }

  emit_jump_insn (gen_bne (label));

  /* Copy any tail bytes the loop did not cover.  */
  if (leftover)
    emit_insn (gen_movstrsi_internal (dest_mem, src_mem,
				      GEN_INT (leftover),
				      align_rtx));
}

/* Use a library function to move some bytes.  */

static void
block_move_call (dest_reg, src_reg, bytes_rtx)
     rtx dest_reg;
     rtx src_reg;
     rtx bytes_rtx;
{
  /* We want to pass the size as Pmode, which will normally be SImode
     but will be DImode if we are using 64 bit longs and pointers.  */
  if (GET_MODE (bytes_rtx) != VOIDmode
      && GET_MODE (bytes_rtx) != Pmode)
    bytes_rtx = convert_to_mode (Pmode, bytes_rtx, TRUE);

#ifdef TARGET_MEM_FUNCTIONS
  /* memcpy takes (dest, src, size); bcopy below takes (src, dest, size).  */
  emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "memcpy"), 0,
		     VOIDmode, 3,
		     dest_reg, Pmode,
		     src_reg, Pmode,
		     convert_to_mode (TYPE_MODE (sizetype), bytes_rtx,
				      TREE_UNSIGNED (sizetype)),
		     TYPE_MODE (sizetype));
#else
  emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "bcopy"), 0,
		     VOIDmode, 3,
		     src_reg, Pmode,
		     dest_reg, Pmode,
		     convert_to_mode (TYPE_MODE (integer_type_node),
				      bytes_rtx,
				      TREE_UNSIGNED (integer_type_node)),
		     TYPE_MODE (integer_type_node));
#endif
}

/* Expand string/block move operations.

   operands[0] is the pointer to the destination.
   operands[1] is the pointer to the source.
   operands[2] is the number of bytes to move.
   operands[3] is the alignment.  */

void
expand_block_move (operands)
     rtx operands[];
{
  rtx bytes_rtx	= operands[2];
  rtx align_rtx = operands[3];
  int constp	= (GET_CODE (bytes_rtx) == CONST_INT);
  int bytes	= (constp ? INTVAL (bytes_rtx) : 0);
  int align	= INTVAL (align_rtx);
  rtx orig_src	= operands[1];
  rtx orig_dest	= operands[0];
  rtx src_reg;
  rtx dest_reg;

  /* Nothing to do for a constant zero (or negative) count.  */
  if (constp && bytes <= 0)
    return;

  /* Alignment beyond the word size buys nothing here.  */
  if (align > UNITS_PER_WORD)
    align = UNITS_PER_WORD;

  /* Move the address into scratch registers.  */
  dest_reg = copy_addr_to_reg (XEXP (orig_dest, 0));
  src_reg  = copy_addr_to_reg (XEXP (orig_src, 0));

  /* Strategy selection: library call, short inline move, aligned
     loop, or a runtime alignment test choosing between two loops.  */
  if (TARGET_MEMCPY)
    block_move_call (dest_reg, src_reg, bytes_rtx);

  else if (constp && bytes <= 2*MAX_MOVE_BYTES)
    emit_insn (gen_movstrsi_internal (change_address (orig_dest, BLKmode,
						      dest_reg),
				      change_address (orig_src, BLKmode,
						      src_reg),
				      bytes_rtx, align_rtx));

  else if (constp && align >= UNITS_PER_WORD && optimize)
    block_move_loop (dest_reg, src_reg, bytes, align, orig_dest, orig_src);

  else if (constp && optimize)
    {
      /* If the alignment is not word aligned, generate a test at
	 runtime, to see whether things wound up aligned, and we
	 can use the faster lw/sw instead of ulw/usw.  */
      rtx temp		= gen_reg_rtx (Pmode);
      rtx aligned_label = gen_label_rtx ();
      rtx join_label	= gen_label_rtx ();
      int leftover	= bytes % MAX_MOVE_BYTES;

      bytes -= leftover;

      /* Aligned iff (src | dest) has no low bits set.  */
      if (TARGET_LONG64)
	{
	  emit_insn (gen_iordi3 (temp, src_reg, dest_reg));
	  emit_insn (gen_anddi3 (temp, temp, GEN_INT (UNITS_PER_WORD-1)));
	  emit_insn (gen_cmpdi (temp, const0_rtx));
	}
      else
	{
	  emit_insn (gen_iorsi3 (temp, src_reg, dest_reg));
	  emit_insn (gen_andsi3 (temp, temp, GEN_INT (UNITS_PER_WORD-1)));
	  emit_insn (gen_cmpsi (temp, const0_rtx));
	}

      emit_jump_insn (gen_beq (aligned_label));

      /* Unaligned loop.  */
      block_move_loop (dest_reg, src_reg, bytes, 1, orig_dest, orig_src);
      emit_jump_insn (gen_jump (join_label));
      emit_barrier ();

      /* Aligned loop.  */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -