
📄 i960.c

📁 GCC, the compiler from the GNU open-source project
💻 C
📖 Page 1 of 5
  if (GET_CODE (x) == PLUS)
    {
      rtx base = XEXP (x, 0);
      rtx offset = XEXP (x, 1);

      if (GET_CODE (base) == SUBREG)
	base = SUBREG_REG (base);
      if (GET_CODE (offset) == SUBREG)
	offset = SUBREG_REG (offset);

      if (GET_CODE (base) == REG)
	{
	  if (GET_CODE (offset) == REG)
	    return 2;
	  if (GET_CODE (offset) == CONST_INT)
	    {
	      if ((unsigned)INTVAL (offset) < 2047)
		return 2;
	      return 4;
	    }
	  if (CONSTANT_P (offset))
	    return 4;
	}
      if (GET_CODE (base) == PLUS || GET_CODE (base) == MULT)
	return 6;

      /* This is an invalid address.  The return value doesn't matter, but
	 for convenience we make this more expensive than anything else.  */
      return 12;
    }
  if (GET_CODE (x) == MULT)
    return 6;

  /* Symbol_refs and other unrecognized addresses are cost 4.  */
  return 4;
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.  */

int
emit_move_sequence (operands, mode)
     rtx *operands;
     enum machine_mode mode;
{
  /* We can only store registers to memory.  */

  if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) != REG)
    operands[1] = force_reg (mode, operands[1]);

  /* Storing multi-word values in unaligned hard registers to memory may
     require a scratch since we have to store them a register at a time and
     adding 4 to the memory address may not yield a valid insn.  */
  /* ??? We don't always need the scratch, but that would complicate things.
     Maybe later.  */

  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && GET_CODE (operands[0]) == MEM
      && GET_CODE (operands[1]) == REG
      && REGNO (operands[1]) < FIRST_PSEUDO_REGISTER
      && ! HARD_REGNO_MODE_OK (REGNO (operands[1]), mode))
    {
      emit_insn (gen_rtx (PARALLEL, VOIDmode,
			  gen_rtvec (2,
				     gen_rtx (SET, VOIDmode,
					      operands[0], operands[1]),
				     gen_rtx (CLOBBER, VOIDmode,
					      gen_rtx (SCRATCH, Pmode)))));
      return 1;
    }

  return 0;
}
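
The address-cost excerpt at the top of this page boils down to a small table: register-plus-register and register-plus-small-constant addresses cost 2, larger or symbolic displacements cost 4, scaled-index forms cost 6, and anything unrecognized defaults to 4. A minimal standalone sketch of that table follows; it is not part of i960.c, and the names addr_kind, simple_addr, and simple_address_cost are invented stand-ins for GCC's RTL codes.

/* Illustrative sketch only -- not part of i960.c.  The enum and struct are
   invented stand-ins for RTL so the cost table from the excerpt above can
   be shown in isolation.  */

#include <stdio.h>

enum addr_kind
{
  ADDR_REG_PLUS_REG,      /* (plus reg reg)                            */
  ADDR_REG_PLUS_CONST,    /* (plus reg const_int)                      */
  ADDR_REG_PLUS_SYMBOL,   /* (plus reg symbolic-constant)              */
  ADDR_SCALED_INDEX,      /* (mult ...) or (plus (plus/mult ...) ...)  */
  ADDR_OTHER              /* symbol_ref and anything unrecognized      */
};

struct simple_addr
{
  enum addr_kind kind;
  long offset;            /* only meaningful for ADDR_REG_PLUS_CONST */
};

/* Same cost choices as the i960_address_cost excerpt above.  */
static int
simple_address_cost (const struct simple_addr *a)
{
  switch (a->kind)
    {
    case ADDR_REG_PLUS_REG:
      return 2;
    case ADDR_REG_PLUS_CONST:
      /* The 2047 limit mirrors the excerpt; small non-negative
	 displacements are the cheap case.  */
      return ((unsigned long) a->offset < 2047) ? 2 : 4;
    case ADDR_REG_PLUS_SYMBOL:
      return 4;
    case ADDR_SCALED_INDEX:
      return 6;
    default:
      return 4;
    }
}

int
main (void)
{
  struct simple_addr near_addr = { ADDR_REG_PLUS_CONST, 64 };
  struct simple_addr far_addr  = { ADDR_REG_PLUS_CONST, 4096 };
  printf ("%d %d\n",
	  simple_address_cost (&near_addr),
	  simple_address_cost (&far_addr));   /* prints "2 4" */
  return 0;
}
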
/* Output assembler to move a double word value.  */

char *
i960_output_move_double (dst, src)
     rtx dst, src;
{
  rtx operands[5];

  if (GET_CODE (dst) == REG
      && GET_CODE (src) == REG)
    {
      if ((REGNO (src) & 1)
	  || (REGNO (dst) & 1))
	{
	  /* We normally copy the low-numbered register first.  However, if
	     the second source register is the same as the first destination
	     register, we must copy in the opposite order.  */
	  if (REGNO (src) + 1 == REGNO (dst))
	    return "mov	%D1,%D0\n\tmov	%1,%0";
	  else
	    return "mov	%1,%0\n\tmov	%D1,%D0";
	}
      else
	return "movl	%1,%0";
    }
  else if (GET_CODE (dst) == REG
	   && GET_CODE (src) == CONST_INT
	   && CONST_OK_FOR_LETTER_P (INTVAL (src), 'I'))
    {
      if (REGNO (dst) & 1)
	return "mov	%1,%0\n\tmov	0,%D0";
      else
	return "movl	%1,%0";
    }
  else if (GET_CODE (dst) == REG
	   && GET_CODE (src) == MEM)
    {
      if (REGNO (dst) & 1)
	{
	  /* One can optimize a few cases here, but you have to be
	     careful of clobbering registers used in the address and
	     edge conditions.  */
	  operands[0] = dst;
	  operands[1] = src;
	  operands[2] = gen_rtx (REG, Pmode, REGNO (dst) + 1);
	  operands[3] = gen_rtx (MEM, word_mode, operands[2]);
	  operands[4] = adj_offsettable_operand (operands[3], UNITS_PER_WORD);
	  output_asm_insn ("lda	%1,%2\n\tld	%3,%0\n\tld	%4,%D0", operands);
	  return "";
	}
      else
	return "ldl	%1,%0";
    }
  else if (GET_CODE (dst) == MEM
	   && GET_CODE (src) == REG)
    {
      if (REGNO (src) & 1)
	{
	  /* This is handled by emit_move_sequence so we shouldn't get here.  */
	  abort ();
	}
      return "stl	%1,%0";
    }
  else
    abort ();
}

/* Output assembler to move a quad word value.  */

char *
i960_output_move_quad (dst, src)
     rtx dst, src;
{
  rtx operands[7];

  if (GET_CODE (dst) == REG
      && GET_CODE (src) == REG)
    {
      if ((REGNO (src) & 3)
	  || (REGNO (dst) & 3))
	{
	  /* We normally copy starting with the low numbered register.
	     However, if there is an overlap such that the first dest reg
	     is <= the last source reg but not < the first source reg, we
	     must copy in the opposite order.  */
	  if (REGNO (dst) <= REGNO (src) + 3
	      && REGNO (dst) >= REGNO (src))
	    return "mov	%F1,%F0\n\tmov	%E1,%E0\n\tmov	%D1,%D0\n\tmov	%1,%0";
	  else
	    return "mov	%1,%0\n\tmov	%D1,%D0\n\tmov	%E1,%E0\n\tmov	%F1,%F0";
	}
      else
	return "movq	%1,%0";
    }
  else if (GET_CODE (dst) == REG
	   && GET_CODE (src) == CONST_INT
	   && CONST_OK_FOR_LETTER_P (INTVAL (src), 'I'))
    {
      if (REGNO (dst) & 3)
	return "mov	%1,%0\n\tmov	0,%D0\n\tmov	0,%E0\n\tmov	0,%F0";
      else
	return "movq	%1,%0";
    }
  else if (GET_CODE (dst) == REG
	   && GET_CODE (src) == MEM)
    {
      if (REGNO (dst) & 3)
	{
	  /* One can optimize a few cases here, but you have to be
	     careful of clobbering registers used in the address and
	     edge conditions.  */
	  operands[0] = dst;
	  operands[1] = src;
	  operands[2] = gen_rtx (REG, Pmode, REGNO (dst) + 3);
	  operands[3] = gen_rtx (MEM, word_mode, operands[2]);
	  operands[4] = adj_offsettable_operand (operands[3], UNITS_PER_WORD);
	  operands[5] = adj_offsettable_operand (operands[4], UNITS_PER_WORD);
	  operands[6] = adj_offsettable_operand (operands[5], UNITS_PER_WORD);
	  output_asm_insn ("lda	%1,%2\n\tld	%3,%0\n\tld	%4,%D0\n\tld	%5,%E0\n\tld	%6,%F0", operands);
	  return "";
	}
      else
	return "ldq	%1,%0";
    }
  else if (GET_CODE (dst) == MEM
	   && GET_CODE (src) == REG)
    {
      if (REGNO (src) & 3)
	{
	  /* This is handled by emit_move_sequence so we shouldn't get here.  */
	  abort ();
	}
      return "stq	%1,%0";
    }
  else
    abort ();
}
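
Both register-to-register cases above choose a copy direction so that no source register is overwritten before it has been read: i960_output_move_quad copies high-to-low exactly when the first destination register falls inside the source range. The following standalone sketch shows the same overlap rule using a plain array as a pretend register file (not i960.c code; move_words and regs are invented names).

/* Illustrative sketch only -- not part of i960.c.  Copies an N-word value
   between (possibly overlapping) register ranges, choosing the direction
   the way i960_output_move_quad does: if the first destination register
   falls inside [src, src + n - 1], copy from the high word down.  */

#include <stdio.h>

static unsigned regs[32];       /* a pretend register file */

static void
move_words (unsigned dst, unsigned src, unsigned n)
{
  if (dst >= src && dst <= src + n - 1)
    {
      /* Destination overlaps the tail of the source: copy high to low.  */
      for (unsigned i = n; i-- > 0; )
	regs[dst + i] = regs[src + i];
    }
  else
    {
      /* No harmful overlap: copy low to high, as the normal case does.  */
      for (unsigned i = 0; i < n; i++)
	regs[dst + i] = regs[src + i];
    }
}

int
main (void)
{
  for (unsigned i = 0; i < 8; i++)
    regs[i] = i + 100;

  move_words (5, 4, 4);         /* dst overlaps src, so copy backwards */

  for (unsigned i = 5; i < 9; i++)
    printf ("r%u = %u\n", i, regs[i]);   /* 104 105 106 107 */
  return 0;
}
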
/* Emit insns to load a constant to non-floating point registers.
   Uses several strategies to try to use as few insns as possible.  */

char *
i960_output_ldconst (dst, src)
     register rtx dst, src;
{
  register int rsrc1;
  register unsigned rsrc2;
  enum machine_mode mode = GET_MODE (dst);
  rtx operands[4];

  operands[0] = operands[2] = dst;
  operands[1] = operands[3] = src;

  /* Anything that isn't a compile time constant, such as a SYMBOL_REF,
     must be a ldconst insn.  */

  if (GET_CODE (src) != CONST_INT && GET_CODE (src) != CONST_DOUBLE)
    {
      output_asm_insn ("ldconst	%1,%0", operands);
      return "";
    }
  else if (mode == XFmode)
    {
      REAL_VALUE_TYPE d;
      long value_long[3];
      int i;

      if (fp_literal_zero (src, XFmode))
	return "movt	0,%0";

      REAL_VALUE_FROM_CONST_DOUBLE (d, src);
      REAL_VALUE_TO_TARGET_LONG_DOUBLE (d, value_long);

      output_asm_insn ("# ldconst	%1,%0", operands);

      for (i = 0; i < 3; i++)
	{
	  operands[0] = gen_rtx (REG, SImode, REGNO (dst) + i);
	  operands[1] = GEN_INT (value_long[i]);
	  output_asm_insn (i960_output_ldconst (operands[0], operands[1]),
			   operands);
	}

      return "";
    }
  else if (mode == DFmode)
    {
      rtx first, second;

      if (fp_literal_zero (src, DFmode))
	return "movl	0,%0";

      split_double (src, &first, &second);

      output_asm_insn ("# ldconst	%1,%0", operands);

      operands[0] = gen_rtx (REG, SImode, REGNO (dst));
      operands[1] = first;
      output_asm_insn (i960_output_ldconst (operands[0], operands[1]),
		      operands);
      operands[0] = gen_rtx (REG, SImode, REGNO (dst) + 1);
      operands[1] = second;
      output_asm_insn (i960_output_ldconst (operands[0], operands[1]),
		      operands);
      return "";
    }
  else if (mode == SFmode)
    {
      REAL_VALUE_TYPE d;
      long value;

      REAL_VALUE_FROM_CONST_DOUBLE (d, src);
      REAL_VALUE_TO_TARGET_SINGLE (d, value);

      output_asm_insn ("# ldconst	%1,%0", operands);
      operands[0] = gen_rtx (REG, SImode, REGNO (dst));
      operands[1] = gen_rtx (CONST_INT, VOIDmode, value);
      output_asm_insn (i960_output_ldconst (operands[0], operands[1]),
		      operands);
      return "";
    }
  else if (mode == TImode)
    {
      /* ??? This is currently not handled at all.  */
      abort ();

      /* Note: lowest order word goes in lowest numbered reg.  */
      rsrc1 = INTVAL (src);
      if (rsrc1 >= 0 && rsrc1 < 32)
	return "movq	%1,%0";
      else
	output_asm_insn ("movq\t0,%0\t# ldconstq %1,%0", operands);
      /* Go pick up the low-order word.  */
    }
  else if (mode == DImode)
    {
      rtx upperhalf, lowerhalf, xoperands[2];

      if (GET_CODE (src) == CONST_DOUBLE || GET_CODE (src) == CONST_INT)
	split_double (src, &lowerhalf, &upperhalf);
      else
	abort ();

      /* Note: lowest order word goes in lowest numbered reg.  */
      /* Numbers from 0 to 31 can be handled with a single insn.  */
      rsrc1 = INTVAL (lowerhalf);
      if (upperhalf == const0_rtx && rsrc1 >= 0 && rsrc1 < 32)
	return "movl	%1,%0";

      /* Output the upper half with a recursive call.  */
      xoperands[0] = gen_rtx (REG, SImode, REGNO (dst) + 1);
      xoperands[1] = upperhalf;
      output_asm_insn (i960_output_ldconst (xoperands[0], xoperands[1]),
		       xoperands);
      /* The lower word is emitted as normally.  */
    }
  else
    {
      rsrc1 = INTVAL (src);
      if (mode == QImode)
	{
	  if (rsrc1 > 0xff)
	    rsrc1 &= 0xff;
	}
      else if (mode == HImode)
	{
	  if (rsrc1 > 0xffff)
	    rsrc1 &= 0xffff;
	}
    }

  if (rsrc1 >= 0)
    {
      /* ldconst	0..31,X		-> 	mov	0..31,X  */
      if (rsrc1 < 32)
	{
	  if (i960_last_insn_type == I_TYPE_REG && TARGET_C_SERIES)
	    return "lda	%1,%0";
	  return "mov	%1,%0";
	}

      /* ldconst	32..63,X	->	add	31,nn,X  */
      if (rsrc1 < 63)
	{
	  if (i960_last_insn_type == I_TYPE_REG && TARGET_C_SERIES)
	    return "lda	%1,%0";
	  operands[1] = gen_rtx (CONST_INT, VOIDmode, rsrc1 - 31);
	  output_asm_insn ("addo\t31,%1,%0\t# ldconst %3,%0", operands);
	  return "";
	}
    }
  else if (rsrc1 < 0)
    {
      /* ldconst	-1..-31		->	sub	0,0..31,X  */
      if (rsrc1 >= -31)
	{
	  /* return 'sub -(%1),0,%0' */
	  operands[1] = gen_rtx (CONST_INT, VOIDmode, - rsrc1);
	  output_asm_insn ("subo\t%1,0,%0\t# ldconst %3,%0", operands);
	  return "";
	}

      /* ldconst	-32		->	not	31,X  */
      if (rsrc1 == -32)
	{
	  operands[1] = gen_rtx (CONST_INT, VOIDmode, ~rsrc1);
	  output_asm_insn ("not\t%1,%0	# ldconst %3,%0", operands);
	  return "";
	}
    }

  /* If const is a single bit.  */
  if (bitpos (rsrc1) >= 0)
    {
      operands[1] = gen_rtx (CONST_INT, VOIDmode, bitpos (rsrc1));
      output_asm_insn ("setbit\t%1,0,%0\t# ldconst %3,%0", operands);
      return "";
    }

  /* If const is a bit string of less than 6 bits (1..31 shifted).  */
  if (is_mask (rsrc1))
    {
      int s, e;

      if (bitstr (rsrc1, &s, &e) < 6)
	{
	  rsrc2 = ((unsigned int) rsrc1) >> s;
	  operands[1] = gen_rtx (CONST_INT, VOIDmode, rsrc2);
	  operands[2] = gen_rtx (CONST_INT, VOIDmode, s);
	  output_asm_insn ("shlo\t%2,%1,%0\t# ldconst %3,%0", operands);
	  return "";
	}
    }

  /* Unimplemented cases:
     const is in range 0..31 but rotated around end of word:
     ror	31,3,g0	-> ldconst 0xe0000003,g0

     and any 2 instruction cases that might be worthwhile  */

  output_asm_insn ("ldconst	%1,%0", operands);
  return "";
}
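
For word-sized integer constants, i960_output_ldconst above tries, in order: a plain mov for 0..31, addo against the literal 31 for 32..62, subo from 0 for -1..-31, not for -32, setbit for a single-bit value, shlo for a short contiguous bit string, and finally a generic ldconst. The following standalone sketch mirrors that decision chain (not i960.c code; single_bit_pos and contiguous_mask are invented approximations of the bitpos/is_mask/bitstr helpers defined elsewhere in i960.c, and the C-series lda scheduling special case is ignored).

/* Illustrative sketch only -- not part of i960.c.  Classifies a 32-bit
   constant the way the SImode tail of i960_output_ldconst does, returning
   the name of the instruction that would be used.  */

#include <stdio.h>
#include <stdint.h>

/* Position of the single set bit in X, or -1 if X is not a power of two.  */
static int
single_bit_pos (uint32_t x)
{
  int pos = 0;
  if (x == 0 || (x & (x - 1)) != 0)
    return -1;
  while ((x & 1) == 0)
    {
      x >>= 1;
      pos++;
    }
  return pos;
}

/* Nonzero if X is one contiguous run of set bits; *start and *len then
   receive the shift amount and the run length.  */
static int
contiguous_mask (uint32_t x, int *start, int *len)
{
  int s = 0, l = 0;
  uint32_t run;
  if (x == 0)
    return 0;
  while ((x & 1) == 0)
    {
      x >>= 1;
      s++;
    }
  run = x;
  if ((run & (run + 1)) != 0)   /* not of the form 2^k - 1 after shifting */
    return 0;
  while (run)
    {
      run >>= 1;
      l++;
    }
  *start = s;
  *len = l;
  return 1;
}

static const char *
ldconst_strategy (int32_t v)
{
  int s, l;

  if (v >= 0 && v < 32)
    return "mov";                       /* literals 0..31 */
  if (v >= 32 && v < 63)
    return "addo 31,n-31";              /* 32..62: add onto the literal 31 */
  if (v < 0 && v >= -31)
    return "subo n,0";                  /* -1..-31: subtract from zero */
  if (v == -32)
    return "not 31";
  if (single_bit_pos ((uint32_t) v) >= 0)
    return "setbit";
  if (contiguous_mask ((uint32_t) v, &s, &l) && l < 6)
    return "shlo";                      /* short bit string, shifted */
  return "ldconst";
}

int
main (void)
{
  printf ("%s\n", ldconst_strategy (0x60));        /* shlo: 0b11 << 5 */
  printf ("%s\n", ldconst_strategy (0x10000));     /* setbit */
  printf ("%s\n", ldconst_strategy (0x12345678));  /* ldconst */
  return 0;
}
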
/* Determine if there is an opportunity for a bypass optimization.

   Bypass succeeds on the 960K* if the destination of the previous
   instruction is the second operand of the current instruction.
   Bypass always succeeds on the C*.

   Return 1 if the pattern should interchange the operands.

   CMPBR_FLAG is true if this is for a compare-and-branch insn.
   OP1 and OP2 are the two source operands of a 3 operand insn.  */

int
i960_bypass (insn, op1, op2, cmpbr_flag)
     register rtx insn, op1, op2;
     int cmpbr_flag;
{
  register rtx prev_insn, prev_dest;

  if (TARGET_C_SERIES)
    return 0;

  /* Can't do this if op1 isn't a register.  */
  if (! REG_P (op1))
    return 0;

  /* Can't do this for a compare-and-branch if both ops aren't regs.  */
  if (cmpbr_flag && ! REG_P (op2))
    return 0;

  prev_insn = prev_real_insn (insn);

  if (prev_insn && GET_CODE (prev_insn) == INSN
      && GET_CODE (PATTERN (prev_insn)) == SET)
    {
      prev_dest = SET_DEST (PATTERN (prev_insn));
      if ((GET_CODE (prev_dest) == REG && REGNO (prev_dest) == REGNO (op1))
	  || (GET_CODE (prev_dest) == SUBREG
	      && GET_CODE (SUBREG_REG (prev_dest)) == REG
	      && REGNO (SUBREG_REG (prev_dest)) == REGNO (op1)))
	return 1;
    }

  return 0;
}
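
i960_bypass only looks one real instruction back: on the 960K-series the result of the previous instruction can be forwarded into the second source operand, so the function returns 1 to ask the caller to interchange the operands when the previous destination matches the first source operand. A trivial standalone sketch of that decision with plain register numbers instead of RTL (not i960.c code; should_swap_for_bypass is an invented name):

/* Illustrative sketch only -- not part of i960.c.  Decides whether a
   commutative two-source operation should swap its operands so that the
   value produced by the previous instruction lands in the second operand
   slot, which is the position the 960K-series can bypass.  */

#include <stdio.h>

static int
should_swap_for_bypass (int prev_dest_reg, int op1_reg)
{
  /* Swap when the previous result currently feeds the first operand.  */
  return prev_dest_reg == op1_reg;
}

int
main (void)
{
  /* r7 = ...; then r9 = r7 + r8: r7 is operand 1, so swap to r8 + r7.  */
  printf ("%d\n", should_swap_for_bypass (7, 7));   /* 1: interchange */
  printf ("%d\n", should_swap_for_bypass (7, 8));   /* 0: leave as is */
  return 0;
}
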
/* Output the code which declares the function name.  This also handles
   leaf routines, which have special requirements, and initializes some
   global variables.  */

void
i960_function_name_declare (file, name, fndecl)
     FILE *file;
     char *name;
     tree fndecl;
{
  register int i, j;
  int leaf_proc_ok;
  rtx insn;

  /* Increment global return label.  */

  ret_label++;

  /* Compute whether tail calls and leaf routine optimizations can be performed
     for this function.  */

  if (TARGET_TAILCALL)
    tail_call_ok = 1;
  else
    tail_call_ok = 0;

  if (TARGET_LEAFPROC)
    leaf_proc_ok = 1;
  else
    leaf_proc_ok = 0;

  /* Even if nobody uses extra parms, can't have leafproc or tail calls if
     argblock, because argblock uses g14 implicitly.  */

  if (current_function_args_size != 0 || VARARGS_STDARG_FUNCTION (fndecl))
    {
      tail_call_ok = 0;
      leaf_proc_ok = 0;
    }

  /* See if caller passes in an address to return value.  */

  if (aggregate_value_p (DECL_RESULT (fndecl)))
    {
      tail_call_ok = 0;
      leaf_proc_ok = 0;
    }

  /* Can not use tail calls or make this a leaf routine if there is a non
     zero frame size.  */
