⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 i860.c

📁 Mac OS X 10.4.9 for x86 Source Code gcc 实现源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
     so use output_move_double to do that in the cases that require it.  */
  if ((mode == DImode || mode == DFmode)
      && ! FP_REG_P (operands[1]))
    return output_move_double (operands);

  return store_opcode (mode, "%r1,%L0(%?r31)", operands[1]);
}

/* Output a load-from-memory whose operands are OPERANDS[0,1].
   OPERANDS[0] is a reg, and OPERANDS[1] is a mem.

   This function returns a template for an insn.
   This is in static storage.

   It may also output some insns directly.
   It may alter the values of operands[0] and operands[1].  */

const char *
output_load (rtx *operands)
{
  enum machine_mode mode = GET_MODE (operands[0]);
  rtx address = XEXP (operands[1], 0);

  /* We don't bother trying to see if we know %hi(address).
     This is because we are doing a load, and if we know the
     %hi value, we probably also know that value in memory.  */

  /* Record in the condition-code status that, after this insn, r31
     holds the (adjusted) high part of ADDRESS, so later loads/stores
     from the same address can reuse it.  */
  cc_status.flags |= CC_KNOW_HI_R31 | CC_HI_R31_ADJ;
  cc_status.mdep = address;

  /* Emit the `orh' that puts %hi(address) into r31, unless the
     previous insn is already known to have left exactly that value
     there (same flags, same address).  */
  if (! ((cc_prev_status.flags & CC_KNOW_HI_R31)
	 && (cc_prev_status.flags & CC_HI_R31_ADJ)
	 && address == cc_prev_status.mdep
	 && cc_prev_status.mdep == cc_status.mdep))
    {
      CC_STATUS_INIT;
      output_asm_insn ("orh %h1,%?r0,%?r31", operands);
      cc_prev_status.mdep = address;
    }

  /* Code below isn't smart enough to move a doubleword in two parts,
     so use output_move_double to do that in the cases that require it.  */

  if ((mode == DImode || mode == DFmode)
      && ! FP_REG_P (operands[0]))
    return output_move_double (operands);

  return load_opcode (mode, "%L1(%?r31),%0", operands[0]);
}

#if 0
/* Load the address specified by OPERANDS[3] into the register
   specified by OPERANDS[0].

   OPERANDS[3] may be the result of a sum, hence it could either be:

   (1) CONST
   (2) REG
   (3) REG + CONST_INT
   (4) REG + REG + CONST_INT
   (5) REG + REG  (special case of 4).

   Note that (4) is not a legitimate address.
   All cases are handled here.  */

void
output_load_address (rtx *operands)
{
  rtx base, offset;

  /* Case (1): a bare constant -- a simple move suffices.  */
  if (CONSTANT_P (operands[3]))
    {
      output_asm_insn ("mov %3,%0", operands);
      return;
    }

  /* Case (2): a bare register -- copy it unless source and
     destination are already the same register.  (The `shl' by the
     always-zero r0 presumably acts as a plain register move here --
     TODO confirm against the i860 manual.)  */
  if (REG_P (operands[3]))
    {
      if (REGNO (operands[0]) != REGNO (operands[3]))
	output_asm_insn ("shl %?r0,%3,%0", operands);
      return;
    }

  if (GET_CODE (operands[3]) != PLUS)
    abort ();

  base = XEXP (operands[3], 0);
  offset = XEXP (operands[3], 1);

  /* Canonicalize so that any constant term ends up in OFFSET.  */
  if (GET_CODE (base) == CONST_INT)
    {
      rtx tmp = base;
      base = offset;
      offset = tmp;
    }

  if (GET_CODE (offset) != CONST_INT)
    {
      /* Operand is (PLUS (REG) (REG)).  Treat the whole sum as the
	 base with a zero offset.  */
      base = operands[3];
      offset = const0_rtx;
    }

  if (REG_P (base))
    {
      /* Case (3): one register plus a constant.  */
      operands[6] = base;
      operands[7] = offset;
      CC_STATUS_PARTIAL_INIT;
      if (SMALL_INT (offset))
	output_asm_insn ("adds %7,%6,%0", operands);
      else
	output_asm_insn ("mov %7,%0\n\tadds %0,%6,%0", operands);
    }
  else if (GET_CODE (base) == PLUS)
    {
      /* Cases (4) and (5): two registers plus a constant (possibly
	 zero, from the canonicalization above).  */
      operands[6] = XEXP (base, 0);
      operands[7] = XEXP (base, 1);
      operands[8] = offset;
      CC_STATUS_PARTIAL_INIT;
      if (SMALL_INT (offset))
	output_asm_insn ("adds %6,%7,%0\n\tadds %8,%0,%0", operands);
      else
	output_asm_insn ("mov %8,%0\n\tadds %0,%6,%0\n\tadds %0,%7,%0",
			 operands);
    }
  else
    abort ();
}
#endif

/* Output code to place a size count SIZE in register REG.
   Because block moves are pipelined, we don't include the
   first element in the transfer of SIZE to REG.
   For this, we subtract ALIGN.  (Actually, I think it is not
   right to subtract on this machine, so right now we don't.)
*/

static void
output_size_for_block_move (rtx size, rtx reg, rtx align)
{
  rtx xoperands[3];

  xoperands[0] = reg;
  xoperands[1] = size;
  xoperands[2] = align;

#if 1
  /* Forget any cached knowledge of r31's contents; the move we emit
     may invalidate it -- TODO confirm which registers
     singlemove_string can touch.  */
  cc_status.flags &= ~ CC_KNOW_HI_R31;
  output_asm_insn (singlemove_string (xoperands), xoperands);
#else
  /* Disabled variant that actually subtracts ALIGN from SIZE, per the
     comment above this function.  */
  if (GET_CODE (size) == REG)
    output_asm_insn ("sub %2,%1,%0", xoperands);
  else
    {
      xoperands[1] = GEN_INT (INTVAL (size) - INTVAL (align));
      cc_status.flags &= ~ CC_KNOW_HI_R31;
      output_asm_insn ("mov %1,%0", xoperands);
    }
#endif
}

/* Emit code to perform a block move.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the known safe alignment.
   OPERANDS[4..6] are pseudos we can safely clobber as temps.  */

const char *
output_block_move (rtx *operands)
{
  /* A vector for our computed operands.  Note that output_load_address
     makes use of (and can clobber) up to the 8th element of this vector.  */
  rtx xoperands[10];
#if 0
  rtx zoperands[10];
#endif
  /* Counter used to generate unique local labels per expanded move.  */
  static int movmemsi_label = 0;
  int i;
  rtx temp1 = operands[4];
  rtx alignrtx = operands[3];
  int align = INTVAL (alignrtx);
  int chunk_size;

  xoperands[0] = operands[0];
  xoperands[1] = operands[1];
  xoperands[2] = temp1;

  /* We can't move more than four bytes at a time
     because we have only one register to move them through.  */
  if (align > 4)
    {
      align = 4;
      alignrtx = GEN_INT (4);
    }

  /* Recognize special cases of block moves.  These occur
     when GNU C++ is forced to treat something as BLKmode
     to keep it in memory, when its mode could be represented
     with something smaller.

     We cannot do this for global variables, since we don't know
     what pages they don't cross.  Sigh.  */
  if (GET_CODE (operands[2]) == CONST_INT
      && ! CONSTANT_ADDRESS_P (operands[0])
      && ! CONSTANT_ADDRESS_P (operands[1]))
    {
      int size = INTVAL (operands[2]);
      rtx op0 = xoperands[0];
      rtx op1 = xoperands[1];

      /* Small, suitably aligned copies are fully unrolled as at most
	 16 load/store pairs through r31 (word, halfword, or byte
	 chunks depending on alignment).  */
      if ((align & 3) == 0 && (size & 3) == 0 && (size >> 2) <= 16)
	{
	  if (memory_address_p (SImode, plus_constant (op0, size))
	      && memory_address_p (SImode, plus_constant (op1, size)))
	    {
	      cc_status.flags &= ~CC_KNOW_HI_R31;
	      for (i = (size >> 2) - 1; i >= 0; i--)
		{
		  xoperands[0] = plus_constant (op0, i * 4);
		  xoperands[1] = plus_constant (op1, i * 4);
		  output_asm_insn ("ld.l %a1,%?r31\n\tst.l %?r31,%a0",
				   xoperands);
		}
	      return "";
	    }
	}
      else if ((align & 1) == 0 && (size & 1) == 0 && (size >> 1) <= 16)
	{
	  if (memory_address_p (HImode, plus_constant (op0, size))
	      && memory_address_p (HImode, plus_constant (op1, size)))
	    {
	      cc_status.flags &= ~CC_KNOW_HI_R31;
	      for (i = (size >> 1) - 1; i >= 0; i--)
		{
		  xoperands[0] = plus_constant (op0, i * 2);
		  xoperands[1] = plus_constant (op1, i * 2);
		  output_asm_insn ("ld.s %a1,%?r31\n\tst.s %?r31,%a0",
				   xoperands);
		}
	      return "";
	    }
	}
      else if (size <= 16)
	{
	  if (memory_address_p (QImode, plus_constant (op0, size))
	      && memory_address_p (QImode, plus_constant (op1, size)))
	    {
	      cc_status.flags &= ~CC_KNOW_HI_R31;
	      for (i = size - 1; i >= 0; i--)
		{
		  xoperands[0] = plus_constant (op0, i);
		  xoperands[1] = plus_constant (op1, i);
		  output_asm_insn ("ld.b %a1,%?r31\n\tst.b %?r31,%a0",
				   xoperands);
		}
	      return "";
	    }
	}
    }

  /* Since we clobber untold things, nix the condition codes.  */
  CC_STATUS_INIT;

  /* This is the size of the transfer.
     Either use the register which already contains the size,
     or use a free register (used by no operands).  */
  output_size_for_block_move (operands[2], operands[4], alignrtx);

#if 0
  /* Also emit code to decrement the size value by ALIGN.  */
  zoperands[0] = operands[0];
  zoperands[3] = plus_constant (operands[0], align);
  output_load_address (zoperands);
#endif

  /* Generate number for unique label.  */
  xoperands[3] = GEN_INT (movmemsi_label++);

  /* Calculate the size of the chunks we will be trying to move first.  */
#if 0
  if ((align & 3) == 0)
    chunk_size = 4;
  else if ((align & 1) == 0)
    chunk_size = 2;
  else
#endif
    chunk_size = 1;

  /* Copy the increment (negative) to a register for bla insn.  */
  xoperands[4] = GEN_INT (- chunk_size);
  xoperands[5] = operands[5];
  output_asm_insn ("adds %4,%?r0,%5", xoperands);

  /* Predecrement the loop counter.  This happens again also in the `bla'
     instruction which precedes the loop, but we need to have it done
     two times before we enter the loop because of the bizarre semantics
     of the bla instruction.  */
  output_asm_insn ("adds %5,%2,%2", xoperands);

  /* Check for the case where the original count was less than or equal to
     zero.  Avoid going through the loop at all if the original count was
     indeed less than or equal to zero.  Note that we treat the count as
     if it were a signed 32-bit quantity here, rather than an unsigned one,
     even though we really shouldn't.  We have to do this because of the
     semantics of the `ble' instruction, which assume that the count is
     a signed 32-bit value.  Anyway, in practice it won't matter because
     nobody is going to try to do a memcpy() of more than half of the
     entire address space (i.e. 2 gigabytes) anyway.  */
  output_asm_insn ("bc .Le%3", xoperands);

  /* Make available a register which is a temporary.  */
  xoperands[6] = operands[6];

  /* Now the actual loop.
     In xoperands, elements 1 and 0 are the input and output vectors.
     Element 2 is the loop index.  Element 5 is the increment.  */
  output_asm_insn ("subs %1,%5,%1", xoperands);
  output_asm_insn ("bla %5,%2,.Lm%3", xoperands);
  output_asm_insn ("adds %0,%2,%6", xoperands);
  output_asm_insn ("\n.Lm%3:", xoperands);	    /* Label for bla above.  */
  output_asm_insn ("\n.Ls%3:", xoperands);	    /* Loop start label.  */
  output_asm_insn ("adds %5,%6,%6", xoperands);

  /* NOTE:  The code here which is supposed to handle the cases where the
     sources and destinations are known to start on a 4 or 2 byte boundary
     are currently broken.  They fail to do anything about the overflow
     bytes which might still need to be copied even after we have copied
     some number of words or halfwords.  Thus, for now we use the lowest
     common denominator, i.e. the code which just copies some number of
     totally unaligned individual bytes.  (See the calculation of
     chunk_size above.)  */
  if (chunk_size == 4)
    {
      output_asm_insn ("ld.l %2(%1),%?r31", xoperands);
      output_asm_insn ("bla %5,%2,.Ls%3", xoperands);
      output_asm_insn ("st.l %?r31,8(%6)", xoperands);
    }
  else if (chunk_size == 2)
    {
      output_asm_insn ("ld.s %2(%1),%?r31", xoperands);
      output_asm_insn ("bla %5,%2,.Ls%3", xoperands);
      output_asm_insn ("st.s %?r31,4(%6)", xoperands);
    }
  else /* chunk_size == 1 */
    {
      /* The store offset (2 = two byte-sized chunks) compensates for
	 the double predecrement of the counter before the loop --
	 presumably; TODO confirm against the bla description in the
	 i860 manual.  */
      output_asm_insn ("ld.b %2(%1),%?r31", xoperands);
      output_asm_insn ("bla %5,%2,.Ls%3", xoperands);
      output_asm_insn ("st.b %?r31,2(%6)", xoperands);
    }
  output_asm_insn ("\n.Le%3:", xoperands);	    /* Here if count <= 0.  */

  return "";
}

/* Special routine to convert an SFmode value represented as a
   CONST_DOUBLE into its equivalent unsigned long bit pattern.
   We convert the value from a double precision floating-point
   value to single precision first, and thence to a bit-wise
   equivalent unsigned long value.
This routine is used when   generating an immediate move of an SFmode value directly   into a general register because the SVR4 assembler doesn't   grok floating literals in instruction operand contexts.  */unsigned longsfmode_constant_to_ulong (rtx x){  REAL_VALUE_TYPE d;  unsigned long l;  if (GET_CODE (x) != CONST_DOUBLE || GET_MODE (x) != SFmode)    abort ();  REAL_VALUE_FROM_CONST_DOUBLE (d, x);  REAL_VALUE_TO_TARGET_SINGLE (d, l);  return l;}/* This function generates the assembly code for function entry.   ASM_FILE is a stdio stream to output the code to.   SIZE is an int: how many units of temporary storage to allocate.   Refer to the array `regs_ever_live' to determine which registers   to save; `regs_ever_live[I]' is nonzero if register number I   is ever used in the function.  This macro is responsible for   knowing which registers should not be saved even if used.   NOTE: `frame_lower_bytes' is the count of bytes which will lie   between the new `fp' value and the new `sp' value after the   prologue is done.  `frame_upper_bytes' is the count of bytes   that will lie between the new `fp' and the *old* `sp' value   after the new `fp' is setup (in the prologue).  The upper   part of each frame always includes at least 2 words (8 bytes)   to hold the saved frame pointer and the saved return address.   The SVR4 ABI for the i860 now requires that the values of the   stack pointer and frame pointer registers be kept aligned to   16-byte boundaries at all times.  We obey that restriction here.   The SVR4 ABI for the i860 is entirely vague when it comes to specifying   exactly where the "preserved" registers should be saved.  The native   SVR4 C compiler I now have doesn't help to clarify the requirements   very much because it is plainly out-of-date and non-ABI-compliant   (in at least one important way, i.e. how it generates function   epilogues).   The native SVR4 C compiler saves the "preserved" registers (i.e.   
r4-r15 and f2-f7) in the lower part of a frame (i.e. at negative   offsets from the frame pointer).   Previous versions of GCC also saved the "preserved" registers in the   "negative" part of the frame, but they saved them using positive   offsets from the (adjusted) stack pointer (after it had been adjusted   to allocate space for the new frame).  That's just plain wrong

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -