⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 rs6000.c

📁 GCC编译器源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
  if (cum->words < GP_ARG_NUM_REG
      && GP_ARG_NUM_REG < (cum->words + RS6000_ARG_SIZE (mode, type, named)))
    {
      int ret = GP_ARG_NUM_REG - cum->words;

      if (ret && TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_partial_nregs: %d\n", ret);

      return ret;
    }

  return 0;
}

/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.

   Under V.4, structures and unions are passed by reference.  */

int
function_arg_pass_by_reference (cum, mode, type, named)
     CUMULATIVE_ARGS *cum;
     enum machine_mode mode;
     tree type;
     int named;
{
  /* Only the SVR4-style ABIs (V.4 and Solaris) pass aggregates by
     reference; other ABIs fall through and return 0.  MODE and NAMED
     are unused here but kept for the target-macro interface.  */
  if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_SOLARIS)
      && type && AGGREGATE_TYPE_P (type))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: aggregate\n");

      return 1;
    }

  return 0;
}

/* Perform any needed actions needed for a function that is receiving a
   variable number of arguments.

   CUM is as above.

   MODE and TYPE are the mode and type of the current parameter.

   PRETEND_SIZE is a variable that should be set to the amount of stack
   that must be pushed by the prolog to pretend that our caller pushed
   it.

   Normally, this macro will push all remaining incoming registers on the
   stack and set PRETEND_SIZE to the length of the registers pushed.  */

void
setup_incoming_varargs (cum, mode, type, pretend_size, no_rtl)
     CUMULATIVE_ARGS *cum;
     enum machine_mode mode;
     tree type;
     int *pretend_size;
     int no_rtl;
{
  rtx save_area = virtual_incoming_args_rtx;
  /* Bytes per general-purpose register.  */
  int reg_size	= (TARGET_64BIT) ? 8 : 4;

  if (TARGET_DEBUG_ARG)
    fprintf (stderr,
	     "setup_vararg: words = %2d, fregno = %2d, nargs = %4d, proto = %d, mode = %4s, no_rtl= %d\n",
	     cum->words, cum->fregno, cum->nargs_prototype, cum->prototype,
	     GET_MODE_NAME (mode), no_rtl);

  /* Under the SVR4-style ABIs the register save area lives at a fixed
     offset from the frame pointer, not in the caller's frame.  */
  if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_SOLARIS) && !no_rtl)
    {
      rs6000_sysv_varargs_p = 1;
      save_area = plus_constant (frame_pointer_rtx, RS6000_VARARGS_OFFSET);
    }

  /* Spill the argument GPRs not consumed by named parameters into the
     save area, and report the pretend-args size to the caller.  */
  if (cum->words < 8)
    {
      int first_reg_offset = cum->words;

      if (MUST_PASS_IN_STACK (mode, type))
	first_reg_offset += RS6000_ARG_SIZE (TYPE_MODE (type), type, 1);

      if (first_reg_offset > GP_ARG_NUM_REG)
	first_reg_offset = GP_ARG_NUM_REG;

      if (!no_rtl && first_reg_offset != GP_ARG_NUM_REG)
	move_block_from_reg
	  (GP_ARG_MIN_REG + first_reg_offset,
	   gen_rtx (MEM, BLKmode,
		    plus_constant (save_area, first_reg_offset * reg_size)),
	   GP_ARG_NUM_REG - first_reg_offset,
	   (GP_ARG_NUM_REG - first_reg_offset) * UNITS_PER_WORD);

      *pretend_size = (GP_ARG_NUM_REG - first_reg_offset) * UNITS_PER_WORD;
    }

  /* Save FP registers if needed.  */
  if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_SOLARIS)
      && TARGET_HARD_FLOAT && !no_rtl)
    {
      int fregno     = cum->fregno;
      int num_fp_reg = FP_ARG_V4_MAX_REG + 1 - fregno;

      if (num_fp_reg >= 0)
	{
	  /* Hard register 69 is CR1 in this port.  NOTE(review): this
	     presumably implements the SVR4 varargs convention where the
	     caller flags in CR1 whether FP arguments were passed, so the
	     stores can be skipped -- confirm against the ABI supplement.  */
	  rtx cr1 = gen_rtx (REG, CCmode, 69);
	  rtx lab = gen_label_rtx ();
	  int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG) * 8);

	  /* Branch around the FP stores when CR1 is nonzero.  */
	  emit_jump_insn (gen_rtx (SET, VOIDmode,
				   pc_rtx,
				   gen_rtx (IF_THEN_ELSE, VOIDmode,
					    gen_rtx (NE, VOIDmode, cr1, const0_rtx),
					    gen_rtx (LABEL_REF, VOIDmode, lab),
					    pc_rtx)));

	  /* NOTE(review): `num_fp_reg-- >= 0' iterates num_fp_reg + 1
	     times, i.e. one DFmode store beyond FP_ARG_V4_MAX_REG when
	     all argument FPRs are unused -- verify whether this
	     off-by-one is intentional.  */
	  while ( num_fp_reg-- >= 0)
	    {
	      emit_move_insn (gen_rtx (MEM, DFmode, plus_constant (save_area, off)),
			      gen_rtx (REG, DFmode, fregno++));
	      off += 8;
	    }

	  emit_label (lab);
	}
    }
}

/* If defined, is a C expression that produces the machine-specific
   code for a call to `__builtin_saveregs'.  This code will be moved
   to the very beginning of the function, before any parameter access
   are made.  The return value of this function should be an RTX that
   contains the value to use as the return of `__builtin_saveregs'.

   The argument ARGS is a `tree_list' containing the arguments that
   were passed to `__builtin_saveregs'.

   If this macro is not defined, the compiler will output an ordinary
   call to the library function `__builtin_saveregs'.

   On the Power/PowerPC return the address of the area on the stack
   used to hold arguments.  Under AIX, this includes the 8 word register
   save area.  Under V.4 this does not.  */

struct rtx_def *
expand_builtin_saveregs (args)
     tree args;
{
  /* ARGS is ignored; the incoming-arguments address is the answer.  */
  return virtual_incoming_args_rtx;
}

/* Generate a memory reference for expand_block_move, copying volatile,
   and other bits from an original memory reference.  */

static rtx
expand_block_move_mem (mode, addr, orig_mem)
     enum machine_mode mode;
     rtx addr;
     rtx orig_mem;
{
  /* Build a fresh MEM in MODE at ADDR, then propagate ORIG_MEM's
     attribute bits so aliasing/volatility information is not lost.  */
  rtx mem = gen_rtx (MEM, mode, addr);

  RTX_UNCHANGING_P (mem) = RTX_UNCHANGING_P (orig_mem);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  MEM_IN_STRUCT_P (mem) = MEM_IN_STRUCT_P (orig_mem);
#ifdef MEM_UNALIGNED_P
  MEM_UNALIGNED_P (mem) = MEM_UNALIGNED_P (orig_mem);
#endif
  return mem;
}

/* Expand a block move operation, and return 1 if successful.  Return 0
   if we should let the compiler generate normal code.
operands[0] is the destination   operands[1] is the source   operands[2] is the length   operands[3] is the alignment */#define MAX_MOVE_REG 4intexpand_block_move (operands)     rtx operands[];{  rtx orig_dest = operands[0];  rtx orig_src	= operands[1];  rtx bytes_rtx	= operands[2];  rtx align_rtx = operands[3];  int constp	= (GET_CODE (bytes_rtx) == CONST_INT);  int align	= XINT (align_rtx, 0);  int bytes;  int offset;  int num_reg;  int i;  rtx src_reg;  rtx dest_reg;  rtx src_addr;  rtx dest_addr;  rtx tmp_reg;  rtx stores[MAX_MOVE_REG];  int move_bytes;  /* If this is not a fixed size move, just call memcpy */  if (!constp)    return 0;  /* Anything to move? */  bytes = INTVAL (bytes_rtx);  if (bytes <= 0)    return 1;  /* Don't support real large moves.  If string instructions are not used,     then don't generate more than 8 loads.  */  if (TARGET_STRING)    {      if (bytes > 4*8)	return 0;    }  else if (!STRICT_ALIGNMENT)    {      if (bytes > 4*8)	return 0;    }  else if (bytes > 8*align)    return 0;  /* Move the address into scratch registers.  */  dest_reg = copy_addr_to_reg (XEXP (orig_dest, 0));  src_reg  = copy_addr_to_reg (XEXP (orig_src,  0));  if (TARGET_STRING)	/* string instructions are available */    {      for ( ; bytes > 0; bytes -= move_bytes)	{	  if (bytes > 24		/* move up to 32 bytes at a time */	      && !fixed_regs[5]	      && !fixed_regs[6]	      && !fixed_regs[7]	      && !fixed_regs[8]	      && !fixed_regs[9]	      && !fixed_regs[10]	      && !fixed_regs[11]	      && !fixed_regs[12])	    {	      move_bytes = (bytes > 32) ? 32 : bytes;	      emit_insn (gen_movstrsi_8reg (expand_block_move_mem (BLKmode, dest_reg, orig_dest),					    expand_block_move_mem (BLKmode, src_reg, orig_src),					    GEN_INT ((move_bytes == 32) ? 
0 : move_bytes),					    align_rtx));	    }	  else if (bytes > 16	/* move up to 24 bytes at a time */		   && !fixed_regs[7]		   && !fixed_regs[8]		   && !fixed_regs[9]		   && !fixed_regs[10]		   && !fixed_regs[11]		   && !fixed_regs[12])	    {	      move_bytes = (bytes > 24) ? 24 : bytes;	      emit_insn (gen_movstrsi_6reg (expand_block_move_mem (BLKmode, dest_reg, orig_dest),					    expand_block_move_mem (BLKmode, src_reg, orig_src),					    GEN_INT (move_bytes),					    align_rtx));	    }	  else if (bytes > 8	/* move up to 16 bytes at a time */		   && !fixed_regs[9]		   && !fixed_regs[10]		   && !fixed_regs[11]		   && !fixed_regs[12])	    {	      move_bytes = (bytes > 16) ? 16 : bytes;	      emit_insn (gen_movstrsi_4reg (expand_block_move_mem (BLKmode, dest_reg, orig_dest),					    expand_block_move_mem (BLKmode, src_reg, orig_src),					    GEN_INT (move_bytes),					    align_rtx));	    }	  else if (bytes > 4 && !TARGET_64BIT)	    {			/* move up to 8 bytes at a time */	      move_bytes = (bytes > 8) ? 
8 : bytes;	      emit_insn (gen_movstrsi_2reg (expand_block_move_mem (BLKmode, dest_reg, orig_dest),					    expand_block_move_mem (BLKmode, src_reg, orig_src),					    GEN_INT (move_bytes),					    align_rtx));	    }	  else if (bytes >= 4 && (align >= 4 || !STRICT_ALIGNMENT))	    {			/* move 4 bytes */	      move_bytes = 4;	      tmp_reg = gen_reg_rtx (SImode);	      emit_move_insn (tmp_reg, expand_block_move_mem (SImode, src_reg, orig_src));	      emit_move_insn (expand_block_move_mem (SImode, dest_reg, orig_dest), tmp_reg);	    }	  else if (bytes == 2 && (align >= 2 || !STRICT_ALIGNMENT))	    {			/* move 2 bytes */	      move_bytes = 2;	      tmp_reg = gen_reg_rtx (HImode);	      emit_move_insn (tmp_reg, expand_block_move_mem (HImode, src_reg, orig_src));	      emit_move_insn (expand_block_move_mem (HImode, dest_reg, orig_dest), tmp_reg);	    }	  else if (bytes == 1)	/* move 1 byte */	    {	      move_bytes = 1;	      tmp_reg = gen_reg_rtx (QImode);	      emit_move_insn (tmp_reg, expand_block_move_mem (QImode, src_reg, orig_src));	      emit_move_insn (expand_block_move_mem (QImode, dest_reg, orig_dest), tmp_reg);	    }	  else	    {			/* move up to 4 bytes at a time */	      move_bytes = (bytes > 4) ? 
4 : bytes;	      emit_insn (gen_movstrsi_1reg (expand_block_move_mem (BLKmode, dest_reg, orig_dest),					    expand_block_move_mem (BLKmode, src_reg, orig_src),					    GEN_INT (move_bytes),					    align_rtx));	    }	  if (bytes > move_bytes)	    {	      emit_insn (gen_addsi3 (src_reg, src_reg, GEN_INT (move_bytes)));	      emit_insn (gen_addsi3 (dest_reg, dest_reg, GEN_INT (move_bytes)));	    }	}    }  else			/* string instructions not available */    {      num_reg = offset = 0;      for ( ; bytes > 0; (bytes -= move_bytes), (offset += move_bytes))	{	  /* Calculate the correct offset for src/dest */	  if (offset == 0)	    {	      src_addr  = src_reg;	      dest_addr = dest_reg;	    }	  else	    {	      src_addr  = gen_rtx (PLUS, Pmode, src_reg,  GEN_INT (offset));	      dest_addr = gen_rtx (PLUS, Pmode, dest_reg, GEN_INT (offset));	    }	  /* Generate the appropriate load and store, saving the stores for later */	  if (bytes >= 8 && TARGET_64BIT && (align >= 8 || !STRICT_ALIGNMENT))	    {	      move_bytes = 8;	      tmp_reg = gen_reg_rtx (DImode);	      emit_insn (gen_movdi (tmp_reg, expand_block_move_mem (DImode, src_addr, orig_src)));	      stores[ num_reg++ ] = gen_movdi (expand_block_move_mem (DImode, dest_addr, orig_dest), tmp_reg);	    }	  else if (bytes >= 4 && (align >= 4 || !STRICT_ALIGNMENT))	    {	      move_bytes = 4;	      tmp_reg = gen_reg_rtx (SImode);	      emit_insn (gen_movsi (tmp_reg, expand_block_move_mem (SImode, src_addr, orig_src)));	      stores[ num_reg++ ] = gen_movsi (expand_block_move_mem (SImode, dest_addr, orig_dest), tmp_reg);	    }	  else if (bytes >= 2 && (align >= 2 || !STRICT_ALIGNMENT))	    {	      move_bytes = 2;	      tmp_reg = gen_reg_rtx (HImode);	      emit_insn (gen_movsi (tmp_reg, expand_block_move_mem (HImode, src_addr, orig_src)));	      stores[ num_reg++ ] = gen_movhi (expand_block_move_mem (HImode, dest_addr, orig_dest), tmp_reg);	    }	  else	    {	      move_bytes = 1;	      tmp_reg = gen_reg_rtx (QImode);	      
emit_insn (gen_movsi (tmp_reg, expand_block_move_mem (QImode, src_addr, orig_src)));	      stores[ num_reg++ ] = gen_movqi (expand_block_move_mem (QImode, dest_addr, orig_dest), tmp_reg);	    }	  if (num_reg >= MAX_MOVE_REG)	    {	      for (i = 0; i < num_reg; i++)		emit_insn (stores[i]);	      num_reg = 0;	    }	}      for (i = 0; i < num_reg; i++)	emit_insn (stores[i]);    }  return 1;}/* Return 1 if OP is a load multiple operation.  It is known to be a   PARALLEL and the first section will be tested.  */intload_multiple_operation (op, mode)     rtx op;     enum machine_mode mode;{  int count = XVECLEN (op, 0);  int dest_regno;  rtx src_addr;  int i;  /* Perform a quick check so we don't blow up below.  */  if (count <= 1      || GET_CODE (XVECEXP (op, 0, 0)) != SET      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)    return 0;  dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));  src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);  for (i = 1; i < count; i++)    {      rtx elt = XVECEXP (op, 0, i);      if (GET_CODE (elt) != SET	  || GET_CODE (SET_DEST (elt)) != REG	  || GET_MODE (SET_DEST (elt)) != SImode	  || REGNO (SET_DEST (elt)) != dest_regno + i	  || GET_CODE (SET_SRC (elt)) != MEM	  || GET_MODE (SET_SRC (elt)) != SImode	  || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS	  || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)	  || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT	  || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4)	return 0;    }  return 1;}/* Similar, but tests for store multiple.  Here, the second vector element   is a CLOBBER.  It will be tested later.  */intstore_multiple_operation (op, mode)     rtx op;     enum machine_mode mode;{  int count = XVECLEN (op, 0) - 1;  int src_regno;  rtx dest_addr;  int i;  /* Perform a quick check so we don't blow up below.  
*/  if (count <= 1      || GET_CODE (XVECEXP (op, 0, 0)) != SET      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)    return 0;  src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));  dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);  for (i = 1; i < count; i++)    {      rtx elt = XVECEXP (op, 0, i + 1);      if (GET_CODE (elt) != SET	  || GET_CODE (SET_SRC (elt)) != REG	  || GET_MODE (SET_SRC (elt)) != SImode	  || REGNO (SET_SRC (elt)) != src_regno + i	  || GET_CODE (SET_DEST (elt)) != MEM	  || GET_MODE (SET_DEST (elt)) != SImode	  || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS	  || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)	  || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT	  || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4)	return 0;    }

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -