📄 rs6000.c
字号:
&& (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype))) != void_type_node)); /* Allocate the va_list constructor. */ block = assign_stack_local (BLKmode, 3 * UNITS_PER_WORD, BITS_PER_WORD); RTX_UNCHANGING_P (block) = 1; RTX_UNCHANGING_P (XEXP (block, 0)) = 1; mem_gpr_fpr = change_address (block, word_mode, XEXP (block, 0)); mem_overflow = change_address (block, ptr_mode, plus_constant (XEXP (block, 0), UNITS_PER_WORD)); mem_reg_save_area = change_address (block, ptr_mode, plus_constant (XEXP (block, 0), 2 * UNITS_PER_WORD)); /* Construct the two characters of `gpr' and `fpr' as a unit. */ words = current_function_args_info.words; gpr = current_function_args_info.sysv_gregno - GP_ARG_MIN_REG; fpr = current_function_args_info.fregno - FP_ARG_MIN_REG; /* Varargs has the va_dcl argument, but we don't count it. */ if (!stdarg_p) { if (gpr > GP_ARG_NUM_REG) words -= 1; else gpr -= 1; } if (BYTES_BIG_ENDIAN) { HOST_WIDE_INT bits = gpr << 8 | fpr; if (HOST_BITS_PER_WIDE_INT >= BITS_PER_WORD) tmp = GEN_INT (bits << (BITS_PER_WORD - 16)); else { bits <<= BITS_PER_WORD - HOST_BITS_PER_WIDE_INT - 16; tmp = immed_double_const (0, bits, word_mode); } } else tmp = GEN_INT (fpr << 8 | gpr); emit_move_insn (mem_gpr_fpr, tmp); /* Find the overflow area. */ tmp = expand_binop (Pmode, add_optab, virtual_incoming_args_rtx, GEN_INT (words * UNITS_PER_WORD), mem_overflow, 0, OPTAB_WIDEN); if (tmp != mem_overflow) emit_move_insn (mem_overflow, tmp); /* Find the register save area. */ tmp = expand_binop (Pmode, add_optab, virtual_stack_vars_rtx, GEN_INT (-RS6000_VARARGS_SIZE), mem_reg_save_area, 0, OPTAB_WIDEN); if (tmp != mem_reg_save_area) emit_move_insn (mem_reg_save_area, tmp); /* Return the address of the va_list constructor. */ return XEXP (block, 0);}/* Generate a memory reference for expand_block_move, copying volatile, and other bits from an original memory reference. 
*/

static rtx
expand_block_move_mem (mode, addr, orig_mem)
     enum machine_mode mode;
     rtx addr;
     rtx orig_mem;
{
  /* Build a fresh MEM in MODE at ADDR, then copy the memory attributes
     (unchanging, volatile, struct-ref, etc.) from ORIG_MEM so alias and
     scheduling information is not lost on the pieces of the block move.  */
  rtx mem = gen_rtx_MEM (mode, addr);

  RTX_UNCHANGING_P (mem) = RTX_UNCHANGING_P (orig_mem);
  MEM_COPY_ATTRIBUTES (mem, orig_mem);
#ifdef MEM_UNALIGNED_P
  /* Only some targets/versions define this flag, hence the guard.  */
  MEM_UNALIGNED_P (mem) = MEM_UNALIGNED_P (orig_mem);
#endif
  return mem;
}

/* Expand a block move operation, and return 1 if successful.  Return 0
   if we should let the compiler generate normal code.

   operands[0] is the destination
   operands[1] is the source
   operands[2] is the length
   operands[3] is the alignment */

/* Maximum number of load insns issued before their matching deferred
   stores are flushed in the non-string-insn path below.  */
#define MAX_MOVE_REG 4

int
expand_block_move (operands)
     rtx operands[];
{
  rtx orig_dest = operands[0];
  rtx orig_src	= operands[1];
  rtx bytes_rtx	= operands[2];
  rtx align_rtx = operands[3];
  int constp	= (GET_CODE (bytes_rtx) == CONST_INT);
  int align	= XINT (align_rtx, 0);
  int bytes;
  int offset;
  int num_reg;
  int i;
  rtx src_reg;
  rtx dest_reg;
  rtx src_addr;
  rtx dest_addr;
  rtx tmp_reg;
  rtx stores[MAX_MOVE_REG];
  int move_bytes;

  /* If this is not a fixed size move, just call memcpy */
  if (! constp)
    return 0;

  /* Anything to move? */
  bytes = INTVAL (bytes_rtx);
  if (bytes <= 0)
    return 1;

  /* Don't support real large moves.  If string instructions are not used,
     then don't generate more than 8 loads.  */
  if (TARGET_STRING)
    {
      if (bytes > 8*4)
	return 0;
    }
  else if (! STRICT_ALIGNMENT)
    {
      if (TARGET_POWERPC64 && align >= 4)
	{
	  if (bytes > 8*8)
	    return 0;
	}
      else
	if (bytes > 8*4)
	  return 0;
    }
  else if (bytes > 8*align)
    return 0;

  /* Move the address into scratch registers.  */
  dest_reg = copy_addr_to_reg (XEXP (orig_dest, 0));
  src_reg = copy_addr_to_reg (XEXP (orig_src, 0));

  if (TARGET_STRING)	/* string instructions are available */
    {
      for ( ; bytes > 0; bytes -= move_bytes)
	{
	  /* The multi-register string insns use a fixed run of registers
	     starting at r5; they can only be emitted if none of those
	     registers is fixed (e.g. reserved by -ffixed-reg).  */
	  if (bytes > 24		/* move up to 32 bytes at a time */
	      && ! fixed_regs[5]
	      && ! fixed_regs[6]
	      && ! fixed_regs[7]
	      && ! fixed_regs[8]
	      && ! fixed_regs[9]
	      && ! fixed_regs[10]
	      && ! fixed_regs[11]
	      && ! fixed_regs[12])
	    {
	      move_bytes = (bytes > 32) ? 32 : bytes;
	      /* NOTE(review): a count of 32 is passed as 0 — presumably the
		 string insn's length field encodes 32 that way; confirm
		 against the movstrsi_8reg pattern in the .md file.  */
	      emit_insn (gen_movstrsi_8reg (expand_block_move_mem (BLKmode,
								   dest_reg,
								   orig_dest),
					    expand_block_move_mem (BLKmode,
								   src_reg,
								   orig_src),
					    GEN_INT ((move_bytes == 32)
						     ? 0 : move_bytes),
					    align_rtx));
	    }
	  else if (bytes > 16	/* move up to 24 bytes at a time */
		   && ! fixed_regs[7]
		   && ! fixed_regs[8]
		   && ! fixed_regs[9]
		   && ! fixed_regs[10]
		   && ! fixed_regs[11]
		   && ! fixed_regs[12])
	    {
	      move_bytes = (bytes > 24) ? 24 : bytes;
	      emit_insn (gen_movstrsi_6reg (expand_block_move_mem (BLKmode,
								   dest_reg,
								   orig_dest),
					    expand_block_move_mem (BLKmode,
								   src_reg,
								   orig_src),
					    GEN_INT (move_bytes),
					    align_rtx));
	    }
	  else if (bytes > 8	/* move up to 16 bytes at a time */
		   && ! fixed_regs[9]
		   && ! fixed_regs[10]
		   && ! fixed_regs[11]
		   && ! fixed_regs[12])
	    {
	      move_bytes = (bytes > 16) ? 16 : bytes;
	      emit_insn (gen_movstrsi_4reg (expand_block_move_mem (BLKmode,
								   dest_reg,
								   orig_dest),
					    expand_block_move_mem (BLKmode,
								   src_reg,
								   orig_src),
					    GEN_INT (move_bytes),
					    align_rtx));
	    }
	  else if (bytes > 4 && ! TARGET_POWERPC64)
	    {			/* move up to 8 bytes at a time */
	      move_bytes = (bytes > 8) ? 8 : bytes;
	      emit_insn (gen_movstrsi_2reg (expand_block_move_mem (BLKmode,
								   dest_reg,
								   orig_dest),
					    expand_block_move_mem (BLKmode,
								   src_reg,
								   orig_src),
					    GEN_INT (move_bytes),
					    align_rtx));
	    }
	  else if (bytes >= 4 && (align >= 4 || ! STRICT_ALIGNMENT))
	    {			/* move 4 bytes */
	      move_bytes = 4;
	      tmp_reg = gen_reg_rtx (SImode);
	      emit_move_insn (tmp_reg,
			      expand_block_move_mem (SImode, src_reg,
						     orig_src));
	      emit_move_insn (expand_block_move_mem (SImode, dest_reg,
						     orig_dest),
			      tmp_reg);
	    }
	  else if (bytes == 2 && (align >= 2 || ! STRICT_ALIGNMENT))
	    {			/* move 2 bytes */
	      move_bytes = 2;
	      tmp_reg = gen_reg_rtx (HImode);
	      emit_move_insn (tmp_reg,
			      expand_block_move_mem (HImode, src_reg,
						     orig_src));
	      emit_move_insn (expand_block_move_mem (HImode, dest_reg,
						     orig_dest),
			      tmp_reg);
	    }
	  else if (bytes == 1)	/* move 1 byte */
	    {
	      move_bytes = 1;
	      tmp_reg = gen_reg_rtx (QImode);
	      emit_move_insn (tmp_reg,
			      expand_block_move_mem (QImode, src_reg,
						     orig_src));
	      emit_move_insn (expand_block_move_mem (QImode, dest_reg,
						     orig_dest),
			      tmp_reg);
	    }
	  else
	    {			/* move up to 4 bytes at a time */
	      move_bytes = (bytes > 4) ? 4 : bytes;
	      emit_insn (gen_movstrsi_1reg (expand_block_move_mem (BLKmode,
								   dest_reg,
								   orig_dest),
					    expand_block_move_mem (BLKmode,
								   src_reg,
								   orig_src),
					    GEN_INT (move_bytes),
					    align_rtx));
	    }

	  /* If anything remains, bump both pointers past the chunk just
	     moved so the next iteration's string insn starts correctly.  */
	  if (bytes > move_bytes)
	    {
	      if (! TARGET_POWERPC64)
		{
		  emit_insn (gen_addsi3 (src_reg, src_reg,
					 GEN_INT (move_bytes)));
		  emit_insn (gen_addsi3 (dest_reg, dest_reg,
					 GEN_INT (move_bytes)));
		}
	      else
		{
		  emit_insn (gen_adddi3 (src_reg, src_reg,
					 GEN_INT (move_bytes)));
		  emit_insn (gen_adddi3 (dest_reg, dest_reg,
					 GEN_INT (move_bytes)));
		}
	    }
	}
    }
  else			/* string instructions not available */
    {
      num_reg = offset = 0;
      for ( ; bytes > 0; (bytes -= move_bytes), (offset += move_bytes))
	{
	  /* Calculate the correct offset for src/dest */
	  if (offset == 0)
	    {
	      src_addr  = src_reg;
	      dest_addr = dest_reg;
	    }
	  else
	    {
	      src_addr = gen_rtx_PLUS (Pmode, src_reg, GEN_INT (offset));
	      dest_addr = gen_rtx_PLUS (Pmode, dest_reg, GEN_INT (offset));
	    }

	  /* Generate the appropriate load and store, saving the stores
	     for later.  Deferring the stores lets up to MAX_MOVE_REG
	     loads issue back-to-back before any store is emitted.  */
	  if (bytes >= 8 && TARGET_POWERPC64
	      /* 64-bit loads and stores require word-aligned displacements. */
	      && (align >= 8 || (! STRICT_ALIGNMENT && align >= 4)))
	    {
	      move_bytes = 8;
	      tmp_reg = gen_reg_rtx (DImode);
	      emit_insn (gen_movdi (tmp_reg,
				    expand_block_move_mem (DImode,
							   src_addr,
							   orig_src)));
	      stores[num_reg++] = gen_movdi (expand_block_move_mem (DImode,
								    dest_addr,
								    orig_dest),
					     tmp_reg);
	    }
	  else if (bytes >= 4 && (align >= 4 || ! STRICT_ALIGNMENT))
	    {
	      move_bytes = 4;
	      tmp_reg = gen_reg_rtx (SImode);
	      emit_insn (gen_movsi (tmp_reg,
				    expand_block_move_mem (SImode,
							   src_addr,
							   orig_src)));
	      stores[num_reg++] = gen_movsi (expand_block_move_mem (SImode,
								    dest_addr,
								    orig_dest),
					     tmp_reg);
	    }
	  else if (bytes >= 2 && (align >= 2 || ! STRICT_ALIGNMENT))
	    {
	      move_bytes = 2;
	      tmp_reg = gen_reg_rtx (HImode);
	      emit_insn (gen_movhi (tmp_reg,
				    expand_block_move_mem (HImode,
							   src_addr,
							   orig_src)));
	      stores[num_reg++] = gen_movhi (expand_block_move_mem (HImode,
								    dest_addr,
								    orig_dest),
					     tmp_reg);
	    }
	  else
	    {
	      move_bytes = 1;
	      tmp_reg = gen_reg_rtx (QImode);
	      emit_insn (gen_movqi (tmp_reg,
				    expand_block_move_mem (QImode,
							   src_addr,
							   orig_src)));
	      stores[num_reg++] = gen_movqi (expand_block_move_mem (QImode,
								    dest_addr,
								    orig_dest),
					     tmp_reg);
	    }

	  /* Flush the deferred stores once the buffer is full.  */
	  if (num_reg >= MAX_MOVE_REG)
	    {
	      for (i = 0; i < num_reg; i++)
		emit_insn (stores[i]);
	      num_reg = 0;
	    }
	}

      /* Flush any stores still pending after the loop.  */
      for (i = 0; i < num_reg; i++)
	emit_insn (stores[i]);
    }

  return 1;
}

/* Return 1 if OP is a load multiple operation.  It is known to be a
   PARALLEL and the first section will be tested.  */

int
load_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  int count = XVECLEN (op, 0);
  int dest_regno;
  rtx src_addr;
  int i;

  /* Perform a quick check so we don't blow up below.  The first element
     must be (set (reg) (mem ...)); later elements are validated in full.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
    return 0;

  dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
  src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);

  /* Each subsequent element must load SImode word I*4 past SRC_ADDR into
     consecutive register DEST_REGNO + I.  */
  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != SImode
	  || REGNO (SET_DEST (elt)) != dest_regno + i
	  || GET_CODE (SET_SRC (elt)) != MEM
	  || GET_MODE (SET_SRC (elt)) != SImode
	  || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
	  || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
	  || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
	  || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4)
	return 0;
    }

  return 1;
}

/* Similar, but tests for store multiple.  Here, the second vector element
   is a CLOBBER.  It will be tested later.  */

int
store_multiple_operation (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  /* Element 1 is the CLOBBER, so there is one fewer SET than vector
     entries; the loop below indexes past the CLOBBER with I + 1.  */
  int count = XVECLEN (op, 0) - 1;
  int src_regno;
  rtx dest_addr;
  int i;

  /* Perform a quick check so we don't blow up below.  */
  if (count <= 1
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
    return 0;

  src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
  dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);

  /* Each subsequent SET must store consecutive register SRC_REGNO + I at
     SImode word I*4 past DEST_ADDR.  */
  for (i = 1; i < count; i++)
    {
      rtx elt = XVECEXP (op, 0, i + 1);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_SRC (elt)) != REG
	  || GET_MODE (SET_SRC (elt)) != SImode
	  || REGNO (SET_SRC (elt)) != src_regno + i
	  || GET_CODE (SET_DEST (elt)) != MEM
	  || GET_MODE (SET_DEST (elt)) != SImode
	  || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
	  || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
	  || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
	  || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4)
	return 0;
    }

  return 1;
}

/* Return 1 if OP is a comparison operation that is valid for a branch insn.
   We only check the opcode against the mode of the CC value here.  */

int
branch_comparison_operator (op, mode)
     register rtx op;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -