/* sparc.c -- excerpt from the GNU C Compiler SPARC back end: assembler
   output routines for multi-word moves, address loading, and block moves.
   (The two lines replaced here were code-viewer page chrome, not source.)  */
/* NOTE(review): this excerpt opens mid-function -- the header above it is not
   visible here.  From the code it is the four-word (TFmode) move emitter,
   presumably output_move_quad: op0/op1 are the two operands,
   optype0/optype1 classify them (REGOP/OFFSOP/CNSTOP), and
   wordpart[word][operand] holds the SImode piece of each operand for each
   of the four 32-bit words (operand index 0 = destination, 1 = source) --
   confirm against the declarations above this excerpt.  */
      wordpart[2][0] = gen_rtx (REG, SImode, REGNO (op0) + 2);
      wordpart[3][0] = gen_rtx (REG, SImode, REGNO (op0) + 3);
    }
  else if (optype0 == OFFSOP)
    {
      /* Offsettable memory destination: one address per 32-bit word.  */
      wordpart[0][0] = adj_offsettable_operand (op0, 0);
      wordpart[1][0] = adj_offsettable_operand (op0, 4);
      wordpart[2][0] = adj_offsettable_operand (op0, 8);
      wordpart[3][0] = adj_offsettable_operand (op0, 12);
    }
  else
    {
      /* Non-offsettable destination: reuse op0 for every word; the address
	 register is stepped explicitly further down.  */
      wordpart[0][0] = op0;
      wordpart[1][0] = op0;
      wordpart[2][0] = op0;
      wordpart[3][0] = op0;
    }

  if (optype1 == REGOP)
    {
      wordpart[0][1] = gen_rtx (REG, SImode, REGNO (op1) + 0);
      wordpart[1][1] = gen_rtx (REG, SImode, REGNO (op1) + 1);
      wordpart[2][1] = gen_rtx (REG, SImode, REGNO (op1) + 2);
      wordpart[3][1] = gen_rtx (REG, SImode, REGNO (op1) + 3);
    }
  else if (optype1 == OFFSOP)
    {
      wordpart[0][1] = adj_offsettable_operand (op1, 0);
      wordpart[1][1] = adj_offsettable_operand (op1, 4);
      wordpart[2][1] = adj_offsettable_operand (op1, 8);
      wordpart[3][1] = adj_offsettable_operand (op1, 12);
    }
  else if (optype1 == CNSTOP)
    {
      REAL_VALUE_TYPE r;
      long l[4];

      /* This only works for TFmode floating point constants.  */
      if (GET_CODE (op1) != CONST_DOUBLE || GET_MODE (op1) != TFmode)
	abort ();

      /* Split the 128-bit constant into four 32-bit target words.  */
      REAL_VALUE_FROM_CONST_DOUBLE (r, op1);
      REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
      wordpart[0][1] = GEN_INT (l[0]);
      wordpart[1][1] = GEN_INT (l[1]);
      wordpart[2][1] = GEN_INT (l[2]);
      wordpart[3][1] = GEN_INT (l[3]);
    }
  else
    {
      wordpart[0][1] = op1;
      wordpart[1][1] = op1;
      wordpart[2][1] = op1;
      wordpart[3][1] = op1;
    }

  /* Easy case: try moving the quad as two pairs.  Check for moving between
     an even/odd register pair and a memory location.
     Also handle new v9 fp regs here.  */
  /* ??? Should also handle the case of non-offsettable addresses here.
     We can at least do the first pair as a ldd/std, and then do the third
     and fourth words individually.  */
  if ((optype0 == REGOP && optype1 == OFFSOP && (REGNO (op0) & 1) == 0)
      || (optype0 == OFFSOP && optype1 == REGOP && (REGNO (op1) & 1) == 0))
    {
      rtx mem, reg;

      if (optype0 == REGOP)
	mem = op1, reg = op0;
      else
	mem = op0, reg = op1;

      if (mem_aligned_8 (mem)
	  /* If this is a floating point register higher than %f31,
	     then we *must* use an aligned load, since `ld' will not accept
	     the register number.  */
	  || (TARGET_V9 && REGNO (reg) >= 64))
	{
	  if (TARGET_V9 && FP_REG_P (reg))
	    {
	      if ((REGNO (reg) & 3) != 0)
		abort ();
	      return (mem == op1 ? "ldq %1,%0" : "stq %1,%0");
	    }
	  /* Two 8-byte moves: 64-bit ldx/stx on v9, ldd/std pairs on v8.  */
	  operands[2] = adj_offsettable_operand (mem, 8);
	  if (mem == op1)
	    return TARGET_V9 ? "ldx %1,%0;ldx %2,%R0" : "ldd %1,%0;ldd %2,%S0";
	  else
	    return TARGET_V9 ? "stx %1,%0;stx %R1,%2" : "std %1,%0;std %S1,%2";
	}
    }

  /* If the first move would clobber the source of the second one,
     do them in the other order.  */

  /* Overlapping registers.  */
  /* NOTE(review): the subscripts in the next condition look transposed.
     Everywhere else in this excerpt `wordpart' is used as
     wordpart[word][operand] with the second index only ever 0 or 1, so
     wordpart[1][3] and wordpart[1][2] index past the operand dimension
     (out of bounds if it is declared [4][2]).  The intent is presumably to
     test the upper SOURCE words wordpart[3][1], wordpart[2][1] and
     wordpart[1][1] -- confirm against the declaration and fix.  */
  if (optype0 == REGOP && optype1 == REGOP
      && (REGNO (op0) == REGNO (wordpart[1][3])
	  || REGNO (op0) == REGNO (wordpart[1][2])
	  || REGNO (op0) == REGNO (wordpart[1][1])))
    {
      /* Emit the words highest-first so the destination's low registers do
	 not clobber source words that are still needed.  */
      /* Do fourth word.  */
      output_asm_insn (singlemove_string (wordpart[3]), wordpart[3]);
      /* Do the third word.  */
      output_asm_insn (singlemove_string (wordpart[2]), wordpart[2]);
      /* Do the second word.  */
      output_asm_insn (singlemove_string (wordpart[1]), wordpart[1]);
      /* Do lowest-numbered word.  */
      return singlemove_string (wordpart[0]);
    }

  /* Loading into a register which overlaps a register used in the
     address.  */
  if (optype0 == REGOP && optype1 != REGOP
      && reg_overlap_mentioned_p (op0, op1))
    {
      /* ??? Not implemented yet.  This is a bit complicated, because we
	 must load which ever part overlaps the address last.  If the
	 address is a double-reg address, then there are two parts which
	 need to be done last, which is impossible.  We would need a
	 scratch register in that case.  */
      abort ();
    }

  /* Normal case: move the four words in lowest to highest address
     order.  */

  output_asm_insn (singlemove_string (wordpart[0]), wordpart[0]);

  /* Make any unoffsettable addresses point at the second word.  */
  if (addreg0)
    output_asm_insn ("add %0,0x4,%0", &addreg0);
  if (addreg1)
    output_asm_insn ("add %0,0x4,%0", &addreg1);

  /* Do the second word.  */
  output_asm_insn (singlemove_string (wordpart[1]), wordpart[1]);

  /* Make any unoffsettable addresses point at the third word.  */
  if (addreg0)
    output_asm_insn ("add %0,0x4,%0", &addreg0);
  if (addreg1)
    output_asm_insn ("add %0,0x4,%0", &addreg1);

  /* Do the third word.  */
  output_asm_insn (singlemove_string (wordpart[2]), wordpart[2]);

  /* Make any unoffsettable addresses point at the fourth word.  */
  if (addreg0)
    output_asm_insn ("add %0,0x4,%0", &addreg0);
  if (addreg1)
    output_asm_insn ("add %0,0x4,%0", &addreg1);

  /* Do the fourth word.  */
  output_asm_insn (singlemove_string (wordpart[3]), wordpart[3]);

  /* Undo the adds we just did (3 * 4 = 0xc bytes).  */
  if (addreg0)
    output_asm_insn ("add %0,-0xc,%0", &addreg0);
  if (addreg1)
    output_asm_insn ("add %0,-0xc,%0", &addreg1);

  return "";
}

/* Output assembler code to perform a doubleword move insn with operands
   OPERANDS, one of which must be a floating point register.
   fp<->fp moves are emitted directly (one fmovd on v9, an fmovs pair
   otherwise); fp<->memory moves fall back to output_move_double;
   fp<->integer-register moves are unsupported and abort.  */

char *
output_fp_move_double (operands)
     rtx *operands;
{
  if (FP_REG_P (operands[0]))
    {
      if (FP_REG_P (operands[1]))
	{
	  if (TARGET_V9)
	    return "fmovd %1,%0";
	  else
	    return "fmovs %1,%0\n\tfmovs %R1,%R0";
	}
      else if (GET_CODE (operands[1]) == REG)
	abort ();
      else
	return output_move_double (operands);
    }
  else if (FP_REG_P (operands[1]))
    {
      if (GET_CODE (operands[0]) == REG)
	abort ();
      else
	return output_move_double (operands);
    }
  else
    abort ();
}

/* Output assembler code to perform a quadword move insn with operands
   OPERANDS, one of which must be a floating point register.
   Same structure as output_fp_move_double but for four words: one fmovq
   on v9, four fmovs otherwise; memory cases go through
   output_move_quad.  */

char *
output_fp_move_quad (operands)
     rtx *operands;
{
  register rtx op0 = operands[0];
  register rtx op1 = operands[1];

  if (FP_REG_P (op0))
    {
      if (FP_REG_P (op1))
	{
	  if (TARGET_V9)
	    return "fmovq %1,%0";
	  else
	    return "fmovs %1,%0\n\tfmovs %R1,%R0\n\tfmovs %S1,%S0\n\tfmovs %T1,%T0";
	}
      else if (GET_CODE (op1) == REG)
	abort ();
      else
	return output_move_quad (operands);
    }
  else if (FP_REG_P (op1))
    {
      if (GET_CODE (op0) == REG)
	abort ();
      else
	return output_move_quad (operands);
    }
  else
    abort ();
}

/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.  */

static rtx
find_addr_reg (addr)
     rtx addr;
{
  while (GET_CODE (addr) == PLUS)
    {
      /* We absolutely can not fudge the frame pointer here, because the
	 frame pointer must always be 8 byte aligned.  It also confuses
	 debuggers.  */
      if (GET_CODE (XEXP (addr, 0)) == REG
	  && REGNO (XEXP (addr, 0)) != FRAME_POINTER_REGNUM)
	addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG
	       && REGNO (XEXP (addr, 1)) != FRAME_POINTER_REGNUM)
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
	addr = XEXP (addr, 0);
      else
	abort ();
    }
  if (GET_CODE (addr) == REG)
    return addr;
  abort ();
}

#if 0 /* not currently used */
/* Print to the assembler file a sized load/store opcode: OPNAME ("ld" or a
   store name) plus the size suffix implied by MODE, choosing the signed
   load suffix table when SIGNEDP.  The tables are indexed by
   GET_MODE_SIZE (mode) >> 1: QI->0, HI->1, SI->2, DI->4.  */
void
output_sized_memop (opname, mode, signedp)
     char *opname;
     enum machine_mode mode;
     int signedp;
{
  static char *ld_size_suffix_u[] = { "ub", "uh", "", "?", "d" };
  static char *ld_size_suffix_s[] = { "sb", "sh", "", "?", "d" };
  static char *st_size_suffix[] = { "b", "h", "", "?", "d" };
  char **opnametab, *modename;

  /* Loads pick signed or unsigned suffixes; stores have only one table.
     (The dangling `else' below binds to the inner `if', as intended.)  */
  if (opname[0] == 'l')
    if (signedp)
      opnametab = ld_size_suffix_s;
    else
      opnametab = ld_size_suffix_u;
  else
    opnametab = st_size_suffix;
  modename = opnametab[GET_MODE_SIZE (mode) >> 1];

  fprintf (asm_out_file, "\t%s%s", opname, modename);
}

/* Emit the shift that moves a narrow value (OPERANDS[2], HImode or QImode)
   into the high end of register OPERANDS[0]: by 16 bits for HImode, 24 for
   QImode.  Aborts for any other mode.  */
void
output_move_with_extension (operands)
     rtx *operands;
{
  if (GET_MODE (operands[2]) == HImode)
    output_asm_insn ("sll %2,0x10,%0", operands);
  else if (GET_CODE (operands[2]) == QImode)
    output_asm_insn ("sll %2,0x18,%0", operands);
  else
    abort ();
}
#endif /* not currently used */

#if 0
/* ??? These are only used by the movstrsi pattern, but we get better code
   in general without that, because emit_block_move can do just as good a
   job as this function does when alignment and size are known.  When they
   aren't known, a call to strcpy may be faster anyways, because it is
   likely to be carefully crafted assembly language code, and below we just
   do a byte-wise copy.

   Also, emit_block_move expands into multiple read/write RTL insns, which
   can then be optimized, whereas our movstrsi pattern can not be optimized
   at all.  */

/* Load the address specified by OPERANDS[3] into the register specified by
   OPERANDS[0].

   OPERANDS[3] may be the result of a sum, hence it could either be:

   (1) CONST
   (2) REG
   (3) REG + CONST_INT
   (4) REG + REG + CONST_INT
   (5) REG + REG  (special case of 4)

   Note that (4) is not a legitimate address.
   All cases are handled here.  */

void
output_load_address (operands)
     rtx *operands;
{
  rtx base, offset;

  /* A bare constant: just set it.  */
  if (CONSTANT_P (operands[3]))
    {
      output_asm_insn ("set %3,%0", operands);
      return;
    }

  /* A bare register: move it, unless it is already the destination.  */
  if (REG_P (operands[3]))
    {
      if (REGNO (operands[0]) != REGNO (operands[3]))
	output_asm_insn ("mov %3,%0", operands);
      return;
    }

  if (GET_CODE (operands[3]) != PLUS)
    abort ();

  base = XEXP (operands[3], 0);
  offset = XEXP (operands[3], 1);

  /* Canonicalize so any constant term ends up in `offset'.  */
  if (GET_CODE (base) == CONST_INT)
    {
      rtx tmp = base;
      base = offset;
      offset = tmp;
    }

  if (GET_CODE (offset) != CONST_INT)
    {
      /* Operand is (PLUS (REG) (REG)).  */
      base = operands[3];
      offset = const0_rtx;
    }

  if (REG_P (base))
    {
      operands[6] = base;
      operands[7] = offset;
      if (SMALL_INT (offset))
	output_asm_insn ("add %6,%7,%0", operands);
      else
	output_asm_insn ("set %7,%0\n\tadd %0,%6,%0", operands);
    }
  else if (GET_CODE (base) == PLUS)
    {
      /* REG + REG (+ CONST_INT): sum the registers, then the offset.  */
      operands[6] = XEXP (base, 0);
      operands[7] = XEXP (base, 1);
      operands[8] = offset;
      if (SMALL_INT (offset))
	output_asm_insn ("add %6,%7,%0\n\tadd %0,%8,%0", operands);
      else
	output_asm_insn ("set %8,%0\n\tadd %0,%6,%0\n\tadd %0,%7,%0", operands);
    }
  else
    abort ();
}

/* Output code to place a size count SIZE in register REG.
   ALIGN is the size of the unit of transfer.

   Because block moves are pipelined, we don't include the first element
   in the transfer of SIZE to REG.  */

static void
output_size_for_block_move (size, reg, align)
     rtx size, reg;
     rtx align;
{
  rtx xoperands[3];

  xoperands[0] = reg;
  xoperands[1] = size;
  xoperands[2] = align;
  if (GET_CODE (size) == REG)
    output_asm_insn ("sub %1,%2,%0", xoperands);
  else
    {
      xoperands[1] = gen_rtx (CONST_INT, VOIDmode, INTVAL (size) - INTVAL (align));
      output_asm_insn ("set %1,%0", xoperands);
    }
}

/* Emit code to perform a block move.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.
   OPERANDS[4] is a register we can safely clobber as a temp.  */

char *
output_block_move (operands)
     rtx *operands;
{
  /* A vector for our computed operands.  Note that output_load_address
     makes use of (and can clobber) up to the 8th element of this
     vector.  */
  rtx xoperands[10];
  rtx zoperands[10];
  static int movstrsi_label = 0;
  int i;
  rtx temp1 = operands[4];
  rtx sizertx = operands[2];
  rtx alignrtx = operands[3];
  int align = INTVAL (alignrtx);
  char label3[30], label5[30];

  xoperands[0] = operands[0];
  xoperands[1] = operands[1];
  xoperands[2] = temp1;

  /* We can't move more than this many bytes at a time because we have only
     one register, %g1, to move them through.  */
  if (align > UNITS_PER_WORD)
    {
      align = UNITS_PER_WORD;
      alignrtx = gen_rtx (CONST_INT, VOIDmode, UNITS_PER_WORD);
    }

  /* We consider 8 ld/st pairs, for a total of 16 inline insns to be
     reasonable here.  (Actually will emit a maximum of 18 inline insns for
     the case of size == 31 and align == 4).  */
  if (GET_CODE (sizertx) == CONST_INT && (INTVAL (sizertx) / align) <= 8
      && memory_address_p (QImode, plus_constant_for_outpu
/* NOTE(review): the excerpt is truncated here, mid-call to (presumably)
   plus_constant_for_output -- the remainder of output_block_move is not
   visible in this capture.  */
/* End of excerpt.  The source above is cut off mid-expression inside
   output_block_move; the lines replaced here were code-viewer
   keyboard-shortcut help text (UI chrome), not source.  */