📄 sparc.c
字号:
&& reg_overlap_mentioned_p (op0, op1)) { /* ??? Not implemented yet. This is a bit complicated, because we must load which ever part overlaps the address last. If the address is a double-reg address, then there are two parts which need to be done last, which is impossible. We would need a scratch register in that case. */ abort (); } /* Normal case: move the four words in lowest to higest address order. */ output_asm_insn (singlemove_string (wordpart[0]), wordpart[0]); /* Make any unoffsettable addresses point at the second word. */ if (addreg0) output_asm_insn ("add %0,0x4,%0", &addreg0); if (addreg1) output_asm_insn ("add %0,0x4,%0", &addreg1); /* Do the second word. */ output_asm_insn (singlemove_string (wordpart[1]), wordpart[1]); /* Make any unoffsettable addresses point at the third word. */ if (addreg0) output_asm_insn ("add %0,0x4,%0", &addreg0); if (addreg1) output_asm_insn ("add %0,0x4,%0", &addreg1); /* Do the third word. */ output_asm_insn (singlemove_string (wordpart[2]), wordpart[2]); /* Make any unoffsettable addresses point at the fourth word. */ if (addreg0) output_asm_insn ("add %0,0x4,%0", &addreg0); if (addreg1) output_asm_insn ("add %0,0x4,%0", &addreg1); /* Do the fourth word. */ output_asm_insn (singlemove_string (wordpart[3]), wordpart[3]); /* Undo the adds we just did. */ if (addreg0) output_asm_insn ("add %0,-0xc,%0", &addreg0); if (addreg1) output_asm_insn ("add %0,-0xc,%0", &addreg1); return "";}/* Output assembler code to perform a doubleword move insn with operands OPERANDS, one of which must be a floating point register. 
*/char *output_fp_move_double (operands) rtx *operands;{ rtx addr; if (FP_REG_P (operands[0])) { if (FP_REG_P (operands[1])) return "fmovs %1,%0\n\tfmovs %R1,%R0"; else if (GET_CODE (operands[1]) == REG) { if ((REGNO (operands[1]) & 1) == 0) return "std %1,[%@-8]\n\tldd [%@-8],%0"; else return "st %R1,[%@-4]\n\tst %1,[%@-8]\n\tldd [%@-8],%0"; } else return output_move_double (operands); } else if (FP_REG_P (operands[1])) { if (GET_CODE (operands[0]) == REG) { if ((REGNO (operands[0]) & 1) == 0) return "std %1,[%@-8]\n\tldd [%@-8],%0"; else return "std %1,[%@-8]\n\tld [%@-4],%R0\n\tld [%@-8],%0"; } else return output_move_double (operands); } else abort ();}/* Output assembler code to perform a quadword move insn with operands OPERANDS, one of which must be a floating point register. */char *output_fp_move_quad (operands) rtx *operands;{ register rtx op0 = operands[0]; register rtx op1 = operands[1]; register rtx addr; if (FP_REG_P (op0)) { if (FP_REG_P (op1)) return "fmovs %1,%0\n\tfmovs %R1,%R0\n\tfmovs %S1,%S0\n\tfmovs %T1,%T0"; if (GET_CODE (op1) == REG) { if ((REGNO (op1) & 1) == 0) return "std %1,[%@-8]\n\tldd [%@-8],%0\n\tstd %S1,[%@-8]\n\tldd [%@-8],%S0"; else return "st %R1,[%@-4]\n\tst %1,[%@-8]\n\tldd [%@-8],%0\n\tst %T1,[%@-4]\n\tst %S1,[%@-8]\n\tldd [%@-8],%S0"; } else return output_move_quad (operands); } else if (FP_REG_P (op1)) { if (GET_CODE (op0) == REG) { if ((REGNO (op0) & 1) == 0) return "std %1,[%@-8]\n\tldd [%@-8],%0\n\tstd %S1,[%@-8]\n\tldd [%@-8],%S0"; else return "std %S1,[%@-8]\n\tld [%@-4],%T0\n\tld [%@-8],%S0\n\tstd %1,[%@-8]\n\tld [%@-4],%R0\n\tld [%@-8],%0"; } else return output_move_quad (operands); } else abort ();}/* Return a REG that occurs in ADDR with coefficient 1. ADDR can be effectively incremented by incrementing REG. */static rtxfind_addr_reg (addr) rtx addr;{ while (GET_CODE (addr) == PLUS) { /* We absolutely can not fudge the frame pointer here, because the frame pointer must always be 8 byte aligned. 
It also confuses debuggers. */ if (GET_CODE (XEXP (addr, 0)) == REG && REGNO (XEXP (addr, 0)) != FRAME_POINTER_REGNUM) addr = XEXP (addr, 0); else if (GET_CODE (XEXP (addr, 1)) == REG && REGNO (XEXP (addr, 1)) != FRAME_POINTER_REGNUM) addr = XEXP (addr, 1); else if (CONSTANT_P (XEXP (addr, 0))) addr = XEXP (addr, 1); else if (CONSTANT_P (XEXP (addr, 1))) addr = XEXP (addr, 0); else abort (); } if (GET_CODE (addr) == REG) return addr; abort ();}voidoutput_sized_memop (opname, mode, signedp) char *opname; enum machine_mode mode; int signedp;{ static char *ld_size_suffix_u[] = { "ub", "uh", "", "?", "d" }; static char *ld_size_suffix_s[] = { "sb", "sh", "", "?", "d" }; static char *st_size_suffix[] = { "b", "h", "", "?", "d" }; char **opnametab, *modename; if (opname[0] == 'l') if (signedp) opnametab = ld_size_suffix_s; else opnametab = ld_size_suffix_u; else opnametab = st_size_suffix; modename = opnametab[GET_MODE_SIZE (mode) >> 1]; fprintf (asm_out_file, "\t%s%s", opname, modename);}voidoutput_move_with_extension (operands) rtx *operands;{ if (GET_MODE (operands[2]) == HImode) output_asm_insn ("sll %2,0x10,%0", operands); else if (GET_MODE (operands[2]) == QImode) output_asm_insn ("sll %2,0x18,%0", operands); else abort ();}/* Load the address specified by OPERANDS[3] into the register specified by OPERANDS[0]. OPERANDS[3] may be the result of a sum, hence it could either be: (1) CONST (2) REG (2) REG + CONST_INT (3) REG + REG + CONST_INT (4) REG + REG (special case of 3). Note that (3) is not a legitimate address. All cases are handled here. 
*/

void
output_load_address (operands)
     rtx *operands;
{
  rtx base, offset;

  /* Constant address: materialize it with `set'.  */
  if (CONSTANT_P (operands[3]))
    {
      output_asm_insn ("set %3,%0", operands);
      return;
    }

  /* Plain register: copy it, unless source and destination already
     coincide.  */
  if (REG_P (operands[3]))
    {
      if (REGNO (operands[0]) != REGNO (operands[3]))
	output_asm_insn ("mov %3,%0", operands);
      return;
    }

  if (GET_CODE (operands[3]) != PLUS)
    abort ();

  base = XEXP (operands[3], 0);
  offset = XEXP (operands[3], 1);

  /* Canonicalize so that any constant term ends up in OFFSET.  */
  if (GET_CODE (base) == CONST_INT)
    {
      rtx tmp = base;
      base = offset;
      offset = tmp;
    }

  if (GET_CODE (offset) != CONST_INT)
    {
      /* Operand is (PLUS (REG) (REG)).  */
      base = operands[3];
      offset = const0_rtx;
    }

  if (REG_P (base))
    {
      /* REG + CONST_INT.  Slots 6 and 7 of OPERANDS are clobbered as
	 scratch operand positions for the output templates.  */
      operands[6] = base;
      operands[7] = offset;
      if (SMALL_INT (offset))
	output_asm_insn ("add %6,%7,%0", operands);
      else
	output_asm_insn ("set %7,%0\n\tadd %0,%6,%0", operands);
    }
  else if (GET_CODE (base) == PLUS)
    {
      /* (REG + REG) + CONST_INT.  Clobbers slots 6-8 of OPERANDS.  */
      operands[6] = XEXP (base, 0);
      operands[7] = XEXP (base, 1);
      operands[8] = offset;
      if (SMALL_INT (offset))
	output_asm_insn ("add %6,%7,%0\n\tadd %0,%8,%0", operands);
      else
	output_asm_insn ("set %8,%0\n\tadd %0,%6,%0\n\tadd %0,%7,%0", operands);
    }
  else
    abort ();
}

/* Output code to place a size count SIZE in register REG.
   ALIGN is the size of the unit of transfer.

   Because block moves are pipelined, we don't include the first element
   in the transfer of SIZE to REG.  */

static void
output_size_for_block_move (size, reg, align)
     rtx size, reg;
     rtx align;
{
  rtx xoperands[3];

  xoperands[0] = reg;
  xoperands[1] = size;
  xoperands[2] = align;
  if (GET_CODE (size) == REG)
    /* Run-time size: subtract one transfer unit in code.  */
    output_asm_insn ("sub %1,%2,%0", xoperands);
  else
    {
      /* Compile-time size: fold the subtraction into the constant.  */
      xoperands[1]
	= gen_rtx (CONST_INT, VOIDmode, INTVAL (size) - INTVAL (align));
      output_asm_insn ("set %1,%0", xoperands);
    }
}

/* Emit code to perform a block move.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.
   OPERANDS[4] is a register we can safely clobber as a temp.  */

char *
output_block_move (operands)
     rtx *operands;
{
  /* A vector for our computed operands.  Note that output_load_address
     makes use of (and can clobber) up to the 8th element of this vector.  */
  rtx xoperands[10];
  rtx zoperands[10];
  static int movstrsi_label = 0;
  int i;
  rtx temp1 = operands[4];
  rtx sizertx = operands[2];
  rtx alignrtx = operands[3];
  int align = INTVAL (alignrtx);
  char label3[30], label5[30];

  xoperands[0] = operands[0];
  xoperands[1] = operands[1];
  xoperands[2] = temp1;

  /* We can't move more than this many bytes at a time because we have only
     one register, %g1, to move them through.  */
  if (align > UNITS_PER_WORD)
    {
      align = UNITS_PER_WORD;
      alignrtx = gen_rtx (CONST_INT, VOIDmode, UNITS_PER_WORD);
    }

  /* We consider 8 ld/st pairs, for a total of 16 inline insns to be
     reasonable here.  (Actually will emit a maximum of 18 inline insns for
     the case of size == 31 and align == 4).  */

  if (GET_CODE (sizertx) == CONST_INT && (INTVAL (sizertx) / align) <= 8
      && memory_address_p (QImode, plus_constant_for_output (xoperands[0],
							     INTVAL (sizertx)))
      && memory_address_p (QImode, plus_constant_for_output (xoperands[1],
							     INTVAL (sizertx))))
    {
      /* Small constant-size case: emit the loads and stores inline.  */
      int size = INTVAL (sizertx);
      int offset = 0;

      /* We will store different integers into this particular RTX.
	 NOTE(review): relies on INTVAL being assignable (an lvalue) in
	 this GCC version.  */
      xoperands[2] = rtx_alloc (CONST_INT);
      PUT_MODE (xoperands[2], VOIDmode);

      /* This case is currently not handled.  Abort instead of generating
	 bad code.  */
      if (align > 4)
	abort ();

      /* Copy as many full words as the alignment allows...  */
      if (align >= 4)
	{
	  for (i = (size >> 2) - 1; i >= 0; i--)
	    {
	      INTVAL (xoperands[2]) = (i << 2) + offset;
	      output_asm_insn ("ld [%a1+%2],%%g1\n\tst %%g1,[%a0+%2]",
			       xoperands);
	    }
	  offset += (size & ~0x3);
	  size = size & 0x3;
	  if (size == 0)
	    return "";
	}

      /* ...then halfwords...  */
      if (align >= 2)
	{
	  for (i = (size >> 1) - 1; i >= 0; i--)
	    {
	      INTVAL (xoperands[2]) = (i << 1) + offset;
	      output_asm_insn ("lduh [%a1+%2],%%g1\n\tsth %%g1,[%a0+%2]",
			       xoperands);
	    }
	  offset += (size & ~0x1);
	  size = size & 0x1;
	  if (size == 0)
	    return "";
	}

      /* ...then the remaining bytes.  */
      if (align >= 1)
	{
	  for (i = size - 1; i >= 0; i--)
	    {
	      INTVAL (xoperands[2]) = i + offset;
	      output_asm_insn ("ldub [%a1+%2],%%g1\n\tstb %%g1,[%a0+%2]",
			       xoperands);
	    }
	  return "";
	}

      /* We should never reach here.  */
      abort ();
    }

  /* If the size isn't known to be a multiple of the alignment,
     we have to do it in smaller pieces.  If we could determine that
     the size was a multiple of 2 (or whatever), we could be smarter
     about this.  */

  if (GET_CODE (sizertx) != CONST_INT)
    align = 1;
  else
    {
      int size = INTVAL (sizertx);
      while (size % align)
	align >>= 1;
    }

  if (align != INTVAL (alignrtx))
    alignrtx = gen_rtx (CONST_INT, VOIDmode, align);

  /* Allocate two fresh internal label numbers for the copy loop.  */
  xoperands[3] = gen_rtx (CONST_INT, VOIDmode, movstrsi_label++);
  xoperands[4] = gen_rtx (CONST_INT, VOIDmode, align);
  xoperands[5] = gen_rtx (CONST_INT, VOIDmode, movstrsi_label++);

  ASM_GENERATE_INTERNAL_LABEL (label3, "Lm", INTVAL (xoperands[3]));
  ASM_GENERATE_INTERNAL_LABEL (label5, "Lm", INTVAL (xoperands[5]));

  /* This is the size of the transfer.  Emit code to decrement the size
     value by ALIGN, and store the result in the temp1 register.  */
  output_size_for_block_move (sizertx, temp1, alignrtx);

  /* Must handle the case when the size is zero or negative, so the first thing
     we do is compare the size against zero, and only copy bytes if it is
     zero or greater.  Note that we have already subtracted off the alignment
     once, so we must copy 1 alignment worth of bytes if the size is zero
     here.

     The SUN assembler complains about labels in branch delay slots, so we
     do this before outputting the load address, so that there will always
     be a harmless insn between the branch here and the next label emitted
     below.  */

  {
    char pattern[100];

    sprintf (pattern, "cmp %%2,0\n\tbl %s", &label5[1]);
    output_asm_insn (pattern, xoperands);
  }

  zoperands[0] = operands[0];
  zoperands[3] = plus_constant_for_output (operands[0], align);
  output_load_address (zoperands);

  /* ??? This might be much faster if the loops below were preconditioned
     and unrolled.

     That is, at run time, copy enough bytes one at a time to ensure that the
     target and source addresses are aligned to the largest possible
     alignment.  Then use a preconditioned unrolled loop to copy say 16
     bytes at a time.  Then copy bytes one at a time until finish the rest.  */

  /* Output the first label separately, so that it is spaced properly.  */

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "Lm", INTVAL (xoperands[3]));
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -