sh.c
      switch (GET_CODE (x))
        {
        case TRUNCATE:
          {
            rtx inner = XEXP (x, 0);
            int offset = 0;
            enum machine_mode inner_mode;

            /* We might see SUBREGs with vector mode registers inside.  */
            if (GET_CODE (inner) == SUBREG
                && (GET_MODE_SIZE (GET_MODE (inner))
                    == GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
                && subreg_lowpart_p (inner))
              inner = SUBREG_REG (inner);
            if (GET_CODE (inner) == CONST_INT)
              {
                x = GEN_INT (trunc_int_for_mode (INTVAL (inner), GET_MODE (x)));
                goto default_output;
              }
            inner_mode = GET_MODE (inner);
            if (GET_CODE (inner) == SUBREG
                && (GET_MODE_SIZE (GET_MODE (inner))
                    < GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
                && GET_CODE (SUBREG_REG (inner)) == REG)
              {
                offset = subreg_regno_offset (REGNO (SUBREG_REG (inner)),
                                              GET_MODE (SUBREG_REG (inner)),
                                              SUBREG_BYTE (inner),
                                              GET_MODE (inner));
                inner = SUBREG_REG (inner);
              }
            if (GET_CODE (inner) != REG || GET_MODE_SIZE (inner_mode) > 8)
              abort ();
            /* Floating point register pairs are always big endian;
               general purpose registers are 64 bit wide.  */
            regno = REGNO (inner);
            regno = (HARD_REGNO_NREGS (regno, inner_mode)
                     - HARD_REGNO_NREGS (regno, mode))
                    + offset;
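            /* A rough worked example of the adjustment above, following the
               register-width comment and the usual HARD_REGNO_NREGS
               semantics (number of hard registers a value of the given
               mode occupies): for (truncate:SI (reg:DI rn)) held in a
               64-bit general register, both modes need one register, so
               the adjustment is 0 and rn itself is printed.  If the DImode
               value instead sits in 32-bit floating-point registers, it
               occupies a big-endian pair, so the adjustment is 2 - 1 = 1
               and the low-order (higher-numbered) register of the pair is
               selected.  */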
            x = inner;
            goto reg;
          }

        case SIGN_EXTEND:
          x = XEXP (x, 0);
          goto reg;

          /* FIXME: We need this on SHmedia32 because reload generates
             some sign-extended HI or QI loads into DImode registers but,
             because Pmode is SImode, the address ends up with a subreg:SI
             of the DImode register.  Maybe reload should be fixed so as
             to apply alter_subreg to such loads?  */
        case IF_THEN_ELSE:
          gcc_assert (trapping_target_operand (x, VOIDmode));
          x = XEXP (XEXP (x, 2), 0);
          goto default_output;

        case SUBREG:
          gcc_assert (SUBREG_BYTE (x) == 0
                      && GET_CODE (SUBREG_REG (x)) == REG);

          x = SUBREG_REG (x);
          /* Fall through.  */

        reg:
        case REG:
          regno += REGNO (x);
          if (FP_REGISTER_P (regno) && mode == V16SFmode)
            fprintf ((stream), "mtrx%s", reg_names[regno] + 2);
          else if (FP_REGISTER_P (REGNO (x)) && mode == V4SFmode)
            fprintf ((stream), "fv%s", reg_names[regno] + 2);
          else if (GET_CODE (x) == REG && mode == V2SFmode)
            fprintf ((stream), "fp%s", reg_names[regno] + 2);
          else if (FP_REGISTER_P (REGNO (x)) && GET_MODE_SIZE (mode) > 4)
            fprintf ((stream), "d%s", reg_names[regno] + 1);
          else
            fputs (reg_names[regno], (stream));
          break;

        case MEM:
          output_address (XEXP (x, 0));
          break;

        case CONST:
          if (TARGET_SHMEDIA
              && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
              && (GET_MODE (XEXP (x, 0)) == DImode
                  || GET_MODE (XEXP (x, 0)) == SImode)
              && GET_CODE (XEXP (XEXP (x, 0), 0)) == TRUNCATE
              && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode)
            {
              rtx val = XEXP (XEXP (XEXP (x, 0), 0), 0);

              fputc ('(', stream);
              if (GET_CODE (val) == ASHIFTRT)
                {
                  fputc ('(', stream);
                  if (GET_CODE (XEXP (val, 0)) == CONST)
                    fputc ('(', stream);
                  output_addr_const (stream, XEXP (val, 0));
                  if (GET_CODE (XEXP (val, 0)) == CONST)
                    fputc (')', stream);
                  fputs (" >> ", stream);
                  output_addr_const (stream, XEXP (val, 1));
                  fputc (')', stream);
                }
              else
                {
                  if (GET_CODE (val) == CONST)
                    fputc ('(', stream);
                  output_addr_const (stream, val);
                  if (GET_CODE (val) == CONST)
                    fputc (')', stream);
                }
              fputs (" & 65535)", stream);
              break;
            }
          /* Fall through.  */

        default:
          if (TARGET_SH1)
            fputc ('#', stream);
          output_addr_const (stream, x);
          break;
        }
      break;
    }
}

/* Like force_operand, but guarantees that VALUE ends up in TARGET.  */

static void
force_into (rtx value, rtx target)
{
  value = force_operand (value, target);
  if (! rtx_equal_p (value, target))
    emit_insn (gen_move_insn (target, value));
}

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

int
expand_block_move (rtx *operands)
{
  int align = INTVAL (operands[3]);
  int constp = (GET_CODE (operands[2]) == CONST_INT);
  int bytes = (constp ? INTVAL (operands[2]) : 0);

  if (! constp)
    return 0;

  /* If we could use mov.l to move words and dest is word-aligned, we
     can use movua.l for loads and still generate a relatively short
     and efficient sequence.  */
  if (TARGET_SH4A_ARCH && align < 4
      && MEM_ALIGN (operands[0]) >= 32
      && can_move_by_pieces (bytes, 32))
    {
      rtx dest = copy_rtx (operands[0]);
      rtx src = copy_rtx (operands[1]);
      /* We could use different pseudos for each copied word, but
         since movua can only load into r0, it's kind of pointless.  */
      rtx temp = gen_reg_rtx (SImode);
      rtx src_addr = copy_addr_to_reg (XEXP (src, 0));
      int copied = 0;

      while (copied + 4 <= bytes)
        {
          rtx to = adjust_address (dest, SImode, copied);
          rtx from = adjust_automodify_address (src, SImode, src_addr, copied);

          emit_insn (gen_movua (temp, from));
          emit_move_insn (src_addr, plus_constant (src_addr, 4));
          emit_move_insn (to, temp);
          copied += 4;
        }

      if (copied < bytes)
        move_by_pieces (adjust_address (dest, BLKmode, copied),
                        adjust_automodify_address (src, BLKmode, src_addr,
                                                   copied),
                        bytes - copied, align, 0);

      return 1;
    }

  /* If it isn't a constant number of bytes, or if it doesn't have 4 byte
     alignment, or if it isn't a multiple of 4 bytes, then fail.  */
  if (align < 4 || (bytes % 4 != 0))
    return 0;

  if (TARGET_HARD_SH4)
    {
      if (bytes < 12)
        return 0;
      else if (bytes == 12)
        {
          rtx func_addr_rtx = gen_reg_rtx (Pmode);
          rtx r4 = gen_rtx_REG (SImode, 4);
          rtx r5 = gen_rtx_REG (SImode, 5);

          function_symbol (func_addr_rtx, "__movmemSI12_i4", SFUNC_STATIC);
          force_into (XEXP (operands[0], 0), r4);
          force_into (XEXP (operands[1], 0), r5);
          emit_insn (gen_block_move_real_i4 (func_addr_rtx));
          return 1;
        }
      else if (! TARGET_SMALLCODE)
        {
          const char *entry_name;
          rtx func_addr_rtx = gen_reg_rtx (Pmode);
          int dwords;
          rtx r4 = gen_rtx_REG (SImode, 4);
          rtx r5 = gen_rtx_REG (SImode, 5);
          rtx r6 = gen_rtx_REG (SImode, 6);

          entry_name = (bytes & 4 ? "__movmem_i4_odd" : "__movmem_i4_even");
          function_symbol (func_addr_rtx, entry_name, SFUNC_STATIC);
          force_into (XEXP (operands[0], 0), r4);
          force_into (XEXP (operands[1], 0), r5);

          dwords = bytes >> 3;
          emit_insn (gen_move_insn (r6, GEN_INT (dwords - 1)));
          emit_insn (gen_block_lump_real_i4 (func_addr_rtx));
          return 1;
        }
      else
        return 0;
    }

  if (bytes < 64)
    {
      char entry[30];
      rtx func_addr_rtx = gen_reg_rtx (Pmode);
      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);

      sprintf (entry, "__movmemSI%d", bytes);
      function_symbol (func_addr_rtx, entry, SFUNC_STATIC);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);
      emit_insn (gen_block_move_real (func_addr_rtx));
      return 1;
    }

  /* This is the same number of bytes as a memcpy call, but to a different
     less common function name, so this will occasionally use more space.  */
  if (! TARGET_SMALLCODE)
    {
      rtx func_addr_rtx = gen_reg_rtx (Pmode);
      int final_switch, while_loop;
      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);
      rtx r6 = gen_rtx_REG (SImode, 6);

      function_symbol (func_addr_rtx, "__movmem", SFUNC_STATIC);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);

      /* r6 controls the size of the move.  16 is decremented from it
         for each 64 bytes moved.  Then the negative bit left over is used
         as an index into a list of move instructions.  e.g., a 72 byte move
         would be set up with size(r6) = 14, for one iteration through the
         big while loop, and a switch of -2 for the last part.  */

      final_switch = 16 - ((bytes / 4) % 16);
      while_loop = ((bytes / 4) / 16 - 1) * 16;
      emit_insn (gen_move_insn (r6, GEN_INT (while_loop + final_switch)));
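      /* A quick check of the 72 byte example in the comment above:
         bytes / 4 = 18 longwords, so final_switch = 16 - (18 % 16) = 14 and
         while_loop = (18 / 16 - 1) * 16 = 0, giving r6 = 14.  One pass of
         the 64-byte loop subtracts 16, leaving -2, the "switch of -2" that
         selects the moves for the remaining 8 bytes.  */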
      emit_insn (gen_block_lump_real (func_addr_rtx));
      return 1;
    }

  return 0;
}

/* Prepare operands for a move define_expand; specifically, one of the
   operands must be in a register.  */

int
prepare_move_operands (rtx operands[], enum machine_mode mode)
{
  if ((mode == SImode || mode == DImode)
      && flag_pic
      && ! ((mode == Pmode || mode == ptr_mode)
            && tls_symbolic_operand (operands[1], Pmode) != 0))
    {
      rtx temp;

      if (SYMBOLIC_CONST_P (operands[1]))
        {
          if (GET_CODE (operands[0]) == MEM)
            operands[1] = force_reg (Pmode, operands[1]);
          else if (TARGET_SHMEDIA
                   && GET_CODE (operands[1]) == LABEL_REF
                   && target_reg_operand (operands[0], mode))
            /* It's ok.  */;
          else
            {
              temp = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
              operands[1] = legitimize_pic_address (operands[1], mode, temp);
            }
        }
      else if (GET_CODE (operands[1]) == CONST
               && GET_CODE (XEXP (operands[1], 0)) == PLUS
               && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0)))
        {
          temp = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
          temp = legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0),
                                         mode, temp);
          operands[1] = expand_binop (mode, add_optab, temp,
                                      XEXP (XEXP (operands[1], 0), 1),
                                      no_new_pseudos ? temp
                                      : gen_reg_rtx (Pmode),
                                      0, OPTAB_LIB_WIDEN);
        }
    }

  if (! reload_in_progress && ! reload_completed)
    {
      /* Copy the source to a register if both operands aren't registers.  */
      if (! register_operand (operands[0], mode)
          && ! sh_register_operand (operands[1], mode))
        operands[1] = copy_to_mode_reg (mode, operands[1]);

      if (GET_CODE (operands[0]) == MEM && ! memory_operand (operands[0], mode))
        {
          /* This is like change_address_1 (operands[0], mode, 0, 1),
             except that we can't use that function because it is static.  */
          rtx new = change_address (operands[0], mode, 0);
          MEM_COPY_ATTRIBUTES (new, operands[0]);
          operands[0] = new;
        }

      /* This case can happen while generating code to move the result
         of a library call to the target.  Reject `st r0,@(rX,rY)' because
         reload will fail to find a spill register for rX, since r0 is
         already being used for the source.  */
      else if (TARGET_SH1
               && refers_to_regno_p (R0_REG, R0_REG + 1, operands[1], (rtx *)0)
               && GET_CODE (operands[0]) == MEM
               && GET_CODE (XEXP (operands[0], 0)) == PLUS
               && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == REG)
        operands[1] = copy_to_mode_reg (mode, operands[1]);
    }

  if (mode == Pmode || mode == ptr_mode)
    {
      rtx op0, op1, opc;
      enum tls_model tls_kind;

      op0 = operands[0];
      op1 = operands[1];
      if (GET_CODE (op1) == CONST
          && GET_CODE (XEXP (op1, 0)) == PLUS
          && tls_symbolic_operand (XEXP (XEXP (op1, 0), 0), Pmode))
        {
          opc = XEXP (XEXP (op1, 0), 1);
          op1 = XEXP (XEXP (op1, 0), 0);
        }
      else
        opc = NULL_RTX;

      if ((tls_kind = tls_symbolic_operand (op1, Pmode)))
        {
          rtx tga_op1, tga_ret, tmp, tmp2;

          switch (tls_kind)
            {
            case TLS_MODEL_GLOBAL_DYNAMIC:
              tga_ret = gen_rtx_REG (Pmode, R0_REG);
              emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));
              op1 = tga_ret;
              break;

            case TLS_MODEL_LOCAL_DYNAMIC:
              tga_ret = gen_rtx_REG (Pmode, R0_REG);
              emit_call_insn (gen_tls_local_dynamic (tga_ret, op1));

              tmp = gen_reg_rtx (Pmode);
              emit_move_insn (tmp, tga_ret);

              if (register_operand (op0, Pmode))
                tmp2 = op0;
              else
                tmp2 = gen_reg_rtx (Pmode);

              emit_insn (gen_symDTPOFF2reg (tmp2, op1, tmp));
              op1 = tmp2;
              break;
            case TLS_MODEL_INITIAL_EXEC:
              if (! flag_pic)
                {
                  /* Don't schedule insns for getting GOT address when
                     the first scheduling is enabled, to avoid spill
                     failures for R0.  */
                  if (flag_schedule_insns)
                    emit_insn (gen_blockage ());
                  emit_insn (gen_GOTaddr2picreg ());
                  emit_insn (gen_rtx_USE (VOIDmode,
                                          gen_rtx_REG (SImode, PIC_REG)));
                  if (flag_schedule_insns)
                    emit_insn (gen_blockage ());
                }
              tga_op1 = no_new_pseudos ? op0 : gen_reg_rtx (Pmode);
              tmp = gen_sym2GOTTPOFF (op1);
              emit_insn (gen_tls_initial_exec (tga_op1, tmp));
              op1 = tga_op1;
              break;

            case TLS_MODEL_LOCAL_EXEC:
              tmp2 = gen_reg_rtx (Pmode);
              emit_insn (gen_load_gbr (tmp2));
              tmp = gen_reg_rtx (Pmode);
              emit_insn (gen_symTPOFF2reg (tmp, op1));

              if (register_operand (op0, Pmode))
                op1 = op0;
              else
                op1 = gen_reg_rtx (Pmode);

              emit_insn (gen_addsi3 (op1, tmp, tmp2));
              break;

            default:
              gcc_unreachable ();
            }

          if (opc)