📄 sh.c
字号:
rtx r5 = gen_rtx_REG (SImode, 5);
rtx r6 = gen_rtx_REG (SImode, 6);

/* Select the dword-copy helper; the entry point depends on whether an
   odd longword (bit 2 of the byte count) is left over.  */
entry_name = get_identifier (bytes & 4
			     ? "__movmem_i4_odd"
			     : "__movmem_i4_even");
sym = function_symbol (IDENTIFIER_POINTER (entry_name));
func_addr_rtx = copy_to_mode_reg (Pmode, sym);
/* The library helpers take the destination address in r4 and the
   source address in r5.  */
force_into (XEXP (operands[0], 0), r4);
force_into (XEXP (operands[1], 0), r5);
/* r6 = number of 8-byte chunks, minus one.  */
dwords = bytes >> 3;
emit_insn (gen_move_insn (r6, GEN_INT (dwords - 1)));
emit_insn (gen_block_lump_real_i4 (func_addr_rtx));
return 1;
}
else
return 0;
}

if (bytes < 64)
  {
    /* Small block: call the size-specialized helper __movmemSI<bytes>.  */
    char entry[30];
    tree entry_name;
    rtx sym;
    rtx func_addr_rtx;
    rtx r4 = gen_rtx_REG (SImode, 4);
    rtx r5 = gen_rtx_REG (SImode, 5);

    sprintf (entry, "__movmemSI%d", bytes);
    entry_name = get_identifier (entry);
    sym = function_symbol (IDENTIFIER_POINTER (entry_name));
    func_addr_rtx = copy_to_mode_reg (Pmode, sym);
    /* Destination address in r4, source address in r5.  */
    force_into (XEXP (operands[0], 0), r4);
    force_into (XEXP (operands[1], 0), r5);
    emit_insn (gen_block_move_real (func_addr_rtx));
    return 1;
  }

/* This is the same number of bytes as a memcpy call, but to a different
   less common function name, so this will occasionally use more space.  */
if (! TARGET_SMALLCODE)
  {
    tree entry_name;
    rtx sym;
    rtx func_addr_rtx;
    int final_switch, while_loop;
    rtx r4 = gen_rtx_REG (SImode, 4);
    rtx r5 = gen_rtx_REG (SImode, 5);
    rtx r6 = gen_rtx_REG (SImode, 6);

    entry_name = get_identifier ("__movmem");
    sym = function_symbol (IDENTIFIER_POINTER (entry_name));
    func_addr_rtx = copy_to_mode_reg (Pmode, sym);
    force_into (XEXP (operands[0], 0), r4);
    force_into (XEXP (operands[1], 0), r5);

    /* r6 controls the size of the move.  16 is decremented from it
       for each 64 bytes moved.  Then the negative bit left over is used
       as an index into a list of move instructions.  e.g., a 72 byte move
       would be set up with size(r6) = 14, for one iteration through the
       big while loop, and a switch of -2 for the last part.  */
    final_switch = 16 - ((bytes / 4) % 16);
    while_loop = ((bytes / 4) / 16 - 1) * 16;
    emit_insn (gen_move_insn (r6, GEN_INT (while_loop + final_switch)));
    emit_insn (gen_block_lump_real (func_addr_rtx));
    return 1;
  }

return 0;
}

/* Prepare operands for a move define_expand; specifically, one of the
   operands must be in a register.  */

int
prepare_move_operands (rtx operands[], enum machine_mode mode)
{
  if ((mode == SImode || mode == DImode)
      && flag_pic
      && ! ((mode == Pmode || mode == ptr_mode)
	    && tls_symbolic_operand (operands[1], Pmode) != 0))
    {
      rtx temp;

      /* PIC: a symbolic source must be legitimized before use.  */
      if (SYMBOLIC_CONST_P (operands[1]))
	{
	  if (GET_CODE (operands[0]) == MEM)
	    operands[1] = force_reg (Pmode, operands[1]);
	  else if (TARGET_SHMEDIA
		   && GET_CODE (operands[1]) == LABEL_REF
		   && target_reg_operand (operands[0], mode))
	    /* It's ok.  */;
	  else
	    {
	      /* When no new pseudos may be created, reuse the destination
		 as the scratch for the legitimized address.  */
	      temp = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
	      operands[1] = legitimize_pic_address (operands[1], mode, temp);
	    }
	}
      else if (GET_CODE (operands[1]) == CONST
	       && GET_CODE (XEXP (operands[1], 0)) == PLUS
	       && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0)))
	{
	  /* (const (plus SYMBOL offset)): legitimize the symbol part,
	     then add the constant offset back on.  */
	  temp = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
	  temp = legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0),
					 mode, temp);
	  operands[1] = expand_binop (mode, add_optab, temp,
				      XEXP (XEXP (operands[1], 0), 1),
				      no_new_pseudos ? temp
				      : gen_reg_rtx (Pmode),
				      0, OPTAB_LIB_WIDEN);
	}
    }

  if (! reload_in_progress && ! reload_completed)
    {
      /* Copy the source to a register if both operands aren't registers.  */
      if (! register_operand (operands[0], mode)
	  && ! sh_register_operand (operands[1], mode))
	operands[1] = copy_to_mode_reg (mode, operands[1]);

      if (GET_CODE (operands[0]) == MEM
	  && ! memory_operand (operands[0], mode))
	{
	  /* This is like change_address_1 (operands[0], mode, 0, 1),
	     except that we can't use that function because it is static.
	     */
	  rtx new = change_address (operands[0], mode, 0);
	  MEM_COPY_ATTRIBUTES (new, operands[0]);
	  operands[0] = new;
	}
      /* This case can happen while generating code to move the result of a
	 library call to the target.  Reject `st r0,@(rX,rY)' because reload
	 will fail to find a spill register for rX, since r0 is already being
	 used for the source.  */
      else if (refers_to_regno_p (R0_REG, R0_REG + 1, operands[1], (rtx *)0)
	       && GET_CODE (operands[0]) == MEM
	       && GET_CODE (XEXP (operands[0], 0)) == PLUS
	       && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == REG)
	operands[1] = copy_to_mode_reg (mode, operands[1]);
    }

  if (mode == Pmode || mode == ptr_mode)
    {
      rtx op0, op1;
      enum tls_model tls_kind;

      op0 = operands[0];
      op1 = operands[1];
      if ((tls_kind = tls_symbolic_operand (op1, Pmode)))
	{
	  /* Source is a thread-local symbol: emit the access sequence for
	     its TLS model and substitute the computed address.  */
	  rtx tga_op1, tga_ret, tmp, tmp2;

	  switch (tls_kind)
	    {
	    case TLS_MODEL_GLOBAL_DYNAMIC:
	      /* The tls_global_dynamic call returns its result in r0.  */
	      tga_ret = gen_rtx_REG (Pmode, R0_REG);
	      emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));
	      op1 = tga_ret;
	      break;

	    case TLS_MODEL_LOCAL_DYNAMIC:
	      tga_ret = gen_rtx_REG (Pmode, R0_REG);
	      emit_call_insn (gen_tls_local_dynamic (tga_ret, op1));
	      /* Copy r0 out of harm's way, then add the DTP offset.  */
	      tmp = gen_reg_rtx (Pmode);
	      emit_move_insn (tmp, tga_ret);
	      if (register_operand (op0, Pmode))
		tmp2 = op0;
	      else
		tmp2 = gen_reg_rtx (Pmode);
	      emit_insn (gen_symDTPOFF2reg (tmp2, op1, tmp));
	      op1 = tmp2;
	      break;

	    case TLS_MODEL_INITIAL_EXEC:
	      if (! flag_pic)
		{
		  /* Don't schedule insns for getting GOT address when
		     the first scheduling is enabled, to avoid spill
		     failures for R0.  */
		  if (flag_schedule_insns)
		    emit_insn (gen_blockage ());
		  emit_insn (gen_GOTaddr2picreg ());
		  emit_insn (gen_rtx_USE (VOIDmode,
					  gen_rtx_REG (SImode, PIC_REG)));
		  if (flag_schedule_insns)
		    emit_insn (gen_blockage ());
		}
	      tga_op1 = no_new_pseudos ? op0 : gen_reg_rtx (Pmode);
	      tmp = gen_sym2GOTTPOFF (op1);
	      emit_insn (gen_tls_initial_exec (tga_op1, tmp));
	      op1 = tga_op1;
	      break;

	    case TLS_MODEL_LOCAL_EXEC:
	      /* Address = GBR + TPOFF (symbol).  */
	      tmp2 = gen_reg_rtx (Pmode);
	      emit_insn (gen_load_gbr (tmp2));
	      tmp = gen_reg_rtx (Pmode);
	      emit_insn (gen_symTPOFF2reg (tmp, op1));
	      if (register_operand (op0, Pmode))
		op1 = op0;
	      else
		op1 = gen_reg_rtx (Pmode);
	      emit_insn (gen_addsi3 (op1, tmp, tmp2));
	      break;

	    default:
	      abort ();
	    }
	  operands[1] = op1;
	}
    }

  return 0;
}

/* Prepare the operands for an scc instruction; make sure that the
   compare has been done.  */

rtx
prepare_scc_operands (enum rtx_code code)
{
  rtx t_reg = gen_rtx_REG (SImode, T_REG);
  enum rtx_code oldcode = code;
  enum machine_mode mode;

  /* First need a compare insn.  Canonicalize LT/LE/LTU/LEU to their
     reversed forms, swapping the operands to compensate below.  */
  switch (code)
    {
    case NE:
      /* It isn't possible to handle this case.  */
      abort ();
    case LT:
      code = GT;
      break;
    case LE:
      code = GE;
      break;
    case LTU:
      code = GTU;
      break;
    case LEU:
      code = GEU;
      break;
    default:
      break;
    }
  if (code != oldcode)
    {
      rtx tmp = sh_compare_op0;
      sh_compare_op0 = sh_compare_op1;
      sh_compare_op1 = tmp;
    }

  mode = GET_MODE (sh_compare_op0);
  if (mode == VOIDmode)
    mode = GET_MODE (sh_compare_op1);

  sh_compare_op0 = force_reg (mode, sh_compare_op0);
  /* Force op1 into a register unless it is a const0_rtx that the signed
     compare-with-zero patterns can handle directly.  */
  if ((code != EQ && code != NE
       && (sh_compare_op1 != const0_rtx
	   || code == GTU || code == GEU || code == LTU || code == LEU))
      || (mode == DImode && sh_compare_op1 != const0_rtx)
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
    sh_compare_op1 = force_reg (mode, sh_compare_op1);

  if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
    /* On SH4/SH2A a float compare also depends on FPSCR, so the set of
       the T register is wrapped in a PARALLEL with a USE of FPSCR.  */
    (mode == SFmode ? emit_sf_insn : emit_df_insn)
      (gen_rtx_PARALLEL
	 (VOIDmode,
	  gen_rtvec (2,
		     gen_rtx_SET (VOIDmode, t_reg,
				  gen_rtx_fmt_ee (code, SImode,
						  sh_compare_op0,
						  sh_compare_op1)),
		     gen_rtx_USE (VOIDmode, get_fpscr_rtx ()))));
  else
    emit_insn (gen_rtx_SET (VOIDmode, t_reg,
			    gen_rtx_fmt_ee (code, SImode,
					    sh_compare_op0,
					    sh_compare_op1)));

  return t_reg;
}

/* Called from the md file, set up the operands of a compare instruction.
   */

void
from_compare (rtx *operands, int code)
{
  enum machine_mode mode = GET_MODE (sh_compare_op0);
  rtx insn;

  if (mode == VOIDmode)
    mode = GET_MODE (sh_compare_op1);
  if (code != EQ
      || mode == DImode
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      /* Force args into regs, since we can't use constants here.  */
      sh_compare_op0 = force_reg (mode, sh_compare_op0);
      if (sh_compare_op1 != const0_rtx
	  || code == GTU || code == GEU
	  || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
	sh_compare_op1 = force_reg (mode, sh_compare_op1);
    }
  if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT && code == GE)
    {
      /* Float GE is synthesized as GT (via recursion) combined with an
	 IEEE-aware equality test on T.  */
      from_compare (operands, GT);
      insn = gen_ieee_ccmpeqsf_t (sh_compare_op0, sh_compare_op1);
    }
  else
    insn = gen_rtx_SET (VOIDmode,
			gen_rtx_REG (SImode, T_REG),
			gen_rtx_fmt_ee (code, SImode,
					sh_compare_op0, sh_compare_op1));
  if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      /* Float compares depend on FPSCR; keep a USE of it with the set.  */
      insn = gen_rtx_PARALLEL (VOIDmode,
			       gen_rtvec (2, insn,
					  gen_rtx_USE (VOIDmode,
						       get_fpscr_rtx ())));
      (mode == SFmode ? emit_sf_insn : emit_df_insn) (insn);
    }
  else
    emit_insn (insn);
}

/* Functions to output assembly code.  */

/* Return a sequence of instructions to perform DI or DF move.
   Since the SH cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
output_movedouble (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
		   enum machine_mode mode)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  /* Push to a pre-decrement address: store the high (%T) word first.  */
  if (GET_CODE (dst) == MEM && GET_CODE (XEXP (dst, 0)) == PRE_DEC)
    return "mov.l %T1,%0\n\tmov.l %1,%0";

  if (register_operand (dst, mode) && register_operand (src, mode))
    {
      if (REGNO (src) == MACH_REG)
	return "sts mach,%S0\n\tsts macl,%R0";

      /* When mov.d r1,r2 do r2->r3 then r1->r2;
	 when mov.d r1,r0 do r1->r0 then r2->r1.
	 (Order the two moves so an overlapping register pair is not
	 clobbered before it is read.)  */
      if (REGNO (src) + 1 == REGNO (dst))
	return "mov %T1,%T0\n\tmov %1,%0";
      else
	return "mov %1,%0\n\tmov %T1,%T0";
    }
  else if (GET_CODE (src) == CONST_INT)
    {
      /* Fill the %S half with the sign extension of the constant.  */
      if (INTVAL (src) < 0)
	output_asm_insn ("mov #-1,%S0", operands);
      else
	output_asm_insn ("mov #0,%S0", operands);

      return "mov %1,%R0";
    }
  else if (GET_CODE (src) == MEM)
    {
      int ptrreg = -1;
      int dreg = REGNO (dst);
      rtx inside = XEXP (src, 0);

      /* Find the base register of the source address, if any.  */
      if (GET_CODE (inside) == REG)
	ptrreg = REGNO (inside);
      else if (GET_CODE (inside) == SUBREG)
	ptrreg = subreg_regno (inside);
      else if (GET_CODE (inside) == PLUS)
	{
	  ptrreg = REGNO (XEXP (inside, 0));
	  /* ??? A r0+REG address shouldn't be possible here, because it isn't
	     an offsettable address.  Unfortunately, offsettable addresses use
	     QImode to check the offset, and a QImode offsettable address
	     requires r0 for the other operand, which is not currently
	     supported, so we can't use the 'o' constraint.
	     Thus we must check for and handle r0+REG addresses here.
	     We punt for now, since this is likely very rare.  */
	  if (GET_CODE (XEXP (inside, 1)) == REG)
	    abort ();
	}
      else if (GET_CODE (inside) == LABEL_REF)
	return "mov.l %1,%0\n\tmov.l %1+4,%T0";
      else if (GET_CODE (inside) == POST_INC)
	return "mov.l %1,%0\n\tmov.l %1,%T0";
      else
	abort ();

      /* Work out the safe way to copy.  Copy into the second half first.  */
      if (dreg == ptrreg)
	return "mov.l %T1,%T0\n\tmov.l %1,%0";
    }

  return "mov.l %1,%0\n\tmov.l %T1,%T0";
}

/* Print an instruction which would have gone into a delay slot after
   another instruction, but couldn't because the other instruction expanded
   into a sequence where putting the slot insn at the end wouldn't work.
   */

static void
print_slot (rtx insn)
{
  /* INSN is a SEQUENCE; element 1 is the displaced delay-slot insn.
     Output it now, then mark it deleted so it is not emitted again.  */
  final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file, optimize, 0, 1, NULL);

  INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
}

const char *
output_far_jump (rtx insn, rtx op)
{
  /* NOTE: the variable is named `this' -- legal in C, but would clash
     with the keyword if this file were ever compiled as C++.  */
  struct { rtx lab, reg, op; } this;
  rtx braf_base_lab = NULL_RTX;
  const char *jump;
  int far;
  int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
  rtx prev;

  this.lab = gen_label_rtx ();

  /* If the displacement fits the braf range (checked against roughly
     +/-32766, adjusted for this insn's own length), use the short
     mov.w/braf form rather than loading a 32-bit address.  */
  if (TARGET_SH2
      && offset >= -32764
      && offset - get_attr_length (insn) <= 32766)
    {
      far = 0;
      jump = "mov.w %O0,%1; braf %1";
    }
  else
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -