📄 sh.c
字号:
/* Dump out the constant pool accumulated in pool_vector, emitting each
   entry after insn SCAN.  Two passes: HImode constants first (behind a
   2-byte align), then SImode/SFmode (4-byte entries) and DImode/DFmode
   (8-byte entries), both behind a 4-byte align.  Ends with a
   consttable-end marker and a barrier, and resets pool_size.  */

static void
dump_table (scan)
     rtx scan;
{
  int i;
  int need_align = 1;

  /* Do two passes, first time dump out the HI sized constants.  */
  for (i = 0; i < pool_size; i++)
    {
      pool_node *p = &pool_vector[i];

      if (p->mode == HImode)
	{
	  if (need_align)
	    {
	      /* Emit the alignment only once, before the first HI entry.  */
	      scan = emit_insn_after (gen_align_2 (), scan);
	      need_align = 0;
	    }
	  scan = emit_label_after (p->label, scan);
	  scan = emit_insn_after (gen_consttable_2 (p->value), scan);
	}
    }

  /* Second pass: the wider constants.  */
  need_align = 1;
  for (i = 0; i < pool_size; i++)
    {
      pool_node *p = &pool_vector[i];

      switch (p->mode)
	{
	case HImode:
	  /* Already emitted in the first pass.  */
	  break;
	case SImode:
	case SFmode:
	  if (need_align)
	    {
	      need_align = 0;
	      scan = emit_label_after (gen_label_rtx (), scan);
	      scan = emit_insn_after (gen_align_4 (), scan);
	    }
	  if (p->label)
	    scan = emit_label_after (p->label, scan);
	  scan = emit_insn_after (gen_consttable_4 (p->value), scan);
	  break;
	case DFmode:
	case DImode:
	  /* NOTE(review): 8-byte entries only get 4-byte alignment here —
	     matches the SImode case above; presumably sufficient for this
	     target's constant-table access patterns.  */
	  if (need_align)
	    {
	      need_align = 0;
	      scan = emit_label_after (gen_label_rtx (), scan);
	      scan = emit_insn_after (gen_align_4 (), scan);
	    }
	  if (p->label)
	    scan = emit_label_after (p->label, scan);
	  scan = emit_insn_after (gen_consttable_8 (p->value), scan);
	  break;
	default:
	  abort ();
	  break;
	}
    }

  scan = emit_insn_after (gen_consttable_end (), scan);
  scan = emit_barrier_after (scan);
  /* The pool has been flushed; start accumulating afresh.  */
  pool_size = 0;
}

/* Return non-zero if constant would be an ok source for a mov.w instead
   of a mov.l, i.e. SRC is a CONST_INT that fits in a signed 16-bit
   immediate.  */

static int
hi_const (src)
     rtx src;
{
  return (GET_CODE (src) == CONST_INT
	  && INTVAL (src) >= -32768
	  && INTVAL (src) <= 32767);
}

/* Non-zero if the insn is a move instruction which needs to be fixed
   (i.e. its constant source must be put into the constant pool).  */

/* ??? For DImode/DFmode moves, we don't need to fix it if each half of
   the CONST_DOUBLE input value is CONST_OK_FOR_I.  For a SFmode move, we
   don't need to fix it if the input value is CONST_OK_FOR_I.  */

static int
broken_move (insn)
     rtx insn;
{
  if (GET_CODE (insn) == INSN)
    {
      rtx pat = PATTERN (insn);

      if (GET_CODE (pat) == PARALLEL)
	pat = XVECEXP (pat, 0, 0);
      if (GET_CODE (pat) == SET
	  /* We can load any 8 bit value if we don't care what the high
	     order bits end up as.  */
	  && GET_MODE (SET_DEST (pat)) != QImode
	  && CONSTANT_P (SET_SRC (pat))
	  /* Exception: on SH3E, a CONST_DOUBLE 0.0 or 1.0 destined for an
	     FP register is not broken — presumably because the target can
	     materialize those values directly (TODO confirm against the
	     fldi0/fldi1 patterns in the machine description).  */
	  && ! (TARGET_SH3E
		&& GET_CODE (SET_SRC (pat)) == CONST_DOUBLE
		&& (fp_zero_operand (SET_SRC (pat))
		    || fp_one_operand (SET_SRC (pat)))
		&& GET_CODE (SET_DEST (pat)) == REG
		&& REGNO (SET_DEST (pat)) >= FIRST_FP_REG
		&& REGNO (SET_DEST (pat)) <= LAST_FP_REG)
	  /* A CONST_INT that fits the 'I' immediate constraint needs no
	     pool entry.  */
	  && (GET_CODE (SET_SRC (pat)) != CONST_INT
	      || ! CONST_OK_FOR_I (INTVAL (SET_SRC (pat)))))
	return 1;
    }

  return 0;
}

/* Non-zero if INSN is a mova: a SET whose source is an UNSPEC with
   index 1.  */

static int
mova_p (insn)
     rtx insn;
{
  return (GET_CODE (insn) == INSN
	  && GET_CODE (PATTERN (insn)) == SET
	  && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
	  && XINT (SET_SRC (PATTERN (insn)), 1) == 1);
}

/* Find the last barrier from insn FROM which is close enough to hold the
   constant pool.  If we can't find one, then create one near the end of
   the range.  */

static rtx
find_barrier (num_mova, mova, from)
     int num_mova;
     rtx mova, from;
{
  int count_si = 0;		/* Bytes scanned, as seen by SImode loads.  */
  int count_hi = 0;		/* Bytes scanned, as seen by HImode loads.  */
  int found_hi = 0;		/* Bytes of HImode pool entries found.  */
  int found_si = 0;		/* Bytes of SImode+ pool entries found.  */
  int hi_align = 2;		/* Current alignment assumption, HI side.  */
  int si_align = 2;		/* Current alignment assumption, SI side.  */
  int leading_mova = num_mova;
  /* NOTE(review): barrier_before_mova is only read on the num_mova &&
     !leading_mova path below, which is reachable only after the mova_p
     branch in the loop has assigned it.  */
  rtx barrier_before_mova, found_barrier = 0, good_barrier = 0;
  int si_limit;
  int hi_limit;

  /* For HImode: range is 510, add 4 because pc counts from address of
     second instruction after this one, subtract 2 for the jump instruction
     that we may need to emit before the table, subtract 2 for the
     instruction that fills the jump delay slot (in very rare cases, reorg
     will take an instruction from after the constant pool or will leave
     the delay slot empty).  This gives 510.
     For SImode: range is 1020, add 4 because pc counts from address of
     second instruction after this one, subtract 2 in case pc is 2 byte
     aligned, subtract 2 for the jump instruction that we may need to emit
     before the table, subtract 2 for the instruction that fills the jump
     delay slot.  This gives 1018.  */

  /* The branch will always be shortened now that the reference address
     for forward branches is the successor address, thus we need no longer
     make adjustments to the [sh]i_limit for -O0.  */

  si_limit = 1018;
  hi_limit = 510;

  while (from && count_si < si_limit && count_hi < hi_limit)
    {
      int inc = get_attr_length (from);
      int new_align = 1;

      if (GET_CODE (from) == CODE_LABEL)
	{
	  if (optimize)
	    new_align = 1 << label_to_alignment (from);
	  else if (GET_CODE (prev_nonnote_insn (from)) == BARRIER)
	    new_align = 1 << barrier_align (from);
	  else
	    new_align = 1;
	  /* Labels occupy no space.  */
	  inc = 0;
	}

      if (GET_CODE (from) == BARRIER)
	{
	  found_barrier = from;

	  /* If we are at the end of the function, or in front of an
	     alignment instruction, we need not insert an extra alignment.
	     We prefer this kind of barrier.  */
	  if (barrier_align (from) > 2)
	    good_barrier = from;
	}

      if (broken_move (from))
	{
	  rtx pat, src, dst;
	  enum machine_mode mode;

	  pat = PATTERN (from);
	  if (GET_CODE (pat) == PARALLEL)
	    pat = XVECEXP (pat, 0, 0);
	  src = SET_SRC (pat);
	  dst = SET_DEST (pat);
	  mode = GET_MODE (dst);

	  /* We must explicitly check the mode, because sometimes the
	     front end will generate code to load unsigned constants into
	     HImode targets without properly sign extending them.  */
	  if (mode == HImode
	      || (mode == SImode && hi_const (src) && REGNO (dst) != FPUL_REG))
	    {
	      found_hi += 2;
	      /* We put the short constants before the long constants, so
		 we must count the length of short constants in the range
		 for the long constants.  */
	      /* ??? This isn't optimal, but is easy to do.  */
	      si_limit -= 2;
	    }
	  else
	    {
	      /* Lower the SI alignment assumption while it cannot be
		 proven any more from the bytes seen so far.  */
	      while (si_align > 2 && found_si + si_align - 2 > count_si)
		si_align >>= 1;
	      if (found_si > count_si)
		count_si = found_si;
	      found_si += GET_MODE_SIZE (mode);
	      if (num_mova)
		si_limit -= GET_MODE_SIZE (mode);
	    }
	}

      if (mova_p (from))
	{
	  if (! num_mova++)
	    {
	      /* First mova seen: remember it and the best barrier found
		 before it, in case we must place the pool there.  */
	      leading_mova = 0;
	      mova = from;
	      barrier_before_mova = good_barrier ? good_barrier : found_barrier;
	    }
	  if (found_si > count_si)
	    count_si = found_si;
	}
      else if (GET_CODE (from) == JUMP_INSN
	       && (GET_CODE (PATTERN (from)) == ADDR_VEC
		   || GET_CODE (PATTERN (from)) == ADDR_DIFF_VEC))
	{
	  if (num_mova)
	    num_mova--;
	  if (barrier_align (next_real_insn (from)) == CACHE_LOG)
	    {
	      /* We have just passed the barrier in front of the
		 ADDR_DIFF_VEC, which is stored in found_barrier.  Since
		 the ADDR_DIFF_VEC is accessed as data, just like our pool
		 constants, this is a good opportunity to accommodate what
		 we have gathered so far.
		 If we waited any longer, we could end up at a barrier in
		 front of code, which gives worse cache usage for
		 separated instruction / data caches.  */
	      good_barrier = found_barrier;
	      break;
	    }
	  else
	    {
	      rtx body = PATTERN (from);

	      /* Account for the size of the dispatch-table data itself.  */
	      inc = XVECLEN (body, 1) * GET_MODE_SIZE (GET_MODE (body));
	    }
	}

      if (found_si)
	{
	  if (new_align > si_align)
	    {
	      /* Precedence: (count_si - 1) & (new_align - si_align).  */
	      si_limit -= count_si - 1 & new_align - si_align;
	      si_align = new_align;
	    }
	  /* Round count_si up to the new alignment:
	     (count_si + new_align - 1) & -new_align.  */
	  count_si = count_si + new_align - 1 & -new_align;
	  count_si += inc;
	}
      if (found_hi)
	{
	  if (new_align > hi_align)
	    {
	      /* Precedence: (count_hi - 1) & (new_align - hi_align).  */
	      hi_limit -= count_hi - 1 & new_align - hi_align;
	      hi_align = new_align;
	    }
	  /* Round count_hi up to the new alignment.  */
	  count_hi = count_hi + new_align - 1 & -new_align;
	  count_hi += inc;
	}

      from = NEXT_INSN (from);
    }

  if (num_mova)
    /* NOTE(review): the else below binds to this inner if, as intended.  */
    if (leading_mova)
      {
	/* Try as we might, the leading mova is out of range.  Change it
	   into a load (which will become a pcload) and retry.  */
	SET_SRC (PATTERN (mova)) = XVECEXP (SET_SRC (PATTERN (mova)), 0, 0);
	INSN_CODE (mova) = -1;
	return find_barrier (0, 0, mova);
      }
    else
      {
	/* Insert the constant pool table before the mova instruction, to
	   prevent the mova label reference from going out of range.  */
	from = mova;
	good_barrier = found_barrier = barrier_before_mova;
      }

  if (found_barrier)
    {
      if (good_barrier && next_real_insn (found_barrier))
	found_barrier = good_barrier;
    }
  else
    {
      /* We didn't find a barrier in time to dump our stuff, so we'll
	 make one.  */
      rtx label = gen_label_rtx ();

      /* If we exceeded the range, then we must back up over the last
	 instruction we looked at.  Otherwise, we just need to undo the
	 NEXT_INSN at the end of the loop.  */
      if (count_hi > hi_limit || count_si > si_limit)
	from = PREV_INSN (PREV_INSN (from));
      else
	from = PREV_INSN (from);

      /* Walk back to be just before any jump or label.  Putting it
	 before a label reduces the number of times the branch around the
	 constant pool table will be hit.  Putting it before a jump makes
	 it more likely that the bra delay slot will be filled.  */
      while (GET_CODE (from) == JUMP_INSN || GET_CODE (from) == NOTE
	     || GET_CODE (from) == CODE_LABEL)
	from = PREV_INSN (from);

      /* Emit "jump around the pool" + barrier + landing label.  */
      from = emit_jump_insn_after (gen_jump (label), from);
      JUMP_LABEL (from) = label;
      LABEL_NUSES (label) = 1;
      found_barrier = emit_barrier_after (from);
      emit_label_after (label, found_barrier);
    }

  return found_barrier;
}

/* If the instruction INSN is implemented by a special function, and we
   can positively find the register that is used to call the sfunc, and
   this register is not used anywhere else in this instruction - except
   as the destination of a set, return this register; else, return 0.  */

rtx
sfunc_uses_reg (insn)
     rtx insn;
{
  int i;
  rtx pattern, part, reg_part, reg;

  if (GET_CODE (insn) != INSN)
    return 0;
  pattern = PATTERN (insn);
  if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
    return 0;

  /* Find the (last) SImode USE in the PARALLEL: that is the register
     used to call the sfunc.  Element 0 is skipped — it is the SET.  */
  for (reg_part = 0, i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
    {
      part = XVECEXP (pattern, 0, i);
      if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == SImode)
	reg_part = part;
    }
  if (! reg_part)
    return 0;
  reg = XEXP (reg_part, 0);

  /* Make sure REG appears nowhere else in the insn, except as the
     destination of a SET (CLOBBERs and the USE itself are ignored).  */
  for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
    {
      part = XVECEXP (pattern, 0, i);
      if (part == reg_part || GET_CODE (part) == CLOBBER)
	continue;
      if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
				  && GET_CODE (SET_DEST (part)) == REG)
				 ? SET_SRC (part) : part)))
	return 0;
    }

  return reg;
}

/* See if the only way in which INSN uses REG is by calling it, or by
   setting it while calling it.  Set *SET to a SET rtx if the register is
   set by INSN.  Returns non-zero for a "non-call" use of REG.  */

static int
noncall_uses_reg (reg, insn, set)
     rtx reg;
     rtx insn;
     rtx *set;
{
  rtx pattern, reg2;

  *set = NULL_RTX;

  /* An sfunc insn that calls through REG counts as a call use.  */
  reg2 = sfunc_uses_reg (insn);
  if (reg2 && REGNO (reg2) == REGNO (reg))
    {
      pattern = single_set (insn);
      if (pattern
	  && GET_CODE (SET_DEST (pattern)) == REG
	  && REGNO (reg) == REGNO (SET_DEST (pattern)))
	*set = pattern;
      return 0;
    }

  if (GET_CODE (insn) != CALL_INSN)
    {
      /* We don't use rtx_equal_p because we don't care if the mode is
	 different.  */
      pattern = single_set (insn);
      if (pattern
	  && GET_CODE (SET_DEST (pattern)) == REG
	  && REGNO (reg) == REGNO (SET_DEST (pattern)))
	{
	  rtx par, part;
	  int i;

	  *set = pattern;
	  /* REG is set by this insn; a use remains only if REG is
	     mentioned in the source, or in some non-SET element of a
	     PARALLEL.  */
	  par = PATTERN (insn);
	  if (GET_CODE (par) == PARALLEL)
	    for (i = XVECLEN (par, 0) - 1; i >= 0; i--)
	      {
		part = XVECEXP (par, 0, i);
		if (GET_CODE (part) != SET && reg_mentioned_p (reg, part))
		  return 1;
	      }
	  return reg_mentioned_p (reg, SET_SRC (pattern));
	}

      /* Not a call and not a plain SET of REG: treat as a non-call use.  */
      return 1;
    }

  pattern = PATTERN (insn);

  if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      /* Any mention of REG outside element 0 is a non-call use.  */
      for (i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
	if (reg_mentioned_p (reg, XVECEXP (pattern, 0, i)))
	  return 1;
      pattern = XVECEXP (pattern, 0, 0);
    }

  if (GET_CODE (pattern) == SET)
    {
      if (reg_mentioned_p (reg, SET_DEST (pattern)))
	{
	  /* We don't use rtx_equal_p, because we don't care if the mode
	     is different.  */
	  if (GET_CODE (SET_DEST (pattern)) != REG
	      || REGNO (reg) != REGNO (SET_DEST (pattern)))
	    /* REG is partially or indirectly written: non-call use.  */
	    return 1;

	  *set = pattern;
	}

      pattern = SET_SRC (pattern);
    }

  /* The only acceptable remaining use is (call (mem REG) ...).  */
  if (GET_CODE (pattern) != CALL
      || GET_CODE (XEXP (pattern, 0)) != MEM
      || ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
    return 1;

  return 0;
}

/* Given a X, a pattern of an insn or a part of it, return a mask of used
   general registers.  Bits 0..15 mean that the respecti
   [NOTE(review): comment and the function it describes are truncated at
   the end of this chunk.]  */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -