📄 jump.c
          q = prev_nonnote_insn (p);
          if (q && GET_RTX_CLASS (GET_CODE (q)) == 'i'
              && sets_cc0_p (PATTERN (q)))
            p = q;
#endif
          if (p)
            p = PREV_INSN (p);

          /* If we found all the uses and there was no data conflict, we
             can move the assignment unless we can branch into the middle
             from somewhere.  */
          if (nuses == 0 && p
              && no_labels_between_p (p, insn)
              && ! reg_referenced_between_p (temp1, p, NEXT_INSN (temp3))
              && ! reg_set_between_p (temp1, p, temp3)
              && (GET_CODE (SET_SRC (temp4)) == CONST_INT
                  || ! reg_set_between_p (SET_SRC (temp4), p, temp2)))
            {
              emit_insn_after_with_line_notes (PATTERN (temp2), p, temp2);
              delete_insn (temp2);

              /* Set NEXT to an insn that we know won't go away.  */
              next = next_active_insn (insn);

              /* Delete the jump around the set.  Note that we must do
                 this before we redirect the test jumps so that it won't
                 delete the code immediately following the assignment
                 we moved (which might be a jump).  */
              delete_insn (insn);

              /* We either have two consecutive labels or a jump to a jump,
                 so adjust all the JUMP_INSNs to branch to where INSN
                 branches to.  */
              for (p = NEXT_INSN (p); p != next; p = NEXT_INSN (p))
                if (GET_CODE (p) == JUMP_INSN)
                  redirect_jump (p, target);

              changed = 1;
              continue;
            }
        }

#ifndef HAVE_cc0
      /* If we have  if (...) x = exp;  and branches are expensive,
         EXP is a single insn, does not have any side effects, cannot
         trap, and is not too costly, convert this to
         t = exp; if (...) x = t;

         Don't do this when we have CC0 because it is unlikely to help
         and we'd need to worry about where to place the new insn and
         the potential for conflicts.  We also can't do this when we have
         notes on the insn for the same reason as above.

         We set:

         TEMP to the "x = exp;" insn.
         TEMP1 to the single set in the "x = exp;" insn.
         TEMP2 to "x".  */

      if (! reload_completed
          && this_is_condjump && ! this_is_simplejump
          && BRANCH_COST >= 3
          && (temp = next_nonnote_insn (insn)) != 0
          && GET_CODE (temp) == INSN
          && REG_NOTES (temp) == 0
          && (reallabelprev == temp
              || ((temp2 = next_active_insn (temp)) != 0
                  && simplejump_p (temp2)
                  && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
          && (temp1 = single_set (temp)) != 0
          && (temp2 = SET_DEST (temp1), GET_CODE (temp2) == REG)
          && GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT
#ifdef SMALL_REGISTER_CLASSES
          && REGNO (temp2) >= FIRST_PSEUDO_REGISTER
#endif
          && GET_CODE (SET_SRC (temp1)) != REG
          && GET_CODE (SET_SRC (temp1)) != SUBREG
          && GET_CODE (SET_SRC (temp1)) != CONST_INT
          && ! side_effects_p (SET_SRC (temp1))
          && ! may_trap_p (SET_SRC (temp1))
          && rtx_cost (SET_SRC (temp1)) < 10)
        {
          rtx new = gen_reg_rtx (GET_MODE (temp2));

          if (validate_change (temp, &SET_DEST (temp1), new, 0))
            {
              next = emit_insn_after (gen_move_insn (temp2, new), insn);
              emit_insn_after_with_line_notes (PATTERN (temp),
                                               PREV_INSN (insn), temp);
              delete_insn (temp);
            }
        }

      /* Similarly, if it takes two insns to compute EXP but they have
         the same destination.  Here TEMP3 will be the second insn and
         TEMP4 the SET from that insn.  */

      if (! reload_completed
          && this_is_condjump && ! this_is_simplejump
          && BRANCH_COST >= 4
          && (temp = next_nonnote_insn (insn)) != 0
          && GET_CODE (temp) == INSN
          && REG_NOTES (temp) == 0
          && (temp3 = next_nonnote_insn (temp)) != 0
          && GET_CODE (temp3) == INSN
          && REG_NOTES (temp3) == 0
          && (reallabelprev == temp3
              || ((temp2 = next_active_insn (temp3)) != 0
                  && simplejump_p (temp2)
                  && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
          && (temp1 = single_set (temp)) != 0
          && (temp2 = SET_DEST (temp1), GET_CODE (temp2) == REG)
          && GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT
#ifdef SMALL_REGISTER_CLASSES
          && REGNO (temp2) >= FIRST_PSEUDO_REGISTER
#endif
          && ! side_effects_p (SET_SRC (temp1))
          && ! may_trap_p (SET_SRC (temp1))
          && rtx_cost (SET_SRC (temp1)) < 10
          && (temp4 = single_set (temp3)) != 0
          && rtx_equal_p (SET_DEST (temp4), temp2)
          && ! side_effects_p (SET_SRC (temp4))
          && ! may_trap_p (SET_SRC (temp4))
          && rtx_cost (SET_SRC (temp4)) < 10)
        {
          rtx new = gen_reg_rtx (GET_MODE (temp2));

          if (validate_change (temp, &SET_DEST (temp1), new, 0))
            {
              next = emit_insn_after (gen_move_insn (temp2, new), insn);
              emit_insn_after_with_line_notes (PATTERN (temp),
                                               PREV_INSN (insn), temp);
              emit_insn_after_with_line_notes
                (replace_rtx (PATTERN (temp3), temp2, new),
                 PREV_INSN (insn), temp3);
              delete_insn (temp);
              delete_insn (temp3);
            }
        }

      /* Finally, handle the case where two insns are used to compute EXP
         but a temporary register is used.  Here we must ensure that the
         temporary register is not used anywhere else.  */

      if (! reload_completed
          && after_regscan
          && this_is_condjump && ! this_is_simplejump
          && BRANCH_COST >= 4
          && (temp = next_nonnote_insn (insn)) != 0
          && GET_CODE (temp) == INSN
          && REG_NOTES (temp) == 0
          && (temp3 = next_nonnote_insn (temp)) != 0
          && GET_CODE (temp3) == INSN
          && REG_NOTES (temp3) == 0
          && (reallabelprev == temp3
              || ((temp2 = next_active_insn (temp3)) != 0
                  && simplejump_p (temp2)
                  && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
          && (temp1 = single_set (temp)) != 0
          && (temp5 = SET_DEST (temp1), GET_CODE (temp5) == REG)
          && REGNO (temp5) >= FIRST_PSEUDO_REGISTER
          && regno_first_uid[REGNO (temp5)] == INSN_UID (temp)
          && regno_last_uid[REGNO (temp5)] == INSN_UID (temp3)
          && ! side_effects_p (SET_SRC (temp1))
          && ! may_trap_p (SET_SRC (temp1))
          && rtx_cost (SET_SRC (temp1)) < 10
          && (temp4 = single_set (temp3)) != 0
          && (temp2 = SET_DEST (temp4), GET_CODE (temp2) == REG)
          && GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT
#ifdef SMALL_REGISTER_CLASSES
          && REGNO (temp2) >= FIRST_PSEUDO_REGISTER
#endif
          && rtx_equal_p (SET_DEST (temp4), temp2)
          && ! side_effects_p (SET_SRC (temp4))
          && ! may_trap_p (SET_SRC (temp4))
          && rtx_cost (SET_SRC (temp4)) < 10)
        {
          rtx new = gen_reg_rtx (GET_MODE (temp2));

          if (validate_change (temp3, &SET_DEST (temp4), new, 0))
            {
              next = emit_insn_after (gen_move_insn (temp2, new), insn);
              emit_insn_after_with_line_notes (PATTERN (temp),
                                               PREV_INSN (insn), temp);
              emit_insn_after_with_line_notes (PATTERN (temp3),
                                               PREV_INSN (insn), temp3);
              delete_insn (temp);
              delete_insn (temp3);
            }
        }
#endif /* HAVE_cc0 */

      /* We deal with five cases:

         1) x = a; if (...) x = b; and either A or B is zero,
         2) if (...) x = 0; and jumps are expensive,
         3) x = a; if (...) x = b; and A and B are constants where all the
            set bits in A are also set in B and jumps are expensive,
         4) x = a; if (...) x = b; and A and B non-zero, and jumps are
            more expensive, and
         5) if (...) x = b; if jumps are even more expensive.

         In each of these try to use a store-flag insn to avoid the jump.
         (If the jump would be faster, the machine should not have defined
         the scc insns!).  These cases are often made by the previous
         optimization.

         INSN here is the jump around the store.  We set:

         TEMP to the "x = b;" insn.
         TEMP1 to X.
         TEMP2 to B (const0_rtx in the second case).
         TEMP3 to A (X in the second case).
         TEMP4 to the condition being tested.
         TEMP5 to the earliest insn used to find the condition.  */

      if (/* We can't do this after reload has completed.  */
          ! reload_completed
          && this_is_condjump && ! this_is_simplejump
          /* Set TEMP to the "x = b;" insn.  */
          && (temp = next_nonnote_insn (insn)) != 0
          && GET_CODE (temp) == INSN
          && GET_CODE (PATTERN (temp)) == SET
          && GET_CODE (temp1 = SET_DEST (PATTERN (temp))) == REG
#ifdef SMALL_REGISTER_CLASSES
          && REGNO (temp1) >= FIRST_PSEUDO_REGISTER
#endif
          && GET_MODE_CLASS (GET_MODE (temp1)) == MODE_INT
          && (GET_CODE (temp2 = SET_SRC (PATTERN (temp))) == REG
              || GET_CODE (temp2) == SUBREG
              || GET_CODE (temp2) == CONST_INT)
          /* Allow either form, but prefer the former if both apply.
             There is no point in using the old value of TEMP1 if it is
             a register, since cse will alias them.  It can lose if the
             old value were a hard register since CSE won't replace
             hard registers.  */
          && (((temp3 = reg_set_last (temp1, insn)) != 0
               && GET_CODE (temp3) == CONST_INT)
              /* Make the latter case look like  x = x; if (...) x = 0;  */
              || (temp3 = temp1,
                  ((BRANCH_COST >= 2 && temp2 == const0_rtx)
                   || BRANCH_COST >= 3)))
          /* INSN must either branch to the insn after TEMP or the insn
             after TEMP must branch to the same place as INSN.  */
          && (reallabelprev == temp
              || ((temp4 = next_active_insn (temp)) != 0
                  && simplejump_p (temp4)
                  && JUMP_LABEL (temp4) == JUMP_LABEL (insn)))
          && (temp4 = get_condition (insn, &temp5)) != 0
          /* We must be comparing objects whose modes imply the size.
             We could handle BLKmode if (1) emit_store_flag could and
             (2) we could find the size reliably.  */
          && GET_MODE (XEXP (temp4, 0)) != BLKmode
          /* If B is zero, OK; if A is zero, can only do (1) if we can
             reverse the condition.  See if (3) applies possibly by
             reversing the condition.  Prefer reversing to (4) when
             branches are very expensive.  */
          && ((reversep = 0, temp2 == const0_rtx)
              || (temp3 == const0_rtx
                  && (reversep = can_reverse_comparison_p (temp4, insn)))
              || (BRANCH_COST >= 2
                  && GET_CODE (temp2) == CONST_INT
                  && GET_CODE (temp3) == CONST_INT
                  && ((INTVAL (temp2) & INTVAL (temp3)) == INTVAL (temp2)
                      || ((INTVAL (temp2) & INTVAL (temp3)) == INTVAL (temp3)
                          && (reversep
                              = can_reverse_comparison_p (temp4, insn)))))
              || BRANCH_COST >= 3)
#ifdef HAVE_cc0
          /* If the previous insn sets CC0 and something else, we can't
             do this since we are going to delete that insn.  */
          && ! ((temp6 = prev_nonnote_insn (insn)) != 0
                && GET_CODE (temp6) == INSN
                && (sets_cc0_p (PATTERN (temp6)) == -1
                    || (sets_cc0_p (PATTERN (temp6)) == 1
                        && FIND_REG_INC_NOTE (temp6, NULL_RTX))))
#endif
          )
        {
          enum rtx_code code = GET_CODE (temp4);
          rtx uval, cval, var = temp1;
          int normalizep;
          rtx target;

          /* If necessary, reverse the condition.  */
          if (reversep)
            code = reverse_condition (code), uval = temp2, cval = temp3;
          else
            uval = temp3, cval = temp2;

          /* See if we can do this with a store-flag insn.  */
          start_sequence ();

          /* If CVAL is non-zero, normalize to -1.  Otherwise, if UVAL
             is the constant 1, it is best to just compute the result
             directly.  If UVAL is constant and STORE_FLAG_VALUE
             includes all of its bits, it is best to compute the flag
             value unnormalized and `and' it with UVAL.  Otherwise,
             normalize to -1 and `and' with UVAL.  */
          normalizep = (cval != const0_rtx ? -1
                        : (uval == const1_rtx ? 1
                           : (GET_CODE (uval) == CONST_INT
                              && (INTVAL (uval) & ~STORE_FLAG_VALUE) == 0)
                           ? 0 : -1));

          /* We will be putting the store-flag insn immediately in front
             of the comparison that was originally being done, so we know
             all the variables in TEMP4 will be valid.  However, this
             might be in front of the assignment of A to VAR.  If it is,
             it would clobber the store-flag we will be emitting.

             Therefore, emit into a temporary which will be copied to VAR
             immediately after TEMP.  */

          target = emit_store_flag (gen_reg_rtx (GET_MODE (var)), code,
                                    XEXP (temp4, 0), XEXP (temp4, 1),
                                    VOIDmode,
                                    (code == LTU || code == LEU
                                     || code == GEU || code == GTU),
                                    normalizep);
          if (target)
            {
              rtx before = insn;
              rtx seq;

              /* Put the store-flag insns in front of the first insn used
                 to compute the condition to ensure that we use the same
                 values of them as the current comparison.  However, the
                 remainder of the insns we generate will be placed
                 directly in front of the jump insn, in case any of the
                 pseudos we use are modified earlier.  */

              seq = get_insns ();
              end_sequence ();

              emit_insns_before (seq, temp5);

              start_sequence ();

              /* Both CVAL and UVAL are non-zero.  */
              if (cval != const0_rtx && uval != const0_rtx)
                {
                  rtx tem1, tem2;

                  tem1 = expand_and (uval, target, NULL_RTX);
                  if (GET_CODE (cval) == CONST_INT
                      && GET_CODE (uval) == CONST_INT
                      && (INTVAL (cval) & INTVAL (uval)) == INTVAL (cval))
                    tem2 = cval;
                  else
                    {
                      tem2 = expand_unop (GET_MODE (var), one_cmpl_optab,
                                          target, NULL_RTX, 0);
                      tem2 = expand_and (cval, tem2,
                                         (GET_CODE (tem2) == REG
                                          ? tem2 : 0));
                    }

                  /* If we usually make new pseudos, do so here.  This
                     turns out to help machines that have conditional
                     move insns.  */
                  if (flag_expensive_optimizations)
                    target = 0;

                  target = expand_binop (GET_MODE (var), ior_optab,
                                         tem1, tem2, target, 1,
                                         OPTAB_WIDEN);
                }
              else if (normalizep != 1)
                target = expand_and (uval, target,
                                     (GET_CODE (target) == REG
                                      && ! preserve_subexpressions_p ()
                                      ? target : NULL_RTX));

              emit_move_insn (var, target);
              seq = get_insns ();
              end_sequence ();

#ifdef HAVE_cc0
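
The transformations above are easier to follow with a source-level picture. The sketch below is not part of jump.c and is purely illustrative; the function names and operand choices are assumptions made for the example. It shows case (2) of the store-flag comment, `if (...) x = 0;`, rewritten with a normalized comparison result, and the `t = exp; if (...) x = t;` hoist that the pass performs when branches are expensive.

/* Illustrative sketch only -- not part of jump.c.  These functions show,
   at the C source level, the shape of two transformations performed by
   the RTL code above; the names and operands are made up.  */

/* Case (2) of the store-flag comment: `if (...) x = 0;'.  */
int store_flag_before (int x, int a, int b)
{
  if (a == b)           /* conditional jump around the store */
    x = 0;
  return x;
}

int store_flag_after (int x, int a, int b)
{
  /* The comparison result is normalized to all-ones (-1) or 0, in the
     spirit of emit_store_flag with normalizep == -1, then `and'ed with
     x, so no branch is needed.  */
  x &= -(a != b);
  return x;
}

/* The `t = exp; if (...) x = t;' hoist: EXP is moved above the branch.
   The pass only does this when EXP cannot trap and has no side effects.  */
int hoist_before (int x, int a, int b, int p, int q)
{
  if (a == b)
    x = p + q;          /* EXP evaluated only on the taken path */
  return x;
}

int hoist_after (int x, int a, int b, int p, int q)
{
  int t = p + q;        /* EXP computed unconditionally into a new pseudo */
  if (a == b)
    x = t;              /* the conditional part is now a cheap copy */
  return x;
}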