mt.c
  rtx sl, sh;
  int move_high_first = 0;      /* Assume no overlap.  */

  switch (GET_CODE (operands[0]))       /* Dest.  */
    {
    case SUBREG:
    case REG:
      if ((GET_CODE (operands[1]) == REG || GET_CODE (operands[1]) == SUBREG)
          && true_regnum (operands[0]) <= true_regnum (operands[1]))
        move_high_first = 1;

      if (GET_CODE (operands[0]) == SUBREG)
        {
          dl = gen_rtx_SUBREG (nmode, SUBREG_REG (operands[0]),
                               SUBREG_BYTE (operands[0])
                               + GET_MODE_SIZE (nmode));
          dh = gen_rtx_SUBREG (nmode, SUBREG_REG (operands[0]),
                               SUBREG_BYTE (operands[0]));
        }
      else if (GET_CODE (operands[0]) == REG && ! IS_PSEUDO_P (operands[0]))
        {
          int r = REGNO (operands[0]);
          dh = gen_rtx_REG (nmode, r);
          dl = gen_rtx_REG (nmode, r + HARD_REGNO_NREGS (r, nmode));
        }
      else
        {
          dh = gen_rtx_SUBREG (nmode, operands[0], 0);
          dl = gen_rtx_SUBREG (nmode, operands[0], GET_MODE_SIZE (nmode));
        }
      break;

    case MEM:
      switch (GET_CODE (XEXP (operands[0], 0)))
        {
        case POST_INC:
        case POST_DEC:
          gcc_unreachable ();
        default:
          dl = operand_subword (operands[0],
                                GET_MODE_SIZE (nmode) / UNITS_PER_WORD,
                                0, omode);
          dh = operand_subword (operands[0], 0, 0, omode);
        }
      break;

    default:
      gcc_unreachable ();
    }

  switch (GET_CODE (operands[1]))
    {
    case REG:
      if (! IS_PSEUDO_P (operands[1]))
        {
          int r = REGNO (operands[1]);
          sh = gen_rtx_REG (nmode, r);
          sl = gen_rtx_REG (nmode, r + HARD_REGNO_NREGS (r, nmode));
        }
      else
        {
          sh = gen_rtx_SUBREG (nmode, operands[1], 0);
          sl = gen_rtx_SUBREG (nmode, operands[1], GET_MODE_SIZE (nmode));
        }
      break;

    case CONST_DOUBLE:
      if (operands[1] == const0_rtx)
        sh = sl = const0_rtx;
      else
        split_double (operands[1], &sh, &sl);
      break;

    case CONST_INT:
      if (operands[1] == const0_rtx)
        sh = sl = const0_rtx;
      else
        {
          int vl, vh;

          switch (nmode)
            {
            default:
              gcc_unreachable ();
            }

          sl = GEN_INT (vl);
          sh = GEN_INT (vh);
        }
      break;

    case SUBREG:
      sl = gen_rtx_SUBREG (nmode, SUBREG_REG (operands[1]),
                           SUBREG_BYTE (operands[1])
                           + GET_MODE_SIZE (nmode));
      sh = gen_rtx_SUBREG (nmode, SUBREG_REG (operands[1]),
                           SUBREG_BYTE (operands[1]));
      break;

    case MEM:
      switch (GET_CODE (XEXP (operands[1], 0)))
        {
        case POST_DEC:
        case POST_INC:
          gcc_unreachable ();
          break;

        default:
          sl = operand_subword (operands[1],
                                GET_MODE_SIZE (nmode) / UNITS_PER_WORD,
                                0, omode);
          sh = operand_subword (operands[1], 0, 0, omode);

          /* Check if the DF load is going to clobber the register
             used for the address, and if so make sure that is going
             to be the second move.  */
          if (GET_CODE (dl) == REG
              && true_regnum (dl) == true_regnum (XEXP (XEXP (sl, 0), 0)))
            move_high_first = 1;
        }
      break;

    default:
      gcc_unreachable ();
    }

  if (move_high_first)
    {
      operands[2] = dh;
      operands[3] = sh;
      operands[4] = dl;
      operands[5] = sl;
    }
  else
    {
      operands[2] = dl;
      operands[3] = sl;
      operands[4] = dh;
      operands[5] = sh;
    }
  return;
}

/* Implement TARGET_MUST_PASS_IN_STACK hook.  */

static bool
mt_pass_in_stack (enum machine_mode mode ATTRIBUTE_UNUSED, tree type)
{
  return (((type) != 0
           && (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
               || TREE_ADDRESSABLE (type))));
}

/* Structures to hold branch information during reorg.  */

typedef struct branch_info
{
  rtx insn;                     /* The branch insn.  */
  struct branch_info *next;
} branch_info;

typedef struct label_info
{
  rtx label;                    /* The label.  */
  branch_info *branches;        /* Branches to this label.  */
  struct label_info *next;
} label_info;

/* Chain of labels found in current function, used during reorg.  */
static label_info *mt_labels;
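
/* Illustration (an expository sketch, not part of the original port):
   mt_add_branches below is written as a for_each_rtx callback, so it
   is applied to every sub-rtx of a branch pattern.  The typical use,
   as in mt_reorg_hazard later in this file, is

     for_each_rtx (&PATTERN (jmp), mt_add_branches, insn);

   after which every (label_ref L) inside JMP's pattern has a
   label_info entry on the mt_labels chain, with INSN pushed onto that
   entry's branches list.  A label seen for the first time gets a
   fresh label_info; later branches to the same label share it.  */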
/* If *X is a label, add INSN to the list of branches for that
   label.  */

static int
mt_add_branches (rtx *x, void *insn)
{
  if (GET_CODE (*x) == LABEL_REF)
    {
      branch_info *branch = xmalloc (sizeof (*branch));
      rtx label = XEXP (*x, 0);
      label_info *info;

      /* Find the label_info entry for this label, creating one on
         first sight.  */
      for (info = mt_labels; info; info = info->next)
        if (info->label == label)
          break;

      if (!info)
        {
          info = xmalloc (sizeof (*info));
          info->next = mt_labels;
          mt_labels = info;
          info->label = label;
          info->branches = NULL;
        }

      branch->next = info->branches;
      info->branches = branch;
      branch->insn = insn;
    }
  return 0;
}

/* If BRANCH has a filled delay slot, check if INSN is dependent upon
   it.  If so, undo the delay slot fill.  Returns the next insn, if
   we patch out the branch.  Returns the branch insn, if we cannot
   patch out the branch (due to anti-dependency in the delay slot).
   In that case, the caller must insert nops at the branch target.  */

static rtx
mt_check_delay_slot (rtx branch, rtx insn)
{
  rtx slot;
  rtx tmp;
  rtx p;
  rtx jmp;

  gcc_assert (GET_CODE (PATTERN (branch)) == SEQUENCE);
  if (INSN_DELETED_P (branch))
    return NULL_RTX;
  slot = XVECEXP (PATTERN (branch), 0, 1);

  /* insn_dependent_p_1 clears *TMP when a dependency is found, so a
     non-null TMP afterwards means INSN does not depend on SLOT.  */
  tmp = PATTERN (insn);
  note_stores (PATTERN (slot), insn_dependent_p_1, &tmp);
  if (tmp)
    /* Not dependent.  */
    return NULL_RTX;

  /* Undo the delay slot.  */
  jmp = XVECEXP (PATTERN (branch), 0, 0);

  tmp = PATTERN (jmp);
  note_stores (PATTERN (slot), insn_dependent_p_1, &tmp);
  if (!tmp)
    /* Anti dependent.  */
    return branch;

  /* Relink the insn chain so the slotted insn executes before the
     jump, then delete the SEQUENCE insn that held them.  */
  p = PREV_INSN (branch);
  NEXT_INSN (p) = slot;
  PREV_INSN (slot) = p;
  NEXT_INSN (slot) = jmp;
  PREV_INSN (jmp) = slot;
  NEXT_INSN (jmp) = branch;
  PREV_INSN (branch) = jmp;
  XVECEXP (PATTERN (branch), 0, 0) = NULL_RTX;
  XVECEXP (PATTERN (branch), 0, 1) = NULL_RTX;
  delete_insn (branch);
  return jmp;
}

/* Insert nops to satisfy pipeline constraints.  We only deal with
   ms2 constraints here.  Earlier CPUs are dealt with by inserting
   nops with final_prescan (but that can lead to inferior code, and
   is impractical with ms2's JAL hazard).

   ms2 dynamic constraints:
   1) a load and a following use must be separated by one insn
   2) an insn and a following dependent call must be separated by
      two insns

   Only arith insns are placed in delay slots, so #1 cannot happen
   with a load in a delay slot.  #2 can happen with an arith insn in
   the delay slot.  */

static void
mt_reorg_hazard (void)
{
  rtx insn, next;

  /* Find all the branches.  */
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      rtx jmp;

      if (!INSN_P (insn))
        continue;

      jmp = PATTERN (insn);

      if (GET_CODE (jmp) != SEQUENCE)
        /* If it's not got a filled delay slot, then it can't
           conflict.  */
        continue;

      jmp = XVECEXP (jmp, 0, 0);

      if (recog_memoized (jmp) == CODE_FOR_tablejump)
        for (jmp = XEXP (XEXP (XVECEXP (PATTERN (jmp), 0, 1), 0), 0);
             !JUMP_TABLE_DATA_P (jmp);
             jmp = NEXT_INSN (jmp))
          continue;

      for_each_rtx (&PATTERN (jmp), mt_add_branches, insn);
    }

  /* Now scan for dependencies.  */
  for (insn = get_insns ();
       insn && !INSN_P (insn);
       insn = NEXT_INSN (insn))
    continue;

  for (; insn; insn = next)
    {
      rtx jmp, tmp;
      enum attr_type attr;

      gcc_assert (INSN_P (insn) && !INSN_DELETED_P (insn));

      /* Skip over USE insns to find the next real insn.  */
      for (next = NEXT_INSN (insn); next; next = NEXT_INSN (next))
        {
          if (!INSN_P (next))
            continue;
          if (GET_CODE (PATTERN (next)) != USE)
            break;
        }

      jmp = insn;
      if (GET_CODE (PATTERN (insn)) == SEQUENCE)
        jmp = XVECEXP (PATTERN (insn), 0, 0);

      attr = recog_memoized (jmp) >= 0 ? get_attr_type (jmp) : TYPE_UNKNOWN;

      if (next && attr == TYPE_LOAD)
        {
          /* A load.  See if NEXT is dependent, and if so insert a
             nop.  */
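          /* Worked example (a sketch; the mnemonics and register
             names are assumed for illustration, not taken from this
             port's machine description): on ms2 a load and a use of
             its result must be one insn apart, so a sequence like

               ldw  r9, r10, #0
               add  r11, r9, r9   ; consumes r9 one cycle too early

             must be padded to

               ldw  r9, r10, #0
               nop
               add  r11, r9, r9

             The code below detects that NEXT depends on the load and
             emits the nop after INSN.  */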
          tmp = PATTERN (next);
          if (GET_CODE (tmp) == SEQUENCE)
            tmp = PATTERN (XVECEXP (tmp, 0, 0));
          note_stores (PATTERN (insn), insn_dependent_p_1, &tmp);
          if (!tmp)
            emit_insn_after (gen_nop (), insn);
        }

      if (attr == TYPE_CALL)
        {
          /* A call.  Make sure we're not dependent on either of the
             previous two dynamic instructions.  */
          int nops = 0;
          int count;
          rtx prev = insn;
          rtx rescan = NULL_RTX;

          for (count = 2; count && !nops;)
            {
              int type;

              prev = PREV_INSN (prev);
              if (!prev)
                {
                  /* If we reach the start of the function, we must
                     presume the caller set the address in the delay
                     slot of the call instruction.  */
                  nops = count;
                  break;
                }

              if (BARRIER_P (prev))
                break;
              if (LABEL_P (prev))
                {
                  /* Look at branches to this label.  */
                  label_info *label;
                  branch_info *branch;

                  for (label = mt_labels; label; label = label->next)
                    if (label->label == prev)
                      {
                        for (branch = label->branches;
                             branch;
                             branch = branch->next)
                          {
                            tmp = mt_check_delay_slot (branch->insn, jmp);

                            if (tmp == branch->insn)
                              {
                                nops = count;
                                break;
                              }

                            if (tmp && branch->insn == next)
                              rescan = tmp;
                          }
                        break;
                      }
                  continue;
                }

              if (!INSN_P (prev) || GET_CODE (PATTERN (prev)) == USE)
                continue;

              if (GET_CODE (PATTERN (prev)) == SEQUENCE)
                {
                  /* Look at the delay slot.  */
                  tmp = mt_check_delay_slot (prev, jmp);
                  if (tmp == prev)
                    nops = count;
                  break;
                }

              type = (INSN_CODE (prev) >= 0 ? get_attr_type (prev)
                      : TYPE_COMPLEX);
              if (type == TYPE_CALL || type == TYPE_BRANCH)
                break;

              if (type == TYPE_LOAD || type == TYPE_ARITH
                  || type == TYPE_COMPLEX)
                {
                  tmp = PATTERN (jmp);
                  note_stores (PATTERN (prev), insn_dependent_p_1, &tmp);
                  if (!tmp)
                    {
                      nops = count;
                      break;
                    }
                }

              if (INSN_CODE (prev) >= 0)
                count--;
            }

          if (rescan)
            for (next = NEXT_INSN (rescan);
                 next && !INSN_P (next);
                 next = NEXT_INSN (next))
              continue;
          while (nops--)
            emit_insn_before (gen_nop (), insn);
        }
    }

  /* Free the data structures.  */
  while (mt_labels)
    {
      label_info *label = mt_labels;
      branch_info *branch, *next;

      mt_labels = label->next;

      for (branch = label->branches; branch; branch = next)
        {
          next = branch->next;
          free (branch);
        }

      free (label);
    }
}

/* Fixup the looping instructions, do delayed branch scheduling,
   fixup scheduling hazards.  */

static void
mt_machine_reorg (void)
{
  if (mt_flag_delayed_branch)
    dbr_schedule (get_insns (), dump_file);

  if (TARGET_MS2)
    {
      /* Force all instructions to be split into their final form.  */
      split_all_insns_noflow ();
      mt_reorg_hazard ();
    }
}

/* Initialize the GCC target structure.  */
const struct attribute_spec mt_attribute_table[];

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE mt_attribute_table

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX mt_struct_value_rtx

#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mt_pass_by_reference

#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK mt_pass_in_stack

#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mt_arg_partial_bytes

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS mt_setup_incoming_varargs

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mt_machine_reorg

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-mt.h"
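
/* A minimal sketch (hypothetical call site, not part of this file)
   of how the hook installed above is reached: the machine-dependent
   reorg pass invokes

     targetm.machine_dependent_reorg ();

   once per function, after register allocation, so mt_machine_reorg
   sees hard registers.  It then forces the remaining insn splits
   itself (split_all_insns_noflow) before running the ms2 hazard
   pass over the final instruction stream.  */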