sched.c
	  for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
	    add_dependence (insn, XEXP (link, 0), GET_MODE (link));
	  return;
	}
#endif

    case REG:
      {
	int regno = REGNO (x);
	if (regno < FIRST_PSEUDO_REGISTER)
	  {
	    int i;

	    i = HARD_REGNO_NREGS (regno, GET_MODE (x));
	    while (--i >= 0)
	      {
		reg_last_uses[regno + i]
		  = gen_rtx (INSN_LIST, VOIDmode,
			     insn, reg_last_uses[regno + i]);
		if (reg_last_sets[regno + i])
		  add_dependence (insn, reg_last_sets[regno + i], 0);
		if ((call_used_regs[regno + i] || global_regs[regno + i])
		    && last_function_call)
		  /* Function calls clobber all call_used regs.  */
		  add_dependence (insn, last_function_call, REG_DEP_ANTI);
	      }
	  }
	else
	  {
	    reg_last_uses[regno]
	      = gen_rtx (INSN_LIST, VOIDmode, insn, reg_last_uses[regno]);
	    if (reg_last_sets[regno])
	      add_dependence (insn, reg_last_sets[regno], 0);

	    /* If the register does not already cross any calls, then add this
	       insn to the sched_before_next_call list so that it will still
	       not cross calls after scheduling.  */
	    if (reg_n_calls_crossed[regno] == 0)
	      add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
	  }
	return;
      }

    case MEM:
      {
	/* Reading memory.  */
	rtx pending, pending_mem;

	pending = pending_read_insns;
	pending_mem = pending_read_mems;
	while (pending)
	  {
	    /* If a dependency already exists, don't create a new one.  */
	    if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
	      if (read_dependence (XEXP (pending_mem, 0), x))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);
	    pending = XEXP (pending, 1);
	    pending_mem = XEXP (pending_mem, 1);
	  }

	pending = pending_write_insns;
	pending_mem = pending_write_mems;
	while (pending)
	  {
	    /* If a dependency already exists, don't create a new one.  */
	    if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
	      if (true_dependence (XEXP (pending_mem, 0), x))
		add_dependence (insn, XEXP (pending, 0), 0);
	    pending = XEXP (pending, 1);
	    pending_mem = XEXP (pending_mem, 1);
	  }

	if (last_pending_memory_flush)
	  add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

	/* Always add these dependencies to pending_reads, since
	   this insn may be followed by a write.  */
	add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
				 insn, x);

	/* Take advantage of tail recursion here.  */
	sched_analyze_2 (XEXP (x, 0), insn);
	return;
      }

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
    case TRAP_IF:
      {
	rtx u;

	/* Traditional and volatile asm instructions must be considered to use
	   and clobber all hard registers and all of memory.  So must
	   TRAP_IF and UNSPEC_VOLATILE operations.  */
	if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
	  {
	    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	      {
		for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
		  if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
		    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
		reg_last_uses[i] = 0;
		if (reg_last_sets[i]
		    && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
		  add_dependence (insn, reg_last_sets[i], 0);
		reg_last_sets[i] = insn;
	      }

	    flush_pending_lists (insn);
	  }

	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
	   We can not just fall through here since then we would be confused
	   by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
	   traditional asms unlike their normal usage.  */

	if (code == ASM_OPERANDS)
	  {
	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
	      sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
	    return;
	  }

	break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
	 to get proper dependencies for following instructions.  We must
	 handle them as reads to get proper dependencies from this to
	 previous instructions.  Thus we need to pass them to both
	 sched_analyze_1 and sched_analyze_2.  We must call sched_analyze_2
	 first in order to get the proper antecedent for the read.  */
      sched_analyze_2 (XEXP (x, 0), insn);
      sched_analyze_1 (x, insn);
      return;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	sched_analyze_2 (XEXP (x, i), insn);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  sched_analyze_2 (XVECEXP (x, i, j), insn);
    }
}
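/* Illustrative sketch (not part of the original sched.c): the MEM case
   above checks a new read against the pending lists.  A read after a
   pending read gets an anti-style link only when read_dependence holds
   (e.g. both references are volatile); a read after a pending write is
   a true (flow) dependence via true_dependence; the write-after-read
   and write-after-write cases are handled by sched_analyze_1.  A
   minimal standalone classifier of those classic dependence kinds,
   assuming only the read/write direction of two conflicting memory
   references, might look like this (all names here are hypothetical):  */

enum example_dep_kind { DEP_NONE, DEP_TRUE, DEP_ANTI, DEP_OUTPUT };

static enum example_dep_kind
example_classify_dep (int first_is_write, int second_is_write)
{
  if (first_is_write && second_is_write)
    return DEP_OUTPUT;		/* write after write */
  if (first_is_write)
    return DEP_TRUE;		/* read after write */
  if (second_is_write)
    return DEP_ANTI;		/* write after read */
  return DEP_NONE;		/* read after read needs no ordering
				   unless the references are volatile */
}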
/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (x, insn)
     rtx x, insn;
{
  register RTX_CODE code = GET_CODE (x);
  rtx link;

  if (code == SET || code == CLOBBER)
    sched_analyze_1 (x, insn);
  else if (code == PARALLEL)
    {
      register int i;
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	{
	  code = GET_CODE (XVECEXP (x, 0, i));
	  if (code == SET || code == CLOBBER)
	    sched_analyze_1 (XVECEXP (x, 0, i), insn);
	  else
	    sched_analyze_2 (XVECEXP (x, 0, i), insn);
	}
    }
  else
    sched_analyze_2 (x, insn);

  /* Handle function calls.  */
  if (GET_CODE (insn) == CALL_INSN)
    {
      rtx dep_insn;
      rtx prev_dep_insn;

      /* When scheduling instructions, we make sure calls don't lose their
	 accompanying USE insns by depending them one on another in order.  */

      prev_dep_insn = insn;
      dep_insn = PREV_INSN (insn);
      while (GET_CODE (dep_insn) == INSN
	     && GET_CODE (PATTERN (dep_insn)) == USE)
	{
	  SCHED_GROUP_P (prev_dep_insn) = 1;

	  /* Make a copy of all dependencies on dep_insn, and add to insn.
	     This is so that all of the dependencies will apply to the
	     group.  */

	  for (link = LOG_LINKS (dep_insn); link; link = XEXP (link, 1))
	    add_dependence (insn, XEXP (link, 0), GET_MODE (link));

	  prev_dep_insn = dep_insn;
	  dep_insn = PREV_INSN (dep_insn);
	}
    }
}
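/* Illustrative sketch (not part of the original sched.c): the CALL_INSN
   code above keeps a call and the USE insns feeding its argument
   registers together by flagging each member SCHED_GROUP_P and copying
   the members' dependence lists onto the call, so the group honors the
   union of its members' constraints.  Modeling LOG_LINKS as a plain
   singly linked list, the copy step reduces to roughly the following
   (example_insn and example_dep are hypothetical stand-ins for rtx
   insns and INSN_LIST links):  */

#include <stdlib.h>

struct example_dep
{
  struct example_insn *pro;	/* the insn we depend on */
  struct example_dep *next;
};

struct example_insn
{
  struct example_dep *log_links;
  int sched_group_p;
};

/* Give LEADER a copy of every dependence recorded on MEMBER.  */
static void
example_absorb_deps (struct example_insn *leader, struct example_insn *member)
{
  struct example_dep *link;

  member->sched_group_p = 1;
  for (link = member->log_links; link; link = link->next)
    {
      struct example_dep *copy
	= (struct example_dep *) malloc (sizeof *copy);
      copy->pro = link->pro;
      copy->next = leader->log_links;
      leader->log_links = copy;
    }
}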
/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

static int
sched_analyze (head, tail)
     rtx head, tail;
{
  register rtx insn;
  register int n_insns = 0;
  register rtx u;
  register int luid = 0;

  for (insn = head; ; insn = NEXT_INSN (insn))
    {
      INSN_LUID (insn) = luid++;

      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
	{
	  sched_analyze_insn (PATTERN (insn), insn);
	  n_insns += 1;
	}
      else if (GET_CODE (insn) == CALL_INSN)
	{
	  rtx dest = 0;
	  rtx x;
	  register int i;

	  /* Any instruction using a hard register which may get clobbered
	     by a call needs to be marked as dependent on this call.
	     This prevents a use of a hard return reg from being moved
	     past a void call (i.e. it does not explicitly set the hard
	     return reg).  */

	  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	    if (call_used_regs[i] || global_regs[i])
	      {
		for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
		  if (GET_CODE (PATTERN (XEXP (u, 0))) != USE)
		    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
		reg_last_uses[i] = 0;
		if (reg_last_sets[i]
		    && GET_CODE (PATTERN (reg_last_sets[i])) != USE)
		  add_dependence (insn, reg_last_sets[i], REG_DEP_ANTI);
		reg_last_sets[i] = insn;
		/* Insn, being a CALL_INSN, magically depends on
		   `last_function_call' already.  */
	      }

	  /* For each insn which shouldn't cross a call, add a dependence
	     between that insn and this call insn.  */
	  x = LOG_LINKS (sched_before_next_call);
	  while (x)
	    {
	      add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
	      x = XEXP (x, 1);
	    }
	  LOG_LINKS (sched_before_next_call) = 0;

	  sched_analyze_insn (PATTERN (insn), insn);

	  /* We don't need to flush memory for a function call which does
	     not involve memory.  */
	  if (! CONST_CALL_P (insn))
	    {
	      /* In the absence of interprocedural alias analysis, we must
		 flush all pending reads and writes, and start new
		 dependencies starting from here.  */
	      flush_pending_lists (insn);
	    }

	  /* Depend this function call (actually, the user of this
	     function call) on all hard register clobberage.  */
	  last_function_call = insn;
	  n_insns += 1;
	}

      if (insn == tail)
	return n_insns;
    }
}

/* Called when we see a set of a register.  If death is true, then we are
   scanning backwards.  Mark that register as unborn.  If nobody says
   otherwise, that is how things will remain.  If death is false, then
   we are scanning forwards.  Mark that register as being born.  */

static void
sched_note_set (b, x, death)
     int b;
     rtx x;
     int death;
{
  register int regno, j;
  register rtx reg = SET_DEST (x);
  int subreg_p = 0;

  if (reg == 0)
    return;

  while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == STRICT_LOW_PART
	 || GET_CODE (reg) == SIGN_EXTRACT || GET_CODE (reg) == ZERO_EXTRACT)
    {
      /* Must treat modification of just one hardware register of a multi-reg
	 value or just a byte field of a register exactly the same way that
	 mark_set_1 in flow.c does, i.e. anything except a paradoxical subreg
	 does not kill the entire register.  */
      if (GET_CODE (reg) != SUBREG
	  || REG_SIZE (SUBREG_REG (reg)) > REG_SIZE (reg))
	subreg_p = 1;

      reg = SUBREG_REG (reg);
    }

  if (GET_CODE (reg) != REG)
    return;

  /* Global registers are always live, so the code below does not apply
     to them.  */

  regno = REGNO (reg);
  if (regno >= FIRST_PSEUDO_REGISTER || ! global_regs[regno])
    {
      register int offset = regno / REGSET_ELT_BITS;
      register REGSET_ELT_TYPE bit
	= (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);

      if (death)
	{
	  /* If we only set part of the register, then this set does not
	     kill it.  */
	  if (subreg_p)
	    return;

	  /* Try killing this register.  */
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
	      while (--j >= 0)
		{
		  offset = (regno + j) / REGSET_ELT_BITS;
		  bit = ((REGSET_ELT_TYPE) 1
			 << ((regno + j) % REGSET_ELT_BITS));
		  bb_live_regs[offset] &= ~bit;
		  bb_dead_regs[offset] |= bit;
		}
	    }
	  else
	    {
	      bb_live_regs[offset] &= ~bit;
	      bb_dead_regs[offset] |= bit;
	    }
	}
      else
	{
	  /* Make the register live again.  */
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
	      while (--j >= 0)
		{
		  offset = (regno + j) / REGSET_ELT_BITS;
		  bit = ((REGSET_ELT_TYPE) 1
			 << ((regno + j) % REGSET_ELT_BITS));
		  bb_live_regs[offset] |= bit;
		  bb_dead_regs[offset] &= ~bit;
		}
	    }
	  else
	    {
	      bb_live_regs[offset] |= bit;
	      bb_dead_regs[offset] &= ~bit;
	    }
	}
    }
}
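/* Illustrative sketch (not part of the original sched.c): sched_note_set
   above tracks liveness in regsets, which are bit vectors indexed by
   register number.  The offset/bit arithmetic it repeats is the standard
   bitset idiom; with a hypothetical element type standing in for
   REGSET_ELT_TYPE, marking one register dead reduces to this (the birth
   direction simply flips which vector gains the bit):  */

#define EXAMPLE_ELT_BITS (8 * sizeof (unsigned long))

static void
example_mark_dead (unsigned long *live, unsigned long *dead, int regno)
{
  int offset = regno / EXAMPLE_ELT_BITS;
  unsigned long bit = 1UL << (regno % EXAMPLE_ELT_BITS);

  live[offset] &= ~bit;		/* the reg is no longer live here...  */
  dead[offset] |= bit;		/* ...and is recorded as dying in this block */
}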
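/* Illustrative sketch (not part of the original sched.c): the SCHED_SORT
   macro and rank_for_schedule below rely on a comparator that never
   returns 0, because qsort is not a stable sort and a 0 result would let
   equal-priority insns be reordered arbitrarily between runs.  The usual
   trick, shown here on a hypothetical (priority, unique id) key, is to
   fall back to a unique tie-breaker, e.g.
   qsort (keys, n, sizeof (struct example_key), example_rank);  */

struct example_key
{
  int priority;
  int luid;			/* unique per insn, like INSN_LUID */
};

static int
example_rank (const void *xp, const void *yp)
{
  const struct example_key *x = (const struct example_key *) xp;
  const struct example_key *y = (const struct example_key *) yp;

  if (y->priority != x->priority)
    return y->priority - x->priority;	/* higher priority sorts first */

  /* Never return 0: break ties by a unique id so the resulting order
     is deterministic even though qsort itself is unstable.  */
  return x->luid - y->luid;
}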
/* Macros and functions for keeping the priority queue sorted, and
   dealing with queueing and unqueueing of instructions.  */

#define SCHED_SORT(READY, NEW_READY, OLD_READY)				\
  do { if ((NEW_READY) - (OLD_READY) == 1)				\
	 swap_sort (READY, NEW_READY);					\
       else if ((NEW_READY) - (OLD_READY) > 1)				\
	 qsort (READY, NEW_READY, sizeof (rtx), rank_for_schedule); }	\
  while (0)

/* Returns a positive value if y is preferred; returns a negative value if
   x is preferred.  Should never return 0, since that will make the sort
   unstable.  */

static int
rank_for_schedule (x, y)
     rtx *x, *y;
{
  rtx tmp = *y;
  rtx tmp2 = *x;
  rtx link;
  int tmp_class, tmp2_class;
  int value;

  /* Choose the instruction with the highest priority, if different.  */
  if (value = INSN_PRIORITY (tmp) - INSN_PRIORITY (tmp2))
    return value;

  if (last_scheduled_insn)
    {
      /* Classify the instructions into three classes:
	 1) Data dependent on last scheduled insn.
	 2) Anti/Output dependent on last scheduled insn.
	 3) Independent of last scheduled insn, or has latency of one.
	 Choose the insn from the highest numbered class if different.  */
      link = find_insn_list (tmp, LOG_LINKS (last_scheduled_insn));
      if (link == 0 || insn_cost (tmp, link, last_scheduled_insn) == 1)
	tmp_class = 3;
      else if (REG_NOTE_KIND (link) == 0)	/* Data dependence.  */
	tmp_class = 1;