sched.c — instruction scheduling dependence analysis (excerpt)
/* Analyze a single SET or CLOBBER rtx X, creating the dependencies
   generated by the write to its destination and by everything it reads.  */

static void
sched_analyze_1 (x, insn)
     rtx x;
     rtx insn;
{
  register int regno;
  register rtx dest = SET_DEST (x);

  if (dest == 0)
    return;

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (XEXP (dest, 1), insn);
          sched_analyze_2 (XEXP (dest, 2), insn);
        }
      dest = SUBREG_REG (dest);
    }

  if (GET_CODE (dest) == REG)
    {
      register int i;

      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
          while (--i >= 0)
            {
              rtx u;

              for (u = reg_last_uses[regno + i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              reg_last_uses[regno + i] = 0;

              if (reg_last_sets[regno + i])
                add_dependence (insn, reg_last_sets[regno + i],
                                REG_DEP_OUTPUT);

              reg_pending_sets[(regno + i) / REGSET_ELT_BITS]
                |= (REGSET_ELT_TYPE) 1 << ((regno + i) % REGSET_ELT_BITS);

              if ((call_used_regs[regno + i] || global_regs[regno + i])
                  && last_function_call)
                /* Function calls clobber all call_used regs.  */
                add_dependence (insn, last_function_call, REG_DEP_ANTI);
            }
        }
      else
        {
          rtx u;

          for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
          reg_last_uses[regno] = 0;

          if (reg_last_sets[regno])
            add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);

          reg_pending_sets[regno / REGSET_ELT_BITS]
            |= (REGSET_ELT_TYPE) 1 << (regno % REGSET_ELT_BITS);

          /* Pseudos that are REG_EQUIV to something may be replaced
             by that during reloading.  We need only add dependencies for
             the address in the REG_EQUIV note.  */
          if (! reload_completed
              && reg_known_equiv_p[regno]
              && GET_CODE (reg_known_value[regno]) == MEM)
            sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);

          /* Don't let it cross a call after scheduling if it doesn't
             already cross one.  */
          if (reg_n_calls_crossed[regno] == 0 && last_function_call)
            add_dependence (insn, last_function_call, REG_DEP_ANTI);
        }
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */

      if (pending_lists_length > 32)
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  The number 32 was chosen because it
             seems like a reasonable number.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the number 32.  */
          flush_pending_lists (insn);
        }
      else
        {
          rtx pending, pending_mem;

          pending = pending_read_insns;
          pending_mem = pending_read_mems;
          while (pending)
            {
              /* If a dependency already exists, don't create a new one.  */
              if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
                if (anti_dependence (XEXP (pending_mem, 0), dest))
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = pending_write_insns;
          pending_mem = pending_write_mems;
          while (pending)
            {
              /* If a dependency already exists, don't create a new one.  */
              if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
                if (output_dependence (XEXP (pending_mem, 0), dest))
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          if (last_pending_memory_flush)
            add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

          add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
                                   insn, dest);
        }
      sched_analyze_2 (XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (SET_SRC (x), insn);
}
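/* A minimal, self-contained sketch (not part of sched.c) of the register
   bookkeeping that sched_analyze_1 performs when a register is written:
   every recorded reader of the register gets an anti-dependence edge, the
   previous writer gets an output-dependence edge, and the use list is then
   cleared.  All demo_* names below are illustrative stand-ins, not real
   sched.c interfaces.  */

#include <stdio.h>

enum demo_dep { DEMO_DEP_TRUE, DEMO_DEP_ANTI, DEMO_DEP_OUTPUT };

struct demo_insn { int uid; };

#define DEMO_NREGS 4
#define DEMO_MAX_USES 8

static struct demo_insn *demo_last_uses[DEMO_NREGS][DEMO_MAX_USES];
static int demo_n_uses[DEMO_NREGS];
static struct demo_insn *demo_last_set[DEMO_NREGS];

static void
demo_add_dependence (struct demo_insn *insn, struct demo_insn *dep,
                     enum demo_dep kind)
{
  static const char *names[] = { "true", "anti", "output" };
  printf ("insn %d depends on insn %d (%s)\n",
          insn->uid, dep->uid, names[kind]);
}

/* Mirrors the REG branch of sched_analyze_1 for a single register.  */
static void
demo_analyze_reg_write (struct demo_insn *insn, int regno)
{
  int i;

  for (i = 0; i < demo_n_uses[regno]; i++)
    demo_add_dependence (insn, demo_last_uses[regno][i], DEMO_DEP_ANTI);
  demo_n_uses[regno] = 0;

  if (demo_last_set[regno])
    demo_add_dependence (insn, demo_last_set[regno], DEMO_DEP_OUTPUT);
  demo_last_set[regno] = insn;
}

int
main (void)
{
  struct demo_insn i1 = { 1 }, i2 = { 2 }, i3 = { 3 };

  demo_last_set[0] = &i1;                     /* insn 1 wrote r0 */
  demo_last_uses[0][demo_n_uses[0]++] = &i2;  /* insn 2 read r0 */
  demo_analyze_reg_write (&i3, 0);            /* insn 3 writes r0 */
  return 0;
}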
/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (x, insn)
     rtx x;
     rtx insn;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      {
        rtx link, prev;

        /* There may be a note before this insn now, but all notes will
           be removed before we actually try to schedule the insns, so
           it won't cause a problem later.  We must avoid it here though.  */

        /* User of CC0 depends on immediately preceding insn.  */
        SCHED_GROUP_P (insn) = 1;

        /* Make a copy of all dependencies on the immediately previous insn,
           and add to this insn.  This is so that all the dependencies will
           apply to the group.  Remove an explicit dependence on this insn
           as SCHED_GROUP_P now represents it.  */

        prev = PREV_INSN (insn);
        while (GET_CODE (prev) == NOTE)
          prev = PREV_INSN (prev);

        if (find_insn_list (prev, LOG_LINKS (insn)))
          remove_dependence (insn, prev);

        for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
          add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));

        return;
      }
#endif

    case REG:
      {
        int regno = REGNO (x);

        if (regno < FIRST_PSEUDO_REGISTER)
          {
            int i;

            i = HARD_REGNO_NREGS (regno, GET_MODE (x));
            while (--i >= 0)
              {
                reg_last_uses[regno + i]
                  = gen_rtx (INSN_LIST, VOIDmode,
                             insn, reg_last_uses[regno + i]);

                if (reg_last_sets[regno + i])
                  add_dependence (insn, reg_last_sets[regno + i], 0);

                if ((call_used_regs[regno + i] || global_regs[regno + i])
                    && last_function_call)
                  /* Function calls clobber all call_used regs.  */
                  add_dependence (insn, last_function_call, REG_DEP_ANTI);
              }
          }
        else
          {
            reg_last_uses[regno]
              = gen_rtx (INSN_LIST, VOIDmode, insn, reg_last_uses[regno]);

            if (reg_last_sets[regno])
              add_dependence (insn, reg_last_sets[regno], 0);

            /* Pseudos that are REG_EQUIV to something may be replaced
               by that during reloading.  We need only add dependencies for
               the address in the REG_EQUIV note.  */
            if (! reload_completed
                && reg_known_equiv_p[regno]
                && GET_CODE (reg_known_value[regno]) == MEM)
              sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);

            /* If the register does not already cross any calls, then add
               this insn to the sched_before_next_call list so that it will
               still not cross calls after scheduling.  */
            if (reg_n_calls_crossed[regno] == 0)
              add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
          }
        return;
      }

    case MEM:
      {
        /* Reading memory.  */
        rtx pending, pending_mem;

        pending = pending_read_insns;
        pending_mem = pending_read_mems;
        while (pending)
          {
            /* If a dependency already exists, don't create a new one.  */
            if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
              if (read_dependence (XEXP (pending_mem, 0), x))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        pending = pending_write_insns;
        pending_mem = pending_write_mems;
        while (pending)
          {
            /* If a dependency already exists, don't create a new one.  */
            if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
              if (true_dependence (XEXP (pending_mem, 0), x))
                add_dependence (insn, XEXP (pending, 0), 0);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        if (last_pending_memory_flush)
          add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
                                 insn, x);

        /* Take advantage of tail recursion here.  */
        sched_analyze_2 (XEXP (x, 0), insn);
        return;
      }

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
    case TRAP_IF:
      {
        rtx u;

        /* Traditional and volatile asm instructions must be considered to
           use and clobber all hard registers, all pseudo-registers and all
           of memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

           Consider for instance a volatile asm that changes the fpu
           rounding mode.  An insn should not be moved across this even if
           it only uses pseudo-regs because it might give an incorrectly
           rounded result.  */
        if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
          {
            int max_reg = max_reg_num ();

            for (i = 0; i < max_reg; i++)
              {
                for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                reg_last_uses[i] = 0;

                if (reg_last_sets[i])
                  add_dependence (insn, reg_last_sets[i], 0);
              }
            reg_pending_sets_all = 1;

            flush_pending_lists (insn);
          }

        /* For all ASM_OPERANDS, we must traverse the vector of input
           operands.  We cannot just fall through here since then we would
           be confused by the ASM_INPUT rtx inside ASM_OPERANDS, which does
           not indicate a traditional asm, unlike its normal usage.  */

        if (code == ASM_OPERANDS)
          {
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
              sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
            return;
          }
        break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as
         writes to get proper dependencies for following instructions.  We
         must handle them as reads to get proper dependencies from this to
         previous instructions.  Thus we need to pass them to both
         sched_analyze_1 and sched_analyze_2.  We must call sched_analyze_2
         first in order to get the proper antecedent for the read.  */
      sched_analyze_2 (XEXP (x, 0), insn);
      sched_analyze_1 (x, insn);
      return;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        sched_analyze_2 (XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (XVECEXP (x, i, j), insn);
    }
}
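/* A minimal, self-contained sketch (not part of sched.c) of the
   format-driven expression walk that ends sched_analyze_2: GCC keeps, for
   each rtx code, a format string in which 'e' marks a subexpression and
   'E' marks a vector of subexpressions, and generic traversals simply loop
   over that string.  The demo_* types below are illustrative stand-ins for
   the real rtl machinery (GET_CODE, GET_RTX_FORMAT, XEXP, XVECEXP), with
   a single 'E' vector per node for simplicity.  */

#include <stdio.h>

struct demo_rtx
{
  const char *name;            /* stands in for GET_CODE */
  const char *format;          /* stands in for GET_RTX_FORMAT */
  struct demo_rtx *ops[4];     /* 'e' operands, indexed by position */
  int nvec;                    /* length of the single 'E' vector */
  struct demo_rtx *vec[4];
};

/* Mirrors the "Other cases: walk the insn" loop in sched_analyze_2.  */
static void
demo_walk (struct demo_rtx *x, int depth)
{
  int i, j;
  const char *fmt;

  if (x == 0)
    return;
  printf ("%*s%s\n", depth * 2, "", x->name);

  fmt = x->format;
  for (i = 0; fmt[i]; i++)
    {
      if (fmt[i] == 'e')
        demo_walk (x->ops[i], depth + 1);
      else if (fmt[i] == 'E')
        for (j = 0; j < x->nvec; j++)
          demo_walk (x->vec[j], depth + 1);
    }
}

int
main (void)
{
  struct demo_rtx reg = { "reg", "" };
  struct demo_rtx mem = { "mem", "e", { &reg } };
  struct demo_rtx set = { "set", "ee", { &mem, &reg } };
  struct demo_rtx par = { "parallel", "E", { 0 }, 2, { &set, &set } };

  demo_walk (&par, 0);          /* prints the nested expression tree */
  return 0;
}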
/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (x, insn, loop_notes)
     rtx x, insn;
     rtx loop_notes;
{
  register RTX_CODE code = GET_CODE (x);
  rtx link;
  int maxreg = max_reg_num ();
  int i;

  if (code == SET || code == CLOBBER)
    sched_analyze_1 (x, insn);
  else if (code == PARALLEL)
    {
      register int i;

      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        {
          code = GET_CODE (XVECEXP (x, 0, i));
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (XVECEXP (x, 0, i), insn);
          else
            sched_analyze_2 (XVECEXP (x, 0, i), insn);
        }
    }
  else
    sched_analyze_2 (x, insn);

  /* Mark registers CLOBBERED or used by called function.  */
  if (GET_CODE (insn) == CALL_INSN)
    for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
      {
        if (GET_CODE (XEXP (link, 0)) == CLOBBER)
          sched_analyze_1 (XEXP (link, 0), insn);
        else
          sched_analyze_2 (XEXP (link, 0), insn);
      }

  /* If there is a LOOP_{BEG,END} note in the middle of a basic block, then
     we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */
  if (loop_notes)
    {
      int max_reg = max_reg_num ();
      rtx link;

      for (i = 0; i < max_reg; i++)
        {
          rtx u;

          for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
          reg_last_uses[i] = 0;

          if (reg_last_sets[i])
            add_dependence (insn, reg_last_sets[i], 0);
        }

      /* ... (the remainder of sched_analyze_insn is truncated in this
         listing) ... */
    }
}
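/* A minimal, self-contained sketch (not part of sched.c) of the pending
   memory-reference lists used above: reads and writes are remembered so
   that later references can be tested pairwise, but once the lists grow
   past a fixed cap (32 in sched.c) they are flushed, and the flushing insn
   becomes a single barrier that every later memory reference depends on.
   All demo_* names are illustrative stand-ins, and insns are reduced to
   integer uids.  */

#include <stdio.h>

#define DEMO_CAP 32

static int demo_pending_reads[DEMO_CAP], demo_n_reads;
static int demo_pending_writes[DEMO_CAP], demo_n_writes;
static int demo_last_flush = -1;        /* uid of the last barrier insn */

static void
demo_flush_pending (int insn)
{
  /* Make INSN depend on every pending reference, then empty the lists;
     INSN now stands for all of them.  */
  int i;

  for (i = 0; i < demo_n_reads; i++)
    printf ("insn %d depends on read insn %d (anti)\n",
            insn, demo_pending_reads[i]);
  for (i = 0; i < demo_n_writes; i++)
    printf ("insn %d depends on write insn %d\n",
            insn, demo_pending_writes[i]);

  demo_n_reads = demo_n_writes = 0;
  demo_last_flush = insn;
}

static void
demo_record_write (int insn)
{
  if (demo_n_reads + demo_n_writes >= DEMO_CAP)
    demo_flush_pending (insn);          /* lists too long: collapse them */
  else
    {
      if (demo_last_flush >= 0)
        printf ("insn %d depends on barrier insn %d (anti)\n",
                insn, demo_last_flush);
      demo_pending_writes[demo_n_writes++] = insn;
    }
}

int
main (void)
{
  int uid;

  /* Record enough writes to trigger exactly one flush.  */
  for (uid = 0; uid <= DEMO_CAP; uid++)
    demo_record_write (uid);
  return 0;
}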