sched.c
      int ncost = cost;

      ADJUST_COST (used, link, insn, ncost);
      if (ncost <= 1)
        LINK_COST_FREE (link) = ncost = 1;
      if (cost == ncost)
        LINK_COST_ZERO (link) = 1;
      cost = ncost;
    }
#endif

  return cost;
}

/* Compute the priority number for INSN.  */

static int
priority (insn)
     rtx insn;
{
  if (insn && GET_RTX_CLASS (GET_CODE (insn)) == 'i')
    {
      int prev_priority;
      int max_priority;
      int this_priority = INSN_PRIORITY (insn);
      rtx prev;

      if (this_priority > 0)
        return this_priority;

      max_priority = 1;

      /* Nonzero if these insns must be scheduled together.  */
      if (SCHED_GROUP_P (insn))
        {
          prev = insn;
          while (SCHED_GROUP_P (prev))
            {
              prev = PREV_INSN (prev);
              INSN_REF_COUNT (prev) += 1;
            }
        }

      for (prev = LOG_LINKS (insn); prev; prev = XEXP (prev, 1))
        {
          rtx x = XEXP (prev, 0);

          /* A dependence pointing to a note is always obsolete, because
             sched_analyze_insn will have created any necessary new
             dependences which replace it.  Notes can be created when
             instructions are deleted by insn splitting, or by register
             allocation.  */
          if (GET_CODE (x) == NOTE)
            {
              remove_dependence (insn, x);
              continue;
            }

          /* Clear the link cost adjustment bits.  */
          LINK_COST_FREE (prev) = 0;
#ifdef ADJUST_COST
          LINK_COST_ZERO (prev) = 0;
#endif

          /* This priority calculation was chosen because it results in the
             least instruction movement, and does not hurt the performance
             of the resulting code compared to the old algorithm.  This
             makes the sched algorithm more stable, which results in better
             code, because there is less register pressure, cross jumping
             is more likely to work, and debugging is easier.

             When all instructions have a latency of 1, there is no need to
             move any instructions.  Subtracting one here ensures that in
             such cases all instructions will end up with a priority of
             one, and hence no scheduling will be done.

             The original code did not subtract the one, and added the
             insn_cost of the current instruction to its priority (e.g.
             move the insn_cost call down to the end).  */
          if (REG_NOTE_KIND (prev) == 0)
            /* Data dependence.  */
            prev_priority = priority (x) + insn_cost (x, prev, insn) - 1;
          else
            /* Anti or output dependence.  Don't add the latency of this
               insn's result, because it isn't being used.  */
            prev_priority = priority (x);

          if (prev_priority > max_priority)
            max_priority = prev_priority;

          INSN_REF_COUNT (x) += 1;
        }

      prepare_unit (insn_unit (insn));
      INSN_PRIORITY (insn) = max_priority;
      return INSN_PRIORITY (insn);
    }

  return 0;
}
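/* Illustrative sketch (editorial addition, not part of the original
   sched.c): a minimal, self-contained model of the priority recurrence
   used above.  The dependence graph, the latencies, and the names
   toy_priority, dep and prio are all invented for illustration; in the
   real pass the graph comes from LOG_LINKS and the latencies from
   insn_cost.  */

#include <stdio.h>

#define N_INSNS 4

/* dep[i][j] is the latency of insn j as seen by insn i, or 0 if
   insn i does not depend on insn j (j must execute before i).  */
static int dep[N_INSNS][N_INSNS] = {
  {0, 0, 0, 0},                 /* insn 0: no dependences.  */
  {3, 0, 0, 0},                 /* insn 1 uses insn 0's result; latency 3.  */
  {1, 0, 0, 0},                 /* insn 2 uses insn 0's result; latency 1.  */
  {0, 1, 1, 0},                 /* insn 3 uses insns 1 and 2; latency 1.  */
};

static int prio[N_INSNS];

static int
toy_priority (i)
     int i;
{
  int j, p, max = 1;

  if (prio[i] > 0)
    return prio[i];
  for (j = 0; j < N_INSNS; j++)
    if (dep[i][j])
      {
        /* Mirrors: priority (x) + insn_cost (x, prev, insn) - 1.  */
        p = toy_priority (j) + dep[i][j] - 1;
        if (p > max)
          max = p;
      }
  return prio[i] = max;
}

int
main ()
{
  int i;

  for (i = 0; i < N_INSNS; i++)
    printf ("insn %d: priority %d\n", i, toy_priority (i));

  /* Prints 1, 3, 1, 3: the latency-3 edge dominates, and because of
     the "- 1" a graph whose latencies are all 1 would leave every
     priority at 1, so no reordering would be attempted.  */
  return 0;
}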
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
   them to the unused_*_list variables, so that they can be reused.  */

static void
free_pending_lists ()
{
  register rtx link, prev_link;

  if (pending_read_insns)
    {
      prev_link = pending_read_insns;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_read_insns;
      pending_read_insns = 0;
    }

  if (pending_write_insns)
    {
      prev_link = pending_write_insns;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_insn_list;
      unused_insn_list = pending_write_insns;
      pending_write_insns = 0;
    }

  if (pending_read_mems)
    {
      prev_link = pending_read_mems;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_read_mems;
      pending_read_mems = 0;
    }

  if (pending_write_mems)
    {
      prev_link = pending_write_mems;
      link = XEXP (prev_link, 1);

      while (link)
        {
          prev_link = link;
          link = XEXP (link, 1);
        }

      XEXP (prev_link, 1) = unused_expr_list;
      unused_expr_list = pending_write_mems;
      pending_write_mems = 0;
    }
}

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are
   saving so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (insn_list, mem_list, insn, mem)
     rtx *insn_list, *mem_list, insn, mem;
{
  register rtx link;

  if (unused_insn_list)
    {
      link = unused_insn_list;
      unused_insn_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (INSN_LIST);
  XEXP (link, 0) = insn;
  XEXP (link, 1) = *insn_list;
  *insn_list = link;

  if (unused_expr_list)
    {
      link = unused_expr_list;
      unused_expr_list = XEXP (link, 1);
    }
  else
    link = rtx_alloc (EXPR_LIST);
  XEXP (link, 0) = mem;
  XEXP (link, 1) = *mem_list;
  *mem_list = link;

  pending_lists_length++;
}

/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  */

static void
flush_pending_lists (insn)
     rtx insn;
{
  rtx link;

  while (pending_read_insns)
    {
      add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);

      link = pending_read_insns;
      pending_read_insns = XEXP (pending_read_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_read_mems;
      pending_read_mems = XEXP (pending_read_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  while (pending_write_insns)
    {
      add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);

      link = pending_write_insns;
      pending_write_insns = XEXP (pending_write_insns, 1);
      XEXP (link, 1) = unused_insn_list;
      unused_insn_list = link;

      link = pending_write_mems;
      pending_write_mems = XEXP (pending_write_mems, 1);
      XEXP (link, 1) = unused_expr_list;
      unused_expr_list = link;
    }
  pending_lists_length = 0;

  if (last_pending_memory_flush)
    add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

  last_pending_memory_flush = insn;
}

/* Analyze a single SET or CLOBBER rtx, X, creating all dependencies
   generated by the write to the destination of X, and reads of
   everything mentioned.  */

static void
sched_analyze_1 (x, insn)
     rtx x;
     rtx insn;
{
  register int regno;
  register rtx dest = SET_DEST (x);

  if (dest == 0)
    return;

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
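          /* (Editorial note: for example, a destination such as
             (zero_extract (reg:SI 65) (const_int 8) (const_int 3)) --
             register number invented for illustration -- stores into an
             8-bit field of reg 65; the field width and position operands
             are inputs, hence the two sched_analyze_2 calls below.  The
             register itself is handled once this loop strips down to it.)  */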
          sched_analyze_2 (XEXP (dest, 1), insn);
          sched_analyze_2 (XEXP (dest, 2), insn);
        }
      dest = SUBREG_REG (dest);
    }

  if (GET_CODE (dest) == REG)
    {
      register int offset, bit, i;

      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
          while (--i >= 0)
            {
              rtx u;

              for (u = reg_last_uses[regno + i]; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              reg_last_uses[regno + i] = 0;
              if (reg_last_sets[regno + i])
                add_dependence (insn, reg_last_sets[regno + i],
                                REG_DEP_OUTPUT);
              reg_last_sets[regno + i] = insn;
              /* Index by regno + i, not i: we are testing the hard reg
                 actually being written, not hard reg I.  */
              if ((call_used_regs[regno + i] || global_regs[regno + i])
                  && last_function_call)
                /* Function calls clobber all call_used regs.  */
                add_dependence (insn, last_function_call, REG_DEP_ANTI);
            }
        }
      else
        {
          rtx u;

          for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
          reg_last_uses[regno] = 0;
          if (reg_last_sets[regno])
            add_dependence (insn, reg_last_sets[regno], REG_DEP_OUTPUT);
          reg_last_sets[regno] = insn;

          /* Don't let it cross a call after scheduling if it doesn't
             already cross one.  */
          if (reg_n_calls_crossed[regno] == 0 && last_function_call)
            add_dependence (insn, last_function_call, REG_DEP_ANTI);
        }
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */

      if (pending_lists_length > 32)
        {
          /* Flush all pending reads and writes to prevent the pending
             lists from getting any larger.  Insn scheduling runs too
             slowly when these lists get long.  The number 32 was chosen
             because it seems like a reasonable number.  When compiling
             GCC with itself, this flush occurs 8 times for sparc, and
             10 times for m88k using the number 32.  */
          flush_pending_lists (insn);
        }
      else
        {
          rtx pending, pending_mem;

          pending = pending_read_insns;
          pending_mem = pending_read_mems;
          while (pending)
            {
              /* If a dependency already exists, don't create a new one.  */
              if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
                if (anti_dependence (XEXP (pending_mem, 0), dest, insn))
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = pending_write_insns;
          pending_mem = pending_write_mems;
          while (pending)
            {
              /* If a dependency already exists, don't create a new one.  */
              if (! find_insn_list (XEXP (pending, 0), LOG_LINKS (insn)))
                if (output_dependence (XEXP (pending_mem, 0), dest))
                  add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          if (last_pending_memory_flush)
            add_dependence (insn, last_pending_memory_flush, REG_DEP_ANTI);

          add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
                                   insn, dest);
        }
      sched_analyze_2 (XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (SET_SRC (x), insn);
}

/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (x, insn)
     rtx x;
     rtx insn;
{
  register int i;
  register int j;
  register enum rtx_code code;
  register char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field,
         but this does not mean that this insn is using cc0.  */
      return;
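      /* (Editorial note on the MEM case of sched_analyze_1 above, with
         an invented example.  Given

             insn A:  (set (mem X) (reg 1))     write X
             insn B:  (set (reg 2) (mem X))     read X
             insn C:  (set (mem X) (reg 3))     write X again

         keeping B after A is a true read-after-write dependence;
         keeping C after B is an anti dependence, which the scan of the
         pending reads finds with anti_dependence and records as
         REG_DEP_ANTI; keeping C after A is an output dependence, found
         with output_dependence on the pending writes and recorded as
         REG_DEP_OUTPUT.)  */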
#ifdef HAVE_cc0
    case CC0:
      {
        rtx link, prev;

        /* There may be a note before this insn now, but all notes will
           be removed before we actually try to schedule the insns, so
           it won't cause a problem later.  We must avoid it here
           though.  */

        /* User of CC0 depends on immediately preceding insn.  */
        SCHED_GROUP_P (insn) = 1;

        /* Make a copy of all dependencies on the immediately previous
           insn, and add to this insn.  This is so that all the
           dependencies will apply to the group.  Remove an explicit
           dependence on this insn as SCHED_GROUP_P now represents it.  */
        prev = PREV_INSN (insn);
        while (GET_CODE (prev) == NOTE)
          prev = PREV_INSN (prev);

        if (find_insn_list (prev, LOG_LINKS (insn)))
          remove_dependence (insn, prev);
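/* Illustrative sketch (editorial addition, not part of the original
   sched.c; the listing above is excerpted and cuts off inside
   sched_analyze_2): a toy model of the bounded pending-list scheme
   used by add_insn_mem_dependence, flush_pending_lists and the MEM
   case of sched_analyze_1.  The names note_mem_insn, MAX_PENDING and
   last_flush, and the plain-array representation, are invented; the
   real pass keeps parallel INSN_LIST/EXPR_LIST chains, recycles their
   cells through unused_*_list, and flushes once pending_lists_length
   exceeds 32.  */

#include <stdio.h>

#define MAX_PENDING 32

static int pending[MAX_PENDING];
static int n_pending;
static int last_flush = -1;     /* acts like last_pending_memory_flush */

/* Record that INSN touches memory.  While the list has room, INSN is
   remembered for precise alias checks against later insns.  When the
   list is full, give up on precision: make INSN depend on everything
   pending (modeled here by printing the edges), empty the list, and
   turn INSN into a barrier that every later memory insn depends on.  */

static void
note_mem_insn (insn)
     int insn;
{
  int i;

  if (n_pending >= MAX_PENDING)
    {
      for (i = 0; i < n_pending; i++)
        printf ("dep: insn %d -> insn %d (flush)\n", insn, pending[i]);
      n_pending = 0;
      last_flush = insn;
      return;
    }

  if (last_flush >= 0)
    printf ("dep: insn %d -> insn %d (barrier)\n", insn, last_flush);
  pending[n_pending++] = insn;
}

int
main ()
{
  int insn;

  /* With 40 memory insns, insn 32 triggers the one flush; insns 33-39
     each gain a single dependence on insn 32, so the analysis cost
     stays bounded no matter how long the block is.  */
  for (insn = 0; insn < 40; insn++)
    note_mem_insn (insn);
  return 0;
}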