/* sched.c
   (viewer header from the code-hosting page — "字号:" means "font size:" —
   preserved as a comment so the file remains parseable)  */
/* NOTE(review): tail of a function whose beginning lies above this chunk;
   kept verbatim.  It aborts when the searched-for element was not found.  */
} if (! found) abort (); return;}

#ifndef INSN_SCHEDULING
/* Empty stub used when the target provides no scheduling description.  */
void schedule_insns () {}
#else

/* Some compilers other than GCC do not know the `__inline' keyword;
   make it vanish for them.  */
#ifndef __GNUC__
#define __inline
#endif

/* Computation of memory dependencies. */

/* The *_insns and *_mems are paired lists.  Each pending memory operation
   will have a pointer to the MEM rtx on one list and a pointer to the
   containing insn on the other list in the same place in the list. */

/* We can't use add_dependence like the old code did, because a single insn
   may have multiple memory accesses, and hence needs to be on the list
   once for each memory access.  Add_dependence won't let you add an insn
   to a list more than once. */

/* An INSN_LIST containing all insns with pending read operations. */
static rtx pending_read_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending reads. */
static rtx pending_read_mems;

/* An INSN_LIST containing all insns with pending write operations. */
static rtx pending_write_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending writes. */
static rtx pending_write_mems;

/* Indicates the combined length of the two pending lists.  We must prevent
   these lists from ever growing too large since the number of dependencies
   produced is at least O(N*N), and execution time is at least O(4*N*N), as
   a function of the length of these pending lists. */
static int pending_lists_length;

/* An INSN_LIST containing all INSN_LISTs allocated but currently unused. */
static rtx unused_insn_list;

/* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused. */
static rtx unused_expr_list;

/* The last insn upon which all memory references must depend.  This is an
   insn which flushed the pending lists, creating a dependency between it
   and all previously pending memory references.  This creates a barrier
   (or a checkpoint) which no memory reference is allowed to cross.

   This includes all non constant CALL_INSNs.  When we do interprocedural
   alias analysis, this restriction can be relaxed.
   This may also be an INSN that writes memory if the pending lists grow
   too large. */
static rtx last_pending_memory_flush;

/* The last function call we have seen.  All hard regs, and, of course,
   the last function call, must depend on this. */
static rtx last_function_call;

/* The LOG_LINKS field of this is a list of insns which use a pseudo register
   that does not already cross a call.  We create dependencies between each
   of those insn and the next call insn, to ensure that they won't cross a
   call after scheduling is done. */
static rtx sched_before_next_call;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions. */
static rtx last_scheduled_insn;

/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links. */

/* Return the INSN_LIST containing INSN in LIST, or NULL if LIST does not
   contain INSN.  LIST is walked via its XEXP(list, 1) chain.  */

__inline static rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
	return list;
      list = XEXP (list, 1);
    }
  return 0;
}

/* Compute the function units used by INSN.  This caches the value returned
   by function_units_used.  A cached INSN_UNIT of zero means "not yet
   computed" (non-negative results are stored incremented so that zero
   stays available as the empty-cache marker).

   A function unit is encoded as the unit number if the value is
   non-negative and the compliment of a mask if the value is negative.
   A function unit index is the non-negative encoding. */

__inline static int
insn_unit (insn)
     rtx insn;
{
  register int unit = INSN_UNIT (insn);

  if (unit == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.  We
	 can't pass these directly to function_units_used because it will
	 trigger a fatal error for unrecognizable insns.
*/ if (INSN_CODE (insn) < 0) unit = -1; else { unit = function_units_used (insn); /* Increment non-negative values so we can cache zero. */ if (unit >= 0) unit++; } /* We only cache 16 bits of the result, so if the value is out of range, don't cache it. */ if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT || unit >= 0 || (~unit & ((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0) INSN_UNIT (insn) = unit; } return (unit > 0 ? unit - 1 : unit);}/* Compute the blockage range for executing INSN on UNIT. This caches the value returned by the blockage_range_function for the unit. These values are encoded in an int where the upper half gives the minimum value and the lower half gives the maximum value. */__inline static unsigned intblockage_range (unit, insn) int unit; rtx insn;{ unsigned int blockage = INSN_BLOCKAGE (insn); unsigned int range; if (UNIT_BLOCKED (blockage) != unit + 1) { range = function_units[unit].blockage_range_function (insn); /* We only cache the blockage range for one unit and then only if the values fit. */ if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS) INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range); } else range = BLOCKAGE_RANGE (blockage); return range;}/* A vector indexed by function unit instance giving the last insn to use the unit. The value of the function unit instance index for unit U instance I is (U + I * FUNCTION_UNITS_SIZE). */static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];/* A vector indexed by function unit instance giving the minimum time when the unit will unblock based on the maximum blockage cost. */static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];/* A vector indexed by function unit number giving the number of insns that remain to use the unit. */static int unit_n_insns[FUNCTION_UNITS_SIZE];/* Reset the function unit state to the null state. 
*/static voidclear_units (){ int unit; bzero (unit_last_insn, sizeof (unit_last_insn)); bzero (unit_tick, sizeof (unit_tick)); bzero (unit_n_insns, sizeof (unit_n_insns));}/* Record an insn as one that will use the units encoded by UNIT. */__inline static voidprepare_unit (unit) int unit;{ int i; if (unit >= 0) unit_n_insns[unit]++; else for (i = 0, unit = ~unit; unit; i++, unit >>= 1) if ((unit & 1) != 0) prepare_unit (i);}/* Return the actual hazard cost of executing INSN on the unit UNIT, instance INSTANCE at time CLOCK if the previous actual hazard cost was COST. */__inline static intactual_hazard_this_instance (unit, instance, insn, clock, cost) int unit, instance, clock, cost; rtx insn;{ int i; int tick = unit_tick[instance]; if (tick - clock > cost) { /* The scheduler is operating in reverse, so INSN is the executing insn and the unit's last insn is the candidate insn. We want a more exact measure of the blockage if we execute INSN at CLOCK given when we committed the execution of the unit's last insn. The blockage value is given by either the unit's max blockage constant, blockage range function, or blockage function. Use the most exact form for the given unit. */ if (function_units[unit].blockage_range_function) { if (function_units[unit].blockage_function) tick += (function_units[unit].blockage_function (insn, unit_last_insn[instance]) - function_units[unit].max_blockage); else tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn)) - function_units[unit].max_blockage); } if (tick - clock > cost) cost = tick - clock; } return cost;}/* Record INSN as having begun execution on the units encoded by UNIT at time CLOCK. */__inline static voidschedule_unit (unit, insn, clock) int unit, clock; rtx insn;{ int i; if (unit >= 0) { int instance = unit;#if MAX_MULTIPLICITY > 1 /* Find the first free instance of the function unit and use that one. We assume that one is free. */ for (i = function_units[unit].multiplicity - 1; i > 0; i--) { if (! 
actual_hazard_this_instance (unit, instance, insn, clock, 0)) break; instance += FUNCTION_UNITS_SIZE; }#endif unit_last_insn[instance] = insn; unit_tick[instance] = (clock + function_units[unit].max_blockage); } else for (i = 0, unit = ~unit; unit; i++, unit >>= 1) if ((unit & 1) != 0) schedule_unit (i, insn, clock);}/* Return the actual hazard cost of executing INSN on the units encoded by UNIT at time CLOCK if the previous actual hazard cost was COST. */__inline static intactual_hazard (unit, insn, clock, cost) int unit, clock, cost; rtx insn;{ int i; if (unit >= 0) { /* Find the instance of the function unit with the minimum hazard. */ int instance = unit; int best = instance; int best_cost = actual_hazard_this_instance (unit, instance, insn, clock, cost); int this_cost;#if MAX_MULTIPLICITY > 1 if (best_cost > cost) { for (i = function_units[unit].multiplicity - 1; i > 0; i--) { instance += FUNCTION_UNITS_SIZE; this_cost = actual_hazard_this_instance (unit, instance, insn, clock, cost); if (this_cost < best_cost) { best = instance; best_cost = this_cost; if (this_cost <= cost) break; } } }#endif cost = MAX (cost, best_cost); } else for (i = 0, unit = ~unit; unit; i++, unit >>= 1) if ((unit & 1) != 0) cost = actual_hazard (i, insn, clock, cost); return cost;}/* Return the potential hazard cost of executing an instruction on the units encoded by UNIT if the previous potential hazard cost was COST. An insn with a large blockage time is chosen in preference to one with a smaller time; an insn that uses a unit that is more likely to be used is chosen in preference to one with a unit that is less used. We are trying to minimize a subsequent actual hazard. 
*/__inline static intpotential_hazard (unit, insn, cost) int unit, cost; rtx insn;{ int i, ncost; unsigned int minb, maxb; if (unit >= 0) { minb = maxb = function_units[unit].max_blockage; if (maxb > 1) { if (function_units[unit].blockage_range_function) { maxb = minb = blockage_range (unit, insn); maxb = MAX_BLOCKAGE_COST (maxb); minb = MIN_BLOCKAGE_COST (minb); } if (maxb > 1) { /* Make the number of instructions left dominate. Make the minimum delay dominate the maximum delay. If all these are the same, use the unit number to add an arbitrary ordering. Other terms can be added. */ ncost = minb * 0x40 + maxb; ncost *= (unit_n_insns[unit] - 1) * 0x1000 + unit; if (ncost > cost) cost = ncost; } } } else for (i = 0, unit = ~unit; unit; i++, unit >>= 1) if ((unit & 1) != 0) cost = potential_hazard (i, insn, cost); return cost;}/* Compute cost of executing INSN given the dependence LINK on the insn USED. This is the number of virtual cycles taken between instruction issue and instruction results. */__inline static intinsn_cost (insn, link, used) rtx insn, link, used;{ register int cost = INSN_COST (insn); if (cost == 0) { recog_memoized (insn); /* A USE insn, or something else we don't need to understand. We can't pass these directly to result_ready_cost because it will trigger a fatal error for unrecognizable insns. */ if (INSN_CODE (insn) < 0) { INSN_COST (insn) = 1; return 1; } else { cost = result_ready_cost (insn); if (cost < 1) cost = 1; INSN_COST (insn) = cost; } } /* A USE insn should never require the value used to be computed. This allows the computation of a function's result and parameter values to overlap the return and call. */ recog_memoized (used); if (INSN_CODE (used) < 0) LINK_COST_FREE (link) = 1; /* If some dependencies vary the cost, compute the adjustment. Most commonly, the adjustment is complete: either the cost is ignored (in the case of an output- or anti-dependence), or the cost is unchanged. 
     These values are cached in the link as LINK_COST_FREE and
     LINK_COST_ZERO. */

  if (LINK_COST_FREE (link))
    cost = 1;
#ifdef ADJUST_COST
  else if (! LINK_COST_ZERO (link))
    {
      /* NOTE(review): the chunk ends here, mid-function; the remainder of
	 insn_cost (the ADJUST_COST branch and the matching #endif) lies
	 beyond this view.  */
/* Trailing viewer chrome from the code-hosting page (keyboard-shortcut
   help: copy code = Ctrl+C, search code = Ctrl+F, full-screen mode = F11,
   toggle theme = Ctrl+Shift+D, show shortcuts = ?, increase font size =
   Ctrl+=, decrease font size = Ctrl+-).  Not part of sched.c; preserved
   here as a comment so the file remains parseable.  */