sched.c
    return 0;

  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
          || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
                                  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
              && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
                    && GET_MODE (mem) != QImode
                    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
              && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
                    && GET_MODE (x) != QImode
                    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}

/* Anti dependence: X is written after read in MEM takes place.  */

int
anti_dependence (mem, x)
     rtx mem;
     rtx x;
{
  /* If MEM is an unchanging read, then it can't possibly conflict with
     the store to X, because there is at most one store to MEM, and it must
     have occurred somewhere before MEM.  */
  if (RTX_UNCHANGING_P (mem))
    return 0;

  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
          || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
                                  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
              && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
                    && GET_MODE (mem) != QImode
                    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
              && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
                    && GET_MODE (x) != QImode
                    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}

/* Output dependence: X is written after store in MEM takes place.  */

int
output_dependence (mem, x)
     rtx mem;
     rtx x;
{
  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
          || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
                                  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
              && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
                    && GET_MODE (mem) != QImode
                    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
              && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
                    && GET_MODE (x) != QImode
                    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}
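/* Illustrative sketch (not part of sched.c): together with plain
   read-after-read, the predicates above cover the four ways two references
   to the same location can be ordered.  The hypothetical helper below
   classifies a pair of accesses from read/write flags alone; the real
   predicates also consult memrefs_conflict_p and the volatility/struct
   heuristics shown above before reporting a conflict.  */

enum example_dep_kind
{
  EXAMPLE_READ_DEP,     /* read follows read */
  EXAMPLE_TRUE_DEP,     /* read follows write */
  EXAMPLE_ANTI_DEP,     /* write follows read */
  EXAMPLE_OUTPUT_DEP    /* write follows write */
};

/* EARLIER_WRITES and LATER_WRITES are nonzero if the corresponding access
   stores to the location.  E.g. a store followed by a load of the same
   location yields EXAMPLE_TRUE_DEP.  */
static enum example_dep_kind
example_classify_dependence (int earlier_writes, int later_writes)
{
  if (earlier_writes)
    return later_writes ? EXAMPLE_OUTPUT_DEP : EXAMPLE_TRUE_DEP;
  return later_writes ? EXAMPLE_ANTI_DEP : EXAMPLE_READ_DEP;
}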
/* Helper functions for instruction scheduling.  */

/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

static void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */

  next = NEXT_INSN (elem);

#ifdef HAVE_cc0
  while (next && GET_CODE (next) == NOTE)
    next = NEXT_INSN (next);
#endif

  if (next && SCHED_GROUP_P (next))
    {
      /* Notes will never intervene here though, so don't bother checking
         for them.  */
      /* We must reject CODE_LABELs, so that we don't get confused by one
         that has LABEL_PRESERVE_P set, which is represented by the same bit
         in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
         SCHED_GROUP_P.  */
      while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next))
             && GET_CODE (NEXT_INSN (next)) != CODE_LABEL)
        next = NEXT_INSN (next);

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
        return;

      /* Make the dependence to NEXT, the last insn of the group, instead
         of the original ELEM.  */
      elem = next;
    }

  /* Check that we don't already have this dependence.  */
  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    if (XEXP (link, 0) == elem)
      {
        /* If this is a more restrictive type of dependence than the existing
           one, then change the existing dependence to this type.  */
        if ((int) dep_type < (int) REG_NOTE_KIND (link))
          PUT_REG_NOTE_KIND (link, dep_type);
        return;
      }

  /* Might want to check one level of transitivity to save conses.  */

  link = rtx_alloc (INSN_LIST);
  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);
  XEXP (link, 0) = elem;
  XEXP (link, 1) = LOG_LINKS (insn);
  LOG_LINKS (insn) = link;
}

/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS of INSN.
   Abort if not found.  */

static void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link;
       prev = link, link = XEXP (link, 1))
    {
      if (XEXP (link, 0) == elem)
        {
          if (prev)
            XEXP (prev, 1) = XEXP (link, 1);
          else
            LOG_LINKS (insn) = XEXP (link, 1);
          found = 1;
        }
    }

  if (! found)
    abort ();
  return;
}

#ifndef INSN_SCHEDULING
void
schedule_insns (dump_file)
     FILE *dump_file;
{
}
#else
#ifndef __GNUC__
#define __inline
#endif

/* Computation of memory dependencies.  */

/* The *_insns and *_mems are paired lists.  Each pending memory operation
   will have a pointer to the MEM rtx on one list and a pointer to the
   containing insn on the other list in the same place in the list.  */

/* We can't use add_dependence like the old code did, because a single insn
   may have multiple memory accesses, and hence needs to be on the list
   once for each memory access.  Add_dependence won't let you add an insn
   to a list more than once.  */

/* An INSN_LIST containing all insns with pending read operations.  */
static rtx pending_read_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
static rtx pending_read_mems;

/* An INSN_LIST containing all insns with pending write operations.  */
static rtx pending_write_insns;

/* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
static rtx pending_write_mems;

/* Indicates the combined length of the two pending lists.  We must prevent
   these lists from ever growing too large since the number of dependencies
   produced is at least O(N*N), and execution time is at least O(4*N*N),
   as a function of the length of these pending lists.  */
static int pending_lists_length;

/* An INSN_LIST containing all INSN_LISTs allocated but currently unused.  */
static rtx unused_insn_list;

/* An EXPR_LIST containing all EXPR_LISTs allocated but currently unused.  */
static rtx unused_expr_list;

/* The last insn upon which all memory references must depend.
   This is an insn which flushed the pending lists, creating a dependency
   between it and all previously pending memory references.  This creates
   a barrier (or a checkpoint) which no memory reference is allowed to cross.

   This includes all non constant CALL_INSNs.  When we do interprocedural
   alias analysis, this restriction can be relaxed.
   This may also be an INSN that writes memory if the pending lists grow
   too large.  */
static rtx last_pending_memory_flush;

/* The last function call we have seen.  All hard regs, and, of course,
   the last function call, must depend on this.  */
static rtx last_function_call;

/* The LOG_LINKS field of this is a list of insns which use a pseudo register
   that does not already cross a call.  We create dependencies between each
   of those insns and the next call insn, to ensure that they won't cross a
   call after scheduling is done.  */
static rtx sched_before_next_call;

/* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
   so that insns independent of the last scheduled insn will be preferred
   over dependent instructions.  */
static rtx last_scheduled_insn;
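/* Illustrative sketch (not part of sched.c): the pending_*_insns and
   pending_*_mems lists above are kept in lockstep, so an entry's position
   in one list identifies its partner in the other.  The node type and
   helper names below are hypothetical; the real code uses INSN_LIST and
   EXPR_LIST rtx nodes and recycles them through unused_insn_list and
   unused_expr_list instead of calling malloc.  */

#include <stdlib.h>

struct example_cell { void *car; struct example_cell *cdr; };

/* Push VALUE onto *LIST, mirroring how a new (insn, mem) pair is pushed
   onto the heads of both pending lists at the same time.  */
static void
example_push (struct example_cell **list, void *value)
{
  struct example_cell *c = malloc (sizeof *c);
  c->car = value;
  c->cdr = *list;
  *list = c;
}

/* Walk the two lists in lockstep; INSNS and MEMS are assumed to have the
   same length, exactly as the paired pending lists do.  */
static void
example_walk_pairs (struct example_cell *insns, struct example_cell *mems,
                    void (*visit) (void *insn, void *mem))
{
  for (; insns != 0; insns = insns->cdr, mems = mems->cdr)
    visit (insns->car, mems->car);
}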
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */

/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

__inline static rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
        return list;
      list = XEXP (list, 1);
    }
  return 0;
}

/* Compute the function units used by INSN.  This caches the value returned
   by function_units_used.  A function unit is encoded as the unit number if
   the value is non-negative and the complement of a mask if the value is
   negative.  A function unit index is the non-negative encoding.  */

__inline static int
insn_unit (insn)
     rtx insn;
{
  register int unit = INSN_UNIT (insn);

  if (unit == 0)
    {
      recog_memoized (insn);

      /* A USE insn, or something else we don't need to understand.
         We can't pass these directly to function_units_used because it will
         trigger a fatal error for unrecognizable insns.  */
      if (INSN_CODE (insn) < 0)
        unit = -1;
      else
        {
          unit = function_units_used (insn);
          /* Increment non-negative values so we can cache zero.  */
          if (unit >= 0)
            unit++;
        }

      /* We only cache 16 bits of the result, so if the value is out of
         range, don't cache it.  */
      if (FUNCTION_UNITS_SIZE < HOST_BITS_PER_SHORT
          || unit >= 0
          || (~unit & ((1 << (HOST_BITS_PER_SHORT - 1)) - 1)) == 0)
        INSN_UNIT (insn) = unit;
    }
  return (unit > 0 ? unit - 1 : unit);
}

/* Compute the blockage range for executing INSN on UNIT.  This caches the
   value returned by the blockage_range_function for the unit.  These values
   are encoded in an int where the upper half gives the minimum value and
   the lower half gives the maximum value.  */

__inline static unsigned int
blockage_range (unit, insn)
     int unit;
     rtx insn;
{
  unsigned int blockage = INSN_BLOCKAGE (insn);
  unsigned int range;

  if (UNIT_BLOCKED (blockage) != unit + 1)
    {
      range = function_units[unit].blockage_range_function (insn);
      /* We only cache the blockage range for one unit and then only if
         the values fit.  */
      if (HOST_BITS_PER_INT >= UNIT_BITS + 2 * BLOCKAGE_BITS)
        INSN_BLOCKAGE (insn) = ENCODE_BLOCKAGE (unit + 1, range);
    }
  else
    range = BLOCKAGE_RANGE (blockage);

  return range;
}

/* A vector indexed by function unit instance giving the last insn to use
   the unit.  The value of the function unit instance index for unit U
   instance I is (U + I * FUNCTION_UNITS_SIZE).  */
static rtx unit_last_insn[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit instance giving the minimum time when
   the unit will unblock based on the maximum blockage cost.  */
static int unit_tick[FUNCTION_UNITS_SIZE * MAX_MULTIPLICITY];

/* A vector indexed by function unit number giving the number of insns
   that remain to use the unit.  */
static int unit_n_insns[FUNCTION_UNITS_SIZE];

/* Reset the function unit state to the null state.  */

static void
clear_units ()
{
  bzero ((char *) unit_last_insn, sizeof (unit_last_insn));
  bzero ((char *) unit_tick, sizeof (unit_tick));
  bzero ((char *) unit_n_insns, sizeof (unit_n_insns));
}
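/* Illustrative sketch (not part of sched.c): two encodings used above.
   insn_unit returns either a single unit number (>= 0) or the complement
   of a bit mask (< 0) when an insn may use any of several units, and
   blockage_range packs a (min, max) pair into one int with the minimum in
   the upper half and the maximum in the lower half.  The example_* names
   and the 8-bit half width below are hypothetical; the real code goes
   through the ENCODE_BLOCKAGE, UNIT_BLOCKED and BLOCKAGE_RANGE macros
   referenced above.  */

#define EXAMPLE_BLOCKAGE_BITS 8  /* assumed width of each half for the sketch */

/* Pack MIN and MAX into one value; example_encode_range (2, 5) yields
   0x205, so the minimum comes back as (r >> 8) and the maximum as
   (r & 0xff).  */
static unsigned int
example_encode_range (unsigned int min, unsigned int max)
{
  return (min << EXAMPLE_BLOCKAGE_BITS) | max;
}

/* Count how many units an insn_unit-style value denotes: one for a plain
   unit number, one per set bit of the mask for a negative value, the same
   way prepare_unit below walks the complemented mask bit by bit.
   E.g. example_count_units (~0x6) == 2 because mask 0b110 has two bits.  */
static int
example_count_units (int unit)
{
  int n = 0, mask;

  if (unit >= 0)
    return 1;
  for (mask = ~unit; mask != 0; mask >>= 1)
    n += mask & 1;
  return n;
}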
/* Record an insn as one that will use the units encoded by UNIT.  */

__inline static void
prepare_unit (unit)
     int unit;
{
  int i;

  if (unit >= 0)
    unit_n_insns[unit]++;
  else
    for (i = 0, unit = ~unit; unit; i++, unit >>= 1)
      if ((unit & 1) != 0)
        prepare_unit (i);
}

/* Return the actual hazard cost of executing INSN on the unit UNIT,
   instance INSTANCE at time CLOCK if the previous actual hazard cost
   was COST.  */

__inline static int
actual_hazard_this_instance (unit, instance, insn, clock, cost)
     int unit, instance, clock, cost;
     rtx insn;
{
  int tick = unit_tick[instance];

  if (tick - clock > cost)
    {
      /* The scheduler is operating in reverse, so INSN is the executing
         insn and the unit's last insn is the candidate insn.  We want a
         more exact measure of the blockage if we execute INSN at CLOCK
         given when we committed the execution of the unit's last insn.

         The blockage value is given by either the unit's max blockage
         constant, blockage range function, or blockage function.  Use
         the most exact form for the given unit.  */

      if (function_units[unit].blockage_range_function)
        {
          if (function_units[unit].blockage_function)
            tick += (function_units[unit].blockage_function
                     (insn, unit_last_insn[instance])
                     - function_units[unit].max_blockage);
          else
            tick += ((int) MAX_BLOCKAGE_COST (blockage_range (unit, insn))
                     - function_units[unit].max_blockage);
        }
      if (tick - clock > cost)
        cost = tick - clock;
    }
  return cost;
}

/* Record INSN as having begun execution on the units encoded by UNIT
   at time CLOCK.  */

__inline static void
schedule_unit (unit, insn, clock)
     int unit, clock;
     rtx insn;
{
  int i;