/* sched.c (excerpt) */
          return 0;
          break;

        case 'e':
          if (rtx_equal_for_memref_p (XEXP (x, i), XEXP (y, i)) == 0)
            return 0;
          break;

        case 'S':
        case 's':
          if (strcmp (XSTR (x, i), XSTR (y, i)))
            return 0;
          break;

        case 'u':
          /* These are just backpointers, so they don't matter.  */
          break;

        case '0':
          break;

          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's,
             except for within LABEL_REFs and SYMBOL_REFs.  */
        default:
          abort ();
        }
    }
  return 1;
}

/* Given an rtx X, find a SYMBOL_REF or LABEL_REF within X and return it,
   or return 0 if none found.  */

static rtx
find_symbolic_term (x)
     rtx x;
{
  register int i;
  register enum rtx_code code;
  register char *fmt;

  code = GET_CODE (x);
  if (code == SYMBOL_REF || code == LABEL_REF)
    return x;
  if (GET_RTX_CLASS (code) == 'o')
    return 0;

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      rtx t;

      if (fmt[i] == 'e')
        {
          t = find_symbolic_term (XEXP (x, i));
          if (t != 0)
            return t;
        }
      else if (fmt[i] == 'E')
        break;
    }
  return 0;
}
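/* The traversal above is driven entirely by the rtx format string: 'e'
   operands are recursed into, an 'E' vector stops the scan, and leaf
   formats are skipped.  As an illustration only (this sketch is NOT part
   of sched.c, and the toy_expr type and names below are invented for the
   example), the same format-string-driven walk over a toy expression
   tree looks like this:  */

#if 0 /* illustrative sketch, not compiled with the scheduler */
#include <stddef.h>

struct toy_expr
{
  char code;                 /* 'S' = symbol leaf, 'P' = plus, ...  */
  const char *fmt;           /* e.g. "ee" for a binary operator  */
  struct toy_expr *op[2];    /* sub-expressions for the 'e' slots  */
};

/* Return the first symbol leaf found in X, or NULL, mirroring the
   shape of find_symbolic_term.  */
static struct toy_expr *
toy_find_symbol (struct toy_expr *x)
{
  size_t i;

  if (x == NULL)
    return NULL;
  if (x->code == 'S')
    return x;

  for (i = 0; x->fmt[i] != '\0'; i++)
    if (x->fmt[i] == 'e')
      {
        struct toy_expr *t = toy_find_symbol (x->op[i]);
        if (t != NULL)
          return t;
      }
  return NULL;
}
#endif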
/* Return nonzero if X and Y (memory addresses) could reference the
   same location in memory.  C is an offset accumulator.  When
   C is nonzero, we are testing aliases between X and Y + C.
   XSIZE is the size in bytes of the X reference,
   similarly YSIZE is the size in bytes for Y.

   If XSIZE or YSIZE is zero, we do not know the amount of memory being
   referenced (the reference was BLKmode), so make the most pessimistic
   assumptions.

   We recognize the following cases of non-conflicting memory:

        (1) addresses involving the frame pointer cannot conflict
            with addresses involving static variables.
        (2) static variables with different addresses cannot conflict.

   Nice to notice that varying addresses cannot conflict with fp if no
   local variables had their addresses taken, but that's too hard now.  */

static int
memrefs_conflict_p (xsize, x, ysize, y, c)
     rtx x, y;
     int xsize, ysize;
     HOST_WIDE_INT c;
{
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);
  else if (GET_CODE (x) == LO_SUM)
    x = XEXP (x, 1);
  else
    x = canon_rtx (x);
  if (GET_CODE (y) == HIGH)
    y = XEXP (y, 0);
  else if (GET_CODE (y) == LO_SUM)
    y = XEXP (y, 1);
  else
    y = canon_rtx (y);

  if (rtx_equal_for_memref_p (x, y))
    return (xsize == 0 || ysize == 0
            || (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0));

  if (y == frame_pointer_rtx || y == stack_pointer_rtx)
    {
      rtx t = y;
      int tsize = ysize;
      y = x; ysize = xsize;
      x = t; xsize = tsize;
    }

  if (x == frame_pointer_rtx || x == stack_pointer_rtx)
    {
      rtx y1;

      if (CONSTANT_P (y))
        return 0;

      if (GET_CODE (y) == PLUS
          && canon_rtx (XEXP (y, 0)) == x
          && (y1 = canon_rtx (XEXP (y, 1)))
          && GET_CODE (y1) == CONST_INT)
        {
          c += INTVAL (y1);
          return (xsize == 0 || ysize == 0
                  || (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0));
        }

      if (GET_CODE (y) == PLUS
          && (y1 = canon_rtx (XEXP (y, 0)))
          && CONSTANT_P (y1))
        return 0;

      return 1;
    }

  if (GET_CODE (x) == PLUS)
    {
      /* The fact that X is canonicalized means that this
         PLUS rtx is canonicalized.  */
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (GET_CODE (y) == PLUS)
        {
          /* The fact that Y is canonicalized means that this
             PLUS rtx is canonicalized.  */
          rtx y0 = XEXP (y, 0);
          rtx y1 = XEXP (y, 1);

          if (rtx_equal_for_memref_p (x1, y1))
            return memrefs_conflict_p (xsize, x0, ysize, y0, c);
          if (rtx_equal_for_memref_p (x0, y0))
            return memrefs_conflict_p (xsize, x1, ysize, y1, c);
          if (GET_CODE (x1) == CONST_INT)
            if (GET_CODE (y1) == CONST_INT)
              return memrefs_conflict_p (xsize, x0, ysize, y0,
                                         c - INTVAL (x1) + INTVAL (y1));
            else
              return memrefs_conflict_p (xsize, x0, ysize, y,
                                         c - INTVAL (x1));
          else if (GET_CODE (y1) == CONST_INT)
            return memrefs_conflict_p (xsize, x, ysize, y0,
                                       c + INTVAL (y1));

          /* Handle case where we cannot understand iteration operators,
             but we notice that the base addresses are distinct objects.  */
          x = find_symbolic_term (x);
          if (x == 0)
            return 1;
          y = find_symbolic_term (y);
          if (y == 0)
            return 1;
          return rtx_equal_for_memref_p (x, y);
        }
      else if (GET_CODE (x1) == CONST_INT)
        return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
    }
  else if (GET_CODE (y) == PLUS)
    {
      /* The fact that Y is canonicalized means that this
         PLUS rtx is canonicalized.  */
      rtx y0 = XEXP (y, 0);
      rtx y1 = XEXP (y, 1);

      if (GET_CODE (y1) == CONST_INT)
        return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
      else
        return 1;
    }

  if (GET_CODE (x) == GET_CODE (y))
    switch (GET_CODE (x))
      {
      case MULT:
        {
          /* Handle cases where we expect the second operands to be the
             same, and check only whether the first operand would conflict
             or not.  */
          rtx x0, y0;
          rtx x1 = canon_rtx (XEXP (x, 1));
          rtx y1 = canon_rtx (XEXP (y, 1));

          if (! rtx_equal_for_memref_p (x1, y1))
            return 1;
          x0 = canon_rtx (XEXP (x, 0));
          y0 = canon_rtx (XEXP (y, 0));
          if (rtx_equal_for_memref_p (x0, y0))
            return (xsize == 0 || ysize == 0
                    || (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0));

          /* Can't properly adjust our sizes.  */
          if (GET_CODE (x1) != CONST_INT)
            return 1;
          xsize /= INTVAL (x1);
          ysize /= INTVAL (x1);
          c /= INTVAL (x1);
          return memrefs_conflict_p (xsize, x0, ysize, y0, c);
        }
      }

  if (CONSTANT_P (x))
    {
      if (GET_CODE (x) == CONST_INT && GET_CODE (y) == CONST_INT)
        {
          c += (INTVAL (y) - INTVAL (x));
          return (xsize == 0 || ysize == 0
                  || (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0));
        }

      if (GET_CODE (x) == CONST)
        {
          if (GET_CODE (y) == CONST)
            return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
                                       ysize, canon_rtx (XEXP (y, 0)), c);
          else
            return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
                                       ysize, y, c);
        }
      if (GET_CODE (y) == CONST)
        return memrefs_conflict_p (xsize, x, ysize,
                                   canon_rtx (XEXP (y, 0)), c);

      if (CONSTANT_P (y))
        return (rtx_equal_for_memref_p (x, y)
                && (xsize == 0 || ysize == 0
                    || (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0)));

      return 1;
    }
  return 1;
}
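/* Every "return (xsize == 0 || ...)" in memrefs_conflict_p above is the
   same interval test: treat X as the byte range [0, xsize) and Y + C as
   [c, c + ysize); the references may conflict unless the two ranges are
   provably disjoint, and a size of 0 means "extent unknown" (BLKmode),
   so be pessimistic.  A standalone restatement of that test with a few
   worked cases (illustration only, NOT part of sched.c; the names are
   invented for the sketch):  */

#if 0 /* illustrative sketch, not compiled with the scheduler */
#include <assert.h>

/* Nonzero if [0, xsize) and [c, c + ysize) may overlap.  */
static int
ranges_may_overlap (int xsize, int ysize, long c)
{
  if (xsize == 0 || ysize == 0)   /* BLKmode: extent unknown.  */
    return 1;
  return (c >= 0 && xsize > c) || (c < 0 && ysize + c > 0);
}

static void
ranges_demo (void)
{
  /* 4-byte ref at base vs. 4-byte ref at base+4: disjoint.  */
  assert (! ranges_may_overlap (4, 4, 4));
  /* 8-byte ref at base vs. 4-byte ref at base+4: bytes 4..7 collide.  */
  assert (ranges_may_overlap (8, 4, 4));
  /* 4-byte ref at base vs. 8-byte ref at base-8: disjoint.  */
  assert (! ranges_may_overlap (4, 8, -8));
}
#endif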
/* Functions to compute memory dependencies.

   Since we process the insns in execution order, we can build tables
   to keep track of what registers are fixed (and not aliased), what
   registers are varying in known ways, and what registers are varying
   in unknown ways.

   If both memory references are volatile, then there must always be a
   dependence between the two references, since their order can not be
   changed.  A volatile and non-volatile reference can be interchanged
   though.

   A MEM_IN_STRUCT reference at a varying address can never conflict
   with a non-MEM_IN_STRUCT reference at a fixed address.  */

/* Read dependence: X is read after read in MEM takes place.  There can
   only be a dependence here if both reads are volatile.  */

int
read_dependence (mem, x)
     rtx mem;
     rtx x;
{
  return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
}

/* True dependence: X is read after store in MEM takes place.  */

int
true_dependence (mem, x)
     rtx mem;
     rtx x;
{
  /* If X is an unchanging read, then it can't possibly conflict with any
     non-unchanging store.  It may conflict with an unchanging write
     though, because there may be a single store to this address to
     initialize it.  Just fall through to the code below to resolve the
     case where we have both an unchanging read and an unchanging write.
     This won't handle all cases optimally, but the possible performance
     loss should be negligible.  */
  if (RTX_UNCHANGING_P (x) && ! RTX_UNCHANGING_P (mem))
    return 0;

  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
          || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
                                  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
              && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
                    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
              && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
                    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}

/* Anti dependence: X is written after read in MEM takes place.  */

int
anti_dependence (mem, x)
     rtx mem;
     rtx x;
{
  /* If MEM is an unchanging read, then it can't possibly conflict with
     the store to X, because there is at most one store to MEM, and it
     must have occurred somewhere before MEM.  */
  if (RTX_UNCHANGING_P (mem))
    return 0;

  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
          || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
                                  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
              && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
                    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
              && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
                    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}

/* Output dependence: X is written after store in MEM takes place.  */

int
output_dependence (mem, x)
     rtx mem;
     rtx x;
{
  return ((MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
          || (memrefs_conflict_p (SIZE_FOR_MODE (mem), XEXP (mem, 0),
                                  SIZE_FOR_MODE (x), XEXP (x, 0), 0)
              && ! (MEM_IN_STRUCT_P (mem) && rtx_addr_varies_p (mem)
                    && ! MEM_IN_STRUCT_P (x) && ! rtx_addr_varies_p (x))
              && ! (MEM_IN_STRUCT_P (x) && rtx_addr_varies_p (x)
                    && ! MEM_IN_STRUCT_P (mem) && ! rtx_addr_varies_p (mem))));
}
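/* The three predicates above are the classic data hazards: true
   dependence is read-after-write (RAW), anti dependence is
   write-after-read (WAR), and output dependence is write-after-write
   (WAW).  A scalar illustration of why each one forbids reordering
   (illustration only, NOT part of sched.c):  */

#if 0 /* illustrative sketch, not compiled with the scheduler */
static int
hazard_demo (void)
{
  int m, a, b;

  m = 1;        /* store  */
  a = m;        /* true (RAW): this read must see the store above  */
  m = 2;        /* anti (WAR): this store must not overtake the read  */
  m = 3;        /* output (WAW): swapping the two stores changes the
                   final value of M  */
  b = m;
  return a + b; /* 1 + 3 only if the original order is kept  */
}
#endif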
/* Helper functions for instruction scheduling.  */

/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */

  next = NEXT_INSN (elem);

#ifdef HAVE_cc0
  while (next && GET_CODE (next) == NOTE)
    next = NEXT_INSN (next);
#endif

  if (next && SCHED_GROUP_P (next))
    {
      /* Notes will never intervene here though, so don't bother checking
         for them.  */
      while (NEXT_INSN (next) && SCHED_GROUP_P (NEXT_INSN (next)))
        next = NEXT_INSN (next);

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
        return;

      /* Make the dependence to NEXT, the last insn of the group, instead
         of the original ELEM.  */
      elem = next;
    }

  /* Check that we don't already have this dependence.  */
  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    if (XEXP (link, 0) == elem)
      {
        /* If this is a more restrictive type of dependence than the
           existing one, then change the existing dependence to this
           type.  */
        if ((int) dep_type < (int) REG_NOTE_KIND (link))
          PUT_REG_NOTE_KIND (link, dep_type);
        return;
      }

  /* Might want to check one level of transitivity to save conses.  */

  link = rtx_alloc (INSN_LIST);
  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);
  XEXP (link, 0) = elem;
  XEXP (link, 1) = LOG_LINKS (insn);
  LOG_LINKS (insn) = link;
}

/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS of INSN.
   Abort if not found.  */

void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link;
       prev = link, link = XEXP (link, 1))
    {
      if (XEXP (link, 0) == elem)
        {
          if (prev)
            XEXP (prev, 1) = XEXP (link, 1);
          else
            LOG_LINKS (insn) = XEXP (link, 1);
          found = 1;
        }
    }

  if (! found)
    abort ();
  return;
}
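/* LOG_LINKS is a singly linked INSN_LIST, so add_dependence pushes a new
   link onto the front and remove_dependence unlinks by keeping a trailing
   PREV pointer.  The same pattern on a plain C list (illustration only,
   NOT part of sched.c; the node type and names are invented for the
   sketch):  */

#if 0 /* illustrative sketch, not compiled with the scheduler */
#include <stdlib.h>

struct node { int value; struct node *next; };

/* Push VALUE onto the front of *HEAD, like add_dependence prepending a
   new INSN_LIST to LOG_LINKS.  */
static void
push_front (struct node **head, int value)
{
  struct node *n = malloc (sizeof *n);
  n->value = value;
  n->next = *head;
  *head = n;
}

/* Unlink the first node holding VALUE, like remove_dependence: PREV
   trails LINK so either the predecessor or the head pointer can be
   re-pointed past the doomed node.  */
static void
unlink_value (struct node **head, int value)
{
  struct node *prev = NULL, *link;

  for (link = *head; link; prev = link, link = link->next)
    if (link->value == value)
      {
        if (prev)
          prev->next = link->next;
        else
          *head = link->next;
        free (link);
        return;
      }
  abort ();  /* not found, mirroring remove_dependence  */
}
#endif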