predicates.md

    op = SUBREG_REG (op);
  if (reload_in_progress || reload_completed)
    return REG_OK_FOR_INDEX_STRICT_P (op);
  else
    return REG_OK_FOR_INDEX_NONSTRICT_P (op);
})

;; Return false if this is any eliminable register.  Otherwise general_operand.
(define_predicate "general_no_elim_operand"
  (if_then_else (match_code "reg,subreg")
    (match_operand 0 "register_no_elim_operand")
    (match_operand 0 "general_operand")))

;; Return false if this is any eliminable register.  Otherwise
;; register_operand or a constant.
(define_predicate "nonmemory_no_elim_operand"
  (ior (match_operand 0 "register_no_elim_operand")
       (match_operand 0 "immediate_operand")))

;; Test for a valid operand for a call instruction.
(define_predicate "call_insn_operand"
  (ior (match_operand 0 "constant_call_address_operand")
       (ior (match_operand 0 "register_no_elim_operand")
            (match_operand 0 "memory_operand"))))

;; Similarly, but for tail calls, in which we cannot allow memory references.
(define_predicate "sibcall_insn_operand"
  (ior (match_operand 0 "constant_call_address_operand")
       (match_operand 0 "register_no_elim_operand")))

;; Match exactly zero.
(define_predicate "const0_operand"
  (match_code "const_int,const_double,const_vector")
{
  if (mode == VOIDmode)
    mode = GET_MODE (op);
  return op == CONST0_RTX (mode);
})

;; Match exactly one.
(define_predicate "const1_operand"
  (and (match_code "const_int")
       (match_test "op == const1_rtx")))

;; Match 2, 4, or 8.  Used for leal multiplicands.
(define_predicate "const248_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 2 || i == 4 || i == 8;
})

;; Match 0 or 1.
(define_predicate "const_0_to_1_operand"
  (and (match_code "const_int")
       (match_test "op == const0_rtx || op == const1_rtx")))

;; Match 0 to 3.
(define_predicate "const_0_to_3_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) >= 0 && INTVAL (op) <= 3")))

;; Match 0 to 7.
(define_predicate "const_0_to_7_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) >= 0 && INTVAL (op) <= 7")))

;; Match 0 to 15.
(define_predicate "const_0_to_15_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) >= 0 && INTVAL (op) <= 15")))

;; Match 0 to 63.
(define_predicate "const_0_to_63_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) >= 0 && INTVAL (op) <= 63")))

;; Match 0 to 255.
(define_predicate "const_0_to_255_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) >= 0 && INTVAL (op) <= 255")))

;; Match (0 to 255) * 8
(define_predicate "const_0_to_255_mul_8_operand"
  (match_code "const_int")
{
  unsigned HOST_WIDE_INT val = INTVAL (op);
  return val <= 255*8 && val % 8 == 0;
})

;; Return nonzero if OP is CONST_INT >= 1 and <= 31 (a valid operand
;; for shift & compare patterns, as shifting by 0 does not change flags).
(define_predicate "const_1_to_31_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) >= 1 && INTVAL (op) <= 31")))

;; Match 2 or 3.
(define_predicate "const_2_to_3_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) == 2 || INTVAL (op) == 3")))

;; Match 4 to 7.
(define_predicate "const_4_to_7_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) >= 4 && INTVAL (op) <= 7")))

;; Match exactly one bit in 4-bit mask.
(define_predicate "const_pow2_1_to_8_operand"
  (match_code "const_int")
{
  unsigned int log = exact_log2 (INTVAL (op));
  return log <= 3;
})

;; Match exactly one bit in 8-bit mask.
(define_predicate "const_pow2_1_to_128_operand"
  (match_code "const_int")
{
  unsigned int log = exact_log2 (INTVAL (op));
  return log <= 7;
})
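
The const_pow2_* predicates above lean on a small trick: exact_log2 returns -1 when its argument does not have exactly one set bit, and storing that -1 into an unsigned int turns it into a huge value that fails the "log <= 3" (or "log <= 7") test. Below is a minimal standalone C sketch of the same idea; exact_log2_sketch is a hypothetical stand-in, not GCC's implementation.

/* Standalone sketch (not GCC source): why "log <= 3" in
   const_pow2_1_to_8_operand accepts exactly 1, 2, 4 and 8.  */
#include <stdio.h>

static int
exact_log2_sketch (long long x)
{
  /* Hypothetical stand-in for GCC's exact_log2: the log2 of x if x is a
     power of two, otherwise -1.  */
  if (x <= 0 || (x & (x - 1)) != 0)
    return -1;
  int log = 0;
  while ((x >>= 1) != 0)
    log++;
  return log;
}

int
main (void)
{
  long long tests[] = { 0, 1, 2, 3, 4, 6, 8, 16 };
  for (unsigned i = 0; i < sizeof tests / sizeof tests[0]; i++)
    {
      unsigned int log = exact_log2_sketch (tests[i]);  /* -1 wraps to UINT_MAX */
      printf ("%lld -> %s\n", tests[i], log <= 3 ? "match" : "no match");
    }
  return 0;
}
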
;; True if this is a constant appropriate for an increment or decrement.
; APPLE LOCAL begin mainline 2006-04-19 4434601
(define_predicate "incdec_operand"
  (match_code "const_int")
{
  /* On Pentium4, the inc and dec operations cause an extra dependency on
     flag registers, since the carry flag is not set.  */
  if (!TARGET_USE_INCDEC && !optimize_size)
    return 0;
  return op == const1_rtx || op == constm1_rtx;
})
; APPLE LOCAL end mainline 2006-04-19 4434601

;; True for registers, or 1 or -1.  Used to optimize double-word shifts.
(define_predicate "reg_or_pm1_operand"
  (ior (match_operand 0 "register_operand")
       (and (match_code "const_int")
            (match_test "op == const1_rtx || op == constm1_rtx"))))

;; True if OP is acceptable as operand of DImode shift expander.
(define_predicate "shiftdi_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "register_operand")))

(define_predicate "ashldi_input_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "reg_or_pm1_operand")))

;; Return true if OP is a vector load from the constant pool with just
;; the first element nonzero.
(define_predicate "zero_extended_scalar_load_operand"
  (match_code "mem")
{
  unsigned n_elts;
  op = maybe_get_pool_constant (op);
  if (!op)
    return 0;
  if (GET_CODE (op) != CONST_VECTOR)
    return 0;
  n_elts = (GET_MODE_SIZE (GET_MODE (op))
            / GET_MODE_SIZE (GET_MODE_INNER (GET_MODE (op))));
  for (n_elts--; n_elts > 0; n_elts--)
    {
      rtx elt = CONST_VECTOR_ELT (op, n_elts);
      if (elt != CONST0_RTX (GET_MODE_INNER (GET_MODE (op))))
        return 0;
    }
  return 1;
})

;; APPLE LOCAL begin mainline candidate 4283414
;; Return true if operand is a vector constant that is all ones.
(define_predicate "vector_all_ones_operand"
  (match_code "const_vector")
{
  int nunits = GET_MODE_NUNITS (mode);

  if (GET_CODE (op) == CONST_VECTOR
      && CONST_VECTOR_NUNITS (op) == nunits)
    {
      int i;
      for (i = 0; i < nunits; ++i)
        {
          rtx x = CONST_VECTOR_ELT (op, i);
          if (x != constm1_rtx)
            return 0;
        }
      return 1;
    }
  return 0;
})

;; APPLE LOCAL begin mainline candidate 4476324
;; Return 1 when OP is operand acceptable for standard SSE move.
(define_predicate "vector_move_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (match_operand 0 "const0_operand")))

;; Return 1 when OP is nonimmediate or standard SSE constant.
(define_predicate "nonimmediate_or_sse_const_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (and (match_code "const_vector")
            (match_test "standard_sse_constant_p (op) > 0"))))
;; APPLE LOCAL end mainline candidate 4476324

;; Return true if OP is a nonimmediate or a zero.
(define_predicate "nonimmediate_or_0_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (match_operand 0 "const0_operand")))
;; APPLE LOCAL end mainline candidate 4283414

;; Return true if OP is a register or a zero.
(define_predicate "reg_or_0_operand"
  (ior (match_operand 0 "register_operand")
       (match_operand 0 "const0_operand")))

;; Return true if op is a valid address, and does not contain
;; a segment override.
(define_special_predicate "no_seg_address_operand"
  (match_operand 0 "address_operand")
{
  struct ix86_address parts;
  if (! ix86_decompose_address (op, &parts))
    abort ();
  return parts.seg == SEG_DEFAULT;
})
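
One detail of zero_extended_scalar_load_operand above is its loop bounds: it walks the constant vector from the last element down to, but not including, element 0, so only the tail has to be zero and the first element may hold any value. A standalone C sketch of that index pattern over a plain array follows; the helper name is illustrative, not a GCC API.

/* Standalone sketch (not GCC source): accept an array whose elements are
   all zero except possibly the first one, using the same
   "for (n_elts--; n_elts > 0; n_elts--)" walk as the predicate above.  */
#include <stdio.h>

static int
tail_elements_all_zero (const int *elts, unsigned n_elts)
{
  for (n_elts--; n_elts > 0; n_elts--)
    if (elts[n_elts] != 0)
      return 0;
  return 1;          /* element 0 is deliberately never examined */
}

int
main (void)
{
  int a[4] = { 7, 0, 0, 0 };   /* accepted: tail is all zero     */
  int b[4] = { 7, 0, 5, 0 };   /* rejected: element 2 is nonzero */
  printf ("%d %d\n",
          tail_elements_all_zero (a, 4),
          tail_elements_all_zero (b, 4));
  return 0;
}
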
;; Return nonzero if the rtx is known aligned.
(define_predicate "aligned_operand"
  (match_operand 0 "general_operand")
{
  struct ix86_address parts;

  /* Registers and immediate operands are always "aligned".  */
  if (GET_CODE (op) != MEM)
    return 1;

/* APPLE LOCAL begin mainline 2006-04-19 4434601 */
  /* All patterns using aligned_operand on memory operands end up
     promoting the memory operand to 64 bits and thus causing a memory
     mismatch.  */
  if (TARGET_MEMORY_MISMATCH_STALL && !optimize_size)
    return 0;
/* APPLE LOCAL end mainline 2006-04-19 4434601 */

  /* Don't even try to do any aligned optimizations with volatiles.  */
  if (MEM_VOLATILE_P (op))
    return 0;

  op = XEXP (op, 0);

  /* Pushes and pops are only valid on the stack pointer.  */
  if (GET_CODE (op) == PRE_DEC
      || GET_CODE (op) == POST_INC)
    return 1;

  /* Decode the address.  */
  if (!ix86_decompose_address (op, &parts))
    abort ();

  /* Look for some component that isn't known to be aligned.  */
  if (parts.index)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.index)) * parts.scale < 32)
        return 0;
    }
  if (parts.base)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.base)) < 32)
        return 0;
    }
  if (parts.disp)
    {
      if (GET_CODE (parts.disp) != CONST_INT
          || (INTVAL (parts.disp) & 3) != 0)
        return 0;
    }

  /* Didn't find one -- this must be an aligned address.  */
  return 1;
})

;; Returns 1 if OP is memory operand with a displacement.
(define_predicate "memory_displacement_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  if (!ix86_decompose_address (XEXP (op, 0), &parts))
    abort ();
  return parts.disp != NULL_RTX;
})

;; Returns 1 if OP is memory operand that cannot be represented
;; by the modRM array.
(define_predicate "long_memory_operand"
  (and (match_operand 0 "memory_operand")
       (match_test "memory_address_length (op) != 0")))

;; Return 1 if OP is a comparison operator that can be issued by fcmov.
(define_predicate "fcmov_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  enum machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode || inmode == CCFPUmode)
    {
      enum rtx_code second_code, bypass_code;
      ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
      if (bypass_code != UNKNOWN || second_code != UNKNOWN)
        return 0;
      code = ix86_fp_compare_code_to_integer (code);
    }
  /* i387 supports just a limited number of condition codes.  */
  switch (code)
    {
    case LTU: case GTU: case LEU: case GEU:
      if (inmode == CCmode || inmode == CCFPmode || inmode == CCFPUmode)
        return 1;
      return 0;
    case ORDERED: case UNORDERED:
    case EQ: case NE:
      return 1;
    default:
      return 0;
    }
})
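
aligned_operand above decides alignment for a decomposed address of the form base + index * scale + disp: the base and the scaled index must be registers whose known pointer alignment (REGNO_POINTER_ALIGN, in bits) reaches 32, and any constant displacement must be a multiple of 4. A standalone C sketch of that decision over plain integers follows; the struct and helper are illustrative stand-ins, not GCC's ix86_address or ix86_decompose_address.

/* Standalone sketch (not GCC source): the alignment rule of aligned_operand
   restated for a decomposed address base + index*scale + disp.  Alignments
   are given in bits, as REGNO_POINTER_ALIGN does; 32 bits = 4 bytes.  */
#include <stdio.h>

struct addr_parts
{
  int has_base, base_align_bits;
  int has_index, index_align_bits, scale;
  int has_disp;
  long disp;
};

static int
address_known_aligned (const struct addr_parts *p)
{
  if (p->has_index && p->index_align_bits * p->scale < 32)
    return 0;
  if (p->has_base && p->base_align_bits < 32)
    return 0;
  if (p->has_disp && (p->disp & 3) != 0)
    return 0;
  return 1;   /* no component is known to be misaligned */
}

int
main (void)
{
  struct addr_parts ok  = { 1, 32, 1, 16, 2, 1, 8 };  /* 16-bit-aligned index, scale 2 */
  struct addr_parts bad = { 1, 32, 0, 0, 0, 1, 6 };   /* disp 6 is not a multiple of 4 */
  printf ("%d %d\n", address_known_aligned (&ok), address_known_aligned (&bad));
  return 0;
}
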
;; Return 1 if OP is a comparison that can be used in the CMPSS/CMPPS insns.
;; The first set are supported directly; the second set can't be done with
;; full IEEE support, i.e. NaNs.
;;
;; ??? It would seem that we have a lot of uses of this predicate that pass
;; it the wrong mode.  We got away with this because the old function didn't
;; check the mode at all.  Mirror that for now by calling this a special
;; predicate.
(define_special_predicate "sse_comparison_operator"
  (match_code "eq,lt,le,unordered,ne,unge,ungt,ordered"))

;; Return 1 if OP is a valid comparison operator in valid mode.
(define_predicate "ix86_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  enum machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode || inmode == CCFPUmode)
    {
      enum rtx_code second_code, bypass_code;
      ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
      return (bypass_code == UNKNOWN && second_code == UNKNOWN);
    }
  switch (code)
    {
    case EQ: case NE:
      return 1;
    case LT: case GE:
      if (inmode == CCmode || inmode == CCGCmode
          || inmode == CCGOCmode || inmode == CCNOmode)
        return 1;
      return 0;
    case LTU: case GTU: case LEU: case ORDERED: case UNORDERED: case GEU:
      if (inmode == CCmode)
        return 1;
      return 0;
    case GT: case LE:
      if (inmode == CCmode || inmode == CCGCmode || inmode == CCNOmode)
        return 1;
      return 0;
    default:
      return 0;
    }
})

;; Return 1 if OP is a valid comparison operator testing the carry flag
;; to be set.
(define_predicate "ix86_carry_flag_operator"
  (match_code "ltu,lt,unlt,gt,ungt,le,unle,ge,unge,ltgt,uneq")
{
  enum machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (GET_CODE (XEXP (op, 0)) != REG
      || REGNO (XEXP (op, 0)) != FLAGS_REG
      || XEXP (op, 1) != const0_rtx)
    return 0;

  if (inmode == CCFPmode || inmode == CCFPUmode)
    {
      enum rtx_code second_code, bypass_code;
      ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
      if (bypass_code != UNKNOWN || second_code != UNKNOWN)
        return 0;
      code = ix86_fp_compare_code_to_integer (code);
    }
  else if (inmode != CCmode)
    return 0;

  return code == LTU;
})

;; Nearly general operand, but accept any const_double, since we wish
;; to be able to drop them into memory rather than have them get pulled
;; into registers.
(define_predicate "cmp_fp_expander_operand"
  (ior (match_code "const_double")
       (match_operand 0 "general_operand")))

;; Return true if this is a valid binary floating-point operation.
(define_predicate "binary_fp_operator"
  (match_code "plus,minus,mult,div"))

;; Return true if this is a multiply operation.
(define_predicate "mult_operator"
  (match_code "mult"))

;; Return true if this is a division operation.
(define_predicate "div_operator"
  (match_code "div"))

;; Return true if this is a float extend operation.
(define_predicate "float_operator"
  (match_code "float"))

;; Return true for ARITHMETIC_P.
(define_predicate "arith_or_logical_operator"
  (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax,compare,minus,div,
               mod,udiv,umod,ashift,rotate,ashiftrt,lshiftrt,rotatert"))

;; Return 1 if OP is a binary operator that can be promoted to a wider mode.
;; Modern CPUs have the same latency for HImode and SImode multiply,
;; but 386 and 486 do HImode multiply faster.
(define_predicate "promotable_binary_operator"
  (ior (match_code "plus,and,ior,xor,ashift")
       (and (match_code "mult")
            (match_test "ix86_tune > PROCESSOR_I486"))))
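
ix86_carry_flag_operator above boils down to "return code == LTU": after a compare, the x86 carry flag is set exactly when the unsigned less-than relation holds, so only the LTU form of a flags-register test is accepted, which is what carry-consuming instructions such as sbb rely on. The following standalone C check demonstrates the carry/borrow identity, with the borrow computed in plain arithmetic rather than read from EFLAGS.

/* Standalone sketch (not GCC source): for unsigned 32-bit a and b, the
   borrow out of a - b (the x86 carry flag after "cmp a, b") equals the
   unsigned comparison a < b, which is what RTL code LTU expresses.  */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  uint32_t pairs[][2] = { { 1, 2 }, { 2, 1 }, { 0, 0 }, { 0, 0xffffffffu } };
  for (unsigned i = 0; i < sizeof pairs / sizeof pairs[0]; i++)
    {
      uint32_t a = pairs[i][0], b = pairs[i][1];
      int borrow = (uint64_t) a - (uint64_t) b > 0xffffffffull;
      printf ("a=%u b=%u  borrow=%d  (a<b)=%d\n", a, b, borrow, (int) (a < b));
    }
  return 0;
}
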
;; To avoid problems when jump re-emits comparisons like testqi_ext_ccno_0,
;; re-recognize the operand to avoid a copy_to_mode_reg that will fail.
;;
;; ??? It seems likely that this will only work because cmpsi is an
;; expander, and no actual insns use this.
(define_predicate "cmpsi_operand_1"
  (match_code "and")
{
  return (GET_MODE (op) == SImode
          && GET_CODE (XEXP (op, 0)) == ZERO_EXTRACT
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT
          && GET_CODE (XEXP (XEXP (op, 0), 2)) == CONST_INT
          && INTVAL (XEXP (XEXP (op, 0), 1)) == 8
          && INTVAL (XEXP (XEXP (op, 0), 2)) == 8
          && GET_CODE (XEXP (op, 1)) == CONST_INT);
})

(define_predicate "cmpsi_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (match_operand 0 "cmpsi_operand_1")))

(define_predicate "compare_operator"
  (match_code "compare"))

(define_predicate "absneg_operator"
  (match_code "abs,neg"))