s390.c
        case NE:  return CC0 | CC1 | CC2;
        default:  return -1;
        }
      break;

    case CCLmode:
      switch (GET_CODE (code))
        {
        case EQ:  return CC0 | CC2;
        case NE:  return CC1 | CC3;
        default:  return -1;
        }
      break;

    case CCL1mode:
      switch (GET_CODE (code))
        {
        case LTU: return CC2 | CC3;  /* carry */
        case GEU: return CC0 | CC1;  /* no carry */
        default:  return -1;
        }
      break;

    case CCL2mode:
      switch (GET_CODE (code))
        {
        case GTU: return CC0 | CC1;  /* borrow */
        case LEU: return CC2 | CC3;  /* no borrow */
        default:  return -1;
        }
      break;

    case CCL3mode:
      switch (GET_CODE (code))
        {
        case EQ:  return CC0 | CC2;
        case NE:  return CC1 | CC3;
        case LTU: return CC1;
        case GTU: return CC3;
        case LEU: return CC1 | CC2;
        case GEU: return CC2 | CC3;
        default:  return -1;
        }

    case CCUmode:
      switch (GET_CODE (code))
        {
        case EQ:  return CC0;
        case NE:  return CC1 | CC2 | CC3;
        case LTU: return CC1;
        case GTU: return CC2;
        case LEU: return CC0 | CC1;
        case GEU: return CC0 | CC2;
        default:  return -1;
        }
      break;

    case CCURmode:
      switch (GET_CODE (code))
        {
        case EQ:  return CC0;
        case NE:  return CC2 | CC1 | CC3;
        case LTU: return CC2;
        case GTU: return CC1;
        case LEU: return CC0 | CC2;
        case GEU: return CC0 | CC1;
        default:  return -1;
        }
      break;

    case CCAPmode:
      switch (GET_CODE (code))
        {
        case EQ:  return CC0;
        case NE:  return CC1 | CC2 | CC3;
        case LT:  return CC1 | CC3;
        case GT:  return CC2;
        case LE:  return CC0 | CC1 | CC3;
        case GE:  return CC0 | CC2;
        default:  return -1;
        }
      break;

    case CCANmode:
      switch (GET_CODE (code))
        {
        case EQ:  return CC0;
        case NE:  return CC1 | CC2 | CC3;
        case LT:  return CC1;
        case GT:  return CC2 | CC3;
        case LE:  return CC0 | CC1;
        case GE:  return CC0 | CC2 | CC3;
        default:  return -1;
        }
      break;

    case CCSmode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC0;
        case NE:        return CC1 | CC2 | CC3;
        case LT:        return CC1;
        case GT:        return CC2;
        case LE:        return CC0 | CC1;
        case GE:        return CC0 | CC2;
        case UNORDERED: return CC3;
        case ORDERED:   return CC0 | CC1 | CC2;
        case UNEQ:      return CC0 | CC3;
        case UNLT:      return CC1 | CC3;
        case UNGT:      return CC2 | CC3;
        case UNLE:      return CC0 | CC1 | CC3;
        case UNGE:      return CC0 | CC2 | CC3;
        case LTGT:      return CC1 | CC2;
        default:        return -1;
        }
      break;

    case CCSRmode:
      switch (GET_CODE (code))
        {
        case EQ:        return CC0;
        case NE:        return CC2 | CC1 | CC3;
        case LT:        return CC2;
        case GT:        return CC1;
        case LE:        return CC0 | CC2;
        case GE:        return CC0 | CC1;
        case UNORDERED: return CC3;
        case ORDERED:   return CC0 | CC2 | CC1;
        case UNEQ:      return CC0 | CC3;
        case UNLT:      return CC2 | CC3;
        case UNGT:      return CC1 | CC3;
        case UNLE:      return CC0 | CC2 | CC3;
        case UNGE:      return CC0 | CC1 | CC3;
        case LTGT:      return CC2 | CC1;
        default:        return -1;
        }
      break;

    default:
      return -1;
    }
}

/* If INV is false, return assembler mnemonic string to implement
   a branch specified by CODE.  If INV is true, return mnemonic
   for the corresponding inverted branch.  */

static const char *
s390_branch_condition_mnemonic (rtx code, int inv)
{
  static const char *const mnemonic[16] =
    {
      NULL, "o", "h", "nle",
      "l", "nhe", "lh", "ne",
      "e", "nlh", "he", "nl",
      "le", "nh", "no", NULL
    };

  int mask = s390_branch_condition_mask (code);
  gcc_assert (mask >= 0);

  if (inv)
    mask ^= 15;

  if (mask < 1 || mask > 14)
    abort ();

  return mnemonic[mask];
}
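/* Editor's note, not part of the original s390.c: a worked example of the
   mapping above.  Assuming the CC0..CC3 mask bits carry their usual s390
   values 8, 4, 2 and 1 (consistent with the table: EQ yields CC0 and
   mnemonic[8] is "e"), a signed greater-than test in CCSmode returns CC2,
   so s390_branch_condition_mask gives 2 and the branch mnemonic is
   mnemonic[2] == "h" (branch on high).  The inverted branch complements
   the 4-bit field: 2 ^ 15 == 13, giving mnemonic[13] == "nh".  */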
/* Return the part of op which has a value different from def.
   The size of the part is determined by mode.
   Use this function only if you already know that op really
   contains such a part.  */

unsigned HOST_WIDE_INT
s390_extract_part (rtx op, enum machine_mode mode, int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
  int part_bits = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT part_mask = (1 << part_bits) - 1;
  int i;

  for (i = 0; i < max_parts; i++)
    {
      if (i == 0)
        value = (unsigned HOST_WIDE_INT) INTVAL (op);
      else
        value >>= part_bits;

      if ((value & part_mask) != (def & part_mask))
        return value & part_mask;
    }

  abort ();
}

/* If OP is an integer constant of mode MODE with exactly one
   part of mode PART_MODE unequal to DEF, return the number of that
   part.  Otherwise, return -1.  */

int
s390_single_part (rtx op, enum machine_mode mode,
                  enum machine_mode part_mode, int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
  unsigned HOST_WIDE_INT part_mask = (1 << GET_MODE_BITSIZE (part_mode)) - 1;
  int i, part = -1;

  if (GET_CODE (op) != CONST_INT)
    return -1;

  for (i = 0; i < n_parts; i++)
    {
      if (i == 0)
        value = (unsigned HOST_WIDE_INT) INTVAL (op);
      else
        value >>= GET_MODE_BITSIZE (part_mode);

      if ((value & part_mask) != (def & part_mask))
        {
          if (part != -1)
            return -1;
          else
            part = i;
        }
    }

  return part == -1 ? -1 : n_parts - 1 - part;
}
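/* Editor's sketch, not part of the original s390.c: a stand-alone model of
   the part scan done by s390_extract_part and s390_single_part above, using
   plain C types instead of HOST_WIDE_INT and machine modes.  Example: for
   op == 0x00000000ffff0000 scanned in 16-bit parts with def == 0, the only
   part different from def is 0xffff; it is found at scan index 1 and
   numbered 4 - 1 - 1 == 2, i.e. counted from the most significant end,
   which matches what s390_single_part returns.  */
#if 0  /* Illustration only, never compiled.  */
static int
example_single_part (unsigned long long op, int part_bits, int n_parts,
                     unsigned long long def, unsigned long long *part_value)
{
  unsigned long long part_mask = (1ULL << part_bits) - 1;
  int i, part = -1;

  for (i = 0; i < n_parts; i++)
    {
      unsigned long long p = (op >> (i * part_bits)) & part_mask;

      if (p != (def & part_mask))
        {
          if (part != -1)
            return -1;          /* More than one differing part.  */
          part = i;
          *part_value = p;
        }
    }

  /* Number the single differing part from the most significant end,
     mirroring the big-endian part numbering used by s390_single_part.  */
  return part == -1 ? -1 : n_parts - 1 - part;
}
#endif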
/* Check whether we can (and want to) split a double-word
   move in mode MODE from SRC to DST into two single-word
   moves, moving the subword FIRST_SUBWORD first.  */

bool
s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
{
  /* Floating point registers cannot be split.  */
  if (FP_REG_P (src) || FP_REG_P (dst))
    return false;

  /* We don't need to split if operands are directly accessible.  */
  if (s_operand (src, mode) || s_operand (dst, mode))
    return false;

  /* Non-offsettable memory references cannot be split.  */
  if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
      || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
    return false;

  /* Moving the first subword must not clobber a register
     needed to move the second subword.  */
  if (register_operand (dst, mode))
    {
      rtx subreg = operand_subword (dst, first_subword, 0, mode);
      if (reg_overlap_mentioned_p (subreg, src))
        return false;
    }

  return true;
}

/* Check whether the address of memory reference MEM2 equals exactly
   the address of memory reference MEM1 plus DELTA.  Return true if
   we can prove this to be the case, false otherwise.  */

bool
s390_offset_p (rtx mem1, rtx mem2, rtx delta)
{
  rtx addr1, addr2, addr_delta;

  if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
    return false;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);
  if (!addr_delta || !rtx_equal_p (addr_delta, delta))
    return false;

  return true;
}

/* Expand logical operator CODE in mode MODE with operands OPERANDS.  */

void
s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
                              rtx *operands)
{
  enum machine_mode wmode = mode;
  rtx dst = operands[0];
  rtx src1 = operands[1];
  rtx src2 = operands[2];
  rtx op, clob, tem;

  /* If we cannot handle the operation directly, use a temp register.  */
  if (!s390_logical_operator_ok_p (operands))
    dst = gen_reg_rtx (mode);

  /* QImode and HImode patterns make sense only if we have a destination
     in memory.  Otherwise perform the operation in SImode.  */
  if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
    wmode = SImode;

  /* Widen operands if required.  */
  if (mode != wmode)
    {
      if (GET_CODE (dst) == SUBREG
          && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
        dst = tem;
      else if (REG_P (dst))
        dst = gen_rtx_SUBREG (wmode, dst, 0);
      else
        dst = gen_reg_rtx (wmode);

      if (GET_CODE (src1) == SUBREG
          && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
        src1 = tem;
      else if (GET_MODE (src1) != VOIDmode)
        src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);

      if (GET_CODE (src2) == SUBREG
          && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
        src2 = tem;
      else if (GET_MODE (src2) != VOIDmode)
        src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
    }

  /* Emit the instruction.  */
  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
  clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));

  /* Fix up the destination if needed.  */
  if (dst != operands[0])
    emit_move_insn (operands[0], gen_lowpart (mode, dst));
}

/* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR).  */

bool
s390_logical_operator_ok_p (rtx *operands)
{
  /* If the destination operand is in memory, it needs to coincide
     with one of the source operands.  After reload, it has to be
     the first source operand.  */
  if (GET_CODE (operands[0]) == MEM)
    return rtx_equal_p (operands[0], operands[1])
           || (!reload_completed && rtx_equal_p (operands[0], operands[2]));

  return true;
}

/* Narrow logical operation CODE of memory operand MEMOP with immediate
   operand IMMOP to switch from SS to SI type instructions.  */

void
s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
{
  int def = code == AND ? -1 : 0;
  HOST_WIDE_INT mask;
  int part;

  gcc_assert (GET_CODE (*memop) == MEM);
  gcc_assert (!MEM_VOLATILE_P (*memop));

  mask = s390_extract_part (*immop, QImode, def);
  part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
  gcc_assert (part >= 0);

  *memop = adjust_address (*memop, QImode, part);
  *immop = gen_int_mode (mask, QImode);
}
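/* Editor's note, not part of the original s390.c: a worked example for
   s390_narrow_logical_operator above.  For an AND of an SImode memory
   operand with the constant 0xffffff00, DEF is -1 (bytes equal to 0xff are
   left untouched by AND), so the single QImode part that differs from DEF
   is 0x00 and s390_single_part numbers it 3, the least significant byte in
   the big-endian layout used here.  The SS-type operation can then be
   replaced by a single byte-wide SI-type instruction (an NI of 0x00
   applied at byte offset 3 of the memory operand).  */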
"z900" : "g5"; for (i = 0; i < pta_size; i++) if (! strcmp (s390_arch_string, processor_alias_table[i].name)) { s390_arch = processor_alias_table[i].processor; s390_arch_flags = processor_alias_table[i].flags; break; } if (i == pta_size) error ("Unknown cpu used in -march=%s.", s390_arch_string); /* Determine processor to tune for. */ if (!s390_tune_string) { s390_tune = s390_arch; s390_tune_flags = s390_arch_flags; s390_tune_string = s390_arch_string; } else { for (i = 0; i < pta_size; i++) if (! strcmp (s390_tune_string, processor_alias_table[i].name)) { s390_tune = processor_alias_table[i].processor; s390_tune_flags = processor_alias_table[i].flags; break; } if (i == pta_size) error ("Unknown cpu used in -mtune=%s.", s390_tune_string); } /* Sanity checks. */ if (TARGET_ZARCH && !(s390_arch_flags & PF_ZARCH)) error ("z/Architecture mode not supported on %s.", s390_arch_string); if (TARGET_64BIT && !TARGET_ZARCH) error ("64-bit ABI not supported in ESA/390 mode."); /* Set processor cost function. */ if (s390_tune == PROCESSOR_2084_Z990) s390_cost = &z990_cost;