⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 s390.c

📁 linux下编程用 编译软件
💻 C
📖 第 1 页 / 共 5 页
字号:
/* If INV is false, return assembler mnemonic string to implement
   a branch specified by CODE.  If INV is true, return mnemonic
   for the corresponding inverted branch.  */

static const char *
s390_branch_condition_mnemonic (rtx code, int inv)
{
  /* Indexed by the 4-bit condition-code mask; masks 0 (branch never)
     and 15 (branch always) have no conditional mnemonic, hence NULL.  */
  static const char *const mnemonic[16] =
    {
      NULL, "o", "h", "nle",
      "l", "nhe", "lh", "ne",
      "e", "nlh", "he", "nl",
      "le", "nh", "no", NULL
    };

  int mask = s390_branch_condition_mask (code);
  gcc_assert (mask >= 0);

  /* Inverting a branch flips all four condition-code bits.  */
  if (inv)
    mask ^= 15;

  /* Callers must never ask for the never/always cases.  */
  gcc_assert (mask >= 1 && mask <= 14);

  return mnemonic[mask];
}

/* Return the part of op which has a value different from def.
   The size of the part is determined by mode.
   Use this function only if you already know that op really
   contains such a part.  */

unsigned HOST_WIDE_INT
s390_extract_part (rtx op, enum machine_mode mode, int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int max_parts = HOST_BITS_PER_WIDE_INT / GET_MODE_BITSIZE (mode);
  int part_bits = GET_MODE_BITSIZE (mode);
  unsigned HOST_WIDE_INT part_mask
    = ((unsigned HOST_WIDE_INT)1 << part_bits) - 1;
  int i;

  /* Scan the parts starting from the least significant one; the first
     part that differs from DEF is returned.  */
  for (i = 0; i < max_parts; i++)
    {
      if (i == 0)
	value = (unsigned HOST_WIDE_INT) INTVAL (op);
      else
	value >>= part_bits;

      if ((value & part_mask) != (def & part_mask))
	return value & part_mask;
    }

  /* Per the contract above, a differing part must exist.  */
  gcc_unreachable ();
}

/* If OP is an integer constant of mode MODE with exactly one
   part of mode PART_MODE unequal to DEF, return the number of that
   part. Otherwise, return -1.
*/

int
s390_single_part (rtx op,
		  enum machine_mode mode,
		  enum machine_mode part_mode,
		  int def)
{
  unsigned HOST_WIDE_INT value = 0;
  int n_parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (part_mode);
  unsigned HOST_WIDE_INT part_mask
    = ((unsigned HOST_WIDE_INT)1 << GET_MODE_BITSIZE (part_mode)) - 1;
  int i, part = -1;

  if (GET_CODE (op) != CONST_INT)
    return -1;

  /* Walk the parts from least significant upwards, remembering the
     index of the (so far unique) part that differs from DEF.  */
  for (i = 0; i < n_parts; i++)
    {
      if (i == 0)
	value = (unsigned HOST_WIDE_INT) INTVAL (op);
      else
	value >>= GET_MODE_BITSIZE (part_mode);

      if ((value & part_mask) != (def & part_mask))
	{
	  /* A second differing part disqualifies OP.  */
	  if (part != -1)
	    return -1;
	  else
	    part = i;
	}
    }

  /* Convert from least-significant-first counting to the part number
     as seen from the most significant end.  */
  return part == -1 ? -1 : n_parts - 1 - part;
}

/* Check whether we can (and want to) split a double-word
   move in mode MODE from SRC to DST into two single-word
   moves, moving the subword FIRST_SUBWORD first.  */

bool
s390_split_ok_p (rtx dst, rtx src, enum machine_mode mode, int first_subword)
{
  /* Floating point registers cannot be split.  */
  if (FP_REG_P (src) || FP_REG_P (dst))
    return false;

  /* We don't need to split if operands are directly accessible.  */
  if (s_operand (src, mode) || s_operand (dst, mode))
    return false;

  /* Non-offsettable memory references cannot be split.  */
  if ((GET_CODE (src) == MEM && !offsettable_memref_p (src))
      || (GET_CODE (dst) == MEM && !offsettable_memref_p (dst)))
    return false;

  /* Moving the first subword must not clobber a register
     needed to move the second subword.  */
  if (register_operand (dst, mode))
    {
      rtx subreg = operand_subword (dst, first_subword, 0, mode);
      if (reg_overlap_mentioned_p (subreg, src))
        return false;
    }

  return true;
}

/* Return true if it can be proven that [MEM1, MEM1 + SIZE]
   and [MEM2, MEM2 + SIZE] do overlap and false
   otherwise.
*/

bool
s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
{
  rtx addr1, addr2, addr_delta;
  HOST_WIDE_INT delta;

  /* Non-MEM operands: be pessimistic and assume overlap.  */
  if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
    return true;

  /* Empty ranges can never overlap.  */
  if (size == 0)
    return false;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);

  /* This overlapping check is used by peepholes merging memory block operations.
     Overlapping operations would otherwise be recognized by the S/390 hardware
     and would fall back to a slower implementation. Allowing overlapping
     operations would lead to slow code but not to wrong code. Therefore we are
     somewhat optimistic if we cannot prove that the memory blocks are
     overlapping.
     That's why we return false here although this may accept operations on
     overlapping memory areas.  */
  if (!addr_delta || GET_CODE (addr_delta) != CONST_INT)
    return false;

  delta = INTVAL (addr_delta);

  /* The blocks overlap iff the start addresses are less than SIZE apart.  */
  if (delta == 0
      || (delta > 0 && delta < size)
      || (delta < 0 && -delta < size))
    return true;

  return false;
}

/* Check whether the address of memory reference MEM2 equals exactly
   the address of memory reference MEM1 plus DELTA.  Return true if
   we can prove this to be the case, false otherwise.  */

bool
s390_offset_p (rtx mem1, rtx mem2, rtx delta)
{
  rtx addr1, addr2, addr_delta;

  if (GET_CODE (mem1) != MEM || GET_CODE (mem2) != MEM)
    return false;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  addr_delta = simplify_binary_operation (MINUS, Pmode, addr2, addr1);

  /* Only a provable, exact match counts.  */
  if (!addr_delta || !rtx_equal_p (addr_delta, delta))
    return false;

  return true;
}

/* Expand logical operator CODE in mode MODE with operands OPERANDS.  */

void
s390_expand_logical_operator (enum rtx_code code, enum machine_mode mode,
			      rtx *operands)
{
  enum machine_mode wmode = mode;
  rtx dst = operands[0];
  rtx src1 = operands[1];
  rtx src2 = operands[2];
  rtx op, clob, tem;

  /* If we cannot handle the operation directly, use a temp register.
*/
  if (!s390_logical_operator_ok_p (operands))
    dst = gen_reg_rtx (mode);

  /* QImode and HImode patterns make sense only if we have a destination
     in memory.  Otherwise perform the operation in SImode.  */
  if ((mode == QImode || mode == HImode) && GET_CODE (dst) != MEM)
    wmode = SImode;

  /* Widen operands if required.  */
  if (mode != wmode)
    {
      /* Widen the destination: reuse a paradoxical subreg where
         possible, otherwise fall back to a fresh register.  */
      if (GET_CODE (dst) == SUBREG
	  && (tem = simplify_subreg (wmode, dst, mode, 0)) != 0)
	dst = tem;
      else if (REG_P (dst))
	dst = gen_rtx_SUBREG (wmode, dst, 0);
      else
        dst = gen_reg_rtx (wmode);

      /* Widen the first source the same way; VOIDmode operands
         (e.g. constants) are left alone.  */
      if (GET_CODE (src1) == SUBREG
	  && (tem = simplify_subreg (wmode, src1, mode, 0)) != 0)
	src1 = tem;
      else if (GET_MODE (src1) != VOIDmode)
	src1 = gen_rtx_SUBREG (wmode, force_reg (mode, src1), 0);

      /* Likewise for the second source.  */
      if (GET_CODE (src2) == SUBREG
	  && (tem = simplify_subreg (wmode, src2, mode, 0)) != 0)
	src2 = tem;
      else if (GET_MODE (src2) != VOIDmode)
	src2 = gen_rtx_SUBREG (wmode, force_reg (mode, src2), 0);
    }

  /* Emit the instruction.  The logical operation clobbers the
     condition code register.  */
  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, wmode, src1, src2));
  clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));

  /* Fix up the destination if needed.  */
  if (dst != operands[0])
    emit_move_insn (operands[0], gen_lowpart (mode, dst));
}

/* Check whether OPERANDS are OK for a logical operation (AND, IOR, XOR).  */

bool
s390_logical_operator_ok_p (rtx *operands)
{
  /* If the destination operand is in memory, it needs to coincide
     with one of the source operands.  After reload, it has to be
     the first source operand.  */
  if (GET_CODE (operands[0]) == MEM)
    return rtx_equal_p (operands[0], operands[1])
	   || (!reload_completed && rtx_equal_p (operands[0], operands[2]));

  return true;
}

/* Narrow logical operation CODE of memory operand MEMOP with immediate
   operand IMMOP to switch from SS to SI type instructions.
*/

void
s390_narrow_logical_operator (enum rtx_code code, rtx *memop, rtx *immop)
{
  /* The byte value that leaves memory unchanged under CODE:
     all-ones for AND, zero for IOR/XOR.  */
  int def = code == AND ? -1 : 0;
  HOST_WIDE_INT mask;
  int part;

  gcc_assert (GET_CODE (*memop) == MEM);
  gcc_assert (!MEM_VOLATILE_P (*memop));

  /* Find the single byte of the immediate that actually differs
     from DEF, and its position within the memory operand.  */
  mask = s390_extract_part (*immop, QImode, def);
  part = s390_single_part (*immop, GET_MODE (*memop), QImode, def);
  gcc_assert (part >= 0);

  /* Rewrite the operands to address just that one byte.  */
  *memop = adjust_address (*memop, QImode, part);
  *immop = gen_int_mode (mask, QImode);
}

/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
s390_init_machine_status (void)
{
  return ggc_alloc_cleared (sizeof (struct machine_function));
}

/* Change optimizations to be performed, depending on the
   optimization level.

   LEVEL is the optimization level specified; 2 if `-O2' is
   specified, 1 if `-O' is specified, and 0 if neither is specified.

   SIZE is nonzero if `-Os' is specified and zero otherwise.  */

void
optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
{
  /* ??? There are apparently still problems with -fcaller-saves.  */
  flag_caller_saves = 0;

  /* By default, always emit DWARF-2 unwind info.  This allows debugging
     without maintaining a stack frame back-chain.  */
  flag_asynchronous_unwind_tables = 1;

  /* Use MVCLE instructions to decrease code size if requested.  */
  if (size != 0)
    target_flags |= MASK_MVCLE;
}

/* Return true if ARG is the name of a processor.  Set *TYPE and *FLAGS
   to the associated processor_type and processor_flags if so.  */

static bool
s390_handle_arch_option (const char *arg,
			 enum processor_type *type,
			 enum processor_flags *flags)
{
  static struct pta
    {
      const char *const name;		/* processor name or nickname.
*/
      const enum processor_type processor;
      const enum processor_flags flags;
    }
  const processor_alias_table[] =
    {
      {"g5", PROCESSOR_9672_G5, PF_IEEE_FLOAT},
      {"g6", PROCESSOR_9672_G6, PF_IEEE_FLOAT},
      {"z900", PROCESSOR_2064_Z900, PF_IEEE_FLOAT | PF_ZARCH},
      {"z990", PROCESSOR_2084_Z990, PF_IEEE_FLOAT | PF_ZARCH
				    | PF_LONG_DISPLACEMENT},
      {"z9-109", PROCESSOR_2094_Z9_109, PF_IEEE_FLOAT | PF_ZARCH
                                       | PF_LONG_DISPLACEMENT | PF_EXTIMM},
    };
  size_t i;

  /* Linear search is fine; the table is tiny and this runs only
     during option processing.  */
  for (i = 0; i < ARRAY_SIZE (processor_alias_table); i++)
    if (strcmp (arg, processor_alias_table[i].name) == 0)
      {
	*type = processor_alias_table[i].processor;
	*flags = processor_alias_table[i].flags;
	return true;
      }
  return false;
}

/* Implement TARGET_HANDLE_OPTION.  */

static bool
s390_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_march_:
      return s390_handle_arch_option (arg, &s390_arch, &s390_arch_flags);

    case OPT_mstack_guard_:
      if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_guard) != 1)
	return false;
      /* A non-power-of-2 value is diagnosed but the option is still
	 accepted (error() reports and compilation continues).  */
      if (exact_log2 (s390_stack_guard) == -1)
	error ("stack guard value must be an exact power of 2");
      return true;

    case OPT_mstack_size_:
      if (sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_stack_size) != 1)
	return false;
      if (exact_log2 (s390_stack_size) == -1)
	error ("stack size must be an exact power of 2");
      return true;

    case OPT_mtune_:
      return s390_handle_arch_option (arg, &s390_tune, &s390_tune_flags);

    case OPT_mwarn_framesize_:
      return sscanf (arg, HOST_WIDE_INT_PRINT_DEC, &s390_warn_framesize) == 1;

    default:
      /* Options not handled here are accepted as-is.  */
      return true;
    }
}

void
override_options (void)
{
  /* Set up function hooks.  */
  init_machine_status = s390_init_machine_status;

  /* Architecture mode defaults according to ABI.
*/
  if (!(target_flags_explicit & MASK_ZARCH))
    {
      /* -mzarch/-mesa was not given explicitly: default to z/Architecture
	 for the 64-bit ABI, ESA/390 otherwise.  */
      if (TARGET_64BIT)
	target_flags |= MASK_ZARCH;
      else
	target_flags &= ~MASK_ZARCH;
    }

  /* Determine processor architectural level.  */
  if (!s390_arch_string)
    {
      s390_arch_string = TARGET_ZARCH? "z900" : "g5";
      s390_handle_arch_option (s390_arch_string, &s390_arch, &s390_arch_flags);
    }

  /* Determine processor to tune for.  */
  if (s390_tune == PROCESSOR_max)
    {
      /* No -mtune given: tune for the selected architecture.  */
      s390_tune = s390_arch;
      s390_tune_flags = s390_arch_flags;
    }

  /* Sanity checks.  */
  if (TARGET_ZARCH && !(s390_arch_flags & PF_ZARCH))
    error ("z/Architecture mode not supported on %s", s390_arch_string);
  if (TARGET_64BIT && !TARGET_ZARCH)
    error ("64-bit ABI not supported in ESA/390 mode");

  /* Set processor cost function.  */
  if (s390_tune == PROCESSOR_2094_Z9_109)
    s390_cost = &z9_109_cost;
  else if (s390_tune == PROCESSOR_2084_Z990)
    s390_cost = &z990_cost;
  else
    s390_cost = &z900_cost;

  if (TARGET_BACKCHAIN && TARGET_PACKED_STACK && TARGET_HARD_FLOAT)
    error ("-mbackchain -mpacked-stack -mhard-float are not supported "
	   "in combination");

  /* -mstack-size and -mstack-guard must be used together, and the
     guard must fit below the total size.  */
  if (s390_stack_size)
    {
      if (!s390_stack_guard)
	error ("-mstack-size implies use of -mstack-guard");
      else if (s390_stack_guard >= s390_stack_size)
	error ("stack size must be greater than the stack guard value");
      else if (s390_stack_size > 1 << 16)
	error ("stack size must not be greater than 64k");
    }
  else if (s390_stack_guard)
    error ("-mstack-guard implies use of -mstack-size"); 
}

/* Map for smallest class containing reg regno.
*/

const enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
{ GENERAL_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS,    ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS,    ADDR_REGS, ADDR_REGS, ADDR_REGS,
  ADDR_REGS,    ADDR_REGS, ADDR_REGS, ADDR_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  FP_REGS,      FP_REGS,   FP_REGS,   FP_REGS,
  ADDR_REGS,    CC_REGS,   ADDR_REGS, ADDR_REGS,
  ACCESS_REGS,	ACCESS_REGS
};

/* Return attribute type of insn.  */

static enum attr_type
s390_safe_attr_type (rtx insn)
{
  /* Fall back to TYPE_NONE for insns that do not match any pattern.  */
  if (recog_memoized (insn) >= 0)
    return get_attr_type (insn);
  else
    return TYPE_NONE;
}

/* Return true if DISP is a valid short displacement.  */

static bool
s390_short_displacement (rtx disp)
{
  /* No displacement is OK.  */
  if (!disp)
    return true;

  /* Integer displacement in range.  */
  if (GET_CODE (disp) == CONST_INT)
    return INTVAL (disp) >= 0 && INTVAL (disp) < 4096;

  /* GOT offset is not OK, the GOT can be large.  */
  if (GET_CODE (disp) == CONST
      && GET_CODE (XEXP (disp, 0)) == UNSPEC
      && (XINT (XEXP (disp, 0), 1) == UNSPEC_GOT
          || XINT (XEXP (disp, 0), 1) == UNSPEC_GOTNTPOFF))
    return false;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -