
📄 xtensa.c

📁 The gcc compiler for Linux
💻 C
📖 Page 1 of 5
     rtx cmp1;                      /* second operand to compare */
{
  rtx (*gen_fn) PARAMS ((rtx, rtx, rtx));
  rtx brtmp;
  int reverse_regs, invert;

  switch (test_code)
    {
    case EQ: reverse_regs = 0; invert = 0; gen_fn = gen_seq_sf; break;
    case NE: reverse_regs = 0; invert = 1; gen_fn = gen_seq_sf; break;
    case LE: reverse_regs = 0; invert = 0; gen_fn = gen_sle_sf; break;
    case GT: reverse_regs = 1; invert = 0; gen_fn = gen_slt_sf; break;
    case LT: reverse_regs = 0; invert = 0; gen_fn = gen_slt_sf; break;
    case GE: reverse_regs = 1; invert = 0; gen_fn = gen_sle_sf; break;
    default:
      fatal_insn ("bad test", gen_rtx (test_code, VOIDmode, cmp0, cmp1));
      reverse_regs = 0; invert = 0; gen_fn = 0; /* avoid compiler warnings */
    }

  if (reverse_regs)
    {
      rtx temp = cmp0;
      cmp0 = cmp1;
      cmp1 = temp;
    }

  brtmp = gen_rtx_REG (CCmode, FPCC_REGNUM);
  emit_insn (gen_fn (brtmp, cmp0, cmp1));

  return gen_rtx (invert ? EQ : NE, VOIDmode, brtmp, const0_rtx);
}

void
xtensa_expand_conditional_branch (operands, test_code)
     rtx *operands;
     enum rtx_code test_code;
{
  enum cmp_type type = branch_type;
  rtx cmp0 = branch_cmp[0];
  rtx cmp1 = branch_cmp[1];
  rtx cmp;
  int invert;
  rtx label1, label2;

  switch (type)
    {
    case CMP_DF:
    default:
      fatal_insn ("bad test", gen_rtx (test_code, VOIDmode, cmp0, cmp1));

    case CMP_SI:
      invert = FALSE;
      cmp = gen_int_relational (test_code, cmp0, cmp1, &invert);
      break;

    case CMP_SF:
      if (!TARGET_HARD_FLOAT)
        fatal_insn ("bad test", gen_rtx (test_code, VOIDmode, cmp0, cmp1));
      invert = FALSE;
      cmp = gen_float_relational (test_code, cmp0, cmp1);
      break;
    }

  /* Generate the branch.  */

  label1 = gen_rtx_LABEL_REF (VOIDmode, operands[0]);
  label2 = pc_rtx;

  if (invert)
    {
      label2 = label1;
      label1 = pc_rtx;
    }

  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
                               gen_rtx_IF_THEN_ELSE (VOIDmode, cmp,
                                                     label1,
                                                     label2)));
}

static rtx
gen_conditional_move (cmp)
     rtx cmp;
{
  enum rtx_code code = GET_CODE (cmp);
  rtx op0 = branch_cmp[0];
  rtx op1 = branch_cmp[1];

  if (branch_type == CMP_SI)
    {
      /* Jump optimization calls get_condition() which canonicalizes
         comparisons like (GE x <const>) to (GT x <const-1>).
         Transform those comparisons back to GE, since that is the
         comparison supported in Xtensa.  We shouldn't have to
         transform <LE x const> comparisons, because neither
         xtensa_expand_conditional_branch() nor get_condition() will
         produce them.  */

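      /* For example, (GE x 0) will have been canonicalized by
         get_condition() into (GT x -1); the test below simply turns
         that case back into (GE x 0).  */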
      if ((code == GT) && (op1 == constm1_rtx))
        {
          code = GE;
          op1 = const0_rtx;
        }
      cmp = gen_rtx (code, VOIDmode, cc0_rtx, const0_rtx);

      if (boolean_operator (cmp, VOIDmode))
        {
          /* swap the operands to make const0 second */
          if (op0 == const0_rtx)
            {
              op0 = op1;
              op1 = const0_rtx;
            }

          /* if not comparing against zero, emit a comparison (subtract) */
          if (op1 != const0_rtx)
            {
              op0 = expand_binop (SImode, sub_optab, op0, op1,
                                  0, 0, OPTAB_LIB_WIDEN);
              op1 = const0_rtx;
            }
        }
      else if (branch_operator (cmp, VOIDmode))
        {
          /* swap the operands to make const0 second */
          if (op0 == const0_rtx)
            {
              op0 = op1;
              op1 = const0_rtx;

              switch (code)
                {
                case LT: code = GE; break;
                case GE: code = LT; break;
                default: abort ();
                }
            }

          if (op1 != const0_rtx)
            return 0;
        }
      else
        return 0;

      return gen_rtx (code, VOIDmode, op0, op1);
    }

  if (TARGET_HARD_FLOAT && (branch_type == CMP_SF))
    return gen_float_relational (code, op0, op1);

  return 0;
}

int
xtensa_expand_conditional_move (operands, isflt)
    rtx *operands;
    int isflt;
{
  rtx cmp;
  rtx (*gen_fn) PARAMS ((rtx, rtx, rtx, rtx, rtx));

  if (!(cmp = gen_conditional_move (operands[1])))
    return 0;

  if (isflt)
    gen_fn = (branch_type == CMP_SI
              ? gen_movsfcc_internal0
              : gen_movsfcc_internal1);
  else
    gen_fn = (branch_type == CMP_SI
              ? gen_movsicc_internal0
              : gen_movsicc_internal1);

  emit_insn (gen_fn (operands[0], XEXP (cmp, 0),
                     operands[2], operands[3], cmp));
  return 1;
}

int
xtensa_expand_scc (operands)
     rtx *operands;
{
  rtx dest = operands[0];
  rtx cmp = operands[1];
  rtx one_tmp, zero_tmp;
  rtx (*gen_fn) PARAMS ((rtx, rtx, rtx, rtx, rtx));

  if (!(cmp = gen_conditional_move (cmp)))
    return 0;

  one_tmp = gen_reg_rtx (SImode);
  zero_tmp = gen_reg_rtx (SImode);
  emit_insn (gen_movsi (one_tmp, const_true_rtx));
  emit_insn (gen_movsi (zero_tmp, const0_rtx));

  gen_fn = (branch_type == CMP_SI
            ? gen_movsicc_internal0
            : gen_movsicc_internal1);
  emit_insn (gen_fn (dest, XEXP (cmp, 0), one_tmp, zero_tmp, cmp));
  return 1;
}

/* Emit insns to move operands[1] into operands[0].
   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.  */

int
xtensa_emit_move_sequence (operands, mode)
     rtx *operands;
     enum machine_mode mode;
{
  if (CONSTANT_P (operands[1])
      && GET_CODE (operands[1]) != CONSTANT_P_RTX
      && (GET_CODE (operands[1]) != CONST_INT
          || !xtensa_simm12b (INTVAL (operands[1]))))
    {
      xtensa_load_constant (operands[0], operands[1]);
      return 1;
    }

  if (!(reload_in_progress | reload_completed))
    {
      if (!xtensa_valid_move (mode, operands))
        operands[1] = force_reg (mode, operands[1]);

      operands[1] = xtensa_copy_incoming_a7 (operands[1]);
    }

  /* During reload we don't want to emit (subreg:X (mem:Y)) since that
     instruction won't be recognized after reload.  So we remove the
     subreg and adjust mem accordingly.  */

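  /* For example, a (subreg:HI (reg:SI <pseudo>)) whose pseudo has been
     given a stack slot is rewritten by fixup_subreg_mem() below into
     the corresponding narrow MEM via reg_equiv_mem and alter_subreg.  */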
  if (reload_in_progress)
    {
      operands[0] = fixup_subreg_mem (operands[0]);
      operands[1] = fixup_subreg_mem (operands[1]);
    }

  return 0;
}

static rtx
fixup_subreg_mem (x)
     rtx x;
{
  if (GET_CODE (x) == SUBREG
      && GET_CODE (SUBREG_REG (x)) == REG
      && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER)
    {
      rtx temp =
        gen_rtx_SUBREG (GET_MODE (x),
                        reg_equiv_mem [REGNO (SUBREG_REG (x))],
                        SUBREG_BYTE (x));
      x = alter_subreg (&temp);
    }
  return x;
}

/* Check if an incoming argument in a7 is expected to be used soon and
   if OPND is a register or register pair that includes a7.  If so,
   create a new pseudo and copy a7 into that pseudo at the very
   beginning of the function, followed by the special "set_frame_ptr"
   unspec_volatile insn.  The return value is either the original
   operand, if it is not a7, or the new pseudo containing a copy of
   the incoming argument.  This is necessary because the register
   allocator will ignore conflicts with a7 and may either assign some
   other pseudo to a7 or use a7 as the hard_frame_pointer, clobbering
   the incoming argument in a7.  By copying the argument out of a7 as
   the very first thing, and then immediately following that with an
   unspec_volatile to keep the scheduler away, we should avoid any
   problems.  Putting the set_frame_ptr insn at the beginning, with
   only the a7 copy before it, also makes it easier for the prologue
   expander to initialize the frame pointer after the a7 copy and to
   fix up the a7 copy to use the stack pointer instead of the frame
   pointer.  */

rtx
xtensa_copy_incoming_a7 (opnd)
     rtx opnd;
{
  rtx entry_insns = 0;
  rtx reg, tmp;
  enum machine_mode mode;

  if (!cfun->machine->need_a7_copy)
    return opnd;

  /* This function should never be called again once a7 has been copied.  */
  if (cfun->machine->set_frame_ptr_insn)
    abort ();

  mode = GET_MODE (opnd);

  /* The operand using a7 may come in a later instruction, so just return
     the original operand if it doesn't use a7.  */
  reg = opnd;
  if (GET_CODE (reg) == SUBREG)
    {
      if (SUBREG_BYTE (reg) != 0)
        abort ();
      reg = SUBREG_REG (reg);
    }
  if (GET_CODE (reg) != REG
      || REGNO (reg) > A7_REG
      || REGNO (reg) + HARD_REGNO_NREGS (A7_REG, mode) <= A7_REG)
    return opnd;

  /* 1-word args will always be in a7; 2-word args in a6/a7.  */
  if (REGNO (reg) + HARD_REGNO_NREGS (A7_REG, mode) - 1 != A7_REG)
    abort ();

  cfun->machine->need_a7_copy = false;

  /* Copy a7 to a new pseudo at the function entry.  Use gen_raw_REG to
     create the REG for a7 so that hard_frame_pointer_rtx is not used.  */

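  /* entry_insns is still zero here, so push_to_sequence() starts an
     empty sequence; the copy and the set_frame_ptr insn built below
     are collected in it and then spliced in at the function entry.  */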
  push_to_sequence (entry_insns);
  tmp = gen_reg_rtx (mode);

  switch (mode)
    {
    case DFmode:
    case DImode:
      emit_insn (gen_movsi_internal (gen_rtx_SUBREG (SImode, tmp, 0),
                                     gen_rtx_REG (SImode, A7_REG - 1)));
      emit_insn (gen_movsi_internal (gen_rtx_SUBREG (SImode, tmp, 4),
                                     gen_raw_REG (SImode, A7_REG)));
      break;
    case SFmode:
      emit_insn (gen_movsf_internal (tmp, gen_raw_REG (mode, A7_REG)));
      break;
    case SImode:
      emit_insn (gen_movsi_internal (tmp, gen_raw_REG (mode, A7_REG)));
      break;
    case HImode:
      emit_insn (gen_movhi_internal (tmp, gen_raw_REG (mode, A7_REG)));
      break;
    case QImode:
      emit_insn (gen_movqi_internal (tmp, gen_raw_REG (mode, A7_REG)));
      break;
    default:
      abort ();
    }

  cfun->machine->set_frame_ptr_insn = emit_insn (gen_set_frame_ptr ());
  entry_insns = get_insns ();
  end_sequence ();

  if (cfun->machine->vararg_a7)
    {
      /* This is called from within builtin_savereg, so we're already
         inside a start_sequence that will be placed at the start of
         the function.  */
      emit_insn (entry_insns);
    }
  else
    {
      /* Put entry_insns after the NOTE that starts the function.  If
         this is inside a start_sequence, make the outer-level insn
         chain current, so the code is placed at the start of the
         function.  */
      push_topmost_sequence ();
      emit_insn_after (entry_insns, get_insns ());
      pop_topmost_sequence ();
    }

  return tmp;
}

/* Try to expand a block move operation to an RTL block move instruction.
   If not optimizing or if the block size is not a constant or if the
   block is small, the expansion fails and GCC falls back to calling
   memcpy().

   operands[0] is the destination
   operands[1] is the source
   operands[2] is the length
   operands[3] is the alignment */

int
xtensa_expand_block_move (operands)
     rtx *operands;
{
  rtx dest = operands[0];
  rtx src = operands[1];
  int bytes = INTVAL (operands[2]);
  int align = XINT (operands[3], 0);
  int num_pieces, move_ratio;

  /* If this is not a fixed size move, just call memcpy */
  if (!optimize || (GET_CODE (operands[2]) != CONST_INT))
    return 0;

  /* Anything to move? */
  if (bytes <= 0)
    return 1;

  if (align > MOVE_MAX)
    align = MOVE_MAX;

  /* decide whether to expand inline based on the optimization level */
  move_ratio = 4;
  if (optimize > 2)
    move_ratio = LARGEST_MOVE_RATIO;

  num_pieces = (bytes / align) + (bytes % align); /* close enough anyway */
  if (num_pieces >= move_ratio)
    return 0;

  /* make sure the memory addresses are valid */
  operands[0] = validize_mem (dest);
  operands[1] = validize_mem (src);

  emit_insn (gen_movstrsi_internal (operands[0], operands[1],
                                    operands[2], operands[3]));
  return 1;
}

/*  Emit a sequence of instructions to implement a block move, trying
    to hide load delay slots as much as possible.  Load N values into
    temporary registers, store those N values, and repeat until the
    complete block has been moved.  N=delay_slots+1 */

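/* A minimal standalone sketch (not part of xtensa.c) of the copy
   strategy described above: read a chunk of N items into temporaries,
   then write all N back, so that each load is followed by other
   independent loads rather than by the store that depends on it.
   N is fixed at 4 here and the items are whole words; the real code
   derives N from delay_slots and the item size from the alignment.  */

static void
chunked_word_copy (unsigned int *dst, const unsigned int *src,
                   unsigned long nwords)
{
  unsigned int tmp[4];

  while (nwords > 0)
    {
      unsigned long chunk = nwords < 4 ? nwords : 4;
      unsigned long i;

      /* all loads first ... */
      for (i = 0; i < chunk; i++)
        tmp[i] = src[i];
      /* ... then all stores, mirroring the ldinsns/stinsns split below */
      for (i = 0; i < chunk; i++)
        dst[i] = tmp[i];

      src += chunk;
      dst += chunk;
      nwords -= chunk;
    }
}
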
struct meminsnbuf {
  char template[30];
  rtx operands[2];
};

void
xtensa_emit_block_move (operands, tmpregs, delay_slots)
     rtx *operands;
     rtx *tmpregs;
     int delay_slots;
{
  rtx dest = operands[0];
  rtx src = operands[1];
  int bytes = INTVAL (operands[2]);
  int align = XINT (operands[3], 0);
  rtx from_addr = XEXP (src, 0);
  rtx to_addr = XEXP (dest, 0);
  int from_struct = MEM_IN_STRUCT_P (src);
  int to_struct = MEM_IN_STRUCT_P (dest);
  int offset = 0;
  int chunk_size, item_size;
  struct meminsnbuf *ldinsns, *stinsns;
  const char *ldname, *stname;
  enum machine_mode mode;

  if (align > MOVE_MAX)
    align = MOVE_MAX;
  item_size = align;
  chunk_size = delay_slots + 1;

  ldinsns = (struct meminsnbuf *)
    alloca (chunk_size * sizeof (struct meminsnbuf));
  stinsns = (struct meminsnbuf *)
    alloca (chunk_size * sizeof (struct meminsnbuf));

  mode = xtensa_find_mode_for_size (item_size);
  item_size = GET_MODE_SIZE (mode);
  ldname = xtensa_ld_opcodes[(int) mode];
  stname = xtensa_st_opcodes[(int) mode];

  while (bytes > 0)
    {
      int n;

      for (n = 0; n < chunk_size; n++)
        {
          rtx addr, mem;

          if (bytes == 0)
            {
              chunk_size = n;
              break;
            }

          if (bytes < item_size)
            {
              /* find a smaller item_size which we can load & store */
              item_size = bytes;
              mode = xtensa_find_mode_for_size (item_size);
              item_size = GET_MODE_SIZE (mode);
              ldname = xtensa_ld_opcodes[(int) mode];
              stname = xtensa_st_opcodes[(int) mode];
            }

          /* record the load instruction opcode and operands */
          addr = plus_constant (from_addr, offset);
          mem = gen_rtx_MEM (mode, addr);
          if (! memory_address_p (mode, addr))
            abort ();
          MEM_IN_STRUCT_P (mem) = from_struct;
          ldinsns[n].operands[0] = tmpregs[n];
          ldinsns[n].operands[1] = mem;
          sprintf (ldinsns[n].template, "%s\t%%0, %%1", ldname);

          /* record the store instruction opcode and operands */
          addr = plus_constant (to_addr, offset);
          mem = gen_rtx_MEM (mode, addr);
          if (! memory_address_p (mode, addr))
            abort ();
          MEM_IN_STRUCT_P (mem) = to_struct;
          stinsns[n].operands[0] = tmpregs[n];
          stinsns[n].operands[1] = mem;
          sprintf (stinsns[n].template, "%s\t%%0, %%1", stname);

          offset += item_size;
          bytes -= item_size;
        }

      /* now output the loads followed by the stores */
      for (n = 0; n < chunk_size; n++)
        output_asm_insn (ldinsns[n].template, ldinsns[n].operands);
      for (n = 0; n < chunk_size; n++)
        output_asm_insn (stinsns[n].template, stinsns[n].operands);
    }
