
xtensa.c

gcc 3.2.1 source code | C | Page 1 of 5
  if (reverse_regs)
    {
      rtx temp = cmp0;
      cmp0 = cmp1;
      cmp1 = temp;
    }

  brtmp = gen_rtx_REG (CCmode, FPCC_REGNUM);
  emit_insn (gen_fn (brtmp, cmp0, cmp1));

  return gen_rtx (invert ? EQ : NE, VOIDmode, brtmp, const0_rtx);
}


void
xtensa_expand_conditional_branch (operands, test_code)
     rtx *operands;
     enum rtx_code test_code;
{
  enum cmp_type type = branch_type;
  rtx cmp0 = branch_cmp[0];
  rtx cmp1 = branch_cmp[1];
  rtx cmp;
  int invert;
  rtx label1, label2;

  switch (type)
    {
    case CMP_DF:
    default:
      fatal_insn ("bad test", gen_rtx (test_code, VOIDmode, cmp0, cmp1));

    case CMP_SI:
      invert = FALSE;
      cmp = gen_int_relational (test_code, cmp0, cmp1, &invert);
      break;

    case CMP_SF:
      if (!TARGET_HARD_FLOAT)
        fatal_insn ("bad test", gen_rtx (test_code, VOIDmode, cmp0, cmp1));
      invert = FALSE;
      cmp = gen_float_relational (test_code, cmp0, cmp1);
      break;
    }

  /* Generate the branch.  */
  label1 = gen_rtx_LABEL_REF (VOIDmode, operands[0]);
  label2 = pc_rtx;

  if (invert)
    {
      label2 = label1;
      label1 = pc_rtx;
    }

  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
                               gen_rtx_IF_THEN_ELSE (VOIDmode, cmp,
                                                     label1,
                                                     label2)));
}


static rtx
gen_conditional_move (cmp)
     rtx cmp;
{
  enum rtx_code code = GET_CODE (cmp);
  rtx op0 = branch_cmp[0];
  rtx op1 = branch_cmp[1];

  if (branch_type == CMP_SI)
    {
      /* Jump optimization calls get_condition() which canonicalizes
         comparisons like (GE x <const>) to (GT x <const-1>).
         Transform those comparisons back to GE, since that is the
         comparison supported in Xtensa.  We shouldn't have to
         transform <LE x const> comparisons, because neither
         xtensa_expand_conditional_branch() nor get_condition() will
         produce them.  */
      if ((code == GT) && (op1 == constm1_rtx))
        {
          code = GE;
          op1 = const0_rtx;
        }
      cmp = gen_rtx (code, VOIDmode, cc0_rtx, const0_rtx);

      if (boolean_operator (cmp, VOIDmode))
        {
          /* swap the operands to make const0 second */
          if (op0 == const0_rtx)
            {
              op0 = op1;
              op1 = const0_rtx;
            }

          /* if not comparing against zero, emit a comparison (subtract) */
          if (op1 != const0_rtx)
            {
              op0 = expand_binop (SImode, sub_optab, op0, op1,
                                  0, 0, OPTAB_LIB_WIDEN);
              op1 = const0_rtx;
            }
        }
      else if (branch_operator (cmp, VOIDmode))
        {
          /* swap the operands to make const0 second */
          if (op0 == const0_rtx)
            {
              op0 = op1;
              op1 = const0_rtx;

              switch (code)
                {
                case LT: code = GE; break;
                case GE: code = LT; break;
                default: abort ();
                }
            }

          if (op1 != const0_rtx)
            return 0;
        }
      else
        return 0;

      return gen_rtx (code, VOIDmode, op0, op1);
    }

  if (TARGET_HARD_FLOAT && (branch_type == CMP_SF))
    return gen_float_relational (code, op0, op1);

  return 0;
}
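/* [Editor's note, not part of xtensa.c]  The canonicalization undone at
   the top of gen_conditional_move() above relies on the standard signed
   integer identity (x > c - 1) <=> (x >= c): get_condition() rewrites
   (GE x 0) as (GT x -1), and the function maps it back to the
   Xtensa-supported GE form.  A minimal standalone check of that identity
   follows; it is an illustration only, compiled separately from GCC.  */

#include <assert.h>
#include <limits.h>

int
main (void)
{
  /* (x > -1) and (x >= 0) agree for every signed int, which is why
     rewriting (GT x -1) back to (GE x 0) is safe.  */
  int samples[] = { INT_MIN, -2, -1, 0, 1, INT_MAX };
  unsigned i;

  for (i = 0; i < sizeof samples / sizeof samples[0]; i++)
    assert ((samples[i] > -1) == (samples[i] >= 0));
  return 0;
}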
int
xtensa_expand_conditional_move (operands, isflt)
     rtx *operands;
     int isflt;
{
  rtx cmp;
  rtx (*gen_fn) PARAMS ((rtx, rtx, rtx, rtx, rtx));

  if (!(cmp = gen_conditional_move (operands[1])))
    return 0;

  if (isflt)
    gen_fn = (branch_type == CMP_SI
              ? gen_movsfcc_internal0
              : gen_movsfcc_internal1);
  else
    gen_fn = (branch_type == CMP_SI
              ? gen_movsicc_internal0
              : gen_movsicc_internal1);

  emit_insn (gen_fn (operands[0], XEXP (cmp, 0),
                     operands[2], operands[3], cmp));
  return 1;
}


int
xtensa_expand_scc (operands)
     rtx *operands;
{
  rtx dest = operands[0];
  rtx cmp = operands[1];
  rtx one_tmp, zero_tmp;
  rtx (*gen_fn) PARAMS ((rtx, rtx, rtx, rtx, rtx));

  if (!(cmp = gen_conditional_move (cmp)))
    return 0;

  one_tmp = gen_reg_rtx (SImode);
  zero_tmp = gen_reg_rtx (SImode);
  emit_insn (gen_movsi (one_tmp, const_true_rtx));
  emit_insn (gen_movsi (zero_tmp, const0_rtx));

  gen_fn = (branch_type == CMP_SI
            ? gen_movsicc_internal0
            : gen_movsicc_internal1);
  emit_insn (gen_fn (dest, XEXP (cmp, 0), one_tmp, zero_tmp, cmp));
  return 1;
}


/* Emit insns to move operands[1] into operands[0].
   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.  */

int
xtensa_emit_move_sequence (operands, mode)
     rtx *operands;
     enum machine_mode mode;
{
  if (CONSTANT_P (operands[1])
      && GET_CODE (operands[1]) != CONSTANT_P_RTX
      && (GET_CODE (operands[1]) != CONST_INT
          || !xtensa_simm12b (INTVAL (operands[1]))))
    {
      xtensa_load_constant (operands[0], operands[1]);
      return 1;
    }

  if (!(reload_in_progress | reload_completed))
    {
      if (!xtensa_valid_move (mode, operands))
        operands[1] = force_reg (mode, operands[1]);

      /* Check if this move is copying an incoming argument in a7.  If
         so, emit the move, followed by the special "set_frame_ptr"
         unspec_volatile insn, at the very beginning of the function.
         This is necessary because the register allocator will ignore
         conflicts with a7 and may assign some other pseudo to a7.  If
         that pseudo was assigned prior to this move, it would clobber
         the incoming argument in a7.  By copying the argument out of
         a7 as the very first thing, and then immediately following
         that with an unspec_volatile to keep the scheduler away, we
         should avoid any problems.  */
      if (a7_overlap_mentioned_p (operands[1]))
        {
          rtx mov;
          switch (mode)
            {
            case SImode:
              mov = gen_movsi_internal (operands[0], operands[1]);
              break;
            case HImode:
              mov = gen_movhi_internal (operands[0], operands[1]);
              break;
            case QImode:
              mov = gen_movqi_internal (operands[0], operands[1]);
              break;
            default:
              abort ();
            }

          /* Insert the instructions before any other argument copies.
             (The set_frame_ptr insn comes _after_ the move, so push it
             out first.)  */
          push_topmost_sequence ();
          emit_insn_after (gen_set_frame_ptr (), get_insns ());
          emit_insn_after (mov, get_insns ());
          pop_topmost_sequence ();

          return 1;
        }
    }

  /* During reload we don't want to emit (subreg:X (mem:Y)) since that
     instruction won't be recognized after reload.  So we remove the
     subreg and adjust mem accordingly.  */
  if (reload_in_progress)
    {
      operands[0] = fixup_subreg_mem (operands[0]);
      operands[1] = fixup_subreg_mem (operands[1]);
    }

  return 0;
}


static rtx
fixup_subreg_mem (x)
     rtx x;
{
  if (GET_CODE (x) == SUBREG
      && GET_CODE (SUBREG_REG (x)) == REG
      && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER)
    {
      rtx temp =
        gen_rtx_SUBREG (GET_MODE (x),
                        reg_equiv_mem [REGNO (SUBREG_REG (x))],
                        SUBREG_BYTE (x));
      x = alter_subreg (&temp);
    }
  return x;
}
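/* [Editor's note, not part of xtensa.c]  xtensa_emit_move_sequence()
   above routes any constant through xtensa_load_constant() unless it
   satisfies xtensa_simm12b(), a predicate defined elsewhere in this
   file.  Judging by its name, it presumably accepts values that fit a
   signed 12-bit immediate field (the operand range of the Xtensa MOVI
   instruction), i.e. -2048 through 2047.  The standalone sketch below
   is a hypothetical stand-in written under that assumption, not the
   real predicate.  */

#include <assert.h>

/* Hypothetical range check in the spirit of xtensa_simm12b():
   true iff v fits in a signed 12-bit field.  */
static int
simm12b_sketch (long v)
{
  return v >= -2048 && v <= 2047;   /* -(1 << 11) .. (1 << 11) - 1 */
}

int
main (void)
{
  assert (simm12b_sketch (-2048) && simm12b_sketch (0) && simm12b_sketch (2047));
  assert (!simm12b_sketch (-2049) && !simm12b_sketch (2048));
  return 0;
}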
/* Try to expand a block move operation to an RTL block move instruction.
   If not optimizing or if the block size is not a constant or if the
   block is small, the expansion fails and GCC falls back to calling
   memcpy().

   operands[0] is the destination
   operands[1] is the source
   operands[2] is the length
   operands[3] is the alignment */

int
xtensa_expand_block_move (operands)
     rtx *operands;
{
  rtx dest = operands[0];
  rtx src = operands[1];
  int bytes = INTVAL (operands[2]);
  int align = XINT (operands[3], 0);
  int num_pieces, move_ratio;

  /* If this is not a fixed size move, just call memcpy */
  if (!optimize || (GET_CODE (operands[2]) != CONST_INT))
    return 0;

  /* Anything to move? */
  if (bytes <= 0)
    return 1;

  if (align > MOVE_MAX)
    align = MOVE_MAX;

  /* decide whether to expand inline based on the optimization level */
  move_ratio = 4;
  if (optimize > 2)
    move_ratio = LARGEST_MOVE_RATIO;

  num_pieces = (bytes / align) + (bytes % align); /* close enough anyway */
  if (num_pieces >= move_ratio)
    return 0;

  /* make sure the memory addresses are valid */
  operands[0] = validize_mem (dest);
  operands[1] = validize_mem (src);

  emit_insn (gen_movstrsi_internal (operands[0], operands[1],
                                    operands[2], operands[3]));
  return 1;
}
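/* [Editor's note, not part of xtensa.c]  The num_pieces computation in
   xtensa_expand_block_move() above is a deliberately rough cost estimate
   (hence the "close enough anyway" remark): one move per full align-sized
   unit, plus the leftover byte count as an upper bound on the moves
   needed for the unaligned tail.  A standalone illustration of the
   arithmetic follows.  */

#include <stdio.h>

int
main (void)
{
  /* An 11-byte copy with 4-byte alignment: two aligned word moves plus
     at most three single-byte moves, estimated as 11/4 + 11%4 = 5.  */
  int bytes = 11, align = 4;
  int num_pieces = (bytes / align) + (bytes % align);

  printf ("estimated pieces: %d\n", num_pieces);
  return 0;
}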
/* Emit a sequence of instructions to implement a block move, trying
   to hide load delay slots as much as possible.  Load N values into
   temporary registers, store those N values, and repeat until the
   complete block has been moved.  N=delay_slots+1 */

struct meminsnbuf {
  char template[30];
  rtx operands[2];
};

void
xtensa_emit_block_move (operands, tmpregs, delay_slots)
     rtx *operands;
     rtx *tmpregs;
     int delay_slots;
{
  rtx dest = operands[0];
  rtx src = operands[1];
  int bytes = INTVAL (operands[2]);
  int align = XINT (operands[3], 0);
  rtx from_addr = XEXP (src, 0);
  rtx to_addr = XEXP (dest, 0);
  int from_struct = MEM_IN_STRUCT_P (src);
  int to_struct = MEM_IN_STRUCT_P (dest);
  int offset = 0;
  int chunk_size, item_size;
  struct meminsnbuf *ldinsns, *stinsns;
  const char *ldname, *stname;
  enum machine_mode mode;

  if (align > MOVE_MAX)
    align = MOVE_MAX;
  item_size = align;
  chunk_size = delay_slots + 1;

  ldinsns = (struct meminsnbuf *)
    alloca (chunk_size * sizeof (struct meminsnbuf));
  stinsns = (struct meminsnbuf *)
    alloca (chunk_size * sizeof (struct meminsnbuf));

  mode = xtensa_find_mode_for_size (item_size);
  item_size = GET_MODE_SIZE (mode);
  ldname = xtensa_ld_opcodes[(int) mode];
  stname = xtensa_st_opcodes[(int) mode];

  while (bytes > 0)
    {
      int n;

      for (n = 0; n < chunk_size; n++)
        {
          rtx addr, mem;

          if (bytes == 0)
            {
              chunk_size = n;
              break;
            }

          if (bytes < item_size)
            {
              /* find a smaller item_size which we can load & store */
              item_size = bytes;
              mode = xtensa_find_mode_for_size (item_size);
              item_size = GET_MODE_SIZE (mode);
              ldname = xtensa_ld_opcodes[(int) mode];
              stname = xtensa_st_opcodes[(int) mode];
            }

          /* record the load instruction opcode and operands */
          addr = plus_constant (from_addr, offset);
          mem = gen_rtx_MEM (mode, addr);
          if (! memory_address_p (mode, addr))
            abort ();
          MEM_IN_STRUCT_P (mem) = from_struct;
          ldinsns[n].operands[0] = tmpregs[n];
          ldinsns[n].operands[1] = mem;
          sprintf (ldinsns[n].template, "%s\t%%0, %%1", ldname);

          /* record the store instruction opcode and operands */
          addr = plus_constant (to_addr, offset);
          mem = gen_rtx_MEM (mode, addr);
          if (! memory_address_p (mode, addr))
            abort ();
          MEM_IN_STRUCT_P (mem) = to_struct;
          stinsns[n].operands[0] = tmpregs[n];
          stinsns[n].operands[1] = mem;
          sprintf (stinsns[n].template, "%s\t%%0, %%1", stname);

          offset += item_size;
          bytes -= item_size;
        }

      /* now output the loads followed by the stores */
      for (n = 0; n < chunk_size; n++)
        output_asm_insn (ldinsns[n].template, ldinsns[n].operands);
      for (n = 0; n < chunk_size; n++)
        output_asm_insn (stinsns[n].template, stinsns[n].operands);
    }
}


static enum machine_mode
xtensa_find_mode_for_size (item_size)
     unsigned item_size;
{
  enum machine_mode mode, tmode;

  while (1)
    {
      mode = VOIDmode;

      /* find mode closest to but not bigger than item_size */
      for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
           tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
        if (GET_MODE_SIZE (tmode) <= item_size)
          mode = tmode;
      if (mode == VOIDmode)
        abort ();

      item_size = GET_MODE_SIZE (mode);
      if (xtensa_ld_opcodes[(int) mode]
          && xtensa_st_opcodes[(int) mode])
        break;

      /* cannot load & store this mode; try something smaller */
      item_size -= 1;
    }

  return mode;
}


void
xtensa_expand_nonlocal_goto (operands)
     rtx *operands;
{
  rtx goto_handler = operands[1];
  rtx containing_fp = operands[3];

  /* generate a call to "__xtensa_nonlocal_goto" (in libgcc); the code
     is too big to generate in-line */

  if (GET_CODE (containing_fp) != REG)
    containing_fp = force_reg (Pmode, containing_fp);

  goto_handler = replace_rtx (copy_rtx (goto_handler),
                              virtual_stack_vars_rtx,
                              containing_fp);

  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__xtensa_nonlocal_goto"),
                     0, VOIDmode, 2,
                     containing_fp, Pmode,
                     goto_handler, Pmode);
}


static void
xtensa_init_machine_status (p)
     struct function *p;
{
  p->machine = (struct machine_function *)
    xcalloc (1, sizeof (struct machine_function));
}


static void
xtensa_free_machine_status (p)
     struct function *p;
{
  free (p->machine);
  p->machine = NULL;
}


void
xtensa_setup_frame_addresses ()
{
  /* Set flag to cause FRAME_POINTER_REQUIRED to be set.  */
  cfun->machine->accesses_prev_frame = 1;

  emit_library_call
    (gen_rtx_SYMBOL_REF (Pmode, "__xtensa_libgcc_window_spill"),
     0, VOIDmode, 0);
}
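/* [Editor's note, not part of xtensa.c]  The inner loop of
   xtensa_find_mode_for_size() above walks the integer modes from
   narrowest to widest and keeps the last one that still fits, i.e. it
   selects the widest integer mode no bigger than item_size.  Below is a
   standalone sketch of that selection, assuming the usual 1/2/4-byte
   QImode/HImode/SImode sizes and ignoring the opcode-availability retry
   in the real function.  */

#include <assert.h>

/* Pick the widest size from {1, 2, 4} that does not exceed item_size,
   mirroring the mode walk above.  */
static unsigned
widest_size_for (unsigned item_size)
{
  static const unsigned sizes[] = { 1, 2, 4 };  /* QI, HI, SI */
  unsigned best = 0, i;

  for (i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
    if (sizes[i] <= item_size)
      best = sizes[i];
  return best;  /* 0 if nothing fits */
}

int
main (void)
{
  assert (widest_size_for (1) == 1);
  assert (widest_size_for (3) == 2);
  assert (widest_size_for (4) == 4);
  assert (widest_size_for (9) == 4);
  return 0;
}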
/* Emit the assembly for the end of a zero-cost loop. Normally we just emit
   a comment showing where the end of the loop is. However, if there is a
   label or a branch at the end of the loop then we need to place a nop
   there. If the loop ends with a label we need the nop so that branches