⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 i386.c

📁 gcc编译工具没有什么特别
💻 C
📖 第 1 页 / 共 5 页
字号:
  /* NOTE(review): this is the tail of a comparison-operator predicate whose
     opening lines lie before this chunk.  It rejects OP on a mode mismatch,
     rejects any rtx that is not a comparison (rtx class '<'), and then
     accepts every comparison code except GT and LE -- presumably those two
     cannot be handled by the insn this predicate guards; confirm against
     the machine description.  */
  if (mode != VOIDmode && mode != GET_MODE (op))
    return 0;

  code = GET_CODE (op);
  if (GET_RTX_CLASS (code) != '<')
    return 0;

  return (code != GT && code != LE);
}

/* Return nonzero if OP is one of the three bitwise logical operators:
   AND, inclusive OR, or exclusive OR.  MODE is ignored.  */

int
ix86_logical_operator (op, mode)
     register rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return GET_CODE (op) == AND || GET_CODE (op) == IOR || GET_CODE (op) == XOR;
}

/* Returns 1 if OP contains a symbol reference (a SYMBOL_REF or LABEL_REF),
   searching recursively through every sub-expression and vector element;
   returns 0 otherwise.  */

int
symbolic_reference_mentioned_p (op)
     rtx op;
{
  register char *fmt;
  register int i;

  if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
    return 1;

  /* Walk OP's operand format string: 'E' marks a vector of rtx operands,
     'e' a single rtx operand; recurse into both.  */
  fmt = GET_RTX_FORMAT (GET_CODE (op));
  for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          register int j;

          for (j = XVECLEN (op, i) - 1; j >= 0; j--)
            if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
        return 1;
    }

  return 0;
}

/* Attempt to expand a binary operator.  Make the expansion closer to the
   actual machine than just general_operand would, which otherwise allows
   3 separate memory references (one output, two input) in a single insn.
   OPERANDS[0] is the destination, OPERANDS[1] and OPERANDS[2] the inputs.
   Return TRUE if the insn can be made valid, FALSE if it cannot.  */

int
ix86_expand_binary_operator (code, mode, operands)
     enum rtx_code code;
     enum machine_mode mode;
     rtx operands[];
{
  int modified;

  /* Recognize <var1> = <value> <op> <var1> for commutative operators
     and exchange the inputs so the destination matches the first one.  */
  if (GET_RTX_CLASS (code) == 'c'
      && (rtx_equal_p (operands[0], operands[2])
          || immediate_operand (operands[1], mode)))
    {
      rtx temp = operands[1];
      operands[1] = operands[2];
      operands[2] = temp;
    }

  /* If optimizing, copy memory inputs to regs to improve CSE.  Skipped
     during/after reload, when creating new pseudos is not allowed.  */
  if (TARGET_PSEUDO && optimize
      && ((reload_in_progress | reload_completed) == 0))
    {
      if (GET_CODE (operands[1]) == MEM
          && ! rtx_equal_p (operands[0], operands[1]))
        operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);

      if (GET_CODE (operands[2]) == MEM)
        operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);

      /* A constant first operand of MINUS cannot be exchanged away;
         load it into a fresh pseudo instead.  */
      if (GET_CODE (operands[1]) == CONST_INT && code == MINUS)
        {
          rtx temp = gen_reg_rtx (GET_MODE (operands[0]));

          emit_move_insn (temp, operands[1]);
          operands[1] = temp;
          return TRUE;
        }
    }

  if (!ix86_binary_operator_ok (code, mode, operands))
    {
      /* If not optimizing, try to make a valid insn (optimize code
         previously did this above to improve chances of CSE).  */
      if ((! TARGET_PSEUDO || !optimize)
          && ((reload_in_progress | reload_completed) == 0)
          && (GET_CODE (operands[1]) == MEM || GET_CODE (operands[2]) == MEM))
        {
          modified = FALSE;

          if (GET_CODE (operands[1]) == MEM
              && ! rtx_equal_p (operands[0], operands[1]))
            {
              operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
              modified = TRUE;
            }

          if (GET_CODE (operands[2]) == MEM)
            {
              operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
              modified = TRUE;
            }

          if (GET_CODE (operands[1]) == CONST_INT && code == MINUS)
            {
              rtx temp = gen_reg_rtx (GET_MODE (operands[0]));

              emit_move_insn (temp, operands[1]);
              operands[1] = temp;
              return TRUE;
            }

          /* Re-check only if we actually changed something.  */
          if (modified && ! ix86_binary_operator_ok (code, mode, operands))
            return FALSE;
        }
      else
        return FALSE;
    }

  return TRUE;
}

/* Return TRUE or FALSE depending on whether the binary operator meets the
   appropriate constraints: at most one of the two inputs may be a MEM,
   and a CONST_INT first input is only acceptable when CODE is commutative
   (so it could be exchanged into the second slot).  */

int
ix86_binary_operator_ok (code, mode, operands)
     enum rtx_code code;
     enum machine_mode mode ATTRIBUTE_UNUSED;
     rtx operands[3];
{
  return (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)
    && (GET_CODE (operands[1]) != CONST_INT || GET_RTX_CLASS (code) == 'c');
}

/* Attempt to expand a unary operator.  Make the expansion closer to the
   actual machine than just general_operand would, which otherwise allows
   2 separate memory references (one output, one input) in a single insn.
   Return TRUE if the insn can be made valid, FALSE if it cannot.  */

int
ix86_expand_unary_operator (code, mode, operands)
     enum rtx_code code;
     enum machine_mode mode;
     rtx operands[];
{
  /* If optimizing, copy a memory input to a reg to improve CSE.  */
  if (TARGET_PSEUDO
      && optimize
      && ((reload_in_progress | reload_completed) == 0)
      && GET_CODE (operands[1]) == MEM)
    operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);

  if (! ix86_unary_operator_ok (code, mode, operands))
    {
      /* When not optimizing the copy above was skipped; try the same
         fix-up here before giving up.  */
      if ((! TARGET_PSEUDO || optimize == 0)
          && ((reload_in_progress | reload_completed) == 0)
          && GET_CODE (operands[1]) == MEM)
        {
          operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);

          if (! ix86_unary_operator_ok (code, mode, operands))
            return FALSE;
        }
      else
        return FALSE;
    }

  return TRUE;
}

/* Return TRUE or FALSE depending on whether the unary operator meets the
   appropriate constraints.  Currently every operand combination is
   accepted.  */

int
ix86_unary_operator_ok (code, mode, operands)
     enum rtx_code code ATTRIBUTE_UNUSED;
     enum machine_mode mode ATTRIBUTE_UNUSED;
     rtx operands[2] ATTRIBUTE_UNUSED;
{
  return TRUE;
}

/* State shared by the PIC prologue helpers below: the CODE_LABEL rtx of
   the PIC thunk, its assembler name, and a counter used to make each
   generated label name unique.  */
static rtx pic_label_rtx;
static char pic_label_name [256];
static int pic_label_no = 0;

/* This function generates code for -fpic that loads %ebx with
   the return address of the caller and then returns.  */

void
asm_output_function_prefix (file, name)
     FILE *file;
     char *name ATTRIBUTE_UNUSED;
{
  rtx xops[2];
  int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
                                  || current_function_uses_const_pool);

  xops[0] = pic_offset_table_rtx;
  xops[1] = stack_pointer_rtx;

  /* Deep branch prediction favors having a return for every call.  */
  if (pic_reg_used && TARGET_DEEP_BRANCH_PREDICTION)
    {
      tree prologue_node;

      /* Create the thunk label lazily; load_pic_register shares it.  */
      if (pic_label_rtx == 0)
        {
          pic_label_rtx = gen_label_rtx ();
          ASM_GENERATE_INTERNAL_LABEL (pic_label_name, "LPR", pic_label_no++);
          LABEL_NAME (pic_label_rtx) = pic_label_name;
        }

      prologue_node = make_node (FUNCTION_DECL);
      DECL_RESULT (prologue_node) = 0;

      /* This used to call ASM_DECLARE_FUNCTION_NAME() but since it's an
         internal (non-global) label that's being emitted, it didn't make
         sense to have .type information for local labels.  This caused
         the SCO OpenServer 5.0.4 ELF assembler grief (why are you giving
         me debug info for a label that you're declaring non-global?) so
         this was changed to call ASM_OUTPUT_LABEL() instead.  */
      ASM_OUTPUT_LABEL (file, pic_label_name);

      /* The thunk body: copy the caller's return address from the top of
         the stack into the PIC register, then return.  */
      output_asm_insn ("movl (%1),%0", xops);
      output_asm_insn ("ret", xops);
    }
}

/* Generate the assembly code for function entry.
   FILE is an stdio stream to output the code to.
   SIZE is an int: how many units of temporary storage to allocate.
   When the prologue is scheduled as RTL instead, only the PIC label
   state is reset here and ix86_expand_prologue does the work.  */

void
function_prologue (file, size)
     FILE *file ATTRIBUTE_UNUSED;
     int size ATTRIBUTE_UNUSED;
{
  if (TARGET_SCHEDULE_PROLOGUE)
    {
      pic_label_rtx = 0;
      return;
    }

  ix86_prologue (0);
}

/* Expand the prologue into a bunch of separate insns.  Does nothing
   unless prologue scheduling is enabled (the text-emitting counterpart
   is function_prologue above).  */

void
ix86_expand_prologue ()
{
  if (! TARGET_SCHEDULE_PROLOGUE)
      return;

  ix86_prologue (1);
}

/* Load the PIC register with the address of the global offset table.
   DO_RTL nonzero means emit RTL insns; zero means write the assembly
   text directly.  */

void
load_pic_register (do_rtl)
     int do_rtl;
{
  rtx xops[4];

  if (TARGET_DEEP_BRANCH_PREDICTION)
    {
      /* Call the thunk emitted by asm_output_function_prefix to fetch
         the PC, then add the GOT displacement.  */
      xops[0] = pic_offset_table_rtx;

      if (pic_label_rtx == 0)
        {
          pic_label_rtx = gen_label_rtx ();
          ASM_GENERATE_INTERNAL_LABEL (pic_label_name, "LPR", pic_label_no++);
          LABEL_NAME (pic_label_rtx) = pic_label_name;
        }

      xops[1] = gen_rtx_MEM (QImode,
                         gen_rtx (SYMBOL_REF, Pmode,
                                  LABEL_NAME (pic_label_rtx)));

      if (do_rtl)
        {
          emit_insn (gen_prologue_get_pc (xops[0], xops[1]));
          emit_insn (gen_prologue_set_got (xops[0],
#ifdef YES_UNDERSCORES
                                           gen_rtx_SYMBOL_REF (Pmode,
                                                    "$__GLOBAL_OFFSET_TABLE_"),
#else
                                           gen_rtx_SYMBOL_REF (Pmode,
                                                    "$_GLOBAL_OFFSET_TABLE_"),
#endif
                                           xops[1]));
        }
      else
        {
          output_asm_insn (AS1 (call,%X1), xops);
          output_asm_insn ("addl $%__GLOBAL_OFFSET_TABLE_,%0", xops);
          pic_label_rtx = 0;
        }
    }
  else
    {
      /* No thunk: call the next instruction and pop the return address
         to obtain the PC directly.  */
      xops[0] = pic_offset_table_rtx;
      xops[1] = gen_label_rtx ();

      if (do_rtl)
        {
          /* We can't put a raw CODE_LABEL into the RTL, and we can't emit
             a new CODE_LABEL after reload, so we need a single pattern to
             emit the 3 necessary instructions.  */
          emit_insn (gen_prologue_get_pc_and_set_got (xops[0]));
        }
      else
        {
          output_asm_insn (AS1 (call,%P1), xops);
          ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L",
                                     CODE_LABEL_NUMBER (xops[1]));
          output_asm_insn (AS1 (pop%L0,%0), xops);
          output_asm_insn ("addl $%__GLOBAL_OFFSET_TABLE_+[.-%P1],%0", xops);
        }
    }

  /* When -fpic, we must emit a scheduling barrier, so that the instruction
     that restores %ebx (which is PIC_OFFSET_TABLE_REGNUM), does not get
     moved before any instruction which implicitly uses the got.  */
  if (do_rtl)
    emit_insn (gen_blockage ());
}

/* Compute the size of local storage taking into consideration the
   desired stack alignment which is to be maintained.  Also determine
   the number of registers saved below the local storage, stored through
   NREGS_ON_STACK when that pointer is non-null.  SIZE is the raw frame
   size; the return value is SIZE plus any alignment padding (the
   register save area itself is not included in the result).  */

HOST_WIDE_INT
ix86_compute_frame_size (size, nregs_on_stack)
     HOST_WIDE_INT size;
     int *nregs_on_stack;
{
  int limit;
  int nregs;
  int regno;
  int padding;
  int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
                                  || current_function_uses_const_pool);
  HOST_WIDE_INT total_size;

  /* Only registers numbered below the frame (or stack) pointer are
     candidates for saving.  */
  limit = frame_pointer_needed
          ? FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM;

  /* Count live call-saved registers, plus the PIC register when used.  */
  nregs = 0;
  for (regno = limit - 1; regno >= 0; regno--)
    if ((regs_ever_live[regno] && ! call_used_regs[regno])
        || (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
      nregs++;

  padding = 0;
  total_size = size + (nregs * UNITS_PER_WORD);

#ifdef PREFERRED_STACK_BOUNDARY
  {
    int offset;
    int preferred_alignment = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;

    /* The return address (4 bytes) and the saved frame pointer, if any,
       also occupy the stack and must be counted toward alignment.  */
    offset = 4;
    if (frame_pointer_needed)
      offset += UNITS_PER_WORD;

    total_size += offset;

    /* Round TOTAL_SIZE up to the preferred boundary.  If the resulting
       padding is smaller than what OFFSET alone would require, add one
       more full alignment unit.  */
    padding = ((total_size + preferred_alignment - 1)
               & -preferred_alignment) - total_size;

    if (padding < (((offset + preferred_alignment - 1)
                    & -preferred_alignment) - offset))
      padding += preferred_alignment;

    /* Don't bother aligning the stack of a leaf function
       which doesn't allocate any stack slots.  */
    if (size == 0 && current_function_is_leaf)
      padding = 0;
  }
#endif

  if (nregs_on_stack)
    *nregs_on_stack = nregs;

  return size + padding;
}

/* Emit the function prologue.  DO_RTL nonzero means generate RTL insns
   (scheduled prologue); zero means write assembly text directly.
   NOTE(review): this definition continues beyond the end of this chunk.  */

static void
ix86_prologue (do_rtl)
     int do_rtl;
{
  register int regno;
  int limit;
  rtx xops[4];
  int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
                                  || current_function_uses_const_pool);
  HOST_WIDE_INT tsize = ix86_compute_frame_size (get_frame_size (), (int *)0);
  rtx insn;
  int cfa_offset = INCOMING_FRAME_SP_OFFSET, cfa_store_offset = cfa_offset;

  xops[0] = stack_pointer_rtx;
  xops[1] = frame_pointer_rtx;
  xops[2] = GEN_INT (tsize);

  /* Save the caller's frame pointer and establish the new one
     (push %ebp; mov %esp,%ebp).  */
  if (frame_pointer_needed)
    {
      if (do_rtl)
        {
          insn = emit_insn (gen_rtx (SET, VOIDmode,
                                     gen_rtx_MEM (SImode,
                                              gen_rtx (PRE_DEC, SImode,
                                                       stack_pointer_rtx)),
                                     frame_pointer_rtx));

          RTX_FRAME_RELATED_P (insn) = 1;
          insn = emit_move_insn (xops[1], xops[0]);
          RTX_FRAME_RELATED_P (insn) = 1;
        }
      else
        {
          output_asm_insn ("push%L1 %1", xops);
#ifdef INCOMING_RETURN_ADDR_RTX
          /* Record the push in the dwarf2 call-frame information.  */
          if (dwarf2out_do_frame ())
            {
              char *l = dwarf2out_cfi_label ();

              cfa_store_offset += 4;
              cfa_offset = cfa_store_offset;
              dwarf2out_def_cfa (l, STACK_POINTER_REGNUM, cfa_offset);
              dwarf2out_reg_save (l, FRAME_POINTER_REGNUM, - cfa_store_offset);
            }
#endif

          output_asm_insn (AS2 (mov%L0,%0,%1), xops);
#ifdef INCOMING_RETURN_ADDR_RTX
          /* The CFA is now computed from the frame pointer.  */
          if (dwarf2out_do_frame ())
            dwarf2out_def_cfa ("", FRAME_POINTER_REGNUM, cfa_offset);
#endif
        }
    }

  /* Allocate the local frame by lowering the stack pointer, probing the
     stack instead when required for large frames.  */
  if (tsize == 0)
    ;
  else if (! TARGET_STACK_PROBE || tsize < CHECK_STACK_LIMIT)
    {
      if (do_rtl)
        {
          insn = emit_insn (gen_prologue_set_stack_ptr (xops[2]));
          RTX_FRAME_RELATED_P (insn) = 1;
        }
      else
        {
          output_asm_insn (AS2 (sub%L0,%2,%0), xops);
#ifdef INCOMING_RETURN_ADDR_RTX
          if (dwarf2out_do_frame ())
            {
              cfa_store_offset += tsize;
              if (! frame_pointer_needed)
                {

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -