
📄 jump.c

📁 Source code from the GCC library; very helpful for programming.
💻 C
		      break;
		    pbody = PATTERN (p);
		    if (GET_CODE (pbody) != SET)
		      break;
		    dest = SET_DEST (pbody);
		    if (! (GET_CODE (dest) == MEM
			   && GET_CODE (XEXP (dest, 0)) == POST_INC
			   && XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx))
		      break;
		    total_pushed -= GET_MODE_SIZE (GET_MODE (SET_DEST (pbody)));
		    /* If this push doesn't fully fit in the space
		       of the stack adjust that we deleted,
		       make another stack adjust here for what we
		       didn't use up.  There should be peepholes
		       to recognize the resulting sequence of insns.  */
		    if (total_pushed < 0)
		      {
			emit_insn_before (gen_add2_insn (stack_pointer_rtx,
							 GEN_INT (- total_pushed)),
					  p);
			break;
		      }
		    XEXP (dest, 0)
		      = plus_constant (stack_pointer_rtx, total_pushed);
		  }
	      }
#endif

	    /* Detect and delete no-op move instructions
	       resulting from not allocating a parameter in a register.  */

	    if (GET_CODE (body) == SET
		&& (SET_DEST (body) == SET_SRC (body)
		    || (GET_CODE (SET_DEST (body)) == MEM
			&& GET_CODE (SET_SRC (body)) == MEM
			&& rtx_equal_p (SET_SRC (body), SET_DEST (body))))
		&& ! (GET_CODE (SET_DEST (body)) == MEM
		      && MEM_VOLATILE_P (SET_DEST (body)))
		&& ! (GET_CODE (SET_SRC (body)) == MEM
		      && MEM_VOLATILE_P (SET_SRC (body))))
	      delete_computation (insn);

	    /* Detect and ignore no-op move instructions
	       resulting from smart or fortuitous register allocation.  */

	    else if (GET_CODE (body) == SET)
	      {
		int sreg = true_regnum (SET_SRC (body));
		int dreg = true_regnum (SET_DEST (body));

		if (sreg == dreg && sreg >= 0)
		  delete_insn (insn);
		else if (sreg >= 0 && dreg >= 0)
		  {
		    rtx trial;
		    rtx tem = find_equiv_reg (NULL_RTX, insn, 0,
					      sreg, NULL_PTR, dreg,
					      GET_MODE (SET_SRC (body)));

#ifdef PRESERVE_DEATH_INFO_REGNO_P
		    /* Deleting insn could lose a death-note for SREG or DREG
		       so don't do it if final needs accurate death-notes.  */
		    if (! PRESERVE_DEATH_INFO_REGNO_P (sreg)
			&& ! PRESERVE_DEATH_INFO_REGNO_P (dreg))
#endif
		      {
			/* DREG may have been the target of a REG_DEAD note in
			   the insn which makes INSN redundant.  If so, reorg
			   would still think it is dead.  So search for such a
			   note and delete it if we find it.  */
			for (trial = prev_nonnote_insn (insn);
			     trial && GET_CODE (trial) != CODE_LABEL;
			     trial = prev_nonnote_insn (trial))
			  if (find_regno_note (trial, REG_DEAD, dreg))
			    {
			      remove_death (dreg, trial);
			      break;
			    }

			if (tem != 0
			    && GET_MODE (tem) == GET_MODE (SET_DEST (body)))
			  delete_insn (insn);
		      }
		  }
		else if (dreg >= 0 && CONSTANT_P (SET_SRC (body))
			 && find_equiv_reg (SET_SRC (body), insn, 0, dreg,
					    NULL_PTR, 0,
					    GET_MODE (SET_DEST (body))))
		  {
		    /* This handles the case where we have two consecutive
		       assignments of the same constant to pseudos that didn't
		       get a hard reg.  Each SET from the constant will be
		       converted into a SET of the spill register and an
		       output reload will be made following it.  This produces
		       two loads of the same constant into the same spill
		       register.  */

		    rtx in_insn = insn;

		    /* Look back for a death note for the first reg.
		       If there is one, it is no longer accurate.  */
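		    /* Illustrative sketch (added for clarity; not part of the
		       original source): after reload the redundant pair looks
		       roughly like

			   (set (reg 3) (const_int 42))  ; output reload of first pseudo
			   (set (reg 3) (const_int 42))  ; output reload of second pseudo

		       The second load is what gets deleted below; a stale
		       REG_DEAD note for the register is removed first.  */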
		    while (in_insn && GET_CODE (in_insn) != CODE_LABEL)
		      {
			if ((GET_CODE (in_insn) == INSN
			     || GET_CODE (in_insn) == JUMP_INSN)
			    && find_regno_note (in_insn, REG_DEAD, dreg))
			  {
			    remove_death (dreg, in_insn);
			    break;
			  }
			in_insn = PREV_INSN (in_insn);
		      }

		    /* Delete the second load of the value.  */
		    delete_insn (insn);
		  }
	      }
	    else if (GET_CODE (body) == PARALLEL)
	      {
		/* If each part is a set between two identical registers or
		   a USE or CLOBBER, delete the insn.  */
		int i, sreg, dreg;
		rtx tem;

		for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
		  {
		    tem = XVECEXP (body, 0, i);
		    if (GET_CODE (tem) == USE || GET_CODE (tem) == CLOBBER)
		      continue;

		    if (GET_CODE (tem) != SET
			|| (sreg = true_regnum (SET_SRC (tem))) < 0
			|| (dreg = true_regnum (SET_DEST (tem))) < 0
			|| dreg != sreg)
		      break;
		  }

		if (i < 0)
		  delete_insn (insn);
	      }
	    /* Also delete insns to store bit fields if they are no-ops.  */
	    /* Not worth the hair to detect this in the big-endian case.  */
	    else if (! BYTES_BIG_ENDIAN
		     && GET_CODE (body) == SET
		     && GET_CODE (SET_DEST (body)) == ZERO_EXTRACT
		     && XEXP (SET_DEST (body), 2) == const0_rtx
		     && XEXP (SET_DEST (body), 0) == SET_SRC (body)
		     && ! (GET_CODE (SET_SRC (body)) == MEM
			   && MEM_VOLATILE_P (SET_SRC (body))))
	      delete_insn (insn);
	  }
      insn = next;
    }

  /* If we haven't yet gotten to reload and we have just run regscan,
     delete any insn that sets a register that isn't used elsewhere.
     This helps some of the optimizations below by having less insns
     being jumped around.  */

  if (! reload_completed && after_regscan)
    for (insn = f; insn; insn = next)
      {
	rtx set = single_set (insn);

	next = NEXT_INSN (insn);

	if (set && GET_CODE (SET_DEST (set)) == REG
	    && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
	    && regno_first_uid[REGNO (SET_DEST (set))] == INSN_UID (insn)
	    /* We use regno_last_note_uid so as not to delete the setting
	       of a reg that's used in notes.  A subsequent optimization
	       might arrange to use that reg for real.  */
	    && regno_last_note_uid[REGNO (SET_DEST (set))] == INSN_UID (insn)
	    && ! side_effects_p (SET_SRC (set))
	    && ! find_reg_note (insn, REG_RETVAL, 0))
	  delete_insn (insn);
      }

  /* Now iterate optimizing jumps until nothing changes over one pass.  */
  changed = 1;
  while (changed)
    {
      changed = 0;

      for (insn = f; insn; insn = next)
	{
	  rtx reallabelprev;
	  rtx temp, temp1, temp2, temp3, temp4, temp5, temp6;
	  rtx nlabel;
	  int this_is_simplejump, this_is_condjump, reversep;
	  int this_is_condjump_in_parallel;
#if 0
	  /* If NOT the first iteration, if this is the last jump pass
	     (just before final), do the special peephole optimizations.
	     Avoiding the first iteration gives ordinary jump opts
	     a chance to work before peephole opts.  */

	  if (reload_completed && !first && !flag_no_peephole)
	    if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
	      peephole (insn);
#endif

	  /* That could have deleted some insns after INSN, so check now
	     what the following insn is.  */

	  next = NEXT_INSN (insn);

	  /* See if this is a NOTE_INSN_LOOP_BEG followed by an unconditional
	     jump.  Try to optimize by duplicating the loop exit test if so.
	     This is only safe immediately after regscan, because it uses
	     the values of regno_first_uid and regno_last_uid.  */
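	  /* Illustrative sketch (added for clarity; not part of the original
	     source): a loop is normally laid out as

		 NOTE_INSN_LOOP_BEG
		 goto test;
	       body:
		 ...
	       test:
		 if (cond) goto body;

	     Duplicating the exit test emits a copy of the test before the
	     loop so that the unconditional jump into it can go away.  */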
	  if (after_regscan && GET_CODE (insn) == NOTE
	      && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
	      && (temp1 = next_nonnote_insn (insn)) != 0
	      && simplejump_p (temp1))
	    {
	      temp = PREV_INSN (insn);
	      if (duplicate_loop_exit_test (insn))
		{
		  changed = 1;
		  next = NEXT_INSN (temp);
		  continue;
		}
	    }

	  if (GET_CODE (insn) != JUMP_INSN)
	    continue;

	  this_is_simplejump = simplejump_p (insn);
	  this_is_condjump = condjump_p (insn);
	  this_is_condjump_in_parallel = condjump_in_parallel_p (insn);

	  /* Tension the labels in dispatch tables.  */

	  if (GET_CODE (PATTERN (insn)) == ADDR_VEC)
	    changed |= tension_vector_labels (PATTERN (insn), 0);
	  if (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
	    changed |= tension_vector_labels (PATTERN (insn), 1);

	  /* If a dispatch table always goes to the same place,
	     get rid of it and replace the insn that uses it.  */

	  if (GET_CODE (PATTERN (insn)) == ADDR_VEC
	      || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
	    {
	      int i;
	      rtx pat = PATTERN (insn);
	      int diff_vec_p = GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC;
	      int len = XVECLEN (pat, diff_vec_p);
	      rtx dispatch = prev_real_insn (insn);

	      for (i = 0; i < len; i++)
		if (XEXP (XVECEXP (pat, diff_vec_p, i), 0)
		    != XEXP (XVECEXP (pat, diff_vec_p, 0), 0))
		  break;
	      if (i == len
		  && dispatch != 0
		  && GET_CODE (dispatch) == JUMP_INSN
		  && JUMP_LABEL (dispatch) != 0
		  /* Don't mess with a casesi insn.  */
		  && !(GET_CODE (PATTERN (dispatch)) == SET
		       && (GET_CODE (SET_SRC (PATTERN (dispatch)))
			   == IF_THEN_ELSE))
		  && next_real_insn (JUMP_LABEL (dispatch)) == insn)
		{
		  redirect_tablejump (dispatch,
				      XEXP (XVECEXP (pat, diff_vec_p, 0), 0));
		  changed = 1;
		}
	    }

	  reallabelprev = prev_active_insn (JUMP_LABEL (insn));

	  /* If a jump references the end of the function, try to turn
	     it into a RETURN insn, possibly a conditional one.  */
	  if (JUMP_LABEL (insn)
	      && (next_active_insn (JUMP_LABEL (insn)) == 0
		  || GET_CODE (PATTERN (next_active_insn (JUMP_LABEL (insn))))
		      == RETURN))
	    changed |= redirect_jump (insn, NULL_RTX);

	  /* Detect jump to following insn.  */
	  if (reallabelprev == insn && condjump_p (insn))
	    {
	      next = next_real_insn (JUMP_LABEL (insn));
	      delete_jump (insn);
	      changed = 1;
	      continue;
	    }

	  /* If we have an unconditional jump preceded by a USE, try to put
	     the USE before the target and jump there.  This simplifies many
	     of the optimizations below since we don't have to worry about
	     dealing with these USE insns.  We only do this if the label
	     being branch to already has the identical USE or if code
	     never falls through to that label.  */

	  if (this_is_simplejump
	      && (temp = prev_nonnote_insn (insn)) != 0
	      && GET_CODE (temp) == INSN && GET_CODE (PATTERN (temp)) == USE
	      && (temp1 = prev_nonnote_insn (JUMP_LABEL (insn))) != 0
	      && (GET_CODE (temp1) == BARRIER
		  || (GET_CODE (temp1) == INSN
		      && rtx_equal_p (PATTERN (temp), PATTERN (temp1)))))
	    {
	      if (GET_CODE (temp1) == BARRIER)
		{
		  emit_insn_after (PATTERN (temp), temp1);
		  temp1 = NEXT_INSN (temp1);
		}

	      delete_insn (temp);
	      redirect_jump (insn, get_label_before (temp1));
	      reallabelprev = prev_real_insn (temp1);
	      changed = 1;
	    }

	  /* Simplify   if (...) x = a; else x = b; by converting it
	     to         x = b; if (...) x = a;
	     if B is sufficiently simple, the test doesn't involve X,
	     and nothing in the test modifies B or X.

	     If we have small register classes, we also can't do this if X
	     is a hard register.

	     If the "x = b;" insn has any REG_NOTES, we don't do this because
	     of the possibility that we are running after CSE and there is a
	     REG_EQUAL note that is only valid if the branch has already been
	     taken.  If we move the insn with the REG_EQUAL note, we may
	     fold the comparison to always be false in a later CSE pass.
	     (We could also delete the REG_NOTES when moving the insn, but it
	     seems simpler to not move it.)  An exception is that we can move
	     the insn if the only note is a REG_EQUAL or REG_EQUIV whose
	     value is the same as "b".

	     INSN is the branch over the `else' part.

	     We set:

	     TEMP to the jump insn preceding "x = a;"
	     TEMP1 to X
	     TEMP2 to the insn that sets "x = b;"
	     TEMP3 to the insn that sets "x = a;"
	     TEMP4 to the set of "x = b";  */
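	  /* Source-level view of the transformation (added for clarity; not
	     part of the original source):

		 before:  if (cond) x = a; else x = b;
		 after:   x = b; if (cond) x = a;

	     The jump over the `else' arm disappears; the rewrite is only
	     done when the conditions checked below hold.  */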
	  if (this_is_simplejump
	      && (temp3 = prev_active_insn (insn)) != 0
	      && GET_CODE (temp3) == INSN
	      && (temp4 = single_set (temp3)) != 0
	      && GET_CODE (temp1 = SET_DEST (temp4)) == REG
#ifdef SMALL_REGISTER_CLASSES
	      && REGNO (temp1) >= FIRST_PSEUDO_REGISTER
#endif
	      && (temp2 = next_active_insn (insn)) != 0
	      && GET_CODE (temp2) == INSN
	      && (temp4 = single_set (temp2)) != 0
	      && rtx_equal_p (SET_DEST (temp4), temp1)
	      && (GET_CODE (SET_SRC (temp4)) == REG
		  || GET_CODE (SET_SRC (temp4)) == SUBREG
		  || CONSTANT_P (SET_SRC (temp4)))
	      && (REG_NOTES (temp2) == 0
		  || ((REG_NOTE_KIND (REG_NOTES (temp2)) == REG_EQUAL
		       || REG_NOTE_KIND (REG_NOTES (temp2)) == REG_EQUIV)
		      && XEXP (REG_NOTES (temp2), 1) == 0
		      && rtx_equal_p (XEXP (REG_NOTES (temp2), 0),
				      SET_SRC (temp4))))
	      && (temp = prev_active_insn (temp3)) != 0
	      && condjump_p (temp) && ! simplejump_p (temp)
	      /* TEMP must skip over the "x = a;" insn */
	      && prev_real_insn (JUMP_LABEL (temp)) == insn
	      && no_labels_between_p (insn, JUMP_LABEL (temp))
	      /* There must be no other entries to the "x = b;" insn.  */
	      && no_labels_between_p (JUMP_LABEL (temp), temp2)
	      /* INSN must either branch to the insn after TEMP2 or the insn
		 after TEMP2 must branch to the same place as INSN.  */
	      && (reallabelprev == temp2
		  || ((temp5 = next_active_insn (temp2)) != 0
		      && simplejump_p (temp5)
		      && JUMP_LABEL (temp5) == JUMP_LABEL (insn))))
	    {
	      /* The test expression, X, may be a complicated test with
