fold-const.c
/* Fold a constant sub-tree into a single node for C-compiler
   Copyright (C) 1987, 88, 92, 93, 94, 1995 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/*@@ This file should be rewritten to use an arbitrary precision
  @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
  @@ Perhaps the routines could also be used for bc/dc, and made a lib.
  @@ The routines that translate from the ap rep should
  @@ warn if precision et. al. is lost.
  @@ This would also make life easier when this technology is used
  @@ for cross-compilers.  */

/* The entry points in this file are fold, size_int and size_binop.

   fold takes a tree as argument and returns a simplified tree.

   size_binop takes a tree code for an arithmetic operation
   and two operands that are trees, and produces a tree for the
   result, assuming the type comes from `sizetype'.

   size_int takes an integer value, and creates a tree constant
   with type from `sizetype'.  */

#include <stdio.h>
#include <setjmp.h>
#include "config.h"
#include "flags.h"
#include "tree.h"

/* Handle floating overflow for `const_binop'.  */
static jmp_buf float_error;

static void encode	PROTO((HOST_WIDE_INT *, HOST_WIDE_INT, HOST_WIDE_INT));
static void decode	PROTO((HOST_WIDE_INT *, HOST_WIDE_INT *, HOST_WIDE_INT *));
int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
				HOST_WIDE_INT, HOST_WIDE_INT,
				HOST_WIDE_INT, HOST_WIDE_INT *,
				HOST_WIDE_INT *, HOST_WIDE_INT *,
				HOST_WIDE_INT *));
static int split_tree	PROTO((tree, enum tree_code, tree *, tree *, int *));
static tree const_binop PROTO((enum tree_code, tree, tree, int));
static tree fold_convert PROTO((tree, tree));
static enum tree_code invert_tree_comparison PROTO((enum tree_code));
static enum tree_code swap_tree_comparison PROTO((enum tree_code));
static int truth_value_p PROTO((enum tree_code));
static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
static tree eval_subst	PROTO((tree, tree, tree, tree, tree));
static tree omit_one_operand PROTO((tree, tree, tree));
static tree pedantic_omit_one_operand PROTO((tree, tree, tree));
static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
					      tree, tree));
static tree decode_field_reference PROTO((tree, int *, int *,
					  enum machine_mode *, int *,
					  int *, tree *, tree *));
static int all_ones_mask_p PROTO((tree, int));
static int simple_operand_p PROTO((tree));
static tree range_test	PROTO((enum tree_code, tree, enum tree_code,
			       enum tree_code, tree, tree, tree));
static tree unextend	PROTO((tree, int, int, tree));
static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
static tree strip_compound_expr PROTO((tree, tree));

#ifndef BRANCH_COST
#define BRANCH_COST 1
#endif

/* Yield nonzero if a signed left shift of A by B bits overflows.  */
#define left_shift_overflows(a, b)  ((a)  !=  ((a) << (b)) >> (b))

/* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
   Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
   Then this yields nonzero if overflow occurred during the addition.
   Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
   Use `^' to test whether signs differ, and `< 0' to isolate the sign.  */
#define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)

/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer in 4 words, with only
   HOST_BITS_PER_WIDE_INT/2 bits stored in each word, as a positive number.  */

#define LOWPART(x) \
  ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT/2)) - 1))
#define HIGHPART(x) \
  ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT/2)
#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT/2)

/* Unpack a two-word integer into 4 words.
   LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
   WORDS points to the array of HOST_WIDE_INTs.  */

static void
encode (words, low, hi)
     HOST_WIDE_INT *words;
     HOST_WIDE_INT low, hi;
{
  words[0] = LOWPART (low);
  words[1] = HIGHPART (low);
  words[2] = LOWPART (hi);
  words[3] = HIGHPART (hi);
}

/* Pack an array of 4 words into a two-word integer.
   WORDS points to the array of words.
   The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.  */

static void
decode (words, low, hi)
     HOST_WIDE_INT *words;
     HOST_WIDE_INT *low, *hi;
{
  *low = words[0] | words[1] * BASE;
  *hi = words[2] | words[3] * BASE;
}

/* Make the integer constant T valid for its type
   by setting to 0 or 1 all the bits in the constant
   that don't belong in the type.
   Yield 1 if a signed overflow occurs, 0 otherwise.
   If OVERFLOW is nonzero, a signed overflow has already occurred
   in calculating T, so propagate it.

   Make the real constant T valid for its type by calling CHECK_FLOAT_VALUE,
   if it exists.  */

int
force_fit_type (t, overflow)
     tree t;
     int overflow;
{
  HOST_WIDE_INT low, high;
  register int prec;

  if (TREE_CODE (t) == REAL_CST)
    {
#ifdef CHECK_FLOAT_VALUE
      CHECK_FLOAT_VALUE (TYPE_MODE (TREE_TYPE (t)), TREE_REAL_CST (t),
			 overflow);
#endif
      return overflow;
    }

  else if (TREE_CODE (t) != INTEGER_CST)
    return overflow;

  low = TREE_INT_CST_LOW (t);
  high = TREE_INT_CST_HIGH (t);

  if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
    prec = POINTER_SIZE;
  else
    prec = TYPE_PRECISION (TREE_TYPE (t));

  /* First clear all bits that are beyond the type's precision.  */

  if (prec == 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    {
      TREE_INT_CST_HIGH (t)
	&= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
    }
  else
    {
      TREE_INT_CST_HIGH (t) = 0;
      if (prec < HOST_BITS_PER_WIDE_INT)
	TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
    }

  /* Unsigned types do not suffer sign extension or overflow.  */
  if (TREE_UNSIGNED (TREE_TYPE (t)))
    return overflow;

  /* If the value's sign bit is set, extend the sign.  */
  if (prec != 2 * HOST_BITS_PER_WIDE_INT
      && (prec > HOST_BITS_PER_WIDE_INT
	  ? (TREE_INT_CST_HIGH (t)
	     & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
	  : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
    {
      /* Value is negative:
	 set to 1 all the bits that are outside this type's precision.  */
      if (prec > HOST_BITS_PER_WIDE_INT)
	{
	  TREE_INT_CST_HIGH (t)
	    |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
	}
      else
	{
	  TREE_INT_CST_HIGH (t) = -1;
	  if (prec < HOST_BITS_PER_WIDE_INT)
	    TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
	}
    }

  /* Yield nonzero if signed overflow occurred.  */
  return
    ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
     != 0);
}

/* Add two doubleword integers with doubleword result.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
add_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT l, h;

  l = l1 + l2;
  h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < l1);

  *lv = l;
  *hv = h;
  return overflow_sum_sign (h1, h2, h);
}

/* Negate a doubleword integer with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
neg_double (l1, h1, lv, hv)
     HOST_WIDE_INT l1, h1;
     HOST_WIDE_INT *lv, *hv;
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
      return (*hv & h1) < 0;
    }
  else
    {
      *lv = - l1;
      *hv = ~ h1;
      return 0;
    }
}

/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
mul_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT arg1[4];
  HOST_WIDE_INT arg2[4];
  HOST_WIDE_INT prod[4 * 2];
  register unsigned HOST_WIDE_INT carry;
  register int i, j, k;
  HOST_WIDE_INT toplow, tophigh, neglow, neghigh;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  bzero ((char *) prod, sizeof prod);

  for (i = 0; i < 4; i++)
    {
      carry = 0;
      for (j = 0; j < 4; j++)
	{
	  k = i + j;
	  /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000.  */
	  carry += arg1[i] * arg2[j];
	  /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF.  */
	  carry += prod[k];
	  prod[k] = LOWPART (carry);
	  carry = HIGHPART (carry);
	}
      prod[i + 4] = carry;
    }

  decode (prod, lv, hv);	/* This ignores prod[4] through prod[4*2-1] */

  /* Check for overflow by calculating the top half of the answer in full;
     it should agree with the low half's sign bit.  */
  decode (prod+4, &toplow, &tophigh);
  if (h1 < 0)
    {
      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  if (h2 < 0)
    {
      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}

/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
     int arith;
{
  if (count < 0)
    {
      rshift_double (l1, h1, - count, prec, lv, hv, arith);
      return;
    }

#ifdef SHIFT_COUNT_TRUNCATED
  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;
#endif

  if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = (unsigned HOST_WIDE_INT) l1 << count - HOST_BITS_PER_WIDE_INT;
      *lv = 0;
    }
  else
    {
      *hv = (((unsigned HOST_WIDE_INT) h1 << count)
	     | ((unsigned HOST_WIDE_INT) l1 >> HOST_BITS_PER_WIDE_INT - count - 1 >> 1));
      *lv = (unsigned HOST_WIDE_INT) l1 << count;
    }
}

/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
     int arith;
{
  unsigned HOST_WIDE_INT signmask;
  signmask = (arith
	      ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
	      : 0);

#ifdef SHIFT_COUNT_TRUNCATED
  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;
#endif

  if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = signmask;
      *lv = ((signmask << 2 * HOST_BITS_PER_WIDE_INT - count - 1 << 1)
	     | ((unsigned HOST_WIDE_INT) h1 >> count - HOST_BITS_PER_WIDE_INT));
    }
  else
    {
      *lv = (((unsigned HOST_WIDE_INT) l1 >> count)
	     | ((unsigned HOST_WIDE_INT) h1 << HOST_BITS_PER_WIDE_INT - count - 1 << 1));
      *hv = ((signmask << HOST_BITS_PER_WIDE_INT - count)
	     | ((unsigned HOST_WIDE_INT) h1 >> count));
    }
}

/* Rotate the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT s1l, s1h, s2l, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}

/* Rotate the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT s1l, s1h, s2l, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}

/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR
   It controls how the quotient is rounded to a integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */

int
div_and_round_double (code, uns,
		      lnum_orig, hnum_orig, lden_orig, hden_orig,
		      lquo, hquo, lrem, hrem)
     enum tree_code code;
     int uns;
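The excerpt above relies on two tricks worth seeing in isolation: overflow_sum_sign detects signed overflow purely from sign bits, and LOWPART/HIGHPART split each host word into two positive halfwords so the partial products in mul_double can never overflow a word. The following is a minimal standalone sketch of those two ideas only; it is not part of fold-const.c. It assumes a 64-bit `long long' stands in for HOST_WIDE_INT, and the names HBITS and main are illustrative.

/* Standalone sketch (not part of fold-const.c): halfword packing and the
   add_double-style carry/overflow test, with `long long' as the word.  */

#include <stdio.h>

#define HBITS 64
#define LOWPART(x)  ((x) & (((unsigned long long) 1 << (HBITS / 2)) - 1))
#define HIGHPART(x) ((unsigned long long) (x) >> (HBITS / 2))
#define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)

int
main (void)
{
  /* Doubleword addition, as in add_double: add the low words, then carry
     into the high words when the low sum wrapped around.  */
  long long l1 = -1, h1 = 0;	/* doubleword value 2^64 - 1 */
  long long l2 = 1,  h2 = 0;	/* doubleword value 1 */
  long long l = l1 + l2;
  long long h = h1 + h2 + ((unsigned long long) l < (unsigned long long) l1);
  unsigned long long x = 0x123456789abcdef0ULL;

  printf ("sum = (high %lld, low %lld), signed overflow: %d\n",
	  h, l, overflow_sum_sign (h1, h2, h));

  /* Halfword packing, as in encode/LOWPART/HIGHPART: split a word into two
     positive 32-bit halves so the partial products in mul_double fit.  */
  printf ("HIGHPART = 0x%llx, LOWPART = 0x%llx\n", HIGHPART (x), LOWPART (x));
  return 0;
}

The carry term is the same comparison add_double uses: the unsigned low sum is smaller than an operand exactly when the addition wrapped.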
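force_fit_type's single-word case first clears every bit above the type's precision and then, if the bit just below the precision is set, fills those cleared bits back in with ones to sign-extend. The sketch below restates that truncate-then-extend step with a hypothetical helper, fit_to_precision, which does not exist in fold-const.c; it again uses a 64-bit `long long' in place of HOST_WIDE_INT and assumes two's-complement arithmetic with 1 <= prec <= 63.

/* Standalone sketch (not part of fold-const.c) of the single-word
   truncate-and-sign-extend step performed by force_fit_type.  */

#include <stdio.h>

static long long
fit_to_precision (long long value, int prec)
{
  unsigned long long outside = ~0ULL << prec;	/* bits beyond the precision */

  /* First clear all bits that are beyond the precision.  */
  value &= ~outside;

  /* If the value's sign bit (bit PREC-1) is set, extend the sign by
     setting every bit outside the precision.  */
  if (value & (1ULL << (prec - 1)))
    value |= outside;

  return value;
}

int
main (void)
{
  /* 0xFF read as an 8-bit signed value is -1; 0x7F stays 127.  */
  printf ("%lld %lld\n", fit_to_precision (0xFF, 8), fit_to_precision (0x7F, 8));
  return 0;
}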