fold-const.c
/* Fold a constant sub-tree into a single node for C-compiler
   Copyright (C) 1987, 1988 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 1, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

/*@@ Fix lossage on folding division of big integers.  */

/*@@ This file should be rewritten to use an arbitrary precision
  @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
  @@ Perhaps the routines could also be used for bc/dc, and made a lib.
  @@ The routines that translate from the ap rep should
  @@ warn if precision et al. is lost.
  @@ This would also make life easier when this technology is used
  @@ for cross-compilers.  */

/* There are only two entry points in this file: fold and combine.

   fold takes a tree as argument and returns a simplified tree.

   combine takes a tree code for an arithmetic operation
   and two operands that are trees for constant values
   and returns the result of the specified operation on those values,
   also as a tree.  */

#include <stdio.h>
#include <setjmp.h>
#include "config.h"
#include "tree.h"

static void lshift_double ();
static void rshift_double ();
static void lrotate_double ();
static void rrotate_double ();

/* To do constant folding on INTEGER_CST nodes requires 64-bit arithmetic.
   We do that by representing the 64-bit integer as 8 shorts,
   with only 8 bits stored in each short, as a positive number.  */

/* Unpack a 64-bit integer into 8 shorts.
   LOW and HI are the integer, as two `int' pieces.
   SHORTS points to the array of shorts.  */

static void
encode (shorts, low, hi)
     short *shorts;
     int low, hi;
{
  shorts[0] = low & 0xff;
  shorts[1] = (low >> 8) & 0xff;
  shorts[2] = (low >> 16) & 0xff;
  shorts[3] = (low >> 24) & 0xff;
  shorts[4] = hi & 0xff;
  shorts[5] = (hi >> 8) & 0xff;
  shorts[6] = (hi >> 16) & 0xff;
  shorts[7] = (hi >> 24) & 0xff;
}

/* Pack an array of 8 shorts into a 64-bit integer.
   SHORTS points to the array of shorts.
   The integer is stored into *LOW and *HI as two `int' pieces.  */

static void
decode (shorts, low, hi)
     short *shorts;
     int *low, *hi;
{
  /* The casts in the following statement should not be needed,
     but they get around bugs in some C compilers.  */
  *low = (((long) shorts[3] << 24) | ((long) shorts[2] << 16)
	  | ((long) shorts[1] << 8) | (long) shorts[0]);
  *hi = (((long) shorts[7] << 24) | ((long) shorts[6] << 16)
	 | ((long) shorts[5] << 8) | (long) shorts[4]);
}
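/* Illustrative sketch, not part of the original GNU CC source: a minimal
   walk-through of how the 8-shorts representation round-trips through
   encode and decode above.  The function name `example_encode_decode' is
   hypothetical and exists only for this illustration; it assumes a host
   where `int' is 32 bits, as encode and decode themselves do.  */

static void
example_encode_decode ()
{
  short digits[8];
  int low, hi;

  /* The low word 0x01020304 unpacks into base-256 digits 4, 3, 2, 1
     (least significant first); the high word 0x0a0b0c0d fills
     digits[4] through digits[7] the same way.  */
  encode (digits, 0x01020304, 0x0a0b0c0d);

  /* Here digits[0] == 0x04, digits[3] == 0x01,
     digits[4] == 0x0d, digits[7] == 0x0a.  */

  /* Packing the digits back reproduces the original two pieces:
     low == 0x01020304 and hi == 0x0a0b0c0d again.  */
  decode (digits, &low, &hi);
}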
/* Make the integer constant T valid for its type
   by setting to 0 or 1 all the bits in the constant
   that don't belong in the type.  */

static void
force_fit_type (t)
     tree t;
{
  register int prec = TYPE_PRECISION (TREE_TYPE (t));

  if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
    prec = BITS_PER_WORD;

  /* First clear all bits that are beyond the type's precision.  */

  if (prec == 2 * HOST_BITS_PER_INT)
    ;
  else if (prec > HOST_BITS_PER_INT)
    {
      TREE_INT_CST_HIGH (t) &= ~((-1) << (prec - HOST_BITS_PER_INT));
    }
  else
    {
      TREE_INT_CST_HIGH (t) = 0;
      if (prec < HOST_BITS_PER_INT)
	TREE_INT_CST_LOW (t) &= ~((-1) << prec);
    }

  /* If it's a signed type and value's sign bit is set, extend the sign.  */

  if (! TREE_UNSIGNED (TREE_TYPE (t))
      && prec != 2 * HOST_BITS_PER_INT
      && (prec > HOST_BITS_PER_INT
	  ? TREE_INT_CST_HIGH (t) & (1 << (prec - HOST_BITS_PER_INT - 1))
	  : TREE_INT_CST_LOW (t) & (1 << (prec - 1))))
    {
      /* Value is negative:
	 set to 1 all the bits that are outside this type's precision.  */
      if (prec > HOST_BITS_PER_INT)
	{
	  TREE_INT_CST_HIGH (t) |= ((-1) << (prec - HOST_BITS_PER_INT));
	}
      else
	{
	  TREE_INT_CST_HIGH (t) = -1;
	  if (prec < HOST_BITS_PER_INT)
	    TREE_INT_CST_LOW (t) |= ((-1) << prec);
	}
    }
}

/* Add two 64-bit integers with 64-bit result.
   Each argument is given as two `int' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `int' pieces in *LV and *HV.
   We use the 8-shorts representation internally.  */

static void
add_double (l1, h1, l2, h2, lv, hv)
     int l1, h1, l2, h2;
     int *lv, *hv;
{
  short arg1[8];
  short arg2[8];
  register int carry = 0;
  register int i;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  for (i = 0; i < 8; i++)
    {
      carry += arg1[i] + arg2[i];
      arg1[i] = carry & 0xff;
      carry >>= 8;
    }

  decode (arg1, lv, hv);
}

/* Negate a 64-bit integer with 64-bit result.
   The argument is given as two `int' pieces in L1 and H1.
   The value is stored as two `int' pieces in *LV and *HV.
   We use the 8-shorts representation internally.  */

static void
neg_double (l1, h1, lv, hv)
     int l1, h1;
     int *lv, *hv;
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
    }
  else
    {
      *lv = - l1;
      *hv = ~ h1;
    }
}

/* Multiply two 64-bit integers with 64-bit result.
   Each argument is given as two `int' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `int' pieces in *LV and *HV.
   We use the 8-shorts representation internally.  */

static void
mul_double (l1, h1, l2, h2, lv, hv)
     int l1, h1, l2, h2;
     int *lv, *hv;
{
  short arg1[8];
  short arg2[8];
  short prod[16];
  register int carry = 0;
  register int i, j, k;

  /* These cases are used extensively, arising from pointer combinations.  */
  if (h2 == 0)
    {
      if (l2 == 2)
	{
	  unsigned temp = l1 + l1;
	  *hv = h1 * 2 + (temp < l1);
	  *lv = temp;
	  return;
	}
      if (l2 == 4)
	{
	  unsigned temp = l1 + l1;
	  h1 = h1 * 4 + ((temp < l1) << 1);
	  l1 = temp;
	  temp += temp;
	  h1 += (temp < l1);
	  *lv = temp;
	  *hv = h1;
	  return;
	}
      if (l2 == 8)
	{
	  unsigned temp = l1 + l1;
	  h1 = h1 * 8 + ((temp < l1) << 2);
	  l1 = temp;
	  temp += temp;
	  h1 += (temp < l1) << 1;
	  l1 = temp;
	  temp += temp;
	  h1 += (temp < l1);
	  *lv = temp;
	  *hv = h1;
	  return;
	}
    }

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  bzero (prod, sizeof prod);

  for (i = 0; i < 8; i++)
    for (j = 0; j < 8; j++)
      {
	k = i + j;
	carry = arg1[i] * arg2[j];
	while (carry)
	  {
	    carry += prod[k];
	    prod[k] = carry & 0xff;
	    carry >>= 8;
	    k++;
	  }
      }

  decode (prod, lv, hv);	/* @@decode ignores prod[8] -> prod[15] */
}
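/* Illustrative sketch, not part of the original GNU CC source: a minimal
   example of how the double-word helpers above combine.  There is no
   dedicated subtraction routine; a difference can be formed as addition of
   the negation, and mul_double's fast path for small power-of-two
   multipliers is exercised by multiplying by 8.  The function name
   `example_double_arith' is hypothetical, added only for this walk-through;
   it assumes a host with 32-bit `int', as the helpers above do.  */

static void
example_double_arith ()
{
  int lo, hi;
  int nlo, nhi;

  /* 0xffffffff + 1 carries out of the low word: result is low 0, high 1.  */
  add_double (0xffffffff, 0, 1, 0, &lo, &hi);

  /* 5 - 7 computed as 5 + (-7): neg_double yields low -7, high -1, and
     the sum is low 0xfffffffe, high -1, i.e. -2 in this representation.  */
  neg_double (7, 0, &nlo, &nhi);
  add_double (5, 0, nlo, nhi, &lo, &hi);

  /* (1 << 30) * 8 == 1 << 33: the l2 == 8 fast path gives low 0, high 2.  */
  mul_double (1 << 30, 0, 8, 0, &lo, &hi);
}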
/* Shift the 64-bit integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `int' pieces in *LV and *HV.  */

static void
lshift_double (l1, h1, count, prec, lv, hv, arith)
     int l1, h1, count, prec;
     int *lv, *hv;
     int arith;
{
  short arg1[8];
  register int i;
  register int carry;

  if (count < 0)
    {
      rshift_double (l1, h1, - count, prec, lv, hv, arith);
      return;
    }

  encode (arg1, l1, h1);

  if (count > prec)
    count = prec;

  while (count > 0)
    {
      carry = 0;
      for (i = 0; i < 8; i++)
	{
	  carry += arg1[i] << 1;
	  arg1[i] = carry & 0xff;
	  carry >>= 8;
	}
      count--;
    }

  decode (arg1, lv, hv);
}

/* Shift the 64-bit integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `int' pieces in *LV and *HV.  */

static void
rshift_double (l1, h1, count, prec, lv, hv, arith)
     int l1, h1, count, prec;
     int *lv, *hv;
     int arith;
{
  short arg1[8];
  register int i;
  register int carry;

  encode (arg1, l1, h1);

  if (count > prec)
    count = prec;

  while (count > 0)
    {
      carry = arith && arg1[7] >> 7;
      for (i = 7; i >= 0; i--)
	{
	  carry <<= 8;
	  carry += arg1[i];
	  arg1[i] = (carry >> 1) & 0xff;
	}
      count--;
    }

  decode (arg1, lv, hv);
}

/* Rotate the 64-bit integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `int' pieces in *LV and *HV.  */

static void
lrotate_double (l1, h1, count, prec, lv, hv)
     int l1, h1, count, prec;
     int *lv, *hv;
{
  short arg1[8];
  register int i;
  register int carry;

  if (count < 0)
    {
      rrotate_double (l1, h1, - count, prec, lv, hv);
      return;
    }

  encode (arg1, l1, h1);

  if (count > prec)
    count = prec;

  carry = arg1[7] >> 7;
  while (count > 0)
    {
      for (i = 0; i < 8; i++)
	{
	  carry += arg1[i] << 1;
	  arg1[i] = carry & 0xff;
	  carry >>= 8;
	}
      count--;
    }

  decode (arg1, lv, hv);
}

/* Rotate the 64-bit integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   Store the value as two `int' pieces in *LV and *HV.  */

static void
rrotate_double (l1, h1, count, prec, lv, hv)
     int l1, h1, count, prec;
     int *lv, *hv;
{
  short arg1[8];
  register int i;
  register int carry;

  encode (arg1, l1, h1);

  if (count > prec)
    count = prec;

  carry = arg1[0] & 1;
  while (count > 0)
    {
      for (i = 7; i >= 0; i--)
	{
	  carry <<= 8;
	  carry += arg1[i];
	  arg1[i] = (carry >> 1) & 0xff;
	}
      count--;
    }

  decode (arg1, lv, hv);
}

/* Divide 64-bit integer LNUM, HNUM by 64-bit integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO)
   and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR.  It controls how the quotient is rounded
   to an integer.
   UNS nonzero says do unsigned division.  */

static void
div_and_round_double (code, uns,
		      lnum_orig, hnum_orig, lden_orig, hden_orig,
		      lquo, hquo, lrem, hrem)
     enum tree_code code;
     int uns;
     int lnum_orig, hnum_orig;	/* num == numerator == dividend */
     int lden_orig, hden_orig;	/* den == denominator == divisor */
     int *lquo, *hquo, *lrem, *hrem;
{
  int quo_neg = 0;
  short num[9], den[8], quo[8];	/* extra element for scaling.  */
  register int i, j, work;
  register int carry = 0;
  unsigned int lnum = lnum_orig;
  int hnum = hnum_orig;
  unsigned int lden = lden_orig;
  int hden = hden_orig;

  if ((hden == 0) && (lden == 0))
    abort ();

  /* calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hden < 0)
	{
	  quo_neg = ~ quo_neg;
	  neg_double (lden, hden, &lden, &hden);
	}
      if (hnum < 0)
	{
	  quo_neg = ~ quo_neg;
	  neg_double (lnum, hnum, &lnum, &hnum);
	}
    }

  if (hnum == 0 && hden == 0)
    {				/* single precision */