]>
gcc.gnu.org Git - gcc.git/blob - gcc/fold-const.c
1 /* Fold a constant sub-tree into a single node for C-compiler
2 Copyright (C) 1987, 1988, 1992 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
20 /*@@ Fix lossage on folding division of big integers. */
22 /*@@ This file should be rewritten to use an arbitrary precision
23 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
24 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
25 @@ The routines that translate from the ap rep should
26 @@ warn if precision et. al. is lost.
27 @@ This would also make life easier when this technology is used
28 @@ for cross-compilers. */
31 /* The entry points in this file are fold, size_int and size_binop.
33 fold takes a tree as argument and returns a simplified tree.
35 size_binop takes a tree code for an arithmetic operation
36 and two operands that are trees, and produces a tree for the
37 result, assuming the type comes from `sizetype'.
39 size_int takes an integer value, and creates a tree constant
40 with type from `sizetype'. */
48 /* Handle floating overflow for `const_binop'. */
49 static jmp_buf float_error
;
52 void rshift_double ();
53 void lrotate_double ();
54 void rrotate_double ();
55 static tree
const_binop ();
/* Yield nonzero if a signed left shift of A by B bits overflows.
   The shifted value survives a round trip (shift left, then back right)
   exactly when no significant bits were pushed past the sign bit.  */
#define left_shift_overflows(a, b)  ((((a) << (b)) >> (b)) != (a))
/* Yield nonzero if A and B have the same sign.
   The XOR must be parenthesized: `>=' binds tighter than `^', so the
   unparenthesized form `(a) ^ (b) >= 0' actually computes
   `a ^ (b >= 0)', which is wrong (e.g. it reports -1 and 1 as having
   the same sign).  */
#define same_sign(a, b)  (((a) ^ (b)) >= 0)
/* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
   Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
   Then this yields nonzero if overflow occurred during the addition.
   Overflow occurs if A and B have the same sign, but A and SUM differ in sign:
   equivalently, if A differs from SUM in sign AND B differs from SUM in sign.
   Use `^' to test whether signs differ, and `< 0' to isolate the sign.  */
#define overflow_sum_sign(a, b, sum) (((((a) ^ (sum)) & ((b) ^ (sum)))) < 0)
74 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
75 We do that by representing the two-word integer as MAX_SHORTS shorts,
76 with only 8 bits stored in each short, as a positive number. */
78 /* Unpack a two-word integer into MAX_SHORTS shorts.
79 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
80 SHORTS points to the array of shorts. */
83 encode (shorts
, low
, hi
)
85 HOST_WIDE_INT low
, hi
;
89 for (i
= 0; i
< MAX_SHORTS
/ 2; i
++)
91 shorts
[i
] = (low
>> (i
* 8)) & 0xff;
92 shorts
[i
+ MAX_SHORTS
/ 2] = (hi
>> (i
* 8) & 0xff);
96 /* Pack an array of MAX_SHORTS shorts into a two-word integer.
97 SHORTS points to the array of shorts.
98 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
101 decode (shorts
, low
, hi
)
103 HOST_WIDE_INT
*low
, *hi
;
106 HOST_WIDE_INT lv
= 0, hv
= 0;
108 for (i
= 0; i
< MAX_SHORTS
/ 2; i
++)
110 lv
|= (HOST_WIDE_INT
) shorts
[i
] << (i
* 8);
111 hv
|= (HOST_WIDE_INT
) shorts
[i
+ MAX_SHORTS
/ 2] << (i
* 8);
117 /* Make the integer constant T valid for its type
118 by setting to 0 or 1 all the bits in the constant
119 that don't belong in the type. */
125 register int prec
= TYPE_PRECISION (TREE_TYPE (t
));
127 if (TREE_CODE (TREE_TYPE (t
)) == POINTER_TYPE
)
130 /* First clear all bits that are beyond the type's precision. */
132 if (prec
== 2 * HOST_BITS_PER_WIDE_INT
)
134 else if (prec
> HOST_BITS_PER_WIDE_INT
)
136 TREE_INT_CST_HIGH (t
)
137 &= ~((HOST_WIDE_INT
) (-1) << (prec
- HOST_BITS_PER_WIDE_INT
));
141 TREE_INT_CST_HIGH (t
) = 0;
142 if (prec
< HOST_BITS_PER_WIDE_INT
)
143 TREE_INT_CST_LOW (t
) &= ~((HOST_WIDE_INT
) (-1) << prec
);
146 /* If it's a signed type and value's sign bit is set, extend the sign. */
148 if (! TREE_UNSIGNED (TREE_TYPE (t
))
149 && prec
!= 2 * HOST_BITS_PER_WIDE_INT
150 && (prec
> HOST_BITS_PER_WIDE_INT
151 ? (TREE_INT_CST_HIGH (t
)
152 & ((HOST_WIDE_INT
) 1 << (prec
- HOST_BITS_PER_WIDE_INT
- 1)))
153 : TREE_INT_CST_LOW (t
) & ((HOST_WIDE_INT
) 1 << (prec
- 1))))
155 /* Value is negative:
156 set to 1 all the bits that are outside this type's precision. */
157 if (prec
> HOST_BITS_PER_WIDE_INT
)
159 TREE_INT_CST_HIGH (t
)
160 |= ((HOST_WIDE_INT
) (-1) << (prec
- HOST_BITS_PER_WIDE_INT
));
164 TREE_INT_CST_HIGH (t
) = -1;
165 if (prec
< HOST_BITS_PER_WIDE_INT
)
166 TREE_INT_CST_LOW (t
) |= ((HOST_WIDE_INT
) (-1) << prec
);
171 /* Add two doubleword integers with doubleword result.
172 Each argument is given as two `HOST_WIDE_INT' pieces.
173 One argument is L1 and H1; the other, L2 and H2.
174 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
175 We use the 8-shorts representation internally. */
178 add_double (l1
, h1
, l2
, h2
, lv
, hv
)
179 HOST_WIDE_INT l1
, h1
, l2
, h2
;
180 HOST_WIDE_INT
*lv
, *hv
;
182 short arg1
[MAX_SHORTS
];
183 short arg2
[MAX_SHORTS
];
184 register int carry
= 0;
187 encode (arg1
, l1
, h1
);
188 encode (arg2
, l2
, h2
);
190 for (i
= 0; i
< MAX_SHORTS
; i
++)
192 carry
+= arg1
[i
] + arg2
[i
];
193 arg1
[i
] = carry
& 0xff;
197 decode (arg1
, lv
, hv
);
198 return overflow_sum_sign (h1
, h2
, *hv
);
201 /* Negate a doubleword integer with doubleword result.
202 Return nonzero if the operation overflows, assuming it's signed.
203 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
204 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
205 We use the 8-shorts representation internally. */
208 neg_double (l1
, h1
, lv
, hv
)
209 HOST_WIDE_INT l1
, h1
;
210 HOST_WIDE_INT
*lv
, *hv
;
216 return same_sign (h1
, *hv
);
226 /* Multiply two doubleword integers with doubleword result.
227 Return nonzero if the operation overflows, assuming it's signed.
228 Each argument is given as two `HOST_WIDE_INT' pieces.
229 One argument is L1 and H1; the other, L2 and H2.
230 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
231 We use the 8-shorts representation internally. */
234 mul_double (l1
, h1
, l2
, h2
, lv
, hv
)
235 HOST_WIDE_INT l1
, h1
, l2
, h2
;
236 HOST_WIDE_INT
*lv
, *hv
;
238 short arg1
[MAX_SHORTS
];
239 short arg2
[MAX_SHORTS
];
240 short prod
[MAX_SHORTS
* 2];
241 register int carry
= 0;
242 register int i
, j
, k
;
243 HOST_WIDE_INT toplow
, tophigh
, neglow
, neghigh
;
245 /* These cases are used extensively, arising from pointer combinations. */
250 int overflow
= left_shift_overflows (h1
, 1);
251 unsigned HOST_WIDE_INT temp
= l1
+ l1
;
252 *hv
= (h1
<< 1) + (temp
< l1
);
258 int overflow
= left_shift_overflows (h1
, 2);
259 unsigned HOST_WIDE_INT temp
= l1
+ l1
;
260 h1
= (h1
<< 2) + ((temp
< l1
) << 1);
270 int overflow
= left_shift_overflows (h1
, 3);
271 unsigned HOST_WIDE_INT temp
= l1
+ l1
;
272 h1
= (h1
<< 3) + ((temp
< l1
) << 2);
275 h1
+= (temp
< l1
) << 1;
285 encode (arg1
, l1
, h1
);
286 encode (arg2
, l2
, h2
);
288 bzero (prod
, sizeof prod
);
290 for (i
= 0; i
< MAX_SHORTS
; i
++)
291 for (j
= 0; j
< MAX_SHORTS
; j
++)
294 carry
= arg1
[i
] * arg2
[j
];
298 prod
[k
] = carry
& 0xff;
304 decode (prod
, lv
, hv
); /* This ignores
305 prod[MAX_SHORTS] -> prod[MAX_SHORTS*2-1] */
307 /* Check for overflow by calculating the top half of the answer in full;
308 it should agree with the low half's sign bit. */
309 decode (prod
+MAX_SHORTS
, &toplow
, &tophigh
);
312 neg_double (l2
, h2
, &neglow
, &neghigh
);
313 add_double (neglow
, neghigh
, toplow
, tophigh
, &toplow
, &tophigh
);
317 neg_double (l1
, h1
, &neglow
, &neghigh
);
318 add_double (neglow
, neghigh
, toplow
, tophigh
, &toplow
, &tophigh
);
320 return (*hv
< 0 ? ~(toplow
& tophigh
) : toplow
| tophigh
) != 0;
323 /* Shift the doubleword integer in L1, H1 left by COUNT places
324 keeping only PREC bits of result.
325 Shift right if COUNT is negative.
326 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
327 Return nonzero if the arithmetic shift overflows, assuming it's signed.
328 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
331 lshift_double (l1
, h1
, count
, prec
, lv
, hv
, arith
)
332 HOST_WIDE_INT l1
, h1
;
334 HOST_WIDE_INT
*lv
, *hv
;
337 short arg1
[MAX_SHORTS
];
339 register int carry
, overflow
;
343 rshift_double (l1
, h1
, - count
, prec
, lv
, hv
, arith
);
347 encode (arg1
, l1
, h1
);
356 for (i
= 0; i
< MAX_SHORTS
; i
++)
358 carry
+= arg1
[i
] << 1;
359 arg1
[i
] = carry
& 0xff;
363 overflow
|= carry
^ (arg1
[7] >> 7);
366 decode (arg1
, lv
, hv
);
370 /* Shift the doubleword integer in L1, H1 right by COUNT places
371 keeping only PREC bits of result. COUNT must be positive.
372 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
373 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
376 rshift_double (l1
, h1
, count
, prec
, lv
, hv
, arith
)
377 HOST_WIDE_INT l1
, h1
, count
, prec
;
378 HOST_WIDE_INT
*lv
, *hv
;
381 short arg1
[MAX_SHORTS
];
385 encode (arg1
, l1
, h1
);
392 carry
= arith
&& arg1
[7] >> 7;
393 for (i
= MAX_SHORTS
- 1; i
>= 0; i
--)
397 arg1
[i
] = (carry
>> 1) & 0xff;
402 decode (arg1
, lv
, hv
);
405 /* Rotate the doubleword integer in L1, H1 left by COUNT places
406 keeping only PREC bits of result.
407 Rotate right if COUNT is negative.
408 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
411 lrotate_double (l1
, h1
, count
, prec
, lv
, hv
)
412 HOST_WIDE_INT l1
, h1
, count
, prec
;
413 HOST_WIDE_INT
*lv
, *hv
;
415 short arg1
[MAX_SHORTS
];
421 rrotate_double (l1
, h1
, - count
, prec
, lv
, hv
);
425 encode (arg1
, l1
, h1
);
430 carry
= arg1
[MAX_SHORTS
- 1] >> 7;
433 for (i
= 0; i
< MAX_SHORTS
; i
++)
435 carry
+= arg1
[i
] << 1;
436 arg1
[i
] = carry
& 0xff;
442 decode (arg1
, lv
, hv
);
445 /* Rotate the doubleword integer in L1, H1 right by COUNT places
446 keeping only PREC bits of result. COUNT must be positive.
447 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
450 rrotate_double (l1
, h1
, count
, prec
, lv
, hv
)
451 HOST_WIDE_INT l1
, h1
, count
, prec
;
452 HOST_WIDE_INT
*lv
, *hv
;
454 short arg1
[MAX_SHORTS
];
458 encode (arg1
, l1
, h1
);
466 for (i
= MAX_SHORTS
- 1; i
>= 0; i
--)
470 arg1
[i
] = (carry
>> 1) & 0xff;
475 decode (arg1
, lv
, hv
);
478 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
479 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
480 CODE is a tree code for a kind of division, one of
481 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
483 It controls how the quotient is rounded to an integer.
484 Return nonzero if the operation overflows.
485 UNS nonzero says do unsigned division. */
488 div_and_round_double (code
, uns
,
489 lnum_orig
, hnum_orig
, lden_orig
, hden_orig
,
490 lquo
, hquo
, lrem
, hrem
)
493 HOST_WIDE_INT lnum_orig
, hnum_orig
; /* num == numerator == dividend */
494 HOST_WIDE_INT lden_orig
, hden_orig
; /* den == denominator == divisor */
495 HOST_WIDE_INT
*lquo
, *hquo
, *lrem
, *hrem
;
498 short num
[MAX_SHORTS
+ 1]; /* extra element for scaling. */
499 short den
[MAX_SHORTS
], quo
[MAX_SHORTS
];
500 register int i
, j
, work
;
501 register int carry
= 0;
502 unsigned HOST_WIDE_INT lnum
= lnum_orig
;
503 HOST_WIDE_INT hnum
= hnum_orig
;
504 unsigned HOST_WIDE_INT lden
= lden_orig
;
505 HOST_WIDE_INT hden
= hden_orig
;
508 if ((hden
== 0) && (lden
== 0))
511 /* calculate quotient sign and convert operands to unsigned. */
517 /* (minimum integer) / (-1) is the only overflow case. */
518 if (neg_double (lnum
, hnum
, &lnum
, &hnum
) && (lden
& hden
) == -1)
524 neg_double (lden
, hden
, &lden
, &hden
);
528 if (hnum
== 0 && hden
== 0)
529 { /* single precision */
531 *lquo
= lnum
/ lden
; /* rounds toward zero since positive args */
536 { /* trivial case: dividend < divisor */
537 /* hden != 0 already checked. */
544 bzero (quo
, sizeof quo
);
546 bzero (num
, sizeof num
); /* to zero 9th element */
547 bzero (den
, sizeof den
);
549 encode (num
, lnum
, hnum
);
550 encode (den
, lden
, hden
);
552 /* This code requires more than just hden == 0.
553 We also have to require that we don't need more than three bytes
554 to hold CARRY. If we ever did need four bytes to hold it, we
555 would lose part of it when computing WORK on the next round. */
556 if (hden
== 0 && ((lden
<< 8) >> 8) == lden
)
557 { /* simpler algorithm */
558 /* hnum != 0 already checked. */
559 for (i
= MAX_SHORTS
- 1; i
>= 0; i
--)
561 work
= num
[i
] + (carry
<< 8);
562 quo
[i
] = work
/ lden
;
566 else { /* full double precision,
567 with thanks to Don Knuth's
568 "Seminumerical Algorithms". */
570 int quo_est
, scale
, num_hi_sig
, den_hi_sig
, quo_hi_sig
;
572 /* Find the highest non-zero divisor digit. */
573 for (i
= MAX_SHORTS
- 1; ; i
--)
578 for (i
= MAX_SHORTS
- 1; ; i
--)
583 quo_hi_sig
= num_hi_sig
- den_hi_sig
+ 1;
585 /* Ensure that the first digit of the divisor is at least BASE/2.
586 This is required by the quotient digit estimation algorithm. */
588 scale
= BASE
/ (den
[den_hi_sig
] + 1);
589 if (scale
> 1) { /* scale divisor and dividend */
591 for (i
= 0; i
<= MAX_SHORTS
- 1; i
++) {
592 work
= (num
[i
] * scale
) + carry
;
593 num
[i
] = work
& 0xff;
595 if (num
[i
] != 0) num_hi_sig
= i
;
598 for (i
= 0; i
<= MAX_SHORTS
- 1; i
++) {
599 work
= (den
[i
] * scale
) + carry
;
600 den
[i
] = work
& 0xff;
602 if (den
[i
] != 0) den_hi_sig
= i
;
607 for (i
= quo_hi_sig
; i
> 0; i
--) {
608 /* guess the next quotient digit, quo_est, by dividing the first
609 two remaining dividend digits by the high order quotient digit.
610 quo_est is never low and is at most 2 high. */
612 int num_hi
; /* index of highest remaining dividend digit */
614 num_hi
= i
+ den_hi_sig
;
616 work
= (num
[num_hi
] * BASE
) + (num_hi
> 0 ? num
[num_hi
- 1] : 0);
617 if (num
[num_hi
] != den
[den_hi_sig
]) {
618 quo_est
= work
/ den
[den_hi_sig
];
624 /* refine quo_est so it's usually correct, and at most one high. */
625 while ((den
[den_hi_sig
- 1] * quo_est
)
626 > (((work
- (quo_est
* den
[den_hi_sig
])) * BASE
)
627 + ((num_hi
- 1) > 0 ? num
[num_hi
- 2] : 0)))
630 /* Try QUO_EST as the quotient digit, by multiplying the
631 divisor by QUO_EST and subtracting from the remaining dividend.
632 Keep in mind that QUO_EST is the I - 1st digit. */
636 for (j
= 0; j
<= den_hi_sig
; j
++)
640 work
= num
[i
+ j
- 1] - (quo_est
* den
[j
]) + carry
;
648 num
[i
+ j
- 1] = digit
;
651 /* if quo_est was high by one, then num[i] went negative and
652 we need to correct things. */
657 carry
= 0; /* add divisor back in */
658 for (j
= 0; j
<= den_hi_sig
; j
++)
660 work
= num
[i
+ j
- 1] + den
[j
] + carry
;
670 num
[i
+ j
- 1] = work
;
672 num
[num_hi
] += carry
;
675 /* store the quotient digit. */
676 quo
[i
- 1] = quo_est
;
680 decode (quo
, lquo
, hquo
);
683 /* if result is negative, make it so. */
685 neg_double (*lquo
, *hquo
, lquo
, hquo
);
687 /* compute trial remainder: rem = num - (quo * den) */
688 mul_double (*lquo
, *hquo
, lden_orig
, hden_orig
, lrem
, hrem
);
689 neg_double (*lrem
, *hrem
, lrem
, hrem
);
690 add_double (lnum_orig
, hnum_orig
, *lrem
, *hrem
, lrem
, hrem
);
695 case TRUNC_MOD_EXPR
: /* round toward zero */
696 case EXACT_DIV_EXPR
: /* for this one, it shouldn't matter */
700 case FLOOR_MOD_EXPR
: /* round toward negative infinity */
701 if (quo_neg
&& (*lrem
!= 0 || *hrem
!= 0)) /* ratio < 0 && rem != 0 */
704 add_double (*lquo
, *hquo
, (HOST_WIDE_INT
) -1, (HOST_WIDE_INT
) -1,
707 else return overflow
;
711 case CEIL_MOD_EXPR
: /* round toward positive infinity */
712 if (!quo_neg
&& (*lrem
!= 0 || *hrem
!= 0)) /* ratio > 0 && rem != 0 */
714 add_double (*lquo
, *hquo
, (HOST_WIDE_INT
) 1, (HOST_WIDE_INT
) 0,
717 else return overflow
;
721 case ROUND_MOD_EXPR
: /* round to closest integer */
723 HOST_WIDE_INT labs_rem
= *lrem
, habs_rem
= *hrem
;
724 HOST_WIDE_INT labs_den
= lden
, habs_den
= hden
, ltwice
, htwice
;
726 /* get absolute values */
727 if (*hrem
< 0) neg_double (*lrem
, *hrem
, &labs_rem
, &habs_rem
);
728 if (hden
< 0) neg_double (lden
, hden
, &labs_den
, &habs_den
);
730 /* if (2 * abs (lrem) >= abs (lden)) */
731 mul_double ((HOST_WIDE_INT
) 2, (HOST_WIDE_INT
) 0,
732 labs_rem
, habs_rem
, <wice
, &htwice
);
733 if (((unsigned HOST_WIDE_INT
) habs_den
734 < (unsigned HOST_WIDE_INT
) htwice
)
735 || (((unsigned HOST_WIDE_INT
) habs_den
736 == (unsigned HOST_WIDE_INT
) htwice
)
737 && ((HOST_WIDE_INT
unsigned) labs_den
738 < (unsigned HOST_WIDE_INT
) ltwice
)))
742 add_double (*lquo
, *hquo
,
743 (HOST_WIDE_INT
) -1, (HOST_WIDE_INT
) -1, lquo
, hquo
);
746 add_double (*lquo
, *hquo
, (HOST_WIDE_INT
) 1, (HOST_WIDE_INT
) 0,
749 else return overflow
;
757 /* compute true remainder: rem = num - (quo * den) */
758 mul_double (*lquo
, *hquo
, lden_orig
, hden_orig
, lrem
, hrem
);
759 neg_double (*lrem
, *hrem
, lrem
, hrem
);
760 add_double (lnum_orig
, hnum_orig
, *lrem
, *hrem
, lrem
, hrem
);
764 /* Effectively truncate a real value to represent
765 the nearest possible value in a narrower mode.
766 The result is actually represented in the same data type as the argument,
767 but its value is usually different. */
770 real_value_truncate (mode
, arg
)
771 enum machine_mode mode
;
775 /* Make sure the value is actually stored in memory before we turn off
779 REAL_VALUE_TYPE value
;
780 jmp_buf handler
, old_handler
;
783 if (setjmp (handler
))
785 error ("floating overflow");
788 handled
= push_float_handler (handler
, old_handler
);
789 value
= REAL_VALUE_TRUNCATE (mode
, arg
);
790 pop_float_handler (handled
, old_handler
);
794 #if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
796 /* Check for infinity in an IEEE double precision number. */
802 /* The IEEE 64-bit double format. */
807 unsigned exponent
: 11;
808 unsigned mantissa1
: 20;
813 unsigned mantissa1
: 20;
814 unsigned exponent
: 11;
820 if (u
.big_endian
.sign
== 1)
823 return (u
.big_endian
.exponent
== 2047
824 && u
.big_endian
.mantissa1
== 0
825 && u
.big_endian
.mantissa2
== 0);
830 return (u
.little_endian
.exponent
== 2047
831 && u
.little_endian
.mantissa1
== 0
832 && u
.little_endian
.mantissa2
== 0);
836 /* Check whether an IEEE double precision number is a NaN. */
842 /* The IEEE 64-bit double format. */
847 unsigned exponent
: 11;
848 unsigned mantissa1
: 20;
853 unsigned mantissa1
: 20;
854 unsigned exponent
: 11;
860 if (u
.big_endian
.sign
== 1)
863 return (u
.big_endian
.exponent
== 2047
864 && (u
.big_endian
.mantissa1
!= 0
865 || u
.big_endian
.mantissa2
!= 0));
870 return (u
.little_endian
.exponent
== 2047
871 && (u
.little_endian
.mantissa1
!= 0
872 || u
.little_endian
.mantissa2
!= 0));
876 /* Check for a negative IEEE double precision number. */
882 /* The IEEE 64-bit double format. */
887 unsigned exponent
: 11;
888 unsigned mantissa1
: 20;
893 unsigned mantissa1
: 20;
894 unsigned exponent
: 11;
900 if (u
.big_endian
.sign
== 1)
903 return u
.big_endian
.sign
;
908 return u
.little_endian
.sign
;
911 #else /* Target not IEEE */
913 /* Let's assume other float formats don't have infinity.
914 (This can be overridden by redefining REAL_VALUE_ISINF.) */
922 /* Let's assume other float formats don't have NaNs.
923 (This can be overridden by redefining REAL_VALUE_ISNAN.) */
931 /* Let's assume other float formats don't have minus zero.
932 (This can be overridden by redefining REAL_VALUE_NEGATIVE.) */
939 #endif /* Target not IEEE */
941 /* Split a tree IN into a constant and a variable part
942 that could be combined with CODE to make IN.
943 CODE must be a commutative arithmetic operation.
944 Store the constant part into *CONP and the variable in &VARP.
945 Return 1 if this was done; zero means the tree IN did not decompose
948 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
949 Therefore, we must tell the caller whether the variable part
950 was subtracted. We do this by storing 1 or -1 into *VARSIGNP.
951 The value stored is the coefficient for the variable term.
952 The constant term we return should always be added;
953 we negate it if necessary. */
956 split_tree (in
, code
, varp
, conp
, varsignp
)
962 register tree outtype
= TREE_TYPE (in
);
966 /* Strip any conversions that don't change the machine mode. */
967 while ((TREE_CODE (in
) == NOP_EXPR
968 || TREE_CODE (in
) == CONVERT_EXPR
)
969 && (TYPE_MODE (TREE_TYPE (in
))
970 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in
, 0)))))
971 in
= TREE_OPERAND (in
, 0);
973 if (TREE_CODE (in
) == code
974 || (TREE_CODE (TREE_TYPE (in
)) != REAL_TYPE
975 /* We can associate addition and subtraction together
976 (even though the C standard doesn't say so)
977 for integers because the value is not affected.
978 For reals, the value might be affected, so we can't. */
980 ((code
== PLUS_EXPR
&& TREE_CODE (in
) == MINUS_EXPR
)
981 || (code
== MINUS_EXPR
&& TREE_CODE (in
) == PLUS_EXPR
))))
983 enum tree_code code
= TREE_CODE (TREE_OPERAND (in
, 0));
984 if (code
== INTEGER_CST
)
986 *conp
= TREE_OPERAND (in
, 0);
987 *varp
= TREE_OPERAND (in
, 1);
988 if (TYPE_MODE (TREE_TYPE (*varp
)) != TYPE_MODE (outtype
)
989 && TREE_TYPE (*varp
) != outtype
)
990 *varp
= convert (outtype
, *varp
);
991 *varsignp
= (TREE_CODE (in
) == MINUS_EXPR
) ? -1 : 1;
994 if (TREE_CONSTANT (TREE_OPERAND (in
, 1)))
996 *conp
= TREE_OPERAND (in
, 1);
997 *varp
= TREE_OPERAND (in
, 0);
999 if (TYPE_MODE (TREE_TYPE (*varp
)) != TYPE_MODE (outtype
)
1000 && TREE_TYPE (*varp
) != outtype
)
1001 *varp
= convert (outtype
, *varp
);
1002 if (TREE_CODE (in
) == MINUS_EXPR
)
1004 /* If operation is subtraction and constant is second,
1005 must negate it to get an additive constant.
1006 And this cannot be done unless it is a manifest constant.
1007 It could also be the address of a static variable.
1008 We cannot negate that, so give up. */
1009 if (TREE_CODE (*conp
) == INTEGER_CST
)
1010 /* Subtracting from integer_zero_node loses for long long. */
1011 *conp
= fold (build1 (NEGATE_EXPR
, TREE_TYPE (*conp
), *conp
));
1017 if (TREE_CONSTANT (TREE_OPERAND (in
, 0)))
1019 *conp
= TREE_OPERAND (in
, 0);
1020 *varp
= TREE_OPERAND (in
, 1);
1021 if (TYPE_MODE (TREE_TYPE (*varp
)) != TYPE_MODE (outtype
)
1022 && TREE_TYPE (*varp
) != outtype
)
1023 *varp
= convert (outtype
, *varp
);
1024 *varsignp
= (TREE_CODE (in
) == MINUS_EXPR
) ? -1 : 1;
1031 /* Combine two constants NUM and ARG2 under operation CODE
1032 to produce a new constant.
1033 We assume ARG1 and ARG2 have the same data type,
1034 or at least are the same kind of constant and the same machine mode.
1036 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
1039 const_binop (code
, arg1
, arg2
, notrunc
)
1040 enum tree_code code
;
1041 register tree arg1
, arg2
;
1044 if (TREE_CODE (arg1
) == INTEGER_CST
)
1046 register HOST_WIDE_INT int1l
= TREE_INT_CST_LOW (arg1
);
1047 register HOST_WIDE_INT int1h
= TREE_INT_CST_HIGH (arg1
);
1048 HOST_WIDE_INT int2l
= TREE_INT_CST_LOW (arg2
);
1049 HOST_WIDE_INT int2h
= TREE_INT_CST_HIGH (arg2
);
1050 HOST_WIDE_INT low
, hi
;
1051 HOST_WIDE_INT garbagel
, garbageh
;
1053 int uns
= TREE_UNSIGNED (TREE_TYPE (arg1
));
1054 /* Propagate overflow flags from operands; also record new overflow. */
1056 = TREE_CONSTANT_OVERFLOW (arg1
) | TREE_CONSTANT_OVERFLOW (arg2
);
1061 t
= build_int_2 (int1l
| int2l
, int1h
| int2h
);
1065 t
= build_int_2 (int1l
^ int2l
, int1h
^ int2h
);
1069 t
= build_int_2 (int1l
& int2l
, int1h
& int2h
);
1072 case BIT_ANDTC_EXPR
:
1073 t
= build_int_2 (int1l
& ~int2l
, int1h
& ~int2h
);
1079 overflow
= lshift_double (int1l
, int1h
, int2l
,
1080 TYPE_PRECISION (TREE_TYPE (arg1
)),
1083 t
= build_int_2 (low
, hi
);
1089 lrotate_double (int1l
, int1h
, int2l
,
1090 TYPE_PRECISION (TREE_TYPE (arg1
)),
1092 t
= build_int_2 (low
, hi
);
1099 if ((unsigned HOST_WIDE_INT
) int2l
< int1l
)
1102 overflow
= ! same_sign (hi
, int2h
);
1104 t
= build_int_2 (int2l
, int2h
);
1110 if ((unsigned HOST_WIDE_INT
) int1l
< int2l
)
1113 overflow
= ! same_sign (hi
, int1h
);
1115 t
= build_int_2 (int1l
, int1h
);
1118 overflow
= add_double (int1l
, int1h
, int2l
, int2h
, &low
, &hi
);
1119 t
= build_int_2 (low
, hi
);
1123 if (int2h
== 0 && int2l
== 0)
1125 t
= build_int_2 (int1l
, int1h
);
1128 neg_double (int2l
, int2h
, &low
, &hi
);
1129 add_double (int1l
, int1h
, low
, hi
, &low
, &hi
);
1130 overflow
= overflow_sum_sign (hi
, int2h
, int1h
);
1131 t
= build_int_2 (low
, hi
);
1135 /* Optimize simple cases. */
1138 unsigned HOST_WIDE_INT temp
;
1143 t
= build_int_2 (0, 0);
1146 t
= build_int_2 (int2l
, int2h
);
1149 overflow
= left_shift_overflows (int2h
, 1);
1150 temp
= int2l
+ int2l
;
1151 int2h
= (int2h
<< 1) + (temp
< int2l
);
1152 t
= build_int_2 (temp
, int2h
);
1154 #if 0 /* This code can lose carries. */
1156 temp
= int2l
+ int2l
+ int2l
;
1157 int2h
= int2h
* 3 + (temp
< int2l
);
1158 t
= build_int_2 (temp
, int2h
);
1162 overflow
= left_shift_overflows (int2h
, 2);
1163 temp
= int2l
+ int2l
;
1164 int2h
= (int2h
<< 2) + ((temp
< int2l
) << 1);
1167 int2h
+= (temp
< int2l
);
1168 t
= build_int_2 (temp
, int2h
);
1171 overflow
= left_shift_overflows (int2h
, 3);
1172 temp
= int2l
+ int2l
;
1173 int2h
= (int2h
<< 3) + ((temp
< int2l
) << 2);
1176 int2h
+= (temp
< int2l
) << 1;
1179 int2h
+= (temp
< int2l
);
1180 t
= build_int_2 (temp
, int2h
);
1191 t
= build_int_2 (0, 0);
1196 t
= build_int_2 (int1l
, int1h
);
1201 overflow
= mul_double (int1l
, int1h
, int2l
, int2h
, &low
, &hi
);
1202 t
= build_int_2 (low
, hi
);
1205 case TRUNC_DIV_EXPR
:
1206 case FLOOR_DIV_EXPR
: case CEIL_DIV_EXPR
:
1207 case EXACT_DIV_EXPR
:
1208 /* This is a shortcut for a common special case.
1209 It reduces the number of tree nodes generated
1211 if (int2h
== 0 && int2l
> 0
1212 && TREE_TYPE (arg1
) == sizetype
1213 && int1h
== 0 && int1l
>= 0)
1215 if (code
== CEIL_DIV_EXPR
)
1217 return size_int (int1l
/ int2l
);
1219 case ROUND_DIV_EXPR
:
1220 if (int2h
== 0 && int2l
== 1)
1222 t
= build_int_2 (int1l
, int1h
);
1225 if (int1l
== int2l
&& int1h
== int2h
)
1227 if ((int1l
| int1h
) == 0)
1229 t
= build_int_2 (1, 0);
1232 overflow
= div_and_round_double (code
, uns
,
1233 int1l
, int1h
, int2l
, int2h
,
1234 &low
, &hi
, &garbagel
, &garbageh
);
1235 t
= build_int_2 (low
, hi
);
1238 case TRUNC_MOD_EXPR
: case ROUND_MOD_EXPR
:
1239 case FLOOR_MOD_EXPR
: case CEIL_MOD_EXPR
:
1240 overflow
= div_and_round_double (code
, uns
,
1241 int1l
, int1h
, int2l
, int2h
,
1242 &garbagel
, &garbageh
, &low
, &hi
);
1243 t
= build_int_2 (low
, hi
);
1250 low
= (((unsigned HOST_WIDE_INT
) int1h
1251 < (unsigned HOST_WIDE_INT
) int2h
)
1252 || (((unsigned HOST_WIDE_INT
) int1h
1253 == (unsigned HOST_WIDE_INT
) int2h
)
1254 && ((unsigned HOST_WIDE_INT
) int1l
1255 < (unsigned HOST_WIDE_INT
) int2l
)));
1259 low
= ((int1h
< int2h
)
1260 || ((int1h
== int2h
)
1261 && ((unsigned HOST_WIDE_INT
) int1l
1262 < (unsigned HOST_WIDE_INT
) int2l
)));
1264 if (low
== (code
== MIN_EXPR
))
1265 t
= build_int_2 (int1l
, int1h
);
1267 t
= build_int_2 (int2l
, int2h
);
1274 TREE_TYPE (t
) = TREE_TYPE (arg1
);
1277 TREE_CONSTANT_OVERFLOW (t
) = overflow
;
1280 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1281 if (TREE_CODE (arg1
) == REAL_CST
)
1283 register REAL_VALUE_TYPE d1
;
1284 register REAL_VALUE_TYPE d2
;
1285 register REAL_VALUE_TYPE value
;
1288 d1
= TREE_REAL_CST (arg1
);
1289 d2
= TREE_REAL_CST (arg2
);
1290 if (setjmp (float_error
))
1292 pedwarn ("floating overflow in constant expression");
1293 return build (code
, TREE_TYPE (arg1
), arg1
, arg2
);
1295 set_float_handler (float_error
);
1297 #ifdef REAL_ARITHMETIC
1298 REAL_ARITHMETIC (value
, code
, d1
, d2
);
1315 #ifndef REAL_INFINITY
1324 value
= MIN (d1
, d2
);
1328 value
= MAX (d1
, d2
);
1334 #endif /* no REAL_ARITHMETIC */
1335 t
= build_real (TREE_TYPE (arg1
),
1336 real_value_truncate (TYPE_MODE (TREE_TYPE (arg1
)), value
));
1337 set_float_handler (NULL_PTR
);
1340 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1341 if (TREE_CODE (arg1
) == COMPLEX_CST
)
1343 register tree r1
= TREE_REALPART (arg1
);
1344 register tree i1
= TREE_IMAGPART (arg1
);
1345 register tree r2
= TREE_REALPART (arg2
);
1346 register tree i2
= TREE_IMAGPART (arg2
);
1352 t
= build_complex (const_binop (PLUS_EXPR
, r1
, r2
, notrunc
),
1353 const_binop (PLUS_EXPR
, i1
, i2
, notrunc
));
1357 t
= build_complex (const_binop (MINUS_EXPR
, r1
, r2
, notrunc
),
1358 const_binop (MINUS_EXPR
, i1
, i2
, notrunc
));
1362 t
= build_complex (const_binop (MINUS_EXPR
,
1363 const_binop (MULT_EXPR
,
1365 const_binop (MULT_EXPR
,
1368 const_binop (PLUS_EXPR
,
1369 const_binop (MULT_EXPR
,
1371 const_binop (MULT_EXPR
,
1378 register tree magsquared
1379 = const_binop (PLUS_EXPR
,
1380 const_binop (MULT_EXPR
, r2
, r2
, notrunc
),
1381 const_binop (MULT_EXPR
, i2
, i2
, notrunc
),
1383 t
= build_complex (const_binop (RDIV_EXPR
,
1384 const_binop (PLUS_EXPR
,
1385 const_binop (MULT_EXPR
, r1
, r2
, notrunc
),
1386 const_binop (MULT_EXPR
, i1
, i2
, notrunc
),
1388 magsquared
, notrunc
),
1389 const_binop (RDIV_EXPR
,
1390 const_binop (MINUS_EXPR
,
1391 const_binop (MULT_EXPR
, i1
, r2
, notrunc
),
1392 const_binop (MULT_EXPR
, r1
, i2
, notrunc
),
1394 magsquared
, notrunc
));
1401 TREE_TYPE (t
) = TREE_TYPE (arg1
);
1407 /* Return an INTEGER_CST with value V and type from `sizetype'. */
1411 unsigned int number
;
1414 /* Type-size nodes already made for small sizes. */
1415 static tree size_table
[2*HOST_BITS_PER_WIDE_INT
+ 1];
1417 if (number
>= 0 && number
< 2*HOST_BITS_PER_WIDE_INT
+ 1
1418 && size_table
[number
] != 0)
1419 return size_table
[number
];
1420 if (number
>= 0 && number
< 2*HOST_BITS_PER_WIDE_INT
+ 1)
1422 push_obstacks_nochange ();
1423 /* Make this a permanent node. */
1424 end_temporary_allocation ();
1425 t
= build_int_2 (number
, 0);
1426 TREE_TYPE (t
) = sizetype
;
1427 size_table
[number
] = t
;
1432 t
= build_int_2 (number
, 0);
1433 TREE_TYPE (t
) = sizetype
;
1438 /* Combine operands OP1 and OP2 with arithmetic operation CODE.
1439 CODE is a tree code. Data type is taken from `sizetype',
1440 If the operands are constant, so is the result. */
1443 size_binop (code
, arg0
, arg1
)
1444 enum tree_code code
;
1447 /* Handle the special case of two integer constants faster. */
1448 if (TREE_CODE (arg0
) == INTEGER_CST
&& TREE_CODE (arg1
) == INTEGER_CST
)
1450 /* And some specific cases even faster than that. */
1451 if (code
== PLUS_EXPR
1452 && TREE_INT_CST_LOW (arg0
) == 0
1453 && TREE_INT_CST_HIGH (arg0
) == 0)
1455 if (code
== MINUS_EXPR
1456 && TREE_INT_CST_LOW (arg1
) == 0
1457 && TREE_INT_CST_HIGH (arg1
) == 0)
1459 if (code
== MULT_EXPR
1460 && TREE_INT_CST_LOW (arg0
) == 1
1461 && TREE_INT_CST_HIGH (arg0
) == 0)
1463 /* Handle general case of two integer constants. */
1464 return const_binop (code
, arg0
, arg1
, 1);
1467 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
1468 return error_mark_node
;
1470 return fold (build (code
, sizetype
, arg0
, arg1
));
1473 /* Given T, a tree representing type conversion of ARG1, a constant,
1474 return a constant tree representing the result of conversion. */
1477 fold_convert (t
, arg1
)
1481 register tree type
= TREE_TYPE (t
);
1483 if (TREE_CODE (type
) == POINTER_TYPE
1484 || TREE_CODE (type
) == INTEGER_TYPE
1485 || TREE_CODE (type
) == ENUMERAL_TYPE
)
1487 if (TREE_CODE (arg1
) == INTEGER_CST
)
1489 /* Given an integer constant, make new constant with new type,
1490 appropriately sign-extended or truncated. */
1491 t
= build_int_2 (TREE_INT_CST_LOW (arg1
),
1492 TREE_INT_CST_HIGH (arg1
));
1493 /* Carry forward overflow indication unless truncating. */
1494 if (TYPE_PRECISION (type
) >= TYPE_PRECISION (TREE_TYPE (t
)))
1495 TREE_CONSTANT_OVERFLOW (t
) = TREE_CONSTANT_OVERFLOW (arg1
);
1496 TREE_TYPE (t
) = type
;
1499 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1500 else if (TREE_CODE (arg1
) == REAL_CST
)
1503 l
= real_value_from_int_cst (TYPE_MIN_VALUE (type
)),
1504 x
= TREE_REAL_CST (arg1
),
1505 u
= real_value_from_int_cst (TYPE_MAX_VALUE (type
));
1506 /* See if X will be in range after truncation towards 0.
1507 To compensate for truncation, move the bounds away from 0,
1508 but reject if X exactly equals the adjusted bounds. */
1509 #ifdef REAL_ARITHMETIC
1510 REAL_ARITHMETIC (l
, MINUS_EXPR
, l
, dconst1
);
1511 REAL_ARITHMETIC (u
, PLUS_EXPR
, u
, dconst1
);
1516 if (! (REAL_VALUES_LESS (l
, x
) && REAL_VALUES_LESS (x
, u
)))
1518 pedwarn ("real constant out of range for integer conversion");
1521 #ifndef REAL_ARITHMETIC
1524 HOST_WIDE_INT low
, high
;
1525 HOST_WIDE_INT half_word
1526 = (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2);
1528 d
= TREE_REAL_CST (arg1
);
1532 high
= (HOST_WIDE_INT
) (d
/ half_word
/ half_word
);
1533 d
-= (REAL_VALUE_TYPE
) high
* half_word
* half_word
;
1534 if (d
>= (REAL_VALUE_TYPE
) half_word
* half_word
/ 2)
1536 low
= d
- (REAL_VALUE_TYPE
) half_word
* half_word
/ 2;
1537 low
|= (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1);
1540 low
= (HOST_WIDE_INT
) d
;
1541 if (TREE_REAL_CST (arg1
) < 0)
1542 neg_double (low
, high
, &low
, &high
);
1543 t
= build_int_2 (low
, high
);
1547 HOST_WIDE_INT low
, high
;
1548 REAL_VALUE_TO_INT (low
, high
, TREE_REAL_CST (arg1
));
1549 t
= build_int_2 (low
, high
);
1552 TREE_TYPE (t
) = type
;
1555 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1556 TREE_TYPE (t
) = type
;
1558 else if (TREE_CODE (type
) == REAL_TYPE
)
1560 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1561 if (TREE_CODE (arg1
) == INTEGER_CST
)
1562 return build_real_from_int_cst (type
, arg1
);
1563 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1564 if (TREE_CODE (arg1
) == REAL_CST
)
1566 if (setjmp (float_error
))
1568 pedwarn ("floating overflow in constant expression");
1571 set_float_handler (float_error
);
1573 t
= build_real (type
, real_value_truncate (TYPE_MODE (type
),
1574 TREE_REAL_CST (arg1
)));
1575 set_float_handler (NULL_PTR
);
1579 TREE_CONSTANT (t
) = 1;
1583 /* Return an expr equal to X but certainly not valid as an lvalue. */
1591 /* These things are certainly not lvalues. */
1592 if (TREE_CODE (x
) == NON_LVALUE_EXPR
1593 || TREE_CODE (x
) == INTEGER_CST
1594 || TREE_CODE (x
) == REAL_CST
1595 || TREE_CODE (x
) == STRING_CST
1596 || TREE_CODE (x
) == ADDR_EXPR
)
1599 result
= build1 (NON_LVALUE_EXPR
, TREE_TYPE (x
), x
);
1600 TREE_CONSTANT (result
) = TREE_CONSTANT (x
);
1604 /* Given a tree comparison code, return the code that is the logical inverse
1605 of the given code. It is not safe to do this for floating-point
1606 comparisons, except for NE_EXPR and EQ_EXPR. */
1608 static enum tree_code
1609 invert_tree_comparison (code
)
1610 enum tree_code code
;
1631 /* Similar, but return the comparison that results if the operands are
1632 swapped. This is safe for floating-point. */
1634 static enum tree_code
1635 swap_tree_comparison (code
)
1636 enum tree_code code
;
1656 /* Return nonzero if two operands are necessarily equal.
1657 If ONLY_CONST is non-zero, only return non-zero for constants.
1658 This function tests whether the operands are indistinguishable;
1659 it does not test whether they are equal using C's == operation.
1660 The distinction is important for IEEE floating point, because
1661 (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
1662 (2) two NaNs may be indistinguishable, but NaN!=NaN. */
1665 operand_equal_p (arg0
, arg1
, only_const
)
1669 /* If both types don't have the same signedness, then we can't consider
1670 them equal. We must check this before the STRIP_NOPS calls
1671 because they may change the signedness of the arguments. */
1672 if (TREE_UNSIGNED (TREE_TYPE (arg0
)) != TREE_UNSIGNED (TREE_TYPE (arg1
)))
1678 /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
1679 We don't care about side effects in that case because the SAVE_EXPR
1680 takes care of that for us. */
1681 if (TREE_CODE (arg0
) == SAVE_EXPR
&& arg0
== arg1
)
1682 return ! only_const
;
1684 if (TREE_SIDE_EFFECTS (arg0
) || TREE_SIDE_EFFECTS (arg1
))
1687 if (TREE_CODE (arg0
) == TREE_CODE (arg1
)
1688 && TREE_CODE (arg0
) == ADDR_EXPR
1689 && TREE_OPERAND (arg0
, 0) == TREE_OPERAND (arg1
, 0))
1692 if (TREE_CODE (arg0
) == TREE_CODE (arg1
)
1693 && TREE_CODE (arg0
) == INTEGER_CST
1694 && TREE_INT_CST_LOW (arg0
) == TREE_INT_CST_LOW (arg1
)
1695 && TREE_INT_CST_HIGH (arg0
) == TREE_INT_CST_HIGH (arg1
))
1698 /* Detect when real constants are equal. */
1699 if (TREE_CODE (arg0
) == TREE_CODE (arg1
)
1700 && TREE_CODE (arg0
) == REAL_CST
)
1701 return !bcmp (&TREE_REAL_CST (arg0
), &TREE_REAL_CST (arg1
),
1702 sizeof (REAL_VALUE_TYPE
));
1710 if (TREE_CODE (arg0
) != TREE_CODE (arg1
))
1712 /* This is needed for conversions and for COMPONENT_REF.
1713 Might as well play it safe and always test this. */
1714 if (TYPE_MODE (TREE_TYPE (arg0
)) != TYPE_MODE (TREE_TYPE (arg1
)))
1717 switch (TREE_CODE_CLASS (TREE_CODE (arg0
)))
1720 /* Two conversions are equal only if signedness and modes match. */
1721 if ((TREE_CODE (arg0
) == NOP_EXPR
|| TREE_CODE (arg0
) == CONVERT_EXPR
)
1722 && (TREE_UNSIGNED (TREE_TYPE (arg0
))
1723 != TREE_UNSIGNED (TREE_TYPE (arg1
))))
1726 return operand_equal_p (TREE_OPERAND (arg0
, 0),
1727 TREE_OPERAND (arg1
, 0), 0);
1731 return (operand_equal_p (TREE_OPERAND (arg0
, 0),
1732 TREE_OPERAND (arg1
, 0), 0)
1733 && operand_equal_p (TREE_OPERAND (arg0
, 1),
1734 TREE_OPERAND (arg1
, 1), 0));
1737 switch (TREE_CODE (arg0
))
1740 return operand_equal_p (TREE_OPERAND (arg0
, 0),
1741 TREE_OPERAND (arg1
, 0), 0);
1745 return (operand_equal_p (TREE_OPERAND (arg0
, 0),
1746 TREE_OPERAND (arg1
, 0), 0)
1747 && operand_equal_p (TREE_OPERAND (arg0
, 1),
1748 TREE_OPERAND (arg1
, 1), 0));
1751 return (operand_equal_p (TREE_OPERAND (arg0
, 0),
1752 TREE_OPERAND (arg1
, 0), 0)
1753 && operand_equal_p (TREE_OPERAND (arg0
, 1),
1754 TREE_OPERAND (arg1
, 1), 0)
1755 && operand_equal_p (TREE_OPERAND (arg0
, 2),
1756 TREE_OPERAND (arg1
, 2), 0));
1764 /* Similar to operand_equal_p, but see if ARG0 might have been made by
1765 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
1767 When in doubt, return 0. */
1770 operand_equal_for_comparison_p (arg0
, arg1
, other
)
1774 int unsignedp1
, unsignedpo
;
1775 tree primarg1
, primother
;
1778 if (operand_equal_p (arg0
, arg1
, 0))
1781 if (TREE_CODE (TREE_TYPE (arg0
)) != INTEGER_TYPE
)
1784 /* Duplicate what shorten_compare does to ARG1 and see if that gives the
1785 actual comparison operand, ARG0.
1787 First throw away any conversions to wider types
1788 already present in the operands. */
1790 primarg1
= get_narrower (arg1
, &unsignedp1
);
1791 primother
= get_narrower (other
, &unsignedpo
);
1793 correct_width
= TYPE_PRECISION (TREE_TYPE (arg1
));
1794 if (unsignedp1
== unsignedpo
1795 && TYPE_PRECISION (TREE_TYPE (primarg1
)) < correct_width
1796 && TYPE_PRECISION (TREE_TYPE (primother
)) < correct_width
)
1798 tree type
= TREE_TYPE (arg0
);
1800 /* Make sure shorter operand is extended the right way
1801 to match the longer operand. */
1802 primarg1
= convert (signed_or_unsigned_type (unsignedp1
,
1803 TREE_TYPE (primarg1
)),
1806 if (operand_equal_p (arg0
, convert (type
, primarg1
), 0))
1813 /* See if ARG is an expression that is either a comparison or is performing
1814 arithmetic on comparisons. The comparisons must only be comparing
1815 two different values, which will be stored in *CVAL1 and *CVAL2; if
1816 they are non-zero it means that some operands have already been found.
1817 No variables may be used anywhere else in the expression except in the
1820 If this is true, return 1. Otherwise, return zero. */
1823 twoval_comparison_p (arg
, cval1
, cval2
)
1825 tree
*cval1
, *cval2
;
1827 enum tree_code code
= TREE_CODE (arg
);
1828 char class = TREE_CODE_CLASS (code
);
1830 /* We can handle some of the 'e' cases here. */
1832 && (code
== TRUTH_NOT_EXPR
1833 || (code
== SAVE_EXPR
&& SAVE_EXPR_RTL (arg
) == 0)))
1835 else if (class == 'e'
1836 && (code
== TRUTH_ANDIF_EXPR
|| code
== TRUTH_ORIF_EXPR
1837 || code
== COMPOUND_EXPR
))
1843 return twoval_comparison_p (TREE_OPERAND (arg
, 0), cval1
, cval2
);
1846 return (twoval_comparison_p (TREE_OPERAND (arg
, 0), cval1
, cval2
)
1847 && twoval_comparison_p (TREE_OPERAND (arg
, 1), cval1
, cval2
));
1853 if (code
== COND_EXPR
)
1854 return (twoval_comparison_p (TREE_OPERAND (arg
, 0), cval1
, cval2
)
1855 && twoval_comparison_p (TREE_OPERAND (arg
, 1), cval1
, cval2
)
1856 && twoval_comparison_p (TREE_OPERAND (arg
, 2),
1861 /* First see if we can handle the first operand, then the second. For
1862 the second operand, we know *CVAL1 can't be zero. It must be that
1863 one side of the comparison is each of the values; test for the
1864 case where this isn't true by failing if the two operands
1867 if (operand_equal_p (TREE_OPERAND (arg
, 0),
1868 TREE_OPERAND (arg
, 1), 0))
1872 *cval1
= TREE_OPERAND (arg
, 0);
1873 else if (operand_equal_p (*cval1
, TREE_OPERAND (arg
, 0), 0))
1875 else if (*cval2
== 0)
1876 *cval2
= TREE_OPERAND (arg
, 0);
1877 else if (operand_equal_p (*cval2
, TREE_OPERAND (arg
, 0), 0))
1882 if (operand_equal_p (*cval1
, TREE_OPERAND (arg
, 1), 0))
1884 else if (*cval2
== 0)
1885 *cval2
= TREE_OPERAND (arg
, 1);
1886 else if (operand_equal_p (*cval2
, TREE_OPERAND (arg
, 1), 0))
1897 /* ARG is a tree that is known to contain just arithmetic operations and
1898 comparisons. Evaluate the operations in the tree substituting NEW0 for
1899 any occurrence of OLD0 as an operand of a comparison and likewise for
1903 eval_subst (arg
, old0
, new0
, old1
, new1
)
1905 tree old0
, new0
, old1
, new1
;
1907 tree type
= TREE_TYPE (arg
);
1908 enum tree_code code
= TREE_CODE (arg
);
1909 char class = TREE_CODE_CLASS (code
);
1911 /* We can handle some of the 'e' cases here. */
1912 if (class == 'e' && code
== TRUTH_NOT_EXPR
)
1914 else if (class == 'e'
1915 && (code
== TRUTH_ANDIF_EXPR
|| code
== TRUTH_ORIF_EXPR
))
1921 return fold (build1 (code
, type
,
1922 eval_subst (TREE_OPERAND (arg
, 0),
1923 old0
, new0
, old1
, new1
)));
1926 return fold (build (code
, type
,
1927 eval_subst (TREE_OPERAND (arg
, 0),
1928 old0
, new0
, old1
, new1
),
1929 eval_subst (TREE_OPERAND (arg
, 1),
1930 old0
, new0
, old1
, new1
)));
1936 return eval_subst (TREE_OPERAND (arg
, 0), old0
, new0
, old1
, new1
);
1939 return eval_subst (TREE_OPERAND (arg
, 1), old0
, new0
, old1
, new1
);
1942 return fold (build (code
, type
,
1943 eval_subst (TREE_OPERAND (arg
, 0),
1944 old0
, new0
, old1
, new1
),
1945 eval_subst (TREE_OPERAND (arg
, 1),
1946 old0
, new0
, old1
, new1
),
1947 eval_subst (TREE_OPERAND (arg
, 2),
1948 old0
, new0
, old1
, new1
)));
1953 tree arg0
= TREE_OPERAND (arg
, 0);
1954 tree arg1
= TREE_OPERAND (arg
, 1);
1956 /* We need to check both for exact equality and tree equality. The
1957 former will be true if the operand has a side-effect. In that
1958 case, we know the operand occurred exactly once. */
1960 if (arg0
== old0
|| operand_equal_p (arg0
, old0
, 0))
1962 else if (arg0
== old1
|| operand_equal_p (arg0
, old1
, 0))
1965 if (arg1
== old0
|| operand_equal_p (arg1
, old0
, 0))
1967 else if (arg1
== old1
|| operand_equal_p (arg1
, old1
, 0))
1970 return fold (build (code
, type
, arg0
, arg1
));
1977 /* Return a tree for the case when the result of an expression is RESULT
1978 converted to TYPE and OMITTED was previously an operand of the expression
1979 but is now not needed (e.g., we folded OMITTED * 0).
1981 If OMITTED has side effects, we must evaluate it. Otherwise, just do
1982 the conversion of RESULT to TYPE. */
1985 omit_one_operand (type
, result
, omitted
)
1986 tree type
, result
, omitted
;
1988 tree t
= convert (type
, result
);
1990 if (TREE_SIDE_EFFECTS (omitted
))
1991 return build (COMPOUND_EXPR
, type
, omitted
, t
);
1996 /* Return a simplified tree node for the truth-negation of ARG. This
1997 never alters ARG itself. We assume that ARG is an operation that
1998 returns a truth value (0 or 1). */
2001 invert_truthvalue (arg
)
2004 tree type
= TREE_TYPE (arg
);
2005 enum tree_code code
= TREE_CODE (arg
);
2007 /* If this is a comparison, we can simply invert it, except for
2008 floating-point non-equality comparisons, in which case we just
2009 enclose a TRUTH_NOT_EXPR around what we have. */
2011 if (TREE_CODE_CLASS (code
) == '<')
2013 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (arg
, 0))) == REAL_TYPE
2014 && code
!= NE_EXPR
&& code
!= EQ_EXPR
)
2015 return build1 (TRUTH_NOT_EXPR
, type
, arg
);
2017 return build (invert_tree_comparison (code
), type
,
2018 TREE_OPERAND (arg
, 0), TREE_OPERAND (arg
, 1));
2024 return convert (type
, build_int_2 (TREE_INT_CST_LOW (arg
) == 0
2025 && TREE_INT_CST_HIGH (arg
) == 0, 0));
2027 case TRUTH_AND_EXPR
:
2028 return build (TRUTH_OR_EXPR
, type
,
2029 invert_truthvalue (TREE_OPERAND (arg
, 0)),
2030 invert_truthvalue (TREE_OPERAND (arg
, 1)));
2033 return build (TRUTH_AND_EXPR
, type
,
2034 invert_truthvalue (TREE_OPERAND (arg
, 0)),
2035 invert_truthvalue (TREE_OPERAND (arg
, 1)));
2037 case TRUTH_XOR_EXPR
:
2038 /* Here we can invert either operand. We invert the first operand
2039 unless the second operand is a TRUTH_NOT_EXPR in which case our
2040 result is the XOR of the first operand with the inside of the
2041 negation of the second operand. */
2043 if (TREE_CODE (TREE_OPERAND (arg
, 1)) == TRUTH_NOT_EXPR
)
2044 return build (TRUTH_XOR_EXPR
, type
, TREE_OPERAND (arg
, 0),
2045 TREE_OPERAND (TREE_OPERAND (arg
, 1), 0));
2047 return build (TRUTH_XOR_EXPR
, type
,
2048 invert_truthvalue (TREE_OPERAND (arg
, 0)),
2049 TREE_OPERAND (arg
, 1));
2051 case TRUTH_ANDIF_EXPR
:
2052 return build (TRUTH_ORIF_EXPR
, type
,
2053 invert_truthvalue (TREE_OPERAND (arg
, 0)),
2054 invert_truthvalue (TREE_OPERAND (arg
, 1)));
2056 case TRUTH_ORIF_EXPR
:
2057 return build (TRUTH_ANDIF_EXPR
, type
,
2058 invert_truthvalue (TREE_OPERAND (arg
, 0)),
2059 invert_truthvalue (TREE_OPERAND (arg
, 1)));
2061 case TRUTH_NOT_EXPR
:
2062 return TREE_OPERAND (arg
, 0);
2065 return build (COND_EXPR
, type
, TREE_OPERAND (arg
, 0),
2066 invert_truthvalue (TREE_OPERAND (arg
, 1)),
2067 invert_truthvalue (TREE_OPERAND (arg
, 2)));
2070 return build (COMPOUND_EXPR
, type
, TREE_OPERAND (arg
, 0),
2071 invert_truthvalue (TREE_OPERAND (arg
, 1)));
2073 case NON_LVALUE_EXPR
:
2074 return invert_truthvalue (TREE_OPERAND (arg
, 0));
2079 return build1 (TREE_CODE (arg
), type
,
2080 invert_truthvalue (TREE_OPERAND (arg
, 0)));
2083 if (! integer_onep (TREE_OPERAND (arg
, 1)))
2085 return build (EQ_EXPR
, type
, arg
, convert (type
, integer_zero_node
));
2091 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
2092 operands are another bit-wise operation with a common input. If so,
2093 distribute the bit operations to save an operation and possibly two if
2094 constants are involved. For example, convert
2095 (A | B) & (A | C) into A | (B & C)
2096 Further simplification will occur if B and C are constants.
2098 If this optimization cannot be done, 0 will be returned. */
2101 distribute_bit_expr (code
, type
, arg0
, arg1
)
2102 enum tree_code code
;
2109 if (TREE_CODE (arg0
) != TREE_CODE (arg1
)
2110 || TREE_CODE (arg0
) == code
2111 || (TREE_CODE (arg0
) != BIT_AND_EXPR
2112 && TREE_CODE (arg0
) != BIT_IOR_EXPR
))
2115 if (operand_equal_p (TREE_OPERAND (arg0
, 0), TREE_OPERAND (arg1
, 0), 0))
2117 common
= TREE_OPERAND (arg0
, 0);
2118 left
= TREE_OPERAND (arg0
, 1);
2119 right
= TREE_OPERAND (arg1
, 1);
2121 else if (operand_equal_p (TREE_OPERAND (arg0
, 0), TREE_OPERAND (arg1
, 1), 0))
2123 common
= TREE_OPERAND (arg0
, 0);
2124 left
= TREE_OPERAND (arg0
, 1);
2125 right
= TREE_OPERAND (arg1
, 0);
2127 else if (operand_equal_p (TREE_OPERAND (arg0
, 1), TREE_OPERAND (arg1
, 0), 0))
2129 common
= TREE_OPERAND (arg0
, 1);
2130 left
= TREE_OPERAND (arg0
, 0);
2131 right
= TREE_OPERAND (arg1
, 1);
2133 else if (operand_equal_p (TREE_OPERAND (arg0
, 1), TREE_OPERAND (arg1
, 1), 0))
2135 common
= TREE_OPERAND (arg0
, 1);
2136 left
= TREE_OPERAND (arg0
, 0);
2137 right
= TREE_OPERAND (arg1
, 0);
2142 return fold (build (TREE_CODE (arg0
), type
, common
,
2143 fold (build (code
, type
, left
, right
))));
2146 /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
2147 starting at BITPOS. The field is unsigned if UNSIGNEDP is non-zero. */
2150 make_bit_field_ref (inner
, type
, bitsize
, bitpos
, unsignedp
)
2153 int bitsize
, bitpos
;
2156 tree result
= build (BIT_FIELD_REF
, type
, inner
,
2157 size_int (bitsize
), size_int (bitpos
));
2159 TREE_UNSIGNED (result
) = unsignedp
;
2164 /* Optimize a bit-field compare.
2166 There are two cases: First is a compare against a constant and the
2167 second is a comparison of two items where the fields are at the same
2168 bit position relative to the start of a chunk (byte, halfword, word)
2169 large enough to contain it. In these cases we can avoid the shift
2170 implicit in bitfield extractions.
2172 For constants, we emit a compare of the shifted constant with the
2173 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
2174 compared. For two fields at the same position, we do the ANDs with the
2175 similar mask and compare the result of the ANDs.
2177 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
2178 COMPARE_TYPE is the type of the comparison, and LHS and RHS
2179 are the left and right operands of the comparison, respectively.
2181 If the optimization described above can be done, we return the resulting
2182 tree. Otherwise we return zero. */
2185 optimize_bit_field_compare (code
, compare_type
, lhs
, rhs
)
2186 enum tree_code code
;
2190 int lbitpos
, lbitsize
, rbitpos
, rbitsize
;
2191 int lnbitpos
, lnbitsize
, rnbitpos
, rnbitsize
;
2192 tree type
= TREE_TYPE (lhs
);
2193 tree signed_type
, unsigned_type
;
2194 int const_p
= TREE_CODE (rhs
) == INTEGER_CST
;
2195 enum machine_mode lmode
, rmode
, lnmode
, rnmode
;
2196 int lunsignedp
, runsignedp
;
2197 int lvolatilep
= 0, rvolatilep
= 0;
2198 tree linner
, rinner
;
2202 /* Get all the information about the extractions being done. If the bit size
2203 if the same as the size of the underlying object, we aren't doing an
2204 extraction at all and so can do nothing. */
2205 linner
= get_inner_reference (lhs
, &lbitsize
, &lbitpos
, &offset
, &lmode
,
2206 &lunsignedp
, &lvolatilep
);
2207 if (lbitsize
== GET_MODE_BITSIZE (lmode
) || lbitsize
< 0
2213 /* If this is not a constant, we can only do something if bit positions,
2214 sizes, and signedness are the same. */
2215 rinner
= get_inner_reference (rhs
, &rbitsize
, &rbitpos
, &offset
,
2216 &rmode
, &runsignedp
, &rvolatilep
);
2218 if (lbitpos
!= rbitpos
|| lbitsize
!= rbitsize
2219 || lunsignedp
!= runsignedp
|| offset
!= 0)
2223 /* See if we can find a mode to refer to this field. We should be able to,
2224 but fail if we can't. */
2225 lnmode
= get_best_mode (lbitsize
, lbitpos
,
2226 TYPE_ALIGN (TREE_TYPE (linner
)), word_mode
,
2228 if (lnmode
== VOIDmode
)
2231 /* Set signed and unsigned types of the precision of this mode for the
2233 signed_type
= type_for_mode (lnmode
, 0);
2234 unsigned_type
= type_for_mode (lnmode
, 1);
2238 rnmode
= get_best_mode (rbitsize
, rbitpos
,
2239 TYPE_ALIGN (TREE_TYPE (rinner
)), word_mode
,
2241 if (rnmode
== VOIDmode
)
2245 /* Compute the bit position and size for the new reference and our offset
2246 within it. If the new reference is the same size as the original, we
2247 won't optimize anything, so return zero. */
2248 lnbitsize
= GET_MODE_BITSIZE (lnmode
);
2249 lnbitpos
= lbitpos
& ~ (lnbitsize
- 1);
2250 lbitpos
-= lnbitpos
;
2251 if (lnbitsize
== lbitsize
)
2256 rnbitsize
= GET_MODE_BITSIZE (rnmode
);
2257 rnbitpos
= rbitpos
& ~ (rnbitsize
- 1);
2258 rbitpos
-= rnbitpos
;
2259 if (rnbitsize
== rbitsize
)
2263 #if BYTES_BIG_ENDIAN
2264 lbitpos
= lnbitsize
- lbitsize
- lbitpos
;
2267 /* Make the mask to be used against the extracted field. */
2268 mask
= convert (unsigned_type
, build_int_2 (~0, ~0));
2269 mask
= const_binop (LSHIFT_EXPR
, mask
, size_int (lnbitsize
- lbitsize
), 0);
2270 mask
= const_binop (RSHIFT_EXPR
, mask
,
2271 size_int (lnbitsize
- lbitsize
- lbitpos
), 0);
2274 /* If not comparing with constant, just rework the comparison
2276 return build (code
, compare_type
,
2277 build (BIT_AND_EXPR
, unsigned_type
,
2278 make_bit_field_ref (linner
, unsigned_type
,
2279 lnbitsize
, lnbitpos
, 1),
2281 build (BIT_AND_EXPR
, unsigned_type
,
2282 make_bit_field_ref (rinner
, unsigned_type
,
2283 rnbitsize
, rnbitpos
, 1),
2286 /* Otherwise, we are handling the constant case. See if the constant is too
2287 big for the field. Warn and return a tree of for 0 (false) if so. We do
2288 this not only for its own sake, but to avoid having to test for this
2289 error case below. If we didn't, we might generate wrong code.
2291 For unsigned fields, the constant shifted right by the field length should
2292 be all zero. For signed fields, the high-order bits should agree with
2297 if (! integer_zerop (const_binop (RSHIFT_EXPR
,
2298 convert (unsigned_type
, rhs
),
2299 size_int (lbitsize
), 0)))
2301 warning ("comparison is always %s due to width of bitfield",
2302 code
== NE_EXPR
? "one" : "zero");
2303 return convert (compare_type
,
2305 ? integer_one_node
: integer_zero_node
));
2310 tree tem
= const_binop (RSHIFT_EXPR
, convert (signed_type
, rhs
),
2311 size_int (lbitsize
- 1), 0);
2312 if (! integer_zerop (tem
) && ! integer_all_onesp (tem
))
2314 warning ("comparison is always %s due to width of bitfield",
2315 code
== NE_EXPR
? "one" : "zero");
2316 return convert (compare_type
,
2318 ? integer_one_node
: integer_zero_node
));
2322 /* Single-bit compares should always be against zero. */
2323 if (lbitsize
== 1 && ! integer_zerop (rhs
))
2325 code
= code
== EQ_EXPR
? NE_EXPR
: EQ_EXPR
;
2326 rhs
= convert (type
, integer_zero_node
);
2329 /* Make a new bitfield reference, shift the constant over the
2330 appropriate number of bits and mask it with the computed mask
2331 (in case this was a signed field). If we changed it, make a new one. */
2332 lhs
= make_bit_field_ref (linner
, unsigned_type
, lnbitsize
, lnbitpos
, 1);
2334 rhs
= fold (const_binop (BIT_AND_EXPR
,
2335 const_binop (LSHIFT_EXPR
,
2336 convert (unsigned_type
, rhs
),
2337 size_int (lbitpos
)),
2340 return build (code
, compare_type
,
2341 build (BIT_AND_EXPR
, unsigned_type
, lhs
, mask
),
2345 /* Subroutine for fold_truthop: decode a field reference.
2347 If EXP is a comparison reference, we return the innermost reference.
2349 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2350 set to the starting bit number.
2352 If the innermost field can be completely contained in a mode-sized
2353 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2355 *PVOLATILEP is set to 1 if the any expression encountered is volatile;
2356 otherwise it is not changed.
2358 *PUNSIGNEDP is set to the signedness of the field.
2360 *PMASK is set to the mask used. This is either contained in a
2361 BIT_AND_EXPR or derived from the width of the field.
2363 Return 0 if this is not a component reference or is one that we can't
2364 do anything with. */
2367 decode_field_reference (exp
, pbitsize
, pbitpos
, pmode
, punsignedp
,
2370 int *pbitsize
, *pbitpos
;
2371 enum machine_mode
*pmode
;
2372 int *punsignedp
, *pvolatilep
;
2381 if (TREE_CODE (exp
) == BIT_AND_EXPR
)
2383 mask
= TREE_OPERAND (exp
, 1);
2384 exp
= TREE_OPERAND (exp
, 0);
2385 STRIP_NOPS (exp
); STRIP_NOPS (mask
);
2386 if (TREE_CODE (mask
) != INTEGER_CST
)
2390 if (TREE_CODE (exp
) != COMPONENT_REF
&& TREE_CODE (exp
) != ARRAY_REF
2391 && TREE_CODE (exp
) != BIT_FIELD_REF
)
2394 inner
= get_inner_reference (exp
, pbitsize
, pbitpos
, &offset
, pmode
,
2395 punsignedp
, pvolatilep
);
2396 if (*pbitsize
< 0 || offset
!= 0)
2401 tree unsigned_type
= type_for_size (*pbitsize
, 1);
2402 int precision
= TYPE_PRECISION (unsigned_type
);
2404 mask
= convert (unsigned_type
, build_int_2 (~0, ~0));
2405 mask
= const_binop (LSHIFT_EXPR
, mask
, size_int (precision
- *pbitsize
), 0);
2406 mask
= const_binop (RSHIFT_EXPR
, mask
, size_int (precision
- *pbitsize
), 0);
2413 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order
2417 all_ones_mask_p (mask
, size
)
2421 tree type
= TREE_TYPE (mask
);
2422 int precision
= TYPE_PRECISION (type
);
2425 operand_equal_p (mask
,
2426 const_binop (RSHIFT_EXPR
,
2427 const_binop (LSHIFT_EXPR
,
2428 convert (signed_type (type
),
2429 build_int_2 (~0, ~0)),
2430 size_int (precision
- size
), 0),
2431 size_int (precision
- size
), 0),
2435 /* Subroutine for fold_truthop: determine if an operand is simple enough
2436 to be evaluated unconditionally. */
2442 simple_operand_p (exp
)
2445 /* Strip any conversions that don't change the machine mode. */
2446 while ((TREE_CODE (exp
) == NOP_EXPR
2447 || TREE_CODE (exp
) == CONVERT_EXPR
)
2448 && (TYPE_MODE (TREE_TYPE (exp
))
2449 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp
, 0)))))
2450 exp
= TREE_OPERAND (exp
, 0);
2452 return (TREE_CODE_CLASS (TREE_CODE (exp
)) == 'c'
2453 || (TREE_CODE_CLASS (TREE_CODE (exp
)) == 'd'
2454 && ! TREE_ADDRESSABLE (exp
)
2455 && ! TREE_THIS_VOLATILE (exp
)
2456 && ! DECL_NONLOCAL (exp
)
2457 /* Don't regard global variables as simple. They may be
2458 allocated in ways unknown to the compiler (shared memory,
2459 #pragma weak, etc). */
2460 && ! TREE_PUBLIC (exp
)
2461 && ! DECL_EXTERNAL (exp
)
2462 /* Loading a static variable is unduly expensive, but global
2463 registers aren't expensive. */
2464 && (! TREE_STATIC (exp
) || DECL_REGISTER (exp
))));
2467 /* Subroutine for fold_truthop: try to optimize a range test.
2469 For example, "i >= 2 && i =< 9" can be done as "(unsigned) (i - 2) <= 7".
2471 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2472 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2473 (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of
2476 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2477 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2478 larger than HI_CST (they may be equal).
2480 We return the simplified tree or 0 if no optimization is possible. */
2483 range_test (jcode
, type
, lo_code
, hi_code
, var
, lo_cst
, hi_cst
)
2484 enum tree_code jcode
, lo_code
, hi_code
;
2485 tree type
, var
, lo_cst
, hi_cst
;
2488 enum tree_code rcode
;
2490 /* See if this is a range test and normalize the constant terms. */
2492 if (jcode
== TRUTH_AND_EXPR
)
2497 /* See if we have VAR != CST && VAR != CST+1. */
2498 if (! (hi_code
== NE_EXPR
2499 && TREE_INT_CST_LOW (hi_cst
) - TREE_INT_CST_LOW (lo_cst
) == 1
2500 && tree_int_cst_equal (integer_one_node
,
2501 const_binop (MINUS_EXPR
,
2502 hi_cst
, lo_cst
, 0))))
2510 if (hi_code
== LT_EXPR
)
2511 hi_cst
= const_binop (MINUS_EXPR
, hi_cst
, integer_one_node
, 0);
2512 else if (hi_code
!= LE_EXPR
)
2515 if (lo_code
== GT_EXPR
)
2516 lo_cst
= const_binop (PLUS_EXPR
, lo_cst
, integer_one_node
, 0);
2518 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
2531 /* See if we have VAR == CST || VAR == CST+1. */
2532 if (! (hi_code
== EQ_EXPR
2533 && TREE_INT_CST_LOW (hi_cst
) - TREE_INT_CST_LOW (lo_cst
) == 1
2534 && tree_int_cst_equal (integer_one_node
,
2535 const_binop (MINUS_EXPR
,
2536 hi_cst
, lo_cst
, 0))))
2544 if (hi_code
== GE_EXPR
)
2545 hi_cst
= const_binop (MINUS_EXPR
, hi_cst
, integer_one_node
, 0);
2546 else if (hi_code
!= GT_EXPR
)
2549 if (lo_code
== LE_EXPR
)
2550 lo_cst
= const_binop (PLUS_EXPR
, lo_cst
, integer_one_node
, 0);
2552 /* We now have VAR < LO_CST || VAR > HI_CST. */
2561 /* When normalizing, it is possible to both increment the smaller constant
2562 and decrement the larger constant. See if they are still ordered. */
2563 if (tree_int_cst_lt (hi_cst
, lo_cst
))
2566 /* Fail if VAR isn't an integer. */
2567 utype
= TREE_TYPE (var
);
2568 if (TREE_CODE (utype
) != INTEGER_TYPE
2569 && TREE_CODE (utype
) != ENUMERAL_TYPE
)
2572 /* The range test is invalid if subtracting the two constants results
2573 in overflow. This can happen in traditional mode. */
2574 if (! int_fits_type_p (hi_cst
, TREE_TYPE (var
))
2575 || ! int_fits_type_p (lo_cst
, TREE_TYPE (var
)))
2578 if (! TREE_UNSIGNED (utype
))
2580 utype
= unsigned_type (utype
);
2581 var
= convert (utype
, var
);
2582 lo_cst
= convert (utype
, lo_cst
);
2583 hi_cst
= convert (utype
, hi_cst
);
2586 return fold (convert (type
,
2587 build (rcode
, utype
,
2588 build (MINUS_EXPR
, utype
, var
, lo_cst
),
2589 const_binop (MINUS_EXPR
, hi_cst
, lo_cst
, 0))));
2592 /* Find ways of folding logical expressions of LHS and RHS:
2593 Try to merge two comparisons to the same innermost item.
2594 Look for range tests like "ch >= '0' && ch <= '9'".
2595 Look for combinations of simple terms on machines with expensive branches
2596 and evaluate the RHS unconditionally.
2598 For example, if we have p->a == 2 && p->b == 4 and we can make an
2599 object large enough to span both A and B, we can do this with a comparison
2600 against the object ANDed with the a mask.
2602 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2603 operations to do this with one comparison.
2605 We check for both normal comparisons and the BIT_AND_EXPRs made this by
2606 function and the one above.
2608 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2609 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2611 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its
2614 We return the simplified tree or 0 if no optimization is possible. */
2617 fold_truthop (code
, truth_type
, lhs
, rhs
)
2618 enum tree_code code
;
2619 tree truth_type
, lhs
, rhs
;
2621 /* If this is the "or" of two comparisons, we can do something if we
2622 the comparisons are NE_EXPR. If this is the "and", we can do something
2623 if the comparisons are EQ_EXPR. I.e.,
2624 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2626 WANTED_CODE is this operation code. For single bit fields, we can
2627 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2628 comparison for one-bit fields. */
2630 enum tree_code wanted_code
;
2631 enum tree_code lcode
, rcode
;
2632 tree ll_arg
, lr_arg
, rl_arg
, rr_arg
;
2633 tree ll_inner
, lr_inner
, rl_inner
, rr_inner
;
2634 int ll_bitsize
, ll_bitpos
, lr_bitsize
, lr_bitpos
;
2635 int rl_bitsize
, rl_bitpos
, rr_bitsize
, rr_bitpos
;
2636 int xll_bitpos
, xlr_bitpos
, xrl_bitpos
, xrr_bitpos
;
2637 int lnbitsize
, lnbitpos
, rnbitsize
, rnbitpos
;
2638 int ll_unsignedp
, lr_unsignedp
, rl_unsignedp
, rr_unsignedp
;
2639 enum machine_mode ll_mode
, lr_mode
, rl_mode
, rr_mode
;
2640 enum machine_mode lnmode
, rnmode
;
2641 tree ll_mask
, lr_mask
, rl_mask
, rr_mask
;
2642 tree l_const
, r_const
;
2644 int first_bit
, end_bit
;
2647 /* Start by getting the comparison codes and seeing if this looks like
2648 a range test. Fail if anything is volatile. */
2650 if (TREE_SIDE_EFFECTS (lhs
)
2651 || TREE_SIDE_EFFECTS (rhs
))
2654 lcode
= TREE_CODE (lhs
);
2655 rcode
= TREE_CODE (rhs
);
2657 if (TREE_CODE_CLASS (lcode
) != '<'
2658 || TREE_CODE_CLASS (rcode
) != '<')
2661 code
= ((code
== TRUTH_AND_EXPR
|| code
== TRUTH_ANDIF_EXPR
)
2662 ? TRUTH_AND_EXPR
: TRUTH_OR_EXPR
);
2664 ll_arg
= TREE_OPERAND (lhs
, 0);
2665 lr_arg
= TREE_OPERAND (lhs
, 1);
2666 rl_arg
= TREE_OPERAND (rhs
, 0);
2667 rr_arg
= TREE_OPERAND (rhs
, 1);
2669 if (TREE_CODE (lr_arg
) == INTEGER_CST
2670 && TREE_CODE (rr_arg
) == INTEGER_CST
2671 && operand_equal_p (ll_arg
, rl_arg
, 0))
2673 if (tree_int_cst_lt (lr_arg
, rr_arg
))
2674 result
= range_test (code
, truth_type
, lcode
, rcode
,
2675 ll_arg
, lr_arg
, rr_arg
);
2677 result
= range_test (code
, truth_type
, rcode
, lcode
,
2678 ll_arg
, rr_arg
, lr_arg
);
2680 /* If this isn't a range test, it also isn't a comparison that
2681 can be merged. However, it wins to evaluate the RHS unconditionally
2682 on machines with expensive branches. */
2684 if (result
== 0 && BRANCH_COST
>= 2)
2686 if (TREE_CODE (ll_arg
) != VAR_DECL
2687 && TREE_CODE (ll_arg
) != PARM_DECL
)
2689 /* Avoid evaluating the variable part twice. */
2690 ll_arg
= save_expr (ll_arg
);
2691 lhs
= build (lcode
, TREE_TYPE (lhs
), ll_arg
, lr_arg
);
2692 rhs
= build (rcode
, TREE_TYPE (rhs
), ll_arg
, rr_arg
);
2694 return build (code
, truth_type
, lhs
, rhs
);
2699 /* If the RHS can be evaluated unconditionally and its operands are
2700 simple, it wins to evaluate the RHS unconditionally on machines
2701 with expensive branches. In this case, this isn't a comparison
2702 that can be merged. */
2704 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2705 are with zero (tmw). */
2707 if (BRANCH_COST
>= 2
2708 && TREE_CODE (TREE_TYPE (rhs
)) == INTEGER_TYPE
2709 && simple_operand_p (rl_arg
)
2710 && simple_operand_p (rr_arg
))
2711 return build (code
, truth_type
, lhs
, rhs
);
2713 /* See if the comparisons can be merged. Then get all the parameters for
2716 if ((lcode
!= EQ_EXPR
&& lcode
!= NE_EXPR
)
2717 || (rcode
!= EQ_EXPR
&& rcode
!= NE_EXPR
))
2721 ll_inner
= decode_field_reference (ll_arg
,
2722 &ll_bitsize
, &ll_bitpos
, &ll_mode
,
2723 &ll_unsignedp
, &volatilep
, &ll_mask
);
2724 lr_inner
= decode_field_reference (lr_arg
,
2725 &lr_bitsize
, &lr_bitpos
, &lr_mode
,
2726 &lr_unsignedp
, &volatilep
, &lr_mask
);
2727 rl_inner
= decode_field_reference (rl_arg
,
2728 &rl_bitsize
, &rl_bitpos
, &rl_mode
,
2729 &rl_unsignedp
, &volatilep
, &rl_mask
);
2730 rr_inner
= decode_field_reference (rr_arg
,
2731 &rr_bitsize
, &rr_bitpos
, &rr_mode
,
2732 &rr_unsignedp
, &volatilep
, &rr_mask
);
2734 /* It must be true that the inner operation on the lhs of each
2735 comparison must be the same if we are to be able to do anything.
2736 Then see if we have constants. If not, the same must be true for
2738 if (volatilep
|| ll_inner
== 0 || rl_inner
== 0
2739 || ! operand_equal_p (ll_inner
, rl_inner
, 0))
2742 if (TREE_CODE (lr_arg
) == INTEGER_CST
2743 && TREE_CODE (rr_arg
) == INTEGER_CST
)
2744 l_const
= lr_arg
, r_const
= rr_arg
;
2745 else if (lr_inner
== 0 || rr_inner
== 0
2746 || ! operand_equal_p (lr_inner
, rr_inner
, 0))
2749 l_const
= r_const
= 0;
2751 /* If either comparison code is not correct for our logical operation,
2752 fail. However, we can convert a one-bit comparison against zero into
2753 the opposite comparison against that bit being set in the field. */
2755 wanted_code
= (code
== TRUTH_AND_EXPR
? EQ_EXPR
: NE_EXPR
);
2756 if (lcode
!= wanted_code
)
2758 if (l_const
&& integer_zerop (l_const
) && integer_pow2p (ll_mask
))
2764 if (rcode
!= wanted_code
)
2766 if (r_const
&& integer_zerop (r_const
) && integer_pow2p (rl_mask
))
2772 /* See if we can find a mode that contains both fields being compared on
2773 the left. If we can't, fail. Otherwise, update all constants and masks
2774 to be relative to a field of that size. */
2775 first_bit
= MIN (ll_bitpos
, rl_bitpos
);
2776 end_bit
= MAX (ll_bitpos
+ ll_bitsize
, rl_bitpos
+ rl_bitsize
);
2777 lnmode
= get_best_mode (end_bit
- first_bit
, first_bit
,
2778 TYPE_ALIGN (TREE_TYPE (ll_inner
)), word_mode
,
2780 if (lnmode
== VOIDmode
)
2783 lnbitsize
= GET_MODE_BITSIZE (lnmode
);
2784 lnbitpos
= first_bit
& ~ (lnbitsize
- 1);
2785 type
= type_for_size (lnbitsize
, 1);
2786 xll_bitpos
= ll_bitpos
- lnbitpos
, xrl_bitpos
= rl_bitpos
- lnbitpos
;
2788 #if BYTES_BIG_ENDIAN
2789 xll_bitpos
= lnbitsize
- xll_bitpos
- ll_bitsize
;
2790 xrl_bitpos
= lnbitsize
- xrl_bitpos
- rl_bitsize
;
2793 ll_mask
= const_binop (LSHIFT_EXPR
, convert (type
, ll_mask
),
2794 size_int (xll_bitpos
), 0);
2795 rl_mask
= const_binop (LSHIFT_EXPR
, convert (type
, rl_mask
),
2796 size_int (xrl_bitpos
), 0);
2798 /* Make sure the constants are interpreted as unsigned, so we
2799 don't have sign bits outside the range of their type. */
2803 l_const
= convert (unsigned_type (TREE_TYPE (l_const
)), l_const
);
2804 l_const
= const_binop (LSHIFT_EXPR
, convert (type
, l_const
),
2805 size_int (xll_bitpos
), 0);
2809 r_const
= convert (unsigned_type (TREE_TYPE (r_const
)), r_const
);
2810 r_const
= const_binop (LSHIFT_EXPR
, convert (type
, r_const
),
2811 size_int (xrl_bitpos
), 0);
2814 /* If the right sides are not constant, do the same for it. Also,
2815 disallow this optimization if a size or signedness mismatch occurs
2816 between the left and right sides. */
2819 if (ll_bitsize
!= lr_bitsize
|| rl_bitsize
!= rr_bitsize
2820 || ll_unsignedp
!= lr_unsignedp
|| rl_unsignedp
!= rr_unsignedp
2821 /* Make sure the two fields on the right
2822 correspond to the left without being swapped. */
2823 || ll_bitpos
- rl_bitpos
!= lr_bitpos
- rr_bitpos
)
2826 first_bit
= MIN (lr_bitpos
, rr_bitpos
);
2827 end_bit
= MAX (lr_bitpos
+ lr_bitsize
, rr_bitpos
+ rr_bitsize
);
2828 rnmode
= get_best_mode (end_bit
- first_bit
, first_bit
,
2829 TYPE_ALIGN (TREE_TYPE (lr_inner
)), word_mode
,
2831 if (rnmode
== VOIDmode
)
2834 rnbitsize
= GET_MODE_BITSIZE (rnmode
);
2835 rnbitpos
= first_bit
& ~ (rnbitsize
- 1);
2836 xlr_bitpos
= lr_bitpos
- rnbitpos
, xrr_bitpos
= rr_bitpos
- rnbitpos
;
2838 #if BYTES_BIG_ENDIAN
2839 xlr_bitpos
= rnbitsize
- xlr_bitpos
- lr_bitsize
;
2840 xrr_bitpos
= rnbitsize
- xrr_bitpos
- rr_bitsize
;
2843 lr_mask
= const_binop (LSHIFT_EXPR
, convert (type
, lr_mask
),
2844 size_int (xlr_bitpos
), 0);
2845 rr_mask
= const_binop (LSHIFT_EXPR
, convert (type
, rr_mask
),
2846 size_int (xrr_bitpos
), 0);
2848 /* Make a mask that corresponds to both fields being compared.
2849 Do this for both items being compared. If the masks agree,
2850 we can do this by masking both and comparing the masked
2852 ll_mask
= const_binop (BIT_IOR_EXPR
, ll_mask
, rl_mask
, 0);
2853 lr_mask
= const_binop (BIT_IOR_EXPR
, lr_mask
, rr_mask
, 0);
2854 if (operand_equal_p (ll_mask
, lr_mask
, 0) && lnbitsize
== rnbitsize
)
2856 lhs
= make_bit_field_ref (ll_inner
, type
, lnbitsize
, lnbitpos
,
2857 ll_unsignedp
|| rl_unsignedp
);
2858 rhs
= make_bit_field_ref (lr_inner
, type
, rnbitsize
, rnbitpos
,
2859 lr_unsignedp
|| rr_unsignedp
);
2860 if (! all_ones_mask_p (ll_mask
, lnbitsize
))
2862 lhs
= build (BIT_AND_EXPR
, type
, lhs
, ll_mask
);
2863 rhs
= build (BIT_AND_EXPR
, type
, rhs
, ll_mask
);
2865 return build (wanted_code
, truth_type
, lhs
, rhs
);
2868 /* There is still another way we can do something: If both pairs of
2869 fields being compared are adjacent, we may be able to make a wider
2870 field containing them both. */
2871 if ((ll_bitsize
+ ll_bitpos
== rl_bitpos
2872 && lr_bitsize
+ lr_bitpos
== rr_bitpos
)
2873 || (ll_bitpos
== rl_bitpos
+ rl_bitsize
2874 && lr_bitpos
== rr_bitpos
+ rr_bitsize
))
2875 return build (wanted_code
, truth_type
,
2876 make_bit_field_ref (ll_inner
, type
,
2877 ll_bitsize
+ rl_bitsize
,
2878 MIN (ll_bitpos
, rl_bitpos
),
2880 make_bit_field_ref (lr_inner
, type
,
2881 lr_bitsize
+ rr_bitsize
,
2882 MIN (lr_bitpos
, rr_bitpos
),
2888 /* Handle the case of comparisons with constants. If there is something in
2889 common between the masks, those bits of the constants must be the same.
2890 If not, the condition is always false. Test for this to avoid generating
2891 incorrect code below. */
2892 result
= const_binop (BIT_AND_EXPR
, ll_mask
, rl_mask
, 0);
2893 if (! integer_zerop (result
)
2894 && simple_cst_equal (const_binop (BIT_AND_EXPR
, result
, l_const
, 0),
2895 const_binop (BIT_AND_EXPR
, result
, r_const
, 0)) != 1)
2897 if (wanted_code
== NE_EXPR
)
2899 warning ("`or' of unmatched not-equal tests is always 1");
2900 return convert (truth_type
, integer_one_node
);
2904 warning ("`and' of mutually exclusive equal-tests is always zero");
2905 return convert (truth_type
, integer_zero_node
);
2909 /* Construct the expression we will return. First get the component
2910 reference we will make. Unless the mask is all ones the width of
2911 that field, perform the mask operation. Then compare with the
2913 result
= make_bit_field_ref (ll_inner
, type
, lnbitsize
, lnbitpos
,
2914 ll_unsignedp
|| rl_unsignedp
);
2916 ll_mask
= const_binop (BIT_IOR_EXPR
, ll_mask
, rl_mask
, 0);
2917 if (! all_ones_mask_p (ll_mask
, lnbitsize
))
2918 result
= build (BIT_AND_EXPR
, type
, result
, ll_mask
);
2920 return build (wanted_code
, truth_type
, result
,
2921 const_binop (BIT_IOR_EXPR
, l_const
, r_const
, 0));
2924 /* Perform constant folding and related simplification of EXPR.
2925 The related simplifications include x*1 => x, x*0 => 0, etc.,
2926 and application of the associative law.
2927 NOP_EXPR conversions may be removed freely (as long as we
2928 are careful not to change the C type of the overall expression)
2929 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
2930 but we can constant-fold them if they have constant operands. */
2936 register tree t
= expr
;
2937 tree t1
= NULL_TREE
;
2939 tree type
= TREE_TYPE (expr
);
2940 register tree arg0
, arg1
;
2941 register enum tree_code code
= TREE_CODE (t
);
2945 /* WINS will be nonzero when the switch is done
2946 if all operands are constant. */
2950 /* Return right away if already constant. */
2951 if (TREE_CONSTANT (t
))
2953 if (code
== CONST_DECL
)
2954 return DECL_INITIAL (t
);
2958 kind
= TREE_CODE_CLASS (code
);
2959 if (code
== NOP_EXPR
|| code
== FLOAT_EXPR
|| code
== CONVERT_EXPR
)
2961 /* Special case for conversion ops that can have fixed point args. */
2962 arg0
= TREE_OPERAND (t
, 0);
2964 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
2966 STRIP_TYPE_NOPS (arg0
);
2968 if (arg0
!= 0 && TREE_CODE (arg0
) != INTEGER_CST
2969 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
2970 && TREE_CODE (arg0
) != REAL_CST
2971 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
2973 /* Note that TREE_CONSTANT isn't enough:
2974 static var addresses are constant but we can't
2975 do arithmetic on them. */
2978 else if (kind
== 'e' || kind
== '<'
2979 || kind
== '1' || kind
== '2' || kind
== 'r')
2981 register int len
= tree_code_length
[(int) code
];
2983 for (i
= 0; i
< len
; i
++)
2985 tree op
= TREE_OPERAND (t
, i
);
2988 continue; /* Valid for CALL_EXPR, at least. */
2990 /* Strip any conversions that don't change the mode. */
2993 if (TREE_CODE (op
) != INTEGER_CST
2994 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
2995 && TREE_CODE (op
) != REAL_CST
2996 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
2998 /* Note that TREE_CONSTANT isn't enough:
2999 static var addresses are constant but we can't
3000 do arithmetic on them. */
3010 /* If this is a commutative operation, and ARG0 is a constant, move it
3011 to ARG1 to reduce the number of tests below. */
3012 if ((code
== PLUS_EXPR
|| code
== MULT_EXPR
|| code
== MIN_EXPR
3013 || code
== MAX_EXPR
|| code
== BIT_IOR_EXPR
|| code
== BIT_XOR_EXPR
3014 || code
== BIT_AND_EXPR
)
3015 && (TREE_CODE (arg0
) == INTEGER_CST
|| TREE_CODE (arg0
) == REAL_CST
))
3017 tem
= arg0
; arg0
= arg1
; arg1
= tem
;
3019 tem
= TREE_OPERAND (t
, 0); TREE_OPERAND (t
, 0) = TREE_OPERAND (t
, 1);
3020 TREE_OPERAND (t
, 1) = tem
;
3023 /* Now WINS is set as described above,
3024 ARG0 is the first operand of EXPR,
3025 and ARG1 is the second operand (if it has more than one operand).
3027 First check for cases where an arithmetic operation is applied to a
3028 compound, conditional, or comparison operation. Push the arithmetic
3029 operation inside the compound or conditional to see if any folding
3030 can then be done. Convert comparison to conditional for this purpose.
3031 The also optimizes non-constant cases that used to be done in
3033 if (TREE_CODE_CLASS (code
) == '1')
3035 if (TREE_CODE (arg0
) == COMPOUND_EXPR
)
3036 return build (COMPOUND_EXPR
, type
, TREE_OPERAND (arg0
, 0),
3037 fold (build1 (code
, type
, TREE_OPERAND (arg0
, 1))));
3038 else if (TREE_CODE (arg0
) == COND_EXPR
)
3040 t
= fold (build (COND_EXPR
, type
, TREE_OPERAND (arg0
, 0),
3041 fold (build1 (code
, type
, TREE_OPERAND (arg0
, 1))),
3042 fold (build1 (code
, type
, TREE_OPERAND (arg0
, 2)))));
3044 /* If this was a conversion, and all we did was to move into
3045 inside the COND_EXPR, bring it back out. Then return so we
3046 don't get into an infinite recursion loop taking the conversion
3047 out and then back in. */
3049 if ((code
== NOP_EXPR
|| code
== CONVERT_EXPR
3050 || code
== NON_LVALUE_EXPR
)
3051 && TREE_CODE (t
) == COND_EXPR
3052 && TREE_CODE (TREE_OPERAND (t
, 1)) == code
3053 && TREE_CODE (TREE_OPERAND (t
, 2)) == code
3054 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t
, 1), 0))
3055 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t
, 2), 0))))
3056 t
= build1 (code
, type
,
3058 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t
, 1), 0)),
3059 TREE_OPERAND (t
, 0),
3060 TREE_OPERAND (TREE_OPERAND (t
, 1), 0),
3061 TREE_OPERAND (TREE_OPERAND (t
, 2), 0)));
3064 else if (TREE_CODE_CLASS (TREE_CODE (arg0
)) == '<')
3065 return fold (build (COND_EXPR
, type
, arg0
,
3066 fold (build1 (code
, type
, integer_one_node
)),
3067 fold (build1 (code
, type
, integer_zero_node
))));
3069 else if (TREE_CODE_CLASS (code
) == '2')
3071 if (TREE_CODE (arg1
) == COMPOUND_EXPR
)
3072 return build (COMPOUND_EXPR
, type
, TREE_OPERAND (arg1
, 0),
3073 fold (build (code
, type
, arg0
, TREE_OPERAND (arg1
, 1))));
3074 else if (TREE_CODE (arg1
) == COND_EXPR
3075 || TREE_CODE_CLASS (TREE_CODE (arg1
)) == '<')
3077 tree test
, true_value
, false_value
;
3079 if (TREE_CODE (arg1
) == COND_EXPR
)
3081 test
= TREE_OPERAND (arg1
, 0);
3082 true_value
= TREE_OPERAND (arg1
, 1);
3083 false_value
= TREE_OPERAND (arg1
, 2);
3088 true_value
= integer_one_node
;
3089 false_value
= integer_zero_node
;
3092 if (TREE_CODE (arg0
) != VAR_DECL
&& TREE_CODE (arg0
) != PARM_DECL
)
3093 arg0
= save_expr (arg0
);
3094 test
= fold (build (COND_EXPR
, type
, test
,
3095 fold (build (code
, type
, arg0
, true_value
)),
3096 fold (build (code
, type
, arg0
, false_value
))));
3097 if (TREE_CODE (arg0
) == SAVE_EXPR
)
3098 return build (COMPOUND_EXPR
, type
,
3099 convert (void_type_node
, arg0
), test
);
3101 return convert (type
, test
);
3104 else if (TREE_CODE (arg0
) == COMPOUND_EXPR
)
3105 return build (COMPOUND_EXPR
, type
, TREE_OPERAND (arg0
, 0),
3106 fold (build (code
, type
, TREE_OPERAND (arg0
, 1), arg1
)));
3107 else if (TREE_CODE (arg0
) == COND_EXPR
3108 || TREE_CODE_CLASS (TREE_CODE (arg0
)) == '<')
3110 tree test
, true_value
, false_value
;
3112 if (TREE_CODE (arg0
) == COND_EXPR
)
3114 test
= TREE_OPERAND (arg0
, 0);
3115 true_value
= TREE_OPERAND (arg0
, 1);
3116 false_value
= TREE_OPERAND (arg0
, 2);
3121 true_value
= integer_one_node
;
3122 false_value
= integer_zero_node
;
3125 if (TREE_CODE (arg1
) != VAR_DECL
&& TREE_CODE (arg1
) != PARM_DECL
)
3126 arg1
= save_expr (arg1
);
3127 test
= fold (build (COND_EXPR
, type
, test
,
3128 fold (build (code
, type
, true_value
, arg1
)),
3129 fold (build (code
, type
, false_value
, arg1
))));
3130 if (TREE_CODE (arg1
) == SAVE_EXPR
)
3131 return build (COMPOUND_EXPR
, type
,
3132 convert (void_type_node
, arg1
), test
);
3134 return convert (type
, test
);
3137 else if (TREE_CODE_CLASS (code
) == '<'
3138 && TREE_CODE (arg0
) == COMPOUND_EXPR
)
3139 return build (COMPOUND_EXPR
, type
, TREE_OPERAND (arg0
, 0),
3140 fold (build (code
, type
, TREE_OPERAND (arg0
, 1), arg1
)));
3141 else if (TREE_CODE_CLASS (code
) == '<'
3142 && TREE_CODE (arg1
) == COMPOUND_EXPR
)
3143 return build (COMPOUND_EXPR
, type
, TREE_OPERAND (arg1
, 0),
3144 fold (build (code
, type
, arg0
, TREE_OPERAND (arg1
, 1))));
3156 return fold (DECL_INITIAL (t
));
3161 case FIX_TRUNC_EXPR
:
3162 /* Other kinds of FIX are not handled properly by fold_convert. */
3163 /* Two conversions in a row are not needed unless:
3164 - the intermediate type is narrower than both initial and final, or
3165 - the intermediate type and innermost type differ in signedness,
3166 and the outermost type is wider than the intermediate, or
3167 - the initial type is a pointer type and the precisions of the
3168 intermediate and final types differ, or
3169 - the final type is a pointer type and the precisions of the
3170 initial and intermediate types differ. */
3171 if ((TREE_CODE (TREE_OPERAND (t
, 0)) == NOP_EXPR
3172 || TREE_CODE (TREE_OPERAND (t
, 0)) == CONVERT_EXPR
)
3173 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t
, 0)))
3174 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t
, 0), 0)))
3176 TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t
, 0)))
3177 > TYPE_PRECISION (TREE_TYPE (t
)))
3178 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t
, 0), 0)))
3180 && (TREE_CODE (TREE_TYPE (TREE_OPERAND (t
, 0)))
3182 && (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t
, 0)))
3183 != TREE_UNSIGNED (TREE_OPERAND (TREE_OPERAND (t
, 0), 0)))
3184 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t
, 0)))
3185 < TYPE_PRECISION (TREE_TYPE (t
))))
3186 && ((TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t
, 0)))
3187 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t
, 0)))
3188 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t
, 0), 0)))))
3190 (TREE_UNSIGNED (TREE_TYPE (t
))
3191 && (TYPE_PRECISION (TREE_TYPE (t
))
3192 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t
, 0))))))
3193 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t
, 0), 0)))
3195 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t
, 0)))
3196 != TYPE_PRECISION (TREE_TYPE (t
))))
3197 && ! (TREE_CODE (TREE_TYPE (t
)) == POINTER_TYPE
3198 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t
, 0), 0)))
3199 != TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t
, 0))))))
3200 return convert (TREE_TYPE (t
), TREE_OPERAND (TREE_OPERAND (t
, 0), 0));
3202 if (TREE_CODE (TREE_OPERAND (t
, 0)) == MODIFY_EXPR
3203 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t
, 0), 1))
3204 /* Detect assigning a bitfield. */
3205 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t
, 0), 0)) == COMPONENT_REF
3206 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t
, 0), 0), 1))))
3208 /* Don't leave an assignment inside a conversion
3209 unless assigning a bitfield. */
3210 tree prev
= TREE_OPERAND (t
, 0);
3211 TREE_OPERAND (t
, 0) = TREE_OPERAND (prev
, 1);
3212 /* First do the assignment, then return converted constant. */
3213 t
= build (COMPOUND_EXPR
, TREE_TYPE (t
), prev
, fold (t
));
3219 TREE_CONSTANT (t
) = TREE_CONSTANT (arg0
);
3222 return fold_convert (t
, arg0
);
3224 #if 0 /* This loses on &"foo"[0]. */
3229 /* Fold an expression like: "foo"[2] */
3230 if (TREE_CODE (arg0
) == STRING_CST
3231 && TREE_CODE (arg1
) == INTEGER_CST
3232 && !TREE_INT_CST_HIGH (arg1
)
3233 && (i
= TREE_INT_CST_LOW (arg1
)) < TREE_STRING_LENGTH (arg0
))
3235 t
= build_int_2 (TREE_STRING_POINTER (arg0
)[i
], 0);
3236 TREE_TYPE (t
) = TREE_TYPE (TREE_TYPE (arg0
));
3244 TREE_CONSTANT (t
) = wins
;
3250 if (TREE_CODE (arg0
) == INTEGER_CST
)
3252 HOST_WIDE_INT low
, high
;
3253 int overflow
= neg_double (TREE_INT_CST_LOW (arg0
),
3254 TREE_INT_CST_HIGH (arg0
),
3256 t
= build_int_2 (low
, high
);
3257 TREE_CONSTANT_OVERFLOW (t
)
3258 = overflow
| TREE_CONSTANT_OVERFLOW (arg0
);
3259 TREE_TYPE (t
) = type
;
3262 else if (TREE_CODE (arg0
) == REAL_CST
)
3263 t
= build_real (type
, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0
)));
3264 TREE_TYPE (t
) = type
;
3266 else if (TREE_CODE (arg0
) == NEGATE_EXPR
)
3267 return TREE_OPERAND (arg0
, 0);
3269 /* Convert - (a - b) to (b - a) for non-floating-point. */
3270 else if (TREE_CODE (arg0
) == MINUS_EXPR
&& TREE_CODE (type
) != REAL_TYPE
)
3271 return build (MINUS_EXPR
, type
, TREE_OPERAND (arg0
, 1),
3272 TREE_OPERAND (arg0
, 0));
3279 if (TREE_CODE (arg0
) == INTEGER_CST
)
3281 if (! TREE_UNSIGNED (type
)
3282 && TREE_INT_CST_HIGH (arg0
) < 0)
3284 HOST_WIDE_INT low
, high
;
3285 int overflow
= neg_double (TREE_INT_CST_LOW (arg0
),
3286 TREE_INT_CST_HIGH (arg0
),
3288 t
= build_int_2 (low
, high
);
3289 TREE_TYPE (t
) = type
;
3290 force_fit_type (t
, overflow
);
3293 else if (TREE_CODE (arg0
) == REAL_CST
)
3295 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0
)))
3296 t
= build_real (type
,
3297 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0
)));
3299 TREE_TYPE (t
) = type
;
3301 else if (TREE_CODE (arg0
) == ABS_EXPR
|| TREE_CODE (arg0
) == NEGATE_EXPR
)
3302 return build1 (ABS_EXPR
, type
, TREE_OPERAND (arg0
, 0));
3308 if (TREE_CODE (arg0
) == INTEGER_CST
)
3309 t
= build_int_2 (~ TREE_INT_CST_LOW (arg0
),
3310 ~ TREE_INT_CST_HIGH (arg0
));
3311 TREE_TYPE (t
) = type
;
3313 TREE_CONSTANT_OVERFLOW (t
) = TREE_CONSTANT_OVERFLOW (arg0
);
3315 else if (TREE_CODE (arg0
) == BIT_NOT_EXPR
)
3316 return TREE_OPERAND (arg0
, 0);
3320 /* A + (-B) -> A - B */
3321 if (TREE_CODE (arg1
) == NEGATE_EXPR
)
3322 return fold (build (MINUS_EXPR
, type
, arg0
, TREE_OPERAND (arg1
, 0)));
3323 else if (TREE_CODE (type
) != REAL_TYPE
)
3325 if (integer_zerop (arg1
))
3326 return non_lvalue (convert (type
, arg0
));
3328 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3329 with a constant, and the two constants have no bits in common,
3330 we should treat this as a BIT_IOR_EXPR since this may produce more
3332 if (TREE_CODE (arg0
) == BIT_AND_EXPR
3333 && TREE_CODE (arg1
) == BIT_AND_EXPR
3334 && TREE_CODE (TREE_OPERAND (arg0
, 1)) == INTEGER_CST
3335 && TREE_CODE (TREE_OPERAND (arg1
, 1)) == INTEGER_CST
3336 && integer_zerop (const_binop (BIT_AND_EXPR
,
3337 TREE_OPERAND (arg0
, 1),
3338 TREE_OPERAND (arg1
, 1), 0)))
3340 code
= BIT_IOR_EXPR
;
3344 /* In IEEE floating point, x+0 may not equal x. */
3345 else if (TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
3346 && real_zerop (arg1
))
3347 return non_lvalue (convert (type
, arg0
));
3349 /* In most languages, can't associate operations on floats
3350 through parentheses. Rather than remember where the parentheses
3351 were, we don't associate floats at all. It shouldn't matter much. */
3352 if (TREE_CODE (type
) == REAL_TYPE
)
3354 /* The varsign == -1 cases happen only for addition and subtraction.
3355 It says that the arg that was split was really CON minus VAR.
3356 The rest of the code applies to all associative operations. */
3362 if (split_tree (arg0
, code
, &var
, &con
, &varsign
))
3366 /* EXPR is (CON-VAR) +- ARG1. */
3367 /* If it is + and VAR==ARG1, return just CONST. */
3368 if (code
== PLUS_EXPR
&& operand_equal_p (var
, arg1
, 0))
3369 return convert (TREE_TYPE (t
), con
);
3371 /* Otherwise return (CON +- ARG1) - VAR. */
3372 TREE_SET_CODE (t
, MINUS_EXPR
);
3373 TREE_OPERAND (t
, 1) = var
;
3375 = fold (build (code
, TREE_TYPE (t
), con
, arg1
));
3379 /* EXPR is (VAR+CON) +- ARG1. */
3380 /* If it is - and VAR==ARG1, return just CONST. */
3381 if (code
== MINUS_EXPR
&& operand_equal_p (var
, arg1
, 0))
3382 return convert (TREE_TYPE (t
), con
);
3384 /* Otherwise return VAR +- (ARG1 +- CON). */
3385 TREE_OPERAND (t
, 1) = tem
3386 = fold (build (code
, TREE_TYPE (t
), arg1
, con
));
3387 TREE_OPERAND (t
, 0) = var
;
3388 if (integer_zerop (tem
)
3389 && (code
== PLUS_EXPR
|| code
== MINUS_EXPR
))
3390 return convert (type
, var
);
3391 /* If we have x +/- (c - d) [c an explicit integer]
3392 change it to x -/+ (d - c) since if d is relocatable
3393 then the latter can be a single immediate insn
3394 and the former cannot. */
3395 if (TREE_CODE (tem
) == MINUS_EXPR
3396 && TREE_CODE (TREE_OPERAND (tem
, 0)) == INTEGER_CST
)
3398 tree tem1
= TREE_OPERAND (tem
, 1);
3399 TREE_OPERAND (tem
, 1) = TREE_OPERAND (tem
, 0);
3400 TREE_OPERAND (tem
, 0) = tem1
;
3402 (code
== PLUS_EXPR
? MINUS_EXPR
: PLUS_EXPR
));
3408 if (split_tree (arg1
, code
, &var
, &con
, &varsign
))
3410 /* EXPR is ARG0 +- (CON +- VAR). */
3413 (code
== PLUS_EXPR
? MINUS_EXPR
: PLUS_EXPR
));
3414 if (TREE_CODE (t
) == MINUS_EXPR
3415 && operand_equal_p (var
, arg0
, 0))
3417 /* If VAR and ARG0 cancel, return just CON or -CON. */
3418 if (code
== PLUS_EXPR
)
3419 return convert (TREE_TYPE (t
), con
);
3420 return fold (build1 (NEGATE_EXPR
, TREE_TYPE (t
),
3421 convert (TREE_TYPE (t
), con
)));
3424 = fold (build (code
, TREE_TYPE (t
), arg0
, con
));
3425 TREE_OPERAND (t
, 1) = var
;
3426 if (integer_zerop (TREE_OPERAND (t
, 0))
3427 && TREE_CODE (t
) == PLUS_EXPR
)
3428 return convert (TREE_TYPE (t
), var
);
3433 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3434 if (TREE_CODE (arg1
) == REAL_CST
)
3436 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3438 t1
= const_binop (code
, arg0
, arg1
, 0);
3439 if (t1
!= NULL_TREE
)
3441 /* The return value should always have
3442 the same type as the original expression. */
3443 TREE_TYPE (t1
) = TREE_TYPE (t
);
3449 if (TREE_CODE (type
) != REAL_TYPE
)
3451 if (! wins
&& integer_zerop (arg0
))
3452 return build1 (NEGATE_EXPR
, type
, arg1
);
3453 if (integer_zerop (arg1
))
3454 return non_lvalue (convert (type
, arg0
));
3456 /* Convert A - (-B) to A + B. */
3457 else if (TREE_CODE (arg1
) == NEGATE_EXPR
)
3458 return fold (build (PLUS_EXPR
, type
, arg0
, TREE_OPERAND (arg1
, 0)));
3459 else if (TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
)
3461 /* Except with IEEE floating point, 0-x equals -x. */
3462 if (! wins
&& real_zerop (arg0
))
3463 return build1 (NEGATE_EXPR
, type
, arg1
);
3464 /* Except with IEEE floating point, x-0 equals x. */
3465 if (real_zerop (arg1
))
3466 return non_lvalue (convert (type
, arg0
));
3468 /* Fold &x - &x. This can happen from &x.foo - &x.
3469 This is unsafe for certain floats even in non-IEEE formats.
3470 In IEEE, it is unsafe because it does wrong for NaNs.
3471 Also note that operand_equal_p is always false if an operand
3474 if (operand_equal_p (arg0
, arg1
,
3475 TREE_CODE (type
) == REAL_TYPE
))
3476 return convert (type
, integer_zero_node
);
3481 if (TREE_CODE (type
) != REAL_TYPE
)
3483 if (integer_zerop (arg1
))
3484 return omit_one_operand (type
, arg1
, arg0
);
3485 if (integer_onep (arg1
))
3486 return non_lvalue (convert (type
, arg0
));
3488 /* (a * (1 << b)) is (a << b) */
3489 if (TREE_CODE (arg1
) == LSHIFT_EXPR
3490 && integer_onep (TREE_OPERAND (arg1
, 0)))
3491 return fold (build (LSHIFT_EXPR
, type
, arg0
,
3492 TREE_OPERAND (arg1
, 1)));
3493 if (TREE_CODE (arg0
) == LSHIFT_EXPR
3494 && integer_onep (TREE_OPERAND (arg0
, 0)))
3495 return fold (build (LSHIFT_EXPR
, type
, arg1
,
3496 TREE_OPERAND (arg0
, 1)));
3500 /* x*0 is 0, except for IEEE floating point. */
3501 if (TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
3502 && real_zerop (arg1
))
3503 return omit_one_operand (type
, arg1
, arg0
);
3504 /* In IEEE floating point, x*1 is not equivalent to x for snans.
3505 However, ANSI says we can drop signals,
3506 so we can do this anyway. */
3507 if (real_onep (arg1
))
3508 return non_lvalue (convert (type
, arg0
));
3510 if (! wins
&& real_twop (arg1
))
3512 tree arg
= save_expr (arg0
);
3513 return build (PLUS_EXPR
, type
, arg
, arg
);
3520 if (integer_all_onesp (arg1
))
3521 return omit_one_operand (type
, arg1
, arg0
);
3522 if (integer_zerop (arg1
))
3523 return non_lvalue (convert (type
, arg0
));
3524 t1
= distribute_bit_expr (code
, type
, arg0
, arg1
);
3525 if (t1
!= NULL_TREE
)
3528 /* (a << C1) | (a >> C2) if A is unsigned and C1+C2 is the size of A
3529 is a rotate of A by C1 bits. */
3531 if ((TREE_CODE (arg0
) == RSHIFT_EXPR
3532 || TREE_CODE (arg0
) == LSHIFT_EXPR
)
3533 && (TREE_CODE (arg1
) == RSHIFT_EXPR
3534 || TREE_CODE (arg1
) == LSHIFT_EXPR
)
3535 && TREE_CODE (arg0
) != TREE_CODE (arg1
)
3536 && operand_equal_p (TREE_OPERAND (arg0
, 0), TREE_OPERAND (arg1
,0), 0)
3537 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0
, 0)))
3538 && TREE_CODE (TREE_OPERAND (arg0
, 1)) == INTEGER_CST
3539 && TREE_CODE (TREE_OPERAND (arg1
, 1)) == INTEGER_CST
3540 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0
, 1)) == 0
3541 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1
, 1)) == 0
3542 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0
, 1))
3543 + TREE_INT_CST_LOW (TREE_OPERAND (arg1
, 1)))
3544 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0
, 0)))))
3545 return build (LROTATE_EXPR
, type
, TREE_OPERAND (arg0
, 0),
3546 TREE_CODE (arg0
) == LSHIFT_EXPR
3547 ? TREE_OPERAND (arg0
, 1) : TREE_OPERAND (arg1
, 1));
3552 if (integer_zerop (arg1
))
3553 return non_lvalue (convert (type
, arg0
));
3554 if (integer_all_onesp (arg1
))
3555 return fold (build1 (BIT_NOT_EXPR
, type
, arg0
));
3560 if (integer_all_onesp (arg1
))
3561 return non_lvalue (convert (type
, arg0
));
3562 if (integer_zerop (arg1
))
3563 return omit_one_operand (type
, arg1
, arg0
);
3564 t1
= distribute_bit_expr (code
, type
, arg0
, arg1
);
3565 if (t1
!= NULL_TREE
)
3567 /* Simplify ((int)c & 0x377) into (int)c, if c is unsigned char. */
3568 if (TREE_CODE (arg0
) == INTEGER_CST
&& TREE_CODE (arg1
) == NOP_EXPR
3569 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1
, 0))))
3571 int prec
= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1
, 0)));
3572 if (prec
< BITS_PER_WORD
&& prec
< HOST_BITS_PER_WIDE_INT
3573 && (~TREE_INT_CST_LOW (arg0
)
3574 & (((HOST_WIDE_INT
) 1 << prec
) - 1)) == 0)
3575 return build1 (NOP_EXPR
, type
, TREE_OPERAND (arg1
, 0));
3577 if (TREE_CODE (arg1
) == INTEGER_CST
&& TREE_CODE (arg0
) == NOP_EXPR
3578 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0
, 0))))
3580 int prec
= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0
, 0)));
3581 if (prec
< BITS_PER_WORD
&& prec
< HOST_BITS_PER_WIDE_INT
3582 && (~TREE_INT_CST_LOW (arg1
)
3583 & (((HOST_WIDE_INT
) 1 << prec
) - 1)) == 0)
3584 return build1 (NOP_EXPR
, type
, TREE_OPERAND (arg0
, 0));
3588 case BIT_ANDTC_EXPR
:
3589 if (integer_all_onesp (arg0
))
3590 return non_lvalue (convert (type
, arg1
));
3591 if (integer_zerop (arg0
))
3592 return omit_one_operand (type
, arg0
, arg1
);
3593 if (TREE_CODE (arg1
) == INTEGER_CST
)
3595 arg1
= fold (build1 (BIT_NOT_EXPR
, type
, arg1
));
3596 code
= BIT_AND_EXPR
;
3601 case TRUNC_DIV_EXPR
:
3602 case ROUND_DIV_EXPR
:
3603 case FLOOR_DIV_EXPR
:
3605 case EXACT_DIV_EXPR
:
3607 if (integer_onep (arg1
))
3608 return non_lvalue (convert (type
, arg0
));
3609 if (integer_zerop (arg1
))
3612 /* If we have ((a * C1) / C2) and C1 % C2 == 0, we can replace this with
3613 (a * (C1/C2). Also look for when we have a SAVE_EXPR in
3615 if (TREE_CODE (arg1
) == INTEGER_CST
3616 && TREE_INT_CST_LOW (arg1
) > 0 && TREE_INT_CST_HIGH (arg1
) == 0
3617 && TREE_CODE (arg0
) == MULT_EXPR
3618 && TREE_CODE (TREE_OPERAND (arg0
, 1)) == INTEGER_CST
3619 && TREE_INT_CST_LOW (TREE_OPERAND (arg0
, 1)) > 0
3620 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0
, 1)) == 0
3621 && 0 == (TREE_INT_CST_LOW (TREE_OPERAND (arg0
, 1))
3622 % TREE_INT_CST_LOW (arg1
)))
3625 = build_int_2 (TREE_INT_CST_LOW (TREE_OPERAND (arg0
, 1))
3626 / TREE_INT_CST_LOW (arg1
), 0);
3628 TREE_TYPE (new_op
) = type
;
3629 return build (MULT_EXPR
, type
, TREE_OPERAND (arg0
, 0), new_op
);
3632 else if (TREE_CODE (arg1
) == INTEGER_CST
3633 && TREE_INT_CST_LOW (arg1
) > 0 && TREE_INT_CST_HIGH (arg1
) == 0
3634 && TREE_CODE (arg0
) == SAVE_EXPR
3635 && TREE_CODE (TREE_OPERAND (arg0
, 0)) == MULT_EXPR
3636 && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (arg0
, 0), 1))
3638 && (TREE_INT_CST_LOW (TREE_OPERAND (TREE_OPERAND (arg0
, 0), 1))
3640 && (TREE_INT_CST_HIGH (TREE_OPERAND (TREE_OPERAND (arg0
, 0), 1))
3642 && (TREE_INT_CST_LOW (TREE_OPERAND (TREE_OPERAND (arg0
, 0), 1))
3643 % TREE_INT_CST_LOW (arg1
)) == 0)
3646 = build_int_2 (TREE_INT_CST_LOW (TREE_OPERAND (TREE_OPERAND (arg0
, 0), 1))
3647 / TREE_INT_CST_LOW (arg1
), 0);
3649 TREE_TYPE (new_op
) = type
;
3650 return build (MULT_EXPR
, type
,
3651 TREE_OPERAND (TREE_OPERAND (arg0
, 0), 0), new_op
);
3654 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3655 #ifndef REAL_INFINITY
3656 if (TREE_CODE (arg1
) == REAL_CST
3657 && real_zerop (arg1
))
3660 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3665 case FLOOR_MOD_EXPR
:
3666 case ROUND_MOD_EXPR
:
3667 case TRUNC_MOD_EXPR
:
3668 if (integer_onep (arg1
))
3669 return omit_one_operand (type
, integer_zero_node
, arg0
);
3670 if (integer_zerop (arg1
))
3678 if (integer_zerop (arg1
))
3679 return non_lvalue (convert (type
, arg0
));
3680 /* Since negative shift count is not well-defined,
3681 don't try to compute it in the compiler. */
3682 if (tree_int_cst_lt (arg1
, integer_zero_node
))
3687 if (operand_equal_p (arg0
, arg1
, 0))
3689 if (TREE_CODE (type
) == INTEGER_TYPE
3690 && operand_equal_p (arg1
, TYPE_MIN_VALUE (type
), 1))
3691 return omit_one_operand (type
, arg1
, arg0
);
3695 if (operand_equal_p (arg0
, arg1
, 0))
3697 if (TREE_CODE (type
) == INTEGER_TYPE
3698 && operand_equal_p (arg1
, TYPE_MAX_VALUE (type
), 1))
3699 return omit_one_operand (type
, arg1
, arg0
);
3702 case TRUTH_NOT_EXPR
:
3703 /* Note that the operand of this must be an int
3704 and its values must be 0 or 1.
3705 ("true" is a fixed value perhaps depending on the language,
3706 but we don't handle values other than 1 correctly yet.) */
3707 return invert_truthvalue (arg0
);
3709 case TRUTH_ANDIF_EXPR
:
3710 /* Note that the operands of this must be ints
3711 and their values must be 0 or 1.
3712 ("true" is a fixed value perhaps depending on the language.) */
3713 /* If first arg is constant zero, return it. */
3714 if (integer_zerop (arg0
))
3716 case TRUTH_AND_EXPR
:
3717 /* If either arg is constant true, drop it. */
3718 if (TREE_CODE (arg0
) == INTEGER_CST
&& ! integer_zerop (arg0
))
3719 return non_lvalue (arg1
);
3720 if (TREE_CODE (arg1
) == INTEGER_CST
&& ! integer_zerop (arg1
))
3721 return non_lvalue (arg0
);
3722 /* If second arg is constant zero, result is zero, but first arg
3723 must be evaluated. */
3724 if (integer_zerop (arg1
))
3725 return omit_one_operand (type
, arg1
, arg0
);
3728 /* Check for the possibility of merging component references. If our
3729 lhs is another similar operation, try to merge its rhs with our
3730 rhs. Then try to merge our lhs and rhs. */
3733 if (TREE_CODE (arg0
) == code
)
3735 tem
= fold_truthop (code
, type
,
3736 TREE_OPERAND (arg0
, 1), arg1
);
3738 return fold (build (code
, type
, TREE_OPERAND (arg0
, 0), tem
));
3741 tem
= fold_truthop (code
, type
, arg0
, arg1
);
3747 case TRUTH_ORIF_EXPR
:
3748 /* Note that the operands of this must be ints
3749 and their values must be 0 or true.
3750 ("true" is a fixed value perhaps depending on the language.) */
3751 /* If first arg is constant true, return it. */
3752 if (TREE_CODE (arg0
) == INTEGER_CST
&& ! integer_zerop (arg0
))
3755 /* If either arg is constant zero, drop it. */
3756 if (TREE_CODE (arg0
) == INTEGER_CST
&& integer_zerop (arg0
))
3757 return non_lvalue (arg1
);
3758 if (TREE_CODE (arg1
) == INTEGER_CST
&& integer_zerop (arg1
))
3759 return non_lvalue (arg0
);
3760 /* If second arg is constant true, result is true, but we must
3761 evaluate first arg. */
3762 if (TREE_CODE (arg1
) == INTEGER_CST
&& ! integer_zerop (arg1
))
3763 return omit_one_operand (type
, arg1
, arg0
);
3766 case TRUTH_XOR_EXPR
:
3767 /* If either arg is constant zero, drop it. */
3768 if (integer_zerop (arg0
))
3769 return non_lvalue (arg1
);
3770 if (integer_zerop (arg1
))
3771 return non_lvalue (arg0
);
3772 /* If either arg is constant true, this is a logical inversion. */
3773 if (integer_onep (arg0
))
3774 return non_lvalue (invert_truthvalue (arg1
));
3775 if (integer_onep (arg1
))
3776 return non_lvalue (invert_truthvalue (arg0
));
3785 /* If one arg is a constant integer, put it last. */
3786 if (TREE_CODE (arg0
) == INTEGER_CST
3787 && TREE_CODE (arg1
) != INTEGER_CST
)
3789 TREE_OPERAND (t
, 0) = arg1
;
3790 TREE_OPERAND (t
, 1) = arg0
;
3791 arg0
= TREE_OPERAND (t
, 0);
3792 arg1
= TREE_OPERAND (t
, 1);
3793 code
= swap_tree_comparison (code
);
3794 TREE_SET_CODE (t
, code
);
3797 /* Convert foo++ == CONST into ++foo == CONST + INCR.
3798 First, see if one arg is constant; find the constant arg
3799 and the other one. */
3801 tree constop
= 0, varop
;
3804 if (TREE_CONSTANT (arg1
))
3805 constoploc
= &TREE_OPERAND (t
, 1), constop
= arg1
, varop
= arg0
;
3806 if (TREE_CONSTANT (arg0
))
3807 constoploc
= &TREE_OPERAND (t
, 0), constop
= arg0
, varop
= arg1
;
3809 if (constop
&& TREE_CODE (varop
) == POSTINCREMENT_EXPR
)
3811 /* This optimization is invalid for ordered comparisons
3812 if CONST+INCR overflows or if foo+incr might overflow.
3813 This optimization is invalid for floating point due to rounding.
3814 For pointer types we assume overflow doesn't happen. */
3815 if (TREE_CODE (TREE_TYPE (varop
)) == POINTER_TYPE
3816 || (TREE_CODE (TREE_TYPE (varop
)) != REAL_TYPE
3817 && (code
== EQ_EXPR
|| code
== NE_EXPR
)))
3820 = fold (build (PLUS_EXPR
, TREE_TYPE (varop
),
3821 constop
, TREE_OPERAND (varop
, 1)));
3822 TREE_SET_CODE (varop
, PREINCREMENT_EXPR
);
3823 *constoploc
= newconst
;
3827 else if (constop
&& TREE_CODE (varop
) == POSTDECREMENT_EXPR
)
3829 if (TREE_CODE (TREE_TYPE (varop
)) == POINTER_TYPE
3830 || (TREE_CODE (TREE_TYPE (varop
)) != REAL_TYPE
3831 && (code
== EQ_EXPR
|| code
== NE_EXPR
)))
3834 = fold (build (MINUS_EXPR
, TREE_TYPE (varop
),
3835 constop
, TREE_OPERAND (varop
, 1)));
3836 TREE_SET_CODE (varop
, PREDECREMENT_EXPR
);
3837 *constoploc
= newconst
;
3843 /* Change X >= CST to X > (CST - 1) if CST is positive. */
3844 if (TREE_CODE (arg1
) == INTEGER_CST
3845 && TREE_CODE (arg0
) != INTEGER_CST
3846 && ! tree_int_cst_lt (arg1
, integer_one_node
))
3848 switch (TREE_CODE (t
))
3852 TREE_SET_CODE (t
, code
);
3853 arg1
= const_binop (MINUS_EXPR
, arg1
, integer_one_node
, 0);
3854 TREE_OPERAND (t
, 1) = arg1
;
3859 TREE_SET_CODE (t
, code
);
3860 arg1
= const_binop (MINUS_EXPR
, arg1
, integer_one_node
, 0);
3861 TREE_OPERAND (t
, 1) = arg1
;
3865 /* If this is an EQ or NE comparison with zero and ARG0 is
3866 (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
3867 two operations, but the latter can be done in one less insn
3868 one machine that have only two-operand insns or on which a
3869 constant cannot be the first operand. */
3870 if (integer_zerop (arg1
) && (code
== EQ_EXPR
|| code
== NE_EXPR
)
3871 && TREE_CODE (arg0
) == BIT_AND_EXPR
)
3873 if (TREE_CODE (TREE_OPERAND (arg0
, 0)) == LSHIFT_EXPR
3874 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0
, 0), 0)))
3876 fold (build (code
, type
,
3877 build (BIT_AND_EXPR
, TREE_TYPE (arg0
),
3879 TREE_TYPE (TREE_OPERAND (arg0
, 0)),
3880 TREE_OPERAND (arg0
, 1),
3881 TREE_OPERAND (TREE_OPERAND (arg0
, 0), 1)),
3882 convert (TREE_TYPE (arg0
),
3885 else if (TREE_CODE (TREE_OPERAND (arg0
, 1)) == LSHIFT_EXPR
3886 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0
, 1), 0)))
3888 fold (build (code
, type
,
3889 build (BIT_AND_EXPR
, TREE_TYPE (arg0
),
3891 TREE_TYPE (TREE_OPERAND (arg0
, 1)),
3892 TREE_OPERAND (arg0
, 0),
3893 TREE_OPERAND (TREE_OPERAND (arg0
, 1), 1)),
3894 convert (TREE_TYPE (arg0
),
3899 /* If this is an NE comparison of zero with an AND of one, remove the
3900 comparison since the AND will give the correct value. */
3901 if (code
== NE_EXPR
&& integer_zerop (arg1
)
3902 && TREE_CODE (arg0
) == BIT_AND_EXPR
3903 && integer_onep (TREE_OPERAND (arg0
, 1)))
3904 return convert (type
, arg0
);
3906 /* If we have (A & C) == C where C is a power of 2, convert this into
3907 (A & C) != 0. Similarly for NE_EXPR. */
3908 if ((code
== EQ_EXPR
|| code
== NE_EXPR
)
3909 && TREE_CODE (arg0
) == BIT_AND_EXPR
3910 && integer_pow2p (TREE_OPERAND (arg0
, 1))
3911 && operand_equal_p (TREE_OPERAND (arg0
, 1), arg1
, 0))
3912 return build (code
== EQ_EXPR
? NE_EXPR
: EQ_EXPR
, type
,
3913 arg0
, integer_zero_node
);
3915 /* Simplify comparison of something with itself. (For IEEE
3916 floating-point, we can only do some of these simplifications.) */
3917 if (operand_equal_p (arg0
, arg1
, 0))
3924 if (TREE_CODE (TREE_TYPE (arg0
)) == INTEGER_TYPE
)
3926 t
= build_int_2 (1, 0);
3927 TREE_TYPE (t
) = type
;
3931 TREE_SET_CODE (t
, code
);
3935 /* For NE, we can only do this simplification if integer. */
3936 if (TREE_CODE (TREE_TYPE (arg0
)) != INTEGER_TYPE
)
3938 /* ... fall through ... */
3941 t
= build_int_2 (0, 0);
3942 TREE_TYPE (t
) = type
;
3947 /* An unsigned comparison against 0 can be simplified. */
3948 if (integer_zerop (arg1
)
3949 && (TREE_CODE (TREE_TYPE (arg1
)) == INTEGER_TYPE
3950 || TREE_CODE (TREE_TYPE (arg1
)) == POINTER_TYPE
)
3951 && TREE_UNSIGNED (TREE_TYPE (arg1
)))
3953 switch (TREE_CODE (t
))
3957 TREE_SET_CODE (t
, NE_EXPR
);
3961 TREE_SET_CODE (t
, EQ_EXPR
);
3964 return omit_one_operand (integer_type_node
,
3965 integer_one_node
, arg0
);
3967 return omit_one_operand (integer_type_node
,
3968 integer_zero_node
, arg0
);
3972 /* If we are comparing an expression that just has comparisons
3973 of two integer values, arithmetic expressions of those comparisons,
3974 and constants, we can simplify it. There are only three cases
3975 to check: the two values can either be equal, the first can be
3976 greater, or the second can be greater. Fold the expression for
3977 those three values. Since each value must be 0 or 1, we have
3978 eight possibilities, each of which corresponds to the constant 0
3979 or 1 or one of the six possible comparisons.
3981 This handles common cases like (a > b) == 0 but also handles
3982 expressions like ((x > y) - (y > x)) > 0, which supposedly
3983 occur in macroized code. */
3985 if (TREE_CODE (arg1
) == INTEGER_CST
&& TREE_CODE (arg0
) != INTEGER_CST
)
3987 tree cval1
= 0, cval2
= 0;
3989 if (twoval_comparison_p (arg0
, &cval1
, &cval2
)
3990 /* Don't handle degenerate cases here; they should already
3991 have been handled anyway. */
3992 && cval1
!= 0 && cval2
!= 0
3993 && ! (TREE_CONSTANT (cval1
) && TREE_CONSTANT (cval2
))
3994 && TREE_TYPE (cval1
) == TREE_TYPE (cval2
)
3995 && TREE_CODE (TREE_TYPE (cval1
)) == INTEGER_TYPE
3996 && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1
)),
3997 TYPE_MAX_VALUE (TREE_TYPE (cval2
)), 0))
3999 tree maxval
= TYPE_MAX_VALUE (TREE_TYPE (cval1
));
4000 tree minval
= TYPE_MIN_VALUE (TREE_TYPE (cval1
));
4002 /* We can't just pass T to eval_subst in case cval1 or cval2
4003 was the same as ARG1. */
4006 = fold (build (code
, type
,
4007 eval_subst (arg0
, cval1
, maxval
, cval2
, minval
),
4010 = fold (build (code
, type
,
4011 eval_subst (arg0
, cval1
, maxval
, cval2
, maxval
),
4014 = fold (build (code
, type
,
4015 eval_subst (arg0
, cval1
, minval
, cval2
, maxval
),
4018 /* All three of these results should be 0 or 1. Confirm they
4019 are. Then use those values to select the proper code
4022 if ((integer_zerop (high_result
)
4023 || integer_onep (high_result
))
4024 && (integer_zerop (equal_result
)
4025 || integer_onep (equal_result
))
4026 && (integer_zerop (low_result
)
4027 || integer_onep (low_result
)))
4029 /* Make a 3-bit mask with the high-order bit being the
4030 value for `>', the next for '=', and the low for '<'. */
4031 switch ((integer_onep (high_result
) * 4)
4032 + (integer_onep (equal_result
) * 2)
4033 + integer_onep (low_result
))
4037 return omit_one_operand (type
, integer_zero_node
, arg0
);
4058 return omit_one_operand (type
, integer_one_node
, arg0
);
4061 return fold (build (code
, type
, cval1
, cval2
));
4066 /* If this is a comparison of a field, we may be able to simplify it. */
4067 if ((TREE_CODE (arg0
) == COMPONENT_REF
4068 || TREE_CODE (arg0
) == BIT_FIELD_REF
)
4069 && (code
== EQ_EXPR
|| code
== NE_EXPR
)
4070 /* Handle the constant case even without -O
4071 to make sure the warnings are given. */
4072 && (optimize
|| TREE_CODE (arg1
) == INTEGER_CST
))
4074 t1
= optimize_bit_field_compare (code
, type
, arg0
, arg1
);
4078 /* From here on, the only cases we handle are when the result is
4079 known to be a constant.
4081 To compute GT, swap the arguments and do LT.
4082 To compute GE, do LT and invert the result.
4083 To compute LE, swap the arguments, do LT and invert the result.
4084 To compute NE, do EQ and invert the result.
4086 Therefore, the code below must handle only EQ and LT. */
4088 if (code
== LE_EXPR
|| code
== GT_EXPR
)
4090 tem
= arg0
, arg0
= arg1
, arg1
= tem
;
4091 code
= swap_tree_comparison (code
);
4094 /* Note that it is safe to invert for real values here because we
4095 will check below in the one case that it matters. */
4098 if (code
== NE_EXPR
|| code
== GE_EXPR
)
4101 code
= invert_tree_comparison (code
);
4104 /* Compute a result for LT or EQ if args permit;
4105 otherwise return T. */
4106 if (TREE_CODE (arg0
) == INTEGER_CST
&& TREE_CODE (arg1
) == INTEGER_CST
)
4108 if (code
== EQ_EXPR
)
4109 t1
= build_int_2 ((TREE_INT_CST_LOW (arg0
)
4110 == TREE_INT_CST_LOW (arg1
))
4111 && (TREE_INT_CST_HIGH (arg0
)
4112 == TREE_INT_CST_HIGH (arg1
)),
4115 t1
= build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0
))
4116 ? INT_CST_LT_UNSIGNED (arg0
, arg1
)
4117 : INT_CST_LT (arg0
, arg1
)),
4121 /* Assume a nonexplicit constant cannot equal an explicit one,
4122 since such code would be undefined anyway.
4123 Exception: on sysvr4, using #pragma weak,
4124 a label can come out as 0. */
4125 else if (TREE_CODE (arg1
) == INTEGER_CST
4126 && !integer_zerop (arg1
)
4127 && TREE_CONSTANT (arg0
)
4128 && TREE_CODE (arg0
) == ADDR_EXPR
4130 t1
= build_int_2 (0, 0);
4132 /* Two real constants can be compared explicitly. */
4133 else if (TREE_CODE (arg0
) == REAL_CST
&& TREE_CODE (arg1
) == REAL_CST
)
4135 /* If either operand is a NaN, the result is false with two
4136 exceptions: First, an NE_EXPR is true on NaNs, but that case
4137 is already handled correctly since we will be inverting the
4138 result for NE_EXPR. Second, if we had inverted a LE_EXPR
4139 or a GE_EXPR into a LT_EXPR, we must return true so that it
4140 will be inverted into false. */
4142 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0
))
4143 || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1
)))
4144 t1
= build_int_2 (invert
&& code
== LT_EXPR
, 0);
4146 else if (code
== EQ_EXPR
)
4147 t1
= build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0
),
4148 TREE_REAL_CST (arg1
)),
4151 t1
= build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0
),
4152 TREE_REAL_CST (arg1
)),
4156 if (t1
== NULL_TREE
)
4160 TREE_INT_CST_LOW (t1
) ^= 1;
4162 TREE_TYPE (t1
) = type
;
4166 if (TREE_CODE (arg0
) == INTEGER_CST
)
4167 return TREE_OPERAND (t
, (integer_zerop (arg0
) ? 2 : 1));
4168 else if (operand_equal_p (arg1
, TREE_OPERAND (expr
, 2), 0))
4169 return omit_one_operand (type
, arg1
, arg0
);
4171 /* If the second operand is zero, invert the comparison and swap
4172 the second and third operands. Likewise if the second operand
4173 is constant and the third is not or if the third operand is
4174 equivalent to the first operand of the comparison. */
4176 if (integer_zerop (arg1
)
4177 || (TREE_CONSTANT (arg1
) && ! TREE_CONSTANT (TREE_OPERAND (t
, 2)))
4178 || (TREE_CODE_CLASS (TREE_CODE (arg0
)) == '<'
4179 && operand_equal_for_comparison_p (TREE_OPERAND (arg0
, 0),
4180 TREE_OPERAND (t
, 2),
4181 TREE_OPERAND (arg0
, 1))))
4183 /* See if this can be inverted. If it can't, possibly because
4184 it was a floating-point inequality comparison, don't do
4186 tem
= invert_truthvalue (arg0
);
4188 if (TREE_CODE (tem
) != TRUTH_NOT_EXPR
)
4190 arg0
= TREE_OPERAND (t
, 0) = tem
;
4191 TREE_OPERAND (t
, 1) = TREE_OPERAND (t
, 2);
4192 TREE_OPERAND (t
, 2) = arg1
;
4193 arg1
= TREE_OPERAND (t
, 1);
4197 /* If we have A op B ? A : C, we may be able to convert this to a
4198 simpler expression, depending on the operation and the values
4199 of B and C. IEEE floating point prevents this though,
4200 because A or B might be -0.0 or a NaN. */
4202 if (TREE_CODE_CLASS (TREE_CODE (arg0
)) == '<'
4203 && (TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
4204 || TREE_CODE (TREE_TYPE (TREE_OPERAND (arg0
, 0))) != REAL_TYPE
)
4205 && operand_equal_for_comparison_p (TREE_OPERAND (arg0
, 0),
4206 arg1
, TREE_OPERAND (arg0
, 1)))
4208 tree arg2
= TREE_OPERAND (t
, 2);
4209 enum tree_code comp_code
= TREE_CODE (arg0
);
4211 /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
4212 depending on the comparison operation. */
4213 if (integer_zerop (TREE_OPERAND (arg0
, 1))
4214 && TREE_CODE (arg2
) == NEGATE_EXPR
4215 && operand_equal_p (TREE_OPERAND (arg2
, 0), arg1
, 0))
4219 return fold (build1 (NEGATE_EXPR
, type
, arg1
));
4221 return convert (type
, arg1
);
4224 return fold (build1 (ABS_EXPR
, type
, arg1
));
4227 return fold (build1 (NEGATE_EXPR
, type
,
4228 fold (build1 (ABS_EXPR
, type
, arg1
))));
4231 /* If this is A != 0 ? A : 0, this is simply A. For ==, it is
4234 if (integer_zerop (TREE_OPERAND (arg0
, 1)) && integer_zerop (arg2
))
4236 if (comp_code
== NE_EXPR
)
4237 return convert (type
, arg1
);
4238 else if (comp_code
== EQ_EXPR
)
4239 return convert (type
, integer_zero_node
);
4242 /* If this is A op B ? A : B, this is either A, B, min (A, B),
4243 or max (A, B), depending on the operation. */
4245 if (operand_equal_for_comparison_p (TREE_OPERAND (arg0
, 1),
4246 arg2
, TREE_OPERAND (arg0
, 0)))
4250 return convert (type
, arg2
);
4252 return convert (type
, arg1
);
4255 return fold (build (MIN_EXPR
, type
, arg1
, arg2
));
4258 return fold (build (MAX_EXPR
, type
, arg1
, arg2
));
4261 /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
4262 we might still be able to simplify this. For example,
4263 if C1 is one less or one more than C2, this might have started
4264 out as a MIN or MAX and been transformed by this function.
4265 Only good for INTEGER_TYPE, because we need TYPE_MAX_VALUE. */
4267 if (TREE_CODE (type
) == INTEGER_TYPE
4268 && TREE_CODE (TREE_OPERAND (arg0
, 1)) == INTEGER_CST
4269 && TREE_CODE (arg2
) == INTEGER_CST
)
4273 /* We can replace A with C1 in this case. */
4274 arg1
= TREE_OPERAND (t
, 1)
4275 = convert (type
, TREE_OPERAND (arg0
, 1));
4279 /* If C1 is C2 + 1, this is min(A, C2). */
4280 if (! operand_equal_p (arg2
, TYPE_MAX_VALUE (type
), 1)
4281 && operand_equal_p (TREE_OPERAND (arg0
, 1),
4282 const_binop (PLUS_EXPR
, arg2
,
4283 integer_one_node
, 0), 1))
4284 return fold (build (MIN_EXPR
, type
, arg1
, arg2
));
4288 /* If C1 is C2 - 1, this is min(A, C2). */
4289 if (! operand_equal_p (arg2
, TYPE_MIN_VALUE (type
), 1)
4290 && operand_equal_p (TREE_OPERAND (arg0
, 1),
4291 const_binop (MINUS_EXPR
, arg2
,
4292 integer_one_node
, 0), 1))
4293 return fold (build (MIN_EXPR
, type
, arg1
, arg2
));
4297 /* If C1 is C2 - 1, this is max(A, C2). */
4298 if (! operand_equal_p (arg2
, TYPE_MIN_VALUE (type
), 1)
4299 && operand_equal_p (TREE_OPERAND (arg0
, 1),
4300 const_binop (MINUS_EXPR
, arg2
,
4301 integer_one_node
, 0), 1))
4302 return fold (build (MAX_EXPR
, type
, arg1
, arg2
));
4306 /* If C1 is C2 + 1, this is max(A, C2). */
4307 if (! operand_equal_p (arg2
, TYPE_MAX_VALUE (type
), 1)
4308 && operand_equal_p (TREE_OPERAND (arg0
, 1),
4309 const_binop (PLUS_EXPR
, arg2
,
4310 integer_one_node
, 0), 1))
4311 return fold (build (MAX_EXPR
, type
, arg1
, arg2
));
4316 /* Convert A ? 1 : 0 to simply A. */
4317 if (integer_onep (TREE_OPERAND (t
, 1))
4318 && integer_zerop (TREE_OPERAND (t
, 2))
4319 /* If we try to convert TREE_OPERAND (t, 0) to our type, the
4320 call to fold will try to move the conversion inside
4321 a COND, which will recurse. In that case, the COND_EXPR
4322 is probably the best choice, so leave it alone. */
4323 && type
== TREE_TYPE (arg0
))
4327 /* Look for expressions of the form A & 2 ? 2 : 0. The result of this
4328 operation is simply A & 2. */
4330 if (integer_zerop (TREE_OPERAND (t
, 2))
4331 && TREE_CODE (arg0
) == NE_EXPR
4332 && integer_zerop (TREE_OPERAND (arg0
, 1))
4333 && integer_pow2p (arg1
)
4334 && TREE_CODE (TREE_OPERAND (arg0
, 0)) == BIT_AND_EXPR
4335 && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0
, 0), 1),
4337 return convert (type
, TREE_OPERAND (arg0
, 0));
4342 if (!TREE_SIDE_EFFECTS (arg0
))
4348 } /* switch (code) */
This page took 0.23723 seconds and 5 git commands to generate.