/* Fold a constant sub-tree into a single node for C-compiler
   Copyright (C) 1987, 88, 92, 93, 94, 1995 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/*@@ This file should be rewritten to use an arbitrary precision
  @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
  @@ Perhaps the routines could also be used for bc/dc, and made a lib.
  @@ The routines that translate from the ap rep should
  @@ warn if precision et al. is lost.
  @@ This would also make life easier when this technology is used
  @@ for cross-compilers.  */


/* The entry points in this file are fold, size_int and size_binop.

   fold takes a tree as argument and returns a simplified tree.

   size_binop takes a tree code for an arithmetic operation
   and two operands that are trees, and produces a tree for the
   result, assuming the type comes from `sizetype'.

   size_int takes an integer value, and creates a tree constant
   with type from `sizetype'.  */

#include <stdio.h>
#include <setjmp.h>
#include "config.h"
#include "flags.h"
#include "tree.h"

/* Handle floating overflow for `const_binop'.  */
static jmp_buf float_error;

static void encode      PROTO((HOST_WIDE_INT *, HOST_WIDE_INT, HOST_WIDE_INT));
static void decode      PROTO((HOST_WIDE_INT *, HOST_WIDE_INT *, HOST_WIDE_INT *));
int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
                                HOST_WIDE_INT, HOST_WIDE_INT,
                                HOST_WIDE_INT, HOST_WIDE_INT *,
                                HOST_WIDE_INT *, HOST_WIDE_INT *,
                                HOST_WIDE_INT *));
static int split_tree   PROTO((tree, enum tree_code, tree *, tree *, int *));
static tree const_binop PROTO((enum tree_code, tree, tree, int));
static tree fold_convert PROTO((tree, tree));
static enum tree_code invert_tree_comparison PROTO((enum tree_code));
static enum tree_code swap_tree_comparison PROTO((enum tree_code));
static int truth_value_p PROTO((enum tree_code));
static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
static tree eval_subst  PROTO((tree, tree, tree, tree, tree));
static tree omit_one_operand PROTO((tree, tree, tree));
static tree pedantic_omit_one_operand PROTO((tree, tree, tree));
static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
                                              tree, tree));
static tree decode_field_reference PROTO((tree, int *, int *,
                                          enum machine_mode *, int *,
                                          int *, tree *));
static int all_ones_mask_p PROTO((tree, int));
static int simple_operand_p PROTO((tree));
static tree range_test  PROTO((enum tree_code, tree, enum tree_code,
                               enum tree_code, tree, tree, tree));
static tree unextend    PROTO((tree, int, int));
static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
static tree strip_compound_expr PROTO((tree, tree));

#ifndef BRANCH_COST
#define BRANCH_COST 1
#endif

/* Yield nonzero if a signed left shift of A by B bits overflows.  */
#define left_shift_overflows(a, b) ((a) != ((a) << (b)) >> (b))

/* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
   Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
   Then this yields nonzero if overflow occurred during the addition.
   Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
   Use `^' to test whether signs differ, and `< 0' to isolate the sign.  */
#define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
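
/* For illustration, assuming 32-bit HOST_WIDE_INT: with A = B = 0x40000000
   the two's complement sum is 0x80000000, so SUM is negative while A and B
   are positive; ~(A^B) is all ones and A^SUM has the sign bit set, so the
   macro yields nonzero.  With A = 1, B = -1, SUM = 0 it computes 1 & 1 = 1,
   whose sign bit is clear, so no overflow is reported.  */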
\f
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer in 4 words, with only
   HOST_BITS_PER_WIDE_INT/2 bits stored in each word, as a positive number.  */

#define LOWPART(x) \
  ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT/2)) - 1))
#define HIGHPART(x) \
  ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT/2)
#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT/2)

/* Unpack a two-word integer into 4 words.
   LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
   WORDS points to the array of HOST_WIDE_INTs.  */
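
/* For illustration, assuming HOST_BITS_PER_WIDE_INT == 32:
   encode (words, 0x12345678, 0x1ABCDEF0) stores the half-word digits
   words[0] = 0x5678, words[1] = 0x1234, words[2] = 0xDEF0 and
   words[3] = 0x1ABC; decode below reassembles the same LOW and HI pieces.  */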

static void
encode (words, low, hi)
     HOST_WIDE_INT *words;
     HOST_WIDE_INT low, hi;
{
  words[0] = LOWPART (low);
  words[1] = HIGHPART (low);
  words[2] = LOWPART (hi);
  words[3] = HIGHPART (hi);
}

/* Pack an array of 4 words into a two-word integer.
   WORDS points to the array of words.
   The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.  */

static void
decode (words, low, hi)
     HOST_WIDE_INT *words;
     HOST_WIDE_INT *low, *hi;
{
  *low = words[0] | words[1] * BASE;
  *hi = words[2] | words[3] * BASE;
}
\f
/* Make the integer constant T valid for its type
   by setting to 0 or 1 all the bits in the constant
   that don't belong in the type.
   Yield 1 if a signed overflow occurs, 0 otherwise.
   If OVERFLOW is nonzero, a signed overflow has already occurred
   in calculating T, so propagate it.

   Make the real constant T valid for its type by calling CHECK_FLOAT_VALUE,
   if it exists.  */
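
/* For illustration, assuming 32-bit HOST_WIDE_INT and a signed type of
   precision 8: the constant 0x7F is already valid, so nothing changes
   and 0 is returned; -1 is masked to 0xFF and then sign-extended back
   to -1, so again 0 is returned; 0x17F is truncated to 0x7F, the stored
   value differs from the original, and 1 (signed overflow) is returned.  */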

int
force_fit_type (t, overflow)
     tree t;
     int overflow;
{
  HOST_WIDE_INT low, high;
  register int prec;

  if (TREE_CODE (t) == REAL_CST)
    {
#ifdef CHECK_FLOAT_VALUE
      CHECK_FLOAT_VALUE (TYPE_MODE (TREE_TYPE (t)), TREE_REAL_CST (t),
                         overflow);
#endif
      return overflow;
    }

  else if (TREE_CODE (t) != INTEGER_CST)
    return overflow;

  low = TREE_INT_CST_LOW (t);
  high = TREE_INT_CST_HIGH (t);

  if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
    prec = POINTER_SIZE;
  else
    prec = TYPE_PRECISION (TREE_TYPE (t));

  /* First clear all bits that are beyond the type's precision.  */

  if (prec == 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    {
      TREE_INT_CST_HIGH (t)
        &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
    }
  else
    {
      TREE_INT_CST_HIGH (t) = 0;
      if (prec < HOST_BITS_PER_WIDE_INT)
        TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
    }

  /* Unsigned types do not suffer sign extension or overflow.  */
  if (TREE_UNSIGNED (TREE_TYPE (t)))
    return overflow;

  /* If the value's sign bit is set, extend the sign.  */
  if (prec != 2 * HOST_BITS_PER_WIDE_INT
      && (prec > HOST_BITS_PER_WIDE_INT
          ? (TREE_INT_CST_HIGH (t)
             & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
          : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
    {
      /* Value is negative:
         set to 1 all the bits that are outside this type's precision.  */
      if (prec > HOST_BITS_PER_WIDE_INT)
        {
          TREE_INT_CST_HIGH (t)
            |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
        }
      else
        {
          TREE_INT_CST_HIGH (t) = -1;
          if (prec < HOST_BITS_PER_WIDE_INT)
            TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
        }
    }

  /* Yield nonzero if signed overflow occurred.  */
  return
    ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
     != 0);
}
\f
/* Add two doubleword integers with doubleword result.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */
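
/* For illustration, assuming 32-bit HOST_WIDE_INT:
   add_double (0xFFFFFFFF, 0, 1, 0, &lv, &hv) wraps the low word to 0,
   the unsigned comparison below detects the carry, and the result is
   LV = 0, HV = 1 with no signed overflow reported.  */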

int
add_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT l, h;

  l = l1 + l2;
  h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < l1);

  *lv = l;
  *hv = h;
  return overflow_sum_sign (h1, h2, h);
}

/* Negate a doubleword integer with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */
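
/* Negation is ~X + 1; when L1 is nonzero the +1 cannot carry out of the
   low word, so the result is (-L1, ~H1).  When L1 is zero the carry
   propagates and the result is (0, -H1); the only overflow case is the
   most negative doubleword, which negates to itself and is caught by
   the sign test (*hv & h1) < 0 below.  */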

int
neg_double (l1, h1, lv, hv)
     HOST_WIDE_INT l1, h1;
     HOST_WIDE_INT *lv, *hv;
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
      return (*hv & h1) < 0;
    }
  else
    {
      *lv = - l1;
      *hv = ~ h1;
      return 0;
    }
}
\f
/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */
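
/* The digit-by-digit product below treats the operands as unsigned; if an
   operand is negative, its unsigned value exceeds its signed value by
   2**(2*HOST_BITS_PER_WIDE_INT), so the top half of the product is
   corrected by adding the negation of the other operand.  Signed overflow
   is then reported unless the top half is the sign-extension of the low
   half: all zero bits, or all one bits when *HV is negative.  */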

int
mul_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT arg1[4];
  HOST_WIDE_INT arg2[4];
  HOST_WIDE_INT prod[4 * 2];
  register unsigned HOST_WIDE_INT carry;
  register int i, j, k;
  HOST_WIDE_INT toplow, tophigh, neglow, neghigh;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  bzero ((char *) prod, sizeof prod);

  for (i = 0; i < 4; i++)
    {
      carry = 0;
      for (j = 0; j < 4; j++)
        {
          k = i + j;
          /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000.  */
          carry += arg1[i] * arg2[j];
          /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF.  */
          carry += prod[k];
          prod[k] = LOWPART (carry);
          carry = HIGHPART (carry);
        }
      prod[i + 4] = carry;
    }

  decode (prod, lv, hv);	/* This ignores prod[4] through prod[4*2-1] */

  /* Check for overflow by calculating the top half of the answer in full;
     it should agree with the low half's sign bit.  */
  decode (prod + 4, &toplow, &tophigh);
  if (h1 < 0)
    {
      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  if (h2 < 0)
    {
      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}
\f
/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */
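
/* Note that the cross-word shifts below are composed in two steps, e.g.
   `l1 >> HOST_BITS_PER_WIDE_INT - count - 1 >> 1', so that when COUNT is
   zero nothing is ever shifted by the full word width, which C leaves
   undefined.  */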

void
lshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
     int arith;
{
  if (count < 0)
    {
      rshift_double (l1, h1, - count, prec, lv, hv, arith);
      return;
    }

#ifdef SHIFT_COUNT_TRUNCATED
  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;
#endif

  if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = (unsigned HOST_WIDE_INT) l1 << count - HOST_BITS_PER_WIDE_INT;
      *lv = 0;
    }
  else
    {
      *hv = (((unsigned HOST_WIDE_INT) h1 << count)
             | ((unsigned HOST_WIDE_INT) l1 >> HOST_BITS_PER_WIDE_INT - count - 1 >> 1));
      *lv = (unsigned HOST_WIDE_INT) l1 << count;
    }
}

/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
     int arith;
{
  unsigned HOST_WIDE_INT signmask;
  signmask = (arith
              ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
              : 0);

#ifdef SHIFT_COUNT_TRUNCATED
  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;
#endif

  if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = signmask;
      *lv = ((signmask << 2 * HOST_BITS_PER_WIDE_INT - count - 1 << 1)
             | ((unsigned HOST_WIDE_INT) h1 >> count - HOST_BITS_PER_WIDE_INT));
    }
  else
    {
      *lv = (((unsigned HOST_WIDE_INT) l1 >> count)
             | ((unsigned HOST_WIDE_INT) h1 << HOST_BITS_PER_WIDE_INT - count - 1 << 1));
      *hv = ((signmask << HOST_BITS_PER_WIDE_INT - count)
             | ((unsigned HOST_WIDE_INT) h1 >> count));
    }
}
\f
/* Rotate the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */
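
/* For illustration: a rotation is composed from two logical shifts, so
   rotating a PREC-bit value left by COUNT is the bitwise OR of a left
   shift by COUNT and a right shift by PREC - COUNT.  */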

void
lrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT s1l, s1h, s2l, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}

/* Rotate the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT s1l, s1h, s2l, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}
\f
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR.
   It controls how the quotient is rounded to an integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */
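
/* For illustration: dividing -7 by 2 yields quotient -3, remainder -1
   under TRUNC_DIV_EXPR; quotient -4, remainder 1 under FLOOR_DIV_EXPR;
   quotient -3, remainder -1 under CEIL_DIV_EXPR; and quotient -4 under
   ROUND_DIV_EXPR, since halves round away from zero.  */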

int
div_and_round_double (code, uns,
                      lnum_orig, hnum_orig, lden_orig, hden_orig,
                      lquo, hquo, lrem, hrem)
     enum tree_code code;
     int uns;
     HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */
     HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */
     HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
{
  int quo_neg = 0;
  HOST_WIDE_INT num[4 + 1];	/* extra element for scaling.  */
  HOST_WIDE_INT den[4], quo[4];
  register int i, j;
  unsigned HOST_WIDE_INT work;
  register int carry = 0;
  HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;
  int overflow = 0;

  if ((hden == 0) && (lden == 0))
    abort ();

  /* calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hnum < 0)
        {
          quo_neg = ~ quo_neg;
          /* (minimum integer) / (-1) is the only overflow case.  */
          if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
            overflow = 1;
        }
      if (hden < 0)
        {
          quo_neg = ~ quo_neg;
          neg_double (lden, hden, &lden, &hden);
        }
    }

  if (hnum == 0 && hden == 0)
    {				/* single precision */
      *hquo = *hrem = 0;
      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
      goto finish_up;
    }

  if (hnum == 0)
    {				/* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */
      *hquo = *lquo = 0;
      *hrem = hnum;
      *lrem = lnum;
      goto finish_up;
    }

  bzero ((char *) quo, sizeof quo);

  bzero ((char *) num, sizeof num);	/* to zero 9th element */
  bzero ((char *) den, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);

  /* Special code for when the divisor < BASE.  */
  if (hden == 0 && lden < BASE)
    {
      /* hnum != 0 already checked.  */
      for (i = 4 - 1; i >= 0; i--)
        {
          work = num[i] + carry * BASE;
          quo[i] = work / (unsigned HOST_WIDE_INT) lden;
          carry = work % (unsigned HOST_WIDE_INT) lden;
        }
    }
  else
    {
      /* Full double precision division,
         with thanks to Don Knuth's "Seminumerical Algorithms".  */
      int quo_est, scale, num_hi_sig, den_hi_sig;

      /* Find the highest non-zero divisor digit.  */
      for (i = 4 - 1; ; i--)
        if (den[i] != 0)
          {
            den_hi_sig = i;
            break;
          }

      /* Ensure that the first digit of the divisor is at least BASE/2.
         This is required by the quotient digit estimation algorithm.  */

      scale = BASE / (den[den_hi_sig] + 1);
      if (scale > 1)
        {			/* scale divisor and dividend */
          carry = 0;
          for (i = 0; i <= 4 - 1; i++)
            {
              work = (num[i] * scale) + carry;
              num[i] = LOWPART (work);
              carry = HIGHPART (work);
            }
          num[4] = carry;
          carry = 0;
          for (i = 0; i <= 4 - 1; i++)
            {
              work = (den[i] * scale) + carry;
              den[i] = LOWPART (work);
              carry = HIGHPART (work);
              if (den[i] != 0)
                den_hi_sig = i;
            }
        }

      num_hi_sig = 4;

      /* Main loop */
      for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
        {
          /* guess the next quotient digit, quo_est, by dividing the first
             two remaining dividend digits by the high order quotient digit.
             quo_est is never low and is at most 2 high.  */
          unsigned HOST_WIDE_INT tmp;

          num_hi_sig = i + den_hi_sig + 1;
          work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
          if (num[num_hi_sig] != den[den_hi_sig])
            quo_est = work / den[den_hi_sig];
          else
            quo_est = BASE - 1;

          /* refine quo_est so it's usually correct, and at most one high.  */
          tmp = work - quo_est * den[den_hi_sig];
          if (tmp < BASE
              && den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2]))
            quo_est--;

          /* Try QUO_EST as the quotient digit, by multiplying the
             divisor by QUO_EST and subtracting from the remaining dividend.
             Keep in mind that QUO_EST is the I - 1st digit.  */

          carry = 0;
          for (j = 0; j <= den_hi_sig; j++)
            {
              work = quo_est * den[j] + carry;
              carry = HIGHPART (work);
              work = num[i + j] - LOWPART (work);
              num[i + j] = LOWPART (work);
              carry += HIGHPART (work) != 0;
            }

          /* if quo_est was high by one, then num[i] went negative and
             we need to correct things.  */

          if (num[num_hi_sig] < carry)
            {
              quo_est--;
              carry = 0;	/* add divisor back in */
              for (j = 0; j <= den_hi_sig; j++)
                {
                  work = num[i + j] + den[j] + carry;
                  carry = HIGHPART (work);
                  num[i + j] = LOWPART (work);
                }
              num[num_hi_sig] += carry;
            }

          /* store the quotient digit.  */
          quo[i] = quo_est;
        }
    }

  decode (quo, lquo, hquo);

 finish_up:
  /* if result is negative, make it so.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);

  switch (code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:	/* round toward zero */
    case EXACT_DIV_EXPR:	/* for this one, it shouldn't matter */
      return overflow;

    case FLOOR_DIV_EXPR:
    case FLOOR_MOD_EXPR:	/* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))   /* ratio < 0 && rem != 0 */
        {
          /* quo = quo - 1;  */
          add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
                      lquo, hquo);
        }
      else
        return overflow;
      break;

    case CEIL_DIV_EXPR:
    case CEIL_MOD_EXPR:		/* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */
        {
          add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                      lquo, hquo);
        }
      else
        return overflow;
      break;

    case ROUND_DIV_EXPR:
    case ROUND_MOD_EXPR:	/* round to closest integer */
      {
        HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
        HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;

        /* get absolute values */
        if (*hrem < 0)
          neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
        if (hden < 0)
          neg_double (lden, hden, &labs_den, &habs_den);

        /* if (2 * abs (lrem) >= abs (lden)) */
        mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
                    labs_rem, habs_rem, &ltwice, &htwice);
        if (((unsigned HOST_WIDE_INT) habs_den
             < (unsigned HOST_WIDE_INT) htwice)
            || (((unsigned HOST_WIDE_INT) habs_den
                 == (unsigned HOST_WIDE_INT) htwice)
                && ((unsigned HOST_WIDE_INT) labs_den
                    < (unsigned HOST_WIDE_INT) ltwice)))
          {
            if (*hquo < 0)
              /* quo = quo - 1;  */
              add_double (*lquo, *hquo,
                          (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
            else
              /* quo = quo + 1;  */
              add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                          lquo, hquo);
          }
        else
          return overflow;
      }
      break;

    default:
      abort ();
    }

  /* compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  return overflow;
}
\f
#ifndef REAL_ARITHMETIC
/* Effectively truncate a real value to represent the nearest possible value
   in a narrower mode.  The result is actually represented in the same data
   type as the argument, but its value is usually different.

   A trap may occur during the FP operations and it is the responsibility
   of the calling function to have a handler established.  */

REAL_VALUE_TYPE
real_value_truncate (mode, arg)
     enum machine_mode mode;
     REAL_VALUE_TYPE arg;
{
  return REAL_VALUE_TRUNCATE (mode, arg);
}

#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT

/* Check for infinity in an IEEE double precision number.  */

int
target_isinf (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign      :  1;
      unsigned exponent  : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } little_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent  : 11;
      unsigned sign      :  1;
    } big_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return (u.big_endian.exponent == 2047
              && u.big_endian.mantissa1 == 0
              && u.big_endian.mantissa2 == 0);
    }
  else
    {
      u.d = x;
      return (u.little_endian.exponent == 2047
              && u.little_endian.mantissa1 == 0
              && u.little_endian.mantissa2 == 0);
    }
}

/* Check whether an IEEE double precision number is a NaN.  */

int
target_isnan (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign      :  1;
      unsigned exponent  : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } little_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent  : 11;
      unsigned sign      :  1;
    } big_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return (u.big_endian.exponent == 2047
              && (u.big_endian.mantissa1 != 0
                  || u.big_endian.mantissa2 != 0));
    }
  else
    {
      u.d = x;
      return (u.little_endian.exponent == 2047
              && (u.little_endian.mantissa1 != 0
                  || u.little_endian.mantissa2 != 0));
    }
}

/* Check for a negative IEEE double precision number.  */

int
target_negative (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign      :  1;
      unsigned exponent  : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } little_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent  : 11;
      unsigned sign      :  1;
    } big_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return u.big_endian.sign;
    }
  else
    {
      u.d = x;
      return u.little_endian.sign;
    }
}
#else /* Target not IEEE */

/* Let's assume other float formats don't have infinity.
   (This can be overridden by redefining REAL_VALUE_ISINF.)  */

int
target_isinf (x)
     REAL_VALUE_TYPE x;
{
  return 0;
}

/* Let's assume other float formats don't have NaNs.
   (This can be overridden by redefining REAL_VALUE_ISNAN.)  */

int
target_isnan (x)
     REAL_VALUE_TYPE x;
{
  return 0;
}

/* Let's assume other float formats don't have minus zero.
   (This can be overridden by redefining REAL_VALUE_NEGATIVE.)  */

int
target_negative (x)
     REAL_VALUE_TYPE x;
{
  return x < 0;
}
#endif /* Target not IEEE */
#endif /* no REAL_ARITHMETIC */
\f
/* Split a tree IN into a constant and a variable part
   that could be combined with CODE to make IN.
   CODE must be a commutative arithmetic operation.
   Store the constant part into *CONP and the variable in *VARP.
   Return 1 if this was done; zero means the tree IN did not decompose
   this way.

   If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
   Therefore, we must tell the caller whether the variable part
   was subtracted.  We do this by storing 1 or -1 into *VARSIGNP.
   The value stored is the coefficient for the variable term.
   The constant term we return should always be added;
   we negate it if necessary.  */
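
/* For illustration: given IN = `x + 3' and CODE = PLUS_EXPR, this stores
   `x' in *VARP, 3 in *CONP and 1 in *VARSIGNP; given IN = `3 - x' it
   stores `x' in *VARP, 3 in *CONP and -1 in *VARSIGNP.  */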

static int
split_tree (in, code, varp, conp, varsignp)
     tree in;
     enum tree_code code;
     tree *varp, *conp;
     int *varsignp;
{
  register tree outtype = TREE_TYPE (in);
  *varp = 0;
  *conp = 0;

  /* Strip any conversions that don't change the machine mode.  */
  while ((TREE_CODE (in) == NOP_EXPR
          || TREE_CODE (in) == CONVERT_EXPR)
         && (TYPE_MODE (TREE_TYPE (in))
             == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
    in = TREE_OPERAND (in, 0);

  if (TREE_CODE (in) == code
      || (! FLOAT_TYPE_P (TREE_TYPE (in))
          /* We can associate addition and subtraction together
             (even though the C standard doesn't say so)
             for integers because the value is not affected.
             For reals, the value might be affected, so we can't.  */
          && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
              || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
    {
      enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
      if (code == INTEGER_CST)
        {
          *conp = TREE_OPERAND (in, 0);
          *varp = TREE_OPERAND (in, 1);
          if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
              && TREE_TYPE (*varp) != outtype)
            *varp = convert (outtype, *varp);
          *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
          return 1;
        }
      if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
        {
          *conp = TREE_OPERAND (in, 1);
          *varp = TREE_OPERAND (in, 0);
          *varsignp = 1;
          if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
              && TREE_TYPE (*varp) != outtype)
            *varp = convert (outtype, *varp);
          if (TREE_CODE (in) == MINUS_EXPR)
            {
              /* If operation is subtraction and constant is second,
                 must negate it to get an additive constant.
                 And this cannot be done unless it is a manifest constant.
                 It could also be the address of a static variable.
                 We cannot negate that, so give up.  */
              if (TREE_CODE (*conp) == INTEGER_CST)
                /* Subtracting from integer_zero_node loses for long long.  */
                *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
              else
                return 0;
            }
          return 1;
        }
      if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
        {
          *conp = TREE_OPERAND (in, 0);
          *varp = TREE_OPERAND (in, 1);
          if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
              && TREE_TYPE (*varp) != outtype)
            *varp = convert (outtype, *varp);
          *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
          return 1;
        }
    }
  return 0;
}
\f
/* Combine two constants ARG1 and ARG2 under operation CODE
   to produce a new constant.
   We assume ARG1 and ARG2 have the same data type,
   or at least are the same kind of constant and the same machine mode.

   If NOTRUNC is nonzero, do not truncate the result to fit the data type.  */

static tree
const_binop (code, arg1, arg2, notrunc)
     enum tree_code code;
     register tree arg1, arg2;
     int notrunc;
{
  if (TREE_CODE (arg1) == INTEGER_CST)
    {
      register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
      register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
      HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
      HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
      HOST_WIDE_INT low, hi;
      HOST_WIDE_INT garbagel, garbageh;
      register tree t;
      int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
      int overflow = 0;

      switch (code)
        {
        case BIT_IOR_EXPR:
          t = build_int_2 (int1l | int2l, int1h | int2h);
          break;

        case BIT_XOR_EXPR:
          t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
          break;

        case BIT_AND_EXPR:
          t = build_int_2 (int1l & int2l, int1h & int2h);
          break;

        case BIT_ANDTC_EXPR:
          t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
          break;

        case RSHIFT_EXPR:
          int2l = - int2l;
        case LSHIFT_EXPR:
          /* It's unclear from the C standard whether shifts can overflow.
             The following code ignores overflow; perhaps a C standard
             interpretation ruling is needed.  */
          lshift_double (int1l, int1h, int2l,
                         TYPE_PRECISION (TREE_TYPE (arg1)),
                         &low, &hi,
                         !uns);
          t = build_int_2 (low, hi);
          TREE_TYPE (t) = TREE_TYPE (arg1);
          if (!notrunc)
            force_fit_type (t, 0);
          TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
          TREE_CONSTANT_OVERFLOW (t)
            = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);
          return t;

        case RROTATE_EXPR:
          int2l = - int2l;
        case LROTATE_EXPR:
          lrotate_double (int1l, int1h, int2l,
                          TYPE_PRECISION (TREE_TYPE (arg1)),
                          &low, &hi);
          t = build_int_2 (low, hi);
          break;

        case PLUS_EXPR:
          if (int1h == 0)
            {
              int2l += int1l;
              if ((unsigned HOST_WIDE_INT) int2l < int1l)
                {
                  hi = int2h++;
                  overflow = int2h < hi;
                }
              t = build_int_2 (int2l, int2h);
              break;
            }
          if (int2h == 0)
            {
              int1l += int2l;
              if ((unsigned HOST_WIDE_INT) int1l < int2l)
                {
                  hi = int1h++;
                  overflow = int1h < hi;
                }
              t = build_int_2 (int1l, int1h);
              break;
            }
          overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
          t = build_int_2 (low, hi);
          break;

        case MINUS_EXPR:
          if (int2h == 0 && int2l == 0)
            {
              t = build_int_2 (int1l, int1h);
              break;
            }
          neg_double (int2l, int2h, &low, &hi);
          add_double (int1l, int1h, low, hi, &low, &hi);
          overflow = overflow_sum_sign (hi, int2h, int1h);
          t = build_int_2 (low, hi);
          break;

        case MULT_EXPR:
          overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
          t = build_int_2 (low, hi);
          break;

        case TRUNC_DIV_EXPR:
        case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
        case EXACT_DIV_EXPR:
          /* This is a shortcut for a common special case.
             It reduces the number of tree nodes generated
             and saves time.  */
          if (int2h == 0 && int2l > 0
              && TREE_TYPE (arg1) == sizetype
              && int1h == 0 && int1l >= 0)
            {
              if (code == CEIL_DIV_EXPR)
                int1l += int2l - 1;
              return size_int (int1l / int2l);
            }
        case ROUND_DIV_EXPR:
          if (int2h == 0 && int2l == 1)
            {
              t = build_int_2 (int1l, int1h);
              break;
            }
          if (int1l == int2l && int1h == int2h)
            {
              if ((int1l | int1h) == 0)
                abort ();
              t = build_int_2 (1, 0);
              break;
            }
          overflow = div_and_round_double (code, uns,
                                           int1l, int1h, int2l, int2h,
                                           &low, &hi, &garbagel, &garbageh);
          t = build_int_2 (low, hi);
          break;

        case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
        case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
          overflow = div_and_round_double (code, uns,
                                           int1l, int1h, int2l, int2h,
                                           &garbagel, &garbageh, &low, &hi);
          t = build_int_2 (low, hi);
          break;

        case MIN_EXPR:
        case MAX_EXPR:
          if (uns)
            {
              low = (((unsigned HOST_WIDE_INT) int1h
                      < (unsigned HOST_WIDE_INT) int2h)
                     || (((unsigned HOST_WIDE_INT) int1h
                          == (unsigned HOST_WIDE_INT) int2h)
                         && ((unsigned HOST_WIDE_INT) int1l
                             < (unsigned HOST_WIDE_INT) int2l)));
            }
          else
            {
              low = ((int1h < int2h)
                     || ((int1h == int2h)
                         && ((unsigned HOST_WIDE_INT) int1l
                             < (unsigned HOST_WIDE_INT) int2l)));
            }
          if (low == (code == MIN_EXPR))
            t = build_int_2 (int1l, int1h);
          else
            t = build_int_2 (int2l, int2h);
          break;

        default:
          abort ();
        }
    got_it:
      TREE_TYPE (t) = TREE_TYPE (arg1);
      TREE_OVERFLOW (t)
        = ((notrunc ? !uns && overflow : force_fit_type (t, overflow && !uns))
           | TREE_OVERFLOW (arg1)
           | TREE_OVERFLOW (arg2));
      TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
                                    | TREE_CONSTANT_OVERFLOW (arg1)
                                    | TREE_CONSTANT_OVERFLOW (arg2));
      return t;
    }
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  if (TREE_CODE (arg1) == REAL_CST)
    {
      REAL_VALUE_TYPE d1;
      REAL_VALUE_TYPE d2;
      int overflow = 0;
      REAL_VALUE_TYPE value;
      tree t;

      d1 = TREE_REAL_CST (arg1);
      d2 = TREE_REAL_CST (arg2);

      /* If either operand is a NaN, just return it.  Otherwise, set up
         for floating-point trap; we return an overflow.  */
      if (REAL_VALUE_ISNAN (d1))
        return arg1;
      else if (REAL_VALUE_ISNAN (d2))
        return arg2;
      else if (setjmp (float_error))
        {
          t = copy_node (arg1);
          overflow = 1;
          goto got_float;
        }

      set_float_handler (float_error);

#ifdef REAL_ARITHMETIC
      REAL_ARITHMETIC (value, code, d1, d2);
#else
      switch (code)
        {
        case PLUS_EXPR:
          value = d1 + d2;
          break;

        case MINUS_EXPR:
          value = d1 - d2;
          break;

        case MULT_EXPR:
          value = d1 * d2;
          break;

        case RDIV_EXPR:
#ifndef REAL_INFINITY
          if (d2 == 0)
            abort ();
#endif

          value = d1 / d2;
          break;

        case MIN_EXPR:
          value = MIN (d1, d2);
          break;

        case MAX_EXPR:
          value = MAX (d1, d2);
          break;

        default:
          abort ();
        }
#endif /* no REAL_ARITHMETIC */
      t = build_real (TREE_TYPE (arg1),
                      real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));
    got_float:
      set_float_handler (NULL_PTR);

      TREE_OVERFLOW (t)
        = (force_fit_type (t, overflow)
           | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
      TREE_CONSTANT_OVERFLOW (t)
        = TREE_OVERFLOW (t)
          | TREE_CONSTANT_OVERFLOW (arg1)
          | TREE_CONSTANT_OVERFLOW (arg2);
      return t;
    }
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
  if (TREE_CODE (arg1) == COMPLEX_CST)
    {
      register tree r1 = TREE_REALPART (arg1);
      register tree i1 = TREE_IMAGPART (arg1);
      register tree r2 = TREE_REALPART (arg2);
      register tree i2 = TREE_IMAGPART (arg2);
      register tree t;

      switch (code)
        {
        case PLUS_EXPR:
          t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
                             const_binop (PLUS_EXPR, i1, i2, notrunc));
          break;

        case MINUS_EXPR:
          t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
                             const_binop (MINUS_EXPR, i1, i2, notrunc));
          break;

        case MULT_EXPR:
          t = build_complex (const_binop (MINUS_EXPR,
                                          const_binop (MULT_EXPR,
                                                       r1, r2, notrunc),
                                          const_binop (MULT_EXPR,
                                                       i1, i2, notrunc),
                                          notrunc),
                             const_binop (PLUS_EXPR,
                                          const_binop (MULT_EXPR,
                                                       r1, i2, notrunc),
                                          const_binop (MULT_EXPR,
                                                       i1, r2, notrunc),
                                          notrunc));
          break;

        case RDIV_EXPR:
          {
            register tree magsquared
              = const_binop (PLUS_EXPR,
                             const_binop (MULT_EXPR, r2, r2, notrunc),
                             const_binop (MULT_EXPR, i2, i2, notrunc),
                             notrunc);

            t = build_complex
              (const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
                            ? TRUNC_DIV_EXPR : RDIV_EXPR,
                            const_binop (PLUS_EXPR,
                                         const_binop (MULT_EXPR, r1, r2,
                                                      notrunc),
                                         const_binop (MULT_EXPR, i1, i2,
                                                      notrunc),
                                         notrunc),
                            magsquared, notrunc),
               const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
                            ? TRUNC_DIV_EXPR : RDIV_EXPR,
                            const_binop (MINUS_EXPR,
                                         const_binop (MULT_EXPR, i1, r2,
                                                      notrunc),
                                         const_binop (MULT_EXPR, r1, i2,
                                                      notrunc),
                                         notrunc),
                            magsquared, notrunc));
          }
          break;

        default:
          abort ();
        }
      TREE_TYPE (t) = TREE_TYPE (arg1);
      return t;
    }
  return 0;
}
\f
/* Return an INTEGER_CST with value NUMBER and type from `sizetype'.  */

tree
size_int (number)
     unsigned HOST_WIDE_INT number;
{
  register tree t;
  /* Type-size nodes already made for small sizes.  */
  static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];

  if (number < 2*HOST_BITS_PER_WIDE_INT + 1
      && size_table[number] != 0)
    return size_table[number];
  if (number < 2*HOST_BITS_PER_WIDE_INT + 1)
    {
      push_obstacks_nochange ();
      /* Make this a permanent node.  */
      end_temporary_allocation ();
      t = build_int_2 (number, 0);
      TREE_TYPE (t) = sizetype;
      size_table[number] = t;
      pop_obstacks ();
    }
  else
    {
      t = build_int_2 (number, 0);
      TREE_TYPE (t) = sizetype;
    }
  return t;
}

/* Combine operands ARG0 and ARG1 with arithmetic operation CODE.
   CODE is a tree code.  Data type is taken from `sizetype'.
   If the operands are constant, so is the result.  */

tree
size_binop (code, arg0, arg1)
     enum tree_code code;
     tree arg0, arg1;
{
  /* Handle the special case of two integer constants faster.  */
  if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
    {
      /* And some specific cases even faster than that.  */
      if (code == PLUS_EXPR
          && TREE_INT_CST_LOW (arg0) == 0
          && TREE_INT_CST_HIGH (arg0) == 0)
        return arg1;
      if (code == MINUS_EXPR
          && TREE_INT_CST_LOW (arg1) == 0
          && TREE_INT_CST_HIGH (arg1) == 0)
        return arg0;
      if (code == MULT_EXPR
          && TREE_INT_CST_LOW (arg0) == 1
          && TREE_INT_CST_HIGH (arg0) == 0)
        return arg1;
      /* Handle general case of two integer constants.  */
      return const_binop (code, arg0, arg1, 1);
    }

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return error_mark_node;

  return fold (build (code, sizetype, arg0, arg1));
}
\f
/* Given T, a tree representing type conversion of ARG1, a constant,
   return a constant tree representing the result of conversion.  */

static tree
fold_convert (t, arg1)
     register tree t;
     register tree arg1;
{
  register tree type = TREE_TYPE (t);
  int overflow = 0;

  if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type))
    {
      if (TREE_CODE (arg1) == INTEGER_CST)
        {
          /* If we would build a constant wider than GCC supports,
             leave the conversion unfolded.  */
          if (TYPE_PRECISION (type) > 2 * HOST_BITS_PER_WIDE_INT)
            return t;

          /* Given an integer constant, make new constant with new type,
             appropriately sign-extended or truncated.  */
          t = build_int_2 (TREE_INT_CST_LOW (arg1),
                           TREE_INT_CST_HIGH (arg1));
          TREE_TYPE (t) = type;
          /* Indicate an overflow if (1) ARG1 already overflowed,
             or (2) force_fit_type indicates an overflow.
             Tell force_fit_type that an overflow has already occurred
             if ARG1 is a too-large unsigned value and T is signed.  */
          TREE_OVERFLOW (t)
            = (TREE_OVERFLOW (arg1)
               | force_fit_type (t,
                                 (TREE_INT_CST_HIGH (arg1) < 0
                                  & (TREE_UNSIGNED (type)
                                     < TREE_UNSIGNED (TREE_TYPE (arg1))))));
          TREE_CONSTANT_OVERFLOW (t)
            = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
        }
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      else if (TREE_CODE (arg1) == REAL_CST)
        {
          /* Don't initialize these, use assignments.
             Initialized local aggregates don't work on old compilers.  */
          REAL_VALUE_TYPE x;
          REAL_VALUE_TYPE l;
          REAL_VALUE_TYPE u;

          x = TREE_REAL_CST (arg1);
          l = real_value_from_int_cst (TYPE_MIN_VALUE (type));
          u = real_value_from_int_cst (TYPE_MAX_VALUE (type));
          /* See if X will be in range after truncation towards 0.
             To compensate for truncation, move the bounds away from 0,
             but reject if X exactly equals the adjusted bounds.  */
#ifdef REAL_ARITHMETIC
          REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
          REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
#else
          l--;
          u++;
#endif
          /* If X is a NaN, use zero instead and show we have an overflow.
             Otherwise, range check.  */
          if (REAL_VALUE_ISNAN (x))
            overflow = 1, x = dconst0;
          else if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
            overflow = 1;

#ifndef REAL_ARITHMETIC
          {
            HOST_WIDE_INT low, high;
            HOST_WIDE_INT half_word
              = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);

            if (x < 0)
              x = -x;

            high = (HOST_WIDE_INT) (x / half_word / half_word);
            x -= (REAL_VALUE_TYPE) high * half_word * half_word;
            if (x >= (REAL_VALUE_TYPE) half_word * half_word / 2)
              {
                low = x - (REAL_VALUE_TYPE) half_word * half_word / 2;
                low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
              }
            else
              low = (HOST_WIDE_INT) x;
            if (TREE_REAL_CST (arg1) < 0)
              neg_double (low, high, &low, &high);
            t = build_int_2 (low, high);
          }
#else
          {
            HOST_WIDE_INT low, high;
            REAL_VALUE_TO_INT (&low, &high, x);
            t = build_int_2 (low, high);
          }
#endif
          TREE_TYPE (t) = type;
          TREE_OVERFLOW (t)
            = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
          TREE_CONSTANT_OVERFLOW (t)
            = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
        }
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      TREE_TYPE (t) = type;
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      if (TREE_CODE (arg1) == INTEGER_CST)
        return build_real_from_int_cst (type, arg1);
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      if (TREE_CODE (arg1) == REAL_CST)
        {
          if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
            return arg1;
          else if (setjmp (float_error))
            {
              overflow = 1;
              t = copy_node (arg1);
              goto got_it;
            }
          set_float_handler (float_error);

          t = build_real (type, real_value_truncate (TYPE_MODE (type),
                                                     TREE_REAL_CST (arg1)));
          set_float_handler (NULL_PTR);

        got_it:
          TREE_OVERFLOW (t)
            = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
          TREE_CONSTANT_OVERFLOW (t)
            = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
          return t;
        }
    }
  TREE_CONSTANT (t) = 1;
  return t;
}
\f
/* Return an expr equal to X but certainly not valid as an lvalue.
   Also make sure it is not valid as a null pointer constant.  */

tree
non_lvalue (x)
     tree x;
{
  tree result;

  /* These things are certainly not lvalues.  */
  if (TREE_CODE (x) == NON_LVALUE_EXPR
      || TREE_CODE (x) == INTEGER_CST
      || TREE_CODE (x) == REAL_CST
      || TREE_CODE (x) == STRING_CST
      || TREE_CODE (x) == ADDR_EXPR)
    {
      if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))
        {
          /* Use NOP_EXPR instead of NON_LVALUE_EXPR
             so convert_for_assignment won't strip it.
             This is so this 0 won't be treated as a null pointer constant.  */
          result = build1 (NOP_EXPR, TREE_TYPE (x), x);
          TREE_CONSTANT (result) = TREE_CONSTANT (x);
          return result;
        }
      return x;
    }

  result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
  TREE_CONSTANT (result) = TREE_CONSTANT (x);
  return result;
}

/* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
   Zero means allow extended lvalues.  */

int pedantic_lvalues;

/* When pedantic, return an expr equal to X but certainly not valid as a
   pedantic lvalue.  Otherwise, return X.  */

tree
pedantic_non_lvalue (x)
     tree x;
{
  if (pedantic_lvalues)
    return non_lvalue (x);
  else
    return x;
}
\f
/* Given a tree comparison code, return the code that is the logical inverse
   of the given code.  It is not safe to do this for floating-point
   comparisons, except for NE_EXPR and EQ_EXPR.  */
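
/* For example, `a < b' inverts to `a >= b'.  This is wrong for IEEE
   floating point when either operand is a NaN, since then `a < b' and
   `a >= b' are both false; hence the restriction above.  */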

static enum tree_code
invert_tree_comparison (code)
     enum tree_code code;
{
  switch (code)
    {
    case EQ_EXPR:
      return NE_EXPR;
    case NE_EXPR:
      return EQ_EXPR;
    case GT_EXPR:
      return LE_EXPR;
    case GE_EXPR:
      return LT_EXPR;
    case LT_EXPR:
      return GE_EXPR;
    case LE_EXPR:
      return GT_EXPR;
    default:
      abort ();
    }
}

/* Similar, but return the comparison that results if the operands are
   swapped.  This is safe for floating-point.  */

static enum tree_code
swap_tree_comparison (code)
     enum tree_code code;
{
  switch (code)
    {
    case EQ_EXPR:
    case NE_EXPR:
      return code;
    case GT_EXPR:
      return LT_EXPR;
    case GE_EXPR:
      return LE_EXPR;
    case LT_EXPR:
      return GT_EXPR;
    case LE_EXPR:
      return GE_EXPR;
    default:
      abort ();
    }
}

/* Return nonzero if CODE is a tree code that represents a truth value.  */

static int
truth_value_p (code)
     enum tree_code code;
{
  return (TREE_CODE_CLASS (code) == '<'
          || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
          || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
          || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
}
\f
/* Return nonzero if two operands are necessarily equal.
   If ONLY_CONST is non-zero, only return non-zero for constants.
   This function tests whether the operands are indistinguishable;
   it does not test whether they are equal using C's == operation.
   The distinction is important for IEEE floating point, because
   (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
   (2) two NaNs may be indistinguishable, but NaN!=NaN.  */

int
operand_equal_p (arg0, arg1, only_const)
     tree arg0, arg1;
     int only_const;
{
  /* If both types don't have the same signedness, then we can't consider
     them equal.  We must check this before the STRIP_NOPS calls
     because they may change the signedness of the arguments.  */
  if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
    return 0;

  STRIP_NOPS (arg0);
  STRIP_NOPS (arg1);

  /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
     We don't care about side effects in that case because the SAVE_EXPR
     takes care of that for us.  */
  if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
    return ! only_const;

  if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
    return 0;

  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == ADDR_EXPR
      && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
    return 1;

  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == INTEGER_CST
      && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
      && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
    return 1;

  /* Detect when real constants are equal.  */
  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == REAL_CST)
    return !bcmp ((char *) &TREE_REAL_CST (arg0),
                  (char *) &TREE_REAL_CST (arg1),
                  sizeof (REAL_VALUE_TYPE));

  if (only_const)
    return 0;

  if (arg0 == arg1)
    return 1;

  if (TREE_CODE (arg0) != TREE_CODE (arg1))
    return 0;
  /* This is needed for conversions and for COMPONENT_REF.
     Might as well play it safe and always test this.  */
  if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
    return 0;

  switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
    {
    case '1':
      /* Two conversions are equal only if signedness and modes match.  */
      if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
          && (TREE_UNSIGNED (TREE_TYPE (arg0))
              != TREE_UNSIGNED (TREE_TYPE (arg1))))
        return 0;

      return operand_equal_p (TREE_OPERAND (arg0, 0),
                              TREE_OPERAND (arg1, 0), 0);

    case '<':
    case '2':
      return (operand_equal_p (TREE_OPERAND (arg0, 0),
                               TREE_OPERAND (arg1, 0), 0)
              && operand_equal_p (TREE_OPERAND (arg0, 1),
                                  TREE_OPERAND (arg1, 1), 0));

    case 'r':
      switch (TREE_CODE (arg0))
        {
        case INDIRECT_REF:
          return operand_equal_p (TREE_OPERAND (arg0, 0),
                                  TREE_OPERAND (arg1, 0), 0);

        case COMPONENT_REF:
        case ARRAY_REF:
          return (operand_equal_p (TREE_OPERAND (arg0, 0),
                                   TREE_OPERAND (arg1, 0), 0)
                  && operand_equal_p (TREE_OPERAND (arg0, 1),
                                      TREE_OPERAND (arg1, 1), 0));

        case BIT_FIELD_REF:
          return (operand_equal_p (TREE_OPERAND (arg0, 0),
                                   TREE_OPERAND (arg1, 0), 0)
                  && operand_equal_p (TREE_OPERAND (arg0, 1),
                                      TREE_OPERAND (arg1, 1), 0)
                  && operand_equal_p (TREE_OPERAND (arg0, 2),
                                      TREE_OPERAND (arg1, 2), 0));
        }
      break;
    }

  return 0;
}
\f
/* Similar to operand_equal_p, but see if ARG0 might have been made by
   shorten_compare from ARG1 when ARG1 was being compared with OTHER.

   When in doubt, return 0.  */

static int
operand_equal_for_comparison_p (arg0, arg1, other)
     tree arg0, arg1;
     tree other;
{
  int unsignedp1, unsignedpo;
  tree primarg1, primother;
  unsigned correct_width;

  if (operand_equal_p (arg0, arg1, 0))
    return 1;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0))
      || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
    return 0;

  /* Duplicate what shorten_compare does to ARG1 and see if that gives the
     actual comparison operand, ARG0.

     First throw away any conversions to wider types
     already present in the operands.  */

  primarg1 = get_narrower (arg1, &unsignedp1);
  primother = get_narrower (other, &unsignedpo);

  correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
  if (unsignedp1 == unsignedpo
      && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
      && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
    {
      tree type = TREE_TYPE (arg0);

      /* Make sure shorter operand is extended the right way
         to match the longer operand.  */
      primarg1 = convert (signed_or_unsigned_type (unsignedp1,
                                                   TREE_TYPE (primarg1)),
                          primarg1);

      if (operand_equal_p (arg0, convert (type, primarg1), 0))
        return 1;
    }

  return 0;
}
\f
/* See if ARG is an expression that is either a comparison or is performing
   arithmetic on comparisons.  The comparisons must only be comparing
   two different values, which will be stored in *CVAL1 and *CVAL2; if
   they are non-zero it means that some operands have already been found.
   No variables may be used anywhere else in the expression except in the
   comparisons.  If SAVE_P is true it means we removed a SAVE_EXPR around
   the expression and save_expr needs to be called with CVAL1 and CVAL2.

   If this is true, return 1.  Otherwise, return zero.  */

static int
twoval_comparison_p (arg, cval1, cval2, save_p)
     tree arg;
     tree *cval1, *cval2;
     int *save_p;
{
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)
    class = '1';
  else if (class == 'e'
           && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
               || code == COMPOUND_EXPR))
    class = '2';

  /* ??? Disable this since the SAVE_EXPR might already be in use outside
     the expression.  There may be no way to make this work, but it needs
     to be looked at again for 2.6.  */
#if 0
  else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)
    {
      /* If we've already found a CVAL1 or CVAL2, this expression is
         too complex to handle.  */
      if (*cval1 || *cval2)
        return 0;

      class = '1';
      *save_p = 1;
    }
#endif

  switch (class)
    {
    case '1':
      return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);

    case '2':
      return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
              && twoval_comparison_p (TREE_OPERAND (arg, 1),
                                      cval1, cval2, save_p));

    case 'c':
      return 1;

    case 'e':
      if (code == COND_EXPR)
        return (twoval_comparison_p (TREE_OPERAND (arg, 0),
                                     cval1, cval2, save_p)
                && twoval_comparison_p (TREE_OPERAND (arg, 1),
                                        cval1, cval2, save_p)
                && twoval_comparison_p (TREE_OPERAND (arg, 2),
                                        cval1, cval2, save_p));
      return 0;

    case '<':
      /* First see if we can handle the first operand, then the second.  For
         the second operand, we know *CVAL1 can't be zero.  It must be that
         one side of the comparison is each of the values; test for the
         case where this isn't true by failing if the two operands
         are the same.  */

      if (operand_equal_p (TREE_OPERAND (arg, 0),
                           TREE_OPERAND (arg, 1), 0))
        return 0;

      if (*cval1 == 0)
        *cval1 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
        ;
      else if (*cval2 == 0)
        *cval2 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
        ;
      else
        return 0;

      if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
        ;
      else if (*cval2 == 0)
        *cval2 = TREE_OPERAND (arg, 1);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
        ;
      else
        return 0;

      return 1;
    }

  return 0;
}
\f
/* ARG is a tree that is known to contain just arithmetic operations and
   comparisons.  Evaluate the operations in the tree substituting NEW0 for
   any occurrence of OLD0 as an operand of a comparison and likewise for
   NEW1 and OLD1.  */

static tree
eval_subst (arg, old0, new0, old1, new1)
     tree arg;
     tree old0, new0, old1, new1;
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)
    class = '1';
  else if (class == 'e'
           && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
    class = '2';

  switch (class)
    {
    case '1':
      return fold (build1 (code, type,
                           eval_subst (TREE_OPERAND (arg, 0),
                                       old0, new0, old1, new1)));

    case '2':
      return fold (build (code, type,
                          eval_subst (TREE_OPERAND (arg, 0),
                                      old0, new0, old1, new1),
                          eval_subst (TREE_OPERAND (arg, 1),
                                      old0, new0, old1, new1)));

    case 'e':
      switch (code)
        {
        case SAVE_EXPR:
          return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);

        case COMPOUND_EXPR:
          return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);

        case COND_EXPR:
          return fold (build (code, type,
                              eval_subst (TREE_OPERAND (arg, 0),
                                          old0, new0, old1, new1),
                              eval_subst (TREE_OPERAND (arg, 1),
                                          old0, new0, old1, new1),
                              eval_subst (TREE_OPERAND (arg, 2),
                                          old0, new0, old1, new1)));
        }

    case '<':
      {
        tree arg0 = TREE_OPERAND (arg, 0);
        tree arg1 = TREE_OPERAND (arg, 1);

        /* We need to check both for exact equality and tree equality.  The
           former will be true if the operand has a side-effect.  In that
           case, we know the operand occurred exactly once.  */

        if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
          arg0 = new0;
        else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
          arg0 = new1;

        if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
          arg1 = new0;
        else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
          arg1 = new1;

        return fold (build (code, type, arg0, arg1));
      }
    }

  return arg;
}
1964 \f
1965 /* Return a tree for the case when the result of an expression is RESULT
1966 converted to TYPE and OMITTED was previously an operand of the expression
1967 but is now not needed (e.g., we folded OMITTED * 0).
1968
1969 If OMITTED has side effects, we must evaluate it. Otherwise, just do
1970 the conversion of RESULT to TYPE. */
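/* For example, once FOO () * 0 has been folded to 0 (FOO being a
hypothetical function), a call with OMITTED = FOO () yields the
COMPOUND_EXPR (FOO (), 0) if FOO () has side effects, and plain 0
otherwise.  */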
1971
1972 static tree
1973 omit_one_operand (type, result, omitted)
1974 tree type, result, omitted;
1975 {
1976 tree t = convert (type, result);
1977
1978 if (TREE_SIDE_EFFECTS (omitted))
1979 return build (COMPOUND_EXPR, type, omitted, t);
1980
1981 return non_lvalue (t);
1982 }
1983
1984 /* Similar, but call pedantic_non_lvalue instead of non_lvalue. */
1985
1986 static tree
1987 pedantic_omit_one_operand (type, result, omitted)
1988 tree type, result, omitted;
1989 {
1990 tree t = convert (type, result);
1991
1992 if (TREE_SIDE_EFFECTS (omitted))
1993 return build (COMPOUND_EXPR, type, omitted, t);
1994
1995 return pedantic_non_lvalue (t);
1996 }
1997
1998
1999 \f
2000 /* Return a simplified tree node for the truth-negation of ARG. This
2001 never alters ARG itself. We assume that ARG is an operation that
2002 returns a truth value (0 or 1). */
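/* For example (A and B standing for arbitrary truth values), inverting
A && B yields !A || !B, and inverting A < B yields A >= B, except for
IEEE floating point, where the comparison is merely wrapped in a
TRUTH_NOT_EXPR (see below).  */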
2003
2004 tree
2005 invert_truthvalue (arg)
2006 tree arg;
2007 {
2008 tree type = TREE_TYPE (arg);
2009 enum tree_code code = TREE_CODE (arg);
2010
2011 if (code == ERROR_MARK)
2012 return arg;
2013
2014 /* If this is a comparison, we can simply invert it, except for
2015 floating-point non-equality comparisons, in which case we just
2016 wrap a TRUTH_NOT_EXPR around what we have. */
2017
2018 if (TREE_CODE_CLASS (code) == '<')
2019 {
2020 if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
2021 && code != NE_EXPR && code != EQ_EXPR)
2022 return build1 (TRUTH_NOT_EXPR, type, arg);
2023 else
2024 return build (invert_tree_comparison (code), type,
2025 TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
2026 }
2027
2028 switch (code)
2029 {
2030 case INTEGER_CST:
2031 return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
2032 && TREE_INT_CST_HIGH (arg) == 0, 0));
2033
2034 case TRUTH_AND_EXPR:
2035 return build (TRUTH_OR_EXPR, type,
2036 invert_truthvalue (TREE_OPERAND (arg, 0)),
2037 invert_truthvalue (TREE_OPERAND (arg, 1)));
2038
2039 case TRUTH_OR_EXPR:
2040 return build (TRUTH_AND_EXPR, type,
2041 invert_truthvalue (TREE_OPERAND (arg, 0)),
2042 invert_truthvalue (TREE_OPERAND (arg, 1)));
2043
2044 case TRUTH_XOR_EXPR:
2045 /* Here we can invert either operand. We invert the first operand
2046 unless the second operand is a TRUTH_NOT_EXPR in which case our
2047 result is the XOR of the first operand with the inside of the
2048 negation of the second operand. */
2049
2050 if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
2051 return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
2052 TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
2053 else
2054 return build (TRUTH_XOR_EXPR, type,
2055 invert_truthvalue (TREE_OPERAND (arg, 0)),
2056 TREE_OPERAND (arg, 1));
2057
2058 case TRUTH_ANDIF_EXPR:
2059 return build (TRUTH_ORIF_EXPR, type,
2060 invert_truthvalue (TREE_OPERAND (arg, 0)),
2061 invert_truthvalue (TREE_OPERAND (arg, 1)));
2062
2063 case TRUTH_ORIF_EXPR:
2064 return build (TRUTH_ANDIF_EXPR, type,
2065 invert_truthvalue (TREE_OPERAND (arg, 0)),
2066 invert_truthvalue (TREE_OPERAND (arg, 1)));
2067
2068 case TRUTH_NOT_EXPR:
2069 return TREE_OPERAND (arg, 0);
2070
2071 case COND_EXPR:
2072 return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
2073 invert_truthvalue (TREE_OPERAND (arg, 1)),
2074 invert_truthvalue (TREE_OPERAND (arg, 2)));
2075
2076 case COMPOUND_EXPR:
2077 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
2078 invert_truthvalue (TREE_OPERAND (arg, 1)));
2079
2080 case NON_LVALUE_EXPR:
2081 return invert_truthvalue (TREE_OPERAND (arg, 0));
2082
2083 case NOP_EXPR:
2084 case CONVERT_EXPR:
2085 case FLOAT_EXPR:
2086 return build1 (TREE_CODE (arg), type,
2087 invert_truthvalue (TREE_OPERAND (arg, 0)));
2088
2089 case BIT_AND_EXPR:
2090 if (!integer_onep (TREE_OPERAND (arg, 1)))
2091 break;
2092 return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));
2093
2094 case SAVE_EXPR:
2095 return build1 (TRUTH_NOT_EXPR, type, arg);
2096
2097 case CLEANUP_POINT_EXPR:
2098 return build1 (CLEANUP_POINT_EXPR, type,
2099 invert_truthvalue (TREE_OPERAND (arg, 0)));
2100 }
2101 if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)
2102 abort ();
2103 return build1 (TRUTH_NOT_EXPR, type, arg);
2104 }
2105
2106 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
2107 operands are another bit-wise operation with a common input. If so,
2108 distribute the bit operations to save an operation and possibly two if
2109 constants are involved. For example, convert
2110 (A | B) & (A | C) into A | (B & C)
2111 Further simplification will occur if B and C are constants.
2112
2113 If this optimization cannot be done, 0 will be returned. */
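/* As a concrete instance of the example above, with a hypothetical
integer X, (X | 0xf0) & (X | 0x0f) becomes X | (0xf0 & 0x0f), which
then folds to X | 0 and finally to X.  */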
2114
2115 static tree
2116 distribute_bit_expr (code, type, arg0, arg1)
2117 enum tree_code code;
2118 tree type;
2119 tree arg0, arg1;
2120 {
2121 tree common;
2122 tree left, right;
2123
2124 if (TREE_CODE (arg0) != TREE_CODE (arg1)
2125 || TREE_CODE (arg0) == code
2126 || (TREE_CODE (arg0) != BIT_AND_EXPR
2127 && TREE_CODE (arg0) != BIT_IOR_EXPR))
2128 return 0;
2129
2130 if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
2131 {
2132 common = TREE_OPERAND (arg0, 0);
2133 left = TREE_OPERAND (arg0, 1);
2134 right = TREE_OPERAND (arg1, 1);
2135 }
2136 else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
2137 {
2138 common = TREE_OPERAND (arg0, 0);
2139 left = TREE_OPERAND (arg0, 1);
2140 right = TREE_OPERAND (arg1, 0);
2141 }
2142 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
2143 {
2144 common = TREE_OPERAND (arg0, 1);
2145 left = TREE_OPERAND (arg0, 0);
2146 right = TREE_OPERAND (arg1, 1);
2147 }
2148 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
2149 {
2150 common = TREE_OPERAND (arg0, 1);
2151 left = TREE_OPERAND (arg0, 0);
2152 right = TREE_OPERAND (arg1, 0);
2153 }
2154 else
2155 return 0;
2156
2157 return fold (build (TREE_CODE (arg0), type, common,
2158 fold (build (code, type, left, right))));
2159 }
2160 \f
2161 /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
2162 starting at BITPOS. The field is unsigned if UNSIGNEDP is non-zero. */
2163
2164 static tree
2165 make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
2166 tree inner;
2167 tree type;
2168 int bitsize, bitpos;
2169 int unsignedp;
2170 {
2171 tree result = build (BIT_FIELD_REF, type, inner,
2172 size_int (bitsize), size_int (bitpos));
2173
2174 TREE_UNSIGNED (result) = unsignedp;
2175
2176 return result;
2177 }
2178
2179 /* Optimize a bit-field compare.
2180
2181 There are two cases: First is a compare against a constant and the
2182 second is a comparison of two items where the fields are at the same
2183 bit position relative to the start of a chunk (byte, halfword, word)
2184 large enough to contain it. In these cases we can avoid the shift
2185 implicit in bitfield extractions.
2186
2187 For constants, we emit a compare of the shifted constant with the
2188 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
2189 compared. For two fields at the same position, we do the ANDs with the
2190 similar mask and compare the result of the ANDs.
2191
2192 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
2193 COMPARE_TYPE is the type of the comparison, and LHS and RHS
2194 are the left and right operands of the comparison, respectively.
2195
2196 If the optimization described above can be done, we return the resulting
2197 tree. Otherwise we return zero. */
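/* As a sketch of the constant case (the struct is hypothetical and the
exact layout is target-dependent): for struct s { unsigned f : 3; },
the test X.f == 5, with F in the low-order bits of a byte, can be done
as (BYTE & 7) == 5, where BYTE is the containing byte, avoiding the
shift of a bitfield extraction.  */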
2198
2199 static tree
2200 optimize_bit_field_compare (code, compare_type, lhs, rhs)
2201 enum tree_code code;
2202 tree compare_type;
2203 tree lhs, rhs;
2204 {
2205 int lbitpos, lbitsize, rbitpos, rbitsize;
2206 int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
2207 tree type = TREE_TYPE (lhs);
2208 tree signed_type, unsigned_type;
2209 int const_p = TREE_CODE (rhs) == INTEGER_CST;
2210 enum machine_mode lmode, rmode, lnmode, rnmode;
2211 int lunsignedp, runsignedp;
2212 int lvolatilep = 0, rvolatilep = 0;
2213 tree linner, rinner;
2214 tree mask;
2215 tree offset;
2216
2217 /* Get all the information about the extractions being done. If the bit size
2218 is the same as the size of the underlying object, we aren't doing an
2219 extraction at all and so can do nothing. */
2220 linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
2221 &lunsignedp, &lvolatilep);
2222 if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
2223 || offset != 0)
2224 return 0;
2225
2226 if (!const_p)
2227 {
2228 /* If this is not a constant, we can only do something if bit positions,
2229 sizes, and signedness are the same. */
2230 rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
2231 &rmode, &runsignedp, &rvolatilep);
2232
2233 if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
2234 || lunsignedp != runsignedp || offset != 0)
2235 return 0;
2236 }
2237
2238 /* See if we can find a mode to refer to this field. We should be able to,
2239 but fail if we can't. */
2240 lnmode = get_best_mode (lbitsize, lbitpos,
2241 TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
2242 lvolatilep);
2243 if (lnmode == VOIDmode)
2244 return 0;
2245
2246 /* Set signed and unsigned types of the precision of this mode for the
2247 shifts below. */
2248 signed_type = type_for_mode (lnmode, 0);
2249 unsigned_type = type_for_mode (lnmode, 1);
2250
2251 if (! const_p)
2252 {
2253 rnmode = get_best_mode (rbitsize, rbitpos,
2254 TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
2255 rvolatilep);
2256 if (rnmode == VOIDmode)
2257 return 0;
2258 }
2259
2260 /* Compute the bit position and size for the new reference and our offset
2261 within it. If the new reference is the same size as the original, we
2262 won't optimize anything, so return zero. */
2263 lnbitsize = GET_MODE_BITSIZE (lnmode);
2264 lnbitpos = lbitpos & ~ (lnbitsize - 1);
2265 lbitpos -= lnbitpos;
2266 if (lnbitsize == lbitsize)
2267 return 0;
2268
2269 if (! const_p)
2270 {
2271 rnbitsize = GET_MODE_BITSIZE (rnmode);
2272 rnbitpos = rbitpos & ~ (rnbitsize - 1);
2273 rbitpos -= rnbitpos;
2274 if (rnbitsize == rbitsize)
2275 return 0;
2276 }
2277
2278 if (BYTES_BIG_ENDIAN)
2279 lbitpos = lnbitsize - lbitsize - lbitpos;
2280
2281 /* Make the mask to be used against the extracted field. */
2282 mask = build_int_2 (~0, ~0);
2283 TREE_TYPE (mask) = unsigned_type;
2284 force_fit_type (mask, 0);
2285 mask = convert (unsigned_type, mask);
2286 mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
2287 mask = const_binop (RSHIFT_EXPR, mask,
2288 size_int (lnbitsize - lbitsize - lbitpos), 0);
2289
2290 if (! const_p)
2291 /* If not comparing with constant, just rework the comparison
2292 and return. */
2293 return build (code, compare_type,
2294 build (BIT_AND_EXPR, unsigned_type,
2295 make_bit_field_ref (linner, unsigned_type,
2296 lnbitsize, lnbitpos, 1),
2297 mask),
2298 build (BIT_AND_EXPR, unsigned_type,
2299 make_bit_field_ref (rinner, unsigned_type,
2300 rnbitsize, rnbitpos, 1),
2301 mask));
2302
2303 /* Otherwise, we are handling the constant case. See if the constant is too
2304 big for the field. Warn and return a tree for 0 (false) if so. We do
2305 this not only for its own sake, but to avoid having to test for this
2306 error case below. If we didn't, we might generate wrong code.
2307
2308 For unsigned fields, the constant shifted right by the field length should
2309 be all zero. For signed fields, the high-order bits should agree with
2310 the sign bit. */
2311
2312 if (lunsignedp)
2313 {
2314 if (! integer_zerop (const_binop (RSHIFT_EXPR,
2315 convert (unsigned_type, rhs),
2316 size_int (lbitsize), 0)))
2317 {
2318 warning ("comparison is always %s due to width of bitfield",
2319 code == NE_EXPR ? "one" : "zero");
2320 return convert (compare_type,
2321 (code == NE_EXPR
2322 ? integer_one_node : integer_zero_node));
2323 }
2324 }
2325 else
2326 {
2327 tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
2328 size_int (lbitsize - 1), 0);
2329 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
2330 {
2331 warning ("comparison is always %s due to width of bitfield",
2332 code == NE_EXPR ? "one" : "zero");
2333 return convert (compare_type,
2334 (code == NE_EXPR
2335 ? integer_one_node : integer_zero_node));
2336 }
2337 }
2338
2339 /* Single-bit compares should always be against zero. */
2340 if (lbitsize == 1 && ! integer_zerop (rhs))
2341 {
2342 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
2343 rhs = convert (type, integer_zero_node);
2344 }
2345
2346 /* Make a new bitfield reference, shift the constant over the
2347 appropriate number of bits and mask it with the computed mask
2348 (in case this was a signed field). If we changed it, make a new one. */
2349 lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
2350 if (lvolatilep)
2351 {
2352 TREE_SIDE_EFFECTS (lhs) = 1;
2353 TREE_THIS_VOLATILE (lhs) = 1;
2354 }
2355
2356 rhs = fold (const_binop (BIT_AND_EXPR,
2357 const_binop (LSHIFT_EXPR,
2358 convert (unsigned_type, rhs),
2359 size_int (lbitpos), 0),
2360 mask, 0));
2361
2362 return build (code, compare_type,
2363 build (BIT_AND_EXPR, unsigned_type, lhs, mask),
2364 rhs);
2365 }
2366 \f
2367 /* Subroutine for fold_truthop: decode a field reference.
2368
2369 If EXP is a component reference, we return the innermost reference.
2370
2371 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2372 set to the starting bit number.
2373
2374 If the innermost field can be completely contained in a mode-sized
2375 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2376
2377 *PVOLATILEP is set to 1 if any expression encountered is volatile;
2378 otherwise it is not changed.
2379
2380 *PUNSIGNEDP is set to the signedness of the field.
2381
2382 *PMASK is set to the mask used. This is either contained in a
2383 BIT_AND_EXPR or derived from the width of the field.
2384
2385 Return 0 if this is not a component reference or is one that we can't
2386 do anything with. */
2387
2388 static tree
2389 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2390 pvolatilep, pmask)
2391 tree exp;
2392 int *pbitsize, *pbitpos;
2393 enum machine_mode *pmode;
2394 int *punsignedp, *pvolatilep;
2395 tree *pmask;
2396 {
2397 tree and_mask = 0;
2398 tree mask, inner, offset;
2399 tree unsigned_type;
2400 int precision;
2401
2402 /* All the optimizations using this function assume integer fields.
2403 There are problems with FP fields since the type_for_size call
2404 below can fail for, e.g., XFmode. */
2405 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
2406 return 0;
2407
2408 STRIP_NOPS (exp);
2409
2410 if (TREE_CODE (exp) == BIT_AND_EXPR)
2411 {
2412 and_mask = TREE_OPERAND (exp, 1);
2413 exp = TREE_OPERAND (exp, 0);
2414 STRIP_NOPS (exp); STRIP_NOPS (and_mask);
2415 if (TREE_CODE (and_mask) != INTEGER_CST)
2416 return 0;
2417 }
2418
2419
2420 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2421 punsignedp, pvolatilep);
2422 if ((inner == exp && and_mask == 0)
2423 || *pbitsize < 0 || offset != 0)
2424 return 0;
2425
2426 /* Compute the mask to access the bitfield. */
2427 unsigned_type = type_for_size (*pbitsize, 1);
2428 precision = TYPE_PRECISION (unsigned_type);
2429
2430 mask = build_int_2 (~0, ~0);
2431 TREE_TYPE (mask) = unsigned_type;
2432 force_fit_type (mask, 0);
2433 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2434 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2435
2436 /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
2437 if (and_mask != 0)
2438 mask = fold (build (BIT_AND_EXPR, unsigned_type,
2439 convert (unsigned_type, and_mask), mask));
2440
2441 *pmask = mask;
2442 return inner;
2443 }
2444
2445 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order
2446 bit positions. */
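/* For example, with SIZE == 8 this returns non-zero for a MASK of 0xff
but zero for masks of 0x7f or 0x1ff.  */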
2447
2448 static int
2449 all_ones_mask_p (mask, size)
2450 tree mask;
2451 int size;
2452 {
2453 tree type = TREE_TYPE (mask);
2454 int precision = TYPE_PRECISION (type);
2455 tree tmask;
2456
2457 tmask = build_int_2 (~0, ~0);
2458 TREE_TYPE (tmask) = signed_type (type);
2459 force_fit_type (tmask, 0);
2460 return
2461 tree_int_cst_equal (mask,
2462 const_binop (RSHIFT_EXPR,
2463 const_binop (LSHIFT_EXPR, tmask,
2464 size_int (precision - size),
2465 0),
2466 size_int (precision - size), 0));
2467 }
2468
2469 /* Subroutine for fold_truthop: determine if an operand is simple enough
2470 to be evaluated unconditionally. */
2471
2472 static int
2473 simple_operand_p (exp)
2474 tree exp;
2475 {
2476 /* Strip any conversions that don't change the machine mode. */
2477 while ((TREE_CODE (exp) == NOP_EXPR
2478 || TREE_CODE (exp) == CONVERT_EXPR)
2479 && (TYPE_MODE (TREE_TYPE (exp))
2480 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2481 exp = TREE_OPERAND (exp, 0);
2482
2483 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2484 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2485 && ! TREE_ADDRESSABLE (exp)
2486 && ! TREE_THIS_VOLATILE (exp)
2487 && ! DECL_NONLOCAL (exp)
2488 /* Don't regard global variables as simple. They may be
2489 allocated in ways unknown to the compiler (shared memory,
2490 #pragma weak, etc). */
2491 && ! TREE_PUBLIC (exp)
2492 && ! DECL_EXTERNAL (exp)
2493 /* Loading a static variable is unduly expensive, but global
2494 registers aren't expensive. */
2495 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
2496 }
2497 \f
2498 /* Subroutine for fold_truthop: try to optimize a range test.
2499
2500 For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".
2501
2502 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2503 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2504 (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of
2505 the result.
2506
2507 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2508 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2509 larger than HI_CST (they may be equal).
2510
2511 We return the simplified tree or 0 if no optimization is possible. */
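/* Similarly, for the "or" form with adjacent constants, "i == 4 || i == 5"
normalizes to a range and becomes "(unsigned) (i - 4) <= 1".  */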
2512
2513 static tree
2514 range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
2515 enum tree_code jcode, lo_code, hi_code;
2516 tree type, var, lo_cst, hi_cst;
2517 {
2518 tree utype;
2519 enum tree_code rcode;
2520
2521 /* See if this is a range test and normalize the constant terms. */
2522
2523 if (jcode == TRUTH_AND_EXPR)
2524 {
2525 switch (lo_code)
2526 {
2527 case NE_EXPR:
2528 /* See if we have VAR != CST && VAR != CST+1. */
2529 if (! (hi_code == NE_EXPR
2530 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2531 && tree_int_cst_equal (integer_one_node,
2532 const_binop (MINUS_EXPR,
2533 hi_cst, lo_cst, 0))))
2534 return 0;
2535
2536 rcode = GT_EXPR;
2537 break;
2538
2539 case GT_EXPR:
2540 case GE_EXPR:
2541 if (hi_code == LT_EXPR)
2542 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2543 else if (hi_code != LE_EXPR)
2544 return 0;
2545
2546 if (lo_code == GT_EXPR)
2547 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2548
2549 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
2550 rcode = LE_EXPR;
2551 break;
2552
2553 default:
2554 return 0;
2555 }
2556 }
2557 else
2558 {
2559 switch (lo_code)
2560 {
2561 case EQ_EXPR:
2562 /* See if we have VAR == CST || VAR == CST+1. */
2563 if (! (hi_code == EQ_EXPR
2564 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2565 && tree_int_cst_equal (integer_one_node,
2566 const_binop (MINUS_EXPR,
2567 hi_cst, lo_cst, 0))))
2568 return 0;
2569
2570 rcode = LE_EXPR;
2571 break;
2572
2573 case LE_EXPR:
2574 case LT_EXPR:
2575 if (hi_code == GE_EXPR)
2576 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2577 else if (hi_code != GT_EXPR)
2578 return 0;
2579
2580 if (lo_code == LE_EXPR)
2581 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2582
2583 /* We now have VAR < LO_CST || VAR > HI_CST. */
2584 rcode = GT_EXPR;
2585 break;
2586
2587 default:
2588 return 0;
2589 }
2590 }
2591
2592 /* When normalizing, it is possible to both increment the smaller constant
2593 and decrement the larger constant. See if they are still ordered. */
2594 if (tree_int_cst_lt (hi_cst, lo_cst))
2595 return 0;
2596
2597 /* Fail if VAR isn't an integer. */
2598 utype = TREE_TYPE (var);
2599 if (! INTEGRAL_TYPE_P (utype))
2600 return 0;
2601
2602 /* The range test is invalid if subtracting the two constants results
2603 in overflow. This can happen in traditional mode. */
2604 if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
2605 || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
2606 return 0;
2607
2608 if (! TREE_UNSIGNED (utype))
2609 {
2610 utype = unsigned_type (utype);
2611 var = convert (utype, var);
2612 lo_cst = convert (utype, lo_cst);
2613 hi_cst = convert (utype, hi_cst);
2614 }
2615
2616 return fold (convert (type,
2617 build (rcode, utype,
2618 build (MINUS_EXPR, utype, var, lo_cst),
2619 const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
2620 }
2621 \f
2622 /* Subroutine for fold_truthop: C is an INTEGER_CST interpreted as a P
2623 bit value. Arrange things so the extra bits will be set to zero if and
2624 only if C is sign-extended to its full width. */
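/* For example, with P == 3 in a hypothetical 32-bit mode: the
sign-extended constant -3 (binary ...11101) comes back as 5 (binary
101, extra bits clear), while a zero-extended 5 comes back with all
the extra bits set.  */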
2625
2626 static tree
2627 unextend (c, p, unsignedp)
2628 tree c;
2629 int p;
2630 int unsignedp;
2631 {
2632 tree type = TREE_TYPE (c);
2633 int modesize = GET_MODE_BITSIZE (TYPE_MODE (type));
2634 tree temp;
2635
2636 if (p == modesize || unsignedp)
2637 return c;
2638
2639 if (TREE_UNSIGNED (type))
2640 c = convert (signed_type (type), c);
2641
2642 /* We work by getting just the sign bit into the low-order bit, then
2643 into the high-order bit, then sign-extend. We then XOR that value
2644 with C. */
2645 temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1), 0);
2646 temp = const_binop (BIT_AND_EXPR, temp, size_int (1), 0);
2647 temp = const_binop (LSHIFT_EXPR, temp, size_int (modesize - 1), 0);
2648 temp = const_binop (RSHIFT_EXPR, temp, size_int (modesize - p - 1), 0);
2649 return convert (type, const_binop (BIT_XOR_EXPR, c, temp, 0));
2650 }
2651 \f
2652 /* Find ways of folding logical expressions of LHS and RHS:
2653 Try to merge two comparisons to the same innermost item.
2654 Look for range tests like "ch >= '0' && ch <= '9'".
2655 Look for combinations of simple terms on machines with expensive branches
2656 and evaluate the RHS unconditionally.
2657
2658 For example, if we have p->a == 2 && p->b == 4 and we can make an
2659 object large enough to span both A and B, we can do this with a comparison
2660 against the object ANDed with a mask.
2661
2662 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2663 operations to do this with one comparison.
2664
2665 We check for both normal comparisons and the BIT_AND_EXPRs made by this
2666 function and the one above.
2667
2668 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2669 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2670
2671 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its
2672 two operands.
2673
2674 We return the simplified tree or 0 if no optimization is possible. */
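/* As a sketch of the p->a == 2 && p->b == 4 example above (layout is
target-dependent): if A and B are adjacent 8-bit fields starting at
bit 0, the merged test compares a 16-bit reference against the
combined constant (4 << 8) | 2, the mask being all ones.  */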
2675
2676 static tree
2677 fold_truthop (code, truth_type, lhs, rhs)
2678 enum tree_code code;
2679 tree truth_type, lhs, rhs;
2680 {
2681 /* If this is the "or" of two comparisons, we can do something if
2682 the comparisons are NE_EXPR. If this is the "and", we can do something
2683 if the comparisons are EQ_EXPR. I.e.,
2684 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2685
2686 WANTED_CODE is that comparison code. For single bit fields, we can
2687 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2688 comparison for one-bit fields. */
2689
2690 enum tree_code wanted_code;
2691 enum tree_code lcode, rcode;
2692 tree ll_arg, lr_arg, rl_arg, rr_arg;
2693 tree ll_inner, lr_inner, rl_inner, rr_inner;
2694 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2695 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2696 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2697 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2698 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2699 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2700 enum machine_mode lnmode, rnmode;
2701 tree ll_mask, lr_mask, rl_mask, rr_mask;
2702 tree l_const, r_const;
2703 tree type, result;
2704 int first_bit, end_bit;
2705 int volatilep;
2706
2707 /* Start by getting the comparison codes and seeing if this looks like
2708 a range test. Fail if anything is volatile. If one operand is a
2709 BIT_AND_EXPR with the constant one, treat it as if it were surrounded
2710 with a NE_EXPR. */
2711
2712 if (TREE_SIDE_EFFECTS (lhs)
2713 || TREE_SIDE_EFFECTS (rhs))
2714 return 0;
2715
2716 lcode = TREE_CODE (lhs);
2717 rcode = TREE_CODE (rhs);
2718
2719 if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
2720 lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
2721
2722 if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
2723 rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
2724
2725 if (TREE_CODE_CLASS (lcode) != '<'
2726 || TREE_CODE_CLASS (rcode) != '<')
2727 return 0;
2728
2729 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
2730 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
2731
2732 ll_arg = TREE_OPERAND (lhs, 0);
2733 lr_arg = TREE_OPERAND (lhs, 1);
2734 rl_arg = TREE_OPERAND (rhs, 0);
2735 rr_arg = TREE_OPERAND (rhs, 1);
2736
2737 if (TREE_CODE (lr_arg) == INTEGER_CST
2738 && TREE_CODE (rr_arg) == INTEGER_CST
2739 && operand_equal_p (ll_arg, rl_arg, 0))
2740 {
2741 if (tree_int_cst_lt (lr_arg, rr_arg))
2742 result = range_test (code, truth_type, lcode, rcode,
2743 ll_arg, lr_arg, rr_arg);
2744 else
2745 result = range_test (code, truth_type, rcode, lcode,
2746 ll_arg, rr_arg, lr_arg);
2747
2748 /* If this isn't a range test, it also isn't a comparison that
2749 can be merged. However, it wins to evaluate the RHS unconditionally
2750 on machines with expensive branches. */
2751
2752 if (result == 0 && BRANCH_COST >= 2)
2753 {
2754 if (TREE_CODE (ll_arg) != VAR_DECL
2755 && TREE_CODE (ll_arg) != PARM_DECL)
2756 {
2757 /* Avoid evaluating the variable part twice. */
2758 ll_arg = save_expr (ll_arg);
2759 lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
2760 rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
2761 }
2762 return build (code, truth_type, lhs, rhs);
2763 }
2764 return result;
2765 }
2766
2767 /* If the RHS can be evaluated unconditionally and its operands are
2768 simple, it wins to evaluate the RHS unconditionally on machines
2769 with expensive branches. In this case, this isn't a comparison
2770 that can be merged. */
2771
2772 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2773 are with zero (tmw). */
2774
2775 if (BRANCH_COST >= 2
2776 && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2777 && simple_operand_p (rl_arg)
2778 && simple_operand_p (rr_arg))
2779 return build (code, truth_type, lhs, rhs);
2780
2781 /* See if the comparisons can be merged. Then get all the parameters for
2782 each side. */
2783
2784 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2785 || (rcode != EQ_EXPR && rcode != NE_EXPR))
2786 return 0;
2787
2788 volatilep = 0;
2789 ll_inner = decode_field_reference (ll_arg,
2790 &ll_bitsize, &ll_bitpos, &ll_mode,
2791 &ll_unsignedp, &volatilep, &ll_mask);
2792 lr_inner = decode_field_reference (lr_arg,
2793 &lr_bitsize, &lr_bitpos, &lr_mode,
2794 &lr_unsignedp, &volatilep, &lr_mask);
2795 rl_inner = decode_field_reference (rl_arg,
2796 &rl_bitsize, &rl_bitpos, &rl_mode,
2797 &rl_unsignedp, &volatilep, &rl_mask);
2798 rr_inner = decode_field_reference (rr_arg,
2799 &rr_bitsize, &rr_bitpos, &rr_mode,
2800 &rr_unsignedp, &volatilep, &rr_mask);
2801
2802 /* The inner operation on the lhs of each comparison must be the
2803 same if we are to be able to do anything.
2804 Then see if we have constants. If not, the same must be true for
2805 the rhs's. */
2806 if (volatilep || ll_inner == 0 || rl_inner == 0
2807 || ! operand_equal_p (ll_inner, rl_inner, 0))
2808 return 0;
2809
2810 if (TREE_CODE (lr_arg) == INTEGER_CST
2811 && TREE_CODE (rr_arg) == INTEGER_CST)
2812 l_const = lr_arg, r_const = rr_arg;
2813 else if (lr_inner == 0 || rr_inner == 0
2814 || ! operand_equal_p (lr_inner, rr_inner, 0))
2815 return 0;
2816 else
2817 l_const = r_const = 0;
2818
2819 /* If either comparison code is not correct for our logical operation,
2820 fail. However, we can convert a one-bit comparison against zero into
2821 the opposite comparison against that bit being set in the field. */
2822
2823 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
2824 if (lcode != wanted_code)
2825 {
2826 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2827 l_const = ll_mask;
2828 else
2829 return 0;
2830 }
2831
2832 if (rcode != wanted_code)
2833 {
2834 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2835 r_const = rl_mask;
2836 else
2837 return 0;
2838 }
2839
2840 /* See if we can find a mode that contains both fields being compared on
2841 the left. If we can't, fail. Otherwise, update all constants and masks
2842 to be relative to a field of that size. */
2843 first_bit = MIN (ll_bitpos, rl_bitpos);
2844 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2845 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2846 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2847 volatilep);
2848 if (lnmode == VOIDmode)
2849 return 0;
2850
2851 lnbitsize = GET_MODE_BITSIZE (lnmode);
2852 lnbitpos = first_bit & ~ (lnbitsize - 1);
2853 type = type_for_size (lnbitsize, 1);
2854 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
2855
2856 if (BYTES_BIG_ENDIAN)
2857 {
2858 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2859 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
2860 }
2861
2862 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2863 size_int (xll_bitpos), 0);
2864 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2865 size_int (xrl_bitpos), 0);
2866
2867 if (l_const)
2868 {
2869 l_const = convert (type, unextend (l_const, ll_bitsize, ll_unsignedp));
2870 l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos), 0);
2871 if (! integer_zerop (const_binop (BIT_AND_EXPR, l_const,
2872 fold (build1 (BIT_NOT_EXPR,
2873 type, ll_mask)),
2874 0)))
2875 {
2876 warning ("comparison is always %s",
2877 wanted_code == NE_EXPR ? "one" : "zero");
2878
2879 return convert (truth_type,
2880 wanted_code == NE_EXPR
2881 ? integer_one_node : integer_zero_node);
2882 }
2883 }
2884 if (r_const)
2885 {
2886 r_const = convert (type, unextend (r_const, rl_bitsize, rl_unsignedp));
2887 r_const = const_binop (LSHIFT_EXPR, r_const, size_int (xrl_bitpos), 0);
2888 if (! integer_zerop (const_binop (BIT_AND_EXPR, r_const,
2889 fold (build1 (BIT_NOT_EXPR,
2890 type, rl_mask)),
2891 0)))
2892 {
2893 warning ("comparison is always %s",
2894 wanted_code == NE_EXPR ? "one" : "zero");
2895
2896 return convert (truth_type,
2897 wanted_code == NE_EXPR
2898 ? integer_one_node : integer_zero_node);
2899 }
2900 }
2901
2902 /* If the right sides are not constant, do the same for them. Also,
2903 disallow this optimization if a size or signedness mismatch occurs
2904 between the left and right sides. */
2905 if (l_const == 0)
2906 {
2907 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2908 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
2909 /* Make sure the two fields on the right
2910 correspond to the left without being swapped. */
2911 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
2912 return 0;
2913
2914 first_bit = MIN (lr_bitpos, rr_bitpos);
2915 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2916 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2917 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2918 volatilep);
2919 if (rnmode == VOIDmode)
2920 return 0;
2921
2922 rnbitsize = GET_MODE_BITSIZE (rnmode);
2923 rnbitpos = first_bit & ~ (rnbitsize - 1);
2924 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2925
2926 if (BYTES_BIG_ENDIAN)
2927 {
2928 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2929 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2930 }
2931
2932 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2933 size_int (xlr_bitpos), 0);
2934 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2935 size_int (xrr_bitpos), 0);
2936
2937 /* Make a mask that corresponds to both fields being compared.
2938 Do this for both items being compared. If the masks agree,
2939 we can do this by masking both and comparing the masked
2940 results. */
2941 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2942 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
2943 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
2944 {
2945 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2946 ll_unsignedp || rl_unsignedp);
2947 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
2948 lr_unsignedp || rr_unsignedp);
2949 if (! all_ones_mask_p (ll_mask, lnbitsize))
2950 {
2951 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
2952 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
2953 }
2954 return build (wanted_code, truth_type, lhs, rhs);
2955 }
2956
2957 /* There is still another way we can do something: If both pairs of
2958 fields being compared are adjacent, we may be able to make a wider
2959 field containing them both. */
2960 if ((ll_bitsize + ll_bitpos == rl_bitpos
2961 && lr_bitsize + lr_bitpos == rr_bitpos)
2962 || (ll_bitpos == rl_bitpos + rl_bitsize
2963 && lr_bitpos == rr_bitpos + rr_bitsize))
2964 return build (wanted_code, truth_type,
2965 make_bit_field_ref (ll_inner, type,
2966 ll_bitsize + rl_bitsize,
2967 MIN (ll_bitpos, rl_bitpos),
2968 ll_unsignedp),
2969 make_bit_field_ref (lr_inner, type,
2970 lr_bitsize + rr_bitsize,
2971 MIN (lr_bitpos, rr_bitpos),
2972 lr_unsignedp));
2973
2974 return 0;
2975 }
2976
2977 /* Handle the case of comparisons with constants. If there is something in
2978 common between the masks, those bits of the constants must be the same.
2979 If not, the condition is always false. Test for this to avoid generating
2980 incorrect code below. */
2981 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
2982 if (! integer_zerop (result)
2983 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
2984 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
2985 {
2986 if (wanted_code == NE_EXPR)
2987 {
2988 warning ("`or' of unmatched not-equal tests is always 1");
2989 return convert (truth_type, integer_one_node);
2990 }
2991 else
2992 {
2993 warning ("`and' of mutually exclusive equal-tests is always zero");
2994 return convert (truth_type, integer_zero_node);
2995 }
2996 }
2997
2998 /* Construct the expression we will return. First get the component
2999 reference we will make. Unless the mask is all ones the width of
3000 that field, perform the mask operation. Then compare with the
3001 merged constant. */
3002 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
3003 ll_unsignedp || rl_unsignedp);
3004
3005 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
3006 if (! all_ones_mask_p (ll_mask, lnbitsize))
3007 result = build (BIT_AND_EXPR, type, result, ll_mask);
3008
3009 return build (wanted_code, truth_type, result,
3010 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
3011 }
3012 \f
3013 /* If T contains a COMPOUND_EXPR which was inserted merely to evaluate
3014 S, a SAVE_EXPR, return the expression actually being evaluated. Note
3015 that we may sometimes modify the tree. */
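/* That is, if T has the form ((type) S, X), the result is X, with the
COMPOUND_EXPR and the conversion of S both stripped away.  */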
3016
3017 static tree
3018 strip_compound_expr (t, s)
3019 tree t;
3020 tree s;
3021 {
3022 tree type = TREE_TYPE (t);
3023 enum tree_code code = TREE_CODE (t);
3024
3025 /* See if this is the COMPOUND_EXPR we want to eliminate. */
3026 if (code == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR
3027 && TREE_OPERAND (TREE_OPERAND (t, 0), 0) == s)
3028 return TREE_OPERAND (t, 1);
3029
3030 /* See if this is a COND_EXPR or a simple arithmetic operator. We
3031 don't bother handling any other types. */
3032 else if (code == COND_EXPR)
3033 {
3034 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3035 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
3036 TREE_OPERAND (t, 2) = strip_compound_expr (TREE_OPERAND (t, 2), s);
3037 }
3038 else if (TREE_CODE_CLASS (code) == '1')
3039 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3040 else if (TREE_CODE_CLASS (code) == '<'
3041 || TREE_CODE_CLASS (code) == '2')
3042 {
3043 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3044 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
3045 }
3046
3047 return t;
3048 }
3049 \f
3050 /* Perform constant folding and related simplification of EXPR.
3051 The related simplifications include x*1 => x, x*0 => 0, etc.,
3052 and application of the associative law.
3053 NOP_EXPR conversions may be removed freely (as long as we
3054 are careful not to change the C type of the overall expression).
3055 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
3056 but we can constant-fold them if they have constant operands. */
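/* For example, folding X + 0 for an integral X yields X itself (as a
non-lvalue), and folding the comparison 1 == 1 yields the constant 1.  */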
3057
3058 tree
3059 fold (expr)
3060 tree expr;
3061 {
3062 register tree t = expr;
3063 tree t1 = NULL_TREE;
3064 tree tem;
3065 tree type = TREE_TYPE (expr);
3066 register tree arg0, arg1;
3067 register enum tree_code code = TREE_CODE (t);
3068 register int kind;
3069 int invert;
3070
3071 /* WINS will be nonzero when the switch is done
3072 if all operands are constant. */
3073
3074 int wins = 1;
3075
3076 /* Don't try to process an RTL_EXPR since its operands aren't trees. */
3077 if (code == RTL_EXPR)
3078 return t;
3079
3080 /* Return right away if already constant. */
3081 if (TREE_CONSTANT (t))
3082 {
3083 if (code == CONST_DECL)
3084 return DECL_INITIAL (t);
3085 return t;
3086 }
3087
3088 kind = TREE_CODE_CLASS (code);
3089 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
3090 {
3091 tree subop;
3092
3093 /* Special case for conversion ops that can have fixed point args. */
3094 arg0 = TREE_OPERAND (t, 0);
3095
3096 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
3097 if (arg0 != 0)
3098 STRIP_TYPE_NOPS (arg0);
3099
3100 if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
3101 subop = TREE_REALPART (arg0);
3102 else
3103 subop = arg0;
3104
3105 if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
3106 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3107 && TREE_CODE (subop) != REAL_CST
3108 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3109 )
3110 /* Note that TREE_CONSTANT isn't enough:
3111 static var addresses are constant but we can't
3112 do arithmetic on them. */
3113 wins = 0;
3114 }
3115 else if (kind == 'e' || kind == '<'
3116 || kind == '1' || kind == '2' || kind == 'r')
3117 {
3118 register int len = tree_code_length[(int) code];
3119 register int i;
3120 for (i = 0; i < len; i++)
3121 {
3122 tree op = TREE_OPERAND (t, i);
3123 tree subop;
3124
3125 if (op == 0)
3126 continue; /* Valid for CALL_EXPR, at least. */
3127
3128 if (kind == '<' || code == RSHIFT_EXPR)
3129 {
3130 /* Signedness matters here. Perhaps we can refine this
3131 later. */
3132 STRIP_TYPE_NOPS (op);
3133 }
3134 else
3135 {
3136 /* Strip any conversions that don't change the mode. */
3137 STRIP_NOPS (op);
3138 }
3139
3140 if (TREE_CODE (op) == COMPLEX_CST)
3141 subop = TREE_REALPART (op);
3142 else
3143 subop = op;
3144
3145 if (TREE_CODE (subop) != INTEGER_CST
3146 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3147 && TREE_CODE (subop) != REAL_CST
3148 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3149 )
3150 /* Note that TREE_CONSTANT isn't enough:
3151 static var addresses are constant but we can't
3152 do arithmetic on them. */
3153 wins = 0;
3154
3155 if (i == 0)
3156 arg0 = op;
3157 else if (i == 1)
3158 arg1 = op;
3159 }
3160 }
3161
3162 /* If this is a commutative operation, and ARG0 is a constant, move it
3163 to ARG1 to reduce the number of tests below. */
3164 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
3165 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
3166 || code == BIT_AND_EXPR)
3167 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
3168 {
3169 tem = arg0; arg0 = arg1; arg1 = tem;
3170
3171 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
3172 TREE_OPERAND (t, 1) = tem;
3173 }
3174
3175 /* Now WINS is set as described above,
3176 ARG0 is the first operand of EXPR,
3177 and ARG1 is the second operand (if it has more than one operand).
3178
3179 First check for cases where an arithmetic operation is applied to a
3180 compound, conditional, or comparison operation. Push the arithmetic
3181 operation inside the compound or conditional to see if any folding
3182 can then be done. Convert comparison to conditional for this purpose.
3183 This also optimizes non-constant cases that used to be done in
3184 expand_expr.
3185
3186 Before we do that, see if this is a BIT_AND_EXPR or a BIT_IOR_EXPR,
3187 one of the operands is a comparison and the other is a comparison, a
3188 BIT_AND_EXPR with the constant 1, or a truth value. In that case, the
3189 code below would make the expression more complex. Change it to a
3190 TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
3191 TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
3192
3193 if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
3194 || code == EQ_EXPR || code == NE_EXPR)
3195 && ((truth_value_p (TREE_CODE (arg0))
3196 && (truth_value_p (TREE_CODE (arg1))
3197 || (TREE_CODE (arg1) == BIT_AND_EXPR
3198 && integer_onep (TREE_OPERAND (arg1, 1)))))
3199 || (truth_value_p (TREE_CODE (arg1))
3200 && (truth_value_p (TREE_CODE (arg0))
3201 || (TREE_CODE (arg0) == BIT_AND_EXPR
3202 && integer_onep (TREE_OPERAND (arg0, 1)))))))
3203 {
3204 t = fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR
3205 : code == BIT_IOR_EXPR ? TRUTH_OR_EXPR
3206 : TRUTH_XOR_EXPR,
3207 type, arg0, arg1));
3208
3209 if (code == EQ_EXPR)
3210 t = invert_truthvalue (t);
3211
3212 return t;
3213 }
3214
3215 if (TREE_CODE_CLASS (code) == '1')
3216 {
3217 if (TREE_CODE (arg0) == COMPOUND_EXPR)
3218 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3219 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
3220 else if (TREE_CODE (arg0) == COND_EXPR)
3221 {
3222 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
3223 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
3224 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
3225
3226 /* If this was a conversion, and all we did was to move it
3227 inside the COND_EXPR, bring it back out. But leave it if
3228 it is a conversion from integer to integer and the
3229 result precision is no wider than a word since such a
3230 conversion is cheap and may be optimized away by combine,
3231 while it couldn't if it were outside the COND_EXPR. Then return
3232 so we don't get into an infinite recursion loop taking the
3233 conversion out and then back in. */
3234
3235 if ((code == NOP_EXPR || code == CONVERT_EXPR
3236 || code == NON_LVALUE_EXPR)
3237 && TREE_CODE (t) == COND_EXPR
3238 && TREE_CODE (TREE_OPERAND (t, 1)) == code
3239 && TREE_CODE (TREE_OPERAND (t, 2)) == code
3240 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
3241 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0)))
3242 && ! (INTEGRAL_TYPE_P (TREE_TYPE (t))
3243 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)))
3244 && TYPE_PRECISION (TREE_TYPE (t)) <= BITS_PER_WORD))
3245 t = build1 (code, type,
3246 build (COND_EXPR,
3247 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
3248 TREE_OPERAND (t, 0),
3249 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
3250 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
3251 return t;
3252 }
3253 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3254 return fold (build (COND_EXPR, type, arg0,
3255 fold (build1 (code, type, integer_one_node)),
3256 fold (build1 (code, type, integer_zero_node))));
3257 }
3258 else if (TREE_CODE_CLASS (code) == '2'
3259 || TREE_CODE_CLASS (code) == '<')
3260 {
3261 if (TREE_CODE (arg1) == COMPOUND_EXPR)
3262 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3263 fold (build (code, type,
3264 arg0, TREE_OPERAND (arg1, 1))));
3265 else if (TREE_CODE (arg1) == COND_EXPR
3266 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
3267 {
3268 tree test, true_value, false_value;
3269
3270 if (TREE_CODE (arg1) == COND_EXPR)
3271 {
3272 test = TREE_OPERAND (arg1, 0);
3273 true_value = TREE_OPERAND (arg1, 1);
3274 false_value = TREE_OPERAND (arg1, 2);
3275 }
3276 else
3277 {
3278 tree testtype = TREE_TYPE (arg1);
3279 test = arg1;
3280 true_value = convert (testtype, integer_one_node);
3281 false_value = convert (testtype, integer_zero_node);
3282 }
3283
3284 /* If ARG0 is complex we want to make sure we only evaluate
3285 it once. Though this is only required if it is volatile, it
3286 might be more efficient even if it is not. However, if we
3287 succeed in folding one part to a constant, we do not need
3288 to make this SAVE_EXPR. Since we do this optimization
3289 primarily to see if we do end up with a constant and this
3290 SAVE_EXPR interferes with later optimizations, suppressing
3291 it when we can is important. */
3292
3293 if (TREE_CODE (arg0) != SAVE_EXPR
3294 && ((TREE_CODE (arg0) != VAR_DECL
3295 && TREE_CODE (arg0) != PARM_DECL)
3296 || TREE_SIDE_EFFECTS (arg0)))
3297 {
3298 tree lhs = fold (build (code, type, arg0, true_value));
3299 tree rhs = fold (build (code, type, arg0, false_value));
3300
3301 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3302 return fold (build (COND_EXPR, type, test, lhs, rhs));
3303
3304 arg0 = save_expr (arg0);
3305 }
3306
3307 test = fold (build (COND_EXPR, type, test,
3308 fold (build (code, type, arg0, true_value)),
3309 fold (build (code, type, arg0, false_value))));
3310 if (TREE_CODE (arg0) == SAVE_EXPR)
3311 return build (COMPOUND_EXPR, type,
3312 convert (void_type_node, arg0),
3313 strip_compound_expr (test, arg0));
3314 else
3315 return convert (type, test);
3316 }
3317
3318 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
3319 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3320 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3321 else if (TREE_CODE (arg0) == COND_EXPR
3322 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3323 {
3324 tree test, true_value, false_value;
3325
3326 if (TREE_CODE (arg0) == COND_EXPR)
3327 {
3328 test = TREE_OPERAND (arg0, 0);
3329 true_value = TREE_OPERAND (arg0, 1);
3330 false_value = TREE_OPERAND (arg0, 2);
3331 }
3332 else
3333 {
3334 tree testtype = TREE_TYPE (arg0);
3335 test = arg0;
3336 true_value = convert (testtype, integer_one_node);
3337 false_value = convert (testtype, integer_zero_node);
3338 }
3339
3340 if (TREE_CODE (arg1) != SAVE_EXPR
3341 && ((TREE_CODE (arg1) != VAR_DECL
3342 && TREE_CODE (arg1) != PARM_DECL)
3343 || TREE_SIDE_EFFECTS (arg1)))
3344 {
3345 tree lhs = fold (build (code, type, true_value, arg1));
3346 tree rhs = fold (build (code, type, false_value, arg1));
3347
3348 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs)
3349 || TREE_CONSTANT (arg1))
3350 return fold (build (COND_EXPR, type, test, lhs, rhs));
3351
3352 arg1 = save_expr (arg1);
3353 }
3354
3355 test = fold (build (COND_EXPR, type, test,
3356 fold (build (code, type, true_value, arg1)),
3357 fold (build (code, type, false_value, arg1))));
3358 if (TREE_CODE (arg1) == SAVE_EXPR)
3359 return build (COMPOUND_EXPR, type,
3360 convert (void_type_node, arg1),
3361 strip_compound_expr (test, arg1));
3362 else
3363 return convert (type, test);
3364 }
3365 }
3366 else if (TREE_CODE_CLASS (code) == '<'
3367 && TREE_CODE (arg0) == COMPOUND_EXPR)
3368 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3369 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3370 else if (TREE_CODE_CLASS (code) == '<'
3371 && TREE_CODE (arg1) == COMPOUND_EXPR)
3372 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3373 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3374
3375 switch (code)
3376 {
3377 case INTEGER_CST:
3378 case REAL_CST:
3379 case STRING_CST:
3380 case COMPLEX_CST:
3381 case CONSTRUCTOR:
3382 return t;
3383
3384 case CONST_DECL:
3385 return fold (DECL_INITIAL (t));
3386
3387 case NOP_EXPR:
3388 case FLOAT_EXPR:
3389 case CONVERT_EXPR:
3390 case FIX_TRUNC_EXPR:
3391 /* Other kinds of FIX are not handled properly by fold_convert. */
3392
3393 if (TREE_TYPE (TREE_OPERAND (t, 0)) == TREE_TYPE (t))
3394 return TREE_OPERAND (t, 0);
3395
3396 /* Handle cases of two conversions in a row. */
3397 if (TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3398 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3399 {
3400 tree inside_type = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3401 tree inter_type = TREE_TYPE (TREE_OPERAND (t, 0));
3402 tree final_type = TREE_TYPE (t);
3403 int inside_int = INTEGRAL_TYPE_P (inside_type);
3404 int inside_ptr = POINTER_TYPE_P (inside_type);
3405 int inside_float = FLOAT_TYPE_P (inside_type);
3406 int inside_prec = TYPE_PRECISION (inside_type);
3407 int inside_unsignedp = TREE_UNSIGNED (inside_type);
3408 int inter_int = INTEGRAL_TYPE_P (inter_type);
3409 int inter_ptr = POINTER_TYPE_P (inter_type);
3410 int inter_float = FLOAT_TYPE_P (inter_type);
3411 int inter_prec = TYPE_PRECISION (inter_type);
3412 int inter_unsignedp = TREE_UNSIGNED (inter_type);
3413 int final_int = INTEGRAL_TYPE_P (final_type);
3414 int final_ptr = POINTER_TYPE_P (final_type);
3415 int final_float = FLOAT_TYPE_P (final_type);
3416 int final_prec = TYPE_PRECISION (final_type);
3417 int final_unsignedp = TREE_UNSIGNED (final_type);
3418
3419 /* In addition to the cases of two conversions in a row
3420 handled below, if we are converting something to its own
3421 type via an object of identical or wider precision, neither
3422 conversion is needed. */
3423 if (inside_type == final_type
3424 && ((inter_int && final_int) || (inter_float && final_float))
3425 && inter_prec >= final_prec)
3426 return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
3427
3428 /* Likewise, if the intermediate and initial types are either both
3429 float or both integer, we don't need the middle conversion if it is
3430 at least as wide as the initial type and doesn't change the signedness
3431 (for integers). Avoid this if the final type is a pointer
3432 since then we sometimes need the inner conversion. */
3433 if ((((inter_int || inter_ptr) && (inside_int || inside_ptr))
3434 || (inter_float && inside_float))
3435 && inter_prec >= inside_prec
3436 && (inter_float || inter_unsignedp == inside_unsignedp)
3437 && ! final_ptr)
3438 return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3439
3440 /* Two conversions in a row are not needed unless:
3441 - some conversion is floating-point (overstrict for now), or
3442 - the intermediate type is narrower than both initial and
3443 final, or
3444 - the intermediate type and innermost type differ in signedness,
3445 and the outermost type is wider than the intermediate, or
3446 - the initial type is a pointer type and the precisions of the
3447 intermediate and final types differ, or
3448 - the final type is a pointer type and the precisions of the
3449 initial and intermediate types differ. */
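/* For instance (assuming long is wider than int), (int) (long) I for an
int I collapses to I, since the wider intermediate type is harmless,
while (int) (char) I must be left alone: the intermediate type is
narrower than both, so the truncation is significant.  */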
3450 if (! inside_float && ! inter_float && ! final_float
3451 && (inter_prec > inside_prec || inter_prec > final_prec)
3452 && ! (inside_int && inter_int
3453 && inter_unsignedp != inside_unsignedp
3454 && inter_prec < final_prec)
3455 && ((inter_unsignedp && inter_prec > inside_prec)
3456 == (final_unsignedp && final_prec > inter_prec))
3457 && ! (inside_ptr && inter_prec != final_prec)
3458 && ! (final_ptr && inside_prec != inter_prec))
3459 return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3460 }
3461
3462 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
3463 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
3464 /* Detect assigning a bitfield. */
3465 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
3466 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
3467 {
3468 /* Don't leave an assignment inside a conversion
3469 unless assigning a bitfield. */
3470 tree prev = TREE_OPERAND (t, 0);
3471 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
3472 /* First do the assignment, then return converted constant. */
3473 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
3474 TREE_USED (t) = 1;
3475 return t;
3476 }
3477 if (!wins)
3478 {
3479 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
3480 return t;
3481 }
3482 return fold_convert (t, arg0);
3483
3484 #if 0 /* This loses on &"foo"[0]. */
3485 case ARRAY_REF:
3486 {
3487 int i;
3488
3489 /* Fold an expression like: "foo"[2] */
3490 if (TREE_CODE (arg0) == STRING_CST
3491 && TREE_CODE (arg1) == INTEGER_CST
3492 && !TREE_INT_CST_HIGH (arg1)
3493 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
3494 {
3495 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
3496 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
3497 force_fit_type (t, 0);
3498 }
3499 }
3500 return t;
3501 #endif /* 0 */
3502
3503 case COMPONENT_REF:
3504 if (TREE_CODE (arg0) == CONSTRUCTOR)
3505 {
3506 tree m = purpose_member (arg1, CONSTRUCTOR_ELTS (arg0));
3507 if (m)
3508 t = TREE_VALUE (m);
3509 }
3510 return t;
3511
3512 case RANGE_EXPR:
3513 TREE_CONSTANT (t) = wins;
3514 return t;
3515
3516 case NEGATE_EXPR:
3517 if (wins)
3518 {
3519 if (TREE_CODE (arg0) == INTEGER_CST)
3520 {
3521 HOST_WIDE_INT low, high;
3522 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3523 TREE_INT_CST_HIGH (arg0),
3524 &low, &high);
3525 t = build_int_2 (low, high);
3526 TREE_TYPE (t) = type;
3527 TREE_OVERFLOW (t)
3528 = (TREE_OVERFLOW (arg0)
3529 | force_fit_type (t, overflow));
3530 TREE_CONSTANT_OVERFLOW (t)
3531 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3532 }
3533 else if (TREE_CODE (arg0) == REAL_CST)
3534 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3535 TREE_TYPE (t) = type;
3536 }
3537 else if (TREE_CODE (arg0) == NEGATE_EXPR)
3538 return TREE_OPERAND (arg0, 0);
3539
3540 /* Convert - (a - b) to (b - a) for non-floating-point. */
3541 else if (TREE_CODE (arg0) == MINUS_EXPR && ! FLOAT_TYPE_P (type))
3542 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
3543 TREE_OPERAND (arg0, 0));
3544
3545 return t;
3546
3547 case ABS_EXPR:
3548 if (wins)
3549 {
3550 if (TREE_CODE (arg0) == INTEGER_CST)
3551 {
3552 if (! TREE_UNSIGNED (type)
3553 && TREE_INT_CST_HIGH (arg0) < 0)
3554 {
3555 HOST_WIDE_INT low, high;
3556 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3557 TREE_INT_CST_HIGH (arg0),
3558 &low, &high);
3559 t = build_int_2 (low, high);
3560 TREE_TYPE (t) = type;
3561 TREE_OVERFLOW (t)
3562 = (TREE_OVERFLOW (arg0)
3563 | force_fit_type (t, overflow));
3564 TREE_CONSTANT_OVERFLOW (t)
3565 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3566 }
3567 }
3568 else if (TREE_CODE (arg0) == REAL_CST)
3569 {
3570 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
3571 t = build_real (type,
3572 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3573 }
3574 TREE_TYPE (t) = type;
3575 }
3576 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
3577 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
3578 return t;
3579
3580 case CONJ_EXPR:
3581 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
3582 return arg0;
3583 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
3584 return build (COMPLEX_EXPR, TREE_TYPE (arg0),
3585 TREE_OPERAND (arg0, 0),
3586 fold (build1 (NEGATE_EXPR,
3587 TREE_TYPE (TREE_TYPE (arg0)),
3588 TREE_OPERAND (arg0, 1))));
3589 else if (TREE_CODE (arg0) == COMPLEX_CST)
3590 return build_complex (TREE_OPERAND (arg0, 0),
3591 fold (build1 (NEGATE_EXPR,
3592 TREE_TYPE (TREE_TYPE (arg0)),
3593 TREE_OPERAND (arg0, 1))));
3594 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
3595 return fold (build (TREE_CODE (arg0), type,
3596 fold (build1 (CONJ_EXPR, type,
3597 TREE_OPERAND (arg0, 0))),
3598 fold (build1 (CONJ_EXPR,
3599 type, TREE_OPERAND (arg0, 1)))));
3600 else if (TREE_CODE (arg0) == CONJ_EXPR)
3601 return TREE_OPERAND (arg0, 0);
3602 return t;
3603
3604 case BIT_NOT_EXPR:
3605 if (wins)
3606 {
3607 if (TREE_CODE (arg0) == INTEGER_CST)
3608 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
3609 ~ TREE_INT_CST_HIGH (arg0));
3610 TREE_TYPE (t) = type;
3611 force_fit_type (t, 0);
3612 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0);
3613 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
3614 }
3615 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
3616 return TREE_OPERAND (arg0, 0);
3617 return t;
3618
3619 case PLUS_EXPR:
3620 /* A + (-B) -> A - B */
3621 if (TREE_CODE (arg1) == NEGATE_EXPR)
3622 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3623 else if (! FLOAT_TYPE_P (type))
3624 {
3625 if (integer_zerop (arg1))
3626 return non_lvalue (convert (type, arg0));
3627
3628 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3629 with a constant, and the two constants have no bits in common,
3630 we should treat this as a BIT_IOR_EXPR since this may produce more
3631 simplifications. */
3632 if (TREE_CODE (arg0) == BIT_AND_EXPR
3633 && TREE_CODE (arg1) == BIT_AND_EXPR
3634 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3635 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3636 && integer_zerop (const_binop (BIT_AND_EXPR,
3637 TREE_OPERAND (arg0, 1),
3638 TREE_OPERAND (arg1, 1), 0)))
3639 {
3640 code = BIT_IOR_EXPR;
3641 goto bit_ior;
3642 }
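/* For instance, (x & 0xf0) + (x & 0x0f): the two mask constants
   share no bits, so the addition is handled as
   (x & 0xf0) | (x & 0x0f) and gets the BIT_IOR simplifications.  */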
3643
3644 /* (A * C) + (B * C) -> (A+B) * C. Since we are most concerned
3645 about the case where C is a constant, just try one of the
3646 four possibilities. */
3647
3648 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3649 && operand_equal_p (TREE_OPERAND (arg0, 1),
3650 TREE_OPERAND (arg1, 1), 0))
3651 return fold (build (MULT_EXPR, type,
3652 fold (build (PLUS_EXPR, type,
3653 TREE_OPERAND (arg0, 0),
3654 TREE_OPERAND (arg1, 0))),
3655 TREE_OPERAND (arg0, 1)));
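/* For instance, a*4 + b*4 folds to (a + b) * 4 here, and the
   inner PLUS_EXPR is itself re-folded.  */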
3656 }
3657 /* In IEEE floating point, x+0 may not equal x. */
3658 else if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3659 || flag_fast_math)
3660 && real_zerop (arg1))
3661 return non_lvalue (convert (type, arg0));
3662 associate:
3663 /* In most languages, we can't reassociate operations on floats
3664 through parentheses. Rather than remember where the parentheses
3665 were, we don't associate floats at all. It shouldn't matter much.
3666 However, associating multiplications is only very slightly
3667 inaccurate, so we do that when -ffast-math is specified. */
3668 if (FLOAT_TYPE_P (type)
3669 && ! (flag_fast_math && code == MULT_EXPR))
3670 goto binary;
3671
3672 /* The varsign == -1 cases happen only for addition and subtraction.
3673 They mean that the arg that was split was really CON minus VAR.
3674 The rest of the code applies to all associative operations. */
3675 if (!wins)
3676 {
3677 tree var, con;
3678 int varsign;
3679
3680 if (split_tree (arg0, code, &var, &con, &varsign))
3681 {
3682 if (varsign == -1)
3683 {
3684 /* EXPR is (CON-VAR) +- ARG1. */
3685 /* If it is + and VAR==ARG1, return just CONST. */
3686 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
3687 return convert (TREE_TYPE (t), con);
3688
3689 /* If ARG0 is a constant, don't change things around;
3690 instead keep all the constant computations together. */
3691
3692 if (TREE_CONSTANT (arg0))
3693 return t;
3694
3695 /* Otherwise return (CON +- ARG1) - VAR. */
3696 t = build (MINUS_EXPR, type,
3697 fold (build (code, type, con, arg1)), var);
3698 }
3699 else
3700 {
3701 /* EXPR is (VAR+CON) +- ARG1. */
3702 /* If it is - and VAR==ARG1, return just CONST. */
3703 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
3704 return convert (TREE_TYPE (t), con);
3705
3706 /* If ARG0 is a constant, don't change things around;
3707 instead keep all the constant computations together. */
3708
3709 if (TREE_CONSTANT (arg0))
3710 return t;
3711
3712 /* Otherwise return VAR +- (ARG1 +- CON). */
3713 tem = fold (build (code, type, arg1, con));
3714 t = build (code, type, var, tem);
3715
3716 if (integer_zerop (tem)
3717 && (code == PLUS_EXPR || code == MINUS_EXPR))
3718 return convert (type, var);
3719 /* If we have x +/- (c - d) [c an explicit integer]
3720 change it to x -/+ (d - c) since if d is relocatable
3721 then the latter can be a single immediate insn
3722 and the former cannot. */
3723 if (TREE_CODE (tem) == MINUS_EXPR
3724 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
3725 {
3726 tree tem1 = TREE_OPERAND (tem, 1);
3727 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
3728 TREE_OPERAND (tem, 0) = tem1;
3729 TREE_SET_CODE (t,
3730 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3731 }
3732 }
3733 return t;
3734 }
3735
3736 if (split_tree (arg1, code, &var, &con, &varsign))
3737 {
3738 if (TREE_CONSTANT (arg1))
3739 return t;
3740
3741 if (varsign == -1)
3742 TREE_SET_CODE (t,
3743 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3744
3745 /* EXPR is ARG0 +- (CON +- VAR). */
3746 if (TREE_CODE (t) == MINUS_EXPR
3747 && operand_equal_p (var, arg0, 0))
3748 {
3749 /* If VAR and ARG0 cancel, return just CON or -CON. */
3750 if (code == PLUS_EXPR)
3751 return convert (TREE_TYPE (t), con);
3752 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
3753 convert (TREE_TYPE (t), con)));
3754 }
3755
3756 t = build (TREE_CODE (t), type,
3757 fold (build (code, TREE_TYPE (t), arg0, con)), var);
3758
3759 if (integer_zerop (TREE_OPERAND (t, 0))
3760 && TREE_CODE (t) == PLUS_EXPR)
3761 return convert (TREE_TYPE (t), var);
3762 return t;
3763 }
3764 }
3765 binary:
3766 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3767 if (TREE_CODE (arg1) == REAL_CST)
3768 return t;
3769 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3770 if (wins)
3771 t1 = const_binop (code, arg0, arg1, 0);
3772 if (t1 != NULL_TREE)
3773 {
3774 /* The return value should always have
3775 the same type as the original expression. */
3776 TREE_TYPE (t1) = TREE_TYPE (t);
3777 return t1;
3778 }
3779 return t;
3780
3781 case MINUS_EXPR:
3782 if (! FLOAT_TYPE_P (type))
3783 {
3784 if (! wins && integer_zerop (arg0))
3785 return build1 (NEGATE_EXPR, type, arg1);
3786 if (integer_zerop (arg1))
3787 return non_lvalue (convert (type, arg0));
3788
3789 /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
3790 about the case where C is a constant, just try one of the
3791 four possibilities. */
3792
3793 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3794 && operand_equal_p (TREE_OPERAND (arg0, 1),
3795 TREE_OPERAND (arg1, 1), 0))
3796 return fold (build (MULT_EXPR, type,
3797 fold (build (MINUS_EXPR, type,
3798 TREE_OPERAND (arg0, 0),
3799 TREE_OPERAND (arg1, 0))),
3800 TREE_OPERAND (arg0, 1)));
3801 }
3802 /* Convert A - (-B) to A + B. */
3803 else if (TREE_CODE (arg1) == NEGATE_EXPR)
3804 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3805
3806 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3807 || flag_fast_math)
3808 {
3809 /* Except with IEEE floating point, 0-x equals -x. */
3810 if (! wins && real_zerop (arg0))
3811 return build1 (NEGATE_EXPR, type, arg1);
3812 /* Except with IEEE floating point, x-0 equals x. */
3813 if (real_zerop (arg1))
3814 return non_lvalue (convert (type, arg0));
3815 }
3816
3817 /* Fold &x - &x. This can happen from &x.foo - &x.
3818 This is unsafe for certain floats even in non-IEEE formats.
3819 In IEEE, it is unsafe because it gives the wrong result for NaNs.
3820 Also note that operand_equal_p is always false if an operand
3821 is volatile. */
3822
3823 if ((! FLOAT_TYPE_P (type) || flag_fast_math)
3824 && operand_equal_p (arg0, arg1, 0))
3825 return convert (type, integer_zero_node);
3826
3827 goto associate;
3828
3829 case MULT_EXPR:
3830 if (! FLOAT_TYPE_P (type))
3831 {
3832 if (integer_zerop (arg1))
3833 return omit_one_operand (type, arg1, arg0);
3834 if (integer_onep (arg1))
3835 return non_lvalue (convert (type, arg0));
3836
3837 /* ((A / C) * C) is A if the division is an
3838 EXACT_DIV_EXPR. Since C is normally a constant,
3839 just check for one of the four possibilities. */
3840
3841 if (TREE_CODE (arg0) == EXACT_DIV_EXPR
3842 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
3843 return TREE_OPERAND (arg0, 0);
3844
3845 /* (a * (1 << b)) is (a << b) */
3846 if (TREE_CODE (arg1) == LSHIFT_EXPR
3847 && integer_onep (TREE_OPERAND (arg1, 0)))
3848 return fold (build (LSHIFT_EXPR, type, arg0,
3849 TREE_OPERAND (arg1, 1)));
3850 if (TREE_CODE (arg0) == LSHIFT_EXPR
3851 && integer_onep (TREE_OPERAND (arg0, 0)))
3852 return fold (build (LSHIFT_EXPR, type, arg1,
3853 TREE_OPERAND (arg0, 1)));
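/* For instance, both a * (1 << b) and (1 << b) * a fold
   to a << b.  */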
3854 }
3855 else
3856 {
3857 /* x*0 is 0, except for IEEE floating point. */
3858 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3859 || flag_fast_math)
3860 && real_zerop (arg1))
3861 return omit_one_operand (type, arg1, arg0);
3862 /* In IEEE floating point, x*1 is not equivalent to x for signaling
3863 NaNs. However, ANSI says we can drop signals,
3864 so we can do this anyway. */
3865 if (real_onep (arg1))
3866 return non_lvalue (convert (type, arg0));
3867 /* x*2 is x+x */
3868 if (! wins && real_twop (arg1))
3869 {
3870 tree arg = save_expr (arg0);
3871 return build (PLUS_EXPR, type, arg, arg);
3872 }
3873 }
3874 goto associate;
3875
3876 case BIT_IOR_EXPR:
3877 bit_ior:
3878 if (integer_all_onesp (arg1))
3879 return omit_one_operand (type, arg1, arg0);
3880 if (integer_zerop (arg1))
3881 return non_lvalue (convert (type, arg0));
3882 t1 = distribute_bit_expr (code, type, arg0, arg1);
3883 if (t1 != NULL_TREE)
3884 return t1;
3885
3886 /* (A << C1) | (A >> C2), where A is unsigned and C1+C2 is the bit
3887 width of A, is a rotate of A left by C1 bits. */
3888
3889 if ((TREE_CODE (arg0) == RSHIFT_EXPR
3890 || TREE_CODE (arg0) == LSHIFT_EXPR)
3891 && (TREE_CODE (arg1) == RSHIFT_EXPR
3892 || TREE_CODE (arg1) == LSHIFT_EXPR)
3893 && TREE_CODE (arg0) != TREE_CODE (arg1)
3894 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
3895 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
3896 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3897 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3898 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3899 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
3900 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3901 + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
3902 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
3903 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
3904 TREE_CODE (arg0) == LSHIFT_EXPR
3905 ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
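/* For instance, assuming a 32-bit unsigned x,
   (x << 3) | (x >> 29) becomes a left rotate of x by 3.  */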
3906
3907 goto associate;
3908
3909 case BIT_XOR_EXPR:
3910 if (integer_zerop (arg1))
3911 return non_lvalue (convert (type, arg0));
3912 if (integer_all_onesp (arg1))
3913 return fold (build1 (BIT_NOT_EXPR, type, arg0));
3914 goto associate;
3915
3916 case BIT_AND_EXPR:
3917 bit_and:
3918 if (integer_all_onesp (arg1))
3919 return non_lvalue (convert (type, arg0));
3920 if (integer_zerop (arg1))
3921 return omit_one_operand (type, arg1, arg0);
3922 t1 = distribute_bit_expr (code, type, arg0, arg1);
3923 if (t1 != NULL_TREE)
3924 return t1;
3925 /* Simplify ((int)c & 0377) into (int)c, if c is unsigned char. */
3926 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
3927 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
3928 {
3929 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
3930 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3931 && (~TREE_INT_CST_LOW (arg0)
3932 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3933 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
3934 }
3935 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
3936 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
3937 {
3938 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
3939 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3940 && (~TREE_INT_CST_LOW (arg1)
3941 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3942 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
3943 }
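/* For instance, with unsigned char c, ((int) c) & 0377 folds to
   (int) c in either operand order: the mask already covers all
   8 bits of c, so the AND cannot clear anything.  */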
3944 goto associate;
3945
3946 case BIT_ANDTC_EXPR:
3947 if (integer_all_onesp (arg0))
3948 return non_lvalue (convert (type, arg1));
3949 if (integer_zerop (arg0))
3950 return omit_one_operand (type, arg0, arg1);
3951 if (TREE_CODE (arg1) == INTEGER_CST)
3952 {
3953 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
3954 code = BIT_AND_EXPR;
3955 goto bit_and;
3956 }
3957 goto binary;
3958
3959 case RDIV_EXPR:
3960 /* In most cases, do nothing with a divide by zero. */
3961 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3962 #ifndef REAL_INFINITY
3963 if (TREE_CODE (arg1) == REAL_CST && real_zerop (arg1))
3964 return t;
3965 #endif
3966 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3967
3968 /* In IEEE floating point, x/1 is not equivalent to x for signaling NaNs.
3969 However, ANSI says we can drop signals, so we can do this anyway. */
3970 if (real_onep (arg1))
3971 return non_lvalue (convert (type, arg0));
3972
3973 /* If ARG1 is a constant, we can convert this to a multiply by the
3974 reciprocal. This does not have the same rounding properties,
3975 so only do this if -ffast-math. We can actually always safely
3976 do it if ARG1 is a power of two, but it's hard to tell if it is
3977 or not in a portable manner. */
3978 if (TREE_CODE (arg1) == REAL_CST && flag_fast_math
3979 && 0 != (tem = const_binop (code, build_real (type, dconst1),
3980 arg1, 0)))
3981 return fold (build (MULT_EXPR, type, arg0, tem));
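/* For instance, with -ffast-math, x / 5.0 is rewritten as
   x * 0.2; without it the division is kept, since the
   reciprocal may round differently.  */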
3982
3983 goto binary;
3984
3985 case TRUNC_DIV_EXPR:
3986 case ROUND_DIV_EXPR:
3987 case FLOOR_DIV_EXPR:
3988 case CEIL_DIV_EXPR:
3989 case EXACT_DIV_EXPR:
3990 if (integer_onep (arg1))
3991 return non_lvalue (convert (type, arg0));
3992 if (integer_zerop (arg1))
3993 return t;
3994
3995 /* If we have ((a / C1) / C2) where both divisions are of the same kind,
3996 try to simplify. First see if C1 * C2 overflows or not. */
3997 if (TREE_CODE (arg0) == code && TREE_CODE (arg1) == INTEGER_CST
3998 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
3999 {
4000 tree new_divisor;
4001
4002 new_divisor = const_binop (MULT_EXPR, TREE_OPERAND (arg0, 1), arg1, 0);
4003 tem = const_binop (FLOOR_DIV_EXPR, new_divisor, arg1, 0);
4004
4005 if (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_LOW (tem)
4006 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_HIGH (tem))
4007 {
4008 /* If no overflow, divide by C1*C2. */
4009 return fold (build (code, type, TREE_OPERAND (arg0, 0), new_divisor));
4010 }
4011 }
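/* For instance, (a / 10) / 20 folds to a / 200, provided
   10 * 20 does not overflow the type.  */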
4012
4013 /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
4014 where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
4015 expressions, which often appear in the offsets or sizes of
4016 objects with a varying size. Only deal with positive divisors
4017 and multiplicands. If C2 is negative, we must have C2 % C3 == 0.
4018
4019 Look for NOPs and SAVE_EXPRs inside. */
4020
4021 if (TREE_CODE (arg1) == INTEGER_CST
4022 && tree_int_cst_sgn (arg1) >= 0)
4023 {
4024 int have_save_expr = 0;
4025 tree c2 = integer_zero_node;
4026 tree xarg0 = arg0;
4027
4028 if (TREE_CODE (xarg0) == SAVE_EXPR)
4029 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
4030
4031 STRIP_NOPS (xarg0);
4032
4033 if (TREE_CODE (xarg0) == PLUS_EXPR
4034 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4035 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4036 else if (TREE_CODE (xarg0) == MINUS_EXPR
4037 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4038 /* If we are doing this computation unsigned, the negate
4039 is incorrect. */
4040 && ! TREE_UNSIGNED (type))
4041 {
4042 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4043 xarg0 = TREE_OPERAND (xarg0, 0);
4044 }
4045
4046 if (TREE_CODE (xarg0) == SAVE_EXPR)
4047 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
4048
4049 STRIP_NOPS (xarg0);
4050
4051 if (TREE_CODE (xarg0) == MULT_EXPR
4052 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4053 && tree_int_cst_sgn (TREE_OPERAND (xarg0, 1)) >= 0
4054 && (integer_zerop (const_binop (TRUNC_MOD_EXPR,
4055 TREE_OPERAND (xarg0, 1), arg1, 1))
4056 || integer_zerop (const_binop (TRUNC_MOD_EXPR, arg1,
4057 TREE_OPERAND (xarg0, 1), 1)))
4058 && (tree_int_cst_sgn (c2) >= 0
4059 || integer_zerop (const_binop (TRUNC_MOD_EXPR, c2,
4060 arg1, 1))))
4061 {
4062 tree outer_div = integer_one_node;
4063 tree c1 = TREE_OPERAND (xarg0, 1);
4064 tree c3 = arg1;
4065
4066 /* If C3 > C1, set them equal and do a divide by
4067 C3/C1 at the end of the operation. */
4068 if (tree_int_cst_lt (c1, c3))
4069 outer_div = const_binop (code, c3, c1, 0), c3 = c1;
4070
4071 /* The result is A * (C1/C3) + (C2/C3). */
4072 t = fold (build (PLUS_EXPR, type,
4073 fold (build (MULT_EXPR, type,
4074 TREE_OPERAND (xarg0, 0),
4075 const_binop (code, c1, c3, 1))),
4076 const_binop (code, c2, c3, 1)));
4077
4078 if (! integer_onep (outer_div))
4079 t = fold (build (code, type, t, convert (type, outer_div)));
4080
4081 if (have_save_expr)
4082 t = save_expr (t);
4083
4084 return t;
4085 }
4086 }
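/* For instance, (a * 8 + 4) / 4 folds to a * 2 + 1, and
   (a * 4) / 8 folds to a / 2 via the outer_div step.  */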
4087
4088 goto binary;
4089
4090 case CEIL_MOD_EXPR:
4091 case FLOOR_MOD_EXPR:
4092 case ROUND_MOD_EXPR:
4093 case TRUNC_MOD_EXPR:
4094 if (integer_onep (arg1))
4095 return omit_one_operand (type, integer_zero_node, arg0);
4096 if (integer_zerop (arg1))
4097 return t;
4098
4099 /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
4100 where C1 % C3 == 0. Handle similarly to the division case,
4101 but don't bother with SAVE_EXPRs. */
4102
4103 if (TREE_CODE (arg1) == INTEGER_CST
4104 && ! integer_zerop (arg1))
4105 {
4106 tree c2 = integer_zero_node;
4107 tree xarg0 = arg0;
4108
4109 if (TREE_CODE (xarg0) == PLUS_EXPR
4110 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4111 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4112 else if (TREE_CODE (xarg0) == MINUS_EXPR
4113 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4114 && ! TREE_UNSIGNED (type))
4115 {
4116 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4117 xarg0 = TREE_OPERAND (xarg0, 0);
4118 }
4119
4120 STRIP_NOPS (xarg0);
4121
4122 if (TREE_CODE (xarg0) == MULT_EXPR
4123 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4124 && integer_zerop (const_binop (TRUNC_MOD_EXPR,
4125 TREE_OPERAND (xarg0, 1),
4126 arg1, 1))
4127 && tree_int_cst_sgn (c2) >= 0)
4128 /* The result is (C2%C3). */
4129 return omit_one_operand (type, const_binop (code, c2, arg1, 1),
4130 TREE_OPERAND (xarg0, 0));
4131 }
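/* For instance, (a * 12 + 5) % 4 folds to the constant 1
   (i.e. 5 % 4); omit_one_operand keeps a's evaluation only
   if it has side effects.  */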
4132
4133 goto binary;
4134
4135 case LSHIFT_EXPR:
4136 case RSHIFT_EXPR:
4137 case LROTATE_EXPR:
4138 case RROTATE_EXPR:
4139 if (integer_zerop (arg1))
4140 return non_lvalue (convert (type, arg0));
4141 /* Since a negative shift count is not well-defined,
4142 don't try to compute it in the compiler. */
4143 if (TREE_CODE (arg1) == INTEGER_CST && tree_int_cst_sgn (arg1) < 0)
4144 return t;
4145 /* Rewrite an LROTATE_EXPR by a constant into an
4146 RROTATE_EXPR by a new constant. */
4147 if (code == LROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST)
4148 {
4149 TREE_SET_CODE (t, RROTATE_EXPR);
4150 code = RROTATE_EXPR;
4151 TREE_OPERAND (t, 1) = arg1
4152 = const_binop
4153 (MINUS_EXPR,
4154 convert (TREE_TYPE (arg1),
4155 build_int_2 (GET_MODE_BITSIZE (TYPE_MODE (type)), 0)),
4156 arg1, 0);
4157 if (tree_int_cst_sgn (arg1) < 0)
4158 return t;
4159 }
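/* For instance, assuming a 32-bit mode, a left rotate by 3
   is rewritten as a right rotate by 29, so constant rotates
   are canonicalized to RROTATE_EXPR.  */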
4160
4161 /* If we have a rotate of a bit operation with the rotate count and
4162 the second operand of the bit operation both constant,
4163 permute the two operations. */
4164 if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
4165 && (TREE_CODE (arg0) == BIT_AND_EXPR
4166 || TREE_CODE (arg0) == BIT_ANDTC_EXPR
4167 || TREE_CODE (arg0) == BIT_IOR_EXPR
4168 || TREE_CODE (arg0) == BIT_XOR_EXPR)
4169 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
4170 return fold (build (TREE_CODE (arg0), type,
4171 fold (build (code, type,
4172 TREE_OPERAND (arg0, 0), arg1)),
4173 fold (build (code, type,
4174 TREE_OPERAND (arg0, 1), arg1))));
4175
4176 /* Two consecutive rotates adding up to the width of the mode can
4177 be ignored. */
4178 if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
4179 && TREE_CODE (arg0) == RROTATE_EXPR
4180 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
4181 && TREE_INT_CST_HIGH (arg1) == 0
4182 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
4183 && ((TREE_INT_CST_LOW (arg1)
4184 + TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)))
4185 == GET_MODE_BITSIZE (TYPE_MODE (type))))
4186 return TREE_OPERAND (arg0, 0);
4187
4188 goto binary;
4189
4190 case MIN_EXPR:
4191 if (operand_equal_p (arg0, arg1, 0))
4192 return arg0;
4193 if (INTEGRAL_TYPE_P (type)
4194 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
4195 return omit_one_operand (type, arg1, arg0);
4196 goto associate;
4197
4198 case MAX_EXPR:
4199 if (operand_equal_p (arg0, arg1, 0))
4200 return arg0;
4201 if (INTEGRAL_TYPE_P (type)
4202 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
4203 return omit_one_operand (type, arg1, arg0);
4204 goto associate;
4205
4206 case TRUTH_NOT_EXPR:
4207 /* Note that the operand of this must be an int
4208 and its values must be 0 or 1.
4209 ("true" is a fixed value perhaps depending on the language,
4210 but we don't handle values other than 1 correctly yet.) */
4211 tem = invert_truthvalue (arg0);
4212 /* Avoid infinite recursion. */
4213 if (TREE_CODE (tem) == TRUTH_NOT_EXPR)
4214 return t;
4215 return convert (type, tem);
4216
4217 case TRUTH_ANDIF_EXPR:
4218 /* Note that the operands of this must be ints
4219 and their values must be 0 or 1.
4220 ("true" is a fixed value perhaps depending on the language.) */
4221 /* If first arg is constant zero, return it. */
4222 if (integer_zerop (arg0))
4223 return arg0;
4224 case TRUTH_AND_EXPR:
4225 /* If either arg is constant true, drop it. */
4226 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4227 return non_lvalue (arg1);
4228 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4229 return non_lvalue (arg0);
4230 /* If second arg is constant zero, result is zero, but first arg
4231 must be evaluated. */
4232 if (integer_zerop (arg1))
4233 return omit_one_operand (type, arg1, arg0);
4234
4235 truth_andor:
4236 /* We only do these simplifications if we are optimizing. */
4237 if (!optimize)
4238 return t;
4239
4240 /* Check for things like (A || B) && (A || C). We can convert this
4241 to A || (B && C). Note that either operator can be any of the four
4242 truth and/or operations and the transformation will still be
4243 valid. Also note that we only care about order for the
4244 ANDIF and ORIF operators. */
4245 if (TREE_CODE (arg0) == TREE_CODE (arg1)
4246 && (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
4247 || TREE_CODE (arg0) == TRUTH_ORIF_EXPR
4248 || TREE_CODE (arg0) == TRUTH_AND_EXPR
4249 || TREE_CODE (arg0) == TRUTH_OR_EXPR))
4250 {
4251 tree a00 = TREE_OPERAND (arg0, 0);
4252 tree a01 = TREE_OPERAND (arg0, 1);
4253 tree a10 = TREE_OPERAND (arg1, 0);
4254 tree a11 = TREE_OPERAND (arg1, 1);
4255 int commutative = ((TREE_CODE (arg0) == TRUTH_OR_EXPR
4256 || TREE_CODE (arg0) == TRUTH_AND_EXPR)
4257 && (code == TRUTH_AND_EXPR
4258 || code == TRUTH_OR_EXPR));
4259
4260 if (operand_equal_p (a00, a10, 0))
4261 return fold (build (TREE_CODE (arg0), type, a00,
4262 fold (build (code, type, a01, a11))));
4263 else if (commutative && operand_equal_p (a00, a11, 0))
4264 return fold (build (TREE_CODE (arg0), type, a00,
4265 fold (build (code, type, a01, a10))));
4266 else if (commutative && operand_equal_p (a01, a10, 0))
4267 return fold (build (TREE_CODE (arg0), type, a01,
4268 fold (build (code, type, a00, a11))));
4269
4270 /* This case is tricky because we must either have commutative
4271 operators or else A10 must not have side-effects. */
4272
4273 else if ((commutative || ! TREE_SIDE_EFFECTS (a10))
4274 && operand_equal_p (a01, a11, 0))
4275 return fold (build (TREE_CODE (arg0), type,
4276 fold (build (code, type, a00, a10)),
4277 a01));
4278 }
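/* For instance, (a || b) && (a || c) folds to a || (b && c);
   with the non-short-circuit TRUTH_AND/TRUTH_OR codes the
   shared operand may match in either position.  */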
4279
4280 /* Check for the possibility of merging component references. If our
4281 lhs is another similar operation, try to merge its rhs with our
4282 rhs. Then try to merge our lhs and rhs. */
4283 if (TREE_CODE (arg0) == code
4284 && 0 != (tem = fold_truthop (code, type,
4285 TREE_OPERAND (arg0, 1), arg1)))
4286 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
4287
4288 if ((tem = fold_truthop (code, type, arg0, arg1)) != 0)
4289 return tem;
4290
4291 return t;
4292
4293 case TRUTH_ORIF_EXPR:
4294 /* Note that the operands of this must be ints
4295 and their values must be 0 or true.
4296 ("true" is a fixed value perhaps depending on the language.) */
4297 /* If first arg is constant true, return it. */
4298 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4299 return arg0;
4300 case TRUTH_OR_EXPR:
4301 /* If either arg is constant zero, drop it. */
4302 if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
4303 return non_lvalue (arg1);
4304 if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
4305 return non_lvalue (arg0);
4306 /* If second arg is constant true, result is true, but we must
4307 evaluate first arg. */
4308 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4309 return omit_one_operand (type, arg1, arg0);
4310 goto truth_andor;
4311
4312 case TRUTH_XOR_EXPR:
4313 /* If either arg is constant zero, drop it. */
4314 if (integer_zerop (arg0))
4315 return non_lvalue (arg1);
4316 if (integer_zerop (arg1))
4317 return non_lvalue (arg0);
4318 /* If either arg is constant true, this is a logical inversion. */
4319 if (integer_onep (arg0))
4320 return non_lvalue (invert_truthvalue (arg1));
4321 if (integer_onep (arg1))
4322 return non_lvalue (invert_truthvalue (arg0));
4323 return t;
4324
4325 case EQ_EXPR:
4326 case NE_EXPR:
4327 case LT_EXPR:
4328 case GT_EXPR:
4329 case LE_EXPR:
4330 case GE_EXPR:
4331 /* If one arg is a constant integer, put it last. */
4332 if (TREE_CODE (arg0) == INTEGER_CST
4333 && TREE_CODE (arg1) != INTEGER_CST)
4334 {
4335 TREE_OPERAND (t, 0) = arg1;
4336 TREE_OPERAND (t, 1) = arg0;
4337 arg0 = TREE_OPERAND (t, 0);
4338 arg1 = TREE_OPERAND (t, 1);
4339 code = swap_tree_comparison (code);
4340 TREE_SET_CODE (t, code);
4341 }
4342
4343 /* Convert foo++ == CONST into ++foo == CONST + INCR.
4344 First, see if one arg is constant; find the constant arg
4345 and the other one. */
4346 {
4347 tree constop = 0, varop;
4348 int constopnum = -1;
4349
4350 if (TREE_CONSTANT (arg1))
4351 constopnum = 1, constop = arg1, varop = arg0;
4352 if (TREE_CONSTANT (arg0))
4353 constopnum = 0, constop = arg0, varop = arg1;
4354
4355 if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
4356 {
4357 /* This optimization is invalid for ordered comparisons
4358 if CONST+INCR overflows or if foo+incr might overflow.
4359 This optimization is invalid for floating point due to rounding.
4360 For pointer types we assume overflow doesn't happen. */
4361 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4362 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4363 && (code == EQ_EXPR || code == NE_EXPR)))
4364 {
4365 tree newconst
4366 = fold (build (PLUS_EXPR, TREE_TYPE (varop),
4367 constop, TREE_OPERAND (varop, 1)));
4368 TREE_SET_CODE (varop, PREINCREMENT_EXPR);
4369
4370 t = build (code, type, TREE_OPERAND (t, 0),
4371 TREE_OPERAND (t, 1));
4372 TREE_OPERAND (t, constopnum) = newconst;
4373 return t;
4374 }
4375 }
4376 else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
4377 {
4378 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4379 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4380 && (code == EQ_EXPR || code == NE_EXPR)))
4381 {
4382 tree newconst
4383 = fold (build (MINUS_EXPR, TREE_TYPE (varop),
4384 constop, TREE_OPERAND (varop, 1)));
4385 TREE_SET_CODE (varop, PREDECREMENT_EXPR);
4386 t = build (code, type, TREE_OPERAND (t, 0),
4387 TREE_OPERAND (t, 1));
4388 TREE_OPERAND (t, constopnum) = newconst;
4389 return t;
4390 }
4391 }
4392 }
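/* For instance, x++ == 5 is rewritten as ++x == 6, and
   x-- != 0 as --x != -1; only EQ/NE (or pointer types)
   qualify, since for ordered comparisons an overflow of
   CONST+INCR could change the result.  */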
4393
4394 /* Change X >= CST to X > (CST - 1) if CST is positive. */
4395 if (TREE_CODE (arg1) == INTEGER_CST
4396 && TREE_CODE (arg0) != INTEGER_CST
4397 && tree_int_cst_sgn (arg1) > 0)
4398 {
4399 switch (TREE_CODE (t))
4400 {
4401 case GE_EXPR:
4402 code = GT_EXPR;
4403 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4404 t = build (code, type, TREE_OPERAND (t, 0), arg1);
4405 break;
4406
4407 case LT_EXPR:
4408 code = LE_EXPR;
4409 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4410 t = build (code, type, TREE_OPERAND (t, 0), arg1);
4411 break;
4412 }
4413 }
4414
4415 /* If this is an EQ or NE comparison with zero and ARG0 is
4416 (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
4417 two operations, but the latter can be done with one less insn
4418 on machines that have only two-operand insns or on which a
4419 constant cannot be the first operand. */
4420 if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
4421 && TREE_CODE (arg0) == BIT_AND_EXPR)
4422 {
4423 if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
4424 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
4425 return
4426 fold (build (code, type,
4427 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4428 build (RSHIFT_EXPR,
4429 TREE_TYPE (TREE_OPERAND (arg0, 0)),
4430 TREE_OPERAND (arg0, 1),
4431 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
4432 convert (TREE_TYPE (arg0),
4433 integer_one_node)),
4434 arg1));
4435 else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
4436 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
4437 return
4438 fold (build (code, type,
4439 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4440 build (RSHIFT_EXPR,
4441 TREE_TYPE (TREE_OPERAND (arg0, 1)),
4442 TREE_OPERAND (arg0, 0),
4443 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
4444 convert (TREE_TYPE (arg0),
4445 integer_one_node)),
4446 arg1));
4447 }
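/* For instance, ((1 << n) & flags) != 0 is rewritten as
   ((flags >> n) & 1) != 0, avoiding the load of a shifted
   constant on two-operand machines.  */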
4448
4449 /* If this is an NE or EQ comparison of zero against the result of a
4450 signed MOD operation whose second operand is a power of 2, make
4451 the MOD operation unsigned since it is simpler and equivalent. */
4452 if ((code == NE_EXPR || code == EQ_EXPR)
4453 && integer_zerop (arg1)
4454 && ! TREE_UNSIGNED (TREE_TYPE (arg0))
4455 && (TREE_CODE (arg0) == TRUNC_MOD_EXPR
4456 || TREE_CODE (arg0) == CEIL_MOD_EXPR
4457 || TREE_CODE (arg0) == FLOOR_MOD_EXPR
4458 || TREE_CODE (arg0) == ROUND_MOD_EXPR)
4459 && integer_pow2p (TREE_OPERAND (arg0, 1)))
4460 {
4461 tree newtype = unsigned_type (TREE_TYPE (arg0));
4462 tree newmod = build (TREE_CODE (arg0), newtype,
4463 convert (newtype, TREE_OPERAND (arg0, 0)),
4464 convert (newtype, TREE_OPERAND (arg0, 1)));
4465
4466 return build (code, type, newmod, convert (newtype, arg1));
4467 }
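/* For instance, with signed x, x % 8 == 0 is rewritten to use
   an unsigned MOD: (unsigned) x % 8U == 0, which is equivalent
   here and simpler to expand.  */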
4468
4469 /* If this is an NE comparison of zero with an AND of one, remove the
4470 comparison since the AND will give the correct value. */
4471 if (code == NE_EXPR && integer_zerop (arg1)
4472 && TREE_CODE (arg0) == BIT_AND_EXPR
4473 && integer_onep (TREE_OPERAND (arg0, 1)))
4474 return convert (type, arg0);
4475
4476 /* If we have (A & C) == C where C is a power of 2, convert this into
4477 (A & C) != 0. Similarly for NE_EXPR. */
4478 if ((code == EQ_EXPR || code == NE_EXPR)
4479 && TREE_CODE (arg0) == BIT_AND_EXPR
4480 && integer_pow2p (TREE_OPERAND (arg0, 1))
4481 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
4482 return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
4483 arg0, integer_zero_node);
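/* For instance, (x & 8) == 8 becomes (x & 8) != 0, and
   (x & 8) != 8 becomes (x & 8) == 0.  */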
4484
4485 /* If X is unsigned, convert X < (1 << Y) into X >> Y == 0
4486 and similarly for >= into !=. */
4487 if ((code == LT_EXPR || code == GE_EXPR)
4488 && TREE_UNSIGNED (TREE_TYPE (arg0))
4489 && TREE_CODE (arg1) == LSHIFT_EXPR
4490 && integer_onep (TREE_OPERAND (arg1, 0)))
4491 return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
4492 build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
4493 TREE_OPERAND (arg1, 1)),
4494 convert (TREE_TYPE (arg0), integer_zero_node));
4495
4496 else if ((code == LT_EXPR || code == GE_EXPR)
4497 && TREE_UNSIGNED (TREE_TYPE (arg0))
4498 && (TREE_CODE (arg1) == NOP_EXPR
4499 || TREE_CODE (arg1) == CONVERT_EXPR)
4500 && TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR
4501 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0)))
4502 return
4503 build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
4504 convert (TREE_TYPE (arg0),
4505 build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
4506 TREE_OPERAND (TREE_OPERAND (arg1, 0), 1))),
4507 convert (TREE_TYPE (arg0), integer_zero_node));
4508
4509 /* Simplify comparison of something with itself. (For IEEE
4510 floating-point, we can only do some of these simplifications.) */
4511 if (operand_equal_p (arg0, arg1, 0))
4512 {
4513 switch (code)
4514 {
4515 case EQ_EXPR:
4516 case GE_EXPR:
4517 case LE_EXPR:
4518 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4519 {
4520 t = build_int_2 (1, 0);
4521 TREE_TYPE (t) = type;
4522 return t;
4523 }
4524 code = EQ_EXPR;
4525 TREE_SET_CODE (t, code);
4526 break;
4527
4528 case NE_EXPR:
4529 /* For NE, we can only do this simplification for integer types. */
4530 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4531 break;
4532 /* ... fall through ... */
4533 case GT_EXPR:
4534 case LT_EXPR:
4535 t = build_int_2 (0, 0);
4536 TREE_TYPE (t) = type;
4537 return t;
4538 }
4539 }
4540
4541 /* An unsigned comparison against 0 can be simplified. */
4542 if (integer_zerop (arg1)
4543 && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
4544 || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
4545 && TREE_UNSIGNED (TREE_TYPE (arg1)))
4546 {
4547 switch (TREE_CODE (t))
4548 {
4549 case GT_EXPR:
4550 code = NE_EXPR;
4551 TREE_SET_CODE (t, NE_EXPR);
4552 break;
4553 case LE_EXPR:
4554 code = EQ_EXPR;
4555 TREE_SET_CODE (t, EQ_EXPR);
4556 break;
4557 case GE_EXPR:
4558 return omit_one_operand (type,
4559 convert (type, integer_one_node),
4560 arg0);
4561 case LT_EXPR:
4562 return omit_one_operand (type,
4563 convert (type, integer_zero_node),
4564 arg0);
4565 }
4566 }
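/* For instance, with unsigned u: u > 0 becomes u != 0,
   u <= 0 becomes u == 0, u >= 0 folds to 1, and u < 0 folds
   to 0 (the last two still evaluating u if it has side
   effects, via omit_one_operand).  */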
4567
4568 /* If we are comparing an expression that just has comparisons
4569 of two integer values, arithmetic expressions of those comparisons,
4570 and constants, we can simplify it. There are only three cases
4571 to check: the two values can either be equal, the first can be
4572 greater, or the second can be greater. Fold the expression for
4573 those three values. Since each value must be 0 or 1, we have
4574 eight possibilities, each of which corresponds to the constant 0
4575 or 1 or one of the six possible comparisons.
4576
4577 This handles common cases like (a > b) == 0 but also handles
4578 expressions like ((x > y) - (y > x)) > 0, which supposedly
4579 occur in macroized code. */
4580
4581 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
4582 {
4583 tree cval1 = 0, cval2 = 0;
4584 int save_p = 0;
4585
4586 if (twoval_comparison_p (arg0, &cval1, &cval2, &save_p)
4587 /* Don't handle degenerate cases here; they should already
4588 have been handled anyway. */
4589 && cval1 != 0 && cval2 != 0
4590 && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
4591 && TREE_TYPE (cval1) == TREE_TYPE (cval2)
4592 && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
4593 && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
4594 TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
4595 {
4596 tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
4597 tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));
4598
4599 /* We can't just pass T to eval_subst in case cval1 or cval2
4600 was the same as ARG1. */
4601
4602 tree high_result
4603 = fold (build (code, type,
4604 eval_subst (arg0, cval1, maxval, cval2, minval),
4605 arg1));
4606 tree equal_result
4607 = fold (build (code, type,
4608 eval_subst (arg0, cval1, maxval, cval2, maxval),
4609 arg1));
4610 tree low_result
4611 = fold (build (code, type,
4612 eval_subst (arg0, cval1, minval, cval2, maxval),
4613 arg1));
4614
4615 /* All three of these results should be 0 or 1. Confirm they
4616 are. Then use those values to select the proper code
4617 to use. */
4618
4619 if ((integer_zerop (high_result)
4620 || integer_onep (high_result))
4621 && (integer_zerop (equal_result)
4622 || integer_onep (equal_result))
4623 && (integer_zerop (low_result)
4624 || integer_onep (low_result)))
4625 {
4626 /* Make a 3-bit mask with the high-order bit being the
4627 value for `>', the next for `=', and the low for `<'. */
4628 switch ((integer_onep (high_result) * 4)
4629 + (integer_onep (equal_result) * 2)
4630 + integer_onep (low_result))
4631 {
4632 case 0:
4633 /* Always false. */
4634 return omit_one_operand (type, integer_zero_node, arg0);
4635 case 1:
4636 code = LT_EXPR;
4637 break;
4638 case 2:
4639 code = EQ_EXPR;
4640 break;
4641 case 3:
4642 code = LE_EXPR;
4643 break;
4644 case 4:
4645 code = GT_EXPR;
4646 break;
4647 case 5:
4648 code = NE_EXPR;
4649 break;
4650 case 6:
4651 code = GE_EXPR;
4652 break;
4653 case 7:
4654 /* Always true. */
4655 return omit_one_operand (type, integer_one_node, arg0);
4656 }
4657
4658 t = build (code, type, cval1, cval2);
4659 if (save_p)
4660 return save_expr (t);
4661 else
4662 return fold (t);
4663 }
4664 }
4665 }
4666
4667 /* If this is a comparison of a field, we may be able to simplify it. */
4668 if ((TREE_CODE (arg0) == COMPONENT_REF
4669 || TREE_CODE (arg0) == BIT_FIELD_REF)
4670 && (code == EQ_EXPR || code == NE_EXPR)
4671 /* Handle the constant case even without -O
4672 to make sure the warnings are given. */
4673 && (optimize || TREE_CODE (arg1) == INTEGER_CST))
4674 {
4675 t1 = optimize_bit_field_compare (code, type, arg0, arg1);
4676 return t1 ? t1 : t;
4677 }
4678
4679 /* If this is a comparison of complex values and either or both
4680 sides are a COMPLEX_EXPR, it is best to split up the comparisons
4681 and join them with a TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR. This
4682 may prevent needless evaluations. */
4683 if ((code == EQ_EXPR || code == NE_EXPR)
4684 && TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
4685 && (TREE_CODE (arg0) == COMPLEX_EXPR
4686 || TREE_CODE (arg1) == COMPLEX_EXPR))
4687 {
4688 tree subtype = TREE_TYPE (TREE_TYPE (arg0));
4689 tree real0 = fold (build1 (REALPART_EXPR, subtype, arg0));
4690 tree imag0 = fold (build1 (IMAGPART_EXPR, subtype, arg0));
4691 tree real1 = fold (build1 (REALPART_EXPR, subtype, arg1));
4692 tree imag1 = fold (build1 (IMAGPART_EXPR, subtype, arg1));
4693
4694 return fold (build ((code == EQ_EXPR ? TRUTH_ANDIF_EXPR
4695 : TRUTH_ORIF_EXPR),
4696 type,
4697 fold (build (code, type, real0, real1)),
4698 fold (build (code, type, imag0, imag1))));
4699 }
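/* For instance, if z is built by COMPLEX_EXPR (a, b), then
   z == w splits into real parts equal ANDIF imaginary parts
   equal, and z != w joins the two tests with ORIF.  */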
4700
4701 /* From here on, the only cases we handle are when the result is
4702 known to be a constant.
4703
4704 To compute GT, swap the arguments and do LT.
4705 To compute GE, do LT and invert the result.
4706 To compute LE, swap the arguments, do LT and invert the result.
4707 To compute NE, do EQ and invert the result.
4708
4709 Therefore, the code below must handle only EQ and LT. */
4710
4711 if (code == LE_EXPR || code == GT_EXPR)
4712 {
4713 tem = arg0, arg0 = arg1, arg1 = tem;
4714 code = swap_tree_comparison (code);
4715 }
4716
4717 /* Note that it is safe to invert for real values here because we
4718 will check below in the one case that it matters. */
4719
4720 invert = 0;
4721 if (code == NE_EXPR || code == GE_EXPR)
4722 {
4723 invert = 1;
4724 code = invert_tree_comparison (code);
4725 }
4726
4727 /* Compute a result for LT or EQ if args permit;
4728 otherwise return T. */
4729 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
4730 {
4731 if (code == EQ_EXPR)
4732 t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
4733 == TREE_INT_CST_LOW (arg1))
4734 && (TREE_INT_CST_HIGH (arg0)
4735 == TREE_INT_CST_HIGH (arg1)),
4736 0);
4737 else
4738 t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
4739 ? INT_CST_LT_UNSIGNED (arg0, arg1)
4740 : INT_CST_LT (arg0, arg1)),
4741 0);
4742 }
4743
4744 /* Assume a nonexplicit constant cannot equal an explicit one,
4745 since such code would be undefined anyway.
4746 Exception: on sysvr4, using #pragma weak,
4747 a label can come out as 0. */
4748 else if (TREE_CODE (arg1) == INTEGER_CST
4749 && !integer_zerop (arg1)
4750 && TREE_CONSTANT (arg0)
4751 && TREE_CODE (arg0) == ADDR_EXPR
4752 && code == EQ_EXPR)
4753 t1 = build_int_2 (0, 0);
4754
4755 /* Two real constants can be compared explicitly. */
4756 else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
4757 {
4758 /* If either operand is a NaN, the result is false with two
4759 exceptions: First, an NE_EXPR is true on NaNs, but that case
4760 is already handled correctly since we will be inverting the
4761 result for NE_EXPR. Second, if we had inverted a LE_EXPR
4762 or a GE_EXPR into a LT_EXPR, we must return true so that it
4763 will be inverted into false. */
4764
4765 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
4766 || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
4767 t1 = build_int_2 (invert && code == LT_EXPR, 0);
4768
4769 else if (code == EQ_EXPR)
4770 t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
4771 TREE_REAL_CST (arg1)),
4772 0);
4773 else
4774 t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
4775 TREE_REAL_CST (arg1)),
4776 0);
4777 }
4778
4779 if (t1 == NULL_TREE)
4780 return t;
4781
4782 if (invert)
4783 TREE_INT_CST_LOW (t1) ^= 1;
4784
4785 TREE_TYPE (t1) = type;
4786 return t1;
4787
4788 case COND_EXPR:
4789 /* Pedantic ANSI C says that a conditional expression is never an lvalue,
4790 so all simple results must be passed through pedantic_non_lvalue. */
4791 if (TREE_CODE (arg0) == INTEGER_CST)
4792 return pedantic_non_lvalue
4793 (TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
4794 else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
4795 return pedantic_omit_one_operand (type, arg1, arg0);
4796
4797 /* If the second operand is zero, invert the comparison and swap
4798 the second and third operands. Likewise if the second operand
4799 is constant and the third is not or if the third operand is
4800 equivalent to the first operand of the comparison. */
4801
4802 if (integer_zerop (arg1)
4803 || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
4804 || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4805 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4806 TREE_OPERAND (t, 2),
4807 TREE_OPERAND (arg0, 1))))
4808 {
4809 /* See if this can be inverted. If it can't, possibly because
4810 it was a floating-point inequality comparison, don't do
4811 anything. */
4812 tem = invert_truthvalue (arg0);
4813
4814 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
4815 {
4816 t = build (code, type, tem,
4817 TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
4818 arg0 = tem;
4819 arg1 = TREE_OPERAND (t, 2);
4820 STRIP_NOPS (arg1);
4821 }
4822 }
4823
4824 /* If we have A op B ? A : C, we may be able to convert this to a
4825 simpler expression, depending on the operation and the values
4826 of B and C. IEEE floating point prevents this though,
4827 because A or B might be -0.0 or a NaN. */
4828
4829 if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4830 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4831 || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
4832 || flag_fast_math)
4833 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4834 arg1, TREE_OPERAND (arg0, 1)))
4835 {
4836 tree arg2 = TREE_OPERAND (t, 2);
4837 enum tree_code comp_code = TREE_CODE (arg0);
4838
4839 STRIP_NOPS (arg2);
4840
4841 /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
4842 depending on the comparison operation. */
4843 if ((FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 1)))
4844 ? real_zerop (TREE_OPERAND (arg0, 1))
4845 : integer_zerop (TREE_OPERAND (arg0, 1)))
4846 && TREE_CODE (arg2) == NEGATE_EXPR
4847 && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
4848 switch (comp_code)
4849 {
4850 case EQ_EXPR:
4851 return pedantic_non_lvalue
4852 (fold (build1 (NEGATE_EXPR, type, arg1)));
4853 case NE_EXPR:
4854 return pedantic_non_lvalue (convert (type, arg1));
4855 case GE_EXPR:
4856 case GT_EXPR:
4857 return pedantic_non_lvalue
4858 (convert (type, fold (build1 (ABS_EXPR,
4859 TREE_TYPE (arg1), arg1))));
4860 case LE_EXPR:
4861 case LT_EXPR:
4862 return pedantic_non_lvalue
4863 (fold (build1 (NEGATE_EXPR, type,
4864 convert (type,
4865 fold (build1 (ABS_EXPR,
4866 TREE_TYPE (arg1),
4867 arg1))))));
4868 }
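/* For instance, x > 0 ? x : -x and x >= 0 ? x : -x both fold
   to abs (x); x < 0 ? x : -x folds to -abs (x); and
   x != 0 ? x : -x is just x, since both arms agree at zero.  */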
4869
4870 /* If this is A != 0 ? A : 0, this is simply A. For ==, it is
4871 always zero. */
4872
4873 if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
4874 {
4875 if (comp_code == NE_EXPR)
4876 return pedantic_non_lvalue (convert (type, arg1));
4877 else if (comp_code == EQ_EXPR)
4878 return pedantic_non_lvalue (convert (type, integer_zero_node));
4879 }
4880
4881 /* If this is A op B ? A : B, this is either A, B, min (A, B),
4882 or max (A, B), depending on the operation. */
4883
4884 if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
4885 arg2, TREE_OPERAND (arg0, 0)))
4886 {
4887 tree comp_op0 = TREE_OPERAND (arg0, 0);
4888 tree comp_op1 = TREE_OPERAND (arg0, 1);
4889 tree comp_type = TREE_TYPE (comp_op0);
4890
4891 switch (comp_code)
4892 {
4893 case EQ_EXPR:
4894 return pedantic_non_lvalue (convert (type, arg2));
4895 case NE_EXPR:
4896 return pedantic_non_lvalue (convert (type, arg1));
4897 case LE_EXPR:
4898 case LT_EXPR:
4899 return pedantic_non_lvalue
4900 (convert (type, (fold (build (MIN_EXPR, comp_type,
4901 comp_op0, comp_op1)))));
4902 case GE_EXPR:
4903 case GT_EXPR:
4904 return pedantic_non_lvalue
4905 (convert (type, fold (build (MAX_EXPR, comp_type,
4906 comp_op0, comp_op1))));
4907 }
4908 }
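/* For instance, a < b ? a : b folds to min (a, b) and
   a > b ? a : b to max (a, b); under IEEE floating point this
   whole block is skipped unless -ffast-math, because of -0.0
   and NaNs.  */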
4909
4910 /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
4911 we might still be able to simplify this. For example,
4912 if C1 is one less or one more than C2, this might have started
4913 out as a MIN or MAX and been transformed by this function.
4914 Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE. */
4915
4916 if (INTEGRAL_TYPE_P (type)
4917 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
4918 && TREE_CODE (arg2) == INTEGER_CST)
4919 switch (comp_code)
4920 {
4921 case EQ_EXPR:
4922 /* We can replace A with C1 in this case. */
4923 arg1 = convert (type, TREE_OPERAND (arg0, 1));
4924 t = build (code, type, TREE_OPERAND (t, 0), arg1,
4925 TREE_OPERAND (t, 2));
4926 break;
4927
4928 case LT_EXPR:
4929 /* If C1 is C2 + 1, this is min(A, C2). */
4930 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4931 && operand_equal_p (TREE_OPERAND (arg0, 1),
4932 const_binop (PLUS_EXPR, arg2,
4933 integer_one_node, 0), 1))
4934 return pedantic_non_lvalue
4935 (fold (build (MIN_EXPR, type, arg1, arg2)));
4936 break;
4937
4938 case LE_EXPR:
4939 /* If C1 is C2 - 1, this is min(A, C2). */
4940 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4941 && operand_equal_p (TREE_OPERAND (arg0, 1),
4942 const_binop (MINUS_EXPR, arg2,
4943 integer_one_node, 0), 1))
4944 return pedantic_non_lvalue
4945 (fold (build (MIN_EXPR, type, arg1, arg2)));
4946 break;
4947
4948 case GT_EXPR:
4949 /* If C1 is C2 - 1, this is max(A, C2). */
4950 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4951 && operand_equal_p (TREE_OPERAND (arg0, 1),
4952 const_binop (MINUS_EXPR, arg2,
4953 integer_one_node, 0), 1))
4954 return pedantic_non_lvalue
4955 (fold (build (MAX_EXPR, type, arg1, arg2)));
4956 break;
4957
4958 case GE_EXPR:
4959 /* If C1 is C2 + 1, this is max(A, C2). */
4960 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4961 && operand_equal_p (TREE_OPERAND (arg0, 1),
4962 const_binop (PLUS_EXPR, arg2,
4963 integer_one_node, 0), 1))
4964 return pedantic_non_lvalue
4965 (fold (build (MAX_EXPR, type, arg1, arg2)));
4966 break;
4967 }
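/* For instance, x < 5 ? x : 4 likely started out as min (x, 4)
   (5 is 4 + 1), so it is folded back to MIN_EXPR; similarly
   x > 5 ? x : 6 folds to max (x, 6).  */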
4968 }
4969
4970 /* If the second operand is simpler than the third, swap them
4971 since that produces better jump optimization results. */
4972 if ((TREE_CONSTANT (arg1) || TREE_CODE_CLASS (TREE_CODE (arg1)) == 'd'
4973 || TREE_CODE (arg1) == SAVE_EXPR)
4974 && ! (TREE_CONSTANT (TREE_OPERAND (t, 2))
4975 || TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (t, 2))) == 'd'
4976 || TREE_CODE (TREE_OPERAND (t, 2)) == SAVE_EXPR))
4977 {
4978 /* See if this can be inverted. If it can't, possibly because
4979 it was a floating-point inequality comparison, don't do
4980 anything. */
4981 tem = invert_truthvalue (arg0);
4982
4983 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
4984 {
4985 t = build (code, type, tem,
4986 TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
4987 arg0 = tem;
4988 arg1 = TREE_OPERAND (t, 2);
4989 STRIP_NOPS (arg1);
4990 }
4991 }
4992
4993 /* Convert A ? 1 : 0 to simply A. */
4994 if (integer_onep (TREE_OPERAND (t, 1))
4995 && integer_zerop (TREE_OPERAND (t, 2))
4996 /* If we try to convert TREE_OPERAND (t, 0) to our type, the
4997 call to fold will try to move the conversion inside
4998 a COND, which will recurse. In that case, the COND_EXPR
4999 is probably the best choice, so leave it alone. */
5000 && type == TREE_TYPE (arg0))
5001 return pedantic_non_lvalue (arg0);
5002
5003 /* Look for expressions of the form A & 2 ? 2 : 0. The result of this
5004 operation is simply A & 2. */
5005
5006 if (integer_zerop (TREE_OPERAND (t, 2))
5007 && TREE_CODE (arg0) == NE_EXPR
5008 && integer_zerop (TREE_OPERAND (arg0, 1))
5009 && integer_pow2p (arg1)
5010 && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
5011 && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
5012 arg1, 1))
5013 return pedantic_non_lvalue (convert (type, TREE_OPERAND (arg0, 0)));
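/* For instance, (x & 2) != 0 ? 2 : 0 folds to x & 2 itself,
   since the masked value is already either 2 or 0.  */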
5014
5015 return t;
5016
5017 case COMPOUND_EXPR:
5018 /* When pedantic, a compound expression can be neither an lvalue
5019 nor an integer constant expression. */
5020 if (TREE_SIDE_EFFECTS (arg0) || pedantic)
5021 return t;
5022 /* Don't let (0, 0) be a null pointer constant. */
5023 if (integer_zerop (arg1))
5024 return non_lvalue (arg1);
5025 return arg1;
5026
5027 case COMPLEX_EXPR:
5028 if (wins)
5029 return build_complex (arg0, arg1);
5030 return t;
5031
5032 case REALPART_EXPR:
5033 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
5034 return t;
5035 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
5036 return omit_one_operand (type, TREE_OPERAND (arg0, 0),
5037 TREE_OPERAND (arg0, 1));
5038 else if (TREE_CODE (arg0) == COMPLEX_CST)
5039 return TREE_REALPART (arg0);
5040 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
5041 return fold (build (TREE_CODE (arg0), type,
5042 fold (build1 (REALPART_EXPR, type,
5043 TREE_OPERAND (arg0, 0))),
5044 fold (build1 (REALPART_EXPR,
5045 type, TREE_OPERAND (arg0, 1)))));
5046 return t;
5047
5048 case IMAGPART_EXPR:
5049 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
5050 return convert (type, integer_zero_node);
5051 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
5052 return omit_one_operand (type, TREE_OPERAND (arg0, 1),
5053 TREE_OPERAND (arg0, 0));
5054 else if (TREE_CODE (arg0) == COMPLEX_CST)
5055 return TREE_IMAGPART (arg0);
5056 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
5057 return fold (build (TREE_CODE (arg0), type,
5058 fold (build1 (IMAGPART_EXPR, type,
5059 TREE_OPERAND (arg0, 0))),
5060 fold (build1 (IMAGPART_EXPR, type,
5061 TREE_OPERAND (arg0, 1)))));
5062 return t;
5063
5064 /* Pull arithmetic ops out of the CLEANUP_POINT_EXPR where
5065 appropriate. */
5066 case CLEANUP_POINT_EXPR:
5067 if (! TREE_SIDE_EFFECTS (arg0))
5068 return convert (type, arg0);
5069
5070 {
5071 enum tree_code code0 = TREE_CODE (arg0);
5072 int kind0 = TREE_CODE_CLASS (code0);
5073 tree arg00 = TREE_OPERAND (arg0, 0);
5074 tree arg01;
5075
5076 if (kind0 == '1' || code0 == TRUTH_NOT_EXPR)
5077 return fold (build1 (code0, type,
5078 fold (build1 (CLEANUP_POINT_EXPR,
5079 TREE_TYPE (arg00), arg00))));
5080
5081 if (kind0 == '<' || kind0 == '2'
5082 || code0 == TRUTH_ANDIF_EXPR || code0 == TRUTH_ORIF_EXPR
5083 || code0 == TRUTH_AND_EXPR || code0 == TRUTH_OR_EXPR
5084 || code0 == TRUTH_XOR_EXPR)
5085 {
5086 arg01 = TREE_OPERAND (arg0, 1);
5087
5088 if (! TREE_SIDE_EFFECTS (arg00))
5089 return fold (build (code0, type, arg00,
5090 fold (build1 (CLEANUP_POINT_EXPR,
5091 TREE_TYPE (arg01), arg01))));
5092
5093 if (! TREE_SIDE_EFFECTS (arg01))
5094 return fold (build (code0, type,
5095 fold (build1 (CLEANUP_POINT_EXPR,
5096 TREE_TYPE (arg00), arg00)),
5097 arg01));
5098 }
5099
5100 return t;
5101 }
5102
5103 default:
5104 return t;
5105 } /* switch (code) */
5106 }