1 /* Fold a constant sub-tree into a single node for C-compiler
2 Copyright (C) 1987, 1988, 1992 Free Software Foundation, Inc.
3
4 This file is part of GNU CC.
5
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
10
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
19
20 /*@@ Fix lossage on folding division of big integers. */
21
22 /*@@ This file should be rewritten to use an arbitrary precision
23 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
24 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
25 @@ The routines that translate from the ap rep should
26 @@ warn if precision et al. is lost.
27 @@ This would also make life easier when this technology is used
28 @@ for cross-compilers. */
29
30
31 /* The entry points in this file are fold, size_int and size_binop.
32
33 fold takes a tree as argument and returns a simplified tree.
34
35 size_binop takes a tree code for an arithmetic operation
36 and two operands that are trees, and produces a tree for the
37 result, assuming the type comes from `sizetype'.
38
39 size_int takes an integer value, and creates a tree constant
40 with type from `sizetype'. */
41
42 #include <stdio.h>
43 #include <setjmp.h>
44 #include "config.h"
45 #include "flags.h"
46 #include "tree.h"
47
48 /* Handle floating overflow for `const_binop'. */
49 static jmp_buf float_error;
50
51 int lshift_double ();
52 void rshift_double ();
53 void lrotate_double ();
54 void rrotate_double ();
55 static tree const_binop ();
56
57 #ifndef BRANCH_COST
58 #define BRANCH_COST 1
59 #endif
60
61 /* Yield nonzero if a signed left shift of A by B bits overflows. */
62 #define left_shift_overflows(a, b) ((a) != ((a) << (b)) >> (b))
63
64 /* Yield nonzero if A and B have the same sign. */
65 #define same_sign(a, b) (((a) ^ (b)) >= 0)
66
67 /* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
68 Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
69 Then this yields nonzero if overflow occurred during the addition.
70 Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
71 Use `^' to test whether signs differ, and `< 0' to isolate the sign. */
72 #define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
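/* Editorial illustration, not part of the original file: a minimal,
   self-contained check of the sign-based overflow test above.  As in
   the rest of this file, signed addition is assumed to wrap (two's
   complement); overflow is reported exactly when both addends share a
   sign that the wrapped sum does not.  */
#if 0
#include <stdio.h>

#define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)

int
main ()
{
  int a = 2000000000, b = 2000000000;
  int sum = a + b;		/* wraps on a 32-bit int */

  printf ("%d\n", overflow_sum_sign (a, b, sum));	/* 1 */
  printf ("%d\n", overflow_sum_sign (5, -7, 5 + -7));	/* 0 */
  return 0;
}
#endif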
73 \f
74 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
75 We do that by representing the two-word integer as MAX_SHORTS shorts,
76 with only 8 bits stored in each short, as a positive number. */
77
78 /* Unpack a two-word integer into MAX_SHORTS shorts.
79 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
80 SHORTS points to the array of shorts. */
81
82 static void
83 encode (shorts, low, hi)
84 short *shorts;
85 HOST_WIDE_INT low, hi;
86 {
87 register int i;
88
89 for (i = 0; i < MAX_SHORTS / 2; i++)
90 {
91 shorts[i] = (low >> (i * 8)) & 0xff;
92 shorts[i + MAX_SHORTS / 2] = (hi >> (i * 8)) & 0xff;
93 }
94 }
95
96 /* Pack an array of MAX_SHORTS shorts into a two-word integer.
97 SHORTS points to the array of shorts.
98 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
99
100 static void
101 decode (shorts, low, hi)
102 short *shorts;
103 HOST_WIDE_INT *low, *hi;
104 {
105 register int i;
106 HOST_WIDE_INT lv = 0, hv = 0;
107
108 for (i = 0; i < MAX_SHORTS / 2; i++)
109 {
110 lv |= (HOST_WIDE_INT) shorts[i] << (i * 8);
111 hv |= (HOST_WIDE_INT) shorts[i + MAX_SHORTS / 2] << (i * 8);
112 }
113
114 *low = lv, *hi = hv;
115 }
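/* Editorial illustration, not part of the original file: a standalone
   round trip through the 8-bits-per-short representation used by
   `encode' and `decode', assuming MAX_SHORTS == 8 and 32-bit
   `HOST_WIDE_INT' halves.  */
#if 0
#include <stdio.h>

#define MAX_SHORTS 8
typedef long HOST_WIDE_INT;	/* assumed 32 bits here */

int
main ()
{
  short digits[MAX_SHORTS];
  HOST_WIDE_INT low = 0x12345678, hi = (HOST_WIDE_INT) 0xABCD, relow, rehi;
  int i;

  /* encode: each short holds one byte, least significant first.  */
  for (i = 0; i < MAX_SHORTS / 2; i++)
    {
      digits[i] = (low >> (i * 8)) & 0xff;
      digits[i + MAX_SHORTS / 2] = (hi >> (i * 8)) & 0xff;
    }

  /* decode: reassemble the two words.  */
  relow = rehi = 0;
  for (i = 0; i < MAX_SHORTS / 2; i++)
    {
      relow |= (HOST_WIDE_INT) digits[i] << (i * 8);
      rehi |= (HOST_WIDE_INT) digits[i + MAX_SHORTS / 2] << (i * 8);
    }

  printf ("round trip ok: %d\n", relow == low && rehi == hi);	/* 1 */
  return 0;
}
#endif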
116 \f
117 /* Make the integer constant T valid for its type
118 by setting to 0 or 1 all the bits in the constant
119 that don't belong in the type. */
120
121 void
122 force_fit_type (t)
123 tree t;
124 {
125 register int prec = TYPE_PRECISION (TREE_TYPE (t));
126
127 if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
128 prec = POINTER_SIZE;
129
130 /* First clear all bits that are beyond the type's precision. */
131
132 if (prec == 2 * HOST_BITS_PER_WIDE_INT)
133 ;
134 else if (prec > HOST_BITS_PER_WIDE_INT)
135 {
136 TREE_INT_CST_HIGH (t)
137 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
138 }
139 else
140 {
141 TREE_INT_CST_HIGH (t) = 0;
142 if (prec < HOST_BITS_PER_WIDE_INT)
143 TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
144 }
145
146 /* If it's a signed type and value's sign bit is set, extend the sign. */
147
148 if (! TREE_UNSIGNED (TREE_TYPE (t))
149 && prec != 2 * HOST_BITS_PER_WIDE_INT
150 && (prec > HOST_BITS_PER_WIDE_INT
151 ? (TREE_INT_CST_HIGH (t)
152 & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
153 : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
154 {
155 /* Value is negative:
156 set to 1 all the bits that are outside this type's precision. */
157 if (prec > HOST_BITS_PER_WIDE_INT)
158 {
159 TREE_INT_CST_HIGH (t)
160 |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
161 }
162 else
163 {
164 TREE_INT_CST_HIGH (t) = -1;
165 if (prec < HOST_BITS_PER_WIDE_INT)
166 TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
167 }
168 }
169 }
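/* Editorial illustration, not part of the original file: the same
   truncate-then-sign-extend step force_fit_type performs, applied to
   a plain `long' for a precision smaller than the word size.  Shifting
   -1 left relies on the same two's complement behavior the file itself
   assumes.  */
#if 0
#include <stdio.h>

static long
fit_to_precision (value, prec)
     long value;
     int prec;			/* assumed 0 < prec < bits in long */
{
  /* Clear the bits beyond PREC ...  */
  value &= ~((long) -1 << prec);
  /* ... then copy the new sign bit into them.  */
  if (value & ((long) 1 << (prec - 1)))
    value |= (long) -1 << prec;
  return value;
}

int
main ()
{
  printf ("%ld\n", fit_to_precision (200L, 8));	/* -56: 200 wraps in 8 bits */
  printf ("%ld\n", fit_to_precision (100L, 8));	/* 100: already fits */
  return 0;
}
#endif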
170 \f
171 /* Add two doubleword integers with doubleword result.
172 Each argument is given as two `HOST_WIDE_INT' pieces.
173 One argument is L1 and H1; the other, L2 and H2.
174 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
175 We use the 8-shorts representation internally. */
176
177 int
178 add_double (l1, h1, l2, h2, lv, hv)
179 HOST_WIDE_INT l1, h1, l2, h2;
180 HOST_WIDE_INT *lv, *hv;
181 {
182 short arg1[MAX_SHORTS];
183 short arg2[MAX_SHORTS];
184 register int carry = 0;
185 register int i;
186
187 encode (arg1, l1, h1);
188 encode (arg2, l2, h2);
189
190 for (i = 0; i < MAX_SHORTS; i++)
191 {
192 carry += arg1[i] + arg2[i];
193 arg1[i] = carry & 0xff;
194 carry >>= 8;
195 }
196
197 decode (arg1, lv, hv);
198 return overflow_sum_sign (h1, h2, *hv);
199 }
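/* Editorial illustration, not part of the original file: the carry
   propagation loop of add_double on two small operands.  Each array
   element holds one base-256 digit, least significant first.  */
#if 0
#include <stdio.h>

int
main ()
{
  short a[4] = { 0xff, 0xff, 0x00, 0x00 };	/* 0x0000ffff */
  short b[4] = { 0x01, 0x00, 0x00, 0x00 };	/* 0x00000001 */
  int carry = 0, i;

  for (i = 0; i < 4; i++)
    {
      carry += a[i] + b[i];
      a[i] = carry & 0xff;
      carry >>= 8;
    }

  /* Expect 0x00010000: the carry ripples through two digits.  */
  printf ("%02x %02x %02x %02x\n", a[3], a[2], a[1], a[0]);
  return 0;
}
#endif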
200
201 /* Negate a doubleword integer with doubleword result.
202 Return nonzero if the operation overflows, assuming it's signed.
203 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
204 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
205 We use the 8-shorts representation internally. */
206
207 int
208 neg_double (l1, h1, lv, hv)
209 HOST_WIDE_INT l1, h1;
210 HOST_WIDE_INT *lv, *hv;
211 {
212 if (l1 == 0)
213 {
214 *lv = 0;
215 *hv = - h1;
216 return same_sign (h1, *hv);
217 }
218 else
219 {
220 *lv = - l1;
221 *hv = ~ h1;
222 return 0;
223 }
224 }
225 \f
226 /* Multiply two doubleword integers with doubleword result.
227 Return nonzero if the operation overflows, assuming it's signed.
228 Each argument is given as two `HOST_WIDE_INT' pieces.
229 One argument is L1 and H1; the other, L2 and H2.
230 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
231 We use the 8-shorts representation internally. */
232
233 int
234 mul_double (l1, h1, l2, h2, lv, hv)
235 HOST_WIDE_INT l1, h1, l2, h2;
236 HOST_WIDE_INT *lv, *hv;
237 {
238 short arg1[MAX_SHORTS];
239 short arg2[MAX_SHORTS];
240 short prod[MAX_SHORTS * 2];
241 register int carry = 0;
242 register int i, j, k;
243 HOST_WIDE_INT toplow, tophigh, neglow, neghigh;
244
245 /* These cases are used extensively, arising from pointer combinations. */
246 if (h2 == 0)
247 {
248 if (l2 == 2)
249 {
250 int overflow = left_shift_overflows (h1, 1);
251 unsigned HOST_WIDE_INT temp = l1 + l1;
252 *hv = (h1 << 1) + (temp < l1);
253 *lv = temp;
254 return overflow;
255 }
256 if (l2 == 4)
257 {
258 int overflow = left_shift_overflows (h1, 2);
259 unsigned HOST_WIDE_INT temp = l1 + l1;
260 h1 = (h1 << 2) + ((temp < l1) << 1);
261 l1 = temp;
262 temp += temp;
263 h1 += (temp < l1);
264 *lv = temp;
265 *hv = h1;
266 return overflow;
267 }
268 if (l2 == 8)
269 {
270 int overflow = left_shift_overflows (h1, 3);
271 unsigned HOST_WIDE_INT temp = l1 + l1;
272 h1 = (h1 << 3) + ((temp < l1) << 2);
273 l1 = temp;
274 temp += temp;
275 h1 += (temp < l1) << 1;
276 l1 = temp;
277 temp += temp;
278 h1 += (temp < l1);
279 *lv = temp;
280 *hv = h1;
281 return overflow;
282 }
283 }
284
285 encode (arg1, l1, h1);
286 encode (arg2, l2, h2);
287
288 bzero (prod, sizeof prod);
289
290 for (i = 0; i < MAX_SHORTS; i++)
291 for (j = 0; j < MAX_SHORTS; j++)
292 {
293 k = i + j;
294 carry = arg1[i] * arg2[j];
295 while (carry)
296 {
297 carry += prod[k];
298 prod[k] = carry & 0xff;
299 carry >>= 8;
300 k++;
301 }
302 }
303
304 decode (prod, lv, hv); /* This ignores
305 prod[MAX_SHORTS] through prod[MAX_SHORTS*2-1].  */
306
307 /* Check for overflow by calculating the top half of the answer in full;
308 it should agree with the low half's sign bit. */
309 decode (prod+MAX_SHORTS, &toplow, &tophigh);
310 if (h1 < 0)
311 {
312 neg_double (l2, h2, &neglow, &neghigh);
313 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
314 }
315 if (h2 < 0)
316 {
317 neg_double (l1, h1, &neglow, &neghigh);
318 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
319 }
320 return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
321 }
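/* Editorial illustration, not part of the original file: the
   schoolbook digit product used by mul_double, on two base-256 digit
   arrays, accumulating each partial product at position i + j.  */
#if 0
#include <stdio.h>

int
main ()
{
  short a[2] = { 0x34, 0x12 };		/* 0x1234 */
  short b[2] = { 0x78, 0x56 };		/* 0x5678 */
  short prod[4] = { 0, 0, 0, 0 };
  int i, j, k, carry;

  for (i = 0; i < 2; i++)
    for (j = 0; j < 2; j++)
      {
        k = i + j;
        carry = a[i] * b[j];
        while (carry)
          {
            carry += prod[k];
            prod[k] = carry & 0xff;
            carry >>= 8;
            k++;
          }
      }

  /* 0x1234 * 0x5678 == 0x06260060 */
  printf ("%02x%02x%02x%02x\n", prod[3], prod[2], prod[1], prod[0]);
  return 0;
}
#endif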
322 \f
323 /* Shift the doubleword integer in L1, H1 left by COUNT places
324 keeping only PREC bits of result.
325 Shift right if COUNT is negative.
326 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
327 Return nonzero if the arithmetic shift overflows, assuming it's signed.
328 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
329
330 int
331 lshift_double (l1, h1, count, prec, lv, hv, arith)
332 HOST_WIDE_INT l1, h1;
333 int count, prec;
334 HOST_WIDE_INT *lv, *hv;
335 int arith;
336 {
337 short arg1[MAX_SHORTS];
338 register int i;
339 register int carry, overflow;
340
341 if (count < 0)
342 {
343 rshift_double (l1, h1, - count, prec, lv, hv, arith);
344 return 0;
345 }
346
347 encode (arg1, l1, h1);
348
349 if (count > prec)
350 count = prec;
351
352 overflow = 0;
353 while (count > 0)
354 {
355 carry = 0;
356 for (i = 0; i < MAX_SHORTS; i++)
357 {
358 carry += arg1[i] << 1;
359 arg1[i] = carry & 0xff;
360 carry >>= 8;
361 }
362 count--;
363 overflow |= carry ^ (arg1[7] >> 7);
364 }
365
366 decode (arg1, lv, hv);
367 return overflow;
368 }
369
370 /* Shift the doubleword integer in L1, H1 right by COUNT places
371 keeping only PREC bits of result. COUNT must be positive.
372 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
373 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
374
375 void
376 rshift_double (l1, h1, count, prec, lv, hv, arith)
377 HOST_WIDE_INT l1, h1, count, prec;
378 HOST_WIDE_INT *lv, *hv;
379 int arith;
380 {
381 short arg1[MAX_SHORTS];
382 register int i;
383 register int carry;
384
385 encode (arg1, l1, h1);
386
387 if (count > prec)
388 count = prec;
389
390 while (count > 0)
391 {
392 carry = arith && arg1[7] >> 7;
393 for (i = MAX_SHORTS - 1; i >= 0; i--)
394 {
395 carry <<= 8;
396 carry += arg1[i];
397 arg1[i] = (carry >> 1) & 0xff;
398 }
399 count--;
400 }
401
402 decode (arg1, lv, hv);
403 }
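/* Editorial illustration, not part of the original file: one bit of
   right shift over the byte array, as in rshift_double, comparing the
   arithmetic and logical treatment of the sign bit.  */
#if 0
#include <stdio.h>

static void
shift_right_once (d, n, arith)
     short *d;
     int n, arith;
{
  int i, carry = arith && (d[n - 1] >> 7);	/* replicate the sign?  */

  for (i = n - 1; i >= 0; i--)
    {
      carry <<= 8;
      carry += d[i];
      d[i] = (carry >> 1) & 0xff;
    }
}

int
main ()
{
  short a[2] = { 0x00, 0x80 };	/* 0x8000: negative in 16 bits */
  short b[2] = { 0x00, 0x80 };

  shift_right_once (a, 2, 1);
  shift_right_once (b, 2, 0);
  printf ("arith %02x%02x, logical %02x%02x\n", a[1], a[0], b[1], b[0]);
  /* arith c000, logical 4000 */
  return 0;
}
#endif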
404 \f
405 /* Rotate the doubleword integer in L1, H1 left by COUNT places
406 keeping only PREC bits of result.
407 Rotate right if COUNT is negative.
408 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
409
410 void
411 lrotate_double (l1, h1, count, prec, lv, hv)
412 HOST_WIDE_INT l1, h1, count, prec;
413 HOST_WIDE_INT *lv, *hv;
414 {
415 short arg1[MAX_SHORTS];
416 register int i;
417 register int carry;
418
419 if (count < 0)
420 {
421 rrotate_double (l1, h1, - count, prec, lv, hv);
422 return;
423 }
424
425 encode (arg1, l1, h1);
426
427 if (count > prec)
428 count = prec;
429
430 carry = arg1[MAX_SHORTS - 1] >> 7;
431 while (count > 0)
432 {
433 for (i = 0; i < MAX_SHORTS; i++)
434 {
435 carry += arg1[i] << 1;
436 arg1[i] = carry & 0xff;
437 carry >>= 8;
438 }
439 count--;
440 }
441
442 decode (arg1, lv, hv);
443 }
444
445 /* Rotate the doubleword integer in L1, H1 right by COUNT places
446 keeping only PREC bits of result. COUNT must be positive.
447 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
448
449 void
450 rrotate_double (l1, h1, count, prec, lv, hv)
451 HOST_WIDE_INT l1, h1, count, prec;
452 HOST_WIDE_INT *lv, *hv;
453 {
454 short arg1[MAX_SHORTS];
455 register int i;
456 register int carry;
457
458 encode (arg1, l1, h1);
459
460 if (count > prec)
461 count = prec;
462
463 carry = arg1[0] & 1;
464 while (count > 0)
465 {
466 for (i = MAX_SHORTS - 1; i >= 0; i--)
467 {
468 carry <<= 8;
469 carry += arg1[i];
470 arg1[i] = (carry >> 1) & 0xff;
471 }
472 count--;
473 }
474
475 decode (arg1, lv, hv);
476 }
477 \f
478 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
479 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
480 CODE is a tree code for a kind of division, one of
481 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
482 or EXACT_DIV_EXPR.
483 It controls how the quotient is rounded to an integer.
484 Return nonzero if the operation overflows.
485 UNS nonzero says do unsigned division. */
486
487 static int
488 div_and_round_double (code, uns,
489 lnum_orig, hnum_orig, lden_orig, hden_orig,
490 lquo, hquo, lrem, hrem)
491 enum tree_code code;
492 int uns;
493 HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */
494 HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */
495 HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
496 {
497 int quo_neg = 0;
498 short num[MAX_SHORTS + 1]; /* extra element for scaling. */
499 short den[MAX_SHORTS], quo[MAX_SHORTS];
500 register int i, j, work;
501 register int carry = 0;
502 unsigned HOST_WIDE_INT lnum = lnum_orig;
503 HOST_WIDE_INT hnum = hnum_orig;
504 unsigned HOST_WIDE_INT lden = lden_orig;
505 HOST_WIDE_INT hden = hden_orig;
506 int overflow = 0;
507
508 if ((hden == 0) && (lden == 0))
509 abort ();
510
511 /* calculate quotient sign and convert operands to unsigned. */
512 if (!uns)
513 {
514 if (hnum < 0)
515 {
516 quo_neg = ~ quo_neg;
517 /* (minimum integer) / (-1) is the only overflow case. */
518 if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
519 overflow = 1;
520 }
521 if (hden < 0)
522 {
523 quo_neg = ~ quo_neg;
524 neg_double (lden, hden, &lden, &hden);
525 }
526 }
527
528 if (hnum == 0 && hden == 0)
529 { /* single precision */
530 *hquo = *hrem = 0;
531 *lquo = lnum / lden; /* rounds toward zero since positive args */
532 goto finish_up;
533 }
534
535 if (hnum == 0)
536 { /* trivial case: dividend < divisor */
537 /* hden != 0 already checked. */
538 *hquo = *lquo = 0;
539 *hrem = hnum;
540 *lrem = lnum;
541 goto finish_up;
542 }
543
544 bzero (quo, sizeof quo);
545
546 bzero (num, sizeof num); /* to zero 9th element */
547 bzero (den, sizeof den);
548
549 encode (num, lnum, hnum);
550 encode (den, lden, hden);
551
552 /* This code requires more than just hden == 0.
553 We also have to require that we don't need more than three bytes
554 to hold CARRY. If we ever did need four bytes to hold it, we
555 would lose part of it when computing WORK on the next round. */
556 if (hden == 0 && ((lden << 8) >> 8) == lden)
557 { /* simpler algorithm */
558 /* hnum != 0 already checked. */
559 for (i = MAX_SHORTS - 1; i >= 0; i--)
560 {
561 work = num[i] + (carry << 8);
562 quo[i] = work / lden;
563 carry = work % lden;
564 }
565 }
566 else { /* full double precision,
567 with thanks to Don Knuth's
568 "Seminumerical Algorithms". */
569 #define BASE 256
570 int quo_est, scale, num_hi_sig, den_hi_sig, quo_hi_sig;
571
572 /* Find the highest non-zero divisor digit. */
573 for (i = MAX_SHORTS - 1; ; i--)
574 if (den[i] != 0) {
575 den_hi_sig = i;
576 break;
577 }
578 for (i = MAX_SHORTS - 1; ; i--)
579 if (num[i] != 0) {
580 num_hi_sig = i;
581 break;
582 }
583 quo_hi_sig = num_hi_sig - den_hi_sig + 1;
584
585 /* Ensure that the first digit of the divisor is at least BASE/2.
586 This is required by the quotient digit estimation algorithm. */
587
588 scale = BASE / (den[den_hi_sig] + 1);
589 if (scale > 1) { /* scale divisor and dividend */
590 carry = 0;
591 for (i = 0; i <= MAX_SHORTS - 1; i++) {
592 work = (num[i] * scale) + carry;
593 num[i] = work & 0xff;
594 carry = work >> 8;
595 if (num[i] != 0) num_hi_sig = i;
596 }
597 carry = 0;
598 for (i = 0; i <= MAX_SHORTS - 1; i++) {
599 work = (den[i] * scale) + carry;
600 den[i] = work & 0xff;
601 carry = work >> 8;
602 if (den[i] != 0) den_hi_sig = i;
603 }
604 }
605
606 /* Main loop */
607 for (i = quo_hi_sig; i > 0; i--) {
608 /* guess the next quotient digit, quo_est, by dividing the first
609 two remaining dividend digits by the high order divisor digit.
610 quo_est is never low and is at most 2 high. */
611
612 int num_hi; /* index of highest remaining dividend digit */
613
614 num_hi = i + den_hi_sig;
615
616 work = (num[num_hi] * BASE) + (num_hi > 0 ? num[num_hi - 1] : 0);
617 if (num[num_hi] != den[den_hi_sig]) {
618 quo_est = work / den[den_hi_sig];
619 }
620 else {
621 quo_est = BASE - 1;
622 }
623
624 /* refine quo_est so it's usually correct, and at most one high. */
625 while ((den[den_hi_sig - 1] * quo_est)
626 > (((work - (quo_est * den[den_hi_sig])) * BASE)
627 + ((num_hi - 1) > 0 ? num[num_hi - 2] : 0)))
628 quo_est--;
629
630 /* Try QUO_EST as the quotient digit, by multiplying the
631 divisor by QUO_EST and subtracting from the remaining dividend.
632 Keep in mind that QUO_EST is the I - 1st digit. */
633
634 carry = 0;
635
636 for (j = 0; j <= den_hi_sig; j++)
637 {
638 int digit;
639
640 work = num[i + j - 1] - (quo_est * den[j]) + carry;
641 digit = work & 0xff;
642 carry = work >> 8;
643 if (digit < 0)
644 {
645 digit += BASE;
646 carry--;
647 }
648 num[i + j - 1] = digit;
649 }
650
651 /* if quo_est was high by one, then num[num_hi] went negative and
652 we need to correct things. */
653
654 if (num[num_hi] < 0)
655 {
656 quo_est--;
657 carry = 0; /* add divisor back in */
658 for (j = 0; j <= den_hi_sig; j++)
659 {
660 work = num[i + j - 1] + den[j] + carry;
661 if (work > BASE)
662 {
663 work -= BASE;
664 carry = 1;
665 }
666 else
667 {
668 carry = 0;
669 }
670 num[i + j - 1] = work;
671 }
672 num [num_hi] += carry;
673 }
674
675 /* store the quotient digit. */
676 quo[i - 1] = quo_est;
677 }
678 }
679
680 decode (quo, lquo, hquo);
681
682 finish_up:
683 /* if result is negative, make it so. */
684 if (quo_neg)
685 neg_double (*lquo, *hquo, lquo, hquo);
686
687 /* compute trial remainder: rem = num - (quo * den) */
688 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
689 neg_double (*lrem, *hrem, lrem, hrem);
690 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
691
692 switch (code)
693 {
694 case TRUNC_DIV_EXPR:
695 case TRUNC_MOD_EXPR: /* round toward zero */
696 case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
697 return overflow;
698
699 case FLOOR_DIV_EXPR:
700 case FLOOR_MOD_EXPR: /* round toward negative infinity */
701 if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
702 {
703 /* quo = quo - 1; */
704 add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
705 lquo, hquo);
706 }
707 else return overflow;
708 break;
709
710 case CEIL_DIV_EXPR:
711 case CEIL_MOD_EXPR: /* round toward positive infinity */
712 if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
713 {
714 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
715 lquo, hquo);
716 }
717 else return overflow;
718 break;
719
720 case ROUND_DIV_EXPR:
721 case ROUND_MOD_EXPR: /* round to closest integer */
722 {
723 HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
724 HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;
725
726 /* get absolute values */
727 if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
728 if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);
729
730 /* if (2 * abs (lrem) >= abs (lden)) */
731 mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
732 labs_rem, habs_rem, &ltwice, &htwice);
733 if (((unsigned HOST_WIDE_INT) habs_den
734 < (unsigned HOST_WIDE_INT) htwice)
735 || (((unsigned HOST_WIDE_INT) habs_den
736 == (unsigned HOST_WIDE_INT) htwice)
737 && ((unsigned HOST_WIDE_INT) labs_den
738 < (unsigned HOST_WIDE_INT) ltwice)))
739 {
740 if (*hquo < 0)
741 /* quo = quo - 1; */
742 add_double (*lquo, *hquo,
743 (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
744 else
745 /* quo = quo + 1; */
746 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
747 lquo, hquo);
748 }
749 else return overflow;
750 }
751 break;
752
753 default:
754 abort ();
755 }
756
757 /* compute true remainder: rem = num - (quo * den) */
758 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
759 neg_double (*lrem, *hrem, lrem, hrem);
760 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
761 return overflow;
762 }
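/* Editorial illustration, not part of the original file: how the
   rounding cases above adjust a quotient that `/' truncated toward
   zero.  Truncating division is assumed here, as it was on the hosts
   this file targeted.  */
#if 0
#include <stdio.h>

int
main ()
{
  long num = -7, den = 2;
  long quo = num / den;			/* -3, truncated toward zero */
  long rem = num % den;			/* -1 */
  long floor_quo = quo, ceil_quo = quo, round_quo = quo;
  long abs_rem = rem < 0 ? -rem : rem;
  long abs_den = den < 0 ? -den : den;

  if (rem != 0 && (num < 0) != (den < 0))
    floor_quo--;			/* FLOOR_DIV_EXPR: toward -infinity */
  if (rem != 0 && (num < 0) == (den < 0))
    ceil_quo++;				/* CEIL_DIV_EXPR: toward +infinity */
  if (2 * abs_rem >= abs_den)		/* ROUND_DIV_EXPR: to nearest */
    round_quo += ((num < 0) != (den < 0)) ? -1 : 1;

  printf ("trunc %ld floor %ld ceil %ld round %ld\n",
          quo, floor_quo, ceil_quo, round_quo);	/* -3 -4 -3 -4 */
  return 0;
}
#endif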
763 \f
764 /* Effectively truncate a real value to represent
765 the nearest possible value in a narrower mode.
766 The result is actually represented in the same data type as the argument,
767 but its value is usually different. */
768
769 REAL_VALUE_TYPE
770 real_value_truncate (mode, arg)
771 enum machine_mode mode;
772 REAL_VALUE_TYPE arg;
773 {
774 #ifdef __STDC__
775 /* Make sure the value is actually stored in memory before we turn off
776 the handler. */
777 volatile
778 #endif
779 REAL_VALUE_TYPE value;
780 jmp_buf handler, old_handler;
781 int handled;
782
783 if (setjmp (handler))
784 {
785 error ("floating overflow");
786 return dconst0;
787 }
788 handled = push_float_handler (handler, old_handler);
789 value = REAL_VALUE_TRUNCATE (mode, arg);
790 pop_float_handler (handled, old_handler);
791 return value;
792 }
793
794 #if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
795
796 /* Check for infinity in an IEEE double precision number. */
797
798 int
799 target_isinf (x)
800 REAL_VALUE_TYPE x;
801 {
802 /* The IEEE 64-bit double format. */
803 union {
804 REAL_VALUE_TYPE d;
805 struct {
806 unsigned sign : 1;
807 unsigned exponent : 11;
808 unsigned mantissa1 : 20;
809 unsigned mantissa2;
810 } little_endian;
811 struct {
812 unsigned mantissa2;
813 unsigned mantissa1 : 20;
814 unsigned exponent : 11;
815 unsigned sign : 1;
816 } big_endian;
817 } u;
818
819 u.d = dconstm1;
820 if (u.big_endian.sign == 1)
821 {
822 u.d = x;
823 return (u.big_endian.exponent == 2047
824 && u.big_endian.mantissa1 == 0
825 && u.big_endian.mantissa2 == 0);
826 }
827 else
828 {
829 u.d = x;
830 return (u.little_endian.exponent == 2047
831 && u.little_endian.mantissa1 == 0
832 && u.little_endian.mantissa2 == 0);
833 }
834 }
835
836 /* Check whether an IEEE double precision number is a NaN. */
837
838 int
839 target_isnan (x)
840 REAL_VALUE_TYPE x;
841 {
842 /* The IEEE 64-bit double format. */
843 union {
844 REAL_VALUE_TYPE d;
845 struct {
846 unsigned sign : 1;
847 unsigned exponent : 11;
848 unsigned mantissa1 : 20;
849 unsigned mantissa2;
850 } little_endian;
851 struct {
852 unsigned mantissa2;
853 unsigned mantissa1 : 20;
854 unsigned exponent : 11;
855 unsigned sign : 1;
856 } big_endian;
857 } u;
858
859 u.d = dconstm1;
860 if (u.big_endian.sign == 1)
861 {
862 u.d = x;
863 return (u.big_endian.exponent == 2047
864 && (u.big_endian.mantissa1 != 0
865 || u.big_endian.mantissa2 != 0));
866 }
867 else
868 {
869 u.d = x;
870 return (u.little_endian.exponent == 2047
871 && (u.little_endian.mantissa1 != 0
872 || u.little_endian.mantissa2 != 0));
873 }
874 }
875
876 /* Check for a negative IEEE double precision number. */
877
878 int
879 target_negative (x)
880 REAL_VALUE_TYPE x;
881 {
882 /* The IEEE 64-bit double format. */
883 union {
884 REAL_VALUE_TYPE d;
885 struct {
886 unsigned sign : 1;
887 unsigned exponent : 11;
888 unsigned mantissa1 : 20;
889 unsigned mantissa2;
890 } little_endian;
891 struct {
892 unsigned mantissa2;
893 unsigned mantissa1 : 20;
894 unsigned exponent : 11;
895 unsigned sign : 1;
896 } big_endian;
897 } u;
898
899 u.d = dconstm1;
900 if (u.big_endian.sign == 1)
901 {
902 u.d = x;
903 return u.big_endian.sign;
904 }
905 else
906 {
907 u.d = x;
908 return u.little_endian.sign;
909 }
910 }
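/* Editorial illustration, not part of the original file: the same
   all-ones-exponent test on the host's own `double', assuming it is
   the IEEE 64-bit format and `int' is 32 bits.  Exponent 2047 marks
   both infinities (zero mantissa) and NaNs (nonzero mantissa); the
   word holding the sign is located at run time, just as the unions
   above locate it by storing -1.0 first.  */
#if 0
#include <stdio.h>

static int
is_inf_or_nan (x)
     double x;
{
  union { double d; unsigned int w[2]; } u;	/* assumes 32-bit int */
  int high_word;

  u.d = -1.0;
  high_word = (u.w[0] >> 31) & 1 ? 0 : 1;	/* which word has the sign?  */

  u.d = x;
  return ((u.w[high_word] >> 20) & 0x7ff) == 2047;
}

int
main ()
{
  double one = 1.0, zero = 0.0;
  double inf = one / zero;			/* assumed IEEE: +Inf */

  printf ("%d %d\n", is_inf_or_nan (inf), is_inf_or_nan (1.5)); /* 1 0 */
  return 0;
}
#endif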
911 #else /* Target not IEEE */
912
913 /* Let's assume other float formats don't have infinity.
914 (This can be overridden by redefining REAL_VALUE_ISINF.) */
915
916 target_isinf (x)
917 REAL_VALUE_TYPE x;
918 {
919 return 0;
920 }
921
922 /* Let's assume other float formats don't have NaNs.
923 (This can be overridden by redefining REAL_VALUE_ISNAN.) */
924
925 target_isnan (x)
926 REAL_VALUE_TYPE x;
927 {
928 return 0;
929 }
930
931 /* Let's assume other float formats don't have minus zero.
932 (This can be overridden by redefining REAL_VALUE_NEGATIVE.) */
933
934 target_negative (x)
935 REAL_VALUE_TYPE x;
936 {
937 return x < 0;
938 }
939 #endif /* Target not IEEE */
940 \f
941 /* Split a tree IN into a constant and a variable part
942 that could be combined with CODE to make IN.
943 CODE must be a commutative arithmetic operation.
944 Store the constant part into *CONP and the variable in *VARP.
945 Return 1 if this was done; zero means the tree IN did not decompose
946 this way.
947
948 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
949 Therefore, we must tell the caller whether the variable part
950 was subtracted. We do this by storing 1 or -1 into *VARSIGNP.
951 The value stored is the coefficient for the variable term.
952 The constant term we return should always be added;
953 we negate it if necessary. */
954
955 static int
956 split_tree (in, code, varp, conp, varsignp)
957 tree in;
958 enum tree_code code;
959 tree *varp, *conp;
960 int *varsignp;
961 {
962 register tree outtype = TREE_TYPE (in);
963 *varp = 0;
964 *conp = 0;
965
966 /* Strip any conversions that don't change the machine mode. */
967 while ((TREE_CODE (in) == NOP_EXPR
968 || TREE_CODE (in) == CONVERT_EXPR)
969 && (TYPE_MODE (TREE_TYPE (in))
970 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
971 in = TREE_OPERAND (in, 0);
972
973 if (TREE_CODE (in) == code
974 || (TREE_CODE (TREE_TYPE (in)) != REAL_TYPE
975 /* We can associate addition and subtraction together
976 (even though the C standard doesn't say so)
977 for integers because the value is not affected.
978 For reals, the value might be affected, so we can't. */
979 &&
980 ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
981 || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
982 {
983 enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
984 if (code == INTEGER_CST)
985 {
986 *conp = TREE_OPERAND (in, 0);
987 *varp = TREE_OPERAND (in, 1);
988 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
989 && TREE_TYPE (*varp) != outtype)
990 *varp = convert (outtype, *varp);
991 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
992 return 1;
993 }
994 if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
995 {
996 *conp = TREE_OPERAND (in, 1);
997 *varp = TREE_OPERAND (in, 0);
998 *varsignp = 1;
999 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
1000 && TREE_TYPE (*varp) != outtype)
1001 *varp = convert (outtype, *varp);
1002 if (TREE_CODE (in) == MINUS_EXPR)
1003 {
1004 /* If operation is subtraction and constant is second,
1005 must negate it to get an additive constant.
1006 And this cannot be done unless it is a manifest constant.
1007 It could also be the address of a static variable.
1008 We cannot negate that, so give up. */
1009 if (TREE_CODE (*conp) == INTEGER_CST)
1010 /* Subtracting from integer_zero_node loses for long long. */
1011 *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
1012 else
1013 return 0;
1014 }
1015 return 1;
1016 }
1017 if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
1018 {
1019 *conp = TREE_OPERAND (in, 0);
1020 *varp = TREE_OPERAND (in, 1);
1021 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
1022 && TREE_TYPE (*varp) != outtype)
1023 *varp = convert (outtype, *varp);
1024 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
1025 return 1;
1026 }
1027 }
1028 return 0;
1029 }
1030 \f
1031 /* Combine two constants ARG1 and ARG2 under operation CODE
1032 to produce a new constant.
1033 We assume ARG1 and ARG2 have the same data type,
1034 or at least are the same kind of constant and the same machine mode.
1035
1036 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
1037
1038 static tree
1039 const_binop (code, arg1, arg2, notrunc)
1040 enum tree_code code;
1041 register tree arg1, arg2;
1042 int notrunc;
1043 {
1044 if (TREE_CODE (arg1) == INTEGER_CST)
1045 {
1046 register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
1047 register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
1048 HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
1049 HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
1050 HOST_WIDE_INT low, hi;
1051 HOST_WIDE_INT garbagel, garbageh;
1052 register tree t;
1053 int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
1054 /* Propagate overflow flags from operands; also record new overflow. */
1055 int overflow
1056 = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);
1057
1058 switch (code)
1059 {
1060 case BIT_IOR_EXPR:
1061 t = build_int_2 (int1l | int2l, int1h | int2h);
1062 break;
1063
1064 case BIT_XOR_EXPR:
1065 t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
1066 break;
1067
1068 case BIT_AND_EXPR:
1069 t = build_int_2 (int1l & int2l, int1h & int2h);
1070 break;
1071
1072 case BIT_ANDTC_EXPR:
1073 t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
1074 break;
1075
1076 case RSHIFT_EXPR:
1077 int2l = - int2l;
1078 case LSHIFT_EXPR:
1079 overflow = lshift_double (int1l, int1h, int2l,
1080 TYPE_PRECISION (TREE_TYPE (arg1)),
1081 &low, &hi,
1082 !uns);
1083 t = build_int_2 (low, hi);
1084 break;
1085
1086 case RROTATE_EXPR:
1087 int2l = - int2l;
1088 case LROTATE_EXPR:
1089 lrotate_double (int1l, int1h, int2l,
1090 TYPE_PRECISION (TREE_TYPE (arg1)),
1091 &low, &hi);
1092 t = build_int_2 (low, hi);
1093 break;
1094
1095 case PLUS_EXPR:
1096 if (int1h == 0)
1097 {
1098 int2l += int1l;
1099 if ((unsigned HOST_WIDE_INT) int2l < int1l)
1100 {
1101 hi = int2h++;
1102 overflow = ! same_sign (hi, int2h);
1103 }
1104 t = build_int_2 (int2l, int2h);
1105 break;
1106 }
1107 if (int2h == 0)
1108 {
1109 int1l += int2l;
1110 if ((unsigned HOST_WIDE_INT) int1l < int2l)
1111 {
1112 hi = int1h++;
1113 overflow = ! same_sign (hi, int1h);
1114 }
1115 t = build_int_2 (int1l, int1h);
1116 break;
1117 }
1118 overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
1119 t = build_int_2 (low, hi);
1120 break;
1121
1122 case MINUS_EXPR:
1123 if (int2h == 0 && int2l == 0)
1124 {
1125 t = build_int_2 (int1l, int1h);
1126 break;
1127 }
1128 neg_double (int2l, int2h, &low, &hi);
1129 add_double (int1l, int1h, low, hi, &low, &hi);
1130 overflow = overflow_sum_sign (hi, int2h, int1h);
1131 t = build_int_2 (low, hi);
1132 break;
1133
1134 case MULT_EXPR:
1135 /* Optimize simple cases. */
1136 if (int1h == 0)
1137 {
1138 unsigned HOST_WIDE_INT temp;
1139
1140 switch (int1l)
1141 {
1142 case 0:
1143 t = build_int_2 (0, 0);
1144 goto got_it;
1145 case 1:
1146 t = build_int_2 (int2l, int2h);
1147 goto got_it;
1148 case 2:
1149 overflow = left_shift_overflows (int2h, 1);
1150 temp = int2l + int2l;
1151 int2h = (int2h << 1) + (temp < int2l);
1152 t = build_int_2 (temp, int2h);
1153 goto got_it;
1154 #if 0 /* This code can lose carries. */
1155 case 3:
1156 temp = int2l + int2l + int2l;
1157 int2h = int2h * 3 + (temp < int2l);
1158 t = build_int_2 (temp, int2h);
1159 goto got_it;
1160 #endif
1161 case 4:
1162 overflow = left_shift_overflows (int2h, 2);
1163 temp = int2l + int2l;
1164 int2h = (int2h << 2) + ((temp < int2l) << 1);
1165 int2l = temp;
1166 temp += temp;
1167 int2h += (temp < int2l);
1168 t = build_int_2 (temp, int2h);
1169 goto got_it;
1170 case 8:
1171 overflow = left_shift_overflows (int2h, 3);
1172 temp = int2l + int2l;
1173 int2h = (int2h << 3) + ((temp < int2l) << 2);
1174 int2l = temp;
1175 temp += temp;
1176 int2h += (temp < int2l) << 1;
1177 int2l = temp;
1178 temp += temp;
1179 int2h += (temp < int2l);
1180 t = build_int_2 (temp, int2h);
1181 goto got_it;
1182 default:
1183 break;
1184 }
1185 }
1186
1187 if (int2h == 0)
1188 {
1189 if (int2l == 0)
1190 {
1191 t = build_int_2 (0, 0);
1192 break;
1193 }
1194 if (int2l == 1)
1195 {
1196 t = build_int_2 (int1l, int1h);
1197 break;
1198 }
1199 }
1200
1201 overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
1202 t = build_int_2 (low, hi);
1203 break;
1204
1205 case TRUNC_DIV_EXPR:
1206 case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
1207 case EXACT_DIV_EXPR:
1208 /* This is a shortcut for a common special case.
1209 It reduces the number of tree nodes generated
1210 and saves time. */
1211 if (int2h == 0 && int2l > 0
1212 && TREE_TYPE (arg1) == sizetype
1213 && int1h == 0 && int1l >= 0)
1214 {
1215 if (code == CEIL_DIV_EXPR)
1216 int1l += int2l-1;
1217 return size_int (int1l / int2l);
1218 }
1219 case ROUND_DIV_EXPR:
1220 if (int2h == 0 && int2l == 1)
1221 {
1222 t = build_int_2 (int1l, int1h);
1223 break;
1224 }
1225 if (int1l == int2l && int1h == int2h)
1226 {
1227 if ((int1l | int1h) == 0)
1228 abort ();
1229 t = build_int_2 (1, 0);
1230 break;
1231 }
1232 overflow = div_and_round_double (code, uns,
1233 int1l, int1h, int2l, int2h,
1234 &low, &hi, &garbagel, &garbageh);
1235 t = build_int_2 (low, hi);
1236 break;
1237
1238 case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
1239 case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
1240 overflow = div_and_round_double (code, uns,
1241 int1l, int1h, int2l, int2h,
1242 &garbagel, &garbageh, &low, &hi);
1243 t = build_int_2 (low, hi);
1244 break;
1245
1246 case MIN_EXPR:
1247 case MAX_EXPR:
1248 if (uns)
1249 {
1250 low = (((unsigned HOST_WIDE_INT) int1h
1251 < (unsigned HOST_WIDE_INT) int2h)
1252 || (((unsigned HOST_WIDE_INT) int1h
1253 == (unsigned HOST_WIDE_INT) int2h)
1254 && ((unsigned HOST_WIDE_INT) int1l
1255 < (unsigned HOST_WIDE_INT) int2l)));
1256 }
1257 else
1258 {
1259 low = ((int1h < int2h)
1260 || ((int1h == int2h)
1261 && ((unsigned HOST_WIDE_INT) int1l
1262 < (unsigned HOST_WIDE_INT) int2l)));
1263 }
1264 if (low == (code == MIN_EXPR))
1265 t = build_int_2 (int1l, int1h);
1266 else
1267 t = build_int_2 (int2l, int2h);
1268 break;
1269
1270 default:
1271 abort ();
1272 }
1273 got_it:
1274 TREE_TYPE (t) = TREE_TYPE (arg1);
1275 if (! notrunc)
1276 force_fit_type (t);
1277 TREE_CONSTANT_OVERFLOW (t) = overflow;
1278 return t;
1279 }
1280 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1281 if (TREE_CODE (arg1) == REAL_CST)
1282 {
1283 register REAL_VALUE_TYPE d1;
1284 register REAL_VALUE_TYPE d2;
1285 register REAL_VALUE_TYPE value;
1286 tree t;
1287
1288 d1 = TREE_REAL_CST (arg1);
1289 d2 = TREE_REAL_CST (arg2);
1290 if (setjmp (float_error))
1291 {
1292 pedwarn ("floating overflow in constant expression");
1293 return build (code, TREE_TYPE (arg1), arg1, arg2);
1294 }
1295 set_float_handler (float_error);
1296
1297 #ifdef REAL_ARITHMETIC
1298 REAL_ARITHMETIC (value, code, d1, d2);
1299 #else
1300 switch (code)
1301 {
1302 case PLUS_EXPR:
1303 value = d1 + d2;
1304 break;
1305
1306 case MINUS_EXPR:
1307 value = d1 - d2;
1308 break;
1309
1310 case MULT_EXPR:
1311 value = d1 * d2;
1312 break;
1313
1314 case RDIV_EXPR:
1315 #ifndef REAL_INFINITY
1316 if (d2 == 0)
1317 abort ();
1318 #endif
1319
1320 value = d1 / d2;
1321 break;
1322
1323 case MIN_EXPR:
1324 value = MIN (d1, d2);
1325 break;
1326
1327 case MAX_EXPR:
1328 value = MAX (d1, d2);
1329 break;
1330
1331 default:
1332 abort ();
1333 }
1334 #endif /* no REAL_ARITHMETIC */
1335 t = build_real (TREE_TYPE (arg1),
1336 real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));
1337 set_float_handler (NULL_PTR);
1338 return t;
1339 }
1340 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1341 if (TREE_CODE (arg1) == COMPLEX_CST)
1342 {
1343 register tree r1 = TREE_REALPART (arg1);
1344 register tree i1 = TREE_IMAGPART (arg1);
1345 register tree r2 = TREE_REALPART (arg2);
1346 register tree i2 = TREE_IMAGPART (arg2);
1347 register tree t;
1348
1349 switch (code)
1350 {
1351 case PLUS_EXPR:
1352 t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
1353 const_binop (PLUS_EXPR, i1, i2, notrunc));
1354 break;
1355
1356 case MINUS_EXPR:
1357 t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
1358 const_binop (MINUS_EXPR, i1, i2, notrunc));
1359 break;
1360
1361 case MULT_EXPR:
1362 t = build_complex (const_binop (MINUS_EXPR,
1363 const_binop (MULT_EXPR,
1364 r1, r2, notrunc),
1365 const_binop (MULT_EXPR,
1366 i1, i2, notrunc),
1367 notrunc),
1368 const_binop (PLUS_EXPR,
1369 const_binop (MULT_EXPR,
1370 r1, i2, notrunc),
1371 const_binop (MULT_EXPR,
1372 i1, r2, notrunc),
1373 notrunc));
1374 break;
1375
1376 case RDIV_EXPR:
1377 {
1378 register tree magsquared
1379 = const_binop (PLUS_EXPR,
1380 const_binop (MULT_EXPR, r2, r2, notrunc),
1381 const_binop (MULT_EXPR, i2, i2, notrunc),
1382 notrunc);
1383 t = build_complex (const_binop (RDIV_EXPR,
1384 const_binop (PLUS_EXPR,
1385 const_binop (MULT_EXPR, r1, r2, notrunc),
1386 const_binop (MULT_EXPR, i1, i2, notrunc),
1387 notrunc),
1388 magsquared, notrunc),
1389 const_binop (RDIV_EXPR,
1390 const_binop (MINUS_EXPR,
1391 const_binop (MULT_EXPR, i1, r2, notrunc),
1392 const_binop (MULT_EXPR, r1, i2, notrunc),
1393 notrunc),
1394 magsquared, notrunc));
1395 }
1396 break;
1397
1398 default:
1399 abort ();
1400 }
1401 TREE_TYPE (t) = TREE_TYPE (arg1);
1402 return t;
1403 }
1404 return 0;
1405 }
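/* Editorial illustration, not part of the original file: the complex
   product and quotient formulas folded above, on plain doubles.
   (r1,i1)*(r2,i2) = (r1*r2 - i1*i2, r1*i2 + i1*r2), and the quotient
   multiplies by the conjugate and scales by |z2|^2.  */
#if 0
#include <stdio.h>

int
main ()
{
  double r1 = 1, i1 = 2, r2 = 3, i2 = 4;
  double mag = r2 * r2 + i2 * i2;

  printf ("product:  (%g, %g)\n", r1 * r2 - i1 * i2, r1 * i2 + i1 * r2);
  /* (-5, 10) */
  printf ("quotient: (%g, %g)\n",
          (r1 * r2 + i1 * i2) / mag, (i1 * r2 - r1 * i2) / mag);
  /* (0.44, 0.08) */
  return 0;
}
#endif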
1406 \f
1407 /* Return an INTEGER_CST with value NUMBER and type from `sizetype'. */
1408
1409 tree
1410 size_int (number)
1411 unsigned int number;
1412 {
1413 register tree t;
1414 /* Type-size nodes already made for small sizes. */
1415 static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];
1416
1417 if (number >= 0 && number < 2*HOST_BITS_PER_WIDE_INT + 1
1418 && size_table[number] != 0)
1419 return size_table[number];
1420 if (number >= 0 && number < 2*HOST_BITS_PER_WIDE_INT + 1)
1421 {
1422 push_obstacks_nochange ();
1423 /* Make this a permanent node. */
1424 end_temporary_allocation ();
1425 t = build_int_2 (number, 0);
1426 TREE_TYPE (t) = sizetype;
1427 size_table[number] = t;
1428 pop_obstacks ();
1429 }
1430 else
1431 {
1432 t = build_int_2 (number, 0);
1433 TREE_TYPE (t) = sizetype;
1434 }
1435 return t;
1436 }
1437
1438 /* Combine operands OP1 and OP2 with arithmetic operation CODE.
1439 CODE is a tree code. Data type is taken from `sizetype'.
1440 If the operands are constant, so is the result. */
1441
1442 tree
1443 size_binop (code, arg0, arg1)
1444 enum tree_code code;
1445 tree arg0, arg1;
1446 {
1447 /* Handle the special case of two integer constants faster. */
1448 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
1449 {
1450 /* And some specific cases even faster than that. */
1451 if (code == PLUS_EXPR
1452 && TREE_INT_CST_LOW (arg0) == 0
1453 && TREE_INT_CST_HIGH (arg0) == 0)
1454 return arg1;
1455 if (code == MINUS_EXPR
1456 && TREE_INT_CST_LOW (arg1) == 0
1457 && TREE_INT_CST_HIGH (arg1) == 0)
1458 return arg0;
1459 if (code == MULT_EXPR
1460 && TREE_INT_CST_LOW (arg0) == 1
1461 && TREE_INT_CST_HIGH (arg0) == 0)
1462 return arg1;
1463 /* Handle general case of two integer constants. */
1464 return const_binop (code, arg0, arg1, 1);
1465 }
1466
1467 if (arg0 == error_mark_node || arg1 == error_mark_node)
1468 return error_mark_node;
1469
1470 return fold (build (code, sizetype, arg0, arg1));
1471 }
1472 \f
1473 /* Given T, a tree representing type conversion of ARG1, a constant,
1474 return a constant tree representing the result of conversion. */
1475
1476 static tree
1477 fold_convert (t, arg1)
1478 register tree t;
1479 register tree arg1;
1480 {
1481 register tree type = TREE_TYPE (t);
1482
1483 if (TREE_CODE (type) == POINTER_TYPE
1484 || TREE_CODE (type) == INTEGER_TYPE
1485 || TREE_CODE (type) == ENUMERAL_TYPE)
1486 {
1487 if (TREE_CODE (arg1) == INTEGER_CST)
1488 {
1489 /* Given an integer constant, make new constant with new type,
1490 appropriately sign-extended or truncated. */
1491 t = build_int_2 (TREE_INT_CST_LOW (arg1),
1492 TREE_INT_CST_HIGH (arg1));
1493 /* Carry forward overflow indication unless truncating. */
1494 if (TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (t)))
1495 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg1);
1496 TREE_TYPE (t) = type;
1497 force_fit_type (t);
1498 }
1499 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1500 else if (TREE_CODE (arg1) == REAL_CST)
1501 {
1502 REAL_VALUE_TYPE
1503 l = real_value_from_int_cst (TYPE_MIN_VALUE (type)),
1504 x = TREE_REAL_CST (arg1),
1505 u = real_value_from_int_cst (TYPE_MAX_VALUE (type));
1506 /* See if X will be in range after truncation towards 0.
1507 To compensate for truncation, move the bounds away from 0,
1508 but reject if X exactly equals the adjusted bounds. */
1509 #ifdef REAL_ARITHMETIC
1510 REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
1511 REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
1512 #else
1513 l--;
1514 u++;
1515 #endif
1516 if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
1517 {
1518 pedwarn ("real constant out of range for integer conversion");
1519 return t;
1520 }
1521 #ifndef REAL_ARITHMETIC
1522 {
1523 REAL_VALUE_TYPE d;
1524 HOST_WIDE_INT low, high;
1525 HOST_WIDE_INT half_word
1526 = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);
1527
1528 d = TREE_REAL_CST (arg1);
1529 if (d < 0)
1530 d = -d;
1531
1532 high = (HOST_WIDE_INT) (d / half_word / half_word);
1533 d -= (REAL_VALUE_TYPE) high * half_word * half_word;
1534 if (d >= (REAL_VALUE_TYPE) half_word * half_word / 2)
1535 {
1536 low = d - (REAL_VALUE_TYPE) half_word * half_word / 2;
1537 low |= (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1);
1538 }
1539 else
1540 low = (HOST_WIDE_INT) d;
1541 if (TREE_REAL_CST (arg1) < 0)
1542 neg_double (low, high, &low, &high);
1543 t = build_int_2 (low, high);
1544 }
1545 #else
1546 {
1547 HOST_WIDE_INT low, high;
1548 REAL_VALUE_TO_INT (low, high, TREE_REAL_CST (arg1));
1549 t = build_int_2 (low, high);
1550 }
1551 #endif
1552 TREE_TYPE (t) = type;
1553 force_fit_type (t);
1554 }
1555 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1556 TREE_TYPE (t) = type;
1557 }
1558 else if (TREE_CODE (type) == REAL_TYPE)
1559 {
1560 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1561 if (TREE_CODE (arg1) == INTEGER_CST)
1562 return build_real_from_int_cst (type, arg1);
1563 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1564 if (TREE_CODE (arg1) == REAL_CST)
1565 {
1566 if (setjmp (float_error))
1567 {
1568 pedwarn ("floating overflow in constant expression");
1569 return t;
1570 }
1571 set_float_handler (float_error);
1572
1573 t = build_real (type, real_value_truncate (TYPE_MODE (type),
1574 TREE_REAL_CST (arg1)));
1575 set_float_handler (NULL_PTR);
1576 return t;
1577 }
1578 }
1579 TREE_CONSTANT (t) = 1;
1580 return t;
1581 }
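/* Editorial illustration, not part of the original file: the
   widened-bounds test fold_convert uses for real-to-integer
   conversion, checked against 32-bit `int' limits.  X is in range
   after truncation toward zero exactly when MIN-1 < X < MAX+1.  */
#if 0
#include <stdio.h>

static int
fits_in_int (x)
     double x;
{
  double l = -2147483648.0 - 1, u = 2147483647.0 + 1;

  return l < x && x < u;
}

int
main ()
{
  printf ("%d %d %d\n",
          fits_in_int (2147483647.9),	/* 1: truncates to INT_MAX */
          fits_in_int (2147483648.0),	/* 0: exactly INT_MAX + 1 */
          fits_in_int (-2147483648.7));	/* 1: truncates to INT_MIN */
  return 0;
}
#endif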
1582 \f
1583 /* Return an expr equal to X but certainly not valid as an lvalue. */
1584
1585 tree
1586 non_lvalue (x)
1587 tree x;
1588 {
1589 tree result;
1590
1591 /* These things are certainly not lvalues. */
1592 if (TREE_CODE (x) == NON_LVALUE_EXPR
1593 || TREE_CODE (x) == INTEGER_CST
1594 || TREE_CODE (x) == REAL_CST
1595 || TREE_CODE (x) == STRING_CST
1596 || TREE_CODE (x) == ADDR_EXPR)
1597 return x;
1598
1599 result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
1600 TREE_CONSTANT (result) = TREE_CONSTANT (x);
1601 return result;
1602 }
1603 \f
1604 /* Given a tree comparison code, return the code that is the logical inverse
1605 of the given code. It is not safe to do this for floating-point
1606 comparisons, except for NE_EXPR and EQ_EXPR. */
1607
1608 static enum tree_code
1609 invert_tree_comparison (code)
1610 enum tree_code code;
1611 {
1612 switch (code)
1613 {
1614 case EQ_EXPR:
1615 return NE_EXPR;
1616 case NE_EXPR:
1617 return EQ_EXPR;
1618 case GT_EXPR:
1619 return LE_EXPR;
1620 case GE_EXPR:
1621 return LT_EXPR;
1622 case LT_EXPR:
1623 return GE_EXPR;
1624 case LE_EXPR:
1625 return GT_EXPR;
1626 default:
1627 abort ();
1628 }
1629 }
1630
1631 /* Similar, but return the comparison that results if the operands are
1632 swapped. This is safe for floating-point. */
1633
1634 static enum tree_code
1635 swap_tree_comparison (code)
1636 enum tree_code code;
1637 {
1638 switch (code)
1639 {
1640 case EQ_EXPR:
1641 case NE_EXPR:
1642 return code;
1643 case GT_EXPR:
1644 return LT_EXPR;
1645 case GE_EXPR:
1646 return LE_EXPR;
1647 case LT_EXPR:
1648 return GT_EXPR;
1649 case LE_EXPR:
1650 return GE_EXPR;
1651 default:
1652 abort ();
1653 }
1654 }
1655 \f
1656 /* Return nonzero if two operands are necessarily equal.
1657 If ONLY_CONST is non-zero, only return non-zero for constants.
1658 This function tests whether the operands are indistinguishable;
1659 it does not test whether they are equal using C's == operation.
1660 The distinction is important for IEEE floating point, because
1661 (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
1662 (2) two NaNs may be indistinguishable, but NaN!=NaN. */
1663
1664 int
1665 operand_equal_p (arg0, arg1, only_const)
1666 tree arg0, arg1;
1667 int only_const;
1668 {
1669 /* If both types don't have the same signedness, then we can't consider
1670 them equal. We must check this before the STRIP_NOPS calls
1671 because they may change the signedness of the arguments. */
1672 if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
1673 return 0;
1674
1675 STRIP_NOPS (arg0);
1676 STRIP_NOPS (arg1);
1677
1678 /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
1679 We don't care about side effects in that case because the SAVE_EXPR
1680 takes care of that for us. */
1681 if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
1682 return ! only_const;
1683
1684 if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
1685 return 0;
1686
1687 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1688 && TREE_CODE (arg0) == ADDR_EXPR
1689 && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
1690 return 1;
1691
1692 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1693 && TREE_CODE (arg0) == INTEGER_CST
1694 && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
1695 && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
1696 return 1;
1697
1698 /* Detect when real constants are equal. */
1699 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1700 && TREE_CODE (arg0) == REAL_CST)
1701 return !bcmp (&TREE_REAL_CST (arg0), &TREE_REAL_CST (arg1),
1702 sizeof (REAL_VALUE_TYPE));
1703
1704 if (only_const)
1705 return 0;
1706
1707 if (arg0 == arg1)
1708 return 1;
1709
1710 if (TREE_CODE (arg0) != TREE_CODE (arg1))
1711 return 0;
1712 /* This is needed for conversions and for COMPONENT_REF.
1713 Might as well play it safe and always test this. */
1714 if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
1715 return 0;
1716
1717 switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
1718 {
1719 case '1':
1720 /* Two conversions are equal only if signedness and modes match. */
1721 if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
1722 && (TREE_UNSIGNED (TREE_TYPE (arg0))
1723 != TREE_UNSIGNED (TREE_TYPE (arg1))))
1724 return 0;
1725
1726 return operand_equal_p (TREE_OPERAND (arg0, 0),
1727 TREE_OPERAND (arg1, 0), 0);
1728
1729 case '<':
1730 case '2':
1731 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1732 TREE_OPERAND (arg1, 0), 0)
1733 && operand_equal_p (TREE_OPERAND (arg0, 1),
1734 TREE_OPERAND (arg1, 1), 0));
1735
1736 case 'r':
1737 switch (TREE_CODE (arg0))
1738 {
1739 case INDIRECT_REF:
1740 return operand_equal_p (TREE_OPERAND (arg0, 0),
1741 TREE_OPERAND (arg1, 0), 0);
1742
1743 case COMPONENT_REF:
1744 case ARRAY_REF:
1745 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1746 TREE_OPERAND (arg1, 0), 0)
1747 && operand_equal_p (TREE_OPERAND (arg0, 1),
1748 TREE_OPERAND (arg1, 1), 0));
1749
1750 case BIT_FIELD_REF:
1751 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1752 TREE_OPERAND (arg1, 0), 0)
1753 && operand_equal_p (TREE_OPERAND (arg0, 1),
1754 TREE_OPERAND (arg1, 1), 0)
1755 && operand_equal_p (TREE_OPERAND (arg0, 2),
1756 TREE_OPERAND (arg1, 2), 0));
1757 }
1758 break;
1759 }
1760
1761 return 0;
1762 }
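/* Editorial illustration, not part of the original file: why
   operand_equal_p cannot rely on `==' for reals on an IEEE host.
   -0.0 compares equal to 0.0 although the two are distinguishable
   (for example by 1/x), and a NaN never compares equal to itself.  */
#if 0
#include <stdio.h>

int
main ()
{
  double pz = 0.0, nz = -0.0;
  double nan = pz / pz;			/* assumed IEEE: a quiet NaN */

  printf ("%d\n", pz == nz);			/* 1, yet signs differ */
  printf ("%d\n", 1.0 / pz == 1.0 / nz);	/* 0: +Inf vs -Inf */
  printf ("%d\n", nan == nan);			/* 0 */
  return 0;
}
#endif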
1763 \f
1764 /* Similar to operand_equal_p, but see if ARG0 might have been made by
1765 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
1766
1767 When in doubt, return 0. */
1768
1769 static int
1770 operand_equal_for_comparison_p (arg0, arg1, other)
1771 tree arg0, arg1;
1772 tree other;
1773 {
1774 int unsignedp1, unsignedpo;
1775 tree primarg1, primother;
1776 int correct_width;
1777
1778 if (operand_equal_p (arg0, arg1, 0))
1779 return 1;
1780
1781 if (TREE_CODE (TREE_TYPE (arg0)) != INTEGER_TYPE)
1782 return 0;
1783
1784 /* Duplicate what shorten_compare does to ARG1 and see if that gives the
1785 actual comparison operand, ARG0.
1786
1787 First throw away any conversions to wider types
1788 already present in the operands. */
1789
1790 primarg1 = get_narrower (arg1, &unsignedp1);
1791 primother = get_narrower (other, &unsignedpo);
1792
1793 correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
1794 if (unsignedp1 == unsignedpo
1795 && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
1796 && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
1797 {
1798 tree type = TREE_TYPE (arg0);
1799
1800 /* Make sure shorter operand is extended the right way
1801 to match the longer operand. */
1802 primarg1 = convert (signed_or_unsigned_type (unsignedp1,
1803 TREE_TYPE (primarg1)),
1804 primarg1);
1805
1806 if (operand_equal_p (arg0, convert (type, primarg1), 0))
1807 return 1;
1808 }
1809
1810 return 0;
1811 }
1812 \f
1813 /* See if ARG is an expression that is either a comparison or is performing
1814 arithmetic on comparisons. The comparisons must only be comparing
1815 two different values, which will be stored in *CVAL1 and *CVAL2; if
1816 they are non-zero it means that some operands have already been found.
1817 No variables may be used anywhere else in the expression except in the
1818 comparisons.
1819
1820 If this is true, return 1. Otherwise, return zero. */
1821
1822 static int
1823 twoval_comparison_p (arg, cval1, cval2)
1824 tree arg;
1825 tree *cval1, *cval2;
1826 {
1827 enum tree_code code = TREE_CODE (arg);
1828 char class = TREE_CODE_CLASS (code);
1829
1830 /* We can handle some of the 'e' cases here. */
1831 if (class == 'e'
1832 && (code == TRUTH_NOT_EXPR
1833 || (code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)))
1834 class = '1';
1835 else if (class == 'e'
1836 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
1837 || code == COMPOUND_EXPR))
1838 class = '2';
1839
1840 switch (class)
1841 {
1842 case '1':
1843 return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2);
1844
1845 case '2':
1846 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2)
1847 && twoval_comparison_p (TREE_OPERAND (arg, 1), cval1, cval2));
1848
1849 case 'c':
1850 return 1;
1851
1852 case 'e':
1853 if (code == COND_EXPR)
1854 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2)
1855 && twoval_comparison_p (TREE_OPERAND (arg, 1), cval1, cval2)
1856 && twoval_comparison_p (TREE_OPERAND (arg, 2),
1857 cval1, cval2));
1858 return 0;
1859
1860 case '<':
1861 /* First see if we can handle the first operand, then the second. For
1862 the second operand, we know *CVAL1 can't be zero. It must be that
1863 one side of the comparison is each of the values; test for the
1864 case where this isn't true by failing if the two operands
1865 are the same. */
1866
1867 if (operand_equal_p (TREE_OPERAND (arg, 0),
1868 TREE_OPERAND (arg, 1), 0))
1869 return 0;
1870
1871 if (*cval1 == 0)
1872 *cval1 = TREE_OPERAND (arg, 0);
1873 else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
1874 ;
1875 else if (*cval2 == 0)
1876 *cval2 = TREE_OPERAND (arg, 0);
1877 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
1878 ;
1879 else
1880 return 0;
1881
1882 if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
1883 ;
1884 else if (*cval2 == 0)
1885 *cval2 = TREE_OPERAND (arg, 1);
1886 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
1887 ;
1888 else
1889 return 0;
1890
1891 return 1;
1892 }
1893
1894 return 0;
1895 }
1896 \f
1897 /* ARG is a tree that is known to contain just arithmetic operations and
1898 comparisons. Evaluate the operations in the tree substituting NEW0 for
1899 any occurrence of OLD0 as an operand of a comparison and likewise for
1900 NEW1 and OLD1. */
1901
1902 static tree
1903 eval_subst (arg, old0, new0, old1, new1)
1904 tree arg;
1905 tree old0, new0, old1, new1;
1906 {
1907 tree type = TREE_TYPE (arg);
1908 enum tree_code code = TREE_CODE (arg);
1909 char class = TREE_CODE_CLASS (code);
1910
1911 /* We can handle some of the 'e' cases here. */
1912 if (class == 'e' && code == TRUTH_NOT_EXPR)
1913 class = '1';
1914 else if (class == 'e'
1915 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
1916 class = '2';
1917
1918 switch (class)
1919 {
1920 case '1':
1921 return fold (build1 (code, type,
1922 eval_subst (TREE_OPERAND (arg, 0),
1923 old0, new0, old1, new1)));
1924
1925 case '2':
1926 return fold (build (code, type,
1927 eval_subst (TREE_OPERAND (arg, 0),
1928 old0, new0, old1, new1),
1929 eval_subst (TREE_OPERAND (arg, 1),
1930 old0, new0, old1, new1)));
1931
1932 case 'e':
1933 switch (code)
1934 {
1935 case SAVE_EXPR:
1936 return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);
1937
1938 case COMPOUND_EXPR:
1939 return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);
1940
1941 case COND_EXPR:
1942 return fold (build (code, type,
1943 eval_subst (TREE_OPERAND (arg, 0),
1944 old0, new0, old1, new1),
1945 eval_subst (TREE_OPERAND (arg, 1),
1946 old0, new0, old1, new1),
1947 eval_subst (TREE_OPERAND (arg, 2),
1948 old0, new0, old1, new1)));
1949 }
1950
1951 case '<':
1952 {
1953 tree arg0 = TREE_OPERAND (arg, 0);
1954 tree arg1 = TREE_OPERAND (arg, 1);
1955
1956 /* We need to check both for exact equality and tree equality. The
1957 former will be true if the operand has a side-effect. In that
1958 case, we know the operand occurred exactly once. */
1959
1960 if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
1961 arg0 = new0;
1962 else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
1963 arg0 = new1;
1964
1965 if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
1966 arg1 = new0;
1967 else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
1968 arg1 = new1;
1969
1970 return fold (build (code, type, arg0, arg1));
1971 }
1972 }
1973
1974 return arg;
1975 }
1976 \f
1977 /* Return a tree for the case when the result of an expression is RESULT
1978 converted to TYPE and OMITTED was previously an operand of the expression
1979 but is now not needed (e.g., we folded OMITTED * 0).
1980
1981 If OMITTED has side effects, we must evaluate it. Otherwise, just do
1982 the conversion of RESULT to TYPE. */
1983
1984 static tree
1985 omit_one_operand (type, result, omitted)
1986 tree type, result, omitted;
1987 {
1988 tree t = convert (type, result);
1989
1990 if (TREE_SIDE_EFFECTS (omitted))
1991 return build (COMPOUND_EXPR, type, omitted, t);
1992
1993 return t;
1994 }
1995 \f
1996 /* Return a simplified tree node for the truth-negation of ARG. This
1997 never alters ARG itself. We assume that ARG is an operation that
1998 returns a truth value (0 or 1). */
1999
2000 tree
2001 invert_truthvalue (arg)
2002 tree arg;
2003 {
2004 tree type = TREE_TYPE (arg);
2005 enum tree_code code = TREE_CODE (arg);
2006
2007 /* If this is a comparison, we can simply invert it, except for
2008 floating-point non-equality comparisons, in which case we just
2009 enclose a TRUTH_NOT_EXPR around what we have. */
2010
2011 if (TREE_CODE_CLASS (code) == '<')
2012 {
2013 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (arg, 0))) == REAL_TYPE
2014 && code != NE_EXPR && code != EQ_EXPR)
2015 return build1 (TRUTH_NOT_EXPR, type, arg);
2016 else
2017 return build (invert_tree_comparison (code), type,
2018 TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
2019 }
2020
2021 switch (code)
2022 {
2023 case INTEGER_CST:
2024 return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
2025 && TREE_INT_CST_HIGH (arg) == 0, 0));
2026
2027 case TRUTH_AND_EXPR:
2028 return build (TRUTH_OR_EXPR, type,
2029 invert_truthvalue (TREE_OPERAND (arg, 0)),
2030 invert_truthvalue (TREE_OPERAND (arg, 1)));
2031
2032 case TRUTH_OR_EXPR:
2033 return build (TRUTH_AND_EXPR, type,
2034 invert_truthvalue (TREE_OPERAND (arg, 0)),
2035 invert_truthvalue (TREE_OPERAND (arg, 1)));
2036
2037 case TRUTH_XOR_EXPR:
2038 /* Here we can invert either operand. We invert the first operand
2039 unless the second operand is a TRUTH_NOT_EXPR in which case our
2040 result is the XOR of the first operand with the inside of the
2041 negation of the second operand. */
2042
2043 if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
2044 return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
2045 TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
2046 else
2047 return build (TRUTH_XOR_EXPR, type,
2048 invert_truthvalue (TREE_OPERAND (arg, 0)),
2049 TREE_OPERAND (arg, 1));
2050
2051 case TRUTH_ANDIF_EXPR:
2052 return build (TRUTH_ORIF_EXPR, type,
2053 invert_truthvalue (TREE_OPERAND (arg, 0)),
2054 invert_truthvalue (TREE_OPERAND (arg, 1)));
2055
2056 case TRUTH_ORIF_EXPR:
2057 return build (TRUTH_ANDIF_EXPR, type,
2058 invert_truthvalue (TREE_OPERAND (arg, 0)),
2059 invert_truthvalue (TREE_OPERAND (arg, 1)));
2060
2061 case TRUTH_NOT_EXPR:
2062 return TREE_OPERAND (arg, 0);
2063
2064 case COND_EXPR:
2065 return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
2066 invert_truthvalue (TREE_OPERAND (arg, 1)),
2067 invert_truthvalue (TREE_OPERAND (arg, 2)));
2068
2069 case COMPOUND_EXPR:
2070 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
2071 invert_truthvalue (TREE_OPERAND (arg, 1)));
2072
2073 case NON_LVALUE_EXPR:
2074 return invert_truthvalue (TREE_OPERAND (arg, 0));
2075
2076 case NOP_EXPR:
2077 case CONVERT_EXPR:
2078 case FLOAT_EXPR:
2079 return build1 (TREE_CODE (arg), type,
2080 invert_truthvalue (TREE_OPERAND (arg, 0)));
2081
2082 case BIT_AND_EXPR:
2083 if (! integer_onep (TREE_OPERAND (arg, 1)))
2084 abort ();
2085 return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));
2086 }
2087
2088 abort ();
2089 }
2090
2091 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
2092 operands are another bit-wise operation with a common input. If so,
2093 distribute the bit operations to save an operation and possibly two if
2094 constants are involved. For example, convert
2095 (A | B) & (A | C) into A | (B & C)
2096 Further simplification will occur if B and C are constants.
2097
2098 If this optimization cannot be done, 0 will be returned. */
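/* For example, with constants, `(a | 3) & (a | 5)' first becomes
   `a | (3 & 5)' and the inner fold then reduces it to `a | 1'. */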
2099
2100 static tree
2101 distribute_bit_expr (code, type, arg0, arg1)
2102 enum tree_code code;
2103 tree type;
2104 tree arg0, arg1;
2105 {
2106 tree common;
2107 tree left, right;
2108
2109 if (TREE_CODE (arg0) != TREE_CODE (arg1)
2110 || TREE_CODE (arg0) == code
2111 || (TREE_CODE (arg0) != BIT_AND_EXPR
2112 && TREE_CODE (arg0) != BIT_IOR_EXPR))
2113 return 0;
2114
2115 if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
2116 {
2117 common = TREE_OPERAND (arg0, 0);
2118 left = TREE_OPERAND (arg0, 1);
2119 right = TREE_OPERAND (arg1, 1);
2120 }
2121 else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
2122 {
2123 common = TREE_OPERAND (arg0, 0);
2124 left = TREE_OPERAND (arg0, 1);
2125 right = TREE_OPERAND (arg1, 0);
2126 }
2127 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
2128 {
2129 common = TREE_OPERAND (arg0, 1);
2130 left = TREE_OPERAND (arg0, 0);
2131 right = TREE_OPERAND (arg1, 1);
2132 }
2133 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
2134 {
2135 common = TREE_OPERAND (arg0, 1);
2136 left = TREE_OPERAND (arg0, 0);
2137 right = TREE_OPERAND (arg1, 0);
2138 }
2139 else
2140 return 0;
2141
2142 return fold (build (TREE_CODE (arg0), type, common,
2143 fold (build (code, type, left, right))));
2144 }
2145 \f
2146 /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
2147 starting at BITPOS. The field is unsigned if UNSIGNEDP is non-zero. */
2148
2149 static tree
2150 make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
2151 tree inner;
2152 tree type;
2153 int bitsize, bitpos;
2154 int unsignedp;
2155 {
2156 tree result = build (BIT_FIELD_REF, type, inner,
2157 size_int (bitsize), size_int (bitpos));
2158
2159 TREE_UNSIGNED (result) = unsignedp;
2160
2161 return result;
2162 }
2163
2164 /* Optimize a bit-field compare.
2165
2166 There are two cases: First is a compare against a constant and the
2167 second is a comparison of two items where the fields are at the same
2168 bit position relative to the start of a chunk (byte, halfword, word)
2169 large enough to contain it. In these cases we can avoid the shift
2170 implicit in bitfield extractions.
2171
2172 For constants, we emit a compare of the shifted constant with the
2173 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
2174 compared. For two fields at the same position, we do the ANDs with the
2175 similar mask and compare the result of the ANDs.
2176
2177 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
2178 COMPARE_TYPE is the type of the comparison, and LHS and RHS
2179 are the left and right operands of the comparison, respectively.
2180
2181 If the optimization described above can be done, we return the resulting
2182 tree. Otherwise we return zero. */
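/* As a sketch (the exact layout and mode chosen are target-dependent):
   given `struct s { unsigned f : 3; } x;', the test `x.f == 5' can
   become `(CONTAINING_WORD & MASK) == (5 << BITPOS)', where
   CONTAINING_WORD, MASK, and BITPOS stand for the byte/halfword/word
   load, mask, and shift count computed below. */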
2183
2184 static tree
2185 optimize_bit_field_compare (code, compare_type, lhs, rhs)
2186 enum tree_code code;
2187 tree compare_type;
2188 tree lhs, rhs;
2189 {
2190 int lbitpos, lbitsize, rbitpos, rbitsize;
2191 int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
2192 tree type = TREE_TYPE (lhs);
2193 tree signed_type, unsigned_type;
2194 int const_p = TREE_CODE (rhs) == INTEGER_CST;
2195 enum machine_mode lmode, rmode, lnmode, rnmode;
2196 int lunsignedp, runsignedp;
2197 int lvolatilep = 0, rvolatilep = 0;
2198 tree linner, rinner;
2199 tree mask;
2200 tree offset;
2201
2202 /* Get all the information about the extractions being done. If the bit size
2203 is the same as the size of the underlying object, we aren't doing an
2204 extraction at all and so can do nothing. */
2205 linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
2206 &lunsignedp, &lvolatilep);
2207 if (lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
2208 || offset != 0)
2209 return 0;
2210
2211 if (!const_p)
2212 {
2213 /* If this is not a constant, we can only do something if bit positions,
2214 sizes, and signedness are the same. */
2215 rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
2216 &rmode, &runsignedp, &rvolatilep);
2217
2218 if (lbitpos != rbitpos || lbitsize != rbitsize
2219 || lunsignedp != runsignedp || offset != 0)
2220 return 0;
2221 }
2222
2223 /* See if we can find a mode to refer to this field. We should be able to,
2224 but fail if we can't. */
2225 lnmode = get_best_mode (lbitsize, lbitpos,
2226 TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
2227 lvolatilep);
2228 if (lnmode == VOIDmode)
2229 return 0;
2230
2231 /* Set signed and unsigned types of the precision of this mode for the
2232 shifts below. */
2233 signed_type = type_for_mode (lnmode, 0);
2234 unsigned_type = type_for_mode (lnmode, 1);
2235
2236 if (! const_p)
2237 {
2238 rnmode = get_best_mode (rbitsize, rbitpos,
2239 TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
2240 rvolatilep);
2241 if (rnmode == VOIDmode)
2242 return 0;
2243 }
2244
2245 /* Compute the bit position and size for the new reference and our offset
2246 within it. If the new reference is the same size as the original, we
2247 won't optimize anything, so return zero. */
2248 lnbitsize = GET_MODE_BITSIZE (lnmode);
2249 lnbitpos = lbitpos & ~ (lnbitsize - 1);
2250 lbitpos -= lnbitpos;
2251 if (lnbitsize == lbitsize)
2252 return 0;
2253
2254 if (! const_p)
2255 {
2256 rnbitsize = GET_MODE_BITSIZE (rnmode);
2257 rnbitpos = rbitpos & ~ (rnbitsize - 1);
2258 rbitpos -= rnbitpos;
2259 if (rnbitsize == rbitsize)
2260 return 0;
2261 }
2262
2263 #if BYTES_BIG_ENDIAN
2264 lbitpos = lnbitsize - lbitsize - lbitpos;
2265 #endif
2266
2267 /* Make the mask to be used against the extracted field. */
2268 mask = convert (unsigned_type, build_int_2 (~0, ~0));
2269 mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
2270 mask = const_binop (RSHIFT_EXPR, mask,
2271 size_int (lnbitsize - lbitsize - lbitpos), 0);
2272
2273 if (! const_p)
2274 /* If not comparing with constant, just rework the comparison
2275 and return. */
2276 return build (code, compare_type,
2277 build (BIT_AND_EXPR, unsigned_type,
2278 make_bit_field_ref (linner, unsigned_type,
2279 lnbitsize, lnbitpos, 1),
2280 mask),
2281 build (BIT_AND_EXPR, unsigned_type,
2282 make_bit_field_ref (rinner, unsigned_type,
2283 rnbitsize, rnbitpos, 1),
2284 mask));
2285
2286 /* Otherwise, we are handling the constant case. See if the constant is too
2287 big for the field. Warn and return a tree for the constant result if so. We do
2288 this not only for its own sake, but to avoid having to test for this
2289 error case below. If we didn't, we might generate wrong code.
2290
2291 For unsigned fields, the constant shifted right by the field length should
2292 be all zero. For signed fields, the high-order bits should agree with
2293 the sign bit. */
2294
2295 if (lunsignedp)
2296 {
2297 if (! integer_zerop (const_binop (RSHIFT_EXPR,
2298 convert (unsigned_type, rhs),
2299 size_int (lbitsize), 0)))
2300 {
2301 warning ("comparison is always %s due to width of bitfield",
2302 code == NE_EXPR ? "one" : "zero");
2303 return convert (compare_type,
2304 (code == NE_EXPR
2305 ? integer_one_node : integer_zero_node));
2306 }
2307 }
2308 else
2309 {
2310 tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
2311 size_int (lbitsize - 1), 0);
2312 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
2313 {
2314 warning ("comparison is always %s due to width of bitfield",
2315 code == NE_EXPR ? "one" : "zero");
2316 return convert (compare_type,
2317 (code == NE_EXPR
2318 ? integer_one_node : integer_zero_node));
2319 }
2320 }
2321
2322 /* Single-bit compares should always be against zero. */
2323 if (lbitsize == 1 && ! integer_zerop (rhs))
2324 {
2325 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
2326 rhs = convert (type, integer_zero_node);
2327 }
2328
2329 /* Make a new bitfield reference, shift the constant over the
2330 appropriate number of bits, and mask it with the computed mask
2331 (in case this was a signed field). Then build the comparison. */
2332 lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
2333
2334 rhs = fold (const_binop (BIT_AND_EXPR,
2335 const_binop (LSHIFT_EXPR,
2336 convert (unsigned_type, rhs),
2337 size_int (lbitpos)),
2338 mask, 0));
2339
2340 return build (code, compare_type,
2341 build (BIT_AND_EXPR, unsigned_type, lhs, mask),
2342 rhs);
2343 }
2344 \f
2345 /* Subroutine for fold_truthop: decode a field reference.
2346
2347 If EXP is a component reference, we return the innermost reference.
2348
2349 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2350 set to the starting bit number.
2351
2352 If the innermost field can be completely contained in a mode-sized
2353 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2354
2355 *PVOLATILEP is set to 1 if any expression encountered is volatile;
2356 otherwise it is not changed.
2357
2358 *PUNSIGNEDP is set to the signedness of the field.
2359
2360 *PMASK is set to the mask used. This is either contained in a
2361 BIT_AND_EXPR or derived from the width of the field.
2362
2363 Return 0 if this is not a component reference or is one that we can't
2364 do anything with. */
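/* For example, decoding the lhs of `(s.f & 0xf) == 4' strips the
   BIT_AND_EXPR, sets *PMASK to 0xf, and returns the reference to the
   field; with no explicit BIT_AND_EXPR, the mask defaults to
   *PBITSIZE low-order one bits. */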
2365
2366 static tree
2367 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2368 pvolatilep, pmask)
2369 tree exp;
2370 int *pbitsize, *pbitpos;
2371 enum machine_mode *pmode;
2372 int *punsignedp, *pvolatilep;
2373 tree *pmask;
2374 {
2375 tree mask = 0;
2376 tree inner;
2377 tree offset;
2378
2379 STRIP_NOPS (exp);
2380
2381 if (TREE_CODE (exp) == BIT_AND_EXPR)
2382 {
2383 mask = TREE_OPERAND (exp, 1);
2384 exp = TREE_OPERAND (exp, 0);
2385 STRIP_NOPS (exp); STRIP_NOPS (mask);
2386 if (TREE_CODE (mask) != INTEGER_CST)
2387 return 0;
2388 }
2389
2390 if (TREE_CODE (exp) != COMPONENT_REF && TREE_CODE (exp) != ARRAY_REF
2391 && TREE_CODE (exp) != BIT_FIELD_REF)
2392 return 0;
2393
2394 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2395 punsignedp, pvolatilep);
2396 if (*pbitsize < 0 || offset != 0)
2397 return 0;
2398
2399 if (mask == 0)
2400 {
2401 tree unsigned_type = type_for_size (*pbitsize, 1);
2402 int precision = TYPE_PRECISION (unsigned_type);
2403
2404 mask = convert (unsigned_type, build_int_2 (~0, ~0));
2405 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2406 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2407 }
2408
2409 *pmask = mask;
2410 return inner;
2411 }
2412
2413 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order
2414 bit positions. */
2415
2416 static int
2417 all_ones_mask_p (mask, size)
2418 tree mask;
2419 int size;
2420 {
2421 tree type = TREE_TYPE (mask);
2422 int precision = TYPE_PRECISION (type);
2423
2424 return
2425 operand_equal_p (mask,
2426 const_binop (RSHIFT_EXPR,
2427 const_binop (LSHIFT_EXPR,
2428 convert (signed_type (type),
2429 build_int_2 (~0, ~0)),
2430 size_int (precision - size), 0),
2431 size_int (precision - size), 0),
2432 0);
2433 }
2434
2435 /* Subroutine for fold_truthop: determine if an operand is simple enough
2436 to be evaluated unconditionally. */
2437
2438 #ifdef __GNUC__
2439 __inline
2440 #endif
2441 static int
2442 simple_operand_p (exp)
2443 tree exp;
2444 {
2445 /* Strip any conversions that don't change the machine mode. */
2446 while ((TREE_CODE (exp) == NOP_EXPR
2447 || TREE_CODE (exp) == CONVERT_EXPR)
2448 && (TYPE_MODE (TREE_TYPE (exp))
2449 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2450 exp = TREE_OPERAND (exp, 0);
2451
2452 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2453 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2454 && ! TREE_ADDRESSABLE (exp)
2455 && ! TREE_THIS_VOLATILE (exp)
2456 && ! DECL_NONLOCAL (exp)
2457 /* Don't regard global variables as simple. They may be
2458 allocated in ways unknown to the compiler (shared memory,
2459 #pragma weak, etc). */
2460 && ! TREE_PUBLIC (exp)
2461 && ! DECL_EXTERNAL (exp)
2462 /* Loading a static variable is unduly expensive, but global
2463 registers aren't expensive. */
2464 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
2465 }
2466 \f
2467 /* Subroutine for fold_truthop: try to optimize a range test.
2468
2469 For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".
2470
2471 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2472 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2473 (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of
2474 the result.
2475
2476 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2477 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2478 larger than HI_CST (they may be equal).
2479
2480 We return the simplified tree or 0 if no optimization is possible. */
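/* Worked example: for `i > 1 && i < 10' the constants normalize to
   LO_CST == 2 (1 incremented past GT_EXPR) and HI_CST == 9 (10
   decremented past LT_EXPR), giving `(unsigned) (i - 2) <= 7'. */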
2481
2482 tree
2483 range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
2484 enum tree_code jcode, lo_code, hi_code;
2485 tree type, var, lo_cst, hi_cst;
2486 {
2487 tree utype;
2488 enum tree_code rcode;
2489
2490 /* See if this is a range test and normalize the constant terms. */
2491
2492 if (jcode == TRUTH_AND_EXPR)
2493 {
2494 switch (lo_code)
2495 {
2496 case NE_EXPR:
2497 /* See if we have VAR != CST && VAR != CST+1. */
2498 if (! (hi_code == NE_EXPR
2499 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2500 && tree_int_cst_equal (integer_one_node,
2501 const_binop (MINUS_EXPR,
2502 hi_cst, lo_cst, 0))))
2503 return 0;
2504
2505 rcode = GT_EXPR;
2506 break;
2507
2508 case GT_EXPR:
2509 case GE_EXPR:
2510 if (hi_code == LT_EXPR)
2511 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2512 else if (hi_code != LE_EXPR)
2513 return 0;
2514
2515 if (lo_code == GT_EXPR)
2516 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2517
2518 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
2519 rcode = LE_EXPR;
2520 break;
2521
2522 default:
2523 return 0;
2524 }
2525 }
2526 else
2527 {
2528 switch (lo_code)
2529 {
2530 case EQ_EXPR:
2531 /* See if we have VAR == CST || VAR == CST+1. */
2532 if (! (hi_code == EQ_EXPR
2533 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2534 && tree_int_cst_equal (integer_one_node,
2535 const_binop (MINUS_EXPR,
2536 hi_cst, lo_cst, 0))))
2537 return 0;
2538
2539 rcode = LE_EXPR;
2540 break;
2541
2542 case LE_EXPR:
2543 case LT_EXPR:
2544 if (hi_code == GE_EXPR)
2545 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2546 else if (hi_code != GT_EXPR)
2547 return 0;
2548
2549 if (lo_code == LE_EXPR)
2550 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2551
2552 /* We now have VAR < LO_CST || VAR > HI_CST. */
2553 rcode = GT_EXPR;
2554 break;
2555
2556 default:
2557 return 0;
2558 }
2559 }
2560
2561 /* When normalizing, it is possible to both increment the smaller constant
2562 and decrement the larger constant. See if they are still ordered. */
2563 if (tree_int_cst_lt (hi_cst, lo_cst))
2564 return 0;
2565
2566 /* Fail if VAR isn't an integer. */
2567 utype = TREE_TYPE (var);
2568 if (TREE_CODE (utype) != INTEGER_TYPE
2569 && TREE_CODE (utype) != ENUMERAL_TYPE)
2570 return 0;
2571
2572 /* The range test is invalid if subtracting the two constants results
2573 in overflow. This can happen in traditional mode. */
2574 if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
2575 || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
2576 return 0;
2577
2578 if (! TREE_UNSIGNED (utype))
2579 {
2580 utype = unsigned_type (utype);
2581 var = convert (utype, var);
2582 lo_cst = convert (utype, lo_cst);
2583 hi_cst = convert (utype, hi_cst);
2584 }
2585
2586 return fold (convert (type,
2587 build (rcode, utype,
2588 build (MINUS_EXPR, utype, var, lo_cst),
2589 const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
2590 }
2591 \f
2592 /* Find ways of folding logical expressions of LHS and RHS:
2593 Try to merge two comparisons to the same innermost item.
2594 Look for range tests like "ch >= '0' && ch <= '9'".
2595 Look for combinations of simple terms on machines with expensive branches
2596 and evaluate the RHS unconditionally.
2597
2598 For example, if we have p->a == 2 && p->b == 4 and we can make an
2599 object large enough to span both A and B, we can do this with a comparison
2600 against the object ANDed with a mask.
2601
2602 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2603 operations to do this with one comparison.
2604
2605 We check for both normal comparisons and the BIT_AND_EXPRs made by this
2606 function and the one above.
2607
2608 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2609 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2610
2611 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its
2612 two operands.
2613
2614 We return the simplified tree or 0 if no optimization is possible. */
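/* For instance, if A and B are adjacent char-sized fields, the pair of
   tests `p->a == 2 && p->b == 4' may be merged into one halfword load,
   mask, and compare; the constant compared against depends on the byte
   order of the target. */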
2615
2616 static tree
2617 fold_truthop (code, truth_type, lhs, rhs)
2618 enum tree_code code;
2619 tree truth_type, lhs, rhs;
2620 {
2621 /* If this is the "or" of two comparisons, we can do something if
2622 the comparisons are NE_EXPR. If this is the "and", we can do something
2623 if the comparisons are EQ_EXPR. I.e.,
2624 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2625
2626 WANTED_CODE is this operation code. For single bit fields, we can
2627 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2628 comparison for one-bit fields. */
2629
2630 enum tree_code wanted_code;
2631 enum tree_code lcode, rcode;
2632 tree ll_arg, lr_arg, rl_arg, rr_arg;
2633 tree ll_inner, lr_inner, rl_inner, rr_inner;
2634 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2635 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2636 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2637 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2638 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2639 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2640 enum machine_mode lnmode, rnmode;
2641 tree ll_mask, lr_mask, rl_mask, rr_mask;
2642 tree l_const, r_const;
2643 tree type, result;
2644 int first_bit, end_bit;
2645 int volatilep;
2646
2647 /* Start by getting the comparison codes and seeing if this looks like
2648 a range test. Fail if anything is volatile. */
2649
2650 if (TREE_SIDE_EFFECTS (lhs)
2651 || TREE_SIDE_EFFECTS (rhs))
2652 return 0;
2653
2654 lcode = TREE_CODE (lhs);
2655 rcode = TREE_CODE (rhs);
2656
2657 if (TREE_CODE_CLASS (lcode) != '<'
2658 || TREE_CODE_CLASS (rcode) != '<')
2659 return 0;
2660
2661 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
2662 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
2663
2664 ll_arg = TREE_OPERAND (lhs, 0);
2665 lr_arg = TREE_OPERAND (lhs, 1);
2666 rl_arg = TREE_OPERAND (rhs, 0);
2667 rr_arg = TREE_OPERAND (rhs, 1);
2668
2669 if (TREE_CODE (lr_arg) == INTEGER_CST
2670 && TREE_CODE (rr_arg) == INTEGER_CST
2671 && operand_equal_p (ll_arg, rl_arg, 0))
2672 {
2673 if (tree_int_cst_lt (lr_arg, rr_arg))
2674 result = range_test (code, truth_type, lcode, rcode,
2675 ll_arg, lr_arg, rr_arg);
2676 else
2677 result = range_test (code, truth_type, rcode, lcode,
2678 ll_arg, rr_arg, lr_arg);
2679
2680 /* If this isn't a range test, it also isn't a comparison that
2681 can be merged. However, it wins to evaluate the RHS unconditionally
2682 on machines with expensive branches. */
2683
2684 if (result == 0 && BRANCH_COST >= 2)
2685 {
2686 if (TREE_CODE (ll_arg) != VAR_DECL
2687 && TREE_CODE (ll_arg) != PARM_DECL)
2688 {
2689 /* Avoid evaluating the variable part twice. */
2690 ll_arg = save_expr (ll_arg);
2691 lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
2692 rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
2693 }
2694 return build (code, truth_type, lhs, rhs);
2695 }
2696 return result;
2697 }
2698
2699 /* If the RHS can be evaluated unconditionally and its operands are
2700 simple, it wins to evaluate the RHS unconditionally on machines
2701 with expensive branches. In this case, this isn't a comparison
2702 that can be merged. */
2703
2704 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2705 are with zero (tmw). */
2706
2707 if (BRANCH_COST >= 2
2708 && TREE_CODE (TREE_TYPE (rhs)) == INTEGER_TYPE
2709 && simple_operand_p (rl_arg)
2710 && simple_operand_p (rr_arg))
2711 return build (code, truth_type, lhs, rhs);
2712
2713 /* See if the comparisons can be merged. Then get all the parameters for
2714 each side. */
2715
2716 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2717 || (rcode != EQ_EXPR && rcode != NE_EXPR))
2718 return 0;
2719
2720 volatilep = 0;
2721 ll_inner = decode_field_reference (ll_arg,
2722 &ll_bitsize, &ll_bitpos, &ll_mode,
2723 &ll_unsignedp, &volatilep, &ll_mask);
2724 lr_inner = decode_field_reference (lr_arg,
2725 &lr_bitsize, &lr_bitpos, &lr_mode,
2726 &lr_unsignedp, &volatilep, &lr_mask);
2727 rl_inner = decode_field_reference (rl_arg,
2728 &rl_bitsize, &rl_bitpos, &rl_mode,
2729 &rl_unsignedp, &volatilep, &rl_mask);
2730 rr_inner = decode_field_reference (rr_arg,
2731 &rr_bitsize, &rr_bitpos, &rr_mode,
2732 &rr_unsignedp, &volatilep, &rr_mask);
2733
2734 /* The inner operation on the lhs of each comparison must be the
2735 same if we are to be able to do anything.
2736 Then see if we have constants. If not, the same must be true for
2737 the rhs's. */
2738 if (volatilep || ll_inner == 0 || rl_inner == 0
2739 || ! operand_equal_p (ll_inner, rl_inner, 0))
2740 return 0;
2741
2742 if (TREE_CODE (lr_arg) == INTEGER_CST
2743 && TREE_CODE (rr_arg) == INTEGER_CST)
2744 l_const = lr_arg, r_const = rr_arg;
2745 else if (lr_inner == 0 || rr_inner == 0
2746 || ! operand_equal_p (lr_inner, rr_inner, 0))
2747 return 0;
2748 else
2749 l_const = r_const = 0;
2750
2751 /* If either comparison code is not correct for our logical operation,
2752 fail. However, we can convert a one-bit comparison against zero into
2753 the opposite comparison against that bit being set in the field. */
2754
2755 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
2756 if (lcode != wanted_code)
2757 {
2758 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2759 l_const = ll_mask;
2760 else
2761 return 0;
2762 }
2763
2764 if (rcode != wanted_code)
2765 {
2766 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2767 r_const = rl_mask;
2768 else
2769 return 0;
2770 }
2771
2772 /* See if we can find a mode that contains both fields being compared on
2773 the left. If we can't, fail. Otherwise, update all constants and masks
2774 to be relative to a field of that size. */
2775 first_bit = MIN (ll_bitpos, rl_bitpos);
2776 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2777 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2778 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2779 volatilep);
2780 if (lnmode == VOIDmode)
2781 return 0;
2782
2783 lnbitsize = GET_MODE_BITSIZE (lnmode);
2784 lnbitpos = first_bit & ~ (lnbitsize - 1);
2785 type = type_for_size (lnbitsize, 1);
2786 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
2787
2788 #if BYTES_BIG_ENDIAN
2789 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2790 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
2791 #endif
2792
2793 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2794 size_int (xll_bitpos), 0);
2795 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2796 size_int (xrl_bitpos), 0);
2797
2798 /* Make sure the constants are interpreted as unsigned, so we
2799 don't have sign bits outside the range of their type. */
2800
2801 if (l_const)
2802 {
2803 l_const = convert (unsigned_type (TREE_TYPE (l_const)), l_const);
2804 l_const = const_binop (LSHIFT_EXPR, convert (type, l_const),
2805 size_int (xll_bitpos), 0);
2806 }
2807 if (r_const)
2808 {
2809 r_const = convert (unsigned_type (TREE_TYPE (r_const)), r_const);
2810 r_const = const_binop (LSHIFT_EXPR, convert (type, r_const),
2811 size_int (xrl_bitpos), 0);
2812 }
2813
2814 /* If the right sides are not constant, do the same for them. Also,
2815 disallow this optimization if a size or signedness mismatch occurs
2816 between the left and right sides. */
2817 if (l_const == 0)
2818 {
2819 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2820 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
2821 /* Make sure the two fields on the right
2822 correspond to the left without being swapped. */
2823 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
2824 return 0;
2825
2826 first_bit = MIN (lr_bitpos, rr_bitpos);
2827 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2828 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2829 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2830 volatilep);
2831 if (rnmode == VOIDmode)
2832 return 0;
2833
2834 rnbitsize = GET_MODE_BITSIZE (rnmode);
2835 rnbitpos = first_bit & ~ (rnbitsize - 1);
2836 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2837
2838 #if BYTES_BIG_ENDIAN
2839 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2840 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2841 #endif
2842
2843 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2844 size_int (xlr_bitpos), 0);
2845 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2846 size_int (xrr_bitpos), 0);
2847
2848 /* Make a mask that corresponds to both fields being compared.
2849 Do this for both items being compared. If the masks agree,
2850 we can do this by masking both and comparing the masked
2851 results. */
2852 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2853 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
2854 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
2855 {
2856 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2857 ll_unsignedp || rl_unsignedp);
2858 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
2859 lr_unsignedp || rr_unsignedp);
2860 if (! all_ones_mask_p (ll_mask, lnbitsize))
2861 {
2862 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
2863 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
2864 }
2865 return build (wanted_code, truth_type, lhs, rhs);
2866 }
2867
2868 /* There is still another way we can do something: If both pairs of
2869 fields being compared are adjacent, we may be able to make a wider
2870 field containing them both. */
2871 if ((ll_bitsize + ll_bitpos == rl_bitpos
2872 && lr_bitsize + lr_bitpos == rr_bitpos)
2873 || (ll_bitpos == rl_bitpos + rl_bitsize
2874 && lr_bitpos == rr_bitpos + rr_bitsize))
2875 return build (wanted_code, truth_type,
2876 make_bit_field_ref (ll_inner, type,
2877 ll_bitsize + rl_bitsize,
2878 MIN (ll_bitpos, rl_bitpos),
2879 ll_unsignedp),
2880 make_bit_field_ref (lr_inner, type,
2881 lr_bitsize + rr_bitsize,
2882 MIN (lr_bitpos, rr_bitpos),
2883 lr_unsignedp));
2884
2885 return 0;
2886 }
2887
2888 /* Handle the case of comparisons with constants. If there is something in
2889 common between the masks, those bits of the constants must be the same.
2890 If not, the condition is always false. Test for this to avoid generating
2891 incorrect code below. */
2892 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
2893 if (! integer_zerop (result)
2894 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
2895 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
2896 {
2897 if (wanted_code == NE_EXPR)
2898 {
2899 warning ("`or' of unmatched not-equal tests is always 1");
2900 return convert (truth_type, integer_one_node);
2901 }
2902 else
2903 {
2904 warning ("`and' of mutually exclusive equal-tests is always zero");
2905 return convert (truth_type, integer_zero_node);
2906 }
2907 }
2908
2909 /* Construct the expression we will return. First get the component
2910 reference we will make. Unless the mask is all ones for the width of
2911 that field, perform the mask operation. Then compare with the
2912 merged constant. */
2913 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2914 ll_unsignedp || rl_unsignedp);
2915
2916 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2917 if (! all_ones_mask_p (ll_mask, lnbitsize))
2918 result = build (BIT_AND_EXPR, type, result, ll_mask);
2919
2920 return build (wanted_code, truth_type, result,
2921 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
2922 }
2923 \f
2924 /* Perform constant folding and related simplification of EXPR.
2925 The related simplifications include x*1 => x, x*0 => 0, etc.,
2926 and application of the associative law.
2927 NOP_EXPR conversions may be removed freely (as long as we
2928 are careful not to change the C type of the overall expression)
2929 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
2930 but we can constant-fold them if they have constant operands. */
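/* For example, fold applied to `x + 0' (integer x) returns `x' as a
   non-lvalue, and applied to `2 + 3' returns the INTEGER_CST 5 via
   const_binop. */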
2931
2932 tree
2933 fold (expr)
2934 tree expr;
2935 {
2936 register tree t = expr;
2937 tree t1 = NULL_TREE;
2938 tree tem;
2939 tree type = TREE_TYPE (expr);
2940 register tree arg0, arg1;
2941 register enum tree_code code = TREE_CODE (t);
2942 register int kind;
2943 int invert;
2944
2945 /* WINS will be nonzero when the switch is done
2946 if all operands are constant. */
2947
2948 int wins = 1;
2949
2950 /* Return right away if already constant. */
2951 if (TREE_CONSTANT (t))
2952 {
2953 if (code == CONST_DECL)
2954 return DECL_INITIAL (t);
2955 return t;
2956 }
2957
2958 kind = TREE_CODE_CLASS (code);
2959 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
2960 {
2961 /* Special case for conversion ops that can have fixed point args. */
2962 arg0 = TREE_OPERAND (t, 0);
2963
2964 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
2965 if (arg0 != 0)
2966 STRIP_TYPE_NOPS (arg0);
2967
2968 if (arg0 != 0 && TREE_CODE (arg0) != INTEGER_CST
2969 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
2970 && TREE_CODE (arg0) != REAL_CST
2971 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
2972 )
2973 /* Note that TREE_CONSTANT isn't enough:
2974 static var addresses are constant but we can't
2975 do arithmetic on them. */
2976 wins = 0;
2977 }
2978 else if (kind == 'e' || kind == '<'
2979 || kind == '1' || kind == '2' || kind == 'r')
2980 {
2981 register int len = tree_code_length[(int) code];
2982 register int i;
2983 for (i = 0; i < len; i++)
2984 {
2985 tree op = TREE_OPERAND (t, i);
2986
2987 if (op == 0)
2988 continue; /* Valid for CALL_EXPR, at least. */
2989
2990 /* Strip any conversions that don't change the mode. */
2991 STRIP_NOPS (op);
2992
2993 if (TREE_CODE (op) != INTEGER_CST
2994 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
2995 && TREE_CODE (op) != REAL_CST
2996 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
2997 )
2998 /* Note that TREE_CONSTANT isn't enough:
2999 static var addresses are constant but we can't
3000 do arithmetic on them. */
3001 wins = 0;
3002
3003 if (i == 0)
3004 arg0 = op;
3005 else if (i == 1)
3006 arg1 = op;
3007 }
3008 }
3009
3010 /* If this is a commutative operation, and ARG0 is a constant, move it
3011 to ARG1 to reduce the number of tests below. */
3012 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
3013 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
3014 || code == BIT_AND_EXPR)
3015 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
3016 {
3017 tem = arg0; arg0 = arg1; arg1 = tem;
3018
3019 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
3020 TREE_OPERAND (t, 1) = tem;
3021 }
3022
3023 /* Now WINS is set as described above,
3024 ARG0 is the first operand of EXPR,
3025 and ARG1 is the second operand (if it has more than one operand).
3026
3027 First check for cases where an arithmetic operation is applied to a
3028 compound, conditional, or comparison operation. Push the arithmetic
3029 operation inside the compound or conditional to see if any folding
3030 can then be done. Convert comparison to conditional for this purpose.
3031 This also optimizes non-constant cases that used to be done in
3032 expand_expr. */
3033 if (TREE_CODE_CLASS (code) == '1')
3034 {
3035 if (TREE_CODE (arg0) == COMPOUND_EXPR)
3036 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3037 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
3038 else if (TREE_CODE (arg0) == COND_EXPR)
3039 {
3040 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
3041 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
3042 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
3043
3044 /* If this was a conversion, and all we did was to move it
3045 inside the COND_EXPR, bring it back out. Then return so we
3046 don't get into an infinite recursion loop taking the conversion
3047 out and then back in. */
3048
3049 if ((code == NOP_EXPR || code == CONVERT_EXPR
3050 || code == NON_LVALUE_EXPR)
3051 && TREE_CODE (t) == COND_EXPR
3052 && TREE_CODE (TREE_OPERAND (t, 1)) == code
3053 && TREE_CODE (TREE_OPERAND (t, 2)) == code
3054 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
3055 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0))))
3056 t = build1 (code, type,
3057 build (COND_EXPR,
3058 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
3059 TREE_OPERAND (t, 0),
3060 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
3061 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
3062 return t;
3063 }
3064 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3065 return fold (build (COND_EXPR, type, arg0,
3066 fold (build1 (code, type, integer_one_node)),
3067 fold (build1 (code, type, integer_zero_node))));
3068 }
3069 else if (TREE_CODE_CLASS (code) == '2')
3070 {
3071 if (TREE_CODE (arg1) == COMPOUND_EXPR)
3072 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3073 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3074 else if (TREE_CODE (arg1) == COND_EXPR
3075 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
3076 {
3077 tree test, true_value, false_value;
3078
3079 if (TREE_CODE (arg1) == COND_EXPR)
3080 {
3081 test = TREE_OPERAND (arg1, 0);
3082 true_value = TREE_OPERAND (arg1, 1);
3083 false_value = TREE_OPERAND (arg1, 2);
3084 }
3085 else
3086 {
3087 test = arg1;
3088 true_value = integer_one_node;
3089 false_value = integer_zero_node;
3090 }
3091
3092 if (TREE_CODE (arg0) != VAR_DECL && TREE_CODE (arg0) != PARM_DECL)
3093 arg0 = save_expr (arg0);
3094 test = fold (build (COND_EXPR, type, test,
3095 fold (build (code, type, arg0, true_value)),
3096 fold (build (code, type, arg0, false_value))));
3097 if (TREE_CODE (arg0) == SAVE_EXPR)
3098 return build (COMPOUND_EXPR, type,
3099 convert (void_type_node, arg0), test);
3100 else
3101 return convert (type, test);
3102 }
3103
3104 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
3105 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3106 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3107 else if (TREE_CODE (arg0) == COND_EXPR
3108 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3109 {
3110 tree test, true_value, false_value;
3111
3112 if (TREE_CODE (arg0) == COND_EXPR)
3113 {
3114 test = TREE_OPERAND (arg0, 0);
3115 true_value = TREE_OPERAND (arg0, 1);
3116 false_value = TREE_OPERAND (arg0, 2);
3117 }
3118 else
3119 {
3120 test = arg0;
3121 true_value = integer_one_node;
3122 false_value = integer_zero_node;
3123 }
3124
3125 if (TREE_CODE (arg1) != VAR_DECL && TREE_CODE (arg1) != PARM_DECL)
3126 arg1 = save_expr (arg1);
3127 test = fold (build (COND_EXPR, type, test,
3128 fold (build (code, type, true_value, arg1)),
3129 fold (build (code, type, false_value, arg1))));
3130 if (TREE_CODE (arg1) == SAVE_EXPR)
3131 return build (COMPOUND_EXPR, type,
3132 convert (void_type_node, arg1), test);
3133 else
3134 return convert (type, test);
3135 }
3136 }
3137 else if (TREE_CODE_CLASS (code) == '<'
3138 && TREE_CODE (arg0) == COMPOUND_EXPR)
3139 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3140 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3141 else if (TREE_CODE_CLASS (code) == '<'
3142 && TREE_CODE (arg1) == COMPOUND_EXPR)
3143 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3144 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3145
3146 switch (code)
3147 {
3148 case INTEGER_CST:
3149 case REAL_CST:
3150 case STRING_CST:
3151 case COMPLEX_CST:
3152 case CONSTRUCTOR:
3153 return t;
3154
3155 case CONST_DECL:
3156 return fold (DECL_INITIAL (t));
3157
3158 case NOP_EXPR:
3159 case FLOAT_EXPR:
3160 case CONVERT_EXPR:
3161 case FIX_TRUNC_EXPR:
3162 /* Other kinds of FIX are not handled properly by fold_convert. */
3163 /* Two conversions in a row are not needed unless:
3164 - the intermediate type is narrower than both initial and final, or
3165 - the intermediate type and innermost type differ in signedness,
3166 and the outermost type is wider than the intermediate, or
3167 - the initial type is a pointer type and the precisions of the
3168 intermediate and final types differ, or
3169 - the final type is a pointer type and the precisions of the
3170 initial and intermediate types differ. */
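/* For example (assuming 16-bit short and 32-bit int), the middle
   conversion in `(short) (int) s', where s is a short, is not needed:
   the intermediate type is wider than both and agrees in signedness,
   so we convert the operand directly. */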
3171 if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3172 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3173 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3174 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3175 ||
3176 TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3177 > TYPE_PRECISION (TREE_TYPE (t)))
3178 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3179 == INTEGER_TYPE)
3180 && (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0)))
3181 == INTEGER_TYPE)
3182 && (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3183 != TREE_UNSIGNED (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3184 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3185 < TYPE_PRECISION (TREE_TYPE (t))))
3186 && ((TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3187 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3188 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))))
3189 ==
3190 (TREE_UNSIGNED (TREE_TYPE (t))
3191 && (TYPE_PRECISION (TREE_TYPE (t))
3192 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3193 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3194 == POINTER_TYPE)
3195 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3196 != TYPE_PRECISION (TREE_TYPE (t))))
3197 && ! (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE
3198 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3199 != TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3200 return convert (TREE_TYPE (t), TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3201
3202 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
3203 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
3204 /* Detect assigning a bitfield. */
3205 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
3206 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
3207 {
3208 /* Don't leave an assignment inside a conversion
3209 unless assigning a bitfield. */
3210 tree prev = TREE_OPERAND (t, 0);
3211 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
3212 /* First do the assignment, then return converted constant. */
3213 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
3214 TREE_USED (t) = 1;
3215 return t;
3216 }
3217 if (!wins)
3218 {
3219 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
3220 return t;
3221 }
3222 return fold_convert (t, arg0);
3223
3224 #if 0 /* This loses on &"foo"[0]. */
3225 case ARRAY_REF:
3226 {
3227 int i;
3228
3229 /* Fold an expression like: "foo"[2] */
3230 if (TREE_CODE (arg0) == STRING_CST
3231 && TREE_CODE (arg1) == INTEGER_CST
3232 && !TREE_INT_CST_HIGH (arg1)
3233 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
3234 {
3235 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
3236 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
3237 force_fit_type (t);
3238 }
3239 }
3240 return t;
3241 #endif /* 0 */
3242
3243 case RANGE_EXPR:
3244 TREE_CONSTANT (t) = wins;
3245 return t;
3246
3247 case NEGATE_EXPR:
3248 if (wins)
3249 {
3250 if (TREE_CODE (arg0) == INTEGER_CST)
3251 {
3252 HOST_WIDE_INT low, high;
3253 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3254 TREE_INT_CST_HIGH (arg0),
3255 &low, &high);
3256 t = build_int_2 (low, high);
3257 TREE_CONSTANT_OVERFLOW (t)
3258 = overflow | TREE_CONSTANT_OVERFLOW (arg0);
3259 TREE_TYPE (t) = type;
3260 force_fit_type (t);
3261 }
3262 else if (TREE_CODE (arg0) == REAL_CST)
3263 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3264 TREE_TYPE (t) = type;
3265 }
3266 else if (TREE_CODE (arg0) == NEGATE_EXPR)
3267 return TREE_OPERAND (arg0, 0);
3268
3269 /* Convert - (a - b) to (b - a) for non-floating-point. */
3270 else if (TREE_CODE (arg0) == MINUS_EXPR && TREE_CODE (type) != REAL_TYPE)
3271 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
3272 TREE_OPERAND (arg0, 0));
3273
3274 return t;
3275
3276 case ABS_EXPR:
3277 if (wins)
3278 {
3279 if (TREE_CODE (arg0) == INTEGER_CST)
3280 {
3281 if (! TREE_UNSIGNED (type)
3282 && TREE_INT_CST_HIGH (arg0) < 0)
3283 {
3284 HOST_WIDE_INT low, high;
3285 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3286 TREE_INT_CST_HIGH (arg0),
3287 &low, &high);
3288 t = build_int_2 (low, high);
3289 TREE_TYPE (t) = type;
3290 force_fit_type (t, overflow);
3291 }
3292 }
3293 else if (TREE_CODE (arg0) == REAL_CST)
3294 {
3295 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
3296 t = build_real (type,
3297 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3298 }
3299 TREE_TYPE (t) = type;
3300 }
3301 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
3302 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
3303 return t;
3304
3305 case BIT_NOT_EXPR:
3306 if (wins)
3307 {
3308 if (TREE_CODE (arg0) == INTEGER_CST)
3309 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
3310 ~ TREE_INT_CST_HIGH (arg0));
3311 TREE_TYPE (t) = type;
3312 force_fit_type (t);
3313 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
3314 }
3315 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
3316 return TREE_OPERAND (arg0, 0);
3317 return t;
3318
3319 case PLUS_EXPR:
3320 /* A + (-B) -> A - B */
3321 if (TREE_CODE (arg1) == NEGATE_EXPR)
3322 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3323 else if (TREE_CODE (type) != REAL_TYPE)
3324 {
3325 if (integer_zerop (arg1))
3326 return non_lvalue (convert (type, arg0));
3327
3328 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3329 with a constant, and the two constants have no bits in common,
3330 we should treat this as a BIT_IOR_EXPR since this may produce more
3331 simplifications. */
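/* For example, `(a & 4) + (b & 3)' can never carry, because the two
   masks have no bits in common, so it is handled as
   `(a & 4) | (b & 3)' by the BIT_IOR_EXPR code below. */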
3332 if (TREE_CODE (arg0) == BIT_AND_EXPR
3333 && TREE_CODE (arg1) == BIT_AND_EXPR
3334 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3335 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3336 && integer_zerop (const_binop (BIT_AND_EXPR,
3337 TREE_OPERAND (arg0, 1),
3338 TREE_OPERAND (arg1, 1), 0)))
3339 {
3340 code = BIT_IOR_EXPR;
3341 goto bit_ior;
3342 }
3343 }
3344 /* In IEEE floating point, x+0 may not equal x. */
3345 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3346 && real_zerop (arg1))
3347 return non_lvalue (convert (type, arg0));
3348 associate:
3349 /* In most languages, we can't associate operations on floats
3350 through parentheses. Rather than remember where the parentheses
3351 were, we don't associate floats at all. It shouldn't matter much. */
3352 if (TREE_CODE (type) == REAL_TYPE)
3353 goto binary;
3354 /* The varsign == -1 case happens only for addition and subtraction.
3355 It means that the arg that was split was really CON minus VAR.
3356 The rest of the code applies to all associative operations. */
3357 if (!wins)
3358 {
3359 tree var, con;
3360 int varsign;
3361
3362 if (split_tree (arg0, code, &var, &con, &varsign))
3363 {
3364 if (varsign == -1)
3365 {
3366 /* EXPR is (CON-VAR) +- ARG1. */
3367 /* If it is + and VAR==ARG1, return just CONST. */
3368 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
3369 return convert (TREE_TYPE (t), con);
3370
3371 /* Otherwise return (CON +- ARG1) - VAR. */
3372 TREE_SET_CODE (t, MINUS_EXPR);
3373 TREE_OPERAND (t, 1) = var;
3374 TREE_OPERAND (t, 0)
3375 = fold (build (code, TREE_TYPE (t), con, arg1));
3376 }
3377 else
3378 {
3379 /* EXPR is (VAR+CON) +- ARG1. */
3380 /* If it is - and VAR==ARG1, return just CONST. */
3381 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
3382 return convert (TREE_TYPE (t), con);
3383
3384 /* Otherwise return VAR +- (ARG1 +- CON). */
3385 TREE_OPERAND (t, 1) = tem
3386 = fold (build (code, TREE_TYPE (t), arg1, con));
3387 TREE_OPERAND (t, 0) = var;
3388 if (integer_zerop (tem)
3389 && (code == PLUS_EXPR || code == MINUS_EXPR))
3390 return convert (type, var);
3391 /* If we have x +/- (c - d) [c an explicit integer]
3392 change it to x -/+ (d - c) since if d is relocatable
3393 then the latter can be a single immediate insn
3394 and the former cannot. */
3395 if (TREE_CODE (tem) == MINUS_EXPR
3396 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
3397 {
3398 tree tem1 = TREE_OPERAND (tem, 1);
3399 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
3400 TREE_OPERAND (tem, 0) = tem1;
3401 TREE_SET_CODE (t,
3402 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3403 }
3404 }
3405 return t;
3406 }
3407
3408 if (split_tree (arg1, code, &var, &con, &varsign))
3409 {
3410 /* EXPR is ARG0 +- (CON +- VAR). */
3411 if (varsign == -1)
3412 TREE_SET_CODE (t,
3413 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3414 if (TREE_CODE (t) == MINUS_EXPR
3415 && operand_equal_p (var, arg0, 0))
3416 {
3417 /* If VAR and ARG0 cancel, return just CON or -CON. */
3418 if (code == PLUS_EXPR)
3419 return convert (TREE_TYPE (t), con);
3420 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
3421 convert (TREE_TYPE (t), con)));
3422 }
3423 TREE_OPERAND (t, 0)
3424 = fold (build (code, TREE_TYPE (t), arg0, con));
3425 TREE_OPERAND (t, 1) = var;
3426 if (integer_zerop (TREE_OPERAND (t, 0))
3427 && TREE_CODE (t) == PLUS_EXPR)
3428 return convert (TREE_TYPE (t), var);
3429 return t;
3430 }
3431 }
3432 binary:
3433 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3434 if (TREE_CODE (arg1) == REAL_CST)
3435 return t;
3436 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3437 if (wins)
3438 t1 = const_binop (code, arg0, arg1, 0);
3439 if (t1 != NULL_TREE)
3440 {
3441 /* The return value should always have
3442 the same type as the original expression. */
3443 TREE_TYPE (t1) = TREE_TYPE (t);
3444 return t1;
3445 }
3446 return t;
3447
3448 case MINUS_EXPR:
3449 if (TREE_CODE (type) != REAL_TYPE)
3450 {
3451 if (! wins && integer_zerop (arg0))
3452 return build1 (NEGATE_EXPR, type, arg1);
3453 if (integer_zerop (arg1))
3454 return non_lvalue (convert (type, arg0));
3455 }
3456 /* Convert A - (-B) to A + B. */
3457 else if (TREE_CODE (arg1) == NEGATE_EXPR)
3458 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3459 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT)
3460 {
3461 /* Except with IEEE floating point, 0-x equals -x. */
3462 if (! wins && real_zerop (arg0))
3463 return build1 (NEGATE_EXPR, type, arg1);
3464 /* Except with IEEE floating point, x-0 equals x. */
3465 if (real_zerop (arg1))
3466 return non_lvalue (convert (type, arg0));
3467
3468 /* Fold &x - &x. This can happen from &x.foo - &x.
3469 This is unsafe for certain floats even in non-IEEE formats.
3470 In IEEE, it is unsafe because it does wrong for NaNs.
3471 Also note that operand_equal_p is always false if an operand
3472 is volatile. */
3473
3474 if (operand_equal_p (arg0, arg1,
3475 TREE_CODE (type) == REAL_TYPE))
3476 return convert (type, integer_zero_node);
3477 }
3478 goto associate;
3479
3480 case MULT_EXPR:
3481 if (TREE_CODE (type) != REAL_TYPE)
3482 {
3483 if (integer_zerop (arg1))
3484 return omit_one_operand (type, arg1, arg0);
3485 if (integer_onep (arg1))
3486 return non_lvalue (convert (type, arg0));
3487
3488 /* (a * (1 << b)) is (a << b) */
3489 if (TREE_CODE (arg1) == LSHIFT_EXPR
3490 && integer_onep (TREE_OPERAND (arg1, 0)))
3491 return fold (build (LSHIFT_EXPR, type, arg0,
3492 TREE_OPERAND (arg1, 1)));
3493 if (TREE_CODE (arg0) == LSHIFT_EXPR
3494 && integer_onep (TREE_OPERAND (arg0, 0)))
3495 return fold (build (LSHIFT_EXPR, type, arg1,
3496 TREE_OPERAND (arg0, 1)));
3497 }
3498 else
3499 {
3500 /* x*0 is 0, except for IEEE floating point. */
3501 if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3502 && real_zerop (arg1))
3503 return omit_one_operand (type, arg1, arg0);
3504 /* In IEEE floating point, x*1 is not equivalent to x for snans.
3505 However, ANSI says we can drop signals,
3506 so we can do this anyway. */
3507 if (real_onep (arg1))
3508 return non_lvalue (convert (type, arg0));
3509 /* x*2 is x+x */
3510 if (! wins && real_twop (arg1))
3511 {
3512 tree arg = save_expr (arg0);
3513 return build (PLUS_EXPR, type, arg, arg);
3514 }
3515 }
3516 goto associate;
3517
3518 case BIT_IOR_EXPR:
3519 bit_ior:
3520 if (integer_all_onesp (arg1))
3521 return omit_one_operand (type, arg1, arg0);
3522 if (integer_zerop (arg1))
3523 return non_lvalue (convert (type, arg0));
3524 t1 = distribute_bit_expr (code, type, arg0, arg1);
3525 if (t1 != NULL_TREE)
3526 return t1;
3527
3528 /* If A is unsigned and C1+C2 is the size of A, then
3529 (a << C1) | (a >> C2) is a rotate of A by C1 bits. */
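/* For example, with a 32-bit unsigned A, `(a << 3) | (a >> 29)'
   becomes LROTATE_EXPR of A by 3 bits (the widths here are only
   illustrative; the actual test is against TYPE_PRECISION). */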
3530
3531 if ((TREE_CODE (arg0) == RSHIFT_EXPR
3532 || TREE_CODE (arg0) == LSHIFT_EXPR)
3533 && (TREE_CODE (arg1) == RSHIFT_EXPR
3534 || TREE_CODE (arg1) == LSHIFT_EXPR)
3535 && TREE_CODE (arg0) != TREE_CODE (arg1)
3536 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
3537 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
3538 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3539 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3540 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3541 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
3542 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3543 + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
3544 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
3545 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
3546 TREE_CODE (arg0) == LSHIFT_EXPR
3547 ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
3548
3549 goto associate;
3550
3551 case BIT_XOR_EXPR:
3552 if (integer_zerop (arg1))
3553 return non_lvalue (convert (type, arg0));
3554 if (integer_all_onesp (arg1))
3555 return fold (build1 (BIT_NOT_EXPR, type, arg0));
3556 goto associate;
3557
3558 case BIT_AND_EXPR:
3559 bit_and:
3560 if (integer_all_onesp (arg1))
3561 return non_lvalue (convert (type, arg0));
3562 if (integer_zerop (arg1))
3563 return omit_one_operand (type, arg1, arg0);
3564 t1 = distribute_bit_expr (code, type, arg0, arg1);
3565 if (t1 != NULL_TREE)
3566 return t1;
3567 /* Simplify ((int)c & 0377) into (int)c, if c is unsigned char. */
3568 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
3569 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
3570 {
3571 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
3572 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3573 && (~TREE_INT_CST_LOW (arg0)
3574 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3575 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
3576 }
3577 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
3578 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
3579 {
3580 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
3581 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3582 && (~TREE_INT_CST_LOW (arg1)
3583 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3584 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
3585 }
3586 goto associate;
3587
3588 case BIT_ANDTC_EXPR:
3589 if (integer_all_onesp (arg0))
3590 return non_lvalue (convert (type, arg1));
3591 if (integer_zerop (arg0))
3592 return omit_one_operand (type, arg0, arg1);
3593 if (TREE_CODE (arg1) == INTEGER_CST)
3594 {
3595 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
3596 code = BIT_AND_EXPR;
3597 goto bit_and;
3598 }
3599 goto binary;
3600
3601 case TRUNC_DIV_EXPR:
3602 case ROUND_DIV_EXPR:
3603 case FLOOR_DIV_EXPR:
3604 case CEIL_DIV_EXPR:
3605 case EXACT_DIV_EXPR:
3606 case RDIV_EXPR:
3607 if (integer_onep (arg1))
3608 return non_lvalue (convert (type, arg0));
3609 if (integer_zerop (arg1))
3610 return t;
3611
3612 /* If we have ((a * C1) / C2) and C1 % C2 == 0, we can replace this with
3613 (a * (C1/C2)). Also look for when we have a SAVE_EXPR in
3614 between. */
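/* For example, `(a * 8) / 4' has 8 % 4 == 0 and so becomes `a * 2'. */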
3615 if (TREE_CODE (arg1) == INTEGER_CST
3616 && TREE_INT_CST_LOW (arg1) > 0 && TREE_INT_CST_HIGH (arg1) == 0
3617 && TREE_CODE (arg0) == MULT_EXPR
3618 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3619 && TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) > 0
3620 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3621 && 0 == (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3622 % TREE_INT_CST_LOW (arg1)))
3623 {
3624 tree new_op
3625 = build_int_2 (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3626 / TREE_INT_CST_LOW (arg1), 0);
3627
3628 TREE_TYPE (new_op) = type;
3629 return build (MULT_EXPR, type, TREE_OPERAND (arg0, 0), new_op);
3630 }
3631
3632 else if (TREE_CODE (arg1) == INTEGER_CST
3633 && TREE_INT_CST_LOW (arg1) > 0 && TREE_INT_CST_HIGH (arg1) == 0
3634 && TREE_CODE (arg0) == SAVE_EXPR
3635 && TREE_CODE (TREE_OPERAND (arg0, 0)) == MULT_EXPR
3636 && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1))
3637 == INTEGER_CST)
3638 && (TREE_INT_CST_LOW (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1))
3639 > 0)
3640 && (TREE_INT_CST_HIGH (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1))
3641 == 0)
3642 && (TREE_INT_CST_LOW (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1))
3643 % TREE_INT_CST_LOW (arg1)) == 0)
3644 {
3645 tree new_op
3646 = build_int_2 (TREE_INT_CST_LOW (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1))
3647 / TREE_INT_CST_LOW (arg1), 0);
3648
3649 TREE_TYPE (new_op) = type;
3650 return build (MULT_EXPR, type,
3651 TREE_OPERAND (TREE_OPERAND (arg0, 0), 0), new_op);
3652 }
3653
3654 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3655 #ifndef REAL_INFINITY
3656 if (TREE_CODE (arg1) == REAL_CST
3657 && real_zerop (arg1))
3658 return t;
3659 #endif
3660 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3661
3662 goto binary;
3663
3664 case CEIL_MOD_EXPR:
3665 case FLOOR_MOD_EXPR:
3666 case ROUND_MOD_EXPR:
3667 case TRUNC_MOD_EXPR:
3668 if (integer_onep (arg1))
3669 return omit_one_operand (type, integer_zero_node, arg0);
3670 if (integer_zerop (arg1))
3671 return t;
3672 goto binary;
3673
3674 case LSHIFT_EXPR:
3675 case RSHIFT_EXPR:
3676 case LROTATE_EXPR:
3677 case RROTATE_EXPR:
3678 if (integer_zerop (arg1))
3679 return non_lvalue (convert (type, arg0));
3680 /* Since a negative shift count is not well-defined,
3681 don't try to compute it in the compiler.  */
3682 if (tree_int_cst_lt (arg1, integer_zero_node))
3683 return t;
3684 goto binary;
3685
3686 case MIN_EXPR:
3687 if (operand_equal_p (arg0, arg1, 0))
3688 return arg0;
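/* min (x, TYPE_MIN) is TYPE_MIN, but x must still be evaluated
   for its side effects.  */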
3689 if (TREE_CODE (type) == INTEGER_TYPE
3690 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
3691 return omit_one_operand (type, arg1, arg0);
3692 goto associate;
3693
3694 case MAX_EXPR:
3695 if (operand_equal_p (arg0, arg1, 0))
3696 return arg0;
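/* max (x, TYPE_MAX) is TYPE_MAX, but x must still be evaluated
   for its side effects.  */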
3697 if (TREE_CODE (type) == INTEGER_TYPE
3698 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
3699 return omit_one_operand (type, arg1, arg0);
3700 goto associate;
3701
3702 case TRUTH_NOT_EXPR:
3703 /* Note that the operand of this must be an int
3704 and its value must be 0 or 1.
3705 ("true" is a fixed value perhaps depending on the language,
3706 but we don't handle values other than 1 correctly yet.) */
3707 return invert_truthvalue (arg0);
3708
3709 case TRUTH_ANDIF_EXPR:
3710 /* Note that the operands of this must be ints
3711 and their values must be 0 or 1.
3712 ("true" is a fixed value perhaps depending on the language.) */
3713 /* If first arg is constant zero, return it. */
3714 if (integer_zerop (arg0))
3715 return arg0;
3716 case TRUTH_AND_EXPR:
3717 /* If either arg is constant true, drop it. */
3718 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
3719 return non_lvalue (arg1);
3720 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
3721 return non_lvalue (arg0);
3722 /* If second arg is constant zero, result is zero, but first arg
3723 must be evaluated. */
3724 if (integer_zerop (arg1))
3725 return omit_one_operand (type, arg1, arg0);
3726
3727 truth_andor:
3728 /* Check for the possibility of merging component references. If our
3729 lhs is another similar operation, try to merge its rhs with our
3730 rhs. Then try to merge our lhs and rhs. */
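/* For instance, fold_truthop may merge two tests of adjacent
   bit-fields, as in a.b == 2 && a.c == 3, into a single masked
   comparison of the containing word (an illustrative case).  */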
3731 if (optimize)
3732 {
3733 if (TREE_CODE (arg0) == code)
3734 {
3735 tem = fold_truthop (code, type,
3736 TREE_OPERAND (arg0, 1), arg1);
3737 if (tem)
3738 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
3739 }
3740
3741 tem = fold_truthop (code, type, arg0, arg1);
3742 if (tem)
3743 return tem;
3744 }
3745 return t;
3746
3747 case TRUTH_ORIF_EXPR:
3748 /* Note that the operands of this must be ints
3749 and their values must be 0 or 1.
3750 ("true" is a fixed value perhaps depending on the language.) */
3751 /* If first arg is constant true, return it. */
3752 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
3753 return arg0;
3754 case TRUTH_OR_EXPR:
3755 /* If either arg is constant zero, drop it. */
3756 if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
3757 return non_lvalue (arg1);
3758 if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
3759 return non_lvalue (arg0);
3760 /* If second arg is constant true, result is true, but we must
3761 evaluate first arg. */
3762 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
3763 return omit_one_operand (type, arg1, arg0);
3764 goto truth_andor;
3765
3766 case TRUTH_XOR_EXPR:
3767 /* If either arg is constant zero, drop it. */
3768 if (integer_zerop (arg0))
3769 return non_lvalue (arg1);
3770 if (integer_zerop (arg1))
3771 return non_lvalue (arg0);
3772 /* If either arg is constant true, this is a logical inversion. */
3773 if (integer_onep (arg0))
3774 return non_lvalue (invert_truthvalue (arg1));
3775 if (integer_onep (arg1))
3776 return non_lvalue (invert_truthvalue (arg0));
3777 break;
3778
3779 case EQ_EXPR:
3780 case NE_EXPR:
3781 case LT_EXPR:
3782 case GT_EXPR:
3783 case LE_EXPR:
3784 case GE_EXPR:
3785 /* If one arg is a constant integer, put it last. */
3786 if (TREE_CODE (arg0) == INTEGER_CST
3787 && TREE_CODE (arg1) != INTEGER_CST)
3788 {
3789 TREE_OPERAND (t, 0) = arg1;
3790 TREE_OPERAND (t, 1) = arg0;
3791 arg0 = TREE_OPERAND (t, 0);
3792 arg1 = TREE_OPERAND (t, 1);
3793 code = swap_tree_comparison (code);
3794 TREE_SET_CODE (t, code);
3795 }
3796
3797 /* Convert foo++ == CONST into ++foo == CONST + INCR.
3798 First, see if one arg is constant; find the constant arg
3799 and the other one. */
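/* E.g. with an increment of 1, i++ == 5 would become ++i == 6
   (illustrative; valid only for EQ/NE or pointers, as checked below).  */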
3800 {
3801 tree constop = 0, varop;
3802 tree *constoploc;
3803
3804 if (TREE_CONSTANT (arg1))
3805 constoploc = &TREE_OPERAND (t, 1), constop = arg1, varop = arg0;
3806 if (TREE_CONSTANT (arg0))
3807 constoploc = &TREE_OPERAND (t, 0), constop = arg0, varop = arg1;
3808
3809 if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
3810 {
3811 /* This optimization is invalid for ordered comparisons
3812 if CONST+INCR overflows or if foo+incr might overflow.
3813 This optimization is invalid for floating point due to rounding.
3814 For pointer types we assume overflow doesn't happen. */
3815 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
3816 || (TREE_CODE (TREE_TYPE (varop)) != REAL_TYPE
3817 && (code == EQ_EXPR || code == NE_EXPR)))
3818 {
3819 tree newconst
3820 = fold (build (PLUS_EXPR, TREE_TYPE (varop),
3821 constop, TREE_OPERAND (varop, 1)));
3822 TREE_SET_CODE (varop, PREINCREMENT_EXPR);
3823 *constoploc = newconst;
3824 return t;
3825 }
3826 }
3827 else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
3828 {
3829 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
3830 || (TREE_CODE (TREE_TYPE (varop)) != REAL_TYPE
3831 && (code == EQ_EXPR || code == NE_EXPR)))
3832 {
3833 tree newconst
3834 = fold (build (MINUS_EXPR, TREE_TYPE (varop),
3835 constop, TREE_OPERAND (varop, 1)));
3836 TREE_SET_CODE (varop, PREDECREMENT_EXPR);
3837 *constoploc = newconst;
3838 return t;
3839 }
3840 }
3841 }
3842
3843 /* Change X >= CST to X > (CST - 1), and X < CST to X <= (CST - 1), if CST is positive.  */
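/* E.g. x >= 3 becomes x > 2, and x < 3 becomes x <= 2
   (an illustrative case).  */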
3844 if (TREE_CODE (arg1) == INTEGER_CST
3845 && TREE_CODE (arg0) != INTEGER_CST
3846 && ! tree_int_cst_lt (arg1, integer_one_node))
3847 {
3848 switch (TREE_CODE (t))
3849 {
3850 case GE_EXPR:
3851 code = GT_EXPR;
3852 TREE_SET_CODE (t, code);
3853 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
3854 TREE_OPERAND (t, 1) = arg1;
3855 break;
3856
3857 case LT_EXPR:
3858 code = LE_EXPR;
3859 TREE_SET_CODE (t, code);
3860 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
3861 TREE_OPERAND (t, 1) = arg1;
3862 }
3863 }
3864
3865 /* If this is an EQ or NE comparison with zero and ARG0 is
3866 (1 << foo) & bar, convert it to (bar >> foo) & 1.  Both require
3867 two operations, but the latter can be done in one less insn
3868 on machines that have only two-operand insns or on which a
3869 constant cannot be the first operand.  */
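/* E.g. ((1 << n) & b) == 0 would be rewritten here as
   ((b >> n) & 1) == 0 (illustrative).  */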
3870 if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
3871 && TREE_CODE (arg0) == BIT_AND_EXPR)
3872 {
3873 if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
3874 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
3875 return
3876 fold (build (code, type,
3877 build (BIT_AND_EXPR, TREE_TYPE (arg0),
3878 build (RSHIFT_EXPR,
3879 TREE_TYPE (TREE_OPERAND (arg0, 0)),
3880 TREE_OPERAND (arg0, 1),
3881 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
3882 convert (TREE_TYPE (arg0),
3883 integer_one_node)),
3884 arg1));
3885 else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
3886 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
3887 return
3888 fold (build (code, type,
3889 build (BIT_AND_EXPR, TREE_TYPE (arg0),
3890 build (RSHIFT_EXPR,
3891 TREE_TYPE (TREE_OPERAND (arg0, 1)),
3892 TREE_OPERAND (arg0, 0),
3893 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
3894 convert (TREE_TYPE (arg0),
3895 integer_one_node)),
3896 arg1));
3897 }
3898
3899 /* If this is an NE comparison of zero with an AND of one, remove the
3900 comparison since the AND will give the correct value. */
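/* E.g. (x & 1) != 0 becomes simply x & 1 (illustrative).  */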
3901 if (code == NE_EXPR && integer_zerop (arg1)
3902 && TREE_CODE (arg0) == BIT_AND_EXPR
3903 && integer_onep (TREE_OPERAND (arg0, 1)))
3904 return convert (type, arg0);
3905
3906 /* If we have (A & C) == C where C is a power of 2, convert this into
3907 (A & C) != 0. Similarly for NE_EXPR. */
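/* E.g. (x & 4) == 4 becomes (x & 4) != 0, and (x & 4) != 4
   becomes (x & 4) == 0 (illustrative).  */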
3908 if ((code == EQ_EXPR || code == NE_EXPR)
3909 && TREE_CODE (arg0) == BIT_AND_EXPR
3910 && integer_pow2p (TREE_OPERAND (arg0, 1))
3911 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
3912 return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
3913 arg0, integer_zero_node);
3914
3915 /* Simplify comparison of something with itself. (For IEEE
3916 floating-point, we can only do some of these simplifications.) */
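/* E.g. x == x folds to 1 and x < x folds to 0 for integer x;
   for IEEE floating-point, x == x is left alone because x
   might be a NaN (illustrative).  */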
3917 if (operand_equal_p (arg0, arg1, 0))
3918 {
3919 switch (code)
3920 {
3921 case EQ_EXPR:
3922 case GE_EXPR:
3923 case LE_EXPR:
3924 if (TREE_CODE (TREE_TYPE (arg0)) == INTEGER_TYPE)
3925 {
3926 t = build_int_2 (1, 0);
3927 TREE_TYPE (t) = type;
3928 return t;
3929 }
3930 code = EQ_EXPR;
3931 TREE_SET_CODE (t, code);
3932 break;
3933
3934 case NE_EXPR:
3935 /* For NE, we can only do this simplification if integer. */
3936 if (TREE_CODE (TREE_TYPE (arg0)) != INTEGER_TYPE)
3937 break;
3938 /* ... fall through ... */
3939 case GT_EXPR:
3940 case LT_EXPR:
3941 t = build_int_2 (0, 0);
3942 TREE_TYPE (t) = type;
3943 return t;
3944 }
3945 }
3946
3947 /* An unsigned comparison against 0 can be simplified. */
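/* E.g. for unsigned x, x > 0 becomes x != 0 and x <= 0 becomes
   x == 0, while x >= 0 folds to 1 and x < 0 folds to 0
   (illustrative).  */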
3948 if (integer_zerop (arg1)
3949 && (TREE_CODE (TREE_TYPE (arg1)) == INTEGER_TYPE
3950 || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
3951 && TREE_UNSIGNED (TREE_TYPE (arg1)))
3952 {
3953 switch (TREE_CODE (t))
3954 {
3955 case GT_EXPR:
3956 code = NE_EXPR;
3957 TREE_SET_CODE (t, NE_EXPR);
3958 break;
3959 case LE_EXPR:
3960 code = EQ_EXPR;
3961 TREE_SET_CODE (t, EQ_EXPR);
3962 break;
3963 case GE_EXPR:
3964 return omit_one_operand (integer_type_node,
3965 integer_one_node, arg0);
3966 case LT_EXPR:
3967 return omit_one_operand (integer_type_node,
3968 integer_zero_node, arg0);
3969 }
3970 }
3971
3972 /* If we are comparing an expression that just has comparisons
3973 of two integer values, arithmetic expressions of those comparisons,
3974 and constants, we can simplify it. There are only three cases
3975 to check: the two values can either be equal, the first can be
3976 greater, or the second can be greater. Fold the expression for
3977 those three values. Since each value must be 0 or 1, we have
3978 eight possibilities, each of which corresponds to the constant 0
3979 or 1 or one of the six possible comparisons.
3980
3981 This handles common cases like (a > b) == 0 but also handles
3982 expressions like ((x > y) - (y > x)) > 0, which supposedly
3983 occur in macroized code. */
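/* E.g. for ((x > y) - (y > x)) > 0 the three trial foldings
   give 1, 0 and 0, selecting the mask for GT, so the whole
   expression would reduce to x > y (an illustrative trace).  */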
3984
3985 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
3986 {
3987 tree cval1 = 0, cval2 = 0;
3988
3989 if (twoval_comparison_p (arg0, &cval1, &cval2)
3990 /* Don't handle degenerate cases here; they should already
3991 have been handled anyway. */
3992 && cval1 != 0 && cval2 != 0
3993 && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
3994 && TREE_TYPE (cval1) == TREE_TYPE (cval2)
3995 && TREE_CODE (TREE_TYPE (cval1)) == INTEGER_TYPE
3996 && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
3997 TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
3998 {
3999 tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
4000 tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));
4001
4002 /* We can't just pass T to eval_subst in case cval1 or cval2
4003 was the same as ARG1. */
4004
4005 tree high_result
4006 = fold (build (code, type,
4007 eval_subst (arg0, cval1, maxval, cval2, minval),
4008 arg1));
4009 tree equal_result
4010 = fold (build (code, type,
4011 eval_subst (arg0, cval1, maxval, cval2, maxval),
4012 arg1));
4013 tree low_result
4014 = fold (build (code, type,
4015 eval_subst (arg0, cval1, minval, cval2, maxval),
4016 arg1));
4017
4018 /* All three of these results should be 0 or 1. Confirm they
4019 are. Then use those values to select the proper code
4020 to use. */
4021
4022 if ((integer_zerop (high_result)
4023 || integer_onep (high_result))
4024 && (integer_zerop (equal_result)
4025 || integer_onep (equal_result))
4026 && (integer_zerop (low_result)
4027 || integer_onep (low_result)))
4028 {
4029 /* Make a 3-bit mask with the high-order bit being the
4030 value for `>', the next for `=', and the low for `<'.  */
4031 switch ((integer_onep (high_result) * 4)
4032 + (integer_onep (equal_result) * 2)
4033 + integer_onep (low_result))
4034 {
4035 case 0:
4036 /* Always false. */
4037 return omit_one_operand (type, integer_zero_node, arg0);
4038 case 1:
4039 code = LT_EXPR;
4040 break;
4041 case 2:
4042 code = EQ_EXPR;
4043 break;
4044 case 3:
4045 code = LE_EXPR;
4046 break;
4047 case 4:
4048 code = GT_EXPR;
4049 break;
4050 case 5:
4051 code = NE_EXPR;
4052 break;
4053 case 6:
4054 code = GE_EXPR;
4055 break;
4056 case 7:
4057 /* Always true. */
4058 return omit_one_operand (type, integer_one_node, arg0);
4059 }
4060
4061 return fold (build (code, type, cval1, cval2));
4062 }
4063 }
4064 }
4065
4066 /* If this is a comparison of a field, we may be able to simplify it. */
4067 if ((TREE_CODE (arg0) == COMPONENT_REF
4068 || TREE_CODE (arg0) == BIT_FIELD_REF)
4069 && (code == EQ_EXPR || code == NE_EXPR)
4070 /* Handle the constant case even without -O
4071 to make sure the warnings are given. */
4072 && (optimize || TREE_CODE (arg1) == INTEGER_CST))
4073 {
4074 t1 = optimize_bit_field_compare (code, type, arg0, arg1);
4075 return t1 ? t1 : t;
4076 }
4077
4078 /* From here on, the only cases we handle are when the result is
4079 known to be a constant.
4080
4081 To compute GT, swap the arguments and do LT.
4082 To compute GE, do LT and invert the result.
4083 To compute LE, swap the arguments, do LT and invert the result.
4084 To compute NE, do EQ and invert the result.
4085
4086 Therefore, the code below must handle only EQ and LT. */
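/* E.g. a > b is computed as b < a, and a >= b as the
   inversion of a < b (illustrative).  */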
4087
4088 if (code == LE_EXPR || code == GT_EXPR)
4089 {
4090 tem = arg0, arg0 = arg1, arg1 = tem;
4091 code = swap_tree_comparison (code);
4092 }
4093
4094 /* Note that it is safe to invert for real values here because we
4095 will check below in the one case that it matters. */
4096
4097 invert = 0;
4098 if (code == NE_EXPR || code == GE_EXPR)
4099 {
4100 invert = 1;
4101 code = invert_tree_comparison (code);
4102 }
4103
4104 /* Compute a result for LT or EQ if args permit;
4105 otherwise return T. */
4106 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
4107 {
4108 if (code == EQ_EXPR)
4109 t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
4110 == TREE_INT_CST_LOW (arg1))
4111 && (TREE_INT_CST_HIGH (arg0)
4112 == TREE_INT_CST_HIGH (arg1)),
4113 0);
4114 else
4115 t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
4116 ? INT_CST_LT_UNSIGNED (arg0, arg1)
4117 : INT_CST_LT (arg0, arg1)),
4118 0);
4119 }
4120
4121 /* Assume a nonexplicit constant cannot equal an explicit one,
4122 since such code would be undefined anyway.
4123 Exception: on sysvr4, using #pragma weak,
4124 a label can come out as 0. */
4125 else if (TREE_CODE (arg1) == INTEGER_CST
4126 && !integer_zerop (arg1)
4127 && TREE_CONSTANT (arg0)
4128 && TREE_CODE (arg0) == ADDR_EXPR
4129 && code == EQ_EXPR)
4130 t1 = build_int_2 (0, 0);
4131
4132 /* Two real constants can be compared explicitly. */
4133 else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
4134 {
4135 /* If either operand is a NaN, the result is false with two
4136 exceptions: First, an NE_EXPR is true on NaNs, but that case
4137 is already handled correctly since we will be inverting the
4138 result for NE_EXPR. Second, if we had inverted a LE_EXPR
4139 or a GE_EXPR into a LT_EXPR, we must return true so that it
4140 will be inverted into false. */
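/* E.g. NaN < 1.0 folds to 0; NaN >= 1.0 arrives here as an
   inverted LT, so t1 is set to 1 and then inverted back to 0
   below (an illustrative trace).  */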
4141
4142 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
4143 || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
4144 t1 = build_int_2 (invert && code == LT_EXPR, 0);
4145
4146 else if (code == EQ_EXPR)
4147 t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
4148 TREE_REAL_CST (arg1)),
4149 0);
4150 else
4151 t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
4152 TREE_REAL_CST (arg1)),
4153 0);
4154 }
4155
4156 if (t1 == NULL_TREE)
4157 return t;
4158
4159 if (invert)
4160 TREE_INT_CST_LOW (t1) ^= 1;
4161
4162 TREE_TYPE (t1) = type;
4163 return t1;
4164
4165 case COND_EXPR:
4166 if (TREE_CODE (arg0) == INTEGER_CST)
4167 return TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1));
4168 else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
4169 return omit_one_operand (type, arg1, arg0);
4170
4171 /* If the second operand is zero, invert the comparison and swap
4172 the second and third operands. Likewise if the second operand
4173 is constant and the third is not or if the third operand is
4174 equivalent to the first operand of the comparison. */
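/* E.g. a == b ? 0 : y would become a != b ? y : 0 here
   (illustrative).  */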
4175
4176 if (integer_zerop (arg1)
4177 || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
4178 || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4179 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4180 TREE_OPERAND (t, 2),
4181 TREE_OPERAND (arg0, 1))))
4182 {
4183 /* See if this can be inverted. If it can't, possibly because
4184 it was a floating-point inequality comparison, don't do
4185 anything. */
4186 tem = invert_truthvalue (arg0);
4187
4188 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
4189 {
4190 arg0 = TREE_OPERAND (t, 0) = tem;
4191 TREE_OPERAND (t, 1) = TREE_OPERAND (t, 2);
4192 TREE_OPERAND (t, 2) = arg1;
4193 arg1 = TREE_OPERAND (t, 1);
4194 }
4195 }
4196
4197 /* If we have A op B ? A : C, we may be able to convert this to a
4198 simpler expression, depending on the operation and the values
4199 of B and C. IEEE floating point prevents this though,
4200 because A or B might be -0.0 or a NaN. */
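/* E.g. x > 0 ? x : -x is recognized below as abs (x)
   (illustrative).  */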
4201
4202 if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4203 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4204 || TREE_CODE (TREE_TYPE (TREE_OPERAND (arg0, 0))) != REAL_TYPE)
4205 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4206 arg1, TREE_OPERAND (arg0, 1)))
4207 {
4208 tree arg2 = TREE_OPERAND (t, 2);
4209 enum tree_code comp_code = TREE_CODE (arg0);
4210
4211 /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or -abs (A),
4212 depending on the comparison operation. */
4213 if (integer_zerop (TREE_OPERAND (arg0, 1))
4214 && TREE_CODE (arg2) == NEGATE_EXPR
4215 && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
4216 switch (comp_code)
4217 {
4218 case EQ_EXPR:
4219 return fold (build1 (NEGATE_EXPR, type, arg1));
4220 case NE_EXPR:
4221 return convert (type, arg1);
4222 case GE_EXPR:
4223 case GT_EXPR:
4224 return fold (build1 (ABS_EXPR, type, arg1));
4225 case LE_EXPR:
4226 case LT_EXPR:
4227 return fold (build1 (NEGATE_EXPR, type,
4228 fold (build1 (ABS_EXPR, type, arg1))));
4229 }
4230
4231 /* If this is A != 0 ? A : 0, this is simply A. For ==, it is
4232 always zero. */
4233
4234 if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
4235 {
4236 if (comp_code == NE_EXPR)
4237 return convert (type, arg1);
4238 else if (comp_code == EQ_EXPR)
4239 return convert (type, integer_zero_node);
4240 }
4241
4242 /* If this is A op B ? A : B, this is either A, B, min (A, B),
4243 or max (A, B), depending on the operation. */
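/* E.g. a < b ? a : b becomes min (a, b), and a > b ? a : b
   becomes max (a, b) (illustrative).  */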
4244
4245 if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
4246 arg2, TREE_OPERAND (arg0, 0)))
4247 switch (comp_code)
4248 {
4249 case EQ_EXPR:
4250 return convert (type, arg2);
4251 case NE_EXPR:
4252 return convert (type, arg1);
4253 case LE_EXPR:
4254 case LT_EXPR:
4255 return fold (build (MIN_EXPR, type, arg1, arg2));
4256 case GE_EXPR:
4257 case GT_EXPR:
4258 return fold (build (MAX_EXPR, type, arg1, arg2));
4259 }
4260
4261 /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
4262 we might still be able to simplify this. For example,
4263 if C1 is one less or one more than C2, this might have started
4264 out as a MIN or MAX and been transformed by this function.
4265 Only good for INTEGER_TYPE, because we need TYPE_MAX_VALUE. */
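/* E.g. a < 3 ? a : 2 becomes min (a, 2), since C1 == C2 + 1
   (an illustrative integer case).  */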
4266
4267 if (TREE_CODE (type) == INTEGER_TYPE
4268 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
4269 && TREE_CODE (arg2) == INTEGER_CST)
4270 switch (comp_code)
4271 {
4272 case EQ_EXPR:
4273 /* We can replace A with C1 in this case. */
4274 arg1 = TREE_OPERAND (t, 1)
4275 = convert (type, TREE_OPERAND (arg0, 1));
4276 break;
4277
4278 case LT_EXPR:
4279 /* If C1 is C2 + 1, this is min(A, C2). */
4280 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4281 && operand_equal_p (TREE_OPERAND (arg0, 1),
4282 const_binop (PLUS_EXPR, arg2,
4283 integer_one_node, 0), 1))
4284 return fold (build (MIN_EXPR, type, arg1, arg2));
4285 break;
4286
4287 case LE_EXPR:
4288 /* If C1 is C2 - 1, this is min(A, C2). */
4289 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4290 && operand_equal_p (TREE_OPERAND (arg0, 1),
4291 const_binop (MINUS_EXPR, arg2,
4292 integer_one_node, 0), 1))
4293 return fold (build (MIN_EXPR, type, arg1, arg2));
4294 break;
4295
4296 case GT_EXPR:
4297 /* If C1 is C2 - 1, this is max(A, C2). */
4298 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4299 && operand_equal_p (TREE_OPERAND (arg0, 1),
4300 const_binop (MINUS_EXPR, arg2,
4301 integer_one_node, 0), 1))
4302 return fold (build (MAX_EXPR, type, arg1, arg2));
4303 break;
4304
4305 case GE_EXPR:
4306 /* If C1 is C2 + 1, this is max(A, C2). */
4307 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4308 && operand_equal_p (TREE_OPERAND (arg0, 1),
4309 const_binop (PLUS_EXPR, arg2,
4310 integer_one_node, 0), 1))
4311 return fold (build (MAX_EXPR, type, arg1, arg2));
4312 break;
4313 }
4314 }
4315
4316 /* Convert A ? 1 : 0 to simply A. */
4317 if (integer_onep (TREE_OPERAND (t, 1))
4318 && integer_zerop (TREE_OPERAND (t, 2))
4319 /* If we try to convert TREE_OPERAND (t, 0) to our type, the
4320 call to fold will try to move the conversion inside
4321 a COND, which will recurse. In that case, the COND_EXPR
4322 is probably the best choice, so leave it alone. */
4323 && type == TREE_TYPE (arg0))
4324 return arg0;
4325
4326
4327 /* Look for expressions of the form A & 2 ? 2 : 0. The result of this
4328 operation is simply A & 2. */
4329
4330 if (integer_zerop (TREE_OPERAND (t, 2))
4331 && TREE_CODE (arg0) == NE_EXPR
4332 && integer_zerop (TREE_OPERAND (arg0, 1))
4333 && integer_pow2p (arg1)
4334 && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
4335 && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
4336 arg1, 1))
4337 return convert (type, TREE_OPERAND (arg0, 0));
4338
4339 return t;
4340
4341 case COMPOUND_EXPR:
4342 if (!TREE_SIDE_EFFECTS (arg0))
4343 return arg1;
4344 return t;
4345
4346 default:
4347 return t;
4348 } /* switch (code) */
4349 }