1 /* Fold a constant sub-tree into a single node for C-compiler
2 Copyright (C) 1987, 1988, 1992 Free Software Foundation, Inc.
3
4 This file is part of GNU CC.
5
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
10
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
19
20 /*@@ Fix lossage on folding division of big integers. */
21
22 /*@@ This file should be rewritten to use an arbitrary precision
23 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
24 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
25 @@ The routines that translate from the ap rep should
26 @@ warn if precision et al. is lost.
27 @@ This would also make life easier when this technology is used
28 @@ for cross-compilers. */
29
30
31 /* The entry points in this file are fold, size_int and size_binop.
32
33 fold takes a tree as argument and returns a simplified tree.
34
35 size_binop takes a tree code for an arithmetic operation
36 and two operands that are trees, and produces a tree for the
37 result, assuming the type comes from `sizetype'.
38
39 size_int takes an integer value, and creates a tree constant
40 with type from `sizetype'. */
41
42 #include <stdio.h>
43 #include <setjmp.h>
44 #include "config.h"
45 #include "flags.h"
46 #include "tree.h"
47
48 void lshift_double ();
49 void rshift_double ();
50 void lrotate_double ();
51 void rrotate_double ();
52 static tree const_binop ();
53 \f
54 /* To do constant folding on INTEGER_CST nodes requires 64-bit arithmetic.
55 We do that by representing the 64-bit integer as 8 shorts,
56 with only 8 bits stored in each short, as a positive number. */
57
58 /* Unpack a 64-bit integer into 8 shorts.
59 LOW and HI are the integer, as two `int' pieces.
60 SHORTS points to the array of shorts. */
61
62 static void
63 encode (shorts, low, hi)
64 short *shorts;
65 int low, hi;
66 {
67 shorts[0] = low & 0xff;
68 shorts[1] = (low >> 8) & 0xff;
69 shorts[2] = (low >> 16) & 0xff;
70 shorts[3] = (low >> 24) & 0xff;
71 shorts[4] = hi & 0xff;
72 shorts[5] = (hi >> 8) & 0xff;
73 shorts[6] = (hi >> 16) & 0xff;
74 shorts[7] = (hi >> 24) & 0xff;
75 }
76
77 /* Pack an array of 8 shorts into a 64-bit integer.
78 SHORTS points to the array of shorts.
79 The integer is stored into *LOW and *HI as two `int' pieces. */
80
81 static void
82 decode (shorts, low, hi)
83 short *shorts;
84 int *low, *hi;
85 {
86 /* The casts in the following statement should not be
87 needed, but they get around bugs in some C compilers. */
88 *low = (((long)shorts[3] << 24) | ((long)shorts[2] << 16)
89 | ((long)shorts[1] << 8) | (long)shorts[0]);
90 *hi = (((long)shorts[7] << 24) | ((long)shorts[6] << 16)
91 | ((long)shorts[5] << 8) | (long)shorts[4]);
92 }
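
/* For illustration, assuming a host `int' of 32 bits as the code above
   does: encoding the 64-bit value 0x0102030405060708 stores its bytes
   least-significant first,

       encode (s, 0x05060708, 0x01020304);
       ==>  s[] = { 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01 }

       decode (s, &lo, &hi);
       ==>  lo == 0x05060708 and hi == 0x01020304 again.

   Since each short holds one byte as a positive number, the per-digit
   sums and products below always fit in an `int' while carries
   propagate.  */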
93 \f
94 /* Make the integer constant T valid for its type
95 by setting to 0 or 1 all the bits in the constant
96 that don't belong in the type. */
97
98 static void
99 force_fit_type (t)
100 tree t;
101 {
102 register int prec = TYPE_PRECISION (TREE_TYPE (t));
103
104 if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
105 prec = POINTER_SIZE;
106
107 /* First clear all bits that are beyond the type's precision. */
108
109 if (prec == 2 * HOST_BITS_PER_INT)
110 ;
111 else if (prec > HOST_BITS_PER_INT)
112 {
113 TREE_INT_CST_HIGH (t)
114 &= ~((-1) << (prec - HOST_BITS_PER_INT));
115 }
116 else
117 {
118 TREE_INT_CST_HIGH (t) = 0;
119 if (prec < HOST_BITS_PER_INT)
120 TREE_INT_CST_LOW (t)
121 &= ~((-1) << prec);
122 }
123
124 /* If it's a signed type and value's sign bit is set, extend the sign. */
125
126 if (! TREE_UNSIGNED (TREE_TYPE (t))
127 && prec != 2 * HOST_BITS_PER_INT
128 && (prec > HOST_BITS_PER_INT
129 ? TREE_INT_CST_HIGH (t) & (1 << (prec - HOST_BITS_PER_INT - 1))
130 : TREE_INT_CST_LOW (t) & (1 << (prec - 1))))
131 {
132 /* Value is negative:
133 set to 1 all the bits that are outside this type's precision. */
134 if (prec > HOST_BITS_PER_INT)
135 {
136 TREE_INT_CST_HIGH (t)
137 |= ((-1) << (prec - HOST_BITS_PER_INT));
138 }
139 else
140 {
141 TREE_INT_CST_HIGH (t) = -1;
142 if (prec < HOST_BITS_PER_INT)
143 TREE_INT_CST_LOW (t)
144 |= ((-1) << prec);
145 }
146 }
147 }
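
/* For illustration, assuming HOST_BITS_PER_INT == 32: fitting the pair
   (low == 0xff, high == 0) to a signed 8-bit type proceeds as

       prec == 8:  high = 0;  low &= ~(-1 << 8)   ==>  low == 0xff
       the sign bit (1 << 7) is set and the type is signed, so
       high = -1;  low |= (-1 << 8)               ==>  low == -1

   leaving the pair (-1, -1), i.e. the value -1 correctly sign-extended
   through the whole 64-bit representation.  */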
148 \f
149 /* Add two 64-bit integers with 64-bit result.
150 Each argument is given as two `int' pieces.
151 One argument is L1 and H1; the other, L2 and H2.
152 The value is stored as two `int' pieces in *LV and *HV.
153 We use the 8-shorts representation internally. */
154
155 void
156 add_double (l1, h1, l2, h2, lv, hv)
157 int l1, h1, l2, h2;
158 int *lv, *hv;
159 {
160 short arg1[8];
161 short arg2[8];
162 register int carry = 0;
163 register int i;
164
165 encode (arg1, l1, h1);
166 encode (arg2, l2, h2);
167
168 for (i = 0; i < 8; i++)
169 {
170 carry += arg1[i] + arg2[i];
171 arg1[i] = carry & 0xff;
172 carry >>= 8;
173 }
174
175 decode (arg1, lv, hv);
176 }
177
178 /* Negate a 64-bit integer with 64-bit result.
179 The argument is given as two `int' pieces in L1 and H1.
180 The value is stored as two `int' pieces in *LV and *HV.
181 We use the 8-shorts representation internally. */
182
183 void
184 neg_double (l1, h1, lv, hv)
185 int l1, h1;
186 int *lv, *hv;
187 {
188 if (l1 == 0)
189 {
190 *lv = 0;
191 *hv = - h1;
192 }
193 else
194 {
195 *lv = - l1;
196 *hv = ~ h1;
197 }
198 }
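
/* For illustration: the two cases above compute -(H*2^32 + L) piecewise.
   When L is nonzero,

       -(H*2^32 + L) == (~H)*2^32 + (2^32 - L)

   since ~H == -H - 1; negating the low word (-L, viewed as unsigned, is
   2^32 - L) borrows one from the high word, and ~H supplies exactly that
   borrow.  When L is zero there is no borrow, and the high word is
   negated alone.  */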
199 \f
200 /* Multiply two 64-bit integers with 64-bit result.
201 Each argument is given as two `int' pieces.
202 One argument is L1 and H1; the other, L2 and H2.
203 The value is stored as two `int' pieces in *LV and *HV.
204 We use the 8-shorts representation internally. */
205
206 void
207 mul_double (l1, h1, l2, h2, lv, hv)
208 int l1, h1, l2, h2;
209 int *lv, *hv;
210 {
211 short arg1[8];
212 short arg2[8];
213 short prod[16];
214 register int carry = 0;
215 register int i, j, k;
216
217 /* These cases (multiplication by 2, 4 or 8) are used extensively,
218 arising from pointer combinations. */
219 if (h2 == 0)
220 {
221 if (l2 == 2)
222 {
223 unsigned temp = l1 + l1;
224 *hv = h1 * 2 + (temp < l1);
225 *lv = temp;
226 return;
227 }
228 if (l2 == 4)
229 {
230 unsigned temp = l1 + l1;
231 h1 = h1 * 4 + ((temp < l1) << 1);
232 l1 = temp;
233 temp += temp;
234 h1 += (temp < l1);
235 *lv = temp;
236 *hv = h1;
237 return;
238 }
239 if (l2 == 8)
240 {
241 unsigned temp = l1 + l1;
242 h1 = h1 * 8 + ((temp < l1) << 2);
243 l1 = temp;
244 temp += temp;
245 h1 += (temp < l1) << 1;
246 l1 = temp;
247 temp += temp;
248 h1 += (temp < l1);
249 *lv = temp;
250 *hv = h1;
251 return;
252 }
253 }
254
255 encode (arg1, l1, h1);
256 encode (arg2, l2, h2);
257
258 bzero (prod, sizeof prod);
259
260 for (i = 0; i < 8; i++)
261 for (j = 0; j < 8; j++)
262 {
263 k = i + j;
264 carry = arg1[i] * arg2[j];
265 while (carry)
266 {
267 carry += prod[k];
268 prod[k] = carry & 0xff;
269 carry >>= 8;
270 k++;
271 }
272 }
273
274 decode (prod, lv, hv); /* @@decode ignores prod[8] -> prod[15] */
275 }
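
/* For illustration: the special cases above double L1 with
   `temp = l1 + l1' and detect the carry out of the low word with the
   unsigned test `temp < l1', which holds exactly when the addition
   wrapped; multiplying by 2 is thus

       *lv = l1 + l1;  *hv = 2 * h1 + carry_out_of_low_word;

   and *4 and *8 just repeat the doubling.  The general path is
   schoolbook multiplication in base 256: each digit product
   arg1[i] * arg2[j] is at most 255 * 255 and is folded into prod[] one
   byte at a time, and prod[8] through prod[15] -- the high half of the
   128-bit product -- are discarded by decode, yielding the low 64 bits
   as wraparound semantics require.  */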
276 \f
277 /* Shift the 64-bit integer in L1, H1 left by COUNT places
278 keeping only PREC bits of result.
279 Shift right if COUNT is negative.
280 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
281 Store the value as two `int' pieces in *LV and *HV. */
282
283 void
284 lshift_double (l1, h1, count, prec, lv, hv, arith)
285 int l1, h1, count, prec;
286 int *lv, *hv;
287 int arith;
288 {
289 short arg1[8];
290 register int i;
291 register int carry;
292
293 if (count < 0)
294 {
295 rshift_double (l1, h1, - count, prec, lv, hv, arith);
296 return;
297 }
298
299 encode (arg1, l1, h1);
300
301 if (count > prec)
302 count = prec;
303
304 while (count > 0)
305 {
306 carry = 0;
307 for (i = 0; i < 8; i++)
308 {
309 carry += arg1[i] << 1;
310 arg1[i] = carry & 0xff;
311 carry >>= 8;
312 }
313 count--;
314 }
315
316 decode (arg1, lv, hv);
317 }
318
319 /* Shift the 64-bit integer in L1, H1 right by COUNT places
320 keeping only PREC bits of result. COUNT must be positive.
321 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
322 Store the value as two `int' pieces in *LV and *HV. */
323
324 void
325 rshift_double (l1, h1, count, prec, lv, hv, arith)
326 int l1, h1, count, prec;
327 int *lv, *hv;
328 int arith;
329 {
330 short arg1[8];
331 register int i;
332 register int carry;
333
334 encode (arg1, l1, h1);
335
336 if (count > prec)
337 count = prec;
338
339 while (count > 0)
340 {
341 carry = arith && arg1[7] >> 7;
342 for (i = 7; i >= 0; i--)
343 {
344 carry <<= 8;
345 carry += arg1[i];
346 arg1[i] = (carry >> 1) & 0xff;
347 }
348 count--;
349 }
350
351 decode (arg1, lv, hv);
352 }
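
/* For illustration: both shift routines move one bit per pass over the
   byte array.  Right-shifting 0x8000000000000000 by one with ARITH
   nonzero seeds CARRY from the sign bit (arg1[7] >> 7) and yields
   0xC000000000000000; with ARITH zero the same shift yields
   0x4000000000000000, the logical result.  */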
353 \f
354 /* Rotate the 64-bit integer in L1, H1 left by COUNT places
355 keeping only PREC bits of result.
356 Rotate right if COUNT is negative.
357 Store the value as two `int' pieces in *LV and *HV. */
358
359 void
360 lrotate_double (l1, h1, count, prec, lv, hv)
361 int l1, h1, count, prec;
362 int *lv, *hv;
363 {
364 short arg1[8];
365 register int i;
366 register int carry;
367
368 if (count < 0)
369 {
370 rrotate_double (l1, h1, - count, prec, lv, hv);
371 return;
372 }
373
374 encode (arg1, l1, h1);
375
376 if (count > prec)
377 count = prec;
378
379 carry = arg1[7] >> 7;
380 while (count > 0)
381 {
382 for (i = 0; i < 8; i++)
383 {
384 carry += arg1[i] << 1;
385 arg1[i] = carry & 0xff;
386 carry >>= 8;
387 }
388 count--;
389 }
390
391 decode (arg1, lv, hv);
392 }
393
394 /* Rotate the 64-bit integer in L1, H1 right by COUNT places
395 keeping only PREC bits of result. COUNT must be positive.
396 Store the value as two `int' pieces in *LV and *HV. */
397
398 void
399 rrotate_double (l1, h1, count, prec, lv, hv)
400 int l1, h1, count, prec;
401 int *lv, *hv;
402 {
403 short arg1[8];
404 register int i;
405 register int carry;
406
407 encode (arg1, l1, h1);
408
409 if (count > prec)
410 count = prec;
411
412 carry = arg1[0] & 1;
413 while (count > 0)
414 {
415 for (i = 7; i >= 0; i--)
416 {
417 carry <<= 8;
418 carry += arg1[i];
419 arg1[i] = (carry >> 1) & 0xff;
420 }
421 count--;
422 }
423
424 decode (arg1, lv, hv);
425 }
426 \f
427 /* Divide 64-bit integer LNUM, HNUM by 64-bit integer LDEN, HDEN
428 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
429 CODE is a tree code for a kind of division, one of
430 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR,
431 or EXACT_DIV_EXPR.
432 It controls how the quotient is rounded to an integer.
433 UNS nonzero says do unsigned division. */
434
435 static void
436 div_and_round_double (code, uns,
437 lnum_orig, hnum_orig, lden_orig, hden_orig,
438 lquo, hquo, lrem, hrem)
439 enum tree_code code;
440 int uns;
441 int lnum_orig, hnum_orig; /* num == numerator == dividend */
442 int lden_orig, hden_orig; /* den == denominator == divisor */
443 int *lquo, *hquo, *lrem, *hrem;
444 {
445 int quo_neg = 0;
446 short num[9], den[8], quo[8]; /* extra element for scaling. */
447 register int i, j, work;
448 register int carry = 0;
449 unsigned int lnum = lnum_orig;
450 int hnum = hnum_orig;
451 unsigned int lden = lden_orig;
452 int hden = hden_orig;
453
454 if ((hden == 0) && (lden == 0))
455 abort ();
456
457 /* calculate quotient sign and convert operands to unsigned. */
458 if (!uns)
459 {
460 if (hden < 0)
461 {
462 quo_neg = ~ quo_neg;
463 neg_double (lden, hden, &lden, &hden);
464 }
465 if (hnum < 0)
466 {
467 quo_neg = ~ quo_neg;
468 neg_double (lnum, hnum, &lnum, &hnum);
469 }
470 }
471
472 if (hnum == 0 && hden == 0)
473 { /* single precision */
474 *hquo = *hrem = 0;
475 *lquo = lnum / lden; /* rounds toward zero since positive args */
476 goto finish_up;
477 }
478
479 if (hnum == 0)
480 { /* trivial case: dividend < divisor */
481 /* hden != 0 already checked. */
482 *hquo = *lquo = 0;
483 *hrem = hnum;
484 *lrem = lnum;
485 goto finish_up;
486 }
487
488 bzero (quo, sizeof quo);
489
490 bzero (num, sizeof num); /* to zero 9th element */
491 bzero (den, sizeof den);
492
493 encode (num, lnum, hnum);
494 encode (den, lden, hden);
495
496 /* This code requires more than just hden == 0.
497 We also have to require that we don't need more than three bytes
498 to hold CARRY. If we ever did need four bytes to hold it, we
499 would lose part of it when computing WORK on the next round. */
500 if (hden == 0 && ((lden << 8) >> 8) == lden)
501 { /* simpler algorithm */
502 /* hnum != 0 already checked. */
503 for (i = 7; i >= 0; i--)
504 {
505 work = num[i] + (carry << 8);
506 quo[i] = work / lden;
507 carry = work % lden;
508 }
509 }
510 else { /* full double precision,
511 with thanks to Don Knuth's
512 "Semi-Numericial Algorithms". */
513 #define BASE 256
514 int quo_est, scale, num_hi_sig, den_hi_sig, quo_hi_sig;
515
516 /* Find the highest non-zero divisor digit. */
517 for (i = 7; ; i--)
518 if (den[i] != 0) {
519 den_hi_sig = i;
520 break;
521 }
522 for (i = 7; ; i--)
523 if (num[i] != 0) {
524 num_hi_sig = i;
525 break;
526 }
527 quo_hi_sig = num_hi_sig - den_hi_sig + 1;
528
529 /* Ensure that the first digit of the divisor is at least BASE/2.
530 This is required by the quotient digit estimation algorithm. */
531
532 scale = BASE / (den[den_hi_sig] + 1);
533 if (scale > 1) { /* scale divisor and dividend */
534 carry = 0;
535 for (i = 0; i <= 8; i++) {
536 work = (num[i] * scale) + carry;
537 num[i] = work & 0xff;
538 carry = work >> 8;
539 if (num[i] != 0) num_hi_sig = i;
540 }
541 carry = 0;
542 for (i = 0; i <= 7; i++) {
543 work = (den[i] * scale) + carry;
544 den[i] = work & 0xff;
545 carry = work >> 8;
546 if (den[i] != 0) den_hi_sig = i;
547 }
548 }
549
550 /* Main loop */
551 for (i = quo_hi_sig; i > 0; i--) {
552 /* Guess the next quotient digit, quo_est, by dividing the first
553 two remaining dividend digits by the high order divisor digit.
554 quo_est is never low and is at most 2 high. */
555
556 int num_hi; /* index of highest remaining dividend digit */
557
558 num_hi = i + den_hi_sig;
559
560 work = (num[num_hi] * BASE) + (num_hi > 0 ? num[num_hi - 1] : 0);
561 if (num[num_hi] != den[den_hi_sig]) {
562 quo_est = work / den[den_hi_sig];
563 }
564 else {
565 quo_est = BASE - 1;
566 }
567
568 /* refine quo_est so it's usually correct, and at most one high. */
569 while ((den[den_hi_sig - 1] * quo_est)
570 > (((work - (quo_est * den[den_hi_sig])) * BASE)
571 + ((num_hi - 1) > 0 ? num[num_hi - 2] : 0)))
572 quo_est--;
573
574 /* Try QUO_EST as the quotient digit, by multiplying the
575 divisor by QUO_EST and subtracting from the remaining dividend.
576 Keep in mind that QUO_EST is the I - 1st digit. */
577
578 carry = 0;
579
580 for (j = 0; j <= den_hi_sig; j++)
581 {
582 int digit;
583
584 work = num[i + j - 1] - (quo_est * den[j]) + carry;
585 digit = work & 0xff;
586 carry = work >> 8;
587 if (digit < 0)
588 {
589 digit += BASE;
590 carry--;
591 }
592 num[i + j - 1] = digit;
593 }
594
595 /* if quo_est was high by one, then num[num_hi] went negative and
596 we need to correct things. */
597
598 if (num[num_hi] < 0)
599 {
600 quo_est--;
601 carry = 0; /* add divisor back in */
602 for (j = 0; j <= den_hi_sig; j++)
603 {
604 work = num[i + j - 1] + den[j] + carry;
605 if (work >= BASE)
606 {
607 work -= BASE;
608 carry = 1;
609 }
610 else
611 {
612 carry = 0;
613 }
614 num[i + j - 1] = work;
615 }
616 num [num_hi] += carry;
617 }
618
619 /* store the quotient digit. */
620 quo[i - 1] = quo_est;
621 }
622 }
623
624 decode (quo, lquo, hquo);
625
626 finish_up:
627 /* if result is negative, make it so. */
628 if (quo_neg)
629 neg_double (*lquo, *hquo, lquo, hquo);
630
631 /* compute trial remainder: rem = num - (quo * den) */
632 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
633 neg_double (*lrem, *hrem, lrem, hrem);
634 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
635
636 switch (code)
637 {
638 case TRUNC_DIV_EXPR:
639 case TRUNC_MOD_EXPR: /* round toward zero */
640 case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
641 return;
642
643 case FLOOR_DIV_EXPR:
644 case FLOOR_MOD_EXPR: /* round toward negative infinity */
645 if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
646 {
647 /* quo = quo - 1; */
648 add_double (*lquo, *hquo, -1, -1, lquo, hquo);
649 }
650 else return;
651 break;
652
653 case CEIL_DIV_EXPR:
654 case CEIL_MOD_EXPR: /* round toward positive infinity */
655 if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
656 {
657 add_double (*lquo, *hquo, 1, 0, lquo, hquo);
658 }
659 else return;
660 break;
661
662 case ROUND_DIV_EXPR:
663 case ROUND_MOD_EXPR: /* round to closest integer */
664 {
665 int labs_rem = *lrem, habs_rem = *hrem;
666 int labs_den = lden, habs_den = hden, ltwice, htwice;
667
668 /* get absolute values */
669 if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
670 if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);
671
672 /* if (2 * abs (lrem) >= abs (lden)) */
673 mul_double (2, 0, labs_rem, habs_rem, &ltwice, &htwice);
674 if (((unsigned) habs_den < (unsigned) htwice)
675 || (((unsigned) habs_den == (unsigned) htwice)
676 && ((unsigned) labs_den < (unsigned) ltwice)))
677 {
678 if (*hquo < 0)
679 /* quo = quo - 1; */
680 add_double (*lquo, *hquo, -1, -1, lquo, hquo);
681 else
682 /* quo = quo + 1; */
683 add_double (*lquo, *hquo, 1, 0, lquo, hquo);
684 }
685 else return;
686 }
687 break;
688
689 default:
690 abort ();
691 }
692
693 /* compute true remainder: rem = num - (quo * den) */
694 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
695 neg_double (*lrem, *hrem, lrem, hrem);
696 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
697 }
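
/* For illustration: how the rounding adjustments above play out for
   -7 divided by 2 (exact quotient -3.5):

       TRUNC_DIV_EXPR:  quo == -3, rem == -1  (toward zero)
       FLOOR_DIV_EXPR:  quo == -4, rem ==  1  (toward negative infinity)
       CEIL_DIV_EXPR:   quo == -3, rem == -1  (toward positive infinity)
       ROUND_DIV_EXPR:  quo == -4, rem ==  1  (2*|rem| >= |den|, so the
                                               quotient moves away from 0)

   The final remainder is always recomputed from rem = num - quo * den,
   so quo * den + rem == num holds in every mode.  */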
698 \f
699 #if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
700
701 /* Check for infinity in an IEEE double precision number. */
702
703 int
704 target_isinf (x)
705 REAL_VALUE_TYPE x;
706 {
707 /* The IEEE 64-bit double format. */
708 union {
709 REAL_VALUE_TYPE d;
710 struct {
711 unsigned sign : 1;
712 unsigned exponent : 11;
713 unsigned mantissa1 : 20;
714 unsigned mantissa2;
715 } little_endian;
716 struct {
717 unsigned mantissa2;
718 unsigned mantissa1 : 20;
719 unsigned exponent : 11;
720 unsigned sign : 1;
721 } big_endian;
722 } u;
723
724 u.d = dconstm1;
725 if (u.big_endian.sign == 1)
726 {
727 u.d = x;
728 return (u.big_endian.exponent == 2047
729 && u.big_endian.mantissa1 == 0
730 && u.big_endian.mantissa2 == 0);
731 }
732 else
733 {
734 u.d = x;
735 return (u.little_endian.exponent == 2047
736 && u.little_endian.mantissa1 == 0
737 && u.little_endian.mantissa2 == 0);
738 }
739 }
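
/* For illustration: the union trick above discovers the host's byte
   order at run time.  dconstm1 is -1.0, whose IEEE sign bit is 1, so
   whichever struct view reports sign == 1 has the field layout matching
   the host, and that view is then used to pick apart X.  An exponent of
   2047 (all ones) with a zero mantissa encodes IEEE infinity; the same
   exponent with a nonzero mantissa (tested in target_isnan below) is a
   NaN.  */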
740
741 /* Check whether an IEEE double precision number is a NaN. */
742
743 int
744 target_isnan (x)
745 REAL_VALUE_TYPE x;
746 {
747 /* The IEEE 64-bit double format. */
748 union {
749 REAL_VALUE_TYPE d;
750 struct {
751 unsigned sign : 1;
752 unsigned exponent : 11;
753 unsigned mantissa1 : 20;
754 unsigned mantissa2;
755 } little_endian;
756 struct {
757 unsigned mantissa2;
758 unsigned mantissa1 : 20;
759 unsigned exponent : 11;
760 unsigned sign : 1;
761 } big_endian;
762 } u;
763
764 u.d = dconstm1;
765 if (u.big_endian.sign == 1)
766 {
767 u.d = x;
768 return (u.big_endian.exponent == 2047
769 && (u.big_endian.mantissa1 != 0
770 || u.big_endian.mantissa2 != 0));
771 }
772 else
773 {
774 u.d = x;
775 return (u.little_endian.exponent == 2047
776 && (u.little_endian.mantissa1 != 0
777 || u.little_endian.mantissa2 != 0));
778 }
779 }
780
781 /* Check for minus zero in an IEEE double precision number. */
782
783 int
784 target_minus_zero (x)
785 REAL_VALUE_TYPE x;
786 {
787 REAL_VALUE_TYPE d1, d2;
788
789 d1 = REAL_VALUE_NEGATE (x);
790 d2 = dconst0;
791
792 return !bcmp (&d1, &d2, sizeof (d1));
793 }
794 #else /* Target not IEEE */
795
796 /* Let's assume other float formats don't have infinity.
797 (This can be overridden by redefining REAL_VALUE_ISINF.) */
798
799 int target_isinf (x)
800 REAL_VALUE_TYPE x;
801 {
802 return 0;
803 }
804
805 /* Let's assume other float formats don't have NaNs.
806 (This can be overridden by redefining REAL_VALUE_ISNAN.) */
807
808 int target_isnan (x)
809 REAL_VALUE_TYPE x;
810 {
811 return 0;
812 }
813
814 /* Let's assume other float formats don't have minus zero.
815 (This can be overridden by redefining REAL_VALUE_MINUS_ZERO.) */
816
817 int target_minus_zero (x)
818 REAL_VALUE_TYPE x;
819 {
820 return 0;
821 }
822 #endif /* Target not IEEE */
823 \f
824 /* Split a tree IN into a constant and a variable part
825 that could be combined with CODE to make IN.
826 CODE must be a commutative arithmetic operation.
827 Store the constant part into *CONP and the variable part into *VARP.
828 Return 1 if this was done; zero means the tree IN did not decompose
829 this way.
830
831 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
832 Therefore, we must tell the caller whether the variable part
833 was subtracted. We do this by storing 1 or -1 into *VARSIGNP.
834 The value stored is the coefficient for the variable term.
835 The constant term we return should always be added;
836 we negate it if necessary. */
837
838 static int
839 split_tree (in, code, varp, conp, varsignp)
840 tree in;
841 enum tree_code code;
842 tree *varp, *conp;
843 int *varsignp;
844 {
845 register tree outtype = TREE_TYPE (in);
846 *varp = 0;
847 *conp = 0;
848
849 /* Strip any conversions that don't change the machine mode. */
850 while ((TREE_CODE (in) == NOP_EXPR
851 || TREE_CODE (in) == CONVERT_EXPR)
852 && (TYPE_MODE (TREE_TYPE (in))
853 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
854 in = TREE_OPERAND (in, 0);
855
856 if (TREE_CODE (in) == code
857 || (TREE_CODE (TREE_TYPE (in)) != REAL_TYPE
858 /* We can associate addition and subtraction together
859 (even though the C standard doesn't say so)
860 for integers because the value is not affected.
861 For reals, the value might be affected, so we can't. */
862 &&
863 ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
864 || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
865 {
866 enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
867 if (code == INTEGER_CST)
868 {
869 *conp = TREE_OPERAND (in, 0);
870 *varp = TREE_OPERAND (in, 1);
871 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
872 && TREE_TYPE (*varp) != outtype)
873 *varp = convert (outtype, *varp);
874 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
875 return 1;
876 }
877 if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
878 {
879 *conp = TREE_OPERAND (in, 1);
880 *varp = TREE_OPERAND (in, 0);
881 *varsignp = 1;
882 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
883 && TREE_TYPE (*varp) != outtype)
884 *varp = convert (outtype, *varp);
885 if (TREE_CODE (in) == MINUS_EXPR)
886 {
887 /* If operation is subtraction and constant is second,
888 must negate it to get an additive constant.
889 And this cannot be done unless it is a manifest constant.
890 It could also be the address of a static variable.
891 We cannot negate that, so give up. */
892 if (TREE_CODE (*conp) == INTEGER_CST)
893 /* Subtracting from integer_zero_node loses for long long. */
894 *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
895 else
896 return 0;
897 }
898 return 1;
899 }
900 if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
901 {
902 *conp = TREE_OPERAND (in, 0);
903 *varp = TREE_OPERAND (in, 1);
904 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
905 && TREE_TYPE (*varp) != outtype)
906 *varp = convert (outtype, *varp);
907 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
908 return 1;
909 }
910 }
911 return 0;
912 }
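
/* For illustration: typical decompositions performed above when
   CODE == PLUS_EXPR:

       x + 5  ==>  *varp = x, *conp = 5,  *varsignp =  1
       5 + x  ==>  *varp = x, *conp = 5,  *varsignp =  1
       x - 5  ==>  *varp = x, *conp = -5, *varsignp =  1  (constant negated)
       5 - x  ==>  *varp = x, *conp = 5,  *varsignp = -1

   For x - &v the constant part would have to be negated to stay
   additive, which cannot be done for an address, so 0 is returned and
   no split is made.  */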
913 \f
914 /* Combine two constants ARG1 and ARG2 under operation CODE
915 to produce a new constant.
916 We assume ARG1 and ARG2 have the same data type,
917 or at least are the same kind of constant and the same machine mode. */
918
919 /* Handle floating overflow for `const_binop'. */
920 static jmp_buf const_binop_error;
921
922 static tree
923 const_binop (code, arg1, arg2)
924 enum tree_code code;
925 register tree arg1, arg2;
926 {
927 if (TREE_CODE (arg1) == INTEGER_CST)
928 {
929 register int int1l = TREE_INT_CST_LOW (arg1);
930 register int int1h = TREE_INT_CST_HIGH (arg1);
931 int int2l = TREE_INT_CST_LOW (arg2);
932 int int2h = TREE_INT_CST_HIGH (arg2);
933 int low, hi;
934 int garbagel, garbageh;
935 register tree t;
936 int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
937
938 switch (code)
939 {
940 case BIT_IOR_EXPR:
941 t = build_int_2 (int1l | int2l, int1h | int2h);
942 break;
943
944 case BIT_XOR_EXPR:
945 t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
946 break;
947
948 case BIT_AND_EXPR:
949 t = build_int_2 (int1l & int2l, int1h & int2h);
950 break;
951
952 case BIT_ANDTC_EXPR:
953 t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
954 break;
955
956 case RSHIFT_EXPR:
957 int2l = - int2l;
958 case LSHIFT_EXPR:
959 lshift_double (int1l, int1h, int2l,
960 TYPE_PRECISION (TREE_TYPE (arg1)),
961 &low, &hi,
962 !uns);
963 t = build_int_2 (low, hi);
964 break;
965
966 case RROTATE_EXPR:
967 int2l = - int2l;
968 case LROTATE_EXPR:
969 lrotate_double (int1l, int1h, int2l,
970 TYPE_PRECISION (TREE_TYPE (arg1)),
971 &low, &hi);
972 t = build_int_2 (low, hi);
973 break;
974
975 case PLUS_EXPR:
976 if (int1h == 0)
977 {
978 int2l += int1l;
979 if ((unsigned) int2l < int1l)
980 int2h += 1;
981 t = build_int_2 (int2l, int2h);
982 break;
983 }
984 if (int2h == 0)
985 {
986 int1l += int2l;
987 if ((unsigned) int1l < int2l)
988 int1h += 1;
989 t = build_int_2 (int1l, int1h);
990 break;
991 }
992 add_double (int1l, int1h, int2l, int2h, &low, &hi);
993 t = build_int_2 (low, hi);
994 break;
995
996 case MINUS_EXPR:
997 if (int2h == 0 && int2l == 0)
998 {
999 t = build_int_2 (int1l, int1h);
1000 break;
1001 }
1002 neg_double (int2l, int2h, &int2l, &int2h);
1003 add_double (int1l, int1h, int2l, int2h, &low, &hi);
1004 t = build_int_2 (low, hi);
1005 break;
1006
1007 case MULT_EXPR:
1008 /* Optimize simple cases. */
1009 if (int1h == 0)
1010 {
1011 unsigned temp;
1012
1013 switch (int1l)
1014 {
1015 case 0:
1016 t = build_int_2 (0, 0);
1017 goto got_it;
1018 case 1:
1019 t = build_int_2 (int2l, int2h);
1020 goto got_it;
1021 case 2:
1022 temp = int2l + int2l;
1023 int2h = int2h * 2 + (temp < int2l);
1024 t = build_int_2 (temp, int2h);
1025 goto got_it;
1026 case 3:
1027 temp = int2l + int2l + int2l;
1028 int2h = int2h * 3 + (temp < int2l);
1029 t = build_int_2 (temp, int2h);
1030 goto got_it;
1031 case 4:
1032 temp = int2l + int2l;
1033 int2h = int2h * 4 + ((temp < int2l) << 1);
1034 int2l = temp;
1035 temp += temp;
1036 int2h += (temp < int2l);
1037 t = build_int_2 (temp, int2h);
1038 goto got_it;
1039 case 8:
1040 temp = int2l + int2l;
1041 int2h = int2h * 8 + ((temp < int2l) << 2);
1042 int2l = temp;
1043 temp += temp;
1044 int2h += (temp < int2l) << 1;
1045 int2l = temp;
1046 temp += temp;
1047 int2h += (temp < int2l);
1048 t = build_int_2 (temp, int2h);
1049 goto got_it;
1050 default:
1051 break;
1052 }
1053 }
1054
1055 if (int2h == 0)
1056 {
1057 if (int2l == 0)
1058 {
1059 t = build_int_2 (0, 0);
1060 break;
1061 }
1062 if (int2l == 1)
1063 {
1064 t = build_int_2 (int1l, int1h);
1065 break;
1066 }
1067 }
1068
1069 mul_double (int1l, int1h, int2l, int2h, &low, &hi);
1070 t = build_int_2 (low, hi);
1071 break;
1072
1073 case TRUNC_DIV_EXPR:
1074 case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
1075 case EXACT_DIV_EXPR:
1076 /* This is a shortcut for a common special case.
1077 It reduces the number of tree nodes generated
1078 and saves time. */
1079 if (int2h == 0 && int2l > 0
1080 && TREE_TYPE (arg1) == sizetype
1081 && int1h == 0 && int1l >= 0)
1082 {
1083 if (code == CEIL_DIV_EXPR)
1084 int1l += int2l-1;
1085 return size_int (int1l / int2l);
1086 }
1087 case ROUND_DIV_EXPR:
1088 if (int2h == 0 && int2l == 1)
1089 {
1090 t = build_int_2 (int1l, int1h);
1091 break;
1092 }
1093 if (int1l == int2l && int1h == int2h)
1094 {
1095 if ((int1l | int1h) == 0)
1096 abort ();
1097 t = build_int_2 (1, 0);
1098 break;
1099 }
1100 div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
1101 &low, &hi, &garbagel, &garbageh);
1102 t = build_int_2 (low, hi);
1103 break;
1104
1105 case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
1106 case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
1107 div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
1108 &garbagel, &garbageh, &low, &hi);
1109 t = build_int_2 (low, hi);
1110 break;
1111
1112 case MIN_EXPR:
1113 case MAX_EXPR:
1114 if (uns)
1115 {
1116 low = (((unsigned) int1h < (unsigned) int2h)
1117 || (((unsigned) int1h == (unsigned) int2h)
1118 && ((unsigned) int1l < (unsigned) int2l)));
1119 }
1120 else
1121 {
1122 low = ((int1h < int2h)
1123 || ((int1h == int2h)
1124 && ((unsigned) int1l < (unsigned) int2l)));
1125 }
1126 if (low == (code == MIN_EXPR))
1127 t = build_int_2 (int1l, int1h);
1128 else
1129 t = build_int_2 (int2l, int2h);
1130 break;
1131
1132 default:
1133 abort ();
1134 }
1135 got_it:
1136 TREE_TYPE (t) = TREE_TYPE (arg1);
1137 force_fit_type (t);
1138 return t;
1139 }
1140 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1141 if (TREE_CODE (arg1) == REAL_CST)
1142 {
1143 register REAL_VALUE_TYPE d1;
1144 register REAL_VALUE_TYPE d2;
1145 register REAL_VALUE_TYPE value;
1146
1147 d1 = TREE_REAL_CST (arg1);
1148 d2 = TREE_REAL_CST (arg2);
1149 if (setjmp (const_binop_error))
1150 {
1151 warning ("floating overflow in constant folding");
1152 return build (code, TREE_TYPE (arg1), arg1, arg2);
1153 }
1154 set_float_handler (const_binop_error);
1155
1156 #ifdef REAL_ARITHMETIC
1157 REAL_ARITHMETIC (value, code, d1, d2);
1158 #else
1159 switch (code)
1160 {
1161 case PLUS_EXPR:
1162 value = d1 + d2;
1163 break;
1164
1165 case MINUS_EXPR:
1166 value = d1 - d2;
1167 break;
1168
1169 case MULT_EXPR:
1170 value = d1 * d2;
1171 break;
1172
1173 case RDIV_EXPR:
1174 #ifndef REAL_INFINITY
1175 if (d2 == 0)
1176 abort ();
1177 #endif
1178
1179 value = d1 / d2;
1180 break;
1181
1182 case MIN_EXPR:
1183 value = MIN (d1, d2);
1184 break;
1185
1186 case MAX_EXPR:
1187 value = MAX (d1, d2);
1188 break;
1189
1190 default:
1191 abort ();
1192 }
1193 #endif /* no REAL_ARITHMETIC */
1194 set_float_handler (0);
1195 return build_real (TREE_TYPE (arg1),
1196 REAL_VALUE_TRUNCATE (TYPE_MODE (TREE_TYPE (arg1)), value));
1197 }
1198 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1199 if (TREE_CODE (arg1) == COMPLEX_CST)
1200 {
1201 register tree r1 = TREE_REALPART (arg1);
1202 register tree i1 = TREE_IMAGPART (arg1);
1203 register tree r2 = TREE_REALPART (arg2);
1204 register tree i2 = TREE_IMAGPART (arg2);
1205 register tree t;
1206
1207 switch (code)
1208 {
1209 case PLUS_EXPR:
1210 t = build_complex (const_binop (PLUS_EXPR, r1, r2),
1211 const_binop (PLUS_EXPR, i1, i2));
1212 break;
1213
1214 case MINUS_EXPR:
1215 t = build_complex (const_binop (MINUS_EXPR, r1, r2),
1216 const_binop (MINUS_EXPR, i1, i2));
1217 break;
1218
1219 case MULT_EXPR:
1220 t = build_complex (const_binop (MINUS_EXPR,
1221 const_binop (MULT_EXPR, r1, r2),
1222 const_binop (MULT_EXPR, i1, i2)),
1223 const_binop (PLUS_EXPR,
1224 const_binop (MULT_EXPR, r1, i2),
1225 const_binop (MULT_EXPR, i1, r2)));
1226 break;
1227
1228 case RDIV_EXPR:
1229 {
1230 register tree magsquared
1231 = const_binop (PLUS_EXPR,
1232 const_binop (MULT_EXPR, r2, r2),
1233 const_binop (MULT_EXPR, i2, i2));
1234 t = build_complex (const_binop (RDIV_EXPR,
1235 const_binop (PLUS_EXPR,
1236 const_binop (MULT_EXPR, r1, r2),
1237 const_binop (MULT_EXPR, i1, i2)),
1238 magsquared),
1239 const_binop (RDIV_EXPR,
1240 const_binop (MINUS_EXPR,
1241 const_binop (MULT_EXPR, i1, r2),
1242 const_binop (MULT_EXPR, r1, i2)),
1243 magsquared));
1244 }
1245 break;
1246
1247 default:
1248 abort ();
1249 }
1250 TREE_TYPE (t) = TREE_TYPE (arg1);
1251 return t;
1252 }
1253 return 0;
1254 }
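
/* For illustration: the COMPLEX_CST cases above fold componentwise by
   the usual identities

       (a + bi) * (c + di) == (ac - bd) + (ad + bc)i
       (a + bi) / (c + di) == ((ac + bd) + (bc - ad)i) / (c*c + d*d)

   with every component computed by a recursive const_binop call.  */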
1255 \f
1256 /* Return an INTEGER_CST with value NUMBER and type from `sizetype'. */
1257
1258 tree
1259 size_int (number)
1260 unsigned int number;
1261 {
1262 register tree t;
1263 /* Type-size nodes already made for small sizes. */
1264 static tree size_table[2*HOST_BITS_PER_INT+1];
1265
1266 if (number >= 0 && number < 2*HOST_BITS_PER_INT+1 && size_table[number] != 0)
1267 return size_table[number];
1268 if (number >= 0 && number < 2*HOST_BITS_PER_INT+1)
1269 {
1270 int temp = allocation_temporary_p ();
1271
1272 push_obstacks_nochange ();
1273 /* Make this a permanent node. */
1274 if (temp)
1275 end_temporary_allocation ();
1276 t = build_int_2 (number, 0);
1277 TREE_TYPE (t) = sizetype;
1278 size_table[number] = t;
1279 pop_obstacks ();
1280 }
1281 else
1282 {
1283 t = build_int_2 (number, 0);
1284 TREE_TYPE (t) = sizetype;
1285 }
1286 return t;
1287 }
1288
1289 /* Combine operands ARG0 and ARG1 with arithmetic operation CODE.
1290 CODE is a tree code. Data type is taken from `sizetype'.
1291 If the operands are constant, so is the result. */
1292
1293 tree
1294 size_binop (code, arg0, arg1)
1295 enum tree_code code;
1296 tree arg0, arg1;
1297 {
1298 /* Handle the special case of two integer constants faster. */
1299 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
1300 {
1301 /* And some specific cases even faster than that. */
1302 if (code == PLUS_EXPR
1303 && TREE_INT_CST_LOW (arg0) == 0
1304 && TREE_INT_CST_HIGH (arg0) == 0)
1305 return arg1;
1306 if (code == MINUS_EXPR
1307 && TREE_INT_CST_LOW (arg1) == 0
1308 && TREE_INT_CST_HIGH (arg1) == 0)
1309 return arg0;
1310 if (code == MULT_EXPR
1311 && TREE_INT_CST_LOW (arg0) == 1
1312 && TREE_INT_CST_HIGH (arg0) == 0)
1313 return arg1;
1314 /* Handle general case of two integer constants. */
1315 return const_binop (code, arg0, arg1);
1316 }
1317
1318 if (arg0 == error_mark_node || arg1 == error_mark_node)
1319 return error_mark_node;
1320
1321 return fold (build (code, sizetype, arg0, arg1));
1322 }
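
/* For illustration: a typical use when laying out types, with both
   operands constant so the result folds immediately:

       size_binop (PLUS_EXPR, size_int (4), size_int (8))
           ==>  an INTEGER_CST of 12 with type `sizetype'

   and the identities handled specially above (0 + x, x - 0, 1 * x)
   return the other operand without building a new node.  */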
1323 \f
1324 /* Given T, a tree representing type conversion of ARG1, a constant,
1325 return a constant tree representing the result of conversion. */
1326
1327 static tree
1328 fold_convert (t, arg1)
1329 register tree t;
1330 register tree arg1;
1331 {
1332 register tree type = TREE_TYPE (t);
1333
1334 if (TREE_CODE (type) == POINTER_TYPE
1335 || TREE_CODE (type) == INTEGER_TYPE
1336 || TREE_CODE (type) == ENUMERAL_TYPE)
1337 {
1338 if (TREE_CODE (arg1) == INTEGER_CST)
1339 {
1340 /* Given an integer constant, make new constant with new type,
1341 appropriately sign-extended or truncated. */
1342 t = build_int_2 (TREE_INT_CST_LOW (arg1),
1343 TREE_INT_CST_HIGH (arg1));
1344 TREE_TYPE (t) = type;
1345 force_fit_type (t);
1346 }
1347 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1348 else if (TREE_CODE (arg1) == REAL_CST)
1349 {
1350 if (REAL_VALUES_LESS (real_value_from_int_cst (TYPE_MAX_VALUE (type)),
1351 TREE_REAL_CST (arg1))
1352 || REAL_VALUES_LESS (TREE_REAL_CST (arg1),
1353 real_value_from_int_cst (TYPE_MIN_VALUE (type))))
1354 {
1355 warning ("real constant out of range for integer conversion");
1356 return t;
1357 }
1358 #ifndef REAL_ARITHMETIC
1359 {
1360 REAL_VALUE_TYPE d;
1361 int low, high;
1362 int half_word = 1 << (HOST_BITS_PER_INT / 2);
1363
1364 d = TREE_REAL_CST (arg1);
1365 if (d < 0)
1366 d = -d;
1367
1368 high = (int) (d / half_word / half_word);
1369 d -= (REAL_VALUE_TYPE) high * half_word * half_word;
1370 low = (unsigned) d;
1371 if (TREE_REAL_CST (arg1) < 0)
1372 neg_double (low, high, &low, &high);
1373 t = build_int_2 (low, high);
1374 }
1375 #else
1376 {
1377 int low, high;
1378 REAL_VALUE_TO_INT (low, high, TREE_REAL_CST (arg1));
1379 t = build_int_2 (low, high);
1380 }
1381 #endif
1382 TREE_TYPE (t) = type;
1383 force_fit_type (t);
1384 }
1385 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1386 TREE_TYPE (t) = type;
1387 }
1388 else if (TREE_CODE (type) == REAL_TYPE)
1389 {
1390 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1391 if (TREE_CODE (arg1) == INTEGER_CST)
1392 return build_real_from_int_cst (type, arg1);
1393 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1394 if (TREE_CODE (arg1) == REAL_CST)
1395 return build_real (type, REAL_VALUE_TRUNCATE (TYPE_MODE (type),
1396 TREE_REAL_CST (arg1)));
1397 }
1398 TREE_CONSTANT (t) = 1;
1399 return t;
1400 }
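
/* For illustration: in the non-REAL_ARITHMETIC path above, HALF_WORD is
   2^(HOST_BITS_PER_INT/2), so `d / half_word / half_word' divides by
   2^HOST_BITS_PER_INT in two steps that each stay in range, splitting
   |d| into its high and low `int' halves; the sign is restored
   afterward with neg_double.  */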
1401 \f
1402 /* Return an expr equal to X but certainly not valid as an lvalue. */
1403
1404 tree
1405 non_lvalue (x)
1406 tree x;
1407 {
1408 tree result;
1409
1410 /* These things are certainly not lvalues. */
1411 if (TREE_CODE (x) == NON_LVALUE_EXPR
1412 || TREE_CODE (x) == INTEGER_CST
1413 || TREE_CODE (x) == REAL_CST
1414 || TREE_CODE (x) == STRING_CST
1415 || TREE_CODE (x) == ADDR_EXPR)
1416 return x;
1417
1418 result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
1419 TREE_CONSTANT (result) = TREE_CONSTANT (x);
1420 return result;
1421 }
1422
1423 /* Return nonzero if two operands are necessarily equal.
1424 If ONLY_CONST is non-zero, only return non-zero for constants. */
1425
1426 int
1427 operand_equal_p (arg0, arg1, only_const)
1428 tree arg0, arg1;
1429 int only_const;
1430 {
1431 /* If the two types differ in signedness, then we can't consider
1432 them equal. We must check this before the STRIP_NOPS calls
1433 because they may change the signedness of the arguments. */
1434 if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
1435 return 0;
1436
1437 STRIP_NOPS (arg0);
1438 STRIP_NOPS (arg1);
1439
1440 /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
1441 We don't care about side effects in that case because the SAVE_EXPR
1442 takes care of that for us. */
1443 if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
1444 return ! only_const;
1445
1446 if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
1447 return 0;
1448
1449 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1450 && TREE_CODE (arg0) == ADDR_EXPR
1451 && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
1452 return 1;
1453
1454 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1455 && TREE_CODE (arg0) == INTEGER_CST
1456 && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
1457 && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
1458 return 1;
1459
1460 /* Detect when real constants are equal.
1461 But reject weird values because we can't be sure what to do with them. */
1462 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1463 && TREE_CODE (arg0) == REAL_CST
1464 && REAL_VALUES_EQUAL (TREE_REAL_CST (arg0), TREE_REAL_CST (arg1))
1465 && !REAL_VALUE_ISINF (TREE_REAL_CST (arg0))
1466 && !REAL_VALUE_ISNAN (TREE_REAL_CST (arg0)))
1467 return 1;
1468
1469 if (only_const)
1470 return 0;
1471
1472 if (arg0 == arg1)
1473 return 1;
1474
1475 if (TREE_CODE (arg0) != TREE_CODE (arg1))
1476 return 0;
1477 /* This is needed for conversions and for COMPONENT_REF.
1478 Might as well play it safe and always test this. */
1479 if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
1480 return 0;
1481
1482 switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
1483 {
1484 case '1':
1485 /* Two conversions are equal only if signedness and modes match. */
1486 if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
1487 && (TREE_UNSIGNED (TREE_TYPE (arg0))
1488 != TREE_UNSIGNED (TREE_TYPE (arg1))))
1489 return 0;
1490
1491 return operand_equal_p (TREE_OPERAND (arg0, 0),
1492 TREE_OPERAND (arg1, 0), 0);
1493
1494 case '<':
1495 case '2':
1496 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1497 TREE_OPERAND (arg1, 0), 0)
1498 && operand_equal_p (TREE_OPERAND (arg0, 1),
1499 TREE_OPERAND (arg1, 1), 0));
1500
1501 case 'r':
1502 switch (TREE_CODE (arg0))
1503 {
1504 case INDIRECT_REF:
1505 return operand_equal_p (TREE_OPERAND (arg0, 0),
1506 TREE_OPERAND (arg1, 0), 0);
1507
1508 case COMPONENT_REF:
1509 case ARRAY_REF:
1510 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1511 TREE_OPERAND (arg1, 0), 0)
1512 && operand_equal_p (TREE_OPERAND (arg0, 1),
1513 TREE_OPERAND (arg1, 1), 0));
1514
1515 case BIT_FIELD_REF:
1516 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1517 TREE_OPERAND (arg1, 0), 0)
1518 && operand_equal_p (TREE_OPERAND (arg0, 1),
1519 TREE_OPERAND (arg1, 1), 0)
1520 && operand_equal_p (TREE_OPERAND (arg0, 2),
1521 TREE_OPERAND (arg1, 2), 0));
1522 }
1523 break;
1524 }
1525
1526 return 0;
1527 }
1528
1529 /* Return nonzero if comparing COMP1 with COMP2
1530 gives the same result as comparing OP1 with OP2.
1531 When in doubt, return 0. */
1532
1533 static int
1534 comparison_equiv_p (comp1, comp2, op1, op2)
1535 tree comp1, comp2, op1, op2;
1536 {
1537 int unsignedp1, unsignedp2;
1538 tree primop1, primop2;
1539 int correct_width;
1540
1541 if (operand_equal_p (comp1, op1, 0)
1542 && operand_equal_p (comp2, op2, 0))
1543 return 1;
1544
1545 if (TREE_CODE (TREE_TYPE (op1)) != INTEGER_TYPE)
1546 return 0;
1547
1548 if (TREE_TYPE (op1) != TREE_TYPE (op2))
1549 return 0;
1550
1551 if (TREE_TYPE (comp1) != TREE_TYPE (comp2))
1552 return 0;
1553
1554 /* Duplicate what shorten_compare does to the comparison operands,
1555 and see if that gives the actual comparison operands, COMP1 and COMP2. */
1556
1557 /* Throw away any conversions to wider types
1558 already present in the operands. */
1559 primop1 = get_narrower (op1, &unsignedp1);
1560 primop2 = get_narrower (op2, &unsignedp2);
1561
1562 correct_width = TYPE_PRECISION (TREE_TYPE (op2));
1563 if (unsignedp1 == unsignedp2
1564 && TYPE_PRECISION (TREE_TYPE (primop1)) < correct_width
1565 && TYPE_PRECISION (TREE_TYPE (primop2)) < correct_width)
1566 {
1567 tree type = TREE_TYPE (comp1);
1568
1569 /* Make sure shorter operand is extended the right way
1570 to match the longer operand. */
1571 primop1 = convert (signed_or_unsigned_type (unsignedp1, TREE_TYPE (primop1)),
1572 primop1);
1573 primop2 = convert (signed_or_unsigned_type (unsignedp2, TREE_TYPE (primop2)),
1574 primop2);
1575
1576 primop1 = convert (type, primop1);
1577 primop2 = convert (type, primop2);
1578
1579 if (operand_equal_p (comp1, primop1, 0)
1580 && operand_equal_p (comp2, primop2, 0))
1581 return 1;
1582 }
1583
1584 return 0;
1585 }
1586 \f
1587 /* Return a tree for the case when the result of an expression is RESULT
1588 converted to TYPE and OMITTED was previously an operand of the expression
1589 but is now not needed (e.g., we folded OMITTED * 0).
1590
1591 If OMITTED has side effects, we must evaluate it. Otherwise, just do
1592 the conversion of RESULT to TYPE. */
1593
1594 static tree
1595 omit_one_operand (type, result, omitted)
1596 tree type, result, omitted;
1597 {
1598 tree t = convert (type, result);
1599
1600 if (TREE_SIDE_EFFECTS (omitted))
1601 return build (COMPOUND_EXPR, type, omitted, t);
1602
1603 return t;
1604 }
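
/* For illustration: when folding f () * 0, the call cannot simply
   vanish because it may have side effects, so omit_one_operand yields
   the equivalent of the C expression (f (), 0): a COMPOUND_EXPR that
   evaluates the call and then produces the constant.  */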
1605 \f
1606 /* Return a simplified tree node for the truth-negation of ARG
1607 (perhaps by altering ARG). It is known that ARG is an operation that
1608 returns a truth value (0 or 1). */
1609
1610 tree
1611 invert_truthvalue (arg)
1612 tree arg;
1613 {
1614 tree type = TREE_TYPE (arg);
1615
1616 /* For floating-point comparisons, it isn't safe to invert the condition.
1617 So just enclose a TRUTH_NOT_EXPR around what we have. */
1618 if (TREE_CODE_CLASS (TREE_CODE (arg)) == '<'
1619 && TREE_CODE (TREE_TYPE (TREE_OPERAND (arg, 0))) == REAL_TYPE)
1620 return build1 (TRUTH_NOT_EXPR, type, arg);
1621
1622 switch (TREE_CODE (arg))
1623 {
1624 case NE_EXPR:
1625 TREE_SET_CODE (arg, EQ_EXPR);
1626 return arg;
1627
1628 case EQ_EXPR:
1629 TREE_SET_CODE (arg, NE_EXPR);
1630 return arg;
1631
1632 case GE_EXPR:
1633 TREE_SET_CODE (arg, LT_EXPR);
1634 return arg;
1635
1636 case GT_EXPR:
1637 TREE_SET_CODE (arg, LE_EXPR);
1638 return arg;
1639
1640 case LE_EXPR:
1641 TREE_SET_CODE (arg, GT_EXPR);
1642 return arg;
1643
1644 case LT_EXPR:
1645 TREE_SET_CODE (arg, GE_EXPR);
1646 return arg;
1647
1648 case INTEGER_CST:
1649 return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
1650 && TREE_INT_CST_HIGH (arg) == 0, 0));
1651
1652 case TRUTH_AND_EXPR:
1653 return build (TRUTH_OR_EXPR, type,
1654 invert_truthvalue (TREE_OPERAND (arg, 0)),
1655 invert_truthvalue (TREE_OPERAND (arg, 1)));
1656
1657 case TRUTH_OR_EXPR:
1658 return build (TRUTH_AND_EXPR, type,
1659 invert_truthvalue (TREE_OPERAND (arg, 0)),
1660 invert_truthvalue (TREE_OPERAND (arg, 1)));
1661
1662 case TRUTH_ANDIF_EXPR:
1663 return build (TRUTH_ORIF_EXPR, type,
1664 invert_truthvalue (TREE_OPERAND (arg, 0)),
1665 invert_truthvalue (TREE_OPERAND (arg, 1)));
1666
1667 case TRUTH_ORIF_EXPR:
1668 return build (TRUTH_ANDIF_EXPR, type,
1669 invert_truthvalue (TREE_OPERAND (arg, 0)),
1670 invert_truthvalue (TREE_OPERAND (arg, 1)));
1671
1672 case TRUTH_NOT_EXPR:
1673 return TREE_OPERAND (arg, 0);
1674
1675 case COND_EXPR:
1676 return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
1677 invert_truthvalue (TREE_OPERAND (arg, 1)),
1678 invert_truthvalue (TREE_OPERAND (arg, 2)));
1679
1680 case COMPOUND_EXPR:
1681 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
1682 invert_truthvalue (TREE_OPERAND (arg, 1)));
1683
1684 case NON_LVALUE_EXPR:
1685 return invert_truthvalue (TREE_OPERAND (arg, 0));
1686
1687 case NOP_EXPR:
1688 case CONVERT_EXPR:
1689 case FLOAT_EXPR:
1690 return build1 (TREE_CODE (arg), type,
1691 invert_truthvalue (TREE_OPERAND (arg, 0)));
1692
1693 case BIT_AND_EXPR:
1694 if (! integer_onep (TREE_OPERAND (arg, 1)))
1695 abort ();
1696 return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));
1697 }
1698
1699 abort ();
1700 }
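
/* For illustration: inversion recurses by De Morgan's laws, so for
   integer operands

       ! (a < b && c != d)   becomes   a >= b || c == d

   Floating-point comparisons are left wrapped in TRUTH_NOT_EXPR above
   because ! (x < y) is not x >= y when either operand is a NaN.  */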
1701
1702 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
1703 operands are another bit-wise operation with a common input. If so,
1704 distribute the bit operations to save an operation and possibly two if
1705 constants are involved. For example, convert
1706 (A | B) & (A | C) into A | (B & C)
1707 Further simplification will occur if B and C are constants.
1708
1709 If this optimization cannot be done, 0 will be returned. */
1710
1711 static tree
1712 distribute_bit_expr (code, type, arg0, arg1)
1713 enum tree_code code;
1714 tree type;
1715 tree arg0, arg1;
1716 {
1717 tree common;
1718 tree left, right;
1719
1720 if (TREE_CODE (arg0) != TREE_CODE (arg1)
1721 || TREE_CODE (arg0) == code
1722 || (TREE_CODE (arg0) != BIT_AND_EXPR
1723 && TREE_CODE (arg0) != BIT_IOR_EXPR))
1724 return 0;
1725
1726 if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
1727 {
1728 common = TREE_OPERAND (arg0, 0);
1729 left = TREE_OPERAND (arg0, 1);
1730 right = TREE_OPERAND (arg1, 1);
1731 }
1732 else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
1733 {
1734 common = TREE_OPERAND (arg0, 0);
1735 left = TREE_OPERAND (arg0, 1);
1736 right = TREE_OPERAND (arg1, 0);
1737 }
1738 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
1739 {
1740 common = TREE_OPERAND (arg0, 1);
1741 left = TREE_OPERAND (arg0, 0);
1742 right = TREE_OPERAND (arg1, 1);
1743 }
1744 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
1745 {
1746 common = TREE_OPERAND (arg0, 1);
1747 left = TREE_OPERAND (arg0, 0);
1748 right = TREE_OPERAND (arg1, 0);
1749 }
1750 else
1751 return 0;
1752
1753 return fold (build (TREE_CODE (arg0), type, common,
1754 fold (build (code, type, left, right))));
1755 }
1756 \f
1757 /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
1758 starting at BITPOS. The field is unsigned if UNSIGNEDP is non-zero. */
1759
1760 static tree
1761 make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
1762 tree inner;
1763 tree type;
1764 int bitsize, bitpos;
1765 int unsignedp;
1766 {
1767 tree result = build (BIT_FIELD_REF, type, inner,
1768 size_int (bitsize), size_int (bitpos));
1769
1770 TREE_UNSIGNED (result) = unsignedp;
1771
1772 return result;
1773 }
1774
1775 /* Optimize a bit-field compare.
1776
1777 There are two cases: First is a compare against a constant and the
1778 second is a comparison of two items where the fields are at the same
1779 bit position relative to the start of a chunk (byte, halfword, word)
1780 large enough to contain it. In these cases we can avoid the shift
1781 implicit in bitfield extractions.
1782
1783 For constants, we emit a compare of the shifted constant with the
1784 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
1785 compared. For two fields at the same position, we do the ANDs with the
1786 similar mask and compare the result of the ANDs.
1787
1788 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
1789 COMPARE_TYPE is the type of the comparison, and LHS and RHS
1790 are the left and right operands of the comparison, respectively.
1791
1792 If the optimization described above can be done, we return the resulting
1793 tree. Otherwise we return zero. */
1794
1795 static tree
1796 optimize_bit_field_compare (code, compare_type, lhs, rhs)
1797 enum tree_code code;
1798 tree compare_type;
1799 tree lhs, rhs;
1800 {
1801 int lbitpos, lbitsize, rbitpos, rbitsize;
1802 int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
1803 tree type = TREE_TYPE (lhs);
1804 tree signed_type, unsigned_type;
1805 int const_p = TREE_CODE (rhs) == INTEGER_CST;
1806 enum machine_mode lmode, rmode, lnmode, rnmode;
1807 int lunsignedp, runsignedp;
1808 int lvolatilep = 0, rvolatilep = 0;
1809 tree linner, rinner;
1810 tree mask;
1811
1812 /* Get all the information about the extractions being done. If the bit size
1813 is the same as the size of the underlying object, we aren't doing an
1814 extraction at all and so can do nothing. */
1815 linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &lmode,
1816 &lunsignedp, &lvolatilep);
1817 if (lbitsize == GET_MODE_BITSIZE (lmode))
1818 return 0;
1819
1820 if (!const_p)
1821 {
1822 /* If this is not a constant, we can only do something if bit positions,
1823 sizes, and signedness are the same. */
1824 rinner = get_inner_reference (rhs, &rbitsize, &rbitpos,
1825 &rmode, &runsignedp, &rvolatilep);
1826
1827 if (lbitpos != rbitpos || lbitsize != rbitsize
1828 || lunsignedp != runsignedp)
1829 return 0;
1830 }
1831
1832 /* See if we can find a mode to refer to this field. We should be able to,
1833 but fail if we can't. */
1834 lnmode = get_best_mode (lbitsize, lbitpos,
1835 TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
1836 lvolatilep);
1837 if (lnmode == VOIDmode)
1838 return 0;
1839
1840 /* Set signed and unsigned types of the precision of this mode for the
1841 shifts below. */
1842 signed_type = type_for_mode (lnmode, 0);
1843 unsigned_type = type_for_mode (lnmode, 1);
1844
1845 if (! const_p)
1846 {
1847 rnmode = get_best_mode (rbitsize, rbitpos,
1848 TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
1849 rvolatilep);
1850 if (rnmode == VOIDmode)
1851 return 0;
1852 }
1853
1854 /* Compute the bit position and size for the new reference and our offset
1855 within it. If the new reference is the same size as the original, we
1856 won't optimize anything, so return zero. */
1857 lnbitsize = GET_MODE_BITSIZE (lnmode);
1858 lnbitpos = lbitpos & ~ (lnbitsize - 1);
1859 lbitpos -= lnbitpos;
1860 if (lnbitsize == lbitsize)
1861 return 0;
1862
1863 if (! const_p)
1864 {
1865 rnbitsize = GET_MODE_BITSIZE (rnmode);
1866 rnbitpos = rbitpos & ~ (rnbitsize - 1);
1867 rbitpos -= rnbitpos;
1868 if (rnbitsize == rbitsize)
1869 return 0;
1870 }
1871
1872 #if BYTES_BIG_ENDIAN
1873 lbitpos = lnbitsize - lbitsize - lbitpos;
1874 if (! const_p) rbitpos = rnbitsize - rbitsize - rbitpos;
1875 #endif
1876
1877 /* Make the mask to be used against the extracted field. */
1878 mask = convert (unsigned_type, build_int_2 (~0, ~0));
1879 mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize));
1880 mask = const_binop (RSHIFT_EXPR, mask,
1881 size_int (lnbitsize - lbitsize - lbitpos));
1882
1883 if (! const_p)
1884 /* If not comparing with constant, just rework the comparison
1885 and return. */
1886 return build (code, compare_type,
1887 build (BIT_AND_EXPR, type,
1888 make_bit_field_ref (linner, type,
1889 lnbitsize, lnbitpos, lunsignedp),
1890 mask),
1891 build (BIT_AND_EXPR, type,
1892 make_bit_field_ref (rinner, type,
1893 rnbitsize, rnbitpos, runsignedp),
1894 mask));
1895
1896 /* Otherwise, we are handling the constant case. See if the constant is too
1897 big for the field. Warn and return a tree for the known result if so. We do
1898 this not only for its own sake, but to avoid having to test for this
1899 error case below. If we didn't, we might generate wrong code.
1900
1901 For unsigned fields, the constant shifted right by the field length should
1902 be all zero. For signed fields, the high-order bits should agree with
1903 the sign bit. */
1904
1905 if (lunsignedp)
1906 {
1907 if (! integer_zerop (const_binop (RSHIFT_EXPR,
1908 convert (unsigned_type, rhs),
1909 size_int (lbitsize))))
1910 {
1911 warning ("comparison is always %s due to width of bitfield",
1912 code == NE_EXPR ? "one" : "zero");
1913 return convert (compare_type,
1914 (code == NE_EXPR
1915 ? integer_one_node : integer_zero_node));
1916 }
1917 }
1918 else
1919 {
1920 tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
1921 size_int (lbitsize - 1));
1922 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
1923 {
1924 warning ("comparison is always %s due to width of bitfield",
1925 code == NE_EXPR ? "one" : "zero");
1926 return convert (compare_type,
1927 (code == NE_EXPR
1928 ? integer_one_node : integer_zero_node));
1929 }
1930 }
1931
1932 /* Single-bit compares should always be against zero. */
1933 if (lbitsize == 1 && ! integer_zerop (rhs))
1934 {
1935 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
1936 rhs = convert (type, integer_zero_node);
1937 }
1938
1939 /* Make a new bitfield reference, shift the constant over the
1940 appropriate number of bits and mask it with the computed mask
1941 (in case this was a signed field). If we changed it, make a new one. */
1942 lhs = make_bit_field_ref (linner, TREE_TYPE (lhs), lnbitsize, lnbitpos,
1943 lunsignedp);
1944
1945 rhs = fold (build1 (NOP_EXPR, type,
1946 const_binop (BIT_AND_EXPR,
1947 const_binop (LSHIFT_EXPR,
1948 convert (unsigned_type, rhs),
1949 size_int (lbitpos)), mask)));
1950
1951 return build (code, compare_type,
1952 build (BIT_AND_EXPR, type, lhs, mask),
1953 rhs);
1954 }
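
/* For illustration: the mask built above for a 3-bit field at bit 2 of
   an 8-bit unit (lnbitsize == 8, lbitsize == 3, lbitpos == 2,
   little-endian bit numbering):

       mask = ~0;                                  11111111
       mask <<= lnbitsize - lbitsize;              11100000  (<< 5)
       mask >>= lnbitsize - lbitsize - lbitpos;    00011100  (>> 3)

   i.e. ones exactly over the field, so the comparison becomes a
   byte-wide AND-and-compare with no run-time shift.  */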
1955 \f
1956 /* Subroutine for the following routine: decode a field reference.
1957
1958 If EXP is a comparison reference, we return the innermost reference.
1959
1960 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
1961 set to the starting bit number.
1962
1963 If the innermost field can be completely contained in a mode-sized
1964 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
1965
1966 *PVOLATILEP is set to 1 if any expression encountered is volatile;
1967 otherwise it is not changed.
1968
1969 *PUNSIGNEDP is set to the signedness of the field.
1970
1971 *PMASK is set to the mask used. This is either contained in a
1972 BIT_AND_EXPR or derived from the width of the field.
1973
1974 Return 0 if this is not a component reference or is one that we can't
1975 do anything with. */
1976
1977 static tree
1978 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
1979 pvolatilep, pmask)
1980 tree exp;
1981 int *pbitsize, *pbitpos;
1982 enum machine_mode *pmode;
1983 int *punsignedp, *pvolatilep;
1984 tree *pmask;
1985 {
1986 tree mask = 0;
1987 tree inner;
1988
1989 STRIP_NOPS (exp);
1990
1991 if (TREE_CODE (exp) == BIT_AND_EXPR)
1992 {
1993 mask = TREE_OPERAND (exp, 1);
1994 exp = TREE_OPERAND (exp, 0);
1995 STRIP_NOPS (exp); STRIP_NOPS (mask);
1996 if (TREE_CODE (mask) != INTEGER_CST)
1997 return 0;
1998 }
1999
2000 if (TREE_CODE (exp) != COMPONENT_REF && TREE_CODE (exp) != ARRAY_REF
2001 && TREE_CODE (exp) != BIT_FIELD_REF)
2002 return 0;
2003
2004 inner = get_inner_reference (exp, pbitsize, pbitpos, pmode,
2005 punsignedp, pvolatilep);
2006
2007 if (mask == 0)
2008 {
2009 tree unsigned_type = type_for_size (*pbitsize, 1);
2010 int precision = TYPE_PRECISION (unsigned_type);
2011
2012 mask = convert (unsigned_type, build_int_2 (~0, ~0));
2013 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize));
2014 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize));
2015 }
2016
2017 *pmask = mask;
2018 return inner;
2019 }
2020
2021 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order
2022 bit positions. */
2023
2024 static int
2025 all_ones_mask_p (mask, size)
2026 tree mask;
2027 int size;
2028 {
2029 tree type = TREE_TYPE (mask);
2030 int precision = TYPE_PRECISION (type);
2031
2032 return
2033 operand_equal_p (mask,
2034 const_binop (RSHIFT_EXPR,
2035 const_binop (LSHIFT_EXPR,
2036 convert (signed_type (type),
2037 build_int_2 (~0, ~0)),
2038 size_int (precision - size)),
2039 size_int (precision - size)), 0);
2040 }
2041 \f
2042 /* Try to merge two comparisons to the same innermost item.
2043
2044 For example, if we have p->a == 2 && p->b == 4 and we can make an
2045 object large enough to span both A and B, we can do this with a comparison
2046 against the object ANDed with a mask.
2047
2048 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2049 operations to do this with one comparison.
2050
2051 We check for both normal comparisons and the BIT_AND_EXPRs made by this
2052 function and the one above.
2053
2054 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2055 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2056
2057 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its
2058 two operands.
2059
2060 We return the simplified tree or 0 if no optimization is possible. */
2061
2062 static tree
2063 merge_component_references (code, truth_type, lhs, rhs)
2064 enum tree_code code;
2065 tree truth_type, lhs, rhs;
2066 {
2067 /* If this is the "or" of two comparisons, we can do something if
2068 the comparisons are NE_EXPR. If this is the "and", we can do something
2069 if the comparisons are EQ_EXPR. I.e.,
2070 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2071
2072 WANTED_CODE is this operation code. For single bit fields, we can
2073 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2074 comparison for one-bit fields. */
2075
2076 enum tree_code wanted_code
2077 = (code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR) ? EQ_EXPR : NE_EXPR;
2078 enum tree_code lcode, rcode;
2079 tree ll_inner, lr_inner, rl_inner, rr_inner;
2080 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2081 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2082 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2083 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2084 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2085 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2086 enum machine_mode lnmode, rnmode;
2087 tree ll_mask, lr_mask, rl_mask, rr_mask;
2088 tree l_const = 0, r_const = 0;
2089 tree type, result;
2090 int first_bit, end_bit;
2091 int volatilep = 0;
2092
2093 /* Start by getting the comparison codes and seeing if we may be able
2094 to do something. Then get all the parameters for each side. Fail
2095 if anything is volatile. */
2096
2097 lcode = TREE_CODE (lhs);
2098 rcode = TREE_CODE (rhs);
2099 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2100 || (rcode != EQ_EXPR && rcode != NE_EXPR)
2101 || TREE_SIDE_EFFECTS (lhs) || TREE_SIDE_EFFECTS (rhs))
2102 return 0;
2103
2104 ll_inner = decode_field_reference (TREE_OPERAND (lhs, 0),
2105 &ll_bitsize, &ll_bitpos, &ll_mode,
2106 &ll_unsignedp, &volatilep, &ll_mask);
2107 lr_inner = decode_field_reference (TREE_OPERAND (lhs, 1),
2108 &lr_bitsize, &lr_bitpos, &lr_mode,
2109 &lr_unsignedp, &volatilep, &lr_mask);
2110 rl_inner = decode_field_reference (TREE_OPERAND (rhs, 0),
2111 &rl_bitsize, &rl_bitpos, &rl_mode,
2112 &rl_unsignedp, &volatilep, &rl_mask);
2113 rr_inner = decode_field_reference (TREE_OPERAND (rhs, 1),
2114 &rr_bitsize, &rr_bitpos, &rr_mode,
2115 &rr_unsignedp, &volatilep, &rr_mask);
2116
2117 /* The innermost references on the lhs of each comparison must be
2118 the same if we are to be able to do anything.
2119 Then see if we have constants. If not, the same must be true for
2120 the rhs's. */
2121 if (volatilep || ll_inner == 0 || rl_inner == 0
2122 || ! operand_equal_p (ll_inner, rl_inner, 0))
2123 return 0;
2124
2125 if (TREE_CODE (TREE_OPERAND (lhs, 1)) == INTEGER_CST
2126 && TREE_CODE (TREE_OPERAND (rhs, 1)) == INTEGER_CST)
2127 l_const = TREE_OPERAND (lhs, 1), r_const = TREE_OPERAND (rhs, 1);
2128 else if (lr_inner == 0 || rr_inner == 0
2129 || ! operand_equal_p (lr_inner, rr_inner, 0))
2130 return 0;
2131
2132 /* If either comparison code is not correct for our logical operation,
2133 fail. However, we can convert a one-bit comparison against zero into
2134 the opposite comparison against that bit being set in the field. */
2135 if (lcode != wanted_code)
2136 {
2137 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2138 l_const = ll_mask;
2139 else
2140 return 0;
2141 }
2142
2143 if (rcode != wanted_code)
2144 {
2145 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2146 r_const = rl_mask;
2147 else
2148 return 0;
2149 }
2150
2151 /* See if we can find a mode that contains both fields being compared on
2152 the left. If we can't, fail. Otherwise, update all constants and masks
2153 to be relative to a field of that size. */
2154 first_bit = MIN (ll_bitpos, rl_bitpos);
2155 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2156 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2157 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2158 volatilep);
2159 if (lnmode == VOIDmode)
2160 return 0;
2161
2162 lnbitsize = GET_MODE_BITSIZE (lnmode);
2163 lnbitpos = first_bit & ~ (lnbitsize - 1);
2164 type = type_for_size (lnbitsize, 1);
2165 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
2166
2167 #if BYTES_BIG_ENDIAN
2168 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2169 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
2170 #endif
2171
2172 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2173 size_int (xll_bitpos));
2174 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2175 size_int (xrl_bitpos));
2176
2177 /* Make sure the constants are interpreted as unsigned, so we
2178 don't have sign bits outside the range of their type. */
2179
2180 if (l_const)
2181 {
2182 l_const = convert (unsigned_type (TREE_TYPE (l_const)), l_const);
2183 l_const = const_binop (LSHIFT_EXPR, convert (type, l_const),
2184 size_int (xll_bitpos));
2185 }
2186 if (r_const)
2187 {
2188 r_const = convert (unsigned_type (TREE_TYPE (r_const)), r_const);
2189 r_const = const_binop (LSHIFT_EXPR, convert (type, r_const),
2190 size_int (xrl_bitpos));
2191 }
2192
2193 /* If the right sides are not constant, do the same for them. Also,
2194 disallow this optimization if a size or signedness mismatch occurs
2195 between the left and right sides. */
2196 if (l_const == 0)
2197 {
2198 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2199 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp)
2200 return 0;
2201
2202 first_bit = MIN (lr_bitpos, rr_bitpos);
2203 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2204 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2205 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2206 volatilep);
2207 if (rnmode == VOIDmode)
2208 return 0;
2209
2210 rnbitsize = GET_MODE_BITSIZE (rnmode);
2211 rnbitpos = first_bit & ~ (rnbitsize - 1);
2212 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2213
2214 #if BYTES_BIG_ENDIAN
2215 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2216 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2217 #endif
2218
2219 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2220 size_int (xlr_bitpos));
2221 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2222 size_int (xrr_bitpos));
2223
2224 /* Make a mask that corresponds to both fields being compared.
2225 Do this for both items being compared. If the masks agree,
2226 we can do this by masking both and comparing the masked
2227 results. */
2228 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask);
2229 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask);
2230 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
2231 {
2232 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2233 ll_unsignedp || rl_unsignedp);
2234 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
2235 lr_unsignedp || rr_unsignedp);
2236 if (! all_ones_mask_p (ll_mask, lnbitsize))
2237 {
2238 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
2239 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
2240 }
2241 return build (wanted_code, truth_type, lhs, rhs);
2242 }
2243
2244 /* There is still another way we can do something: If both pairs of
2245 fields being compared are adjacent, we may be able to make a wider
2246 field containing them both. */
2247 if ((ll_bitsize + ll_bitpos == rl_bitpos
2248 && lr_bitsize + lr_bitpos == rr_bitpos)
2249 || (ll_bitpos == rl_bitpos + rl_bitsize
2250 && lr_bitpos == rr_bitpos + rr_bitsize))
2251 return build (wanted_code, truth_type,
2252 make_bit_field_ref (ll_inner, type,
2253 ll_bitsize + rl_bitsize,
2254 MIN (ll_bitpos, rl_bitpos),
2255 ll_unsignedp),
2256 make_bit_field_ref (lr_inner, type,
2257 lr_bitsize + rr_bitsize,
2258 MIN (lr_bitpos, rr_bitpos),
2259 lr_unsignedp));
2260
2261 return 0;
2262 }
2263
2264 /* Handle the case of comparisons with constants. If there is something in
2265 common between the masks, those bits of the constants must be the same.
2266 If not, the condition is always false. Test for this to avoid generating
2267 incorrect code below. */
2268 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask);
2269 if (! integer_zerop (result)
2270 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const),
2271 const_binop (BIT_AND_EXPR, result, r_const)) != 1)
2272 {
2273 if (wanted_code == NE_EXPR)
2274 {
2275 warning ("`or' of unmatched not-equal tests is always 1");
2276 return convert (truth_type, integer_one_node);
2277 }
2278 else
2279 {
2280 warning ("`and' of mutually exclusive equal-tests is always zero");
2281 return convert (truth_type, integer_zero_node);
2282 }
2283 }
2284
2285 /* Construct the expression we will return. First get the component
2286 reference we will make. Unless the mask is all ones for the width of
2287 that field, perform the mask operation. Then compare with the
2288 merged constant. */
2289 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2290 ll_unsignedp || rl_unsignedp);
2291
2292 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask);
2293 if (! all_ones_mask_p (ll_mask, lnbitsize))
2294 result = build (BIT_AND_EXPR, type, result, ll_mask);
2295
2296 return build (wanted_code, truth_type, result,
2297 const_binop (BIT_IOR_EXPR, l_const, r_const));
2298 }
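
/* A hedged source-level illustration of the merge performed above.
   The struct, its layout, and `two_field_test' are hypothetical
   examples, not part of the folder itself.  */

struct merge_example { unsigned a : 4, b : 4; };

static int
two_field_test (p)
     struct merge_example *p;
{
  /* Folded, this pair of bitfield tests behaves like a single load of
     the byte holding both fields, masked and compared against the
     merged constant: on a target that allocates bitfields from the
     low end, that constant is (4 << 4) | 2.  */
  return p->a == 2 && p->b == 4;
}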
2299 \f
2300 /* Perform constant folding and related simplification of EXPR.
2301 The related simplifications include x*1 => x, x*0 => 0, etc.,
2302 and application of the associative law.
2303 NOP_EXPR conversions may be removed freely (as long as we
2304 are careful not to change the C type of the overall expression).
2305 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
2306 but we can constant-fold them if they have constant operands. */
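2306a For example (a hedged sketch; EXP is any integer-typed tree built
     by a front end):

       fold (build (PLUS_EXPR, integer_type_node, exp, integer_zero_node))

     returns EXP itself (marked non-lvalue) rather than a PLUS_EXPR
     node, by the x+0 => x rule handled below.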
2307
2308 tree
2309 fold (expr)
2310 tree expr;
2311 {
2312 register tree t = expr;
2313 tree t1 = NULL_TREE;
2314 tree type = TREE_TYPE (expr);
2315 register tree arg0, arg1;
2316 register enum tree_code code = TREE_CODE (t);
2317 register int kind;
2318
2319 /* WINS will be nonzero when the switch is done
2320 if all operands are constant. */
2321
2322 int wins = 1;
2323
2324 /* Return right away if already constant. */
2325 if (TREE_CONSTANT (t))
2326 {
2327 if (code == CONST_DECL)
2328 return DECL_INITIAL (t);
2329 return t;
2330 }
2331
2332 kind = TREE_CODE_CLASS (code);
2333 if (kind == 'e' || kind == '<' || kind == '1' || kind == '2' || kind == 'r')
2334 {
2335 register int len = tree_code_length[(int) code];
2336 register int i;
2337 for (i = 0; i < len; i++)
2338 {
2339 tree op = TREE_OPERAND (t, i);
2340
2341 if (op == 0)
2342 continue; /* Valid for CALL_EXPR, at least. */
2343
2344 /* Strip any conversions that don't change the mode. */
2345 STRIP_NOPS (op);
2346
2347 if (TREE_CODE (op) != INTEGER_CST
2348 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
2349 && TREE_CODE (op) != REAL_CST
2350 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
2351 )
2352 /* Note that TREE_CONSTANT isn't enough:
2353 static var addresses are constant but we can't
2354 do arithmetic on them. */
2355 wins = 0;
2356
2357 if (i == 0)
2358 arg0 = op;
2359 else if (i == 1)
2360 arg1 = op;
2361 }
2362 }
2363
2364 /* If this is a commutative operation, and ARG0 is a constant, move it
2365 to ARG1 to reduce the number of tests below. */
2366 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
2367 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
2368 || code == BIT_AND_EXPR)
2369 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
2370 {
2371 tree tem = arg0;
2372 arg0 = arg1; arg1 = tem;
2373
2374 TREE_OPERAND (t, 0) = arg0;
2375 TREE_OPERAND (t, 1) = arg1;
2376 }
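
  /* For instance, the tree for `2 + x' becomes `x + 2' here, so the
     cases below need only look for a constant in ARG1.  */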
2377
2378 /* Now WINS is set as described above,
2379 ARG0 is the first operand of EXPR,
2380 and ARG1 is the second operand (if it has more than one operand).
2381
2382 First check for cases where an arithmetic operation is applied to a
2383 compound, conditional, or comparison operation. Push the arithmetic
2384 operation inside the compound or conditional to see if any folding
2385 can then be done. Convert comparison to conditional for this purpose.
2386 This also optimizes non-constant cases that used to be done in
2387 expand_expr. */
2388 if (TREE_CODE_CLASS (code) == '1')
2389 {
2390 if (TREE_CODE (arg0) == COMPOUND_EXPR)
2391 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
2392 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
2393 else if (TREE_CODE (arg0) == COND_EXPR)
2394 return fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
2395 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
2396 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
2397 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
2398 return fold (build (COND_EXPR, type, arg0,
2399 fold (build1 (code, type, integer_one_node)),
2400 fold (build1 (code, type, integer_zero_node))));
2401 }
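  /* For example, the unary case rewrites

       - (a < b)   into   ((a < b) ? -1 : 0)

     where each arm has already been folded to a constant.  */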
2402 else if (TREE_CODE_CLASS (code) == '2')
2403 {
2404 if (TREE_CODE (arg1) == COMPOUND_EXPR)
2405 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
2406 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
2407 else if (TREE_CODE (arg1) == COND_EXPR
2408 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
2409 {
2410 tree test, true_value, false_value;
2411
2412 if (TREE_CODE (arg1) == COND_EXPR)
2413 {
2414 test = TREE_OPERAND (arg1, 0);
2415 true_value = TREE_OPERAND (arg1, 1);
2416 false_value = TREE_OPERAND (arg1, 2);
2417 }
2418 else
2419 {
2420 test = arg1;
2421 true_value = integer_one_node;
2422 false_value = integer_zero_node;
2423 }
2424
2425 if (TREE_CODE (arg0) != VAR_DECL && TREE_CODE (arg0) != PARM_DECL)
2426 arg0 = save_expr (arg0);
2427 test = fold (build (COND_EXPR, type, test,
2428 fold (build (code, type, arg0, true_value)),
2429 fold (build (code, type, arg0, false_value))));
2430 if (TREE_CODE (arg0) == SAVE_EXPR)
2431 return build (COMPOUND_EXPR, type,
2432 convert (void_type_node, arg0), test);
2433 else
2434 return convert (type, test);
2435 }
2436
2437 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
2438 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
2439 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
2440 else if (TREE_CODE (arg0) == COND_EXPR
2441 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
2442 {
2443 tree test, true_value, false_value;
2444
2445 if (TREE_CODE (arg0) == COND_EXPR)
2446 {
2447 test = TREE_OPERAND (arg0, 0);
2448 true_value = TREE_OPERAND (arg0, 1);
2449 false_value = TREE_OPERAND (arg0, 2);
2450 }
2451 else
2452 {
2453 test = arg0;
2454 true_value = integer_one_node;
2455 false_value = integer_zero_node;
2456 }
2457
2458 if (TREE_CODE (arg1) != VAR_DECL && TREE_CODE (arg1) != PARM_DECL)
2459 arg1 = save_expr (arg1);
2460 test = fold (build (COND_EXPR, type, test,
2461 fold (build (code, type, true_value, arg1)),
2462 fold (build (code, type, false_value, arg1))));
2463 if (TREE_CODE (arg1) == SAVE_EXPR)
2464 return build (COMPOUND_EXPR, type,
2465 convert (void_type_node, arg1), test);
2466 else
2467 return convert (type, test);
2468 }
2469 }
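
  /* For example,

       (x, y) + z        becomes   (x, (y + z))
       (p ? q : r) + z   becomes   (p ? (q + z) : (r + z))

     with Z wrapped in a SAVE_EXPR first, unless it is a simple decl,
     so that it is still evaluated exactly once.  */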
2470
2471 switch (code)
2472 {
2473 case INTEGER_CST:
2474 case REAL_CST:
2475 case STRING_CST:
2476 case COMPLEX_CST:
2477 case CONSTRUCTOR:
2478 return t;
2479
2480 case CONST_DECL:
2481 return fold (DECL_INITIAL (t));
2482
2483 case NOP_EXPR:
2484 case FLOAT_EXPR:
2485 case CONVERT_EXPR:
2486 case FIX_TRUNC_EXPR:
2487 /* Other kinds of FIX are not handled properly by fold_convert. */
2488 /* Two conversions in a row are not needed unless:
2489 - the intermediate type is narrower than both initial and final, or
2490 - the initial type is a pointer type and the precisions of the
2491 intermediate and final types differ, or
2492 - the final type is a pointer type and the precisions of the
2493 initial and intermediate types differ. */
2494 if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
2495 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
2496 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
2497 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
2498 ||
2499 TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
2500 > TYPE_PRECISION (TREE_TYPE (t)))
2501 && ((TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
2502 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
2503 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))))
2504 ==
2505 (TREE_UNSIGNED (TREE_TYPE (t))
2506 && (TYPE_PRECISION (TREE_TYPE (t))
2507 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
2508 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
2509 == POINTER_TYPE)
2510 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
2511 != TYPE_PRECISION (TREE_TYPE (t))))
2512 && ! (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE
2513 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
2514 != TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
2515 return convert (TREE_TYPE (t), TREE_OPERAND (TREE_OPERAND (t, 0), 0));
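
      /* For example, given `char c', the double conversion
	 (int) (long) c collapses to (int) c: the intermediate `long'
	 is no narrower than either end type and no pointers are
	 involved (a sketch of the rule, assuming long is at least as
	 wide as int on the target).  */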
2516
2517 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
2518 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1)))
2519 {
2520 /* Don't leave an assignment inside a conversion. */
2521 tree prev = TREE_OPERAND (t, 0);
2522 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
2523 /* First do the assignment, then return converted constant. */
2524 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
2525 TREE_USED (t) = 1;
2526 return t;
2527 }
2528 if (!wins)
2529 {
2530 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
2531 return t;
2532 }
2533 return fold_convert (t, arg0);
2534
2535 #if 0 /* This loses on &"foo"[0]. */
2536 case ARRAY_REF:
2537 {
2538 int i;
2539
2540 /* Fold an expression like: "foo"[2] */
2541 if (TREE_CODE (arg0) == STRING_CST
2542 && TREE_CODE (arg1) == INTEGER_CST
2543 && !TREE_INT_CST_HIGH (arg1)
2544 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
2545 {
2546 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
2547 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
2548 force_fit_type (t);
2549 }
2550 }
2551 return t;
2552 #endif /* 0 */
2553
2554 case RANGE_EXPR:
2555 TREE_CONSTANT (t) = wins;
2556 return t;
2557
2558 case NEGATE_EXPR:
2559 if (wins)
2560 {
2561 if (TREE_CODE (arg0) == INTEGER_CST)
2562 {
2563 if (TREE_INT_CST_LOW (arg0) == 0)
2564 t = build_int_2 (0, - TREE_INT_CST_HIGH (arg0));
2565 else
2566 t = build_int_2 (- TREE_INT_CST_LOW (arg0),
2567 ~ TREE_INT_CST_HIGH (arg0));
2568 TREE_TYPE (t) = type;
2569 force_fit_type (t);
2570 }
2571 else if (TREE_CODE (arg0) == REAL_CST)
2572 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
2573 TREE_TYPE (t) = type;
2574 }
2575 else if (TREE_CODE (arg0) == NEGATE_EXPR)
2576 return TREE_OPERAND (arg0, 0);
2577
2578 /* Convert - (a - b) to (b - a) for non-floating-point. */
2579 else if (TREE_CODE (arg0) == MINUS_EXPR && TREE_CODE (type) != REAL_TYPE)
2580 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
2581 TREE_OPERAND (arg0, 0));
2582
2583 return t;
2584
2585 case ABS_EXPR:
2586 if (wins)
2587 {
2588 if (TREE_CODE (arg0) == INTEGER_CST)
2589 {
2590 if (! TREE_UNSIGNED (type)
2591 && TREE_INT_CST_HIGH (arg0) < 0)
2592 {
2593 if (TREE_INT_CST_LOW (arg0) == 0)
2594 t = build_int_2 (0, - TREE_INT_CST_HIGH (arg0));
2595 else
2596 t = build_int_2 (- TREE_INT_CST_LOW (arg0),
2597 ~ TREE_INT_CST_HIGH (arg0));
2598 }
2599 }
2600 else if (TREE_CODE (arg0) == REAL_CST)
2601 {
2602 if (REAL_VALUES_LESS (TREE_REAL_CST (arg0), dconst0))
2603 t = build_real (type,
2604 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
2605 }
2606 TREE_TYPE (t) = type;
2607 }
2608 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
2609 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
2610 return t;
2611
2612 case BIT_NOT_EXPR:
2613 if (wins)
2614 {
2615 if (TREE_CODE (arg0) == INTEGER_CST)
2616 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
2617 ~ TREE_INT_CST_HIGH (arg0));
2618 TREE_TYPE (t) = type;
2619 force_fit_type (t);
2620 }
2621 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
2622 return TREE_OPERAND (arg0, 0);
2623 return t;
2624
2625 case PLUS_EXPR:
2626 /* A + (-B) -> A - B */
2627 if (TREE_CODE (arg1) == NEGATE_EXPR)
2628 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
2629 else if (TREE_CODE (type) != REAL_TYPE)
2630 {
2631 if (integer_zerop (arg1))
2632 return non_lvalue (convert (type, arg0));
2633
2634 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
2635 with a constant, and the two constants have no bits in common,
2636 we should treat this as a BIT_IOR_EXPR since this may produce more
2637 simplifications. */
2638 if (TREE_CODE (arg0) == BIT_AND_EXPR
2639 && TREE_CODE (arg1) == BIT_AND_EXPR
2640 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
2641 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
2642 && integer_zerop (const_binop (BIT_AND_EXPR,
2643 TREE_OPERAND (arg0, 1),
2644 TREE_OPERAND (arg1, 1))))
2645 {
2646 code = BIT_IOR_EXPR;
2647 goto bit_ior;
2648 }
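
	  /* For example,

	       (a & 0xf0) + (b & 0x0f)

	     is handled as (a & 0xf0) | (b & 0x0f): the masks share no
	     bits, so no carries can occur and + and | agree.  */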
2649 }
2650 /* In IEEE floating point, x+0 may not equal x. */
2651 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
2652 && real_zerop (arg1))
2653 return non_lvalue (convert (type, arg0));
2654 associate:
2655 /* In most languages, we can't associate operations on floats
2656 through parentheses. Rather than remember where the parentheses
2657 were, we don't associate floats at all. It shouldn't matter much. */
2658 if (TREE_CODE (type) == REAL_TYPE)
2659 goto binary;
2660 /* The varsign == -1 cases happen only for addition and subtraction.
2661 It says that the arg that was split was really CON minus VAR.
2662 The rest of the code applies to all associative operations. */
2663 if (!wins)
2664 {
2665 tree var, con, tem;
2666 int varsign;
2667
2668 if (split_tree (arg0, code, &var, &con, &varsign))
2669 {
2670 if (varsign == -1)
2671 {
2672 /* EXPR is (CON-VAR) +- ARG1. */
2673 /* If it is + and VAR==ARG1, return just CONST. */
2674 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
2675 return convert (TREE_TYPE (t), con);
2676
2677 /* Otherwise return (CON +- ARG1) - VAR. */
2678 TREE_SET_CODE (t, MINUS_EXPR);
2679 TREE_OPERAND (t, 1) = var;
2680 TREE_OPERAND (t, 0)
2681 = fold (build (code, TREE_TYPE (t), con, arg1));
2682 }
2683 else
2684 {
2685 /* EXPR is (VAR+CON) +- ARG1. */
2686 /* If it is - and VAR==ARG1, return just CONST. */
2687 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
2688 return convert (TREE_TYPE (t), con);
2689
2690 /* Otherwise return VAR +- (ARG1 +- CON). */
2691 TREE_OPERAND (t, 1) = tem
2692 = fold (build (code, TREE_TYPE (t), arg1, con));
2693 TREE_OPERAND (t, 0) = var;
2694 if (integer_zerop (tem)
2695 && (code == PLUS_EXPR || code == MINUS_EXPR))
2696 return convert (type, var);
2697 /* If we have x +/- (c - d) [c an explicit integer]
2698 change it to x -/+ (d - c) since if d is relocatable
2699 then the latter can be a single immediate insn
2700 and the former cannot. */
2701 if (TREE_CODE (tem) == MINUS_EXPR
2702 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
2703 {
2704 tree tem1 = TREE_OPERAND (tem, 1);
2705 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
2706 TREE_OPERAND (tem, 0) = tem1;
2707 TREE_SET_CODE (t,
2708 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
2709 }
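	      /* E.g. x + (10 - d) becomes x - (d - 10): when D is
		 relocatable, (d - 10) can be a single immediate
		 operand while (10 - d) cannot.  */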
2710 }
2711 return t;
2712 }
2713
2714 if (split_tree (arg1, code, &var, &con, &varsign))
2715 {
2716 /* EXPR is ARG0 +- (CON +- VAR). */
2717 if (varsign == -1)
2718 TREE_SET_CODE (t,
2719 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
2720 if (TREE_CODE (t) == MINUS_EXPR
2721 && operand_equal_p (var, arg0, 0))
2722 {
2723 /* If VAR and ARG0 cancel, return just CON or -CON. */
2724 if (code == PLUS_EXPR)
2725 return convert (TREE_TYPE (t), con);
2726 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
2727 convert (TREE_TYPE (t), con)));
2728 }
2729 TREE_OPERAND (t, 0)
2730 = fold (build (code, TREE_TYPE (t), arg0, con));
2731 TREE_OPERAND (t, 1) = var;
2732 if (integer_zerop (TREE_OPERAND (t, 0))
2733 && TREE_CODE (t) == PLUS_EXPR)
2734 return convert (TREE_TYPE (t), var);
2735 return t;
2736 }
2737 }
2738 binary:
2739 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
2740 if (TREE_CODE (arg1) == REAL_CST)
2741 return t;
2742 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
2743 if (wins)
2744 t1 = const_binop (code, arg0, arg1);
2745 if (t1 != NULL_TREE)
2746 {
2747 /* The return value should always have
2748 the same type as the original expression. */
2749 TREE_TYPE (t1) = TREE_TYPE (t);
2750 return t1;
2751 }
2752 return t;
2753
2754 case MINUS_EXPR:
2755 if (TREE_CODE (type) != REAL_TYPE)
2756 {
2757 if (! wins && integer_zerop (arg0))
2758 return build1 (NEGATE_EXPR, type, arg1);
2759 if (integer_zerop (arg1))
2760 return non_lvalue (convert (type, arg0));
2761 }
2762 /* Convert A - (-B) to A + B. */
2763 else if (TREE_CODE (arg1) == NEGATE_EXPR)
2764 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
2765 else
2766 {
2767 if (! wins && real_zerop (arg0))
2768 return build1 (NEGATE_EXPR, type, arg1);
2769 /* In IEEE floating point, x-0 may not equal x. */
2770 if (real_zerop (arg1) && TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT)
2771 return non_lvalue (convert (type, arg0));
2772 }
2773 /* Fold &x - &x. This can happen from &x.foo - &x.
2774 Note that this can't be done for certain floats even in non-IEEE formats.
2775 Also note that operand_equal_p is always false if an operand
2776 is volatile. */
2777
2778 if (operand_equal_p (arg0, arg1,
2779 TREE_CODE (type) == REAL_TYPE))
2780 return convert (type, integer_zero_node);
2781 goto associate;
2782
2783 case MULT_EXPR:
2784 if (TREE_CODE (type) != REAL_TYPE)
2785 {
2786 if (integer_zerop (arg1))
2787 return omit_one_operand (type, arg1, arg0);
2788 if (integer_onep (arg1))
2789 return non_lvalue (convert (type, arg0));
2790
2791 /* (a * (1 << b)) is (a << b) */
2792 if (TREE_CODE (arg1) == LSHIFT_EXPR
2793 && integer_onep (TREE_OPERAND (arg1, 0)))
2794 return fold (build (LSHIFT_EXPR, type, arg0,
2795 TREE_OPERAND (arg1, 1)));
2796 if (TREE_CODE (arg0) == LSHIFT_EXPR
2797 && integer_onep (TREE_OPERAND (arg0, 0)))
2798 return fold (build (LSHIFT_EXPR, type, arg1,
2799 TREE_OPERAND (arg0, 1)));
2800 }
2801 /* In IEEE floating point, these optimizations are not correct. */
2802 else
2803 {
2804 if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
2805 && real_zerop (arg1))
2806 return omit_one_operand (type, arg1, arg0);
2807 /* In IEEE floating point, x*1 is not equivalent to x for nans.
2808 However, ANSI says we can drop signals,
2809 so we can do this anyway. */
2810 if (real_onep (arg1))
2811 return non_lvalue (convert (type, arg0));
2812 /* x*2 is x+x */
2813 if (! wins && real_twop (arg1))
2814 {
2815 tree arg = save_expr (arg0);
2816 return build (PLUS_EXPR, type, arg, arg);
2817 }
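	  /* The SAVE_EXPR above guarantees that ARG0 is evaluated only
	     once even though it appears as both operands of the PLUS.  */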
2818 }
2819 goto associate;
2820
2821 case BIT_IOR_EXPR:
2822 bit_ior:
2823 if (integer_all_onesp (arg1))
2824 return omit_one_operand (type, arg1, arg0);
2825 if (integer_zerop (arg1))
2826 return non_lvalue (convert (type, arg0));
2827 t1 = distribute_bit_expr (code, type, arg0, arg1);
2828 if (t1 != NULL_TREE)
2829 return t1;
2830 goto associate;
2831
2832 case BIT_XOR_EXPR:
2833 if (integer_zerop (arg1))
2834 return non_lvalue (convert (type, arg0));
2835 if (integer_all_onesp (arg1))
2836 return fold (build1 (BIT_NOT_EXPR, type, arg0));
2837 goto associate;
2838
2839 case BIT_AND_EXPR:
2840 bit_and:
2841 if (integer_all_onesp (arg1))
2842 return non_lvalue (convert (type, arg0));
2843 if (integer_zerop (arg1))
2844 return omit_one_operand (type, arg1, arg0);
2845 t1 = distribute_bit_expr (code, type, arg0, arg1);
2846 if (t1 != NULL_TREE)
2847 return t1;
2848 /* Simplify ((int)c & 0377) into (int)c, if c is unsigned char. */
2849 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
2850 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
2851 {
2852 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
2853 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_INT
2854 && (~TREE_INT_CST_LOW (arg0) & ((1 << prec) - 1)) == 0)
2855 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
2856 }
2857 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
2858 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
2859 {
2860 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
2861 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_INT
2862 && (~TREE_INT_CST_LOW (arg1) & ((1 << prec) - 1)) == 0)
2863 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
2864 }
2865 goto associate;
2866
2867 case BIT_ANDTC_EXPR:
2868 if (integer_all_onesp (arg0))
2869 return non_lvalue (convert (type, arg1));
2870 if (integer_zerop (arg0))
2871 return omit_one_operand (type, arg0, arg1);
2872 if (TREE_CODE (arg1) == INTEGER_CST)
2873 {
2874 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
2875 code = BIT_AND_EXPR;
2876 goto bit_and;
2877 }
2878 goto binary;
2879
2880 case TRUNC_DIV_EXPR:
2881 case ROUND_DIV_EXPR:
2882 case FLOOR_DIV_EXPR:
2883 case CEIL_DIV_EXPR:
2884 case EXACT_DIV_EXPR:
2885 case RDIV_EXPR:
2886 if (integer_onep (arg1))
2887 return non_lvalue (convert (type, arg0));
2888 if (integer_zerop (arg1))
2889 return t;
2890 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
2891 #ifndef REAL_INFINITY
2892 if (TREE_CODE (arg1) == REAL_CST
2893 && real_zerop (arg1))
2894 return t;
2895 #endif
2896 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
2897
2898 goto binary;
2899
2900 case CEIL_MOD_EXPR:
2901 case FLOOR_MOD_EXPR:
2902 case ROUND_MOD_EXPR:
2903 case TRUNC_MOD_EXPR:
2904 if (integer_onep (arg1))
2905 return omit_one_operand (type, integer_zero_node, arg0);
2906 if (integer_zerop (arg1))
2907 return t;
2908 goto binary;
2909
2910 case LSHIFT_EXPR:
2911 case RSHIFT_EXPR:
2912 case LROTATE_EXPR:
2913 case RROTATE_EXPR:
2914 if (integer_zerop (arg1))
2915 return non_lvalue (convert (type, arg0));
2916 /* Since a negative shift count is not well-defined,
2917 don't try to compute it in the compiler. */
2918 if (tree_int_cst_lt (arg1, integer_zero_node))
2919 return t;
2920 goto binary;
2921
2922 case MIN_EXPR:
2923 if (operand_equal_p (arg0, arg1, 0))
2924 return arg0;
2925 if (TREE_CODE (type) == INTEGER_TYPE
2926 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
2927 return omit_one_operand (type, arg1, arg0);
2928 goto associate;
2929
2930 case MAX_EXPR:
2931 if (operand_equal_p (arg0, arg1, 0))
2932 return arg0;
2933 if (TREE_CODE (type) == INTEGER_TYPE
2934 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
2935 return omit_one_operand (type, arg1, arg0);
2936 goto associate;
2937
2938 case TRUTH_NOT_EXPR:
2939 /* Note that the operand of this must be an int
2940 and its values must be 0 or 1.
2941 ("true" is a fixed value perhaps depending on the language,
2942 but we don't handle values other than 1 correctly yet.) */
2943 return invert_truthvalue (arg0);
2944
2945 case TRUTH_ANDIF_EXPR:
2946 /* Note that the operands of this must be ints
2947 and their values must be 0 or 1.
2948 ("true" is a fixed value perhaps depending on the language.) */
2949 /* If first arg is constant zero, return it. */
2950 if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
2951 return arg0;
2952 case TRUTH_AND_EXPR:
2953 /* If either arg is constant true, drop it. */
2954 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
2955 return non_lvalue (arg1);
2956 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
2957 return non_lvalue (arg0);
2958 /* Both known to be zero => return zero. */
2959 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
2960 return arg0;
2961
2962 truth_andor:
2963 /* Check for the possibility of merging component references. If our
2964 lhs is another similar operation, try to merge its rhs with our
2965 rhs. Then try to merge our lhs and rhs. */
2966 if (optimize)
2967 {
2968 tree tem;
2969
2970 if (TREE_CODE (arg0) == code)
2971 {
2972 tem = merge_component_references (code, type,
2973 TREE_OPERAND (arg0, 1), arg1);
2974 if (tem)
2975 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
2976 }
2977
2978 tem = merge_component_references (code, type, arg0, arg1);
2979 if (tem)
2980 return tem;
2981 }
2982 return t;
2983
2984 case TRUTH_ORIF_EXPR:
2985 /* Note that the operands of this must be ints
2986 and their values must be 0 or true.
2987 ("true" is a fixed value perhaps depending on the language.) */
2988 /* If first arg is constant true, return it. */
2989 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
2990 return arg0;
2991 case TRUTH_OR_EXPR:
2992 /* If either arg is constant zero, drop it. */
2993 if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
2994 return non_lvalue (arg1);
2995 if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
2996 return non_lvalue (arg0);
2997 /* Both known to be true => return true. */
2998 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
2999 return arg0;
3000 goto truth_andor;
3001
3002 case EQ_EXPR:
3003 case NE_EXPR:
3004 case LT_EXPR:
3005 case GT_EXPR:
3006 case LE_EXPR:
3007 case GE_EXPR:
3008 /* If one arg is a constant integer, put it last. */
3009 if (TREE_CODE (arg0) == INTEGER_CST
3010 && TREE_CODE (arg1) != INTEGER_CST)
3011 {
3012 TREE_OPERAND (t, 0) = arg1;
3013 TREE_OPERAND (t, 1) = arg0;
3014 arg0 = TREE_OPERAND (t, 0);
3015 arg1 = TREE_OPERAND (t, 1);
3016 switch (code)
3017 {
3018 case GT_EXPR:
3019 code = LT_EXPR;
3020 break;
3021 case GE_EXPR:
3022 code = LE_EXPR;
3023 break;
3024 case LT_EXPR:
3025 code = GT_EXPR;
3026 break;
3027 case LE_EXPR:
3028 code = GE_EXPR;
3029 break;
3030 }
3031 TREE_SET_CODE (t, code);
3032 }
3033
3034 /* Convert foo++ == CONST into ++foo == CONST + INCR.
3035 First, see if one arg is constant; find the constant arg
3036 and the other one. */
3037 {
3038 tree constop = 0, varop;
3039 tree *constoploc;
3040
3041 if (TREE_CONSTANT (arg1))
3042 constoploc = &TREE_OPERAND (t, 1), constop = arg1, varop = arg0;
3043 if (TREE_CONSTANT (arg0))
3044 constoploc = &TREE_OPERAND (t, 0), constop = arg0, varop = arg1;
3045
3046 if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
3047 {
3048 tree newconst
3049 = fold (build (PLUS_EXPR, TREE_TYPE (varop),
3050 constop, TREE_OPERAND (varop, 1)));
3051 /* This optimization is invalid for ordered comparisons
3052 if CONST+INCR overflows or if foo+incr might overflow.
3053 For pointer types we assume overflow doesn't happen. */
3054 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
3055 || code == EQ_EXPR || code == NE_EXPR)
3056 {
3057 /* This optimization is invalid for floating point
3058 if adding one to the constant does not change it. */
3059 if (TREE_CODE (TREE_TYPE (newconst)) != REAL_TYPE
3060 || !REAL_VALUES_EQUAL (TREE_REAL_CST (newconst),
3061 TREE_REAL_CST (constop)))
3062 {
3063 TREE_SET_CODE (varop, PREINCREMENT_EXPR);
3064 *constoploc = newconst;
3065 return t;
3066 }
3067 }
3068 }
3069 else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
3070 {
3071 tree newconst
3072 = fold (build (MINUS_EXPR, TREE_TYPE (varop),
3073 constop, TREE_OPERAND (varop, 1)));
3074 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
3075 || code == EQ_EXPR || code == NE_EXPR)
3076 {
3077 if (TREE_CODE (TREE_TYPE (newconst)) != REAL_TYPE
3078 || !REAL_VALUES_EQUAL (TREE_REAL_CST (newconst),
3079 TREE_REAL_CST (constop)))
3080 {
3081 TREE_SET_CODE (varop, PREDECREMENT_EXPR);
3082 *constoploc = newconst;
3083 return t;
3084 }
3085 }
3086 }
3087 }
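
      /* For example, `i++ == 5' becomes `++i == 6' (INCR here is 1),
	 so the comparison tests the updated value and the old value
	 need not be saved.  */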
3088
3089 /* Change X >= CST to X > (CST - 1) if CST is positive. */
3090 if (TREE_CODE (arg1) == INTEGER_CST
3091 && TREE_CODE (arg0) != INTEGER_CST
3092 && ! tree_int_cst_lt (arg1, integer_one_node))
3093 {
3094 switch (TREE_CODE (t))
3095 {
3096 case GE_EXPR:
3097 code = GT_EXPR;
3098 TREE_SET_CODE (t, code);
3099 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node);
3100 TREE_OPERAND (t, 1) = arg1;
3101 break;
3102
3103 case LT_EXPR:
3104 code = LE_EXPR;
3105 TREE_SET_CODE (t, code);
3106 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node);
3107 TREE_OPERAND (t, 1) = arg1;
3108 }
3109 }
3110
3111 /* If we are comparing the result of a comparison to a constant,
3112 we can often simplify this, since the comparison result is known to
3113 be either 0 or 1. We can ignore conversions if the LHS is a
3114 comparison. */
3115
3116 if (TREE_CODE (arg1) == INTEGER_CST)
3117 {
3118 tree comparison = arg0;
3119
3120 while (TREE_CODE (comparison) == NOP_EXPR
3121 || TREE_CODE (comparison) == CONVERT_EXPR)
3122 comparison = TREE_OPERAND (comparison, 0);
3123
3124 if (TREE_CODE_CLASS (TREE_CODE (comparison)) == '<'
3125 || TREE_CODE (comparison) == TRUTH_ANDIF_EXPR
3126 || TREE_CODE (comparison) == TRUTH_ORIF_EXPR
3127 || TREE_CODE (comparison) == TRUTH_AND_EXPR
3128 || TREE_CODE (comparison) == TRUTH_OR_EXPR
3129 || TREE_CODE (comparison) == TRUTH_NOT_EXPR)
3130 {
3131 /* We do different things depending on whether the
3132 constant being compared against is < 0, == 0, == 1, or > 1.
3133 Each of those cases, in order, corresponds to one
3134 character in a string. The value of the character is
3135 the result to return. A '0' or '1' means return always false
3136 or always true, respectively; 'c' means return the result
3137 of the comparison, and 'i' means return the result of the
3138 inverted comparison. */
3139
3140 char *actions, action;
3141
3142 switch (code)
3143 {
3144 case EQ_EXPR:
3145 actions = "0ic0";
3146 break;
3147 case NE_EXPR:
3148 actions = "1ci1";
3149 break;
3150 case LE_EXPR:
3151 actions = "0i11";
3152 break;
3153 case LT_EXPR:
3154 actions = "00i1";
3155 break;
3156 case GE_EXPR:
3157 actions = "11c0";
3158 break;
3159 case GT_EXPR:
3160 actions = "1c00";
3161 break;
3162 }
3163
3164 if (tree_int_cst_lt (arg1, integer_zero_node))
3165 action = actions[0];
3166 else if (integer_zerop (arg1))
3167 action = actions[1];
3168 else if (integer_onep (arg1))
3169 action = actions[2];
3170 else
3171 action = actions[3];
3172
3173 switch (action)
3174 {
3175 case '0':
3176 return omit_one_operand (type, integer_zero_node,
3177 comparison);
3178
3179 case '1':
3180 return omit_one_operand (type, integer_one_node, comparison);
3181
3182 case 'c':
3183 return convert (type, comparison);
3184
3185 case 'i':
3186 return convert (type, invert_truthvalue (comparison));
3187
3188 default:
3189 abort ();
3190 }
3191 }
3192 }
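
      /* For example, if the inner comparison is `a < b':

	   (a < b) == 1   folds to   a < b    (action 'c')
	   (a < b) == 0   folds to   a >= b   (action 'i', the inverse)
	   (a < b) > 1    folds to   0        (action '0')  */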
3193
3194 /* If this is an EQ or NE comparison with zero and ARG0 is
3195 (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
3196 two operations, but the latter can be done in one less insn
3197 on machines that have only two-operand insns or on which a
3198 constant cannot be the first operand. */
3199 if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
3200 && TREE_CODE (arg0) == BIT_AND_EXPR)
3201 {
3202 if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
3203 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
3204 return
3205 fold (build (code, type,
3206 build (BIT_AND_EXPR, TREE_TYPE (arg0),
3207 build (RSHIFT_EXPR,
3208 TREE_TYPE (TREE_OPERAND (arg0, 0)),
3209 TREE_OPERAND (arg0, 1),
3210 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
3211 convert (TREE_TYPE (arg0),
3212 integer_one_node)),
3213 arg1));
3214 else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
3215 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
3216 return
3217 fold (build (code, type,
3218 build (BIT_AND_EXPR, TREE_TYPE (arg0),
3219 build (RSHIFT_EXPR,
3220 TREE_TYPE (TREE_OPERAND (arg0, 1)),
3221 TREE_OPERAND (arg0, 0),
3222 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
3223 convert (TREE_TYPE (arg0),
3224 integer_one_node)),
3225 arg1));
3226 }
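
      /* For example, ((1 << n) & flags) != 0 becomes
	 ((flags >> n) & 1) != 0, avoiding a separate instruction to
	 materialize the shifted constant on two-operand machines.  */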
3227
3228 /* If this is an NE comparison of zero with an AND of one, remove the
3229 comparison since the AND will give the correct value. */
3230 if (code == NE_EXPR && integer_zerop (arg1)
3231 && TREE_CODE (arg0) == BIT_AND_EXPR
3232 && integer_onep (TREE_OPERAND (arg0, 1)))
3233 return convert (type, arg0);
3234
3235 /* If we have (A & C) == C where C is a power of 2, convert this into
3236 (A & C) != 0. Similarly for NE_EXPR. */
3237 if ((code == EQ_EXPR || code == NE_EXPR)
3238 && TREE_CODE (arg0) == BIT_AND_EXPR
3239 && integer_pow2p (TREE_OPERAND (arg0, 1))
3240 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
3241 return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
3242 arg0, integer_zero_node);
3243
3244 /* Simplify comparison of an integer with itself.
3245 (This may not be safe with IEEE floats if they are nans.) */
3246 if (operand_equal_p (arg0, arg1, 0)
3247 && TREE_CODE (TREE_TYPE (arg1)) == INTEGER_TYPE)
3248 {
3249 switch (code)
3250 {
3251 case EQ_EXPR:
3252 case GE_EXPR:
3253 case LE_EXPR:
3254 t = build_int_2 (1, 0);
3255 TREE_TYPE (t) = type;
3256 return t;
3257 case NE_EXPR:
3258 case GT_EXPR:
3259 case LT_EXPR:
3260 t = build_int_2 (0, 0);
3261 TREE_TYPE (t) = type;
3262 return t;
3263 }
3264 }
3265
3266 /* An unsigned comparison against 0 can be simplified. */
3267 if (integer_zerop (arg1)
3268 && (TREE_CODE (TREE_TYPE (arg1)) == INTEGER_TYPE
3269 || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
3270 && TREE_UNSIGNED (TREE_TYPE (arg1)))
3271 {
3272 switch (TREE_CODE (t))
3273 {
3274 case GT_EXPR:
3275 TREE_SET_CODE (t, NE_EXPR);
3276 break;
3277 case LE_EXPR:
3278 TREE_SET_CODE (t, EQ_EXPR);
3279 break;
3280 case GE_EXPR:
3281 return omit_one_operand (integer_type_node,
3282 integer_one_node, arg0);
3283 case LT_EXPR:
3284 return omit_one_operand (integer_type_node,
3285 integer_zero_node, arg0);
3286 }
3287 }
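
      /* That is, for unsigned U:  u > 0 becomes u != 0,  u <= 0 becomes
	 u == 0,  u >= 0 is always 1,  and  u < 0 is always 0.  */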
3288
3289 /* To compute GT, swap the arguments and do LT.
3290 To compute GE, do LT and invert the result.
3291 To compute LE, swap the arguments, do LT and invert the result.
3292 To compute NE, do EQ and invert the result. */
3293 if (code == LE_EXPR || code == GT_EXPR)
3294 {
3295 register tree temp = arg0;
3296 arg0 = arg1;
3297 arg1 = temp;
3298 }
3299
3300 /* Compute a result for LT or EQ if args permit;
3301 otherwise return T. */
3302 if (TREE_CODE (arg0) == INTEGER_CST
3303 && TREE_CODE (arg1) == INTEGER_CST)
3304 {
3305 if (code == EQ_EXPR || code == NE_EXPR)
3306 t = build_int_2
3307 (TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
3308 && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1),
3309 0);
3310 else
3311 t = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
3312 ? INT_CST_LT_UNSIGNED (arg0, arg1)
3313 : INT_CST_LT (arg0, arg1)),
3314 0);
3315 }
3316 /* Assume a nonexplicit constant cannot equal an explicit one,
3317 since such code would be undefined anyway.
3318 Exception: on sysvr4, using #pragma weak,
3319 a label can come out as 0. */
3320 else if (TREE_CODE (arg1) == INTEGER_CST
3321 && !integer_zerop (arg1)
3322 && TREE_CONSTANT (arg0)
3323 && TREE_CODE (arg0) == ADDR_EXPR
3324 && (code == EQ_EXPR || code == NE_EXPR))
3325 {
3326 t = build_int_2 (0, 0);
3327 }
3328 /* Two real constants can be compared explicitly. */
3329 else if (TREE_CODE (arg0) == REAL_CST
3330 && TREE_CODE (arg1) == REAL_CST)
3331 {
3332 if (code == EQ_EXPR || code == NE_EXPR)
3333 t = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
3334 TREE_REAL_CST (arg1)),
3335 0);
3336 else
3337 t = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
3338 TREE_REAL_CST (arg1)),
3339 0);
3340 }
3341 else if ((TREE_CODE (arg0) == COMPONENT_REF
3342 || TREE_CODE (arg0) == BIT_FIELD_REF)
3343 && (code == EQ_EXPR || code == NE_EXPR)
3344 /* Handle the constant case even without -O
3345 to make sure the warnings are given. */
3346 && (optimize || TREE_CODE (arg1) == INTEGER_CST))
3347 {
3348 tree tem = optimize_bit_field_compare (code, type, arg0, arg1);
3349 return tem ? tem : t;
3350 }
3351
3352 /* If what we want is other than LT or EQ, invert the result. */
3353 if ((code == GE_EXPR || code == LE_EXPR || code == NE_EXPR)
3354 && TREE_CODE (t) == INTEGER_CST)
3355 TREE_INT_CST_LOW (t) ^= 1;
3356 TREE_TYPE (t) = type;
3357 return t;
3358
3359 case COND_EXPR:
3360 if (TREE_CODE (arg0) == INTEGER_CST)
3361 return TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1));
3362 else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
3363 return omit_one_operand (type, arg1, arg0);
3364 else if (integer_onep (TREE_OPERAND (t, 1))
3365 && integer_zerop (TREE_OPERAND (t, 2))
3366 /* If we try to convert TREE_OPERAND (t, 0) to our type, the
3367 call to fold will try to move the conversion inside
3368 a COND, which will recurse. In that case, the COND_EXPR
3369 is probably the best choice, so leave it alone. */
3370 && type == TREE_TYPE (arg0))
3371 return arg0;
3372 else if (integer_zerop (arg1) && integer_onep (TREE_OPERAND (t, 2)))
3373 return convert (type, invert_truthvalue (arg0));
3374
3375 /* If we have (a >= 0 ? a : -a) or the same with ">", this is an
3376 absolute value expression. */
3377
3378 if ((TREE_CODE (arg0) == GE_EXPR || TREE_CODE (arg0) == GT_EXPR)
3379 && integer_zerop (TREE_OPERAND (arg0, 1))
3380 && TREE_CODE (TREE_OPERAND (t, 2)) == NEGATE_EXPR
3381 && operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0)
3382 && operand_equal_p (TREE_OPERAND (TREE_OPERAND (t, 2), 0), arg1, 0))
3383 return fold (build1 (ABS_EXPR, type, arg1));
3384
3385 /* Similarly for (a <= 0 ? -a : a). */
3386
3387 if ((TREE_CODE (arg0) == LE_EXPR || TREE_CODE (arg0) == LT_EXPR)
3388 && integer_zerop (TREE_OPERAND (arg0, 1))
3389 && TREE_CODE (arg1) == NEGATE_EXPR
3390 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (t, 2), 0)
3391 && operand_equal_p (TREE_OPERAND (arg1, 0), TREE_OPERAND (t, 2), 0))
3392 return fold (build1 (ABS_EXPR, type, TREE_OPERAND (t, 2)));
3393
3394 /* If we have a GT, GE, LT, or LE comparison, this might be a MIN or
3395 MAX test. If so, make a MIN_EXPR or MAX_EXPR. */
3396
3397 if (TREE_CODE (arg0) == GT_EXPR || TREE_CODE (arg0) == GE_EXPR
3398 || TREE_CODE (arg0) == LT_EXPR || TREE_CODE (arg0) == LE_EXPR)
3399 {
3400 tree hi_true, lo_true;
3401
3402 if (TREE_CODE (arg0) == GT_EXPR || TREE_CODE (arg0) == GE_EXPR)
3403 hi_true = TREE_OPERAND (arg0, 0), lo_true = TREE_OPERAND (arg0, 1);
3404 else
3405 hi_true = TREE_OPERAND (arg0, 1), lo_true = TREE_OPERAND (arg0, 0);
3406
3407 if (comparison_equiv_p (hi_true, lo_true, arg1, TREE_OPERAND (t, 2)))
3408 /* We use arg1 and the other arg because they must have the same
3409 type as the intended result.
3410 The values being compared might have a narrower type. */
3411 return fold (build (MAX_EXPR, type, arg1, TREE_OPERAND (t, 2)));
3412 else if (comparison_equiv_p (lo_true, hi_true,
3413 arg1, TREE_OPERAND (t, 2)))
3414 return fold (build (MIN_EXPR, type, arg1, TREE_OPERAND (t, 2)));
3415 }
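
      /* For example, (a > b ? a : b) becomes MAX_EXPR <a, b> and
	 (a < b ? a : b) becomes MIN_EXPR <a, b>.  */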
3416
3417 /* Look for cases when we are comparing some expression A for equality
3418 with zero and the result is to be zero if A is zero. In that case,
3419 check to see if the value of A is the same as the value to be
3420 returned when A is non-zero.
3421
3422 There are two cases: One is where we have (A ? A : 0) and the
3423 other is when a single bit is tested (e.g., A & 2 ? 2 : 0).
3424 In these cases, the result of the conditional is simply A.
3425
3426 Start by setting ARG1 to be the true value and ARG0 to be the thing
3427 compared with zero. Then check for the two cases above. */
3428
3429 if (integer_zerop (TREE_OPERAND (t, 2))
3430 && TREE_CODE (arg0) == NE_EXPR
3431 && integer_zerop (TREE_OPERAND (arg0, 1))
3432 && ! TREE_SIDE_EFFECTS (arg1))
3433 ;
3434 else if (integer_zerop (arg1)
3435 && TREE_CODE (arg0) == EQ_EXPR
3436 && integer_zerop (TREE_OPERAND (arg0, 1))
3437 && ! TREE_SIDE_EFFECTS (TREE_OPERAND (t, 2)))
3438 arg1 = TREE_OPERAND (t, 2);
3439 else
3440 return t;
3441
3442 arg0 = TREE_OPERAND (arg0, 0);
3443
3444 STRIP_NOPS (arg1);
3445 if (operand_equal_p (arg0, arg1, 0)
3446 || (TREE_CODE (arg1) == INTEGER_CST
3447 && integer_pow2p (arg1)
3448 && TREE_CODE (arg0) == BIT_AND_EXPR
3449 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0)))
3450 return convert (type, arg0);
3451 return t;
3452
3453 case COMPOUND_EXPR:
3454 if (!TREE_SIDE_EFFECTS (arg0))
3455 return arg1;
3456 return t;
3457
3458 default:
3459 return t;
3460 } /* switch (code) */
3461 }