1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
4
5 This file is part of GNU CC.
6
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include <setjmp.h>
26
27 #include "rtl.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "toplev.h"
38 #include "output.h"
39 #include "ggc.h"
40
41 /* Simplification and canonicalization of RTL. */
42
43 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
44 virtual regs here because the simplify_*_operation routines are called
45 by integrate.c, which is called before virtual register instantiation.
46
47 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
48 a header file so that their definitions can be shared with the
49 copies in cse.c. Until then, do not change these macros without
50 also changing the copies in cse.c. */
51
52 #define FIXED_BASE_PLUS_P(X) \
53 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
54 || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
55 || (X) == virtual_stack_vars_rtx \
56 || (X) == virtual_incoming_args_rtx \
57 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
58 && (XEXP (X, 0) == frame_pointer_rtx \
59 || XEXP (X, 0) == hard_frame_pointer_rtx \
60 || (XEXP (X, 0) == arg_pointer_rtx \
61 && fixed_regs[ARG_POINTER_REGNUM]) \
62 || XEXP (X, 0) == virtual_stack_vars_rtx \
63 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
64 || GET_CODE (X) == ADDRESSOF)
65
66 /* Similar, but also allows reference to the stack pointer.
67
68 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
69 arg_pointer_rtx by itself is nonzero, because on at least one machine,
70 the i960, the arg pointer is zero when it is unused. */
71
72 #define NONZERO_BASE_PLUS_P(X) \
73 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
74 || (X) == virtual_stack_vars_rtx \
75 || (X) == virtual_incoming_args_rtx \
76 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
77 && (XEXP (X, 0) == frame_pointer_rtx \
78 || XEXP (X, 0) == hard_frame_pointer_rtx \
79 || (XEXP (X, 0) == arg_pointer_rtx \
80 && fixed_regs[ARG_POINTER_REGNUM]) \
81 || XEXP (X, 0) == virtual_stack_vars_rtx \
82 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
83 || (X) == stack_pointer_rtx \
84 || (X) == virtual_stack_dynamic_rtx \
85 || (X) == virtual_outgoing_args_rtx \
86 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
87 && (XEXP (X, 0) == stack_pointer_rtx \
88 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
89 || XEXP (X, 0) == virtual_outgoing_args_rtx)) \
90 || GET_CODE (X) == ADDRESSOF)
91
92 /* Much code operates on (low, high) pairs; the low value is an
93 unsigned wide int, the high value a signed wide int. We
94 occasionally need to sign extend from low to high as if low were a
95 signed wide int. */
96 #define HWI_SIGN_EXTEND(low) \
97 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
98
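
/* Illustrative sketch added for exposition; it is not part of the
   original source.  It shows how HWI_SIGN_EXTEND supplies the high
   word of a (low, high) pair: all ones when the low word is negative,
   zero otherwise, so the pair reads as one double-width signed value.  */

static HOST_WIDE_INT hwi_sign_extend_example PARAMS ((HOST_WIDE_INT));

static HOST_WIDE_INT
hwi_sign_extend_example (low)
     HOST_WIDE_INT low;
{
  /* For low == -5 this yields -1 (all ones); for low == 5 it yields 0.
     Either way (low, high) models what a double-width signed integer
     would hold.  */
  return HWI_SIGN_EXTEND (low);
}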
99 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
100 enum machine_mode, rtx, rtx));
101 static void check_fold_consts PARAMS ((PTR));
102 \f
103 /* Make a binary operation by properly ordering the operands and
104 seeing if the expression folds. */
105
106 rtx
107 simplify_gen_binary (code, mode, op0, op1)
108 enum rtx_code code;
109 enum machine_mode mode;
110 rtx op0, op1;
111 {
112 rtx tem;
113
114 /* Put complex operands first and constants second if commutative. */
115 if (GET_RTX_CLASS (code) == 'c'
116 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
117 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
118 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
119 || (GET_CODE (op0) == SUBREG
120 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
121 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
122 tem = op0, op0 = op1, op1 = tem;
123
124 /* If this simplifies, do it. */
125 tem = simplify_binary_operation (code, mode, op0, op1);
126
127 if (tem)
128 return tem;
129
130 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
131 just form the operation. */
132
133 if (code == PLUS && GET_CODE (op1) == CONST_INT
134 && GET_MODE (op0) != VOIDmode)
135 return plus_constant (op0, INTVAL (op1));
136 else if (code == MINUS && GET_CODE (op1) == CONST_INT
137 && GET_MODE (op0) != VOIDmode)
138 return plus_constant (op0, - INTVAL (op1));
139 else
140 return gen_rtx_fmt_ee (code, mode, op0, op1);
141 }
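
/* Illustrative sketch, not part of the original source: a typical use
   of simplify_gen_binary, assuming this era's RTL API (gen_rtx_REG,
   GEN_INT) and an arbitrary pseudo register number 100.  Two constants
   fold outright; a register plus a constant takes the plus_constant
   shortcut above.  */

static rtx simplify_gen_binary_example PARAMS ((void));

static rtx
simplify_gen_binary_example ()
{
  rtx reg = gen_rtx_REG (SImode, 100);

  /* Folds to (const_int 5) inside simplify_binary_operation.  */
  rtx five = simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3));

  /* No constant fold is possible here, so this goes through
     plus_constant and yields (plus (reg 100) (const_int 5)).  */
  return simplify_gen_binary (PLUS, SImode, reg, five);
}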
142 \f
143 /* Make a unary operation by first seeing if it folds and otherwise making
144 the specified operation. */
145
146 rtx
147 simplify_gen_unary (code, mode, op, op_mode)
148 enum rtx_code code;
149 enum machine_mode mode;
150 rtx op;
151 enum machine_mode op_mode;
152 {
153 rtx tem;
154
155 /* If this simplifies, use it. */
156 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
157 return tem;
158
159 return gen_rtx_fmt_e (code, mode, op);
160 }
161
162 /* Likewise for ternary operations. */
163
164 rtx
165 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
166 enum rtx_code code;
167 enum machine_mode mode, op0_mode;
168 rtx op0, op1, op2;
169 {
170 rtx tem;
171
172 /* If this simplifies, use it. */
173 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
174 op0, op1, op2)))
175 return tem;
176
177 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
178 }
179 \f
180 /* Likewise, for relational operations.
181 CMP_MODE specifies the mode in which the comparison
182 is done. */
183
184 rtx
185 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
186 enum rtx_code code;
187 enum machine_mode mode;
188 enum machine_mode cmp_mode;
189 rtx op0, op1;
190 {
191 rtx tem;
192
193 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
194 return tem;
195
196 /* Put complex operands first and constants second. */
197 if ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
198 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
199 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
200 || (GET_CODE (op0) == SUBREG
201 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
202 && GET_RTX_CLASS (GET_CODE (op1)) != 'o'))
203 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
204
205 return gen_rtx_fmt_ee (code, mode, op0, op1);
206 }
207 \f
208 /* Replace all occurrences of OLD in X with NEW and try to simplify the
209 resulting RTX. Return a new RTX which is as simplified as possible. */
210
211 rtx
212 simplify_replace_rtx (x, old, new)
213 rtx x;
214 rtx old;
215 rtx new;
216 {
217 enum rtx_code code = GET_CODE (x);
218 enum machine_mode mode = GET_MODE (x);
219
220 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
221 to build a new expression substituting recursively. If we can't do
222 anything, return our input. */
223
224 if (x == old)
225 return new;
226
227 switch (GET_RTX_CLASS (code))
228 {
229 case '1':
230 {
231 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
232 rtx op = (XEXP (x, 0) == old
233 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
234
235 return simplify_gen_unary (code, mode, op, op_mode);
236 }
237
238 case '2':
239 case 'c':
240 return
241 simplify_gen_binary (code, mode,
242 simplify_replace_rtx (XEXP (x, 0), old, new),
243 simplify_replace_rtx (XEXP (x, 1), old, new));
244 case '<':
245 return
246 simplify_gen_relational (code, mode,
247 (GET_MODE (XEXP (x, 0)) != VOIDmode
248 ? GET_MODE (XEXP (x, 0))
249 : GET_MODE (XEXP (x, 1))),
250 simplify_replace_rtx (XEXP (x, 0), old, new),
251 simplify_replace_rtx (XEXP (x, 1), old, new));
252
253 case '3':
254 case 'b':
255 return
256 simplify_gen_ternary (code, mode, GET_MODE (XEXP (x, 0)),
257 simplify_replace_rtx (XEXP (x, 0), old, new),
258 simplify_replace_rtx (XEXP (x, 1), old, new),
259 simplify_replace_rtx (XEXP (x, 2), old, new));
260
261 case 'x':
262 /* The only case we try to handle is a lowpart SUBREG of a single-word
263 CONST_INT. */
264 if (code == SUBREG && subreg_lowpart_p (x) && old == SUBREG_REG (x)
265 && GET_CODE (new) == CONST_INT
266 && GET_MODE_SIZE (GET_MODE (old)) <= UNITS_PER_WORD)
267 return GEN_INT (INTVAL (new) & GET_MODE_MASK (mode));
268
269 return x;
270
271 default:
272 if (GET_CODE (x) == MEM)
273 {
274 /* We can't use change_address here, since it verifies the memory
275 address for correctness. We don't want such a check, since we may
276 handle addresses that were previously incorrect (such as those in push
277 instructions), and it is the caller's job to verify that the resulting insn matches. */
278 rtx addr = simplify_replace_rtx (XEXP (x, 0), old, new);
279 rtx mem;
280 if (XEXP (x, 0) != addr)
281 {
282 mem = gen_rtx_MEM (GET_MODE (x), addr);
283 MEM_COPY_ATTRIBUTES (mem, x);
284 }
285 else
286 mem = x;
287 return mem;
288 }
289
290 return x;
291 }
292 return x;
293 }
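
/* Illustrative sketch, not part of the original source: replacing a
   register with a constant and letting the result fold.  The pseudo
   register number 100 is arbitrary.  Rewriting (plus (reg 100)
   (const_int 1)) with (const_int 3) in place of the register should
   simplify all the way to (const_int 4).  */

static rtx simplify_replace_rtx_example PARAMS ((void));

static rtx
simplify_replace_rtx_example ()
{
  rtx reg = gen_rtx_REG (SImode, 100);
  rtx sum = gen_rtx_PLUS (SImode, reg, const1_rtx);

  return simplify_replace_rtx (sum, reg, GEN_INT (3));
}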
294 \f
295 /* Try to simplify a unary operation CODE whose output mode is to be
296 MODE with input operand OP whose mode was originally OP_MODE.
297 Return zero if no simplification can be made. */
298
299 rtx
300 simplify_unary_operation (code, mode, op, op_mode)
301 enum rtx_code code;
302 enum machine_mode mode;
303 rtx op;
304 enum machine_mode op_mode;
305 {
306 unsigned int width = GET_MODE_BITSIZE (mode);
307
308 /* The order of these tests is critical so that, for example, we don't
309 check the wrong mode (input vs. output) for a conversion operation,
310 such as FIX. At some point, this should be simplified. */
311
312 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
313
314 if (code == FLOAT && GET_MODE (op) == VOIDmode
315 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
316 {
317 HOST_WIDE_INT hv, lv;
318 REAL_VALUE_TYPE d;
319
320 if (GET_CODE (op) == CONST_INT)
321 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
322 else
323 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
324
325 #ifdef REAL_ARITHMETIC
326 REAL_VALUE_FROM_INT (d, lv, hv, mode);
327 #else
328 if (hv < 0)
329 {
330 d = (double) (~ hv);
331 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
332 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
333 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
334 d = (- d - 1.0);
335 }
336 else
337 {
338 d = (double) hv;
339 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
340 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
341 d += (double) (unsigned HOST_WIDE_INT) lv;
342 }
343 #endif /* REAL_ARITHMETIC */
344 d = real_value_truncate (mode, d);
345 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
346 }
347 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
348 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
349 {
350 HOST_WIDE_INT hv, lv;
351 REAL_VALUE_TYPE d;
352
353 if (GET_CODE (op) == CONST_INT)
354 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
355 else
356 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
357
358 if (op_mode == VOIDmode)
359 {
360 /* We don't know how to interpret negative-looking numbers in
361 this case, so don't try to fold those. */
362 if (hv < 0)
363 return 0;
364 }
365 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
366 ;
367 else
368 hv = 0, lv &= GET_MODE_MASK (op_mode);
369
370 #ifdef REAL_ARITHMETIC
371 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
372 #else
373
374 d = (double) (unsigned HOST_WIDE_INT) hv;
375 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
376 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
377 d += (double) (unsigned HOST_WIDE_INT) lv;
378 #endif /* REAL_ARITHMETIC */
379 d = real_value_truncate (mode, d);
380 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
381 }
382 #endif
383
384 if (GET_CODE (op) == CONST_INT
385 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
386 {
387 register HOST_WIDE_INT arg0 = INTVAL (op);
388 register HOST_WIDE_INT val;
389
390 switch (code)
391 {
392 case NOT:
393 val = ~ arg0;
394 break;
395
396 case NEG:
397 val = - arg0;
398 break;
399
400 case ABS:
401 val = (arg0 >= 0 ? arg0 : - arg0);
402 break;
403
404 case FFS:
405 /* Don't use ffs here. Instead, get low order bit and then its
406 number. If arg0 is zero, this will return 0, as desired. */
407 arg0 &= GET_MODE_MASK (mode);
408 val = exact_log2 (arg0 & (- arg0)) + 1;
409 break;
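	  /* Worked example (editorial note, not in the original source):
	     for arg0 == 12 (binary 1100), arg0 & -arg0 isolates the
	     lowest set bit, 4; exact_log2 (4) is 2, and adding 1 gives
	     3, which is indeed ffs (12).  For arg0 == 0 the AND yields
	     0, exact_log2 returns -1, and the result is the desired 0.  */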
410
411 case TRUNCATE:
412 val = arg0;
413 break;
414
415 case ZERO_EXTEND:
416 if (op_mode == VOIDmode)
417 op_mode = mode;
418 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
419 {
420 /* If we were really extending the mode,
421 we would have to distinguish between zero-extension
422 and sign-extension. */
423 if (width != GET_MODE_BITSIZE (op_mode))
424 abort ();
425 val = arg0;
426 }
427 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
428 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
429 else
430 return 0;
431 break;
432
433 case SIGN_EXTEND:
434 if (op_mode == VOIDmode)
435 op_mode = mode;
436 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
437 {
438 /* If we were really extending the mode,
439 we would have to distinguish between zero-extension
440 and sign-extension. */
441 if (width != GET_MODE_BITSIZE (op_mode))
442 abort ();
443 val = arg0;
444 }
445 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
446 {
447 val
448 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
449 if (val
450 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
451 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
452 }
453 else
454 return 0;
455 break;
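	  /* Worked example (editorial note, not in the original source):
	     sign-extending from QImode, arg0 == 0xff first masks to
	     val == 0xff; bit 7 is set, so subtracting 0x100 yields -1,
	     the correct signed reading of 0xff.  */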
456
457 case SQRT:
458 case FLOAT_EXTEND:
459 case FLOAT_TRUNCATE:
460 return 0;
461
462 default:
463 abort ();
464 }
465
466 val = trunc_int_for_mode (val, mode);
467
468 return GEN_INT (val);
469 }
470
471 /* We can do some operations on integer CONST_DOUBLEs. Also allow
472 for a DImode operation on a CONST_INT. */
473 else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_WIDE_INT * 2
474 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
475 {
476 unsigned HOST_WIDE_INT l1, lv;
477 HOST_WIDE_INT h1, hv;
478
479 if (GET_CODE (op) == CONST_DOUBLE)
480 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
481 else
482 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
483
484 switch (code)
485 {
486 case NOT:
487 lv = ~ l1;
488 hv = ~ h1;
489 break;
490
491 case NEG:
492 neg_double (l1, h1, &lv, &hv);
493 break;
494
495 case ABS:
496 if (h1 < 0)
497 neg_double (l1, h1, &lv, &hv);
498 else
499 lv = l1, hv = h1;
500 break;
501
502 case FFS:
503 hv = 0;
504 if (l1 == 0)
505 lv = h1 == 0 ? 0 : HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
506 else
507 lv = exact_log2 (l1 & (-l1)) + 1;
508 break;
509
510 case TRUNCATE:
511 /* This is just a change-of-mode, so do nothing. */
512 lv = l1, hv = h1;
513 break;
514
515 case ZERO_EXTEND:
516 if (op_mode == VOIDmode
517 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
518 return 0;
519
520 hv = 0;
521 lv = l1 & GET_MODE_MASK (op_mode);
522 break;
523
524 case SIGN_EXTEND:
525 if (op_mode == VOIDmode
526 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
527 return 0;
528 else
529 {
530 lv = l1 & GET_MODE_MASK (op_mode);
531 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
532 && (lv & ((HOST_WIDE_INT) 1
533 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
534 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
535
536 hv = HWI_SIGN_EXTEND (lv);
537 }
538 break;
539
540 case SQRT:
541 return 0;
542
543 default:
544 return 0;
545 }
546
547 return immed_double_const (lv, hv, mode);
548 }
549
550 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
551 else if (GET_CODE (op) == CONST_DOUBLE
552 && GET_MODE_CLASS (mode) == MODE_FLOAT)
553 {
554 REAL_VALUE_TYPE d;
555 jmp_buf handler;
556 rtx x;
557
558 if (setjmp (handler))
559 /* There used to be a warning here, but that is inadvisable.
560 People may want to cause traps, and the natural way
561 to do it should not get a warning. */
562 return 0;
563
564 set_float_handler (handler);
565
566 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
567
568 switch (code)
569 {
570 case NEG:
571 d = REAL_VALUE_NEGATE (d);
572 break;
573
574 case ABS:
575 if (REAL_VALUE_NEGATIVE (d))
576 d = REAL_VALUE_NEGATE (d);
577 break;
578
579 case FLOAT_TRUNCATE:
580 d = real_value_truncate (mode, d);
581 break;
582
583 case FLOAT_EXTEND:
584 /* All this does is change the mode. */
585 break;
586
587 case FIX:
588 d = REAL_VALUE_RNDZINT (d);
589 break;
590
591 case UNSIGNED_FIX:
592 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
593 break;
594
595 case SQRT:
596 return 0;
597
598 default:
599 abort ();
600 }
601
602 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
603 set_float_handler (NULL);
604 return x;
605 }
606
607 else if (GET_CODE (op) == CONST_DOUBLE
608 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
609 && GET_MODE_CLASS (mode) == MODE_INT
610 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
611 {
612 REAL_VALUE_TYPE d;
613 jmp_buf handler;
614 HOST_WIDE_INT val;
615
616 if (setjmp (handler))
617 return 0;
618
619 set_float_handler (handler);
620
621 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
622
623 switch (code)
624 {
625 case FIX:
626 val = REAL_VALUE_FIX (d);
627 break;
628
629 case UNSIGNED_FIX:
630 val = REAL_VALUE_UNSIGNED_FIX (d);
631 break;
632
633 default:
634 abort ();
635 }
636
637 set_float_handler (NULL);
638
639 val = trunc_int_for_mode (val, mode);
640
641 return GEN_INT (val);
642 }
643 #endif
644 /* This was formerly used only for non-IEEE float.
645 eggert@twinsun.com says it is safe for IEEE also. */
646 else
647 {
648 enum rtx_code reversed;
649 /* There are some simplifications we can do even if the operands
650 aren't constant. */
651 switch (code)
652 {
653 case NOT:
654 /* (not (not X)) == X. */
655 if (GET_CODE (op) == NOT)
656 return XEXP (op, 0);
657
658 /* (not (eq X Y)) == (ne X Y), etc. */
659 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
660 && ((reversed = reversed_comparison_code (op, NULL_RTX))
661 != UNKNOWN))
662 return gen_rtx_fmt_ee (reversed,
663 op_mode, XEXP (op, 0), XEXP (op, 1));
664 break;
665
666 case NEG:
667 /* (neg (neg X)) == X. */
668 if (GET_CODE (op) == NEG)
669 return XEXP (op, 0);
670 break;
671
672 case SIGN_EXTEND:
673 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
674 becomes just the MINUS if its mode is MODE. This allows
675 folding switch statements on machines using casesi (such as
676 the VAX). */
677 if (GET_CODE (op) == TRUNCATE
678 && GET_MODE (XEXP (op, 0)) == mode
679 && GET_CODE (XEXP (op, 0)) == MINUS
680 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
681 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
682 return XEXP (op, 0);
683
684 #ifdef POINTERS_EXTEND_UNSIGNED
685 if (! POINTERS_EXTEND_UNSIGNED
686 && mode == Pmode && GET_MODE (op) == ptr_mode
687 && (CONSTANT_P (op)
688 || (GET_CODE (op) == SUBREG
689 && GET_CODE (SUBREG_REG (op)) == REG
690 && REG_POINTER (SUBREG_REG (op))
691 && GET_MODE (SUBREG_REG (op)) == Pmode)))
692 return convert_memory_address (Pmode, op);
693 #endif
694 break;
695
696 #ifdef POINTERS_EXTEND_UNSIGNED
697 case ZERO_EXTEND:
698 if (POINTERS_EXTEND_UNSIGNED
699 && mode == Pmode && GET_MODE (op) == ptr_mode
700 && (CONSTANT_P (op)
701 || (GET_CODE (op) == SUBREG
702 && GET_CODE (SUBREG_REG (op)) == REG
703 && REG_POINTER (SUBREG_REG (op))
704 && GET_MODE (SUBREG_REG (op)) == Pmode)))
705 return convert_memory_address (Pmode, op);
706 break;
707 #endif
708
709 default:
710 break;
711 }
712
713 return 0;
714 }
715 }
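
/* Illustrative sketch, not part of the original source: the double-word
   negation that neg_double is assumed to perform, written out in plain
   C.  Two's-complement negation is a bitwise complement plus one, and
   the "+ 1" carries into the high word only when the low word is zero.  */

static void neg_double_example PARAMS ((unsigned HOST_WIDE_INT,
					HOST_WIDE_INT,
					unsigned HOST_WIDE_INT *,
					HOST_WIDE_INT *));

static void
neg_double_example (l1, h1, lv, hv)
     unsigned HOST_WIDE_INT l1;
     HOST_WIDE_INT h1;
     unsigned HOST_WIDE_INT *lv;
     HOST_WIDE_INT *hv;
{
  *lv = - l1;
  *hv = ~ h1 + (l1 == 0);
}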
716 \f
717 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
718 and OP1. Return 0 if no simplification is possible.
719
720 Don't use this for relational operations such as EQ or LT.
721 Use simplify_relational_operation instead. */
722
723 rtx
724 simplify_binary_operation (code, mode, op0, op1)
725 enum rtx_code code;
726 enum machine_mode mode;
727 rtx op0, op1;
728 {
729 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
730 HOST_WIDE_INT val;
731 unsigned int width = GET_MODE_BITSIZE (mode);
732 rtx tem;
733
734 /* Relational operations don't work here. We must know the mode
735 of the operands in order to do the comparison correctly.
736 Assuming a full word can give incorrect results.
737 Consider comparing 128 with -128 in QImode. */
738
739 if (GET_RTX_CLASS (code) == '<')
740 abort ();
741
742 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
743 if (GET_MODE_CLASS (mode) == MODE_FLOAT
744 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
745 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
746 {
747 REAL_VALUE_TYPE f0, f1, value;
748 jmp_buf handler;
749
750 if (setjmp (handler))
751 return 0;
752
753 set_float_handler (handler);
754
755 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
756 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
757 f0 = real_value_truncate (mode, f0);
758 f1 = real_value_truncate (mode, f1);
759
760 #ifdef REAL_ARITHMETIC
761 #ifndef REAL_INFINITY
762 if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
763 return 0;
764 #endif
765 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
766 #else
767 switch (code)
768 {
769 case PLUS:
770 value = f0 + f1;
771 break;
772 case MINUS:
773 value = f0 - f1;
774 break;
775 case MULT:
776 value = f0 * f1;
777 break;
778 case DIV:
779 #ifndef REAL_INFINITY
780 if (f1 == 0)
781 return 0;
782 #endif
783 value = f0 / f1;
784 break;
785 case SMIN:
786 value = MIN (f0, f1);
787 break;
788 case SMAX:
789 value = MAX (f0, f1);
790 break;
791 default:
792 abort ();
793 }
794 #endif
795
796 value = real_value_truncate (mode, value);
797 set_float_handler (NULL);
798 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
799 }
800 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
801
802 /* We can fold some multi-word operations. */
803 if (GET_MODE_CLASS (mode) == MODE_INT
804 && width == HOST_BITS_PER_WIDE_INT * 2
805 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
806 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
807 {
808 unsigned HOST_WIDE_INT l1, l2, lv;
809 HOST_WIDE_INT h1, h2, hv;
810
811 if (GET_CODE (op0) == CONST_DOUBLE)
812 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
813 else
814 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
815
816 if (GET_CODE (op1) == CONST_DOUBLE)
817 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
818 else
819 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
820
821 switch (code)
822 {
823 case MINUS:
824 /* A - B == A + (-B). */
825 neg_double (l2, h2, &lv, &hv);
826 l2 = lv, h2 = hv;
827
828 /* ... fall through ... */
829
830 case PLUS:
831 add_double (l1, h1, l2, h2, &lv, &hv);
832 break;
833
834 case MULT:
835 mul_double (l1, h1, l2, h2, &lv, &hv);
836 break;
837
838 case DIV: case MOD: case UDIV: case UMOD:
839 /* We'd need to include tree.h to do this and it doesn't seem worth
840 it. */
841 return 0;
842
843 case AND:
844 lv = l1 & l2, hv = h1 & h2;
845 break;
846
847 case IOR:
848 lv = l1 | l2, hv = h1 | h2;
849 break;
850
851 case XOR:
852 lv = l1 ^ l2, hv = h1 ^ h2;
853 break;
854
855 case SMIN:
856 if (h1 < h2
857 || (h1 == h2
858 && ((unsigned HOST_WIDE_INT) l1
859 < (unsigned HOST_WIDE_INT) l2)))
860 lv = l1, hv = h1;
861 else
862 lv = l2, hv = h2;
863 break;
864
865 case SMAX:
866 if (h1 > h2
867 || (h1 == h2
868 && ((unsigned HOST_WIDE_INT) l1
869 > (unsigned HOST_WIDE_INT) l2)))
870 lv = l1, hv = h1;
871 else
872 lv = l2, hv = h2;
873 break;
874
875 case UMIN:
876 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
877 || (h1 == h2
878 && ((unsigned HOST_WIDE_INT) l1
879 < (unsigned HOST_WIDE_INT) l2)))
880 lv = l1, hv = h1;
881 else
882 lv = l2, hv = h2;
883 break;
884
885 case UMAX:
886 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
887 || (h1 == h2
888 && ((unsigned HOST_WIDE_INT) l1
889 > (unsigned HOST_WIDE_INT) l2)))
890 lv = l1, hv = h1;
891 else
892 lv = l2, hv = h2;
893 break;
894
895 case LSHIFTRT: case ASHIFTRT:
896 case ASHIFT:
897 case ROTATE: case ROTATERT:
898 #ifdef SHIFT_COUNT_TRUNCATED
899 if (SHIFT_COUNT_TRUNCATED)
900 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
901 #endif
902
903 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
904 return 0;
905
906 if (code == LSHIFTRT || code == ASHIFTRT)
907 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
908 code == ASHIFTRT);
909 else if (code == ASHIFT)
910 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
911 else if (code == ROTATE)
912 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
913 else /* code == ROTATERT */
914 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
915 break;
916
917 default:
918 return 0;
919 }
920
921 return immed_double_const (lv, hv, mode);
922 }
923
924 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
925 || width > HOST_BITS_PER_WIDE_INT || width == 0)
926 {
927 /* Even if we can't compute a constant result,
928 there are some cases worth simplifying. */
929
930 switch (code)
931 {
932 case PLUS:
933 /* In IEEE floating point, x+0 is not the same as x. Similarly
934 for the other optimizations below. */
935 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
936 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
937 break;
938
939 if (op1 == CONST0_RTX (mode))
940 return op0;
941
942 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
943 if (GET_CODE (op0) == NEG)
944 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
945 else if (GET_CODE (op1) == NEG)
946 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
947
948 /* Handle both-operands-constant cases. We can only add
949 CONST_INTs to constants since the sum of relocatable symbols
950 can't be handled by most assemblers. Don't add CONST_INT
951 to CONST_INT since overflow won't be computed properly if wider
952 than HOST_BITS_PER_WIDE_INT. */
953
954 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
955 && GET_CODE (op1) == CONST_INT)
956 return plus_constant (op0, INTVAL (op1));
957 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
958 && GET_CODE (op0) == CONST_INT)
959 return plus_constant (op1, INTVAL (op0));
960
961 /* See if this is something like X * C + X or vice versa or
962 if the multiplication is written as a shift. If so, we can
963 distribute and make a new multiply or shift, or maybe just
964 have X (if the resulting coefficient is 1). But don't make a
965 real multiply if we didn't have one before. */
966
967 if (! FLOAT_MODE_P (mode))
968 {
969 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
970 rtx lhs = op0, rhs = op1;
971 int had_mult = 0;
972
973 if (GET_CODE (lhs) == NEG)
974 coeff0 = -1, lhs = XEXP (lhs, 0);
975 else if (GET_CODE (lhs) == MULT
976 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
977 {
978 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
979 had_mult = 1;
980 }
981 else if (GET_CODE (lhs) == ASHIFT
982 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
983 && INTVAL (XEXP (lhs, 1)) >= 0
984 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
985 {
986 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
987 lhs = XEXP (lhs, 0);
988 }
989
990 if (GET_CODE (rhs) == NEG)
991 coeff1 = -1, rhs = XEXP (rhs, 0);
992 else if (GET_CODE (rhs) == MULT
993 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
994 {
995 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
996 had_mult = 1;
997 }
998 else if (GET_CODE (rhs) == ASHIFT
999 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1000 && INTVAL (XEXP (rhs, 1)) >= 0
1001 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1002 {
1003 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1004 rhs = XEXP (rhs, 0);
1005 }
1006
1007 if (rtx_equal_p (lhs, rhs))
1008 {
1009 tem = simplify_gen_binary (MULT, mode, lhs,
1010 GEN_INT (coeff0 + coeff1));
1011 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1012 }
1013 }
1014
1015 /* If one of the operands is a PLUS or a MINUS, see if we can
1016 simplify this by the associative law.
1017 Don't use the associative law for floating point.
1018 The inaccuracy makes it nonassociative,
1019 and subtle programs can break if operations are associated. */
1020
1021 if (INTEGRAL_MODE_P (mode)
1022 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1023 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
1024 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1025 return tem;
1026 break;
1027
1028 case COMPARE:
1029 #ifdef HAVE_cc0
1030 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1031 using cc0, in which case we want to leave it as a COMPARE
1032 so we can distinguish it from a register-register-copy.
1033
1034 In IEEE floating point, x-0 is not the same as x. */
1035
1036 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1037 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1038 && op1 == CONST0_RTX (mode))
1039 return op0;
1040 #endif
1041
1042 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1043 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1044 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1045 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1046 {
1047 rtx xop00 = XEXP (op0, 0);
1048 rtx xop10 = XEXP (op1, 0);
1049
1050 #ifdef HAVE_cc0
1051 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1052 #else
1053 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1054 && GET_MODE (xop00) == GET_MODE (xop10)
1055 && REGNO (xop00) == REGNO (xop10)
1056 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1057 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1058 #endif
1059 return xop00;
1060 }
1061
1062 break;
1063 case MINUS:
1064 /* None of these optimizations can be done for IEEE
1065 floating point. */
1066 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1067 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
1068 break;
1069
1070 /* We can't assume x-x is 0 even with non-IEEE floating point,
1071 but since it is zero except in very strange circumstances, we
1072 will treat it as zero with -funsafe-math-optimizations. */
1073 if (rtx_equal_p (op0, op1)
1074 && ! side_effects_p (op0)
1075 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1076 return CONST0_RTX (mode);
1077
1078 /* Change subtraction from zero into negation. */
1079 if (op0 == CONST0_RTX (mode))
1080 return gen_rtx_NEG (mode, op1);
1081
1082 /* (-1 - a) is ~a. */
1083 if (op0 == constm1_rtx)
1084 return gen_rtx_NOT (mode, op1);
1085
1086 /* Subtracting 0 has no effect. */
1087 if (op1 == CONST0_RTX (mode))
1088 return op0;
1089
1090 /* See if this is something like X * C - X or vice versa or
1091 if the multiplication is written as a shift. If so, we can
1092 distribute and make a new multiply, shift, or maybe just
1093 have X (if C is 2 in the example above). But don't make a
1094 real multiply if we didn't have one before. */
1095
1096 if (! FLOAT_MODE_P (mode))
1097 {
1098 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1099 rtx lhs = op0, rhs = op1;
1100 int had_mult = 0;
1101
1102 if (GET_CODE (lhs) == NEG)
1103 coeff0 = -1, lhs = XEXP (lhs, 0);
1104 else if (GET_CODE (lhs) == MULT
1105 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1106 {
1107 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1108 had_mult = 1;
1109 }
1110 else if (GET_CODE (lhs) == ASHIFT
1111 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1112 && INTVAL (XEXP (lhs, 1)) >= 0
1113 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1114 {
1115 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1116 lhs = XEXP (lhs, 0);
1117 }
1118
1119 if (GET_CODE (rhs) == NEG)
1120 coeff1 = - 1, rhs = XEXP (rhs, 0);
1121 else if (GET_CODE (rhs) == MULT
1122 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1123 {
1124 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1125 had_mult = 1;
1126 }
1127 else if (GET_CODE (rhs) == ASHIFT
1128 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1129 && INTVAL (XEXP (rhs, 1)) >= 0
1130 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1131 {
1132 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1133 rhs = XEXP (rhs, 0);
1134 }
1135
1136 if (rtx_equal_p (lhs, rhs))
1137 {
1138 tem = simplify_gen_binary (MULT, mode, lhs,
1139 GEN_INT (coeff0 - coeff1));
1140 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1141 }
1142 }
1143
1144 /* (a - (-b)) -> (a + b). */
1145 if (GET_CODE (op1) == NEG)
1146 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1147
1148 /* If one of the operands is a PLUS or a MINUS, see if we can
1149 simplify this by the associative law.
1150 Don't use the associative law for floating point.
1151 The inaccuracy makes it nonassociative,
1152 and subtle programs can break if operations are associated. */
1153
1154 if (INTEGRAL_MODE_P (mode)
1155 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1156 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
1157 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1158 return tem;
1159
1160 /* Don't let a relocatable value get a negative coeff. */
1161 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1162 return plus_constant (op0, - INTVAL (op1));
1163
1164 /* (x - (x & y)) -> (x & ~y) */
1165 if (GET_CODE (op1) == AND)
1166 {
1167 if (rtx_equal_p (op0, XEXP (op1, 0)))
1168 return simplify_gen_binary (AND, mode, op0,
1169 gen_rtx_NOT (mode, XEXP (op1, 1)));
1170 if (rtx_equal_p (op0, XEXP (op1, 1)))
1171 return simplify_gen_binary (AND, mode, op0,
1172 gen_rtx_NOT (mode, XEXP (op1, 0)));
1173 }
1174 break;
1175
1176 case MULT:
1177 if (op1 == constm1_rtx)
1178 {
1179 tem = simplify_unary_operation (NEG, mode, op0, mode);
1180
1181 return tem ? tem : gen_rtx_NEG (mode, op0);
1182 }
1183
1184 /* In IEEE floating point, x*0 is not always 0. */
1185 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1186 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1187 && op1 == CONST0_RTX (mode)
1188 && ! side_effects_p (op0))
1189 return op1;
1190
1191 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1192 However, ANSI says we can drop signals,
1193 so we can do this anyway. */
1194 if (op1 == CONST1_RTX (mode))
1195 return op0;
1196
1197 /* Convert multiply by constant power of two into shift unless
1198 we are still generating RTL. This test is a kludge. */
1199 if (GET_CODE (op1) == CONST_INT
1200 && (val = exact_log2 (INTVAL (op1))) >= 0
1201 /* If the mode is larger than the host word size, and the
1202 uppermost bit is set, then this isn't a power of two due
1203 to implicit sign extension. */
1204 && (width <= HOST_BITS_PER_WIDE_INT
1205 || val != HOST_BITS_PER_WIDE_INT - 1)
1206 && ! rtx_equal_function_value_matters)
1207 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1208
1209 if (GET_CODE (op1) == CONST_DOUBLE
1210 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
1211 {
1212 REAL_VALUE_TYPE d;
1213 jmp_buf handler;
1214 int op1is2, op1ism1;
1215
1216 if (setjmp (handler))
1217 return 0;
1218
1219 set_float_handler (handler);
1220 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1221 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
1222 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
1223 set_float_handler (NULL);
1224
1225 /* x*2 is x+x and x*(-1) is -x */
1226 if (op1is2 && GET_MODE (op0) == mode)
1227 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1228
1229 else if (op1ism1 && GET_MODE (op0) == mode)
1230 return gen_rtx_NEG (mode, op0);
1231 }
1232 break;
1233
1234 case IOR:
1235 if (op1 == const0_rtx)
1236 return op0;
1237 if (GET_CODE (op1) == CONST_INT
1238 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1239 return op1;
1240 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1241 return op0;
1242 /* A | (~A) -> -1 */
1243 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1244 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1245 && ! side_effects_p (op0)
1246 && GET_MODE_CLASS (mode) != MODE_CC)
1247 return constm1_rtx;
1248 break;
1249
1250 case XOR:
1251 if (op1 == const0_rtx)
1252 return op0;
1253 if (GET_CODE (op1) == CONST_INT
1254 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1255 return gen_rtx_NOT (mode, op0);
1256 if (op0 == op1 && ! side_effects_p (op0)
1257 && GET_MODE_CLASS (mode) != MODE_CC)
1258 return const0_rtx;
1259 break;
1260
1261 case AND:
1262 if (op1 == const0_rtx && ! side_effects_p (op0))
1263 return const0_rtx;
1264 if (GET_CODE (op1) == CONST_INT
1265 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1266 return op0;
1267 if (op0 == op1 && ! side_effects_p (op0)
1268 && GET_MODE_CLASS (mode) != MODE_CC)
1269 return op0;
1270 /* A & (~A) -> 0 */
1271 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1272 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1273 && ! side_effects_p (op0)
1274 && GET_MODE_CLASS (mode) != MODE_CC)
1275 return const0_rtx;
1276 break;
1277
1278 case UDIV:
1279 /* Convert divide by power of two into shift (divide by 1 handled
1280 below). */
1281 if (GET_CODE (op1) == CONST_INT
1282 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
1283 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1284
1285 /* ... fall through ... */
1286
1287 case DIV:
1288 if (op1 == CONST1_RTX (mode))
1289 return op0;
1290
1291 /* In IEEE floating point, 0/x is not always 0. */
1292 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1293 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1294 && op0 == CONST0_RTX (mode)
1295 && ! side_effects_p (op1))
1296 return op0;
1297
1298 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1299 /* Change division by a constant into multiplication. Only do
1300 this with -funsafe-math-optimizations. */
1301 else if (GET_CODE (op1) == CONST_DOUBLE
1302 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
1303 && op1 != CONST0_RTX (mode)
1304 && flag_unsafe_math_optimizations)
1305 {
1306 REAL_VALUE_TYPE d;
1307 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1308
1309 if (! REAL_VALUES_EQUAL (d, dconst0))
1310 {
1311 #if defined (REAL_ARITHMETIC)
1312 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1313 return gen_rtx_MULT (mode, op0,
1314 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1315 #else
1316 return
1317 gen_rtx_MULT (mode, op0,
1318 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
1319 #endif
1320 }
1321 }
1322 #endif
1323 break;
1324
1325 case UMOD:
1326 /* Handle modulus by power of two (mod with 1 handled below). */
1327 if (GET_CODE (op1) == CONST_INT
1328 && exact_log2 (INTVAL (op1)) > 0)
1329 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1330
1331 /* ... fall through ... */
1332
1333 case MOD:
1334 if ((op0 == const0_rtx || op1 == const1_rtx)
1335 && ! side_effects_p (op0) && ! side_effects_p (op1))
1336 return const0_rtx;
1337 break;
1338
1339 case ROTATERT:
1340 case ROTATE:
1341 /* Rotating ~0 always results in ~0. */
1342 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1343 && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
1344 && ! side_effects_p (op1))
1345 return op0;
1346
1347 /* ... fall through ... */
1348
1349 case ASHIFT:
1350 case ASHIFTRT:
1351 case LSHIFTRT:
1352 if (op1 == const0_rtx)
1353 return op0;
1354 if (op0 == const0_rtx && ! side_effects_p (op1))
1355 return op0;
1356 break;
1357
1358 case SMIN:
1359 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1360 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width - 1)
1361 && ! side_effects_p (op0))
1362 return op1;
1363 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1364 return op0;
1365 break;
1366
1367 case SMAX:
1368 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1369 && ((unsigned HOST_WIDE_INT) INTVAL (op1)
1370 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1371 && ! side_effects_p (op0))
1372 return op1;
1373 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1374 return op0;
1375 break;
1376
1377 case UMIN:
1378 if (op1 == const0_rtx && ! side_effects_p (op0))
1379 return op1;
1380 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1381 return op0;
1382 break;
1383
1384 case UMAX:
1385 if (op1 == constm1_rtx && ! side_effects_p (op0))
1386 return op1;
1387 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1388 return op0;
1389 break;
1390
1391 default:
1392 abort ();
1393 }
1394
1395 return 0;
1396 }
1397
1398 /* Get the integer argument values in two forms:
1399 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1400
1401 arg0 = INTVAL (op0);
1402 arg1 = INTVAL (op1);
1403
1404 if (width < HOST_BITS_PER_WIDE_INT)
1405 {
1406 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1407 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1408
1409 arg0s = arg0;
1410 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1411 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1412
1413 arg1s = arg1;
1414 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1415 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1416 }
1417 else
1418 {
1419 arg0s = arg0;
1420 arg1s = arg1;
1421 }
1422
1423 /* Compute the value of the arithmetic. */
1424
1425 switch (code)
1426 {
1427 case PLUS:
1428 val = arg0s + arg1s;
1429 break;
1430
1431 case MINUS:
1432 val = arg0s - arg1s;
1433 break;
1434
1435 case MULT:
1436 val = arg0s * arg1s;
1437 break;
1438
1439 case DIV:
1440 if (arg1s == 0
1441 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1442 && arg1s == -1))
1443 return 0;
1444 val = arg0s / arg1s;
1445 break;
1446
1447 case MOD:
1448 if (arg1s == 0
1449 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1450 && arg1s == -1))
1451 return 0;
1452 val = arg0s % arg1s;
1453 break;
1454
1455 case UDIV:
1456 if (arg1 == 0
1457 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1458 && arg1s == -1))
1459 return 0;
1460 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1461 break;
1462
1463 case UMOD:
1464 if (arg1 == 0
1465 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1466 && arg1s == -1))
1467 return 0;
1468 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1469 break;
1470
1471 case AND:
1472 val = arg0 & arg1;
1473 break;
1474
1475 case IOR:
1476 val = arg0 | arg1;
1477 break;
1478
1479 case XOR:
1480 val = arg0 ^ arg1;
1481 break;
1482
1483 case LSHIFTRT:
1484 /* If shift count is undefined, don't fold it; let the machine do
1485 what it wants. But truncate it if the machine will do that. */
1486 if (arg1 < 0)
1487 return 0;
1488
1489 #ifdef SHIFT_COUNT_TRUNCATED
1490 if (SHIFT_COUNT_TRUNCATED)
1491 arg1 %= width;
1492 #endif
1493
1494 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1495 break;
1496
1497 case ASHIFT:
1498 if (arg1 < 0)
1499 return 0;
1500
1501 #ifdef SHIFT_COUNT_TRUNCATED
1502 if (SHIFT_COUNT_TRUNCATED)
1503 arg1 %= width;
1504 #endif
1505
1506 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1507 break;
1508
1509 case ASHIFTRT:
1510 if (arg1 < 0)
1511 return 0;
1512
1513 #ifdef SHIFT_COUNT_TRUNCATED
1514 if (SHIFT_COUNT_TRUNCATED)
1515 arg1 %= width;
1516 #endif
1517
1518 val = arg0s >> arg1;
1519
1520 /* The bootstrap compiler may not have sign-extended the right shift.
1521 Manually extend the sign to ensure the bootstrap cc matches gcc. */
1522 if (arg0s < 0 && arg1 > 0)
1523 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1524
1525 break;
1526
1527 case ROTATERT:
1528 if (arg1 < 0)
1529 return 0;
1530
1531 arg1 %= width;
1532 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1533 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1534 break;
1535
1536 case ROTATE:
1537 if (arg1 < 0)
1538 return 0;
1539
1540 arg1 %= width;
1541 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1542 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1543 break;
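	  /* Worked example (editorial note, not in the original source):
	     in an 8-bit mode, rotating arg0 == 0xb4 (1011 0100) left by
	     3 computes (0xb4 << 3) | (0xb4 >> 5) == 0x5a0 | 0x5 == 0x5a5;
	     trunc_int_for_mode below reduces that to the bit pattern
	     0xa5 (1010 0101), the expected rotation.  */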
1544
1545 case COMPARE:
1546 /* Do nothing here. */
1547 return 0;
1548
1549 case SMIN:
1550 val = arg0s <= arg1s ? arg0s : arg1s;
1551 break;
1552
1553 case UMIN:
1554 val = ((unsigned HOST_WIDE_INT) arg0
1555 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1556 break;
1557
1558 case SMAX:
1559 val = arg0s > arg1s ? arg0s : arg1s;
1560 break;
1561
1562 case UMAX:
1563 val = ((unsigned HOST_WIDE_INT) arg0
1564 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1565 break;
1566
1567 default:
1568 abort ();
1569 }
1570
1571 val = trunc_int_for_mode (val, mode);
1572
1573 return GEN_INT (val);
1574 }
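
/* Illustrative sketch, not part of the original source: constant
   folding through simplify_binary_operation.  Both operands must be
   CONST_INT and the mode no wider than the host word for the
   arithmetic at the end of the function to apply; otherwise only the
   pattern-based simplifications above can fire.  */

static rtx simplify_binary_operation_example PARAMS ((void));

static rtx
simplify_binary_operation_example ()
{
  /* (ashift (const_int 3) (const_int 4)) folds to (const_int 48).  */
  return simplify_binary_operation (ASHIFT, SImode, GEN_INT (3),
				    GEN_INT (4));
}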
1575 \f
1576 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1577 PLUS or MINUS.
1578
1579 Rather than testing for specific cases, we do this by a brute-force method
1580 and apply all possible simplifications until no more changes occur. Then
1581 we rebuild the operation. */
1582
1583 static rtx
1584 simplify_plus_minus (code, mode, op0, op1)
1585 enum rtx_code code;
1586 enum machine_mode mode;
1587 rtx op0, op1;
1588 {
1589 rtx ops[8];
1590 int negs[8];
1591 rtx result, tem;
1592 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
1593 int first = 1, negate = 0, changed;
1594 int i, j;
1595
1596 memset ((char *) ops, 0, sizeof ops);
1597
1598 /* Set up the two operands and then expand them until nothing has been
1599 changed. If we run out of room in our array, give up; this should
1600 almost never happen. */
1601
1602 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
1603
1604 changed = 1;
1605 while (changed)
1606 {
1607 changed = 0;
1608
1609 for (i = 0; i < n_ops; i++)
1610 switch (GET_CODE (ops[i]))
1611 {
1612 case PLUS:
1613 case MINUS:
1614 if (n_ops == 7)
1615 return 0;
1616
1617 ops[n_ops] = XEXP (ops[i], 1);
1618 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
1619 ops[i] = XEXP (ops[i], 0);
1620 input_ops++;
1621 changed = 1;
1622 break;
1623
1624 case NEG:
1625 ops[i] = XEXP (ops[i], 0);
1626 negs[i] = ! negs[i];
1627 changed = 1;
1628 break;
1629
1630 case CONST:
1631 ops[i] = XEXP (ops[i], 0);
1632 input_consts++;
1633 changed = 1;
1634 break;
1635
1636 case NOT:
1637 /* ~a -> (-a - 1) */
1638 if (n_ops != 7)
1639 {
1640 ops[n_ops] = constm1_rtx;
1641 negs[n_ops++] = negs[i];
1642 ops[i] = XEXP (ops[i], 0);
1643 negs[i] = ! negs[i];
1644 changed = 1;
1645 }
1646 break;
1647
1648 case CONST_INT:
1649 if (negs[i])
1650 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
1651 break;
1652
1653 default:
1654 break;
1655 }
1656 }
1657
1658 /* If we only have two operands, we can't do anything. */
1659 if (n_ops <= 2)
1660 return 0;
1661
1662 /* Now simplify each pair of operands until nothing changes. The first
1663 time through just simplify constants against each other. */
1664
1665 changed = 1;
1666 while (changed)
1667 {
1668 changed = first;
1669
1670 for (i = 0; i < n_ops - 1; i++)
1671 for (j = i + 1; j < n_ops; j++)
1672 if (ops[i] != 0 && ops[j] != 0
1673 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
1674 {
1675 rtx lhs = ops[i], rhs = ops[j];
1676 enum rtx_code ncode = PLUS;
1677
1678 if (negs[i] && ! negs[j])
1679 lhs = ops[j], rhs = ops[i], ncode = MINUS;
1680 else if (! negs[i] && negs[j])
1681 ncode = MINUS;
1682
1683 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1684 if (tem)
1685 {
1686 ops[i] = tem, ops[j] = 0;
1687 negs[i] = negs[i] && negs[j];
1688 if (GET_CODE (tem) == NEG)
1689 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
1690
1691 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
1692 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
1693 changed = 1;
1694 }
1695 }
1696
1697 first = 0;
1698 }
1699
1700 /* Pack all the operands to the lower-numbered entries and give up if
1701 we didn't reduce the number of operands we had. Make sure we
1702 count a CONST as two operands. If we have the same number of
1703 operands, but have made more CONSTs than we had, this is also
1704 an improvement, so accept it. */
1705
1706 for (i = 0, j = 0; j < n_ops; j++)
1707 if (ops[j] != 0)
1708 {
1709 ops[i] = ops[j], negs[i++] = negs[j];
1710 if (GET_CODE (ops[j]) == CONST)
1711 n_consts++;
1712 }
1713
1714 if (i + n_consts > input_ops
1715 || (i + n_consts == input_ops && n_consts <= input_consts))
1716 return 0;
1717
1718 n_ops = i;
1719
1720 /* If we have a CONST_INT, put it last. */
1721 for (i = 0; i < n_ops - 1; i++)
1722 if (GET_CODE (ops[i]) == CONST_INT)
1723 {
1724 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
1725 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
1726 }
1727
1728 /* Put a non-negated operand first. If there aren't any, make all
1729 operands positive and negate the whole thing later. */
1730 for (i = 0; i < n_ops && negs[i]; i++)
1731 ;
1732
1733 if (i == n_ops)
1734 {
1735 for (i = 0; i < n_ops; i++)
1736 negs[i] = 0;
1737 negate = 1;
1738 }
1739 else if (i != 0)
1740 {
1741 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
1742 j = negs[0], negs[0] = negs[i], negs[i] = j;
1743 }
1744
1745 /* Now make the result by performing the requested operations. */
1746 result = ops[0];
1747 for (i = 1; i < n_ops; i++)
1748 result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
1749
1750 return negate ? gen_rtx_NEG (mode, result) : result;
1751 }
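
/* Worked example (editorial note, not in the original source) of the
   expansion above: for (minus (plus a (const_int 3))
   (minus b (const_int 1))), the first loop flattens the operands into
   the signed list {+a, -b, +3, +1}, the folding pass combines 3 and 1
   into 4, and the rebuild should produce
   (plus (minus a b) (const_int 4)).  */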
1752
1753 struct cfc_args
1754 {
1755 rtx op0, op1; /* Input */
1756 int equal, op0lt, op1lt; /* Output */
1757 int unordered;
1758 };
1759
1760 static void
1761 check_fold_consts (data)
1762 PTR data;
1763 {
1764 struct cfc_args *args = (struct cfc_args *) data;
1765 REAL_VALUE_TYPE d0, d1;
1766
1767 /* We may possibly raise an exception while reading the value. */
1768 args->unordered = 1;
1769 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
1770 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
1771
1772 /* A NaN operand makes the comparison unordered; anything else, including Inf versus Inf, is ordered. */
1773 if (REAL_VALUE_ISNAN (d0)
1774 || REAL_VALUE_ISNAN (d1))
1775 return;
1776 args->equal = REAL_VALUES_EQUAL (d0, d1);
1777 args->op0lt = REAL_VALUES_LESS (d0, d1);
1778 args->op1lt = REAL_VALUES_LESS (d1, d0);
1779 args->unordered = 0;
1780 }
1781
1782 /* Like simplify_binary_operation except used for relational operators.
1783 MODE is the mode of the operands, not that of the result. If MODE
1784 is VOIDmode, both operands must also be VOIDmode and we compare the
1785 operands in "infinite precision".
1786
1787 If no simplification is possible, this function returns zero. Otherwise,
1788 it returns either const_true_rtx or const0_rtx. */
1789
1790 rtx
1791 simplify_relational_operation (code, mode, op0, op1)
1792 enum rtx_code code;
1793 enum machine_mode mode;
1794 rtx op0, op1;
1795 {
1796 int equal, op0lt, op0ltu, op1lt, op1ltu;
1797 rtx tem;
1798
1799 if (mode == VOIDmode
1800 && (GET_MODE (op0) != VOIDmode
1801 || GET_MODE (op1) != VOIDmode))
1802 abort ();
1803
1804 /* If op0 is a compare, extract the comparison arguments from it. */
1805 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1806 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1807
1808 /* We can't simplify MODE_CC values since we don't know what the
1809 actual comparison is. */
1810 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1811 #ifdef HAVE_cc0
1812 || op0 == cc0_rtx
1813 #endif
1814 )
1815 return 0;
1816
1817 /* Make sure the constant is second. */
1818 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
1819 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
1820 {
1821 tem = op0, op0 = op1, op1 = tem;
1822 code = swap_condition (code);
1823 }
1824
1825 /* For integer comparisons of A and B maybe we can simplify A - B and can
1826 then simplify a comparison of that with zero. If A and B are both either
1827 a register or a CONST_INT, this can't help; testing for these cases will
1828 prevent infinite recursion here and speed things up.
1829
1830 If CODE is an unsigned comparison, then we can never do this optimization,
1831 because it gives an incorrect result if the subtraction wraps around zero.
1832 ANSI C defines unsigned operations such that they never overflow, and
1833 thus such cases cannot be ignored. */
1834
1835 if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
1836 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
1837 && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
1838 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1839 && code != GTU && code != GEU && code != LTU && code != LEU)
1840 return simplify_relational_operation (signed_condition (code),
1841 mode, tem, const0_rtx);
1842
1843 if (flag_unsafe_math_optimizations && code == ORDERED)
1844 return const_true_rtx;
1845
1846 if (flag_unsafe_math_optimizations && code == UNORDERED)
1847 return const0_rtx;
1848
1849 /* For non-IEEE floating-point, if the two operands are equal, we know the
1850 result. */
1851 if (rtx_equal_p (op0, op1)
1852 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1853 || ! FLOAT_MODE_P (GET_MODE (op0))
1854 || flag_unsafe_math_optimizations))
1855 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1856
1857 /* If the operands are floating-point constants, see if we can fold
1858 the result. */
1859 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1860 else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
1861 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
1862 {
1863 struct cfc_args args;
1864
1865 /* Set up the input for check_fold_consts (). */
1866 args.op0 = op0;
1867 args.op1 = op1;
1868
1869
1870 if (!do_float_handler (check_fold_consts, (PTR) &args))
1871 args.unordered = 1;
1872
1873 if (args.unordered)
1874 switch (code)
1875 {
1876 case UNEQ:
1877 case UNLT:
1878 case UNGT:
1879 case UNLE:
1880 case UNGE:
1881 case NE:
1882 case UNORDERED:
1883 return const_true_rtx;
1884 case EQ:
1885 case LT:
1886 case GT:
1887 case LE:
1888 case GE:
1889 case LTGT:
1890 case ORDERED:
1891 return const0_rtx;
1892 default:
1893 return 0;
1894 }
1895
1896 /* Receive output from check_fold_consts() */
1897 equal = args.equal;
1898 op0lt = op0ltu = args.op0lt;
1899 op1lt = op1ltu = args.op1lt;
1900 }
1901 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1902
1903 /* Otherwise, see if the operands are both integers. */
1904 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
1905 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
1906 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
1907 {
1908 int width = GET_MODE_BITSIZE (mode);
1909 HOST_WIDE_INT l0s, h0s, l1s, h1s;
1910 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
1911
1912 /* Get the two words comprising each integer constant. */
1913 if (GET_CODE (op0) == CONST_DOUBLE)
1914 {
1915 l0u = l0s = CONST_DOUBLE_LOW (op0);
1916 h0u = h0s = CONST_DOUBLE_HIGH (op0);
1917 }
1918 else
1919 {
1920 l0u = l0s = INTVAL (op0);
1921 h0u = h0s = HWI_SIGN_EXTEND (l0s);
1922 }
1923
1924 if (GET_CODE (op1) == CONST_DOUBLE)
1925 {
1926 l1u = l1s = CONST_DOUBLE_LOW (op1);
1927 h1u = h1s = CONST_DOUBLE_HIGH (op1);
1928 }
1929 else
1930 {
1931 l1u = l1s = INTVAL (op1);
1932 h1u = h1s = HWI_SIGN_EXTEND (l1s);
1933 }
1934
1935 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1936 we have to sign or zero-extend the values. */
1937 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
1938 {
1939 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
1940 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
1941
1942 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1943 l0s |= ((HOST_WIDE_INT) (-1) << width);
1944
1945 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1946 l1s |= ((HOST_WIDE_INT) (-1) << width);
1947 }
1948 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
1949 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
1950
1951 equal = (h0u == h1u && l0u == l1u);
1952 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
1953 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
1954 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
1955 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
1956 }
1957
1958 /* Otherwise, there are some code-specific tests we can make. */
1959 else
1960 {
1961 switch (code)
1962 {
1963 case EQ:
1964 /* References to the frame plus a constant or labels cannot
1965 be zero, but a SYMBOL_REF can due to #pragma weak. */
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      /* On some machines, the ap reg can be 0 sometimes.  */
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const0_rtx;
	  break;

	case NE:
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (op1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (op1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;
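	  /* (In QImode, GET_MODE_MASK is 0xff, so the two cases above
	     fold (leu x (const_int 255)) to true and
	     (gtu x (const_int 255)) to false.)  */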

	default:
	  break;
	}

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
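
/* For instance, simplify_relational_operation (GTU, SImode,
   GEN_INT (5), GEN_INT (7)) computes op1ltu == 0 (7 is not below 5
   as an unsigned value) and therefore returns const0_rtx.  */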
\f
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
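	  /* For example, (zero_extract (const_int 0x1234)
	     (const_int 4) (const_int 4)) on a !BITS_BIG_ENDIAN target
	     is (0x1234 >> 4) & 0xf == 3; a sign_extract of the same
	     field also gives 3, since bit 3 of the field is clear.  */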
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;

    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;
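      /* (E.g. (if_then_else (const_int 1) A B) is just A.)  */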

      /* Convert "a != b ? a : b" and, below, "a == b ? b : a"
	 to plain "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	       && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	       && rtx_equal_p (XEXP (op0, 1), op1)
	       && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;
	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;
	  else if (temp)
	    op0 = temp;

	  /* Look for constant values of op1 and op2 that let the
	     IF_THEN_ELSE collapse into the comparison itself.  */
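	  /* (With STORE_FLAG_VALUE == 1, (if_then_else (lt x y)
	     (const_int 1) (const_int 0)) is just (lt x y); with the
	     arms swapped we need the reversed comparison (ge x y),
	     and reversed_comparison_code may refuse the reversal,
	     e.g. for IEEE floating point.)  */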
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */

rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

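  /* The RTX class of CODE selects the simplifier: '1' is a unary
     operation, '2' a binary one, 'c' a commutative binary one, '3' a
     ternary one, 'b' a bit-field operation (SIGN_EXTRACT and
     ZERO_EXTRACT), and '<' a comparison.  */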
  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case '2':
    case 'c':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));
    default:
      return NULL;
    }
}
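
/* Usage sketch (illustrative only, not part of the original file):
   a pass holding an rtx X can try

	rtx tem = simplify_rtx (x);
	if (tem)
	  x = tem;

   keeping the original X whenever no simplification applies.  */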