1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
4
5 This file is part of GNU CC.
6
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include <setjmp.h>
26
27 #include "rtl.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "toplev.h"
38 #include "output.h"
39 #include "ggc.h"
40
41 /* Simplification and canonicalization of RTL. */
42
43 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
44 virtual regs here because the simplify_*_operation routines are called
45 by integrate.c, which is called before virtual register instantiation.
46
47 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
48 a header file so that their definitions can be shared with the
49 copies in cse.c. Until then, do not change these macros without
50 also changing the copies in cse.c. */
51
52 #define FIXED_BASE_PLUS_P(X) \
53 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
54 || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
55 || (X) == virtual_stack_vars_rtx \
56 || (X) == virtual_incoming_args_rtx \
57 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
58 && (XEXP (X, 0) == frame_pointer_rtx \
59 || XEXP (X, 0) == hard_frame_pointer_rtx \
60 || (XEXP (X, 0) == arg_pointer_rtx \
61 && fixed_regs[ARG_POINTER_REGNUM]) \
62 || XEXP (X, 0) == virtual_stack_vars_rtx \
63 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
64 || GET_CODE (X) == ADDRESSOF)
65
66 /* Similar, but also allows reference to the stack pointer.
67
68 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
69 arg_pointer_rtx by itself is nonzero, because on at least one machine,
70 the i960, the arg pointer is zero when it is unused. */
71
72 #define NONZERO_BASE_PLUS_P(X) \
73 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
74 || (X) == virtual_stack_vars_rtx \
75 || (X) == virtual_incoming_args_rtx \
76 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
77 && (XEXP (X, 0) == frame_pointer_rtx \
78 || XEXP (X, 0) == hard_frame_pointer_rtx \
79 || (XEXP (X, 0) == arg_pointer_rtx \
80 && fixed_regs[ARG_POINTER_REGNUM]) \
81 || XEXP (X, 0) == virtual_stack_vars_rtx \
82 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
83 || (X) == stack_pointer_rtx \
84 || (X) == virtual_stack_dynamic_rtx \
85 || (X) == virtual_outgoing_args_rtx \
86 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
87 && (XEXP (X, 0) == stack_pointer_rtx \
88 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
89 || XEXP (X, 0) == virtual_outgoing_args_rtx)) \
90 || GET_CODE (X) == ADDRESSOF)
91
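/* For illustration, a sketch of the RTL shapes these macros accept
   (the register numbers are hypothetical):

       (reg/f:SI 53 virtual-stack-vars)           FIXED and NONZERO
       (plus:SI (reg/f:SI 53 virtual-stack-vars)
                (const_int 8))                    FIXED and NONZERO
       (reg:SI 7 sp)                              NONZERO only
       (symbol_ref:SI ("x"))                      neither  */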
92 /* Much code operates on (low, high) pairs; the low value is an
93 unsigned wide int, the high value a signed wide int. We
94 occasionally need to sign extend from low to high as if low were a
95 signed wide int. */
96 #define HWI_SIGN_EXTEND(low) \
97 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
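/* For example: with a 64-bit HOST_WIDE_INT, the value -2 is carried as
   the pair (low, high) = (0xfffffffffffffffe, -1), because the sign bit
   of LOW is set; the value 2 is carried as (2, 0).  */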
98
99 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
100 enum machine_mode, rtx, rtx));
101 static void check_fold_consts PARAMS ((PTR));
102 \f
103 /* Make a binary operation by properly ordering the operands and
104 seeing if the expression folds. */
105
106 rtx
107 simplify_gen_binary (code, mode, op0, op1)
108 enum rtx_code code;
109 enum machine_mode mode;
110 rtx op0, op1;
111 {
112 rtx tem;
113
114 /* Put complex operands first and constants second if commutative. */
115 if (GET_RTX_CLASS (code) == 'c'
116 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
117 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
118 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
119 || (GET_CODE (op0) == SUBREG
120 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
121 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
122 tem = op0, op0 = op1, op1 = tem;
123
124 /* If this simplifies, do it. */
125 tem = simplify_binary_operation (code, mode, op0, op1);
126
127 if (tem)
128 return tem;
129
130 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
131 just form the operation. */
132
133 if (code == PLUS && GET_CODE (op1) == CONST_INT
134 && GET_MODE (op0) != VOIDmode)
135 return plus_constant (op0, INTVAL (op1));
136 else if (code == MINUS && GET_CODE (op1) == CONST_INT
137 && GET_MODE (op0) != VOIDmode)
138 return plus_constant (op0, - INTVAL (op1));
139 else
140 return gen_rtx_fmt_ee (code, mode, op0, op1);
141 }
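/* A usage sketch (the register number is hypothetical):

       simplify_gen_binary (PLUS, SImode, GEN_INT (3),
                            gen_rtx_REG (SImode, 100));

   swaps the operands so the constant comes second, finds no constant
   fold, and falls through to plus_constant, producing
   (plus:SI (reg:SI 100) (const_int 3)).  */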
142 \f
143 /* Make a unary operation by first seeing if it folds and otherwise making
144 the specified operation. */
145
146 rtx
147 simplify_gen_unary (code, mode, op, op_mode)
148 enum rtx_code code;
149 enum machine_mode mode;
150 rtx op;
151 enum machine_mode op_mode;
152 {
153 rtx tem;
154
155 /* If this simplifies, use it. */
156 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
157 return tem;
158
159 return gen_rtx_fmt_e (code, mode, op);
160 }
161
162 /* Likewise for ternary operations. */
163
164 rtx
165 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
166 enum rtx_code code;
167 enum machine_mode mode, op0_mode;
168 rtx op0, op1, op2;
169 {
170 rtx tem;
171
172 /* If this simplifies, use it. */
173 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
174 op0, op1, op2)))
175 return tem;
176
177 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
178 }
179 \f
180 /* Likewise, for relational operations.
181 CMP_MODE specifies the mode in which the comparison is done.
182 */
183
184 rtx
185 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
186 enum rtx_code code;
187 enum machine_mode mode;
188 enum machine_mode cmp_mode;
189 rtx op0, op1;
190 {
191 rtx tem;
192
193 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
194 return tem;
195
196 /* Put complex operands first and constants second. */
197 if ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
198 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
199 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
200 || (GET_CODE (op0) == SUBREG
201 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
202 && GET_RTX_CLASS (GET_CODE (op1)) != 'o'))
203 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
204
205 return gen_rtx_fmt_ee (code, mode, op0, op1);
206 }
207 \f
208 /* Replace all occurrences of OLD in X with NEW and try to simplify the
209 resulting RTX. Return a new RTX which is as simplified as possible. */
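/* For instance (with a hypothetical register number): replacing
   (reg:SI 100) with (const_int 4) in (plus:SI (reg:SI 100) (reg:SI 100))
   rewrites both occurrences, and simplify_gen_binary then folds the
   result to (const_int 8).  */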
210
211 rtx
212 simplify_replace_rtx (x, old, new)
213 rtx x;
214 rtx old;
215 rtx new;
216 {
217 enum rtx_code code = GET_CODE (x);
218 enum machine_mode mode = GET_MODE (x);
219
220 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
221 to build a new expression substituting recursively. If we can't do
222 anything, return our input. */
223
224 if (x == old)
225 return new;
226
227 switch (GET_RTX_CLASS (code))
228 {
229 case '1':
230 {
231 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
232 rtx op = (XEXP (x, 0) == old
233 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
234
235 return simplify_gen_unary (code, mode, op, op_mode);
236 }
237
238 case '2':
239 case 'c':
240 return
241 simplify_gen_binary (code, mode,
242 simplify_replace_rtx (XEXP (x, 0), old, new),
243 simplify_replace_rtx (XEXP (x, 1), old, new));
244 case '<':
245 return
246 simplify_gen_relational (code, mode,
247 (GET_MODE (XEXP (x, 0)) != VOIDmode
248 ? GET_MODE (XEXP (x, 0))
249 : GET_MODE (XEXP (x, 1))),
250 simplify_replace_rtx (XEXP (x, 0), old, new),
251 simplify_replace_rtx (XEXP (x, 1), old, new));
252
253 case '3':
254 case 'b':
255 return
256 simplify_gen_ternary (code, mode, GET_MODE (XEXP (x, 0)),
257 simplify_replace_rtx (XEXP (x, 0), old, new),
258 simplify_replace_rtx (XEXP (x, 1), old, new),
259 simplify_replace_rtx (XEXP (x, 2), old, new));
260
261 case 'x':
262 /* The only case we try to handle is a lowpart SUBREG of a single-word
263 CONST_INT. */
264 if (code == SUBREG && subreg_lowpart_p (x) && old == SUBREG_REG (x)
265 && GET_CODE (new) == CONST_INT
266 && GET_MODE_SIZE (GET_MODE (old)) <= UNITS_PER_WORD)
267 return GEN_INT (INTVAL (new) & GET_MODE_MASK (mode));
268
269 return x;
270
271 default:
272 if (GET_CODE (x) == MEM)
273 {
274 /* We can't use change_address here, since it verifies the memory
275 address for correctness. We don't want such a check, since we may
276 handle addresses that were previously incorrect (such as ones in push
277 instructions), and it is the caller's job to verify that the resulting insn matches. */
278 rtx addr = simplify_replace_rtx (XEXP (x, 0), old, new);
279 rtx mem;
280 if (XEXP (x, 0) != addr)
281 {
282 mem = gen_rtx_MEM (GET_MODE (x), addr);
283 MEM_COPY_ATTRIBUTES (mem, x);
284 }
285 else
286 mem = x;
287 return mem;
288 }
289
290 return x;
291 }
292 return x;
293 }
294 \f
295 /* Try to simplify a unary operation CODE whose output mode is to be
296 MODE with input operand OP whose mode was originally OP_MODE.
297 Return zero if no simplification can be made. */
298
299 rtx
300 simplify_unary_operation (code, mode, op, op_mode)
301 enum rtx_code code;
302 enum machine_mode mode;
303 rtx op;
304 enum machine_mode op_mode;
305 {
306 unsigned int width = GET_MODE_BITSIZE (mode);
307
308 /* The order of these tests is critical so that, for example, we don't
309 check the wrong mode (input vs. output) for a conversion operation,
310 such as FIX. At some point, this should be simplified. */
311
312 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
313
314 if (code == FLOAT && GET_MODE (op) == VOIDmode
315 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
316 {
317 HOST_WIDE_INT hv, lv;
318 REAL_VALUE_TYPE d;
319
320 if (GET_CODE (op) == CONST_INT)
321 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
322 else
323 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
324
325 #ifdef REAL_ARITHMETIC
326 REAL_VALUE_FROM_INT (d, lv, hv, mode);
327 #else
328 if (hv < 0)
329 {
330 d = (double) (~ hv);
331 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
332 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
333 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
334 d = (- d - 1.0);
335 }
336 else
337 {
338 d = (double) hv;
339 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
340 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
341 d += (double) (unsigned HOST_WIDE_INT) lv;
342 }
343 #endif /* REAL_ARITHMETIC */
344 d = real_value_truncate (mode, d);
345 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
346 }
347 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
348 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
349 {
350 HOST_WIDE_INT hv, lv;
351 REAL_VALUE_TYPE d;
352
353 if (GET_CODE (op) == CONST_INT)
354 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
355 else
356 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
357
358 if (op_mode == VOIDmode)
359 {
360 /* We don't know how to interpret negative-looking numbers in
361 this case, so don't try to fold those. */
362 if (hv < 0)
363 return 0;
364 }
365 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
366 ;
367 else
368 hv = 0, lv &= GET_MODE_MASK (op_mode);
369
370 #ifdef REAL_ARITHMETIC
371 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
372 #else
373
374 d = (double) (unsigned HOST_WIDE_INT) hv;
375 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
376 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
377 d += (double) (unsigned HOST_WIDE_INT) lv;
378 #endif /* REAL_ARITHMETIC */
379 d = real_value_truncate (mode, d);
380 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
381 }
382 #endif
383
384 if (GET_CODE (op) == CONST_INT
385 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
386 {
387 register HOST_WIDE_INT arg0 = INTVAL (op);
388 register HOST_WIDE_INT val;
389
390 switch (code)
391 {
392 case NOT:
393 val = ~ arg0;
394 break;
395
396 case NEG:
397 val = - arg0;
398 break;
399
400 case ABS:
401 val = (arg0 >= 0 ? arg0 : - arg0);
402 break;
403
404 case FFS:
405 /* Don't use ffs here. Instead, get the low-order bit and then its
406 number. If arg0 is zero, this will return 0, as desired. */
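/* A sketch of the trick: for arg0 = 12 (binary 1100), arg0 & -arg0
   isolates the lowest set bit, 4; exact_log2 (4) is 2, so the result
   is 3, the 1-based position of that bit.  */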
407 arg0 &= GET_MODE_MASK (mode);
408 val = exact_log2 (arg0 & (- arg0)) + 1;
409 break;
410
411 case TRUNCATE:
412 val = arg0;
413 break;
414
415 case ZERO_EXTEND:
416 if (op_mode == VOIDmode)
417 op_mode = mode;
418 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
419 {
420 /* If we were really extending the mode,
421 we would have to distinguish between zero-extension
422 and sign-extension. */
423 if (width != GET_MODE_BITSIZE (op_mode))
424 abort ();
425 val = arg0;
426 }
427 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
428 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
429 else
430 return 0;
431 break;
432
433 case SIGN_EXTEND:
434 if (op_mode == VOIDmode)
435 op_mode = mode;
436 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
437 {
438 /* If we were really extending the mode,
439 we would have to distinguish between zero-extension
440 and sign-extension. */
441 if (width != GET_MODE_BITSIZE (op_mode))
442 abort ();
443 val = arg0;
444 }
445 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
446 {
447 val
448 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
449 if (val
450 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
451 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
452 }
453 else
454 return 0;
455 break;
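/* A sketch of the SIGN_EXTEND arithmetic above: extending the QImode
   (8-bit) value 0xff, VAL is first masked to 0xff; bit 7 is set, so we
   subtract 1 << 8, giving VAL = -1, as a signed byte should read.  */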
456
457 case SQRT:
458 case FLOAT_EXTEND:
459 case FLOAT_TRUNCATE:
460 return 0;
461
462 default:
463 abort ();
464 }
465
466 val = trunc_int_for_mode (val, mode);
467
468 return GEN_INT (val);
469 }
470
471 /* We can do some operations on integer CONST_DOUBLEs. Also allow
472 for a DImode operation on a CONST_INT. */
473 else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_WIDE_INT * 2
474 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
475 {
476 unsigned HOST_WIDE_INT l1, lv;
477 HOST_WIDE_INT h1, hv;
478
479 if (GET_CODE (op) == CONST_DOUBLE)
480 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
481 else
482 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
483
484 switch (code)
485 {
486 case NOT:
487 lv = ~ l1;
488 hv = ~ h1;
489 break;
490
491 case NEG:
492 neg_double (l1, h1, &lv, &hv);
493 break;
494
495 case ABS:
496 if (h1 < 0)
497 neg_double (l1, h1, &lv, &hv);
498 else
499 lv = l1, hv = h1;
500 break;
501
502 case FFS:
503 hv = 0;
504 if (l1 == 0)
505 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
506 else
507 lv = exact_log2 (l1 & (-l1)) + 1;
508 break;
509
510 case TRUNCATE:
511 /* This is just a change-of-mode, so do nothing. */
512 lv = l1, hv = h1;
513 break;
514
515 case ZERO_EXTEND:
516 if (op_mode == VOIDmode
517 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
518 return 0;
519
520 hv = 0;
521 lv = l1 & GET_MODE_MASK (op_mode);
522 break;
523
524 case SIGN_EXTEND:
525 if (op_mode == VOIDmode
526 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
527 return 0;
528 else
529 {
530 lv = l1 & GET_MODE_MASK (op_mode);
531 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
532 && (lv & ((HOST_WIDE_INT) 1
533 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
534 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
535
536 hv = HWI_SIGN_EXTEND (lv);
537 }
538 break;
539
540 case SQRT:
541 return 0;
542
543 default:
544 return 0;
545 }
546
547 return immed_double_const (lv, hv, mode);
548 }
549
550 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
551 else if (GET_CODE (op) == CONST_DOUBLE
552 && GET_MODE_CLASS (mode) == MODE_FLOAT)
553 {
554 REAL_VALUE_TYPE d;
555 jmp_buf handler;
556 rtx x;
557
558 if (setjmp (handler))
559 /* There used to be a warning here, but that is inadvisable.
560 People may want to cause traps, and the natural way
561 to do it should not get a warning. */
562 return 0;
563
564 set_float_handler (handler);
565
566 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
567
568 switch (code)
569 {
570 case NEG:
571 d = REAL_VALUE_NEGATE (d);
572 break;
573
574 case ABS:
575 if (REAL_VALUE_NEGATIVE (d))
576 d = REAL_VALUE_NEGATE (d);
577 break;
578
579 case FLOAT_TRUNCATE:
580 d = real_value_truncate (mode, d);
581 break;
582
583 case FLOAT_EXTEND:
584 /* All this does is change the mode. */
585 break;
586
587 case FIX:
588 d = REAL_VALUE_RNDZINT (d);
589 break;
590
591 case UNSIGNED_FIX:
592 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
593 break;
594
595 case SQRT:
596 return 0;
597
598 default:
599 abort ();
600 }
601
602 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
603 set_float_handler (NULL);
604 return x;
605 }
606
607 else if (GET_CODE (op) == CONST_DOUBLE
608 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
609 && GET_MODE_CLASS (mode) == MODE_INT
610 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
611 {
612 REAL_VALUE_TYPE d;
613 jmp_buf handler;
614 HOST_WIDE_INT val;
615
616 if (setjmp (handler))
617 return 0;
618
619 set_float_handler (handler);
620
621 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
622
623 switch (code)
624 {
625 case FIX:
626 val = REAL_VALUE_FIX (d);
627 break;
628
629 case UNSIGNED_FIX:
630 val = REAL_VALUE_UNSIGNED_FIX (d);
631 break;
632
633 default:
634 abort ();
635 }
636
637 set_float_handler (NULL);
638
639 val = trunc_int_for_mode (val, mode);
640
641 return GEN_INT (val);
642 }
643 #endif
644 /* This was formerly used only for non-IEEE float.
645 eggert@twinsun.com says it is safe for IEEE also. */
646 else
647 {
648 enum rtx_code reversed;
649 /* There are some simplifications we can do even if the operands
650 aren't constant. */
651 switch (code)
652 {
653 case NOT:
654 /* (not (not X)) == X. */
655 if (GET_CODE (op) == NOT)
656 return XEXP (op, 0);
657
658 /* (not (eq X Y)) == (ne X Y), etc. */
659 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
660 && ((reversed = reversed_comparison_code (op, NULL_RTX))
661 != UNKNOWN))
662 return gen_rtx_fmt_ee (reversed,
663 op_mode, XEXP (op, 0), XEXP (op, 1));
664 break;
665
666 case NEG:
667 /* (neg (neg X)) == X. */
668 if (GET_CODE (op) == NEG)
669 return XEXP (op, 0);
670 break;
671
672 case SIGN_EXTEND:
673 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
674 becomes just the MINUS if its mode is MODE. This allows
675 folding switch statements on machines using casesi (such as
676 the VAX). */
677 if (GET_CODE (op) == TRUNCATE
678 && GET_MODE (XEXP (op, 0)) == mode
679 && GET_CODE (XEXP (op, 0)) == MINUS
680 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
681 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
682 return XEXP (op, 0);
683
684 #ifdef POINTERS_EXTEND_UNSIGNED
685 if (! POINTERS_EXTEND_UNSIGNED
686 && mode == Pmode && GET_MODE (op) == ptr_mode
687 && (CONSTANT_P (op)
688 || (GET_CODE (op) == SUBREG
689 && GET_CODE (SUBREG_REG (op)) == REG
690 && REG_POINTER (SUBREG_REG (op))
691 && GET_MODE (SUBREG_REG (op)) == Pmode)))
692 return convert_memory_address (Pmode, op);
693 #endif
694 break;
695
696 #ifdef POINTERS_EXTEND_UNSIGNED
697 case ZERO_EXTEND:
698 if (POINTERS_EXTEND_UNSIGNED
699 && mode == Pmode && GET_MODE (op) == ptr_mode
700 && (CONSTANT_P (op)
701 || (GET_CODE (op) == SUBREG
702 && GET_CODE (SUBREG_REG (op)) == REG
703 && REG_POINTER (SUBREG_REG (op))
704 && GET_MODE (SUBREG_REG (op)) == Pmode)))
705 return convert_memory_address (Pmode, op);
706 break;
707 #endif
708
709 default:
710 break;
711 }
712
713 return 0;
714 }
715 }
716 \f
717 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
718 and OP1. Return 0 if no simplification is possible.
719
720 Don't use this for relational operations such as EQ or LT.
721 Use simplify_relational_operation instead. */
722
723 rtx
724 simplify_binary_operation (code, mode, op0, op1)
725 enum rtx_code code;
726 enum machine_mode mode;
727 rtx op0, op1;
728 {
729 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
730 HOST_WIDE_INT val;
731 unsigned int width = GET_MODE_BITSIZE (mode);
732 rtx tem;
733
734 /* Relational operations don't work here. We must know the mode
735 of the operands in order to do the comparison correctly.
736 Assuming a full word can give incorrect results.
737 Consider comparing 128 with -128 in QImode. */
738
739 if (GET_RTX_CLASS (code) == '<')
740 abort ();
741
742 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
743 if (GET_MODE_CLASS (mode) == MODE_FLOAT
744 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
745 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
746 {
747 REAL_VALUE_TYPE f0, f1, value;
748 jmp_buf handler;
749
750 if (setjmp (handler))
751 return 0;
752
753 set_float_handler (handler);
754
755 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
756 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
757 f0 = real_value_truncate (mode, f0);
758 f1 = real_value_truncate (mode, f1);
759
760 #ifdef REAL_ARITHMETIC
761 #ifndef REAL_INFINITY
762 if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
763 return 0;
764 #endif
765 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
766 #else
767 switch (code)
768 {
769 case PLUS:
770 value = f0 + f1;
771 break;
772 case MINUS:
773 value = f0 - f1;
774 break;
775 case MULT:
776 value = f0 * f1;
777 break;
778 case DIV:
779 #ifndef REAL_INFINITY
780 if (f1 == 0)
781 return 0;
782 #endif
783 value = f0 / f1;
784 break;
785 case SMIN:
786 value = MIN (f0, f1);
787 break;
788 case SMAX:
789 value = MAX (f0, f1);
790 break;
791 default:
792 abort ();
793 }
794 #endif
795
796 value = real_value_truncate (mode, value);
797 set_float_handler (NULL);
798 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
799 }
800 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
801
802 /* We can fold some multi-word operations. */
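/* For example, on a host with 32-bit HOST_WIDE_INT, a DImode PLUS of
   the CONST_DOUBLE 0x100000000, i.e. the pair (l1, h1) = (0, 1), and
   (const_int -1), i.e. (0xffffffff, -1), goes through add_double and
   yields (0xffffffff, 0), which immed_double_const returns as the
   constant 0xffffffff.  */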
803 if (GET_MODE_CLASS (mode) == MODE_INT
804 && width == HOST_BITS_PER_WIDE_INT * 2
805 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
806 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
807 {
808 unsigned HOST_WIDE_INT l1, l2, lv;
809 HOST_WIDE_INT h1, h2, hv;
810
811 if (GET_CODE (op0) == CONST_DOUBLE)
812 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
813 else
814 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
815
816 if (GET_CODE (op1) == CONST_DOUBLE)
817 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
818 else
819 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
820
821 switch (code)
822 {
823 case MINUS:
824 /* A - B == A + (-B). */
825 neg_double (l2, h2, &lv, &hv);
826 l2 = lv, h2 = hv;
827
828 /* ... fall through ... */
829
830 case PLUS:
831 add_double (l1, h1, l2, h2, &lv, &hv);
832 break;
833
834 case MULT:
835 mul_double (l1, h1, l2, h2, &lv, &hv);
836 break;
837
838 case DIV: case MOD: case UDIV: case UMOD:
839 /* We'd need to include tree.h to do this and it doesn't seem worth
840 it. */
841 return 0;
842
843 case AND:
844 lv = l1 & l2, hv = h1 & h2;
845 break;
846
847 case IOR:
848 lv = l1 | l2, hv = h1 | h2;
849 break;
850
851 case XOR:
852 lv = l1 ^ l2, hv = h1 ^ h2;
853 break;
854
855 case SMIN:
856 if (h1 < h2
857 || (h1 == h2
858 && ((unsigned HOST_WIDE_INT) l1
859 < (unsigned HOST_WIDE_INT) l2)))
860 lv = l1, hv = h1;
861 else
862 lv = l2, hv = h2;
863 break;
864
865 case SMAX:
866 if (h1 > h2
867 || (h1 == h2
868 && ((unsigned HOST_WIDE_INT) l1
869 > (unsigned HOST_WIDE_INT) l2)))
870 lv = l1, hv = h1;
871 else
872 lv = l2, hv = h2;
873 break;
874
875 case UMIN:
876 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
877 || (h1 == h2
878 && ((unsigned HOST_WIDE_INT) l1
879 < (unsigned HOST_WIDE_INT) l2)))
880 lv = l1, hv = h1;
881 else
882 lv = l2, hv = h2;
883 break;
884
885 case UMAX:
886 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
887 || (h1 == h2
888 && ((unsigned HOST_WIDE_INT) l1
889 > (unsigned HOST_WIDE_INT) l2)))
890 lv = l1, hv = h1;
891 else
892 lv = l2, hv = h2;
893 break;
894
895 case LSHIFTRT: case ASHIFTRT:
896 case ASHIFT:
897 case ROTATE: case ROTATERT:
898 #ifdef SHIFT_COUNT_TRUNCATED
899 if (SHIFT_COUNT_TRUNCATED)
900 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
901 #endif
902
903 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
904 return 0;
905
906 if (code == LSHIFTRT || code == ASHIFTRT)
907 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
908 code == ASHIFTRT);
909 else if (code == ASHIFT)
910 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
911 else if (code == ROTATE)
912 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
913 else /* code == ROTATERT */
914 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
915 break;
916
917 default:
918 return 0;
919 }
920
921 return immed_double_const (lv, hv, mode);
922 }
923
924 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
925 || width > HOST_BITS_PER_WIDE_INT || width == 0)
926 {
927 /* Even if we can't compute a constant result,
928 there are some cases worth simplifying. */
929
930 switch (code)
931 {
932 case PLUS:
933 /* In IEEE floating point, x+0 is not the same as x (consider
934 x = -0.0, where x+0 is +0.0). Similarly for the optimizations below. */
935 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
936 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
937 break;
938
939 if (op1 == CONST0_RTX (mode))
940 return op0;
941
942 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
943 if (GET_CODE (op0) == NEG)
944 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
945 else if (GET_CODE (op1) == NEG)
946 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
947
948 /* (~a) + 1 -> -a */
949 if (INTEGRAL_MODE_P (mode)
950 && GET_CODE (op0) == NOT
951 && GET_CODE (op1) == CONST_INT
952 && INTVAL (op1) == 1)
953 return gen_rtx_NEG (mode, XEXP (op0, 0));
954
955 /* Handle both-operands-constant cases. We can only add
956 CONST_INTs to constants since the sum of relocatable symbols
957 can't be handled by most assemblers. Don't add CONST_INT
958 to CONST_INT since overflow won't be computed properly if wider
959 than HOST_BITS_PER_WIDE_INT. */
960
961 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
962 && GET_CODE (op1) == CONST_INT)
963 return plus_constant (op0, INTVAL (op1));
964 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
965 && GET_CODE (op0) == CONST_INT)
966 return plus_constant (op1, INTVAL (op0));
967
968 /* See if this is something like X * C + X or vice versa, or
969 if the multiplication is written as a shift. If so, we can
970 distribute and make a new multiply or shift that combines the
971 coefficients (for example, X * 2 + X becomes X * 3). But don't
972 make a real multiply if we didn't have one before. */
973
974 if (! FLOAT_MODE_P (mode))
975 {
976 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
977 rtx lhs = op0, rhs = op1;
978 int had_mult = 0;
979
980 if (GET_CODE (lhs) == NEG)
981 coeff0 = -1, lhs = XEXP (lhs, 0);
982 else if (GET_CODE (lhs) == MULT
983 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
984 {
985 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
986 had_mult = 1;
987 }
988 else if (GET_CODE (lhs) == ASHIFT
989 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
990 && INTVAL (XEXP (lhs, 1)) >= 0
991 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
992 {
993 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
994 lhs = XEXP (lhs, 0);
995 }
996
997 if (GET_CODE (rhs) == NEG)
998 coeff1 = -1, rhs = XEXP (rhs, 0);
999 else if (GET_CODE (rhs) == MULT
1000 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1001 {
1002 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1003 had_mult = 1;
1004 }
1005 else if (GET_CODE (rhs) == ASHIFT
1006 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1007 && INTVAL (XEXP (rhs, 1)) >= 0
1008 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1009 {
1010 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1011 rhs = XEXP (rhs, 0);
1012 }
1013
1014 if (rtx_equal_p (lhs, rhs))
1015 {
1016 tem = simplify_gen_binary (MULT, mode, lhs,
1017 GEN_INT (coeff0 + coeff1));
1018 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1019 }
1020 }
1021
1022 /* If one of the operands is a PLUS or a MINUS, see if we can
1023 simplify this by the associative law.
1024 Don't use the associative law for floating point.
1025 The inaccuracy makes it nonassociative,
1026 and subtle programs can break if operations are associated. */
1027
1028 if (INTEGRAL_MODE_P (mode)
1029 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1030 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
1031 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1032 return tem;
1033 break;
1034
1035 case COMPARE:
1036 #ifdef HAVE_cc0
1037 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1038 using cc0, in which case we want to leave it as a COMPARE
1039 so we can distinguish it from a register-register-copy.
1040
1041 In IEEE floating point, x-0 is not the same as x. */
1042
1043 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1044 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1045 && op1 == CONST0_RTX (mode))
1046 return op0;
1047 #endif
1048
1049 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1050 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1051 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1052 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1053 {
1054 rtx xop00 = XEXP (op0, 0);
1055 rtx xop10 = XEXP (op1, 0);
1056
1057 #ifdef HAVE_cc0
1058 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1059 #else
1060 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1061 && GET_MODE (xop00) == GET_MODE (xop10)
1062 && REGNO (xop00) == REGNO (xop10)
1063 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1064 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1065 #endif
1066 return xop00;
1067 }
1068
1069 break;
1070 case MINUS:
1071 /* None of these optimizations can be done for IEEE
1072 floating point. */
1073 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1074 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
1075 break;
1076
1077 /* We can't assume x-x is 0 even with non-IEEE floating point,
1078 but since it is zero except in very strange circumstances, we
1079 will treat it as zero with -funsafe-math-optimizations. */
1080 if (rtx_equal_p (op0, op1)
1081 && ! side_effects_p (op0)
1082 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1083 return CONST0_RTX (mode);
1084
1085 /* Change subtraction from zero into negation. */
1086 if (op0 == CONST0_RTX (mode))
1087 return gen_rtx_NEG (mode, op1);
1088
1089 /* (-1 - a) is ~a. */
1090 if (op0 == constm1_rtx)
1091 return gen_rtx_NOT (mode, op1);
1092
1093 /* Subtracting 0 has no effect. */
1094 if (op1 == CONST0_RTX (mode))
1095 return op0;
1096
1097 /* See if this is something like X * C - X or vice versa or
1098 if the multiplication is written as a shift. If so, we can
1099 distribute and make a new multiply, shift, or maybe just
1100 have X (if C is 2 in the example above). But don't make
1101 real multiply if we didn't have one before. */
1102
1103 if (! FLOAT_MODE_P (mode))
1104 {
1105 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1106 rtx lhs = op0, rhs = op1;
1107 int had_mult = 0;
1108
1109 if (GET_CODE (lhs) == NEG)
1110 coeff0 = -1, lhs = XEXP (lhs, 0);
1111 else if (GET_CODE (lhs) == MULT
1112 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1113 {
1114 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1115 had_mult = 1;
1116 }
1117 else if (GET_CODE (lhs) == ASHIFT
1118 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1119 && INTVAL (XEXP (lhs, 1)) >= 0
1120 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1121 {
1122 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1123 lhs = XEXP (lhs, 0);
1124 }
1125
1126 if (GET_CODE (rhs) == NEG)
1127 coeff1 = - 1, rhs = XEXP (rhs, 0);
1128 else if (GET_CODE (rhs) == MULT
1129 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1130 {
1131 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1132 had_mult = 1;
1133 }
1134 else if (GET_CODE (rhs) == ASHIFT
1135 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1136 && INTVAL (XEXP (rhs, 1)) >= 0
1137 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1138 {
1139 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1140 rhs = XEXP (rhs, 0);
1141 }
1142
1143 if (rtx_equal_p (lhs, rhs))
1144 {
1145 tem = simplify_gen_binary (MULT, mode, lhs,
1146 GEN_INT (coeff0 - coeff1));
1147 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1148 }
1149 }
1150
1151 /* (a - (-b)) -> (a + b). */
1152 if (GET_CODE (op1) == NEG)
1153 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1154
1155 /* If one of the operands is a PLUS or a MINUS, see if we can
1156 simplify this by the associative law.
1157 Don't use the associative law for floating point.
1158 The inaccuracy makes it nonassociative,
1159 and subtle programs can break if operations are associated. */
1160
1161 if (INTEGRAL_MODE_P (mode)
1162 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1163 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
1164 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1165 return tem;
1166
1167 /* Don't let a relocatable value get a negative coeff. */
1168 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1169 return plus_constant (op0, - INTVAL (op1));
1170
1171 /* (x - (x & y)) -> (x & ~y) */
1172 if (GET_CODE (op1) == AND)
1173 {
1174 if (rtx_equal_p (op0, XEXP (op1, 0)))
1175 return simplify_gen_binary (AND, mode, op0,
1176 gen_rtx_NOT (mode, XEXP (op1, 1)));
1177 if (rtx_equal_p (op0, XEXP (op1, 1)))
1178 return simplify_gen_binary (AND, mode, op0,
1179 gen_rtx_NOT (mode, XEXP (op1, 0)));
1180 }
1181 break;
1182
1183 case MULT:
1184 if (op1 == constm1_rtx)
1185 {
1186 tem = simplify_unary_operation (NEG, mode, op0, mode);
1187
1188 return tem ? tem : gen_rtx_NEG (mode, op0);
1189 }
1190
1191 /* In IEEE floating point, x*0 is not always 0. */
1192 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1193 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1194 && op1 == CONST0_RTX (mode)
1195 && ! side_effects_p (op0))
1196 return op1;
1197
1198 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1199 However, ANSI says we can drop signals,
1200 so we can do this anyway. */
1201 if (op1 == CONST1_RTX (mode))
1202 return op0;
1203
1204 /* Convert multiply by constant power of two into shift unless
1205 we are still generating RTL. This test is a kludge. */
1206 if (GET_CODE (op1) == CONST_INT
1207 && (val = exact_log2 (INTVAL (op1))) >= 0
1208 /* If the mode is larger than the host word size, and the
1209 uppermost bit is set, then this isn't a power of two due
1210 to implicit sign extension. */
1211 && (width <= HOST_BITS_PER_WIDE_INT
1212 || val != HOST_BITS_PER_WIDE_INT - 1)
1213 && ! rtx_equal_function_value_matters)
1214 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1215
1216 if (GET_CODE (op1) == CONST_DOUBLE
1217 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
1218 {
1219 REAL_VALUE_TYPE d;
1220 jmp_buf handler;
1221 int op1is2, op1ism1;
1222
1223 if (setjmp (handler))
1224 return 0;
1225
1226 set_float_handler (handler);
1227 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1228 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
1229 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
1230 set_float_handler (NULL);
1231
1232 /* x*2 is x+x and x*(-1) is -x */
1233 if (op1is2 && GET_MODE (op0) == mode)
1234 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1235
1236 else if (op1ism1 && GET_MODE (op0) == mode)
1237 return gen_rtx_NEG (mode, op0);
1238 }
1239 break;
1240
1241 case IOR:
1242 if (op1 == const0_rtx)
1243 return op0;
1244 if (GET_CODE (op1) == CONST_INT
1245 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1246 return op1;
1247 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1248 return op0;
1249 /* A | (~A) -> -1 */
1250 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1251 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1252 && ! side_effects_p (op0)
1253 && GET_MODE_CLASS (mode) != MODE_CC)
1254 return constm1_rtx;
1255 break;
1256
1257 case XOR:
1258 if (op1 == const0_rtx)
1259 return op0;
1260 if (GET_CODE (op1) == CONST_INT
1261 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1262 return gen_rtx_NOT (mode, op0);
1263 if (op0 == op1 && ! side_effects_p (op0)
1264 && GET_MODE_CLASS (mode) != MODE_CC)
1265 return const0_rtx;
1266 break;
1267
1268 case AND:
1269 if (op1 == const0_rtx && ! side_effects_p (op0))
1270 return const0_rtx;
1271 if (GET_CODE (op1) == CONST_INT
1272 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1273 return op0;
1274 if (op0 == op1 && ! side_effects_p (op0)
1275 && GET_MODE_CLASS (mode) != MODE_CC)
1276 return op0;
1277 /* A & (~A) -> 0 */
1278 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1279 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1280 && ! side_effects_p (op0)
1281 && GET_MODE_CLASS (mode) != MODE_CC)
1282 return const0_rtx;
1283 break;
1284
1285 case UDIV:
1286 /* Convert divide by power of two into shift (divide by 1 handled
1287 below). */
1288 if (GET_CODE (op1) == CONST_INT
1289 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
1290 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1291
1292 /* ... fall through ... */
1293
1294 case DIV:
1295 if (op1 == CONST1_RTX (mode))
1296 return op0;
1297
1298 /* In IEEE floating point, 0/x is not always 0. */
1299 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1300 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1301 && op0 == CONST0_RTX (mode)
1302 && ! side_effects_p (op1))
1303 return op0;
1304
1305 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1306 /* Change division by a constant into multiplication. Only do
1307 this with -funsafe-math-optimizations. */
1308 else if (GET_CODE (op1) == CONST_DOUBLE
1309 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
1310 && op1 != CONST0_RTX (mode)
1311 && flag_unsafe_math_optimizations)
1312 {
1313 REAL_VALUE_TYPE d;
1314 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1315
1316 if (! REAL_VALUES_EQUAL (d, dconst0))
1317 {
1318 #if defined (REAL_ARITHMETIC)
1319 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1320 return gen_rtx_MULT (mode, op0,
1321 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1322 #else
1323 return
1324 gen_rtx_MULT (mode, op0,
1325 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
1326 #endif
1327 }
1328 }
1329 #endif
1330 break;
1331
1332 case UMOD:
1333 /* Handle modulus by power of two (mod with 1 handled below). */
1334 if (GET_CODE (op1) == CONST_INT
1335 && exact_log2 (INTVAL (op1)) > 0)
1336 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1337
1338 /* ... fall through ... */
1339
1340 case MOD:
1341 if ((op0 == const0_rtx || op1 == const1_rtx)
1342 && ! side_effects_p (op0) && ! side_effects_p (op1))
1343 return const0_rtx;
1344 break;
1345
1346 case ROTATERT:
1347 case ROTATE:
1348 /* Rotating ~0 always results in ~0. */
1349 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1350 && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
1351 && ! side_effects_p (op1))
1352 return op0;
1353
1354 /* ... fall through ... */
1355
1356 case ASHIFT:
1357 case ASHIFTRT:
1358 case LSHIFTRT:
1359 if (op1 == const0_rtx)
1360 return op0;
1361 if (op0 == const0_rtx && ! side_effects_p (op1))
1362 return op0;
1363 break;
1364
1365 case SMIN:
1366 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1367 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width - 1)
1368 && ! side_effects_p (op0))
1369 return op1;
1370 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1371 return op0;
1372 break;
1373
1374 case SMAX:
1375 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1376 && ((unsigned HOST_WIDE_INT) INTVAL (op1)
1377 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1378 && ! side_effects_p (op0))
1379 return op1;
1380 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1381 return op0;
1382 break;
1383
1384 case UMIN:
1385 if (op1 == const0_rtx && ! side_effects_p (op0))
1386 return op1;
1387 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1388 return op0;
1389 break;
1390
1391 case UMAX:
1392 if (op1 == constm1_rtx && ! side_effects_p (op0))
1393 return op1;
1394 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1395 return op0;
1396 break;
1397
1398 default:
1399 abort ();
1400 }
1401
1402 return 0;
1403 }
1404
1405 /* Get the integer argument values in two forms:
1406 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
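/* For example: in QImode the CONST_INT -1 yields ARG0 = 0xff
   (zero-extended) and ARG0S = -1 (sign-extended).  */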
1407
1408 arg0 = INTVAL (op0);
1409 arg1 = INTVAL (op1);
1410
1411 if (width < HOST_BITS_PER_WIDE_INT)
1412 {
1413 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1414 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1415
1416 arg0s = arg0;
1417 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1418 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1419
1420 arg1s = arg1;
1421 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1422 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1423 }
1424 else
1425 {
1426 arg0s = arg0;
1427 arg1s = arg1;
1428 }
1429
1430 /* Compute the value of the arithmetic. */
1431
1432 switch (code)
1433 {
1434 case PLUS:
1435 val = arg0s + arg1s;
1436 break;
1437
1438 case MINUS:
1439 val = arg0s - arg1s;
1440 break;
1441
1442 case MULT:
1443 val = arg0s * arg1s;
1444 break;
1445
1446 case DIV:
1447 if (arg1s == 0
1448 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1449 && arg1s == -1))
1450 return 0;
1451 val = arg0s / arg1s;
1452 break;
1453
1454 case MOD:
1455 if (arg1s == 0
1456 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1457 && arg1s == -1))
1458 return 0;
1459 val = arg0s % arg1s;
1460 break;
1461
1462 case UDIV:
1463 if (arg1 == 0
1464 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1465 && arg1s == -1))
1466 return 0;
1467 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1468 break;
1469
1470 case UMOD:
1471 if (arg1 == 0
1472 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1473 && arg1s == -1))
1474 return 0;
1475 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1476 break;
1477
1478 case AND:
1479 val = arg0 & arg1;
1480 break;
1481
1482 case IOR:
1483 val = arg0 | arg1;
1484 break;
1485
1486 case XOR:
1487 val = arg0 ^ arg1;
1488 break;
1489
1490 case LSHIFTRT:
1491 /* If shift count is undefined, don't fold it; let the machine do
1492 what it wants. But truncate it if the machine will do that. */
1493 if (arg1 < 0)
1494 return 0;
1495
1496 #ifdef SHIFT_COUNT_TRUNCATED
1497 if (SHIFT_COUNT_TRUNCATED)
1498 arg1 %= width;
1499 #endif
1500
1501 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1502 break;
1503
1504 case ASHIFT:
1505 if (arg1 < 0)
1506 return 0;
1507
1508 #ifdef SHIFT_COUNT_TRUNCATED
1509 if (SHIFT_COUNT_TRUNCATED)
1510 arg1 %= width;
1511 #endif
1512
1513 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1514 break;
1515
1516 case ASHIFTRT:
1517 if (arg1 < 0)
1518 return 0;
1519
1520 #ifdef SHIFT_COUNT_TRUNCATED
1521 if (SHIFT_COUNT_TRUNCATED)
1522 arg1 %= width;
1523 #endif
1524
1525 val = arg0s >> arg1;
1526
1527 /* The bootstrap compiler may not have sign-extended the right shift.
1528 Manually extend the sign to ensure the bootstrap cc matches gcc. */
1529 if (arg0s < 0 && arg1 > 0)
1530 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1531
1532 break;
1533
1534 case ROTATERT:
1535 if (arg1 < 0)
1536 return 0;
1537
1538 arg1 %= width;
1539 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1540 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1541 break;
1542
1543 case ROTATE:
1544 if (arg1 < 0)
1545 return 0;
1546
1547 arg1 %= width;
1548 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1549 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1550 break;
1551
1552 case COMPARE:
1553 /* Do nothing here. */
1554 return 0;
1555
1556 case SMIN:
1557 val = arg0s <= arg1s ? arg0s : arg1s;
1558 break;
1559
1560 case UMIN:
1561 val = ((unsigned HOST_WIDE_INT) arg0
1562 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1563 break;
1564
1565 case SMAX:
1566 val = arg0s > arg1s ? arg0s : arg1s;
1567 break;
1568
1569 case UMAX:
1570 val = ((unsigned HOST_WIDE_INT) arg0
1571 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1572 break;
1573
1574 default:
1575 abort ();
1576 }
1577
1578 val = trunc_int_for_mode (val, mode);
1579
1580 return GEN_INT (val);
1581 }
1582 \f
1583 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1584 PLUS or MINUS.
1585
1586 Rather than testing for specific cases, we do this by a brute-force method
1587 and do all possible simplifications until no more changes occur. Then
1588 we rebuild the operation. */
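/* A sketch: given (minus (plus A B) (plus A C)), the expansion loop
   below flattens the operands into the signed list {+A, +B, -A, -C};
   the pairwise pass cancels +A against -A, and the result is rebuilt
   as (minus B C).  */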
1589
1590 static rtx
1591 simplify_plus_minus (code, mode, op0, op1)
1592 enum rtx_code code;
1593 enum machine_mode mode;
1594 rtx op0, op1;
1595 {
1596 rtx ops[8];
1597 int negs[8];
1598 rtx result, tem;
1599 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
1600 int first = 1, negate = 0, changed;
1601 int i, j;
1602
1603 memset ((char *) ops, 0, sizeof ops);
1604
1605 /* Set up the two operands and then expand them until nothing has been
1606 changed. If we run out of room in our array, give up; this should
1607 almost never happen. */
1608
1609 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
1610
1611 changed = 1;
1612 while (changed)
1613 {
1614 changed = 0;
1615
1616 for (i = 0; i < n_ops; i++)
1617 switch (GET_CODE (ops[i]))
1618 {
1619 case PLUS:
1620 case MINUS:
1621 if (n_ops == 7)
1622 return 0;
1623
1624 ops[n_ops] = XEXP (ops[i], 1);
1625 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
1626 ops[i] = XEXP (ops[i], 0);
1627 input_ops++;
1628 changed = 1;
1629 break;
1630
1631 case NEG:
1632 ops[i] = XEXP (ops[i], 0);
1633 negs[i] = ! negs[i];
1634 changed = 1;
1635 break;
1636
1637 case CONST:
1638 ops[i] = XEXP (ops[i], 0);
1639 input_consts++;
1640 changed = 1;
1641 break;
1642
1643 case NOT:
1644 /* ~a -> (-a - 1) */
1645 if (n_ops != 7)
1646 {
1647 ops[n_ops] = constm1_rtx;
1648 negs[n_ops++] = negs[i];
1649 ops[i] = XEXP (ops[i], 0);
1650 negs[i] = ! negs[i];
1651 changed = 1;
1652 }
1653 break;
1654
1655 case CONST_INT:
1656 if (negs[i])
1657 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
1658 break;
1659
1660 default:
1661 break;
1662 }
1663 }
1664
1665 /* If we only have two operands, we can't do anything. */
1666 if (n_ops <= 2)
1667 return 0;
1668
1669 /* Now simplify each pair of operands until nothing changes. The first
1670 time through just simplify constants against each other. */
1671
1672 changed = 1;
1673 while (changed)
1674 {
1675 changed = first;
1676
1677 for (i = 0; i < n_ops - 1; i++)
1678 for (j = i + 1; j < n_ops; j++)
1679 if (ops[i] != 0 && ops[j] != 0
1680 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
1681 {
1682 rtx lhs = ops[i], rhs = ops[j];
1683 enum rtx_code ncode = PLUS;
1684
1685 if (negs[i] && ! negs[j])
1686 lhs = ops[j], rhs = ops[i], ncode = MINUS;
1687 else if (! negs[i] && negs[j])
1688 ncode = MINUS;
1689
1690 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1691 if (tem)
1692 {
1693 ops[i] = tem, ops[j] = 0;
1694 negs[i] = negs[i] && negs[j];
1695 if (GET_CODE (tem) == NEG)
1696 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
1697
1698 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
1699 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
1700 changed = 1;
1701 }
1702 }
1703
1704 first = 0;
1705 }
1706
1707 /* Pack all the operands to the lower-numbered entries and give up if
1708 we didn't reduce the number of operands we had. Make sure we
1709 count a CONST as two operands. If we have the same number of
1710 operands, but have made more CONSTs than we had, this is also
1711 an improvement, so accept it. */
1712
1713 for (i = 0, j = 0; j < n_ops; j++)
1714 if (ops[j] != 0)
1715 {
1716 ops[i] = ops[j], negs[i++] = negs[j];
1717 if (GET_CODE (ops[j]) == CONST)
1718 n_consts++;
1719 }
1720
1721 if (i + n_consts > input_ops
1722 || (i + n_consts == input_ops && n_consts <= input_consts))
1723 return 0;
1724
1725 n_ops = i;
1726
1727 /* If we have a CONST_INT, put it last. */
1728 for (i = 0; i < n_ops - 1; i++)
1729 if (GET_CODE (ops[i]) == CONST_INT)
1730 {
1731 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i], ops[i] = tem;
1732 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
1733 }
1734
1735 /* Put a non-negated operand first. If there aren't any, make all
1736 operands positive and negate the whole thing later. */
1737 for (i = 0; i < n_ops && negs[i]; i++)
1738 ;
1739
1740 if (i == n_ops)
1741 {
1742 for (i = 0; i < n_ops; i++)
1743 negs[i] = 0;
1744 negate = 1;
1745 }
1746 else if (i != 0)
1747 {
1748 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
1749 j = negs[0], negs[0] = negs[i], negs[i] = j;
1750 }
1751
1752 /* Now make the result by performing the requested operations. */
1753 result = ops[0];
1754 for (i = 1; i < n_ops; i++)
1755 result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
1756
1757 return negate ? gen_rtx_NEG (mode, result) : result;
1758 }
1759
1760 struct cfc_args
1761 {
1762 rtx op0, op1; /* Input */
1763 int equal, op0lt, op1lt; /* Output */
1764 int unordered;
1765 };
1766
1767 static void
1768 check_fold_consts (data)
1769 PTR data;
1770 {
1771 struct cfc_args *args = (struct cfc_args *) data;
1772 REAL_VALUE_TYPE d0, d1;
1773
1774 /* We may raise an exception while reading the values. */
1775 args->unordered = 1;
1776 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
1777 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
1778
1779 /* Only NaN operands make a comparison unordered; Inf versus Inf is ordered. */
1780 if (REAL_VALUE_ISNAN (d0)
1781 || REAL_VALUE_ISNAN (d1))
1782 return;
1783 args->equal = REAL_VALUES_EQUAL (d0, d1);
1784 args->op0lt = REAL_VALUES_LESS (d0, d1);
1785 args->op1lt = REAL_VALUES_LESS (d1, d0);
1786 args->unordered = 0;
1787 }
1788
1789 /* Like simplify_binary_operation except used for relational operators.
1790 MODE is the mode of the operands, not that of the result. If MODE
1791 is VOIDmode, both operands must also be VOIDmode and we compare the
1792 operands in "infinite precision".
1793
1794 If no simplification is possible, this function returns zero. Otherwise,
1795 it returns either const_true_rtx or const0_rtx. */
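/* For example: simplify_relational_operation (LT, SImode, GEN_INT (-1),
   const0_rtx) yields const_true_rtx, while LTU on the same operands
   yields const0_rtx, since -1 compares as 0xffffffff when taken as
   unsigned.  */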
1796
1797 rtx
1798 simplify_relational_operation (code, mode, op0, op1)
1799 enum rtx_code code;
1800 enum machine_mode mode;
1801 rtx op0, op1;
1802 {
1803 int equal, op0lt, op0ltu, op1lt, op1ltu;
1804 rtx tem;
1805
1806 if (mode == VOIDmode
1807 && (GET_MODE (op0) != VOIDmode
1808 || GET_MODE (op1) != VOIDmode))
1809 abort ();
1810
1811 /* If op0 is a compare, extract the comparison arguments from it. */
1812 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1813 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1814
1815 /* We can't simplify MODE_CC values since we don't know what the
1816 actual comparison is. */
1817 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1818 #ifdef HAVE_cc0
1819 || op0 == cc0_rtx
1820 #endif
1821 )
1822 return 0;
1823
1824 /* Make sure the constant is second. */
1825 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
1826 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
1827 {
1828 tem = op0, op0 = op1, op1 = tem;
1829 code = swap_condition (code);
1830 }
1831
1832 /* For integer comparisons of A and B maybe we can simplify A - B and can
1833 then simplify a comparison of that with zero. If A and B are both either
1834 a register or a CONST_INT, this can't help; testing for these cases will
1835 prevent infinite recursion here and speed things up.
1836
1837 If CODE is an unsigned comparison, then we can never do this optimization,
1838 because it gives an incorrect result if the subtraction wraps around zero.
1839 ANSI C defines unsigned operations such that they never overflow, and
1840 thus such cases cannot be ignored. */
1841
1842 if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
1843 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
1844 && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
1845 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1846 && code != GTU && code != GEU && code != LTU && code != LEU)
1847 return simplify_relational_operation (signed_condition (code),
1848 mode, tem, const0_rtx);
1849
1850 if (flag_unsafe_math_optimizations && code == ORDERED)
1851 return const_true_rtx;
1852
1853 if (flag_unsafe_math_optimizations && code == UNORDERED)
1854 return const0_rtx;
1855
1856 /* For non-IEEE floating-point, if the two operands are equal, we know the
1857 result. */
1858 if (rtx_equal_p (op0, op1)
1859 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1860 || ! FLOAT_MODE_P (GET_MODE (op0))
1861 || flag_unsafe_math_optimizations))
1862 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1863
1864 /* If the operands are floating-point constants, see if we can fold
1865 the result. */
1866 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1867 else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
1868 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
1869 {
1870 struct cfc_args args;
1871
1872 /* Set up the input for check_fold_consts (). */
1873 args.op0 = op0;
1874 args.op1 = op1;
1875
1876
1877 if (!do_float_handler (check_fold_consts, (PTR) &args))
1878 args.unordered = 1;
1879
1880 if (args.unordered)
1881 switch (code)
1882 {
1883 case UNEQ:
1884 case UNLT:
1885 case UNGT:
1886 case UNLE:
1887 case UNGE:
1888 case NE:
1889 case UNORDERED:
1890 return const_true_rtx;
1891 case EQ:
1892 case LT:
1893 case GT:
1894 case LE:
1895 case GE:
1896 case LTGT:
1897 case ORDERED:
1898 return const0_rtx;
1899 default:
1900 return 0;
1901 }
1902
1903 /* Receive output from check_fold_consts() */
1904 equal = args.equal;
1905 op0lt = op0ltu = args.op0lt;
1906 op1lt = op1ltu = args.op1lt;
1907 }
1908 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1909
1910 /* Otherwise, see if the operands are both integers. */
1911 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
1912 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
1913 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
1914 {
1915 int width = GET_MODE_BITSIZE (mode);
1916 HOST_WIDE_INT l0s, h0s, l1s, h1s;
1917 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
1918
1919 /* Get the two words comprising each integer constant. */
1920 if (GET_CODE (op0) == CONST_DOUBLE)
1921 {
1922 l0u = l0s = CONST_DOUBLE_LOW (op0);
1923 h0u = h0s = CONST_DOUBLE_HIGH (op0);
1924 }
1925 else
1926 {
1927 l0u = l0s = INTVAL (op0);
1928 h0u = h0s = HWI_SIGN_EXTEND (l0s);
1929 }
1930
1931 if (GET_CODE (op1) == CONST_DOUBLE)
1932 {
1933 l1u = l1s = CONST_DOUBLE_LOW (op1);
1934 h1u = h1s = CONST_DOUBLE_HIGH (op1);
1935 }
1936 else
1937 {
1938 l1u = l1s = INTVAL (op1);
1939 h1u = h1s = HWI_SIGN_EXTEND (l1s);
1940 }
1941
1942 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1943 we have to sign or zero-extend the values. */
1944 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
1945 {
1946 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
1947 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
1948
1949 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1950 l0s |= ((HOST_WIDE_INT) (-1) << width);
1951
1952 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1953 l1s |= ((HOST_WIDE_INT) (-1) << width);
1954 }
1955 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
1956 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
1957
1958 equal = (h0u == h1u && l0u == l1u);
1959 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
1960 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
1961 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
1962 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
1963 }
1964
1965 /* Otherwise, there are some code-specific tests we can make. */
1966 else
1967 {
1968 switch (code)
1969 {
1970 case EQ:
1971 /* References to the frame plus a constant or labels cannot
1972 be zero, but a SYMBOL_REF can due to #pragma weak. */
1973 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
1974 || GET_CODE (op0) == LABEL_REF)
1975 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1976 /* On some machines, the ap reg can be 0 sometimes. */
1977 && op0 != arg_pointer_rtx
1978 #endif
1979 )
1980 return const0_rtx;
1981 break;
1982
1983 case NE:
1984 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
1985 || GET_CODE (op0) == LABEL_REF)
1986 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1987 && op0 != arg_pointer_rtx
1988 #endif
1989 )
1990 return const_true_rtx;
1991 break;
1992
1993 case GEU:
1994 /* Unsigned values are never negative. */
1995 if (op1 == const0_rtx)
1996 return const_true_rtx;
1997 break;
1998
1999 case LTU:
2000 if (op1 == const0_rtx)
2001 return const0_rtx;
2002 break;
2003
2004 case LEU:
2005 /* Unsigned values are never greater than the largest
2006 unsigned value. */
2007 if (GET_CODE (op1) == CONST_INT
2008 && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
2009 && INTEGRAL_MODE_P (mode))
2010 return const_true_rtx;
2011 break;
2012
2013 case GTU:
2014 if (GET_CODE (op1) == CONST_INT
2015 && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
2016 && INTEGRAL_MODE_P (mode))
2017 return const0_rtx;
2018 break;
2019
2020 default:
2021 break;
2022 }
2023
2024 return 0;
2025 }
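/* For example, an EQ test of a LABEL_REF against const0_rtx folds to
   const0_rtx because code labels never sit at address zero, while
   (geu:SI (reg:SI 100) (const_int 0)) folds to const_true_rtx because
   no unsigned value is less than zero.  */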
2026
2027 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2028 as appropriate. */
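/* For example, if OP0 and OP1 were both (const_int 4), EQUAL is set
   above, so LE, GE, LEU and GEU all yield const_true_rtx below while
   the strict LT, GT, LTU and GTU yield const0_rtx.  */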
2029 switch (code)
2030 {
2031 case EQ:
2032 case UNEQ:
2033 return equal ? const_true_rtx : const0_rtx;
2034 case NE:
2035 case LTGT:
2036 return ! equal ? const_true_rtx : const0_rtx;
2037 case LT:
2038 case UNLT:
2039 return op0lt ? const_true_rtx : const0_rtx;
2040 case GT:
2041 case UNGT:
2042 return op1lt ? const_true_rtx : const0_rtx;
2043 case LTU:
2044 return op0ltu ? const_true_rtx : const0_rtx;
2045 case GTU:
2046 return op1ltu ? const_true_rtx : const0_rtx;
2047 case LE:
2048 case UNLE:
2049 return equal || op0lt ? const_true_rtx : const0_rtx;
2050 case GE:
2051 case UNGE:
2052 return equal || op1lt ? const_true_rtx : const0_rtx;
2053 case LEU:
2054 return equal || op0ltu ? const_true_rtx : const0_rtx;
2055 case GEU:
2056 return equal || op1ltu ? const_true_rtx : const0_rtx;
2057 case ORDERED:
2058 return const_true_rtx;
2059 case UNORDERED:
2060 return const0_rtx;
2061 default:
2062 abort ();
2063 }
2064 }
2065 \f
2066 /* Simplify CODE, an operation with result mode MODE and three operands,
2067 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2068 a constant.  Return 0 if no simplification is possible.  */
2069
2070 rtx
2071 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2072 enum rtx_code code;
2073 enum machine_mode mode, op0_mode;
2074 rtx op0, op1, op2;
2075 {
2076 unsigned int width = GET_MODE_BITSIZE (mode);
2077
2078 /* VOIDmode means "infinite" precision. */
2079 if (width == 0)
2080 width = HOST_BITS_PER_WIDE_INT;
2081
2082 switch (code)
2083 {
2084 case SIGN_EXTRACT:
2085 case ZERO_EXTRACT:
2086 if (GET_CODE (op0) == CONST_INT
2087 && GET_CODE (op1) == CONST_INT
2088 && GET_CODE (op2) == CONST_INT
2089 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2090 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2091 {
2092 /* Extracting a bit-field from a constant */
2093 HOST_WIDE_INT val = INTVAL (op0);
2094
2095 if (BITS_BIG_ENDIAN)
2096 val >>= (GET_MODE_BITSIZE (op0_mode)
2097 - INTVAL (op2) - INTVAL (op1));
2098 else
2099 val >>= INTVAL (op2);
2100
2101 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2102 {
2103 /* First zero-extend. */
2104 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2105 /* If desired, propagate sign bit. */
2106 if (code == SIGN_EXTRACT
2107 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2108 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2109 }
2110
2111 /* Clear the bits that don't belong in our mode,
2112 unless they and our sign bit are all one.
2113 So we get either a reasonable negative value or a reasonable
2114 unsigned value for this mode. */
2115 if (width < HOST_BITS_PER_WIDE_INT
2116 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2117 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2118 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2119
2120 return GEN_INT (val);
2121 }
2122 break;
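/* For example, with BITS_BIG_ENDIAN == 0,
   (zero_extract:SI (const_int 0x1234) (const_int 4) (const_int 8))
   shifts right by 8 and masks to four bits, folding to (const_int 2),
   while (sign_extract:SI (const_int 0x9234) (const_int 4) (const_int 12))
   extracts the nibble 0x9 and sign-propagates it to (const_int -7).  */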
2123
2124 case IF_THEN_ELSE:
2125 if (GET_CODE (op0) == CONST_INT)
2126 return op0 != const0_rtx ? op1 : op2;
2127
2128 /* Convert a == b ? b : a to "a". */
2129 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2130 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2131 && rtx_equal_p (XEXP (op0, 0), op1)
2132 && rtx_equal_p (XEXP (op0, 1), op2))
2133 return op1;
2134 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2135 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2136 && rtx_equal_p (XEXP (op0, 1), op1)
2137 && rtx_equal_p (XEXP (op0, 0), op2))
2138 return op2;
2139 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2140 {
2141 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2142 ? GET_MODE (XEXP (op0, 1))
2143 : GET_MODE (XEXP (op0, 0)));
2144 rtx temp;
2145 if (cmp_mode == VOIDmode)
2146 cmp_mode = op0_mode;
2147 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2148 XEXP (op0, 0), XEXP (op0, 1));
2149
2150 /* See if any simplifications were possible. */
2151 if (temp == const0_rtx)
2152 return op2;
2153 else if (temp == const1_rtx)
2154 return op1;
2155 else if (temp)
2156 op0 = temp;
2157
2158 /* Look for constant arms that collapse the IF_THEN_ELSE to a comparison.  */
2159 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2160 {
2161 HOST_WIDE_INT t = INTVAL (op1);
2162 HOST_WIDE_INT f = INTVAL (op2);
2163
2164 if (t == STORE_FLAG_VALUE && f == 0)
2165 code = GET_CODE (op0);
2166 else if (t == 0 && f == STORE_FLAG_VALUE)
2167 {
2168 enum rtx_code tmp;
2169 tmp = reversed_comparison_code (op0, NULL_RTX);
2170 if (tmp == UNKNOWN)
2171 break;
2172 code = tmp;
2173 }
2174 else
2175 break;
2176
2177 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2178 }
2179 }
2180 break;
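/* For example, on a target where STORE_FLAG_VALUE == 1,
   (if_then_else (lt:SI (reg:SI 100) (const_int 0)) (const_int 1)
   (const_int 0)) folds to the bare comparison
   (lt:SI (reg:SI 100) (const_int 0)); with the constant arms swapped,
   it folds to the reversed comparison, provided
   reversed_comparison_code can reverse it.  */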
2181
2182 default:
2183 abort ();
2184 }
2185
2186 return 0;
2187 }
2188
2189 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2190 Return 0 if no simplification is possible.  */
2191 rtx
2192 simplify_subreg (outermode, op, innermode, byte)
2193 rtx op;
2194 unsigned int byte;
2195 enum machine_mode outermode, innermode;
2196 {
2197 /* Little bit of sanity checking. */
2198 if (innermode == VOIDmode || outermode == VOIDmode
2199 || innermode == BLKmode || outermode == BLKmode)
2200 abort ();
2201
2202 if (GET_MODE (op) != innermode
2203 && GET_MODE (op) != VOIDmode)
2204 abort ();
2205
2206 if (byte % GET_MODE_SIZE (outermode)
2207 || byte >= GET_MODE_SIZE (innermode))
2208 abort ();
2209
2210 /* Attempt to simplify a constant to a non-SUBREG expression.  */
2211 if (CONSTANT_P (op))
2212 {
2213 int offset, part;
2214 unsigned HOST_WIDE_INT val;
2215
2216 /* ??? This code is partly redundant with the code below, but can handle
2217 the subregs of floats and similar corner cases.
2218 Later we should move all simplification code here and rewrite
2219 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2220 using SIMPLIFY_SUBREG.  */
2221 if (subreg_lowpart_parts_p (outermode, innermode, byte))
2222 {
2223 rtx new = gen_lowpart_if_possible (outermode, op);
2224 if (new)
2225 return new;
2226 }
2227
2228 /* The comment above applies here as well.  */
2229 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2230 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2231 && GET_MODE_CLASS (outermode) == MODE_INT)
2232 {
2233 rtx new = operand_subword (op,
2234 (byte / UNITS_PER_WORD),
2235 0, innermode);
2236 if (new)
2237 return new;
2238 }
2239
2240 offset = byte * BITS_PER_UNIT;
2241 switch (GET_CODE (op))
2242 {
2243 case CONST_DOUBLE:
2244 if (GET_MODE (op) != VOIDmode)
2245 break;
2246
2247 /* We can't handle this case yet. */
2248 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2249 return NULL;
2250
2251 part = offset >= HOST_BITS_PER_WIDE_INT;
2252 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2253 && BYTES_BIG_ENDIAN)
2254 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2255 && WORDS_BIG_ENDIAN))
2256 part = !part;
2257 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2258 offset %= HOST_BITS_PER_WIDE_INT;
2259
2260 /* We've already picked the word we want from a double, so
2261 pretend this is actually an integer. */
2262 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2263
2264 /* FALLTHROUGH */
2265 case CONST_INT:
2266 if (GET_CODE (op) == CONST_INT)
2267 val = INTVAL (op);
2268
2269 /* We don't handle synthesizing non-integral constants yet.  */
2270 if (GET_MODE_CLASS (outermode) != MODE_INT)
2271 return NULL;
2272
2273 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2274 {
2275 if (WORDS_BIG_ENDIAN)
2276 offset = (GET_MODE_BITSIZE (innermode)
2277 - GET_MODE_BITSIZE (outermode) - offset);
2278 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2279 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2280 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2281 - 2 * (offset % BITS_PER_WORD));
2282 }
2283
2284 if (offset >= HOST_BITS_PER_WIDE_INT)
2285 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2286 else
2287 {
2288 val >>= offset;
2289 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2290 val = trunc_int_for_mode (val, outermode);
2291 return GEN_INT (val);
2292 }
2293 default:
2294 break;
2295 }
2296 }
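/* For example, assuming a little-endian target and an SImode operand,
   (subreg:QI (const_int 0x12345678) 1) reads the bits at offset 8 and
   folds to (const_int 0x56); on a big-endian target the offset is
   mirrored first, so the same subreg folds to (const_int 0x34).  */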
2297
2298 /* Changing mode twice with SUBREG => just change it once,
2299 or not at all if changing back to the starting mode of OP.  */
2300 if (GET_CODE (op) == SUBREG)
2301 {
2302 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2303 unsigned int final_offset = byte + SUBREG_BYTE (op);
2304 rtx new;
2305
2306 if (outermode == innermostmode
2307 && byte == 0 && SUBREG_BYTE (op) == 0)
2308 return SUBREG_REG (op);
2309
2310 if ((WORDS_BIG_ENDIAN || BYTES_BIG_ENDIAN)
2311 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode)
2312 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (innermostmode))
2313 {
2314 /* The inner SUBREG is paradoxical and the outer one is not.  On
2315 big-endian targets we have to special-case this.  */
2316 if (SUBREG_BYTE (op))
2317 abort ();	/* Can a paradoxical subreg have a nonzero offset?  */
2318 if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
2319 final_offset = (byte - GET_MODE_SIZE (innermode)
2320 + GET_MODE_SIZE (innermostmode));
2321 else if (WORDS_BIG_ENDIAN)
2322 final_offset = ((final_offset % UNITS_PER_WORD)
2323 + ((byte - GET_MODE_SIZE (innermode)
2324 + GET_MODE_SIZE (innermostmode))
2325 * UNITS_PER_WORD) / UNITS_PER_WORD);
2326 else
2327 final_offset = (((final_offset * UNITS_PER_WORD)
2328 / UNITS_PER_WORD)
2329 + ((byte - GET_MODE_SIZE (innermode)
2330 + GET_MODE_SIZE (innermostmode))
2331 % UNITS_PER_WORD));
2332 }
2333
2334 /* Recurse for further possible simplifications.  */
2335 new = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
2336 final_offset);
2337 if (new)
2338 return new;
2339 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2340 }
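/* For example, (subreg:QI (subreg:HI (reg:SI 100) 0) 0) on a
   little-endian target computes final_offset == 0 and, since register
   100 is a pseudo that the recursion cannot simplify further, returns
   the single (subreg:QI (reg:SI 100) 0).  */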
2341
2342 /* SUBREG of a hard register => just change the register number
2343 and/or mode. If the hard register is not valid in that mode,
2344 suppress this simplification. If the hard register is the stack,
2345 frame, or argument pointer, leave this as a SUBREG. */
2346
2347 if (GET_CODE (op) == REG
2348 && REGNO (op) < FIRST_PSEUDO_REGISTER
2349 && REGNO (op) != FRAME_POINTER_REGNUM
2350 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2351 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2352 #endif
2353 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2354 && REGNO (op) != ARG_POINTER_REGNUM
2355 #endif
2356 && REGNO (op) != STACK_POINTER_REGNUM)
2357 {
2358 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2359 0);
2360
2361 if (HARD_REGNO_MODE_OK (final_regno, outermode))
2362 return gen_rtx_REG (outermode, final_regno);
2363 }
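/* For example, on a little-endian target whose hard register 0 can
   hold both DImode and SImode (a hypothetical layout),
   (subreg:SI (reg:DI 0) 0) becomes plain (reg:SI 0); the stack, frame
   and argument pointers are deliberately left as SUBREGs.  */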
2364
2365 /* If we have a SUBREG of a register that we are replacing and we are
2366 replacing it with a MEM, make a new MEM and try replacing the
2367 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2368 or if we would be widening it. */
2369
2370 if (GET_CODE (op) == MEM
2371 && ! mode_dependent_address_p (XEXP (op, 0))
2372 && ! MEM_VOLATILE_P (op)
2373 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2374 {
2375 rtx new;
2376
2377 new = gen_rtx_MEM (outermode, plus_constant (XEXP (op, 0), byte));
2378 MEM_COPY_ATTRIBUTES (new, op);
2379 return new;
2380 }
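/* For example, (subreg:SI (mem:DI (reg:SI 100)) 4) becomes
   (mem:SI (plus:SI (reg:SI 100) (const_int 4))), provided the address
   is not mode-dependent and the MEM is not volatile.  */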
2381 return NULL_RTX;
2382 }
2383 /* Simplify X, an rtx expression.
2384
2385 Return the simplified expression or NULL if no simplifications
2386 were possible.
2387
2388 This is the preferred entry point into the simplification routines;
2389 however, we still allow passes to call the more specific routines.
2390
2391 Right now GCC has three (yes, three) major bodies of RTL simplification
2392 code that need to be unified.
2393
2394 1. fold_rtx in cse.c.  This code uses various CSE-specific
2395 information to aid in RTL simplification.
2396
2397 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2398 it uses combine-specific information to aid in RTL
2399 simplification.
2400
2401 3. The routines in this file.
2402
2403
2404 Long term we want to only have one body of simplification code; to
2405 get to that state I recommend the following steps:
2406
2407 1. Pore over fold_rtx & simplify_rtx and move any simplifications
2408 that do not depend on pass-specific state into these routines.
2409
2410 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2411 use this routine whenever possible.
2412
2413 3. Allow for pass dependent state to be provided to these
2414 routines and add simplifications based on the pass dependent
2415 state. Remove code from cse.c & combine.c that becomes
2416 redundant/dead.
2417
2418 It will take time, but ultimately the compiler will be easier to
2419 maintain and improve.  It's totally silly that when we add a
2420 simplification it needs to be added in 4 places (3 for RTL
2421 simplification and 1 for tree simplification).  */
2422
2423 rtx
2424 simplify_rtx (x)
2425 rtx x;
2426 {
2427 enum rtx_code code = GET_CODE (x);
2428 enum machine_mode mode = GET_MODE (x);
2429
2430 switch (GET_RTX_CLASS (code))
2431 {
2432 case '1':
2433 return simplify_unary_operation (code, mode,
2434 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2435 case '2':
2436 case 'c':
2437 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2438
2439 case '3':
2440 case 'b':
2441 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2442 XEXP (x, 0), XEXP (x, 1),
2443 XEXP (x, 2));
2444
2445 case '<':
2446 return simplify_relational_operation (code,
2447 ((GET_MODE (XEXP (x, 0))
2448 != VOIDmode)
2449 ? GET_MODE (XEXP (x, 0))
2450 : GET_MODE (XEXP (x, 1))),
2451 XEXP (x, 0), XEXP (x, 1));
2452 default:
2453 return NULL;
2454 }
2455 }
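/* For example, handing (plus:SI (const_int 2) (const_int 3)) to
   simplify_rtx dispatches through class '2' to
   simplify_binary_operation, which folds it to (const_int 5);
   expressions that cannot be improved come back as NULL.  */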