gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
4
5 This file is part of GNU CC.
6
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include <setjmp.h>
26
27 #include "rtl.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "toplev.h"
38 #include "output.h"
39 #include "ggc.h"
40
41 /* Simplification and canonicalization of RTL. */
42
43 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
44 virtual regs here because the simplify_*_operation routines are called
45 by integrate.c, which is called before virtual register instantiation.
46
47 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
48 a header file so that their definitions can be shared with the
49 identical copies in cse.c. Until then, do not change these
50 macros without also changing the copy in cse.c. */
51
52 #define FIXED_BASE_PLUS_P(X) \
53 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
54 || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
55 || (X) == virtual_stack_vars_rtx \
56 || (X) == virtual_incoming_args_rtx \
57 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
58 && (XEXP (X, 0) == frame_pointer_rtx \
59 || XEXP (X, 0) == hard_frame_pointer_rtx \
60 || (XEXP (X, 0) == arg_pointer_rtx \
61 && fixed_regs[ARG_POINTER_REGNUM]) \
62 || XEXP (X, 0) == virtual_stack_vars_rtx \
63 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
64 || GET_CODE (X) == ADDRESSOF)
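
/* For example, (reg frame_pointer) and (plus (reg frame_pointer)
   (const_int 8)) both satisfy FIXED_BASE_PLUS_P: a fixed base
   register, optionally displaced by a compile-time constant. */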
65
66 /* Similar, but also allows reference to the stack pointer.
67
68 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
69 arg_pointer_rtx by itself is nonzero, because on at least one machine,
70 the i960, the arg pointer is zero when it is unused. */
71
72 #define NONZERO_BASE_PLUS_P(X) \
73 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
74 || (X) == virtual_stack_vars_rtx \
75 || (X) == virtual_incoming_args_rtx \
76 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
77 && (XEXP (X, 0) == frame_pointer_rtx \
78 || XEXP (X, 0) == hard_frame_pointer_rtx \
79 || (XEXP (X, 0) == arg_pointer_rtx \
80 && fixed_regs[ARG_POINTER_REGNUM]) \
81 || XEXP (X, 0) == virtual_stack_vars_rtx \
82 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
83 || (X) == stack_pointer_rtx \
84 || (X) == virtual_stack_dynamic_rtx \
85 || (X) == virtual_outgoing_args_rtx \
86 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
87 && (XEXP (X, 0) == stack_pointer_rtx \
88 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
89 || XEXP (X, 0) == virtual_outgoing_args_rtx)) \
90 || GET_CODE (X) == ADDRESSOF)
91
92 /* Much code operates on (low, high) pairs; the low value is an
93 unsigned wide int, the high value a signed wide int. We
94 occasionally need to sign extend from low to high as if low were a
95 signed wide int. */
96 #define HWI_SIGN_EXTEND(low) \
97 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
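
/* For example, with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND (1) is 0
   and HWI_SIGN_EXTEND ((HOST_WIDE_INT) -1) is -1: the high word becomes
   all-zeros or all-ones according to the sign bit of the low word. */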
98
99 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
100 enum machine_mode, rtx, rtx));
101 static void check_fold_consts PARAMS ((PTR));
102 \f
103 /* Make a binary operation by properly ordering the operands and
104 seeing if the expression folds. */
105
106 rtx
107 simplify_gen_binary (code, mode, op0, op1)
108 enum rtx_code code;
109 enum machine_mode mode;
110 rtx op0, op1;
111 {
112 rtx tem;
113
114 /* Put complex operands first and constants second if commutative. */
115 if (GET_RTX_CLASS (code) == 'c'
116 && swap_commutative_operands_p (op0, op1))
117 tem = op0, op0 = op1, op1 = tem;
118
119 /* If this simplifies, do it. */
120 tem = simplify_binary_operation (code, mode, op0, op1);
121
122 if (tem)
123 return tem;
124
125 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
126 just form the operation. */
127
128 if (code == PLUS && GET_CODE (op1) == CONST_INT
129 && GET_MODE (op0) != VOIDmode)
130 return plus_constant (op0, INTVAL (op1));
131 else if (code == MINUS && GET_CODE (op1) == CONST_INT
132 && GET_MODE (op0) != VOIDmode)
133 return plus_constant (op0, - INTVAL (op1));
134 else
135 return gen_rtx_fmt_ee (code, mode, op0, op1);
136 }
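
/* An illustrative sketch of how the routine above is used; the
   function name is hypothetical and the block is not compiled. */
#if 0
static rtx example_fold_add PARAMS ((void));

static rtx
example_fold_add ()
{
  /* (plus (const_int 4) (const_int 3)) folds to (const_int 7);
     simplify_binary_operation does the arithmetic, so no PLUS rtx
     is ever built. */
  return simplify_gen_binary (PLUS, SImode, GEN_INT (4), GEN_INT (3));
}
#endif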
137 \f
138 /* If X is a MEM referencing the constant pool, return the real value.
139 Otherwise return X. */
140 rtx
141 avoid_constant_pool_reference (x)
142 rtx x;
143 {
144 rtx c, addr;
145 enum machine_mode cmode;
146
147 if (GET_CODE (x) != MEM)
148 return x;
149 addr = XEXP (x, 0);
150
151 if (GET_CODE (addr) != SYMBOL_REF
152 || ! CONSTANT_POOL_ADDRESS_P (addr))
153 return x;
154
155 c = get_pool_constant (addr);
156 cmode = get_pool_mode (addr);
157
158 /* If we're accessing the constant in a different mode than it was
159 originally stored, attempt to fix that up via subreg simplifications.
160 If that fails we have no choice but to return the original memory. */
161 if (cmode != GET_MODE (x))
162 {
163 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
164 return c ? c : x;
165 }
166
167 return c;
168 }
169 \f
170 /* Make a unary operation by first seeing if it folds and otherwise making
171 the specified operation. */
172
173 rtx
174 simplify_gen_unary (code, mode, op, op_mode)
175 enum rtx_code code;
176 enum machine_mode mode;
177 rtx op;
178 enum machine_mode op_mode;
179 {
180 rtx tem;
181
182 /* If this simplifies, use it. */
183 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
184 return tem;
185
186 return gen_rtx_fmt_e (code, mode, op);
187 }
188
189 /* Likewise for ternary operations. */
190
191 rtx
192 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
193 enum rtx_code code;
194 enum machine_mode mode, op0_mode;
195 rtx op0, op1, op2;
196 {
197 rtx tem;
198
199 /* If this simplifies, use it. */
200 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
201 op0, op1, op2)))
202 return tem;
203
204 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
205 }
206 \f
207 /* Likewise, for relational operations.
208 CMP_MODE specifies the mode in which the comparison
209 is done. */
210
211 rtx
212 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
213 enum rtx_code code;
214 enum machine_mode mode;
215 enum machine_mode cmp_mode;
216 rtx op0, op1;
217 {
218 rtx tem;
219
220 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
221 return tem;
222
223 /* Put complex operands first and constants second. */
224 if (swap_commutative_operands_p (op0, op1))
225 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
226
227 return gen_rtx_fmt_ee (code, mode, op0, op1);
228 }
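
/* For example, if the operands of (lt (const_int 3) (reg X)) are
   swapped to put the constant second, the code must become GT:
   (gt (reg X) (const_int 3)) tests the same condition with the
   operands reversed. */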
229 \f
230 /* Replace all occurrences of OLD in X with NEW and try to simplify the
231 resulting RTX. Return a new RTX which is as simplified as possible. */
232
233 rtx
234 simplify_replace_rtx (x, old, new)
235 rtx x;
236 rtx old;
237 rtx new;
238 {
239 enum rtx_code code = GET_CODE (x);
240 enum machine_mode mode = GET_MODE (x);
241
242 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
243 to build a new expression substituting recursively. If we can't do
244 anything, return our input. */
245
246 if (x == old)
247 return new;
248
249 switch (GET_RTX_CLASS (code))
250 {
251 case '1':
252 {
253 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
254 rtx op = (XEXP (x, 0) == old
255 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
256
257 return simplify_gen_unary (code, mode, op, op_mode);
258 }
259
260 case '2':
261 case 'c':
262 return
263 simplify_gen_binary (code, mode,
264 simplify_replace_rtx (XEXP (x, 0), old, new),
265 simplify_replace_rtx (XEXP (x, 1), old, new));
266 case '<':
267 {
268 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
269 ? GET_MODE (XEXP (x, 0))
270 : GET_MODE (XEXP (x, 1)));
271 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
272 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
273
274 return
275 simplify_gen_relational (code, mode,
276 (op_mode != VOIDmode
277 ? op_mode
278 : GET_MODE (op0) != VOIDmode
279 ? GET_MODE (op0)
280 : GET_MODE (op1)),
281 op0, op1);
282 }
283
284 case '3':
285 case 'b':
286 {
287 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
288 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
289
290 return
291 simplify_gen_ternary (code, mode,
292 (op_mode != VOIDmode
293 ? op_mode
294 : GET_MODE (op0)),
295 op0,
296 simplify_replace_rtx (XEXP (x, 1), old, new),
297 simplify_replace_rtx (XEXP (x, 2), old, new));
298 }
299
300 case 'x':
301 /* The only case we try to handle is a SUBREG. */
302 if (code == SUBREG)
303 {
304 rtx exp;
305 exp = simplify_gen_subreg (GET_MODE (x),
306 simplify_replace_rtx (SUBREG_REG (x),
307 old, new),
308 GET_MODE (SUBREG_REG (x)),
309 SUBREG_BYTE (x));
310 if (exp)
311 x = exp;
312 }
313 return x;
314
315 default:
316 if (GET_CODE (x) == MEM)
317 return
318 replace_equiv_address_nv (x,
319 simplify_replace_rtx (XEXP (x, 0),
320 old, new));
321
322 return x;
323 }
324 return x;
325 }
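
/* An illustrative sketch of simplify_replace_rtx; the function name
   is hypothetical and the block is not compiled. */
#if 0
static rtx example_replace_and_fold PARAMS ((rtx, rtx));

static rtx
example_replace_and_fold (x, old)
     rtx x;
     rtx old;
{
  /* If X is (plus OLD (const_int 1)), replacing OLD with (const_int 7)
     rebuilds the sum through simplify_gen_binary, so the result folds
     all the way down to (const_int 8). */
  return simplify_replace_rtx (x, old, GEN_INT (7));
}
#endif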
326 \f
327 /* Try to simplify a unary operation CODE whose output mode is to be
328 MODE with input operand OP whose mode was originally OP_MODE.
329 Return zero if no simplification can be made. */
330
331 rtx
332 simplify_unary_operation (code, mode, op, op_mode)
333 enum rtx_code code;
334 enum machine_mode mode;
335 rtx op;
336 enum machine_mode op_mode;
337 {
338 unsigned int width = GET_MODE_BITSIZE (mode);
339 rtx trueop = avoid_constant_pool_reference (op);
340
341 /* The order of these tests is critical so that, for example, we don't
342 check the wrong mode (input vs. output) for a conversion operation,
343 such as FIX. At some point, this should be simplified. */
344
345 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
346
347 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
348 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
349 {
350 HOST_WIDE_INT hv, lv;
351 REAL_VALUE_TYPE d;
352
353 if (GET_CODE (trueop) == CONST_INT)
354 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
355 else
356 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
357
358 #ifdef REAL_ARITHMETIC
359 REAL_VALUE_FROM_INT (d, lv, hv, mode);
360 #else
361 if (hv < 0)
362 {
363 d = (double) (~ hv);
364 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
365 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
366 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
367 d = (- d - 1.0);
368 }
369 else
370 {
371 d = (double) hv;
372 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
373 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
374 d += (double) (unsigned HOST_WIDE_INT) lv;
375 }
376 #endif /* REAL_ARITHMETIC */
377 d = real_value_truncate (mode, d);
378 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
379 }
380 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
381 && (GET_CODE (trueop) == CONST_DOUBLE
382 || GET_CODE (trueop) == CONST_INT))
383 {
384 HOST_WIDE_INT hv, lv;
385 REAL_VALUE_TYPE d;
386
387 if (GET_CODE (trueop) == CONST_INT)
388 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
389 else
390 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
391
392 if (op_mode == VOIDmode)
393 {
394 /* We don't know how to interpret negative-looking numbers in
395 this case, so don't try to fold those. */
396 if (hv < 0)
397 return 0;
398 }
399 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
400 ;
401 else
402 hv = 0, lv &= GET_MODE_MASK (op_mode);
403
404 #ifdef REAL_ARITHMETIC
405 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
406 #else
407
408 d = (double) (unsigned HOST_WIDE_INT) hv;
409 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
410 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
411 d += (double) (unsigned HOST_WIDE_INT) lv;
412 #endif /* REAL_ARITHMETIC */
413 d = real_value_truncate (mode, d);
414 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
415 }
416 #endif
417
418 if (GET_CODE (trueop) == CONST_INT
419 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
420 {
421 register HOST_WIDE_INT arg0 = INTVAL (trueop);
422 register HOST_WIDE_INT val;
423
424 switch (code)
425 {
426 case NOT:
427 val = ~ arg0;
428 break;
429
430 case NEG:
431 val = - arg0;
432 break;
433
434 case ABS:
435 val = (arg0 >= 0 ? arg0 : - arg0);
436 break;
437
438 case FFS:
439 /* Don't use ffs here. Instead, isolate the low-order set bit and
440 take its position. If arg0 is zero, this will return 0, as desired. */
441 arg0 &= GET_MODE_MASK (mode);
442 val = exact_log2 (arg0 & (- arg0)) + 1;
443 break;
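
	  /* For example, in the FFS case above, with arg0 == 12 (binary
	     1100), arg0 & -arg0 == 4 isolates the lowest set bit,
	     exact_log2 (4) == 2 gives its position, and adding 1 yields
	     the ffs-style answer val == 3. */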
444
445 case TRUNCATE:
446 val = arg0;
447 break;
448
449 case ZERO_EXTEND:
450 if (op_mode == VOIDmode)
451 op_mode = mode;
452 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
453 {
454 /* If we were really extending the mode,
455 we would have to distinguish between zero-extension
456 and sign-extension. */
457 if (width != GET_MODE_BITSIZE (op_mode))
458 abort ();
459 val = arg0;
460 }
461 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
462 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
463 else
464 return 0;
465 break;
466
467 case SIGN_EXTEND:
468 if (op_mode == VOIDmode)
469 op_mode = mode;
470 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
471 {
472 /* If we were really extending the mode,
473 we would have to distinguish between zero-extension
474 and sign-extension. */
475 if (width != GET_MODE_BITSIZE (op_mode))
476 abort ();
477 val = arg0;
478 }
479 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
480 {
481 val
482 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
483 if (val
484 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
485 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
486 }
487 else
488 return 0;
489 break;
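
	  /* For example, sign-extending the QImode value 0xff: masking
	     leaves val == 0xff, bit 7 is set, so subtracting
	     (HOST_WIDE_INT) 1 << 8 == 0x100 gives val == -1, the correct
	     sign-extended result. */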
490
491 case SQRT:
492 case FLOAT_EXTEND:
493 case FLOAT_TRUNCATE:
494 return 0;
495
496 default:
497 abort ();
498 }
499
500 val = trunc_int_for_mode (val, mode);
501
502 return GEN_INT (val);
503 }
504
505 /* We can do some operations on integer CONST_DOUBLEs. Also allow
506 for a DImode operation on a CONST_INT. */
507 else if (GET_MODE (trueop) == VOIDmode && width <= HOST_BITS_PER_INT * 2
508 && (GET_CODE (trueop) == CONST_DOUBLE
509 || GET_CODE (trueop) == CONST_INT))
510 {
511 unsigned HOST_WIDE_INT l1, lv;
512 HOST_WIDE_INT h1, hv;
513
514 if (GET_CODE (trueop) == CONST_DOUBLE)
515 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
516 else
517 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
518
519 switch (code)
520 {
521 case NOT:
522 lv = ~ l1;
523 hv = ~ h1;
524 break;
525
526 case NEG:
527 neg_double (l1, h1, &lv, &hv);
528 break;
529
530 case ABS:
531 if (h1 < 0)
532 neg_double (l1, h1, &lv, &hv);
533 else
534 lv = l1, hv = h1;
535 break;
536
537 case FFS:
538 hv = 0;
539 if (l1 == 0)
540 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
541 else
542 lv = exact_log2 (l1 & (-l1)) + 1;
543 break;
544
545 case TRUNCATE:
546 /* This is just a change-of-mode, so do nothing. */
547 lv = l1, hv = h1;
548 break;
549
550 case ZERO_EXTEND:
551 if (op_mode == VOIDmode
552 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
553 return 0;
554
555 hv = 0;
556 lv = l1 & GET_MODE_MASK (op_mode);
557 break;
558
559 case SIGN_EXTEND:
560 if (op_mode == VOIDmode
561 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
562 return 0;
563 else
564 {
565 lv = l1 & GET_MODE_MASK (op_mode);
566 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
567 && (lv & ((HOST_WIDE_INT) 1
568 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
569 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
570
571 hv = HWI_SIGN_EXTEND (lv);
572 }
573 break;
574
575 case SQRT:
576 return 0;
577
578 default:
579 return 0;
580 }
581
582 return immed_double_const (lv, hv, mode);
583 }
584
585 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
586 else if (GET_CODE (trueop) == CONST_DOUBLE
587 && GET_MODE_CLASS (mode) == MODE_FLOAT)
588 {
589 REAL_VALUE_TYPE d;
590 jmp_buf handler;
591 rtx x;
592
593 if (setjmp (handler))
594 /* There used to be a warning here, but that is inadvisable.
595 People may want to cause traps, and the natural way
596 to do it should not get a warning. */
597 return 0;
598
599 set_float_handler (handler);
600
601 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
602
603 switch (code)
604 {
605 case NEG:
606 d = REAL_VALUE_NEGATE (d);
607 break;
608
609 case ABS:
610 if (REAL_VALUE_NEGATIVE (d))
611 d = REAL_VALUE_NEGATE (d);
612 break;
613
614 case FLOAT_TRUNCATE:
615 d = real_value_truncate (mode, d);
616 break;
617
618 case FLOAT_EXTEND:
619 /* All this does is change the mode. */
620 break;
621
622 case FIX:
623 d = REAL_VALUE_RNDZINT (d);
624 break;
625
626 case UNSIGNED_FIX:
627 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
628 break;
629
630 case SQRT:
631 return 0;
632
633 default:
634 abort ();
635 }
636
637 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
638 set_float_handler (NULL);
639 return x;
640 }
641
642 else if (GET_CODE (trueop) == CONST_DOUBLE
643 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
644 && GET_MODE_CLASS (mode) == MODE_INT
645 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
646 {
647 REAL_VALUE_TYPE d;
648 jmp_buf handler;
649 HOST_WIDE_INT val;
650
651 if (setjmp (handler))
652 return 0;
653
654 set_float_handler (handler);
655
656 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
657
658 switch (code)
659 {
660 case FIX:
661 val = REAL_VALUE_FIX (d);
662 break;
663
664 case UNSIGNED_FIX:
665 val = REAL_VALUE_UNSIGNED_FIX (d);
666 break;
667
668 default:
669 abort ();
670 }
671
672 set_float_handler (NULL);
673
674 val = trunc_int_for_mode (val, mode);
675
676 return GEN_INT (val);
677 }
678 #endif
679 /* This was formerly used only for non-IEEE float.
680 eggert@twinsun.com says it is safe for IEEE also. */
681 else
682 {
683 enum rtx_code reversed;
684 /* There are some simplifications we can do even if the operands
685 aren't constant. */
686 switch (code)
687 {
688 case NOT:
689 /* (not (not X)) == X. */
690 if (GET_CODE (op) == NOT)
691 return XEXP (op, 0);
692
693 /* (not (eq X Y)) == (ne X Y), etc. */
694 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
695 && ((reversed = reversed_comparison_code (op, NULL_RTX))
696 != UNKNOWN))
697 return gen_rtx_fmt_ee (reversed,
698 op_mode, XEXP (op, 0), XEXP (op, 1));
699 break;
700
701 case NEG:
702 /* (neg (neg X)) == X. */
703 if (GET_CODE (op) == NEG)
704 return XEXP (op, 0);
705 break;
706
707 case SIGN_EXTEND:
708 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
709 becomes just the MINUS if its mode is MODE. This allows
710 folding switch statements on machines using casesi (such as
711 the VAX). */
712 if (GET_CODE (op) == TRUNCATE
713 && GET_MODE (XEXP (op, 0)) == mode
714 && GET_CODE (XEXP (op, 0)) == MINUS
715 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
716 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
717 return XEXP (op, 0);
718
719 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
720 if (! POINTERS_EXTEND_UNSIGNED
721 && mode == Pmode && GET_MODE (op) == ptr_mode
722 && (CONSTANT_P (op)
723 || (GET_CODE (op) == SUBREG
724 && GET_CODE (SUBREG_REG (op)) == REG
725 && REG_POINTER (SUBREG_REG (op))
726 && GET_MODE (SUBREG_REG (op)) == Pmode)))
727 return convert_memory_address (Pmode, op);
728 #endif
729 break;
730
731 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
732 case ZERO_EXTEND:
733 if (POINTERS_EXTEND_UNSIGNED > 0
734 && mode == Pmode && GET_MODE (op) == ptr_mode
735 && (CONSTANT_P (op)
736 || (GET_CODE (op) == SUBREG
737 && GET_CODE (SUBREG_REG (op)) == REG
738 && REG_POINTER (SUBREG_REG (op))
739 && GET_MODE (SUBREG_REG (op)) == Pmode)))
740 return convert_memory_address (Pmode, op);
741 break;
742 #endif
743
744 default:
745 break;
746 }
747
748 return 0;
749 }
750 }
751 \f
752 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
753 and OP1. Return 0 if no simplification is possible.
754
755 Don't use this for relational operations such as EQ or LT.
756 Use simplify_relational_operation instead. */
757
758 rtx
759 simplify_binary_operation (code, mode, op0, op1)
760 enum rtx_code code;
761 enum machine_mode mode;
762 rtx op0, op1;
763 {
764 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
765 HOST_WIDE_INT val;
766 unsigned int width = GET_MODE_BITSIZE (mode);
767 rtx tem;
768 rtx trueop0 = avoid_constant_pool_reference (op0);
769 rtx trueop1 = avoid_constant_pool_reference (op1);
770
771 /* Relational operations don't work here. We must know the mode
772 of the operands in order to do the comparison correctly.
773 Assuming a full word can give incorrect results.
774 Consider comparing 128 with -128 in QImode. */
775
776 if (GET_RTX_CLASS (code) == '<')
777 abort ();
778
779 /* Make sure the constant is second. */
780 if (GET_RTX_CLASS (code) == 'c'
781 && swap_commutative_operands_p (trueop0, trueop1))
782 {
783 tem = op0, op0 = op1, op1 = tem;
784 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
785 }
786
787 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
788 if (GET_MODE_CLASS (mode) == MODE_FLOAT
789 && GET_CODE (trueop0) == CONST_DOUBLE
790 && GET_CODE (trueop1) == CONST_DOUBLE
791 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
792 {
793 REAL_VALUE_TYPE f0, f1, value;
794 jmp_buf handler;
795
796 if (setjmp (handler))
797 return 0;
798
799 set_float_handler (handler);
800
801 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
802 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
803 f0 = real_value_truncate (mode, f0);
804 f1 = real_value_truncate (mode, f1);
805
806 #ifdef REAL_ARITHMETIC
807 #ifndef REAL_INFINITY
808 if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
809 return 0;
810 #endif
811 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
812 #else
813 switch (code)
814 {
815 case PLUS:
816 value = f0 + f1;
817 break;
818 case MINUS:
819 value = f0 - f1;
820 break;
821 case MULT:
822 value = f0 * f1;
823 break;
824 case DIV:
825 #ifndef REAL_INFINITY
826 if (f1 == 0)
827 return 0;
828 #endif
829 value = f0 / f1;
830 break;
831 case SMIN:
832 value = MIN (f0, f1);
833 break;
834 case SMAX:
835 value = MAX (f0, f1);
836 break;
837 default:
838 abort ();
839 }
840 #endif
841
842 value = real_value_truncate (mode, value);
843 set_float_handler (NULL);
844 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
845 }
846 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
847
848 /* We can fold some multi-word operations. */
849 if (GET_MODE_CLASS (mode) == MODE_INT
850 && width == HOST_BITS_PER_WIDE_INT * 2
851 && (GET_CODE (trueop0) == CONST_DOUBLE
852 || GET_CODE (trueop0) == CONST_INT)
853 && (GET_CODE (trueop1) == CONST_DOUBLE
854 || GET_CODE (trueop1) == CONST_INT))
855 {
856 unsigned HOST_WIDE_INT l1, l2, lv;
857 HOST_WIDE_INT h1, h2, hv;
858
859 if (GET_CODE (trueop0) == CONST_DOUBLE)
860 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
861 else
862 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
863
864 if (GET_CODE (trueop1) == CONST_DOUBLE)
865 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
866 else
867 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
868
869 switch (code)
870 {
871 case MINUS:
872 /* A - B == A + (-B). */
873 neg_double (l2, h2, &lv, &hv);
874 l2 = lv, h2 = hv;
875
876 /* ... fall through ... */
877
878 case PLUS:
879 add_double (l1, h1, l2, h2, &lv, &hv);
880 break;
881
882 case MULT:
883 mul_double (l1, h1, l2, h2, &lv, &hv);
884 break;
885
886 case DIV: case MOD: case UDIV: case UMOD:
887 /* We'd need to include tree.h to do this and it doesn't seem worth
888 it. */
889 return 0;
890
891 case AND:
892 lv = l1 & l2, hv = h1 & h2;
893 break;
894
895 case IOR:
896 lv = l1 | l2, hv = h1 | h2;
897 break;
898
899 case XOR:
900 lv = l1 ^ l2, hv = h1 ^ h2;
901 break;
902
903 case SMIN:
904 if (h1 < h2
905 || (h1 == h2
906 && ((unsigned HOST_WIDE_INT) l1
907 < (unsigned HOST_WIDE_INT) l2)))
908 lv = l1, hv = h1;
909 else
910 lv = l2, hv = h2;
911 break;
912
913 case SMAX:
914 if (h1 > h2
915 || (h1 == h2
916 && ((unsigned HOST_WIDE_INT) l1
917 > (unsigned HOST_WIDE_INT) l2)))
918 lv = l1, hv = h1;
919 else
920 lv = l2, hv = h2;
921 break;
922
923 case UMIN:
924 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
925 || (h1 == h2
926 && ((unsigned HOST_WIDE_INT) l1
927 < (unsigned HOST_WIDE_INT) l2)))
928 lv = l1, hv = h1;
929 else
930 lv = l2, hv = h2;
931 break;
932
933 case UMAX:
934 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
935 || (h1 == h2
936 && ((unsigned HOST_WIDE_INT) l1
937 > (unsigned HOST_WIDE_INT) l2)))
938 lv = l1, hv = h1;
939 else
940 lv = l2, hv = h2;
941 break;
942
943 case LSHIFTRT: case ASHIFTRT:
944 case ASHIFT:
945 case ROTATE: case ROTATERT:
946 #ifdef SHIFT_COUNT_TRUNCATED
947 if (SHIFT_COUNT_TRUNCATED)
948 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
949 #endif
950
951 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
952 return 0;
953
954 if (code == LSHIFTRT || code == ASHIFTRT)
955 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
956 code == ASHIFTRT);
957 else if (code == ASHIFT)
958 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
959 else if (code == ROTATE)
960 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
961 else /* code == ROTATERT */
962 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
963 break;
964
965 default:
966 return 0;
967 }
968
969 return immed_double_const (lv, hv, mode);
970 }
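
  /* For example, on a host with 32-bit HOST_WIDE_INT, a DImode PLUS of
     (const_double 0xffffffff 0) and (const_int 1) goes through
     add_double: the low words sum to zero with a carry into the high
     word, giving (const_double 0 1), i.e. 0x100000000. */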
971
972 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
973 || width > HOST_BITS_PER_WIDE_INT || width == 0)
974 {
975 /* Even if we can't compute a constant result,
976 there are some cases worth simplifying. */
977
978 switch (code)
979 {
980 case PLUS:
981 /* In IEEE floating point, x+0 is not the same as x. Similarly
982 for the other optimizations below. */
983 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
984 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
985 break;
986
987 if (trueop1 == CONST0_RTX (mode))
988 return op0;
989
990 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
991 if (GET_CODE (op0) == NEG)
992 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
993 else if (GET_CODE (op1) == NEG)
994 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
995
996 /* (~a) + 1 -> -a */
997 if (INTEGRAL_MODE_P (mode)
998 && GET_CODE (op0) == NOT
999 && trueop1 == const1_rtx)
1000 return gen_rtx_NEG (mode, XEXP (op0, 0));
1001
1002 /* Handle both-operands-constant cases. We can only add
1003 CONST_INTs to constants since the sum of relocatable symbols
1004 can't be handled by most assemblers. Don't add CONST_INT
1005 to CONST_INT since overflow won't be computed properly if wider
1006 than HOST_BITS_PER_WIDE_INT. */
1007
1008 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1009 && GET_CODE (op1) == CONST_INT)
1010 return plus_constant (op0, INTVAL (op1));
1011 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1012 && GET_CODE (op0) == CONST_INT)
1013 return plus_constant (op1, INTVAL (op0));
1014
1015 /* See if this is something like X * C - X or vice versa or
1016 if the multiplication is written as a shift. If so, we can
1017 distribute and make a new multiply, shift, or maybe just
1018 have X (if C is 2 in the example above). But don't make
1019 real multiply if we didn't have one before. */
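
	  /* For example, (plus (mult X (const_int 2)) X) distributes to
	     (mult X (const_int 3)), and (plus (neg X) X) folds to zero.
	     A pure shift such as (plus (ashift X (const_int 2)) X) is
	     left alone, since simplifying it would create a MULT where
	     none existed before. */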
1020
1021 if (! FLOAT_MODE_P (mode))
1022 {
1023 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1024 rtx lhs = op0, rhs = op1;
1025 int had_mult = 0;
1026
1027 if (GET_CODE (lhs) == NEG)
1028 coeff0 = -1, lhs = XEXP (lhs, 0);
1029 else if (GET_CODE (lhs) == MULT
1030 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1031 {
1032 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1033 had_mult = 1;
1034 }
1035 else if (GET_CODE (lhs) == ASHIFT
1036 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1037 && INTVAL (XEXP (lhs, 1)) >= 0
1038 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1039 {
1040 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1041 lhs = XEXP (lhs, 0);
1042 }
1043
1044 if (GET_CODE (rhs) == NEG)
1045 coeff1 = -1, rhs = XEXP (rhs, 0);
1046 else if (GET_CODE (rhs) == MULT
1047 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1048 {
1049 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1050 had_mult = 1;
1051 }
1052 else if (GET_CODE (rhs) == ASHIFT
1053 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1054 && INTVAL (XEXP (rhs, 1)) >= 0
1055 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1056 {
1057 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1058 rhs = XEXP (rhs, 0);
1059 }
1060
1061 if (rtx_equal_p (lhs, rhs))
1062 {
1063 tem = simplify_gen_binary (MULT, mode, lhs,
1064 GEN_INT (coeff0 + coeff1));
1065 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1066 }
1067 }
1068
1069 /* If one of the operands is a PLUS or a MINUS, see if we can
1070 simplify this by the associative law.
1071 Don't use the associative law for floating point.
1072 The inaccuracy makes it nonassociative,
1073 and subtle programs can break if operations are associated. */
1074
1075 if (INTEGRAL_MODE_P (mode)
1076 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1077 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
1078 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1079 return tem;
1080 break;
1081
1082 case COMPARE:
1083 #ifdef HAVE_cc0
1084 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1085 using cc0, in which case we want to leave it as a COMPARE
1086 so we can distinguish it from a register-register-copy.
1087
1088 In IEEE floating point, x-0 is not the same as x. */
1089
1090 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1091 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1092 && trueop1 == CONST0_RTX (mode))
1093 return op0;
1094 #endif
1095
1096 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1097 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1098 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1099 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1100 {
1101 rtx xop00 = XEXP (op0, 0);
1102 rtx xop10 = XEXP (op1, 0);
1103
1104 #ifdef HAVE_cc0
1105 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1106 #else
1107 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1108 && GET_MODE (xop00) == GET_MODE (xop10)
1109 && REGNO (xop00) == REGNO (xop10)
1110 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1111 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1112 #endif
1113 return xop00;
1114 }
1115
1116 break;
1117 case MINUS:
1118 /* None of these optimizations can be done for IEEE
1119 floating point. */
1120 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1121 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
1122 break;
1123
1124 /* We can't assume x-x is 0 even with non-IEEE floating point,
1125 but since it is zero except in very strange circumstances, we
1126 will treat it as zero with -funsafe-math-optimizations. */
1127 if (rtx_equal_p (trueop0, trueop1)
1128 && ! side_effects_p (op0)
1129 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1130 return CONST0_RTX (mode);
1131
1132 /* Change subtraction from zero into negation. */
1133 if (trueop0 == CONST0_RTX (mode))
1134 return gen_rtx_NEG (mode, op1);
1135
1136 /* (-1 - a) is ~a. */
1137 if (trueop0 == constm1_rtx)
1138 return gen_rtx_NOT (mode, op1);
1139
1140 /* Subtracting 0 has no effect. */
1141 if (trueop1 == CONST0_RTX (mode))
1142 return op0;
1143
1144 /* See if this is something like X * C - X or vice versa or
1145 if the multiplication is written as a shift. If so, we can
1146 distribute and make a new multiply, shift, or maybe just
1147 have X (if C is 2 in the example above). But don't make
1148 real multiply if we didn't have one before. */
1149
1150 if (! FLOAT_MODE_P (mode))
1151 {
1152 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1153 rtx lhs = op0, rhs = op1;
1154 int had_mult = 0;
1155
1156 if (GET_CODE (lhs) == NEG)
1157 coeff0 = -1, lhs = XEXP (lhs, 0);
1158 else if (GET_CODE (lhs) == MULT
1159 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1160 {
1161 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1162 had_mult = 1;
1163 }
1164 else if (GET_CODE (lhs) == ASHIFT
1165 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1166 && INTVAL (XEXP (lhs, 1)) >= 0
1167 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1168 {
1169 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1170 lhs = XEXP (lhs, 0);
1171 }
1172
1173 if (GET_CODE (rhs) == NEG)
1174 coeff1 = -1, rhs = XEXP (rhs, 0);
1175 else if (GET_CODE (rhs) == MULT
1176 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1177 {
1178 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1179 had_mult = 1;
1180 }
1181 else if (GET_CODE (rhs) == ASHIFT
1182 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1183 && INTVAL (XEXP (rhs, 1)) >= 0
1184 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1185 {
1186 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1187 rhs = XEXP (rhs, 0);
1188 }
1189
1190 if (rtx_equal_p (lhs, rhs))
1191 {
1192 tem = simplify_gen_binary (MULT, mode, lhs,
1193 GEN_INT (coeff0 - coeff1));
1194 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1195 }
1196 }
1197
1198 /* (a - (-b)) -> (a + b). */
1199 if (GET_CODE (op1) == NEG)
1200 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1201
1202 /* If one of the operands is a PLUS or a MINUS, see if we can
1203 simplify this by the associative law.
1204 Don't use the associative law for floating point.
1205 The inaccuracy makes it nonassociative,
1206 and subtle programs can break if operations are associated. */
1207
1208 if (INTEGRAL_MODE_P (mode)
1209 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1210 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
1211 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1212 return tem;
1213
1214 /* Don't let a relocatable value get a negative coeff. */
1215 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1216 return plus_constant (op0, - INTVAL (op1));
1217
1218 /* (x - (x & y)) -> (x & ~y) */
1219 if (GET_CODE (op1) == AND)
1220 {
1221 if (rtx_equal_p (op0, XEXP (op1, 0)))
1222 return simplify_gen_binary (AND, mode, op0,
1223 gen_rtx_NOT (mode, XEXP (op1, 1)));
1224 if (rtx_equal_p (op0, XEXP (op1, 1)))
1225 return simplify_gen_binary (AND, mode, op0,
1226 gen_rtx_NOT (mode, XEXP (op1, 0)));
1227 }
1228 break;
1229
1230 case MULT:
1231 if (trueop1 == constm1_rtx)
1232 {
1233 tem = simplify_unary_operation (NEG, mode, op0, mode);
1234
1235 return tem ? tem : gen_rtx_NEG (mode, op0);
1236 }
1237
1238 /* In IEEE floating point, x*0 is not always 0. */
1239 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1240 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1241 && trueop1 == CONST0_RTX (mode)
1242 && ! side_effects_p (op0))
1243 return op1;
1244
1245 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1246 However, ANSI says we can drop signals,
1247 so we can do this anyway. */
1248 if (trueop1 == CONST1_RTX (mode))
1249 return op0;
1250
1251 /* Convert multiply by constant power of two into shift unless
1252 we are still generating RTL. This test is a kludge. */
1253 if (GET_CODE (trueop1) == CONST_INT
1254 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1255 /* If the mode is larger than the host word size, and the
1256 uppermost bit is set, then this isn't a power of two due
1257 to implicit sign extension. */
1258 && (width <= HOST_BITS_PER_WIDE_INT
1259 || val != HOST_BITS_PER_WIDE_INT - 1)
1260 && ! rtx_equal_function_value_matters)
1261 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1262
1263 if (GET_CODE (trueop1) == CONST_DOUBLE
1264 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT)
1265 {
1266 REAL_VALUE_TYPE d;
1267 jmp_buf handler;
1268 int op1is2, op1ism1;
1269
1270 if (setjmp (handler))
1271 return 0;
1272
1273 set_float_handler (handler);
1274 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1275 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
1276 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
1277 set_float_handler (NULL);
1278
1279 /* x*2 is x+x and x*(-1) is -x */
1280 if (op1is2 && GET_MODE (op0) == mode)
1281 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1282
1283 else if (op1ism1 && GET_MODE (op0) == mode)
1284 return gen_rtx_NEG (mode, op0);
1285 }
1286 break;
1287
1288 case IOR:
1289 if (trueop1 == const0_rtx)
1290 return op0;
1291 if (GET_CODE (trueop1) == CONST_INT
1292 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1293 == GET_MODE_MASK (mode)))
1294 return op1;
1295 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1296 return op0;
1297 /* A | (~A) -> -1 */
1298 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1299 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1300 && ! side_effects_p (op0)
1301 && GET_MODE_CLASS (mode) != MODE_CC)
1302 return constm1_rtx;
1303 break;
1304
1305 case XOR:
1306 if (trueop1 == const0_rtx)
1307 return op0;
1308 if (GET_CODE (trueop1) == CONST_INT
1309 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1310 == GET_MODE_MASK (mode)))
1311 return gen_rtx_NOT (mode, op0);
1312 if (trueop0 == trueop1 && ! side_effects_p (op0)
1313 && GET_MODE_CLASS (mode) != MODE_CC)
1314 return const0_rtx;
1315 break;
1316
1317 case AND:
1318 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1319 return const0_rtx;
1320 if (GET_CODE (trueop1) == CONST_INT
1321 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1322 == GET_MODE_MASK (mode)))
1323 return op0;
1324 if (trueop0 == trueop1 && ! side_effects_p (op0)
1325 && GET_MODE_CLASS (mode) != MODE_CC)
1326 return op0;
1327 /* A & (~A) -> 0 */
1328 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1329 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1330 && ! side_effects_p (op0)
1331 && GET_MODE_CLASS (mode) != MODE_CC)
1332 return const0_rtx;
1333 break;
1334
1335 case UDIV:
1336 /* Convert divide by power of two into shift (divide by 1 handled
1337 below). */
1338 if (GET_CODE (trueop1) == CONST_INT
1339 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1340 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1341
1342 /* ... fall through ... */
1343
1344 case DIV:
1345 if (trueop1 == CONST1_RTX (mode))
1346 return op0;
1347
1348 /* In IEEE floating point, 0/x is not always 0. */
1349 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1350 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1351 && trueop0 == CONST0_RTX (mode)
1352 && ! side_effects_p (op1))
1353 return op0;
1354
1355 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1356 /* Change division by a constant into multiplication. Only do
1357 this with -funsafe-math-optimizations. */
1358 else if (GET_CODE (trueop1) == CONST_DOUBLE
1359 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1360 && trueop1 != CONST0_RTX (mode)
1361 && flag_unsafe_math_optimizations)
1362 {
1363 REAL_VALUE_TYPE d;
1364 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1365
1366 if (! REAL_VALUES_EQUAL (d, dconst0))
1367 {
1368 #if defined (REAL_ARITHMETIC)
1369 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1370 return gen_rtx_MULT (mode, op0,
1371 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1372 #else
1373 return
1374 gen_rtx_MULT (mode, op0,
1375 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
1376 #endif
1377 }
1378 }
1379 #endif
1380 break;
1381
1382 case UMOD:
1383 /* Handle modulus by power of two (mod with 1 handled below). */
1384 if (GET_CODE (trueop1) == CONST_INT
1385 && exact_log2 (INTVAL (trueop1)) > 0)
1386 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1387
1388 /* ... fall through ... */
1389
1390 case MOD:
1391 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1392 && ! side_effects_p (op0) && ! side_effects_p (op1))
1393 return const0_rtx;
1394 break;
1395
1396 case ROTATERT:
1397 case ROTATE:
1398 /* Rotating ~0 always results in ~0. */
1399 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1400 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1401 && ! side_effects_p (op1))
1402 return op0;
1403
1404 /* ... fall through ... */
1405
1406 case ASHIFT:
1407 case ASHIFTRT:
1408 case LSHIFTRT:
1409 if (trueop1 == const0_rtx)
1410 return op0;
1411 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1412 return op0;
1413 break;
1414
1415 case SMIN:
1416 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1417 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1418 && ! side_effects_p (op0))
1419 return op1;
1420 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1421 return op0;
1422 break;
1423
1424 case SMAX:
1425 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1426 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1427 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1428 && ! side_effects_p (op0))
1429 return op1;
1430 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1431 return op0;
1432 break;
1433
1434 case UMIN:
1435 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1436 return op1;
1437 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1438 return op0;
1439 break;
1440
1441 case UMAX:
1442 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1443 return op1;
1444 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1445 return op0;
1446 break;
1447
1448 default:
1449 abort ();
1450 }
1451
1452 return 0;
1453 }
1454
1455 /* Get the integer argument values in two forms:
1456 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1457
1458 arg0 = INTVAL (trueop0);
1459 arg1 = INTVAL (trueop1);
1460
1461 if (width < HOST_BITS_PER_WIDE_INT)
1462 {
1463 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1464 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1465
1466 arg0s = arg0;
1467 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1468 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1469
1470 arg1s = arg1;
1471 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1472 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1473 }
1474 else
1475 {
1476 arg0s = arg0;
1477 arg1s = arg1;
1478 }
1479
1480 /* Compute the value of the arithmetic. */
1481
1482 switch (code)
1483 {
1484 case PLUS:
1485 val = arg0s + arg1s;
1486 break;
1487
1488 case MINUS:
1489 val = arg0s - arg1s;
1490 break;
1491
1492 case MULT:
1493 val = arg0s * arg1s;
1494 break;
1495
1496 case DIV:
1497 if (arg1s == 0
1498 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1499 && arg1s == -1))
1500 return 0;
1501 val = arg0s / arg1s;
1502 break;
1503
1504 case MOD:
1505 if (arg1s == 0
1506 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1507 && arg1s == -1))
1508 return 0;
1509 val = arg0s % arg1s;
1510 break;
1511
1512 case UDIV:
1513 if (arg1 == 0
1514 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1515 && arg1s == -1))
1516 return 0;
1517 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1518 break;
1519
1520 case UMOD:
1521 if (arg1 == 0
1522 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1523 && arg1s == -1))
1524 return 0;
1525 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1526 break;
1527
1528 case AND:
1529 val = arg0 & arg1;
1530 break;
1531
1532 case IOR:
1533 val = arg0 | arg1;
1534 break;
1535
1536 case XOR:
1537 val = arg0 ^ arg1;
1538 break;
1539
1540 case LSHIFTRT:
1541 /* If shift count is undefined, don't fold it; let the machine do
1542 what it wants. But truncate it if the machine will do that. */
1543 if (arg1 < 0)
1544 return 0;
1545
1546 #ifdef SHIFT_COUNT_TRUNCATED
1547 if (SHIFT_COUNT_TRUNCATED)
1548 arg1 %= width;
1549 #endif
1550
1551 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1552 break;
1553
1554 case ASHIFT:
1555 if (arg1 < 0)
1556 return 0;
1557
1558 #ifdef SHIFT_COUNT_TRUNCATED
1559 if (SHIFT_COUNT_TRUNCATED)
1560 arg1 %= width;
1561 #endif
1562
1563 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1564 break;
1565
1566 case ASHIFTRT:
1567 if (arg1 < 0)
1568 return 0;
1569
1570 #ifdef SHIFT_COUNT_TRUNCATED
1571 if (SHIFT_COUNT_TRUNCATED)
1572 arg1 %= width;
1573 #endif
1574
1575 val = arg0s >> arg1;
1576
1577 /* Bootstrap compiler may not have sign extended the right shift.
1578 Manually extend the sign to ensure bootstrap cc matches gcc. */
1579 if (arg0s < 0 && arg1 > 0)
1580 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1581
1582 break;
1583
1584 case ROTATERT:
1585 if (arg1 < 0)
1586 return 0;
1587
1588 arg1 %= width;
1589 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1590 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1591 break;
1592
1593 case ROTATE:
1594 if (arg1 < 0)
1595 return 0;
1596
1597 arg1 %= width;
1598 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1599 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1600 break;
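
	  /* For example, rotating the 8-bit value 0xb4 (1011 0100) left
	     by 3 gives (0xb4 << 3) | (0xb4 >> 5) == 0x5a5, which the mode
	     truncation below reduces to 0xa5 (1010 0101). */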
1601
1602 case COMPARE:
1603 /* Do nothing here. */
1604 return 0;
1605
1606 case SMIN:
1607 val = arg0s <= arg1s ? arg0s : arg1s;
1608 break;
1609
1610 case UMIN:
1611 val = ((unsigned HOST_WIDE_INT) arg0
1612 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1613 break;
1614
1615 case SMAX:
1616 val = arg0s > arg1s ? arg0s : arg1s;
1617 break;
1618
1619 case UMAX:
1620 val = ((unsigned HOST_WIDE_INT) arg0
1621 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1622 break;
1623
1624 default:
1625 abort ();
1626 }
1627
1628 val = trunc_int_for_mode (val, mode);
1629
1630 return GEN_INT (val);
1631 }
1632 \f
1633 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1634 PLUS or MINUS.
1635
1636 Rather than test for specific case, we do this by a brute-force method
1637 and do all possible simplifications until no more changes occur. Then
1638 we rebuild the operation. */
1639
1640 static rtx
1641 simplify_plus_minus (code, mode, op0, op1)
1642 enum rtx_code code;
1643 enum machine_mode mode;
1644 rtx op0, op1;
1645 {
1646 rtx ops[8];
1647 int negs[8];
1648 rtx result, tem;
1649 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
1650 int first = 1, negate = 0, changed;
1651 int i, j;
1652
1653 memset ((char *) ops, 0, sizeof ops);
1654
1655 /* Set up the two operands and then expand them until nothing has been
1656 changed. If we run out of room in our array, give up; this should
1657 almost never happen. */
1658
1659 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
1660
1661 changed = 1;
1662 while (changed)
1663 {
1664 changed = 0;
1665
1666 for (i = 0; i < n_ops; i++)
1667 switch (GET_CODE (ops[i]))
1668 {
1669 case PLUS:
1670 case MINUS:
1671 if (n_ops == 7)
1672 return 0;
1673
1674 ops[n_ops] = XEXP (ops[i], 1);
1675 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
1676 ops[i] = XEXP (ops[i], 0);
1677 input_ops++;
1678 changed = 1;
1679 break;
1680
1681 case NEG:
1682 ops[i] = XEXP (ops[i], 0);
1683 negs[i] = ! negs[i];
1684 changed = 1;
1685 break;
1686
1687 case CONST:
1688 ops[i] = XEXP (ops[i], 0);
1689 input_consts++;
1690 changed = 1;
1691 break;
1692
1693 case NOT:
1694 /* ~a -> (-a - 1) */
1695 if (n_ops != 7)
1696 {
1697 ops[n_ops] = constm1_rtx;
1698 negs[n_ops++] = negs[i];
1699 ops[i] = XEXP (ops[i], 0);
1700 negs[i] = ! negs[i];
1701 changed = 1;
1702 }
1703 break;
1704
1705 case CONST_INT:
1706 if (negs[i])
1707 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
1708 break;
1709
1710 default:
1711 break;
1712 }
1713 }
1714
1715 /* If we only have two operands, we can't do anything. */
1716 if (n_ops <= 2)
1717 return 0;
1718
1719 /* Now simplify each pair of operands until nothing changes. The first
1720 time through just simplify constants against each other. */
1721
1722 changed = 1;
1723 while (changed)
1724 {
1725 changed = first;
1726
1727 for (i = 0; i < n_ops - 1; i++)
1728 for (j = i + 1; j < n_ops; j++)
1729 if (ops[i] != 0 && ops[j] != 0
1730 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
1731 {
1732 rtx lhs = ops[i], rhs = ops[j];
1733 enum rtx_code ncode = PLUS;
1734
1735 if (negs[i] && ! negs[j])
1736 lhs = ops[j], rhs = ops[i], ncode = MINUS;
1737 else if (! negs[i] && negs[j])
1738 ncode = MINUS;
1739
1740 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1741 if (tem)
1742 {
1743 ops[i] = tem, ops[j] = 0;
1744 negs[i] = negs[i] && negs[j];
1745 if (GET_CODE (tem) == NEG)
1746 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
1747
1748 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
1749 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
1750 changed = 1;
1751 }
1752 }
1753
1754 first = 0;
1755 }
1756
1757 /* Pack all the operands to the lower-numbered entries and give up if
1758 we didn't reduce the number of operands we had. Make sure we
1759 count a CONST as two operands. If we have the same number of
1760 operands, but have made more CONSTs than we had, this is also
1761 an improvement, so accept it. */
1762
1763 for (i = 0, j = 0; j < n_ops; j++)
1764 if (ops[j] != 0)
1765 {
1766 ops[i] = ops[j], negs[i++] = negs[j];
1767 if (GET_CODE (ops[j]) == CONST)
1768 n_consts++;
1769 }
1770
1771 if (i + n_consts > input_ops
1772 || (i + n_consts == input_ops && n_consts <= input_consts))
1773 return 0;
1774
1775 n_ops = i;
1776
1777 /* If we have a CONST_INT, put it last. */
1778 for (i = 0; i < n_ops - 1; i++)
1779 if (GET_CODE (ops[i]) == CONST_INT)
1780 {
1781 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
1782 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
1783 }
1784
1785 /* Put a non-negated operand first. If there aren't any, make all
1786 operands positive and negate the whole thing later. */
1787 for (i = 0; i < n_ops && negs[i]; i++)
1788 ;
1789
1790 if (i == n_ops)
1791 {
1792 for (i = 0; i < n_ops; i++)
1793 negs[i] = 0;
1794 negate = 1;
1795 }
1796 else if (i != 0)
1797 {
1798 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
1799 j = negs[0], negs[0] = negs[i], negs[i] = j;
1800 }
1801
1802 /* Now make the result by performing the requested operations. */
1803 result = ops[0];
1804 for (i = 1; i < n_ops; i++)
1805 result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
1806
1807 return negate ? gen_rtx_NEG (mode, result) : result;
1808 }
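
/* For example, given (minus (plus A B) (plus B C)), the expansion loop
   flattens the operands to A, B, -B, -C; the pairwise pass cancels B
   against -B (their difference folds to zero); and the rebuilt result
   is (minus A C). */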
1809
1810 struct cfc_args
1811 {
1812 rtx op0, op1; /* Input */
1813 int equal, op0lt, op1lt; /* Output */
1814 int unordered;
1815 };
1816
1817 static void
1818 check_fold_consts (data)
1819 PTR data;
1820 {
1821 struct cfc_args *args = (struct cfc_args *) data;
1822 REAL_VALUE_TYPE d0, d1;
1823
1824 /* We may possibly raise an exception while reading the value. */
1825 args->unordered = 1;
1826 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
1827 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
1828
1829 /* NaN is the only unordered value here; comparisons of Inf versus Inf are ordered. */
1830 if (REAL_VALUE_ISNAN (d0)
1831 || REAL_VALUE_ISNAN (d1))
1832 return;
1833 args->equal = REAL_VALUES_EQUAL (d0, d1);
1834 args->op0lt = REAL_VALUES_LESS (d0, d1);
1835 args->op1lt = REAL_VALUES_LESS (d1, d0);
1836 args->unordered = 0;
1837 }
1838
1839 /* Like simplify_binary_operation except used for relational operators.
1840 MODE is the mode of the operands, not that of the result. If MODE
1841 is VOIDmode, both operands must also be VOIDmode and we compare the
1842 operands in "infinite precision".
1843
1844 If no simplification is possible, this function returns zero. Otherwise,
1845 it returns either const_true_rtx or const0_rtx. */
1846
1847 rtx
1848 simplify_relational_operation (code, mode, op0, op1)
1849 enum rtx_code code;
1850 enum machine_mode mode;
1851 rtx op0, op1;
1852 {
1853 int equal, op0lt, op0ltu, op1lt, op1ltu;
1854 rtx tem;
1855 rtx trueop0;
1856 rtx trueop1;
1857
1858 if (mode == VOIDmode
1859 && (GET_MODE (op0) != VOIDmode
1860 || GET_MODE (op1) != VOIDmode))
1861 abort ();
1862
1863 /* If op0 is a compare, extract the comparison arguments from it. */
1864 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1865 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1866
1867 trueop0 = avoid_constant_pool_reference (op0);
1868 trueop1 = avoid_constant_pool_reference (op1);
1869
1870 /* We can't simplify MODE_CC values since we don't know what the
1871 actual comparison is. */
1872 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1873 #ifdef HAVE_cc0
1874 || op0 == cc0_rtx
1875 #endif
1876 )
1877 return 0;
1878
1879 /* Make sure the constant is second. */
1880 if (swap_commutative_operands_p (trueop0, trueop1))
1881 {
1882 tem = op0, op0 = op1, op1 = tem;
1883 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
1884 code = swap_condition (code);
1885 }
1886
1887 /* For integer comparisons of A and B, we may be able to simplify A - B
1888 and then simplify a comparison of that with zero. If A and B are both either
1889 a register or a CONST_INT, this can't help; testing for these cases will
1890 prevent infinite recursion here and speed things up.
1891
1892 If CODE is an unsigned comparison, then we can never do this optimization,
1893 because it gives an incorrect result if the subtraction wraps around zero.
1894 ANSI C defines unsigned operations such that they never overflow, and
1895 thus such cases cannot be ignored. */
1896
1897 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
1898 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
1899 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
1900 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1901 && code != GTU && code != GEU && code != LTU && code != LEU)
1902 return simplify_relational_operation (signed_condition (code),
1903 mode, tem, const0_rtx);
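
  /* For example, in SImode (gtu 0xffffffff 1) is true, but
     0xffffffff - 1 == 0xfffffffe is negative as a signed value, so
     rewriting the test as a signed comparison against zero would
     wrongly report "less than". */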
1904
1905 if (flag_unsafe_math_optimizations && code == ORDERED)
1906 return const_true_rtx;
1907
1908 if (flag_unsafe_math_optimizations && code == UNORDERED)
1909 return const0_rtx;
1910
1911 /* For non-IEEE floating-point, if the two operands are equal, we know the
1912 result. */
1913 if (rtx_equal_p (trueop0, trueop1)
1914 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1915 || ! FLOAT_MODE_P (GET_MODE (trueop0))
1916 || flag_unsafe_math_optimizations))
1917 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1918
1919 /* If the operands are floating-point constants, see if we can fold
1920 the result. */
1921 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1922 else if (GET_CODE (trueop0) == CONST_DOUBLE
1923 && GET_CODE (trueop1) == CONST_DOUBLE
1924 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
1925 {
1926 struct cfc_args args;
1927
1928 /* Set up input for check_fold_consts (). */
1929 args.op0 = trueop0;
1930 args.op1 = trueop1;
1931
1932
1933 if (!do_float_handler (check_fold_consts, (PTR) &args))
1934 args.unordered = 1;
1935
1936 if (args.unordered)
1937 switch (code)
1938 {
1939 case UNEQ:
1940 case UNLT:
1941 case UNGT:
1942 case UNLE:
1943 case UNGE:
1944 case NE:
1945 case UNORDERED:
1946 return const_true_rtx;
1947 case EQ:
1948 case LT:
1949 case GT:
1950 case LE:
1951 case GE:
1952 case LTGT:
1953 case ORDERED:
1954 return const0_rtx;
1955 default:
1956 return 0;
1957 }
1958
1959 /* Receive output from check_fold_consts (). */
1960 equal = args.equal;
1961 op0lt = op0ltu = args.op0lt;
1962 op1lt = op1ltu = args.op1lt;
1963 }
1964 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1965
1966 /* Otherwise, see if the operands are both integers. */
1967 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
1968 && (GET_CODE (trueop0) == CONST_DOUBLE
1969 || GET_CODE (trueop0) == CONST_INT)
1970 && (GET_CODE (trueop1) == CONST_DOUBLE
1971 || GET_CODE (trueop1) == CONST_INT))
1972 {
1973 int width = GET_MODE_BITSIZE (mode);
1974 HOST_WIDE_INT l0s, h0s, l1s, h1s;
1975 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
1976
1977 /* Get the two words comprising each integer constant. */
1978 if (GET_CODE (trueop0) == CONST_DOUBLE)
1979 {
1980 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
1981 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
1982 }
1983 else
1984 {
1985 l0u = l0s = INTVAL (trueop0);
1986 h0u = h0s = HWI_SIGN_EXTEND (l0s);
1987 }
1988
1989 if (GET_CODE (trueop1) == CONST_DOUBLE)
1990 {
1991 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
1992 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
1993 }
1994 else
1995 {
1996 l1u = l1s = INTVAL (trueop1);
1997 h1u = h1s = HWI_SIGN_EXTEND (l1s);
1998 }
1999
2000 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2001 we have to sign or zero-extend the values. */
2002 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2003 {
2004 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2005 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2006
2007 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2008 l0s |= ((HOST_WIDE_INT) (-1) << width);
2009
2010 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2011 l1s |= ((HOST_WIDE_INT) (-1) << width);
2012 }
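/* For instance (a sketch with width == 8): the constant 0x80 keeps
   l0u == 0x80 (128 as unsigned), while l0s is sign-extended to
   (HOST_WIDE_INT) -128, because bit 7 is set. */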
2013 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2014 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2015
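/* The comparisons below are lexicographic: the high words decide,
   signed or unsigned as appropriate, and on a tie the low words are
   always compared unsigned, since they carry no sign of their own. */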
2016 equal = (h0u == h1u && l0u == l1u);
2017 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2018 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2019 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2020 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2021 }
2022
2023 /* Otherwise, there are some code-specific tests we can make. */
2024 else
2025 {
2026 switch (code)
2027 {
2028 case EQ:
2029 /* References to the frame plus a constant or labels cannot
2030 be zero, but a SYMBOL_REF can due to #pragma weak. */
2031 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2032 || GET_CODE (trueop0) == LABEL_REF)
2033 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2034 /* On some machines, the ap reg can be 0 sometimes. */
2035 && op0 != arg_pointer_rtx
2036 #endif
2037 )
2038 return const0_rtx;
2039 break;
2040
2041 case NE:
2042 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2043 || GET_CODE (trueop0) == LABEL_REF)
2044 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2045 && op0 != arg_pointer_rtx
2046 #endif
2047 )
2048 return const_true_rtx;
2049 break;
2050
2051 case GEU:
2052 /* Unsigned values are never negative. */
2053 if (trueop1 == const0_rtx)
2054 return const_true_rtx;
2055 break;
2056
2057 case LTU:
2058 if (trueop1 == const0_rtx)
2059 return const0_rtx;
2060 break;
2061
2062 case LEU:
2063 /* Unsigned values are never greater than the largest
2064 unsigned value. */
2065 if (GET_CODE (trueop1) == CONST_INT
2066 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2067 && INTEGRAL_MODE_P (mode))
2068 return const_true_rtx;
2069 break;
2070
2071 case GTU:
2072 if (GET_CODE (trueop1) == CONST_INT
2073 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2074 && INTEGRAL_MODE_P (mode))
2075 return const0_rtx;
2076 break;
2077
2078 default:
2079 break;
2080 }
2081
2082 return 0;
2083 }
2084
2085 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2086 as appropriate. */
2087 switch (code)
2088 {
2089 case EQ:
2090 case UNEQ:
2091 return equal ? const_true_rtx : const0_rtx;
2092 case NE:
2093 case LTGT:
2094 return ! equal ? const_true_rtx : const0_rtx;
2095 case LT:
2096 case UNLT:
2097 return op0lt ? const_true_rtx : const0_rtx;
2098 case GT:
2099 case UNGT:
2100 return op1lt ? const_true_rtx : const0_rtx;
2101 case LTU:
2102 return op0ltu ? const_true_rtx : const0_rtx;
2103 case GTU:
2104 return op1ltu ? const_true_rtx : const0_rtx;
2105 case LE:
2106 case UNLE:
2107 return equal || op0lt ? const_true_rtx : const0_rtx;
2108 case GE:
2109 case UNGE:
2110 return equal || op1lt ? const_true_rtx : const0_rtx;
2111 case LEU:
2112 return equal || op0ltu ? const_true_rtx : const0_rtx;
2113 case GEU:
2114 return equal || op1ltu ? const_true_rtx : const0_rtx;
2115 case ORDERED:
2116 return const_true_rtx;
2117 case UNORDERED:
2118 return const0_rtx;
2119 default:
2120 abort ();
2121 }
2122 }
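
/* A usage sketch (a host with at least 32-bit HOST_WIDE_INT assumed):
   simplify_relational_operation (LT, SImode, GEN_INT (-1), const0_rtx)
   reaches the integer path above, sets op0lt, and returns
   const_true_rtx. */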
2123 \f
2124 /* Simplify CODE, an operation with result mode MODE and three operands,
2125 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2126 a constant. Return 0 if no simplification is possible. */
2127
2128 rtx
2129 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2130 enum rtx_code code;
2131 enum machine_mode mode, op0_mode;
2132 rtx op0, op1, op2;
2133 {
2134 unsigned int width = GET_MODE_BITSIZE (mode);
2135
2136 /* VOIDmode means "infinite" precision. */
2137 if (width == 0)
2138 width = HOST_BITS_PER_WIDE_INT;
2139
2140 switch (code)
2141 {
2142 case SIGN_EXTRACT:
2143 case ZERO_EXTRACT:
2144 if (GET_CODE (op0) == CONST_INT
2145 && GET_CODE (op1) == CONST_INT
2146 && GET_CODE (op2) == CONST_INT
2147 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2148 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2149 {
2150 /* Extracting a bit-field from a constant. */
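/* A worked sketch (BITS_BIG_ENDIAN false assumed): extracting 4 bits
   at position 2 from 0x6c shifts right by 2 to get 0x1b and masks to
   0xb, so ZERO_EXTRACT yields 11 while SIGN_EXTRACT, seeing bit 3
   set, yields -5. */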
2151 HOST_WIDE_INT val = INTVAL (op0);
2152
2153 if (BITS_BIG_ENDIAN)
2154 val >>= (GET_MODE_BITSIZE (op0_mode)
2155 - INTVAL (op2) - INTVAL (op1));
2156 else
2157 val >>= INTVAL (op2);
2158
2159 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2160 {
2161 /* First zero-extend. */
2162 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2163 /* If desired, propagate sign bit. */
2164 if (code == SIGN_EXTRACT
2165 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2166 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2167 }
2168
2169 /* Clear the bits that don't belong in our mode,
2170 unless they and our sign bit are all one.
2171 So we get either a reasonable negative value or a reasonable
2172 unsigned value for this mode. */
2173 if (width < HOST_BITS_PER_WIDE_INT
2174 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2175 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2176 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2177
2178 return GEN_INT (val);
2179 }
2180 break;
2181
2182 case IF_THEN_ELSE:
2183 if (GET_CODE (op0) == CONST_INT)
2184 return op0 != const0_rtx ? op1 : op2;
2185
2186 /* Convert a != b ? a : b and a == b ? b : a to "a". */
2187 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2188 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2189 && rtx_equal_p (XEXP (op0, 0), op1)
2190 && rtx_equal_p (XEXP (op0, 1), op2))
2191 return op1;
2192 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2193 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2194 && rtx_equal_p (XEXP (op0, 1), op1)
2195 && rtx_equal_p (XEXP (op0, 0), op2))
2196 return op2;
2197 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2198 {
2199 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2200 ? GET_MODE (XEXP (op0, 1))
2201 : GET_MODE (XEXP (op0, 0)));
2202 rtx temp;
2203 if (cmp_mode == VOIDmode)
2204 cmp_mode = op0_mode;
2205 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2206 XEXP (op0, 0), XEXP (op0, 1));
2207
2208 /* See if any simplifications were possible. */
2209 if (temp == const0_rtx)
2210 return op2;
2211 else if (temp == const1_rtx)
2212 return op1;
2213 else if (temp)
2214 op0 = temp;
2215
2216 /* Look for happy constants in op1 and op2. */
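/* When the arms are STORE_FLAG_VALUE and zero, the IF_THEN_ELSE
   collapses to the comparison itself, or to its reversal when the
   arms are swapped; e.g. with STORE_FLAG_VALUE == 1,
   (if_then_else (lt a b) 1 0) becomes just (lt a b). */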
2217 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2218 {
2219 HOST_WIDE_INT t = INTVAL (op1);
2220 HOST_WIDE_INT f = INTVAL (op2);
2221
2222 if (t == STORE_FLAG_VALUE && f == 0)
2223 code = GET_CODE (op0);
2224 else if (t == 0 && f == STORE_FLAG_VALUE)
2225 {
2226 enum rtx_code tmp;
2227 tmp = reversed_comparison_code (op0, NULL_RTX);
2228 if (tmp == UNKNOWN)
2229 break;
2230 code = tmp;
2231 }
2232 else
2233 break;
2234
2235 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2236 }
2237 }
2238 break;
2239
2240 default:
2241 abort ();
2242 }
2243
2244 return 0;
2245 }
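
/* A usage sketch: simplify_ternary_operation (IF_THEN_ELSE, SImode,
   SImode, const1_rtx, A, B) returns A, since a constant nonzero
   condition always selects the first arm. */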
2246
2247 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2248 Return 0 if no simplification is possible. */
2249 rtx
2250 simplify_subreg (outermode, op, innermode, byte)
2251 rtx op;
2252 unsigned int byte;
2253 enum machine_mode outermode, innermode;
2254 {
2255 /* Little bit of sanity checking. */
2256 if (innermode == VOIDmode || outermode == VOIDmode
2257 || innermode == BLKmode || outermode == BLKmode)
2258 abort ();
2259
2260 if (GET_MODE (op) != innermode
2261 && GET_MODE (op) != VOIDmode)
2262 abort ();
2263
2264 if (byte % GET_MODE_SIZE (outermode)
2265 || byte >= GET_MODE_SIZE (innermode))
2266 abort ();
2267
2268 if (outermode == innermode && !byte)
2269 return op;
2270
2271 /* Attempt to simplify a constant to a non-SUBREG expression. */
2272 if (CONSTANT_P (op))
2273 {
2274 int offset, part;
2275 unsigned HOST_WIDE_INT val = 0;
2276
2277 /* ??? This code is partly redundant with code below, but can handle
2278 the subregs of floats and similar corner cases.
2279 Later we should move all simplification code here and rewrite
2280 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2281 using SIMPLIFY_SUBREG. */
2282 if (subreg_lowpart_offset (outermode, innermode) == byte)
2283 {
2284 rtx new = gen_lowpart_if_possible (outermode, op);
2285 if (new)
2286 return new;
2287 }
2288
2289 /* A similar comment to the one above applies here. */
2290 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2291 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2292 && GET_MODE_CLASS (outermode) == MODE_INT)
2293 {
2294 rtx new = constant_subword (op,
2295 (byte / UNITS_PER_WORD),
2296 innermode);
2297 if (new)
2298 return new;
2299 }
2300
2301 offset = byte * BITS_PER_UNIT;
2302 switch (GET_CODE (op))
2303 {
2304 case CONST_DOUBLE:
2305 if (GET_MODE (op) != VOIDmode)
2306 break;
2307
2308 /* We can't handle this case yet. */
2309 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2310 return NULL_RTX;
2311
2312 part = offset >= HOST_BITS_PER_WIDE_INT;
2313 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2314 && BYTES_BIG_ENDIAN)
2315 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2316 && WORDS_BIG_ENDIAN))
2317 part = !part;
2318 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2319 offset %= HOST_BITS_PER_WIDE_INT;
2320
2321 /* We've already picked the word we want from a double, so
2322 pretend this is actually an integer. */
2323 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2324
2325 /* FALLTHROUGH */
2326 case CONST_INT:
2327 if (GET_CODE (op) == CONST_INT)
2328 val = INTVAL (op);
2329
2330 /* We don't handle synthesizing non-integral constants yet. */
2331 if (GET_MODE_CLASS (outermode) != MODE_INT)
2332 return NULL_RTX;
2333
2334 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2335 {
2336 if (WORDS_BIG_ENDIAN)
2337 offset = (GET_MODE_BITSIZE (innermode)
2338 - GET_MODE_BITSIZE (outermode) - offset);
2339 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2340 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2341 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2342 - 2 * (offset % BITS_PER_WORD));
2343 }
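/* E.g. (a sketch assuming 32-bit words, with both BYTES_BIG_ENDIAN
   and WORDS_BIG_ENDIAN set): the QImode subreg at byte 3 of an
   SImode constant turns offset 24 into 32 - 8 - 24 == 0, i.e. the
   low-order bits, as expected for a big-endian layout. */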
2344
2345 if (offset >= HOST_BITS_PER_WIDE_INT)
2346 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2347 else
2348 {
2349 val >>= offset;
2350 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2351 val = trunc_int_for_mode (val, outermode);
2352 return GEN_INT (val);
2353 }
2354 default:
2355 break;
2356 }
2357 }
2358
2359 /* Changing mode twice with SUBREG => just change it once,
2360 or not at all if changing back to op's starting mode. */
2361 if (GET_CODE (op) == SUBREG)
2362 {
2363 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2364 int final_offset = byte + SUBREG_BYTE (op);
2365 rtx new;
2366
2367 if (outermode == innermostmode
2368 && byte == 0 && SUBREG_BYTE (op) == 0)
2369 return SUBREG_REG (op);
2370
2371 /* The SUBREG_BYTE represents an offset, as if the value were stored
2372 in memory. An irritating exception is the paradoxical subreg, where
2373 we define SUBREG_BYTE to be 0. On big-endian machines, this
2374 value should be negative. For a moment, undo this exception. */
2375 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2376 {
2377 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2378 if (WORDS_BIG_ENDIAN)
2379 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2380 if (BYTES_BIG_ENDIAN)
2381 final_offset += difference % UNITS_PER_WORD;
2382 }
2383 if (SUBREG_BYTE (op) == 0
2384 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2385 {
2386 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2387 if (WORDS_BIG_ENDIAN)
2388 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2389 if (BYTES_BIG_ENDIAN)
2390 final_offset += difference % UNITS_PER_WORD;
2391 }
2392
2393 /* See whether the resulting subreg will be paradoxical. */
2394 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2395 {
2396 /* In nonparadoxical subregs we can't handle negative offsets. */
2397 if (final_offset < 0)
2398 return NULL_RTX;
2399 /* Bail out in case resulting subreg would be incorrect. */
2400 if (final_offset % GET_MODE_SIZE (outermode)
2401 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2402 return NULL_RTX;
2403 }
2404 else
2405 {
2406 int offset = 0;
2407 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2408
2409 /* In a paradoxical subreg, see if we are still looking at the lower
2410 part. If so, our SUBREG_BYTE will be 0. */
2411 if (WORDS_BIG_ENDIAN)
2412 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2413 if (BYTES_BIG_ENDIAN)
2414 offset += difference % UNITS_PER_WORD;
2415 if (offset == final_offset)
2416 final_offset = 0;
2417 else
2418 return NULL_RTX;
2419 }
2420
2421 /* Recurse for further possible simplifications. */
2422 new = simplify_subreg (outermode, SUBREG_REG (op),
2423 GET_MODE (SUBREG_REG (op)),
2424 final_offset);
2425 if (new)
2426 return new;
2427 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2428 }
2429
2430 /* SUBREG of a hard register => just change the register number
2431 and/or mode. If the hard register is not valid in that mode,
2432 suppress this simplification. If the hard register is the stack,
2433 frame, or argument pointer, leave this as a SUBREG. */
2434
2435 if (REG_P (op)
2436 && (! REG_FUNCTION_VALUE_P (op)
2437 || ! rtx_equal_function_value_matters)
2438 #ifdef CLASS_CANNOT_CHANGE_MODE
2439 && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
2440 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2441 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
2442 && (TEST_HARD_REG_BIT
2443 (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
2444 REGNO (op))))
2445 #endif
2446 && REGNO (op) < FIRST_PSEUDO_REGISTER
2447 && ((reload_completed && !frame_pointer_needed)
2448 || (REGNO (op) != FRAME_POINTER_REGNUM
2449 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2450 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2451 #endif
2452 ))
2453 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2454 && REGNO (op) != ARG_POINTER_REGNUM
2455 #endif
2456 && REGNO (op) != STACK_POINTER_REGNUM)
2457 {
2458 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2459 0);
2460
2461 /* ??? We do allow it if the current REG is not valid for
2462 its mode. This is a kludge to work around how float/complex
2463 arguments are passed on 32-bit Sparc and should be fixed. */
2464 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2465 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2466 return gen_rtx_REG (outermode, final_regno);
2467 }
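
/* A sketch of the effect (target-dependent): on a big-endian target
   where hard regs 0 and 1 hold a DImode pair, (subreg:SI (reg:DI 0) 4)
   folds to (reg:SI 1), assuming HARD_REGNO_MODE_OK accepts SImode
   in register 1. */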
2468
2469 /* If we have a SUBREG of a register that we are replacing and we are
2470 replacing it with a MEM, make a new MEM and try replacing the
2471 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2472 or if we would be widening it. */
2473
2474 if (GET_CODE (op) == MEM
2475 && ! mode_dependent_address_p (XEXP (op, 0))
2476 /* Allow splitting of volatile memory references in case we don't
2477 have an instruction to move the whole thing. */
2478 && (! MEM_VOLATILE_P (op)
2479 || (mov_optab->handlers[(int) innermode].insn_code
2480 == CODE_FOR_nothing))
2481 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2482 return adjust_address_nv (op, outermode, byte);
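
/* For instance (a sketch): (subreg:QI (mem:SI A) 3) becomes roughly
   (mem:QI (plus A 3)) via adjust_address_nv, provided the address is
   not mode-dependent and the reference is not being widened. */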
2483
2484 /* Handle complex values represented as CONCAT
2485 of real and imaginary part. */
2486 if (GET_CODE (op) == CONCAT)
2487 {
2488 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2489 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2490 unsigned int final_offset;
2491 rtx res;
2492
2493 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2494 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2495 if (res)
2496 return res;
2497 /* We can at least simplify it by referring directly to the relevant part. */
2498 return gen_rtx_SUBREG (outermode, part, final_offset);
2499 }
2500
2501 return NULL_RTX;
2502 }
2503 /* Make a SUBREG operation or equivalent if it folds. */
2504
2505 rtx
2506 simplify_gen_subreg (outermode, op, innermode, byte)
2507 rtx op;
2508 unsigned int byte;
2509 enum machine_mode outermode, innermode;
2510 {
2511 rtx new;
2512 /* Little bit of sanity checking. */
2513 if (innermode == VOIDmode || outermode == VOIDmode
2514 || innermode == BLKmode || outermode == BLKmode)
2515 abort ();
2516
2517 if (GET_MODE (op) != innermode
2518 && GET_MODE (op) != VOIDmode)
2519 abort ();
2520
2521 if (byte % GET_MODE_SIZE (outermode)
2522 || byte >= GET_MODE_SIZE (innermode))
2523 abort ();
2524
2525 if (GET_CODE (op) == QUEUED)
2526 return NULL_RTX;
2527
2528 new = simplify_subreg (outermode, op, innermode, byte);
2529 if (new)
2530 return new;
2531
2532 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2533 return NULL_RTX;
2534
2535 return gen_rtx_SUBREG (outermode, op, byte);
2536 }
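
/* A usage sketch: simplify_gen_subreg (SImode, gen_rtx_REG (DImode,
   100), DImode, 0) cannot be folded, since pseudo 100 is not a hard
   register, so it wraps the operand as (subreg:SI (reg:DI 100) 0). */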
2537 /* Simplify X, an rtx expression.
2538
2539 Return the simplified expression or NULL if no simplifications
2540 were possible.
2541
2542 This is the preferred entry point into the simplification routines;
2543 however, we still allow passes to call the more specific routines.
2544
2545 Right now GCC has three (yes, three) major bodies of RTL simplification
2546 code that need to be unified.
2547
2548 1. fold_rtx in cse.c. This code uses various CSE specific
2549 information to aid in RTL simplification.
2550
2551 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2552 it uses combine specific information to aid in RTL
2553 simplification.
2554
2555 3. The routines in this file.
2556
2557
2558 Long term we want to only have one body of simplification code; to
2559 get to that state I recommend the following steps:
2560
2561 1. Pore over fold_rtx & simplify_rtx and move any simplifications
2562 that do not depend on pass-specific state into these routines.
2563
2564 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2565 use this routine whenever possible.
2566
2567 3. Allow for pass dependent state to be provided to these
2568 routines and add simplifications based on the pass dependent
2569 state. Remove code from cse.c & combine.c that becomes
2570 redundant/dead.
2571
2572 It will take time, but ultimately the compiler will be easier to
2573 maintain and improve. It's totally silly that when we add a
2574 simplification it needs to be added in 4 places (3 for RTL
2575 simplification and 1 for tree simplification). */
2576
2577 rtx
2578 simplify_rtx (x)
2579 rtx x;
2580 {
2581 enum rtx_code code = GET_CODE (x);
2582 enum machine_mode mode = GET_MODE (x);
2583
2584 switch (GET_RTX_CLASS (code))
2585 {
2586 case '1':
2587 return simplify_unary_operation (code, mode,
2588 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2589 case 'c':
2590 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2591 {
2592 rtx tem;
2593
2594 tem = XEXP (x, 0);
2595 XEXP (x, 0) = XEXP (x, 1);
2596 XEXP (x, 1) = tem;
2597 return simplify_binary_operation (code, mode,
2598 XEXP (x, 0), XEXP (x, 1));
2599 }
2600
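/* Otherwise fall through and handle it like any other binary
   operation. */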
2601 case '2':
2602 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2603
2604 case '3':
2605 case 'b':
2606 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2607 XEXP (x, 0), XEXP (x, 1),
2608 XEXP (x, 2));
2609
2610 case '<':
2611 return simplify_relational_operation (code,
2612 ((GET_MODE (XEXP (x, 0))
2613 != VOIDmode)
2614 ? GET_MODE (XEXP (x, 0))
2615 : GET_MODE (XEXP (x, 1))),
2616 XEXP (x, 0), XEXP (x, 1));
2617 case 'x':
2618 /* The only case we try to handle is a SUBREG. */
2619 if (code == SUBREG)
2620 return simplify_gen_subreg (mode, SUBREG_REG (x),
2621 GET_MODE (SUBREG_REG (x)),
2622 SUBREG_BYTE (x));
2623 return NULL;
2624 default:
2625 return NULL;
2626 }
2627 }
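
/* A usage sketch: simplify_rtx (gen_rtx_PLUS (SImode, const1_rtx,
   const1_rtx)) dispatches through the commutative case to
   simplify_binary_operation and returns (const_int 2). */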