1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
30 #include "hard-reg-set.h"
33 #include "insn-config.h"
41 /* Simplification and canonicalization of RTL. */
43 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
44 virtual regs here because the simplify_*_operation routines are called
45 by integrate.c, which is called before virtual register instantiation.
47 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
48 a header file so that their definitions can be shared with the
49 simplification routines in simplify-rtx.c. Until then, do not
50 change these macros without also changing the copy in simplify-rtx.c. */
/* Nonzero if X is a fixed base register, or such a register plus a
   CONST_INT offset.  Fix: inside the PLUS clause the base operand is
   XEXP (X, 0); the old code tested (X) == arg_pointer_rtx there, which
   re-tested the whole PLUS (already handled above) and so never matched
   a (plus arg_pointer const_int) form.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
66 /* Similar, but also allows reference to the stack pointer.
68 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
69 arg_pointer_rtx by itself is nonzero, because on at least one machine,
70 the i960, the arg pointer is zero when it is unused. */
/* Nonzero if X is a base register known to be nonzero, or such a
   register plus a CONST_INT offset.  Same fix as FIXED_BASE_PLUS_P:
   in the first PLUS clause the base operand is XEXP (X, 0); testing
   (X) == arg_pointer_rtx there re-tested the whole PLUS and could
   never be true, so (plus arg_pointer const_int) was missed.  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
92 /* Much code operates on (low, high) pairs; the low value is an
93 unsigned wide int, the high value a signed wide int. We
94 occasionally need to sign extend from low to high as if low were a
/* Sign extend LOW into the high half of a (low, high) double-word pair:
   expands to all-ones if LOW is negative when viewed as a HOST_WIDE_INT,
   zero otherwise.  Fix: the argument is now parenthesized inside the
   cast, so an expression argument such as `a + b' cannot misparse by
   operator precedence at the expansion site.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
99 static rtx simplify_plus_minus
PARAMS ((enum rtx_code
,
100 enum machine_mode
, rtx
, rtx
));
101 static void check_fold_consts
PARAMS ((PTR
));
103 /* Make a binary operation by properly ordering the operands and
104 seeing if the expression folds. */
107 simplify_gen_binary (code
, mode
, op0
, op1
)
109 enum machine_mode mode
;
114 /* Put complex operands first and constants second if commutative. */
115 if (GET_RTX_CLASS (code
) == 'c'
116 && ((CONSTANT_P (op0
) && GET_CODE (op1
) != CONST_INT
)
117 || (GET_RTX_CLASS (GET_CODE (op0
)) == 'o'
118 && GET_RTX_CLASS (GET_CODE (op1
)) != 'o')
119 || (GET_CODE (op0
) == SUBREG
120 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0
))) == 'o'
121 && GET_RTX_CLASS (GET_CODE (op1
)) != 'o')))
122 tem
= op0
, op0
= op1
, op1
= tem
;
124 /* If this simplifies, do it. */
125 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
130 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
131 just form the operation. */
133 if (code
== PLUS
&& GET_CODE (op1
) == CONST_INT
134 && GET_MODE (op0
) != VOIDmode
)
135 return plus_constant (op0
, INTVAL (op1
));
136 else if (code
== MINUS
&& GET_CODE (op1
) == CONST_INT
137 && GET_MODE (op0
) != VOIDmode
)
138 return plus_constant (op0
, - INTVAL (op1
));
140 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
143 /* Make a unary operation by first seeing if it folds and otherwise making
144 the specified operation. */
147 simplify_gen_unary (code
, mode
, op
, op_mode
)
149 enum machine_mode mode
;
151 enum machine_mode op_mode
;
155 /* If this simplifies, use it. */
156 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
159 return gen_rtx_fmt_e (code
, mode
, op
);
162 /* Likewise for ternary operations. */
165 simplify_gen_ternary (code
, mode
, op0_mode
, op0
, op1
, op2
)
167 enum machine_mode mode
, op0_mode
;
172 /* If this simplifies, use it. */
173 if (0 != (tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
177 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
180 /* Likewise, for relational operations.
181 CMP_MODE specifies mode comparison is done in.
185 simplify_gen_relational (code
, mode
, cmp_mode
, op0
, op1
)
187 enum machine_mode mode
;
188 enum machine_mode cmp_mode
;
193 if ((tem
= simplify_relational_operation (code
, cmp_mode
, op0
, op1
)) != 0)
196 /* Put complex operands first and constants second. */
197 if ((CONSTANT_P (op0
) && GET_CODE (op1
) != CONST_INT
)
198 || (GET_RTX_CLASS (GET_CODE (op0
)) == 'o'
199 && GET_RTX_CLASS (GET_CODE (op1
)) != 'o')
200 || (GET_CODE (op0
) == SUBREG
201 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0
))) == 'o'
202 && GET_RTX_CLASS (GET_CODE (op1
)) != 'o'))
203 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
205 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
208 /* Replace all occurrences of OLD in X with NEW and try to simplify the
209 resulting RTX. Return a new RTX which is as simplified as possible. */
212 simplify_replace_rtx (x
, old
, new)
217 enum rtx_code code
= GET_CODE (x
);
218 enum machine_mode mode
= GET_MODE (x
);
220 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
221 to build a new expression substituting recursively. If we can't do
222 anything, return our input. */
227 switch (GET_RTX_CLASS (code
))
231 enum machine_mode op_mode
= GET_MODE (XEXP (x
, 0));
232 rtx op
= (XEXP (x
, 0) == old
233 ? new : simplify_replace_rtx (XEXP (x
, 0), old
, new));
235 return simplify_gen_unary (code
, mode
, op
, op_mode
);
241 simplify_gen_binary (code
, mode
,
242 simplify_replace_rtx (XEXP (x
, 0), old
, new),
243 simplify_replace_rtx (XEXP (x
, 1), old
, new));
246 simplify_gen_relational (code
, mode
,
247 (GET_MODE (XEXP (x
, 0)) != VOIDmode
248 ? GET_MODE (XEXP (x
, 0))
249 : GET_MODE (XEXP (x
, 1))),
250 simplify_replace_rtx (XEXP (x
, 0), old
, new),
251 simplify_replace_rtx (XEXP (x
, 1), old
, new));
256 simplify_gen_ternary (code
, mode
, GET_MODE (XEXP (x
, 0)),
257 simplify_replace_rtx (XEXP (x
, 0), old
, new),
258 simplify_replace_rtx (XEXP (x
, 1), old
, new),
259 simplify_replace_rtx (XEXP (x
, 2), old
, new));
262 /* The only case we try to handle is a SUBREG. */
266 exp
= simplify_gen_subreg (GET_MODE (x
),
267 simplify_replace_rtx (SUBREG_REG (x
),
269 GET_MODE (SUBREG_REG (x
)),
277 if (GET_CODE (x
) == MEM
)
279 /* We can't use change_address here, since it verifies memory address
280 for corectness. We don't want such check, since we may handle
281 addresses previously incorect (such as ones in push instructions)
282 and it is caller's work to verify whether resulting insn match. */
283 rtx addr
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
285 if (XEXP (x
, 0) != addr
)
287 mem
= gen_rtx_MEM (GET_MODE (x
), addr
);
288 MEM_COPY_ATTRIBUTES (mem
, x
);
300 /* Try to simplify a unary operation CODE whose output mode is to be
301 MODE with input operand OP whose mode was originally OP_MODE.
302 Return zero if no simplification can be made. */
305 simplify_unary_operation (code
, mode
, op
, op_mode
)
307 enum machine_mode mode
;
309 enum machine_mode op_mode
;
311 unsigned int width
= GET_MODE_BITSIZE (mode
);
313 /* The order of these tests is critical so that, for example, we don't
314 check the wrong mode (input vs. output) for a conversion operation,
315 such as FIX. At some point, this should be simplified. */
317 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
319 if (code
== FLOAT
&& GET_MODE (op
) == VOIDmode
320 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
322 HOST_WIDE_INT hv
, lv
;
325 if (GET_CODE (op
) == CONST_INT
)
326 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
328 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
330 #ifdef REAL_ARITHMETIC
331 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
336 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
337 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
338 d
+= (double) (unsigned HOST_WIDE_INT
) (~ lv
);
344 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
345 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
346 d
+= (double) (unsigned HOST_WIDE_INT
) lv
;
348 #endif /* REAL_ARITHMETIC */
349 d
= real_value_truncate (mode
, d
);
350 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
352 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (op
) == VOIDmode
353 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
355 HOST_WIDE_INT hv
, lv
;
358 if (GET_CODE (op
) == CONST_INT
)
359 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
361 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
363 if (op_mode
== VOIDmode
)
365 /* We don't know how to interpret negative-looking numbers in
366 this case, so don't try to fold those. */
370 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
373 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
375 #ifdef REAL_ARITHMETIC
376 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
379 d
= (double) (unsigned HOST_WIDE_INT
) hv
;
380 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
381 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
382 d
+= (double) (unsigned HOST_WIDE_INT
) lv
;
383 #endif /* REAL_ARITHMETIC */
384 d
= real_value_truncate (mode
, d
);
385 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
389 if (GET_CODE (op
) == CONST_INT
390 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
392 register HOST_WIDE_INT arg0
= INTVAL (op
);
393 register HOST_WIDE_INT val
;
406 val
= (arg0
>= 0 ? arg0
: - arg0
);
410 /* Don't use ffs here. Instead, get low order bit and then its
411 number. If arg0 is zero, this will return 0, as desired. */
412 arg0
&= GET_MODE_MASK (mode
);
413 val
= exact_log2 (arg0
& (- arg0
)) + 1;
421 if (op_mode
== VOIDmode
)
423 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
425 /* If we were really extending the mode,
426 we would have to distinguish between zero-extension
427 and sign-extension. */
428 if (width
!= GET_MODE_BITSIZE (op_mode
))
432 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
433 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
439 if (op_mode
== VOIDmode
)
441 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
443 /* If we were really extending the mode,
444 we would have to distinguish between zero-extension
445 and sign-extension. */
446 if (width
!= GET_MODE_BITSIZE (op_mode
))
450 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
453 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
455 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
456 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
471 val
= trunc_int_for_mode (val
, mode
);
473 return GEN_INT (val
);
476 /* We can do some operations on integer CONST_DOUBLEs. Also allow
477 for a DImode operation on a CONST_INT. */
478 else if (GET_MODE (op
) == VOIDmode
&& width
<= HOST_BITS_PER_INT
* 2
479 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
481 unsigned HOST_WIDE_INT l1
, lv
;
482 HOST_WIDE_INT h1
, hv
;
484 if (GET_CODE (op
) == CONST_DOUBLE
)
485 l1
= CONST_DOUBLE_LOW (op
), h1
= CONST_DOUBLE_HIGH (op
);
487 l1
= INTVAL (op
), h1
= HWI_SIGN_EXTEND (l1
);
497 neg_double (l1
, h1
, &lv
, &hv
);
502 neg_double (l1
, h1
, &lv
, &hv
);
510 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& (-h1
)) + 1;
512 lv
= exact_log2 (l1
& (-l1
)) + 1;
516 /* This is just a change-of-mode, so do nothing. */
521 if (op_mode
== VOIDmode
522 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
526 lv
= l1
& GET_MODE_MASK (op_mode
);
530 if (op_mode
== VOIDmode
531 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
535 lv
= l1
& GET_MODE_MASK (op_mode
);
536 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
537 && (lv
& ((HOST_WIDE_INT
) 1
538 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
539 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
541 hv
= HWI_SIGN_EXTEND (lv
);
552 return immed_double_const (lv
, hv
, mode
);
555 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
556 else if (GET_CODE (op
) == CONST_DOUBLE
557 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
563 if (setjmp (handler
))
564 /* There used to be a warning here, but that is inadvisable.
565 People may want to cause traps, and the natural way
566 to do it should not get a warning. */
569 set_float_handler (handler
);
571 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
576 d
= REAL_VALUE_NEGATE (d
);
580 if (REAL_VALUE_NEGATIVE (d
))
581 d
= REAL_VALUE_NEGATE (d
);
585 d
= real_value_truncate (mode
, d
);
589 /* All this does is change the mode. */
593 d
= REAL_VALUE_RNDZINT (d
);
597 d
= REAL_VALUE_UNSIGNED_RNDZINT (d
);
607 x
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
608 set_float_handler (NULL
);
612 else if (GET_CODE (op
) == CONST_DOUBLE
613 && GET_MODE_CLASS (GET_MODE (op
)) == MODE_FLOAT
614 && GET_MODE_CLASS (mode
) == MODE_INT
615 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
621 if (setjmp (handler
))
624 set_float_handler (handler
);
626 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
631 val
= REAL_VALUE_FIX (d
);
635 val
= REAL_VALUE_UNSIGNED_FIX (d
);
642 set_float_handler (NULL
);
644 val
= trunc_int_for_mode (val
, mode
);
646 return GEN_INT (val
);
649 /* This was formerly used only for non-IEEE float.
650 eggert@twinsun.com says it is safe for IEEE also. */
653 enum rtx_code reversed
;
654 /* There are some simplifications we can do even if the operands
659 /* (not (not X)) == X. */
660 if (GET_CODE (op
) == NOT
)
663 /* (not (eq X Y)) == (ne X Y), etc. */
664 if (mode
== BImode
&& GET_RTX_CLASS (GET_CODE (op
)) == '<'
665 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
))
667 return gen_rtx_fmt_ee (reversed
,
668 op_mode
, XEXP (op
, 0), XEXP (op
, 1));
672 /* (neg (neg X)) == X. */
673 if (GET_CODE (op
) == NEG
)
678 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
679 becomes just the MINUS if its mode is MODE. This allows
680 folding switch statements on machines using casesi (such as
682 if (GET_CODE (op
) == TRUNCATE
683 && GET_MODE (XEXP (op
, 0)) == mode
684 && GET_CODE (XEXP (op
, 0)) == MINUS
685 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
686 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
689 #ifdef POINTERS_EXTEND_UNSIGNED
690 if (! POINTERS_EXTEND_UNSIGNED
691 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
693 || (GET_CODE (op
) == SUBREG
694 && GET_CODE (SUBREG_REG (op
)) == REG
695 && REG_POINTER (SUBREG_REG (op
))
696 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
697 return convert_memory_address (Pmode
, op
);
701 #ifdef POINTERS_EXTEND_UNSIGNED
703 if (POINTERS_EXTEND_UNSIGNED
704 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
706 || (GET_CODE (op
) == SUBREG
707 && GET_CODE (SUBREG_REG (op
)) == REG
708 && REG_POINTER (SUBREG_REG (op
))
709 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
710 return convert_memory_address (Pmode
, op
);
722 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
723 and OP1. Return 0 if no simplification is possible.
725 Don't use this for relational operations such as EQ or LT.
726 Use simplify_relational_operation instead. */
729 simplify_binary_operation (code
, mode
, op0
, op1
)
731 enum machine_mode mode
;
734 register HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
736 unsigned int width
= GET_MODE_BITSIZE (mode
);
739 /* Relational operations don't work here. We must know the mode
740 of the operands in order to do the comparison correctly.
741 Assuming a full word can give incorrect results.
742 Consider comparing 128 with -128 in QImode. */
744 if (GET_RTX_CLASS (code
) == '<')
747 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
748 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
749 && GET_CODE (op0
) == CONST_DOUBLE
&& GET_CODE (op1
) == CONST_DOUBLE
750 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
752 REAL_VALUE_TYPE f0
, f1
, value
;
755 if (setjmp (handler
))
758 set_float_handler (handler
);
760 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
761 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
762 f0
= real_value_truncate (mode
, f0
);
763 f1
= real_value_truncate (mode
, f1
);
765 #ifdef REAL_ARITHMETIC
766 #ifndef REAL_INFINITY
767 if (code
== DIV
&& REAL_VALUES_EQUAL (f1
, dconst0
))
770 REAL_ARITHMETIC (value
, rtx_to_tree_code (code
), f0
, f1
);
784 #ifndef REAL_INFINITY
791 value
= MIN (f0
, f1
);
794 value
= MAX (f0
, f1
);
801 value
= real_value_truncate (mode
, value
);
802 set_float_handler (NULL
);
803 return CONST_DOUBLE_FROM_REAL_VALUE (value
, mode
);
805 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
807 /* We can fold some multi-word operations. */
808 if (GET_MODE_CLASS (mode
) == MODE_INT
809 && width
== HOST_BITS_PER_WIDE_INT
* 2
810 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
811 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
813 unsigned HOST_WIDE_INT l1
, l2
, lv
;
814 HOST_WIDE_INT h1
, h2
, hv
;
816 if (GET_CODE (op0
) == CONST_DOUBLE
)
817 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
819 l1
= INTVAL (op0
), h1
= HWI_SIGN_EXTEND (l1
);
821 if (GET_CODE (op1
) == CONST_DOUBLE
)
822 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
824 l2
= INTVAL (op1
), h2
= HWI_SIGN_EXTEND (l2
);
829 /* A - B == A + (-B). */
830 neg_double (l2
, h2
, &lv
, &hv
);
833 /* .. fall through ... */
836 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
840 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
843 case DIV
: case MOD
: case UDIV
: case UMOD
:
844 /* We'd need to include tree.h to do this and it doesn't seem worth
849 lv
= l1
& l2
, hv
= h1
& h2
;
853 lv
= l1
| l2
, hv
= h1
| h2
;
857 lv
= l1
^ l2
, hv
= h1
^ h2
;
863 && ((unsigned HOST_WIDE_INT
) l1
864 < (unsigned HOST_WIDE_INT
) l2
)))
873 && ((unsigned HOST_WIDE_INT
) l1
874 > (unsigned HOST_WIDE_INT
) l2
)))
881 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
883 && ((unsigned HOST_WIDE_INT
) l1
884 < (unsigned HOST_WIDE_INT
) l2
)))
891 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
893 && ((unsigned HOST_WIDE_INT
) l1
894 > (unsigned HOST_WIDE_INT
) l2
)))
900 case LSHIFTRT
: case ASHIFTRT
:
902 case ROTATE
: case ROTATERT
:
903 #ifdef SHIFT_COUNT_TRUNCATED
904 if (SHIFT_COUNT_TRUNCATED
)
905 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
908 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
911 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
912 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
914 else if (code
== ASHIFT
)
915 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
916 else if (code
== ROTATE
)
917 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
918 else /* code == ROTATERT */
919 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
926 return immed_double_const (lv
, hv
, mode
);
929 if (GET_CODE (op0
) != CONST_INT
|| GET_CODE (op1
) != CONST_INT
930 || width
> HOST_BITS_PER_WIDE_INT
|| width
== 0)
932 /* Even if we can't compute a constant result,
933 there are some cases worth simplifying. */
938 /* In IEEE floating point, x+0 is not the same as x. Similarly
939 for the other optimizations below. */
940 if (TARGET_FLOAT_FORMAT
== IEEE_FLOAT_FORMAT
941 && FLOAT_MODE_P (mode
) && ! flag_unsafe_math_optimizations
)
944 if (op1
== CONST0_RTX (mode
))
947 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
948 if (GET_CODE (op0
) == NEG
)
949 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
950 else if (GET_CODE (op1
) == NEG
)
951 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
954 if (INTEGRAL_MODE_P (mode
)
955 && GET_CODE (op0
) == NOT
956 && GET_CODE (op1
) == CONST_INT
957 && INTVAL (op1
) == 1)
958 return gen_rtx_NEG (mode
, XEXP (op0
, 0));
960 /* Handle both-operands-constant cases. We can only add
961 CONST_INTs to constants since the sum of relocatable symbols
962 can't be handled by most assemblers. Don't add CONST_INT
963 to CONST_INT since overflow won't be computed properly if wider
964 than HOST_BITS_PER_WIDE_INT. */
966 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
967 && GET_CODE (op1
) == CONST_INT
)
968 return plus_constant (op0
, INTVAL (op1
));
969 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
970 && GET_CODE (op0
) == CONST_INT
)
971 return plus_constant (op1
, INTVAL (op0
));
973 /* See if this is something like X * C - X or vice versa or
974 if the multiplication is written as a shift. If so, we can
975 distribute and make a new multiply, shift, or maybe just
976 have X (if C is 2 in the example above). But don't make
977 real multiply if we didn't have one before. */
979 if (! FLOAT_MODE_P (mode
))
981 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
982 rtx lhs
= op0
, rhs
= op1
;
985 if (GET_CODE (lhs
) == NEG
)
986 coeff0
= -1, lhs
= XEXP (lhs
, 0);
987 else if (GET_CODE (lhs
) == MULT
988 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
990 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
993 else if (GET_CODE (lhs
) == ASHIFT
994 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
995 && INTVAL (XEXP (lhs
, 1)) >= 0
996 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
998 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1002 if (GET_CODE (rhs
) == NEG
)
1003 coeff1
= -1, rhs
= XEXP (rhs
, 0);
1004 else if (GET_CODE (rhs
) == MULT
1005 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1007 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1010 else if (GET_CODE (rhs
) == ASHIFT
1011 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1012 && INTVAL (XEXP (rhs
, 1)) >= 0
1013 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1015 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1016 rhs
= XEXP (rhs
, 0);
1019 if (rtx_equal_p (lhs
, rhs
))
1021 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1022 GEN_INT (coeff0
+ coeff1
));
1023 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1027 /* If one of the operands is a PLUS or a MINUS, see if we can
1028 simplify this by the associative law.
1029 Don't use the associative law for floating point.
1030 The inaccuracy makes it nonassociative,
1031 and subtle programs can break if operations are associated. */
1033 if (INTEGRAL_MODE_P (mode
)
1034 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1035 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
)
1036 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1042 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1043 using cc0, in which case we want to leave it as a COMPARE
1044 so we can distinguish it from a register-register-copy.
1046 In IEEE floating point, x-0 is not the same as x. */
1048 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1049 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1050 && op1
== CONST0_RTX (mode
))
1054 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1055 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1056 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1057 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1059 rtx xop00
= XEXP (op0
, 0);
1060 rtx xop10
= XEXP (op1
, 0);
1063 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1065 if (GET_CODE (xop00
) == REG
&& GET_CODE (xop10
) == REG
1066 && GET_MODE (xop00
) == GET_MODE (xop10
)
1067 && REGNO (xop00
) == REGNO (xop10
)
1068 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1069 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1076 /* None of these optimizations can be done for IEEE
1078 if (TARGET_FLOAT_FORMAT
== IEEE_FLOAT_FORMAT
1079 && FLOAT_MODE_P (mode
) && ! flag_unsafe_math_optimizations
)
1082 /* We can't assume x-x is 0 even with non-IEEE floating point,
1083 but since it is zero except in very strange circumstances, we
1084 will treat it as zero with -funsafe-math-optimizations. */
1085 if (rtx_equal_p (op0
, op1
)
1086 && ! side_effects_p (op0
)
1087 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
))
1088 return CONST0_RTX (mode
);
1090 /* Change subtraction from zero into negation. */
1091 if (op0
== CONST0_RTX (mode
))
1092 return gen_rtx_NEG (mode
, op1
);
1094 /* (-1 - a) is ~a. */
1095 if (op0
== constm1_rtx
)
1096 return gen_rtx_NOT (mode
, op1
);
1098 /* Subtracting 0 has no effect. */
1099 if (op1
== CONST0_RTX (mode
))
1102 /* See if this is something like X * C - X or vice versa or
1103 if the multiplication is written as a shift. If so, we can
1104 distribute and make a new multiply, shift, or maybe just
1105 have X (if C is 2 in the example above). But don't make
1106 real multiply if we didn't have one before. */
1108 if (! FLOAT_MODE_P (mode
))
1110 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1111 rtx lhs
= op0
, rhs
= op1
;
1114 if (GET_CODE (lhs
) == NEG
)
1115 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1116 else if (GET_CODE (lhs
) == MULT
1117 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1119 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1122 else if (GET_CODE (lhs
) == ASHIFT
1123 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1124 && INTVAL (XEXP (lhs
, 1)) >= 0
1125 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1127 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1128 lhs
= XEXP (lhs
, 0);
1131 if (GET_CODE (rhs
) == NEG
)
1132 coeff1
= - 1, rhs
= XEXP (rhs
, 0);
1133 else if (GET_CODE (rhs
) == MULT
1134 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1136 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1139 else if (GET_CODE (rhs
) == ASHIFT
1140 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1141 && INTVAL (XEXP (rhs
, 1)) >= 0
1142 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1144 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1145 rhs
= XEXP (rhs
, 0);
1148 if (rtx_equal_p (lhs
, rhs
))
1150 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1151 GEN_INT (coeff0
- coeff1
));
1152 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1156 /* (a - (-b)) -> (a + b). */
1157 if (GET_CODE (op1
) == NEG
)
1158 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1160 /* If one of the operands is a PLUS or a MINUS, see if we can
1161 simplify this by the associative law.
1162 Don't use the associative law for floating point.
1163 The inaccuracy makes it nonassociative,
1164 and subtle programs can break if operations are associated. */
1166 if (INTEGRAL_MODE_P (mode
)
1167 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1168 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
)
1169 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1172 /* Don't let a relocatable value get a negative coeff. */
1173 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1174 return plus_constant (op0
, - INTVAL (op1
));
1176 /* (x - (x & y)) -> (x & ~y) */
1177 if (GET_CODE (op1
) == AND
)
1179 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1180 return simplify_gen_binary (AND
, mode
, op0
,
1181 gen_rtx_NOT (mode
, XEXP (op1
, 1)));
1182 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1183 return simplify_gen_binary (AND
, mode
, op0
,
1184 gen_rtx_NOT (mode
, XEXP (op1
, 0)));
1189 if (op1
== constm1_rtx
)
1191 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
1193 return tem
? tem
: gen_rtx_NEG (mode
, op0
);
1196 /* In IEEE floating point, x*0 is not always 0. */
1197 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1198 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1199 && op1
== CONST0_RTX (mode
)
1200 && ! side_effects_p (op0
))
1203 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1204 However, ANSI says we can drop signals,
1205 so we can do this anyway. */
1206 if (op1
== CONST1_RTX (mode
))
1209 /* Convert multiply by constant power of two into shift unless
1210 we are still generating RTL. This test is a kludge. */
1211 if (GET_CODE (op1
) == CONST_INT
1212 && (val
= exact_log2 (INTVAL (op1
))) >= 0
1213 /* If the mode is larger than the host word size, and the
1214 uppermost bit is set, then this isn't a power of two due
1215 to implicit sign extension. */
1216 && (width
<= HOST_BITS_PER_WIDE_INT
1217 || val
!= HOST_BITS_PER_WIDE_INT
- 1)
1218 && ! rtx_equal_function_value_matters
)
1219 return gen_rtx_ASHIFT (mode
, op0
, GEN_INT (val
));
1221 if (GET_CODE (op1
) == CONST_DOUBLE
1222 && GET_MODE_CLASS (GET_MODE (op1
)) == MODE_FLOAT
)
1226 int op1is2
, op1ism1
;
1228 if (setjmp (handler
))
1231 set_float_handler (handler
);
1232 REAL_VALUE_FROM_CONST_DOUBLE (d
, op1
);
1233 op1is2
= REAL_VALUES_EQUAL (d
, dconst2
);
1234 op1ism1
= REAL_VALUES_EQUAL (d
, dconstm1
);
1235 set_float_handler (NULL
);
1237 /* x*2 is x+x and x*(-1) is -x */
1238 if (op1is2
&& GET_MODE (op0
) == mode
)
1239 return gen_rtx_PLUS (mode
, op0
, copy_rtx (op0
));
1241 else if (op1ism1
&& GET_MODE (op0
) == mode
)
1242 return gen_rtx_NEG (mode
, op0
);
1247 if (op1
== const0_rtx
)
1249 if (GET_CODE (op1
) == CONST_INT
1250 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1252 if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1254 /* A | (~A) -> -1 */
1255 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1256 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1257 && ! side_effects_p (op0
)
1258 && GET_MODE_CLASS (mode
) != MODE_CC
)
1263 if (op1
== const0_rtx
)
1265 if (GET_CODE (op1
) == CONST_INT
1266 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1267 return gen_rtx_NOT (mode
, op0
);
1268 if (op0
== op1
&& ! side_effects_p (op0
)
1269 && GET_MODE_CLASS (mode
) != MODE_CC
)
1274 if (op1
== const0_rtx
&& ! side_effects_p (op0
))
1276 if (GET_CODE (op1
) == CONST_INT
1277 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1279 if (op0
== op1
&& ! side_effects_p (op0
)
1280 && GET_MODE_CLASS (mode
) != MODE_CC
)
1283 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1284 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1285 && ! side_effects_p (op0
)
1286 && GET_MODE_CLASS (mode
) != MODE_CC
)
1291 /* Convert divide by power of two into shift (divide by 1 handled
1293 if (GET_CODE (op1
) == CONST_INT
1294 && (arg1
= exact_log2 (INTVAL (op1
))) > 0)
1295 return gen_rtx_LSHIFTRT (mode
, op0
, GEN_INT (arg1
));
1297 /* ... fall through ... */
1300 if (op1
== CONST1_RTX (mode
))
1303 /* In IEEE floating point, 0/x is not always 0. */
1304 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1305 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1306 && op0
== CONST0_RTX (mode
)
1307 && ! side_effects_p (op1
))
1310 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1311 /* Change division by a constant into multiplication. Only do
1312 this with -funsafe-math-optimizations. */
1313 else if (GET_CODE (op1
) == CONST_DOUBLE
1314 && GET_MODE_CLASS (GET_MODE (op1
)) == MODE_FLOAT
1315 && op1
!= CONST0_RTX (mode
)
1316 && flag_unsafe_math_optimizations
)
1319 REAL_VALUE_FROM_CONST_DOUBLE (d
, op1
);
1321 if (! REAL_VALUES_EQUAL (d
, dconst0
))
1323 #if defined (REAL_ARITHMETIC)
1324 REAL_ARITHMETIC (d
, rtx_to_tree_code (DIV
), dconst1
, d
);
1325 return gen_rtx_MULT (mode
, op0
,
1326 CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
));
1329 gen_rtx_MULT (mode
, op0
,
1330 CONST_DOUBLE_FROM_REAL_VALUE (1./d
, mode
));
1338 /* Handle modulus by power of two (mod with 1 handled below). */
1339 if (GET_CODE (op1
) == CONST_INT
1340 && exact_log2 (INTVAL (op1
)) > 0)
1341 return gen_rtx_AND (mode
, op0
, GEN_INT (INTVAL (op1
) - 1));
1343 /* ... fall through ... */
1346 if ((op0
== const0_rtx
|| op1
== const1_rtx
)
1347 && ! side_effects_p (op0
) && ! side_effects_p (op1
))
1353 /* Rotating ~0 always results in ~0. */
1354 if (GET_CODE (op0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
1355 && (unsigned HOST_WIDE_INT
) INTVAL (op0
) == GET_MODE_MASK (mode
)
1356 && ! side_effects_p (op1
))
1359 /* ... fall through ... */
1364 if (op1
== const0_rtx
)
1366 if (op0
== const0_rtx
&& ! side_effects_p (op1
))
1371 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (op1
) == CONST_INT
1372 && INTVAL (op1
) == (HOST_WIDE_INT
) 1 << (width
-1)
1373 && ! side_effects_p (op0
))
1375 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1380 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (op1
) == CONST_INT
1381 && ((unsigned HOST_WIDE_INT
) INTVAL (op1
)
1382 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
1383 && ! side_effects_p (op0
))
1385 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1390 if (op1
== const0_rtx
&& ! side_effects_p (op0
))
1392 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1397 if (op1
== constm1_rtx
&& ! side_effects_p (op0
))
1399 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1410 /* Get the integer argument values in two forms:
1411 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1413 arg0
= INTVAL (op0
);
1414 arg1
= INTVAL (op1
);
1416 if (width
< HOST_BITS_PER_WIDE_INT
)
1418 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1419 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1422 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1423 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1426 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1427 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
1435 /* Compute the value of the arithmetic. */
1440 val
= arg0s
+ arg1s
;
1444 val
= arg0s
- arg1s
;
1448 val
= arg0s
* arg1s
;
1453 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1456 val
= arg0s
/ arg1s
;
1461 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1464 val
= arg0s
% arg1s
;
1469 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1472 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
1477 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1480 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
1496 /* If shift count is undefined, don't fold it; let the machine do
1497 what it wants. But truncate it if the machine will do that. */
1501 #ifdef SHIFT_COUNT_TRUNCATED
1502 if (SHIFT_COUNT_TRUNCATED
)
1506 val
= ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
;
1513 #ifdef SHIFT_COUNT_TRUNCATED
1514 if (SHIFT_COUNT_TRUNCATED
)
1518 val
= ((unsigned HOST_WIDE_INT
) arg0
) << arg1
;
1525 #ifdef SHIFT_COUNT_TRUNCATED
1526 if (SHIFT_COUNT_TRUNCATED
)
1530 val
= arg0s
>> arg1
;
1532 /* Bootstrap compiler may not have sign extended the right shift.
1533 Manually extend the sign to insure bootstrap cc matches gcc. */
1534 if (arg0s
< 0 && arg1
> 0)
1535 val
|= ((HOST_WIDE_INT
) -1) << (HOST_BITS_PER_WIDE_INT
- arg1
);
1544 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
1545 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
1553 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
1554 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
1558 /* Do nothing here. */
1562 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
1566 val
= ((unsigned HOST_WIDE_INT
) arg0
1567 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1571 val
= arg0s
> arg1s
? arg0s
: arg1s
;
1575 val
= ((unsigned HOST_WIDE_INT
) arg0
1576 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1583 val
= trunc_int_for_mode (val
, mode
);
1585 return GEN_INT (val
);
/* NOTE(review): this whole region is a garbled extraction of GCC's
   simplify-rtx.c.  Each original line is split across several physical
   lines, the original line numbers (1588, 1596, ...) are fused into the
   text, and many lines are missing (the embedded numbers jump).  The
   code bytes are left untouched below; comments only annotate intent.
   Restore from a pristine copy of simplify-rtx.c before compiling.  */
/* Purpose (from the visible fragments): canonicalize a PLUS or MINUS
   whose operands may themselves be nested PLUS/MINUS/NEG/NOT trees.
   The strategy is brute force: flatten all leaf operands into the ops[]
   array with a parallel negs[] sign array, repeatedly simplify pairs
   with simplify_binary_operation, then rebuild a right-associated chain
   with simplify_gen_binary.  Returns the rebuilt rtx (negated at the
   end if every operand turned out negative).  */
1588 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1591 Rather than test for specific case, we do this by a brute-force method
1592 and do all possible simplifications until no more changes occur. Then
1593 we rebuild the operation. */
/* Old-style (K&R) definition; parameter declarations for code, op0 and
   op1 are among the elided lines.  */
1596 simplify_plus_minus (code
, mode
, op0
, op1
)
1598 enum machine_mode mode
;
/* Bookkeeping: n_ops = live entries in ops[]; input_ops/input_consts
   record the starting operand/constant counts so the packing step below
   can tell whether any real reduction happened.  */
1604 int n_ops
= 2, input_ops
= 2, input_consts
= 0, n_consts
= 0;
1605 int first
= 1, negate
= 0, changed
;
1608 memset ((char *) ops
, 0, sizeof ops
);
1610 /* Set up the two operands and then expand them until nothing has been
1611 changed. If we run out of room in our array, give up; this should
1612 almost never happen. */
/* Seed the worklist: for MINUS the second operand starts out negated.  */
1614 ops
[0] = op0
, ops
[1] = op1
, negs
[0] = 0, negs
[1] = (code
== MINUS
);
/* Expansion pass: split each PLUS/MINUS entry into its two halves,
   strip NEG (flipping the sign bit), strip CONST wrappers, and rewrite
   NOT as (-a - 1).  Case labels are among the elided lines.  */
1621 for (i
= 0; i
< n_ops
; i
++)
1622 switch (GET_CODE (ops
[i
]))
1629 ops
[n_ops
] = XEXP (ops
[i
], 1);
1630 negs
[n_ops
++] = GET_CODE (ops
[i
]) == MINUS
? !negs
[i
] : negs
[i
];
1631 ops
[i
] = XEXP (ops
[i
], 0);
1637 ops
[i
] = XEXP (ops
[i
], 0);
1638 negs
[i
] = ! negs
[i
];
1643 ops
[i
] = XEXP (ops
[i
], 0);
1649 /* ~a -> (-a - 1) */
1652 ops
[n_ops
] = constm1_rtx
;
1653 negs
[n_ops
++] = negs
[i
];
1654 ops
[i
] = XEXP (ops
[i
], 0);
1655 negs
[i
] = ! negs
[i
];
/* A negated CONST_INT is folded into its negative value directly.  */
1662 ops
[i
] = GEN_INT (- INTVAL (ops
[i
])), negs
[i
] = 0, changed
= 1;
1670 /* If we only have two operands, we can't do anything. */
1674 /* Now simplify each pair of operands until nothing changes. The first
1675 time through just simplify constants against each other. */
/* Pairwise-simplification loop: try every (i, j) pair; mixed signs are
   expressed as a MINUS with the operands ordered accordingly.  */
1682 for (i
= 0; i
< n_ops
- 1; i
++)
1683 for (j
= i
+ 1; j
< n_ops
; j
++)
1684 if (ops
[i
] != 0 && ops
[j
] != 0
1685 && (! first
|| (CONSTANT_P (ops
[i
]) && CONSTANT_P (ops
[j
]))))
1687 rtx lhs
= ops
[i
], rhs
= ops
[j
];
1688 enum rtx_code ncode
= PLUS
;
1690 if (negs
[i
] && ! negs
[j
])
1691 lhs
= ops
[j
], rhs
= ops
[i
], ncode
= MINUS
;
1692 else if (! negs
[i
] && negs
[j
])
1695 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
/* On success the pair collapses into slot i; slot j is vacated.  */
1698 ops
[i
] = tem
, ops
[j
] = 0;
1699 negs
[i
] = negs
[i
] && negs
[j
];
1700 if (GET_CODE (tem
) == NEG
)
1701 ops
[i
] = XEXP (tem
, 0), negs
[i
] = ! negs
[i
];
1703 if (GET_CODE (ops
[i
]) == CONST_INT
&& negs
[i
])
1704 ops
[i
] = GEN_INT (- INTVAL (ops
[i
])), negs
[i
] = 0;
1712 /* Pack all the operands to the lower-numbered entries and give up if
1713 we didn't reduce the number of operands we had. Make sure we
1714 count a CONST as two operands. If we have the same number of
1715 operands, but have made more CONSTs than we had, this is also
1716 an improvement, so accept it. */
1718 for (i
= 0, j
= 0; j
< n_ops
; j
++)
1721 ops
[i
] = ops
[j
], negs
[i
++] = negs
[j
];
1722 if (GET_CODE (ops
[j
]) == CONST
)
/* Bail-out test: no reduction and no new CONSTs means the rewrite is
   not an improvement.  The return among elided lines.  */
1726 if (i
+ n_consts
> input_ops
1727 || (i
+ n_consts
== input_ops
&& n_consts
<= input_consts
))
1732 /* If we have a CONST_INT, put it last. */
1733 for (i
= 0; i
< n_ops
- 1; i
++)
1734 if (GET_CODE (ops
[i
]) == CONST_INT
)
1736 tem
= ops
[n_ops
- 1], ops
[n_ops
- 1] = ops
[i
] , ops
[i
] = tem
;
1737 j
= negs
[n_ops
- 1], negs
[n_ops
- 1] = negs
[i
], negs
[i
] = j
;
1740 /* Put a non-negated operand first. If there aren't any, make all
1741 operands positive and negate the whole thing later. */
1742 for (i
= 0; i
< n_ops
&& negs
[i
]; i
++)
1747 for (i
= 0; i
< n_ops
; i
++)
1753 tem
= ops
[0], ops
[0] = ops
[i
], ops
[i
] = tem
;
1754 j
= negs
[0], negs
[0] = negs
[i
], negs
[i
] = j
;
1757 /* Now make the result by performing the requested operations. */
/* Rebuild: fold the remaining operands left to right, choosing MINUS
   for each operand whose negs[] bit is still set.  */
1759 for (i
= 1; i
< n_ops
; i
++)
1760 result
= simplify_gen_binary (negs
[i
] ? MINUS
: PLUS
, mode
, result
, ops
[i
]);
/* If all operands had been negated (negate set above), wrap in NEG.  */
1762 return negate
? gen_rtx_NEG (mode
, result
) : result
;
/* NOTE(review): garbled extraction — see the note on simplify_plus_minus.
   These first fragments are the tail of `struct cfc_args'; its opening
   declaration and the `unordered' field are among the elided lines.
   The struct carries the two CONST_DOUBLE operands in and the comparison
   results out of check_fold_consts, which runs under do_float_handler
   so a floating-point trap during conversion can be caught.  */
1767 rtx op0
, op1
; /* Input */
1768 int equal
, op0lt
, op1lt
; /* Output */
/* check_fold_consts: compare two floating-point CONST_DOUBLEs.
   DATA is really a `struct cfc_args *'.  The `static void' header,
   `PTR data;' declaration and braces are among the elided lines.  */
1773 check_fold_consts (data
)
1776 struct cfc_args
*args
= (struct cfc_args
*) data
;
1777 REAL_VALUE_TYPE d0
, d1
;
1779 /* We may possibly raise an exception while reading the value. */
/* Pessimistically mark the result unordered first: if the conversion
   below traps and we longjmp out, the caller sees `unordered' set.  */
1780 args
->unordered
= 1;
1781 REAL_VALUE_FROM_CONST_DOUBLE (d0
, args
->op0
);
1782 REAL_VALUE_FROM_CONST_DOUBLE (d1
, args
->op1
);
1784 /* Comparisons of Inf versus Inf are ordered. */
/* A NaN operand leaves `unordered' set; the early return is among the
   elided lines.  */
1785 if (REAL_VALUE_ISNAN (d0
)
1786 || REAL_VALUE_ISNAN (d1
))
1788 args
->equal
= REAL_VALUES_EQUAL (d0
, d1
);
1789 args
->op0lt
= REAL_VALUES_LESS (d0
, d1
);
1790 args
->op1lt
= REAL_VALUES_LESS (d1
, d0
);
/* Both conversions and comparisons succeeded: the result is ordered.  */
1791 args
->unordered
= 0;
/* NOTE(review): garbled extraction — see the note on simplify_plus_minus.
   Code bytes untouched; comments only.  */
1794 /* Like simplify_binary_operation except used for relational operators.
1795 MODE is the mode of the operands, not that of the result. If MODE
1796 is VOIDmode, both operands must also be VOIDmode and we compare the
1797 operands in "infinite precision".
1799 If no simplification is possible, this function returns zero. Otherwise,
1800 it returns either const_true_rtx or const0_rtx. */
/* K&R definition; declarations of code, op0, op1 partly elided.  */
1803 simplify_relational_operation (code
, mode
, op0
, op1
)
1805 enum machine_mode mode
;
/* Five flags summarize the comparison once the operands are resolved:
   equal, signed less-than each way (op0lt/op1lt), and unsigned
   less-than each way (op0ltu/op1ltu).  The final switch (bottom of the
   function) maps CODE onto these flags.  */
1808 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
/* VOIDmode comparison requires both operands be VOIDmode constants;
   the abort for the mismatch case is among the elided lines.  */
1811 if (mode
== VOIDmode
1812 && (GET_MODE (op0
) != VOIDmode
1813 || GET_MODE (op1
) != VOIDmode
))
1816 /* If op0 is a compare, extract the comparison arguments from it. */
1817 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
1818 op1
= XEXP (op0
, 1), op0
= XEXP (op0
, 0);
1820 /* We can't simplify MODE_CC values since we don't know what the
1821 actual comparison is. */
1822 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
1829 /* Make sure the constant is second. */
/* Canonicalize operand order and flip CODE accordingly.  */
1830 if ((CONSTANT_P (op0
) && ! CONSTANT_P (op1
))
1831 || (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) != CONST_INT
))
1833 tem
= op0
, op0
= op1
, op1
= tem
;
1834 code
= swap_condition (code
);
1837 /* For integer comparisons of A and B maybe we can simplify A - B and can
1838 then simplify a comparison of that with zero. If A and B are both either
1839 a register or a CONST_INT, this can't help; testing for these cases will
1840 prevent infinite recursion here and speed things up.
1842 If CODE is an unsigned comparison, then we can never do this optimization,
1843 because it gives an incorrect result if the subtraction wraps around zero.
1844 ANSI C defines unsigned operations such that they never overflow, and
1845 thus such cases can not be ignored. */
1847 if (INTEGRAL_MODE_P (mode
) && op1
!= const0_rtx
1848 && ! ((GET_CODE (op0
) == REG
|| GET_CODE (op0
) == CONST_INT
)
1849 && (GET_CODE (op1
) == REG
|| GET_CODE (op1
) == CONST_INT
))
1850 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
1851 && code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
)
1852 return simplify_relational_operation (signed_condition (code
),
1853 mode
, tem
, const0_rtx
);
/* Under -funsafe-math-optimizations NaNs are assumed away, so ORDERED
   is always true (and UNORDERED presumably always false; that return
   is among the elided lines).  */
1855 if (flag_unsafe_math_optimizations
&& code
== ORDERED
)
1856 return const_true_rtx
;
1858 if (flag_unsafe_math_optimizations
&& code
== UNORDERED
)
1861 /* For non-IEEE floating-point, if the two operands are equal, we know the
1863 if (rtx_equal_p (op0
, op1
)
1864 && (TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1865 || ! FLOAT_MODE_P (GET_MODE (op0
))
1866 || flag_unsafe_math_optimizations
))
1867 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
1869 /* If the operands are floating-point constants, see if we can fold
1871 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1872 else if (GET_CODE (op0
) == CONST_DOUBLE
&& GET_CODE (op1
) == CONST_DOUBLE
1873 && GET_MODE_CLASS (GET_MODE (op0
)) == MODE_FLOAT
)
1875 struct cfc_args args
;
1877 /* Setup input for check_fold_consts() */
/* Run the comparison under the trap handler; failure or an unordered
   result is handled in the (elided) lines that follow.  */
1882 if (!do_float_handler (check_fold_consts
, (PTR
) &args
))
1895 return const_true_rtx
;
1908 /* Receive output from check_fold_consts() */
/* Floats have no separate unsigned ordering; reuse the signed flags.  */
1910 op0lt
= op0ltu
= args
.op0lt
;
1911 op1lt
= op1ltu
= args
.op1lt
;
1913 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1915 /* Otherwise, see if the operands are both integers. */
1916 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
1917 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
1918 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
1920 int width
= GET_MODE_BITSIZE (mode
);
/* Double-width arithmetic: each constant is (high, low) word pairs in
   both signed (h?s/l?s) and unsigned (h?u/l?u) views.  */
1921 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
1922 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
1924 /* Get the two words comprising each integer constant. */
1925 if (GET_CODE (op0
) == CONST_DOUBLE
)
1927 l0u
= l0s
= CONST_DOUBLE_LOW (op0
);
1928 h0u
= h0s
= CONST_DOUBLE_HIGH (op0
);
1932 l0u
= l0s
= INTVAL (op0
);
1933 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
1936 if (GET_CODE (op1
) == CONST_DOUBLE
)
1938 l1u
= l1s
= CONST_DOUBLE_LOW (op1
);
1939 h1u
= h1s
= CONST_DOUBLE_HIGH (op1
);
1943 l1u
= l1s
= INTVAL (op1
);
1944 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
1947 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1948 we have to sign or zero-extend the values. */
1949 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
1951 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1952 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1954 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1955 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1957 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1958 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
/* For modes of host-word width or less, the high words are fully
   determined by the low words.  */
1960 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
1961 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
/* Lexicographic double-word comparisons: high words decide, low words
   (always compared unsigned) break ties.  */
1963 equal
= (h0u
== h1u
&& l0u
== l1u
);
1964 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
1965 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
1966 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
1967 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
1970 /* Otherwise, there are some code-specific tests we can make. */
/* The switch on CODE dispatching the cases below is among the elided
   lines; the two NONZERO_BASE_PLUS_P blocks appear to be the EQ and NE
   cases respectively.  */
1976 /* References to the frame plus a constant or labels cannot
1977 be zero, but a SYMBOL_REF can due to #pragma weak. */
1978 if (((NONZERO_BASE_PLUS_P (op0
) && op1
== const0_rtx
)
1979 || GET_CODE (op0
) == LABEL_REF
)
1980 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1981 /* On some machines, the ap reg can be 0 sometimes. */
1982 && op0
!= arg_pointer_rtx
1989 if (((NONZERO_BASE_PLUS_P (op0
) && op1
== const0_rtx
)
1990 || GET_CODE (op0
) == LABEL_REF
)
1991 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1992 && op0
!= arg_pointer_rtx
1995 return const_true_rtx
;
1999 /* Unsigned values are never negative. */
/* Presumably the GEU case: x >=u 0 is always true.  */
2000 if (op1
== const0_rtx
)
2001 return const_true_rtx
;
2005 if (op1
== const0_rtx
)
2010 /* Unsigned values are never greater than the largest
2012 if (GET_CODE (op1
) == CONST_INT
2013 && (unsigned HOST_WIDE_INT
) INTVAL (op1
) == GET_MODE_MASK (mode
)
2014 && INTEGRAL_MODE_P (mode
))
2015 return const_true_rtx
;
2019 if (GET_CODE (op1
) == CONST_INT
2020 && (unsigned HOST_WIDE_INT
) INTVAL (op1
) == GET_MODE_MASK (mode
)
2021 && INTEGRAL_MODE_P (mode
))
2032 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
/* Final dispatch: each relational code maps onto the five flags.  The
   case labels are among the elided lines; the returns appear in the
   conventional order EQ, NE, LT, GT, LTU, GTU, LE, GE, LEU, GEU,
   ending with UNORDERED-style const_true_rtx — TODO confirm against a
   pristine copy.  */
2038 return equal
? const_true_rtx
: const0_rtx
;
2041 return ! equal
? const_true_rtx
: const0_rtx
;
2044 return op0lt
? const_true_rtx
: const0_rtx
;
2047 return op1lt
? const_true_rtx
: const0_rtx
;
2049 return op0ltu
? const_true_rtx
: const0_rtx
;
2051 return op1ltu
? const_true_rtx
: const0_rtx
;
2054 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
2057 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
2059 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
2061 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
2063 return const_true_rtx
;
/* NOTE(review): garbled extraction — see the note on simplify_plus_minus.
   Code bytes untouched; comments only.  */
2071 /* Simplify CODE, an operation with result mode MODE and three operands,
2072 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2073 a constant. Return 0 if no simplifications is possible. */
/* K&R definition; the switch over CODE (SIGN_EXTRACT/ZERO_EXTRACT,
   IF_THEN_ELSE, ...) and its case labels are among the elided lines.  */
2076 simplify_ternary_operation (code
, mode
, op0_mode
, op0
, op1
, op2
)
2078 enum machine_mode mode
, op0_mode
;
2081 unsigned int width
= GET_MODE_BITSIZE (mode
);
2083 /* VOIDmode means "infinite" precision. */
2085 width
= HOST_BITS_PER_WIDE_INT
;
/* Bit-field extraction case (SIGN_EXTRACT/ZERO_EXTRACT, per the sign-
   propagation test below): all three operands constant, with op1 the
   field width and op2 the field position.  */
2091 if (GET_CODE (op0
) == CONST_INT
2092 && GET_CODE (op1
) == CONST_INT
2093 && GET_CODE (op2
) == CONST_INT
2094 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
2095 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
2097 /* Extracting a bit-field from a constant */
2098 HOST_WIDE_INT val
= INTVAL (op0
);
2100 if (BITS_BIG_ENDIAN
)
2101 val
>>= (GET_MODE_BITSIZE (op0_mode
)
2102 - INTVAL (op2
) - INTVAL (op1
));
2104 val
>>= INTVAL (op2
);
/* A full-width field needs no masking (and shifting by the full host
   width would be undefined).  */
2106 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
2108 /* First zero-extend. */
2109 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
2110 /* If desired, propagate sign bit. */
2111 if (code
== SIGN_EXTRACT
2112 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
2113 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
2116 /* Clear the bits that don't belong in our mode,
2117 unless they and our sign bit are all one.
2118 So we get either a reasonable negative value or a reasonable
2119 unsigned value for this mode. */
2120 if (width
< HOST_BITS_PER_WIDE_INT
2121 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
2122 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
2123 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2125 return GEN_INT (val
);
/* IF_THEN_ELSE with a constant condition folds to one arm outright.  */
2130 if (GET_CODE (op0
) == CONST_INT
)
2131 return op0
!= const0_rtx
? op1
: op2
;
2133 /* Convert a == b ? b : a to "a". */
/* Float modes are excluded unless -funsafe-math-optimizations, since
   e.g. -0.0 == 0.0 but the arms differ; the returns of op2/op1 for the
   two branches are among the elided lines.  */
2134 if (GET_CODE (op0
) == NE
&& ! side_effects_p (op0
)
2135 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
2136 && rtx_equal_p (XEXP (op0
, 0), op1
)
2137 && rtx_equal_p (XEXP (op0
, 1), op2
))
2139 else if (GET_CODE (op0
) == EQ
&& ! side_effects_p (op0
)
2140 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
2141 && rtx_equal_p (XEXP (op0
, 1), op1
)
2142 && rtx_equal_p (XEXP (op0
, 0), op2
))
/* Condition is a comparison: try folding the comparison itself, then
   try to express the whole IF_THEN_ELSE as a store-flag operation.  */
2144 else if (GET_RTX_CLASS (GET_CODE (op0
)) == '<' && ! side_effects_p (op0
))
2146 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
2147 ? GET_MODE (XEXP (op0
, 1))
2148 : GET_MODE (XEXP (op0
, 0)));
2150 if (cmp_mode
== VOIDmode
)
2151 cmp_mode
= op0_mode
;
2152 temp
= simplify_relational_operation (GET_CODE (op0
), cmp_mode
,
2153 XEXP (op0
, 0), XEXP (op0
, 1));
2155 /* See if any simplifications were possible. */
/* Comparison folded to false/true: the returns of op2/op1 are among
   the elided lines.  */
2156 if (temp
== const0_rtx
)
2158 else if (temp
== const1_rtx
)
2163 /* Look for happy constants in op1 and op2. */
/* If the arms are STORE_FLAG_VALUE and 0 (either order), the whole
   expression is just the comparison, possibly reversed.  */
2164 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
2166 HOST_WIDE_INT t
= INTVAL (op1
);
2167 HOST_WIDE_INT f
= INTVAL (op2
);
2169 if (t
== STORE_FLAG_VALUE
&& f
== 0)
2170 code
= GET_CODE (op0
);
2171 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
/* Reversal may fail (e.g. unordered FP comparisons); the UNKNOWN
   check on tmp is among the elided lines.  */
2174 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
2182 return gen_rtx_fmt_ee (code
, mode
, XEXP (op0
, 0), XEXP (op0
, 1));
/* NOTE(review): garbled extraction — see the note on simplify_plus_minus.
   Code bytes untouched; comments only.  */
2194 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2195 Return 0 if no simplifications is possible. */
/* K&R definition; declarations of op and byte, the opening brace and
   the aborts for the three sanity checks are among the elided lines.  */
2197 simplify_subreg (outermode
, op
, innermode
, byte
)
2200 enum machine_mode outermode
, innermode
;
2202 /* Little bit of sanity checking. */
2203 if (innermode
== VOIDmode
|| outermode
== VOIDmode
2204 || innermode
== BLKmode
|| outermode
== BLKmode
)
2207 if (GET_MODE (op
) != innermode
2208 && GET_MODE (op
) != VOIDmode
)
/* BYTE must be aligned to the outer mode and lie inside the inner
   object.  */
2211 if (byte
% GET_MODE_SIZE (outermode
)
2212 || byte
>= GET_MODE_SIZE (innermode
))
2215 /* Attempt to simplify constant to non-SUBREG expression. */
2216 if (CONSTANT_P (op
))
2219 unsigned HOST_WIDE_INT val
;
2221 /* ??? This code is partly redundant with code bellow, but can handle
2222 the subregs of floats and similar corner cases.
2223 Later it we should move all simplification code here and rewrite
2224 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2225 using SIMPLIFY_SUBREG. */
2226 if (subreg_lowpart_parts_p (outermode
, innermode
, byte
))
2228 rtx
new = gen_lowpart_if_possible (outermode
, op
);
2233 /* Similar comment as above apply here. */
2234 if (GET_MODE_SIZE (outermode
) == UNITS_PER_WORD
2235 && GET_MODE_SIZE (innermode
) > UNITS_PER_WORD
2236 && GET_MODE_CLASS (outermode
) == MODE_INT
)
2238 rtx
new = operand_subword (op
,
2239 (byte
/ UNITS_PER_WORD
),
/* Bit-extraction path: convert the byte offset to a bit offset, then
   pick the relevant word out of CONST_INT/CONST_DOUBLE.  */
2245 offset
= byte
* BITS_PER_UNIT
;
2246 switch (GET_CODE (op
))
/* CONST_DOUBLE case (per CONST_DOUBLE_HIGH/LOW below); a non-VOID mode
   means a floating constant, handled elsewhere.  */
2249 if (GET_MODE (op
) != VOIDmode
)
2252 /* We can't handle this case yet. */
2253 if (GET_MODE_BITSIZE (outermode
) >= HOST_BITS_PER_WIDE_INT
)
/* `part' selects the high or low host word of the double-word
   constant, adjusted for endianness.  */
2256 part
= offset
>= HOST_BITS_PER_WIDE_INT
;
2257 if ((BITS_PER_WORD
> HOST_BITS_PER_WIDE_INT
2258 && BYTES_BIG_ENDIAN
)
2259 || (BITS_PER_WORD
<= HOST_BITS_PER_WIDE_INT
2260 && WORDS_BIG_ENDIAN
))
2262 val
= part
? CONST_DOUBLE_HIGH (op
) : CONST_DOUBLE_LOW (op
);
2263 offset
%= HOST_BITS_PER_WIDE_INT
;
2265 /* We've already picked the word we want from a double, so
2266 pretend this is actually an integer. */
2267 innermode
= mode_for_size (HOST_BITS_PER_WIDE_INT
, MODE_INT
, 0);
2271 if (GET_CODE (op
) == CONST_INT
)
2274 /* We don't handle synthetizing of non-integral constants yet. */
2275 if (GET_MODE_CLASS (outermode
) != MODE_INT
)
/* Big-endian adjustment: convert the memory-order byte offset into a
   shift count from the least-significant end.  */
2278 if (BYTES_BIG_ENDIAN
|| WORDS_BIG_ENDIAN
)
2280 if (WORDS_BIG_ENDIAN
)
2281 offset
= (GET_MODE_BITSIZE (innermode
)
2282 - GET_MODE_BITSIZE (outermode
) - offset
);
2283 if (BYTES_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
2284 && GET_MODE_SIZE (outermode
) < UNITS_PER_WORD
)
2285 offset
= (offset
+ BITS_PER_WORD
- GET_MODE_BITSIZE (outermode
)
2286 - 2 * (offset
% BITS_PER_WORD
));
/* An offset past the host word reads only the (virtual) sign
   extension of the constant.  */
2289 if (offset
>= HOST_BITS_PER_WIDE_INT
)
2290 return ((HOST_WIDE_INT
) val
< 0) ? constm1_rtx
: const0_rtx
;
2294 if (GET_MODE_BITSIZE (outermode
) < HOST_BITS_PER_WIDE_INT
)
2295 val
= trunc_int_for_mode (val
, outermode
);
2296 return GEN_INT (val
);
2303 /* Changing mode twice with SUBREG => just change it once,
2304 or not at all if changing back op starting mode. */
2305 if (GET_CODE (op
) == SUBREG
)
2307 enum machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
2308 unsigned int final_offset
= byte
+ SUBREG_BYTE (op
);
2311 if (outermode
== innermostmode
2312 && byte
== 0 && SUBREG_BYTE (op
) == 0)
2313 return SUBREG_REG (op
);
2315 if ((WORDS_BIG_ENDIAN
|| BYTES_BIG_ENDIAN
)
2316 && GET_MODE_SIZE (innermode
) > GET_MODE_SIZE (outermode
)
2317 && GET_MODE_SIZE (innermode
) > GET_MODE_SIZE (innermostmode
))
2319 /* Inner SUBREG is paradoxical, outer is not. On big endian
2320 we have to special case this. */
2321 if (SUBREG_BYTE (op
))
2322 abort(); /* Can a paradoxical subreg have nonzero offset? */
2323 if (WORDS_BIG_ENDIAN
&& BYTES_BIG_ENDIAN
)
2324 final_offset
= (byte
- GET_MODE_SIZE (innermode
)
2325 + GET_MODE_SIZE (innermostmode
));
2326 else if (WORDS_BIG_ENDIAN
)
2327 final_offset
= ((final_offset
% UNITS_PER_WORD
)
2328 + ((byte
- GET_MODE_SIZE (innermode
)
2329 + GET_MODE_SIZE (innermostmode
))
2330 * UNITS_PER_WORD
) / UNITS_PER_WORD
);
2332 final_offset
= (((final_offset
* UNITS_PER_WORD
)
2334 + ((byte
- GET_MODE_SIZE (innermode
)
2335 + GET_MODE_SIZE (innermostmode
))
2339 /* Bail out in case resulting subreg would be incorrect. */
2340 if (final_offset
% GET_MODE_SIZE (outermode
)
2341 || final_offset
>= GET_MODE_SIZE (innermostmode
))
2343 /* Recurse for futher possible simplifications. */
2344 new = simplify_subreg (outermode
, SUBREG_REG (op
),
2345 GET_MODE (SUBREG_REG (op
)),
2349 return gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
2352 /* SUBREG of a hard register => just change the register number
2353 and/or mode. If the hard register is not valid in that mode,
2354 suppress this simplification. If the hard register is the stack,
2355 frame, or argument pointer, leave this as a SUBREG. */
/* NOTE(review): the fragment below reads `REG_P (op) == REG', which
   compares REG_P's 0/1 result against the rtx code REG — that can
   never be the intended test.  Likely garbling of (or a latent bug
   for) `GET_CODE (op) == REG' / plain `REG_P (op)'; verify against a
   pristine copy before fixing.  */
2357 if (REG_P (op
) == REG
2358 && REGNO (op
) < FIRST_PSEUDO_REGISTER
2359 && REGNO (op
) != FRAME_POINTER_REGNUM
2360 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2361 && REGNO (op
) != HARD_FRAME_POINTER_REGNUM
2363 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2364 && REGNO (op
) != ARG_POINTER_REGNUM
2366 && REGNO (op
) != STACK_POINTER_REGNUM
)
2368 int final_regno
= subreg_hard_regno (gen_rtx_SUBREG (outermode
, op
, byte
),
2371 if (HARD_REGNO_MODE_OK (final_regno
, outermode
))
2372 return gen_rtx_REG (outermode
, final_regno
);
2375 /* If we have a SUBREG of a register that we are replacing and we are
2376 replacing it with a MEM, make a new MEM and try replacing the
2377 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2378 or if we would be widening it. */
2380 if (GET_CODE (op
) == MEM
2381 && ! mode_dependent_address_p (XEXP (op
, 0))
2382 && ! MEM_VOLATILE_P (op
)
2383 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
/* Narrowing MEM subreg: fold the byte offset into the address.  */
2387 new = gen_rtx_MEM (outermode
, plus_constant (XEXP (op
, 0), byte
));
2388 MEM_COPY_ATTRIBUTES (new, op
);
/* NOTE(review): garbled extraction — see the note on simplify_plus_minus.
   Code bytes untouched; comments only.  */
2393 /* Make a SUBREG operation or equivalent if it folds. */
/* Wrapper around simplify_subreg: same sanity checks (the aborts are
   among the elided lines), then either the folded result, NULL for the
   unrepresentable SUBREG-of-SUBREG / VOIDmode cases, or a fresh SUBREG
   rtx.  K&R definition; op/byte declarations elided.  */
2396 simplify_gen_subreg (outermode
, op
, innermode
, byte
)
2399 enum machine_mode outermode
, innermode
;
2402 /* Little bit of sanity checking. */
2403 if (innermode
== VOIDmode
|| outermode
== VOIDmode
2404 || innermode
== BLKmode
|| outermode
== BLKmode
)
2407 if (GET_MODE (op
) != innermode
2408 && GET_MODE (op
) != VOIDmode
)
2411 if (byte
% GET_MODE_SIZE (outermode
)
2412 || byte
>= GET_MODE_SIZE (innermode
))
/* First try to fold the SUBREG away entirely.  */
2415 new = simplify_subreg (outermode
, op
, innermode
, byte
)
/* A SUBREG of a SUBREG, or of a VOIDmode object, cannot be emitted as
   a plain SUBREG; the NULL return is among the elided lines.  */
2419 if (GET_CODE (op
) == SUBREG
|| GET_MODE (op
) == VOIDmode
)
2422 return gen_rtx_SUBREG (outermode
, op
, byte
);
2424 /* Simplify X, an rtx expression.
2426 Return the simplified expression or NULL if no simplifications
2429 This is the preferred entry point into the simplification routines;
2430 however, we still allow passes to call the more specific routines.
2432 Right now GCC has three (yes, three) major bodies of RTL simplficiation
2433 code that need to be unified.
2435 1. fold_rtx in cse.c. This code uses various CSE specific
2436 information to aid in RTL simplification.
2438 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2439 it uses combine specific information to aid in RTL
2442 3. The routines in this file.
2445 Long term we want to only have one body of simplification code; to
2446 get to that state I recommend the following steps:
2448 1. Pour over fold_rtx & simplify_rtx and move any simplifications
2449 which are not pass dependent state into these routines.
2451 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2452 use this routine whenever possible.
2454 3. Allow for pass dependent state to be provided to these
2455 routines and add simplifications based on the pass dependent
2456 state. Remove code from cse.c & combine.c that becomes
2459 It will take time, but ultimately the compiler will be easier to
2460 maintain and improve. It's totally silly that when we add a
2461 simplification that it needs to be added to 4 places (3 for RTL
2462 simplification and 1 for tree simplification. */
2468 enum rtx_code code
= GET_CODE (x
);
2469 enum machine_mode mode
= GET_MODE (x
);
2471 switch (GET_RTX_CLASS (code
))
2474 return simplify_unary_operation (code
, mode
,
2475 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
2478 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
2482 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
2483 XEXP (x
, 0), XEXP (x
, 1),
2487 return simplify_relational_operation (code
,
2488 ((GET_MODE (XEXP (x
, 0))
2490 ? GET_MODE (XEXP (x
, 0))
2491 : GET_MODE (XEXP (x
, 1))),
2492 XEXP (x
, 0), XEXP (x
, 1));
2494 /* The only case we try to handle is a SUBREG. */
2496 return simplify_gen_subreg (mode
, SUBREG_REG (x
),
2497 GET_MODE (SUBREG_REG (x
)),