/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "hard-reg-set.h"
#include "insn-config.h"

/* Simplification and canonicalization of RTL.  */
/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
   a header file so that their definitions can be shared with the
   simplification routines in simplify-rtx.c.  Until then, do not
   change these macros without also changing the copy in simplify-rtx.c.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
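
/* For example, frame_pointer_rtx itself and
   (plus frame_pointer_rtx (const_int 8)) both satisfy
   FIXED_BASE_PLUS_P; a stack-pointer based address does not, and is
   only accepted by NONZERO_BASE_PLUS_P below.  */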
/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P, however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
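
/* For instance, a low word whose sign bit is set, such as
   (HOST_WIDE_INT) -5, extends to a high word of -1 (all ones), while a
   nonnegative low word such as 5 extends to a high word of 0.  */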
static rtx simplify_plus_minus		PARAMS ((enum rtx_code,
						 enum machine_mode, rtx, rtx));
static void check_fold_consts		PARAMS ((PTR));
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
	  || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
	      && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
	  || (GET_CODE (op0) == SUBREG
	      && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
	      && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction of CONST_INT specially.  Otherwise,
     just form the operation.  */

  if (code == PLUS && GET_CODE (op1) == CONST_INT
      && GET_MODE (op0) != VOIDmode)
    return plus_constant (op0, INTVAL (op1));
  else if (code == MINUS && GET_CODE (op1) == CONST_INT
	   && GET_MODE (op0) != VOIDmode)
    return plus_constant (op0, - INTVAL (op1));
  else
    return gen_rtx_fmt_ee (code, mode, op0, op1);
}
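
/* For instance, a commutative call such as
   simplify_gen_binary (PLUS, SImode, <const_int 4>, <reg>) is first
   canonicalized so the constant comes second, and then folded through
   plus_constant instead of allocating a fresh PLUS rtx.  */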
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (code, mode, cmp_mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     enum machine_mode cmp_mode;
     rtx op0, op1;
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* Put complex operands first and constants second.  */
  if ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
      || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
	  && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
      || (GET_CODE (op0) == SUBREG
	  && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
	  && GET_RTX_CLASS (GET_CODE (op1)) != 'o'))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (x, old, new)
     rtx x;
     rtx old;
     rtx new;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op = (XEXP (x, 0) == old
		  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

	return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
	simplify_gen_binary (code, mode,
			     simplify_replace_rtx (XEXP (x, 0), old, new),
			     simplify_replace_rtx (XEXP (x, 1), old, new));

    case '<':
      return
	simplify_gen_relational (code, mode,
				 (GET_MODE (XEXP (x, 0)) != VOIDmode
				  ? GET_MODE (XEXP (x, 0))
				  : GET_MODE (XEXP (x, 1))),
				 simplify_replace_rtx (XEXP (x, 0), old, new),
				 simplify_replace_rtx (XEXP (x, 1), old, new));

    case '3':
    case 'b':
      return
	simplify_gen_ternary (code, mode, GET_MODE (XEXP (x, 0)),
			      simplify_replace_rtx (XEXP (x, 0), old, new),
			      simplify_replace_rtx (XEXP (x, 1), old, new),
			      simplify_replace_rtx (XEXP (x, 2), old, new));

    case 'x':
      /* The only case we try to handle is a lowpart SUBREG of a single-word
	 CONST_INT.  */
      if (code == SUBREG && subreg_lowpart_p (x) && old == SUBREG_REG (x)
	  && GET_CODE (new) == CONST_INT
	  && GET_MODE_SIZE (GET_MODE (old)) <= UNITS_PER_WORD)
	return GEN_INT (INTVAL (new) & GET_MODE_MASK (mode));

      return x;

    default:
      if (GET_CODE (x) == MEM)
	{
	  /* We can't use change_address here, since it verifies the memory
	     address for correctness.  We don't want such a check, since we
	     may handle addresses previously incorrect (such as ones in push
	     instructions) and it is the caller's work to verify whether the
	     resulting insn matches.  */
	  rtx addr = simplify_replace_rtx (XEXP (x, 0), old, new);
	  rtx mem;

	  if (XEXP (x, 0) != addr)
	    {
	      mem = gen_rtx_MEM (GET_MODE (x), addr);
	      MEM_COPY_ATTRIBUTES (mem, x);
	      x = mem;
	    }

	  return x;
	}

      return x;
    }
}
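
/* For example, replacing (reg B) with (const_int 0) inside
   (plus (reg A) (reg B)) recurses through the '2'/'c' case, and the
   resulting simplify_gen_binary call folds the add-of-zero away,
   returning just (reg A) for an integer mode.  */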
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */
#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_INT (d, lv, hv, mode);
#else
      if (hv < 0)
	{
	  d = (double) (~ hv);
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) (~ lv);
	  d = (- d - 1.0);
	}
      else
	{
	  d = (double) hv;
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) lv;
	}
#endif  /* REAL_ARITHMETIC */

      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
#else
      d = (double) (unsigned HOST_WIDE_INT) hv;
      d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
	    * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
      d += (double) (unsigned HOST_WIDE_INT) lv;
#endif  /* REAL_ARITHMETIC */

      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
#endif
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      register HOST_WIDE_INT arg0 = INTVAL (op);
      register HOST_WIDE_INT val;

      switch (code)
	{
	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	default:
	  return 0;
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
	  else
	    lv = exact_log2 (l1 & (-l1)) + 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d;
      jmp_buf handler;
      rtx x;

      if (setjmp (handler))
	/* There used to be a warning here, but that is inadvisable.
	   People may want to cause traps, and the natural way
	   to do it should not get a warning.  */
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;

	case ABS:
	  if (REAL_VALUE_NEGATIVE (d))
	    d = REAL_VALUE_NEGATE (d);
	  break;

	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;

	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;

	case FIX:
	  d = REAL_VALUE_RNDZINT (d);
	  break;

	case UNSIGNED_FIX:
	  d = REAL_VALUE_UNSIGNED_RNDZINT (d);
	  break;

	default:
	  return 0;
	}

      x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
      set_float_handler (NULL);
      return x;
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      REAL_VALUE_TYPE d;
      jmp_buf handler;
      HOST_WIDE_INT val;

      if (setjmp (handler))
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case FIX:
	  val = REAL_VALUE_FIX (d);
	  break;

	case UNSIGNED_FIX:
	  val = REAL_VALUE_UNSIGNED_FIX (d);
	  break;

	default:
	  return 0;
	}

      set_float_handler (NULL);

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
#endif
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return gen_rtx_fmt_ee (reversed,
				   op_mode, XEXP (op, 0), XEXP (op, 1));
	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the Vax).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#ifdef POINTERS_EXTEND_UNSIGNED
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

#ifdef POINTERS_EXTEND_UNSIGNED
	case ZERO_EXTEND:
	  if (POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
	  break;
#endif

	default:
	  break;
	}

      return 0;
    }
}
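
/* A worked example: simplify_unary_operation (ABS, SImode, GEN_INT (-9),
   SImode) takes the CONST_INT arm above and yields (const_int 9), while
   a non-constant (neg (neg (reg))) falls through to the last arm and
   comes back as just (reg).  */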
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;
      jmp_buf handler;

      if (setjmp (handler))
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

#ifdef REAL_ARITHMETIC
#ifndef REAL_INFINITY
      if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
	{
	  set_float_handler (NULL);
	  return 0;
	}
#endif
      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
#else
      switch (code)
	{
	case PLUS:
	  value = f0 + f1;
	  break;
	case MINUS:
	  value = f0 - f1;
	  break;
	case MULT:
	  value = f0 * f1;
	  break;
	case DIV:
#ifndef REAL_INFINITY
	  if (f1 == 0)
	    return 0;
#endif
	  value = f0 / f1;
	  break;
	case SMIN:
	  value = MIN (f0, f1);
	  break;
	case SMAX:
	  value = MAX (f0, f1);
	  break;
	default:
	  abort ();
	}
#endif

      value = real_value_truncate (mode, value);
      set_float_handler (NULL);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }
#endif  /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (op0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
      else
	l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (op1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
      else
	l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* ... fall through ...  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:  case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* In IEEE floating point, x+0 is not the same as x.  Similarly
	     for the other optimizations below.  */
	  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
	      && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
	    break;

	  if (op1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && GET_CODE (op1) == CONST_INT
	      && INTVAL (op1) == 1)
	    return gen_rtx_NEG (mode, XEXP (op0, 0));

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));
	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }
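
	  /* For instance, (plus (mult X 4) X) gives coeff0 == 4 and
	     coeff1 == 1 and folds to (mult X 5); (plus (ashift X 1) X),
	     i.e. X*2 + X, folds to (mult X 3).  If neither input had a
	     real MULT (had_mult == 0) and the folded form would be one,
	     we return 0 instead.  */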
	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
	      && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	    return tem;
	  break;
	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && op1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }

	  break;
	case MINUS:
	  /* None of these optimizations can be done for IEEE
	     floating point.  */
	  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
	      && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
	    break;

	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (op0, op1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);
	  /* Change subtraction from zero into negation.  */
	  if (op0 == CONST0_RTX (mode))
	    return gen_rtx_NEG (mode, op1);

	  /* (-1 - a) is ~a.  */
	  if (op0 == constm1_rtx)
	    return gen_rtx_NOT (mode, op1);

	  /* Subtracting 0 has no effect.  */
	  if (op1 == CONST0_RTX (mode))
	    return op0;
	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = - 1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }
	  /* (a - (-b)) -> (a + b).  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
	      && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return plus_constant (op0, - INTVAL (op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 1)));
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 0)));
	    }
	  break;
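
	  /* Checking (x - (x & y)) -> (x & ~y) on bits: for x = 0b1100,
	     y = 0b1010 we get x & y = 0b1000, x - (x & y) = 0b0100, and
	     x & ~y = 0b1100 & 0b0101 = 0b0100.  The bits of (x & y) are
	     a subset of the bits of x, so the subtraction cannot
	     borrow.  */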
	case MULT:
	  if (op1 == constm1_rtx)
	    {
	      tem = simplify_unary_operation (NEG, mode, op0, mode);

	      return tem ? tem : gen_rtx_NEG (mode, op0);
	    }

	  /* In IEEE floating point, x*0 is not always 0.  */
	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && op1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for nans.
	     However, ANSI says we can drop signals,
	     so we can do this anyway.  */
	  if (op1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (op1) == CONST_INT
	      && (val = exact_log2 (INTVAL (op1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

	  if (GET_CODE (op1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
	    {
	      REAL_VALUE_TYPE d;
	      jmp_buf handler;
	      int op1is2, op1ism1;

	      if (setjmp (handler))
		return 0;

	      set_float_handler (handler);
	      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
	      op1is2 = REAL_VALUES_EQUAL (d, dconst2);
	      op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
	      set_float_handler (NULL);

	      /* x*2 is x+x and x*(-1) is -x */
	      if (op1is2 && GET_MODE (op0) == mode)
		return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

	      else if (op1ism1 && GET_MODE (op0) == mode)
		return gen_rtx_NEG (mode, op0);
	    }
	  break;
	case IOR:
	  if (op1 == const0_rtx)
	    return op0;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return op1;
	  if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  break;

	case XOR:
	  if (op1 == const0_rtx)
	    return op0;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return gen_rtx_NOT (mode, op0);
	  if (op0 == op1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case AND:
	  if (op1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return op0;
	  if (op0 == op1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;
	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (op1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (op1))) > 0)
	    return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

	  /* ... fall through ...  */

	case DIV:
	  if (op1 == CONST1_RTX (mode))
	    return op0;

	  /* In IEEE floating point, 0/x is not always 0.  */
	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && op0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
	  /* Change division by a constant into multiplication.  Only do
	     this with -funsafe-math-optimizations.  */
	  else if (GET_CODE (op1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
		   && op1 != CONST0_RTX (mode)
		   && flag_unsafe_math_optimizations)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
#if defined (REAL_ARITHMETIC)
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  return gen_rtx_MULT (mode, op0,
				       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
#else
		  return
		    gen_rtx_MULT (mode, op0,
				  CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
#endif
		}
	    }
#endif
	  break;
	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (op1) == CONST_INT
	      && exact_log2 (INTVAL (op1)) > 0)
	    return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

	  /* ... fall through ...  */

	case MOD:
	  if ((op0 == const0_rtx || op1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;

	case ROTATERT:
	case ROTATE:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* ... fall through ...  */
	case ASHIFT:
	case ASHIFTRT:
	case LSHIFTRT:
	  if (op1 == const0_rtx)
	    return op0;
	  if (op0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;

	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
	      && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width -1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (op1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMIN:
	  if (op1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMAX:
	  if (op1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	default:
	  break;
	}

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (op0);
  arg1 = INTVAL (op1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }
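
  /* For instance, with width == 8 (QImode) and op0 == (const_int 0x80),
     the zero-extended value is arg0 == 128 while the sign-extended one
     is arg0s == -128; unsigned operations below use ARG0/ARG1 and
     signed ones use ARG0S/ARG1S.  */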
  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
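
/* End to end, simplify_binary_operation (ASHIFTRT, SImode, GEN_INT (-16),
   GEN_INT (2)) reaches the ASHIFTRT arm above, computes -16 >> 2 == -4,
   and returns (const_int -4); the manual sign extension only matters on
   hosts whose compiler shifts signed values logically.  */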
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

static rtx
simplify_plus_minus (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx ops[8];
  int negs[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
  int first = 1, negate = 0, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
  changed = 1;
  while (changed)
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	switch (GET_CODE (ops[i]))
	  {
	  case PLUS:
	  case MINUS:
	    if (n_ops == 7)
	      return 0;

	    ops[n_ops] = XEXP (ops[i], 1);
	    negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
	    ops[i] = XEXP (ops[i], 0);
	    input_ops++;
	    changed = 1;
	    break;

	  case NEG:
	    ops[i] = XEXP (ops[i], 0);
	    negs[i] = ! negs[i];
	    changed = 1;
	    break;

	  case CONST:
	    ops[i] = XEXP (ops[i], 0);
	    input_consts++;
	    changed = 1;
	    break;

	  case NOT:
	    /* ~a -> (-a - 1) */
	    if (n_ops != 7)
	      {
		ops[n_ops] = constm1_rtx;
		negs[n_ops++] = negs[i];
		ops[i] = XEXP (ops[i], 0);
		negs[i] = ! negs[i];
		changed = 1;
	      }
	    break;

	  case CONST_INT:
	    if (negs[i])
	      ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
	    break;

	  default:
	    break;
	  }
    }
  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2)
    return 0;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  changed = 1;
  while (changed)
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  if (ops[i] != 0 && ops[j] != 0
	      && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
	    {
	      rtx lhs = ops[i], rhs = ops[j];
	      enum rtx_code ncode = PLUS;

	      if (negs[i] && ! negs[j])
		lhs = ops[j], rhs = ops[i], ncode = MINUS;
	      else if (! negs[i] && negs[j])
		ncode = MINUS;

	      tem = simplify_binary_operation (ncode, mode, lhs, rhs);
	      if (tem)
		{
		  ops[i] = tem, ops[j] = 0;
		  negs[i] = negs[i] && negs[j];
		  if (GET_CODE (tem) == NEG)
		    ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];

		  if (GET_CODE (ops[i]) == CONST_INT && negs[i])
		    ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;

		  changed = 1;
		}
	    }

      first = 0;
    }
  /* Pack all the operands to the lower-numbered entries and give up if
     we didn't reduce the number of operands we had.  Make sure we
     count a CONST as two operands.  If we have the same number of
     operands, but have made more CONSTs than we had, this is also
     an improvement, so accept it.  */

  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j] != 0)
      {
	ops[i] = ops[j], negs[i++] = negs[j];
	if (GET_CODE (ops[j]) == CONST)
	  n_consts++;
      }

  if (i + n_consts > input_ops
      || (i + n_consts == input_ops && n_consts <= input_consts))
    return 0;

  n_ops = i;
  /* If we have a CONST_INT, put it last.  */
  for (i = 0; i < n_ops - 1; i++)
    if (GET_CODE (ops[i]) == CONST_INT)
      {
	tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i], ops[i] = tem;
	j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
      }

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */
  for (i = 0; i < n_ops && negs[i]; i++)
    ;

  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	negs[i] = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0], ops[0] = ops[i], ops[i] = tem;
      j = negs[0], negs[0] = negs[i], negs[i] = j;
    }
  /* Now make the result by performing the requested operations.  */
  result = ops[0];
  for (i = 1; i < n_ops; i++)
    result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
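
/* For example, (a - (b - c)) is expanded by the loop above into the
   operand list {a, -b, +c} and rebuilt as ((a - b) + c); any CONST_INT
   operands would first be folded together and moved to the end.  */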
struct cfc_args
{
  rtx op0, op1;			/* Input */
  int equal, op0lt, op1lt;	/* Output */
  int unordered;
};

static void
check_fold_consts (data)
     PTR data;
{
  struct cfc_args *args = (struct cfc_args *) data;
  REAL_VALUE_TYPE d0, d1;

  /* We may possibly raise an exception while reading the value.  */
  args->unordered = 1;
  REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
  REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);

  /* Comparisons of Inf versus Inf are ordered.  */
  if (REAL_VALUE_ISNAN (d0)
      || REAL_VALUE_ISNAN (d1))
    return;
  args->equal = REAL_VALUES_EQUAL (d0, d1);
  args->op0lt = REAL_VALUES_LESS (d0, d1);
  args->op1lt = REAL_VALUES_LESS (d1, d0);
  args->unordered = 0;
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();
  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
      || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;
  /* For non-IEEE floating-point, if the two operands are equal, we know the
     result.  */
  if (rtx_equal_p (op0, op1)
      && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	  || ! FLOAT_MODE_P (GET_MODE (op0))
	  || flag_unsafe_math_optimizations))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    {
      struct cfc_args args;

      /* Set up input for check_fold_consts ().  */
      args.op0 = op0;
      args.op1 = op1;

      /* We got an exception from check_fold_consts ().  */
      if (!do_float_handler (check_fold_consts, (PTR) &args))
	return 0;

      /* The comparison is unordered if either operand is a NaN.  */
      if (args.unordered)
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      /* Receive output from check_fold_consts ().  */
      equal = args.equal;
      op0lt = op0ltu = args.op0lt;
      op1lt = op1ltu = args.op1lt;
    }
#endif  /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
	   && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (op0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (op0);
	  h0u = h0s = CONST_DOUBLE_HIGH (op0);
	}
      else
	{
	  l0u = l0s = INTVAL (op0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (op1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (op1);
	  h1u = h1s = CONST_DOUBLE_HIGH (op1);
	}
      else
	{
	  l1u = l1s = INTVAL (op1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
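
  /* Why both signed and unsigned orderings are computed: in QImode the
     constant 0x80 is -128 for the signed tests (so LT against 1 holds)
     but 128 for the unsigned tests (so LTU against 1 fails); the CODE
     switch below picks whichever of op0lt/op0ltu etc. applies.  */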
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  /* References to the frame plus a constant or labels cannot
	     be zero, but a SYMBOL_REF can due to #pragma weak.  */
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      /* On some machines, the ap reg can be 0 sometimes.  */
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const0_rtx;
	  break;

	case NE:
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (op1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (op1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	default:
	  break;
	}

      return 0;
    }
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;
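
      /* Worked example (BITS_BIG_ENDIAN zero): extracting a 4-bit field
	 at bit position 4 from op0 == (const_int 0xAB) shifts right by
	 INTVAL (op2) == 4, giving 0xA, then masks to INTVAL (op1) == 4
	 bits.  ZERO_EXTRACT yields (const_int 10); SIGN_EXTRACT sees
	 bit 3 set and propagates it, yielding (const_int -6).  */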
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;
	  else if (temp)
	    return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplification is possible.  */

rtx
simplify_subreg (outermode, op, innermode, byte)
     enum machine_mode outermode, innermode;
     rtx op;
     unsigned int byte;
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();
  /* Attempt to simplify constant to non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset, part;
      unsigned HOST_WIDE_INT val;

      /* ??? This code is partly redundant with code below, but can handle
	 the subregs of floats and similar corner cases.
	 Later we should move all simplification code here and rewrite
	 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
	 using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_parts_p (outermode, innermode, byte))
	{
	  rtx new = gen_lowpart_if_possible (outermode, op);
	  if (new)
	    return new;
	}

      /* A similar comment as above applies here.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
	  && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
	  && GET_MODE_CLASS (outermode) == MODE_INT)
	{
	  rtx new = operand_subword (op,
				     (byte / UNITS_PER_WORD),
				     0, innermode);
	  if (new)
	    return new;
	}
      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
	{
	case CONST_DOUBLE:
	  if (GET_MODE (op) != VOIDmode)
	    break;

	  /* We can't handle this case yet.  */
	  if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
	    return NULL_RTX;

	  part = offset >= HOST_BITS_PER_WIDE_INT;
	  if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
	       && BYTES_BIG_ENDIAN)
	      || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
		  && WORDS_BIG_ENDIAN))
	    part = !part;
	  val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
	  offset %= HOST_BITS_PER_WIDE_INT;

	  /* We've already picked the word we want from a double, so
	     pretend this is actually an integer.  */
	  innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

	  /* ... fall through ...  */
	case CONST_INT:
	  if (GET_CODE (op) == CONST_INT)
	    val = INTVAL (op);

	  /* We don't handle synthesizing of non-integral constants yet.  */
	  if (GET_MODE_CLASS (outermode) != MODE_INT)
	    return NULL_RTX;

	  if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
	    {
	      if (WORDS_BIG_ENDIAN)
		offset = (GET_MODE_BITSIZE (innermode)
			  - GET_MODE_BITSIZE (outermode) - offset);
	      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
		  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
		offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
			  - 2 * (offset % BITS_PER_WORD));
	    }

	  if (offset >= HOST_BITS_PER_WIDE_INT)
	    return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
	  else
	    {
	      val >>= offset;
	      if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
		val = trunc_int_for_mode (val, outermode);
	      return GEN_INT (val);
	    }

	default:
	  break;
	}
    }
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      unsigned int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      if ((WORDS_BIG_ENDIAN || BYTES_BIG_ENDIAN)
	  && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode)
	  && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (innermostmode))
	{
	  /* Inner SUBREG is paradoxical, outer is not.  On big endian
	     we have to special case this.  */
	  if (SUBREG_BYTE (op))
	    abort ();	/* Can a paradoxical subreg have nonzero offset?  */
	  if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
	    final_offset = (byte - GET_MODE_SIZE (innermode)
			    + GET_MODE_SIZE (innermostmode));
	  else if (WORDS_BIG_ENDIAN)
	    final_offset = ((final_offset % UNITS_PER_WORD)
			    + ((byte - GET_MODE_SIZE (innermode)
				+ GET_MODE_SIZE (innermostmode))
			       * UNITS_PER_WORD) / UNITS_PER_WORD);
	  else
	    final_offset = (((final_offset * UNITS_PER_WORD)
			     / UNITS_PER_WORD)
			    + ((byte - GET_MODE_SIZE (innermode)
				+ GET_MODE_SIZE (innermostmode))
			       % UNITS_PER_WORD));
	}

      op = SUBREG_REG (op);

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, op, GET_MODE (op),
			     final_offset);
      if (new)
	return new;
      return gen_rtx_SUBREG (outermode, op, final_offset);
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (GET_CODE (op) == REG
      && REGNO (op) < FIRST_PSEUDO_REGISTER
      && REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM)
    {
      int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
					   0);

      if (HARD_REGNO_MODE_OK (final_regno, outermode))
	return gen_rtx_REG (outermode, final_regno);
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      && ! MEM_VOLATILE_P (op)
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    {
      rtx new;

      new = gen_rtx_MEM (outermode, plus_constant (XEXP (op, 0), byte));
      MEM_COPY_ATTRIBUTES (new, op);
      return new;
    }

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case '2':
    case 'c':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));

    default:
      return NULL;
    }
}