/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "hard-reg-set.h"
#include "insn-config.h"

/* Simplification and canonicalization of RTL.  */
/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
   a header file so that their definitions can be shared with the
   simplification routines in simplify-rtx.c.  Until then, do not
   change these macros without also changing the copy in simplify-rtx.c.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P, however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */

#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
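/* A small illustration (not from the original sources): with a 64-bit
   HOST_WIDE_INT, the value -5 is stored as low = 0xfffffffffffffffb, and
   HWI_SIGN_EXTEND (low) yields -1 for the high word; for low = 5 it
   yields 0, so the (low, high) pair reads as the same signed number.  */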
static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
						    const void *));
static rtx simplify_plus_minus PARAMS ((enum rtx_code,
					enum machine_mode, rtx,
					rtx, int));
static void check_fold_consts PARAMS ((PTR));
static void simplify_unary_real PARAMS ((PTR));
static void simplify_binary_real PARAMS ((PTR));
static void simplify_binary_is2orm1 PARAMS ((PTR));
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (mode, i)
     enum machine_mode mode;
     rtx i;
{
  return GEN_INT (trunc_int_for_mode (- INTVAL (i), mode));
}
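/* Worked example (illustrative only): in SImode, negating the CONST_INT
   for INT_MIN (-2147483648) mathematically gives +2147483648, which
   SImode cannot represent; trunc_int_for_mode wraps the result back to
   the canonical -2147483648, so the negation is well defined instead of
   overflowing.  */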
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    return simplify_plus_minus (code, mode, op0, op1, 1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (x)
     rtx x;
{
  rtx c, addr;
  enum machine_mode cmode;

  if (GET_CODE (x) != MEM)
    return x;
  addr = XEXP (x, 0);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (code, mode, cmp_mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     enum machine_mode cmp_mode;
     rtx op0, op1;
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (x, old, new)
     rtx x;
     rtx old;
     rtx new;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op = (XEXP (x, 0) == old
		  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

	return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
	simplify_gen_binary (code, mode,
			     simplify_replace_rtx (XEXP (x, 0), old, new),
			     simplify_replace_rtx (XEXP (x, 1), old, new));

    case '<':
      {
	enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
				     ? GET_MODE (XEXP (x, 0))
				     : GET_MODE (XEXP (x, 1)));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	return
	  simplify_gen_relational (code, mode,
				   (op_mode != VOIDmode
				    ? op_mode
				    : GET_MODE (op0) != VOIDmode
				    ? GET_MODE (op0)
				    : GET_MODE (op1)),
				   op0, op1);
      }

    case '3':
    case 'b':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

	return
	  simplify_gen_ternary (code, mode,
				(op_mode != VOIDmode
				 ? op_mode
				 : GET_MODE (op0)),
				op0,
				simplify_replace_rtx (XEXP (x, 1), old, new),
				simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  rtx exp;
	  exp = simplify_gen_subreg (GET_MODE (x),
				     simplify_replace_rtx (SUBREG_REG (x),
							   old, new),
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  if (exp)
	    x = exp;
	}
      return x;

    default:
      if (GET_CODE (x) == MEM)
	return
	  replace_equiv_address_nv (x,
				    simplify_replace_rtx (XEXP (x, 0),
							  old, new));

      return x;
    }
  return x;
}
/* Subroutine of simplify_unary_operation, called via do_float_handler.
   Handles simplification of unary ops on floating point values.  */

struct simplify_unary_real_args
{
  rtx operand;
  rtx result;
  enum machine_mode mode;
  enum rtx_code code;
  bool want_integer;
};

#define REAL_VALUE_ABS(d_) \
   (REAL_VALUE_NEGATIVE (d_) ? REAL_VALUE_NEGATE (d_) : (d_))
static void
simplify_unary_real (p)
     PTR p;
{
  REAL_VALUE_TYPE d;

  struct simplify_unary_real_args *args =
    (struct simplify_unary_real_args *) p;

  REAL_VALUE_FROM_CONST_DOUBLE (d, args->operand);

  if (args->want_integer)
    {
      HOST_WIDE_INT i;

      switch (args->code)
	{
	case FIX:		i = REAL_VALUE_FIX (d);		  break;
	case UNSIGNED_FIX:	i = REAL_VALUE_UNSIGNED_FIX (d);  break;
	default:
	  abort ();
	}
      args->result = GEN_INT (trunc_int_for_mode (i, args->mode));
    }
  else
    {
      switch (args->code)
	{
	case SQRT:
	  /* We don't attempt to optimize this.  */
	  args->result = 0;
	  return;

	case ABS:	      d = REAL_VALUE_ABS (d);			break;
	case NEG:	      d = REAL_VALUE_NEGATE (d);		break;
	case FLOAT_TRUNCATE:  d = real_value_truncate (args->mode, d);	break;
	case FLOAT_EXTEND:    /* All this does is change the mode.  */	break;
	case FIX:	      d = REAL_VALUE_RNDZINT (d);		break;
	case UNSIGNED_FIX:    d = REAL_VALUE_UNSIGNED_RNDZINT (d);	break;
	default:
	  abort ();
	}
      args->result = CONST_DOUBLE_FROM_REAL_VALUE (d, args->mode);
    }
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
	  else
	    lv = exact_log2 (l1 & (-l1)) + 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      struct simplify_unary_real_args args;
      args.operand = trueop;
      args.mode = mode;
      args.code = code;
      args.want_integer = false;

      if (do_float_handler (simplify_unary_real, (PTR) &args))
	return args.result;

      return 0;
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      struct simplify_unary_real_args args;
      args.operand = trueop;
      args.mode = mode;
      args.code = code;
      args.want_integer = true;

      if (do_float_handler (simplify_unary_real, (PTR) &args))
	return args.result;

      return 0;
    }
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return gen_rtx_fmt_ee (reversed,
				   op_mode, XEXP (op, 0), XEXP (op, 1));
	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	case ZERO_EXTEND:
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	default:
	  break;
	}

      return 0;
    }
}
/* Subroutine of simplify_binary_operation, called via do_float_handler.
   Handles simplification of binary ops on floating point values.  */

struct simplify_binary_real_args
{
  rtx trueop0, trueop1;
  rtx result;
  enum rtx_code code;
  enum machine_mode mode;
};

static void
simplify_binary_real (p)
     PTR p;
{
  REAL_VALUE_TYPE f0, f1, value;
  struct simplify_binary_real_args *args =
    (struct simplify_binary_real_args *) p;

  REAL_VALUE_FROM_CONST_DOUBLE (f0, args->trueop0);
  REAL_VALUE_FROM_CONST_DOUBLE (f1, args->trueop1);
  f0 = real_value_truncate (args->mode, f0);
  f1 = real_value_truncate (args->mode, f1);

#ifndef REAL_INFINITY
  if (args->code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
    {
      args->result = 0;
      return;
    }
#endif
  REAL_ARITHMETIC (value, rtx_to_tree_code (args->code), f0, f1);

  value = real_value_truncate (args->mode, value);
  args->result = CONST_DOUBLE_FROM_REAL_VALUE (value, args->mode);
}
/* Another subroutine called via do_float_handler.  This one tests
   the floating point value given against 2. and -1.  */

struct simplify_binary_is2orm1_args
{
  rtx value;
  bool is_2;
  bool is_m1;
};

static void
simplify_binary_is2orm1 (p)
     PTR p;
{
  REAL_VALUE_TYPE d;
  struct simplify_binary_is2orm1_args *args =
    (struct simplify_binary_is2orm1_args *) p;

  REAL_VALUE_FROM_CONST_DOUBLE (d, args->value);
  args->is_2 = REAL_VALUES_EQUAL (d, dconst2);
  args->is_m1 = REAL_VALUES_EQUAL (d, dconstm1);
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);
  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      struct simplify_binary_real_args args;
      args.trueop0 = trueop0;
      args.trueop1 = trueop1;
      args.mode = mode;
      args.code = code;

      if (do_float_handler (simplify_binary_real, (PTR) &args))
	return args.result;
      return 0;
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* .. fall through ...  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:   case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* In IEEE floating point, x+0 is not the same as x.  Similarly
	     for the other optimizations below.  */
	  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
	      && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
	    break;

	  if (trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return gen_rtx_NEG (mode, XEXP (op0, 0));
	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));
	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }
	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;
	  break;
	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;
	case MINUS:
	  /* None of these optimizations can be done for IEEE
	     floating point.  */
	  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
	      && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
	    break;

	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  */
	  if (trueop0 == CONST0_RTX (mode))
	    return gen_rtx_NEG (mode, op1);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return gen_rtx_NOT (mode, op1);

	  /* Subtracting 0 has no effect.  */
	  if (trueop1 == CONST0_RTX (mode))
	    return op0;
	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = - 1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }
	  /* (a - (-b)) -> (a + b).  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;
	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));
	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 1)));
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 0)));
	    }
	  break;
	case MULT:
	  if (trueop1 == constm1_rtx)
	    {
	      tem = simplify_unary_operation (NEG, mode, op0, mode);

	      return tem ? tem : gen_rtx_NEG (mode, op0);
	    }

	  /* In IEEE floating point, x*0 is not always 0.  */
	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for nans.
	     However, ANSI says we can drop signals,
	     so we can do this anyway.  */
	  if (trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT)
	    {
	      struct simplify_binary_is2orm1_args args;

	      args.value = trueop1;
	      if (! do_float_handler (simplify_binary_is2orm1, (PTR) &args))
		return 0;

	      /* x*2 is x+x and x*(-1) is -x */
	      if (args.is_2 && GET_MODE (op0) == mode)
		return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

	      else if (args.is_m1 && GET_MODE (op0) == mode)
		return gen_rtx_NEG (mode, op0);
	    }
	  break;
	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  break;
	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return gen_rtx_NOT (mode, op0);
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;
	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;
	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
	    return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

	  /* ... fall through ...  */

	case DIV:
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      /* On some platforms DIV uses narrower mode than its
		 operands.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      else
		return op0;
	    }
	  /* In IEEE floating point, 0/x is not always 0.  */
	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Change division by a constant into multiplication.  Only do
	     this with -funsafe-math-optimizations.  */
	  else if (GET_CODE (trueop1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
		   && trueop1 != CONST0_RTX (mode)
		   && flag_unsafe_math_optimizations)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  return gen_rtx_MULT (mode, op0,
				       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
		}
	    }
	  break;
	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && exact_log2 (INTVAL (trueop1)) > 0)
	    return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

	  /* ... fall through ...  */

	case MOD:
	  if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;
	case ROTATERT:
	case ROTATE:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* ... fall through ...  */

	case ASHIFT:
	case ASHIFTRT:
	case LSHIFTRT:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;
	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;
	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;
	case UMIN:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;
	case UMAX:
	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	default:
	  abort ();
	}

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }
  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;
    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;
    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (p1, p2)
     const void *p1;
     const void *p2;
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
	  - commutative_operand_precedence (d1->op));
}
static rtx
simplify_plus_minus (code, mode, op0, op1, force)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
     int force;
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);
= 0; i
< n_ops
; i
++)
1702 rtx this_op
= ops
[i
].op
;
1703 int this_neg
= ops
[i
].neg
;
1704 enum rtx_code this_code
= GET_CODE (this_op
);
1717 ops
[n_ops
].op
= XEXP (this_op
, 1);
1718 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
1721 ops
[i
].op
= XEXP (this_op
, 0);
1727 ops
[i
].op
= XEXP (this_op
, 0);
1728 ops
[i
].neg
= ! this_neg
;
1734 && GET_CODE (XEXP (this_op
, 0)) == PLUS
1735 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
1736 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
1738 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
1739 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
1740 ops
[n_ops
].neg
= this_neg
;
1748 /* ~a -> (-a - 1) */
1751 ops
[n_ops
].op
= constm1_rtx
;
1752 ops
[n_ops
++].neg
= this_neg
;
1753 ops
[i
].op
= XEXP (this_op
, 0);
1754 ops
[i
].neg
= !this_neg
;
1762 ops
[i
].op
= neg_const_int (mode
, this_op
);
  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;
  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  {
	    rtx lhs = ops[i].op, rhs = ops[j].op;
	    int lneg = ops[i].neg, rneg = ops[j].neg;

	    if (lhs != 0 && rhs != 0
		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs)
		    /* Don't allow -x + -1 -> ~x simplifications in the
		       first pass.  This allows us the chance to combine
		       the -1 with other constants.  */
		    && ! (first
			  && GET_CODE (tem) == NOT
			  && XEXP (tem, 0) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		  }
	      }
	  }

      first = 0;
    }
  while (changed);
  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }
  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;

  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	ops[i].neg = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
/* Argument block for check_fold_consts, passed through do_float_handler
   so that a floating point exception cannot crash the compiler.  */

struct cfc_args
{
  rtx op0, op1;			/* Input */
  int equal, op0lt, op1lt;	/* Output */
  int unordered;
};

static void
check_fold_consts (data)
     PTR data;
{
  struct cfc_args *args = (struct cfc_args *) data;
  REAL_VALUE_TYPE d0, d1;

  /* We may possibly raise an exception while reading the value.  */
  args->unordered = 1;
  REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
  REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);

  /* Comparisons of Inf versus Inf are ordered.  */
  if (REAL_VALUE_ISNAN (d0)
      || REAL_VALUE_ISNAN (d1))
    return;
  args->equal = REAL_VALUES_EQUAL (d0, d1);
  args->op0lt = REAL_VALUES_LESS (d0, d1);
  args->op1lt = REAL_VALUES_LESS (d1, d0);
  args->unordered = 0;
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;
  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases cannot be ignored.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;
  /* For non-IEEE floating-point, if the two operands are equal, we know the
     result.  */
  if (rtx_equal_p (trueop0, trueop1)
      && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	  || ! FLOAT_MODE_P (GET_MODE (trueop0))
	  || flag_unsafe_math_optimizations))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      struct cfc_args args;

      /* Set up input for check_fold_consts ().  */
      args.op0 = trueop0;
      args.op1 = trueop1;

      if (!do_float_handler (check_fold_consts, (PTR) &args))
	/* We may have raised an exception; cannot fold.  */
	return 0;

      if (args.unordered)
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      /* Receive output from check_fold_consts ().  */
      equal = args.equal;
      op0lt = op0ltu = args.op0lt;
      op1lt = op1ltu = args.op1lt;
    }
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (trueop0) == CONST_DOUBLE
	       || GET_CODE (trueop0) == CONST_INT)
	   && (GET_CODE (trueop1) == CONST_DOUBLE
	       || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  /* References to the frame plus a constant or labels cannot
	     be zero, but a SYMBOL_REF can due to #pragma weak.  */
	  if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
	       || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      /* On some machines, the ap reg can be 0 sometimes.  */
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const0_rtx;
	  break;

	case NE:
	  if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
	       || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (trueop1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (trueop1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	default:
	  break;
	}

      return 0;
    }
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	       && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	       && rtx_equal_p (XEXP (op0, 1), op1)
	       && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;
	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;
	  else if (temp)
	    op0 = temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplification is possible.  */

rtx
simplify_subreg (outermode, op, innermode, byte)
     rtx op;
     enum machine_mode outermode, innermode;
     unsigned int byte;
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;
2377 /* Attempt to simplify constant to non-SUBREG expression. */
2378 if (CONSTANT_P (op
))
2381 unsigned HOST_WIDE_INT val
= 0;
2383 /* ??? This code is partly redundant with code below, but can handle
2384 the subregs of floats and similar corner cases.
2385 Later it we should move all simplification code here and rewrite
2386 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2387 using SIMPLIFY_SUBREG. */
2388 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
2390 rtx
new = gen_lowpart_if_possible (outermode
, op
);
2395 /* Similar comment as above apply here. */
2396 if (GET_MODE_SIZE (outermode
) == UNITS_PER_WORD
2397 && GET_MODE_SIZE (innermode
) > UNITS_PER_WORD
2398 && GET_MODE_CLASS (outermode
) == MODE_INT
)
2400 rtx
new = constant_subword (op
,
2401 (byte
/ UNITS_PER_WORD
),
      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
	{
	case CONST_DOUBLE:
	  if (GET_MODE (op) != VOIDmode)
	    break;

	  /* We can't handle this case yet.  */
	  if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
	    return NULL_RTX;

	  part = offset >= HOST_BITS_PER_WIDE_INT;
	  if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
	       && BYTES_BIG_ENDIAN)
	      || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
		  && WORDS_BIG_ENDIAN))
	    part = !part;
	  val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
	  offset %= HOST_BITS_PER_WIDE_INT;

	  /* We've already picked the word we want from a double, so
	     pretend this is actually an integer.  */
	  innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
	case CONST_INT:
	  if (GET_CODE (op) == CONST_INT)
	    val = INTVAL (op);

	  /* We don't handle synthesizing of non-integral constants yet.  */
	  if (GET_MODE_CLASS (outermode) != MODE_INT)
	    return NULL_RTX;

	  if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
	    {
	      if (WORDS_BIG_ENDIAN)
		offset = (GET_MODE_BITSIZE (innermode)
			  - GET_MODE_BITSIZE (outermode) - offset);
	      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
		  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
		offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
			  - 2 * (offset % BITS_PER_WORD));
	    }
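	  /* Worked example (added commentary): extracting QImode byte 1
	     from an SImode 0x12345678 on a fully big-endian 32-bit
	     target: offset starts at 8 and becomes 32 - 8 - 8 == 16,
	     so val >> 16 yields 0x34, the byte that big-endian memory
	     order places at address 1.  */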
	  if (offset >= HOST_BITS_PER_WIDE_INT)
	    return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;

	  val >>= offset;
	  if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
	    val = trunc_int_for_mode (val, outermode);
	  return GEN_INT (val);

	default:
	  break;
	}
    }
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to op's starting mode.  */
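  /* Added commentary (illustrative): e.g. (subreg:QI (subreg:HI
     (reg:SI r) 0) 0) becomes (subreg:QI (reg:SI r) 0), and
     (subreg:SI (subreg:SI (reg:SI r) 0) 0) collapses to (reg:SI r).  */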
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  The irritating exception is a paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
			     GET_MODE (SUBREG_REG (op)),
			     final_offset);
      if (new)
	return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */
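  /* Added commentary (illustrative): e.g. on i386,
     (subreg:QI (reg:SI 0) 0) can be rewritten as (reg:QI 0), since
     the low byte of hard register 0 (%eax) is itself addressable as
     a QImode hard register (%al).  */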
  if (GET_CODE (op) == REG
      && (! REG_FUNCTION_VALUE_P (op)
	  || ! rtx_equal_function_value_matters)
#ifdef CLASS_CANNOT_CHANGE_MODE
      && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
	    && (TEST_HARD_REG_BIT
		(reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
		 REGNO (op))))
#endif
      && REGNO (op) < FIRST_PSEUDO_REGISTER
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	     ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM)
    {
      int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
					   0);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
	{
	  rtx x = gen_rtx_REG (outermode, final_regno);

	  /* Propagate the original regno.  We don't have any way to
	     specify the offset inside the original regno, so do so only
	     for the lowpart.  The information is used only by alias
	     analysis, which cannot grok a partial register anyway.  */
	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */
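  /* Added commentary (illustrative): e.g. (subreg:QI (mem:SI addr) 3)
     becomes a narrower memory reference at addr + 3 via
     adjust_address_nv, provided addr is not mode-dependent (e.g. no
     auto-increment) and we are not widening the access.  */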
  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
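  /* Added commentary (illustrative): for op == (concat:SC re im),
     byte 0 selects the real part "re" and byte GET_MODE_UNIT_SIZE
     (innermode) selects the imaginary part "im"; the recursion below
     then simplifies within the chosen half.  */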
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      /* We can at least simplify it by referring directly to the
	 relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */
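/* Added commentary (illustrative): unlike simplify_subreg, this entry
   point falls back to constructing (subreg:OUTERMODE OP BYTE) when no
   simplification applies, returning NULL_RTX only for QUEUED operands
   and for SUBREG or VOIDmode operands it could not fold.  */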
rtx
simplify_gen_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  rtx new;
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
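/* Added commentary (illustrative): the dispatch below keys on the rtx
   class letter -- '1' unary (e.g. NEG), 'c' commutative binary
   (e.g. PLUS), '2' other binary (e.g. MINUS), '3'/'b' ternary and
   bit-field, '<' comparisons, and 'x' for the lone SUBREG case.  A
   caller typically writes: tem = simplify_rtx (x); if (tem) x = tem;  */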
rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	{
	  rtx tem;

	  tem = XEXP (x, 0);
	  XEXP (x, 0) = XEXP (x, 1);
	  XEXP (x, 1) = tem;
	  return simplify_binary_operation (code, mode,
					    XEXP (x, 0), XEXP (x, 1));
	}

    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));
    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      return NULL;

    default:
      return NULL;
    }
}