1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "function.h"
35 #include "expr.h"
36 #include "toplev.h"
37 #include "output.h"
38 #include "ggc.h"
39
40 /* Simplification and canonicalization of RTL. */
41
42 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
43 virtual regs here because the simplify_*_operation routines are called
44 by integrate.c, which is called before virtual register instantiation.
45
46       ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
47       a header file so that their definitions can be shared by cse.c
48       and this file. Until then, do not change these macros without
49       also changing the copy in cse.c. */
50
51 #define FIXED_BASE_PLUS_P(X) \
52 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
53 || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
54 || (X) == virtual_stack_vars_rtx \
55 || (X) == virtual_incoming_args_rtx \
56 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
57 && (XEXP (X, 0) == frame_pointer_rtx \
58 || XEXP (X, 0) == hard_frame_pointer_rtx \
59          || (XEXP (X, 0) == arg_pointer_rtx \
60 && fixed_regs[ARG_POINTER_REGNUM]) \
61 || XEXP (X, 0) == virtual_stack_vars_rtx \
62 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
63 || GET_CODE (X) == ADDRESSOF)
64
65 /* Similar, but also allows reference to the stack pointer.
66
67      This used to include FIXED_BASE_PLUS_P; however, we can't assume that
68 arg_pointer_rtx by itself is nonzero, because on at least one machine,
69 the i960, the arg pointer is zero when it is unused. */
70
71 #define NONZERO_BASE_PLUS_P(X) \
72 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
73 || (X) == virtual_stack_vars_rtx \
74 || (X) == virtual_incoming_args_rtx \
75 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
76 && (XEXP (X, 0) == frame_pointer_rtx \
77 || XEXP (X, 0) == hard_frame_pointer_rtx \
78          || (XEXP (X, 0) == arg_pointer_rtx \
79 && fixed_regs[ARG_POINTER_REGNUM]) \
80 || XEXP (X, 0) == virtual_stack_vars_rtx \
81 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
82 || (X) == stack_pointer_rtx \
83 || (X) == virtual_stack_dynamic_rtx \
84 || (X) == virtual_outgoing_args_rtx \
85 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
86 && (XEXP (X, 0) == stack_pointer_rtx \
87 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
88 || XEXP (X, 0) == virtual_outgoing_args_rtx)) \
89 || GET_CODE (X) == ADDRESSOF)
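/* For instance, both macros accept (plus:SI frame_pointer_rtx
   (const_int -4)), while only NONZERO_BASE_PLUS_P accepts a bare
   stack_pointer_rtx.  (Illustrative RTL.)  */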
90
91 /* Much code operates on (low, high) pairs; the low value is an
92 unsigned wide int, the high value a signed wide int. We
93 occasionally need to sign extend from low to high as if low were a
94 signed wide int. */
95 #define HWI_SIGN_EXTEND(low) \
96 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
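/* As a worked example of the pairing (assuming a 32-bit
   HOST_WIDE_INT): the 64-bit value -5 is carried as low = 0xfffffffb,
   high = -1, and HWI_SIGN_EXTEND (0xfffffffb) yields -1 because the
   sign bit of the low word is set; HWI_SIGN_EXTEND (5) yields 0.  */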
97
98 static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
99 static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
100 const void *));
101 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
102 enum machine_mode, rtx,
103 rtx, int));
104 static void check_fold_consts PARAMS ((PTR));
105 static void simplify_unary_real PARAMS ((PTR));
106 static void simplify_binary_real PARAMS ((PTR));
107 static void simplify_binary_is2orm1 PARAMS ((PTR));
108
109 \f
110 /* Negate a CONST_INT rtx, truncating the result to its mode
111    (negating the most negative value overflows).  */
112 static rtx
113 neg_const_int (mode, i)
114 enum machine_mode mode;
115 rtx i;
116 {
117 return GEN_INT (trunc_int_for_mode (- INTVAL (i), mode));
118 }
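/* A quick illustration in QImode: negating (const_int 1) yields
   (const_int -1), while negating (const_int -128) overflows to 128,
   which trunc_int_for_mode wraps back to -128; hence the truncation
   noted above.  */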
119
120 \f
121 /* Make a binary operation by properly ordering the operands and
122 seeing if the expression folds. */
123
124 rtx
125 simplify_gen_binary (code, mode, op0, op1)
126 enum rtx_code code;
127 enum machine_mode mode;
128 rtx op0, op1;
129 {
130 rtx tem;
131
132 /* Put complex operands first and constants second if commutative. */
133 if (GET_RTX_CLASS (code) == 'c'
134 && swap_commutative_operands_p (op0, op1))
135 tem = op0, op0 = op1, op1 = tem;
136
137 /* If this simplifies, do it. */
138 tem = simplify_binary_operation (code, mode, op0, op1);
139 if (tem)
140 return tem;
141
142 /* Handle addition and subtraction specially. Otherwise, just form
143 the operation. */
144
145 if (code == PLUS || code == MINUS)
146 return simplify_plus_minus (code, mode, op0, op1, 1);
147 else
148 return gen_rtx_fmt_ee (code, mode, op0, op1);
149 }
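/* A sketch of typical use (hypothetical operands): calling
   simplify_gen_binary (PLUS, SImode, GEN_INT (2), reg) first swaps
   the operands so the register comes first; no fold applies, so
   simplify_plus_minus builds the canonical (plus:SI reg (const_int 2)).
   Two CONST_INT operands would instead fold to a single constant.  */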
150 \f
151 /* If X is a MEM referencing the constant pool, return the real value.
152 Otherwise return X. */
153 rtx
154 avoid_constant_pool_reference (x)
155 rtx x;
156 {
157 rtx c, addr;
158 enum machine_mode cmode;
159
160 if (GET_CODE (x) != MEM)
161 return x;
162 addr = XEXP (x, 0);
163
164 if (GET_CODE (addr) != SYMBOL_REF
165 || ! CONSTANT_POOL_ADDRESS_P (addr))
166 return x;
167
168 c = get_pool_constant (addr);
169 cmode = get_pool_mode (addr);
170
171   /* If we're accessing the constant in a different mode than the one it
172      was originally stored in, attempt to fix that up via subreg simplifications.
173 If that fails we have no choice but to return the original memory. */
174 if (cmode != GET_MODE (x))
175 {
176 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
177 return c ? c : x;
178 }
179
180 return c;
181 }
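/* For instance, if X is (mem/u:SF (symbol_ref ".LC0")) and the pool
   entry .LC0 holds an SFmode CONST_DOUBLE, that CONST_DOUBLE is
   returned in place of the memory reference; a read in some other
   mode goes through simplify_subreg first.  (The label name here is
   illustrative.)  */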
182 \f
183 /* Make a unary operation by first seeing if it folds and otherwise making
184 the specified operation. */
185
186 rtx
187 simplify_gen_unary (code, mode, op, op_mode)
188 enum rtx_code code;
189 enum machine_mode mode;
190 rtx op;
191 enum machine_mode op_mode;
192 {
193 rtx tem;
194
195 /* If this simplifies, use it. */
196 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
197 return tem;
198
199 return gen_rtx_fmt_e (code, mode, op);
200 }
201
202 /* Likewise for ternary operations. */
203
204 rtx
205 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
206 enum rtx_code code;
207 enum machine_mode mode, op0_mode;
208 rtx op0, op1, op2;
209 {
210 rtx tem;
211
212 /* If this simplifies, use it. */
213 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
214 op0, op1, op2)))
215 return tem;
216
217 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
218 }
219 \f
220 /* Likewise, for relational operations.
221    CMP_MODE specifies the mode in which the comparison
222    is done.  */
223
224 rtx
225 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
226 enum rtx_code code;
227 enum machine_mode mode;
228 enum machine_mode cmp_mode;
229 rtx op0, op1;
230 {
231 rtx tem;
232
233 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
234 return tem;
235
236 /* Put complex operands first and constants second. */
237 if (swap_commutative_operands_p (op0, op1))
238 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
239
240 return gen_rtx_fmt_ee (code, mode, op0, op1);
241 }
242 \f
243 /* Replace all occurrences of OLD in X with NEW and try to simplify the
244 resulting RTX. Return a new RTX which is as simplified as possible. */
245
246 rtx
247 simplify_replace_rtx (x, old, new)
248 rtx x;
249 rtx old;
250 rtx new;
251 {
252 enum rtx_code code = GET_CODE (x);
253 enum machine_mode mode = GET_MODE (x);
254
255 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
256 to build a new expression substituting recursively. If we can't do
257 anything, return our input. */
258
259 if (x == old)
260 return new;
261
262 switch (GET_RTX_CLASS (code))
263 {
264 case '1':
265 {
266 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
267 rtx op = (XEXP (x, 0) == old
268 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
269
270 return simplify_gen_unary (code, mode, op, op_mode);
271 }
272
273 case '2':
274 case 'c':
275 return
276 simplify_gen_binary (code, mode,
277 simplify_replace_rtx (XEXP (x, 0), old, new),
278 simplify_replace_rtx (XEXP (x, 1), old, new));
279 case '<':
280 {
281 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
282 ? GET_MODE (XEXP (x, 0))
283 : GET_MODE (XEXP (x, 1)));
284 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
285 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
286
287 return
288 simplify_gen_relational (code, mode,
289 (op_mode != VOIDmode
290 ? op_mode
291 : GET_MODE (op0) != VOIDmode
292 ? GET_MODE (op0)
293 : GET_MODE (op1)),
294 op0, op1);
295 }
296
297 case '3':
298 case 'b':
299 {
300 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
301 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
302
303 return
304 simplify_gen_ternary (code, mode,
305 (op_mode != VOIDmode
306 ? op_mode
307 : GET_MODE (op0)),
308 op0,
309 simplify_replace_rtx (XEXP (x, 1), old, new),
310 simplify_replace_rtx (XEXP (x, 2), old, new));
311 }
312
313 case 'x':
314 /* The only case we try to handle is a SUBREG. */
315 if (code == SUBREG)
316 {
317 rtx exp;
318 exp = simplify_gen_subreg (GET_MODE (x),
319 simplify_replace_rtx (SUBREG_REG (x),
320 old, new),
321 GET_MODE (SUBREG_REG (x)),
322 SUBREG_BYTE (x));
323 if (exp)
324 x = exp;
325 }
326 return x;
327
328 default:
329 if (GET_CODE (x) == MEM)
330 return
331 replace_equiv_address_nv (x,
332 simplify_replace_rtx (XEXP (x, 0),
333 old, new));
334
335 return x;
336 }
337 return x;
338 }
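/* An illustrative run, with made-up register numbers: for
   X = (plus:SI (reg:SI 58) (const_int 0)), OLD = (reg:SI 58) and
   NEW = (reg:SI 59), the binary-class case rebuilds the PLUS via
   simplify_gen_binary, which folds x+0, so the call returns just
   (reg:SI 59).  */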
339 \f
340 /* Subroutine of simplify_unary_operation, called via do_float_handler.
341 Handles simplification of unary ops on floating point values. */
342 struct simplify_unary_real_args
343 {
344 rtx operand;
345 rtx result;
346 enum machine_mode mode;
347 enum rtx_code code;
348 bool want_integer;
349 };
350 #define REAL_VALUE_ABS(d_) \
351 (REAL_VALUE_NEGATIVE (d_) ? REAL_VALUE_NEGATE (d_) : (d_))
352
353 static void
354 simplify_unary_real (p)
355 PTR p;
356 {
357 REAL_VALUE_TYPE d;
358
359 struct simplify_unary_real_args *args =
360 (struct simplify_unary_real_args *) p;
361
362 REAL_VALUE_FROM_CONST_DOUBLE (d, args->operand);
363
364 if (args->want_integer)
365 {
366 HOST_WIDE_INT i;
367
368 switch (args->code)
369 {
370 case FIX: i = REAL_VALUE_FIX (d); break;
371 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
372 default:
373 abort ();
374 }
375 args->result = GEN_INT (trunc_int_for_mode (i, args->mode));
376 }
377 else
378 {
379 switch (args->code)
380 {
381 case SQRT:
382 /* We don't attempt to optimize this. */
383 args->result = 0;
384 return;
385
386 case ABS: d = REAL_VALUE_ABS (d); break;
387 case NEG: d = REAL_VALUE_NEGATE (d); break;
388 case FLOAT_TRUNCATE: d = real_value_truncate (args->mode, d); break;
389 case FLOAT_EXTEND: /* All this does is change the mode. */ break;
390 case FIX: d = REAL_VALUE_RNDZINT (d); break;
391 case UNSIGNED_FIX: d = REAL_VALUE_UNSIGNED_RNDZINT (d); break;
392 default:
393 abort ();
394 }
395 args->result = CONST_DOUBLE_FROM_REAL_VALUE (d, args->mode);
396 }
397 }
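/* E.g. with code == FIX and an operand of 2.7 (illustrative), the
   want_integer path truncates toward zero via REAL_VALUE_FIX and
   produces (const_int 2); an operand of -2.7 likewise yields
   (const_int -2).  */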
398
399 /* Try to simplify a unary operation CODE whose output mode is to be
400 MODE with input operand OP whose mode was originally OP_MODE.
401 Return zero if no simplification can be made. */
402 rtx
403 simplify_unary_operation (code, mode, op, op_mode)
404 enum rtx_code code;
405 enum machine_mode mode;
406 rtx op;
407 enum machine_mode op_mode;
408 {
409 unsigned int width = GET_MODE_BITSIZE (mode);
410 rtx trueop = avoid_constant_pool_reference (op);
411
412 /* The order of these tests is critical so that, for example, we don't
413 check the wrong mode (input vs. output) for a conversion operation,
414 such as FIX. At some point, this should be simplified. */
415
416 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
417 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
418 {
419 HOST_WIDE_INT hv, lv;
420 REAL_VALUE_TYPE d;
421
422 if (GET_CODE (trueop) == CONST_INT)
423 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
424 else
425 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
426
427 REAL_VALUE_FROM_INT (d, lv, hv, mode);
428 d = real_value_truncate (mode, d);
429 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
430 }
431 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
432 && (GET_CODE (trueop) == CONST_DOUBLE
433 || GET_CODE (trueop) == CONST_INT))
434 {
435 HOST_WIDE_INT hv, lv;
436 REAL_VALUE_TYPE d;
437
438 if (GET_CODE (trueop) == CONST_INT)
439 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
440 else
441 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
442
443 if (op_mode == VOIDmode)
444 {
445 /* We don't know how to interpret negative-looking numbers in
446 this case, so don't try to fold those. */
447 if (hv < 0)
448 return 0;
449 }
450 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
451 ;
452 else
453 hv = 0, lv &= GET_MODE_MASK (op_mode);
454
455 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
456 d = real_value_truncate (mode, d);
457 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
458 }
459
460 if (GET_CODE (trueop) == CONST_INT
461 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
462 {
463 HOST_WIDE_INT arg0 = INTVAL (trueop);
464 HOST_WIDE_INT val;
465
466 switch (code)
467 {
468 case NOT:
469 val = ~ arg0;
470 break;
471
472 case NEG:
473 val = - arg0;
474 break;
475
476 case ABS:
477 val = (arg0 >= 0 ? arg0 : - arg0);
478 break;
479
480 case FFS:
481         /* Don't use ffs here. Instead, isolate the low-order bit and
482            compute its position. If arg0 is zero, this will return 0, as desired. */
483 arg0 &= GET_MODE_MASK (mode);
484 val = exact_log2 (arg0 & (- arg0)) + 1;
485 break;
486
487 case TRUNCATE:
488 val = arg0;
489 break;
490
491 case ZERO_EXTEND:
492 /* When zero-extending a CONST_INT, we need to know its
493 original mode. */
494 if (op_mode == VOIDmode)
495 abort ();
496 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
497 {
498 /* If we were really extending the mode,
499 we would have to distinguish between zero-extension
500 and sign-extension. */
501 if (width != GET_MODE_BITSIZE (op_mode))
502 abort ();
503 val = arg0;
504 }
505 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
506 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
507 else
508 return 0;
509 break;
510
511 case SIGN_EXTEND:
512 if (op_mode == VOIDmode)
513 op_mode = mode;
514 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
515 {
516 /* If we were really extending the mode,
517 we would have to distinguish between zero-extension
518 and sign-extension. */
519 if (width != GET_MODE_BITSIZE (op_mode))
520 abort ();
521 val = arg0;
522 }
523 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
524 {
525 val
526 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
527 if (val
528 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
529 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
530 }
531 else
532 return 0;
533 break;
534
535 case SQRT:
536 case FLOAT_EXTEND:
537 case FLOAT_TRUNCATE:
538 case SS_TRUNCATE:
539 case US_TRUNCATE:
540 return 0;
541
542 default:
543 abort ();
544 }
545
546 val = trunc_int_for_mode (val, mode);
547
548 return GEN_INT (val);
549 }
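/* Tracing the SIGN_EXTEND case above with op_mode == QImode and
   arg0 == 0xff (an illustrative input): the mask leaves val == 0xff,
   the QImode sign bit 0x80 is set, so 0x100 is subtracted and
   GEN_INT (-1) is returned, i.e. the correctly sign-extended value.  */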
550
551 /* We can do some operations on integer CONST_DOUBLEs. Also allow
552 for a DImode operation on a CONST_INT. */
553 else if (GET_MODE (trueop) == VOIDmode
554 && width <= HOST_BITS_PER_WIDE_INT * 2
555 && (GET_CODE (trueop) == CONST_DOUBLE
556 || GET_CODE (trueop) == CONST_INT))
557 {
558 unsigned HOST_WIDE_INT l1, lv;
559 HOST_WIDE_INT h1, hv;
560
561 if (GET_CODE (trueop) == CONST_DOUBLE)
562 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
563 else
564 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
565
566 switch (code)
567 {
568 case NOT:
569 lv = ~ l1;
570 hv = ~ h1;
571 break;
572
573 case NEG:
574 neg_double (l1, h1, &lv, &hv);
575 break;
576
577 case ABS:
578 if (h1 < 0)
579 neg_double (l1, h1, &lv, &hv);
580 else
581 lv = l1, hv = h1;
582 break;
583
584 case FFS:
585 hv = 0;
586 if (l1 == 0)
587 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
588 else
589 lv = exact_log2 (l1 & (-l1)) + 1;
590 break;
591
592 case TRUNCATE:
593 /* This is just a change-of-mode, so do nothing. */
594 lv = l1, hv = h1;
595 break;
596
597 case ZERO_EXTEND:
598 if (op_mode == VOIDmode)
599 abort ();
600
601 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
602 return 0;
603
604 hv = 0;
605 lv = l1 & GET_MODE_MASK (op_mode);
606 break;
607
608 case SIGN_EXTEND:
609 if (op_mode == VOIDmode
610 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
611 return 0;
612 else
613 {
614 lv = l1 & GET_MODE_MASK (op_mode);
615 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
616 && (lv & ((HOST_WIDE_INT) 1
617 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
618 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
619
620 hv = HWI_SIGN_EXTEND (lv);
621 }
622 break;
623
624 case SQRT:
625 return 0;
626
627 default:
628 return 0;
629 }
630
631 return immed_double_const (lv, hv, mode);
632 }
633
634 else if (GET_CODE (trueop) == CONST_DOUBLE
635 && GET_MODE_CLASS (mode) == MODE_FLOAT)
636 {
637 struct simplify_unary_real_args args;
638 args.operand = trueop;
639 args.mode = mode;
640 args.code = code;
641 args.want_integer = false;
642
643 if (do_float_handler (simplify_unary_real, (PTR) &args))
644 return args.result;
645
646 return 0;
647 }
648
649 else if (GET_CODE (trueop) == CONST_DOUBLE
650 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
651 && GET_MODE_CLASS (mode) == MODE_INT
652 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
653 {
654 struct simplify_unary_real_args args;
655 args.operand = trueop;
656 args.mode = mode;
657 args.code = code;
658 args.want_integer = true;
659
660 if (do_float_handler (simplify_unary_real, (PTR) &args))
661 return args.result;
662
663 return 0;
664 }
665
666 /* This was formerly used only for non-IEEE float.
667 eggert@twinsun.com says it is safe for IEEE also. */
668 else
669 {
670 enum rtx_code reversed;
671 /* There are some simplifications we can do even if the operands
672 aren't constant. */
673 switch (code)
674 {
675 case NOT:
676 /* (not (not X)) == X. */
677 if (GET_CODE (op) == NOT)
678 return XEXP (op, 0);
679
680 /* (not (eq X Y)) == (ne X Y), etc. */
681 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
682 && ((reversed = reversed_comparison_code (op, NULL_RTX))
683 != UNKNOWN))
684 return gen_rtx_fmt_ee (reversed,
685 op_mode, XEXP (op, 0), XEXP (op, 1));
686 break;
687
688 case NEG:
689 /* (neg (neg X)) == X. */
690 if (GET_CODE (op) == NEG)
691 return XEXP (op, 0);
692 break;
693
694 case SIGN_EXTEND:
695 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
696 becomes just the MINUS if its mode is MODE. This allows
697 folding switch statements on machines using casesi (such as
698 the VAX). */
699 if (GET_CODE (op) == TRUNCATE
700 && GET_MODE (XEXP (op, 0)) == mode
701 && GET_CODE (XEXP (op, 0)) == MINUS
702 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
703 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
704 return XEXP (op, 0);
705
706 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
707 if (! POINTERS_EXTEND_UNSIGNED
708 && mode == Pmode && GET_MODE (op) == ptr_mode
709 && (CONSTANT_P (op)
710 || (GET_CODE (op) == SUBREG
711 && GET_CODE (SUBREG_REG (op)) == REG
712 && REG_POINTER (SUBREG_REG (op))
713 && GET_MODE (SUBREG_REG (op)) == Pmode)))
714 return convert_memory_address (Pmode, op);
715 #endif
716 break;
717
718 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
719 case ZERO_EXTEND:
720 if (POINTERS_EXTEND_UNSIGNED > 0
721 && mode == Pmode && GET_MODE (op) == ptr_mode
722 && (CONSTANT_P (op)
723 || (GET_CODE (op) == SUBREG
724 && GET_CODE (SUBREG_REG (op)) == REG
725 && REG_POINTER (SUBREG_REG (op))
726 && GET_MODE (SUBREG_REG (op)) == Pmode)))
727 return convert_memory_address (Pmode, op);
728 break;
729 #endif
730
731 default:
732 break;
733 }
734
735 return 0;
736 }
737 }
738 \f
739 /* Subroutine of simplify_binary_operation, called via do_float_handler.
740 Handles simplification of binary ops on floating point values. */
741 struct simplify_binary_real_args
742 {
743 rtx trueop0, trueop1;
744 rtx result;
745 enum rtx_code code;
746 enum machine_mode mode;
747 };
748
749 static void
750 simplify_binary_real (p)
751 PTR p;
752 {
753 REAL_VALUE_TYPE f0, f1, value;
754 struct simplify_binary_real_args *args =
755 (struct simplify_binary_real_args *) p;
756
757 REAL_VALUE_FROM_CONST_DOUBLE (f0, args->trueop0);
758 REAL_VALUE_FROM_CONST_DOUBLE (f1, args->trueop1);
759 f0 = real_value_truncate (args->mode, f0);
760 f1 = real_value_truncate (args->mode, f1);
761
762 #ifndef REAL_INFINITY
763 if (args->code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
764 {
765 args->result = 0;
766 return;
767 }
768 #endif
769 REAL_ARITHMETIC (value, rtx_to_tree_code (args->code), f0, f1);
770
771 value = real_value_truncate (args->mode, value);
772 args->result = CONST_DOUBLE_FROM_REAL_VALUE (value, args->mode);
773 }
774
775 /* Another subroutine called via do_float_handler. This one tests
776    the given floating point value against 2.0 and -1.0. */
777 struct simplify_binary_is2orm1_args
778 {
779 rtx value;
780 bool is_2;
781 bool is_m1;
782 };
783
784 static void
785 simplify_binary_is2orm1 (p)
786 PTR p;
787 {
788 REAL_VALUE_TYPE d;
789 struct simplify_binary_is2orm1_args *args =
790 (struct simplify_binary_is2orm1_args *) p;
791
792 REAL_VALUE_FROM_CONST_DOUBLE (d, args->value);
793 args->is_2 = REAL_VALUES_EQUAL (d, dconst2);
794 args->is_m1 = REAL_VALUES_EQUAL (d, dconstm1);
795 }
796
797 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
798 and OP1. Return 0 if no simplification is possible.
799
800 Don't use this for relational operations such as EQ or LT.
801 Use simplify_relational_operation instead. */
802 rtx
803 simplify_binary_operation (code, mode, op0, op1)
804 enum rtx_code code;
805 enum machine_mode mode;
806 rtx op0, op1;
807 {
808 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
809 HOST_WIDE_INT val;
810 unsigned int width = GET_MODE_BITSIZE (mode);
811 rtx tem;
812 rtx trueop0 = avoid_constant_pool_reference (op0);
813 rtx trueop1 = avoid_constant_pool_reference (op1);
814
815 /* Relational operations don't work here. We must know the mode
816 of the operands in order to do the comparison correctly.
817 Assuming a full word can give incorrect results.
818 Consider comparing 128 with -128 in QImode. */
819
820 if (GET_RTX_CLASS (code) == '<')
821 abort ();
822
823 /* Make sure the constant is second. */
824 if (GET_RTX_CLASS (code) == 'c'
825 && swap_commutative_operands_p (trueop0, trueop1))
826 {
827 tem = op0, op0 = op1, op1 = tem;
828 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
829 }
830
831 if (GET_MODE_CLASS (mode) == MODE_FLOAT
832 && GET_CODE (trueop0) == CONST_DOUBLE
833 && GET_CODE (trueop1) == CONST_DOUBLE
834 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
835 {
836 struct simplify_binary_real_args args;
837 args.trueop0 = trueop0;
838 args.trueop1 = trueop1;
839 args.mode = mode;
840 args.code = code;
841
842 if (do_float_handler (simplify_binary_real, (PTR) &args))
843 return args.result;
844 return 0;
845 }
846
847 /* We can fold some multi-word operations. */
848 if (GET_MODE_CLASS (mode) == MODE_INT
849 && width == HOST_BITS_PER_WIDE_INT * 2
850 && (GET_CODE (trueop0) == CONST_DOUBLE
851 || GET_CODE (trueop0) == CONST_INT)
852 && (GET_CODE (trueop1) == CONST_DOUBLE
853 || GET_CODE (trueop1) == CONST_INT))
854 {
855 unsigned HOST_WIDE_INT l1, l2, lv;
856 HOST_WIDE_INT h1, h2, hv;
857
858 if (GET_CODE (trueop0) == CONST_DOUBLE)
859 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
860 else
861 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
862
863 if (GET_CODE (trueop1) == CONST_DOUBLE)
864 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
865 else
866 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
867
868 switch (code)
869 {
870 case MINUS:
871 /* A - B == A + (-B). */
872 neg_double (l2, h2, &lv, &hv);
873 l2 = lv, h2 = hv;
874
875        /* ... fall through ... */
876
877 case PLUS:
878 add_double (l1, h1, l2, h2, &lv, &hv);
879 break;
880
881 case MULT:
882 mul_double (l1, h1, l2, h2, &lv, &hv);
883 break;
884
885 case DIV: case MOD: case UDIV: case UMOD:
886 /* We'd need to include tree.h to do this and it doesn't seem worth
887 it. */
888 return 0;
889
890 case AND:
891 lv = l1 & l2, hv = h1 & h2;
892 break;
893
894 case IOR:
895 lv = l1 | l2, hv = h1 | h2;
896 break;
897
898 case XOR:
899 lv = l1 ^ l2, hv = h1 ^ h2;
900 break;
901
902 case SMIN:
903 if (h1 < h2
904 || (h1 == h2
905 && ((unsigned HOST_WIDE_INT) l1
906 < (unsigned HOST_WIDE_INT) l2)))
907 lv = l1, hv = h1;
908 else
909 lv = l2, hv = h2;
910 break;
911
912 case SMAX:
913 if (h1 > h2
914 || (h1 == h2
915 && ((unsigned HOST_WIDE_INT) l1
916 > (unsigned HOST_WIDE_INT) l2)))
917 lv = l1, hv = h1;
918 else
919 lv = l2, hv = h2;
920 break;
921
922 case UMIN:
923 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
924 || (h1 == h2
925 && ((unsigned HOST_WIDE_INT) l1
926 < (unsigned HOST_WIDE_INT) l2)))
927 lv = l1, hv = h1;
928 else
929 lv = l2, hv = h2;
930 break;
931
932 case UMAX:
933 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
934 || (h1 == h2
935 && ((unsigned HOST_WIDE_INT) l1
936 > (unsigned HOST_WIDE_INT) l2)))
937 lv = l1, hv = h1;
938 else
939 lv = l2, hv = h2;
940 break;
941
942 case LSHIFTRT: case ASHIFTRT:
943 case ASHIFT:
944 case ROTATE: case ROTATERT:
945 #ifdef SHIFT_COUNT_TRUNCATED
946 if (SHIFT_COUNT_TRUNCATED)
947 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
948 #endif
949
950 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
951 return 0;
952
953 if (code == LSHIFTRT || code == ASHIFTRT)
954 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
955 code == ASHIFTRT);
956 else if (code == ASHIFT)
957 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
958 else if (code == ROTATE)
959 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
960 else /* code == ROTATERT */
961 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
962 break;
963
964 default:
965 return 0;
966 }
967
968 return immed_double_const (lv, hv, mode);
969 }
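/* A worked case, assuming a 32-bit HOST_WIDE_INT (so this block
   handles 64-bit modes): adding (l1,h1) = (0xffffffff, 0) to
   (l2,h2) = (1, 0) carries between the words; add_double returns
   (lv,hv) = (0, 1), which immed_double_const packages as the DImode
   constant 0x100000000.  */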
970
971 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
972 || width > HOST_BITS_PER_WIDE_INT || width == 0)
973 {
974 /* Even if we can't compute a constant result,
975 there are some cases worth simplifying. */
976
977 switch (code)
978 {
979 case PLUS:
980          /* In IEEE floating point, x+0 is not the same as x when x is
981             -0.0. Similarly for the other optimizations below. */
982 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
983 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
984 break;
985
986 if (trueop1 == CONST0_RTX (mode))
987 return op0;
988
989 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
990 if (GET_CODE (op0) == NEG)
991 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
992 else if (GET_CODE (op1) == NEG)
993 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
994
995 /* (~a) + 1 -> -a */
996 if (INTEGRAL_MODE_P (mode)
997 && GET_CODE (op0) == NOT
998 && trueop1 == const1_rtx)
999 return gen_rtx_NEG (mode, XEXP (op0, 0));
1000
1001 /* Handle both-operands-constant cases. We can only add
1002 CONST_INTs to constants since the sum of relocatable symbols
1003 can't be handled by most assemblers. Don't add CONST_INT
1004            to CONST_INT since overflow won't be computed properly if the
1005            mode is wider than HOST_BITS_PER_WIDE_INT. */
1006
1007 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1008 && GET_CODE (op1) == CONST_INT)
1009 return plus_constant (op0, INTVAL (op1));
1010 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1011 && GET_CODE (op0) == CONST_INT)
1012 return plus_constant (op1, INTVAL (op0));
1013
1014 /* See if this is something like X * C - X or vice versa or
1015 if the multiplication is written as a shift. If so, we can
1016 distribute and make a new multiply, shift, or maybe just
1017            have X (if C is 2 in the example above). But don't make a
1018            real multiply if we didn't have one before. */
1019
1020 if (! FLOAT_MODE_P (mode))
1021 {
1022 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1023 rtx lhs = op0, rhs = op1;
1024 int had_mult = 0;
1025
1026 if (GET_CODE (lhs) == NEG)
1027 coeff0 = -1, lhs = XEXP (lhs, 0);
1028 else if (GET_CODE (lhs) == MULT
1029 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1030 {
1031 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1032 had_mult = 1;
1033 }
1034 else if (GET_CODE (lhs) == ASHIFT
1035 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1036 && INTVAL (XEXP (lhs, 1)) >= 0
1037 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1038 {
1039 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1040 lhs = XEXP (lhs, 0);
1041 }
1042
1043 if (GET_CODE (rhs) == NEG)
1044 coeff1 = -1, rhs = XEXP (rhs, 0);
1045 else if (GET_CODE (rhs) == MULT
1046 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1047 {
1048 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1049 had_mult = 1;
1050 }
1051 else if (GET_CODE (rhs) == ASHIFT
1052 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1053 && INTVAL (XEXP (rhs, 1)) >= 0
1054 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1055 {
1056 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1057 rhs = XEXP (rhs, 0);
1058 }
1059
1060 if (rtx_equal_p (lhs, rhs))
1061 {
1062 tem = simplify_gen_binary (MULT, mode, lhs,
1063 GEN_INT (coeff0 + coeff1));
1064 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1065 }
1066 }
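          /* E.g. (plus (mult r 3) r) reaches here with coeff0 == 3,
             coeff1 == 1 and equal operands, so it becomes (mult r 4);
             but (plus r r) is left alone, since rewriting it as
             (mult r 2) would create a multiply we never had.  */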
1067
1068 /* If one of the operands is a PLUS or a MINUS, see if we can
1069 simplify this by the associative law.
1070 Don't use the associative law for floating point.
1071 The inaccuracy makes it nonassociative,
1072 and subtle programs can break if operations are associated. */
1073
1074 if (INTEGRAL_MODE_P (mode)
1075 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1076 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1077 || (GET_CODE (op0) == CONST
1078 && GET_CODE (XEXP (op0, 0)) == PLUS)
1079 || (GET_CODE (op1) == CONST
1080 && GET_CODE (XEXP (op1, 0)) == PLUS))
1081 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1082 return tem;
1083 break;
1084
1085 case COMPARE:
1086 #ifdef HAVE_cc0
1087 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1088 using cc0, in which case we want to leave it as a COMPARE
1089 so we can distinguish it from a register-register-copy.
1090
1091 In IEEE floating point, x-0 is not the same as x. */
1092
1093 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1094 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1095 && trueop1 == CONST0_RTX (mode))
1096 return op0;
1097 #endif
1098
1099 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1100 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1101 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1102 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1103 {
1104 rtx xop00 = XEXP (op0, 0);
1105 rtx xop10 = XEXP (op1, 0);
1106
1107 #ifdef HAVE_cc0
1108 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1109 #else
1110 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1111 && GET_MODE (xop00) == GET_MODE (xop10)
1112 && REGNO (xop00) == REGNO (xop10)
1113 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1114 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1115 #endif
1116 return xop00;
1117 }
1118 break;
1119
1120 case MINUS:
1121 /* None of these optimizations can be done for IEEE
1122 floating point. */
1123 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1124 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
1125 break;
1126
1127 /* We can't assume x-x is 0 even with non-IEEE floating point,
1128 but since it is zero except in very strange circumstances, we
1129 will treat it as zero with -funsafe-math-optimizations. */
1130 if (rtx_equal_p (trueop0, trueop1)
1131 && ! side_effects_p (op0)
1132 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1133 return CONST0_RTX (mode);
1134
1135 /* Change subtraction from zero into negation. */
1136 if (trueop0 == CONST0_RTX (mode))
1137 return gen_rtx_NEG (mode, op1);
1138
1139 /* (-1 - a) is ~a. */
1140 if (trueop0 == constm1_rtx)
1141 return gen_rtx_NOT (mode, op1);
1142
1143 /* Subtracting 0 has no effect. */
1144 if (trueop1 == CONST0_RTX (mode))
1145 return op0;
1146
1147 /* See if this is something like X * C - X or vice versa or
1148 if the multiplication is written as a shift. If so, we can
1149 distribute and make a new multiply, shift, or maybe just
1150            have X (if C is 2 in the example above). But don't make a
1151            real multiply if we didn't have one before. */
1152
1153 if (! FLOAT_MODE_P (mode))
1154 {
1155 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1156 rtx lhs = op0, rhs = op1;
1157 int had_mult = 0;
1158
1159 if (GET_CODE (lhs) == NEG)
1160 coeff0 = -1, lhs = XEXP (lhs, 0);
1161 else if (GET_CODE (lhs) == MULT
1162 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1163 {
1164 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1165 had_mult = 1;
1166 }
1167 else if (GET_CODE (lhs) == ASHIFT
1168 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1169 && INTVAL (XEXP (lhs, 1)) >= 0
1170 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1171 {
1172 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1173 lhs = XEXP (lhs, 0);
1174 }
1175
1176 if (GET_CODE (rhs) == NEG)
1177 coeff1 = - 1, rhs = XEXP (rhs, 0);
1178 else if (GET_CODE (rhs) == MULT
1179 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1180 {
1181 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1182 had_mult = 1;
1183 }
1184 else if (GET_CODE (rhs) == ASHIFT
1185 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1186 && INTVAL (XEXP (rhs, 1)) >= 0
1187 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1188 {
1189 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1190 rhs = XEXP (rhs, 0);
1191 }
1192
1193 if (rtx_equal_p (lhs, rhs))
1194 {
1195 tem = simplify_gen_binary (MULT, mode, lhs,
1196 GEN_INT (coeff0 - coeff1));
1197 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1198 }
1199 }
1200
1201 /* (a - (-b)) -> (a + b). */
1202 if (GET_CODE (op1) == NEG)
1203 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1204
1205 /* If one of the operands is a PLUS or a MINUS, see if we can
1206 simplify this by the associative law.
1207 Don't use the associative law for floating point.
1208 The inaccuracy makes it nonassociative,
1209 and subtle programs can break if operations are associated. */
1210
1211 if (INTEGRAL_MODE_P (mode)
1212 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1213 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1214 || (GET_CODE (op0) == CONST
1215 && GET_CODE (XEXP (op0, 0)) == PLUS)
1216 || (GET_CODE (op1) == CONST
1217 && GET_CODE (XEXP (op1, 0)) == PLUS))
1218 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1219 return tem;
1220
1221 /* Don't let a relocatable value get a negative coeff. */
1222 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1223 return simplify_gen_binary (PLUS, mode,
1224 op0,
1225 neg_const_int (mode, op1));
1226
1227 /* (x - (x & y)) -> (x & ~y) */
1228 if (GET_CODE (op1) == AND)
1229 {
1230 if (rtx_equal_p (op0, XEXP (op1, 0)))
1231 return simplify_gen_binary (AND, mode, op0,
1232 gen_rtx_NOT (mode, XEXP (op1, 1)));
1233 if (rtx_equal_p (op0, XEXP (op1, 1)))
1234 return simplify_gen_binary (AND, mode, op0,
1235 gen_rtx_NOT (mode, XEXP (op1, 0)));
1236 }
1237 break;
1238
1239 case MULT:
1240 if (trueop1 == constm1_rtx)
1241 {
1242 tem = simplify_unary_operation (NEG, mode, op0, mode);
1243
1244 return tem ? tem : gen_rtx_NEG (mode, op0);
1245 }
1246
1247         /* In IEEE floating point, x*0 is not always 0 (consider NaNs,
                infinities, and negative x). */
1248 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1249 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1250 && trueop1 == CONST0_RTX (mode)
1251 && ! side_effects_p (op0))
1252 return op1;
1253
1254         /* In IEEE floating point, x*1 is not equivalent to x for NaNs.
1255            However, ANSI C says we can drop signals,
1256 so we can do this anyway. */
1257 if (trueop1 == CONST1_RTX (mode))
1258 return op0;
1259
1260 /* Convert multiply by constant power of two into shift unless
1261 we are still generating RTL. This test is a kludge. */
1262 if (GET_CODE (trueop1) == CONST_INT
1263 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1264 /* If the mode is larger than the host word size, and the
1265 uppermost bit is set, then this isn't a power of two due
1266 to implicit sign extension. */
1267 && (width <= HOST_BITS_PER_WIDE_INT
1268 || val != HOST_BITS_PER_WIDE_INT - 1)
1269 && ! rtx_equal_function_value_matters)
1270 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1271
1272 if (GET_CODE (trueop1) == CONST_DOUBLE
1273 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT)
1274 {
1275 struct simplify_binary_is2orm1_args args;
1276
1277 args.value = trueop1;
1278 if (! do_float_handler (simplify_binary_is2orm1, (PTR) &args))
1279 return 0;
1280
1281 /* x*2 is x+x and x*(-1) is -x */
1282 if (args.is_2 && GET_MODE (op0) == mode)
1283 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1284
1285 else if (args.is_m1 && GET_MODE (op0) == mode)
1286 return gen_rtx_NEG (mode, op0);
1287 }
1288 break;
1289
1290 case IOR:
1291 if (trueop1 == const0_rtx)
1292 return op0;
1293 if (GET_CODE (trueop1) == CONST_INT
1294 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1295 == GET_MODE_MASK (mode)))
1296 return op1;
1297 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1298 return op0;
1299 /* A | (~A) -> -1 */
1300 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1301 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1302 && ! side_effects_p (op0)
1303 && GET_MODE_CLASS (mode) != MODE_CC)
1304 return constm1_rtx;
1305 break;
1306
1307 case XOR:
1308 if (trueop1 == const0_rtx)
1309 return op0;
1310 if (GET_CODE (trueop1) == CONST_INT
1311 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1312 == GET_MODE_MASK (mode)))
1313 return gen_rtx_NOT (mode, op0);
1314 if (trueop0 == trueop1 && ! side_effects_p (op0)
1315 && GET_MODE_CLASS (mode) != MODE_CC)
1316 return const0_rtx;
1317 break;
1318
1319 case AND:
1320 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1321 return const0_rtx;
1322 if (GET_CODE (trueop1) == CONST_INT
1323 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1324 == GET_MODE_MASK (mode)))
1325 return op0;
1326 if (trueop0 == trueop1 && ! side_effects_p (op0)
1327 && GET_MODE_CLASS (mode) != MODE_CC)
1328 return op0;
1329 /* A & (~A) -> 0 */
1330 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1331 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1332 && ! side_effects_p (op0)
1333 && GET_MODE_CLASS (mode) != MODE_CC)
1334 return const0_rtx;
1335 break;
1336
1337 case UDIV:
1338 /* Convert divide by power of two into shift (divide by 1 handled
1339 below). */
1340 if (GET_CODE (trueop1) == CONST_INT
1341 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1342 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1343
1344 /* ... fall through ... */
1345
1346 case DIV:
1347 if (trueop1 == CONST1_RTX (mode))
1348 {
1349            /* On some platforms DIV uses a narrower mode than its
1350               operands. */
1351 rtx x = gen_lowpart_common (mode, op0);
1352 if (x)
1353 return x;
1354 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1355 return gen_lowpart_SUBREG (mode, op0);
1356 else
1357 return op0;
1358 }
1359
1360         /* In IEEE floating point, 0/x is not always 0 (consider x == 0,
                negative x, and NaNs). */
1361 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1362 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1363 && trueop0 == CONST0_RTX (mode)
1364 && ! side_effects_p (op1))
1365 return op0;
1366
1367 /* Change division by a constant into multiplication. Only do
1368 this with -funsafe-math-optimizations. */
1369 else if (GET_CODE (trueop1) == CONST_DOUBLE
1370 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1371 && trueop1 != CONST0_RTX (mode)
1372 && flag_unsafe_math_optimizations)
1373 {
1374 REAL_VALUE_TYPE d;
1375 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1376
1377 if (! REAL_VALUES_EQUAL (d, dconst0))
1378 {
1379 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1380 return gen_rtx_MULT (mode, op0,
1381 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1382 }
1383 }
1384 break;
1385
1386 case UMOD:
1387 /* Handle modulus by power of two (mod with 1 handled below). */
1388 if (GET_CODE (trueop1) == CONST_INT
1389 && exact_log2 (INTVAL (trueop1)) > 0)
1390 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1391
1392 /* ... fall through ... */
1393
1394 case MOD:
1395 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1396 && ! side_effects_p (op0) && ! side_effects_p (op1))
1397 return const0_rtx;
1398 break;
1399
1400 case ROTATERT:
1401 case ROTATE:
1402 /* Rotating ~0 always results in ~0. */
1403 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1404 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1405 && ! side_effects_p (op1))
1406 return op0;
1407
1408 /* ... fall through ... */
1409
1410 case ASHIFT:
1411 case ASHIFTRT:
1412 case LSHIFTRT:
1413 if (trueop1 == const0_rtx)
1414 return op0;
1415 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1416 return op0;
1417 break;
1418
1419 case SMIN:
1420 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1421 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1422 && ! side_effects_p (op0))
1423 return op1;
1424 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1425 return op0;
1426 break;
1427
1428 case SMAX:
1429 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1430 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1431 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1432 && ! side_effects_p (op0))
1433 return op1;
1434 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1435 return op0;
1436 break;
1437
1438 case UMIN:
1439 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1440 return op1;
1441 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1442 return op0;
1443 break;
1444
1445 case UMAX:
1446 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1447 return op1;
1448 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1449 return op0;
1450 break;
1451
1452 case SS_PLUS:
1453 case US_PLUS:
1454 case SS_MINUS:
1455 case US_MINUS:
1456 /* ??? There are simplifications that can be done. */
1457 return 0;
1458
1459 default:
1460 abort ();
1461 }
1462
1463 return 0;
1464 }
1465
1466 /* Get the integer argument values in two forms:
1467 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1468
1469 arg0 = INTVAL (trueop0);
1470 arg1 = INTVAL (trueop1);
1471
1472 if (width < HOST_BITS_PER_WIDE_INT)
1473 {
1474 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1475 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1476
1477 arg0s = arg0;
1478 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1479 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1480
1481 arg1s = arg1;
1482 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1483 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1484 }
1485 else
1486 {
1487 arg0s = arg0;
1488 arg1s = arg1;
1489 }
1490
1491 /* Compute the value of the arithmetic. */
1492
1493 switch (code)
1494 {
1495 case PLUS:
1496 val = arg0s + arg1s;
1497 break;
1498
1499 case MINUS:
1500 val = arg0s - arg1s;
1501 break;
1502
1503 case MULT:
1504 val = arg0s * arg1s;
1505 break;
1506
1507 case DIV:
1508 if (arg1s == 0
1509 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1510 && arg1s == -1))
1511 return 0;
1512 val = arg0s / arg1s;
1513 break;
1514
1515 case MOD:
1516 if (arg1s == 0
1517 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1518 && arg1s == -1))
1519 return 0;
1520 val = arg0s % arg1s;
1521 break;
1522
1523 case UDIV:
1524 if (arg1 == 0
1525 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1526 && arg1s == -1))
1527 return 0;
1528 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1529 break;
1530
1531 case UMOD:
1532 if (arg1 == 0
1533 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1534 && arg1s == -1))
1535 return 0;
1536 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1537 break;
1538
1539 case AND:
1540 val = arg0 & arg1;
1541 break;
1542
1543 case IOR:
1544 val = arg0 | arg1;
1545 break;
1546
1547 case XOR:
1548 val = arg0 ^ arg1;
1549 break;
1550
1551 case LSHIFTRT:
1552         /* If the shift count is undefined, don't fold it; let the machine
1553            do what it wants. But truncate it if the machine will do that. */
1554 if (arg1 < 0)
1555 return 0;
1556
1557 #ifdef SHIFT_COUNT_TRUNCATED
1558 if (SHIFT_COUNT_TRUNCATED)
1559 arg1 %= width;
1560 #endif
1561
1562 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1563 break;
1564
1565 case ASHIFT:
1566 if (arg1 < 0)
1567 return 0;
1568
1569 #ifdef SHIFT_COUNT_TRUNCATED
1570 if (SHIFT_COUNT_TRUNCATED)
1571 arg1 %= width;
1572 #endif
1573
1574 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1575 break;
1576
1577 case ASHIFTRT:
1578 if (arg1 < 0)
1579 return 0;
1580
1581 #ifdef SHIFT_COUNT_TRUNCATED
1582 if (SHIFT_COUNT_TRUNCATED)
1583 arg1 %= width;
1584 #endif
1585
1586 val = arg0s >> arg1;
1587
1588         /* The bootstrap compiler may not have sign extended the right shift.
1589            Manually extend the sign to ensure bootstrap cc matches gcc. */
1590 if (arg0s < 0 && arg1 > 0)
1591 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1592
1593 break;
1594
1595 case ROTATERT:
1596 if (arg1 < 0)
1597 return 0;
1598
1599 arg1 %= width;
1600 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1601 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1602 break;
1603
1604 case ROTATE:
1605 if (arg1 < 0)
1606 return 0;
1607
1608 arg1 %= width;
1609 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1610 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1611 break;
1612
1613 case COMPARE:
1614 /* Do nothing here. */
1615 return 0;
1616
1617 case SMIN:
1618 val = arg0s <= arg1s ? arg0s : arg1s;
1619 break;
1620
1621 case UMIN:
1622 val = ((unsigned HOST_WIDE_INT) arg0
1623 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1624 break;
1625
1626 case SMAX:
1627 val = arg0s > arg1s ? arg0s : arg1s;
1628 break;
1629
1630 case UMAX:
1631 val = ((unsigned HOST_WIDE_INT) arg0
1632 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1633 break;
1634
1635 default:
1636 abort ();
1637 }
1638
1639 val = trunc_int_for_mode (val, mode);
1640
1641 return GEN_INT (val);
1642 }
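/* Spot checks of the constant arithmetic above, in SImode with
   illustrative operands: (ashiftrt (const_int -8) (const_int 1))
   shifts the sign-extended form and re-extends the sign manually,
   giving (const_int -4); (udiv (const_int 7) (const_int 2)) divides
   the zero-extended forms and yields (const_int 3).  */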
1643 \f
1644 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1645 PLUS or MINUS.
1646
1647      Rather than test for specific cases, we do this by a brute-force method
1648 and do all possible simplifications until no more changes occur. Then
1649 we rebuild the operation.
1650
1651 If FORCE is true, then always generate the rtx. This is used to
1652 canonicalize stuff emitted from simplify_gen_binary. */
1653
1654 struct simplify_plus_minus_op_data
1655 {
1656 rtx op;
1657 int neg;
1658 };
1659
1660 static int
1661 simplify_plus_minus_op_data_cmp (p1, p2)
1662 const void *p1;
1663 const void *p2;
1664 {
1665 const struct simplify_plus_minus_op_data *d1 = p1;
1666 const struct simplify_plus_minus_op_data *d2 = p2;
1667
1668 return (commutative_operand_precedence (d2->op)
1669 - commutative_operand_precedence (d1->op));
1670 }
1671
1672 static rtx
1673 simplify_plus_minus (code, mode, op0, op1, force)
1674 enum rtx_code code;
1675 enum machine_mode mode;
1676 rtx op0, op1;
1677 int force;
1678 {
1679 struct simplify_plus_minus_op_data ops[8];
1680 rtx result, tem;
1681 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
1682 int first, negate, changed;
1683 int i, j;
1684
1685 memset ((char *) ops, 0, sizeof ops);
1686
1687 /* Set up the two operands and then expand them until nothing has been
1688 changed. If we run out of room in our array, give up; this should
1689 almost never happen. */
1690
1691 ops[0].op = op0;
1692 ops[0].neg = 0;
1693 ops[1].op = op1;
1694 ops[1].neg = (code == MINUS);
1695
1696 do
1697 {
1698 changed = 0;
1699
1700 for (i = 0; i < n_ops; i++)
1701 {
1702 rtx this_op = ops[i].op;
1703 int this_neg = ops[i].neg;
1704 enum rtx_code this_code = GET_CODE (this_op);
1705
1706 switch (this_code)
1707 {
1708 case PLUS:
1709 case MINUS:
1710 if (n_ops == 7)
1711 {
1712 if (force)
1713 abort ();
1714 return NULL_RTX;
1715 }
1716
1717 ops[n_ops].op = XEXP (this_op, 1);
1718 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
1719 n_ops++;
1720
1721 ops[i].op = XEXP (this_op, 0);
1722 input_ops++;
1723 changed = 1;
1724 break;
1725
1726 case NEG:
1727 ops[i].op = XEXP (this_op, 0);
1728 ops[i].neg = ! this_neg;
1729 changed = 1;
1730 break;
1731
1732 case CONST:
1733 if (n_ops < 7
1734 && GET_CODE (XEXP (this_op, 0)) == PLUS
1735 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
1736 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
1737 {
1738 ops[i].op = XEXP (XEXP (this_op, 0), 0);
1739 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
1740 ops[n_ops].neg = this_neg;
1741 n_ops++;
1742 input_consts++;
1743 changed = 1;
1744 }
1745 break;
1746
1747 case NOT:
1748 /* ~a -> (-a - 1) */
1749 if (n_ops != 7)
1750 {
1751 ops[n_ops].op = constm1_rtx;
1752 ops[n_ops++].neg = this_neg;
1753 ops[i].op = XEXP (this_op, 0);
1754 ops[i].neg = !this_neg;
1755 changed = 1;
1756 }
1757 break;
1758
1759 case CONST_INT:
1760 if (this_neg)
1761 {
1762 ops[i].op = neg_const_int (mode, this_op);
1763 ops[i].neg = 0;
1764 changed = 1;
1765 }
1766 break;
1767
1768 default:
1769 break;
1770 }
1771 }
1772 }
1773 while (changed);
1774
1775 /* If we only have two operands, we can't do anything. */
1776 if (n_ops <= 2 && !force)
1777 return NULL_RTX;
1778
1779 /* Count the number of CONSTs we didn't split above. */
1780 for (i = 0; i < n_ops; i++)
1781 if (GET_CODE (ops[i].op) == CONST)
1782 input_consts++;
1783
1784 /* Now simplify each pair of operands until nothing changes. The first
1785 time through just simplify constants against each other. */
1786
1787 first = 1;
1788 do
1789 {
1790 changed = first;
1791
1792 for (i = 0; i < n_ops - 1; i++)
1793 for (j = i + 1; j < n_ops; j++)
1794 {
1795 rtx lhs = ops[i].op, rhs = ops[j].op;
1796 int lneg = ops[i].neg, rneg = ops[j].neg;
1797
1798 if (lhs != 0 && rhs != 0
1799 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
1800 {
1801 enum rtx_code ncode = PLUS;
1802
1803 if (lneg != rneg)
1804 {
1805 ncode = MINUS;
1806 if (lneg)
1807 tem = lhs, lhs = rhs, rhs = tem;
1808 }
1809 else if (swap_commutative_operands_p (lhs, rhs))
1810 tem = lhs, lhs = rhs, rhs = tem;
1811
1812 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1813
1814 /* Reject "simplifications" that just wrap the two
1815 arguments in a CONST. Failure to do so can result
1816 in infinite recursion with simplify_binary_operation
1817 when it calls us to simplify CONST operations. */
1818 if (tem
1819 && ! (GET_CODE (tem) == CONST
1820 && GET_CODE (XEXP (tem, 0)) == ncode
1821 && XEXP (XEXP (tem, 0), 0) == lhs
1822 && XEXP (XEXP (tem, 0), 1) == rhs)
1823 /* Don't allow -x + -1 -> ~x simplifications in the
1824 first pass. This allows us the chance to combine
1825 the -1 with other constants. */
1826 && ! (first
1827 && GET_CODE (tem) == NOT
1828 && XEXP (tem, 0) == rhs))
1829 {
1830 lneg &= rneg;
1831 if (GET_CODE (tem) == NEG)
1832 tem = XEXP (tem, 0), lneg = !lneg;
1833 if (GET_CODE (tem) == CONST_INT && lneg)
1834 tem = neg_const_int (mode, tem), lneg = 0;
1835
1836 ops[i].op = tem;
1837 ops[i].neg = lneg;
1838 ops[j].op = NULL_RTX;
1839 changed = 1;
1840 }
1841 }
1842 }
1843
1844 first = 0;
1845 }
1846 while (changed);
1847
1848 /* Pack all the operands to the lower-numbered entries. */
1849 for (i = 0, j = 0; j < n_ops; j++)
1850 if (ops[j].op)
1851 ops[i++] = ops[j];
1852 n_ops = i;
1853
1854 /* Sort the operations based on swap_commutative_operands_p. */
1855 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
1856
1857 /* We suppressed creation of trivial CONST expressions in the
1858 combination loop to avoid recursion. Create one manually now.
1859 The combination loop should have ensured that there is exactly
1860 one CONST_INT, and the sort will have ensured that it is last
1861 in the array and that any other constant will be next-to-last. */
1862
1863 if (n_ops > 1
1864 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
1865 && CONSTANT_P (ops[n_ops - 2].op))
1866 {
1867 rtx value = ops[n_ops - 1].op;
1868 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
1869 value = neg_const_int (mode, value);
1870 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
1871 n_ops--;
1872 }
1873
1874 /* Count the number of CONSTs that we generated. */
1875 n_consts = 0;
1876 for (i = 0; i < n_ops; i++)
1877 if (GET_CODE (ops[i].op) == CONST)
1878 n_consts++;
1879
1880 /* Give up if we didn't reduce the number of operands we had. Make
1881 sure we count a CONST as two operands. If we have the same
1882 number of operands, but have made more CONSTs than before, this
1883 is also an improvement, so accept it. */
1884 if (!force
1885 && (n_ops + n_consts > input_ops
1886 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
1887 return NULL_RTX;
1888
1889 /* Put a non-negated operand first. If there aren't any, make all
1890 operands positive and negate the whole thing later. */
1891
1892 negate = 0;
1893 for (i = 0; i < n_ops && ops[i].neg; i++)
1894 continue;
1895 if (i == n_ops)
1896 {
1897 for (i = 0; i < n_ops; i++)
1898 ops[i].neg = 0;
1899 negate = 1;
1900 }
1901 else if (i != 0)
1902 {
1903 tem = ops[0].op;
1904 ops[0] = ops[i];
1905 ops[i].op = tem;
1906 ops[i].neg = 1;
1907 }
1908
1909 /* Now make the result by performing the requested operations. */
1910 result = ops[0].op;
1911 for (i = 1; i < n_ops; i++)
1912 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
1913 mode, result, ops[i].op);
1914
1915 return negate ? gen_rtx_NEG (mode, result) : result;
1916 }
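/* A worked run with illustrative operands: for (a - b) + (c - a),
   the expansion loop flattens the input into the list a, -b, c, -a;
   the combination loop cancels a against -a, packing drops the
   emptied slot, and the rebuilt result is (minus c b), reducing four
   input operands to two.  */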
1917
1918 struct cfc_args
1919 {
1920 rtx op0, op1; /* Input */
1921 int equal, op0lt, op1lt; /* Output */
1922 int unordered;
1923 };
1924
1925 static void
1926 check_fold_consts (data)
1927 PTR data;
1928 {
1929 struct cfc_args *args = (struct cfc_args *) data;
1930 REAL_VALUE_TYPE d0, d1;
1931
1932   /* We may raise an exception while reading the value. */
1933 args->unordered = 1;
1934 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
1935 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
1936
1937   /* A comparison is unordered if either operand is a NaN. */
1938 if (REAL_VALUE_ISNAN (d0)
1939 || REAL_VALUE_ISNAN (d1))
1940 return;
1941 args->equal = REAL_VALUES_EQUAL (d0, d1);
1942 args->op0lt = REAL_VALUES_LESS (d0, d1);
1943 args->op1lt = REAL_VALUES_LESS (d1, d0);
1944 args->unordered = 0;
1945 }
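/* E.g. comparing the CONST_DOUBLEs for 1.0 and 2.0 sets equal = 0,
   op0lt = 1, op1lt = 0 and clears unordered; had either operand been
   a NaN, the early return above would have left unordered == 1.  */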
1946
1947 /* Like simplify_binary_operation except used for relational operators.
1948 MODE is the mode of the operands, not that of the result. If MODE
1949 is VOIDmode, both operands must also be VOIDmode and we compare the
1950 operands in "infinite precision".
1951
1952 If no simplification is possible, this function returns zero. Otherwise,
1953 it returns either const_true_rtx or const0_rtx. */
1954
1955 rtx
1956 simplify_relational_operation (code, mode, op0, op1)
1957 enum rtx_code code;
1958 enum machine_mode mode;
1959 rtx op0, op1;
1960 {
1961 int equal, op0lt, op0ltu, op1lt, op1ltu;
1962 rtx tem;
1963 rtx trueop0;
1964 rtx trueop1;
1965
1966 if (mode == VOIDmode
1967 && (GET_MODE (op0) != VOIDmode
1968 || GET_MODE (op1) != VOIDmode))
1969 abort ();
1970
1971 /* If op0 is a compare, extract the comparison arguments from it. */
1972 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1973 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1974
1975 trueop0 = avoid_constant_pool_reference (op0);
1976 trueop1 = avoid_constant_pool_reference (op1);
1977
1978 /* We can't simplify MODE_CC values since we don't know what the
1979 actual comparison is. */
1980 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1981 #ifdef HAVE_cc0
1982 || op0 == cc0_rtx
1983 #endif
1984 )
1985 return 0;
1986
1987 /* Make sure the constant is second. */
1988 if (swap_commutative_operands_p (trueop0, trueop1))
1989 {
1990 tem = op0, op0 = op1, op1 = tem;
1991 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
1992 code = swap_condition (code);
1993 }
1994
1995 /* For integer comparisons of A and B maybe we can simplify A - B and can
1996 then simplify a comparison of that with zero. If A and B are both either
1997 a register or a CONST_INT, this can't help; testing for these cases will
1998 prevent infinite recursion here and speed things up.
1999
2000 If CODE is an unsigned comparison, then we can never do this optimization,
2001 because it gives an incorrect result if the subtraction wraps around zero.
2002 ANSI C defines unsigned operations such that they never overflow, and
2003 thus such cases cannot be ignored. */
2004
2005 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2006 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2007 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2008 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2009 && code != GTU && code != GEU && code != LTU && code != LEU)
2010 return simplify_relational_operation (signed_condition (code),
2011 mode, tem, const0_rtx);
2012
2013 if (flag_unsafe_math_optimizations && code == ORDERED)
2014 return const_true_rtx;
2015
2016 if (flag_unsafe_math_optimizations && code == UNORDERED)
2017 return const0_rtx;
2018
2019 /* If the two operands are equal and we need not honor IEEE NaNs,
2020 we know the result. */
2021 if (rtx_equal_p (trueop0, trueop1)
2022 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
2023 || ! FLOAT_MODE_P (GET_MODE (trueop0))
2024 || flag_unsafe_math_optimizations))
2025 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2026
2027 /* If the operands are floating-point constants, see if we can fold
2028 the result. */
2029 else if (GET_CODE (trueop0) == CONST_DOUBLE
2030 && GET_CODE (trueop1) == CONST_DOUBLE
2031 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2032 {
2033 struct cfc_args args;
2034
2035 /* Set up the input for check_fold_consts (). */
2036 args.op0 = trueop0;
2037 args.op1 = trueop1;
2038
2039
2040 if (!do_float_handler (check_fold_consts, (PTR) &args))
2041 args.unordered = 1;
2042
2043 if (args.unordered)
2044 switch (code)
2045 {
2046 case UNEQ:
2047 case UNLT:
2048 case UNGT:
2049 case UNLE:
2050 case UNGE:
2051 case NE:
2052 case UNORDERED:
2053 return const_true_rtx;
2054 case EQ:
2055 case LT:
2056 case GT:
2057 case LE:
2058 case GE:
2059 case LTGT:
2060 case ORDERED:
2061 return const0_rtx;
2062 default:
2063 return 0;
2064 }
2065
2066 /* Receive the output from check_fold_consts (). */
2067 equal = args.equal;
2068 op0lt = op0ltu = args.op0lt;
2069 op1lt = op1ltu = args.op1lt;
2070 }
2071
2072 /* Otherwise, see if the operands are both integers. */
2073 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2074 && (GET_CODE (trueop0) == CONST_DOUBLE
2075 || GET_CODE (trueop0) == CONST_INT)
2076 && (GET_CODE (trueop1) == CONST_DOUBLE
2077 || GET_CODE (trueop1) == CONST_INT))
2078 {
2079 int width = GET_MODE_BITSIZE (mode);
2080 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2081 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2082
2083 /* Get the two words comprising each integer constant. */
2084 if (GET_CODE (trueop0) == CONST_DOUBLE)
2085 {
2086 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2087 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2088 }
2089 else
2090 {
2091 l0u = l0s = INTVAL (trueop0);
2092 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2093 }
2094
2095 if (GET_CODE (trueop1) == CONST_DOUBLE)
2096 {
2097 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2098 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2099 }
2100 else
2101 {
2102 l1u = l1s = INTVAL (trueop1);
2103 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2104 }
2105
2106 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2107 we have to sign or zero-extend the values. */
2108 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2109 {
2110 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2111 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2112
2113 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2114 l0s |= ((HOST_WIDE_INT) (-1) << width);
2115
2116 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2117 l1s |= ((HOST_WIDE_INT) (-1) << width);
2118 }
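/* Worked example: with width == 8 and a low byte of 0x80, l0u becomes
128 while l0s sign-extends to -128, so (lt 0x80 0x7f) folds true
while (ltu 0x80 0x7f) folds false. */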
2119 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2120 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2121
2122 equal = (h0u == h1u && l0u == l1u);
2123 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2124 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2125 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2126 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2127 }
2128
2129 /* Otherwise, there are some code-specific tests we can make. */
2130 else
2131 {
2132 switch (code)
2133 {
2134 case EQ:
2135 /* References to the frame plus a constant or labels cannot
2136 be zero, but a SYMBOL_REF can due to #pragma weak. */
2137 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2138 || GET_CODE (trueop0) == LABEL_REF)
2139 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2140 /* On some machines, the arg pointer register can sometimes be 0. */
2141 && op0 != arg_pointer_rtx
2142 #endif
2143 )
2144 return const0_rtx;
2145 break;
2146
2147 case NE:
2148 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2149 || GET_CODE (trueop0) == LABEL_REF)
2150 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2151 && op0 != arg_pointer_rtx
2152 #endif
2153 )
2154 return const_true_rtx;
2155 break;
2156
2157 case GEU:
2158 /* Unsigned values are never negative. */
2159 if (trueop1 == const0_rtx)
2160 return const_true_rtx;
2161 break;
2162
2163 case LTU:
2164 if (trueop1 == const0_rtx)
2165 return const0_rtx;
2166 break;
2167
2168 case LEU:
2169 /* Unsigned values are never greater than the largest
2170 unsigned value. */
2171 if (GET_CODE (trueop1) == CONST_INT
2172 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2173 && INTEGRAL_MODE_P (mode))
2174 return const_true_rtx;
2175 break;
2176
2177 case GTU:
2178 if (GET_CODE (trueop1) == CONST_INT
2179 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2180 && INTEGRAL_MODE_P (mode))
2181 return const0_rtx;
2182 break;
2183
2184 default:
2185 break;
2186 }
2187
2188 return 0;
2189 }
2190
2191 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2192 as appropriate. */
2193 switch (code)
2194 {
2195 case EQ:
2196 case UNEQ:
2197 return equal ? const_true_rtx : const0_rtx;
2198 case NE:
2199 case LTGT:
2200 return ! equal ? const_true_rtx : const0_rtx;
2201 case LT:
2202 case UNLT:
2203 return op0lt ? const_true_rtx : const0_rtx;
2204 case GT:
2205 case UNGT:
2206 return op1lt ? const_true_rtx : const0_rtx;
2207 case LTU:
2208 return op0ltu ? const_true_rtx : const0_rtx;
2209 case GTU:
2210 return op1ltu ? const_true_rtx : const0_rtx;
2211 case LE:
2212 case UNLE:
2213 return equal || op0lt ? const_true_rtx : const0_rtx;
2214 case GE:
2215 case UNGE:
2216 return equal || op1lt ? const_true_rtx : const0_rtx;
2217 case LEU:
2218 return equal || op0ltu ? const_true_rtx : const0_rtx;
2219 case GEU:
2220 return equal || op1ltu ? const_true_rtx : const0_rtx;
2221 case ORDERED:
2222 return const_true_rtx;
2223 case UNORDERED:
2224 return const0_rtx;
2225 default:
2226 abort ();
2227 }
2228 }
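
#if 0
/* A minimal usage sketch; example_fold_lt is a hypothetical helper,
not part of this file. Two CONST_INTs take the "both integers"
path above and fold to a canonical truth value. */
static rtx
example_fold_lt ()
{
/* Yields const_true_rtx, since 4 < 7 as signed SImode values. */
return simplify_relational_operation (LT, SImode,
GEN_INT (4), GEN_INT (7));
}
#endif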
2229 \f
2230 /* Simplify CODE, an operation with result mode MODE and three operands,
2231 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2232 a constant. Return 0 if no simplification is possible. */
2233
2234 rtx
2235 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2236 enum rtx_code code;
2237 enum machine_mode mode, op0_mode;
2238 rtx op0, op1, op2;
2239 {
2240 unsigned int width = GET_MODE_BITSIZE (mode);
2241
2242 /* VOIDmode means "infinite" precision. */
2243 if (width == 0)
2244 width = HOST_BITS_PER_WIDE_INT;
2245
2246 switch (code)
2247 {
2248 case SIGN_EXTRACT:
2249 case ZERO_EXTRACT:
2250 if (GET_CODE (op0) == CONST_INT
2251 && GET_CODE (op1) == CONST_INT
2252 && GET_CODE (op2) == CONST_INT
2253 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2254 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2255 {
2256 /* Extracting a bit-field from a constant. */
2257 HOST_WIDE_INT val = INTVAL (op0);
2258
2259 if (BITS_BIG_ENDIAN)
2260 val >>= (GET_MODE_BITSIZE (op0_mode)
2261 - INTVAL (op2) - INTVAL (op1));
2262 else
2263 val >>= INTVAL (op2);
2264
2265 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2266 {
2267 /* First zero-extend. */
2268 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2269 /* If desired, propagate sign bit. */
2270 if (code == SIGN_EXTRACT
2271 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2272 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2273 }
2274
2275 /* Clear the bits that don't belong in our mode,
2276 unless they and our sign bit are all one.
2277 So we get either a reasonable negative value or a reasonable
2278 unsigned value for this mode. */
2279 if (width < HOST_BITS_PER_WIDE_INT
2280 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2281 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2282 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2283
2284 return GEN_INT (val);
2285 }
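/* Worked example, assuming BITS_BIG_ENDIAN is zero:
(sign_extract (const_int 0xb0) (const_int 4) (const_int 4)) shifts
val right by 4 to get 0xb, masks to the 4-bit field, and propagates
the sign bit, yielding (const_int -5). */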
2286 break;
2287
2288 case IF_THEN_ELSE:
2289 if (GET_CODE (op0) == CONST_INT)
2290 return op0 != const0_rtx ? op1 : op2;
2291
2292 /* Convert a != b ? a : b and a == b ? b : a to "a". */
2293 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2294 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2295 && rtx_equal_p (XEXP (op0, 0), op1)
2296 && rtx_equal_p (XEXP (op0, 1), op2))
2297 return op1;
2298 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2299 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2300 && rtx_equal_p (XEXP (op0, 1), op1)
2301 && rtx_equal_p (XEXP (op0, 0), op2))
2302 return op2;
2303 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2304 {
2305 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2306 ? GET_MODE (XEXP (op0, 1))
2307 : GET_MODE (XEXP (op0, 0)));
2308 rtx temp;
2309 if (cmp_mode == VOIDmode)
2310 cmp_mode = op0_mode;
2311 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2312 XEXP (op0, 0), XEXP (op0, 1));
2313
2314 /* See if any simplifications were possible. */
2315 if (temp == const0_rtx)
2316 return op2;
2317 else if (temp == const1_rtx)
2318 return op1;
2319 else if (temp)
2320 op0 = temp;
2321
2322 /* Look for happy constants in op1 and op2. */
2323 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2324 {
2325 HOST_WIDE_INT t = INTVAL (op1);
2326 HOST_WIDE_INT f = INTVAL (op2);
2327
2328 if (t == STORE_FLAG_VALUE && f == 0)
2329 code = GET_CODE (op0);
2330 else if (t == 0 && f == STORE_FLAG_VALUE)
2331 {
2332 enum rtx_code tmp;
2333 tmp = reversed_comparison_code (op0, NULL_RTX);
2334 if (tmp == UNKNOWN)
2335 break;
2336 code = tmp;
2337 }
2338 else
2339 break;
2340
2341 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2342 }
2343 }
2344 break;
2345
2346 default:
2347 abort ();
2348 }
2349
2350 return 0;
2351 }
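
#if 0
/* A minimal sketch; example_fold_ternary is a hypothetical helper.
A constant IF_THEN_ELSE condition selects one arm outright. */
static rtx
example_fold_ternary (a, b)
rtx a, b;
{
/* Returns A, because the condition (const_int 1) is nonzero. */
return simplify_ternary_operation (IF_THEN_ELSE, SImode, VOIDmode,
const1_rtx, a, b);
}
#endif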
2352
2353 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2354 Return 0 if no simplification is possible. */
2355 rtx
2356 simplify_subreg (outermode, op, innermode, byte)
2357 rtx op;
2358 unsigned int byte;
2359 enum machine_mode outermode, innermode;
2360 {
2361 /* A little bit of sanity checking. */
2362 if (innermode == VOIDmode || outermode == VOIDmode
2363 || innermode == BLKmode || outermode == BLKmode)
2364 abort ();
2365
2366 if (GET_MODE (op) != innermode
2367 && GET_MODE (op) != VOIDmode)
2368 abort ();
2369
2370 if (byte % GET_MODE_SIZE (outermode)
2371 || byte >= GET_MODE_SIZE (innermode))
2372 abort ();
2373
2374 if (outermode == innermode && !byte)
2375 return op;
2376
2377 /* Attempt to simplify a constant to a non-SUBREG expression. */
2378 if (CONSTANT_P (op))
2379 {
2380 int offset, part;
2381 unsigned HOST_WIDE_INT val = 0;
2382
2383 /* ??? This code is partly redundant with code below, but can handle
2384 the subregs of floats and similar corner cases.
2385 Later we should move all simplification code here and rewrite
2386 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2387 using SIMPLIFY_SUBREG. */
2388 if (subreg_lowpart_offset (outermode, innermode) == byte)
2389 {
2390 rtx new = gen_lowpart_if_possible (outermode, op);
2391 if (new)
2392 return new;
2393 }
2394
2395 /* Similar comments to the above apply here. */
2396 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2397 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2398 && GET_MODE_CLASS (outermode) == MODE_INT)
2399 {
2400 rtx new = constant_subword (op,
2401 (byte / UNITS_PER_WORD),
2402 innermode);
2403 if (new)
2404 return new;
2405 }
2406
2407 offset = byte * BITS_PER_UNIT;
2408 switch (GET_CODE (op))
2409 {
2410 case CONST_DOUBLE:
2411 if (GET_MODE (op) != VOIDmode)
2412 break;
2413
2414 /* We can't handle this case yet. */
2415 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2416 return NULL_RTX;
2417
2418 part = offset >= HOST_BITS_PER_WIDE_INT;
2419 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2420 && BYTES_BIG_ENDIAN)
2421 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2422 && WORDS_BIG_ENDIAN))
2423 part = !part;
2424 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2425 offset %= HOST_BITS_PER_WIDE_INT;
2426
2427 /* We've already picked the word we want from a double, so
2428 pretend this is actually an integer. */
2429 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2430
2431 /* FALLTHROUGH */
2432 case CONST_INT:
2433 if (GET_CODE (op) == CONST_INT)
2434 val = INTVAL (op);
2435
2436 /* We don't handle synthesizing non-integral constants yet. */
2437 if (GET_MODE_CLASS (outermode) != MODE_INT)
2438 return NULL_RTX;
2439
2440 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2441 {
2442 if (WORDS_BIG_ENDIAN)
2443 offset = (GET_MODE_BITSIZE (innermode)
2444 - GET_MODE_BITSIZE (outermode) - offset);
2445 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2446 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2447 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2448 - 2 * (offset % BITS_PER_WORD));
2449 }
2450
2451 if (offset >= HOST_BITS_PER_WIDE_INT)
2452 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2453 else
2454 {
2455 val >>= offset;
2456 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2457 val = trunc_int_for_mode (val, outermode);
2458 return GEN_INT (val);
2459 }
2460 default:
2461 break;
2462 }
2463 }
2464
2465 /* Changing mode twice with SUBREG => just change it once,
2466 or not at all if changing back to the starting mode of OP. */
2467 if (GET_CODE (op) == SUBREG)
2468 {
2469 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2470 int final_offset = byte + SUBREG_BYTE (op);
2471 rtx new;
2472
2473 if (outermode == innermostmode
2474 && byte == 0 && SUBREG_BYTE (op) == 0)
2475 return SUBREG_REG (op);
2476
2477 /* SUBREG_BYTE represents the offset, as if the value were stored
2478 in memory. The irritating exception is a paradoxical subreg,
2479 where we define SUBREG_BYTE to be 0; on big-endian machines this
2480 value would be negative. For the moment, undo this exception. */
2481 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2482 {
2483 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2484 if (WORDS_BIG_ENDIAN)
2485 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2486 if (BYTES_BIG_ENDIAN)
2487 final_offset += difference % UNITS_PER_WORD;
2488 }
2489 if (SUBREG_BYTE (op) == 0
2490 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2491 {
2492 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2493 if (WORDS_BIG_ENDIAN)
2494 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2495 if (BYTES_BIG_ENDIAN)
2496 final_offset += difference % UNITS_PER_WORD;
2497 }
2498
2499 /* See whether resulting subreg will be paradoxical. */
2500 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2501 {
2502 /* In nonparadoxical subregs we can't handle negative offsets. */
2503 if (final_offset < 0)
2504 return NULL_RTX;
2505 /* Bail out in case resulting subreg would be incorrect. */
2506 if (final_offset % GET_MODE_SIZE (outermode)
2507 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2508 return NULL_RTX;
2509 }
2510 else
2511 {
2512 int offset = 0;
2513 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2514
2515 /* In a paradoxical subreg, see if we are still looking at the
2516 lower part. If so, our SUBREG_BYTE will be 0. */
2517 if (WORDS_BIG_ENDIAN)
2518 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2519 if (BYTES_BIG_ENDIAN)
2520 offset += difference % UNITS_PER_WORD;
2521 if (offset == final_offset)
2522 final_offset = 0;
2523 else
2524 return NULL_RTX;
2525 }
2526
2527 /* Recurse for further possible simplifications. */
2528 new = simplify_subreg (outermode, SUBREG_REG (op),
2529 GET_MODE (SUBREG_REG (op)),
2530 final_offset);
2531 if (new)
2532 return new;
2533 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2534 }
2535
2536 /* SUBREG of a hard register => just change the register number
2537 and/or mode. If the hard register is not valid in that mode,
2538 suppress this simplification. If the hard register is the stack,
2539 frame, or argument pointer, leave this as a SUBREG. */
2540
2541 if (REG_P (op)
2542 && (! REG_FUNCTION_VALUE_P (op)
2543 || ! rtx_equal_function_value_matters)
2544 #ifdef CLASS_CANNOT_CHANGE_MODE
2545 && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
2546 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2547 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
2548 && (TEST_HARD_REG_BIT
2549 (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
2550 REGNO (op))))
2551 #endif
2552 && REGNO (op) < FIRST_PSEUDO_REGISTER
2553 && ((reload_completed && !frame_pointer_needed)
2554 || (REGNO (op) != FRAME_POINTER_REGNUM
2555 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2556 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2557 #endif
2558 ))
2559 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2560 && REGNO (op) != ARG_POINTER_REGNUM
2561 #endif
2562 && REGNO (op) != STACK_POINTER_REGNUM)
2563 {
2564 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2565 0);
2566
2567 /* ??? We do allow it if the current REG is not valid for
2568 its mode. This is a kludge to work around how float/complex
2569 arguments are passed on 32-bit SPARC and should be fixed. */
2570 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2571 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2572 {
2573 rtx x = gen_rtx_REG (outermode, final_regno);
2574
2575 /* Propagate the original regno. We don't have any way to specify
2576 the offset inside the original regno, so do so only for the lowpart.
2577 The information is used only by alias analysis, which cannot
2578 grok a partial register anyway. */
2579
2580 if (subreg_lowpart_offset (outermode, innermode) == byte)
2581 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2582 return x;
2583 }
2584 }
2585
2586 /* If we have a SUBREG of a register that we are replacing and we are
2587 replacing it with a MEM, make a new MEM and try replacing the
2588 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2589 or if we would be widening it. */
2590
2591 if (GET_CODE (op) == MEM
2592 && ! mode_dependent_address_p (XEXP (op, 0))
2593 /* Allow splitting of volatile memory references in case we don't
2594 have an instruction to move the whole thing. */
2595 && (! MEM_VOLATILE_P (op)
2596 || ! have_insn_for (SET, innermode))
2597 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2598 return adjust_address_nv (op, outermode, byte);
2599
2600 /* Handle complex values represented as CONCAT
2601 of real and imaginary part. */
2602 if (GET_CODE (op) == CONCAT)
2603 {
2604 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2605 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2606 unsigned int final_offset;
2607 rtx res;
2608
2609 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2610 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2611 if (res)
2612 return res;
2613 /* We can at least simplify it by referring directly to the relevant part. */
2614 return gen_rtx_SUBREG (outermode, part, final_offset);
2615 }
2616
2617 return NULL_RTX;
2618 }
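
#if 0
/* A usage sketch; example_low_byte is a hypothetical helper and
assumes a little-endian target, where byte 0 is the lowpart. */
static rtx
example_low_byte ()
{
/* Folds to (const_int 0x34), the low QImode byte of 0x1234. */
return simplify_subreg (QImode, GEN_INT (0x1234), SImode, 0);
}
#endif
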
2619 /* Make a SUBREG operation or equivalent if it folds. */
2620
2621 rtx
2622 simplify_gen_subreg (outermode, op, innermode, byte)
2623 rtx op;
2624 unsigned int byte;
2625 enum machine_mode outermode, innermode;
2626 {
2627 rtx new;
2628 /* A little bit of sanity checking. */
2629 if (innermode == VOIDmode || outermode == VOIDmode
2630 || innermode == BLKmode || outermode == BLKmode)
2631 abort ();
2632
2633 if (GET_MODE (op) != innermode
2634 && GET_MODE (op) != VOIDmode)
2635 abort ();
2636
2637 if (byte % GET_MODE_SIZE (outermode)
2638 || byte >= GET_MODE_SIZE (innermode))
2639 abort ();
2640
2641 if (GET_CODE (op) == QUEUED)
2642 return NULL_RTX;
2643
2644 new = simplify_subreg (outermode, op, innermode, byte);
2645 if (new)
2646 return new;
2647
2648 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2649 return NULL_RTX;
2650
2651 return gen_rtx_SUBREG (outermode, op, byte);
2652 }
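
#if 0
/* A usage sketch; example_gen_subreg is a hypothetical helper. For a
pseudo REG no folding is possible, so the call falls back to
building (subreg:SI (reg:DI ...) 0) explicitly. */
static rtx
example_gen_subreg (reg_di)
rtx reg_di;
{
return simplify_gen_subreg (SImode, reg_di, DImode, 0);
}
#endif
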
2653 /* Simplify X, an rtx expression.
2654
2655 Return the simplified expression or NULL if no simplifications
2656 were possible.
2657
2658 This is the preferred entry point into the simplification routines;
2659 however, we still allow passes to call the more specific routines.
2660
2661 Right now GCC has three (yes, three) major bodies of RTL simplification
2662 code that need to be unified.
2663
2664 1. fold_rtx in cse.c. This code uses various CSE specific
2665 information to aid in RTL simplification.
2666
2667 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2668 it uses combine specific information to aid in RTL
2669 simplification.
2670
2671 3. The routines in this file.
2672
2673
2674 Long term we want to have only one body of simplification code; to
2675 get to that state I recommend the following steps:
2676
2677 1. Pore over fold_rtx & simplify_rtx and move any simplifications
2678 that do not depend on pass-specific state into these routines.
2679
2680 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2681 use this routine whenever possible.
2682
2683 3. Allow for pass dependent state to be provided to these
2684 routines and add simplifications based on the pass dependent
2685 state. Remove code from cse.c & combine.c that becomes
2686 redundant/dead.
2687
2688 It will take time, but ultimately the compiler will be easier to
2689 maintain and improve. It's totally silly that when we add a
2690 simplification it needs to be added to 4 places (3 for RTL
2691 simplification and 1 for tree simplification). */
2692
2693 rtx
2694 simplify_rtx (x)
2695 rtx x;
2696 {
2697 enum rtx_code code = GET_CODE (x);
2698 enum machine_mode mode = GET_MODE (x);
2699
2700 switch (GET_RTX_CLASS (code))
2701 {
2702 case '1':
2703 return simplify_unary_operation (code, mode,
2704 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2705 case 'c':
2706 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2707 {
2708 rtx tem;
2709
2710 tem = XEXP (x, 0);
2711 XEXP (x, 0) = XEXP (x, 1);
2712 XEXP (x, 1) = tem;
2713 return simplify_binary_operation (code, mode,
2714 XEXP (x, 0), XEXP (x, 1));
2715 }
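/* Fall through to the non-commutative case when no swap was needed. */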
2716
2717 case '2':
2718 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2719
2720 case '3':
2721 case 'b':
2722 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2723 XEXP (x, 0), XEXP (x, 1),
2724 XEXP (x, 2));
2725
2726 case '<':
2727 return simplify_relational_operation (code,
2728 ((GET_MODE (XEXP (x, 0))
2729 != VOIDmode)
2730 ? GET_MODE (XEXP (x, 0))
2731 : GET_MODE (XEXP (x, 1))),
2732 XEXP (x, 0), XEXP (x, 1));
2733 case 'x':
2734 /* The only case we try to handle is a SUBREG. */
2735 if (code == SUBREG)
2736 return simplify_gen_subreg (mode, SUBREG_REG (x),
2737 GET_MODE (SUBREG_REG (x)),
2738 SUBREG_BYTE (x));
2739 return NULL;
2740 default:
2741 return NULL;
2742 }
2743 }
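
#if 0
/* A closing usage sketch; example_fold_plus is a hypothetical helper.
PLUS has rtx class 'c', so simplify_rtx dispatches to
simplify_binary_operation, yielding (const_int 5). */
static rtx
example_fold_plus ()
{
return simplify_rtx (gen_rtx_PLUS (SImode, GEN_INT (2), GEN_INT (3)));
}
#endif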