1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "function.h"
35 #include "expr.h"
36 #include "toplev.h"
37 #include "output.h"
38 #include "ggc.h"
39
40 /* Simplification and canonicalization of RTL. */
41
42 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
43 virtual regs here because the simplify_*_operation routines are called
44 by integrate.c, which is called before virtual register instantiation.
45
46 ?!? NONZERO_BASE_PLUS_P needs to move into
47    a header file so that its definition can be shared with the
48    copy in cse.c.  Until then, do not change this macro without
49    also changing that copy.  */
50
51 /* Allows reference to the stack pointer.
52
53    This used to include FIXED_BASE_PLUS_P; however, we can't assume that
54 arg_pointer_rtx by itself is nonzero, because on at least one machine,
55 the i960, the arg pointer is zero when it is unused. */
56
57 #define NONZERO_BASE_PLUS_P(X) \
58 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
59 || (X) == virtual_stack_vars_rtx \
60 || (X) == virtual_incoming_args_rtx \
61 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
62 && (XEXP (X, 0) == frame_pointer_rtx \
63 || XEXP (X, 0) == hard_frame_pointer_rtx \
64 || ((X) == arg_pointer_rtx \
65 && fixed_regs[ARG_POINTER_REGNUM]) \
66 || XEXP (X, 0) == virtual_stack_vars_rtx \
67 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
68 || (X) == stack_pointer_rtx \
69 || (X) == virtual_stack_dynamic_rtx \
70 || (X) == virtual_outgoing_args_rtx \
71 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
72 && (XEXP (X, 0) == stack_pointer_rtx \
73 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
74 || XEXP (X, 0) == virtual_outgoing_args_rtx)) \
75 || GET_CODE (X) == ADDRESSOF)
76
77 /* Much code operates on (low, high) pairs; the low value is an
78 unsigned wide int, the high value a signed wide int. We
79 occasionally need to sign extend from low to high as if low were a
80 signed wide int. */
81 #define HWI_SIGN_EXTEND(low) \
82 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
83
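/* Editor's illustration, not from the original file: how the (low, high)
   pair convention plays out.  A standalone sketch assuming a 64-bit
   `long long` as the stand-in for HOST_WIDE_INT; the EX_* names are
   hypothetical.  It compiles on its own if the #if 0 guard is removed.  */
#if 0
#include <stdio.h>

typedef long long EX_HWI;   /* stand-in for HOST_WIDE_INT */

#define EX_HWI_SIGN_EXTEND(low) \
  ((((EX_HWI) (low)) < 0) ? ((EX_HWI) -1) : ((EX_HWI) 0))

int
main (void)
{
  /* A negative low word: the high word becomes all ones, so the
     double-width value (high:low) still reads as -5.  */
  EX_HWI low = -5;
  EX_HWI high = EX_HWI_SIGN_EXTEND (low);
  printf ("low = %lld, high = %lld\n", low, high);   /* -5, -1 */

  /* A non-negative low word: the high word is zero.  */
  low = 42;
  high = EX_HWI_SIGN_EXTEND (low);
  printf ("low = %lld, high = %lld\n", low, high);   /* 42, 0 */
  return 0;
}
#endif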
84 static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
85 static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
86 const void *));
87 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
88 enum machine_mode, rtx,
89 rtx, int));
90 \f
91 /* Negate a CONST_INT rtx, truncating (because a conversion from a
92 maximally negative number can overflow). */
93 static rtx
94 neg_const_int (mode, i)
95 enum machine_mode mode;
96 rtx i;
97 {
98 return gen_int_mode (- INTVAL (i), mode);
99 }
100
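/* Editor's illustration, not from the original file: why the negation
   above must be truncated.  A standalone sketch modelling gen_int_mode's
   truncation for a 32-bit mode inside a 64-bit host word; ex_trunc_si is
   a hypothetical stand-in for trunc_int_for_mode.  */
#if 0
#include <stdio.h>
#include <stdint.h>

/* Keep the low 32 bits and sign-extend them back into the host word
   (two's-complement conversion assumed, as GCC itself assumes).  */
static int64_t
ex_trunc_si (int64_t val)
{
  return (int32_t) val;
}

int
main (void)
{
  int64_t x = INT32_MIN;   /* the maximally negative SImode value */
  int64_t negated = -x;    /* +2147483648: not representable in SImode */
  printf ("-x        = %lld\n", (long long) negated);
  /* Truncation wraps +2^31 back to -2^31, matching two's-complement
     negation within the 32-bit mode.  */
  printf ("truncated = %lld\n", (long long) ex_trunc_si (negated));
  return 0;
}
#endif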
101 \f
102 /* Make a binary operation by properly ordering the operands and
103 seeing if the expression folds. */
104
105 rtx
106 simplify_gen_binary (code, mode, op0, op1)
107 enum rtx_code code;
108 enum machine_mode mode;
109 rtx op0, op1;
110 {
111 rtx tem;
112
113 /* Put complex operands first and constants second if commutative. */
114 if (GET_RTX_CLASS (code) == 'c'
115 && swap_commutative_operands_p (op0, op1))
116 tem = op0, op0 = op1, op1 = tem;
117
118 /* If this simplifies, do it. */
119 tem = simplify_binary_operation (code, mode, op0, op1);
120 if (tem)
121 return tem;
122
123 /* Handle addition and subtraction specially. Otherwise, just form
124 the operation. */
125
126 if (code == PLUS || code == MINUS)
127 {
128 tem = simplify_plus_minus (code, mode, op0, op1, 1);
129 if (tem)
130 return tem;
131 }
132
133 return gen_rtx_fmt_ee (code, mode, op0, op1);
134 }
135 \f
136 /* If X is a MEM referencing the constant pool, return the real value.
137 Otherwise return X. */
138 rtx
139 avoid_constant_pool_reference (x)
140 rtx x;
141 {
142 rtx c, addr;
143 enum machine_mode cmode;
144
145 if (GET_CODE (x) != MEM)
146 return x;
147 addr = XEXP (x, 0);
148
149 if (GET_CODE (addr) != SYMBOL_REF
150 || ! CONSTANT_POOL_ADDRESS_P (addr))
151 return x;
152
153 c = get_pool_constant (addr);
154 cmode = get_pool_mode (addr);
155
156 /* If we're accessing the constant in a different mode than it was
157 originally stored, attempt to fix that up via subreg simplifications.
158 If that fails we have no choice but to return the original memory. */
159 if (cmode != GET_MODE (x))
160 {
161 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
162 return c ? c : x;
163 }
164
165 return c;
166 }
167 \f
168 /* Make a unary operation by first seeing if it folds and otherwise making
169 the specified operation. */
170
171 rtx
172 simplify_gen_unary (code, mode, op, op_mode)
173 enum rtx_code code;
174 enum machine_mode mode;
175 rtx op;
176 enum machine_mode op_mode;
177 {
178 rtx tem;
179
180 /* If this simplifies, use it. */
181 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
182 return tem;
183
184 return gen_rtx_fmt_e (code, mode, op);
185 }
186
187 /* Likewise for ternary operations. */
188
189 rtx
190 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
191 enum rtx_code code;
192 enum machine_mode mode, op0_mode;
193 rtx op0, op1, op2;
194 {
195 rtx tem;
196
197 /* If this simplifies, use it. */
198 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
199 op0, op1, op2)))
200 return tem;
201
202 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
203 }
204 \f
205 /* Likewise, for relational operations.
206    CMP_MODE specifies the mode in which the comparison
207    is done.  */
208
209 rtx
210 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
211 enum rtx_code code;
212 enum machine_mode mode;
213 enum machine_mode cmp_mode;
214 rtx op0, op1;
215 {
216 rtx tem;
217
218 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
219 return tem;
220
221 /* For the following tests, ensure const0_rtx is op1. */
222 if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
223 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
224
225 /* If op0 is a compare, extract the comparison arguments from it. */
226 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
227 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
228
229   /* If op0 is a comparison, extract the comparison arguments from it.  */
230 if (code == NE && op1 == const0_rtx
231 && GET_RTX_CLASS (GET_CODE (op0)) == '<')
232 return op0;
233 else if (code == EQ && op1 == const0_rtx)
234 {
235       /* reversed_comparison_code checks GET_RTX_CLASS (GET_CODE (op0)) == '<' for us.  */
236 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
237 if (new != UNKNOWN)
238 {
239 code = new;
240 mode = cmp_mode;
241 op1 = XEXP (op0, 1);
242 op0 = XEXP (op0, 0);
243 }
244 }
245
246 /* Put complex operands first and constants second. */
247 if (swap_commutative_operands_p (op0, op1))
248 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
249
250 return gen_rtx_fmt_ee (code, mode, op0, op1);
251 }
252 \f
253 /* Replace all occurrences of OLD in X with NEW and try to simplify the
254 resulting RTX. Return a new RTX which is as simplified as possible. */
255
256 rtx
257 simplify_replace_rtx (x, old, new)
258 rtx x;
259 rtx old;
260 rtx new;
261 {
262 enum rtx_code code = GET_CODE (x);
263 enum machine_mode mode = GET_MODE (x);
264
265 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
266 to build a new expression substituting recursively. If we can't do
267 anything, return our input. */
268
269 if (x == old)
270 return new;
271
272 switch (GET_RTX_CLASS (code))
273 {
274 case '1':
275 {
276 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
277 rtx op = (XEXP (x, 0) == old
278 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
279
280 return simplify_gen_unary (code, mode, op, op_mode);
281 }
282
283 case '2':
284 case 'c':
285 return
286 simplify_gen_binary (code, mode,
287 simplify_replace_rtx (XEXP (x, 0), old, new),
288 simplify_replace_rtx (XEXP (x, 1), old, new));
289 case '<':
290 {
291 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
292 ? GET_MODE (XEXP (x, 0))
293 : GET_MODE (XEXP (x, 1)));
294 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
295 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
296
297 return
298 simplify_gen_relational (code, mode,
299 (op_mode != VOIDmode
300 ? op_mode
301 : GET_MODE (op0) != VOIDmode
302 ? GET_MODE (op0)
303 : GET_MODE (op1)),
304 op0, op1);
305 }
306
307 case '3':
308 case 'b':
309 {
310 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
311 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
312
313 return
314 simplify_gen_ternary (code, mode,
315 (op_mode != VOIDmode
316 ? op_mode
317 : GET_MODE (op0)),
318 op0,
319 simplify_replace_rtx (XEXP (x, 1), old, new),
320 simplify_replace_rtx (XEXP (x, 2), old, new));
321 }
322
323 case 'x':
324 /* The only case we try to handle is a SUBREG. */
325 if (code == SUBREG)
326 {
327 rtx exp;
328 exp = simplify_gen_subreg (GET_MODE (x),
329 simplify_replace_rtx (SUBREG_REG (x),
330 old, new),
331 GET_MODE (SUBREG_REG (x)),
332 SUBREG_BYTE (x));
333 if (exp)
334 x = exp;
335 }
336 return x;
337
338 case 'o':
339 if (code == MEM)
340 return replace_equiv_address_nv (x,
341 simplify_replace_rtx (XEXP (x, 0),
342 old, new));
343
344 if (REG_P (x) && REG_P (old) && REGNO (x) == REGNO (old))
345 return new;
346
347 return x;
348
349 default:
350 return x;
351 }
352 return x;
353 }
354 \f
355 /* Try to simplify a unary operation CODE whose output mode is to be
356 MODE with input operand OP whose mode was originally OP_MODE.
357 Return zero if no simplification can be made. */
358 rtx
359 simplify_unary_operation (code, mode, op, op_mode)
360 enum rtx_code code;
361 enum machine_mode mode;
362 rtx op;
363 enum machine_mode op_mode;
364 {
365 unsigned int width = GET_MODE_BITSIZE (mode);
366 rtx trueop = avoid_constant_pool_reference (op);
367
368 /* The order of these tests is critical so that, for example, we don't
369 check the wrong mode (input vs. output) for a conversion operation,
370 such as FIX. At some point, this should be simplified. */
371
372 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
373 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
374 {
375 HOST_WIDE_INT hv, lv;
376 REAL_VALUE_TYPE d;
377
378 if (GET_CODE (trueop) == CONST_INT)
379 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
380 else
381 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
382
383 REAL_VALUE_FROM_INT (d, lv, hv, mode);
384 d = real_value_truncate (mode, d);
385 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
386 }
387 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
388 && (GET_CODE (trueop) == CONST_DOUBLE
389 || GET_CODE (trueop) == CONST_INT))
390 {
391 HOST_WIDE_INT hv, lv;
392 REAL_VALUE_TYPE d;
393
394 if (GET_CODE (trueop) == CONST_INT)
395 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
396 else
397 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
398
399 if (op_mode == VOIDmode)
400 {
401 /* We don't know how to interpret negative-looking numbers in
402 this case, so don't try to fold those. */
403 if (hv < 0)
404 return 0;
405 }
406 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
407 ;
408 else
409 hv = 0, lv &= GET_MODE_MASK (op_mode);
410
411 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
412 d = real_value_truncate (mode, d);
413 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
414 }
415
416 if (GET_CODE (trueop) == CONST_INT
417 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
418 {
419 HOST_WIDE_INT arg0 = INTVAL (trueop);
420 HOST_WIDE_INT val;
421
422 switch (code)
423 {
424 case NOT:
425 val = ~ arg0;
426 break;
427
428 case NEG:
429 val = - arg0;
430 break;
431
432 case ABS:
433 val = (arg0 >= 0 ? arg0 : - arg0);
434 break;
435
436 case FFS:
437 	  /* Don't use ffs here.  Instead, isolate the low-order bit and then
438 	     find its position.  If arg0 is zero, this will return 0, as desired.  */
439 arg0 &= GET_MODE_MASK (mode);
440 val = exact_log2 (arg0 & (- arg0)) + 1;
441 break;
442
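/* Editor's illustration, not from the original file: the low-bit trick
   used for FFS above.  Standalone sketch; ex_exact_log2 is a hypothetical
   stand-in for exact_log2 and returns -1 unless exactly one bit is set.  */
#if 0
#include <stdio.h>

static int
ex_exact_log2 (unsigned long long x)
{
  int log = 0;
  if (x == 0 || (x & (x - 1)) != 0)
    return -1;                  /* zero, or more than one bit set */
  while ((x & 1) == 0)
    x >>= 1, log++;
  return log;
}

int
main (void)
{
  unsigned long long arg0 = 0x48;   /* lowest set bit is bit 3 */
  /* arg0 & -arg0 isolates the lowest set bit; its log plus one is ffs.  */
  printf ("ffs (0x48) = %d\n", ex_exact_log2 (arg0 & -arg0) + 1);   /* 4 */
  printf ("ffs (0)    = %d\n", ex_exact_log2 (0) + 1);              /* 0 */
  return 0;
}
#endif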
443 case TRUNCATE:
444 val = arg0;
445 break;
446
447 case ZERO_EXTEND:
448 /* When zero-extending a CONST_INT, we need to know its
449 original mode. */
450 if (op_mode == VOIDmode)
451 abort ();
452 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
453 {
454 /* If we were really extending the mode,
455 we would have to distinguish between zero-extension
456 and sign-extension. */
457 if (width != GET_MODE_BITSIZE (op_mode))
458 abort ();
459 val = arg0;
460 }
461 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
462 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
463 else
464 return 0;
465 break;
466
467 case SIGN_EXTEND:
468 if (op_mode == VOIDmode)
469 op_mode = mode;
470 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
471 {
472 /* If we were really extending the mode,
473 we would have to distinguish between zero-extension
474 and sign-extension. */
475 if (width != GET_MODE_BITSIZE (op_mode))
476 abort ();
477 val = arg0;
478 }
479 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
480 {
481 val
482 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
483 if (val
484 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
485 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
486 }
487 else
488 return 0;
489 break;
490
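/* Editor's illustration, not from the original file: the masking
   arithmetic used by the ZERO_EXTEND and SIGN_EXTEND cases above, shown
   standalone for an 8-bit operand mode inside a 64-bit host word.  The
   left shift of -1 mirrors the idiom in the code and assumes the usual
   two's-complement behavior.  */
#if 0
#include <stdio.h>

int
main (void)
{
  long long arg0 = -112;    /* low byte is 0x90 */
  int bits = 8;             /* GET_MODE_BITSIZE (op_mode) */
  long long mask = ~((long long) -1 << bits);   /* 0xff */

  long long zext = arg0 & mask;   /* 0x90 = 144 */
  long long sext = arg0 & mask;
  if (sext & ((long long) 1 << (bits - 1)))   /* sign bit of the mode set? */
    sext -= (long long) 1 << bits;            /* 144 - 256 = -112 */

  printf ("zero_extend = %lld\n", zext);   /* 144 */
  printf ("sign_extend = %lld\n", sext);   /* -112 */
  return 0;
}
#endif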
491 case SQRT:
492 case FLOAT_EXTEND:
493 case FLOAT_TRUNCATE:
494 case SS_TRUNCATE:
495 case US_TRUNCATE:
496 return 0;
497
498 default:
499 abort ();
500 }
501
502 val = trunc_int_for_mode (val, mode);
503
504 return GEN_INT (val);
505 }
506
507 /* We can do some operations on integer CONST_DOUBLEs. Also allow
508 for a DImode operation on a CONST_INT. */
509 else if (GET_MODE (trueop) == VOIDmode
510 && width <= HOST_BITS_PER_WIDE_INT * 2
511 && (GET_CODE (trueop) == CONST_DOUBLE
512 || GET_CODE (trueop) == CONST_INT))
513 {
514 unsigned HOST_WIDE_INT l1, lv;
515 HOST_WIDE_INT h1, hv;
516
517 if (GET_CODE (trueop) == CONST_DOUBLE)
518 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
519 else
520 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
521
522 switch (code)
523 {
524 case NOT:
525 lv = ~ l1;
526 hv = ~ h1;
527 break;
528
529 case NEG:
530 neg_double (l1, h1, &lv, &hv);
531 break;
532
533 case ABS:
534 if (h1 < 0)
535 neg_double (l1, h1, &lv, &hv);
536 else
537 lv = l1, hv = h1;
538 break;
539
540 case FFS:
541 hv = 0;
542 	  if (l1 != 0)
543 	    lv = exact_log2 (l1 & (-l1)) + 1;
544 	  else
545 	    lv = h1 == 0 ? 0 : HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
546 break;
547
548 case TRUNCATE:
549 /* This is just a change-of-mode, so do nothing. */
550 lv = l1, hv = h1;
551 break;
552
553 case ZERO_EXTEND:
554 if (op_mode == VOIDmode)
555 abort ();
556
557 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
558 return 0;
559
560 hv = 0;
561 lv = l1 & GET_MODE_MASK (op_mode);
562 break;
563
564 case SIGN_EXTEND:
565 if (op_mode == VOIDmode
566 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
567 return 0;
568 else
569 {
570 lv = l1 & GET_MODE_MASK (op_mode);
571 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
572 && (lv & ((HOST_WIDE_INT) 1
573 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
574 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
575
576 hv = HWI_SIGN_EXTEND (lv);
577 }
578 break;
579
580 case SQRT:
581 return 0;
582
583 default:
584 return 0;
585 }
586
587 return immed_double_const (lv, hv, mode);
588 }
589
590 else if (GET_CODE (trueop) == CONST_DOUBLE
591 && GET_MODE_CLASS (mode) == MODE_FLOAT)
592 {
593 REAL_VALUE_TYPE d;
594 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
595
596 switch (code)
597 {
598 case SQRT:
599 /* We don't attempt to optimize this. */
600 return 0;
601
602 case ABS: d = REAL_VALUE_ABS (d); break;
603 case NEG: d = REAL_VALUE_NEGATE (d); break;
604 case FLOAT_TRUNCATE: d = real_value_truncate (mode, d); break;
605 case FLOAT_EXTEND: /* All this does is change the mode. */ break;
606 case FIX: d = REAL_VALUE_RNDZINT (d); break;
607 case UNSIGNED_FIX: d = REAL_VALUE_UNSIGNED_RNDZINT (d); break;
608 default:
609 abort ();
610 }
611 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
612 }
613
614 else if (GET_CODE (trueop) == CONST_DOUBLE
615 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
616 && GET_MODE_CLASS (mode) == MODE_INT
617 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
618 {
619 HOST_WIDE_INT i;
620 REAL_VALUE_TYPE d;
621 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
622 switch (code)
623 {
624 case FIX: i = REAL_VALUE_FIX (d); break;
625 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
626 default:
627 abort ();
628 }
629 return gen_int_mode (i, mode);
630 }
631
632 /* This was formerly used only for non-IEEE float.
633 eggert@twinsun.com says it is safe for IEEE also. */
634 else
635 {
636 enum rtx_code reversed;
637 /* There are some simplifications we can do even if the operands
638 aren't constant. */
639 switch (code)
640 {
641 case NOT:
642 /* (not (not X)) == X. */
643 if (GET_CODE (op) == NOT)
644 return XEXP (op, 0);
645
646 /* (not (eq X Y)) == (ne X Y), etc. */
647 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
648 && ((reversed = reversed_comparison_code (op, NULL_RTX))
649 != UNKNOWN))
650 return gen_rtx_fmt_ee (reversed,
651 op_mode, XEXP (op, 0), XEXP (op, 1));
652 break;
653
654 case NEG:
655 /* (neg (neg X)) == X. */
656 if (GET_CODE (op) == NEG)
657 return XEXP (op, 0);
658 break;
659
660 case SIGN_EXTEND:
661 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
662 becomes just the MINUS if its mode is MODE. This allows
663 folding switch statements on machines using casesi (such as
664 the VAX). */
665 if (GET_CODE (op) == TRUNCATE
666 && GET_MODE (XEXP (op, 0)) == mode
667 && GET_CODE (XEXP (op, 0)) == MINUS
668 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
669 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
670 return XEXP (op, 0);
671
672 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
673 if (! POINTERS_EXTEND_UNSIGNED
674 && mode == Pmode && GET_MODE (op) == ptr_mode
675 && (CONSTANT_P (op)
676 || (GET_CODE (op) == SUBREG
677 && GET_CODE (SUBREG_REG (op)) == REG
678 && REG_POINTER (SUBREG_REG (op))
679 && GET_MODE (SUBREG_REG (op)) == Pmode)))
680 return convert_memory_address (Pmode, op);
681 #endif
682 break;
683
684 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
685 case ZERO_EXTEND:
686 if (POINTERS_EXTEND_UNSIGNED > 0
687 && mode == Pmode && GET_MODE (op) == ptr_mode
688 && (CONSTANT_P (op)
689 || (GET_CODE (op) == SUBREG
690 && GET_CODE (SUBREG_REG (op)) == REG
691 && REG_POINTER (SUBREG_REG (op))
692 && GET_MODE (SUBREG_REG (op)) == Pmode)))
693 return convert_memory_address (Pmode, op);
694 break;
695 #endif
696
697 default:
698 break;
699 }
700
701 return 0;
702 }
703 }
704 \f
705 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
706 and OP1. Return 0 if no simplification is possible.
707
708 Don't use this for relational operations such as EQ or LT.
709 Use simplify_relational_operation instead. */
710 rtx
711 simplify_binary_operation (code, mode, op0, op1)
712 enum rtx_code code;
713 enum machine_mode mode;
714 rtx op0, op1;
715 {
716 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
717 HOST_WIDE_INT val;
718 unsigned int width = GET_MODE_BITSIZE (mode);
719 rtx tem;
720 rtx trueop0 = avoid_constant_pool_reference (op0);
721 rtx trueop1 = avoid_constant_pool_reference (op1);
722
723 /* Relational operations don't work here. We must know the mode
724 of the operands in order to do the comparison correctly.
725 Assuming a full word can give incorrect results.
726 Consider comparing 128 with -128 in QImode. */
727
728 if (GET_RTX_CLASS (code) == '<')
729 abort ();
730
731 /* Make sure the constant is second. */
732 if (GET_RTX_CLASS (code) == 'c'
733 && swap_commutative_operands_p (trueop0, trueop1))
734 {
735 tem = op0, op0 = op1, op1 = tem;
736 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
737 }
738
739 if (GET_MODE_CLASS (mode) == MODE_FLOAT
740 && GET_CODE (trueop0) == CONST_DOUBLE
741 && GET_CODE (trueop1) == CONST_DOUBLE
742 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
743 {
744 REAL_VALUE_TYPE f0, f1, value;
745
746 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
747 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
748 f0 = real_value_truncate (mode, f0);
749 f1 = real_value_truncate (mode, f1);
750
751 if (code == DIV
752 && !MODE_HAS_INFINITIES (mode)
753 && REAL_VALUES_EQUAL (f1, dconst0))
754 return 0;
755
756 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
757
758 value = real_value_truncate (mode, value);
759 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
760 }
761
762 /* We can fold some multi-word operations. */
763 if (GET_MODE_CLASS (mode) == MODE_INT
764 && width == HOST_BITS_PER_WIDE_INT * 2
765 && (GET_CODE (trueop0) == CONST_DOUBLE
766 || GET_CODE (trueop0) == CONST_INT)
767 && (GET_CODE (trueop1) == CONST_DOUBLE
768 || GET_CODE (trueop1) == CONST_INT))
769 {
770 unsigned HOST_WIDE_INT l1, l2, lv;
771 HOST_WIDE_INT h1, h2, hv;
772
773 if (GET_CODE (trueop0) == CONST_DOUBLE)
774 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
775 else
776 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
777
778 if (GET_CODE (trueop1) == CONST_DOUBLE)
779 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
780 else
781 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
782
783 switch (code)
784 {
785 case MINUS:
786 /* A - B == A + (-B). */
787 neg_double (l2, h2, &lv, &hv);
788 l2 = lv, h2 = hv;
789
790 	  /* ... fall through ... */
791
792 case PLUS:
793 add_double (l1, h1, l2, h2, &lv, &hv);
794 break;
795
796 case MULT:
797 mul_double (l1, h1, l2, h2, &lv, &hv);
798 break;
799
800 case DIV: case MOD: case UDIV: case UMOD:
801 /* We'd need to include tree.h to do this and it doesn't seem worth
802 it. */
803 return 0;
804
805 case AND:
806 lv = l1 & l2, hv = h1 & h2;
807 break;
808
809 case IOR:
810 lv = l1 | l2, hv = h1 | h2;
811 break;
812
813 case XOR:
814 lv = l1 ^ l2, hv = h1 ^ h2;
815 break;
816
817 case SMIN:
818 if (h1 < h2
819 || (h1 == h2
820 && ((unsigned HOST_WIDE_INT) l1
821 < (unsigned HOST_WIDE_INT) l2)))
822 lv = l1, hv = h1;
823 else
824 lv = l2, hv = h2;
825 break;
826
827 case SMAX:
828 if (h1 > h2
829 || (h1 == h2
830 && ((unsigned HOST_WIDE_INT) l1
831 > (unsigned HOST_WIDE_INT) l2)))
832 lv = l1, hv = h1;
833 else
834 lv = l2, hv = h2;
835 break;
836
837 case UMIN:
838 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
839 || (h1 == h2
840 && ((unsigned HOST_WIDE_INT) l1
841 < (unsigned HOST_WIDE_INT) l2)))
842 lv = l1, hv = h1;
843 else
844 lv = l2, hv = h2;
845 break;
846
847 case UMAX:
848 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
849 || (h1 == h2
850 && ((unsigned HOST_WIDE_INT) l1
851 > (unsigned HOST_WIDE_INT) l2)))
852 lv = l1, hv = h1;
853 else
854 lv = l2, hv = h2;
855 break;
856
857 case LSHIFTRT: case ASHIFTRT:
858 case ASHIFT:
859 case ROTATE: case ROTATERT:
860 #ifdef SHIFT_COUNT_TRUNCATED
861 if (SHIFT_COUNT_TRUNCATED)
862 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
863 #endif
864
865 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
866 return 0;
867
868 if (code == LSHIFTRT || code == ASHIFTRT)
869 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
870 code == ASHIFTRT);
871 else if (code == ASHIFT)
872 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
873 else if (code == ROTATE)
874 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
875 else /* code == ROTATERT */
876 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
877 break;
878
879 default:
880 return 0;
881 }
882
883 return immed_double_const (lv, hv, mode);
884 }
885
886 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
887 || width > HOST_BITS_PER_WIDE_INT || width == 0)
888 {
889 /* Even if we can't compute a constant result,
890 there are some cases worth simplifying. */
891
892 switch (code)
893 {
894 case PLUS:
895 /* Maybe simplify x + 0 to x. The two expressions are equivalent
896 when x is NaN, infinite, or finite and non-zero. They aren't
897 when x is -0 and the rounding mode is not towards -infinity,
898 	     since (-0) + 0 is then +0, not -0.  */
899 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
900 return op0;
901
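/* Editor's illustration, not from the original file: why the signed-zero
   test above is needed.  A standalone IEEE double demo: -0.0 and +0.0
   compare equal, but (-0.0) + 0.0 produces +0.0 under the default
   rounding mode, so x + 0 -> x would lose the sign.  */
#if 0
#include <stdio.h>
#include <math.h>

int
main (void)
{
  double x = -0.0;
  double sum = x + 0.0;   /* rounds to +0.0, not -0.0 */
  printf ("signbit (x)       = %d\n", signbit (x) != 0);    /* 1 */
  printf ("signbit (x + 0.0) = %d\n", signbit (sum) != 0);  /* 0 */
  return 0;
}
#endif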
902 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
903 transformations are safe even for IEEE. */
904 if (GET_CODE (op0) == NEG)
905 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
906 else if (GET_CODE (op1) == NEG)
907 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
908
909 /* (~a) + 1 -> -a */
910 if (INTEGRAL_MODE_P (mode)
911 && GET_CODE (op0) == NOT
912 && trueop1 == const1_rtx)
913 return gen_rtx_NEG (mode, XEXP (op0, 0));
914
915 /* Handle both-operands-constant cases. We can only add
916 CONST_INTs to constants since the sum of relocatable symbols
917 can't be handled by most assemblers. Don't add CONST_INT
918 to CONST_INT since overflow won't be computed properly if wider
919 than HOST_BITS_PER_WIDE_INT. */
920
921 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
922 && GET_CODE (op1) == CONST_INT)
923 return plus_constant (op0, INTVAL (op1));
924 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
925 && GET_CODE (op0) == CONST_INT)
926 return plus_constant (op1, INTVAL (op0));
927
928 /* See if this is something like X * C - X or vice versa or
929 if the multiplication is written as a shift. If so, we can
930 distribute and make a new multiply, shift, or maybe just
931 have X (if C is 2 in the example above). But don't make
932 	     a real multiply if we didn't have one before.  */
933
934 if (! FLOAT_MODE_P (mode))
935 {
936 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
937 rtx lhs = op0, rhs = op1;
938 int had_mult = 0;
939
940 if (GET_CODE (lhs) == NEG)
941 coeff0 = -1, lhs = XEXP (lhs, 0);
942 else if (GET_CODE (lhs) == MULT
943 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
944 {
945 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
946 had_mult = 1;
947 }
948 else if (GET_CODE (lhs) == ASHIFT
949 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
950 && INTVAL (XEXP (lhs, 1)) >= 0
951 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
952 {
953 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
954 lhs = XEXP (lhs, 0);
955 }
956
957 if (GET_CODE (rhs) == NEG)
958 coeff1 = -1, rhs = XEXP (rhs, 0);
959 else if (GET_CODE (rhs) == MULT
960 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
961 {
962 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
963 had_mult = 1;
964 }
965 else if (GET_CODE (rhs) == ASHIFT
966 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
967 && INTVAL (XEXP (rhs, 1)) >= 0
968 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
969 {
970 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
971 rhs = XEXP (rhs, 0);
972 }
973
974 if (rtx_equal_p (lhs, rhs))
975 {
976 tem = simplify_gen_binary (MULT, mode, lhs,
977 GEN_INT (coeff0 + coeff1));
978 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
979 }
980 }
981
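/* Editor's illustration, not from the original file: the coefficient
   bookkeeping above in plain integers.  (x << 3) + (-x) is treated as
   x*8 + x*(-1) and combined into x * 7; a standalone check: */
#if 0
#include <stdio.h>

int
main (void)
{
  long long x = 37;
  long long coeff0 = 1LL << 3;   /* from the ASHIFT form */
  long long coeff1 = -1;         /* from the NEG form */
  printf ("%lld == %lld\n",
          (x << 3) + (-x), x * (coeff0 + coeff1));   /* both 259 */
  return 0;
}
#endif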
982 /* If one of the operands is a PLUS or a MINUS, see if we can
983 simplify this by the associative law.
984 Don't use the associative law for floating point.
985 The inaccuracy makes it nonassociative,
986 and subtle programs can break if operations are associated. */
987
988 if (INTEGRAL_MODE_P (mode)
989 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
990 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
991 || (GET_CODE (op0) == CONST
992 && GET_CODE (XEXP (op0, 0)) == PLUS)
993 || (GET_CODE (op1) == CONST
994 && GET_CODE (XEXP (op1, 0)) == PLUS))
995 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
996 return tem;
997 break;
998
999 case COMPARE:
1000 #ifdef HAVE_cc0
1001       /* When using cc0, convert (compare FOO (const_int 0)) to FOO.
1002 	 Otherwise we want to leave it as a COMPARE so we can
1003 	 distinguish it from a register-register copy.
1004
1005 In IEEE floating point, x-0 is not the same as x. */
1006
1007 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1008 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1009 && trueop1 == CONST0_RTX (mode))
1010 return op0;
1011 #endif
1012
1013 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1014 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1015 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1016 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1017 {
1018 rtx xop00 = XEXP (op0, 0);
1019 rtx xop10 = XEXP (op1, 0);
1020
1021 #ifdef HAVE_cc0
1022 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1023 #else
1024 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1025 && GET_MODE (xop00) == GET_MODE (xop10)
1026 && REGNO (xop00) == REGNO (xop10)
1027 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1028 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1029 #endif
1030 return xop00;
1031 }
1032 break;
1033
1034 case MINUS:
1035 /* We can't assume x-x is 0 even with non-IEEE floating point,
1036 but since it is zero except in very strange circumstances, we
1037 will treat it as zero with -funsafe-math-optimizations. */
1038 if (rtx_equal_p (trueop0, trueop1)
1039 && ! side_effects_p (op0)
1040 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1041 return CONST0_RTX (mode);
1042
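/* Editor's illustration, not from the original file: why x - x -> 0 is
   gated for floating point.  A standalone IEEE double demo; with
   infinities or NaNs the difference is NaN, not zero.  */
#if 0
#include <stdio.h>
#include <math.h>

int
main (void)
{
  double pinf = INFINITY;
  double qnan = NAN;
  printf ("inf - inf = %f\n", pinf - pinf);   /* nan, not 0 */
  printf ("nan - nan = %f\n", qnan - qnan);   /* nan, not 0 */
  return 0;
}
#endif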
1043 /* Change subtraction from zero into negation. (0 - x) is the
1044 same as -x when x is NaN, infinite, or finite and non-zero.
1045 But if the mode has signed zeros, and does not round towards
1046 -infinity, then 0 - 0 is 0, not -0. */
1047 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1048 return gen_rtx_NEG (mode, op1);
1049
1050 /* (-1 - a) is ~a. */
1051 if (trueop0 == constm1_rtx)
1052 return gen_rtx_NOT (mode, op1);
1053
1054 /* Subtracting 0 has no effect unless the mode has signed zeros
1055 and supports rounding towards -infinity. In such a case,
1056 0 - 0 is -0. */
1057 if (!(HONOR_SIGNED_ZEROS (mode)
1058 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1059 && trueop1 == CONST0_RTX (mode))
1060 return op0;
1061
1062 /* See if this is something like X * C - X or vice versa or
1063 if the multiplication is written as a shift. If so, we can
1064 distribute and make a new multiply, shift, or maybe just
1065 have X (if C is 2 in the example above). But don't make
1066 	     a real multiply if we didn't have one before.  */
1067
1068 if (! FLOAT_MODE_P (mode))
1069 {
1070 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1071 rtx lhs = op0, rhs = op1;
1072 int had_mult = 0;
1073
1074 if (GET_CODE (lhs) == NEG)
1075 coeff0 = -1, lhs = XEXP (lhs, 0);
1076 else if (GET_CODE (lhs) == MULT
1077 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1078 {
1079 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1080 had_mult = 1;
1081 }
1082 else if (GET_CODE (lhs) == ASHIFT
1083 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1084 && INTVAL (XEXP (lhs, 1)) >= 0
1085 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1086 {
1087 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1088 lhs = XEXP (lhs, 0);
1089 }
1090
1091 if (GET_CODE (rhs) == NEG)
1092 	    coeff1 = -1, rhs = XEXP (rhs, 0);
1093 else if (GET_CODE (rhs) == MULT
1094 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1095 {
1096 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1097 had_mult = 1;
1098 }
1099 else if (GET_CODE (rhs) == ASHIFT
1100 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1101 && INTVAL (XEXP (rhs, 1)) >= 0
1102 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1103 {
1104 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1105 rhs = XEXP (rhs, 0);
1106 }
1107
1108 if (rtx_equal_p (lhs, rhs))
1109 {
1110 tem = simplify_gen_binary (MULT, mode, lhs,
1111 GEN_INT (coeff0 - coeff1));
1112 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1113 }
1114 }
1115
1116 /* (a - (-b)) -> (a + b). True even for IEEE. */
1117 if (GET_CODE (op1) == NEG)
1118 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1119
1120 /* If one of the operands is a PLUS or a MINUS, see if we can
1121 simplify this by the associative law.
1122 Don't use the associative law for floating point.
1123 The inaccuracy makes it nonassociative,
1124 and subtle programs can break if operations are associated. */
1125
1126 if (INTEGRAL_MODE_P (mode)
1127 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1128 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1129 || (GET_CODE (op0) == CONST
1130 && GET_CODE (XEXP (op0, 0)) == PLUS)
1131 || (GET_CODE (op1) == CONST
1132 && GET_CODE (XEXP (op1, 0)) == PLUS))
1133 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1134 return tem;
1135
1136 /* Don't let a relocatable value get a negative coeff. */
1137 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1138 return simplify_gen_binary (PLUS, mode,
1139 op0,
1140 neg_const_int (mode, op1));
1141
1142 /* (x - (x & y)) -> (x & ~y) */
1143 if (GET_CODE (op1) == AND)
1144 {
1145 if (rtx_equal_p (op0, XEXP (op1, 0)))
1146 return simplify_gen_binary (AND, mode, op0,
1147 gen_rtx_NOT (mode, XEXP (op1, 1)));
1148 if (rtx_equal_p (op0, XEXP (op1, 1)))
1149 return simplify_gen_binary (AND, mode, op0,
1150 gen_rtx_NOT (mode, XEXP (op1, 0)));
1151 }
1152 break;
1153
1154 case MULT:
1155 if (trueop1 == constm1_rtx)
1156 {
1157 tem = simplify_unary_operation (NEG, mode, op0, mode);
1158
1159 return tem ? tem : gen_rtx_NEG (mode, op0);
1160 }
1161
1162 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1163 x is NaN, since x * 0 is then also NaN. Nor is it valid
1164 when the mode has signed zeros, since multiplying a negative
1165 number by 0 will give -0, not 0. */
1166 if (!HONOR_NANS (mode)
1167 && !HONOR_SIGNED_ZEROS (mode)
1168 && trueop1 == CONST0_RTX (mode)
1169 && ! side_effects_p (op0))
1170 return op1;
1171
1172 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1173 However, ANSI says we can drop signals,
1174 so we can do this anyway. */
1175 if (trueop1 == CONST1_RTX (mode))
1176 return op0;
1177
1178 /* Convert multiply by constant power of two into shift unless
1179 we are still generating RTL. This test is a kludge. */
1180 if (GET_CODE (trueop1) == CONST_INT
1181 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1182 /* If the mode is larger than the host word size, and the
1183 uppermost bit is set, then this isn't a power of two due
1184 to implicit sign extension. */
1185 && (width <= HOST_BITS_PER_WIDE_INT
1186 || val != HOST_BITS_PER_WIDE_INT - 1)
1187 && ! rtx_equal_function_value_matters)
1188 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1189
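/* Editor's illustration, not from the original file: the power-of-two
   test above in standalone form.  A multiply by 2**N is a left shift by
   N, but only when the constant has exactly one bit set.  */
#if 0
#include <stdio.h>

int
main (void)
{
  long long x = 53;
  /* exact_log2 (16) == 4, so x * 16 becomes x << 4.  */
  printf ("%lld == %lld\n", x * 16, x << 4);   /* both 848 */
  return 0;
}
#endif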
1190 /* x*2 is x+x and x*(-1) is -x */
1191 if (GET_CODE (trueop1) == CONST_DOUBLE
1192 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1193 && GET_MODE (op0) == mode)
1194 {
1195 REAL_VALUE_TYPE d;
1196 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1197
1198 if (REAL_VALUES_EQUAL (d, dconst2))
1199 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1200
1201 if (REAL_VALUES_EQUAL (d, dconstm1))
1202 return gen_rtx_NEG (mode, op0);
1203 }
1204 break;
1205
1206 case IOR:
1207 if (trueop1 == const0_rtx)
1208 return op0;
1209 if (GET_CODE (trueop1) == CONST_INT
1210 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1211 == GET_MODE_MASK (mode)))
1212 return op1;
1213 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1214 return op0;
1215 /* A | (~A) -> -1 */
1216 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1217 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1218 && ! side_effects_p (op0)
1219 && GET_MODE_CLASS (mode) != MODE_CC)
1220 return constm1_rtx;
1221 break;
1222
1223 case XOR:
1224 if (trueop1 == const0_rtx)
1225 return op0;
1226 if (GET_CODE (trueop1) == CONST_INT
1227 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1228 == GET_MODE_MASK (mode)))
1229 return gen_rtx_NOT (mode, op0);
1230 if (trueop0 == trueop1 && ! side_effects_p (op0)
1231 && GET_MODE_CLASS (mode) != MODE_CC)
1232 return const0_rtx;
1233 break;
1234
1235 case AND:
1236 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1237 return const0_rtx;
1238 if (GET_CODE (trueop1) == CONST_INT
1239 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1240 == GET_MODE_MASK (mode)))
1241 return op0;
1242 if (trueop0 == trueop1 && ! side_effects_p (op0)
1243 && GET_MODE_CLASS (mode) != MODE_CC)
1244 return op0;
1245 /* A & (~A) -> 0 */
1246 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1247 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1248 && ! side_effects_p (op0)
1249 && GET_MODE_CLASS (mode) != MODE_CC)
1250 return const0_rtx;
1251 break;
1252
1253 case UDIV:
1254 /* Convert divide by power of two into shift (divide by 1 handled
1255 below). */
1256 if (GET_CODE (trueop1) == CONST_INT
1257 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1258 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1259
1260 /* ... fall through ... */
1261
1262 case DIV:
1263 if (trueop1 == CONST1_RTX (mode))
1264 {
1265 	  /* On some platforms DIV uses a narrower mode than its
1266 operands. */
1267 rtx x = gen_lowpart_common (mode, op0);
1268 if (x)
1269 return x;
1270 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1271 return gen_lowpart_SUBREG (mode, op0);
1272 else
1273 return op0;
1274 }
1275
1276 /* Maybe change 0 / x to 0. This transformation isn't safe for
1277 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1278 Nor is it safe for modes with signed zeros, since dividing
1279 0 by a negative number gives -0, not 0. */
1280 if (!HONOR_NANS (mode)
1281 && !HONOR_SIGNED_ZEROS (mode)
1282 && trueop0 == CONST0_RTX (mode)
1283 && ! side_effects_p (op1))
1284 return op0;
1285
1286 /* Change division by a constant into multiplication. Only do
1287 this with -funsafe-math-optimizations. */
1288 else if (GET_CODE (trueop1) == CONST_DOUBLE
1289 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1290 && trueop1 != CONST0_RTX (mode)
1291 && flag_unsafe_math_optimizations)
1292 {
1293 REAL_VALUE_TYPE d;
1294 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1295
1296 if (! REAL_VALUES_EQUAL (d, dconst0))
1297 {
1298 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1299 return gen_rtx_MULT (mode, op0,
1300 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1301 }
1302 }
1303 break;
1304
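/* Editor's illustration, not from the original file: why the reciprocal
   transformation above requires -funsafe-math-optimizations.  1/3 is not
   exactly representable in binary, so x/3.0 and x * (1.0/3.0) can round
   differently; for x = 5.0 they differ by one ulp.  Standalone demo: */
#if 0
#include <stdio.h>

int
main (void)
{
  double x = 5.0;
  double q1 = x / 3.0;
  double q2 = x * (1.0 / 3.0);
  printf ("x / 3     = %.17g\n", q1);
  printf ("x * (1/3) = %.17g\n", q2);
  printf ("%s\n", q1 == q2 ? "equal" : "differ");   /* differ */
  return 0;
}
#endif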
1305 case UMOD:
1306 /* Handle modulus by power of two (mod with 1 handled below). */
1307 if (GET_CODE (trueop1) == CONST_INT
1308 && exact_log2 (INTVAL (trueop1)) > 0)
1309 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1310
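/* Editor's illustration, not from the original file: the UMOD rewrite
   above.  An unsigned modulus by 2**N is a mask with 2**N - 1; a
   standalone check: */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long long x = 1234567;
  printf ("%llu == %llu\n", x % 8, x & 7);   /* both 7 */
  return 0;
}
#endif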
1311 /* ... fall through ... */
1312
1313 case MOD:
1314 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1315 && ! side_effects_p (op0) && ! side_effects_p (op1))
1316 return const0_rtx;
1317 break;
1318
1319 case ROTATERT:
1320 case ROTATE:
1321 /* Rotating ~0 always results in ~0. */
1322 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1323 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1324 && ! side_effects_p (op1))
1325 return op0;
1326
1327 /* ... fall through ... */
1328
1329 case ASHIFT:
1330 case ASHIFTRT:
1331 case LSHIFTRT:
1332 if (trueop1 == const0_rtx)
1333 return op0;
1334 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1335 return op0;
1336 break;
1337
1338 case SMIN:
1339 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1340 	  && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1341 && ! side_effects_p (op0))
1342 return op1;
1343 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1344 return op0;
1345 break;
1346
1347 case SMAX:
1348 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1349 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1350 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1351 && ! side_effects_p (op0))
1352 return op1;
1353 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1354 return op0;
1355 break;
1356
1357 case UMIN:
1358 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1359 return op1;
1360 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1361 return op0;
1362 break;
1363
1364 case UMAX:
1365 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1366 return op1;
1367 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1368 return op0;
1369 break;
1370
1371 case SS_PLUS:
1372 case US_PLUS:
1373 case SS_MINUS:
1374 case US_MINUS:
1375 /* ??? There are simplifications that can be done. */
1376 return 0;
1377
1378 default:
1379 abort ();
1380 }
1381
1382 return 0;
1383 }
1384
1385 /* Get the integer argument values in two forms:
1386 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1387
1388 arg0 = INTVAL (trueop0);
1389 arg1 = INTVAL (trueop1);
1390
1391 if (width < HOST_BITS_PER_WIDE_INT)
1392 {
1393 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1394 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1395
1396 arg0s = arg0;
1397 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1398 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1399
1400 arg1s = arg1;
1401 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1402 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1403 }
1404 else
1405 {
1406 arg0s = arg0;
1407 arg1s = arg1;
1408 }
1409
1410 /* Compute the value of the arithmetic. */
1411
1412 switch (code)
1413 {
1414 case PLUS:
1415 val = arg0s + arg1s;
1416 break;
1417
1418 case MINUS:
1419 val = arg0s - arg1s;
1420 break;
1421
1422 case MULT:
1423 val = arg0s * arg1s;
1424 break;
1425
1426 case DIV:
1427 if (arg1s == 0
1428 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1429 && arg1s == -1))
1430 return 0;
1431 val = arg0s / arg1s;
1432 break;
1433
1434 case MOD:
1435 if (arg1s == 0
1436 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1437 && arg1s == -1))
1438 return 0;
1439 val = arg0s % arg1s;
1440 break;
1441
1442 case UDIV:
1443 if (arg1 == 0
1444 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1445 && arg1s == -1))
1446 return 0;
1447 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1448 break;
1449
1450 case UMOD:
1451 if (arg1 == 0
1452 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1453 && arg1s == -1))
1454 return 0;
1455 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1456 break;
1457
1458 case AND:
1459 val = arg0 & arg1;
1460 break;
1461
1462 case IOR:
1463 val = arg0 | arg1;
1464 break;
1465
1466 case XOR:
1467 val = arg0 ^ arg1;
1468 break;
1469
1470 case LSHIFTRT:
1471 /* If shift count is undefined, don't fold it; let the machine do
1472 what it wants. But truncate it if the machine will do that. */
1473 if (arg1 < 0)
1474 return 0;
1475
1476 #ifdef SHIFT_COUNT_TRUNCATED
1477 if (SHIFT_COUNT_TRUNCATED)
1478 arg1 %= width;
1479 #endif
1480
1481 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1482 break;
1483
1484 case ASHIFT:
1485 if (arg1 < 0)
1486 return 0;
1487
1488 #ifdef SHIFT_COUNT_TRUNCATED
1489 if (SHIFT_COUNT_TRUNCATED)
1490 arg1 %= width;
1491 #endif
1492
1493 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1494 break;
1495
1496 case ASHIFTRT:
1497 if (arg1 < 0)
1498 return 0;
1499
1500 #ifdef SHIFT_COUNT_TRUNCATED
1501 if (SHIFT_COUNT_TRUNCATED)
1502 arg1 %= width;
1503 #endif
1504
1505 val = arg0s >> arg1;
1506
1507       /* The bootstrap compiler may not have sign extended the right shift.
1508 	 Manually extend the sign to ensure bootstrap cc matches gcc.  */
1509 if (arg0s < 0 && arg1 > 0)
1510 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1511
1512 break;
1513
1514 case ROTATERT:
1515 if (arg1 < 0)
1516 return 0;
1517
1518 arg1 %= width;
1519 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1520 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1521 break;
1522
1523 case ROTATE:
1524 if (arg1 < 0)
1525 return 0;
1526
1527 arg1 %= width;
1528 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1529 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1530 break;
1531
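/* Editor's illustration, not from the original file: the rotate folding
   above, standalone for a 32-bit value.  Assumes a nonzero count modulo
   the width, as the code does (a zero count was returned earlier).  */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t
ex_rotl32 (uint32_t x, unsigned n)   /* 0 < n < 32 */
{
  return (x << n) | (x >> (32 - n));
}

int
main (void)
{
  /* The two set bits of 0x80000001 move up by four places, the top one
     wrapping around to bit 3, giving bits 3 and 4.  */
  printf ("0x%08x\n", ex_rotl32 (0x80000001u, 4));   /* 0x00000018 */
  return 0;
}
#endif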
1532 case COMPARE:
1533 /* Do nothing here. */
1534 return 0;
1535
1536 case SMIN:
1537 val = arg0s <= arg1s ? arg0s : arg1s;
1538 break;
1539
1540 case UMIN:
1541 val = ((unsigned HOST_WIDE_INT) arg0
1542 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1543 break;
1544
1545 case SMAX:
1546 val = arg0s > arg1s ? arg0s : arg1s;
1547 break;
1548
1549 case UMAX:
1550 val = ((unsigned HOST_WIDE_INT) arg0
1551 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1552 break;
1553
1554 default:
1555 abort ();
1556 }
1557
1558 val = trunc_int_for_mode (val, mode);
1559
1560 return GEN_INT (val);
1561 }
1562 \f
1563 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1564 PLUS or MINUS.
1565
1566    Rather than test for specific cases, we do this by a brute-force method
1567 and do all possible simplifications until no more changes occur. Then
1568 we rebuild the operation.
1569
1570 If FORCE is true, then always generate the rtx. This is used to
1571 canonicalize stuff emitted from simplify_gen_binary. Note that this
1572 can still fail if the rtx is too complex. It won't fail just because
1573 the result is not 'simpler' than the input, however. */
1574
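/* Editor's illustration, not from the original file: the flattening idea
   in miniature.  Each leaf becomes an (op, neg) entry, PLUS/MINUS/NEG
   nodes are expanded until only leaves remain, then terms are combined
   and the expression rebuilt.  A standalone sketch over plain integers
   (the real code walks rtx trees); the ex_term struct is hypothetical.  */
#if 0
#include <stdio.h>

struct ex_term { long long val; int neg; };

int
main (void)
{
  /* (a - (b - 5)) + 3, with a = 10 and b = 4, flattens to the terms
     +a, -b, +5, +3; the two constants then combine into a single +8.  */
  struct ex_term ops[4] = { {10, 0}, {4, 1}, {5, 0}, {3, 0} };
  long long result = 0;
  int i;

  for (i = 0; i < 4; i++)
    result += ops[i].neg ? -ops[i].val : ops[i].val;
  printf ("flattened sum = %lld\n", result);   /* 14 == 10 - 4 + 8 */
  return 0;
}
#endif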
1575 struct simplify_plus_minus_op_data
1576 {
1577 rtx op;
1578 int neg;
1579 };
1580
1581 static int
1582 simplify_plus_minus_op_data_cmp (p1, p2)
1583 const void *p1;
1584 const void *p2;
1585 {
1586 const struct simplify_plus_minus_op_data *d1 = p1;
1587 const struct simplify_plus_minus_op_data *d2 = p2;
1588
1589 return (commutative_operand_precedence (d2->op)
1590 - commutative_operand_precedence (d1->op));
1591 }
1592
1593 static rtx
1594 simplify_plus_minus (code, mode, op0, op1, force)
1595 enum rtx_code code;
1596 enum machine_mode mode;
1597 rtx op0, op1;
1598 int force;
1599 {
1600 struct simplify_plus_minus_op_data ops[8];
1601 rtx result, tem;
1602 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
1603 int first, negate, changed;
1604 int i, j;
1605
1606 memset ((char *) ops, 0, sizeof ops);
1607
1608 /* Set up the two operands and then expand them until nothing has been
1609 changed. If we run out of room in our array, give up; this should
1610 almost never happen. */
1611
1612 ops[0].op = op0;
1613 ops[0].neg = 0;
1614 ops[1].op = op1;
1615 ops[1].neg = (code == MINUS);
1616
1617 do
1618 {
1619 changed = 0;
1620
1621 for (i = 0; i < n_ops; i++)
1622 {
1623 rtx this_op = ops[i].op;
1624 int this_neg = ops[i].neg;
1625 enum rtx_code this_code = GET_CODE (this_op);
1626
1627 switch (this_code)
1628 {
1629 case PLUS:
1630 case MINUS:
1631 if (n_ops == 7)
1632 return NULL_RTX;
1633
1634 ops[n_ops].op = XEXP (this_op, 1);
1635 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
1636 n_ops++;
1637
1638 ops[i].op = XEXP (this_op, 0);
1639 input_ops++;
1640 changed = 1;
1641 break;
1642
1643 case NEG:
1644 ops[i].op = XEXP (this_op, 0);
1645 ops[i].neg = ! this_neg;
1646 changed = 1;
1647 break;
1648
1649 case CONST:
1650 if (n_ops < 7
1651 && GET_CODE (XEXP (this_op, 0)) == PLUS
1652 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
1653 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
1654 {
1655 ops[i].op = XEXP (XEXP (this_op, 0), 0);
1656 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
1657 ops[n_ops].neg = this_neg;
1658 n_ops++;
1659 input_consts++;
1660 changed = 1;
1661 }
1662 break;
1663
1664 case NOT:
1665 /* ~a -> (-a - 1) */
1666 if (n_ops != 7)
1667 {
1668 ops[n_ops].op = constm1_rtx;
1669 ops[n_ops++].neg = this_neg;
1670 ops[i].op = XEXP (this_op, 0);
1671 ops[i].neg = !this_neg;
1672 changed = 1;
1673 }
1674 break;
1675
1676 case CONST_INT:
1677 if (this_neg)
1678 {
1679 ops[i].op = neg_const_int (mode, this_op);
1680 ops[i].neg = 0;
1681 changed = 1;
1682 }
1683 break;
1684
1685 default:
1686 break;
1687 }
1688 }
1689 }
1690 while (changed);
1691
1692 /* If we only have two operands, we can't do anything. */
1693 if (n_ops <= 2 && !force)
1694 return NULL_RTX;
1695
1696 /* Count the number of CONSTs we didn't split above. */
1697 for (i = 0; i < n_ops; i++)
1698 if (GET_CODE (ops[i].op) == CONST)
1699 input_consts++;
1700
1701 /* Now simplify each pair of operands until nothing changes. The first
1702 time through just simplify constants against each other. */
1703
1704 first = 1;
1705 do
1706 {
1707 changed = first;
1708
1709 for (i = 0; i < n_ops - 1; i++)
1710 for (j = i + 1; j < n_ops; j++)
1711 {
1712 rtx lhs = ops[i].op, rhs = ops[j].op;
1713 int lneg = ops[i].neg, rneg = ops[j].neg;
1714
1715 if (lhs != 0 && rhs != 0
1716 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
1717 {
1718 enum rtx_code ncode = PLUS;
1719
1720 if (lneg != rneg)
1721 {
1722 ncode = MINUS;
1723 if (lneg)
1724 tem = lhs, lhs = rhs, rhs = tem;
1725 }
1726 else if (swap_commutative_operands_p (lhs, rhs))
1727 tem = lhs, lhs = rhs, rhs = tem;
1728
1729 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1730
1731 /* Reject "simplifications" that just wrap the two
1732 arguments in a CONST. Failure to do so can result
1733 in infinite recursion with simplify_binary_operation
1734 when it calls us to simplify CONST operations. */
1735 if (tem
1736 && ! (GET_CODE (tem) == CONST
1737 && GET_CODE (XEXP (tem, 0)) == ncode
1738 && XEXP (XEXP (tem, 0), 0) == lhs
1739 && XEXP (XEXP (tem, 0), 1) == rhs)
1740 /* Don't allow -x + -1 -> ~x simplifications in the
1741 first pass. This allows us the chance to combine
1742 the -1 with other constants. */
1743 && ! (first
1744 && GET_CODE (tem) == NOT
1745 && XEXP (tem, 0) == rhs))
1746 {
1747 lneg &= rneg;
1748 if (GET_CODE (tem) == NEG)
1749 tem = XEXP (tem, 0), lneg = !lneg;
1750 if (GET_CODE (tem) == CONST_INT && lneg)
1751 tem = neg_const_int (mode, tem), lneg = 0;
1752
1753 ops[i].op = tem;
1754 ops[i].neg = lneg;
1755 ops[j].op = NULL_RTX;
1756 changed = 1;
1757 }
1758 }
1759 }
1760
1761 first = 0;
1762 }
1763 while (changed);
1764
1765 /* Pack all the operands to the lower-numbered entries. */
1766 for (i = 0, j = 0; j < n_ops; j++)
1767 if (ops[j].op)
1768 ops[i++] = ops[j];
1769 n_ops = i;
1770
1771 /* Sort the operations based on swap_commutative_operands_p. */
1772 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
1773
1774 /* We suppressed creation of trivial CONST expressions in the
1775 combination loop to avoid recursion. Create one manually now.
1776 The combination loop should have ensured that there is exactly
1777 one CONST_INT, and the sort will have ensured that it is last
1778 in the array and that any other constant will be next-to-last. */
1779
1780 if (n_ops > 1
1781 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
1782 && CONSTANT_P (ops[n_ops - 2].op))
1783 {
1784 rtx value = ops[n_ops - 1].op;
1785 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
1786 value = neg_const_int (mode, value);
1787 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
1788 n_ops--;
1789 }
1790
1791 /* Count the number of CONSTs that we generated. */
1792 n_consts = 0;
1793 for (i = 0; i < n_ops; i++)
1794 if (GET_CODE (ops[i].op) == CONST)
1795 n_consts++;
1796
1797 /* Give up if we didn't reduce the number of operands we had. Make
1798 sure we count a CONST as two operands. If we have the same
1799 number of operands, but have made more CONSTs than before, this
1800 is also an improvement, so accept it. */
1801 if (!force
1802 && (n_ops + n_consts > input_ops
1803 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
1804 return NULL_RTX;
1805
1806 /* Put a non-negated operand first. If there aren't any, make all
1807 operands positive and negate the whole thing later. */
1808
1809 negate = 0;
1810 for (i = 0; i < n_ops && ops[i].neg; i++)
1811 continue;
1812 if (i == n_ops)
1813 {
1814 for (i = 0; i < n_ops; i++)
1815 ops[i].neg = 0;
1816 negate = 1;
1817 }
1818 else if (i != 0)
1819 {
1820 tem = ops[0].op;
1821 ops[0] = ops[i];
1822 ops[i].op = tem;
1823 ops[i].neg = 1;
1824 }
1825
1826 /* Now make the result by performing the requested operations. */
1827 result = ops[0].op;
1828 for (i = 1; i < n_ops; i++)
1829 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
1830 mode, result, ops[i].op);
1831
1832 return negate ? gen_rtx_NEG (mode, result) : result;
1833 }
1834
1835 /* Like simplify_binary_operation except used for relational operators.
1836 MODE is the mode of the operands, not that of the result. If MODE
1837 is VOIDmode, both operands must also be VOIDmode and we compare the
1838 operands in "infinite precision".
1839
1840 If no simplification is possible, this function returns zero. Otherwise,
1841 it returns either const_true_rtx or const0_rtx. */
1842
1843 rtx
1844 simplify_relational_operation (code, mode, op0, op1)
1845 enum rtx_code code;
1846 enum machine_mode mode;
1847 rtx op0, op1;
1848 {
1849 int equal, op0lt, op0ltu, op1lt, op1ltu;
1850 rtx tem;
1851 rtx trueop0;
1852 rtx trueop1;
1853
1854 if (mode == VOIDmode
1855 && (GET_MODE (op0) != VOIDmode
1856 || GET_MODE (op1) != VOIDmode))
1857 abort ();
1858
1859 /* If op0 is a compare, extract the comparison arguments from it. */
1860 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1861 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1862
1863 trueop0 = avoid_constant_pool_reference (op0);
1864 trueop1 = avoid_constant_pool_reference (op1);
1865
1866 /* We can't simplify MODE_CC values since we don't know what the
1867 actual comparison is. */
1868 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1869 #ifdef HAVE_cc0
1870 || op0 == cc0_rtx
1871 #endif
1872 )
1873 return 0;
1874
1875 /* Make sure the constant is second. */
1876 if (swap_commutative_operands_p (trueop0, trueop1))
1877 {
1878 tem = op0, op0 = op1, op1 = tem;
1879 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
1880 code = swap_condition (code);
1881 }
1882
1883 /* For integer comparisons of A and B maybe we can simplify A - B and can
1884 then simplify a comparison of that with zero. If A and B are both either
1885 a register or a CONST_INT, this can't help; testing for these cases will
1886 prevent infinite recursion here and speed things up.
1887
1888 If CODE is an unsigned comparison, then we can never do this optimization,
1889 because it gives an incorrect result if the subtraction wraps around zero.
1890 ANSI C defines unsigned operations such that they never overflow, and
1891    thus such cases cannot be ignored.  */
1892
1893 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
1894 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
1895 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
1896 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1897 && code != GTU && code != GEU && code != LTU && code != LEU)
1898 return simplify_relational_operation (signed_condition (code),
1899 mode, tem, const0_rtx);
1900
1901 if (flag_unsafe_math_optimizations && code == ORDERED)
1902 return const_true_rtx;
1903
1904 if (flag_unsafe_math_optimizations && code == UNORDERED)
1905 return const0_rtx;
1906
1907 /* For modes without NaNs, if the two operands are equal, we know the
1908 result. */
1909 if (!HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1))
1910 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1911
1912 /* If the operands are floating-point constants, see if we can fold
1913 the result. */
1914 else if (GET_CODE (trueop0) == CONST_DOUBLE
1915 && GET_CODE (trueop1) == CONST_DOUBLE
1916 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
1917 {
1918 REAL_VALUE_TYPE d0, d1;
1919
1920 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
1921 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
1922
1923 /* Comparisons are unordered iff at least one of the values is NaN. */
1924 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
1925 switch (code)
1926 {
1927 case UNEQ:
1928 case UNLT:
1929 case UNGT:
1930 case UNLE:
1931 case UNGE:
1932 case NE:
1933 case UNORDERED:
1934 return const_true_rtx;
1935 case EQ:
1936 case LT:
1937 case GT:
1938 case LE:
1939 case GE:
1940 case LTGT:
1941 case ORDERED:
1942 return const0_rtx;
1943 default:
1944 return 0;
1945 }
1946
1947 equal = REAL_VALUES_EQUAL (d0, d1);
1948 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
1949 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
1950 }
1951
1952 /* Otherwise, see if the operands are both integers. */
1953 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
1954 && (GET_CODE (trueop0) == CONST_DOUBLE
1955 || GET_CODE (trueop0) == CONST_INT)
1956 && (GET_CODE (trueop1) == CONST_DOUBLE
1957 || GET_CODE (trueop1) == CONST_INT))
1958 {
1959 int width = GET_MODE_BITSIZE (mode);
1960 HOST_WIDE_INT l0s, h0s, l1s, h1s;
1961 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
1962
1963 /* Get the two words comprising each integer constant. */
1964 if (GET_CODE (trueop0) == CONST_DOUBLE)
1965 {
1966 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
1967 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
1968 }
1969 else
1970 {
1971 l0u = l0s = INTVAL (trueop0);
1972 h0u = h0s = HWI_SIGN_EXTEND (l0s);
1973 }
1974
1975 if (GET_CODE (trueop1) == CONST_DOUBLE)
1976 {
1977 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
1978 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
1979 }
1980 else
1981 {
1982 l1u = l1s = INTVAL (trueop1);
1983 h1u = h1s = HWI_SIGN_EXTEND (l1s);
1984 }
1985
1986 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1987 we have to sign or zero-extend the values. */
1988 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
1989 {
1990 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
1991 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
1992
1993 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1994 l0s |= ((HOST_WIDE_INT) (-1) << width);
1995
1996 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1997 l1s |= ((HOST_WIDE_INT) (-1) << width);
1998 }
1999 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2000 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2001
2002 equal = (h0u == h1u && l0u == l1u);
2003 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2004 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2005 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2006 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2007 }
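     /* Worked example (illustrative, assuming a 32-bit HOST_WIDE_INT):
        comparing (const_int -1) with (const_int 1) in SImode gives
        l0u = 0xffffffff, h0s = -1 and l1u = 1, h1s = 0, so OP0LT is set
        for the signed ordering while OP1LTU is set for the unsigned one;
        LT therefore folds to const_true_rtx but LTU to const0_rtx.  */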
2008
2009 /* Otherwise, there are some code-specific tests we can make. */
2010 else
2011 {
2012 switch (code)
2013 {
2014 case EQ:
2015 /* References to the frame plus a constant or labels cannot
2016 be zero, but a SYMBOL_REF can be, due to #pragma weak. */
2017 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2018 || GET_CODE (trueop0) == LABEL_REF)
2019 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2020 /* On some machines, the ap reg can be 0 sometimes. */
2021 && op0 != arg_pointer_rtx
2022 #endif
2023 )
2024 return const0_rtx;
2025 break;
2026
2027 case NE:
2028 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2029 || GET_CODE (trueop0) == LABEL_REF)
2030 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2031 && op0 != arg_pointer_rtx
2032 #endif
2033 )
2034 return const_true_rtx;
2035 break;
2036
2037 case GEU:
2038 /* Unsigned values are never negative. */
2039 if (trueop1 == const0_rtx)
2040 return const_true_rtx;
2041 break;
2042
2043 case LTU:
2044 if (trueop1 == const0_rtx)
2045 return const0_rtx;
2046 break;
2047
2048 case LEU:
2049 /* Unsigned values are never greater than the largest
2050 unsigned value. */
2051 if (GET_CODE (trueop1) == CONST_INT
2052 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2053 && INTEGRAL_MODE_P (mode))
2054 return const_true_rtx;
2055 break;
2056
2057 case GTU:
2058 if (GET_CODE (trueop1) == CONST_INT
2059 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2060 && INTEGRAL_MODE_P (mode))
2061 return const0_rtx;
2062 break;
2063
2064 case LT:
2065 /* Optimize abs(x) < 0.0. */
2066 if (trueop1 == CONST0_RTX (mode))
2067 {
2068 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2069 : trueop0;
2070 if (GET_CODE (tem) == ABS)
2071 return const0_rtx;
2072 }
2073 break;
2074
2075 case GE:
2076 /* Optimize abs(x) >= 0.0. */
2077 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2078 {
2079 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2080 : trueop0;
2081 if (GET_CODE (tem) == ABS)
2082 return const_true_rtx;
2083 }
2084 break;
2085
2086 default:
2087 break;
2088 }
2089
2090 return 0;
2091 }
2092
2093 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2094 as appropriate. */
2095 switch (code)
2096 {
2097 case EQ:
2098 case UNEQ:
2099 return equal ? const_true_rtx : const0_rtx;
2100 case NE:
2101 case LTGT:
2102 return ! equal ? const_true_rtx : const0_rtx;
2103 case LT:
2104 case UNLT:
2105 return op0lt ? const_true_rtx : const0_rtx;
2106 case GT:
2107 case UNGT:
2108 return op1lt ? const_true_rtx : const0_rtx;
2109 case LTU:
2110 return op0ltu ? const_true_rtx : const0_rtx;
2111 case GTU:
2112 return op1ltu ? const_true_rtx : const0_rtx;
2113 case LE:
2114 case UNLE:
2115 return equal || op0lt ? const_true_rtx : const0_rtx;
2116 case GE:
2117 case UNGE:
2118 return equal || op1lt ? const_true_rtx : const0_rtx;
2119 case LEU:
2120 return equal || op0ltu ? const_true_rtx : const0_rtx;
2121 case GEU:
2122 return equal || op1ltu ? const_true_rtx : const0_rtx;
2123 case ORDERED:
2124 return const_true_rtx;
2125 case UNORDERED:
2126 return const0_rtx;
2127 default:
2128 abort ();
2129 }
2130 }
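
/* A minimal usage sketch (illustrative only):

     rtx r = simplify_relational_operation (LTU, SImode,
                                            GEN_INT (0), GEN_INT (1));

   Both operands are CONST_INTs, so their (low, high) word pairs are
   compared directly and R is const_true_rtx; with operands that do not
   fold, e.g. two distinct REGs, the function returns 0 instead.  */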
2131 \f
2132 /* Simplify CODE, an operation with result mode MODE and three operands,
2133 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2134 a constant. Return 0 if no simplification is possible. */
2135
2136 rtx
2137 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2138 enum rtx_code code;
2139 enum machine_mode mode, op0_mode;
2140 rtx op0, op1, op2;
2141 {
2142 unsigned int width = GET_MODE_BITSIZE (mode);
2143
2144 /* VOIDmode means "infinite" precision. */
2145 if (width == 0)
2146 width = HOST_BITS_PER_WIDE_INT;
2147
2148 switch (code)
2149 {
2150 case SIGN_EXTRACT:
2151 case ZERO_EXTRACT:
2152 if (GET_CODE (op0) == CONST_INT
2153 && GET_CODE (op1) == CONST_INT
2154 && GET_CODE (op2) == CONST_INT
2155 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2156 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2157 {
2158 /* Extracting a bit-field from a constant */
2159 HOST_WIDE_INT val = INTVAL (op0);
2160
2161 if (BITS_BIG_ENDIAN)
2162 val >>= (GET_MODE_BITSIZE (op0_mode)
2163 - INTVAL (op2) - INTVAL (op1));
2164 else
2165 val >>= INTVAL (op2);
2166
2167 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2168 {
2169 /* First zero-extend. */
2170 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2171 /* If desired, propagate sign bit. */
2172 if (code == SIGN_EXTRACT
2173 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2174 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2175 }
2176
2177 /* Clear the bits that don't belong in our mode,
2178 unless they and our sign bit are all one.
2179 So we get either a reasonable negative value or a reasonable
2180 unsigned value for this mode. */
2181 if (width < HOST_BITS_PER_WIDE_INT
2182 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2183 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2184 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2185
2186 return GEN_INT (val);
2187 }
2188 break;
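     /* Worked instance (illustrative): with BITS_BIG_ENDIAN == 0,
        (zero_extract:SI (const_int 0x1234) (const_int 4) (const_int 4))
        shifts the value right by the start bit, 0x1234 >> 4 = 0x123, then
        masks to the 4-bit field width, yielding (const_int 3); SIGN_EXTRACT
        would also propagate bit 3 of the field, which is clear here.  */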
2189
2190 case IF_THEN_ELSE:
2191 if (GET_CODE (op0) == CONST_INT)
2192 return op0 != const0_rtx ? op1 : op2;
2193
2194 /* Convert a != b ? a : b and a == b ? b : a to "a". */
2195 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2196 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2197 && rtx_equal_p (XEXP (op0, 0), op1)
2198 && rtx_equal_p (XEXP (op0, 1), op2))
2199 return op1;
2200 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2201 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2202 && rtx_equal_p (XEXP (op0, 1), op1)
2203 && rtx_equal_p (XEXP (op0, 0), op2))
2204 return op2;
2205 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2206 {
2207 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2208 ? GET_MODE (XEXP (op0, 1))
2209 : GET_MODE (XEXP (op0, 0)));
2210 rtx temp;
2211 if (cmp_mode == VOIDmode)
2212 cmp_mode = op0_mode;
2213 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2214 XEXP (op0, 0), XEXP (op0, 1));
2215
2216 /* See if any simplifications were possible. */
2217 if (temp == const0_rtx)
2218 return op2;
2219 else if (temp == const1_rtx)
2220 return op1;
2221 else if (temp)
2222 op0 = temp;
2223
2224 /* See if op1 and op2 are STORE_FLAG_VALUE and zero, in some order. */
2225 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2226 {
2227 HOST_WIDE_INT t = INTVAL (op1);
2228 HOST_WIDE_INT f = INTVAL (op2);
2229
2230 if (t == STORE_FLAG_VALUE && f == 0)
2231 code = GET_CODE (op0);
2232 else if (t == 0 && f == STORE_FLAG_VALUE)
2233 {
2234 enum rtx_code tmp;
2235 tmp = reversed_comparison_code (op0, NULL_RTX);
2236 if (tmp == UNKNOWN)
2237 break;
2238 code = tmp;
2239 }
2240 else
2241 break;
2242
2243 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2244 }
2245 }
2246 break;
2247
2248 default:
2249 abort ();
2250 }
2251
2252 return 0;
2253 }
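
/* E.g. (illustrative, assuming STORE_FLAG_VALUE is 1):
   (if_then_else (lt A B) (const_int 1) (const_int 0)) collapses to
   (lt A B) itself, while swapped arms produce the reversed comparison
   (ge A B) when reversed_comparison_code can prove the reversal safe.  */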
2254
2255 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2256 Return 0 if no simplification is possible. */
2257 rtx
2258 simplify_subreg (outermode, op, innermode, byte)
2259 rtx op;
2260 unsigned int byte;
2261 enum machine_mode outermode, innermode;
2262 {
2263 /* Little bit of sanity checking. */
2264 if (innermode == VOIDmode || outermode == VOIDmode
2265 || innermode == BLKmode || outermode == BLKmode)
2266 abort ();
2267
2268 if (GET_MODE (op) != innermode
2269 && GET_MODE (op) != VOIDmode)
2270 abort ();
2271
2272 if (byte % GET_MODE_SIZE (outermode)
2273 || byte >= GET_MODE_SIZE (innermode))
2274 abort ();
2275
2276 if (outermode == innermode && !byte)
2277 return op;
2278
2279 /* Simplify subregs of vector constants. */
2280 if (GET_CODE (op) == CONST_VECTOR)
2281 {
2282 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2283 int offset = byte / elt_size;
2284 rtx elt;
2285
2286 if (GET_MODE_INNER (innermode) == outermode)
2287 {
2288 elt = CONST_VECTOR_ELT (op, offset);
2289
2290 /* ??? We probably don't need this copy_rtx because constants
2291 can be shared. */
2292
2293 return copy_rtx (elt);
2294 }
2295 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2296 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2297 {
2298 return (gen_rtx_CONST_VECTOR
2299 (outermode,
2300 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2301 &CONST_VECTOR_ELT (op, offset))));
2302 }
2303 else if (GET_MODE_CLASS (outermode) == MODE_INT
2304 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2305 {
2306 /* This happens when the target register size is smaller than
2307 the vector mode, and we synthesize operations with vectors
2308 of elements that are smaller than the register size. */
2309 HOST_WIDE_INT sum = 0, high = 0;
2310 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2311 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2312 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2313 int shift = BITS_PER_UNIT * elt_size;
2314
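          /* Walk the elements starting from the most significant one: on
             a little-endian target that is the highest-numbered element,
             and the unsigned STEP of -1 moves I downward by modular
             wraparound.  */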
2315 for (; n_elts--; i += step)
2316 {
2317 elt = CONST_VECTOR_ELT (op, i);
2318 if (GET_CODE (elt) == CONST_DOUBLE
2319 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2320 {
2321 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2322 elt);
2323 if (! elt)
2324 return NULL_RTX;
2325 }
2326 if (GET_CODE (elt) != CONST_INT)
2327 return NULL_RTX;
2328 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2329 sum = (sum << shift) + INTVAL (elt);
2330 }
2331 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2332 return GEN_INT (trunc_int_for_mode (sum, outermode));
2333 else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
2334 return immed_double_const (high, sum, outermode);
2335 else
2336 return NULL_RTX;
2337 }
2338 else if (GET_MODE_CLASS (outermode) == MODE_INT
2339 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2340 {
2341 enum machine_mode new_mode
2342 = int_mode_for_mode (GET_MODE_INNER (innermode));
2343 int subbyte = byte % elt_size;
2344
2345 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2346 if (! op)
2347 return NULL_RTX;
2348 return simplify_subreg (outermode, op, new_mode, subbyte);
2349 }
2350 else if (GET_MODE_CLASS (outermode) != MODE_VECTOR_INT
2351 && GET_MODE_CLASS (outermode) != MODE_VECTOR_FLOAT)
2352 /* This shouldn't happen, but let's not do anything stupid. */
2353 return NULL_RTX;
2354 }
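      /* Illustrative instance: (subreg:SI (const_vector:V2HI [1 2]) 0) on
         a little-endian target packs the two 16-bit elements into one
         word, visiting element 1 then element 0 and producing
         (const_int 0x20001), i.e. element 0 in the low half and element 1
         in the high half.  */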
2355
2356 /* Attempt to simplify constant to non-SUBREG expression. */
2357 if (CONSTANT_P (op))
2358 {
2359 int offset, part;
2360 unsigned HOST_WIDE_INT val = 0;
2361
2362 if (GET_MODE_CLASS (outermode) == MODE_VECTOR_INT
2363 || GET_MODE_CLASS (outermode) == MODE_VECTOR_FLOAT)
2364 {
2365 /* Construct a CONST_VECTOR from individual subregs. */
2366 enum machine_mode submode = GET_MODE_INNER (outermode);
2367 int subsize = GET_MODE_UNIT_SIZE (outermode);
2368 int i, elts = GET_MODE_NUNITS (outermode);
2369 rtvec v = rtvec_alloc (elts);
2370
2371 for (i = 0; i < elts; i++, byte += subsize)
2372 {
2373 RTVEC_ELT (v, i) = simplify_subreg (submode, op, innermode, byte);
2374 }
2375 return gen_rtx_CONST_VECTOR (outermode, v);
2376 }
2377
2378 /* ??? This code is partly redundant with code below, but can handle
2379 the subregs of floats and similar corner cases.
2380 Later we should move all simplification code here and rewrite
2381 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2382 using SIMPLIFY_SUBREG. */
2383 if (subreg_lowpart_offset (outermode, innermode) == byte)
2384 {
2385 rtx new = gen_lowpart_if_possible (outermode, op);
2386 if (new)
2387 return new;
2388 }
2389
2390 /* Similar comments to those above apply here. */
2391 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2392 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2393 && GET_MODE_CLASS (outermode) == MODE_INT)
2394 {
2395 rtx new = constant_subword (op,
2396 (byte / UNITS_PER_WORD),
2397 innermode);
2398 if (new)
2399 return new;
2400 }
2401
2402 offset = byte * BITS_PER_UNIT;
2403 switch (GET_CODE (op))
2404 {
2405 case CONST_DOUBLE:
2406 if (GET_MODE (op) != VOIDmode)
2407 break;
2408
2409 /* We can't handle this case yet. */
2410 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2411 return NULL_RTX;
2412
2413 part = offset >= HOST_BITS_PER_WIDE_INT;
2414 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2415 && BYTES_BIG_ENDIAN)
2416 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2417 && WORDS_BIG_ENDIAN))
2418 part = !part;
2419 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2420 offset %= HOST_BITS_PER_WIDE_INT;
2421
2422 /* We've already picked the word we want from a double, so
2423 pretend this is actually an integer. */
2424 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2425
2426 /* FALLTHROUGH */
2427 case CONST_INT:
2428 if (GET_CODE (op) == CONST_INT)
2429 val = INTVAL (op);
2430
2431 /* We don't handle synthesizing non-integral constants yet. */
2432 if (GET_MODE_CLASS (outermode) != MODE_INT)
2433 return NULL_RTX;
2434
2435 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2436 {
2437 if (WORDS_BIG_ENDIAN)
2438 offset = (GET_MODE_BITSIZE (innermode)
2439 - GET_MODE_BITSIZE (outermode) - offset);
2440 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2441 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2442 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2443 - 2 * (offset % BITS_PER_WORD));
2444 }
2445
2446 if (offset >= HOST_BITS_PER_WIDE_INT)
2447 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2448 else
2449 {
2450 val >>= offset;
2451 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2452 val = trunc_int_for_mode (val, outermode);
2453 return GEN_INT (val);
2454 }
2455 default:
2456 break;
2457 }
2458 }
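      /* E.g. (illustrative, little-endian): (subreg:QI (const_int 0x1234) 0)
         taken from an HImode value computes a bit offset of 0, so VAL is
         truncated to QImode giving (const_int 0x34); BYTE 1 gives a bit
         offset of 8 and hence (const_int 0x12).  */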
2459
2460 /* Changing mode twice with SUBREG => just change it once,
2461 or not at all if changing back to the starting mode of OP. */
2462 if (GET_CODE (op) == SUBREG)
2463 {
2464 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2465 int final_offset = byte + SUBREG_BYTE (op);
2466 rtx new;
2467
2468 if (outermode == innermostmode
2469 && byte == 0 && SUBREG_BYTE (op) == 0)
2470 return SUBREG_REG (op);
2471
2472 /* SUBREG_BYTE represents the offset, as if the value were stored
2473 in memory. An irritating exception is a paradoxical subreg, where
2474 we define SUBREG_BYTE to be 0; on big-endian machines this value
2475 would otherwise be negative. For the moment, undo this exception. */
2476 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2477 {
2478 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2479 if (WORDS_BIG_ENDIAN)
2480 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2481 if (BYTES_BIG_ENDIAN)
2482 final_offset += difference % UNITS_PER_WORD;
2483 }
2484 if (SUBREG_BYTE (op) == 0
2485 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2486 {
2487 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2488 if (WORDS_BIG_ENDIAN)
2489 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2490 if (BYTES_BIG_ENDIAN)
2491 final_offset += difference % UNITS_PER_WORD;
2492 }
2493
2494 /* See whether the resulting subreg will be paradoxical. */
2495 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2496 {
2497 /* In nonparadoxical subregs we can't handle negative offsets. */
2498 if (final_offset < 0)
2499 return NULL_RTX;
2500 /* Bail out in case the resulting subreg would be incorrect. */
2501 if (final_offset % GET_MODE_SIZE (outermode)
2502 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2503 return NULL_RTX;
2504 }
2505 else
2506 {
2507 int offset = 0;
2508 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2509
2510 /* In a paradoxical subreg, see if we are still looking at the lower
2511 part. If so, our SUBREG_BYTE will be 0. */
2512 if (WORDS_BIG_ENDIAN)
2513 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2514 if (BYTES_BIG_ENDIAN)
2515 offset += difference % UNITS_PER_WORD;
2516 if (offset == final_offset)
2517 final_offset = 0;
2518 else
2519 return NULL_RTX;
2520 }
2521
2522 /* Recurse for further possible simplifications. */
2523 new = simplify_subreg (outermode, SUBREG_REG (op),
2524 GET_MODE (SUBREG_REG (op)),
2525 final_offset);
2526 if (new)
2527 return new;
2528 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2529 }
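      /* E.g. (illustrative): (subreg:QI (subreg:HI (reg:SI X) 0) 0)
         combines the two offsets into one and recurses, folding to
         (subreg:QI (reg:SI X) 0) when the inner expression cannot be
         simplified any further.  */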
2530
2531 /* SUBREG of a hard register => just change the register number
2532 and/or mode. If the hard register is not valid in that mode,
2533 suppress this simplification. If the hard register is the stack,
2534 frame, or argument pointer, leave this as a SUBREG. */
2535
2536 if (REG_P (op)
2537 && (! REG_FUNCTION_VALUE_P (op)
2538 || ! rtx_equal_function_value_matters)
2539 #ifdef CLASS_CANNOT_CHANGE_MODE
2540 && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
2541 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2542 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
2543 && (TEST_HARD_REG_BIT
2544 (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
2545 REGNO (op))))
2546 #endif
2547 && REGNO (op) < FIRST_PSEUDO_REGISTER
2548 && ((reload_completed && !frame_pointer_needed)
2549 || (REGNO (op) != FRAME_POINTER_REGNUM
2550 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2551 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2552 #endif
2553 ))
2554 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2555 && REGNO (op) != ARG_POINTER_REGNUM
2556 #endif
2557 && REGNO (op) != STACK_POINTER_REGNUM)
2558 {
2559 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2560 0);
2561
2562 /* ??? We do allow it if the current REG is not valid for
2563 its mode. This is a kludge to work around how float/complex
2564 arguments are passed on 32-bit Sparc and should be fixed. */
2565 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2566 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2567 {
2568 rtx x = gen_rtx_REG (outermode, final_regno);
2569
2570 /* Propagate the original regno. We don't have any way to specify
2571 the offset inside the original regno, so do so only for the lowpart.
2572 The information is used only by alias analysis, which cannot
2573 grok partial registers anyway. */
2574
2575 if (subreg_lowpart_offset (outermode, innermode) == byte)
2576 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2577 return x;
2578 }
2579 }
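      /* Illustrative sketch: on a hypothetical little-endian target where
         hard register 2 is valid in both DImode and SImode,
         (subreg:SI (reg:DI 2) 0) becomes (reg:SI 2); subreg_hard_regno
         instead folds the byte offset into the register number when the
         offset selects a later word.  */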
2580
2581 /* If we have a SUBREG of a register that we are replacing and we are
2582 replacing it with a MEM, make a new MEM and try replacing the
2583 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2584 or if we would be widening it. */
2585
2586 if (GET_CODE (op) == MEM
2587 && ! mode_dependent_address_p (XEXP (op, 0))
2588 /* Allow splitting of volatile memory references in case we don't
2589 have an instruction to move the whole thing. */
2590 && (! MEM_VOLATILE_P (op)
2591 || ! have_insn_for (SET, innermode))
2592 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2593 return adjust_address_nv (op, outermode, byte);
2594
2595 /* Handle complex values represented as CONCAT
2596 of real and imaginary part. */
2597 if (GET_CODE (op) == CONCAT)
2598 {
2599 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2600 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2601 unsigned int final_offset;
2602 rtx res;
2603
2604 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2605 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2606 if (res)
2607 return res;
2608 /* We can at least simplify it by referring directly to the relevant part. */
2609 return gen_rtx_SUBREG (outermode, part, final_offset);
2610 }
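      /* E.g. (illustrative, with 8-byte DFmode): for OP = (concat:DC X Y),
         BYTE 8 falls in the imaginary half, so PART is Y and FINAL_OFFSET
         is 0; the recursive call then returns Y itself.  */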
2611
2612 return NULL_RTX;
2613 }
2614 /* Make a SUBREG operation or equivalent if it folds. */
2615
2616 rtx
2617 simplify_gen_subreg (outermode, op, innermode, byte)
2618 rtx op;
2619 unsigned int byte;
2620 enum machine_mode outermode, innermode;
2621 {
2622 rtx new;
2623 /* Little bit of sanity checking. */
2624 if (innermode == VOIDmode || outermode == VOIDmode
2625 || innermode == BLKmode || outermode == BLKmode)
2626 abort ();
2627
2628 if (GET_MODE (op) != innermode
2629 && GET_MODE (op) != VOIDmode)
2630 abort ();
2631
2632 if (byte % GET_MODE_SIZE (outermode)
2633 || byte >= GET_MODE_SIZE (innermode))
2634 abort ();
2635
2636 if (GET_CODE (op) == QUEUED)
2637 return NULL_RTX;
2638
2639 new = simplify_subreg (outermode, op, innermode, byte);
2640 if (new)
2641 return new;
2642
2643 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2644 return NULL_RTX;
2645
2646 return gen_rtx_SUBREG (outermode, op, byte);
2647 }
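
/* Usage sketch (illustrative): unlike simplify_subreg, a fold is not
   required for a useful result:

     rtx lo = simplify_gen_subreg (SImode, x, DImode, 0);

   returns either a simplified rtx or a fresh (subreg:SI x 0), yielding 0
   only for QUEUED operands or when no valid SUBREG can be made.  */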
2648 /* Simplify X, an rtx expression.
2649
2650 Return the simplified expression or NULL if no simplifications
2651 were possible.
2652
2653 This is the preferred entry point into the simplification routines;
2654 however, we still allow passes to call the more specific routines.
2655
2656 Right now GCC has three (yes, three) major bodies of RTL simplification
2657 code that need to be unified.
2658
2659 1. fold_rtx in cse.c. This code uses various CSE specific
2660 information to aid in RTL simplification.
2661
2662 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2663 it uses combine specific information to aid in RTL
2664 simplification.
2665
2666 3. The routines in this file.
2667
2668
2669 Long term we want to only have one body of simplification code; to
2670 get to that state I recommend the following steps:
2671
2672 1. Pore over fold_rtx & simplify_rtx and move any simplifications
2673 which do not depend on pass-specific state into these routines.
2674
2675 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2676 use this routine whenever possible.
2677
2678 3. Allow for pass-dependent state to be provided to these
2679 routines and add simplifications based on the pass-dependent
2680 state. Remove code from cse.c & combine.c that becomes
2681 redundant/dead.
2682
2683 It will take time, but ultimately the compiler will be easier to
2684 maintain and improve. It's totally silly that when we add a
2685 simplification it needs to be added to 4 places (3 for RTL
2686 simplification and 1 for tree simplification). */
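
/* A usage sketch (illustrative only): given
   (plus:SI (const_int 2) (reg:SI X)), simplify_rtx first swaps the
   commutative operands in place so the constant comes second, then asks
   simplify_binary_operation for a further fold; a NULL result means
   nothing beyond that canonicalization applied.  */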
2687
2688 rtx
2689 simplify_rtx (x)
2690 rtx x;
2691 {
2692 enum rtx_code code = GET_CODE (x);
2693 enum machine_mode mode = GET_MODE (x);
2694
2695 switch (GET_RTX_CLASS (code))
2696 {
2697 case '1':
2698 return simplify_unary_operation (code, mode,
2699 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2700 case 'c':
2701 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2702 {
2703 rtx tem;
2704
2705 tem = XEXP (x, 0);
2706 XEXP (x, 0) = XEXP (x, 1);
2707 XEXP (x, 1) = tem;
2708 return simplify_binary_operation (code, mode,
2709 XEXP (x, 0), XEXP (x, 1));
2710 }
2711
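      /* FALLTHROUGH */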
2712 case '2':
2713 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2714
2715 case '3':
2716 case 'b':
2717 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2718 XEXP (x, 0), XEXP (x, 1),
2719 XEXP (x, 2));
2720
2721 case '<':
2722 return simplify_relational_operation (code,
2723 ((GET_MODE (XEXP (x, 0))
2724 != VOIDmode)
2725 ? GET_MODE (XEXP (x, 0))
2726 : GET_MODE (XEXP (x, 1))),
2727 XEXP (x, 0), XEXP (x, 1));
2728 case 'x':
2729 /* The only case we try to handle is a SUBREG. */
2730 if (code == SUBREG)
2731 return simplify_gen_subreg (mode, SUBREG_REG (x),
2732 GET_MODE (SUBREG_REG (x)),
2733 SUBREG_BYTE (x));
2734 return NULL;
2735 default:
2736 return NULL;
2737 }
2738 }