1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
4
5 This file is part of GNU CC.
6
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
11
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include <setjmp.h>
26
27 #include "rtl.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "flags.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "function.h"
36 #include "expr.h"
37 #include "toplev.h"
38 #include "output.h"
39 #include "ggc.h"
40
41 /* Simplification and canonicalization of RTL. */
42
43 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
44 virtual regs here because the simplify_*_operation routines are called
45 by integrate.c, which is called before virtual register instantiation.
46
47 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
48 a header file so that their definitions can be shared with
49 the identical copies in cse.c. Until then, do not change these
50 macros without also changing the copies in cse.c. */
51
52 #define FIXED_BASE_PLUS_P(X) \
53 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
54 || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
55 || (X) == virtual_stack_vars_rtx \
56 || (X) == virtual_incoming_args_rtx \
57 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
58 && (XEXP (X, 0) == frame_pointer_rtx \
59 || XEXP (X, 0) == hard_frame_pointer_rtx \
60 || (XEXP (X, 0) == arg_pointer_rtx \
61 && fixed_regs[ARG_POINTER_REGNUM]) \
62 || XEXP (X, 0) == virtual_stack_vars_rtx \
63 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
64 || GET_CODE (X) == ADDRESSOF)
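/* E.g. (illustrative): FIXED_BASE_PLUS_P matches frame_pointer_rtx itself
   as well as (plus (frame_pointer) (const_int 8)).  */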
65
66 /* Similar, but also allows reference to the stack pointer.
67
68 This used to include FIXED_BASE_PLUS_P; however, we can't assume that
69 arg_pointer_rtx by itself is nonzero, because on at least one machine,
70 the i960, the arg pointer is zero when it is unused. */
71
72 #define NONZERO_BASE_PLUS_P(X) \
73 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
74 || (X) == virtual_stack_vars_rtx \
75 || (X) == virtual_incoming_args_rtx \
76 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
77 && (XEXP (X, 0) == frame_pointer_rtx \
78 || XEXP (X, 0) == hard_frame_pointer_rtx \
79 || (XEXP (X, 0) == arg_pointer_rtx \
80 && fixed_regs[ARG_POINTER_REGNUM]) \
81 || XEXP (X, 0) == virtual_stack_vars_rtx \
82 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
83 || (X) == stack_pointer_rtx \
84 || (X) == virtual_stack_dynamic_rtx \
85 || (X) == virtual_outgoing_args_rtx \
86 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
87 && (XEXP (X, 0) == stack_pointer_rtx \
88 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
89 || XEXP (X, 0) == virtual_outgoing_args_rtx)) \
90 || GET_CODE (X) == ADDRESSOF)
91
92 /* Much code operates on (low, high) pairs; the low value is an
93 unsigned wide int, the high value a signed wide int. We
94 occasionally need to sign extend from low to high as if low were a
95 signed wide int. */
96 #define HWI_SIGN_EXTEND(low) \
97 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
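/* For instance (illustrative, assuming a 32-bit HOST_WIDE_INT):
   HWI_SIGN_EXTEND (0x80000000) is -1, since the low word looks negative,
   while HWI_SIGN_EXTEND (0x7fffffff) is 0.  */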
98
99 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
100 enum machine_mode, rtx, rtx));
101 static void check_fold_consts PARAMS ((PTR));
102 \f
103 /* Make a binary operation by properly ordering the operands and
104 seeing if the expression folds. */
105
106 rtx
107 simplify_gen_binary (code, mode, op0, op1)
108 enum rtx_code code;
109 enum machine_mode mode;
110 rtx op0, op1;
111 {
112 rtx tem;
113
114 /* Put complex operands first and constants second if commutative. */
115 if (GET_RTX_CLASS (code) == 'c'
116 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
117 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
118 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
119 || (GET_CODE (op0) == SUBREG
120 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
121 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
122 tem = op0, op0 = op1, op1 = tem;
123
124 /* If this simplifies, do it. */
125 tem = simplify_binary_operation (code, mode, op0, op1);
126
127 if (tem)
128 return tem;
129
130 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
131 just form the operation. */
132
133 if (code == PLUS && GET_CODE (op1) == CONST_INT
134 && GET_MODE (op0) != VOIDmode)
135 return plus_constant (op0, INTVAL (op1));
136 else if (code == MINUS && GET_CODE (op1) == CONST_INT
137 && GET_MODE (op0) != VOIDmode)
138 return plus_constant (op0, - INTVAL (op1));
139 else
140 return gen_rtx_fmt_ee (code, mode, op0, op1);
141 }
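/* Usage sketch (illustrative only; assumes an SImode pseudo register):

     rtx reg = gen_rtx_REG (SImode, 100);
     simplify_gen_binary (PLUS, SImode, reg, const0_rtx);
       ==> (reg:SI 100), since x + 0 folds in an integer mode
     simplify_gen_binary (PLUS, SImode, GEN_INT (2), reg);
       ==> (plus:SI (reg:SI 100) (const_int 2)), after canonical
           reordering puts the constant second  */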
142 \f
143 /* Make a unary operation by first seeing if it folds and otherwise making
144 the specified operation. */
145
146 rtx
147 simplify_gen_unary (code, mode, op, op_mode)
148 enum rtx_code code;
149 enum machine_mode mode;
150 rtx op;
151 enum machine_mode op_mode;
152 {
153 rtx tem;
154
155 /* If this simplifies, use it. */
156 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
157 return tem;
158
159 return gen_rtx_fmt_e (code, mode, op);
160 }
161
162 /* Likewise for ternary operations. */
163
164 rtx
165 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
166 enum rtx_code code;
167 enum machine_mode mode, op0_mode;
168 rtx op0, op1, op2;
169 {
170 rtx tem;
171
172 /* If this simplifies, use it. */
173 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
174 op0, op1, op2)))
175 return tem;
176
177 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
178 }
179 \f
180 /* Likewise, for relational operations.
181 CMP_MODE specifies mode comparison is done in.
182 */
183
184 rtx
185 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
186 enum rtx_code code;
187 enum machine_mode mode;
188 enum machine_mode cmp_mode;
189 rtx op0, op1;
190 {
191 rtx tem;
192
193 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
194 return tem;
195
196 /* Put complex operands first and constants second. */
197 if ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
198 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
199 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
200 || (GET_CODE (op0) == SUBREG
201 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
202 && GET_RTX_CLASS (GET_CODE (op1)) != 'o'))
203 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
204
205 return gen_rtx_fmt_ee (code, mode, op0, op1);
206 }
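/* For example (illustrative): simplify_gen_relational (LT, SImode, SImode,
   const1_rtx, reg) swaps the operands and the condition, yielding
   (gt:SI (reg) (const_int 1)) so that the constant ends up second.  */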
207 \f
208 /* Replace all occurrences of OLD in X with NEW and try to simplify the
209 resulting RTX. Return a new RTX which is as simplified as possible. */
210
211 rtx
212 simplify_replace_rtx (x, old, new)
213 rtx x;
214 rtx old;
215 rtx new;
216 {
217 enum rtx_code code = GET_CODE (x);
218 enum machine_mode mode = GET_MODE (x);
219
220 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
221 to build a new expression substituting recursively. If we can't do
222 anything, return our input. */
223
224 if (x == old)
225 return new;
226
227 switch (GET_RTX_CLASS (code))
228 {
229 case '1':
230 {
231 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
232 rtx op = (XEXP (x, 0) == old
233 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
234
235 return simplify_gen_unary (code, mode, op, op_mode);
236 }
237
238 case '2':
239 case 'c':
240 return
241 simplify_gen_binary (code, mode,
242 simplify_replace_rtx (XEXP (x, 0), old, new),
243 simplify_replace_rtx (XEXP (x, 1), old, new));
244 case '<':
245 return
246 simplify_gen_relational (code, mode,
247 (GET_MODE (XEXP (x, 0)) != VOIDmode
248 ? GET_MODE (XEXP (x, 0))
249 : GET_MODE (XEXP (x, 1))),
250 simplify_replace_rtx (XEXP (x, 0), old, new),
251 simplify_replace_rtx (XEXP (x, 1), old, new));
252
253 case '3':
254 case 'b':
255 return
256 simplify_gen_ternary (code, mode, GET_MODE (XEXP (x, 0)),
257 simplify_replace_rtx (XEXP (x, 0), old, new),
258 simplify_replace_rtx (XEXP (x, 1), old, new),
259 simplify_replace_rtx (XEXP (x, 2), old, new));
260
261 case 'x':
262 /* The only case we try to handle is a SUBREG. */
263 if (code == SUBREG)
264 {
265 rtx exp;
266 exp = simplify_gen_subreg (GET_MODE (x),
267 simplify_replace_rtx (SUBREG_REG (x),
268 old, new),
269 GET_MODE (SUBREG_REG (x)),
270 SUBREG_BYTE (x));
271 if (exp)
272 x = exp;
273 }
274 return x;
275
276 default:
277 if (GET_CODE (x) == MEM)
278 {
279 /* We can't use change_address here, since it verifies the memory
280 address for correctness. We don't want that check, since we may handle
281 previously invalid addresses (such as those in push instructions),
282 and it is the caller's job to verify that the resulting insn matches. */
283 rtx addr = simplify_replace_rtx (XEXP (x, 0), old, new);
284 rtx mem;
285 if (XEXP (x, 0) != addr)
286 {
287 mem = gen_rtx_MEM (GET_MODE (x), addr);
288 MEM_COPY_ATTRIBUTES (mem, x);
289 }
290 else
291 mem = x;
292 return mem;
293 }
294
295 return x;
296 }
297 return x;
298 }
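/* For example (illustrative): replacing (reg:SI 100) with (const_int 4)
   in (plus:SI (reg:SI 100) (const_int 3)) rebuilds the PLUS through
   simplify_gen_binary, which folds the result to (const_int 7).  */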
299 \f
300 /* Try to simplify a unary operation CODE whose output mode is to be
301 MODE with input operand OP whose mode was originally OP_MODE.
302 Return zero if no simplification can be made. */
303
304 rtx
305 simplify_unary_operation (code, mode, op, op_mode)
306 enum rtx_code code;
307 enum machine_mode mode;
308 rtx op;
309 enum machine_mode op_mode;
310 {
311 unsigned int width = GET_MODE_BITSIZE (mode);
312
313 /* The order of these tests is critical so that, for example, we don't
314 check the wrong mode (input vs. output) for a conversion operation,
315 such as FIX. At some point, this should be simplified. */
316
317 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
318
319 if (code == FLOAT && GET_MODE (op) == VOIDmode
320 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
321 {
322 HOST_WIDE_INT hv, lv;
323 REAL_VALUE_TYPE d;
324
325 if (GET_CODE (op) == CONST_INT)
326 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
327 else
328 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
329
330 #ifdef REAL_ARITHMETIC
331 REAL_VALUE_FROM_INT (d, lv, hv, mode);
332 #else
333 if (hv < 0)
334 {
335 d = (double) (~ hv);
336 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
337 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
338 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
339 d = (- d - 1.0);
340 }
341 else
342 {
343 d = (double) hv;
344 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
345 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
346 d += (double) (unsigned HOST_WIDE_INT) lv;
347 }
348 #endif /* REAL_ARITHMETIC */
349 d = real_value_truncate (mode, d);
350 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
351 }
352 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
353 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
354 {
355 HOST_WIDE_INT hv, lv;
356 REAL_VALUE_TYPE d;
357
358 if (GET_CODE (op) == CONST_INT)
359 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
360 else
361 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
362
363 if (op_mode == VOIDmode)
364 {
365 /* We don't know how to interpret negative-looking numbers in
366 this case, so don't try to fold those. */
367 if (hv < 0)
368 return 0;
369 }
370 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
371 ;
372 else
373 hv = 0, lv &= GET_MODE_MASK (op_mode);
374
375 #ifdef REAL_ARITHMETIC
376 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
377 #else
378
379 d = (double) (unsigned HOST_WIDE_INT) hv;
380 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
381 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
382 d += (double) (unsigned HOST_WIDE_INT) lv;
383 #endif /* REAL_ARITHMETIC */
384 d = real_value_truncate (mode, d);
385 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
386 }
387 #endif
388
389 if (GET_CODE (op) == CONST_INT
390 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
391 {
392 register HOST_WIDE_INT arg0 = INTVAL (op);
393 register HOST_WIDE_INT val;
394
395 switch (code)
396 {
397 case NOT:
398 val = ~ arg0;
399 break;
400
401 case NEG:
402 val = - arg0;
403 break;
404
405 case ABS:
406 val = (arg0 >= 0 ? arg0 : - arg0);
407 break;
408
409 case FFS:
410 /* Don't use ffs here. Instead, get low order bit and then its
411 number. If arg0 is zero, this will return 0, as desired. */
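/* Worked example (illustrative): arg0 == 12 (binary 1100) gives
   arg0 & -arg0 == 4 and exact_log2 (4) == 2, so val == 3, the 1-based
   position of the lowest set bit.  */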
412 arg0 &= GET_MODE_MASK (mode);
413 val = exact_log2 (arg0 & (- arg0)) + 1;
414 break;
415
416 case TRUNCATE:
417 val = arg0;
418 break;
419
420 case ZERO_EXTEND:
421 if (op_mode == VOIDmode)
422 op_mode = mode;
423 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
424 {
425 /* If we were really extending the mode,
426 we would have to distinguish between zero-extension
427 and sign-extension. */
428 if (width != GET_MODE_BITSIZE (op_mode))
429 abort ();
430 val = arg0;
431 }
432 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
433 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
434 else
435 return 0;
436 break;
437
438 case SIGN_EXTEND:
439 if (op_mode == VOIDmode)
440 op_mode = mode;
441 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
442 {
443 /* If we were really extending the mode,
444 we would have to distinguish between zero-extension
445 and sign-extension. */
446 if (width != GET_MODE_BITSIZE (op_mode))
447 abort ();
448 val = arg0;
449 }
450 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
451 {
452 val
453 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
454 if (val
455 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
456 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
457 }
458 else
459 return 0;
460 break;
461
462 case SQRT:
463 case FLOAT_EXTEND:
464 case FLOAT_TRUNCATE:
465 return 0;
466
467 default:
468 abort ();
469 }
470
471 val = trunc_int_for_mode (val, mode);
472
473 return GEN_INT (val);
474 }
475
476 /* We can do some operations on integer CONST_DOUBLEs. Also allow
477 for a DImode operation on a CONST_INT. */
478 else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
479 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
480 {
481 unsigned HOST_WIDE_INT l1, lv;
482 HOST_WIDE_INT h1, hv;
483
484 if (GET_CODE (op) == CONST_DOUBLE)
485 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
486 else
487 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
488
489 switch (code)
490 {
491 case NOT:
492 lv = ~ l1;
493 hv = ~ h1;
494 break;
495
496 case NEG:
497 neg_double (l1, h1, &lv, &hv);
498 break;
499
500 case ABS:
501 if (h1 < 0)
502 neg_double (l1, h1, &lv, &hv);
503 else
504 lv = l1, hv = h1;
505 break;
506
507 case FFS:
508 hv = 0;
509 if (l1 == 0)
510 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
511 else
512 lv = exact_log2 (l1 & (-l1)) + 1;
513 break;
514
515 case TRUNCATE:
516 /* This is just a change-of-mode, so do nothing. */
517 lv = l1, hv = h1;
518 break;
519
520 case ZERO_EXTEND:
521 if (op_mode == VOIDmode
522 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
523 return 0;
524
525 hv = 0;
526 lv = l1 & GET_MODE_MASK (op_mode);
527 break;
528
529 case SIGN_EXTEND:
530 if (op_mode == VOIDmode
531 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
532 return 0;
533 else
534 {
535 lv = l1 & GET_MODE_MASK (op_mode);
536 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
537 && (lv & ((HOST_WIDE_INT) 1
538 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
539 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
540
541 hv = HWI_SIGN_EXTEND (lv);
542 }
543 break;
544
545 case SQRT:
546 return 0;
547
548 default:
549 return 0;
550 }
551
552 return immed_double_const (lv, hv, mode);
553 }
554
555 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
556 else if (GET_CODE (op) == CONST_DOUBLE
557 && GET_MODE_CLASS (mode) == MODE_FLOAT)
558 {
559 REAL_VALUE_TYPE d;
560 jmp_buf handler;
561 rtx x;
562
563 if (setjmp (handler))
564 /* There used to be a warning here, but that is inadvisable.
565 People may want to cause traps, and the natural way
566 to do it should not get a warning. */
567 return 0;
568
569 set_float_handler (handler);
570
571 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
572
573 switch (code)
574 {
575 case NEG:
576 d = REAL_VALUE_NEGATE (d);
577 break;
578
579 case ABS:
580 if (REAL_VALUE_NEGATIVE (d))
581 d = REAL_VALUE_NEGATE (d);
582 break;
583
584 case FLOAT_TRUNCATE:
585 d = real_value_truncate (mode, d);
586 break;
587
588 case FLOAT_EXTEND:
589 /* All this does is change the mode. */
590 break;
591
592 case FIX:
593 d = REAL_VALUE_RNDZINT (d);
594 break;
595
596 case UNSIGNED_FIX:
597 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
598 break;
599
600 case SQRT:
601 return 0;
602
603 default:
604 abort ();
605 }
606
607 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
608 set_float_handler (NULL);
609 return x;
610 }
611
612 else if (GET_CODE (op) == CONST_DOUBLE
613 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
614 && GET_MODE_CLASS (mode) == MODE_INT
615 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
616 {
617 REAL_VALUE_TYPE d;
618 jmp_buf handler;
619 HOST_WIDE_INT val;
620
621 if (setjmp (handler))
622 return 0;
623
624 set_float_handler (handler);
625
626 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
627
628 switch (code)
629 {
630 case FIX:
631 val = REAL_VALUE_FIX (d);
632 break;
633
634 case UNSIGNED_FIX:
635 val = REAL_VALUE_UNSIGNED_FIX (d);
636 break;
637
638 default:
639 abort ();
640 }
641
642 set_float_handler (NULL);
643
644 val = trunc_int_for_mode (val, mode);
645
646 return GEN_INT (val);
647 }
648 #endif
649 /* This was formerly used only for non-IEEE float.
650 eggert@twinsun.com says it is safe for IEEE also. */
651 else
652 {
653 enum rtx_code reversed;
654 /* There are some simplifications we can do even if the operands
655 aren't constant. */
656 switch (code)
657 {
658 case NOT:
659 /* (not (not X)) == X. */
660 if (GET_CODE (op) == NOT)
661 return XEXP (op, 0);
662
663 /* (not (eq X Y)) == (ne X Y), etc. */
664 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
665 && ((reversed = reversed_comparison_code (op, NULL_RTX))
666 != UNKNOWN))
667 return gen_rtx_fmt_ee (reversed,
668 op_mode, XEXP (op, 0), XEXP (op, 1));
669 break;
670
671 case NEG:
672 /* (neg (neg X)) == X. */
673 if (GET_CODE (op) == NEG)
674 return XEXP (op, 0);
675 break;
676
677 case SIGN_EXTEND:
678 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
679 becomes just the MINUS if its mode is MODE. This allows
680 folding switch statements on machines using casesi (such as
681 the Vax). */
682 if (GET_CODE (op) == TRUNCATE
683 && GET_MODE (XEXP (op, 0)) == mode
684 && GET_CODE (XEXP (op, 0)) == MINUS
685 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
686 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
687 return XEXP (op, 0);
688
689 #ifdef POINTERS_EXTEND_UNSIGNED
690 if (! POINTERS_EXTEND_UNSIGNED
691 && mode == Pmode && GET_MODE (op) == ptr_mode
692 && (CONSTANT_P (op)
693 || (GET_CODE (op) == SUBREG
694 && GET_CODE (SUBREG_REG (op)) == REG
695 && REG_POINTER (SUBREG_REG (op))
696 && GET_MODE (SUBREG_REG (op)) == Pmode)))
697 return convert_memory_address (Pmode, op);
698 #endif
699 break;
700
701 #ifdef POINTERS_EXTEND_UNSIGNED
702 case ZERO_EXTEND:
703 if (POINTERS_EXTEND_UNSIGNED
704 && mode == Pmode && GET_MODE (op) == ptr_mode
705 && (CONSTANT_P (op)
706 || (GET_CODE (op) == SUBREG
707 && GET_CODE (SUBREG_REG (op)) == REG
708 && REG_POINTER (SUBREG_REG (op))
709 && GET_MODE (SUBREG_REG (op)) == Pmode)))
710 return convert_memory_address (Pmode, op);
711 break;
712 #endif
713
714 default:
715 break;
716 }
717
718 return 0;
719 }
720 }
721 \f
722 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
723 and OP1. Return 0 if no simplification is possible.
724
725 Don't use this for relational operations such as EQ or LT.
726 Use simplify_relational_operation instead. */
727
728 rtx
729 simplify_binary_operation (code, mode, op0, op1)
730 enum rtx_code code;
731 enum machine_mode mode;
732 rtx op0, op1;
733 {
734 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
735 HOST_WIDE_INT val;
736 unsigned int width = GET_MODE_BITSIZE (mode);
737 rtx tem;
738
739 /* Relational operations don't work here. We must know the mode
740 of the operands in order to do the comparison correctly.
741 Assuming a full word can give incorrect results.
742 Consider comparing 128 with -128 in QImode. */
743
744 if (GET_RTX_CLASS (code) == '<')
745 abort ();
746
747 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
748 if (GET_MODE_CLASS (mode) == MODE_FLOAT
749 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
750 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
751 {
752 REAL_VALUE_TYPE f0, f1, value;
753 jmp_buf handler;
754
755 if (setjmp (handler))
756 return 0;
757
758 set_float_handler (handler);
759
760 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
761 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
762 f0 = real_value_truncate (mode, f0);
763 f1 = real_value_truncate (mode, f1);
764
765 #ifdef REAL_ARITHMETIC
766 #ifndef REAL_INFINITY
767 if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
768 return 0;
769 #endif
770 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
771 #else
772 switch (code)
773 {
774 case PLUS:
775 value = f0 + f1;
776 break;
777 case MINUS:
778 value = f0 - f1;
779 break;
780 case MULT:
781 value = f0 * f1;
782 break;
783 case DIV:
784 #ifndef REAL_INFINITY
785 if (f1 == 0)
786 return 0;
787 #endif
788 value = f0 / f1;
789 break;
790 case SMIN:
791 value = MIN (f0, f1);
792 break;
793 case SMAX:
794 value = MAX (f0, f1);
795 break;
796 default:
797 abort ();
798 }
799 #endif
800
801 value = real_value_truncate (mode, value);
802 set_float_handler (NULL);
803 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
804 }
805 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
806
807 /* We can fold some multi-word operations. */
808 if (GET_MODE_CLASS (mode) == MODE_INT
809 && width == HOST_BITS_PER_WIDE_INT * 2
810 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
811 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
812 {
813 unsigned HOST_WIDE_INT l1, l2, lv;
814 HOST_WIDE_INT h1, h2, hv;
815
816 if (GET_CODE (op0) == CONST_DOUBLE)
817 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
818 else
819 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
820
821 if (GET_CODE (op1) == CONST_DOUBLE)
822 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
823 else
824 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
825
826 switch (code)
827 {
828 case MINUS:
829 /* A - B == A + (-B). */
830 neg_double (l2, h2, &lv, &hv);
831 l2 = lv, h2 = hv;
832
833 /* ... fall through ... */
834
835 case PLUS:
836 add_double (l1, h1, l2, h2, &lv, &hv);
837 break;
838
839 case MULT:
840 mul_double (l1, h1, l2, h2, &lv, &hv);
841 break;
842
843 case DIV: case MOD: case UDIV: case UMOD:
844 /* We'd need to include tree.h to do this and it doesn't seem worth
845 it. */
846 return 0;
847
848 case AND:
849 lv = l1 & l2, hv = h1 & h2;
850 break;
851
852 case IOR:
853 lv = l1 | l2, hv = h1 | h2;
854 break;
855
856 case XOR:
857 lv = l1 ^ l2, hv = h1 ^ h2;
858 break;
859
860 case SMIN:
861 if (h1 < h2
862 || (h1 == h2
863 && ((unsigned HOST_WIDE_INT) l1
864 < (unsigned HOST_WIDE_INT) l2)))
865 lv = l1, hv = h1;
866 else
867 lv = l2, hv = h2;
868 break;
869
870 case SMAX:
871 if (h1 > h2
872 || (h1 == h2
873 && ((unsigned HOST_WIDE_INT) l1
874 > (unsigned HOST_WIDE_INT) l2)))
875 lv = l1, hv = h1;
876 else
877 lv = l2, hv = h2;
878 break;
879
880 case UMIN:
881 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
882 || (h1 == h2
883 && ((unsigned HOST_WIDE_INT) l1
884 < (unsigned HOST_WIDE_INT) l2)))
885 lv = l1, hv = h1;
886 else
887 lv = l2, hv = h2;
888 break;
889
890 case UMAX:
891 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
892 || (h1 == h2
893 && ((unsigned HOST_WIDE_INT) l1
894 > (unsigned HOST_WIDE_INT) l2)))
895 lv = l1, hv = h1;
896 else
897 lv = l2, hv = h2;
898 break;
899
900 case LSHIFTRT: case ASHIFTRT:
901 case ASHIFT:
902 case ROTATE: case ROTATERT:
903 #ifdef SHIFT_COUNT_TRUNCATED
904 if (SHIFT_COUNT_TRUNCATED)
905 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
906 #endif
907
908 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
909 return 0;
910
911 if (code == LSHIFTRT || code == ASHIFTRT)
912 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
913 code == ASHIFTRT);
914 else if (code == ASHIFT)
915 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
916 else if (code == ROTATE)
917 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
918 else /* code == ROTATERT */
919 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
920 break;
921
922 default:
923 return 0;
924 }
925
926 return immed_double_const (lv, hv, mode);
927 }
928
929 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
930 || width > HOST_BITS_PER_WIDE_INT || width == 0)
931 {
932 /* Even if we can't compute a constant result,
933 there are some cases worth simplifying. */
934
935 switch (code)
936 {
937 case PLUS:
938 /* In IEEE floating point, x+0 is not the same as x. Similarly
939 for the other optimizations below. */
940 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
941 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
942 break;
943
944 if (op1 == CONST0_RTX (mode))
945 return op0;
946
947 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
948 if (GET_CODE (op0) == NEG)
949 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
950 else if (GET_CODE (op1) == NEG)
951 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
952
953 /* (~a) + 1 -> -a */
954 if (INTEGRAL_MODE_P (mode)
955 && GET_CODE (op0) == NOT
956 && GET_CODE (op1) == CONST_INT
957 && INTVAL (op1) == 1)
958 return gen_rtx_NEG (mode, XEXP (op0, 0));
959
960 /* Handle both-operands-constant cases. We can only add
961 CONST_INTs to constants since the sum of relocatable symbols
962 can't be handled by most assemblers. Don't add CONST_INT
963 to CONST_INT since overflow won't be computed properly if wider
964 than HOST_BITS_PER_WIDE_INT. */
965
966 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
967 && GET_CODE (op1) == CONST_INT)
968 return plus_constant (op0, INTVAL (op1));
969 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
970 && GET_CODE (op0) == CONST_INT)
971 return plus_constant (op1, INTVAL (op0));
972
973 /* See if this is something like X * C - X or vice versa or
974 if the multiplication is written as a shift. If so, we can
975 distribute and make a new multiply, shift, or maybe just
976 have X (if C is 2 in the example above). But don't make a
977 real multiply if we didn't have one before. */
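/* E.g. (illustrative): (plus (mult x 3) x) distributes to (mult x 4),
   and (plus (mult x 3) (neg x)) to (mult x 2), while (plus (ashift x 2) x)
   is left alone because (mult x 5) would be a multiply we did not
   have before.  */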
978
979 if (! FLOAT_MODE_P (mode))
980 {
981 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
982 rtx lhs = op0, rhs = op1;
983 int had_mult = 0;
984
985 if (GET_CODE (lhs) == NEG)
986 coeff0 = -1, lhs = XEXP (lhs, 0);
987 else if (GET_CODE (lhs) == MULT
988 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
989 {
990 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
991 had_mult = 1;
992 }
993 else if (GET_CODE (lhs) == ASHIFT
994 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
995 && INTVAL (XEXP (lhs, 1)) >= 0
996 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
997 {
998 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
999 lhs = XEXP (lhs, 0);
1000 }
1001
1002 if (GET_CODE (rhs) == NEG)
1003 coeff1 = -1, rhs = XEXP (rhs, 0);
1004 else if (GET_CODE (rhs) == MULT
1005 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1006 {
1007 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1008 had_mult = 1;
1009 }
1010 else if (GET_CODE (rhs) == ASHIFT
1011 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1012 && INTVAL (XEXP (rhs, 1)) >= 0
1013 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1014 {
1015 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1016 rhs = XEXP (rhs, 0);
1017 }
1018
1019 if (rtx_equal_p (lhs, rhs))
1020 {
1021 tem = simplify_gen_binary (MULT, mode, lhs,
1022 GEN_INT (coeff0 + coeff1));
1023 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1024 }
1025 }
1026
1027 /* If one of the operands is a PLUS or a MINUS, see if we can
1028 simplify this by the associative law.
1029 Don't use the associative law for floating point.
1030 The inaccuracy makes it nonassociative,
1031 and subtle programs can break if operations are associated. */
1032
1033 if (INTEGRAL_MODE_P (mode)
1034 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1035 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
1036 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1037 return tem;
1038 break;
1039
1040 case COMPARE:
1041 #ifdef HAVE_cc0
1042 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1043 using cc0, in which case we want to leave it as a COMPARE
1044 so we can distinguish it from a register-register copy.
1045
1046 In IEEE floating point, x-0 is not the same as x. */
1047
1048 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1049 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1050 && op1 == CONST0_RTX (mode))
1051 return op0;
1052 #endif
1053
1054 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1055 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1056 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1057 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1058 {
1059 rtx xop00 = XEXP (op0, 0);
1060 rtx xop10 = XEXP (op1, 0);
1061
1062 #ifdef HAVE_cc0
1063 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1064 #else
1065 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1066 && GET_MODE (xop00) == GET_MODE (xop10)
1067 && REGNO (xop00) == REGNO (xop10)
1068 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1069 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1070 #endif
1071 return xop00;
1072 }
1073
1074 break;
1075 case MINUS:
1076 /* None of these optimizations can be done for IEEE
1077 floating point. */
1078 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1079 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
1080 break;
1081
1082 /* We can't assume x-x is 0 even with non-IEEE floating point,
1083 but since it is zero except in very strange circumstances, we
1084 will treat it as zero with -funsafe-math-optimizations. */
1085 if (rtx_equal_p (op0, op1)
1086 && ! side_effects_p (op0)
1087 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1088 return CONST0_RTX (mode);
1089
1090 /* Change subtraction from zero into negation. */
1091 if (op0 == CONST0_RTX (mode))
1092 return gen_rtx_NEG (mode, op1);
1093
1094 /* (-1 - a) is ~a. */
1095 if (op0 == constm1_rtx)
1096 return gen_rtx_NOT (mode, op1);
1097
1098 /* Subtracting 0 has no effect. */
1099 if (op1 == CONST0_RTX (mode))
1100 return op0;
1101
1102 /* See if this is something like X * C - X or vice versa or
1103 if the multiplication is written as a shift. If so, we can
1104 distribute and make a new multiply, shift, or maybe just
1105 have X (if C is 2 in the example above). But don't make a
1106 real multiply if we didn't have one before. */
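/* E.g. (illustrative): (minus (mult x 3) x) distributes to (mult x 2),
   while (minus (ashift x 2) x) is left alone rather than becoming a
   multiply we did not have before.  */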
1107
1108 if (! FLOAT_MODE_P (mode))
1109 {
1110 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1111 rtx lhs = op0, rhs = op1;
1112 int had_mult = 0;
1113
1114 if (GET_CODE (lhs) == NEG)
1115 coeff0 = -1, lhs = XEXP (lhs, 0);
1116 else if (GET_CODE (lhs) == MULT
1117 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1118 {
1119 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1120 had_mult = 1;
1121 }
1122 else if (GET_CODE (lhs) == ASHIFT
1123 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1124 && INTVAL (XEXP (lhs, 1)) >= 0
1125 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1126 {
1127 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1128 lhs = XEXP (lhs, 0);
1129 }
1130
1131 if (GET_CODE (rhs) == NEG)
1132 coeff1 = - 1, rhs = XEXP (rhs, 0);
1133 else if (GET_CODE (rhs) == MULT
1134 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1135 {
1136 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1137 had_mult = 1;
1138 }
1139 else if (GET_CODE (rhs) == ASHIFT
1140 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1141 && INTVAL (XEXP (rhs, 1)) >= 0
1142 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1143 {
1144 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1145 rhs = XEXP (rhs, 0);
1146 }
1147
1148 if (rtx_equal_p (lhs, rhs))
1149 {
1150 tem = simplify_gen_binary (MULT, mode, lhs,
1151 GEN_INT (coeff0 - coeff1));
1152 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1153 }
1154 }
1155
1156 /* (a - (-b)) -> (a + b). */
1157 if (GET_CODE (op1) == NEG)
1158 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1159
1160 /* If one of the operands is a PLUS or a MINUS, see if we can
1161 simplify this by the associative law.
1162 Don't use the associative law for floating point.
1163 The inaccuracy makes it nonassociative,
1164 and subtle programs can break if operations are associated. */
1165
1166 if (INTEGRAL_MODE_P (mode)
1167 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1168 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
1169 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1170 return tem;
1171
1172 /* Don't let a relocatable value get a negative coeff. */
1173 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1174 return plus_constant (op0, - INTVAL (op1));
1175
1176 /* (x - (x & y)) -> (x & ~y) */
1177 if (GET_CODE (op1) == AND)
1178 {
1179 if (rtx_equal_p (op0, XEXP (op1, 0)))
1180 return simplify_gen_binary (AND, mode, op0,
1181 gen_rtx_NOT (mode, XEXP (op1, 1)));
1182 if (rtx_equal_p (op0, XEXP (op1, 1)))
1183 return simplify_gen_binary (AND, mode, op0,
1184 gen_rtx_NOT (mode, XEXP (op1, 0)));
1185 }
1186 break;
1187
1188 case MULT:
1189 if (op1 == constm1_rtx)
1190 {
1191 tem = simplify_unary_operation (NEG, mode, op0, mode);
1192
1193 return tem ? tem : gen_rtx_NEG (mode, op0);
1194 }
1195
1196 /* In IEEE floating point, x*0 is not always 0. */
1197 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1198 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1199 && op1 == CONST0_RTX (mode)
1200 && ! side_effects_p (op0))
1201 return op1;
1202
1203 /* In IEEE floating point, x*1 is not equivalent to x for NaNs.
1204 However, ANSI says we can drop signals,
1205 so we can do this anyway. */
1206 if (op1 == CONST1_RTX (mode))
1207 return op0;
1208
1209 /* Convert multiply by constant power of two into shift unless
1210 we are still generating RTL. This test is a kludge. */
1211 if (GET_CODE (op1) == CONST_INT
1212 && (val = exact_log2 (INTVAL (op1))) >= 0
1213 /* If the mode is larger than the host word size, and the
1214 uppermost bit is set, then this isn't a power of two due
1215 to implicit sign extension. */
1216 && (width <= HOST_BITS_PER_WIDE_INT
1217 || val != HOST_BITS_PER_WIDE_INT - 1)
1218 && ! rtx_equal_function_value_matters)
1219 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1220
1221 if (GET_CODE (op1) == CONST_DOUBLE
1222 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
1223 {
1224 REAL_VALUE_TYPE d;
1225 jmp_buf handler;
1226 int op1is2, op1ism1;
1227
1228 if (setjmp (handler))
1229 return 0;
1230
1231 set_float_handler (handler);
1232 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1233 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
1234 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
1235 set_float_handler (NULL);
1236
1237 /* x*2 is x+x and x*(-1) is -x */
1238 if (op1is2 && GET_MODE (op0) == mode)
1239 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1240
1241 else if (op1ism1 && GET_MODE (op0) == mode)
1242 return gen_rtx_NEG (mode, op0);
1243 }
1244 break;
1245
1246 case IOR:
1247 if (op1 == const0_rtx)
1248 return op0;
1249 if (GET_CODE (op1) == CONST_INT
1250 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1251 return op1;
1252 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1253 return op0;
1254 /* A | (~A) -> -1 */
1255 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1256 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1257 && ! side_effects_p (op0)
1258 && GET_MODE_CLASS (mode) != MODE_CC)
1259 return constm1_rtx;
1260 break;
1261
1262 case XOR:
1263 if (op1 == const0_rtx)
1264 return op0;
1265 if (GET_CODE (op1) == CONST_INT
1266 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1267 return gen_rtx_NOT (mode, op0);
1268 if (op0 == op1 && ! side_effects_p (op0)
1269 && GET_MODE_CLASS (mode) != MODE_CC)
1270 return const0_rtx;
1271 break;
1272
1273 case AND:
1274 if (op1 == const0_rtx && ! side_effects_p (op0))
1275 return const0_rtx;
1276 if (GET_CODE (op1) == CONST_INT
1277 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1278 return op0;
1279 if (op0 == op1 && ! side_effects_p (op0)
1280 && GET_MODE_CLASS (mode) != MODE_CC)
1281 return op0;
1282 /* A & (~A) -> 0 */
1283 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1284 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1285 && ! side_effects_p (op0)
1286 && GET_MODE_CLASS (mode) != MODE_CC)
1287 return const0_rtx;
1288 break;
1289
1290 case UDIV:
1291 /* Convert divide by power of two into shift (divide by 1 handled
1292 below). */
1293 if (GET_CODE (op1) == CONST_INT
1294 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
1295 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1296
1297 /* ... fall through ... */
1298
1299 case DIV:
1300 if (op1 == CONST1_RTX (mode))
1301 return op0;
1302
1303 /* In IEEE floating point, 0/x is not always 0. */
1304 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1305 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1306 && op0 == CONST0_RTX (mode)
1307 && ! side_effects_p (op1))
1308 return op0;
1309
1310 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1311 /* Change division by a constant into multiplication. Only do
1312 this with -funsafe-math-optimizations. */
1313 else if (GET_CODE (op1) == CONST_DOUBLE
1314 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
1315 && op1 != CONST0_RTX (mode)
1316 && flag_unsafe_math_optimizations)
1317 {
1318 REAL_VALUE_TYPE d;
1319 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1320
1321 if (! REAL_VALUES_EQUAL (d, dconst0))
1322 {
1323 #if defined (REAL_ARITHMETIC)
1324 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1325 return gen_rtx_MULT (mode, op0,
1326 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1327 #else
1328 return
1329 gen_rtx_MULT (mode, op0,
1330 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
1331 #endif
1332 }
1333 }
1334 #endif
1335 break;
1336
1337 case UMOD:
1338 /* Handle modulus by power of two (mod with 1 handled below). */
1339 if (GET_CODE (op1) == CONST_INT
1340 && exact_log2 (INTVAL (op1)) > 0)
1341 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1342
1343 /* ... fall through ... */
1344
1345 case MOD:
1346 if ((op0 == const0_rtx || op1 == const1_rtx)
1347 && ! side_effects_p (op0) && ! side_effects_p (op1))
1348 return const0_rtx;
1349 break;
1350
1351 case ROTATERT:
1352 case ROTATE:
1353 /* Rotating ~0 always results in ~0. */
1354 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1355 && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
1356 && ! side_effects_p (op1))
1357 return op0;
1358
1359 /* ... fall through ... */
1360
1361 case ASHIFT:
1362 case ASHIFTRT:
1363 case LSHIFTRT:
1364 if (op1 == const0_rtx)
1365 return op0;
1366 if (op0 == const0_rtx && ! side_effects_p (op1))
1367 return op0;
1368 break;
1369
1370 case SMIN:
1371 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1372 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width -1)
1373 && ! side_effects_p (op0))
1374 return op1;
1375 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1376 return op0;
1377 break;
1378
1379 case SMAX:
1380 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1381 && ((unsigned HOST_WIDE_INT) INTVAL (op1)
1382 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1383 && ! side_effects_p (op0))
1384 return op1;
1385 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1386 return op0;
1387 break;
1388
1389 case UMIN:
1390 if (op1 == const0_rtx && ! side_effects_p (op0))
1391 return op1;
1392 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1393 return op0;
1394 break;
1395
1396 case UMAX:
1397 if (op1 == constm1_rtx && ! side_effects_p (op0))
1398 return op1;
1399 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1400 return op0;
1401 break;
1402
1403 default:
1404 abort ();
1405 }
1406
1407 return 0;
1408 }
1409
1410 /* Get the integer argument values in two forms:
1411 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1412
1413 arg0 = INTVAL (op0);
1414 arg1 = INTVAL (op1);
1415
1416 if (width < HOST_BITS_PER_WIDE_INT)
1417 {
1418 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1419 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1420
1421 arg0s = arg0;
1422 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1423 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1424
1425 arg1s = arg1;
1426 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1427 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1428 }
1429 else
1430 {
1431 arg0s = arg0;
1432 arg1s = arg1;
1433 }
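/* Worked example (illustrative): with width == 8, an INTVAL of 0xff gives
   arg0 == 255 (zero-extended) but arg0s == -1 (sign-extended); dividing
   by (const_int 3) then folds to 0 for DIV but to 85 for UDIV.  */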
1434
1435 /* Compute the value of the arithmetic. */
1436
1437 switch (code)
1438 {
1439 case PLUS:
1440 val = arg0s + arg1s;
1441 break;
1442
1443 case MINUS:
1444 val = arg0s - arg1s;
1445 break;
1446
1447 case MULT:
1448 val = arg0s * arg1s;
1449 break;
1450
1451 case DIV:
1452 if (arg1s == 0
1453 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1454 && arg1s == -1))
1455 return 0;
1456 val = arg0s / arg1s;
1457 break;
1458
1459 case MOD:
1460 if (arg1s == 0
1461 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1462 && arg1s == -1))
1463 return 0;
1464 val = arg0s % arg1s;
1465 break;
1466
1467 case UDIV:
1468 if (arg1 == 0
1469 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1470 && arg1s == -1))
1471 return 0;
1472 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1473 break;
1474
1475 case UMOD:
1476 if (arg1 == 0
1477 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1478 && arg1s == -1))
1479 return 0;
1480 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1481 break;
1482
1483 case AND:
1484 val = arg0 & arg1;
1485 break;
1486
1487 case IOR:
1488 val = arg0 | arg1;
1489 break;
1490
1491 case XOR:
1492 val = arg0 ^ arg1;
1493 break;
1494
1495 case LSHIFTRT:
1496 /* If shift count is undefined, don't fold it; let the machine do
1497 what it wants. But truncate it if the machine will do that. */
1498 if (arg1 < 0)
1499 return 0;
1500
1501 #ifdef SHIFT_COUNT_TRUNCATED
1502 if (SHIFT_COUNT_TRUNCATED)
1503 arg1 %= width;
1504 #endif
1505
1506 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1507 break;
1508
1509 case ASHIFT:
1510 if (arg1 < 0)
1511 return 0;
1512
1513 #ifdef SHIFT_COUNT_TRUNCATED
1514 if (SHIFT_COUNT_TRUNCATED)
1515 arg1 %= width;
1516 #endif
1517
1518 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1519 break;
1520
1521 case ASHIFTRT:
1522 if (arg1 < 0)
1523 return 0;
1524
1525 #ifdef SHIFT_COUNT_TRUNCATED
1526 if (SHIFT_COUNT_TRUNCATED)
1527 arg1 %= width;
1528 #endif
1529
1530 val = arg0s >> arg1;
1531
1532 /* The bootstrap compiler may not have sign-extended the right shift.
1533 Manually extend the sign to ensure the bootstrap cc matches gcc. */
1534 if (arg0s < 0 && arg1 > 0)
1535 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1536
1537 break;
1538
1539 case ROTATERT:
1540 if (arg1 < 0)
1541 return 0;
1542
1543 arg1 %= width;
1544 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1545 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1546 break;
1547
1548 case ROTATE:
1549 if (arg1 < 0)
1550 return 0;
1551
1552 arg1 %= width;
1553 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1554 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1555 break;
1556
1557 case COMPARE:
1558 /* Do nothing here. */
1559 return 0;
1560
1561 case SMIN:
1562 val = arg0s <= arg1s ? arg0s : arg1s;
1563 break;
1564
1565 case UMIN:
1566 val = ((unsigned HOST_WIDE_INT) arg0
1567 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1568 break;
1569
1570 case SMAX:
1571 val = arg0s > arg1s ? arg0s : arg1s;
1572 break;
1573
1574 case UMAX:
1575 val = ((unsigned HOST_WIDE_INT) arg0
1576 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1577 break;
1578
1579 default:
1580 abort ();
1581 }
1582
1583 val = trunc_int_for_mode (val, mode);
1584
1585 return GEN_INT (val);
1586 }
1587 \f
1588 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1589 PLUS or MINUS.
1590
1591 Rather than test for specific cases, we do this by a brute-force method
1592 and do all possible simplifications until no more changes occur. Then
1593 we rebuild the operation. */
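/* E.g. (illustrative): (plus (minus x y) (plus y (const_int 4))) expands
   into the operands x, -y, y and 4; the y terms cancel against each
   other, and the result is rebuilt as (plus x (const_int 4)).  */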
1594
1595 static rtx
1596 simplify_plus_minus (code, mode, op0, op1)
1597 enum rtx_code code;
1598 enum machine_mode mode;
1599 rtx op0, op1;
1600 {
1601 rtx ops[8];
1602 int negs[8];
1603 rtx result, tem;
1604 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
1605 int first = 1, negate = 0, changed;
1606 int i, j;
1607
1608 memset ((char *) ops, 0, sizeof ops);
1609
1610 /* Set up the two operands and then expand them until nothing has been
1611 changed. If we run out of room in our array, give up; this should
1612 almost never happen. */
1613
1614 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
1615
1616 changed = 1;
1617 while (changed)
1618 {
1619 changed = 0;
1620
1621 for (i = 0; i < n_ops; i++)
1622 switch (GET_CODE (ops[i]))
1623 {
1624 case PLUS:
1625 case MINUS:
1626 if (n_ops == 7)
1627 return 0;
1628
1629 ops[n_ops] = XEXP (ops[i], 1);
1630 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
1631 ops[i] = XEXP (ops[i], 0);
1632 input_ops++;
1633 changed = 1;
1634 break;
1635
1636 case NEG:
1637 ops[i] = XEXP (ops[i], 0);
1638 negs[i] = ! negs[i];
1639 changed = 1;
1640 break;
1641
1642 case CONST:
1643 ops[i] = XEXP (ops[i], 0);
1644 input_consts++;
1645 changed = 1;
1646 break;
1647
1648 case NOT:
1649 /* ~a -> (-a - 1) */
1650 if (n_ops != 7)
1651 {
1652 ops[n_ops] = constm1_rtx;
1653 negs[n_ops++] = negs[i];
1654 ops[i] = XEXP (ops[i], 0);
1655 negs[i] = ! negs[i];
1656 changed = 1;
1657 }
1658 break;
1659
1660 case CONST_INT:
1661 if (negs[i])
1662 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
1663 break;
1664
1665 default:
1666 break;
1667 }
1668 }
1669
1670 /* If we only have two operands, we can't do anything. */
1671 if (n_ops <= 2)
1672 return 0;
1673
1674 /* Now simplify each pair of operands until nothing changes. The first
1675 time through just simplify constants against each other. */
1676
1677 changed = 1;
1678 while (changed)
1679 {
1680 changed = first;
1681
1682 for (i = 0; i < n_ops - 1; i++)
1683 for (j = i + 1; j < n_ops; j++)
1684 if (ops[i] != 0 && ops[j] != 0
1685 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
1686 {
1687 rtx lhs = ops[i], rhs = ops[j];
1688 enum rtx_code ncode = PLUS;
1689
1690 if (negs[i] && ! negs[j])
1691 lhs = ops[j], rhs = ops[i], ncode = MINUS;
1692 else if (! negs[i] && negs[j])
1693 ncode = MINUS;
1694
1695 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1696 if (tem)
1697 {
1698 ops[i] = tem, ops[j] = 0;
1699 negs[i] = negs[i] && negs[j];
1700 if (GET_CODE (tem) == NEG)
1701 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
1702
1703 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
1704 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
1705 changed = 1;
1706 }
1707 }
1708
1709 first = 0;
1710 }
1711
1712 /* Pack all the operands to the lower-numbered entries and give up if
1713 we didn't reduce the number of operands we had. Make sure we
1714 count a CONST as two operands. If we have the same number of
1715 operands, but have made more CONSTs than we had, this is also
1716 an improvement, so accept it. */
1717
1718 for (i = 0, j = 0; j < n_ops; j++)
1719 if (ops[j] != 0)
1720 {
1721 ops[i] = ops[j], negs[i++] = negs[j];
1722 if (GET_CODE (ops[j]) == CONST)
1723 n_consts++;
1724 }
1725
1726 if (i + n_consts > input_ops
1727 || (i + n_consts == input_ops && n_consts <= input_consts))
1728 return 0;
1729
1730 n_ops = i;
1731
1732 /* If we have a CONST_INT, put it last. */
1733 for (i = 0; i < n_ops - 1; i++)
1734 if (GET_CODE (ops[i]) == CONST_INT)
1735 {
1736 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
1737 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
1738 }
1739
1740 /* Put a non-negated operand first. If there aren't any, make all
1741 operands positive and negate the whole thing later. */
1742 for (i = 0; i < n_ops && negs[i]; i++)
1743 ;
1744
1745 if (i == n_ops)
1746 {
1747 for (i = 0; i < n_ops; i++)
1748 negs[i] = 0;
1749 negate = 1;
1750 }
1751 else if (i != 0)
1752 {
1753 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
1754 j = negs[0], negs[0] = negs[i], negs[i] = j;
1755 }
1756
1757 /* Now make the result by performing the requested operations. */
1758 result = ops[0];
1759 for (i = 1; i < n_ops; i++)
1760 result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
1761
1762 return negate ? gen_rtx_NEG (mode, result) : result;
1763 }
1764
1765 struct cfc_args
1766 {
1767 rtx op0, op1; /* Input */
1768 int equal, op0lt, op1lt; /* Output */
1769 int unordered;
1770 };
1771
1772 static void
1773 check_fold_consts (data)
1774 PTR data;
1775 {
1776 struct cfc_args *args = (struct cfc_args *) data;
1777 REAL_VALUE_TYPE d0, d1;
1778
1779 /* We may possibly raise an exception while reading the value. */
1780 args->unordered = 1;
1781 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
1782 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
1783
1784 /* If either operand is a NaN, the comparison is unordered. */
1785 if (REAL_VALUE_ISNAN (d0)
1786 || REAL_VALUE_ISNAN (d1))
1787 return;
1788 args->equal = REAL_VALUES_EQUAL (d0, d1);
1789 args->op0lt = REAL_VALUES_LESS (d0, d1);
1790 args->op1lt = REAL_VALUES_LESS (d1, d0);
1791 args->unordered = 0;
1792 }
1793
1794 /* Like simplify_binary_operation except used for relational operators.
1795 MODE is the mode of the operands, not that of the result. If MODE
1796 is VOIDmode, both operands must also be VOIDmode and we compare the
1797 operands in "infinite precision".
1798
1799 If no simplification is possible, this function returns zero. Otherwise,
1800 it returns either const_true_rtx or const0_rtx. */
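/* E.g. (illustrative): comparing (const_int 5) with (const_int 3) via GT
   in VOIDmode folds to const_true_rtx, and (ltu x (const_int 0)) folds
   to const0_rtx, since no unsigned value is less than zero.  */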
1801
1802 rtx
1803 simplify_relational_operation (code, mode, op0, op1)
1804 enum rtx_code code;
1805 enum machine_mode mode;
1806 rtx op0, op1;
1807 {
1808 int equal, op0lt, op0ltu, op1lt, op1ltu;
1809 rtx tem;
1810
1811 if (mode == VOIDmode
1812 && (GET_MODE (op0) != VOIDmode
1813 || GET_MODE (op1) != VOIDmode))
1814 abort ();
1815
1816 /* If op0 is a compare, extract the comparison arguments from it. */
1817 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1818 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1819
1820 /* We can't simplify MODE_CC values since we don't know what the
1821 actual comparison is. */
1822 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1823 #ifdef HAVE_cc0
1824 || op0 == cc0_rtx
1825 #endif
1826 )
1827 return 0;
1828
1829 /* Make sure the constant is second. */
1830 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
1831 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
1832 {
1833 tem = op0, op0 = op1, op1 = tem;
1834 code = swap_condition (code);
1835 }
1836
1837 /* For integer comparisons of A and B maybe we can simplify A - B and can
1838 then simplify a comparison of that with zero. If A and B are both either
1839 a register or a CONST_INT, this can't help; testing for these cases will
1840 prevent infinite recursion here and speed things up.
1841
1842 If CODE is an unsigned comparison, then we can never do this optimization,
1843 because it gives an incorrect result if the subtraction wraps around zero.
1844 ANSI C defines unsigned operations such that they never overflow, and
1845 thus such cases cannot be ignored. */
1846
1847 if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
1848 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
1849 && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
1850 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1851 && code != GTU && code != GEU && code != LTU && code != LEU)
1852 return simplify_relational_operation (signed_condition (code),
1853 mode, tem, const0_rtx);
1854
1855 if (flag_unsafe_math_optimizations && code == ORDERED)
1856 return const_true_rtx;
1857
1858 if (flag_unsafe_math_optimizations && code == UNORDERED)
1859 return const0_rtx;
1860
1861 /* For non-IEEE floating-point, if the two operands are equal, we know the
1862 result. */
1863 if (rtx_equal_p (op0, op1)
1864 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1865 || ! FLOAT_MODE_P (GET_MODE (op0))
1866 || flag_unsafe_math_optimizations))
1867 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1868
1869 /* If the operands are floating-point constants, see if we can fold
1870 the result. */
1871 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1872 else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
1873 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
1874 {
1875 struct cfc_args args;
1876
1877 /* Set up the input for check_fold_consts (). */
1878 args.op0 = op0;
1879 args.op1 = op1;
1880
1881
1882 if (!do_float_handler (check_fold_consts, (PTR) &args))
1883 args.unordered = 1;
1884
1885 if (args.unordered)
1886 switch (code)
1887 {
1888 case UNEQ:
1889 case UNLT:
1890 case UNGT:
1891 case UNLE:
1892 case UNGE:
1893 case NE:
1894 case UNORDERED:
1895 return const_true_rtx;
1896 case EQ:
1897 case LT:
1898 case GT:
1899 case LE:
1900 case GE:
1901 case LTGT:
1902 case ORDERED:
1903 return const0_rtx;
1904 default:
1905 return 0;
1906 }
1907
1908 /* Receive the output from check_fold_consts (). */
1909 equal = args.equal;
1910 op0lt = op0ltu = args.op0lt;
1911 op1lt = op1ltu = args.op1lt;
1912 }
1913 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1914
1915 /* Otherwise, see if the operands are both integers. */
1916 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
1917 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
1918 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
1919 {
1920 int width = GET_MODE_BITSIZE (mode);
1921 HOST_WIDE_INT l0s, h0s, l1s, h1s;
1922 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
1923
1924 /* Get the two words comprising each integer constant. */
1925 if (GET_CODE (op0) == CONST_DOUBLE)
1926 {
1927 l0u = l0s = CONST_DOUBLE_LOW (op0);
1928 h0u = h0s = CONST_DOUBLE_HIGH (op0);
1929 }
1930 else
1931 {
1932 l0u = l0s = INTVAL (op0);
1933 h0u = h0s = HWI_SIGN_EXTEND (l0s);
1934 }
1935
1936 if (GET_CODE (op1) == CONST_DOUBLE)
1937 {
1938 l1u = l1s = CONST_DOUBLE_LOW (op1);
1939 h1u = h1s = CONST_DOUBLE_HIGH (op1);
1940 }
1941 else
1942 {
1943 l1u = l1s = INTVAL (op1);
1944 h1u = h1s = HWI_SIGN_EXTEND (l1s);
1945 }
1946
1947 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1948 we have to sign or zero-extend the values. */
1949 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
1950 {
1951 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
1952 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
1953
1954 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1955 l0s |= ((HOST_WIDE_INT) (-1) << width);
1956
1957 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1958 l1s |= ((HOST_WIDE_INT) (-1) << width);
1959 }
1960 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
1961 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
1962
1963 equal = (h0u == h1u && l0u == l1u);
1964 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
1965 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
1966 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
1967 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
1968 }
1969
1970 /* Otherwise, there are some code-specific tests we can make. */
1971 else
1972 {
1973 switch (code)
1974 {
1975 case EQ:
1976 /* References to the frame plus a constant or labels cannot
1977 be zero, but a SYMBOL_REF can due to #pragma weak. */
1978 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
1979 || GET_CODE (op0) == LABEL_REF)
1980 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1981 /* On some machines, the ap reg can be 0 sometimes. */
1982 && op0 != arg_pointer_rtx
1983 #endif
1984 )
1985 return const0_rtx;
1986 break;
1987
1988 case NE:
1989 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
1990 || GET_CODE (op0) == LABEL_REF)
1991 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1992 && op0 != arg_pointer_rtx
1993 #endif
1994 )
1995 return const_true_rtx;
1996 break;
1997
1998 case GEU:
1999 /* Unsigned values are never negative. */
2000 if (op1 == const0_rtx)
2001 return const_true_rtx;
2002 break;
2003
2004 case LTU:
2005 if (op1 == const0_rtx)
2006 return const0_rtx;
2007 break;
2008
2009 case LEU:
2010 /* Unsigned values are never greater than the largest
2011 unsigned value. */
2012 if (GET_CODE (op1) == CONST_INT
2013 && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
2014 && INTEGRAL_MODE_P (mode))
2015 return const_true_rtx;
2016 break;
2017
2018 case GTU:
2019 if (GET_CODE (op1) == CONST_INT
2020 && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
2021 && INTEGRAL_MODE_P (mode))
2022 return const0_rtx;
2023 break;
2024
2025 default:
2026 break;
2027 }
2028
2029 return 0;
2030 }
2031
2032 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2033 as appropriate. */
2034 switch (code)
2035 {
2036 case EQ:
2037 case UNEQ:
2038 return equal ? const_true_rtx : const0_rtx;
2039 case NE:
2040 case LTGT:
2041 return ! equal ? const_true_rtx : const0_rtx;
2042 case LT:
2043 case UNLT:
2044 return op0lt ? const_true_rtx : const0_rtx;
2045 case GT:
2046 case UNGT:
2047 return op1lt ? const_true_rtx : const0_rtx;
2048 case LTU:
2049 return op0ltu ? const_true_rtx : const0_rtx;
2050 case GTU:
2051 return op1ltu ? const_true_rtx : const0_rtx;
2052 case LE:
2053 case UNLE:
2054 return equal || op0lt ? const_true_rtx : const0_rtx;
2055 case GE:
2056 case UNGE:
2057 return equal || op1lt ? const_true_rtx : const0_rtx;
2058 case LEU:
2059 return equal || op0ltu ? const_true_rtx : const0_rtx;
2060 case GEU:
2061 return equal || op1ltu ? const_true_rtx : const0_rtx;
2062 case ORDERED:
2063 return const_true_rtx;
2064 case UNORDERED:
2065 return const0_rtx;
2066 default:
2067 abort ();
2068 }
2069 }
2070 \f
2071 /* Simplify CODE, an operation with result mode MODE and three operands,
2072 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2073 a constant. Return 0 if no simplification is possible. */
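/* Illustrative example: a constant condition folds immediately, so

     simplify_ternary_operation (IF_THEN_ELSE, SImode, SImode,
                                 const1_rtx, a, b)

   returns a, since any nonzero CONST_INT selects the first arm. */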
2074
2075 rtx
2076 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2077 enum rtx_code code;
2078 enum machine_mode mode, op0_mode;
2079 rtx op0, op1, op2;
2080 {
2081 unsigned int width = GET_MODE_BITSIZE (mode);
2082
2083 /* VOIDmode means "infinite" precision. */
2084 if (width == 0)
2085 width = HOST_BITS_PER_WIDE_INT;
2086
2087 switch (code)
2088 {
2089 case SIGN_EXTRACT:
2090 case ZERO_EXTRACT:
2091 if (GET_CODE (op0) == CONST_INT
2092 && GET_CODE (op1) == CONST_INT
2093 && GET_CODE (op2) == CONST_INT
2094 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2095 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2096 {
2097 /* Extracting a bit-field from a constant */
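/* Illustrative example (assuming !BITS_BIG_ENDIAN): extracting 4 bits
   at position 4 from (const_int 0xab) shifts val right to 0xa;
   ZERO_EXTRACT masks it to 0xa, while SIGN_EXTRACT propagates
   bit 3 and yields -6. */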
2098 HOST_WIDE_INT val = INTVAL (op0);
2099
2100 if (BITS_BIG_ENDIAN)
2101 val >>= (GET_MODE_BITSIZE (op0_mode)
2102 - INTVAL (op2) - INTVAL (op1));
2103 else
2104 val >>= INTVAL (op2);
2105
2106 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2107 {
2108 /* First zero-extend. */
2109 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2110 /* If desired, propagate sign bit. */
2111 if (code == SIGN_EXTRACT
2112 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2113 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2114 }
2115
2116 /* Clear the bits that don't belong in our mode,
2117 unless they and our sign bit are all one.
2118 So we get either a reasonable negative value or a reasonable
2119 unsigned value for this mode. */
2120 if (width < HOST_BITS_PER_WIDE_INT
2121 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2122 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2123 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2124
2125 return GEN_INT (val);
2126 }
2127 break;
2128
2129 case IF_THEN_ELSE:
2130 if (GET_CODE (op0) == CONST_INT)
2131 return op0 != const0_rtx ? op1 : op2;
2132
2133 /* Convert a != b ? a : b and a == b ? b : a to "a". */
2134 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2135 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2136 && rtx_equal_p (XEXP (op0, 0), op1)
2137 && rtx_equal_p (XEXP (op0, 1), op2))
2138 return op1;
2139 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2140 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2141 && rtx_equal_p (XEXP (op0, 1), op1)
2142 && rtx_equal_p (XEXP (op0, 0), op2))
2143 return op2;
2144 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2145 {
2146 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2147 ? GET_MODE (XEXP (op0, 1))
2148 : GET_MODE (XEXP (op0, 0)));
2149 rtx temp;
2150 if (cmp_mode == VOIDmode)
2151 cmp_mode = op0_mode;
2152 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2153 XEXP (op0, 0), XEXP (op0, 1));
2154
2155 /* See if any simplifications were possible. */
2156 if (temp == const0_rtx)
2157 return op2;
2158 else if (temp == const1_rtx)
2159 return op1;
2160 else if (temp)
2161 op0 = temp;
2162
2163 /* Look for happy constants in op1 and op2. */
2164 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2165 {
2166 HOST_WIDE_INT t = INTVAL (op1);
2167 HOST_WIDE_INT f = INTVAL (op2);
2168
2169 if (t == STORE_FLAG_VALUE && f == 0)
2170 code = GET_CODE (op0);
2171 else if (t == 0 && f == STORE_FLAG_VALUE)
2172 {
2173 enum rtx_code tmp;
2174 tmp = reversed_comparison_code (op0, NULL_RTX);
2175 if (tmp == UNKNOWN)
2176 break;
2177 code = tmp;
2178 }
2179 else
2180 break;
2181
2182 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2183 }
2184 }
2185 break;
2186
2187 default:
2188 abort ();
2189 }
2190
2191 return 0;
2192 }
2193
2194 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2195 Return 0 if no simplification is possible. */
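/* Illustrative example (assuming a little-endian target):
   (subreg:HI (const_int 0x12345678) 0) with SImode as INNERMODE
   folds to (const_int 0x5678); BYTE == 2 yields (const_int 0x1234). */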
2196 rtx
2197 simplify_subreg (outermode, op, innermode, byte)
2198 rtx op;
2199 unsigned int byte;
2200 enum machine_mode outermode, innermode;
2201 {
2202 /* Little bit of sanity checking. */
2203 if (innermode == VOIDmode || outermode == VOIDmode
2204 || innermode == BLKmode || outermode == BLKmode)
2205 abort ();
2206
2207 if (GET_MODE (op) != innermode
2208 && GET_MODE (op) != VOIDmode)
2209 abort ();
2210
2211 if (byte % GET_MODE_SIZE (outermode)
2212 || byte >= GET_MODE_SIZE (innermode))
2213 abort ();
2214
2215 /* Attempt to simplify a constant to a non-SUBREG expression. */
2216 if (CONSTANT_P (op))
2217 {
2218 int offset, part;
2219 unsigned HOST_WIDE_INT val;
2220
2221 /* ??? This code is partly redundant with the code below, but can handle
2222 the subregs of floats and similar corner cases.
2223 Later we should move all simplification code here and rewrite
2224 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2225 using SIMPLIFY_SUBREG. */
2226 if (subreg_lowpart_parts_p (outermode, innermode, byte))
2227 {
2228 rtx new = gen_lowpart_if_possible (outermode, op);
2229 if (new)
2230 return new;
2231 }
2232
2233 /* The same comment as above applies here. */
2234 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2235 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2236 && GET_MODE_CLASS (outermode) == MODE_INT)
2237 {
2238 rtx new = operand_subword (op,
2239 (byte / UNITS_PER_WORD),
2240 0, innermode);
2241 if (new)
2242 return new;
2243 }
2244
2245 offset = byte * BITS_PER_UNIT;
2246 switch (GET_CODE (op))
2247 {
2248 case CONST_DOUBLE:
2249 if (GET_MODE (op) != VOIDmode)
2250 break;
2251
2252 /* We can't handle this case yet. */
2253 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2254 return NULL;
2255
2256 part = offset >= HOST_BITS_PER_WIDE_INT;
2257 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2258 && BYTES_BIG_ENDIAN)
2259 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2260 && WORDS_BIG_ENDIAN))
2261 part = !part;
2262 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2263 offset %= HOST_BITS_PER_WIDE_INT;
2264
2265 /* We've already picked the word we want from a double, so
2266 pretend this is actually an integer. */
2267 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2268
2269 /* FALLTHROUGH */
2270 case CONST_INT:
2271 if (GET_CODE (op) == CONST_INT)
2272 val = INTVAL (op);
2273
2274 /* We don't handle synthesizing non-integral constants yet. */
2275 if (GET_MODE_CLASS (outermode) != MODE_INT)
2276 return NULL;
2277
2278 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2279 {
2280 if (WORDS_BIG_ENDIAN)
2281 offset = (GET_MODE_BITSIZE (innermode)
2282 - GET_MODE_BITSIZE (outermode) - offset);
2283 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2284 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2285 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2286 - 2 * (offset % BITS_PER_WORD));
2287 }
2288
2289 if (offset >= HOST_BITS_PER_WIDE_INT)
2290 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2291 else
2292 {
2293 val >>= offset;
2294 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2295 val = trunc_int_for_mode (val, outermode);
2296 return GEN_INT (val);
2297 }
2298 default:
2299 break;
2300 }
2301 }
2302
2303 /* Changing mode twice with SUBREG => just change it once,
2304 or not at all if changing back to op's starting mode. */
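/* Illustrative example: (subreg:QI (subreg:HI (reg:SI 100) 0) 0),
   where reg 100 is a pseudo, simplifies to
   (subreg:QI (reg:SI 100) 0). */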
2305 if (GET_CODE (op) == SUBREG)
2306 {
2307 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2308 unsigned int final_offset = byte + SUBREG_BYTE (op);
2309 rtx new;
2310
2311 if (outermode == innermostmode
2312 && byte == 0 && SUBREG_BYTE (op) == 0)
2313 return SUBREG_REG (op);
2314
2315 if ((WORDS_BIG_ENDIAN || BYTES_BIG_ENDIAN)
2316 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode)
2317 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (innermostmode))
2318 {
2319 /* The inner SUBREG is paradoxical and the outer one is not. On
2320 big-endian machines we have to special-case this. */
2321 if (SUBREG_BYTE (op))
2322 abort (); /* Can a paradoxical subreg have a nonzero offset? */
2323 if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
2324 final_offset = (byte - GET_MODE_SIZE (innermode)
2325 + GET_MODE_SIZE (innermostmode));
2326 else if (WORDS_BIG_ENDIAN)
2327 final_offset = ((final_offset % UNITS_PER_WORD)
2328 + ((byte - GET_MODE_SIZE (innermode)
2329 + GET_MODE_SIZE (innermostmode))
2330 * UNITS_PER_WORD) / UNITS_PER_WORD);
2331 else
2332 final_offset = (((final_offset * UNITS_PER_WORD)
2333 / UNITS_PER_WORD)
2334 + ((byte - GET_MODE_SIZE (innermode)
2335 + GET_MODE_SIZE (innermostmode))
2336 % UNITS_PER_WORD));
2337 }
2338
2339 /* Bail out if the resulting subreg would be incorrect. */
2340 if (final_offset % GET_MODE_SIZE (outermode)
2341 || final_offset >= GET_MODE_SIZE (innermostmode))
2342 return NULL;
2343 /* Recurse for further possible simplifications. */
2344 new = simplify_subreg (outermode, SUBREG_REG (op),
2345 GET_MODE (SUBREG_REG (op)),
2346 final_offset);
2347 if (new)
2348 return new;
2349 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2350 }
2351
2352 /* SUBREG of a hard register => just change the register number
2353 and/or mode. If the hard register is not valid in that mode,
2354 suppress this simplification. If the hard register is the stack,
2355 frame, or argument pointer, leave this as a SUBREG. */
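/* Illustrative example: on a target where hard register 0 is valid
   in both DImode and SImode, (subreg:SI (reg:DI 0) 0) folds to a
   plain (reg:SI n), with n computed by subreg_hard_regno. */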
2356
2357 if (REG_P (op)
2358 && REGNO (op) < FIRST_PSEUDO_REGISTER
2359 && REGNO (op) != FRAME_POINTER_REGNUM
2360 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2361 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2362 #endif
2363 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2364 && REGNO (op) != ARG_POINTER_REGNUM
2365 #endif
2366 && REGNO (op) != STACK_POINTER_REGNUM)
2367 {
2368 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2369 0);
2370
2371 if (HARD_REGNO_MODE_OK (final_regno, outermode))
2372 return gen_rtx_REG (outermode, final_regno);
2373 }
2374
2375 /* If we have a SUBREG of a register that we are replacing and we are
2376 replacing it with a MEM, make a new MEM and try replacing the
2377 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2378 or if we would be widening it. */
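/* Illustrative example: (subreg:SI (mem:DI (reg:SI 1)) 4), when the
   address is not mode-dependent, becomes
   (mem:SI (plus:SI (reg:SI 1) (const_int 4))) with the MEM's
   attributes copied over. */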
2379
2380 if (GET_CODE (op) == MEM
2381 && ! mode_dependent_address_p (XEXP (op, 0))
2382 && ! MEM_VOLATILE_P (op)
2383 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2384 {
2385 rtx new;
2386
2387 new = gen_rtx_MEM (outermode, plus_constant (XEXP (op, 0), byte));
2388 MEM_COPY_ATTRIBUTES (new, op);
2389 return new;
2390 }
2391 return NULL_RTX;
2392 }
2393 /* Make a SUBREG operation or equivalent if it folds. */
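/* Illustrative example: simplify_gen_subreg (QImode, x, SImode, 0)
   first tries simplify_subreg; failing that, it returns
   (subreg:QI x 0), or NULL_RTX if x is itself a SUBREG or a
   VOIDmode constant. */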
2394
2395 rtx
2396 simplify_gen_subreg (outermode, op, innermode, byte)
2397 rtx op;
2398 unsigned int byte;
2399 enum machine_mode outermode, innermode;
2400 {
2401 rtx new;
2402 /* Little bit of sanity checking. */
2403 if (innermode == VOIDmode || outermode == VOIDmode
2404 || innermode == BLKmode || outermode == BLKmode)
2405 abort ();
2406
2407 if (GET_MODE (op) != innermode
2408 && GET_MODE (op) != VOIDmode)
2409 abort ();
2410
2411 if (byte % GET_MODE_SIZE (outermode)
2412 || byte >= GET_MODE_SIZE (innermode))
2413 abort ();
2414
2415 new = simplify_subreg (outermode, op, innermode, byte);
2416 if (new)
2417 return new;
2418
2419 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2420 return NULL_RTX;
2421
2422 return gen_rtx_SUBREG (outermode, op, byte);
2423 }
2424 /* Simplify X, an rtx expression.
2425
2426 Return the simplified expression or NULL if no simplifications
2427 were possible.
2428
2429 This is the preferred entry point into the simplification routines;
2430 however, we still allow passes to call the more specific routines.
2431
2432 Right now GCC has three (yes, three) major bodies of RTL simplification
2433 code that need to be unified.
2434
2435 1. fold_rtx in cse.c. This code uses various CSE-specific
2436 information to aid in RTL simplification.
2437
2438 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2439 it uses combine-specific information to aid in RTL
2440 simplification.
2441
2442 3. The routines in this file.
2443
2444
2445 Long term we want to only have one body of simplification code; to
2446 get to that state I recommend the following steps:
2447
2448 1. Pore over fold_rtx & simplify_rtx and move any simplifications
2449 that do not depend on pass-specific state into these routines.
2450
2451 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2452 use this routine whenever possible.
2453
2454 3. Allow pass-dependent state to be provided to these
2455 routines and add simplifications based on that
2456 state. Remove code from cse.c & combine.c that becomes
2457 redundant/dead.
2458
2459 It will take time, but ultimately the compiler will be easier to
2460 maintain and improve. It's totally silly that when we add a
2461 simplification it needs to be added to 4 places (3 for RTL
2462 simplification and 1 for tree simplification). */
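/* Illustrative example: for x = (plus:SI (const_int 2) (const_int 3)),
   GET_RTX_CLASS (PLUS) is 'c', so we dispatch to
   simplify_binary_operation, which returns (const_int 5); NULL is
   returned when no simplification applies. */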
2463
2464 rtx
2465 simplify_rtx (x)
2466 rtx x;
2467 {
2468 enum rtx_code code = GET_CODE (x);
2469 enum machine_mode mode = GET_MODE (x);
2470
2471 switch (GET_RTX_CLASS (code))
2472 {
2473 case '1':
2474 return simplify_unary_operation (code, mode,
2475 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2476 case '2':
2477 case 'c':
2478 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2479
2480 case '3':
2481 case 'b':
2482 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2483 XEXP (x, 0), XEXP (x, 1),
2484 XEXP (x, 2));
2485
2486 case '<':
2487 return simplify_relational_operation (code,
2488 ((GET_MODE (XEXP (x, 0))
2489 != VOIDmode)
2490 ? GET_MODE (XEXP (x, 0))
2491 : GET_MODE (XEXP (x, 1))),
2492 XEXP (x, 0), XEXP (x, 1));
2493 case 'x':
2494 /* The only case we try to handle is a SUBREG. */
2495 if (code == SUBREG)
2496 return simplify_gen_subreg (mode, SUBREG_REG (x),
2497 GET_MODE (SUBREG_REG (x)),
2498 SUBREG_BYTE (x));
2499 return NULL;
2500 default:
2501 return NULL;
2502 }
2503 }