[gcc.git] / gcc / simplify-rtx.c
1/* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22
23#include "config.h"
24#include "system.h"
25#include <setjmp.h>
26
27#include "rtl.h"
28#include "tm_p.h"
29#include "regs.h"
30#include "hard-reg-set.h"
31#include "flags.h"
32#include "real.h"
33#include "insn-config.h"
34#include "recog.h"
35#include "function.h"
36#include "expr.h"
37#include "toplev.h"
38#include "output.h"
39#include "ggc.h"
40#include "obstack.h"
41#include "hashtab.h"
42#include "cselib.h"
43
44/* Simplification and canonicalization of RTL. */
45
46/* Nonzero if X has the form (PLUS frame-pointer integer). We check for
47 virtual regs here because the simplify_*_operation routines are called
48 by integrate.c, which is called before virtual register instantiation.
49
50 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
 51   a header file so that a single definition can be shared with
 52   cse.c, which currently has its own copies.  Until then, do not
 53   change these macros without also changing the copies in cse.c.  */
54
55#define FIXED_BASE_PLUS_P(X) \
56 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
57 || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
58 || (X) == virtual_stack_vars_rtx \
59 || (X) == virtual_incoming_args_rtx \
60 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
61 && (XEXP (X, 0) == frame_pointer_rtx \
62 || XEXP (X, 0) == hard_frame_pointer_rtx \
 63        || (XEXP (X, 0) == arg_pointer_rtx                          \
 64            && fixed_regs[ARG_POINTER_REGNUM])                      \
65 || XEXP (X, 0) == virtual_stack_vars_rtx \
66 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
67 || GET_CODE (X) == ADDRESSOF)
68
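/* A quick sketch of what FIXED_BASE_PLUS_P accepts (assuming, purely for
   illustration, a target whose frame pointer is hard register 6):

     (reg/f:SI 6)                              frame_pointer_rtx itself
     (plus:SI (reg/f:SI 6) (const_int 8))      frame pointer plus a constant
     (addressof:SI (reg:SI 100))               an ADDRESSOF

   whereas (plus:SI (reg:SI 100) (const_int 8)) is rejected, because its
   base is an ordinary pseudo rather than one of the fixed base registers.  */
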
69/* Similar, but also allows reference to the stack pointer.
70
 71   This used to include FIXED_BASE_PLUS_P; however, we can't assume that
72 arg_pointer_rtx by itself is nonzero, because on at least one machine,
73 the i960, the arg pointer is zero when it is unused. */
74
75#define NONZERO_BASE_PLUS_P(X) \
76 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
77 || (X) == virtual_stack_vars_rtx \
78 || (X) == virtual_incoming_args_rtx \
79 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
80 && (XEXP (X, 0) == frame_pointer_rtx \
81 || XEXP (X, 0) == hard_frame_pointer_rtx \
 82        || (XEXP (X, 0) == arg_pointer_rtx                          \
 83            && fixed_regs[ARG_POINTER_REGNUM])                      \
84 || XEXP (X, 0) == virtual_stack_vars_rtx \
85 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
86 || (X) == stack_pointer_rtx \
87 || (X) == virtual_stack_dynamic_rtx \
88 || (X) == virtual_outgoing_args_rtx \
89 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
90 && (XEXP (X, 0) == stack_pointer_rtx \
91 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
92 || XEXP (X, 0) == virtual_outgoing_args_rtx)) \
93 || GET_CODE (X) == ADDRESSOF)
94
95
96static rtx simplify_plus_minus PARAMS ((enum rtx_code,
97 enum machine_mode, rtx, rtx));
98static void check_fold_consts PARAMS ((PTR));
99static int entry_and_rtx_equal_p PARAMS ((const void *, const void *));
100static unsigned int get_value_hash PARAMS ((const void *));
101static struct elt_list *new_elt_list PARAMS ((struct elt_list *,
102 cselib_val *));
103static struct elt_loc_list *new_elt_loc_list PARAMS ((struct elt_loc_list *,
104 rtx));
105static void unchain_one_value PARAMS ((cselib_val *));
106static void unchain_one_elt_list PARAMS ((struct elt_list **));
107static void unchain_one_elt_loc_list PARAMS ((struct elt_loc_list **));
108static void clear_table PARAMS ((void));
109static int discard_useless_locs PARAMS ((void **, void *));
110static int discard_useless_values PARAMS ((void **, void *));
111static void remove_useless_values PARAMS ((void));
112static unsigned int hash_rtx PARAMS ((rtx, enum machine_mode, int));
113static cselib_val *new_cselib_val PARAMS ((unsigned int,
114 enum machine_mode));
115static void add_mem_for_addr PARAMS ((cselib_val *, cselib_val *,
116 rtx));
117static cselib_val *cselib_lookup_mem PARAMS ((rtx, int));
118static rtx cselib_subst_to_values PARAMS ((rtx));
119static void cselib_invalidate_regno PARAMS ((unsigned int,
120 enum machine_mode));
121static int cselib_mem_conflict_p PARAMS ((rtx, rtx));
122static int cselib_invalidate_mem_1 PARAMS ((void **, void *));
123static void cselib_invalidate_mem PARAMS ((rtx));
124static void cselib_invalidate_rtx PARAMS ((rtx, rtx, void *));
125static void cselib_record_set PARAMS ((rtx, cselib_val *,
126 cselib_val *));
127static void cselib_record_sets PARAMS ((rtx));
128
129/* There are three ways in which cselib can look up an rtx:
130 - for a REG, the reg_values table (which is indexed by regno) is used
131 - for a MEM, we recursively look up its address and then follow the
132 addr_list of that value
133 - for everything else, we compute a hash value and go through the hash
134 table. Since different rtx's can still have the same hash value,
135 this involves walking the table entries for a given value and comparing
136 the locations of the entries with the rtx we are looking up. */
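/* A condensed sketch of that dispatch (value creation and the hashing
   details are omitted; the real lookup routine handles both):

     if (GET_CODE (x) == REG)
       l = REG_VALUES (REGNO (x));              -- indexed directly by regno
     else if (GET_CODE (x) == MEM)
       e = cselib_lookup_mem (x, create);       -- via the address's addr_list
     else
       slot = htab_find_slot_with_hash (hash_table, x,
                                        hash_rtx (x, mode, create),
                                        create ? INSERT : NO_INSERT);  */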
137
138/* A table that enables us to look up elts by their value. */
139static htab_t hash_table;
140
141/* This is a global so we don't have to pass this through every function.
142 It is used in new_elt_loc_list to set SETTING_INSN. */
143static rtx cselib_current_insn;
144
145/* Every new unknown value gets a unique number. */
146static unsigned int next_unknown_value;
147
148/* The number of registers we had when the varrays were last resized. */
149static unsigned int cselib_nregs;
150
151/* Count values without known locations. Whenever this grows too big, we
152 remove these useless values from the table. */
153static int n_useless_values;
154
155/* Number of useless values before we remove them from the hash table. */
156#define MAX_USELESS_VALUES 32
157
158/* This table maps from register number to values. It does not contain
159 pointers to cselib_val structures, but rather elt_lists. The purpose is
160 to be able to refer to the same register in different modes. */
161static varray_type reg_values;
162#define REG_VALUES(I) VARRAY_ELT_LIST (reg_values, (I))
163
164/* We pass this to cselib_invalidate_mem to invalidate all of
165 memory for a non-const call instruction. */
166static rtx callmem;
167
168/* Memory for our structures is allocated from this obstack. */
169static struct obstack cselib_obstack;
170
171/* Used to quickly free all memory. */
172static char *cselib_startobj;
173
174/* Caches for unused structures. */
175static cselib_val *empty_vals;
176static struct elt_list *empty_elt_lists;
177static struct elt_loc_list *empty_elt_loc_lists;
178
179/* Set by discard_useless_locs if it deleted the last location of any
180 value. */
181static int values_became_useless;
182\f
183/* Make a binary operation by properly ordering the operands and
184 seeing if the expression folds. */
185
186rtx
187simplify_gen_binary (code, mode, op0, op1)
188 enum rtx_code code;
189 enum machine_mode mode;
190 rtx op0, op1;
191{
192 rtx tem;
193
194 /* Put complex operands first and constants second if commutative. */
195 if (GET_RTX_CLASS (code) == 'c'
196 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
197 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
198 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
199 || (GET_CODE (op0) == SUBREG
200 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
201 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
202 tem = op0, op0 = op1, op1 = tem;
203
204 /* If this simplifies, do it. */
205 tem = simplify_binary_operation (code, mode, op0, op1);
206
207 if (tem)
208 return tem;
209
210 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
211 just form the operation. */
212
213 if (code == PLUS && GET_CODE (op1) == CONST_INT
214 && GET_MODE (op0) != VOIDmode)
215 return plus_constant (op0, INTVAL (op1));
216 else if (code == MINUS && GET_CODE (op1) == CONST_INT
217 && GET_MODE (op0) != VOIDmode)
218 return plus_constant (op0, - INTVAL (op1));
219 else
220 return gen_rtx_fmt_ee (code, mode, op0, op1);
221}
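
/* A small usage sketch (register 100 is a hypothetical pseudo; this only
   makes sense inside GCC's RTL machinery):

     rtx reg = gen_rtx_REG (SImode, 100);
     rtx x   = simplify_gen_binary (PLUS, SImode, GEN_INT (4), reg);

   The constant is commuted into the second operand slot and then folded
   through plus_constant, so X ends up as
   (plus:SI (reg:SI 100) (const_int 4)).  */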
222\f
223/* Try to simplify a unary operation CODE whose output mode is to be
224 MODE with input operand OP whose mode was originally OP_MODE.
225 Return zero if no simplification can be made. */
226
227rtx
228simplify_unary_operation (code, mode, op, op_mode)
229 enum rtx_code code;
230 enum machine_mode mode;
231 rtx op;
232 enum machine_mode op_mode;
233{
234 unsigned int width = GET_MODE_BITSIZE (mode);
235
236 /* The order of these tests is critical so that, for example, we don't
237 check the wrong mode (input vs. output) for a conversion operation,
238 such as FIX. At some point, this should be simplified. */
239
240#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
241
242 if (code == FLOAT && GET_MODE (op) == VOIDmode
243 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
244 {
245 HOST_WIDE_INT hv, lv;
246 REAL_VALUE_TYPE d;
247
248 if (GET_CODE (op) == CONST_INT)
249 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
250 else
251 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
252
253#ifdef REAL_ARITHMETIC
254 REAL_VALUE_FROM_INT (d, lv, hv, mode);
255#else
256 if (hv < 0)
257 {
258 d = (double) (~ hv);
259 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
260 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
261 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
262 d = (- d - 1.0);
263 }
264 else
265 {
266 d = (double) hv;
267 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
268 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
269 d += (double) (unsigned HOST_WIDE_INT) lv;
270 }
271#endif /* REAL_ARITHMETIC */
272 d = real_value_truncate (mode, d);
273 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
274 }
275 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
276 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
277 {
278 HOST_WIDE_INT hv, lv;
279 REAL_VALUE_TYPE d;
280
281 if (GET_CODE (op) == CONST_INT)
282 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
283 else
284 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
285
286 if (op_mode == VOIDmode)
287 {
288 /* We don't know how to interpret negative-looking numbers in
289 this case, so don't try to fold those. */
290 if (hv < 0)
291 return 0;
292 }
293 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
294 ;
295 else
296 hv = 0, lv &= GET_MODE_MASK (op_mode);
297
298#ifdef REAL_ARITHMETIC
299 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
300#else
301
302 d = (double) (unsigned HOST_WIDE_INT) hv;
303 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
304 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
305 d += (double) (unsigned HOST_WIDE_INT) lv;
306#endif /* REAL_ARITHMETIC */
307 d = real_value_truncate (mode, d);
308 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
309 }
310#endif
311
312 if (GET_CODE (op) == CONST_INT
313 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
314 {
315 register HOST_WIDE_INT arg0 = INTVAL (op);
316 register HOST_WIDE_INT val;
317
318 switch (code)
319 {
320 case NOT:
321 val = ~ arg0;
322 break;
323
324 case NEG:
325 val = - arg0;
326 break;
327
328 case ABS:
329 val = (arg0 >= 0 ? arg0 : - arg0);
330 break;
331
332 case FFS:
333 /* Don't use ffs here. Instead, get low order bit and then its
334 number. If arg0 is zero, this will return 0, as desired. */
335 arg0 &= GET_MODE_MASK (mode);
336 val = exact_log2 (arg0 & (- arg0)) + 1;
337 break;
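	  /* For example, arg0 = 12 (binary 1100): arg0 & -arg0 is 4,
	     exact_log2 gives 2, so val is 3 -- the lowest set bit,
	     numbered from 1.  */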
338
339 case TRUNCATE:
340 val = arg0;
341 break;
342
343 case ZERO_EXTEND:
344 if (op_mode == VOIDmode)
345 op_mode = mode;
346 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
347 {
348 /* If we were really extending the mode,
349 we would have to distinguish between zero-extension
350 and sign-extension. */
351 if (width != GET_MODE_BITSIZE (op_mode))
352 abort ();
353 val = arg0;
354 }
355 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
356 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
357 else
358 return 0;
359 break;
360
361 case SIGN_EXTEND:
362 if (op_mode == VOIDmode)
363 op_mode = mode;
364 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
365 {
366 /* If we were really extending the mode,
367 we would have to distinguish between zero-extension
368 and sign-extension. */
369 if (width != GET_MODE_BITSIZE (op_mode))
370 abort ();
371 val = arg0;
372 }
373 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
374 {
375 val
376 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
377 if (val
378 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
379 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
380 }
381 else
382 return 0;
383 break;
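	  /* Worked example with op_mode = QImode (8 bits) and arg0 = 0xff:
	     ZERO_EXTEND masks to the low 8 bits and yields 255, while
	     SIGN_EXTEND sees bit 7 set and yields 255 - 256 = -1.  */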
384
385 case SQRT:
386 return 0;
387
388 default:
389 abort ();
390 }
391
392 val = trunc_int_for_mode (val, mode);
393
394 return GEN_INT (val);
395 }
396
397 /* We can do some operations on integer CONST_DOUBLEs. Also allow
398 for a DImode operation on a CONST_INT. */
399 else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
400 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
401 {
402 HOST_WIDE_INT l1, h1, lv, hv;
403
404 if (GET_CODE (op) == CONST_DOUBLE)
405 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
406 else
407 l1 = INTVAL (op), h1 = l1 < 0 ? -1 : 0;
408
409 switch (code)
410 {
411 case NOT:
412 lv = ~ l1;
413 hv = ~ h1;
414 break;
415
416 case NEG:
417 neg_double (l1, h1, &lv, &hv);
418 break;
419
420 case ABS:
421 if (h1 < 0)
422 neg_double (l1, h1, &lv, &hv);
423 else
424 lv = l1, hv = h1;
425 break;
426
427 case FFS:
428 hv = 0;
429 if (l1 == 0)
430 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
431 else
432 lv = exact_log2 (l1 & (-l1)) + 1;
433 break;
434
435 case TRUNCATE:
436 /* This is just a change-of-mode, so do nothing. */
437 lv = l1, hv = h1;
438 break;
439
440 case ZERO_EXTEND:
441 if (op_mode == VOIDmode
442 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
443 return 0;
444
445 hv = 0;
446 lv = l1 & GET_MODE_MASK (op_mode);
447 break;
448
449 case SIGN_EXTEND:
450 if (op_mode == VOIDmode
451 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
452 return 0;
453 else
454 {
455 lv = l1 & GET_MODE_MASK (op_mode);
456 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
457 && (lv & ((HOST_WIDE_INT) 1
458 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
459 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
460
461 hv = (lv < 0) ? ~ (HOST_WIDE_INT) 0 : 0;
462 }
463 break;
464
465 case SQRT:
466 return 0;
467
468 default:
469 return 0;
470 }
471
472 return immed_double_const (lv, hv, mode);
473 }
474
475#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
476 else if (GET_CODE (op) == CONST_DOUBLE
477 && GET_MODE_CLASS (mode) == MODE_FLOAT)
478 {
479 REAL_VALUE_TYPE d;
480 jmp_buf handler;
481 rtx x;
482
483 if (setjmp (handler))
484 /* There used to be a warning here, but that is inadvisable.
485 People may want to cause traps, and the natural way
486 to do it should not get a warning. */
487 return 0;
488
489 set_float_handler (handler);
490
491 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
492
493 switch (code)
494 {
495 case NEG:
496 d = REAL_VALUE_NEGATE (d);
497 break;
498
499 case ABS:
500 if (REAL_VALUE_NEGATIVE (d))
501 d = REAL_VALUE_NEGATE (d);
502 break;
503
504 case FLOAT_TRUNCATE:
505 d = real_value_truncate (mode, d);
506 break;
507
508 case FLOAT_EXTEND:
509 /* All this does is change the mode. */
510 break;
511
512 case FIX:
513 d = REAL_VALUE_RNDZINT (d);
514 break;
515
516 case UNSIGNED_FIX:
517 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
518 break;
519
520 case SQRT:
521 return 0;
522
523 default:
524 abort ();
525 }
526
527 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
528 set_float_handler (NULL_PTR);
529 return x;
530 }
531
532 else if (GET_CODE (op) == CONST_DOUBLE
533 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
534 && GET_MODE_CLASS (mode) == MODE_INT
535 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
536 {
537 REAL_VALUE_TYPE d;
538 jmp_buf handler;
539 HOST_WIDE_INT val;
540
541 if (setjmp (handler))
542 return 0;
543
544 set_float_handler (handler);
545
546 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
547
548 switch (code)
549 {
550 case FIX:
551 val = REAL_VALUE_FIX (d);
552 break;
553
554 case UNSIGNED_FIX:
555 val = REAL_VALUE_UNSIGNED_FIX (d);
556 break;
557
558 default:
559 abort ();
560 }
561
562 set_float_handler (NULL_PTR);
563
564 val = trunc_int_for_mode (val, mode);
565
566 return GEN_INT (val);
567 }
568#endif
569 /* This was formerly used only for non-IEEE float.
570 eggert@twinsun.com says it is safe for IEEE also. */
571 else
572 {
573 /* There are some simplifications we can do even if the operands
574 aren't constant. */
575 switch (code)
576 {
577 case NEG:
578 case NOT:
579 /* (not (not X)) == X, similarly for NEG. */
580 if (GET_CODE (op) == code)
581 return XEXP (op, 0);
582 break;
583
584 case SIGN_EXTEND:
585 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
586 becomes just the MINUS if its mode is MODE. This allows
587 folding switch statements on machines using casesi (such as
588 the Vax). */
589 if (GET_CODE (op) == TRUNCATE
590 && GET_MODE (XEXP (op, 0)) == mode
591 && GET_CODE (XEXP (op, 0)) == MINUS
592 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
593 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
594 return XEXP (op, 0);
595
596#ifdef POINTERS_EXTEND_UNSIGNED
597 if (! POINTERS_EXTEND_UNSIGNED
598 && mode == Pmode && GET_MODE (op) == ptr_mode
599 && CONSTANT_P (op))
600 return convert_memory_address (Pmode, op);
601#endif
602 break;
603
604#ifdef POINTERS_EXTEND_UNSIGNED
605 case ZERO_EXTEND:
606 if (POINTERS_EXTEND_UNSIGNED
607 && mode == Pmode && GET_MODE (op) == ptr_mode
608 && CONSTANT_P (op))
609 return convert_memory_address (Pmode, op);
610 break;
611#endif
612
613 default:
614 break;
615 }
616
617 return 0;
618 }
619}
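
/* A couple of constant-folding examples for this routine:

     simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode)
       => (const_int -5)
     simplify_unary_operation (ZERO_EXTEND, SImode, GEN_INT (-1), QImode)
       => (const_int 255)

   Non-constant operands are only simplified by the few identities in the
   final switch, e.g. (not (not X)) => X; anything else returns 0.  */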
620\f
621/* Simplify a binary operation CODE with result mode MODE, operating on OP0
622 and OP1. Return 0 if no simplification is possible.
623
624 Don't use this for relational operations such as EQ or LT.
625 Use simplify_relational_operation instead. */
626
627rtx
628simplify_binary_operation (code, mode, op0, op1)
629 enum rtx_code code;
630 enum machine_mode mode;
631 rtx op0, op1;
632{
633 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
634 HOST_WIDE_INT val;
635 unsigned int width = GET_MODE_BITSIZE (mode);
636 rtx tem;
637
638 /* Relational operations don't work here. We must know the mode
639 of the operands in order to do the comparison correctly.
640 Assuming a full word can give incorrect results.
641 Consider comparing 128 with -128 in QImode. */
642
643 if (GET_RTX_CLASS (code) == '<')
644 abort ();
645
646#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
647 if (GET_MODE_CLASS (mode) == MODE_FLOAT
648 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
649 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
650 {
651 REAL_VALUE_TYPE f0, f1, value;
652 jmp_buf handler;
653
654 if (setjmp (handler))
655 return 0;
656
657 set_float_handler (handler);
658
659 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
660 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
661 f0 = real_value_truncate (mode, f0);
662 f1 = real_value_truncate (mode, f1);
663
664#ifdef REAL_ARITHMETIC
665#ifndef REAL_INFINITY
666 if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
667 return 0;
668#endif
669 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
670#else
671 switch (code)
672 {
673 case PLUS:
674 value = f0 + f1;
675 break;
676 case MINUS:
677 value = f0 - f1;
678 break;
679 case MULT:
680 value = f0 * f1;
681 break;
682 case DIV:
683#ifndef REAL_INFINITY
684 if (f1 == 0)
685 return 0;
686#endif
687 value = f0 / f1;
688 break;
689 case SMIN:
690 value = MIN (f0, f1);
691 break;
692 case SMAX:
693 value = MAX (f0, f1);
694 break;
695 default:
696 abort ();
697 }
698#endif
699
700 value = real_value_truncate (mode, value);
701 set_float_handler (NULL_PTR);
702 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
703 }
704#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
705
706 /* We can fold some multi-word operations. */
707 if (GET_MODE_CLASS (mode) == MODE_INT
708 && width == HOST_BITS_PER_WIDE_INT * 2
709 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
710 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
711 {
712 HOST_WIDE_INT l1, l2, h1, h2, lv, hv;
713
714 if (GET_CODE (op0) == CONST_DOUBLE)
715 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
716 else
717 l1 = INTVAL (op0), h1 = l1 < 0 ? -1 : 0;
718
719 if (GET_CODE (op1) == CONST_DOUBLE)
720 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
721 else
722 l2 = INTVAL (op1), h2 = l2 < 0 ? -1 : 0;
723
724 switch (code)
725 {
726 case MINUS:
727 /* A - B == A + (-B). */
728 neg_double (l2, h2, &lv, &hv);
729 l2 = lv, h2 = hv;
730
731	  /* ... fall through ... */
732
733 case PLUS:
734 add_double (l1, h1, l2, h2, &lv, &hv);
735 break;
736
737 case MULT:
738 mul_double (l1, h1, l2, h2, &lv, &hv);
739 break;
740
741 case DIV: case MOD: case UDIV: case UMOD:
742 /* We'd need to include tree.h to do this and it doesn't seem worth
743 it. */
744 return 0;
745
746 case AND:
747 lv = l1 & l2, hv = h1 & h2;
748 break;
749
750 case IOR:
751 lv = l1 | l2, hv = h1 | h2;
752 break;
753
754 case XOR:
755 lv = l1 ^ l2, hv = h1 ^ h2;
756 break;
757
758 case SMIN:
759 if (h1 < h2
760 || (h1 == h2
761 && ((unsigned HOST_WIDE_INT) l1
762 < (unsigned HOST_WIDE_INT) l2)))
763 lv = l1, hv = h1;
764 else
765 lv = l2, hv = h2;
766 break;
767
768 case SMAX:
769 if (h1 > h2
770 || (h1 == h2
771 && ((unsigned HOST_WIDE_INT) l1
772 > (unsigned HOST_WIDE_INT) l2)))
773 lv = l1, hv = h1;
774 else
775 lv = l2, hv = h2;
776 break;
777
778 case UMIN:
779 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
780 || (h1 == h2
781 && ((unsigned HOST_WIDE_INT) l1
782 < (unsigned HOST_WIDE_INT) l2)))
783 lv = l1, hv = h1;
784 else
785 lv = l2, hv = h2;
786 break;
787
788 case UMAX:
789 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
790 || (h1 == h2
791 && ((unsigned HOST_WIDE_INT) l1
792 > (unsigned HOST_WIDE_INT) l2)))
793 lv = l1, hv = h1;
794 else
795 lv = l2, hv = h2;
796 break;
797
798 case LSHIFTRT: case ASHIFTRT:
799 case ASHIFT:
800 case ROTATE: case ROTATERT:
801#ifdef SHIFT_COUNT_TRUNCATED
802 if (SHIFT_COUNT_TRUNCATED)
803 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
804#endif
805
806 if (h2 != 0 || l2 < 0 || l2 >= GET_MODE_BITSIZE (mode))
807 return 0;
808
809 if (code == LSHIFTRT || code == ASHIFTRT)
810 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
811 code == ASHIFTRT);
812 else if (code == ASHIFT)
813 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
814 else if (code == ROTATE)
815 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
816 else /* code == ROTATERT */
817 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
818 break;
819
820 default:
821 return 0;
822 }
823
824 return immed_double_const (lv, hv, mode);
825 }
826
827 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
828 || width > HOST_BITS_PER_WIDE_INT || width == 0)
829 {
830 /* Even if we can't compute a constant result,
831 there are some cases worth simplifying. */
832
833 switch (code)
834 {
835 case PLUS:
836 /* In IEEE floating point, x+0 is not the same as x. Similarly
837 for the other optimizations below. */
838 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
839 && FLOAT_MODE_P (mode) && ! flag_fast_math)
840 break;
841
842 if (op1 == CONST0_RTX (mode))
843 return op0;
844
845 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
846 if (GET_CODE (op0) == NEG)
847 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
848 else if (GET_CODE (op1) == NEG)
849 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
850
851 /* Handle both-operands-constant cases. We can only add
852 CONST_INTs to constants since the sum of relocatable symbols
853 can't be handled by most assemblers. Don't add CONST_INT
854 to CONST_INT since overflow won't be computed properly if wider
855 than HOST_BITS_PER_WIDE_INT. */
856
857 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
858 && GET_CODE (op1) == CONST_INT)
859 return plus_constant (op0, INTVAL (op1));
860 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
861 && GET_CODE (op0) == CONST_INT)
862 return plus_constant (op1, INTVAL (op0));
863
864 /* See if this is something like X * C - X or vice versa or
865 if the multiplication is written as a shift. If so, we can
866 distribute and make a new multiply, shift, or maybe just
867	 have X (if C is 2 in the example above).  But don't make a
868	 real multiply if we didn't have one before.  */
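      /* For example, (plus (mult r 3) r) gives coeff0 = 3, coeff1 = 1 with
	 lhs == rhs == r, so it becomes (mult r 4).  (plus (ashift r 2) r)
	 would give (mult r 5), but since neither operand was already a
	 multiply (had_mult stays 0) that result is thrown away.  */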
869
870 if (! FLOAT_MODE_P (mode))
871 {
872 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
873 rtx lhs = op0, rhs = op1;
874 int had_mult = 0;
875
876 if (GET_CODE (lhs) == NEG)
877 coeff0 = -1, lhs = XEXP (lhs, 0);
878 else if (GET_CODE (lhs) == MULT
879 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
880 {
881 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
882 had_mult = 1;
883 }
884 else if (GET_CODE (lhs) == ASHIFT
885 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
886 && INTVAL (XEXP (lhs, 1)) >= 0
887 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
888 {
889 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
890 lhs = XEXP (lhs, 0);
891 }
892
893 if (GET_CODE (rhs) == NEG)
894 coeff1 = -1, rhs = XEXP (rhs, 0);
895 else if (GET_CODE (rhs) == MULT
896 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
897 {
898 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
899 had_mult = 1;
900 }
901 else if (GET_CODE (rhs) == ASHIFT
902 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
903 && INTVAL (XEXP (rhs, 1)) >= 0
904 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
905 {
906 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
907 rhs = XEXP (rhs, 0);
908 }
909
910 if (rtx_equal_p (lhs, rhs))
911 {
912 tem = simplify_gen_binary (MULT, mode, lhs,
913 GEN_INT (coeff0 + coeff1));
914 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
915 }
916 }
917
918 /* If one of the operands is a PLUS or a MINUS, see if we can
919 simplify this by the associative law.
920 Don't use the associative law for floating point.
921 The inaccuracy makes it nonassociative,
922 and subtle programs can break if operations are associated. */
923
924 if (INTEGRAL_MODE_P (mode)
925 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
926 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
927 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
928 return tem;
929 break;
930
931 case COMPARE:
932#ifdef HAVE_cc0
933 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
934 using cc0, in which case we want to leave it as a COMPARE
935 so we can distinguish it from a register-register-copy.
936
937 In IEEE floating point, x-0 is not the same as x. */
938
939 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
940 || ! FLOAT_MODE_P (mode) || flag_fast_math)
941 && op1 == CONST0_RTX (mode))
942 return op0;
943#else
944 /* Do nothing here. */
945#endif
946 break;
947
948 case MINUS:
949 /* None of these optimizations can be done for IEEE
950 floating point. */
951 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
952 && FLOAT_MODE_P (mode) && ! flag_fast_math)
953 break;
954
955 /* We can't assume x-x is 0 even with non-IEEE floating point,
956 but since it is zero except in very strange circumstances, we
957 will treat it as zero with -ffast-math. */
958 if (rtx_equal_p (op0, op1)
959 && ! side_effects_p (op0)
960 && (! FLOAT_MODE_P (mode) || flag_fast_math))
961 return CONST0_RTX (mode);
962
963 /* Change subtraction from zero into negation. */
964 if (op0 == CONST0_RTX (mode))
965 return gen_rtx_NEG (mode, op1);
966
967 /* (-1 - a) is ~a. */
968 if (op0 == constm1_rtx)
969 return gen_rtx_NOT (mode, op1);
970
971 /* Subtracting 0 has no effect. */
972 if (op1 == CONST0_RTX (mode))
973 return op0;
974
975 /* See if this is something like X * C - X or vice versa or
976 if the multiplication is written as a shift. If so, we can
977 distribute and make a new multiply, shift, or maybe just
978	 have X (if C is 2 in the example above).  But don't make a
979	 real multiply if we didn't have one before.  */
980
981 if (! FLOAT_MODE_P (mode))
982 {
983 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
984 rtx lhs = op0, rhs = op1;
985 int had_mult = 0;
986
987 if (GET_CODE (lhs) == NEG)
988 coeff0 = -1, lhs = XEXP (lhs, 0);
989 else if (GET_CODE (lhs) == MULT
990 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
991 {
992 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
993 had_mult = 1;
994 }
995 else if (GET_CODE (lhs) == ASHIFT
996 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
997 && INTVAL (XEXP (lhs, 1)) >= 0
998 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
999 {
1000 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1001 lhs = XEXP (lhs, 0);
1002 }
1003
1004 if (GET_CODE (rhs) == NEG)
1005 coeff1 = - 1, rhs = XEXP (rhs, 0);
1006 else if (GET_CODE (rhs) == MULT
1007 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1008 {
1009 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1010 had_mult = 1;
1011 }
1012 else if (GET_CODE (rhs) == ASHIFT
1013 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1014 && INTVAL (XEXP (rhs, 1)) >= 0
1015 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1016 {
1017 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1018 rhs = XEXP (rhs, 0);
1019 }
1020
1021 if (rtx_equal_p (lhs, rhs))
1022 {
1023 tem = simplify_gen_binary (MULT, mode, lhs,
1024 GEN_INT (coeff0 - coeff1));
1025 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1026 }
1027 }
1028
1029 /* (a - (-b)) -> (a + b). */
1030 if (GET_CODE (op1) == NEG)
1031 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1032
1033 /* If one of the operands is a PLUS or a MINUS, see if we can
1034 simplify this by the associative law.
1035 Don't use the associative law for floating point.
1036 The inaccuracy makes it nonassociative,
1037 and subtle programs can break if operations are associated. */
1038
1039 if (INTEGRAL_MODE_P (mode)
1040 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1041 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
1042 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1043 return tem;
1044
1045 /* Don't let a relocatable value get a negative coeff. */
1046 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1047 return plus_constant (op0, - INTVAL (op1));
1048
1049 /* (x - (x & y)) -> (x & ~y) */
1050 if (GET_CODE (op1) == AND)
1051 {
1052 if (rtx_equal_p (op0, XEXP (op1, 0)))
1053 return simplify_gen_binary (AND, mode, op0,
1054 gen_rtx_NOT (mode, XEXP (op1, 1)));
1055 if (rtx_equal_p (op0, XEXP (op1, 1)))
1056 return simplify_gen_binary (AND, mode, op0,
1057 gen_rtx_NOT (mode, XEXP (op1, 0)));
1058 }
1059 break;
1060
1061 case MULT:
1062 if (op1 == constm1_rtx)
1063 {
1064 tem = simplify_unary_operation (NEG, mode, op0, mode);
1065
1066 return tem ? tem : gen_rtx_NEG (mode, op0);
1067 }
1068
1069 /* In IEEE floating point, x*0 is not always 0. */
1070 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1071 || ! FLOAT_MODE_P (mode) || flag_fast_math)
1072 && op1 == CONST0_RTX (mode)
1073 && ! side_effects_p (op0))
1074 return op1;
1075
1076 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1077 However, ANSI says we can drop signals,
1078 so we can do this anyway. */
1079 if (op1 == CONST1_RTX (mode))
1080 return op0;
1081
1082 /* Convert multiply by constant power of two into shift unless
1083 we are still generating RTL. This test is a kludge. */
1084 if (GET_CODE (op1) == CONST_INT
1085 && (val = exact_log2 (INTVAL (op1))) >= 0
1086 /* If the mode is larger than the host word size, and the
1087 uppermost bit is set, then this isn't a power of two due
1088 to implicit sign extension. */
1089 && (width <= HOST_BITS_PER_WIDE_INT
1090 || val != HOST_BITS_PER_WIDE_INT - 1)
1091 && ! rtx_equal_function_value_matters)
1092 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
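      /* For instance, (mult r (const_int 8)) becomes
	 (ashift r (const_int 3)) here once RTL generation has finished.  */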
1093
1094 if (GET_CODE (op1) == CONST_DOUBLE
1095 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
1096 {
1097 REAL_VALUE_TYPE d;
1098 jmp_buf handler;
1099 int op1is2, op1ism1;
1100
1101 if (setjmp (handler))
1102 return 0;
1103
1104 set_float_handler (handler);
1105 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1106 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
1107 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
1108 set_float_handler (NULL_PTR);
1109
1110 /* x*2 is x+x and x*(-1) is -x */
1111 if (op1is2 && GET_MODE (op0) == mode)
1112 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1113
1114 else if (op1ism1 && GET_MODE (op0) == mode)
1115 return gen_rtx_NEG (mode, op0);
1116 }
1117 break;
1118
1119 case IOR:
1120 if (op1 == const0_rtx)
1121 return op0;
1122 if (GET_CODE (op1) == CONST_INT
1123 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1124 return op1;
1125 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1126 return op0;
1127 /* A | (~A) -> -1 */
1128 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1129 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1130 && ! side_effects_p (op0)
1131 && GET_MODE_CLASS (mode) != MODE_CC)
1132 return constm1_rtx;
1133 break;
1134
1135 case XOR:
1136 if (op1 == const0_rtx)
1137 return op0;
1138 if (GET_CODE (op1) == CONST_INT
1139 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1140 return gen_rtx_NOT (mode, op0);
1141 if (op0 == op1 && ! side_effects_p (op0)
1142 && GET_MODE_CLASS (mode) != MODE_CC)
1143 return const0_rtx;
1144 break;
1145
1146 case AND:
1147 if (op1 == const0_rtx && ! side_effects_p (op0))
1148 return const0_rtx;
1149 if (GET_CODE (op1) == CONST_INT
1150 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
1151 return op0;
1152 if (op0 == op1 && ! side_effects_p (op0)
1153 && GET_MODE_CLASS (mode) != MODE_CC)
1154 return op0;
1155 /* A & (~A) -> 0 */
1156 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1157 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1158 && ! side_effects_p (op0)
1159 && GET_MODE_CLASS (mode) != MODE_CC)
1160 return const0_rtx;
1161 break;
1162
1163 case UDIV:
1164 /* Convert divide by power of two into shift (divide by 1 handled
1165 below). */
1166 if (GET_CODE (op1) == CONST_INT
1167 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
1168 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1169
1170 /* ... fall through ... */
1171
1172 case DIV:
1173 if (op1 == CONST1_RTX (mode))
1174 return op0;
1175
1176 /* In IEEE floating point, 0/x is not always 0. */
1177 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1178 || ! FLOAT_MODE_P (mode) || flag_fast_math)
1179 && op0 == CONST0_RTX (mode)
1180 && ! side_effects_p (op1))
1181 return op0;
1182
1183#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1184 /* Change division by a constant into multiplication. Only do
1185 this with -ffast-math until an expert says it is safe in
1186 general. */
1187 else if (GET_CODE (op1) == CONST_DOUBLE
1188 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
1189 && op1 != CONST0_RTX (mode)
1190 && flag_fast_math)
1191 {
1192 REAL_VALUE_TYPE d;
1193 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
1194
1195 if (! REAL_VALUES_EQUAL (d, dconst0))
1196 {
1197#if defined (REAL_ARITHMETIC)
1198 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1199 return gen_rtx_MULT (mode, op0,
1200 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1201#else
1202 return
1203 gen_rtx_MULT (mode, op0,
1204 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
1205#endif
1206 }
1207 }
1208#endif
1209 break;
1210
1211 case UMOD:
1212 /* Handle modulus by power of two (mod with 1 handled below). */
1213 if (GET_CODE (op1) == CONST_INT
1214 && exact_log2 (INTVAL (op1)) > 0)
1215 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1216
1217 /* ... fall through ... */
1218
1219 case MOD:
1220 if ((op0 == const0_rtx || op1 == const1_rtx)
1221 && ! side_effects_p (op0) && ! side_effects_p (op1))
1222 return const0_rtx;
1223 break;
1224
1225 case ROTATERT:
1226 case ROTATE:
1227 /* Rotating ~0 always results in ~0. */
1228 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1229 && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
1230 && ! side_effects_p (op1))
1231 return op0;
1232
1233 /* ... fall through ... */
1234
1235 case ASHIFT:
1236 case ASHIFTRT:
1237 case LSHIFTRT:
1238 if (op1 == const0_rtx)
1239 return op0;
1240 if (op0 == const0_rtx && ! side_effects_p (op1))
1241 return op0;
1242 break;
1243
1244 case SMIN:
1245 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1246 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width -1)
1247 && ! side_effects_p (op0))
1248 return op1;
1249 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1250 return op0;
1251 break;
1252
1253 case SMAX:
1254 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
1255 && ((unsigned HOST_WIDE_INT) INTVAL (op1)
1256 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1257 && ! side_effects_p (op0))
1258 return op1;
1259 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1260 return op0;
1261 break;
1262
1263 case UMIN:
1264 if (op1 == const0_rtx && ! side_effects_p (op0))
1265 return op1;
1266 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1267 return op0;
1268 break;
1269
1270 case UMAX:
1271 if (op1 == constm1_rtx && ! side_effects_p (op0))
1272 return op1;
1273 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
1274 return op0;
1275 break;
1276
1277 default:
1278 abort ();
1279 }
1280
1281 return 0;
1282 }
1283
1284 /* Get the integer argument values in two forms:
1285 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1286
1287 arg0 = INTVAL (op0);
1288 arg1 = INTVAL (op1);
1289
1290 if (width < HOST_BITS_PER_WIDE_INT)
1291 {
1292 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1293 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1294
1295 arg0s = arg0;
1296 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1297 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1298
1299 arg1s = arg1;
1300 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1301 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1302 }
1303 else
1304 {
1305 arg0s = arg0;
1306 arg1s = arg1;
1307 }
1308
1309 /* Compute the value of the arithmetic. */
1310
1311 switch (code)
1312 {
1313 case PLUS:
1314 val = arg0s + arg1s;
1315 break;
1316
1317 case MINUS:
1318 val = arg0s - arg1s;
1319 break;
1320
1321 case MULT:
1322 val = arg0s * arg1s;
1323 break;
1324
1325 case DIV:
1326 if (arg1s == 0)
1327 return 0;
1328 val = arg0s / arg1s;
1329 break;
1330
1331 case MOD:
1332 if (arg1s == 0)
1333 return 0;
1334 val = arg0s % arg1s;
1335 break;
1336
1337 case UDIV:
1338 if (arg1 == 0)
1339 return 0;
1340 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1341 break;
1342
1343 case UMOD:
1344 if (arg1 == 0)
1345 return 0;
1346 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1347 break;
1348
1349 case AND:
1350 val = arg0 & arg1;
1351 break;
1352
1353 case IOR:
1354 val = arg0 | arg1;
1355 break;
1356
1357 case XOR:
1358 val = arg0 ^ arg1;
1359 break;
1360
1361 case LSHIFTRT:
1362 /* If shift count is undefined, don't fold it; let the machine do
1363 what it wants. But truncate it if the machine will do that. */
1364 if (arg1 < 0)
1365 return 0;
1366
1367#ifdef SHIFT_COUNT_TRUNCATED
1368 if (SHIFT_COUNT_TRUNCATED)
1369 arg1 %= width;
1370#endif
1371
1372 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1373 break;
1374
1375 case ASHIFT:
1376 if (arg1 < 0)
1377 return 0;
1378
1379#ifdef SHIFT_COUNT_TRUNCATED
1380 if (SHIFT_COUNT_TRUNCATED)
1381 arg1 %= width;
1382#endif
1383
1384 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1385 break;
1386
1387 case ASHIFTRT:
1388 if (arg1 < 0)
1389 return 0;
1390
1391#ifdef SHIFT_COUNT_TRUNCATED
1392 if (SHIFT_COUNT_TRUNCATED)
1393 arg1 %= width;
1394#endif
1395
1396 val = arg0s >> arg1;
1397
1398      /* The bootstrap compiler may not have sign-extended the right shift.
1399	 Manually extend the sign to ensure the bootstrap cc matches gcc.  */
1400 if (arg0s < 0 && arg1 > 0)
1401 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1402
1403 break;
1404
1405 case ROTATERT:
1406 if (arg1 < 0)
1407 return 0;
1408
1409 arg1 %= width;
1410 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1411 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1412 break;
1413
1414 case ROTATE:
1415 if (arg1 < 0)
1416 return 0;
1417
1418 arg1 %= width;
1419 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1420 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1421 break;
1422
1423 case COMPARE:
1424 /* Do nothing here. */
1425 return 0;
1426
1427 case SMIN:
1428 val = arg0s <= arg1s ? arg0s : arg1s;
1429 break;
1430
1431 case UMIN:
1432 val = ((unsigned HOST_WIDE_INT) arg0
1433 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1434 break;
1435
1436 case SMAX:
1437 val = arg0s > arg1s ? arg0s : arg1s;
1438 break;
1439
1440 case UMAX:
1441 val = ((unsigned HOST_WIDE_INT) arg0
1442 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1443 break;
1444
1445 default:
1446 abort ();
1447 }
1448
1449 val = trunc_int_for_mode (val, mode);
1450
1451 return GEN_INT (val);
1452}
1453\f
1454/* Simplify a PLUS or MINUS, at least one of whose operands may be another
1455 PLUS or MINUS.
1456
1457   Rather than test for specific cases, we do this by a brute-force method
1458 and do all possible simplifications until no more changes occur. Then
1459 we rebuild the operation. */
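
   For instance, (plus (minus a b) (plus b c)) is torn apart into the
   operand list a, -b, b, c; the b/-b pair cancels in the pairwise
   simplification loop below, and the result is rebuilt as (plus a c).  */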
1460
1461static rtx
1462simplify_plus_minus (code, mode, op0, op1)
1463 enum rtx_code code;
1464 enum machine_mode mode;
1465 rtx op0, op1;
1466{
1467 rtx ops[8];
1468 int negs[8];
1469 rtx result, tem;
1470 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
1471 int first = 1, negate = 0, changed;
1472 int i, j;
1473
1474 bzero ((char *) ops, sizeof ops);
1475
1476 /* Set up the two operands and then expand them until nothing has been
1477 changed. If we run out of room in our array, give up; this should
1478 almost never happen. */
1479
1480 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
1481
1482 changed = 1;
1483 while (changed)
1484 {
1485 changed = 0;
1486
1487 for (i = 0; i < n_ops; i++)
1488 switch (GET_CODE (ops[i]))
1489 {
1490 case PLUS:
1491 case MINUS:
1492 if (n_ops == 7)
1493 return 0;
1494
1495 ops[n_ops] = XEXP (ops[i], 1);
1496 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
1497 ops[i] = XEXP (ops[i], 0);
1498 input_ops++;
1499 changed = 1;
1500 break;
1501
1502 case NEG:
1503 ops[i] = XEXP (ops[i], 0);
1504 negs[i] = ! negs[i];
1505 changed = 1;
1506 break;
1507
1508 case CONST:
1509 ops[i] = XEXP (ops[i], 0);
1510 input_consts++;
1511 changed = 1;
1512 break;
1513
1514 case NOT:
1515 /* ~a -> (-a - 1) */
1516 if (n_ops != 7)
1517 {
1518 ops[n_ops] = constm1_rtx;
1519 negs[n_ops++] = negs[i];
1520 ops[i] = XEXP (ops[i], 0);
1521 negs[i] = ! negs[i];
1522 changed = 1;
1523 }
1524 break;
1525
1526 case CONST_INT:
1527 if (negs[i])
1528 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
1529 break;
1530
1531 default:
1532 break;
1533 }
1534 }
1535
1536 /* If we only have two operands, we can't do anything. */
1537 if (n_ops <= 2)
1538 return 0;
1539
1540 /* Now simplify each pair of operands until nothing changes. The first
1541 time through just simplify constants against each other. */
1542
1543 changed = 1;
1544 while (changed)
1545 {
1546 changed = first;
1547
1548 for (i = 0; i < n_ops - 1; i++)
1549 for (j = i + 1; j < n_ops; j++)
1550 if (ops[i] != 0 && ops[j] != 0
1551 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
1552 {
1553 rtx lhs = ops[i], rhs = ops[j];
1554 enum rtx_code ncode = PLUS;
1555
1556 if (negs[i] && ! negs[j])
1557 lhs = ops[j], rhs = ops[i], ncode = MINUS;
1558 else if (! negs[i] && negs[j])
1559 ncode = MINUS;
1560
1561 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1562 if (tem)
1563 {
1564 ops[i] = tem, ops[j] = 0;
1565 negs[i] = negs[i] && negs[j];
1566 if (GET_CODE (tem) == NEG)
1567 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
1568
1569 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
1570 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
1571 changed = 1;
1572 }
1573 }
1574
1575 first = 0;
1576 }
1577
1578 /* Pack all the operands to the lower-numbered entries and give up if
1579 we didn't reduce the number of operands we had. Make sure we
1580 count a CONST as two operands. If we have the same number of
1581 operands, but have made more CONSTs than we had, this is also
1582 an improvement, so accept it. */
1583
1584 for (i = 0, j = 0; j < n_ops; j++)
1585 if (ops[j] != 0)
1586 {
1587 ops[i] = ops[j], negs[i++] = negs[j];
1588 if (GET_CODE (ops[j]) == CONST)
1589 n_consts++;
1590 }
1591
1592 if (i + n_consts > input_ops
1593 || (i + n_consts == input_ops && n_consts <= input_consts))
1594 return 0;
1595
1596 n_ops = i;
1597
1598 /* If we have a CONST_INT, put it last. */
1599 for (i = 0; i < n_ops - 1; i++)
1600 if (GET_CODE (ops[i]) == CONST_INT)
1601 {
1602 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
1603 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
1604 }
1605
1606 /* Put a non-negated operand first. If there aren't any, make all
1607 operands positive and negate the whole thing later. */
1608 for (i = 0; i < n_ops && negs[i]; i++)
1609 ;
1610
1611 if (i == n_ops)
1612 {
1613 for (i = 0; i < n_ops; i++)
1614 negs[i] = 0;
1615 negate = 1;
1616 }
1617 else if (i != 0)
1618 {
1619 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
1620 j = negs[0], negs[0] = negs[i], negs[i] = j;
1621 }
1622
1623 /* Now make the result by performing the requested operations. */
1624 result = ops[0];
1625 for (i = 1; i < n_ops; i++)
1626 result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
1627
1628 return negate ? gen_rtx_NEG (mode, result) : result;
1629}
1630
1631struct cfc_args
1632{
1633 rtx op0, op1; /* Input */
1634 int equal, op0lt, op1lt; /* Output */
1635};
1636
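/* Subroutine of simplify_relational_operation, run via do_float_handler
   so that a floating-point trap during the comparison merely abandons the
   fold.  DATA points to a struct cfc_args holding the two CONST_DOUBLE
   operands; the comparison results are stored in its output fields.  */
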
1637static void
1638check_fold_consts (data)
1639 PTR data;
1640{
1641  struct cfc_args *args = (struct cfc_args *) data;
1642 REAL_VALUE_TYPE d0, d1;
1643
1644 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
1645 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
1646 args->equal = REAL_VALUES_EQUAL (d0, d1);
1647 args->op0lt = REAL_VALUES_LESS (d0, d1);
1648 args->op1lt = REAL_VALUES_LESS (d1, d0);
1649}
1650
1651/* Like simplify_binary_operation except used for relational operators.
1652 MODE is the mode of the operands, not that of the result. If MODE
1653 is VOIDmode, both operands must also be VOIDmode and we compare the
1654 operands in "infinite precision".
1655
1656 If no simplification is possible, this function returns zero. Otherwise,
1657 it returns either const_true_rtx or const0_rtx. */
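
/* For example, with CONST_INT operands the comparison folds outright:

     simplify_relational_operation (LT, SImode, GEN_INT (-1), const0_rtx)
       => const_true_rtx
     simplify_relational_operation (LTU, SImode, GEN_INT (-1), const0_rtx)
       => const0_rtx

   since -1 viewed as an unsigned value is the largest value, not less
   than zero.  */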
1658
1659rtx
1660simplify_relational_operation (code, mode, op0, op1)
1661 enum rtx_code code;
1662 enum machine_mode mode;
1663 rtx op0, op1;
1664{
1665 int equal, op0lt, op0ltu, op1lt, op1ltu;
1666 rtx tem;
1667
1668 /* If op0 is a compare, extract the comparison arguments from it. */
1669 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1670 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1671
1672 /* We can't simplify MODE_CC values since we don't know what the
1673 actual comparison is. */
1674 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1675#ifdef HAVE_cc0
1676 || op0 == cc0_rtx
1677#endif
1678 )
1679 return 0;
1680
1681 /* Make sure the constant is second. */
1682 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
1683 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
1684 {
1685 tem = op0, op0 = op1, op1 = tem;
1686 code = swap_condition (code);
1687 }
1688
1689 /* For integer comparisons of A and B maybe we can simplify A - B and can
1690 then simplify a comparison of that with zero. If A and B are both either
1691 a register or a CONST_INT, this can't help; testing for these cases will
1692 prevent infinite recursion here and speed things up.
1693
1694 If CODE is an unsigned comparison, then we can never do this optimization,
1695 because it gives an incorrect result if the subtraction wraps around zero.
1696 ANSI C defines unsigned operations such that they never overflow, and
1697 thus such cases can not be ignored. */
1698
1699 if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
1700 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
1701 && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
1702 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1703 && code != GTU && code != GEU && code != LTU && code != LEU)
1704 return simplify_relational_operation (signed_condition (code),
1705 mode, tem, const0_rtx);
1706
1707 /* For non-IEEE floating-point, if the two operands are equal, we know the
1708 result. */
1709 if (rtx_equal_p (op0, op1)
1710 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1711 || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math))
1712 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1713
1714 /* If the operands are floating-point constants, see if we can fold
1715 the result. */
1716#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1717 else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
1718 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
1719 {
1720 struct cfc_args args;
1721
1722 /* Setup input for check_fold_consts() */
1723 args.op0 = op0;
1724 args.op1 = op1;
1725
1726 if (do_float_handler(check_fold_consts, (PTR) &args) == 0)
1727 /* We got an exception from check_fold_consts() */
1728 return 0;
1729
1730 /* Receive output from check_fold_consts() */
1731 equal = args.equal;
1732 op0lt = op0ltu = args.op0lt;
1733 op1lt = op1ltu = args.op1lt;
1734 }
1735#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1736
1737 /* Otherwise, see if the operands are both integers. */
1738 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
1739 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
1740 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
1741 {
1742 int width = GET_MODE_BITSIZE (mode);
1743 HOST_WIDE_INT l0s, h0s, l1s, h1s;
1744 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
1745
1746 /* Get the two words comprising each integer constant. */
1747 if (GET_CODE (op0) == CONST_DOUBLE)
1748 {
1749 l0u = l0s = CONST_DOUBLE_LOW (op0);
1750 h0u = h0s = CONST_DOUBLE_HIGH (op0);
1751 }
1752 else
1753 {
1754 l0u = l0s = INTVAL (op0);
1755 h0u = h0s = l0s < 0 ? -1 : 0;
1756 }
1757
1758 if (GET_CODE (op1) == CONST_DOUBLE)
1759 {
1760 l1u = l1s = CONST_DOUBLE_LOW (op1);
1761 h1u = h1s = CONST_DOUBLE_HIGH (op1);
1762 }
1763 else
1764 {
1765 l1u = l1s = INTVAL (op1);
1766 h1u = h1s = l1s < 0 ? -1 : 0;
1767 }
1768
1769 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1770 we have to sign or zero-extend the values. */
1771 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
1772 h0u = h1u = 0, h0s = l0s < 0 ? -1 : 0, h1s = l1s < 0 ? -1 : 0;
1773
1774 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
1775 {
1776 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
1777 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
1778
1779 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1780 l0s |= ((HOST_WIDE_INT) (-1) << width);
1781
1782 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1783 l1s |= ((HOST_WIDE_INT) (-1) << width);
1784 }
1785
1786 equal = (h0u == h1u && l0u == l1u);
1787 op0lt = (h0s < h1s || (h0s == h1s && l0s < l1s));
1788 op1lt = (h1s < h0s || (h1s == h0s && l1s < l0s));
1789 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
1790 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
1791 }
1792
1793 /* Otherwise, there are some code-specific tests we can make. */
1794 else
1795 {
1796 switch (code)
1797 {
1798 case EQ:
1799 /* References to the frame plus a constant or labels cannot
1800	 be zero, but a SYMBOL_REF can be, because of #pragma weak.  */
1801 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
1802 || GET_CODE (op0) == LABEL_REF)
1803#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1804 /* On some machines, the ap reg can be 0 sometimes. */
1805 && op0 != arg_pointer_rtx
1806#endif
1807 )
1808 return const0_rtx;
1809 break;
1810
1811 case NE:
1812 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
1813 || GET_CODE (op0) == LABEL_REF)
1814#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1815 && op0 != arg_pointer_rtx
1816#endif
1817 )
1818 return const_true_rtx;
1819 break;
1820
1821 case GEU:
1822 /* Unsigned values are never negative. */
1823 if (op1 == const0_rtx)
1824 return const_true_rtx;
1825 break;
1826
1827 case LTU:
1828 if (op1 == const0_rtx)
1829 return const0_rtx;
1830 break;
1831
1832 case LEU:
1833 /* Unsigned values are never greater than the largest
1834 unsigned value. */
1835 if (GET_CODE (op1) == CONST_INT
1836 && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
1837 && INTEGRAL_MODE_P (mode))
1838 return const_true_rtx;
1839 break;
1840
1841 case GTU:
1842 if (GET_CODE (op1) == CONST_INT
1843 && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
1844 && INTEGRAL_MODE_P (mode))
1845 return const0_rtx;
1846 break;
1847
1848 default:
1849 break;
1850 }
1851
1852 return 0;
1853 }
1854
1855 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
1856 as appropriate. */
1857 switch (code)
1858 {
1859 case EQ:
1860 return equal ? const_true_rtx : const0_rtx;
1861 case NE:
1862 return ! equal ? const_true_rtx : const0_rtx;
1863 case LT:
1864 return op0lt ? const_true_rtx : const0_rtx;
1865 case GT:
1866 return op1lt ? const_true_rtx : const0_rtx;
1867 case LTU:
1868 return op0ltu ? const_true_rtx : const0_rtx;
1869 case GTU:
1870 return op1ltu ? const_true_rtx : const0_rtx;
1871 case LE:
1872 return equal || op0lt ? const_true_rtx : const0_rtx;
1873 case GE:
1874 return equal || op1lt ? const_true_rtx : const0_rtx;
1875 case LEU:
1876 return equal || op0ltu ? const_true_rtx : const0_rtx;
1877 case GEU:
1878 return equal || op1ltu ? const_true_rtx : const0_rtx;
1879 default:
1880 abort ();
1881 }
1882}
1883\f
1884/* Simplify CODE, an operation with result mode MODE and three operands,
1885 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
1886   a constant.  Return 0 if no simplification is possible.  */
1887
1888rtx
1889simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
1890 enum rtx_code code;
1891 enum machine_mode mode, op0_mode;
1892 rtx op0, op1, op2;
1893{
1894  unsigned int width = GET_MODE_BITSIZE (mode);
1895
1896 /* VOIDmode means "infinite" precision. */
1897 if (width == 0)
1898 width = HOST_BITS_PER_WIDE_INT;
1899
1900 switch (code)
1901 {
1902 case SIGN_EXTRACT:
1903 case ZERO_EXTRACT:
1904 if (GET_CODE (op0) == CONST_INT
1905 && GET_CODE (op1) == CONST_INT
1906 && GET_CODE (op2) == CONST_INT
1907 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_BITSIZE (op0_mode)
1908 && width <= HOST_BITS_PER_WIDE_INT)
1909 {
1910 /* Extracting a bit-field from a constant */
1911 HOST_WIDE_INT val = INTVAL (op0);
1912
1913 if (BITS_BIG_ENDIAN)
1914 val >>= (GET_MODE_BITSIZE (op0_mode)
1915 - INTVAL (op2) - INTVAL (op1));
1916 else
1917 val >>= INTVAL (op2);
1918
1919 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
1920 {
1921 /* First zero-extend. */
1922 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
1923 /* If desired, propagate sign bit. */
1924 if (code == SIGN_EXTRACT
1925 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
1926 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
1927 }
1928
1929 /* Clear the bits that don't belong in our mode,
1930 unless they and our sign bit are all one.
1931 So we get either a reasonable negative value or a reasonable
1932 unsigned value for this mode. */
1933 if (width < HOST_BITS_PER_WIDE_INT
1934 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
1935 != ((HOST_WIDE_INT) (-1) << (width - 1))))
1936 val &= ((HOST_WIDE_INT) 1 << width) - 1;
1937
1938 return GEN_INT (val);
1939 }
1940 break;
1941
1942 case IF_THEN_ELSE:
1943 if (GET_CODE (op0) == CONST_INT)
1944 return op0 != const0_rtx ? op1 : op2;
1945
1946      /* Convert a != b ? a : b and a == b ? b : a to "a".  */
1947 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
1948 && rtx_equal_p (XEXP (op0, 0), op1)
1949 && rtx_equal_p (XEXP (op0, 1), op2))
1950 return op1;
1951 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
1952 && rtx_equal_p (XEXP (op0, 1), op1)
1953 && rtx_equal_p (XEXP (op0, 0), op2))
1954 return op2;
1955 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
1956 {
749a2da1
RK
1957 rtx temp
1958 = simplify_relational_operation (GET_CODE (op0), op0_mode,
1959 XEXP (op0, 0), XEXP (op0, 1));
1960
0cedb36c
JL
1961 /* See if any simplifications were possible. */
1962 if (temp == const0_rtx)
1963 return op2;
1964 else if (temp == const1_rtx)
1965 return op1;
1966 }
1967 break;
1968
1969 default:
1970 abort ();
1971 }
1972
1973 return 0;
1974}
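/* Illustrative sketch (fenced off from the build): the constant bit-field
   extraction arithmetic performed in the SIGN_EXTRACT/ZERO_EXTRACT case
   above, for the !BITS_BIG_ENDIAN path.  Extracting a 4-bit field at bit
   position 4 from the constant 0xA5 gives 0xA when zero-extended and -6
   when sign-extended.  The function below is a stand-in that uses plain
   long instead of HOST_WIDE_INT.  */
#if 0
#include <assert.h>

static long
extract_field (long op0, int size, int pos, int sign_extract)
{
  long val = op0 >> pos;		/* Shift the field down to bit 0.  */

  /* First zero-extend.  */
  val &= ((long) 1 << size) - 1;

  /* If desired, propagate the sign bit of the field.  */
  if (sign_extract && (val & ((long) 1 << (size - 1))))
    val |= ~ (((long) 1 << size) - 1);

  return val;
}

int
main (void)
{
  assert (extract_field (0xA5, 4, 4, 0) == 0xA);
  assert (extract_field (0xA5, 4, 4, 1) == -6);
  return 0;
}
#endif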
1975
1976/* Simplify X, an rtx expression.
1977
1978 Return the simplified expression or NULL if no simplifications
1979 were possible.
1980
1981 This is the preferred entry point into the simplification routines;
1982 however, we still allow passes to call the more specific routines.
1983
1984   Right now GCC has three (yes, three) major bodies of RTL simplification
1985 code that need to be unified.
1986
1987 1. fold_rtx in cse.c. This code uses various CSE specific
1988 information to aid in RTL simplification.
1989
1990 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
1991 it uses combine specific information to aid in RTL
1992 simplification.
1993
1994 3. The routines in this file.
1995
1996
1997 Long term we want to only have one body of simplification code; to
1998 get to that state I recommend the following steps:
1999
2000       1. Pore over fold_rtx & simplify_rtx and move any simplifications
2001          which do not depend on pass-specific state into these routines.
2002
2003 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2004 use this routine whenever possible.
2005
2006 3. Allow for pass dependent state to be provided to these
2007 routines and add simplifications based on the pass dependent
2008 state. Remove code from cse.c & combine.c that becomes
2009 redundant/dead.
2010
2011 It will take time, but ultimately the compiler will be easier to
2012   maintain and improve.  It's totally silly that when we add a
2013   simplification it needs to be added to 4 places (3 for RTL
2014   simplification and 1 for tree simplification).  */
2015
2016rtx
2017simplify_rtx (x)
2018 rtx x;
2019{
2020 enum rtx_code code;
2021 enum machine_mode mode;
0cedb36c
JL
2022
2023 mode = GET_MODE (x);
2024 code = GET_CODE (x);
2025
2026 switch (GET_RTX_CLASS (code))
2027 {
2028 case '1':
2029 return simplify_unary_operation (code, mode,
2030 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2031 case '2':
2032 case 'c':
2033 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2034
2035 case '3':
2036 case 'b':
2037 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2038 XEXP (x, 0), XEXP (x, 1), XEXP (x, 2));
2039
2040 case '<':
2041 return simplify_relational_operation (code, GET_MODE (XEXP (x, 0)),
2042 XEXP (x, 0), XEXP (x, 1));
2043 default:
2044 return NULL;
2045 }
2046}
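/* Illustrative sketch (fenced off from the build): how a pass might use
   simplify_rtx to fold a constant expression.  This assumes the usual
   rtl.h/emit-rtl interfaces (gen_rtx_PLUS, GEN_INT, INTVAL) and that RTL
   generation has already been set up by the compiler proper; it is a
   sketch of intended use, not code taken from any pass.  */
#if 0
static void
fold_constant_plus_example (void)
{
  rtx x = gen_rtx_PLUS (SImode, GEN_INT (2), GEN_INT (3));
  rtx y = simplify_rtx (x);

  /* simplify_rtx returns NULL when it cannot improve the expression, so
     callers must be prepared to keep using X.  */
  if (y != 0 && GET_CODE (y) == CONST_INT && INTVAL (y) == 5)
    ;				/* The PLUS was folded to (const_int 5).  */
}
#endif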
eab5c70a 2047\f
eab5c70a
BS
2048
2049/* Allocate a struct elt_list and fill in its two elements with the
2050 arguments. */
749a2da1 2051
eab5c70a
BS
2052static struct elt_list *
2053new_elt_list (next, elt)
2054 struct elt_list *next;
2055 cselib_val *elt;
2056{
2057 struct elt_list *el = empty_elt_lists;
749a2da1 2058
eab5c70a
BS
2059 if (el)
2060 empty_elt_lists = el->next;
2061 else
2062 el = (struct elt_list *) obstack_alloc (&cselib_obstack,
2063 sizeof (struct elt_list));
2064 el->next = next;
2065 el->elt = elt;
2066 return el;
2067}
2068
2069/* Allocate a struct elt_loc_list and fill in its two elements with the
2070 arguments. */
749a2da1 2071
eab5c70a
BS
2072static struct elt_loc_list *
2073new_elt_loc_list (next, loc)
2074 struct elt_loc_list *next;
2075 rtx loc;
2076{
2077 struct elt_loc_list *el = empty_elt_loc_lists;
749a2da1 2078
eab5c70a
BS
2079 if (el)
2080 empty_elt_loc_lists = el->next;
2081 else
2082 el = (struct elt_loc_list *) obstack_alloc (&cselib_obstack,
2083 sizeof (struct elt_loc_list));
2084 el->next = next;
2085 el->loc = loc;
2086 el->setting_insn = cselib_current_insn;
2087 return el;
2088}
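/* Illustrative sketch (fenced off from the build): the allocation pattern
   shared by new_elt_list/new_elt_loc_list and the unchain_* routines
   below.  Freed nodes are pushed onto a per-type free list and reused
   before falling back to fresh storage, so nothing is handed back to the
   allocator until clear_table releases the whole obstack at once.  The
   struct and the malloc fallback are stand-ins for the cselib_obstack
   versions above.  */
#if 0
#include <stdlib.h>

struct node { struct node *next; void *payload; };

static struct node *free_nodes;

static struct node *
node_alloc (void *payload)
{
  struct node *n = free_nodes;

  if (n)
    free_nodes = n->next;	/* Reuse a previously freed node.  */
  else
    n = (struct node *) malloc (sizeof (struct node));

  n->payload = payload;
  n->next = 0;
  return n;
}

static void
node_free (struct node *n)
{
  /* Push the node onto the free list instead of calling free.  */
  n->next = free_nodes;
  free_nodes = n;
}
#endif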
2089
2090/* The elt_list at *PL is no longer needed. Unchain it and free its
2091 storage. */
749a2da1 2092
eab5c70a
BS
2093static void
2094unchain_one_elt_list (pl)
2095 struct elt_list **pl;
2096{
2097 struct elt_list *l = *pl;
749a2da1 2098
eab5c70a
BS
2099 *pl = l->next;
2100 l->next = empty_elt_lists;
2101 empty_elt_lists = l;
2102}
2103
2104/* Likewise for elt_loc_lists. */
749a2da1 2105
eab5c70a
BS
2106static void
2107unchain_one_elt_loc_list (pl)
2108 struct elt_loc_list **pl;
2109{
2110 struct elt_loc_list *l = *pl;
749a2da1 2111
eab5c70a
BS
2112 *pl = l->next;
2113 l->next = empty_elt_loc_lists;
2114 empty_elt_loc_lists = l;
2115}
2116
2117/* Likewise for cselib_vals. This also frees the addr_list associated with
2118 V. */
749a2da1 2119
eab5c70a
BS
2120static void
2121unchain_one_value (v)
2122 cselib_val *v;
2123{
2124 while (v->addr_list)
2125 unchain_one_elt_list (&v->addr_list);
2126
2127 v->u.next_free = empty_vals;
2128 empty_vals = v;
2129}
2130
2131/* Remove all entries from the hash table. Also used during
2132 initialization. */
749a2da1 2133
eab5c70a
BS
2134static void
2135clear_table ()
2136{
749a2da1
RK
2137 unsigned int i;
2138
eab5c70a
BS
2139 for (i = 0; i < cselib_nregs; i++)
2140 REG_VALUES (i) = 0;
2141
2142 htab_empty (hash_table);
2143 obstack_free (&cselib_obstack, cselib_startobj);
2144
2145 empty_vals = 0;
2146 empty_elt_lists = 0;
2147 empty_elt_loc_lists = 0;
2148 n_useless_values = 0;
2149
2150 next_unknown_value = 0;
2151}
2152
2153/* The equality test for our hash table. The first argument ENTRY is a table
2154 element (i.e. a cselib_val), while the second arg X is an rtx. */
749a2da1 2155
eab5c70a
BS
2156static int
2157entry_and_rtx_equal_p (entry, x_arg)
2158 const void *entry, *x_arg;
2159{
2160 struct elt_loc_list *l;
749a2da1
RK
2161 const cselib_val *v = (const cselib_val *) entry;
2162 rtx x = (rtx) x_arg;
eab5c70a
BS
2163
2164 /* We don't guarantee that distinct rtx's have different hash values,
2165 so we need to do a comparison. */
2166 for (l = v->locs; l; l = l->next)
2167 if (rtx_equal_for_cselib_p (l->loc, x))
2168 return 1;
749a2da1 2169
eab5c70a
BS
2170 return 0;
2171}
2172
2173/* The hash function for our hash table. The value is always computed with
2174 hash_rtx when adding an element; this function just extracts the hash
2175 value from a cselib_val structure. */
749a2da1 2176
eab5c70a
BS
2177static unsigned int
2178get_value_hash (entry)
2179 const void *entry;
2180{
e77d72cb 2181 const cselib_val *v = (const cselib_val *) entry;
eab5c70a
BS
2182 return v->value;
2183}
2184
eab5c70a
BS
2185/* Return true if X contains a VALUE rtx.  If ONLY_USELESS is set, we
2186   only return true for values which point to a cselib_val whose locs
2187   list has become empty, which implies the cselib_val will be
2188   removed.  */
749a2da1 2189
eab5c70a
BS
2190int
2191references_value_p (x, only_useless)
2192 rtx x;
2193 int only_useless;
2194{
2195 enum rtx_code code = GET_CODE (x);
2196 const char *fmt = GET_RTX_FORMAT (code);
749a2da1 2197 int i, j;
eab5c70a
BS
2198
2199 if (GET_CODE (x) == VALUE
4d0482f6 2200 && (! only_useless || CSELIB_VAL_PTR (x)->locs == 0))
eab5c70a
BS
2201 return 1;
2202
2203 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2204 {
749a2da1
RK
2205 if (fmt[i] == 'e' && references_value_p (XEXP (x, i), only_useless))
2206 return 1;
eab5c70a 2207 else if (fmt[i] == 'E')
749a2da1
RK
2208 for (j = 0; j < XVECLEN (x, i); j++)
2209 if (references_value_p (XVECEXP (x, i, j), only_useless))
2210 return 1;
eab5c70a
BS
2211 }
2212
2213 return 0;
2214}
2215
eab5c70a
BS
2216/* For all locations found in X, delete locations that reference useless
2217 values (i.e. values without any location). Called through
2218 htab_traverse. */
749a2da1 2219
eab5c70a
BS
2220static int
2221discard_useless_locs (x, info)
2222 void **x;
2223 void *info ATTRIBUTE_UNUSED;
2224{
2225 cselib_val *v = (cselib_val *)*x;
2226 struct elt_loc_list **p = &v->locs;
4d0482f6 2227 int had_locs = v->locs != 0;
eab5c70a
BS
2228
2229 while (*p)
2230 {
2231 if (references_value_p ((*p)->loc, 1))
2232 unchain_one_elt_loc_list (p);
2233 else
2234 p = &(*p)->next;
2235 }
749a2da1 2236
4d0482f6
BS
2237 if (had_locs && v->locs == 0)
2238 {
2239 n_useless_values++;
2240 values_became_useless = 1;
2241 }
eab5c70a
BS
2242 return 1;
2243}
2244
2245/* If X is a value with no locations, remove it from the hashtable. */
2246
2247static int
2248discard_useless_values (x, info)
2249 void **x;
2250 void *info ATTRIBUTE_UNUSED;
2251{
2252 cselib_val *v = (cselib_val *)*x;
2253
4d0482f6 2254 if (v->locs == 0)
eab5c70a
BS
2255 {
2256 htab_clear_slot (hash_table, x);
2257 unchain_one_value (v);
2258 n_useless_values--;
2259 }
749a2da1 2260
eab5c70a
BS
2261 return 1;
2262}
2263
2264/* Clean out useless values (i.e. those which no longer have locations
2265 associated with them) from the hash table. */
749a2da1 2266
eab5c70a
BS
2267static void
2268remove_useless_values ()
2269{
2270  /* First pass: eliminate locations that reference useless values.  That in
2271     turn can make more values useless.  */
2272 do
2273 {
2274 values_became_useless = 0;
2275 htab_traverse (hash_table, discard_useless_locs, 0);
2276 }
2277 while (values_became_useless);
2278
2279 /* Second pass: actually remove the values. */
2280 htab_traverse (hash_table, discard_useless_values, 0);
2281
2282 if (n_useless_values != 0)
2283 abort ();
2284}
2285
2286/* Return nonzero if we can prove that X and Y contain the same value, taking
2287 our gathered information into account. */
749a2da1 2288
eab5c70a
BS
2289int
2290rtx_equal_for_cselib_p (x, y)
2291 rtx x, y;
2292{
2293 enum rtx_code code;
2294 const char *fmt;
2295 int i;
2296
2297 if (GET_CODE (x) == REG || GET_CODE (x) == MEM)
2298 {
2299 cselib_val *e = cselib_lookup (x, VOIDmode, 0);
749a2da1 2300
eab5c70a
BS
2301 if (e)
2302 x = e->u.val_rtx;
2303 }
749a2da1 2304
eab5c70a
BS
2305 if (GET_CODE (y) == REG || GET_CODE (y) == MEM)
2306 {
2307 cselib_val *e = cselib_lookup (y, VOIDmode, 0);
749a2da1 2308
eab5c70a
BS
2309 if (e)
2310 y = e->u.val_rtx;
2311 }
2312
2313 if (x == y)
2314 return 1;
2315
2316 if (GET_CODE (x) == VALUE && GET_CODE (y) == VALUE)
2317 return CSELIB_VAL_PTR (x) == CSELIB_VAL_PTR (y);
2318
2319 if (GET_CODE (x) == VALUE)
2320 {
2321 cselib_val *e = CSELIB_VAL_PTR (x);
2322 struct elt_loc_list *l;
2323
2324 for (l = e->locs; l; l = l->next)
2325 {
2326 rtx t = l->loc;
2327
2328 /* Avoid infinite recursion. */
2329 if (GET_CODE (t) == REG || GET_CODE (t) == MEM)
2330 continue;
749a2da1 2331 else if (rtx_equal_for_cselib_p (t, y))
eab5c70a
BS
2332 return 1;
2333 }
2334
2335 return 0;
2336 }
2337
2338 if (GET_CODE (y) == VALUE)
2339 {
2340 cselib_val *e = CSELIB_VAL_PTR (y);
2341 struct elt_loc_list *l;
2342
2343 for (l = e->locs; l; l = l->next)
2344 {
2345 rtx t = l->loc;
2346
2347 if (GET_CODE (t) == REG || GET_CODE (t) == MEM)
2348 continue;
749a2da1 2349 else if (rtx_equal_for_cselib_p (x, t))
eab5c70a
BS
2350 return 1;
2351 }
2352
2353 return 0;
2354 }
2355
749a2da1 2356 if (GET_CODE (x) != GET_CODE (y) || GET_MODE (x) != GET_MODE (y))
eab5c70a
BS
2357 return 0;
2358
2359 /* This won't be handled correctly by the code below. */
2360 if (GET_CODE (x) == LABEL_REF)
2361 return XEXP (x, 0) == XEXP (y, 0);
2362
2363 code = GET_CODE (x);
2364 fmt = GET_RTX_FORMAT (code);
2365
2366 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2367 {
2368 int j;
749a2da1 2369
eab5c70a
BS
2370 switch (fmt[i])
2371 {
2372 case 'w':
2373 if (XWINT (x, i) != XWINT (y, i))
2374 return 0;
2375 break;
2376
2377 case 'n':
2378 case 'i':
2379 if (XINT (x, i) != XINT (y, i))
2380 return 0;
2381 break;
2382
2383 case 'V':
2384 case 'E':
2385 /* Two vectors must have the same length. */
2386 if (XVECLEN (x, i) != XVECLEN (y, i))
2387 return 0;
2388
2389 /* And the corresponding elements must match. */
2390 for (j = 0; j < XVECLEN (x, i); j++)
2391 if (! rtx_equal_for_cselib_p (XVECEXP (x, i, j),
2392 XVECEXP (y, i, j)))
2393 return 0;
2394 break;
2395
2396 case 'e':
2397 if (! rtx_equal_for_cselib_p (XEXP (x, i), XEXP (y, i)))
2398 return 0;
2399 break;
2400
2401 case 'S':
2402 case 's':
2403 if (strcmp (XSTR (x, i), XSTR (y, i)))
2404 return 0;
2405 break;
2406
2407 case 'u':
2408 /* These are just backpointers, so they don't matter. */
2409 break;
2410
2411 case '0':
2412 case 't':
2413 break;
2414
2415 /* It is believed that rtx's at this level will never
2416 contain anything but integers and other rtx's,
2417 except for within LABEL_REFs and SYMBOL_REFs. */
2418 default:
2419 abort ();
2420 }
2421 }
2422 return 1;
2423}
2424
2425/* Hash an rtx. Return 0 if we couldn't hash the rtx.
2426 For registers and memory locations, we look up their cselib_val structure
2427 and return its VALUE element.
2428   Possible reasons for returning 0 are: the object is volatile, or we couldn't
2429 find a register or memory location in the table and CREATE is zero. If
2430 CREATE is nonzero, table elts are created for regs and mem.
2431 MODE is used in hashing for CONST_INTs only;
2432 otherwise the mode of X is used. */
749a2da1 2433
eab5c70a
BS
2434static unsigned int
2435hash_rtx (x, mode, create)
2436 rtx x;
2437 enum machine_mode mode;
2438 int create;
2439{
2440 cselib_val *e;
2441 int i, j;
22eb7dfa
BS
2442 enum rtx_code code;
2443 const char *fmt;
eab5c70a 2444 unsigned int hash = 0;
eab5c70a
BS
2445
2446 /* repeat is used to turn tail-recursion into iteration. */
2447 repeat:
eab5c70a 2448 code = GET_CODE (x);
22eb7dfa
BS
2449 hash += (unsigned) code + (unsigned) GET_MODE (x);
2450
eab5c70a
BS
2451 switch (code)
2452 {
2453 case MEM:
2454 case REG:
2455 e = cselib_lookup (x, GET_MODE (x), create);
2456 if (! e)
2457 return 0;
749a2da1 2458
22eb7dfa
BS
2459 hash += e->value;
2460 return hash;
eab5c70a
BS
2461
2462 case CONST_INT:
749a2da1
RK
2463 hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + INTVAL (x);
2464 return hash ? hash : CONST_INT;
eab5c70a
BS
2465
2466 case CONST_DOUBLE:
2467 /* This is like the general case, except that it only counts
2468 the integers representing the constant. */
2469 hash += (unsigned) code + (unsigned) GET_MODE (x);
2470 if (GET_MODE (x) != VOIDmode)
2471 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
749a2da1 2472 hash += XWINT (x, i);
eab5c70a
BS
2473 else
2474 hash += ((unsigned) CONST_DOUBLE_LOW (x)
2475 + (unsigned) CONST_DOUBLE_HIGH (x));
2476 return hash ? hash : CONST_DOUBLE;
2477
2478 /* Assume there is only one rtx object for any given label. */
2479 case LABEL_REF:
2480 hash
2481 += ((unsigned) LABEL_REF << 7) + (unsigned long) XEXP (x, 0);
2482 return hash ? hash : LABEL_REF;
2483
2484 case SYMBOL_REF:
2485 hash
2486 += ((unsigned) SYMBOL_REF << 7) + (unsigned long) XSTR (x, 0);
2487 return hash ? hash : SYMBOL_REF;
2488
2489 case PRE_DEC:
2490 case PRE_INC:
2491 case POST_DEC:
2492 case POST_INC:
2493 case PC:
2494 case CC0:
2495 case CALL:
2496 case UNSPEC_VOLATILE:
2497 return 0;
2498
2499 case ASM_OPERANDS:
2500 if (MEM_VOLATILE_P (x))
2501 return 0;
2502
2503 break;
2504
2505 default:
2506 break;
2507 }
2508
2509 i = GET_RTX_LENGTH (code) - 1;
eab5c70a
BS
2510 fmt = GET_RTX_FORMAT (code);
2511 for (; i >= 0; i--)
2512 {
2513 if (fmt[i] == 'e')
2514 {
eab5c70a 2515 rtx tem = XEXP (x, i);
749a2da1 2516 unsigned int tem_hash;
eab5c70a
BS
2517
2518 /* If we are about to do the last recursive call
2519 needed at this level, change it into iteration.
2520 This function is called enough to be worth it. */
2521 if (i == 0)
2522 {
2523 x = tem;
2524 goto repeat;
2525 }
749a2da1 2526
eab5c70a
BS
2527 tem_hash = hash_rtx (tem, 0, create);
2528 if (tem_hash == 0)
2529 return 0;
749a2da1 2530
eab5c70a
BS
2531 hash += tem_hash;
2532 }
2533 else if (fmt[i] == 'E')
2534 for (j = 0; j < XVECLEN (x, i); j++)
2535 {
2536 unsigned int tem_hash = hash_rtx (XVECEXP (x, i, j), 0, create);
749a2da1 2537
eab5c70a
BS
2538 if (tem_hash == 0)
2539 return 0;
749a2da1 2540
eab5c70a
BS
2541 hash += tem_hash;
2542 }
2543 else if (fmt[i] == 's')
2544 {
e77d72cb 2545 const unsigned char *p = (const unsigned char *) XSTR (x, i);
749a2da1 2546
eab5c70a
BS
2547 if (p)
2548 while (*p)
2549 hash += *p++;
2550 }
2551 else if (fmt[i] == 'i')
749a2da1 2552 hash += XINT (x, i);
eab5c70a
BS
2553 else if (fmt[i] == '0' || fmt[i] == 't')
2554 /* unused */;
2555 else
2556 abort ();
2557 }
749a2da1 2558
eab5c70a
BS
2559 return hash ? hash : 1 + GET_CODE (x);
2560}
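/* Illustrative sketch (fenced off from the build): the shape of hash_rtx's
   generic case.  A node's format string says which slots hold sub-nodes
   ('e'), integers ('i') or strings ('s'); the hash of a node is its code
   plus the combined hashes of those slots, with 0 reserved to mean
   "unhashable" so that a volatile operand poisons the whole expression.
   The struct below is a stand-in for rtx and its GET_RTX_FORMAT data.  */
#if 0
struct tree_node
{
  int code;
  const char *format;		/* e.g. "eis" */
  struct tree_node *sub[4];
  int ival[4];
  const char *sval[4];
};

static unsigned int
hash_node (t)
     const struct tree_node *t;
{
  unsigned int hash = (unsigned int) t->code;
  int i;

  for (i = 0; t->format[i]; i++)
    if (t->format[i] == 'e')
      {
	unsigned int sub_hash = hash_node (t->sub[i]);

	if (sub_hash == 0)
	  return 0;		/* An unhashable operand spoils the node.  */

	hash += sub_hash;
      }
    else if (t->format[i] == 'i')
      hash += (unsigned int) t->ival[i];
    else if (t->format[i] == 's')
      {
	const char *p = t->sval[i];

	while (p && *p)
	  hash += (unsigned char) *p++;
      }

  return hash ? hash : 1 + (unsigned int) t->code;
}
#endif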
2561
2562/* Create a new value structure for VALUE and initialize it. The mode of the
2563 value is MODE. */
749a2da1 2564
eab5c70a
BS
2565static cselib_val *
2566new_cselib_val (value, mode)
2567 unsigned int value;
2568 enum machine_mode mode;
2569{
2570 cselib_val *e = empty_vals;
749a2da1 2571
eab5c70a
BS
2572 if (e)
2573 empty_vals = e->u.next_free;
2574 else
2575 e = (cselib_val *) obstack_alloc (&cselib_obstack, sizeof (cselib_val));
749a2da1 2576
eab5c70a
BS
2577 if (value == 0)
2578 abort ();
749a2da1 2579
eab5c70a
BS
2580 e->value = value;
2581 e->u.val_rtx = gen_rtx_VALUE (mode);
2582 CSELIB_VAL_PTR (e->u.val_rtx) = e;
eab5c70a
BS
2583 e->addr_list = 0;
2584 e->locs = 0;
2585 return e;
2586}
2587
2588/* ADDR_ELT is a value that is used as an address.  MEM_ELT is the value that
2589 contains the data at this address. X is a MEM that represents the
2590 value. Update the two value structures to represent this situation. */
749a2da1 2591
eab5c70a
BS
2592static void
2593add_mem_for_addr (addr_elt, mem_elt, x)
2594 cselib_val *addr_elt, *mem_elt;
2595 rtx x;
2596{
2597 rtx new;
2598 struct elt_loc_list *l;
2599
2600 /* Avoid duplicates. */
2601 for (l = mem_elt->locs; l; l = l->next)
2602 if (GET_CODE (l->loc) == MEM
2603 && CSELIB_VAL_PTR (XEXP (l->loc, 0)) == addr_elt)
2604 return;
2605
2606 new = gen_rtx_MEM (GET_MODE (x), addr_elt->u.val_rtx);
eab5c70a
BS
2607 MEM_COPY_ATTRIBUTES (new, x);
2608
bf49b139 2609 addr_elt->addr_list = new_elt_list (addr_elt->addr_list, mem_elt);
eab5c70a
BS
2610 mem_elt->locs = new_elt_loc_list (mem_elt->locs, new);
2611}
2612
2613/* Subroutine of cselib_lookup. Return a value for X, which is a MEM rtx.
2614 If CREATE, make a new one if we haven't seen it before. */
749a2da1 2615
eab5c70a
BS
2616static cselib_val *
2617cselib_lookup_mem (x, create)
2618 rtx x;
2619 int create;
2620{
2621 void **slot;
2622 cselib_val *addr;
2623 cselib_val *mem_elt;
2624 struct elt_list *l;
2625
749a2da1
RK
2626 if (MEM_VOLATILE_P (x) || GET_MODE (x) == BLKmode
2627 || (FLOAT_MODE_P (GET_MODE (x)) && flag_float_store))
eab5c70a
BS
2628 return 0;
2629
2630 /* Look up the value for the address. */
2631 addr = cselib_lookup (XEXP (x, 0), GET_MODE (x), create);
2632 if (! addr)
2633 return 0;
2634
2635 /* Find a value that describes a value of our mode at that address. */
2636 for (l = addr->addr_list; l; l = l->next)
2637 if (GET_MODE (l->elt->u.val_rtx) == GET_MODE (x))
2638 return l->elt;
749a2da1 2639
eab5c70a
BS
2640 if (! create)
2641 return 0;
749a2da1 2642
eab5c70a
BS
2643 mem_elt = new_cselib_val (++next_unknown_value, GET_MODE (x));
2644 add_mem_for_addr (addr, mem_elt, x);
e38992e8 2645 slot = htab_find_slot_with_hash (hash_table, x, mem_elt->value, INSERT);
eab5c70a
BS
2646 *slot = mem_elt;
2647 return mem_elt;
2648}
2649
2650/* Walk rtx X and replace all occurrences of REG and MEM subexpressions
2651 with VALUE expressions. This way, it becomes independent of changes
2652 to registers and memory.
2653 X isn't actually modified; if modifications are needed, new rtl is
2654 allocated. However, the return value can share rtl with X. */
749a2da1 2655
eab5c70a
BS
2656static rtx
2657cselib_subst_to_values (x)
2658 rtx x;
2659{
2660 enum rtx_code code = GET_CODE (x);
2661 const char *fmt = GET_RTX_FORMAT (code);
2662 cselib_val *e;
2663 struct elt_list *l;
2664 rtx copy = x;
2665 int i;
2666
2667 switch (code)
2668 {
2669 case REG:
749a2da1 2670 for (l = REG_VALUES (REGNO (x)); l; l = l->next)
eab5c70a
BS
2671 if (GET_MODE (l->elt->u.val_rtx) == GET_MODE (x))
2672 return l->elt->u.val_rtx;
749a2da1 2673
eab5c70a
BS
2674 abort ();
2675
2676 case MEM:
2677 e = cselib_lookup_mem (x, 0);
2678 if (! e)
2679 abort ();
2680 return e->u.val_rtx;
2681
2682 /* CONST_DOUBLEs must be special-cased here so that we won't try to
2683 look up the CONST_DOUBLE_MEM inside. */
2684 case CONST_DOUBLE:
2685 case CONST_INT:
2686 return x;
2687
2688 default:
2689 break;
2690 }
2691
2692 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2693 {
2694 if (fmt[i] == 'e')
2695 {
2696 rtx t = cselib_subst_to_values (XEXP (x, i));
749a2da1 2697
eab5c70a
BS
2698 if (t != XEXP (x, i) && x == copy)
2699 copy = shallow_copy_rtx (x);
749a2da1 2700
eab5c70a
BS
2701 XEXP (copy, i) = t;
2702 }
2703 else if (fmt[i] == 'E')
2704 {
2705 int j, k;
2706
2707 for (j = 0; j < XVECLEN (x, i); j++)
2708 {
2709 rtx t = cselib_subst_to_values (XVECEXP (x, i, j));
749a2da1 2710
eab5c70a
BS
2711 if (t != XVECEXP (x, i, j) && XVEC (x, i) == XVEC (copy, i))
2712 {
2713 if (x == copy)
2714 copy = shallow_copy_rtx (x);
749a2da1 2715
eab5c70a
BS
2716 XVEC (copy, i) = rtvec_alloc (XVECLEN (x, i));
2717 for (k = 0; k < j; k++)
2718 XVECEXP (copy, i, k) = XVECEXP (x, i, k);
2719 }
749a2da1 2720
eab5c70a
BS
2721 XVECEXP (copy, i, j) = t;
2722 }
2723 }
2724 }
749a2da1 2725
eab5c70a
BS
2726 return copy;
2727}
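/* Illustrative example (made up, not taken from any test case): if the
   current contents of a hypothetical pseudo 60 are tracked as value 7,
   then

     (plus:SI (reg:SI 60) (const_int 4))

   is rewritten by cselib_subst_to_values into

     (plus:SI (value:SI 7) (const_int 4)).

   Because the REG operand changed, a fresh PLUS is allocated via
   shallow_copy_rtx; had nothing inside changed, the original rtx would
   have been returned unmodified.  */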
2728
2729/* Look up the rtl expression X in our tables and return the value it has.
2730 If CREATE is zero, we return NULL if we don't know the value. Otherwise,
2731 we create a new one if possible, using mode MODE if X doesn't have a mode
2732 (i.e. because it's a constant). */
749a2da1 2733
eab5c70a
BS
2734cselib_val *
2735cselib_lookup (x, mode, create)
2736 rtx x;
2737 enum machine_mode mode;
2738 int create;
2739{
2740 void **slot;
2741 cselib_val *e;
2742 unsigned int hashval;
2743
2744 if (GET_MODE (x) != VOIDmode)
2745 mode = GET_MODE (x);
2746
2747 if (GET_CODE (x) == VALUE)
2748 return CSELIB_VAL_PTR (x);
2749
2750 if (GET_CODE (x) == REG)
2751 {
2752 struct elt_list *l;
749a2da1
RK
2753 unsigned int i = REGNO (x);
2754
eab5c70a
BS
2755 for (l = REG_VALUES (i); l; l = l->next)
2756 if (mode == GET_MODE (l->elt->u.val_rtx))
2757 return l->elt;
749a2da1 2758
eab5c70a
BS
2759 if (! create)
2760 return 0;
749a2da1 2761
eab5c70a
BS
2762 e = new_cselib_val (++next_unknown_value, GET_MODE (x));
2763 e->locs = new_elt_loc_list (e->locs, x);
2764 REG_VALUES (i) = new_elt_list (REG_VALUES (i), e);
e38992e8 2765 slot = htab_find_slot_with_hash (hash_table, x, e->value, INSERT);
eab5c70a
BS
2766 *slot = e;
2767 return e;
2768 }
2769
2770 if (GET_CODE (x) == MEM)
2771 return cselib_lookup_mem (x, create);
2772
2773 hashval = hash_rtx (x, mode, create);
2774 /* Can't even create if hashing is not possible. */
2775 if (! hashval)
2776 return 0;
2777
e38992e8
RK
2778 slot = htab_find_slot_with_hash (hash_table, x, hashval,
2779 create ? INSERT : NO_INSERT);
eab5c70a
BS
2780 if (slot == 0)
2781 return 0;
749a2da1 2782
eab5c70a
BS
2783 e = (cselib_val *) *slot;
2784 if (e)
2785 return e;
2786
2787 e = new_cselib_val (hashval, mode);
749a2da1 2788
22eb7dfa
BS
2789 /* We have to fill the slot before calling cselib_subst_to_values:
2790 the hash table is inconsistent until we do so, and
2791 cselib_subst_to_values will need to do lookups. */
eab5c70a 2792 *slot = (void *) e;
22eb7dfa 2793 e->locs = new_elt_loc_list (e->locs, cselib_subst_to_values (x));
eab5c70a
BS
2794 return e;
2795}
2796
2797/* Invalidate any entries in reg_values that overlap REGNO. This is called
2798 if REGNO is changing. MODE is the mode of the assignment to REGNO, which
2799 is used to determine how many hard registers are being changed. If MODE
2800 is VOIDmode, then only REGNO is being changed; this is used when
2801 invalidating call clobbered registers across a call. */
770ae6cc 2802
eab5c70a
BS
2803static void
2804cselib_invalidate_regno (regno, mode)
770ae6cc 2805 unsigned int regno;
eab5c70a
BS
2806 enum machine_mode mode;
2807{
770ae6cc
RK
2808 unsigned int endregno;
2809 unsigned int i;
eab5c70a
BS
2810
2811 /* If we see pseudos after reload, something is _wrong_. */
2812 if (reload_completed && regno >= FIRST_PSEUDO_REGISTER
2813 && reg_renumber[regno] >= 0)
2814 abort ();
2815
2816 /* Determine the range of registers that must be invalidated. For
2817 pseudos, only REGNO is affected. For hard regs, we must take MODE
2818 into account, and we must also invalidate lower register numbers
2819 if they contain values that overlap REGNO. */
2820 endregno = regno + 1;
2821 if (regno < FIRST_PSEUDO_REGISTER && mode != VOIDmode)
2822 endregno = regno + HARD_REGNO_NREGS (regno, mode);
2823
2824 for (i = 0; i < endregno; i++)
2825 {
2826 struct elt_list **l = &REG_VALUES (i);
2827
2828 /* Go through all known values for this reg; if it overlaps the range
2829 we're invalidating, remove the value. */
2830 while (*l)
2831 {
2832 cselib_val *v = (*l)->elt;
2833 struct elt_loc_list **p;
770ae6cc 2834 unsigned int this_last = i;
eab5c70a
BS
2835
2836 if (i < FIRST_PSEUDO_REGISTER)
2837 this_last += HARD_REGNO_NREGS (i, GET_MODE (v->u.val_rtx)) - 1;
770ae6cc 2838
eab5c70a
BS
2839 if (this_last < regno)
2840 {
2841 l = &(*l)->next;
2842 continue;
2843 }
770ae6cc 2844
eab5c70a
BS
2845 /* We have an overlap. */
2846 unchain_one_elt_list (l);
2847
2848 /* Now, we clear the mapping from value to reg. It must exist, so
2849 this code will crash intentionally if it doesn't. */
2850 for (p = &v->locs; ; p = &(*p)->next)
2851 {
2852 rtx x = (*p)->loc;
770ae6cc 2853
eab5c70a
BS
2854 if (GET_CODE (x) == REG && REGNO (x) == i)
2855 {
2856 unchain_one_elt_loc_list (p);
2857 break;
2858 }
2859 }
4d0482f6
BS
2860 if (v->locs == 0)
2861 n_useless_values++;
eab5c70a
BS
2862 }
2863 }
2864}
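/* Illustrative sketch (fenced off from the build): the range computed
   above.  On a hypothetical target where HARD_REGNO_NREGS says a DImode
   value occupies two consecutive hard registers, a DImode store to hard
   reg 4 gives endregno == 6, so values in regs 4 and 5 are dropped; and
   because the loop scans from reg 0, a DImode value previously recorded
   in reg 3 (spanning regs 3-4, this_last == 4) overlaps the range and is
   dropped as well.  */
#if 0
static void
invalidate_range_example (void)
{
  unsigned int regno = 4;
  unsigned int nregs = 2;	/* stand-in for HARD_REGNO_NREGS (4, DImode) */
  unsigned int endregno = regno + nregs;	/* regs 4 and 5 change */

  unsigned int old_first = 3;	/* a DImode value recorded in reg 3...  */
  unsigned int old_last = old_first + nregs - 1;	/* ...reaches reg 4, */

  if (old_last >= regno && old_first < endregno)
    ;				/* so it overlaps and must be invalidated.  */
}
#endif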
2865
2866/* The memory at address MEM_BASE is being changed.
2867 Return whether this change will invalidate VAL. */
749a2da1 2868
eab5c70a
BS
2869static int
2870cselib_mem_conflict_p (mem_base, val)
2871 rtx mem_base;
2872 rtx val;
2873{
2874 enum rtx_code code;
2875 const char *fmt;
749a2da1 2876 int i, j;
eab5c70a
BS
2877
2878 code = GET_CODE (val);
2879 switch (code)
2880 {
2881 /* Get rid of a few simple cases quickly. */
2882 case REG:
2883 case PC:
2884 case CC0:
2885 case SCRATCH:
2886 case CONST:
2887 case CONST_INT:
2888 case CONST_DOUBLE:
2889 case SYMBOL_REF:
2890 case LABEL_REF:
2891 return 0;
2892
2893 case MEM:
2894 if (GET_MODE (mem_base) == BLKmode
749a2da1
RK
2895 || GET_MODE (val) == BLKmode
2896 || anti_dependence (val, mem_base))
eab5c70a 2897 return 1;
749a2da1 2898
eab5c70a
BS
2899 /* The address may contain nested MEMs. */
2900 break;
2901
2902 default:
2903 break;
2904 }
2905
2906 fmt = GET_RTX_FORMAT (code);
eab5c70a
BS
2907 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2908 {
2909 if (fmt[i] == 'e')
2910 {
2911 if (cselib_mem_conflict_p (mem_base, XEXP (val, i)))
2912 return 1;
2913 }
2914 else if (fmt[i] == 'E')
749a2da1
RK
2915 for (j = 0; j < XVECLEN (val, i); j++)
2916 if (cselib_mem_conflict_p (mem_base, XVECEXP (val, i, j)))
2917 return 1;
eab5c70a
BS
2918 }
2919
2920 return 0;
2921}
2922
2923/* For the value found in SLOT, walk its locations to determine if any overlap
2924 INFO (which is a MEM rtx). */
749a2da1 2925
eab5c70a
BS
2926static int
2927cselib_invalidate_mem_1 (slot, info)
2928 void **slot;
2929 void *info;
2930{
2931 cselib_val *v = (cselib_val *) *slot;
2932 rtx mem_rtx = (rtx) info;
2933 struct elt_loc_list **p = &v->locs;
4d0482f6 2934 int had_locs = v->locs != 0;
eab5c70a
BS
2935
2936 while (*p)
2937 {
749a2da1 2938 rtx x = (*p)->loc;
eab5c70a
BS
2939 cselib_val *addr;
2940 struct elt_list **mem_chain;
eab5c70a
BS
2941
2942 /* MEMs may occur in locations only at the top level; below
2943 that every MEM or REG is substituted by its VALUE. */
2944 if (GET_CODE (x) != MEM
2945 || ! cselib_mem_conflict_p (mem_rtx, x))
2946 {
2947 p = &(*p)->next;
2948 continue;
2949 }
2950
2951 /* This one overlaps. */
2952 /* We must have a mapping from this MEM's address to the
2953 value (E). Remove that, too. */
2954 addr = cselib_lookup (XEXP (x, 0), VOIDmode, 0);
2955 mem_chain = &addr->addr_list;
2956 for (;;)
2957 {
2958 if ((*mem_chain)->elt == v)
2959 {
2960 unchain_one_elt_list (mem_chain);
2961 break;
2962 }
749a2da1 2963
eab5c70a
BS
2964 mem_chain = &(*mem_chain)->next;
2965 }
749a2da1 2966
eab5c70a
BS
2967 unchain_one_elt_loc_list (p);
2968 }
749a2da1 2969
4d0482f6
BS
2970 if (had_locs && v->locs == 0)
2971 n_useless_values++;
2972
eab5c70a
BS
2973 return 1;
2974}
2975
2976/* Invalidate any locations in the table which are changed because of a
2977 store to MEM_RTX. If this is called because of a non-const call
2978 instruction, MEM_RTX is (mem:BLK const0_rtx). */
749a2da1 2979
eab5c70a
BS
2980static void
2981cselib_invalidate_mem (mem_rtx)
2982 rtx mem_rtx;
2983{
2984 htab_traverse (hash_table, cselib_invalidate_mem_1, mem_rtx);
2985}
2986
2987/* Invalidate DEST, which is being assigned to or clobbered. The second and
2988 the third parameter exist so that this function can be passed to
2989 note_stores; they are ignored. */
749a2da1 2990
eab5c70a
BS
2991static void
2992cselib_invalidate_rtx (dest, ignore, data)
2993 rtx dest;
2994 rtx ignore ATTRIBUTE_UNUSED;
2995 void *data ATTRIBUTE_UNUSED;
2996{
749a2da1
RK
2997 while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SIGN_EXTRACT
2998 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG)
eab5c70a
BS
2999 dest = XEXP (dest, 0);
3000
3001 if (GET_CODE (dest) == REG)
3002 cselib_invalidate_regno (REGNO (dest), GET_MODE (dest));
3003 else if (GET_CODE (dest) == MEM)
3004 cselib_invalidate_mem (dest);
3005
3006 /* Some machines don't define AUTO_INC_DEC, but they still use push
3007 instructions. We need to catch that case here in order to
3008 invalidate the stack pointer correctly. Note that invalidating
3009 the stack pointer is different from invalidating DEST. */
3010 if (push_operand (dest, GET_MODE (dest)))
3011 cselib_invalidate_rtx (stack_pointer_rtx, NULL_RTX, NULL);
3012}
3013
3014/* Record the result of a SET instruction. DEST is being set; the source
3015 contains the value described by SRC_ELT. If DEST is a MEM, DEST_ADDR_ELT
3016 describes its address. */
770ae6cc 3017
eab5c70a
BS
3018static void
3019cselib_record_set (dest, src_elt, dest_addr_elt)
3020 rtx dest;
3021 cselib_val *src_elt, *dest_addr_elt;
3022{
770ae6cc 3023 int dreg = GET_CODE (dest) == REG ? (int) REGNO (dest) : -1;
eab5c70a
BS
3024
3025 if (src_elt == 0 || side_effects_p (dest))
3026 return;
3027
3028 if (dreg >= 0)
3029 {
3030 REG_VALUES (dreg) = new_elt_list (REG_VALUES (dreg), src_elt);
4d0482f6
BS
3031 if (src_elt->locs == 0)
3032 n_useless_values--;
eab5c70a
BS
3033 src_elt->locs = new_elt_loc_list (src_elt->locs, dest);
3034 }
3035 else if (GET_CODE (dest) == MEM && dest_addr_elt != 0)
4d0482f6
BS
3036 {
3037 if (src_elt->locs == 0)
3038 n_useless_values--;
3039 add_mem_for_addr (dest_addr_elt, src_elt, dest);
3040 }
eab5c70a
BS
3041}
3042
3043/* Describe a single set that is part of an insn. */
3044struct set
3045{
3046 rtx src;
3047 rtx dest;
3048 cselib_val *src_elt;
3049 cselib_val *dest_addr_elt;
3050};
3051
3052/* There is no good way to determine how many elements there can be
3053 in a PARALLEL. Since it's fairly cheap, use a really large number. */
3054#define MAX_SETS (FIRST_PSEUDO_REGISTER * 2)
3055
3056/* Record the effects of any sets in INSN. */
3057static void
3058cselib_record_sets (insn)
3059 rtx insn;
3060{
3061 int n_sets = 0;
3062 int i;
3063 struct set sets[MAX_SETS];
3064 rtx body = PATTERN (insn);
3065
3066
3067 /* Find all sets. */
3068 if (GET_CODE (body) == SET)
3069 {
3070 sets[0].src = SET_SRC (body);
3071 sets[0].dest = SET_DEST (body);
3072 n_sets = 1;
3073 }
3074 else if (GET_CODE (body) == PARALLEL)
3075 {
3076      /* Look through the PARALLEL and record the values being
3077	 set, if possible; CLOBBERs are dealt with by note_stores below.  */
3078 for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
3079 {
3080 rtx x = XVECEXP (body, 0, i);
3081
3082 if (GET_CODE (x) == SET)
3083 {
3084 sets[n_sets].src = SET_SRC (x);
3085 sets[n_sets].dest = SET_DEST (x);
3086 n_sets++;
3087 }
3088 }
3089 }
3090
3091 /* Look up the values that are read. Do this before invalidating the
3092 locations that are written. */
3093 for (i = 0; i < n_sets; i++)
3094 {
3095 sets[i].src_elt = cselib_lookup (sets[i].src, GET_MODE (sets[i].dest),
3096 1);
3097 if (GET_CODE (sets[i].dest) == MEM)
3098 sets[i].dest_addr_elt = cselib_lookup (XEXP (sets[i].dest, 0), Pmode,
3099 1);
3100 else
3101 sets[i].dest_addr_elt = 0;
3102 }
3103
3104 /* Invalidate all locations written by this insn. Note that the elts we
3105 looked up in the previous loop aren't affected, just some of their
3106 locations may go away. */
3107 note_stores (body, cselib_invalidate_rtx, NULL);
3108
3109 /* Now enter the equivalences in our tables. */
3110 for (i = 0; i < n_sets; i++)
3111 cselib_record_set (sets[i].dest, sets[i].src_elt, sets[i].dest_addr_elt);
3112}
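/* Illustrative example (a made-up insn pattern, not from any port): for a
   PARALLEL such as

     (parallel [(set (reg:SI 3) (plus:SI (reg:SI 3) (reg:SI 4)))
		(clobber (reg:SI 17))])

   the loop above records only the SET in sets[]; the CLOBBER contributes
   nothing here and is instead handled by note_stores invoking
   cselib_invalidate_rtx on its operand.  */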
3113
3114/* Record the effects of INSN. */
749a2da1 3115
eab5c70a
BS
3116void
3117cselib_process_insn (insn)
3118 rtx insn;
3119{
3120 int i;
749a2da1 3121 rtx x;
eab5c70a
BS
3122
3123 cselib_current_insn = insn;
3124
3125 /* Forget everything at a CODE_LABEL, a volatile asm, or a setjmp. */
3126 if (GET_CODE (insn) == CODE_LABEL
3127 || (GET_CODE (insn) == NOTE
3128 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
3129 || (GET_CODE (insn) == INSN
3130 && GET_CODE (PATTERN (insn)) == ASM_OPERANDS
3131 && MEM_VOLATILE_P (PATTERN (insn))))
3132 {
3133 clear_table ();
3134 return;
3135 }
3136
3137 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
3138 {
3139 cselib_current_insn = 0;
3140 return;
3141 }
749a2da1 3142
eab5c70a
BS
3143 /* If this is a call instruction, forget anything stored in a
3144 call clobbered register, or, if this is not a const call, in
3145 memory. */
3146 if (GET_CODE (insn) == CALL_INSN)
3147 {
3148 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3149 if (call_used_regs[i])
3150 cselib_invalidate_regno (i, VOIDmode);
3151
3152 if (! CONST_CALL_P (insn))
3153 cselib_invalidate_mem (callmem);
3154 }
3155
3156 cselib_record_sets (insn);
3157
3158#ifdef AUTO_INC_DEC
3159 /* Clobber any registers which appear in REG_INC notes. We
3160 could keep track of the changes to their values, but it is
3161 unlikely to help. */
749a2da1
RK
3162 for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
3163 if (REG_NOTE_KIND (x) == REG_INC)
3164 cselib_invalidate_rtx (XEXP (x, 0), NULL_RTX, NULL);
eab5c70a
BS
3165#endif
3166
3167 /* Look for any CLOBBERs in CALL_INSN_FUNCTION_USAGE, but only
3168 after we have processed the insn. */
3169 if (GET_CODE (insn) == CALL_INSN)
749a2da1
RK
3170 for (x = CALL_INSN_FUNCTION_USAGE (insn); x; x = XEXP (x, 1))
3171 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
3172 cselib_invalidate_rtx (XEXP (XEXP (x, 0), 0), NULL_RTX, NULL);
eab5c70a
BS
3173
3174 cselib_current_insn = 0;
3175
3176 if (n_useless_values > MAX_USELESS_VALUES)
3177 remove_useless_values ();
3178}
3179
3180/* Make sure our varrays are big enough. Not called from any cselib routines;
3181 it must be called by the user if it allocated new registers. */
749a2da1 3182
eab5c70a
BS
3183void
3184cselib_update_varray_sizes ()
3185{
749a2da1
RK
3186 unsigned int nregs = max_reg_num ();
3187
eab5c70a
BS
3188 if (nregs == cselib_nregs)
3189 return;
749a2da1 3190
eab5c70a
BS
3191 cselib_nregs = nregs;
3192 VARRAY_GROW (reg_values, nregs);
3193}
3194
3195/* Initialize cselib for one pass. The caller must also call
3196 init_alias_analysis. */
749a2da1 3197
eab5c70a
BS
3198void
3199cselib_init ()
3200{
3201 /* These are only created once. */
3202 if (! callmem)
3203 {
3204 extern struct obstack permanent_obstack;
749a2da1 3205
eab5c70a
BS
3206 gcc_obstack_init (&cselib_obstack);
3207 cselib_startobj = obstack_alloc (&cselib_obstack, 0);
3208
3209 push_obstacks (&permanent_obstack, &permanent_obstack);
3210 callmem = gen_rtx_MEM (BLKmode, const0_rtx);
3211 pop_obstacks ();
3212 ggc_add_rtx_root (&callmem, 1);
3213 }
3214
3215 cselib_nregs = max_reg_num ();
3216 VARRAY_ELT_LIST_INIT (reg_values, cselib_nregs, "reg_values");
3217 hash_table = htab_create (31, get_value_hash, entry_and_rtx_equal_p, NULL);
3218 clear_table ();
3219}
3220
3221/* Called when the current user is done with cselib. */
749a2da1 3222
eab5c70a
BS
3223void
3224cselib_finish ()
3225{
3226 clear_table ();
3227 htab_delete (hash_table);
3228}
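/* Illustrative sketch (fenced off from the build): the intended lifecycle
   of this interface, as a client pass might use it.  Only the cselib_*
   entry points and rtx_equal_for_cselib_p come from this file; the insn
   walk, the A/B query and the assumption that init_alias_analysis has
   already been run are all part of the hypothetical caller.  */
#if 0
static void
cselib_client_example (first_insn, a, b)
     rtx first_insn, a, b;
{
  rtx insn;

  cselib_init ();

  for (insn = first_insn; insn; insn = NEXT_INSN (insn))
    {
      cselib_process_insn (insn);

      /* After processing INSN the tables reflect everything up to and
	 including it, so equivalence queries are meaningful here.  */
      if (rtx_equal_for_cselib_p (a, b))
	;			/* A and B are known to hold the same value.  */
    }

  cselib_finish ();
}
#endif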