/* Common subexpression elimination for GNU compiler.
   Copyright (C) 1987, 88, 89, 92-6, 1997 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */


#include "config.h"
/* Must precede rtl.h for FFS.  */
#include <stdio.h>

#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"

#include <setjmp.h>

/* The basic idea of common subexpression elimination is to go
   through the code, keeping a record of expressions that would
   have the same value at the current scan point, and replacing
   expressions encountered with the cheapest equivalent expression.

   It is too complicated to keep track of the different possibilities
   when control paths merge; so, at each label, we forget all that is
   known and start fresh.  This can be described as processing each
   basic block separately.  Note, however, that these are not quite
   the same as the basic blocks found by a later pass and used for
   data flow analysis and register packing.  We do not need to start fresh
   after a conditional jump instruction if there is no label there.

   We use two data structures to record the equivalent expressions:
   a hash table for most expressions, and several vectors together
   with "quantity numbers" to record equivalent (pseudo) registers.

   The use of the special data structure for registers is desirable
   because it is faster.  It is possible because register references
   contain a fairly small number, the register number, taken from
   a contiguously allocated series, and two register references are
   identical if they have the same number.  General expressions
   do not have any such thing, so the only way to retrieve the
   information recorded on an expression other than a register
   is to keep it in a hash table.

   Registers and "quantity numbers":

   At the start of each basic block, all of the (hardware and pseudo)
   registers used in the function are given distinct quantity
   numbers to indicate their contents.  During scan, when the code
   copies one register into another, we copy the quantity number.
   When a register is loaded in any other way, we allocate a new
   quantity number to describe the value generated by this operation.
   `reg_qty' records what quantity a register is currently thought
   of as containing.

   All real quantity numbers are greater than or equal to `max_reg'.
   If register N has not been assigned a quantity, reg_qty[N] will equal N.

   Quantity numbers below `max_reg' do not exist and none of the `qty_...'
   variables should be referenced with an index below `max_reg'.

   We also maintain a bidirectional chain of registers for each
   quantity number.  `qty_first_reg', `qty_last_reg',
   `reg_next_eqv' and `reg_prev_eqv' hold these chains.

   The first register in a chain is the one whose lifespan is least local.
   Among equals, it is the one that was seen first.
   We replace any equivalent register with that one.

   If two registers have the same quantity number, REG expressions in
   the quantity's mode (`qty_mode') must be in the hash table for both
   registers and must be in the same class.

   The converse is not true.  Since hard registers may be referenced in
   any mode, two REG expressions might be equivalent in the hash table
   but not have the same quantity number if the mode recorded for the
   quantity of one of the registers differs from the mode of those
   expressions.

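   As an illustrative trace (the register and quantity numbers here are
   hypothetical), suppose max_reg is 100, so real quantity numbers start
   at 100:

        (set (reg 60) (mem ...))    reg 60 gets a new quantity, 100
        (set (reg 61) (reg 60))     the quantity is copied: reg_qty[61] = 100

   After the copy, registers 60 and 61 are linked on quantity 100's chain
   through `reg_next_eqv'/`reg_prev_eqv', while any register N not yet
   assigned in this block still has reg_qty[N] == N.
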
   Constants and quantity numbers

   When a quantity has a known constant value, that value is stored
   in the appropriate element of qty_const.  This is in addition to
   putting the constant in the hash table as is usual for non-regs.

   Whether a reg or a constant is preferred is determined by the configuration
   macro CONST_COSTS and will often depend on the constant value.  In any
   event, expressions containing constants can be simplified by fold_rtx.

   When a quantity has a known nearly constant value (such as an address
   of a stack slot), that value is stored in the appropriate element
   of qty_const.

   Integer constants don't have a machine mode.  However, cse
   determines the intended machine mode from the destination
   of the instruction that moves the constant.  The machine mode
   is recorded in the hash table along with the actual RTL
   constant expression so that different modes are kept separate.

   Other expressions:

   To record known equivalences among expressions in general
   we use a hash table called `table'.  It has a fixed number of buckets
   that contain chains of `struct table_elt' elements for expressions.
   These chains connect the elements whose expressions have the same
   hash codes.

   Other chains through the same elements connect the elements which
   currently have equivalent values.

   Register references in an expression are canonicalized before hashing
   the expression.  This is done using `reg_qty' and `qty_first_reg'.
   The hash code of a register reference is computed using the quantity
   number, not the register number.
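
   For example (illustrative), if registers 60 and 61 currently share a
   quantity, then (plus (reg 60) (const_int 4)) and
   (plus (reg 61) (const_int 4)) hash identically and can be recognized
   as equivalent, since each register reference contributes only its
   quantity number to the hash code.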

   When the value of an expression changes, it is necessary to remove from the
   hash table not just that expression but all expressions whose values
   could be different as a result.

   1. If the value changing is in memory, except in special cases
   ANYTHING referring to memory could be changed.  That is because
   nobody knows where a pointer does not point.
   The function `invalidate_memory' removes what is necessary.

   The special cases are when the address is constant or is
   a constant plus a fixed register such as the frame pointer
   or a static chain pointer.  When such addresses are stored in,
   we can tell exactly which other such addresses must be invalidated
   due to overlap.  `invalidate' does this.
   All expressions that refer to non-constant
   memory addresses are also invalidated.  `invalidate_memory' does this.

   2. If the value changing is a register, all expressions
   containing references to that register, and only those,
   must be removed.

   Because searching the entire hash table for expressions that contain
   a register is very slow, we try to figure out when it isn't necessary.
   Precisely, this is necessary only when expressions have been
   entered in the hash table using this register, and then the value has
   changed, and then another expression wants to be added to refer to
   the register's new value.  This sequence of circumstances is rare
   within any one basic block.

   The vectors `reg_tick' and `reg_in_table' are used to detect this case.
   reg_tick[i] is incremented whenever a value is stored in register i.
   reg_in_table[i] holds -1 if no references to register i have been
   entered in the table; otherwise, it contains the value reg_tick[i] had
   when the references were entered.  If we want to enter a reference
   and reg_in_table[i] != reg_tick[i], we must scan and remove old references.
   Until we want to enter a new entry, the mere fact that the two vectors
   don't match causes the entries to be ignored if anyone tries to match them.
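
   For example (an illustrative sequence): if reg_tick[5] is 3 when
   (plus (reg 5) (const_int 1)) is entered in the table, reg_in_table[5]
   becomes 3.  A later store into reg 5 bumps reg_tick[5] to 4; the stale
   entry is left in place but can no longer match.  Only when a new
   reference to reg 5 is about to be entered do we notice
   reg_in_table[5] != reg_tick[5] and scan the table to remove the old
   references.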

   Registers themselves are entered in the hash table as well as in
   the equivalent-register chains.  However, the vectors `reg_tick'
   and `reg_in_table' do not apply to expressions which are simple
   register references.  These expressions are removed from the table
   immediately when they become invalid, and this can be done even if
   we do not immediately search for all the expressions that refer to
   the register.

   A CLOBBER rtx in an instruction invalidates its operand for further
   reuse.  A CLOBBER or SET rtx whose operand is a MEM:BLK
   invalidates everything that resides in memory.

   Related expressions:

   Constant expressions that differ only by an additive integer
   are called related.  When a constant expression is put in
   the table, the related expression with no constant term
   is also entered.  These are made to point at each other
   so that it is possible to find out if there exists any
   register equivalent to an expression related to a given expression.  */

/* One plus largest register number used in this function.  */

static int max_reg;

/* Length of vectors indexed by quantity number.
   We know in advance we will not need a quantity number this big.  */

static int max_qty;

/* Next quantity number to be allocated.
   This is 1 + the largest number needed so far.  */

static int next_qty;

/* Indexed by quantity number, gives the first (or last) (pseudo) register
   in the chain of registers that currently contain this quantity.  */

static int *qty_first_reg;
static int *qty_last_reg;

/* Indexed by quantity number, gives the mode of the quantity.  */

static enum machine_mode *qty_mode;

/* Indexed by quantity number, gives the rtx of the constant value of the
   quantity, or zero if it does not have a known value.
   A sum of the frame pointer (or arg pointer) plus a constant
   can also be entered here.  */

static rtx *qty_const;

/* Indexed by qty number, gives the insn that stored the constant value
   recorded in `qty_const'.  */

static rtx *qty_const_insn;

/* The next three variables are used to track when a comparison between a
   quantity and some constant or register has been passed.  In that case, we
   know the results of the comparison in case we see it again.  These variables
   record a comparison that is known to be true.  */

/* Indexed by qty number, gives the rtx code of a comparison with a known
   result involving this quantity.  If none, it is UNKNOWN.  */
static enum rtx_code *qty_comparison_code;

/* Indexed by qty number, gives the constant being compared against in a
   comparison of known result.  If no such comparison, it is undefined.
   If the comparison is not with a constant, it is zero.  */

static rtx *qty_comparison_const;

/* Indexed by qty number, gives the quantity being compared against in a
   comparison of known result.  If no such comparison, it is undefined.
   If the comparison is not with a register, it is -1.  */

static int *qty_comparison_qty;

#ifdef HAVE_cc0
/* For machines that have a CC0, we do not record its value in the hash
   table since its use is guaranteed to be the insn immediately following
   its definition and any other insn is presumed to invalidate it.

   Instead, we store below the value last assigned to CC0.  If it should
   happen to be a constant, it is stored in preference to the actual
   assigned value.  In case it is a constant, we store the mode in which
   the constant should be interpreted.  */

static rtx prev_insn_cc0;
static enum machine_mode prev_insn_cc0_mode;
#endif

/* Previous actual insn.  0 if at first insn of basic block.  */

static rtx prev_insn;

/* Insn being scanned.  */

static rtx this_insn;

/* Indexed by (pseudo) register number, gives the quantity number
   of the register's current contents.  */

static int *reg_qty;

/* Indexed by (pseudo) register number, gives the number of the next (or
   previous) (pseudo) register in the chain of registers sharing the same
   value.

   Or -1 if this register is at the end of the chain.

   If reg_qty[N] == N, reg_next_eqv[N] is undefined.  */

static int *reg_next_eqv;
static int *reg_prev_eqv;

/* Indexed by (pseudo) register number, gives the number of times
   that register has been altered in the current basic block.  */

static int *reg_tick;

/* Indexed by (pseudo) register number, gives the reg_tick value at which
   rtx's containing this register are valid in the hash table.
   If this does not equal the current reg_tick value, such expressions
   existing in the hash table are invalid.
   If this is -1, no expressions containing this register have been
   entered in the table.  */

static int *reg_in_table;

/* A HARD_REG_SET containing all the hard registers for which there is
   currently a REG expression in the hash table.  Note the difference
   from the above variables, which indicate if the REG is mentioned in some
   expression in the table.  */

static HARD_REG_SET hard_regs_in_table;

/* A HARD_REG_SET containing all the hard registers that are invalidated
   by a CALL_INSN.  */

static HARD_REG_SET regs_invalidated_by_call;

/* Two vectors of ints:
   one with max_reg elements, each containing -1; the other with
   max_reg + 500 elements (an approximation for max_qty), where
   element i contains i.
   These are used to initialize various other vectors fast.  */

static int *all_minus_one;
static int *consec_ints;

/* CUID of insn that starts the basic block currently being cse-processed.  */

static int cse_basic_block_start;

/* CUID of insn that ends the basic block currently being cse-processed.  */

static int cse_basic_block_end;

/* Vector mapping INSN_UIDs to cuids.
   The cuids are like uids but always increase monotonically.
   We use them to see whether a reg is used outside a given basic block.  */

static int *uid_cuid;

/* Highest UID in UID_CUID.  */
static int max_uid;

/* Get the cuid of an insn.  */

#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])

/* Nonzero if cse has altered conditional jump insns
   in such a way that jump optimization should be redone.  */

static int cse_jumps_altered;

/* Nonzero if we put a LABEL_REF into the hash table.  Since we may have put
   it into an INSN without a REG_LABEL, we have to rerun jump after CSE
   to put in the note.  */
static int recorded_label_ref;

/* canon_hash stores 1 in do_not_record
   if it notices a reference to CC0, PC, or some other volatile
   subexpression.  */

static int do_not_record;

#ifdef LOAD_EXTEND_OP

/* Scratch rtl used when looking for load-extended copy of a MEM.  */
static rtx memory_extend_rtx;
#endif

/* canon_hash stores 1 in hash_arg_in_memory
   if it notices a reference to memory within the expression being hashed.  */

static int hash_arg_in_memory;

/* canon_hash stores 1 in hash_arg_in_struct
   if it notices a reference to memory that's part of a structure.  */

static int hash_arg_in_struct;

/* The hash table contains buckets which are chains of `struct table_elt's,
   each recording one expression's information.
   That expression is in the `exp' field.

   Those elements with the same hash code are chained in both directions
   through the `next_same_hash' and `prev_same_hash' fields.

   Each set of expressions with equivalent values
   is on a two-way chain through the `next_same_value'
   and `prev_same_value' fields, and all point with
   the `first_same_value' field at the first element in
   that chain.  The chain is in order of increasing cost.
   Each element's cost value is in its `cost' field.

   The `in_memory' field is nonzero for elements that
   involve any reference to memory.  These elements are removed
   whenever a write is done to an unidentified location in memory.
   To be safe, we assume that a memory address is unidentified unless
   the address is either a symbol constant or a constant plus
   the frame pointer or argument pointer.

   The `in_struct' field is nonzero for elements that
   involve any reference to memory inside a structure or array.

   The `related_value' field is used to connect related expressions
   (that differ by adding an integer).
   The related expressions are chained in a circular fashion.
   `related_value' is zero for expressions for which this
   chain is not useful.

   The `cost' field stores the cost of this element's expression.

   The `is_const' flag is set if the element is a constant (including
   a fixed address).

   The `flag' field is used as a temporary during some search routines.

   The `mode' field is usually the same as GET_MODE (`exp'), but
   if `exp' is a CONST_INT and has no machine mode then the `mode'
   field is the mode it was being used as.  Each constant is
   recorded separately for each mode it is used with.  */


struct table_elt
{
  rtx exp;
  struct table_elt *next_same_hash;
  struct table_elt *prev_same_hash;
  struct table_elt *next_same_value;
  struct table_elt *prev_same_value;
  struct table_elt *first_same_value;
  struct table_elt *related_value;
  int cost;
  enum machine_mode mode;
  char in_memory;
  char in_struct;
  char is_const;
  char flag;
};

/* We don't want a lot of buckets, because we rarely have very many
   things stored in the hash table, and a lot of buckets slows
   down a lot of loops that happen frequently.  */
#define NBUCKETS 31

/* Compute hash code of X in mode M.  Special-case the case where X is a
   pseudo register (hard registers may require `do_not_record' to be set).  */

#define HASH(X, M) \
 (GET_CODE (X) == REG && REGNO (X) >= FIRST_PSEUDO_REGISTER \
  ? (((unsigned) REG << 7) + (unsigned) reg_qty[REGNO (X)]) % NBUCKETS \
  : canon_hash (X, M) % NBUCKETS)
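
/* A typical use (an illustrative sketch; HASH is applied to the
   canonicalized form of an expression, as the callers below do):

        unsigned hash = HASH (x, GET_MODE (x));
        struct table_elt *elt = lookup (x, hash, GET_MODE (x));

   For a pseudo register this costs only an array reference through
   `reg_qty'; everything else goes through the full canon_hash walk.  */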

/* Determine whether register number N is considered a fixed register for CSE.
   It is desirable to replace other regs with fixed regs, to reduce need for
   non-fixed hard regs.
   A reg wins if it is either the frame pointer or designated as fixed,
   but not if it is an overlapping register.  */
#ifdef OVERLAPPING_REGNO_P
#define FIXED_REGNO_P(N)  \
  (((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
    || fixed_regs[N] || global_regs[N]) \
   && ! OVERLAPPING_REGNO_P ((N)))
#else
#define FIXED_REGNO_P(N)  \
  ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
   || fixed_regs[N] || global_regs[N])
#endif

/* Compute cost of X, as stored in the `cost' field of a table_elt.  Fixed
   hard registers and pointers into the frame are the cheapest with a cost
   of 0.  Next come pseudos with a cost of 1 and other hard registers with
   a cost of 2.  Aside from these special cases, call `rtx_cost'.  */

#define CHEAP_REGNO(N) \
  ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
   || (N) == STACK_POINTER_REGNUM || (N) == ARG_POINTER_REGNUM \
   || ((N) >= FIRST_VIRTUAL_REGISTER && (N) <= LAST_VIRTUAL_REGISTER) \
   || ((N) < FIRST_PSEUDO_REGISTER \
       && FIXED_REGNO_P (N) && REGNO_REG_CLASS (N) != NO_REGS))

/* A register is cheap if it is a user variable assigned to the register
   or if its register number always corresponds to a cheap register.  */

#define CHEAP_REG(N) \
  ((REG_USERVAR_P (N) && REGNO (N) < FIRST_PSEUDO_REGISTER) \
   || CHEAP_REGNO (REGNO (N)))

#define COST(X) \
  (GET_CODE (X) == REG \
   ? (CHEAP_REG (X) ? 0 \
      : REGNO (X) >= FIRST_PSEUDO_REGISTER ? 1 \
      : 2) \
   : ((GET_CODE (X) == SUBREG \
       && GET_CODE (SUBREG_REG (X)) == REG \
       && GET_MODE_CLASS (GET_MODE (X)) == MODE_INT \
       && GET_MODE_CLASS (GET_MODE (SUBREG_REG (X))) == MODE_INT \
       && (GET_MODE_SIZE (GET_MODE (X)) \
           < GET_MODE_SIZE (GET_MODE (SUBREG_REG (X)))) \
       && subreg_lowpart_p (X) \
       && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (X)), \
                                 GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (X))))) \
      ? (CHEAP_REG (SUBREG_REG (X)) ? 0 \
         : REGNO (SUBREG_REG (X)) >= FIRST_PSEUDO_REGISTER ? 1 \
         : 2) \
      : rtx_cost (X, SET) * 2))
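
/* Thus, for example: the frame pointer or another fixed hard register
   costs 0, a pseudo costs 1, any other hard register costs 2, and a
   computed expression costs rtx_cost (X, SET) * 2, so a register is
   normally cheaper than any computation of the same value.  */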

/* Determine if the quantity number for register X represents a valid index
   into the `qty_...' variables.  */

#define REGNO_QTY_VALID_P(N) (reg_qty[N] != (N))

static struct table_elt *table[NBUCKETS];

/* Chain of `struct table_elt's made so far for this function
   but currently removed from the table.  */

static struct table_elt *free_element_chain;

/* Number of `struct table_elt' structures made so far for this function.  */

static int n_elements_made;

/* Maximum value `n_elements_made' has had so far in this compilation
   for functions previously processed.  */

static int max_elements_made;

/* Surviving equivalence class when two equivalence classes are merged
   by recording the effects of a jump in the last insn.  Zero if the
   last insn was not a conditional jump.  */

static struct table_elt *last_jump_equiv_class;

/* Set to the cost of a constant pool reference if one was found for a
   symbolic constant.  If this was found, it means we should try to
   convert constants into constant pool entries if they don't fit in
   the insn.  */

static int constant_pool_entries_cost;

/* Bits describing what kind of values in memory must be invalidated
   for a particular instruction.  If all the bits are zero,
   no memory refs need to be invalidated.  Each bit is more powerful
   than the preceding ones, and if a bit is set then the preceding
   bits are also set.

   Here is how the bits are set:
   Pushing onto the stack invalidates only the stack pointer,
   writing at a fixed address invalidates only variable addresses,
   writing in a structure element at variable address
   invalidates all but scalar variables,
   and writing in anything else at variable address invalidates everything.  */

struct write_data
{
  int sp : 1;			/* Invalidate stack pointer.  */
  int var : 1;			/* Invalidate variable addresses.  */
  int nonscalar : 1;		/* Invalidate all but scalar variables.  */
  int all : 1;			/* Invalidate all memory refs.  */
};
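
/* Illustrative examples of the convention above: a push through
   (pre_dec (reg sp)) sets only `sp'; a store to a fixed address such as
   (mem (symbol_ref X)) sets `var'; a store into a structure field at a
   varying address sets `nonscalar'; and any other store at a varying
   address sets `all'.  */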

/* Define maximum length of a branch path.  */

#define PATHLENGTH 10

/* This data describes a block that will be processed by cse_basic_block.  */

struct cse_basic_block_data {
  /* Lowest CUID value of insns in block.  */
  int low_cuid;
  /* Highest CUID value of insns in block.  */
  int high_cuid;
  /* Total number of SETs in block.  */
  int nsets;
  /* Last insn in the block.  */
  rtx last;
  /* Size of current branch path, if any.  */
  int path_size;
  /* Current branch path, indicating which branches will be taken.  */
  struct branch_path {
    /* The branch insn.  */
    rtx branch;
    /* Whether it should be taken or not.  AROUND is the same as TAKEN
       except that it is used when the destination label is not preceded
       by a BARRIER.  */
    enum taken {TAKEN, NOT_TAKEN, AROUND} status;
  } path[PATHLENGTH];
};

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.  */

#define FIXED_BASE_PLUS_P(X) \
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
   || (X) == arg_pointer_rtx \
   || (X) == virtual_stack_vars_rtx \
   || (X) == virtual_incoming_args_rtx \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx \
           || XEXP (X, 0) == hard_frame_pointer_rtx \
           || XEXP (X, 0) == arg_pointer_rtx \
           || XEXP (X, 0) == virtual_stack_vars_rtx \
           || XEXP (X, 0) == virtual_incoming_args_rtx)))

/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P; however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */

#define NONZERO_BASE_PLUS_P(X) \
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
   || (X) == virtual_stack_vars_rtx \
   || (X) == virtual_incoming_args_rtx \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx \
           || XEXP (X, 0) == hard_frame_pointer_rtx \
           || XEXP (X, 0) == arg_pointer_rtx \
           || XEXP (X, 0) == virtual_stack_vars_rtx \
           || XEXP (X, 0) == virtual_incoming_args_rtx)) \
   || (X) == stack_pointer_rtx \
   || (X) == virtual_stack_dynamic_rtx \
   || (X) == virtual_outgoing_args_rtx \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx \
           || XEXP (X, 0) == virtual_stack_dynamic_rtx \
           || XEXP (X, 0) == virtual_outgoing_args_rtx)))

static void new_basic_block PROTO((void));
static void make_new_qty PROTO((int));
static void make_regs_eqv PROTO((int, int));
static void delete_reg_equiv PROTO((int));
static int mention_regs PROTO((rtx));
static int insert_regs PROTO((rtx, struct table_elt *, int));
static void free_element PROTO((struct table_elt *));
static void remove_from_table PROTO((struct table_elt *, unsigned));
static struct table_elt *get_element PROTO((void));
static struct table_elt *lookup PROTO((rtx, unsigned, enum machine_mode)),
       *lookup_for_remove PROTO((rtx, unsigned, enum machine_mode));
static rtx lookup_as_function PROTO((rtx, enum rtx_code));
static struct table_elt *insert PROTO((rtx, struct table_elt *, unsigned,
				       enum machine_mode));
static void merge_equiv_classes PROTO((struct table_elt *,
				       struct table_elt *));
static void invalidate PROTO((rtx, enum machine_mode));
static void remove_invalid_refs PROTO((int));
static void rehash_using_reg PROTO((rtx));
static void invalidate_memory PROTO((struct write_data *));
static void invalidate_for_call PROTO((void));
static rtx use_related_value PROTO((rtx, struct table_elt *));
static unsigned canon_hash PROTO((rtx, enum machine_mode));
static unsigned safe_hash PROTO((rtx, enum machine_mode));
static int exp_equiv_p PROTO((rtx, rtx, int, int));
static void set_nonvarying_address_components PROTO((rtx, int, rtx *,
						     HOST_WIDE_INT *,
						     HOST_WIDE_INT *));
static int refers_to_p PROTO((rtx, rtx));
static int refers_to_mem_p PROTO((rtx, rtx, HOST_WIDE_INT,
				  HOST_WIDE_INT));
static int cse_rtx_addr_varies_p PROTO((rtx));
static rtx canon_reg PROTO((rtx, rtx));
static void find_best_addr PROTO((rtx, rtx *));
static enum rtx_code find_comparison_args PROTO((enum rtx_code, rtx *, rtx *,
						 enum machine_mode *,
						 enum machine_mode *));
static rtx cse_gen_binary PROTO((enum rtx_code, enum machine_mode,
				 rtx, rtx));
static rtx simplify_plus_minus PROTO((enum rtx_code, enum machine_mode,
				      rtx, rtx));
static rtx fold_rtx PROTO((rtx, rtx));
static rtx equiv_constant PROTO((rtx));
static void record_jump_equiv PROTO((rtx, int));
static void record_jump_cond PROTO((enum rtx_code, enum machine_mode,
				    rtx, rtx, int));
static void cse_insn PROTO((rtx, int));
static void note_mem_written PROTO((rtx, struct write_data *));
static void invalidate_from_clobbers PROTO((struct write_data *, rtx));
static rtx cse_process_notes PROTO((rtx, rtx));
static void cse_around_loop PROTO((rtx));
static void invalidate_skipped_set PROTO((rtx, rtx));
static void invalidate_skipped_block PROTO((rtx));
static void cse_check_loop_start PROTO((rtx, rtx));
static void cse_set_around_loop PROTO((rtx, rtx, rtx));
static rtx cse_basic_block PROTO((rtx, rtx, struct branch_path *, int));
static void count_reg_usage PROTO((rtx, int *, rtx, int));

extern int rtx_equal_function_value_matters;
\f
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.  */

/* Return the right cost to give to an operation
   to make the cost of the corresponding register-to-register instruction
   N times that of a fast register-to-register instruction.  */

#define COSTS_N_INSNS(N) ((N) * 4 - 2)
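
/* So, illustratively, COSTS_N_INSNS (1) == 2, which matches the default
   `total = 2' used in rtx_cost below for a simple operation, while the
   multiply and divide defaults come out as COSTS_N_INSNS (5) == 18 and
   COSTS_N_INSNS (7) == 26.  */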

int
rtx_cost (x, outer_code)
     rtx x;
     enum rtx_code outer_code;
{
  register int i, j;
  register enum rtx_code code;
  register char *fmt;
  register int total;

  if (x == 0)
    return 0;

  /* Compute the default costs of certain things.
     Note that RTX_COSTS can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Count multiplication by 2**n as a shift,
         because if we are considering it, we would output it as a shift.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
        total = 2;
      else
        total = COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      total = COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in loop.c and combine.c as a marker.  */
      total = 0;
      break;
    case ASM_OPERANDS:
      /* We don't want these to be used in substitutions because
         we have no way of validating the resulting insn.  So assign
         anything containing an ASM_OPERANDS a very high cost.  */
      total = 1000;
      break;
    default:
      total = 2;
    }

  switch (code)
    {
    case REG:
      return ! CHEAP_REG (x);

    case SUBREG:
      /* If we can't tie these modes, make this expensive.  The larger
         the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
        return COSTS_N_INSNS (2
                              + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
      return 2;
#ifdef RTX_COSTS
      RTX_COSTS (x, code, outer_code);
#endif
      CONST_COSTS (x, code, outer_code);
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), code);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        total += rtx_cost (XVECEXP (x, i, j), code);

  return total;
}
\f
/* Clear the hash table and initialize each register with its own quantity,
   for a new basic block.  */

static void
new_basic_block ()
{
  register int i;

  next_qty = max_reg;

  bzero ((char *) reg_tick, max_reg * sizeof (int));

  bcopy ((char *) all_minus_one, (char *) reg_in_table,
         max_reg * sizeof (int));
  bcopy ((char *) consec_ints, (char *) reg_qty, max_reg * sizeof (int));
  CLEAR_HARD_REG_SET (hard_regs_in_table);

  /* The per-quantity values used to be initialized here, but it is
     much faster to initialize each as it is made in `make_new_qty'.  */

  for (i = 0; i < NBUCKETS; i++)
    {
      register struct table_elt *this, *next;
      for (this = table[i]; this; this = next)
        {
          next = this->next_same_hash;
          free_element (this);
        }
    }

  bzero ((char *) table, sizeof table);

  prev_insn = 0;

#ifdef HAVE_cc0
  prev_insn_cc0 = 0;
#endif
}

/* Say that register REG contains a quantity not in any register before
   and initialize that quantity.  */

static void
make_new_qty (reg)
     register int reg;
{
  register int q;

  if (next_qty >= max_qty)
    abort ();

  q = reg_qty[reg] = next_qty++;
  qty_first_reg[q] = reg;
  qty_last_reg[q] = reg;
  qty_const[q] = qty_const_insn[q] = 0;
  qty_comparison_code[q] = UNKNOWN;

  reg_next_eqv[reg] = reg_prev_eqv[reg] = -1;
}
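
/* A minimal usage sketch (the register number and mode here are
   hypothetical): when a register is loaded in a way that creates a
   brand-new value, cse does the equivalent of

        make_new_qty (60);
        qty_mode[reg_qty[60]] = SImode;

   (as insert_regs does), leaving reg 60 as the only member of the new
   quantity's chain, with no known constant value or comparison.  */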

/* Make reg NEW equivalent to reg OLD.
   OLD is not changing; NEW is.  */

static void
make_regs_eqv (new, old)
     register int new, old;
{
  register int lastr, firstr;
  register int q = reg_qty[old];

  /* Nothing should become eqv until it has a "non-invalid" qty number.  */
  if (! REGNO_QTY_VALID_P (old))
    abort ();

  reg_qty[new] = q;
  firstr = qty_first_reg[q];
  lastr = qty_last_reg[q];

  /* Prefer fixed hard registers to anything.  Prefer pseudo regs to other
     hard regs.  Among pseudos, if NEW will live longer than any other reg
     of the same qty, and that is beyond the current basic block,
     make it the new canonical replacement for this qty.  */
  if (! (firstr < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (firstr))
      /* Certain fixed registers might be of the class NO_REGS.  This means
         that not only can they not be allocated by the compiler, but
         they cannot be used in substitutions or canonicalizations
         either.  */
      && (new >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new) != NO_REGS)
      && ((new < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new))
          || (new >= FIRST_PSEUDO_REGISTER
              && (firstr < FIRST_PSEUDO_REGISTER
                  || ((uid_cuid[regno_last_uid[new]] > cse_basic_block_end
                       || (uid_cuid[regno_first_uid[new]]
                           < cse_basic_block_start))
                      && (uid_cuid[regno_last_uid[new]]
                          > uid_cuid[regno_last_uid[firstr]]))))))
    {
      reg_prev_eqv[firstr] = new;
      reg_next_eqv[new] = firstr;
      reg_prev_eqv[new] = -1;
      qty_first_reg[q] = new;
    }
  else
    {
      /* If NEW is a hard reg (known to be non-fixed), insert at end.
         Otherwise, insert before any non-fixed hard regs that are at the
         end.  Registers of class NO_REGS cannot be used as an
         equivalent for anything.  */
      while (lastr < FIRST_PSEUDO_REGISTER && reg_prev_eqv[lastr] >= 0
             && (REGNO_REG_CLASS (lastr) == NO_REGS || ! FIXED_REGNO_P (lastr))
             && new >= FIRST_PSEUDO_REGISTER)
        lastr = reg_prev_eqv[lastr];
      reg_next_eqv[new] = reg_next_eqv[lastr];
      if (reg_next_eqv[lastr] >= 0)
        reg_prev_eqv[reg_next_eqv[lastr]] = new;
      else
        qty_last_reg[q] = new;
      reg_next_eqv[lastr] = new;
      reg_prev_eqv[new] = lastr;
    }
}

/* Remove REG from its equivalence class.  */

static void
delete_reg_equiv (reg)
     register int reg;
{
  register int q = reg_qty[reg];
  register int p, n;

  /* If invalid, do nothing.  */
  if (q == reg)
    return;

  p = reg_prev_eqv[reg];
  n = reg_next_eqv[reg];

  if (n != -1)
    reg_prev_eqv[n] = p;
  else
    qty_last_reg[q] = p;
  if (p != -1)
    reg_next_eqv[p] = n;
  else
    qty_first_reg[q] = n;

  reg_qty[reg] = reg;
}
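
/* For example (illustrative): if quantity Q's chain is 60 <-> 61 <-> 62,
   deleting reg 61 splices the chain to 60 <-> 62; deleting reg 60 instead
   makes qty_first_reg[Q] point at 61.  Either way the deleted register
   reverts to the "no quantity" state, reg_qty[reg] == reg.  */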

/* Remove any invalid expressions from the hash table
   that refer to any of the registers contained in expression X.

   Make sure that newly inserted references to those registers
   as subexpressions will be considered valid.

   mention_regs is not called when a register itself
   is being stored in the table.

   Return 1 if we have done something that may have changed the hash code
   of X.  */

static int
mention_regs (x)
     rtx x;
{
  register enum rtx_code code;
  register int i, j;
  register char *fmt;
  register int changed = 0;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == REG)
    {
      register int regno = REGNO (x);
      register int endregno
        = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
                   : HARD_REGNO_NREGS (regno, GET_MODE (x)));
      int i;

      for (i = regno; i < endregno; i++)
        {
          if (reg_in_table[i] >= 0 && reg_in_table[i] != reg_tick[i])
            remove_invalid_refs (i);

          reg_in_table[i] = reg_tick[i];
        }

      return 0;
    }

  /* If X is a comparison or a COMPARE and either operand is a register
     that does not have a quantity, give it one.  This is so that a later
     call to record_jump_equiv won't cause X to be assigned a different
     hash code and not found in the table after that call.

     It is not necessary to do this here, since rehash_using_reg can
     fix up the table later, but doing this here eliminates the need to
     call that expensive function in the most common case where the only
     use of the register is in the comparison.  */

  if (code == COMPARE || GET_RTX_CLASS (code) == '<')
    {
      if (GET_CODE (XEXP (x, 0)) == REG
          && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))))
        if (insert_regs (XEXP (x, 0), NULL_PTR, 0))
          {
            rehash_using_reg (XEXP (x, 0));
            changed = 1;
          }

      if (GET_CODE (XEXP (x, 1)) == REG
          && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 1))))
        if (insert_regs (XEXP (x, 1), NULL_PTR, 0))
          {
            rehash_using_reg (XEXP (x, 1));
            changed = 1;
          }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      changed |= mention_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        changed |= mention_regs (XVECEXP (x, i, j));

  return changed;
}

/* Update the register quantities for inserting X into the hash table
   with a value equivalent to CLASSP.
   (If the class does not contain a REG, it is irrelevant.)
   If MODIFIED is nonzero, X is a destination; it is being modified.
   Note that delete_reg_equiv should be called on a register
   before insert_regs is done on that register with MODIFIED != 0.

   Nonzero value means that elements of reg_qty have changed
   so X's hash code may be different.  */

static int
insert_regs (x, classp, modified)
     rtx x;
     struct table_elt *classp;
     int modified;
{
  if (GET_CODE (x) == REG)
    {
      register int regno = REGNO (x);

      /* If REGNO is in the equivalence table already but is of the
         wrong mode for that equivalence, don't do anything here.  */

      if (REGNO_QTY_VALID_P (regno)
          && qty_mode[reg_qty[regno]] != GET_MODE (x))
        return 0;

      if (modified || ! REGNO_QTY_VALID_P (regno))
        {
          if (classp)
            for (classp = classp->first_same_value;
                 classp != 0;
                 classp = classp->next_same_value)
              if (GET_CODE (classp->exp) == REG
                  && GET_MODE (classp->exp) == GET_MODE (x))
                {
                  make_regs_eqv (regno, REGNO (classp->exp));
                  return 1;
                }

          make_new_qty (regno);
          qty_mode[reg_qty[regno]] = GET_MODE (x);
          return 1;
        }

      return 0;
    }

  /* If X is a SUBREG, we will likely be inserting the inner register in the
     table.  If that register doesn't have an assigned quantity number at
     this point but does later, the insertion that we will be doing now will
     not be accessible because its hash code will have changed.  So assign
     a quantity number now.  */

  else if (GET_CODE (x) == SUBREG && GET_CODE (SUBREG_REG (x)) == REG
           && ! REGNO_QTY_VALID_P (REGNO (SUBREG_REG (x))))
    {
      insert_regs (SUBREG_REG (x), NULL_PTR, 0);
      mention_regs (SUBREG_REG (x));
      return 1;
    }
  else
    return mention_regs (x);
}
\f
/* Look in or update the hash table.  */

/* Put the element ELT on the list of free elements.  */

static void
free_element (elt)
     struct table_elt *elt;
{
  elt->next_same_hash = free_element_chain;
  free_element_chain = elt;
}

/* Return an element that is free for use.  */

static struct table_elt *
get_element ()
{
  struct table_elt *elt = free_element_chain;
  if (elt)
    {
      free_element_chain = elt->next_same_hash;
      return elt;
    }
  n_elements_made++;
  return (struct table_elt *) oballoc (sizeof (struct table_elt));
}

/* Remove table element ELT from use in the table.
   HASH is its hash code, made using the HASH macro.
   It's an argument because often that is known in advance
   and we save much time not recomputing it.  */

static void
remove_from_table (elt, hash)
     register struct table_elt *elt;
     unsigned hash;
{
  if (elt == 0)
    return;

  /* Mark this element as removed.  See cse_insn.  */
  elt->first_same_value = 0;

  /* Remove the table element from its equivalence class.  */

  {
    register struct table_elt *prev = elt->prev_same_value;
    register struct table_elt *next = elt->next_same_value;

    if (next) next->prev_same_value = prev;

    if (prev)
      prev->next_same_value = next;
    else
      {
        register struct table_elt *newfirst = next;
        while (next)
          {
            next->first_same_value = newfirst;
            next = next->next_same_value;
          }
      }
  }

  /* Remove the table element from its hash bucket.  */

  {
    register struct table_elt *prev = elt->prev_same_hash;
    register struct table_elt *next = elt->next_same_hash;

    if (next) next->prev_same_hash = prev;

    if (prev)
      prev->next_same_hash = next;
    else if (table[hash] == elt)
      table[hash] = next;
    else
      {
        /* This entry is not in the proper hash bucket.  This can happen
           when two classes were merged by `merge_equiv_classes'.  Search
           for the hash bucket that it heads.  This happens only very
           rarely, so the cost is acceptable.  */
        for (hash = 0; hash < NBUCKETS; hash++)
          if (table[hash] == elt)
            table[hash] = next;
      }
  }

  /* Remove the table element from its related-value circular chain.  */

  if (elt->related_value != 0 && elt->related_value != elt)
    {
      register struct table_elt *p = elt->related_value;
      while (p->related_value != elt)
        p = p->related_value;
      p->related_value = elt->related_value;
      if (p->related_value == p)
        p->related_value = 0;
    }

  free_element (elt);
}
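
/* Note how the circular related-value chain above is unlinked: with an
   illustrative chain A -> B -> C -> A, removing B means walking from B
   around to A (the element whose `related_value' is B) and setting
   A->related_value = C; if only one element remains, its `related_value'
   is cleared to 0 and the chain is dissolved.  */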

/* Look up X in the hash table and return its table element,
   or 0 if X is not in the table.

   MODE is the machine-mode of X, or if X is an integer constant
   with VOIDmode then MODE is the mode with which X will be used.

   Here we are satisfied to find an expression whose tree structure
   looks like X.  */

static struct table_elt *
lookup (x, hash, mode)
     rtx x;
     unsigned hash;
     enum machine_mode mode;
{
  register struct table_elt *p;

  for (p = table[hash]; p; p = p->next_same_hash)
    if (mode == p->mode && ((x == p->exp && GET_CODE (x) == REG)
                            || exp_equiv_p (x, p->exp, GET_CODE (x) != REG, 0)))
      return p;

  return 0;
}

/* Like `lookup' but don't care whether the table element uses invalid regs.
   Also ignore discrepancies in the machine mode of a register.  */

static struct table_elt *
lookup_for_remove (x, hash, mode)
     rtx x;
     unsigned hash;
     enum machine_mode mode;
{
  register struct table_elt *p;

  if (GET_CODE (x) == REG)
    {
      int regno = REGNO (x);
      /* Don't check the machine mode when comparing registers;
         invalidating (REG:SI 0) also invalidates (REG:DF 0).  */
      for (p = table[hash]; p; p = p->next_same_hash)
        if (GET_CODE (p->exp) == REG
            && REGNO (p->exp) == regno)
          return p;
    }
  else
    {
      for (p = table[hash]; p; p = p->next_same_hash)
        if (mode == p->mode && (x == p->exp || exp_equiv_p (x, p->exp, 0, 0)))
          return p;
    }

  return 0;
}

/* Look for an expression equivalent to X and with code CODE.
   If one is found, return that expression.  */

static rtx
lookup_as_function (x, code)
     rtx x;
     enum rtx_code code;
{
  register struct table_elt *p = lookup (x, safe_hash (x, VOIDmode) % NBUCKETS,
                                         GET_MODE (x));
  if (p == 0)
    return 0;

  for (p = p->first_same_value; p; p = p->next_same_value)
    {
      if (GET_CODE (p->exp) == code
          /* Make sure this is a valid entry in the table.  */
          && exp_equiv_p (p->exp, p->exp, 1, 0))
        return p->exp;
    }

  return 0;
}

/* Insert X in the hash table, assuming HASH is its hash code
   and CLASSP is an element of the class it should go in
   (or 0 if a new class should be made).
   It is inserted at the proper position to keep the class in
   the order cheapest first.

   MODE is the machine-mode of X, or if X is an integer constant
   with VOIDmode then MODE is the mode with which X will be used.

   For elements of equal cheapness, the most recent one
   goes in front, except that the first element in the list
   remains first unless a cheaper element is added.  The order of
   pseudo-registers does not matter, as canon_reg will be called to
   find the cheapest when a register is retrieved from the table.

   The in_memory field in the hash table element is set to 0.
   The caller must set it nonzero if appropriate.

   You should call insert_regs (X, CLASSP, MODIFY) before calling here,
   and if insert_regs returns a nonzero value
   you must then recompute its hash code before calling here.

   If necessary, update table showing constant values of quantities.  */

#define CHEAPER(X,Y)   ((X)->cost < (Y)->cost)

static struct table_elt *
insert (x, classp, hash, mode)
     register rtx x;
     register struct table_elt *classp;
     unsigned hash;
     enum machine_mode mode;
{
  register struct table_elt *elt;

  /* If X is a register and we haven't made a quantity for it,
     something is wrong.  */
  if (GET_CODE (x) == REG && ! REGNO_QTY_VALID_P (REGNO (x)))
    abort ();

  /* If X is a hard register, show it is being put in the table.  */
  if (GET_CODE (x) == REG && REGNO (x) < FIRST_PSEUDO_REGISTER)
    {
      int regno = REGNO (x);
      int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
      int i;

      for (i = regno; i < endregno; i++)
        SET_HARD_REG_BIT (hard_regs_in_table, i);
    }

  /* If X is a label, show we recorded it.  */
  if (GET_CODE (x) == LABEL_REF
      || (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF))
    recorded_label_ref = 1;

  /* Put an element for X into the right hash bucket.  */

  elt = get_element ();
  elt->exp = x;
  elt->cost = COST (x);
  elt->next_same_value = 0;
  elt->prev_same_value = 0;
  elt->next_same_hash = table[hash];
  elt->prev_same_hash = 0;
  elt->related_value = 0;
  elt->in_memory = 0;
  elt->mode = mode;
  elt->is_const = (CONSTANT_P (x)
                   /* GNU C++ takes advantage of this for `this'
                      (and other const values).  */
                   || (RTX_UNCHANGING_P (x)
                       && GET_CODE (x) == REG
                       && REGNO (x) >= FIRST_PSEUDO_REGISTER)
                   || FIXED_BASE_PLUS_P (x));

  if (table[hash])
    table[hash]->prev_same_hash = elt;
  table[hash] = elt;

  /* Put it into the proper value-class.  */
  if (classp)
    {
      classp = classp->first_same_value;
      if (CHEAPER (elt, classp))
        /* Insert at the head of the class.  */
        {
          register struct table_elt *p;
          elt->next_same_value = classp;
          classp->prev_same_value = elt;
          elt->first_same_value = elt;

          for (p = classp; p; p = p->next_same_value)
            p->first_same_value = elt;
        }
      else
        {
          /* Insert not at head of the class.  */
          /* Put it after the last element cheaper than X.  */
          register struct table_elt *p, *next;
          for (p = classp; (next = p->next_same_value) && CHEAPER (next, elt);
               p = next);
          /* Put it after P and before NEXT.  */
          elt->next_same_value = next;
          if (next)
            next->prev_same_value = elt;
          elt->prev_same_value = p;
          p->next_same_value = elt;
          elt->first_same_value = classp;
        }
    }
  else
    elt->first_same_value = elt;

  /* If this is a constant being set equivalent to a register or a register
     being set equivalent to a constant, note the constant equivalence.

     If this is a constant, it cannot be equivalent to a different constant,
     and a constant is the only thing that can be cheaper than a register.  So
     we know the register is the head of the class (before the constant was
     inserted).

     If this is a register that is not already known equivalent to a
     constant, we must check the entire class.

     If this is a register that is already known equivalent to an insn,
     update `qty_const_insn' to show that `this_insn' is the latest
     insn making that quantity equivalent to the constant.  */

  if (elt->is_const && classp && GET_CODE (classp->exp) == REG
      && GET_CODE (x) != REG)
    {
      qty_const[reg_qty[REGNO (classp->exp)]]
        = gen_lowpart_if_possible (qty_mode[reg_qty[REGNO (classp->exp)]], x);
      qty_const_insn[reg_qty[REGNO (classp->exp)]] = this_insn;
    }

  else if (GET_CODE (x) == REG && classp && ! qty_const[reg_qty[REGNO (x)]]
           && ! elt->is_const)
    {
      register struct table_elt *p;

      for (p = classp; p != 0; p = p->next_same_value)
        {
          if (p->is_const && GET_CODE (p->exp) != REG)
            {
              qty_const[reg_qty[REGNO (x)]]
                = gen_lowpart_if_possible (GET_MODE (x), p->exp);
              qty_const_insn[reg_qty[REGNO (x)]] = this_insn;
              break;
            }
        }
    }

  else if (GET_CODE (x) == REG && qty_const[reg_qty[REGNO (x)]]
           && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]])
    qty_const_insn[reg_qty[REGNO (x)]] = this_insn;

  /* If this is a constant with symbolic value,
     and it has a term with an explicit integer value,
     link it up with related expressions.  */
  if (GET_CODE (x) == CONST)
    {
      rtx subexp = get_related_value (x);
      unsigned subhash;
      struct table_elt *subelt, *subelt_prev;

      if (subexp != 0)
        {
          /* Get the integer-free subexpression in the hash table.  */
          subhash = safe_hash (subexp, mode) % NBUCKETS;
          subelt = lookup (subexp, subhash, mode);
          if (subelt == 0)
            subelt = insert (subexp, NULL_PTR, subhash, mode);
          /* Initialize SUBELT's circular chain if it has none.  */
          if (subelt->related_value == 0)
            subelt->related_value = subelt;
          /* Find the element in the circular chain that precedes SUBELT.  */
          subelt_prev = subelt;
          while (subelt_prev->related_value != subelt)
            subelt_prev = subelt_prev->related_value;
          /* Put new ELT into SUBELT's circular chain just before SUBELT.
             This way the element that follows SUBELT is the oldest one.  */
          elt->related_value = subelt_prev->related_value;
          subelt_prev->related_value = elt;
        }
    }

  return elt;
}
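
/* As an illustration of the related-value linkage above: inserting
   (const (plus (symbol_ref X) (const_int 8))) also enters (symbol_ref X)
   itself, and the two are spliced into one circular chain.  Any further
   constant of the form (symbol_ref X) plus some integer joins the same
   chain, just before the integer-free base element, so the element
   following the base is always the oldest.  */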
\f
/* Given two equivalence classes, CLASS1 and CLASS2, put all the entries from
   CLASS2 into CLASS1.  This is done when we have reached an insn which makes
   the two classes equivalent.

   CLASS1 will be the surviving class; CLASS2 should not be used after this
   call.

   Any invalid entries in CLASS2 will not be copied.  */

static void
merge_equiv_classes (class1, class2)
     struct table_elt *class1, *class2;
{
  struct table_elt *elt, *next, *new;

  /* Ensure we start with the head of the classes.  */
  class1 = class1->first_same_value;
  class2 = class2->first_same_value;

  /* If they were already equal, forget it.  */
  if (class1 == class2)
    return;

  for (elt = class2; elt; elt = next)
    {
      unsigned hash;
      rtx exp = elt->exp;
      enum machine_mode mode = elt->mode;

      next = elt->next_same_value;

      /* Remove old entry, make a new one in CLASS1's class.
         Don't do this for invalid entries as we cannot find their
         hash code (it also isn't necessary).  */
      if (GET_CODE (exp) == REG || exp_equiv_p (exp, exp, 1, 0))
        {
          hash_arg_in_memory = 0;
          hash_arg_in_struct = 0;
          hash = HASH (exp, mode);

          if (GET_CODE (exp) == REG)
            delete_reg_equiv (REGNO (exp));

          remove_from_table (elt, hash);

          if (insert_regs (exp, class1, 0))
            {
              rehash_using_reg (exp);
              hash = HASH (exp, mode);
            }
          new = insert (exp, class1, hash, mode);
          new->in_memory = hash_arg_in_memory;
          new->in_struct = hash_arg_in_struct;
        }
    }
}
\f
/* Remove from the hash table, or mark as invalid,
   all expressions whose values could be altered by storing in X.
   X is a register, a subreg, or a memory reference with nonvarying address
   (because, when a memory reference with a varying address is stored in,
   all memory references are removed by invalidate_memory
   so specific invalidation is superfluous).
   FULL_MODE, if not VOIDmode, indicates that this much should be invalidated
   instead of just the amount indicated by the mode of X.  This is only used
   for bitfield stores into memory.

   A nonvarying address may be just a register or just
   a symbol reference, or it may be either of those plus
   a numeric offset.  */

static void
invalidate (x, full_mode)
     rtx x;
     enum machine_mode full_mode;
{
  register int i;
  register struct table_elt *p;
  rtx base;
  HOST_WIDE_INT start, end;

  /* If X is a register, dependencies on its contents
     are recorded through the qty number mechanism.
     Just change the qty number of the register,
     mark it as invalid for expressions that refer to it,
     and remove it itself.  */

  if (GET_CODE (x) == REG)
    {
      register int regno = REGNO (x);
      register unsigned hash = HASH (x, GET_MODE (x));

1541 that it's value might have changed. If it is a pseudo, remove its
1542 entry from the hash table.
1543
1544 For a hard register, we do the first two actions above for any
1545 additional hard registers corresponding to X. Then, if any of these
1546 registers are in the table, we must remove any REG entries that
1547 overlap these registers. */
1548
1549 delete_reg_equiv (regno);
1550 reg_tick[regno]++;
1551
1552 if (regno >= FIRST_PSEUDO_REGISTER)
1553 {
1554 /* Because a register can be referenced in more than one mode,
1555 we might have to remove more than one table entry. */
1556
1557 struct table_elt *elt;
1558
1559 while (elt = lookup_for_remove (x, hash, GET_MODE (x)))
1560 remove_from_table (elt, hash);
1561 }
1562 else
1563 {
1564 HOST_WIDE_INT in_table
1565 = TEST_HARD_REG_BIT (hard_regs_in_table, regno);
1566 int endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (x));
1567 int tregno, tendregno;
1568 register struct table_elt *p, *next;
1569
1570 CLEAR_HARD_REG_BIT (hard_regs_in_table, regno);
1571
1572 for (i = regno + 1; i < endregno; i++)
1573 {
1574 in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, i);
1575 CLEAR_HARD_REG_BIT (hard_regs_in_table, i);
1576 delete_reg_equiv (i);
1577 reg_tick[i]++;
1578 }
1579
1580 if (in_table)
1581 for (hash = 0; hash < NBUCKETS; hash++)
1582 for (p = table[hash]; p; p = next)
1583 {
1584 next = p->next_same_hash;
1585
1586 if (GET_CODE (p->exp) != REG
1587 || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
1588 continue;
1589
1590 tregno = REGNO (p->exp);
1591 tendregno
1592 = tregno + HARD_REGNO_NREGS (tregno, GET_MODE (p->exp));
1593 if (tendregno > regno && tregno < endregno)
1594 remove_from_table (p, hash);
1595 }
1596 }
1597
1598 return;
1599 }
1600
1601 if (GET_CODE (x) == SUBREG)
1602 {
1603 if (GET_CODE (SUBREG_REG (x)) != REG)
1604 abort ();
1605 invalidate (SUBREG_REG (x), VOIDmode);
1606 return;
1607 }
1608
1609 /* X is not a register; it must be a memory reference with
1610 a nonvarying address. Remove all hash table elements
1611 that refer to overlapping pieces of memory. */
1612
1613 if (GET_CODE (x) != MEM)
1614 abort ();
1615
1616 if (full_mode == VOIDmode)
1617 full_mode = GET_MODE (x);
1618
1619 set_nonvarying_address_components (XEXP (x, 0), GET_MODE_SIZE (full_mode),
1620 &base, &start, &end);
1621
1622 for (i = 0; i < NBUCKETS; i++)
1623 {
1624 register struct table_elt *next;
1625 for (p = table[i]; p; p = next)
1626 {
1627 next = p->next_same_hash;
1628 if (refers_to_mem_p (p->exp, base, start, end))
1629 remove_from_table (p, i);
1630 }
1631 }
1632 }
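
/* Usage sketch (illustrative only and kept out of compilation; the
   frame offset is hypothetical): invalidating a word-sized store into
   a frame slot.  */
#if 0
{
  /* The location stored into: (mem:SI (plus (reg fp) (const_int -8))).  */
  rtx slot = gen_rtx (MEM, SImode,
		      gen_rtx (PLUS, Pmode, frame_pointer_rtx,
			       GEN_INT (-8)));

  /* FULL_MODE of VOIDmode means "use the mode of the MEM itself", so
     bytes [-8, -4) from the frame pointer are invalidated: any table
     entry whose memory overlaps that range, e.g. a recorded load of
     (mem:HI (plus (reg fp) (const_int -7))), is removed.  */
  invalidate (slot, VOIDmode);
}
#endif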
1633
1634 /* Remove all expressions that refer to register REGNO,
1635 since they are already invalid, and we are about to
1636 mark that register valid again and don't want the old
1637 expressions to reappear as valid. */
1638
1639 static void
1640 remove_invalid_refs (regno)
1641 int regno;
1642 {
1643 register int i;
1644 register struct table_elt *p, *next;
1645
1646 for (i = 0; i < NBUCKETS; i++)
1647 for (p = table[i]; p; p = next)
1648 {
1649 next = p->next_same_hash;
1650 if (GET_CODE (p->exp) != REG
1651 && refers_to_regno_p (regno, regno + 1, p->exp, NULL_PTR))
1652 remove_from_table (p, i);
1653 }
1654 }
1655 \f
1656 /* Recompute the hash codes of any valid entries in the hash table that
1657 reference X, if X is a register, or SUBREG_REG (X) if X is a SUBREG.
1658
1659 This is called when we make a jump equivalence. */
1660
1661 static void
1662 rehash_using_reg (x)
1663 rtx x;
1664 {
1665 int i;
1666 struct table_elt *p, *next;
1667 unsigned hash;
1668
1669 if (GET_CODE (x) == SUBREG)
1670 x = SUBREG_REG (x);
1671
1672 /* If X is not a register or if the register is known not to be in any
1673 valid entries in the table, we have no work to do. */
1674
1675 if (GET_CODE (x) != REG
1676 || reg_in_table[REGNO (x)] < 0
1677 || reg_in_table[REGNO (x)] != reg_tick[REGNO (x)])
1678 return;
1679
1680 /* Scan all hash chains looking for valid entries that mention X.
1681 If we find one and it is in the wrong hash chain, move it. We can skip
1682 objects that are registers, since they are handled specially. */
1683
1684 for (i = 0; i < NBUCKETS; i++)
1685 for (p = table[i]; p; p = next)
1686 {
1687 next = p->next_same_hash;
1688 if (GET_CODE (p->exp) != REG && reg_mentioned_p (x, p->exp)
1689 && exp_equiv_p (p->exp, p->exp, 1, 0)
1690 && i != (hash = safe_hash (p->exp, p->mode) % NBUCKETS))
1691 {
1692 if (p->next_same_hash)
1693 p->next_same_hash->prev_same_hash = p->prev_same_hash;
1694
1695 if (p->prev_same_hash)
1696 p->prev_same_hash->next_same_hash = p->next_same_hash;
1697 else
1698 table[i] = p->next_same_hash;
1699
1700 p->next_same_hash = table[hash];
1701 p->prev_same_hash = 0;
1702 if (table[hash])
1703 table[hash]->prev_same_hash = p;
1704 table[hash] = p;
1705 }
1706 }
1707 }
1708 \f
1709 /* Remove from the hash table all expressions that reference memory,
1710 or some of them as specified by *WRITES. */
1711
1712 static void
1713 invalidate_memory (writes)
1714 struct write_data *writes;
1715 {
1716 register int i;
1717 register struct table_elt *p, *next;
1718 int all = writes->all;
1719 int nonscalar = writes->nonscalar;
1720
1721 for (i = 0; i < NBUCKETS; i++)
1722 for (p = table[i]; p; p = next)
1723 {
1724 next = p->next_same_hash;
1725 if (p->in_memory
1726 && (all
1727 || (nonscalar && p->in_struct)
1728 || cse_rtx_addr_varies_p (p->exp)))
1729 remove_from_table (p, i);
1730 }
1731 }
1732 \f
1733 /* Remove from the hash table any expressions that are call-clobbered
1734 registers. Also update their `reg_tick' values. */
1735
1736 static void
1737 invalidate_for_call ()
1738 {
1739 int regno, endregno;
1740 int i;
1741 unsigned hash;
1742 struct table_elt *p, *next;
1743 int in_table = 0;
1744
1745 /* Go through all the hard registers. For each that is clobbered in
1746 a CALL_INSN, remove the register from quantity chains and update
1747 reg_tick if defined. Also see if any of these registers is currently
1748 in the table. */
1749
1750 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1751 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
1752 {
1753 delete_reg_equiv (regno);
1754 if (reg_tick[regno] >= 0)
1755 reg_tick[regno]++;
1756
1757 in_table |= (TEST_HARD_REG_BIT (hard_regs_in_table, regno) != 0);
1758 }
1759
1760 /* In the case where we have no call-clobbered hard registers in the
1761 table, we are done. Otherwise, scan the table and remove any
1762 entry that overlaps a call-clobbered register. */
1763
1764 if (in_table)
1765 for (hash = 0; hash < NBUCKETS; hash++)
1766 for (p = table[hash]; p; p = next)
1767 {
1768 next = p->next_same_hash;
1769
1770 if (GET_CODE (p->exp) != REG
1771 || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
1772 continue;
1773
1774 regno = REGNO (p->exp);
1775 endregno = regno + HARD_REGNO_NREGS (regno, GET_MODE (p->exp));
1776
1777 for (i = regno; i < endregno; i++)
1778 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
1779 {
1780 remove_from_table (p, hash);
1781 break;
1782 }
1783 }
1784 }
1785 \f
1786 /* Given an expression X of type CONST,
1787 and ELT which is its table entry (or 0 if it
1788 is not in the hash table),
1789 return an alternate expression for X as a register plus integer.
1790 If none can be found, return 0. */
1791
1792 static rtx
1793 use_related_value (x, elt)
1794 rtx x;
1795 struct table_elt *elt;
1796 {
1797 register struct table_elt *relt = 0;
1798 register struct table_elt *p, *q;
1799 HOST_WIDE_INT offset;
1800
1801 /* First, is there anything related known?
1802 If we have a table element, we can tell from that.
1803 Otherwise, must look it up. */
1804
1805 if (elt != 0 && elt->related_value != 0)
1806 relt = elt;
1807 else if (elt == 0 && GET_CODE (x) == CONST)
1808 {
1809 rtx subexp = get_related_value (x);
1810 if (subexp != 0)
1811 relt = lookup (subexp,
1812 safe_hash (subexp, GET_MODE (subexp)) % NBUCKETS,
1813 GET_MODE (subexp));
1814 }
1815
1816 if (relt == 0)
1817 return 0;
1818
1819 /* Search all related table entries for one that has an
1820 equivalent register. */
1821
1822 p = relt;
1823 while (1)
1824 {
1825 /* This loop is strange in that it is executed in two different cases.
1826 The first is when X is already in the table. Then it is searching
1827 the RELATED_VALUE list of X's class (RELT). The second case is when
1828 X is not in the table. Then RELT points to a class for the related
1829 value.
1830
1831 Ensure that, whatever case we are in, we ignore classes that have
1832 the same value as X. */
1833
1834 if (rtx_equal_p (x, p->exp))
1835 q = 0;
1836 else
1837 for (q = p->first_same_value; q; q = q->next_same_value)
1838 if (GET_CODE (q->exp) == REG)
1839 break;
1840
1841 if (q)
1842 break;
1843
1844 p = p->related_value;
1845
1846 /* We went all the way around, so there is nothing to be found.
1847 Alternatively, perhaps RELT was in the table for some other reason
1848 and it has no related values recorded. */
1849 if (p == relt || p == 0)
1850 break;
1851 }
1852
1853 if (q == 0)
1854 return 0;
1855
1856 offset = (get_integer_term (x) - get_integer_term (p->exp));
1857 /* Note: OFFSET may be 0 if P->exp and X are related by commutativity. */
1858 return plus_constant (q->exp, offset);
1859 }
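
/* Worked example (the register number and offsets are hypothetical):
   suppose the table records

     (reg 70) == (const (plus (symbol_ref "tbl") (const_int 4)))

   and X is (const (plus (symbol_ref "tbl") (const_int 12))).  The two
   CONSTs are related (same symbol, different offset), so the search
   above finds the class containing (reg 70) and the result is

     plus_constant ((reg 70), 12 - 4)  ==  (plus (reg 70) (const_int 8))

   which is normally cheaper than rematerializing the full CONST.  */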
1860 \f
1861 /* Hash an rtx. We are careful to make sure the value is never negative.
1862 Equivalent registers hash identically.
1863 MODE is used in hashing for CONST_INTs only;
1864 otherwise the mode of X is used.
1865
1866 Store 1 in do_not_record if any subexpression is volatile.
1867
1868 Store 1 in hash_arg_in_memory if X contains a MEM rtx
1869 which does not have the RTX_UNCHANGING_P bit set.
1870 In this case, also store 1 in hash_arg_in_struct
1871 if there is a MEM rtx which has the MEM_IN_STRUCT_P bit set.
1872
1873 Note that cse_insn knows that the hash code of a MEM expression
1874 is just (int) MEM plus the hash code of the address. */
1875
1876 static unsigned
1877 canon_hash (x, mode)
1878 rtx x;
1879 enum machine_mode mode;
1880 {
1881 register int i, j;
1882 register unsigned hash = 0;
1883 register enum rtx_code code;
1884 register char *fmt;
1885
1886 /* repeat is used to turn tail-recursion into iteration. */
1887 repeat:
1888 if (x == 0)
1889 return hash;
1890
1891 code = GET_CODE (x);
1892 switch (code)
1893 {
1894 case REG:
1895 {
1896 register int regno = REGNO (x);
1897
1898 /* On some machines, we can't record any non-fixed hard register,
1899 because extending its life will cause reload problems. We
1900 consider ap, fp, and sp to be fixed for this purpose.
1901 On all machines, we can't record any global registers. */
1902
1903 if (regno < FIRST_PSEUDO_REGISTER
1904 && (global_regs[regno]
1905 #ifdef SMALL_REGISTER_CLASSES
1906 || (SMALL_REGISTER_CLASSES
1907 && ! fixed_regs[regno]
1908 && regno != FRAME_POINTER_REGNUM
1909 && regno != HARD_FRAME_POINTER_REGNUM
1910 && regno != ARG_POINTER_REGNUM
1911 && regno != STACK_POINTER_REGNUM)
1912 #endif
1913 ))
1914 {
1915 do_not_record = 1;
1916 return 0;
1917 }
1918 hash += ((unsigned) REG << 7) + (unsigned) reg_qty[regno];
1919 return hash;
1920 }
1921
1922 case CONST_INT:
1923 {
1924 unsigned HOST_WIDE_INT tem = INTVAL (x);
1925 hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + tem;
1926 return hash;
1927 }
1928
1929 case CONST_DOUBLE:
1930 /* This is like the general case, except that it only counts
1931 the integers representing the constant. */
1932 hash += (unsigned) code + (unsigned) GET_MODE (x);
1933 if (GET_MODE (x) != VOIDmode)
1934 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
1935 {
1936 unsigned tem = XINT (x, i);
1937 hash += tem;
1938 }
1939 else
1940 hash += ((unsigned) CONST_DOUBLE_LOW (x)
1941 + (unsigned) CONST_DOUBLE_HIGH (x));
1942 return hash;
1943
1944 /* Assume there is only one rtx object for any given label. */
1945 case LABEL_REF:
1946 hash
1947 += ((unsigned) LABEL_REF << 7) + (unsigned HOST_WIDE_INT) XEXP (x, 0);
1948 return hash;
1949
1950 case SYMBOL_REF:
1951 hash
1952 += ((unsigned) SYMBOL_REF << 7) + (unsigned HOST_WIDE_INT) XSTR (x, 0);
1953 return hash;
1954
1955 case MEM:
1956 if (MEM_VOLATILE_P (x))
1957 {
1958 do_not_record = 1;
1959 return 0;
1960 }
1961 if (! RTX_UNCHANGING_P (x) || FIXED_BASE_PLUS_P (XEXP (x, 0)))
1962 {
1963 hash_arg_in_memory = 1;
1964 if (MEM_IN_STRUCT_P (x)) hash_arg_in_struct = 1;
1965 }
1966 /* Now that we have already found this special case,
1967 might as well speed it up as much as possible. */
1968 hash += (unsigned) MEM;
1969 x = XEXP (x, 0);
1970 goto repeat;
1971
1972 case PRE_DEC:
1973 case PRE_INC:
1974 case POST_DEC:
1975 case POST_INC:
1976 case PC:
1977 case CC0:
1978 case CALL:
1979 case UNSPEC_VOLATILE:
1980 do_not_record = 1;
1981 return 0;
1982
1983 case ASM_OPERANDS:
1984 if (MEM_VOLATILE_P (x))
1985 {
1986 do_not_record = 1;
1987 return 0;
1988 }
1989 }
1990
1991 i = GET_RTX_LENGTH (code) - 1;
1992 hash += (unsigned) code + (unsigned) GET_MODE (x);
1993 fmt = GET_RTX_FORMAT (code);
1994 for (; i >= 0; i--)
1995 {
1996 if (fmt[i] == 'e')
1997 {
1998 rtx tem = XEXP (x, i);
1999
2000 /* If we are about to do the last recursive call
2001 needed at this level, change it into iteration.
2002 This function is called enough to be worth it. */
2003 if (i == 0)
2004 {
2005 x = tem;
2006 goto repeat;
2007 }
2008 hash += canon_hash (tem, 0);
2009 }
2010 else if (fmt[i] == 'E')
2011 for (j = 0; j < XVECLEN (x, i); j++)
2012 hash += canon_hash (XVECEXP (x, i, j), 0);
2013 else if (fmt[i] == 's')
2014 {
2015 register unsigned char *p = (unsigned char *) XSTR (x, i);
2016 if (p)
2017 while (*p)
2018 hash += *p++;
2019 }
2020 else if (fmt[i] == 'i')
2021 {
2022 register unsigned tem = XINT (x, i);
2023 hash += tem;
2024 }
2025 else
2026 abort ();
2027 }
2028 return hash;
2029 }
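
/* Hashing sketch (register numbers hypothetical): because the REG case
   hashes the quantity number rather than the register number, registers
   known to hold the same value hash identically.  If reg_qty[70] ==
   reg_qty[71], then

     canon_hash ((reg:SI 70), 0) == ((unsigned) REG << 7) + reg_qty[70]
     canon_hash ((reg:SI 71), 0) == ((unsigned) REG << 7) + reg_qty[71]

   are equal, so (plus (reg:SI 70) (const_int 4)) and
   (plus (reg:SI 71) (const_int 4)) land in the same hash bucket and can
   be recognized as equivalent there.  */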
2030
2031 /* Like canon_hash but with no side effects. */
2032
2033 static unsigned
2034 safe_hash (x, mode)
2035 rtx x;
2036 enum machine_mode mode;
2037 {
2038 int save_do_not_record = do_not_record;
2039 int save_hash_arg_in_memory = hash_arg_in_memory;
2040 int save_hash_arg_in_struct = hash_arg_in_struct;
2041 unsigned hash = canon_hash (x, mode);
2042 hash_arg_in_memory = save_hash_arg_in_memory;
2043 hash_arg_in_struct = save_hash_arg_in_struct;
2044 do_not_record = save_do_not_record;
2045 return hash;
2046 }
2047 \f
2048 /* Return 1 iff X and Y would canonicalize into the same thing,
2049 without actually constructing the canonicalization of either one.
2050 If VALIDATE is nonzero,
2051 we assume X is an expression being processed from the rtl
2052 and Y was found in the hash table. We check register refs
2053 in Y for being marked as valid.
2054
2055 If EQUAL_VALUES is nonzero, we allow a register to match a constant value
2056 that is known to be in the register. Ordinarily, we don't allow them
2057 to match, because letting them match would cause unpredictable results
2058 in all the places that search a hash table chain for an equivalent
2059 for a given value. A possible equivalent that has different structure
2060 has its hash code computed from different data. Whether the hash code
2061 is the same as that of the given value is pure luck. */
2062
2063 static int
2064 exp_equiv_p (x, y, validate, equal_values)
2065 rtx x, y;
2066 int validate;
2067 int equal_values;
2068 {
2069 register int i, j;
2070 register enum rtx_code code;
2071 register char *fmt;
2072
2073 /* Note: it is incorrect to assume an expression is equivalent to itself
2074 if VALIDATE is nonzero. */
2075 if (x == y && !validate)
2076 return 1;
2077 if (x == 0 || y == 0)
2078 return x == y;
2079
2080 code = GET_CODE (x);
2081 if (code != GET_CODE (y))
2082 {
2083 if (!equal_values)
2084 return 0;
2085
2086 /* If X is a constant and Y is a register or vice versa, they may be
2087 equivalent. We only have to validate if Y is a register. */
2088 if (CONSTANT_P (x) && GET_CODE (y) == REG
2089 && REGNO_QTY_VALID_P (REGNO (y))
2090 && GET_MODE (y) == qty_mode[reg_qty[REGNO (y)]]
2091 && rtx_equal_p (x, qty_const[reg_qty[REGNO (y)]])
2092 && (! validate || reg_in_table[REGNO (y)] == reg_tick[REGNO (y)]))
2093 return 1;
2094
2095 if (CONSTANT_P (y) && code == REG
2096 && REGNO_QTY_VALID_P (REGNO (x))
2097 && GET_MODE (x) == qty_mode[reg_qty[REGNO (x)]]
2098 && rtx_equal_p (y, qty_const[reg_qty[REGNO (x)]]))
2099 return 1;
2100
2101 return 0;
2102 }
2103
2104 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
2105 if (GET_MODE (x) != GET_MODE (y))
2106 return 0;
2107
2108 switch (code)
2109 {
2110 case PC:
2111 case CC0:
2112 return x == y;
2113
2114 case CONST_INT:
2115 return INTVAL (x) == INTVAL (y);
2116
2117 case LABEL_REF:
2118 return XEXP (x, 0) == XEXP (y, 0);
2119
2120 case SYMBOL_REF:
2121 return XSTR (x, 0) == XSTR (y, 0);
2122
2123 case REG:
2124 {
2125 int regno = REGNO (y);
2126 int endregno
2127 = regno + (regno >= FIRST_PSEUDO_REGISTER ? 1
2128 : HARD_REGNO_NREGS (regno, GET_MODE (y)));
2129 int i;
2130
2131 /* If the quantities are not the same, the expressions are not
2132 equivalent. If they are and we are not to validate, they
2133 are equivalent. Otherwise, ensure all regs are up-to-date. */
2134
2135 if (reg_qty[REGNO (x)] != reg_qty[regno])
2136 return 0;
2137
2138 if (! validate)
2139 return 1;
2140
2141 for (i = regno; i < endregno; i++)
2142 if (reg_in_table[i] != reg_tick[i])
2143 return 0;
2144
2145 return 1;
2146 }
2147
2148 /* For commutative operations, check both orders. */
2149 case PLUS:
2150 case MULT:
2151 case AND:
2152 case IOR:
2153 case XOR:
2154 case NE:
2155 case EQ:
2156 return ((exp_equiv_p (XEXP (x, 0), XEXP (y, 0), validate, equal_values)
2157 && exp_equiv_p (XEXP (x, 1), XEXP (y, 1),
2158 validate, equal_values))
2159 || (exp_equiv_p (XEXP (x, 0), XEXP (y, 1),
2160 validate, equal_values)
2161 && exp_equiv_p (XEXP (x, 1), XEXP (y, 0),
2162 validate, equal_values)));
2163 }
2164
2165 /* Compare the elements. If any pair of corresponding elements
2166 fails to match, return 0 for the whole thing. */
2167
2168 fmt = GET_RTX_FORMAT (code);
2169 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2170 {
2171 switch (fmt[i])
2172 {
2173 case 'e':
2174 if (! exp_equiv_p (XEXP (x, i), XEXP (y, i), validate, equal_values))
2175 return 0;
2176 break;
2177
2178 case 'E':
2179 if (XVECLEN (x, i) != XVECLEN (y, i))
2180 return 0;
2181 for (j = 0; j < XVECLEN (x, i); j++)
2182 if (! exp_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
2183 validate, equal_values))
2184 return 0;
2185 break;
2186
2187 case 's':
2188 if (strcmp (XSTR (x, i), XSTR (y, i)))
2189 return 0;
2190 break;
2191
2192 case 'i':
2193 if (XINT (x, i) != XINT (y, i))
2194 return 0;
2195 break;
2196
2197 case 'w':
2198 if (XWINT (x, i) != XWINT (y, i))
2199 return 0;
2200 break;
2201
2202 case '0':
2203 break;
2204
2205 default:
2206 abort ();
2207 }
2208 }
2209
2210 return 1;
2211 }
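
/* Equivalence sketch (the quantity contents are hypothetical): the
   EQUAL_VALUES flag is what lets a register match the constant it is
   known to hold.  If qty_const[reg_qty[70]] is (const_int 42), the
   quantity's mode is SImode, and (reg:SI 70)'s table entries are still
   valid, then

     exp_equiv_p ((const_int 42), (reg:SI 70), 1, 1)  returns 1
     exp_equiv_p ((const_int 42), (reg:SI 70), 1, 0)  returns 0

   because only the EQUAL_VALUES call may look through the register to
   its recorded value.  */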
2212 \f
2213 /* Return 1 iff any subexpression of X matches Y.
2214 Here we do not require that X or Y be valid (for registers referred to)
2215 for being in the hash table. */
2216
2217 static int
2218 refers_to_p (x, y)
2219 rtx x, y;
2220 {
2221 register int i;
2222 register enum rtx_code code;
2223 register char *fmt;
2224
2225 repeat:
2226 if (x == y)
2227 return 1;
2228 if (x == 0 || y == 0)
2229 return 0;
2230
2231 code = GET_CODE (x);
2232 /* If X as a whole has the same code as Y, they may match.
2233 If so, return 1. */
2234 if (code == GET_CODE (y))
2235 {
2236 if (exp_equiv_p (x, y, 0, 1))
2237 return 1;
2238 }
2239
2240 /* X does not match, so try its subexpressions. */
2241
2242 fmt = GET_RTX_FORMAT (code);
2243 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2244 if (fmt[i] == 'e')
2245 {
2246 if (i == 0)
2247 {
2248 x = XEXP (x, 0);
2249 goto repeat;
2250 }
2251 else
2252 if (refers_to_p (XEXP (x, i), y))
2253 return 1;
2254 }
2255 else if (fmt[i] == 'E')
2256 {
2257 int j;
2258 for (j = 0; j < XVECLEN (x, i); j++)
2259 if (refers_to_p (XVECEXP (x, i, j), y))
2260 return 1;
2261 }
2262
2263 return 0;
2264 }
2265 \f
2266 /* Given ADDR and SIZE (a memory address, and the size of the memory reference),
2267 set PBASE, PSTART, and PEND which correspond to the base of the address,
2268 the starting offset, and ending offset respectively.
2269
2270 ADDR is known to be a nonvarying address. */
2271
2272 /* ??? Despite what the comments say, this function is in fact frequently
2273 passed varying addresses. This does not appear to cause any problems. */
2274
2275 static void
2276 set_nonvarying_address_components (addr, size, pbase, pstart, pend)
2277 rtx addr;
2278 int size;
2279 rtx *pbase;
2280 HOST_WIDE_INT *pstart, *pend;
2281 {
2282 rtx base;
2283 HOST_WIDE_INT start, end;
2284
2285 base = addr;
2286 start = 0;
2287 end = 0;
2288
2289 /* Registers with nonvarying addresses usually have constant equivalents,
2290 but the frame pointer register is also possible. */
2291 if (GET_CODE (base) == REG
2292 && qty_const != 0
2293 && REGNO_QTY_VALID_P (REGNO (base))
2294 && qty_mode[reg_qty[REGNO (base)]] == GET_MODE (base)
2295 && qty_const[reg_qty[REGNO (base)]] != 0)
2296 base = qty_const[reg_qty[REGNO (base)]];
2297 else if (GET_CODE (base) == PLUS
2298 && GET_CODE (XEXP (base, 1)) == CONST_INT
2299 && GET_CODE (XEXP (base, 0)) == REG
2300 && qty_const != 0
2301 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
2302 && (qty_mode[reg_qty[REGNO (XEXP (base, 0))]]
2303 == GET_MODE (XEXP (base, 0)))
2304 && qty_const[reg_qty[REGNO (XEXP (base, 0))]])
2305 {
2306 start = INTVAL (XEXP (base, 1));
2307 base = qty_const[reg_qty[REGNO (XEXP (base, 0))]];
2308 }
2309 /* This can happen as the result of virtual register instantiation,
2310 if the initial offset is too large to be a valid address. */
2311 else if (GET_CODE (base) == PLUS
2312 && GET_CODE (XEXP (base, 0)) == REG
2313 && GET_CODE (XEXP (base, 1)) == REG
2314 && qty_const != 0
2315 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 0)))
2316 && (qty_mode[reg_qty[REGNO (XEXP (base, 0))]]
2317 == GET_MODE (XEXP (base, 0)))
2318 && qty_const[reg_qty[REGNO (XEXP (base, 0))]]
2319 && REGNO_QTY_VALID_P (REGNO (XEXP (base, 1)))
2320 && (qty_mode[reg_qty[REGNO (XEXP (base, 1))]]
2321 == GET_MODE (XEXP (base, 1)))
2322 && qty_const[reg_qty[REGNO (XEXP (base, 1))]])
2323 {
2324 rtx tem = qty_const[reg_qty[REGNO (XEXP (base, 1))]];
2325 base = qty_const[reg_qty[REGNO (XEXP (base, 0))]];
2326
2327 /* One of the two values must be a constant. */
2328 if (GET_CODE (base) != CONST_INT)
2329 {
2330 if (GET_CODE (tem) != CONST_INT)
2331 abort ();
2332 start = INTVAL (tem);
2333 }
2334 else
2335 {
2336 start = INTVAL (base);
2337 base = tem;
2338 }
2339 }
2340
2341 /* Handle everything that we can find inside an address that has been
2342 viewed as constant. */
2343
2344 while (1)
2345 {
2346 /* If no part of this switch does a "continue", the code outside
2347 will exit this loop. */
2348
2349 switch (GET_CODE (base))
2350 {
2351 case LO_SUM:
2352 /* By definition, operand1 of a LO_SUM is the associated constant
2353 address. Use the associated constant address as the base
2354 instead. */
2355 base = XEXP (base, 1);
2356 continue;
2357
2358 case CONST:
2359 /* Strip off CONST. */
2360 base = XEXP (base, 0);
2361 continue;
2362
2363 case PLUS:
2364 if (GET_CODE (XEXP (base, 1)) == CONST_INT)
2365 {
2366 start += INTVAL (XEXP (base, 1));
2367 base = XEXP (base, 0);
2368 continue;
2369 }
2370 break;
2371
2372 case AND:
2373 /* Handle the case of an AND which is the negative of a power of
2374 two. This is used to represent unaligned memory operations. */
2375 if (GET_CODE (XEXP (base, 1)) == CONST_INT
2376 && exact_log2 (- INTVAL (XEXP (base, 1))) > 0)
2377 {
2378 set_nonvarying_address_components (XEXP (base, 0), size,
2379 pbase, pstart, pend);
2380
2381 /* Assume the worst misalignment. START is affected, but not
2382 END, so compensate by adjusting SIZE. Don't lose any
2383 constant we already had. */
2384
2385 size = *pend - *pstart - INTVAL (XEXP (base, 1)) - 1;
2386 start += *pstart + INTVAL (XEXP (base, 1)) + 1;
2387 end += *pend;
2388 base = *pbase;
2389 }
2390 break;
2391 }
2392
2393 break;
2394 }
2395
2396 if (GET_CODE (base) == CONST_INT)
2397 {
2398 start += INTVAL (base);
2399 base = const0_rtx;
2400 }
2401
2402 end = start + size;
2403
2404 /* Set the return values. */
2405 *pbase = base;
2406 *pstart = start;
2407 *pend = end;
2408 }
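
/* Decomposition examples (offsets hypothetical):

     ADDR = (plus (reg fp) (const_int -8)), SIZE = 4
       => *PBASE = (reg fp) (or its known constant equivalent),
	  *PSTART = -8, *PEND = -4;

     ADDR = (const (plus (symbol_ref "a") (const_int 6))), SIZE = 2
       => *PBASE = (symbol_ref "a"), *PSTART = 6, *PEND = 8.

   Two references can conflict only if they agree on *PBASE and their
   [*PSTART, *PEND) ranges intersect; see refers_to_mem_p below.  */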
2409
2410 /* Return 1 iff any subexpression of X refers to memory
2411 at an address of BASE plus some offset
2412 such that any of the bytes' offsets fall between START (inclusive)
2413 and END (exclusive).
2414
2415 The value is undefined if X is a varying address (as determined by
2416 cse_rtx_addr_varies_p). This function is not used in such cases.
2417
2418 When used in the cse pass, `qty_const' is nonzero, and it is used
2419 to treat an address that is a register with a known constant value
2420 as if it were that constant value.
2421 In the loop pass, `qty_const' is zero, so this is not done. */
2422
2423 static int
2424 refers_to_mem_p (x, base, start, end)
2425 rtx x, base;
2426 HOST_WIDE_INT start, end;
2427 {
2428 register HOST_WIDE_INT i;
2429 register enum rtx_code code;
2430 register char *fmt;
2431
2432 repeat:
2433 if (x == 0)
2434 return 0;
2435
2436 code = GET_CODE (x);
2437 if (code == MEM)
2438 {
2439 register rtx addr = XEXP (x, 0); /* Get the address. */
2440 rtx mybase;
2441 HOST_WIDE_INT mystart, myend;
2442
2443 set_nonvarying_address_components (addr, GET_MODE_SIZE (GET_MODE (x)),
2444 &mybase, &mystart, &myend);
2445
2446
2447 /* refers_to_mem_p is never called with varying addresses.
2448 If the base addresses are not equal, there is no chance
2449 of the memory addresses conflicting. */
2450 if (! rtx_equal_p (mybase, base))
2451 return 0;
2452
2453 return myend > start && mystart < end;
2454 }
2455
2456 /* X does not match, so try its subexpressions. */
2457
2458 fmt = GET_RTX_FORMAT (code);
2459 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2460 if (fmt[i] == 'e')
2461 {
2462 if (i == 0)
2463 {
2464 x = XEXP (x, 0);
2465 goto repeat;
2466 }
2467 else
2468 if (refers_to_mem_p (XEXP (x, i), base, start, end))
2469 return 1;
2470 }
2471 else if (fmt[i] == 'E')
2472 {
2473 int j;
2474 for (j = 0; j < XVECLEN (x, i); j++)
2475 if (refers_to_mem_p (XVECEXP (x, i, j), base, start, end))
2476 return 1;
2477 }
2478
2479 return 0;
2480 }
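
/* The conflict test above is the standard half-open interval check.
   A self-contained restatement (illustrative only; not used by the
   pass):  */
#if 0
static int
ranges_overlap (start1, end1, start2, end2)
     HOST_WIDE_INT start1, end1, start2, end2;
{
  /* [START1, END1) and [START2, END2) share at least one byte iff
     each range begins strictly before the other one ends.  */
  return end1 > start2 && start1 < end2;
}
#endif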
2481
2482 /* Nonzero if X refers to memory at a varying address;
2483 except that a register which has at the moment a known constant value
2484 isn't considered variable. */
2485
2486 static int
2487 cse_rtx_addr_varies_p (x)
2488 rtx x;
2489 {
2490 /* We need not check for X and the equivalence class being of the same
2491 mode because if X is equivalent to a constant in some mode, it
2492 doesn't vary in any mode. */
2493
2494 if (GET_CODE (x) == MEM
2495 && GET_CODE (XEXP (x, 0)) == REG
2496 && REGNO_QTY_VALID_P (REGNO (XEXP (x, 0)))
2497 && GET_MODE (XEXP (x, 0)) == qty_mode[reg_qty[REGNO (XEXP (x, 0))]]
2498 && qty_const[reg_qty[REGNO (XEXP (x, 0))]] != 0)
2499 return 0;
2500
2501 if (GET_CODE (x) == MEM
2502 && GET_CODE (XEXP (x, 0)) == PLUS
2503 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2504 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2505 && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 0)))
2506 && (GET_MODE (XEXP (XEXP (x, 0), 0))
2507 == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2508 && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2509 return 0;
2510
2511 /* This can happen as the result of virtual register instantiation, if
2512 the initial constant is too large to be a valid address. This gives
2513 us a three instruction sequence, load large offset into a register,
2514 load fp minus a constant into a register, then a MEM which is the
2515 sum of the two `constant' registers. */
2516 if (GET_CODE (x) == MEM
2517 && GET_CODE (XEXP (x, 0)) == PLUS
2518 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2519 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG
2520 && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 0)))
2521 && (GET_MODE (XEXP (XEXP (x, 0), 0))
2522 == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]])
2523 && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 0))]]
2524 && REGNO_QTY_VALID_P (REGNO (XEXP (XEXP (x, 0), 1)))
2525 && (GET_MODE (XEXP (XEXP (x, 0), 1))
2526 == qty_mode[reg_qty[REGNO (XEXP (XEXP (x, 0), 1))]])
2527 && qty_const[reg_qty[REGNO (XEXP (XEXP (x, 0), 1))]])
2528 return 0;
2529
2530 return rtx_addr_varies_p (x);
2531 }
2532 \f
2533 /* Canonicalize an expression:
2534 replace each register reference inside it
2535 with the "oldest" equivalent register.
2536
2537 If INSN is non-zero and we are replacing a pseudo with a hard register
2538 or vice versa, validate_change is used to ensure that INSN remains valid
2539 after we make our substitution. The calls are made with IN_GROUP non-zero
2540 so apply_change_group must be called upon the outermost return from this
2541 function (unless INSN is zero). The result of apply_change_group can
2542 generally be discarded since the changes we are making are optional. */
2543
2544 static rtx
2545 canon_reg (x, insn)
2546 rtx x;
2547 rtx insn;
2548 {
2549 register int i;
2550 register enum rtx_code code;
2551 register char *fmt;
2552
2553 if (x == 0)
2554 return x;
2555
2556 code = GET_CODE (x);
2557 switch (code)
2558 {
2559 case PC:
2560 case CC0:
2561 case CONST:
2562 case CONST_INT:
2563 case CONST_DOUBLE:
2564 case SYMBOL_REF:
2565 case LABEL_REF:
2566 case ADDR_VEC:
2567 case ADDR_DIFF_VEC:
2568 return x;
2569
2570 case REG:
2571 {
2572 register int first;
2573
2574 /* Never replace a hard reg, because hard regs can appear
2575 in more than one machine mode, and we must preserve the mode
2576 of each occurrence. Also, some hard regs appear in
2577 MEMs that are shared and mustn't be altered. Don't try to
2578 replace any reg that maps to a reg of class NO_REGS. */
2579 if (REGNO (x) < FIRST_PSEUDO_REGISTER
2580 || ! REGNO_QTY_VALID_P (REGNO (x)))
2581 return x;
2582
2583 first = qty_first_reg[reg_qty[REGNO (x)]];
2584 return (first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
2585 : REGNO_REG_CLASS (first) == NO_REGS ? x
2586 : gen_rtx (REG, qty_mode[reg_qty[REGNO (x)]], first));
2587 }
2588 }
2589
2590 fmt = GET_RTX_FORMAT (code);
2591 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2592 {
2593 register int j;
2594
2595 if (fmt[i] == 'e')
2596 {
2597 rtx new = canon_reg (XEXP (x, i), insn);
2598
2599 /* If replacing pseudo with hard reg or vice versa, ensure the
2600 insn remains valid. Likewise if the insn has MATCH_DUPs. */
2601 if (insn != 0 && new != 0
2602 && GET_CODE (new) == REG && GET_CODE (XEXP (x, i)) == REG
2603 && (((REGNO (new) < FIRST_PSEUDO_REGISTER)
2604 != (REGNO (XEXP (x, i)) < FIRST_PSEUDO_REGISTER))
2605 || insn_n_dups[recog_memoized (insn)] > 0))
2606 validate_change (insn, &XEXP (x, i), new, 1);
2607 else
2608 XEXP (x, i) = new;
2609 }
2610 else if (fmt[i] == 'E')
2611 for (j = 0; j < XVECLEN (x, i); j++)
2612 XVECEXP (x, i, j) = canon_reg (XVECEXP (x, i, j), insn);
2613 }
2614
2615 return x;
2616 }
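
/* Canonicalization sketch (register numbers hypothetical): assume
   pseudos 65, 70 and 71 are in one quantity class whose oldest member
   (qty_first_reg) is 65.  Then

     canon_reg ((plus:SI (reg 70) (reg 71)), insn)

   rewrites both operands and yields (plus:SI (reg 65) (reg 65)).
   Since both replacements are pseudo-for-pseudo, no validate_change
   calls are needed unless the insn has MATCH_DUPs.  */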
2617 \f
2618 /* LOC is a location within INSN that is an operand address (the contents of
2619 a MEM). Find the best equivalent address to use that is valid for this
2620 insn.
2621
2622 On most CISC machines, complicated address modes are costly, and rtx_cost
2623 is a good approximation for that cost. However, most RISC machines have
2624 only a few (usually only one) memory reference formats. If an address is
2625 valid at all, it is often just as cheap as any other address. Hence, for
2626 RISC machines, we use the configuration macro `ADDRESS_COST' to compare the
2627 costs of various addresses. For two addresses of equal cost, choose the one
2628 with the highest `rtx_cost' value as that has the potential of eliminating
2629 the most insns. For equal costs, we choose the first in the equivalence
2630 class. Note that we ignore the fact that pseudo registers are cheaper
2631 than hard registers here because we would also prefer the pseudo registers.
2632 */
2633
2634 static void
2635 find_best_addr (insn, loc)
2636 rtx insn;
2637 rtx *loc;
2638 {
2639 struct table_elt *elt, *p;
2640 rtx addr = *loc;
2641 int our_cost;
2642 int found_better = 1;
2643 int save_do_not_record = do_not_record;
2644 int save_hash_arg_in_memory = hash_arg_in_memory;
2645 int save_hash_arg_in_struct = hash_arg_in_struct;
2646 int addr_volatile;
2647 int regno;
2648 unsigned hash;
2649
2650 /* Do not try to replace constant addresses or addresses of local and
2651 argument slots. These MEM expressions are made only once and inserted
2652 in many instructions, as well as being used to control symbol table
2653 output. It is not safe to clobber them.
2654
2655 There are some uncommon cases where the address is already in a register
2656 for some reason, but we cannot take advantage of that because we have
2657 no easy way to unshare the MEM. In addition, looking up all stack
2658 addresses is costly. */
2659 if ((GET_CODE (addr) == PLUS
2660 && GET_CODE (XEXP (addr, 0)) == REG
2661 && GET_CODE (XEXP (addr, 1)) == CONST_INT
2662 && (regno = REGNO (XEXP (addr, 0)),
2663 regno == FRAME_POINTER_REGNUM || regno == HARD_FRAME_POINTER_REGNUM
2664 || regno == ARG_POINTER_REGNUM))
2665 || (GET_CODE (addr) == REG
2666 && (regno = REGNO (addr), regno == FRAME_POINTER_REGNUM
2667 || regno == HARD_FRAME_POINTER_REGNUM
2668 || regno == ARG_POINTER_REGNUM))
2669 || CONSTANT_ADDRESS_P (addr))
2670 return;
2671
2672 /* If this address is not simply a register, try to fold it. This will
2673 sometimes simplify the expression. Many simplifications
2674 will not be valid, but some, usually applying the associative rule, will
2675 be valid and produce better code. */
2676 if (GET_CODE (addr) != REG)
2677 {
2678 rtx folded = fold_rtx (copy_rtx (addr), NULL_RTX);
2679
2680 if (1
2681 #ifdef ADDRESS_COST
2682 && (ADDRESS_COST (folded) < ADDRESS_COST (addr)
2683 || (ADDRESS_COST (folded) == ADDRESS_COST (addr)
2684 && rtx_cost (folded) > rtx_cost (addr)))
2685 #else
2686 && rtx_cost (folded) < rtx_cost (addr)
2687 #endif
2688 && validate_change (insn, loc, folded, 0))
2689 addr = folded;
2690 }
2691
2692 /* If this address is not in the hash table, we can't look for equivalences
2693 of the whole address. Also, ignore if volatile. */
2694
2695 do_not_record = 0;
2696 hash = HASH (addr, Pmode);
2697 addr_volatile = do_not_record;
2698 do_not_record = save_do_not_record;
2699 hash_arg_in_memory = save_hash_arg_in_memory;
2700 hash_arg_in_struct = save_hash_arg_in_struct;
2701
2702 if (addr_volatile)
2703 return;
2704
2705 elt = lookup (addr, hash, Pmode);
2706
2707 #ifndef ADDRESS_COST
2708 if (elt)
2709 {
2710 our_cost = elt->cost;
2711
2712 /* Find the lowest cost below ours that works. */
2713 for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
2714 if (elt->cost < our_cost
2715 && (GET_CODE (elt->exp) == REG
2716 || exp_equiv_p (elt->exp, elt->exp, 1, 0))
2717 && validate_change (insn, loc,
2718 canon_reg (copy_rtx (elt->exp), NULL_RTX), 0))
2719 return;
2720 }
2721 #else
2722
2723 if (elt)
2724 {
2725 /* We need to find the best (under the criteria documented above) entry
2726 in the class that is valid. We use the `flag' field to indicate
2727 choices that were invalid and iterate until we can't find a better
2728 one that hasn't already been tried. */
2729
2730 for (p = elt->first_same_value; p; p = p->next_same_value)
2731 p->flag = 0;
2732
2733 while (found_better)
2734 {
2735 int best_addr_cost = ADDRESS_COST (*loc);
2736 int best_rtx_cost = (elt->cost + 1) >> 1;
2737 struct table_elt *best_elt = elt;
2738
2739 found_better = 0;
2740 for (p = elt->first_same_value; p; p = p->next_same_value)
2741 if (! p->flag
2742 && (GET_CODE (p->exp) == REG
2743 || exp_equiv_p (p->exp, p->exp, 1, 0))
2744 && (ADDRESS_COST (p->exp) < best_addr_cost
2745 || (ADDRESS_COST (p->exp) == best_addr_cost
2746 && (p->cost + 1) >> 1 > best_rtx_cost)))
2747 {
2748 found_better = 1;
2749 best_addr_cost = ADDRESS_COST (p->exp);
2750 best_rtx_cost = (p->cost + 1) >> 1;
2751 best_elt = p;
2752 }
2753
2754 if (found_better)
2755 {
2756 if (validate_change (insn, loc,
2757 canon_reg (copy_rtx (best_elt->exp),
2758 NULL_RTX), 0))
2759 return;
2760 else
2761 best_elt->flag = 1;
2762 }
2763 }
2764 }
2765
2766 /* If the address is a binary operation with the first operand a register
2767 and the second a constant, do the same as above, but looking for
2768 equivalences of the register. Then try to simplify before checking for
2769 the best address to use. This catches a few cases: First is when we
2770 have REG+const and the register is another REG+const. We can often merge
2771 the constants and eliminate one insn and one register. It may also be
2772 that a machine has a cheap REG+REG+const. Finally, this improves the
2773 code on the Alpha for unaligned byte stores. */
2774
2775 if (flag_expensive_optimizations
2776 && (GET_RTX_CLASS (GET_CODE (*loc)) == '2'
2777 || GET_RTX_CLASS (GET_CODE (*loc)) == 'c')
2778 && GET_CODE (XEXP (*loc, 0)) == REG
2779 && GET_CODE (XEXP (*loc, 1)) == CONST_INT)
2780 {
2781 rtx c = XEXP (*loc, 1);
2782
2783 do_not_record = 0;
2784 hash = HASH (XEXP (*loc, 0), Pmode);
2785 do_not_record = save_do_not_record;
2786 hash_arg_in_memory = save_hash_arg_in_memory;
2787 hash_arg_in_struct = save_hash_arg_in_struct;
2788
2789 elt = lookup (XEXP (*loc, 0), hash, Pmode);
2790 if (elt == 0)
2791 return;
2792
2793 /* We need to find the best (under the criteria documented above) entry
2794 in the class that is valid. We use the `flag' field to indicate
2795 choices that were invalid and iterate until we can't find a better
2796 one that hasn't already been tried. */
2797
2798 for (p = elt->first_same_value; p; p = p->next_same_value)
2799 p->flag = 0;
2800
2801 while (found_better)
2802 {
2803 int best_addr_cost = ADDRESS_COST (*loc);
2804 int best_rtx_cost = (COST (*loc) + 1) >> 1;
2805 struct table_elt *best_elt = elt;
2806 rtx best_rtx = *loc;
2807 int count;
2808
2809 /* This is at worst case an O(n^2) algorithm, so limit our search
2810 to the first 32 elements on the list. This avoids trouble
2811 compiling code with very long basic blocks that can easily
2812 call cse_gen_binary so many times that we run out of memory. */
2813
2814 found_better = 0;
2815 for (p = elt->first_same_value, count = 0;
2816 p && count < 32;
2817 p = p->next_same_value, count++)
2818 if (! p->flag
2819 && (GET_CODE (p->exp) == REG
2820 || exp_equiv_p (p->exp, p->exp, 1, 0)))
2821 {
2822 rtx new = cse_gen_binary (GET_CODE (*loc), Pmode, p->exp, c);
2823
2824 if ((ADDRESS_COST (new) < best_addr_cost
2825 || (ADDRESS_COST (new) == best_addr_cost
2826 && (COST (new) + 1) >> 1 > best_rtx_cost)))
2827 {
2828 found_better = 1;
2829 best_addr_cost = ADDRESS_COST (new);
2830 best_rtx_cost = (COST (new) + 1) >> 1;
2831 best_elt = p;
2832 best_rtx = new;
2833 }
2834 }
2835
2836 if (found_better)
2837 {
2838 if (validate_change (insn, loc,
2839 canon_reg (copy_rtx (best_rtx),
2840 NULL_RTX), 0))
2841 return;
2842 else
2843 best_elt->flag = 1;
2844 }
2845 }
2846 }
2847 #endif
2848 }
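
/* Selection sketch (costs and register numbers hypothetical), for the
   ADDRESS_COST case: suppose *LOC is (plus (reg 70) (const_int 4)) and
   the table records (reg 70) == (plus (reg 65) (const_int 12)).  The
   second loop above builds, for each valid equivalent P->exp of
   (reg 70), the candidate

     new = cse_gen_binary (PLUS, Pmode, p->exp, c);

   and substitutes it when its ADDRESS_COST beats the current best
   (ties broken toward the higher rtx_cost) and validate_change accepts
   it.  When the constants merge, as the comment above notes, one insn
   and one register can be eliminated.  */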
2849 \f
2850 /* Given an operation (CODE, *PARG1, *PARG2), where code is a comparison
2851 operation (EQ, NE, GT, etc.), follow it back through the hash table to
2852 find what values are actually being compared.
2853
2854 *PARG1 and *PARG2 are updated to contain the rtx representing the values
2855 actually being compared. For example, if *PARG1 was (cc0) and *PARG2
2856 was (const_int 0), *PARG1 and *PARG2 will be set to the objects that were
2857 compared to produce cc0.
2858
2859 The return value is the comparison operator and is either the code of
2860 the comparison found or the code corresponding to its inverse. */
2861
2862 static enum rtx_code
2863 find_comparison_args (code, parg1, parg2, pmode1, pmode2)
2864 enum rtx_code code;
2865 rtx *parg1, *parg2;
2866 enum machine_mode *pmode1, *pmode2;
2867 {
2868 rtx arg1, arg2;
2869
2870 arg1 = *parg1, arg2 = *parg2;
2871
2872 /* If ARG2 is const0_rtx, see what ARG1 is equivalent to. */
2873
2874 while (arg2 == CONST0_RTX (GET_MODE (arg1)))
2875 {
2876 /* Set non-zero when we find something of interest. */
2877 rtx x = 0;
2878 int reverse_code = 0;
2879 struct table_elt *p = 0;
2880
2881 /* If arg1 is a COMPARE, extract the comparison arguments from it.
2882 On machines with CC0, this is the only case that can occur, since
2883 fold_rtx will return the COMPARE or item being compared with zero
2884 when given CC0. */
2885
2886 if (GET_CODE (arg1) == COMPARE && arg2 == const0_rtx)
2887 x = arg1;
2888
2889 /* If ARG1 is a comparison operator and CODE is testing for
2890 STORE_FLAG_VALUE, get the inner arguments. */
2891
2892 else if (GET_RTX_CLASS (GET_CODE (arg1)) == '<')
2893 {
2894 if (code == NE
2895 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
2896 && code == LT && STORE_FLAG_VALUE == -1)
2897 #ifdef FLOAT_STORE_FLAG_VALUE
2898 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
2899 && FLOAT_STORE_FLAG_VALUE < 0)
2900 #endif
2901 )
2902 x = arg1;
2903 else if (code == EQ
2904 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
2905 && code == GE && STORE_FLAG_VALUE == -1)
2906 #ifdef FLOAT_STORE_FLAG_VALUE
2907 || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_FLOAT
2908 && FLOAT_STORE_FLAG_VALUE < 0)
2909 #endif
2910 )
2911 x = arg1, reverse_code = 1;
2912 }
2913
2914 /* ??? We could also check for
2915
2916 (ne (and (eq (...) (const_int 1))) (const_int 0))
2917
2918 and related forms, but let's wait until we see them occurring. */
2919
2920 if (x == 0)
2921 /* Look up ARG1 in the hash table and see if it has an equivalence
2922 that lets us see what is being compared. */
2923 p = lookup (arg1, safe_hash (arg1, GET_MODE (arg1)) % NBUCKETS,
2924 GET_MODE (arg1));
2925 if (p) p = p->first_same_value;
2926
2927 for (; p; p = p->next_same_value)
2928 {
2929 enum machine_mode inner_mode = GET_MODE (p->exp);
2930
2931 /* If the entry isn't valid, skip it. */
2932 if (! exp_equiv_p (p->exp, p->exp, 1, 0))
2933 continue;
2934
2935 if (GET_CODE (p->exp) == COMPARE
2936 /* Another possibility is that this machine has a compare insn
2937 that includes the comparison code. In that case, ARG1 would
2938 be equivalent to a comparison operation that would set ARG1 to
2939 either STORE_FLAG_VALUE or zero. If this is an NE operation,
2940 ORIG_CODE is the actual comparison being done; if it is an EQ,
2941 we must reverse ORIG_CODE. On machines with a negative value
2942 for STORE_FLAG_VALUE, also look at LT and GE operations. */
2943 || ((code == NE
2944 || (code == LT
2945 && GET_MODE_CLASS (inner_mode) == MODE_INT
2946 && (GET_MODE_BITSIZE (inner_mode)
2947 <= HOST_BITS_PER_WIDE_INT)
2948 && (STORE_FLAG_VALUE
2949 & ((HOST_WIDE_INT) 1
2950 << (GET_MODE_BITSIZE (inner_mode) - 1))))
2951 #ifdef FLOAT_STORE_FLAG_VALUE
2952 || (code == LT
2953 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
2954 && FLOAT_STORE_FLAG_VALUE < 0)
2955 #endif
2956 )
2957 && GET_RTX_CLASS (GET_CODE (p->exp)) == '<'))
2958 {
2959 x = p->exp;
2960 break;
2961 }
2962 else if ((code == EQ
2963 || (code == GE
2964 && GET_MODE_CLASS (inner_mode) == MODE_INT
2965 && (GET_MODE_BITSIZE (inner_mode)
2966 <= HOST_BITS_PER_WIDE_INT)
2967 && (STORE_FLAG_VALUE
2968 & ((HOST_WIDE_INT) 1
2969 << (GET_MODE_BITSIZE (inner_mode) - 1))))
2970 #ifdef FLOAT_STORE_FLAG_VALUE
2971 || (code == GE
2972 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
2973 && FLOAT_STORE_FLAG_VALUE < 0)
2974 #endif
2975 )
2976 && GET_RTX_CLASS (GET_CODE (p->exp)) == '<')
2977 {
2978 reverse_code = 1;
2979 x = p->exp;
2980 break;
2981 }
2982
2983 /* If this is fp + constant, the equivalent is a better operand since
2984 it may let us predict the value of the comparison. */
2985 else if (NONZERO_BASE_PLUS_P (p->exp))
2986 {
2987 arg1 = p->exp;
2988 continue;
2989 }
2990 }
2991
2992 /* If we didn't find a useful equivalence for ARG1, we are done.
2993 Otherwise, set up for the next iteration. */
2994 if (x == 0)
2995 break;
2996
2997 arg1 = XEXP (x, 0), arg2 = XEXP (x, 1);
2998 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
2999 code = GET_CODE (x);
3000
3001 if (reverse_code)
3002 code = reverse_condition (code);
3003 }
3004
3005 /* Return our results. Return the modes from before fold_rtx
3006 because fold_rtx might produce const_int, and then it's too late. */
3007 *pmode1 = GET_MODE (arg1), *pmode2 = GET_MODE (arg2);
3008 *parg1 = fold_rtx (arg1, 0), *parg2 = fold_rtx (arg2, 0);
3009
3010 return code;
3011 }
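
/* Tracing sketch (the recorded equivalence is hypothetical): suppose
   CODE == EQ, *PARG1 == (reg 70), *PARG2 == (const_int 0),
   STORE_FLAG_VALUE == 1, and the table records
   (reg 70) == (lt:SI (reg 65) (reg 66)).  The loop above finds the LT,
   notes that an EQ against zero inverts it (reverse_code), and the
   function returns GE with *PARG1 = (reg 65) and *PARG2 = (reg 66).  */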
3012 \f
3013 /* Try to simplify a unary operation CODE whose output mode is to be
3014 MODE with input operand OP whose mode was originally OP_MODE.
3015 Return zero if no simplification can be made. */
3016
3017 rtx
3018 simplify_unary_operation (code, mode, op, op_mode)
3019 enum rtx_code code;
3020 enum machine_mode mode;
3021 rtx op;
3022 enum machine_mode op_mode;
3023 {
3024 register int width = GET_MODE_BITSIZE (mode);
3025
3026 /* The order of these tests is critical so that, for example, we don't
3027 check the wrong mode (input vs. output) for a conversion operation,
3028 such as FIX. At some point, this should be simplified. */
3029
3030 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
3031
3032 if (code == FLOAT && GET_MODE (op) == VOIDmode
3033 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3034 {
3035 HOST_WIDE_INT hv, lv;
3036 REAL_VALUE_TYPE d;
3037
3038 if (GET_CODE (op) == CONST_INT)
3039 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
3040 else
3041 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
3042
3043 #ifdef REAL_ARITHMETIC
3044 REAL_VALUE_FROM_INT (d, lv, hv, mode);
3045 #else
3046 if (hv < 0)
3047 {
3048 d = (double) (~ hv);
3049 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3050 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3051 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
3052 d = (- d - 1.0);
3053 }
3054 else
3055 {
3056 d = (double) hv;
3057 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3058 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3059 d += (double) (unsigned HOST_WIDE_INT) lv;
3060 }
3061 #endif /* REAL_ARITHMETIC */
3062 d = real_value_truncate (mode, d);
3063 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3064 }
3065 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
3066 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3067 {
3068 HOST_WIDE_INT hv, lv;
3069 REAL_VALUE_TYPE d;
3070
3071 if (GET_CODE (op) == CONST_INT)
3072 lv = INTVAL (op), hv = INTVAL (op) < 0 ? -1 : 0;
3073 else
3074 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
3075
3076 if (op_mode == VOIDmode)
3077 {
3078 /* We don't know how to interpret negative-looking numbers in
3079 this case, so don't try to fold those. */
3080 if (hv < 0)
3081 return 0;
3082 }
3083 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
3084 ;
3085 else
3086 hv = 0, lv &= GET_MODE_MASK (op_mode);
3087
3088 #ifdef REAL_ARITHMETIC
3089 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
3090 #else
3091
3092 d = (double) (unsigned HOST_WIDE_INT) hv;
3093 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
3094 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
3095 d += (double) (unsigned HOST_WIDE_INT) lv;
3096 #endif /* REAL_ARITHMETIC */
3097 d = real_value_truncate (mode, d);
3098 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3099 }
3100 #endif
3101
3102 if (GET_CODE (op) == CONST_INT
3103 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
3104 {
3105 register HOST_WIDE_INT arg0 = INTVAL (op);
3106 register HOST_WIDE_INT val;
3107
3108 switch (code)
3109 {
3110 case NOT:
3111 val = ~ arg0;
3112 break;
3113
3114 case NEG:
3115 val = - arg0;
3116 break;
3117
3118 case ABS:
3119 val = (arg0 >= 0 ? arg0 : - arg0);
3120 break;
3121
3122 case FFS:
3123 /* Don't use ffs here. Instead, get the low-order bit and then its
3124 number. If arg0 is zero, this will return 0, as desired. */
3125 arg0 &= GET_MODE_MASK (mode);
3126 val = exact_log2 (arg0 & (- arg0)) + 1;
3127 break;
3128
3129 case TRUNCATE:
3130 val = arg0;
3131 break;
3132
3133 case ZERO_EXTEND:
3134 if (op_mode == VOIDmode)
3135 op_mode = mode;
3136 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
3137 {
3138 /* If we were really extending the mode,
3139 we would have to distinguish between zero-extension
3140 and sign-extension. */
3141 if (width != GET_MODE_BITSIZE (op_mode))
3142 abort ();
3143 val = arg0;
3144 }
3145 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
3146 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
3147 else
3148 return 0;
3149 break;
3150
3151 case SIGN_EXTEND:
3152 if (op_mode == VOIDmode)
3153 op_mode = mode;
3154 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
3155 {
3156 /* If we were really extending the mode,
3157 we would have to distinguish between zero-extension
3158 and sign-extension. */
3159 if (width != GET_MODE_BITSIZE (op_mode))
3160 abort ();
3161 val = arg0;
3162 }
3163 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
3164 {
3165 val
3166 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
3167 if (val
3168 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
3169 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
3170 }
3171 else
3172 return 0;
3173 break;
3174
3175 case SQRT:
3176 return 0;
3177
3178 default:
3179 abort ();
3180 }
3181
3182 /* Clear the bits that don't belong in our mode,
3183 unless they and our sign bit are all one.
3184 So we get either a reasonable negative value or a reasonable
3185 unsigned value for this mode. */
3186 if (width < HOST_BITS_PER_WIDE_INT
3187 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3188 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3189 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3190
3191 return GEN_INT (val);
3192 }
3193
3194 /* We can do some operations on integer CONST_DOUBLEs. Also allow
3195 for a DImode operation on a CONST_INT. */
3196 else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_WIDE_INT * 2
3197 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
3198 {
3199 HOST_WIDE_INT l1, h1, lv, hv;
3200
3201 if (GET_CODE (op) == CONST_DOUBLE)
3202 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
3203 else
3204 l1 = INTVAL (op), h1 = l1 < 0 ? -1 : 0;
3205
3206 switch (code)
3207 {
3208 case NOT:
3209 lv = ~ l1;
3210 hv = ~ h1;
3211 break;
3212
3213 case NEG:
3214 neg_double (l1, h1, &lv, &hv);
3215 break;
3216
3217 case ABS:
3218 if (h1 < 0)
3219 neg_double (l1, h1, &lv, &hv);
3220 else
3221 lv = l1, hv = h1;
3222 break;
3223
3224 case FFS:
3225 hv = 0;
3226 if (l1 == 0)
3227 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
3228 else
3229 lv = exact_log2 (l1 & (-l1)) + 1;
3230 break;
3231
3232 case TRUNCATE:
3233 /* This is just a change-of-mode, so do nothing. */
3234 lv = l1, hv = h1;
3235 break;
3236
3237 case ZERO_EXTEND:
3238 if (op_mode == VOIDmode
3239 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
3240 return 0;
3241
3242 hv = 0;
3243 lv = l1 & GET_MODE_MASK (op_mode);
3244 break;
3245
3246 case SIGN_EXTEND:
3247 if (op_mode == VOIDmode
3248 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
3249 return 0;
3250 else
3251 {
3252 lv = l1 & GET_MODE_MASK (op_mode);
3253 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
3254 && (lv & ((HOST_WIDE_INT) 1
3255 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
3256 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
3257
3258 hv = (lv < 0) ? ~ (HOST_WIDE_INT) 0 : 0;
3259 }
3260 break;
3261
3262 case SQRT:
3263 return 0;
3264
3265 default:
3266 return 0;
3267 }
3268
3269 return immed_double_const (lv, hv, mode);
3270 }
3271
3272 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3273 else if (GET_CODE (op) == CONST_DOUBLE
3274 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3275 {
3276 REAL_VALUE_TYPE d;
3277 jmp_buf handler;
3278 rtx x;
3279
3280 if (setjmp (handler))
3281 /* There used to be a warning here, but that is inadvisable.
3282 People may want to cause traps, and the natural way
3283 to do it should not get a warning. */
3284 return 0;
3285
3286 set_float_handler (handler);
3287
3288 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
3289
3290 switch (code)
3291 {
3292 case NEG:
3293 d = REAL_VALUE_NEGATE (d);
3294 break;
3295
3296 case ABS:
3297 if (REAL_VALUE_NEGATIVE (d))
3298 d = REAL_VALUE_NEGATE (d);
3299 break;
3300
3301 case FLOAT_TRUNCATE:
3302 d = real_value_truncate (mode, d);
3303 break;
3304
3305 case FLOAT_EXTEND:
3306 /* All this does is change the mode. */
3307 break;
3308
3309 case FIX:
3310 d = REAL_VALUE_RNDZINT (d);
3311 break;
3312
3313 case UNSIGNED_FIX:
3314 d = REAL_VALUE_UNSIGNED_RNDZINT (d);
3315 break;
3316
3317 case SQRT:
3318 return 0;
3319
3320 default:
3321 abort ();
3322 }
3323
3324 x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
3325 set_float_handler (NULL_PTR);
3326 return x;
3327 }
3328
3329 else if (GET_CODE (op) == CONST_DOUBLE
3330 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
3331 && GET_MODE_CLASS (mode) == MODE_INT
3332 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
3333 {
3334 REAL_VALUE_TYPE d;
3335 jmp_buf handler;
3336 HOST_WIDE_INT val;
3337
3338 if (setjmp (handler))
3339 return 0;
3340
3341 set_float_handler (handler);
3342
3343 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
3344
3345 switch (code)
3346 {
3347 case FIX:
3348 val = REAL_VALUE_FIX (d);
3349 break;
3350
3351 case UNSIGNED_FIX:
3352 val = REAL_VALUE_UNSIGNED_FIX (d);
3353 break;
3354
3355 default:
3356 abort ();
3357 }
3358
3359 set_float_handler (NULL_PTR);
3360
3361 /* Clear the bits that don't belong in our mode,
3362 unless they and our sign bit are all one.
3363 So we get either a reasonable negative value or a reasonable
3364 unsigned value for this mode. */
3365 if (width < HOST_BITS_PER_WIDE_INT
3366 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3367 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3368 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3369
3370 /* If this would be an entire word for the target, but is not for
3371 the host, then sign-extend on the host so that the number will look
3372 the same way on the host that it would on the target.
3373
3374 For example, when building a 64 bit alpha hosted 32 bit sparc
3375 targeted compiler, then we want the 32 bit unsigned value -1 to be
3376 represented as a 64 bit value -1, and not as 0x00000000ffffffff.
3377 The latter confuses the sparc backend. */
3378
3379 if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
3380 && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
3381 val |= ((HOST_WIDE_INT) (-1) << width);
3382
3383 return GEN_INT (val);
3384 }
3385 #endif
3386 /* This was formerly used only for non-IEEE float.
3387 eggert@twinsun.com says it is safe for IEEE also. */
3388 else
3389 {
3390 /* There are some simplifications we can do even if the operands
3391 aren't constant. */
3392 switch (code)
3393 {
3394 case NEG:
3395 case NOT:
3396 /* (not (not X)) == X, similarly for NEG. */
3397 if (GET_CODE (op) == code)
3398 return XEXP (op, 0);
3399 break;
3400
3401 case SIGN_EXTEND:
3402 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
3403 becomes just the MINUS if its mode is MODE. This allows
3404 folding switch statements on machines using casesi (such as
3405 the Vax). */
3406 if (GET_CODE (op) == TRUNCATE
3407 && GET_MODE (XEXP (op, 0)) == mode
3408 && GET_CODE (XEXP (op, 0)) == MINUS
3409 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
3410 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
3411 return XEXP (op, 0);
3412
3413 #ifdef POINTERS_EXTEND_UNSIGNED
3414 if (! POINTERS_EXTEND_UNSIGNED
3415 && mode == Pmode && GET_MODE (op) == ptr_mode
3416 && CONSTANT_P (op))
3417 return convert_memory_address (Pmode, op);
3418 #endif
3419 break;
3420
3421 #ifdef POINTERS_EXTEND_UNSIGNED
3422 case ZERO_EXTEND:
3423 if (POINTERS_EXTEND_UNSIGNED
3424 && mode == Pmode && GET_MODE (op) == ptr_mode
3425 && CONSTANT_P (op))
3426 return convert_memory_address (Pmode, op);
3427 break;
3428 #endif
3429 }
3430
3431 return 0;
3432 }
3433 }
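
/* Arithmetic sketch (illustrative and kept out of compilation): the
   masking done for SIGN_EXTEND above, restated for an 8-bit OP_MODE
   held in a wider HOST_WIDE_INT.  */
#if 0
{
  HOST_WIDE_INT val = 0x90;		/* low 8 bits of the operand */

  val &= ~((HOST_WIDE_INT) (-1) << 8);	/* mask to op_mode: 0x90 */
  if (val & ((HOST_WIDE_INT) 1 << 7))	/* sign bit of op_mode set */
    val -= (HOST_WIDE_INT) 1 << 8;	/* 0x90 - 0x100 == -112 */

  /* So (sign_extend:SI (const_int 0x90)) with a QImode operand folds
     to (const_int -112), while ZERO_EXTEND would yield 144.  */
}
#endif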
3434 \f
3435 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
3436 and OP1. Return 0 if no simplification is possible.
3437
3438 Don't use this for relational operations such as EQ or LT.
3439 Use simplify_relational_operation instead. */
3440
3441 rtx
3442 simplify_binary_operation (code, mode, op0, op1)
3443 enum rtx_code code;
3444 enum machine_mode mode;
3445 rtx op0, op1;
3446 {
3447 register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3448 HOST_WIDE_INT val;
3449 int width = GET_MODE_BITSIZE (mode);
3450 rtx tem;
3451
3452 /* Relational operations don't work here. We must know the mode
3453 of the operands in order to do the comparison correctly.
3454 Assuming a full word can give incorrect results.
3455 Consider comparing 128 with -128 in QImode. */
3456
3457 if (GET_RTX_CLASS (code) == '<')
3458 abort ();
3459
3460 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3461 if (GET_MODE_CLASS (mode) == MODE_FLOAT
3462 && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
3463 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3464 {
3465 REAL_VALUE_TYPE f0, f1, value;
3466 jmp_buf handler;
3467
3468 if (setjmp (handler))
3469 return 0;
3470
3471 set_float_handler (handler);
3472
3473 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3474 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3475 f0 = real_value_truncate (mode, f0);
3476 f1 = real_value_truncate (mode, f1);
3477
3478 #ifdef REAL_ARITHMETIC
3479 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
3480 #else
3481 switch (code)
3482 {
3483 case PLUS:
3484 value = f0 + f1;
3485 break;
3486 case MINUS:
3487 value = f0 - f1;
3488 break;
3489 case MULT:
3490 value = f0 * f1;
3491 break;
3492 case DIV:
3493 #ifndef REAL_INFINITY
3494 if (f1 == 0)
3495 return 0;
3496 #endif
3497 value = f0 / f1;
3498 break;
3499 case SMIN:
3500 value = MIN (f0, f1);
3501 break;
3502 case SMAX:
3503 value = MAX (f0, f1);
3504 break;
3505 default:
3506 abort ();
3507 }
3508 #endif
3509
3510 value = real_value_truncate (mode, value);
3511 set_float_handler (NULL_PTR);
3512 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
3513 }
3514 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3515
3516 /* We can fold some multi-word operations. */
3517 if (GET_MODE_CLASS (mode) == MODE_INT
3518 && width == HOST_BITS_PER_WIDE_INT * 2
3519 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
3520 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
3521 {
3522 HOST_WIDE_INT l1, l2, h1, h2, lv, hv;
3523
3524 if (GET_CODE (op0) == CONST_DOUBLE)
3525 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3526 else
3527 l1 = INTVAL (op0), h1 = l1 < 0 ? -1 : 0;
3528
3529 if (GET_CODE (op1) == CONST_DOUBLE)
3530 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3531 else
3532 l2 = INTVAL (op1), h2 = l2 < 0 ? -1 : 0;
3533
3534 switch (code)
3535 {
3536 case MINUS:
3537 /* A - B == A + (-B). */
3538 neg_double (l2, h2, &lv, &hv);
3539 l2 = lv, h2 = hv;
3540
3541 /* ... fall through ... */
3542
3543 case PLUS:
3544 add_double (l1, h1, l2, h2, &lv, &hv);
3545 break;
3546
3547 case MULT:
3548 mul_double (l1, h1, l2, h2, &lv, &hv);
3549 break;
3550
3551 case DIV: case MOD: case UDIV: case UMOD:
3552 /* We'd need to include tree.h to do this and it doesn't seem worth
3553 it. */
3554 return 0;
3555
3556 case AND:
3557 lv = l1 & l2, hv = h1 & h2;
3558 break;
3559
3560 case IOR:
3561 lv = l1 | l2, hv = h1 | h2;
3562 break;
3563
3564 case XOR:
3565 lv = l1 ^ l2, hv = h1 ^ h2;
3566 break;
3567
3568 case SMIN:
3569 if (h1 < h2
3570 || (h1 == h2
3571 && ((unsigned HOST_WIDE_INT) l1
3572 < (unsigned HOST_WIDE_INT) l2)))
3573 lv = l1, hv = h1;
3574 else
3575 lv = l2, hv = h2;
3576 break;
3577
3578 case SMAX:
3579 if (h1 > h2
3580 || (h1 == h2
3581 && ((unsigned HOST_WIDE_INT) l1
3582 > (unsigned HOST_WIDE_INT) l2)))
3583 lv = l1, hv = h1;
3584 else
3585 lv = l2, hv = h2;
3586 break;
3587
3588 case UMIN:
3589 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3590 || (h1 == h2
3591 && ((unsigned HOST_WIDE_INT) l1
3592 < (unsigned HOST_WIDE_INT) l2)))
3593 lv = l1, hv = h1;
3594 else
3595 lv = l2, hv = h2;
3596 break;
3597
3598 case UMAX:
3599 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3600 || (h1 == h2
3601 && ((unsigned HOST_WIDE_INT) l1
3602 > (unsigned HOST_WIDE_INT) l2)))
3603 lv = l1, hv = h1;
3604 else
3605 lv = l2, hv = h2;
3606 break;
3607
3608 case LSHIFTRT: case ASHIFTRT:
3609 case ASHIFT:
3610 case ROTATE: case ROTATERT:
3611 #ifdef SHIFT_COUNT_TRUNCATED
3612 if (SHIFT_COUNT_TRUNCATED)
3613 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3614 #endif
3615
3616 if (h2 != 0 || l2 < 0 || l2 >= GET_MODE_BITSIZE (mode))
3617 return 0;
3618
3619 if (code == LSHIFTRT || code == ASHIFTRT)
3620 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3621 code == ASHIFTRT);
3622 else if (code == ASHIFT)
3623 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3624 else if (code == ROTATE)
3625 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3626 else /* code == ROTATERT */
3627 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3628 break;
3629
3630 default:
3631 return 0;
3632 }
3633
3634 return immed_double_const (lv, hv, mode);
3635 }
3636
3637 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
3638 || width > HOST_BITS_PER_WIDE_INT || width == 0)
3639 {
3640 /* Even if we can't compute a constant result,
3641 there are some cases worth simplifying. */
3642
3643 switch (code)
3644 {
3645 case PLUS:
3646 /* In IEEE floating point, x+0 is not the same as x. Similarly
3647 for the other optimizations below. */
3648 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
3649 && FLOAT_MODE_P (mode) && ! flag_fast_math)
3650 break;
3651
3652 if (op1 == CONST0_RTX (mode))
3653 return op0;
3654
3655 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
3656 if (GET_CODE (op0) == NEG)
3657 return cse_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
3658 else if (GET_CODE (op1) == NEG)
3659 return cse_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
3660
3661 /* Handle both-operands-constant cases. We can only add
3662 CONST_INTs to constants since the sum of relocatable symbols
3663 can't be handled by most assemblers. Don't add CONST_INT
3664 to CONST_INT since overflow won't be computed properly if wider
3665 than HOST_BITS_PER_WIDE_INT. */
3666
3667 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
3668 && GET_CODE (op1) == CONST_INT)
3669 return plus_constant (op0, INTVAL (op1));
3670 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
3671 && GET_CODE (op0) == CONST_INT)
3672 return plus_constant (op1, INTVAL (op0));
3673
3674 /* See if this is something like X * C - X or vice versa or
3675 if the multiplication is written as a shift. If so, we can
3676 distribute and make a new multiply, shift, or maybe just
3677 have X (if C is 2 in the example above). But don't make a
3678 real multiply if we didn't have one before. */
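/* For example, (plus (mult X 3) X) folds to X*4 (a shift, or a
   MULT while RTL is still being generated), and (plus X (neg X))
   folds to 0 when X has no side effects; but (plus (ashift X 2) X)
   is left alone, since folding it would create a real multiply
   by 5 where none existed (HAD_MULT stays 0).  */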
3679
3680 if (! FLOAT_MODE_P (mode))
3681 {
3682 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
3683 rtx lhs = op0, rhs = op1;
3684 int had_mult = 0;
3685
3686 if (GET_CODE (lhs) == NEG)
3687 coeff0 = -1, lhs = XEXP (lhs, 0);
3688 else if (GET_CODE (lhs) == MULT
3689 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
3690 {
3691 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
3692 had_mult = 1;
3693 }
3694 else if (GET_CODE (lhs) == ASHIFT
3695 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
3696 && INTVAL (XEXP (lhs, 1)) >= 0
3697 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
3698 {
3699 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
3700 lhs = XEXP (lhs, 0);
3701 }
3702
3703 if (GET_CODE (rhs) == NEG)
3704 coeff1 = -1, rhs = XEXP (rhs, 0);
3705 else if (GET_CODE (rhs) == MULT
3706 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
3707 {
3708 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
3709 had_mult = 1;
3710 }
3711 else if (GET_CODE (rhs) == ASHIFT
3712 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
3713 && INTVAL (XEXP (rhs, 1)) >= 0
3714 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
3715 {
3716 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
3717 rhs = XEXP (rhs, 0);
3718 }
3719
3720 if (rtx_equal_p (lhs, rhs))
3721 {
3722 tem = cse_gen_binary (MULT, mode, lhs,
3723 GEN_INT (coeff0 + coeff1));
3724 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
3725 }
3726 }
3727
3728 /* If one of the operands is a PLUS or a MINUS, see if we can
3729 simplify this by the associative law.
3730 Don't use the associative law for floating point.
3731 The inaccuracy makes it nonassociative,
3732 and subtle programs can break if operations are associated. */
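/* E.g. (plus (plus X (const_int 2)) (const_int 3)) becomes
   (plus X (const_int 5)) this way.  */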
3733
3734 if (INTEGRAL_MODE_P (mode)
3735 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
3736 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
3737 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3738 return tem;
3739 break;
3740
3741 case COMPARE:
3742 #ifdef HAVE_cc0
3743 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
3744 using cc0, in which case we want to leave it as a COMPARE
3745 so we can distinguish it from a register-register copy.
3746
3747 In IEEE floating point, x-0 is not the same as x. */
3748
3749 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3750 || ! FLOAT_MODE_P (mode) || flag_fast_math)
3751 && op1 == CONST0_RTX (mode))
3752 return op0;
3753 #else
3754 /* Do nothing here. */
3755 #endif
3756 break;
3757
3758 case MINUS:
3759 /* None of these optimizations can be done for IEEE
3760 floating point. */
3761 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
3762 && FLOAT_MODE_P (mode) && ! flag_fast_math)
3763 break;
3764
3765 /* We can't assume x-x is 0 even with non-IEEE floating point,
3766 but since it is zero except in very strange circumstances, we
3767 will treat it as zero with -ffast-math. */
3768 if (rtx_equal_p (op0, op1)
3769 && ! side_effects_p (op0)
3770 && (! FLOAT_MODE_P (mode) || flag_fast_math))
3771 return CONST0_RTX (mode);
3772
3773 /* Change subtraction from zero into negation. */
3774 if (op0 == CONST0_RTX (mode))
3775 return gen_rtx (NEG, mode, op1);
3776
3777 /* (-1 - a) is ~a. */
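/* In two's complement ~a == -a - 1, so -1 - a and ~a are the
   same value.  */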
3778 if (op0 == constm1_rtx)
3779 return gen_rtx (NOT, mode, op1);
3780
3781 /* Subtracting 0 has no effect. */
3782 if (op1 == CONST0_RTX (mode))
3783 return op0;
3784
3785 /* See if this is something like X * C - X or vice versa or
3786 if the multiplication is written as a shift. If so, we can
3787 distribute and make a new multiply, shift, or maybe just
3788 have X (if C is 2 in the example above). But don't make a
3789 real multiply if we didn't have one before. */
3790
3791 if (! FLOAT_MODE_P (mode))
3792 {
3793 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
3794 rtx lhs = op0, rhs = op1;
3795 int had_mult = 0;
3796
3797 if (GET_CODE (lhs) == NEG)
3798 coeff0 = -1, lhs = XEXP (lhs, 0);
3799 else if (GET_CODE (lhs) == MULT
3800 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
3801 {
3802 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
3803 had_mult = 1;
3804 }
3805 else if (GET_CODE (lhs) == ASHIFT
3806 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
3807 && INTVAL (XEXP (lhs, 1)) >= 0
3808 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
3809 {
3810 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
3811 lhs = XEXP (lhs, 0);
3812 }
3813
3814 if (GET_CODE (rhs) == NEG)
3815 coeff1 = -1, rhs = XEXP (rhs, 0);
3816 else if (GET_CODE (rhs) == MULT
3817 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
3818 {
3819 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
3820 had_mult = 1;
3821 }
3822 else if (GET_CODE (rhs) == ASHIFT
3823 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
3824 && INTVAL (XEXP (rhs, 1)) >= 0
3825 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
3826 {
3827 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
3828 rhs = XEXP (rhs, 0);
3829 }
3830
3831 if (rtx_equal_p (lhs, rhs))
3832 {
3833 tem = cse_gen_binary (MULT, mode, lhs,
3834 GEN_INT (coeff0 - coeff1));
3835 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
3836 }
3837 }
3838
3839 /* (a - (-b)) -> (a + b). */
3840 if (GET_CODE (op1) == NEG)
3841 return cse_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
3842
3843 /* If one of the operands is a PLUS or a MINUS, see if we can
3844 simplify this by the associative law.
3845 Don't use the associative law for floating point.
3846 The inaccuracy makes it nonassociative,
3847 and subtle programs can break if operations are associated. */
3848
3849 if (INTEGRAL_MODE_P (mode)
3850 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
3851 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
3852 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3853 return tem;
3854
3855 /* Don't let a relocatable value get a negative coeff. */
3856 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
3857 return plus_constant (op0, - INTVAL (op1));
3858
3859 /* (x - (x & y)) -> (x & ~y) */
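/* Every bit set in (x & y) is also set in x, so the subtraction
   never borrows; it merely clears the bits of y that were set
   in x, which is exactly (x & ~y).  */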
3860 if (GET_CODE (op1) == AND)
3861 {
3862 if (rtx_equal_p (op0, XEXP (op1, 0)))
3863 return cse_gen_binary (AND, mode, op0, gen_rtx (NOT, mode, XEXP (op1, 1)));
3864 if (rtx_equal_p (op0, XEXP (op1, 1)))
3865 return cse_gen_binary (AND, mode, op0, gen_rtx (NOT, mode, XEXP (op1, 0)));
3866 }
3867 break;
3868
3869 case MULT:
3870 if (op1 == constm1_rtx)
3871 {
3872 tem = simplify_unary_operation (NEG, mode, op0, mode);
3873
3874 return tem ? tem : gen_rtx (NEG, mode, op0);
3875 }
3876
3877 /* In IEEE floating point, x*0 is not always 0. */
3878 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3879 || ! FLOAT_MODE_P (mode) || flag_fast_math)
3880 && op1 == CONST0_RTX (mode)
3881 && ! side_effects_p (op0))
3882 return op1;
3883
3884 /* In IEEE floating point, x*1 is not equivalent to x for nans.
3885 However, ANSI says we can drop signals,
3886 so we can do this anyway. */
3887 if (op1 == CONST1_RTX (mode))
3888 return op0;
3889
3890 /* Convert multiply by constant power of two into shift unless
3891 we are still generating RTL. This test is a kludge. */
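/* E.g. (mult X (const_int 8)) becomes (ashift X (const_int 3)),
   since exact_log2 (8) == 3.  */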
3892 if (GET_CODE (op1) == CONST_INT
3893 && (val = exact_log2 (INTVAL (op1))) >= 0
3894 /* If the mode is larger than the host word size, and the
3895 uppermost bit is set, then this isn't a power of two due
3896 to implicit sign extension. */
3897 && (width <= HOST_BITS_PER_WIDE_INT
3898 || val != HOST_BITS_PER_WIDE_INT - 1)
3899 && ! rtx_equal_function_value_matters)
3900 return gen_rtx (ASHIFT, mode, op0, GEN_INT (val));
3901
3902 if (GET_CODE (op1) == CONST_DOUBLE
3903 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
3904 {
3905 REAL_VALUE_TYPE d;
3906 jmp_buf handler;
3907 int op1is2, op1ism1;
3908
3909 if (setjmp (handler))
3910 return 0;
3911
3912 set_float_handler (handler);
3913 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3914 op1is2 = REAL_VALUES_EQUAL (d, dconst2);
3915 op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
3916 set_float_handler (NULL_PTR);
3917
3918 /* x*2 is x+x and x*(-1) is -x */
3919 if (op1is2 && GET_MODE (op0) == mode)
3920 return gen_rtx (PLUS, mode, op0, copy_rtx (op0));
3921
3922 else if (op1ism1 && GET_MODE (op0) == mode)
3923 return gen_rtx (NEG, mode, op0);
3924 }
3925 break;
3926
3927 case IOR:
3928 if (op1 == const0_rtx)
3929 return op0;
3930 if (GET_CODE (op1) == CONST_INT
3931 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3932 return op1;
3933 if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
3934 return op0;
3935 /* A | (~A) -> -1 */
3936 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3937 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3938 && ! side_effects_p (op0)
3939 && GET_MODE_CLASS (mode) != MODE_CC)
3940 return constm1_rtx;
3941 break;
3942
3943 case XOR:
3944 if (op1 == const0_rtx)
3945 return op0;
3946 if (GET_CODE (op1) == CONST_INT
3947 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3948 return gen_rtx (NOT, mode, op0);
3949 if (op0 == op1 && ! side_effects_p (op0)
3950 && GET_MODE_CLASS (mode) != MODE_CC)
3951 return const0_rtx;
3952 break;
3953
3954 case AND:
3955 if (op1 == const0_rtx && ! side_effects_p (op0))
3956 return const0_rtx;
3957 if (GET_CODE (op1) == CONST_INT
3958 && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
3959 return op0;
3960 if (op0 == op1 && ! side_effects_p (op0)
3961 && GET_MODE_CLASS (mode) != MODE_CC)
3962 return op0;
3963 /* A & (~A) -> 0 */
3964 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3965 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3966 && ! side_effects_p (op0)
3967 && GET_MODE_CLASS (mode) != MODE_CC)
3968 return const0_rtx;
3969 break;
3970
3971 case UDIV:
3972 /* Convert divide by power of two into shift (divide by 1 handled
3973 below). */
3974 if (GET_CODE (op1) == CONST_INT
3975 && (arg1 = exact_log2 (INTVAL (op1))) > 0)
3976 return gen_rtx (LSHIFTRT, mode, op0, GEN_INT (arg1));
3977
3978 /* ... fall through ... */
3979
3980 case DIV:
3981 if (op1 == CONST1_RTX (mode))
3982 return op0;
3983
3984 /* In IEEE floating point, 0/x is not always 0. */
3985 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3986 || ! FLOAT_MODE_P (mode) || flag_fast_math)
3987 && op0 == CONST0_RTX (mode)
3988 && ! side_effects_p (op1))
3989 return op0;
3990
3991 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3992 /* Change division by a constant into multiplication. Only do
3993 this with -ffast-math until an expert says it is safe in
3994 general. */
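/* E.g. x/4.0 becomes x*0.25, which is exact, but x/3.0 becomes
   x*(1/3.0) with 1/3.0 already rounded, so the product can
   differ from the correctly rounded quotient.  */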
3995 else if (GET_CODE (op1) == CONST_DOUBLE
3996 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
3997 && op1 != CONST0_RTX (mode)
3998 && flag_fast_math)
3999 {
4000 REAL_VALUE_TYPE d;
4001 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
4002
4003 if (! REAL_VALUES_EQUAL (d, dconst0))
4004 {
4005 #if defined (REAL_ARITHMETIC)
4006 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
4007 return gen_rtx (MULT, mode, op0,
4008 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
4009 #else
4010 return gen_rtx (MULT, mode, op0,
4011 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
4012 #endif
4013 }
4014 }
4015 #endif
4016 break;
4017
4018 case UMOD:
4019 /* Handle modulus by power of two (mod with 1 handled below). */
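/* For unsigned x, x % 2**N == x & (2**N - 1);
   e.g. x % 8 == x & 7.  */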
4020 if (GET_CODE (op1) == CONST_INT
4021 && exact_log2 (INTVAL (op1)) > 0)
4022 return gen_rtx (AND, mode, op0, GEN_INT (INTVAL (op1) - 1));
4023
4024 /* ... fall through ... */
4025
4026 case MOD:
4027 if ((op0 == const0_rtx || op1 == const1_rtx)
4028 && ! side_effects_p (op0) && ! side_effects_p (op1))
4029 return const0_rtx;
4030 break;
4031
4032 case ROTATERT:
4033 case ROTATE:
4034 /* Rotating ~0 always results in ~0. */
4035 if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
4036 && INTVAL (op0) == GET_MODE_MASK (mode)
4037 && ! side_effects_p (op1))
4038 return op0;
4039
4040 /* ... fall through ... */
4041
4042 case ASHIFT:
4043 case ASHIFTRT:
4044 case LSHIFTRT:
4045 if (op1 == const0_rtx)
4046 return op0;
4047 if (op0 == const0_rtx && ! side_effects_p (op1))
4048 return op0;
4049 break;
4050
4051 case SMIN:
4052 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
4053 && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width - 1)
4054 && ! side_effects_p (op0))
4055 return op1;
4056 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4057 return op0;
4058 break;
4059
4060 case SMAX:
4061 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
4062 && (INTVAL (op1)
4063 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
4064 && ! side_effects_p (op0))
4065 return op1;
4066 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4067 return op0;
4068 break;
4069
4070 case UMIN:
4071 if (op1 == const0_rtx && ! side_effects_p (op0))
4072 return op1;
4073 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4074 return op0;
4075 break;
4076
4077 case UMAX:
4078 if (op1 == constm1_rtx && ! side_effects_p (op0))
4079 return op1;
4080 else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
4081 return op0;
4082 break;
4083
4084 default:
4085 abort ();
4086 }
4087
4088 return 0;
4089 }
4090
4091 /* Get the integer argument values in two forms:
4092 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
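/* E.g. with width == 8 and a low byte of 0xff, ARG0 becomes 255
   while ARG0S becomes -1.  */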
4093
4094 arg0 = INTVAL (op0);
4095 arg1 = INTVAL (op1);
4096
4097 if (width < HOST_BITS_PER_WIDE_INT)
4098 {
4099 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
4100 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
4101
4102 arg0s = arg0;
4103 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4104 arg0s |= ((HOST_WIDE_INT) (-1) << width);
4105
4106 arg1s = arg1;
4107 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4108 arg1s |= ((HOST_WIDE_INT) (-1) << width);
4109 }
4110 else
4111 {
4112 arg0s = arg0;
4113 arg1s = arg1;
4114 }
4115
4116 /* Compute the value of the arithmetic. */
4117
4118 switch (code)
4119 {
4120 case PLUS:
4121 val = arg0s + arg1s;
4122 break;
4123
4124 case MINUS:
4125 val = arg0s - arg1s;
4126 break;
4127
4128 case MULT:
4129 val = arg0s * arg1s;
4130 break;
4131
4132 case DIV:
4133 if (arg1s == 0)
4134 return 0;
4135 val = arg0s / arg1s;
4136 break;
4137
4138 case MOD:
4139 if (arg1s == 0)
4140 return 0;
4141 val = arg0s % arg1s;
4142 break;
4143
4144 case UDIV:
4145 if (arg1 == 0)
4146 return 0;
4147 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
4148 break;
4149
4150 case UMOD:
4151 if (arg1 == 0)
4152 return 0;
4153 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
4154 break;
4155
4156 case AND:
4157 val = arg0 & arg1;
4158 break;
4159
4160 case IOR:
4161 val = arg0 | arg1;
4162 break;
4163
4164 case XOR:
4165 val = arg0 ^ arg1;
4166 break;
4167
4168 case LSHIFTRT:
4169 /* If shift count is undefined, don't fold it; let the machine do
4170 what it wants. But truncate it if the machine will do that. */
4171 if (arg1 < 0)
4172 return 0;
4173
4174 #ifdef SHIFT_COUNT_TRUNCATED
4175 if (SHIFT_COUNT_TRUNCATED)
4176 arg1 %= width;
4177 #endif
4178
4179 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
4180 break;
4181
4182 case ASHIFT:
4183 if (arg1 < 0)
4184 return 0;
4185
4186 #ifdef SHIFT_COUNT_TRUNCATED
4187 if (SHIFT_COUNT_TRUNCATED)
4188 arg1 %= width;
4189 #endif
4190
4191 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
4192 break;
4193
4194 case ASHIFTRT:
4195 if (arg1 < 0)
4196 return 0;
4197
4198 #ifdef SHIFT_COUNT_TRUNCATED
4199 if (SHIFT_COUNT_TRUNCATED)
4200 arg1 %= width;
4201 #endif
4202
4203 val = arg0s >> arg1;
4204
4205 /* Bootstrap compiler may not have sign extended the right shift.
4206 Manually extend the sign to ensure bootstrap cc matches gcc. */
4207 if (arg0s < 0 && arg1 > 0)
4208 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
4209
4210 break;
4211
4212 case ROTATERT:
4213 if (arg1 < 0)
4214 return 0;
4215
4216 arg1 %= width;
4217 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
4218 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
4219 break;
4220
4221 case ROTATE:
4222 if (arg1 < 0)
4223 return 0;
4224
4225 arg1 %= width;
4226 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
4227 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
4228 break;
4229
4230 case COMPARE:
4231 /* Do nothing here. */
4232 return 0;
4233
4234 case SMIN:
4235 val = arg0s <= arg1s ? arg0s : arg1s;
4236 break;
4237
4238 case UMIN:
4239 val = ((unsigned HOST_WIDE_INT) arg0
4240 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4241 break;
4242
4243 case SMAX:
4244 val = arg0s > arg1s ? arg0s : arg1s;
4245 break;
4246
4247 case UMAX:
4248 val = ((unsigned HOST_WIDE_INT) arg0
4249 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
4250 break;
4251
4252 default:
4253 abort ();
4254 }
4255
4256 /* Clear the bits that don't belong in our mode, unless they and our sign
4257 bit are all one. So we get either a reasonable negative value or a
4258 reasonable unsigned value for this mode. */
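/* E.g. for width == 8, val == -1 is left alone (its sign bit and
   everything above it are all one), while val == 0x1ff is masked
   down to 0xff.  */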
4259 if (width < HOST_BITS_PER_WIDE_INT
4260 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4261 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4262 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4263
4264 /* If this would be an entire word for the target, but is not for
4265 the host, then sign-extend on the host so that the number will look
4266 the same way on the host that it would on the target.
4267
4268 For example, when building a 32-bit sparc-targeted compiler
4269 hosted on a 64-bit alpha, we want the 32-bit unsigned value -1
4270 to be represented as the 64-bit value -1, not as 0x00000000ffffffff.
4271 The latter confuses the sparc backend. */
4272
4273 if (BITS_PER_WORD < HOST_BITS_PER_WIDE_INT && BITS_PER_WORD == width
4274 && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
4275 val |= ((HOST_WIDE_INT) (-1) << width);
4276
4277 return GEN_INT (val);
4278 }
4279 \f
4280 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4281 PLUS or MINUS.
4282
4283 Rather than test for specific cases, we do this by a brute-force method
4284 and do all possible simplifications until no more changes occur. Then
4285 we rebuild the operation. */
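/* For example, (minus (plus A B) (minus A C)) expands to the
   operand list A, +B, -A, +C; the two A's cancel against each
   other, and the result is rebuilt as the sum of B and C.  */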
4286
4287 static rtx
4288 simplify_plus_minus (code, mode, op0, op1)
4289 enum rtx_code code;
4290 enum machine_mode mode;
4291 rtx op0, op1;
4292 {
4293 rtx ops[8];
4294 int negs[8];
4295 rtx result, tem;
4296 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
4297 int first = 1, negate = 0, changed;
4298 int i, j;
4299
4300 bzero ((char *) ops, sizeof ops);
4301
4302 /* Set up the two operands and then expand them until nothing has been
4303 changed. If we run out of room in our array, give up; this should
4304 almost never happen. */
4305
4306 ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);
4307
4308 changed = 1;
4309 while (changed)
4310 {
4311 changed = 0;
4312
4313 for (i = 0; i < n_ops; i++)
4314 switch (GET_CODE (ops[i]))
4315 {
4316 case PLUS:
4317 case MINUS:
4318 if (n_ops == 7)
4319 return 0;
4320
4321 ops[n_ops] = XEXP (ops[i], 1);
4322 negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
4323 ops[i] = XEXP (ops[i], 0);
4324 input_ops++;
4325 changed = 1;
4326 break;
4327
4328 case NEG:
4329 ops[i] = XEXP (ops[i], 0);
4330 negs[i] = ! negs[i];
4331 changed = 1;
4332 break;
4333
4334 case CONST:
4335 ops[i] = XEXP (ops[i], 0);
4336 input_consts++;
4337 changed = 1;
4338 break;
4339
4340 case NOT:
4341 /* ~a -> (-a - 1) */
4342 if (n_ops != 7)
4343 {
4344 ops[n_ops] = constm1_rtx;
4345 negs[n_ops++] = negs[i];
4346 ops[i] = XEXP (ops[i], 0);
4347 negs[i] = ! negs[i];
4348 changed = 1;
4349 }
4350 break;
4351
4352 case CONST_INT:
4353 if (negs[i])
4354 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
4355 break;
4356 }
4357 }
4358
4359 /* If we only have two operands, we can't do anything. */
4360 if (n_ops <= 2)
4361 return 0;
4362
4363 /* Now simplify each pair of operands until nothing changes. The first
4364 time through just simplify constants against each other. */
4365
4366 changed = 1;
4367 while (changed)
4368 {
4369 changed = first;
4370
4371 for (i = 0; i < n_ops - 1; i++)
4372 for (j = i + 1; j < n_ops; j++)
4373 if (ops[i] != 0 && ops[j] != 0
4374 && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
4375 {
4376 rtx lhs = ops[i], rhs = ops[j];
4377 enum rtx_code ncode = PLUS;
4378
4379 if (negs[i] && ! negs[j])
4380 lhs = ops[j], rhs = ops[i], ncode = MINUS;
4381 else if (! negs[i] && negs[j])
4382 ncode = MINUS;
4383
4384 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
4385 if (tem)
4386 {
4387 ops[i] = tem, ops[j] = 0;
4388 negs[i] = negs[i] && negs[j];
4389 if (GET_CODE (tem) == NEG)
4390 ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];
4391
4392 if (GET_CODE (ops[i]) == CONST_INT && negs[i])
4393 ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
4394 changed = 1;
4395 }
4396 }
4397
4398 first = 0;
4399 }
4400
4401 /* Pack all the operands to the lower-numbered entries and give up if
4402 we didn't reduce the number of operands we had. Make sure we
4403 count a CONST as two operands. If we have the same number of
4404 operands, but have made more CONSTs than we had, this is also
4405 an improvement, so accept it. */
4406
4407 for (i = 0, j = 0; j < n_ops; j++)
4408 if (ops[j] != 0)
4409 {
4410 ops[i] = ops[j], negs[i++] = negs[j];
4411 if (GET_CODE (ops[j]) == CONST)
4412 n_consts++;
4413 }
4414
4415 if (i + n_consts > input_ops
4416 || (i + n_consts == input_ops && n_consts <= input_consts))
4417 return 0;
4418
4419 n_ops = i;
4420
4421 /* If we have a CONST_INT, put it last. */
4422 for (i = 0; i < n_ops - 1; i++)
4423 if (GET_CODE (ops[i]) == CONST_INT)
4424 {
4425 tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i], ops[i] = tem;
4426 j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
4427 }
4428
4429 /* Put a non-negated operand first. If there aren't any, make all
4430 operands positive and negate the whole thing later. */
4431 for (i = 0; i < n_ops && negs[i]; i++)
4432 ;
4433
4434 if (i == n_ops)
4435 {
4436 for (i = 0; i < n_ops; i++)
4437 negs[i] = 0;
4438 negate = 1;
4439 }
4440 else if (i != 0)
4441 {
4442 tem = ops[0], ops[0] = ops[i], ops[i] = tem;
4443 j = negs[0], negs[0] = negs[i], negs[i] = j;
4444 }
4445
4446 /* Now make the result by performing the requested operations. */
4447 result = ops[0];
4448 for (i = 1; i < n_ops; i++)
4449 result = cse_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);
4450
4451 return negate ? gen_rtx (NEG, mode, result) : result;
4452 }
4453 \f
4454 /* Make a binary operation by properly ordering the operands and
4455 seeing if the expression folds. */
4456
4457 static rtx
4458 cse_gen_binary (code, mode, op0, op1)
4459 enum rtx_code code;
4460 enum machine_mode mode;
4461 rtx op0, op1;
4462 {
4463 rtx tem;
4464
4465 /* Put complex operands first and constants second if commutative. */
4466 if (GET_RTX_CLASS (code) == 'c'
4467 && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
4468 || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
4469 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
4470 || (GET_CODE (op0) == SUBREG
4471 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
4472 && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
4473 tem = op0, op0 = op1, op1 = tem;
4474
4475 /* If this simplifies, do it. */
4476 tem = simplify_binary_operation (code, mode, op0, op1);
4477
4478 if (tem)
4479 return tem;
4480
4481 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
4482 just form the operation. */
4483
4484 if (code == PLUS && GET_CODE (op1) == CONST_INT
4485 && GET_MODE (op0) != VOIDmode)
4486 return plus_constant (op0, INTVAL (op1));
4487 else if (code == MINUS && GET_CODE (op1) == CONST_INT
4488 && GET_MODE (op0) != VOIDmode)
4489 return plus_constant (op0, - INTVAL (op1));
4490 else
4491 return gen_rtx (code, mode, op0, op1);
4492 }
4493 \f
4494 /* Like simplify_binary_operation except used for relational operators.
4495 MODE is the mode of the operands, not that of the result. If MODE
4496 is VOIDmode, both operands must also be VOIDmode and we compare the
4497 operands in "infinite precision".
4498
4499 If no simplification is possible, this function returns zero. Otherwise,
4500 it returns either const_true_rtx or const0_rtx. */
4501
4502 rtx
4503 simplify_relational_operation (code, mode, op0, op1)
4504 enum rtx_code code;
4505 enum machine_mode mode;
4506 rtx op0, op1;
4507 {
4508 int equal, op0lt, op0ltu, op1lt, op1ltu;
4509 rtx tem;
4510
4511 /* If op0 is a compare, extract the comparison arguments from it. */
4512 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4513 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
4514
4515 /* We can't simplify MODE_CC values since we don't know what the
4516 actual comparison is. */
4517 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
4518 #ifdef HAVE_cc0
4519 || op0 == cc0_rtx
4520 #endif
4521 )
4522 return 0;
4523
4524 /* For integer comparisons of A and B maybe we can simplify A - B and can
4525 then simplify a comparison of that with zero. If A and B are both either
4526 a register or a CONST_INT, this can't help; testing for these cases will
4527 prevent infinite recursion here and speed things up.
4528
4529 If CODE is an unsigned comparison, then we can never do this optimization,
4530 because it gives an incorrect result if the subtraction wraps around zero.
4531 ANSI C defines unsigned operations such that they never overflow, and
4532 thus such cases cannot be ignored. */
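/* E.g. with 32-bit operands X == 0xc0000000 and Y == 0x40000000,
   (gtu X Y) is true, yet X - Y == 0x80000000 is negative as a
   signed value, so (gt (minus X Y) 0) would come out false.  */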
4533
4534 if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
4535 && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
4536 && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
4537 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4538 && code != GTU && code != GEU && code != LTU && code != LEU)
4539 return simplify_relational_operation (signed_condition (code),
4540 mode, tem, const0_rtx);
4541
4542 /* For non-IEEE floating-point, if the two operands are equal, we know the
4543 result. */
4544 if (rtx_equal_p (op0, op1)
4545 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4546 || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math))
4547 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
4548
4549 /* If the operands are floating-point constants, see if we can fold
4550 the result. */
4551 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4552 else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
4553 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
4554 {
4555 REAL_VALUE_TYPE d0, d1;
4556 jmp_buf handler;
4557
4558 if (setjmp (handler))
4559 return 0;
4560
4561 set_float_handler (handler);
4562 REAL_VALUE_FROM_CONST_DOUBLE (d0, op0);
4563 REAL_VALUE_FROM_CONST_DOUBLE (d1, op1);
4564 equal = REAL_VALUES_EQUAL (d0, d1);
4565 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
4566 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
4567 set_float_handler (NULL_PTR);
4568 }
4569 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
4570
4571 /* Otherwise, see if the operands are both integers. */
4572 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4573 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
4574 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
4575 {
4576 int width = GET_MODE_BITSIZE (mode);
4577 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4578 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4579
4580 /* Get the two words comprising each integer constant. */
4581 if (GET_CODE (op0) == CONST_DOUBLE)
4582 {
4583 l0u = l0s = CONST_DOUBLE_LOW (op0);
4584 h0u = h0s = CONST_DOUBLE_HIGH (op0);
4585 }
4586 else
4587 {
4588 l0u = l0s = INTVAL (op0);
4589 h0u = 0, h0s = l0s < 0 ? -1 : 0;
4590 }
4591
4592 if (GET_CODE (op1) == CONST_DOUBLE)
4593 {
4594 l1u = l1s = CONST_DOUBLE_LOW (op1);
4595 h1u = h1s = CONST_DOUBLE_HIGH (op1);
4596 }
4597 else
4598 {
4599 l1u = l1s = INTVAL (op1);
4600 h1u = 0, h1s = l1s < 0 ? -1 : 0;
4601 }
4602
4603 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4604 we have to sign or zero-extend the values. */
4605 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4606 h0u = h1u = 0, h0s = l0s < 0 ? -1 : 0, h1s = l1s < 0 ? -1 : 0;
4607
4608 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4609 {
4610 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4611 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4612
4613 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4614 l0s |= ((HOST_WIDE_INT) (-1) << width);
4615
4616 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4617 l1s |= ((HOST_WIDE_INT) (-1) << width);
4618 }
4619
4620 equal = (h0u == h1u && l0u == l1u);
4621 op0lt = (h0s < h1s || (h0s == h1s && l0s < l1s));
4622 op1lt = (h1s < h0s || (h1s == h0s && l1s < l0s));
4623 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4624 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
4625 }
4626
4627 /* Otherwise, there are some code-specific tests we can make. */
4628 else
4629 {
4630 switch (code)
4631 {
4632 case EQ:
4633 /* References to the frame plus a constant or labels cannot
4634 be zero, but a SYMBOL_REF can due to #pragma weak. */
4635 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
4636 || GET_CODE (op0) == LABEL_REF)
4637 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4638 /* On some machines, the ap reg can be 0. */
4639 && op0 != arg_pointer_rtx
4640 #endif
4641 )
4642 return const0_rtx;
4643 break;
4644
4645 case NE:
4646 if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
4647 || GET_CODE (op0) == LABEL_REF)
4648 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4649 && op0 != arg_pointer_rtx
4650 #endif
4651 )
4652 return const_true_rtx;
4653 break;
4654
4655 case GEU:
4656 /* Unsigned values are never negative. */
4657 if (op1 == const0_rtx)
4658 return const_true_rtx;
4659 break;
4660
4661 case LTU:
4662 if (op1 == const0_rtx)
4663 return const0_rtx;
4664 break;
4665
4666 case LEU:
4667 /* Unsigned values are never greater than the largest
4668 unsigned value. */
4669 if (GET_CODE (op1) == CONST_INT
4670 && INTVAL (op1) == GET_MODE_MASK (mode)
4671 && INTEGRAL_MODE_P (mode))
4672 return const_true_rtx;
4673 break;
4674
4675 case GTU:
4676 if (GET_CODE (op1) == CONST_INT
4677 && INTVAL (op1) == GET_MODE_MASK (mode)
4678 && INTEGRAL_MODE_P (mode))
4679 return const0_rtx;
4680 break;
4681 }
4682
4683 return 0;
4684 }
4685
4686 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4687 as appropriate. */
4688 switch (code)
4689 {
4690 case EQ:
4691 return equal ? const_true_rtx : const0_rtx;
4692 case NE:
4693 return ! equal ? const_true_rtx : const0_rtx;
4694 case LT:
4695 return op0lt ? const_true_rtx : const0_rtx;
4696 case GT:
4697 return op1lt ? const_true_rtx : const0_rtx;
4698 case LTU:
4699 return op0ltu ? const_true_rtx : const0_rtx;
4700 case GTU:
4701 return op1ltu ? const_true_rtx : const0_rtx;
4702 case LE:
4703 return equal || op0lt ? const_true_rtx : const0_rtx;
4704 case GE:
4705 return equal || op1lt ? const_true_rtx : const0_rtx;
4706 case LEU:
4707 return equal || op0ltu ? const_true_rtx : const0_rtx;
4708 case GEU:
4709 return equal || op1ltu ? const_true_rtx : const0_rtx;
4710 }
4711
4712 abort ();
4713 }
4714 \f
4715 /* Simplify CODE, an operation with result mode MODE and three operands,
4716 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4717 a constant. Return 0 if no simplification is possible. */
4718
4719 rtx
4720 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
4721 enum rtx_code code;
4722 enum machine_mode mode, op0_mode;
4723 rtx op0, op1, op2;
4724 {
4725 int width = GET_MODE_BITSIZE (mode);
4726
4727 /* VOIDmode means "infinite" precision. */
4728 if (width == 0)
4729 width = HOST_BITS_PER_WIDE_INT;
4730
4731 switch (code)
4732 {
4733 case SIGN_EXTRACT:
4734 case ZERO_EXTRACT:
4735 if (GET_CODE (op0) == CONST_INT
4736 && GET_CODE (op1) == CONST_INT
4737 && GET_CODE (op2) == CONST_INT
4738 && INTVAL (op1) + INTVAL (op2) <= GET_MODE_BITSIZE (op0_mode)
4739 && width <= HOST_BITS_PER_WIDE_INT)
4740 {
4741 /* Extracting a bit-field from a constant. */
4742 HOST_WIDE_INT val = INTVAL (op0);
4743
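/* E.g. (with ! BITS_BIG_ENDIAN) extracting a 4-bit field at bit
   position 4 from the constant 0xa5 yields the field 0xa:
   ZERO_EXTRACT gives 10, while SIGN_EXTRACT gives -6 because
   bit 3 of the field is set.  */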
4744 if (BITS_BIG_ENDIAN)
4745 val >>= (GET_MODE_BITSIZE (op0_mode)
4746 - INTVAL (op2) - INTVAL (op1));
4747 else
4748 val >>= INTVAL (op2);
4749
4750 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4751 {
4752 /* First zero-extend. */
4753 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4754 /* If desired, propagate sign bit. */
4755 if (code == SIGN_EXTRACT
4756 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4757 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4758 }
4759
4760 /* Clear the bits that don't belong in our mode,
4761 unless they and our sign bit are all one.
4762 So we get either a reasonable negative value or a reasonable
4763 unsigned value for this mode. */
4764 if (width < HOST_BITS_PER_WIDE_INT
4765 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4766 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4767 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4768
4769 return GEN_INT (val);
4770 }
4771 break;
4772
4773 case IF_THEN_ELSE:
4774 if (GET_CODE (op0) == CONST_INT)
4775 return op0 != const0_rtx ? op1 : op2;
4776 break;
4777
4778 default:
4779 abort ();
4780 }
4781
4782 return 0;
4783 }
4784 \f
4785 /* If X is a nontrivial arithmetic operation on an argument
4786 for which a constant value can be determined, return
4787 the result of operating on that value, as a constant.
4788 Otherwise, return X, possibly with one or more operands
4789 modified by recursive calls to this function.
4790
4791 If X is a register whose contents are known, we do NOT
4792 return those contents here. equiv_constant is called to
4793 perform that task.
4794
4795 INSN is the insn that we may be modifying. If it is 0, make a copy
4796 of X before modifying it. */
4797
4798 static rtx
4799 fold_rtx (x, insn)
4800 rtx x;
4801 rtx insn;
4802 {
4803 register enum rtx_code code;
4804 register enum machine_mode mode;
4805 register char *fmt;
4806 register int i;
4807 rtx new = 0;
4808 int copied = 0;
4809 int must_swap = 0;
4810
4811 /* Folded equivalents of first two operands of X. */
4812 rtx folded_arg0;
4813 rtx folded_arg1;
4814
4815 /* Constant equivalents of first three operands of X;
4816 0 when no such equivalent is known. */
4817 rtx const_arg0;
4818 rtx const_arg1;
4819 rtx const_arg2;
4820
4821 /* The mode of the first operand of X. We need this for sign and zero
4822 extends. */
4823 enum machine_mode mode_arg0;
4824
4825 if (x == 0)
4826 return x;
4827
4828 mode = GET_MODE (x);
4829 code = GET_CODE (x);
4830 switch (code)
4831 {
4832 case CONST:
4833 case CONST_INT:
4834 case CONST_DOUBLE:
4835 case SYMBOL_REF:
4836 case LABEL_REF:
4837 case REG:
4838 /* No use simplifying an EXPR_LIST
4839 since they are used only for lists of args
4840 in a function call's REG_EQUAL note. */
4841 case EXPR_LIST:
4842 return x;
4843
4844 #ifdef HAVE_cc0
4845 case CC0:
4846 return prev_insn_cc0;
4847 #endif
4848
4849 case PC:
4850 /* If the next insn is a CODE_LABEL followed by a jump table,
4851 PC's value is a LABEL_REF pointing to that label. That
4852 lets us fold switch statements on the Vax. */
4853 if (insn && GET_CODE (insn) == JUMP_INSN)
4854 {
4855 rtx next = next_nonnote_insn (insn);
4856
4857 if (next && GET_CODE (next) == CODE_LABEL
4858 && NEXT_INSN (next) != 0
4859 && GET_CODE (NEXT_INSN (next)) == JUMP_INSN
4860 && (GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_VEC
4861 || GET_CODE (PATTERN (NEXT_INSN (next))) == ADDR_DIFF_VEC))
4862 return gen_rtx (LABEL_REF, Pmode, next);
4863 }
4864 break;
4865
4866 case SUBREG:
4867 /* See if we previously assigned a constant value to this SUBREG. */
4868 if ((new = lookup_as_function (x, CONST_INT)) != 0
4869 || (new = lookup_as_function (x, CONST_DOUBLE)) != 0)
4870 return new;
4871
4872 /* If this is a paradoxical SUBREG, we have no idea what value the
4873 extra bits would have. However, if the operand is equivalent
4874 to a SUBREG whose operand is the same as our mode, and all the
4875 modes are within a word, we can just use the inner operand
4876 because these SUBREGs just say how to treat the register.
4877
4878 Similarly if we find an integer constant. */
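/* E.g. (subreg:SI (reg:HI R) 0) is paradoxical.  If R is known
   equivalent to (subreg:HI (reg:SI S) 0), both name the same
   word, so the SImode value can be taken from S directly;
   likewise a known VOIDmode constant can be used as-is.  */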
4879
4880 if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
4881 {
4882 enum machine_mode imode = GET_MODE (SUBREG_REG (x));
4883 struct table_elt *elt;
4884
4885 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
4886 && GET_MODE_SIZE (imode) <= UNITS_PER_WORD
4887 && (elt = lookup (SUBREG_REG (x), HASH (SUBREG_REG (x), imode),
4888 imode)) != 0)
4889 for (elt = elt->first_same_value;
4890 elt; elt = elt->next_same_value)
4891 {
4892 if (CONSTANT_P (elt->exp)
4893 && GET_MODE (elt->exp) == VOIDmode)
4894 return elt->exp;
4895
4896 if (GET_CODE (elt->exp) == SUBREG
4897 && GET_MODE (SUBREG_REG (elt->exp)) == mode
4898 && exp_equiv_p (elt->exp, elt->exp, 1, 0))
4899 return copy_rtx (SUBREG_REG (elt->exp));
4900 }
4901
4902 return x;
4903 }
4904
4905 /* Fold SUBREG_REG. If it changed, see if we can simplify the SUBREG.
4906 We might be able to if the SUBREG is extracting a single word in an
4907 integral mode or extracting the low part. */
4908
4909 folded_arg0 = fold_rtx (SUBREG_REG (x), insn);
4910 const_arg0 = equiv_constant (folded_arg0);
4911 if (const_arg0)
4912 folded_arg0 = const_arg0;
4913
4914 if (folded_arg0 != SUBREG_REG (x))
4915 {
4916 new = 0;
4917
4918 if (GET_MODE_CLASS (mode) == MODE_INT
4919 && GET_MODE_SIZE (mode) == UNITS_PER_WORD
4920 && GET_MODE (SUBREG_REG (x)) != VOIDmode)
4921 new = operand_subword (folded_arg0, SUBREG_WORD (x), 0,
4922 GET_MODE (SUBREG_REG (x)));
4923 if (new == 0 && subreg_lowpart_p (x))
4924 new = gen_lowpart_if_possible (mode, folded_arg0);
4925 if (new)
4926 return new;
4927 }
4928
4929 /* If this is a narrowing SUBREG and our operand is a REG, see if
4930 we can find an equivalence for REG that is an arithmetic operation
4931 in a wider mode where both operands are paradoxical SUBREGs
4932 from objects of our result mode. In that case, we couldn't report
4933 an equivalent value for that operation, since we don't know what the
4934 extra bits will be. But we can find an equivalence for this SUBREG
4935 by folding that operation in the narrow mode. This allows us to
4936 fold arithmetic in narrow modes when the machine only supports
4937 word-sized arithmetic.
4938
4939 Also look for a case where we have a SUBREG whose operand is the
4940 same as our result. If both modes are smaller than a word, we
4941 are simply interpreting a register in different modes and we
4942 can use the inner value. */
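/* E.g. on a word-only machine a QImode add is recorded as
   (plus:SI (subreg:SI (reg:QI A) 0) (subreg:SI (reg:QI B) 0));
   when the result is later used through a narrowing QImode
   SUBREG, we can redo the addition in QImode once A and B have
   known constant values.  */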
4943
4944 if (GET_CODE (folded_arg0) == REG
4945 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (folded_arg0))
4946 && subreg_lowpart_p (x))
4947 {
4948 struct table_elt *elt;
4949
4950 /* We can use HASH here since we know that canon_hash won't be
4951 called. */
4952 elt = lookup (folded_arg0,
4953 HASH (folded_arg0, GET_MODE (folded_arg0)),
4954 GET_MODE (folded_arg0));
4955
4956 if (elt)
4957 elt = elt->first_same_value;
4958
4959 for (; elt; elt = elt->next_same_value)
4960 {
4961 enum rtx_code eltcode = GET_CODE (elt->exp);
4962
4963 /* Just check for unary and binary operations. */
4964 if (GET_RTX_CLASS (GET_CODE (elt->exp)) == '1'
4965 && GET_CODE (elt->exp) != SIGN_EXTEND
4966 && GET_CODE (elt->exp) != ZERO_EXTEND
4967 && GET_CODE (XEXP (elt->exp, 0)) == SUBREG
4968 && GET_MODE (SUBREG_REG (XEXP (elt->exp, 0))) == mode)
4969 {
4970 rtx op0 = SUBREG_REG (XEXP (elt->exp, 0));
4971
4972 if (GET_CODE (op0) != REG && ! CONSTANT_P (op0))
4973 op0 = fold_rtx (op0, NULL_RTX);
4974
4975 op0 = equiv_constant (op0);
4976 if (op0)
4977 new = simplify_unary_operation (GET_CODE (elt->exp), mode,
4978 op0, mode);
4979 }
4980 else if ((GET_RTX_CLASS (GET_CODE (elt->exp)) == '2'
4981 || GET_RTX_CLASS (GET_CODE (elt->exp)) == 'c')
4982 && eltcode != DIV && eltcode != MOD
4983 && eltcode != UDIV && eltcode != UMOD
4984 && eltcode != ASHIFTRT && eltcode != LSHIFTRT
4985 && eltcode != ROTATE && eltcode != ROTATERT
4986 && ((GET_CODE (XEXP (elt->exp, 0)) == SUBREG
4987 && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 0)))
4988 == mode))
4989 || CONSTANT_P (XEXP (elt->exp, 0)))
4990 && ((GET_CODE (XEXP (elt->exp, 1)) == SUBREG
4991 && (GET_MODE (SUBREG_REG (XEXP (elt->exp, 1)))
4992 == mode))
4993 || CONSTANT_P (XEXP (elt->exp, 1))))
4994 {
4995 rtx op0 = gen_lowpart_common (mode, XEXP (elt->exp, 0));
4996 rtx op1 = gen_lowpart_common (mode, XEXP (elt->exp, 1));
4997
4998 if (op0 && GET_CODE (op0) != REG && ! CONSTANT_P (op0))
4999 op0 = fold_rtx (op0, NULL_RTX);
5000
5001 if (op0)
5002 op0 = equiv_constant (op0);
5003
5004 if (op1 && GET_CODE (op1) != REG && ! CONSTANT_P (op1))
5005 op1 = fold_rtx (op1, NULL_RTX);
5006
5007 if (op1)
5008 op1 = equiv_constant (op1);
5009
5010 /* If we are looking for the low SImode part of
5011 (ashift:DI c (const_int 32)), it doesn't work
5012 to compute that in SImode, because a 32-bit shift
5013 in SImode is unpredictable. We know the value is 0. */
5014 if (op0 && op1
5015 && GET_CODE (elt->exp) == ASHIFT
5016 && GET_CODE (op1) == CONST_INT
5017 && INTVAL (op1) >= GET_MODE_BITSIZE (mode))
5018 {
5019 if (INTVAL (op1) < GET_MODE_BITSIZE (GET_MODE (elt->exp)))
5020
5021 /* If the count fits in the inner mode's width,
5022 but exceeds the outer mode's width,
5023 the value will get truncated to 0
5024 by the subreg. */
5025 new = const0_rtx;
5026 else
5027 /* If the count exceeds even the inner mode's width,
5028 don't fold this expression. */
5029 new = 0;
5030 }
5031 else if (op0 && op1)
5032 new = simplify_binary_operation (GET_CODE (elt->exp), mode,
5033 op0, op1);
5034 }
5035
5036 else if (GET_CODE (elt->exp) == SUBREG
5037 && GET_MODE (SUBREG_REG (elt->exp)) == mode
5038 && (GET_MODE_SIZE (GET_MODE (folded_arg0))
5039 <= UNITS_PER_WORD)
5040 && exp_equiv_p (elt->exp, elt->exp, 1, 0))
5041 new = copy_rtx (SUBREG_REG (elt->exp));
5042
5043 if (new)
5044 return new;
5045 }
5046 }
5047
5048 return x;
5049
5050 case NOT:
5051 case NEG:
5052 /* If we have (NOT Y), see if Y is known to be (NOT Z).
5053 If so, (NOT Y) simplifies to Z. Similarly for NEG. */
5054 new = lookup_as_function (XEXP (x, 0), code);
5055 if (new)
5056 return fold_rtx (copy_rtx (XEXP (new, 0)), insn);
5057 break;
5058
5059 case MEM:
5060 /* If we are not actually processing an insn, don't try to find the
5061 best address. Not only don't we care, but we could modify the
5062 MEM in an invalid way since we have no insn to validate against. */
5063 if (insn != 0)
5064 find_best_addr (insn, &XEXP (x, 0));
5065
5066 {
5067 /* Even if we don't fold in the insn itself,
5068 we can safely do so here, in hopes of getting a constant. */
5069 rtx addr = fold_rtx (XEXP (x, 0), NULL_RTX);
5070 rtx base = 0;
5071 HOST_WIDE_INT offset = 0;
5072
5073 if (GET_CODE (addr) == REG
5074 && REGNO_QTY_VALID_P (REGNO (addr))
5075 && GET_MODE (addr) == qty_mode[reg_qty[REGNO (addr)]]
5076 && qty_const[reg_qty[REGNO (addr)]] != 0)
5077 addr = qty_const[reg_qty[REGNO (addr)]];
5078
5079 /* If address is constant, split it into a base and integer offset. */
5080 if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
5081 base = addr;
5082 else if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
5083 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
5084 {
5085 base = XEXP (XEXP (addr, 0), 0);
5086 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
5087 }
5088 else if (GET_CODE (addr) == LO_SUM
5089 && GET_CODE (XEXP (addr, 1)) == SYMBOL_REF)
5090 base = XEXP (addr, 1);
5091
5092 /* If this is a constant pool reference, we can fold it into its
5093 constant to allow better value tracking. */
5094 if (base && GET_CODE (base) == SYMBOL_REF
5095 && CONSTANT_POOL_ADDRESS_P (base))
5096 {
5097 rtx constant = get_pool_constant (base);
5098 enum machine_mode const_mode = get_pool_mode (base);
5099 rtx new;
5100
5101 if (CONSTANT_P (constant) && GET_CODE (constant) != CONST_INT)
5102 constant_pool_entries_cost = COST (constant);
5103
5104 /* If we are loading the full constant, we have an equivalence. */
5105 if (offset == 0 && mode == const_mode)
5106 return constant;
5107
5108 /* If this actually isn't a constant (weird!), we can't do
5109 anything. Otherwise, handle the two most common cases:
5110 extracting a word from a multi-word constant, and extracting
5111 the low-order bits. Other cases don't seem common enough to
5112 worry about. */
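/* E.g. an SImode reference at offset 4 into a DImode pool
   constant extracts the constant's second word; a reference at
   its low-order end can use gen_lowpart_if_possible.  */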
5113 if (! CONSTANT_P (constant))
5114 return x;
5115
5116 if (GET_MODE_CLASS (mode) == MODE_INT
5117 && GET_MODE_SIZE (mode) == UNITS_PER_WORD
5118 && offset % UNITS_PER_WORD == 0
5119 && (new = operand_subword (constant,
5120 offset / UNITS_PER_WORD,
5121 0, const_mode)) != 0)
5122 return new;
5123
5124 if (((BYTES_BIG_ENDIAN
5125 && offset == GET_MODE_SIZE (GET_MODE (constant)) - 1)
5126 || (! BYTES_BIG_ENDIAN && offset == 0))
5127 && (new = gen_lowpart_if_possible (mode, constant)) != 0)
5128 return new;
5129 }
5130
5131 /* If this is a reference to a label at a known position in a jump
5132 table, we also know its value. */
5133 if (base && GET_CODE (base) == LABEL_REF)
5134 {
5135 rtx label = XEXP (base, 0);
5136 rtx table_insn = NEXT_INSN (label);
5137
5138 if (table_insn && GET_CODE (table_insn) == JUMP_INSN
5139 && GET_CODE (PATTERN (table_insn)) == ADDR_VEC)
5140 {
5141 rtx table = PATTERN (table_insn);
5142
5143 if (offset >= 0
5144 && (offset / GET_MODE_SIZE (GET_MODE (table))
5145 < XVECLEN (table, 0)))
5146 return XVECEXP (table, 0,
5147 offset / GET_MODE_SIZE (GET_MODE (table)));
5148 }
5149 if (table_insn && GET_CODE (table_insn) == JUMP_INSN
5150 && GET_CODE (PATTERN (table_insn)) == ADDR_DIFF_VEC)
5151 {
5152 rtx table = PATTERN (table_insn);
5153
5154 if (offset >= 0
5155 && (offset / GET_MODE_SIZE (GET_MODE (table))
5156 < XVECLEN (table, 1)))
5157 {
5158 offset /= GET_MODE_SIZE (GET_MODE (table));
5159 new = gen_rtx (MINUS, Pmode, XVECEXP (table, 1, offset),
5160 XEXP (table, 0));
5161
5162 if (GET_MODE (table) != Pmode)
5163 new = gen_rtx (TRUNCATE, GET_MODE (table), new);
5164
5165 /* Indicate this is a constant. This isn't a
5166 valid form of CONST, but it will only be used
5167 to fold the next insns and then discarded, so
5168 it should be safe. */
5169 return gen_rtx (CONST, GET_MODE (new), new);
5170 }
5171 }
5172 }
5173
5174 return x;
5175 }
5176
5177 case ASM_OPERANDS:
5178 for (i = XVECLEN (x, 3) - 1; i >= 0; i--)
5179 validate_change (insn, &XVECEXP (x, 3, i),
5180 fold_rtx (XVECEXP (x, 3, i), insn), 0);
5181 break;
5182 }
5183
5184 const_arg0 = 0;
5185 const_arg1 = 0;
5186 const_arg2 = 0;
5187 mode_arg0 = VOIDmode;
5188
5189 /* Try folding our operands.
5190 Then see which ones have constant values known. */
5191
5192 fmt = GET_RTX_FORMAT (code);
5193 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5194 if (fmt[i] == 'e')
5195 {
5196 rtx arg = XEXP (x, i);
5197 rtx folded_arg = arg, const_arg = 0;
5198 enum machine_mode mode_arg = GET_MODE (arg);
5199 rtx cheap_arg, expensive_arg;
5200 rtx replacements[2];
5201 int j;
5202
5203 /* Most arguments are cheap, so handle them specially. */
5204 switch (GET_CODE (arg))
5205 {
5206 case REG:
5207 /* This is the same as calling equiv_constant; it is duplicated
5208 here for speed. */
5209 if (REGNO_QTY_VALID_P (REGNO (arg))
5210 && qty_const[reg_qty[REGNO (arg)]] != 0
5211 && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != REG
5212 && GET_CODE (qty_const[reg_qty[REGNO (arg)]]) != PLUS)
5213 const_arg
5214 = gen_lowpart_if_possible (GET_MODE (arg),
5215 qty_const[reg_qty[REGNO (arg)]]);
5216 break;
5217
5218 case CONST:
5219 case CONST_INT:
5220 case SYMBOL_REF:
5221 case LABEL_REF:
5222 case CONST_DOUBLE:
5223 const_arg = arg;
5224 break;
5225
5226 #ifdef HAVE_cc0
5227 case CC0:
5228 folded_arg = prev_insn_cc0;
5229 mode_arg = prev_insn_cc0_mode;
5230 const_arg = equiv_constant (folded_arg);
5231 break;
5232 #endif
5233
5234 default:
5235 folded_arg = fold_rtx (arg, insn);
5236 const_arg = equiv_constant (folded_arg);
5237 }
5238
5239 /* For the first three operands, see if the operand
5240 is constant or equivalent to a constant. */
5241 switch (i)
5242 {
5243 case 0:
5244 folded_arg0 = folded_arg;
5245 const_arg0 = const_arg;
5246 mode_arg0 = mode_arg;
5247 break;
5248 case 1:
5249 folded_arg1 = folded_arg;
5250 const_arg1 = const_arg;
5251 break;
5252 case 2:
5253 const_arg2 = const_arg;
5254 break;
5255 }
5256
5257 /* Pick the least expensive of the folded argument and an
5258 equivalent constant argument. */
5259 if (const_arg == 0 || const_arg == folded_arg
5260 || COST (const_arg) > COST (folded_arg))
5261 cheap_arg = folded_arg, expensive_arg = const_arg;
5262 else
5263 cheap_arg = const_arg, expensive_arg = folded_arg;
5264
5265 /* Try to replace the operand with the cheapest of the two
5266 possibilities. If it doesn't work and this is either of the first
5267 two operands of a commutative operation, try swapping them.
5268 If THAT fails, try the more expensive one, provided it is
5269 cheaper than what is already there. */
5270
5271 if (cheap_arg == XEXP (x, i))
5272 continue;
5273
5274 if (insn == 0 && ! copied)
5275 {
5276 x = copy_rtx (x);
5277 copied = 1;
5278 }
5279
5280 replacements[0] = cheap_arg, replacements[1] = expensive_arg;
5281 for (j = 0;
5282 j < 2 && replacements[j]
5283 && COST (replacements[j]) < COST (XEXP (x, i));
5284 j++)
5285 {
5286 if (validate_change (insn, &XEXP (x, i), replacements[j], 0))
5287 break;
5288
5289 if (code == NE || code == EQ || GET_RTX_CLASS (code) == 'c')
5290 {
5291 validate_change (insn, &XEXP (x, i), XEXP (x, 1 - i), 1);
5292 validate_change (insn, &XEXP (x, 1 - i), replacements[j], 1);
5293
5294 if (apply_change_group ())
5295 {
5296 /* Swap them back to be invalid so that this loop can
5297 continue and flag them to be swapped back later. */
5298 rtx tem;
5299
5300 tem = XEXP (x, 0); XEXP (x, 0) = XEXP (x, 1);
5301 XEXP (x, 1) = tem;
5302 must_swap = 1;
5303 break;
5304 }
5305 }
5306 }
5307 }
5308
5309 else if (fmt[i] == 'E')
5310 /* Don't try to fold inside of a vector of expressions.
5311 Doing nothing is harmless. */
5312 ;
5313
5314 /* If this is a commutative operation, place a constant integer as the second
5315 operand unless the first operand is also a constant integer. Otherwise,
5316 place any constant second unless the first operand is also a constant. */
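/* E.g. (plus (const_int 4) (reg R)) is rewritten as
   (plus (reg R) (const_int 4)), so that equivalent expressions
   take the same canonical shape for hashing.  */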
5317
5318 if (code == EQ || code == NE || GET_RTX_CLASS (code) == 'c')
5319 {
5320 if (must_swap || (const_arg0
5321 && (const_arg1 == 0
5322 || (GET_CODE (const_arg0) == CONST_INT
5323 && GET_CODE (const_arg1) != CONST_INT))))
5324 {
5325 register rtx tem = XEXP (x, 0);
5326
5327 if (insn == 0 && ! copied)
5328 {
5329 x = copy_rtx (x);
5330 copied = 1;
5331 }
5332
5333 validate_change (insn, &XEXP (x, 0), XEXP (x, 1), 1);
5334 validate_change (insn, &XEXP (x, 1), tem, 1);
5335 if (apply_change_group ())
5336 {
5337 tem = const_arg0, const_arg0 = const_arg1, const_arg1 = tem;
5338 tem = folded_arg0, folded_arg0 = folded_arg1, folded_arg1 = tem;
5339 }
5340 }
5341 }
5342
5343 /* If X is an arithmetic operation, see if we can simplify it. */
5344
5345 switch (GET_RTX_CLASS (code))
5346 {
5347 case '1':
5348 {
5349 int is_const = 0;
5350
5351 /* We can't simplify extension ops unless we know the
5352 original mode. */
5353 if ((code == ZERO_EXTEND || code == SIGN_EXTEND)
5354 && mode_arg0 == VOIDmode)
5355 break;
5356
5357 /* If we had a CONST, strip it off and put it back later if we
5358 fold. */
5359 if (const_arg0 != 0 && GET_CODE (const_arg0) == CONST)
5360 is_const = 1, const_arg0 = XEXP (const_arg0, 0);
5361
5362 new = simplify_unary_operation (code, mode,
5363 const_arg0 ? const_arg0 : folded_arg0,
5364 mode_arg0);
5365 if (new != 0 && is_const)
5366 new = gen_rtx (CONST, mode, new);
5367 }
5368 break;
5369
5370 case '<':
5371 /* See what items are actually being compared and set FOLDED_ARG[01]
5372 to those values and CODE to the actual comparison code. If any are
5373 constant, set CONST_ARG0 and CONST_ARG1 appropriately. We needn't
5374 do anything if both operands are already known to be constant. */
5375
5376 if (const_arg0 == 0 || const_arg1 == 0)
5377 {
5378 struct table_elt *p0, *p1;
5379 rtx true = const_true_rtx, false = const0_rtx;
5380 enum machine_mode mode_arg1;
5381
5382 #ifdef FLOAT_STORE_FLAG_VALUE
5383 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5384 {
5385 true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
5386 mode);
5387 false = CONST0_RTX (mode);
5388 }
5389 #endif
5390
5391 code = find_comparison_args (code, &folded_arg0, &folded_arg1,
5392 &mode_arg0, &mode_arg1);
5393 const_arg0 = equiv_constant (folded_arg0);
5394 const_arg1 = equiv_constant (folded_arg1);
5395
5396 /* If the mode is VOIDmode or a MODE_CC mode, we don't know
5397 what kinds of things are being compared, so we can't do
5398 anything with this comparison. */
5399
5400 if (mode_arg0 == VOIDmode || GET_MODE_CLASS (mode_arg0) == MODE_CC)
5401 break;
5402
5403 /* If we do not now have two constants being compared, see
5404 if we can nevertheless deduce some things about the
5405 comparison. */
5406 if (const_arg0 == 0 || const_arg1 == 0)
5407 {
5408 /* Is FOLDED_ARG0 frame-pointer plus a constant? Or
5409 non-explicit constant? These aren't zero, but we
5410 don't know their sign. */
5411 if (const_arg1 == const0_rtx
5412 && (NONZERO_BASE_PLUS_P (folded_arg0)
5413 #if 0 /* Sad to say, on sysvr4, #pragma weak can make a symbol address
5414 come out as 0. */
5415 || GET_CODE (folded_arg0) == SYMBOL_REF
5416 #endif
5417 || GET_CODE (folded_arg0) == LABEL_REF
5418 || GET_CODE (folded_arg0) == CONST))
5419 {
5420 if (code == EQ)
5421 return false;
5422 else if (code == NE)
5423 return true;
5424 }
5425
5426 /* See if the two operands are the same. We don't do this
5427 for IEEE floating-point, since we can't assume x == x
5428 when x might be a NaN.
5429
5430 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
5431 || ! FLOAT_MODE_P (mode_arg0) || flag_fast_math)
5432 && (folded_arg0 == folded_arg1
5433 || (GET_CODE (folded_arg0) == REG
5434 && GET_CODE (folded_arg1) == REG
5435 && (reg_qty[REGNO (folded_arg0)]
5436 == reg_qty[REGNO (folded_arg1)]))
5437 || ((p0 = lookup (folded_arg0,
5438 (safe_hash (folded_arg0, mode_arg0)
5439 % NBUCKETS), mode_arg0))
5440 && (p1 = lookup (folded_arg1,
5441 (safe_hash (folded_arg1, mode_arg0)
5442 % NBUCKETS), mode_arg0))
5443 && p0->first_same_value == p1->first_same_value)))
5444 return ((code == EQ || code == LE || code == GE
5445 || code == LEU || code == GEU)
5446 ? true : false);
5447
5448 /* If FOLDED_ARG0 is a register, see if the comparison we are
5449 doing now is either the same as we did before or the reverse
5450 (we only check the reverse if not floating-point). */
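/* For example (hypothetical pseudo): if an earlier insn recorded that
   (gt (reg 65) (const_int 0)) was true, a later (ge (reg 65) (const_int 0))
   is dominated by it and folds to true, while (le (reg 65) (const_int 0)),
   whose reverse GT dominates, folds to false.  */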
5451 else if (GET_CODE (folded_arg0) == REG)
5452 {
5453 int qty = reg_qty[REGNO (folded_arg0)];
5454
5455 if (REGNO_QTY_VALID_P (REGNO (folded_arg0))
5456 && (comparison_dominates_p (qty_comparison_code[qty], code)
5457 || (comparison_dominates_p (qty_comparison_code[qty],
5458 reverse_condition (code))
5459 && ! FLOAT_MODE_P (mode_arg0)))
5460 && (rtx_equal_p (qty_comparison_const[qty], folded_arg1)
5461 || (const_arg1
5462 && rtx_equal_p (qty_comparison_const[qty],
5463 const_arg1))
5464 || (GET_CODE (folded_arg1) == REG
5465 && (reg_qty[REGNO (folded_arg1)]
5466 == qty_comparison_qty[qty]))))
5467 return (comparison_dominates_p (qty_comparison_code[qty],
5468 code)
5469 ? true : false);
5470 }
5471 }
5472 }
5473
5474 /* If we are comparing against zero, see if the first operand is
5475 equivalent to an IOR with a constant. If so, we may be able to
5476 determine the result of this comparison. */
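/* For example (hypothetical pseudos): if (reg 65) is known equivalent to
   (ior:SI (reg 66) (const_int 4)), bit 2 of (reg 65) is set, so it cannot
   be zero: (eq (reg 65) (const_int 0)) folds to false and the NE case to
   true.  If the IOR constant has the sign bit set, the value is also
   known negative, resolving LT/LE and GT/GE below.  */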
5477
5478 if (const_arg1 == const0_rtx)
5479 {
5480 rtx y = lookup_as_function (folded_arg0, IOR);
5481 rtx inner_const;
5482
5483 if (y != 0
5484 && (inner_const = equiv_constant (XEXP (y, 1))) != 0
5485 && GET_CODE (inner_const) == CONST_INT
5486 && INTVAL (inner_const) != 0)
5487 {
5488 int sign_bitnum = GET_MODE_BITSIZE (mode_arg0) - 1;
5489 int has_sign = (HOST_BITS_PER_WIDE_INT > sign_bitnum
5490 && (INTVAL (inner_const)
5491 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
5492 rtx true = const_true_rtx, false = const0_rtx;
5493
5494 #ifdef FLOAT_STORE_FLAG_VALUE
5495 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5496 {
5497 true = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE,
5498 mode);
5499 false = CONST0_RTX (mode);
5500 }
5501 #endif
5502
5503 switch (code)
5504 {
5505 case EQ:
5506 return false;
5507 case NE:
5508 return true;
5509 case LT: case LE:
5510 if (has_sign)
5511 return true;
5512 break;
5513 case GT: case GE:
5514 if (has_sign)
5515 return false;
5516 break;
5517 }
5518 }
5519 }
5520
5521 new = simplify_relational_operation (code, mode_arg0,
5522 const_arg0 ? const_arg0 : folded_arg0,
5523 const_arg1 ? const_arg1 : folded_arg1);
5524 #ifdef FLOAT_STORE_FLAG_VALUE
5525 if (new != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
5526 new = ((new == const0_rtx) ? CONST0_RTX (mode)
5527 : CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE, mode));
5528 #endif
5529 break;
5530
5531 case '2':
5532 case 'c':
5533 switch (code)
5534 {
5535 case PLUS:
5536 /* If the second operand is a LABEL_REF, see if the first is a MINUS
5537 with that LABEL_REF as its second operand. If so, the result is
5538 the first operand of that MINUS. This handles switches with an
5539 ADDR_DIFF_VEC table. */
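/* Illustrative sketch (hypothetical labels): a dispatch address may be
   computed as (plus (reg 65) (label_ref L1)) where (reg 65) is known to
   be (minus (label_ref L2) (label_ref L1)) from an ADDR_DIFF_VEC entry;
   the fold below recovers plain (label_ref L2).  */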
5540 if (const_arg1 && GET_CODE (const_arg1) == LABEL_REF)
5541 {
5542 rtx y
5543 = GET_CODE (folded_arg0) == MINUS ? folded_arg0
5544 : lookup_as_function (folded_arg0, MINUS);
5545
5546 if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
5547 && XEXP (XEXP (y, 1), 0) == XEXP (const_arg1, 0))
5548 return XEXP (y, 0);
5549
5550 /* Now try for a CONST of a MINUS like the above. */
5551 if ((y = (GET_CODE (folded_arg0) == CONST ? folded_arg0
5552 : lookup_as_function (folded_arg0, CONST))) != 0
5553 && GET_CODE (XEXP (y, 0)) == MINUS
5554 && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
5555 && XEXP (XEXP (XEXP (y, 0), 1), 0) == XEXP (const_arg1, 0))
5556 return XEXP (XEXP (y, 0), 0);
5557 }
5558
5559 /* Likewise if the operands are in the other order. */
5560 if (const_arg0 && GET_CODE (const_arg0) == LABEL_REF)
5561 {
5562 rtx y
5563 = GET_CODE (folded_arg1) == MINUS ? folded_arg1
5564 : lookup_as_function (folded_arg1, MINUS);
5565
5566 if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
5567 && XEXP (XEXP (y, 1), 0) == XEXP (const_arg0, 0))
5568 return XEXP (y, 0);
5569
5570 /* Now try for a CONST of a MINUS like the above. */
5571 if ((y = (GET_CODE (folded_arg1) == CONST ? folded_arg1
5572 : lookup_as_function (folded_arg1, CONST))) != 0
5573 && GET_CODE (XEXP (y, 0)) == MINUS
5574 && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
5575 && XEXP (XEXP (XEXP (y, 0), 1), 0) == XEXP (const_arg0, 0))
5576 return XEXP (XEXP (y, 0), 0);
5577 }
5578
5579 /* If second operand is a register equivalent to a negative
5580 CONST_INT, see if we can find a register equivalent to the
5581 positive constant. Make a MINUS if so. Don't do this for
5582 a negative constant since we might then alternate between
5583 choosing positive and negative constants. Having the positive
5584 constant previously-used is the more common case. */
5585 if (const_arg1 && GET_CODE (const_arg1) == CONST_INT
5586 && INTVAL (const_arg1) < 0 && GET_CODE (folded_arg1) == REG)
5587 {
5588 rtx new_const = GEN_INT (- INTVAL (const_arg1));
5589 struct table_elt *p
5590 = lookup (new_const, safe_hash (new_const, mode) % NBUCKETS,
5591 mode);
5592
5593 if (p)
5594 for (p = p->first_same_value; p; p = p->next_same_value)
5595 if (GET_CODE (p->exp) == REG)
5596 return cse_gen_binary (MINUS, mode, folded_arg0,
5597 canon_reg (p->exp, NULL_RTX));
5598 }
5599 goto from_plus;
5600
5601 case MINUS:
5602 /* If we have (MINUS Y C), see if Y is known to be (PLUS Z C2).
5603 If so, produce (PLUS Z C2-C). */
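/* For example (hypothetical pseudos): folding
   (minus (reg 65) (const_int 4)) when (reg 65) is known to be
   (plus (reg 66) (const_int 10)) produces (plus (reg 66) (const_int 6)).  */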
5604 if (const_arg1 != 0 && GET_CODE (const_arg1) == CONST_INT)
5605 {
5606 rtx y = lookup_as_function (XEXP (x, 0), PLUS);
5607 if (y && GET_CODE (XEXP (y, 1)) == CONST_INT)
5608 return fold_rtx (plus_constant (copy_rtx (y),
5609 -INTVAL (const_arg1)),
5610 NULL_RTX);
5611 }
5612
5613 /* ... fall through ... */
5614
5615 from_plus:
5616 case SMIN: case SMAX: case UMIN: case UMAX:
5617 case IOR: case AND: case XOR:
5618 case MULT: case DIV: case UDIV:
5619 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
5620 /* If we have (<op> <reg> <const_int>) for an associative OP and REG
5621 is known to be of similar form, we may be able to replace the
5622 operation with a combined operation. This may eliminate the
5623 intermediate operation if every use is simplified in this way.
5624 Note that the similar optimization done by combine.c only works
5625 if the intermediate operation's result has only one reference. */
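/* For example (hypothetical pseudos): if (reg 65) is known to be
   (ashift (reg 66) (const_int 3)), then (ashift (reg 65) (const_int 2))
   can be rewritten as (ashift (reg 66) (const_int 5)); the two shift
   counts are composed with the associate_code chosen below.  */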
5626
5627 if (GET_CODE (folded_arg0) == REG
5628 && const_arg1 && GET_CODE (const_arg1) == CONST_INT)
5629 {
5630 int is_shift
5631 = (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT);
5632 rtx y = lookup_as_function (folded_arg0, code);
5633 rtx inner_const;
5634 enum rtx_code associate_code;
5635 rtx new_const;
5636
5637 if (y == 0
5638 || 0 == (inner_const
5639 = equiv_constant (fold_rtx (XEXP (y, 1), 0)))
5640 || GET_CODE (inner_const) != CONST_INT
5641 /* If we have compiled a statement like
5642 "if (x == (x & mask1))", and now are looking at
5643 "x & mask2", we will have a case where the first operand
5644 of Y is the same as our first operand. Unless we detect
5645 this case, an infinite loop will result. */
5646 || XEXP (y, 0) == folded_arg0)
5647 break;
5648
5649 /* Don't associate these operations if they are a PLUS with the
5650 same constant and it is a power of two. These might be doable
5651 with a pre- or post-increment. Similarly for two subtracts of
5652 identical powers of two with post decrement. */
5653
5654 if (code == PLUS && INTVAL (const_arg1) == INTVAL (inner_const)
5655 && (0
5656 #if defined(HAVE_PRE_INCREMENT) || defined(HAVE_POST_INCREMENT)
5657 || exact_log2 (INTVAL (const_arg1)) >= 0
5658 #endif
5659 #if defined(HAVE_PRE_DECREMENT) || defined(HAVE_POST_DECREMENT)
5660 || exact_log2 (- INTVAL (const_arg1)) >= 0
5661 #endif
5662 ))
5663 break;
5664
5665 /* Compute the code used to compose the constants. For example,
5666 A/C1/C2 is A/(C1 * C2), so if CODE == DIV, we want MULT. */
5667
5668 associate_code
5669 = (code == MULT || code == DIV || code == UDIV ? MULT
5670 : is_shift || code == PLUS || code == MINUS ? PLUS : code);
5671
5672 new_const = simplify_binary_operation (associate_code, mode,
5673 const_arg1, inner_const);
5674
5675 if (new_const == 0)
5676 break;
5677
5678 /* If we are associating shift operations, don't let this
5679 produce a shift of the size of the object or larger.
5680 This could occur when we follow a sign-extend by a right
5681 shift on a machine that does a sign-extend as a pair
5682 of shifts. */
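/* Concretely (a sketch): a QImode-to-SImode sign-extend done as
   (ashift X (const_int 24)) followed by (ashiftrt ... (const_int 24)),
   then right-shifted again by 8, would compose to an SImode shift count
   of 32; for ASHIFTRT we clamp the count to 31 rather than give up.  */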
5683
5684 if (is_shift && GET_CODE (new_const) == CONST_INT
5685 && INTVAL (new_const) >= GET_MODE_BITSIZE (mode))
5686 {
5687 /* As an exception, we can turn an ASHIFTRT of this
5688 form into a shift of the number of bits - 1. */
5689 if (code == ASHIFTRT)
5690 new_const = GEN_INT (GET_MODE_BITSIZE (mode) - 1);
5691 else
5692 break;
5693 }
5694
5695 y = copy_rtx (XEXP (y, 0));
5696
5697 /* If Y contains our first operand (the most common way this
5698 can happen is if Y is a MEM), we would go into an infinite
5699 loop if we tried to fold it. So don't in that case. */
5700
5701 if (! reg_mentioned_p (folded_arg0, y))
5702 y = fold_rtx (y, insn);
5703
5704 return cse_gen_binary (code, mode, y, new_const);
5705 }
5706 }
5707
5708 new = simplify_binary_operation (code, mode,
5709 const_arg0 ? const_arg0 : folded_arg0,
5710 const_arg1 ? const_arg1 : folded_arg1);
5711 break;
5712
5713 case 'o':
5714 /* (lo_sum (high X) X) is simply X. */
5715 if (code == LO_SUM && const_arg0 != 0
5716 && GET_CODE (const_arg0) == HIGH
5717 && rtx_equal_p (XEXP (const_arg0, 0), const_arg1))
5718 return const_arg1;
5719 break;
5720
5721 case '3':
5722 case 'b':
5723 new = simplify_ternary_operation (code, mode, mode_arg0,
5724 const_arg0 ? const_arg0 : folded_arg0,
5725 const_arg1 ? const_arg1 : folded_arg1,
5726 const_arg2 ? const_arg2 : XEXP (x, 2));
5727 break;
5728 }
5729
5730 return new ? new : x;
5731 }
5732 \f
5733 /* Return a constant value currently equivalent to X.
5734 Return 0 if we don't know one. */
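/* For example (a hypothetical case): if (reg 65) belongs to a quantity
   whose recorded qty_const is (const_int 42), the result is
   (const_int 42); for a MEM that is a constant-pool reference, the
   fold_rtx call below recovers the pool constant.  */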
5735
5736 static rtx
5737 equiv_constant (x)
5738 rtx x;
5739 {
5740 if (GET_CODE (x) == REG
5741 && REGNO_QTY_VALID_P (REGNO (x))
5742 && qty_const[reg_qty[REGNO (x)]])
5743 x = gen_lowpart_if_possible (GET_MODE (x), qty_const[reg_qty[REGNO (x)]]);
5744
5745 if (x != 0 && CONSTANT_P (x))
5746 return x;
5747
5748 /* If X is a MEM, try to fold it outside the context of any insn to see if
5749 it might be equivalent to a constant. That handles the case where it
5750 is a constant-pool reference. Then try to look it up in the hash table
5751 in case it is something whose value we have seen before. */
5752
5753 if (GET_CODE (x) == MEM)
5754 {
5755 struct table_elt *elt;
5756
5757 x = fold_rtx (x, NULL_RTX);
5758 if (CONSTANT_P (x))
5759 return x;
5760
5761 elt = lookup (x, safe_hash (x, GET_MODE (x)) % NBUCKETS, GET_MODE (x));
5762 if (elt == 0)
5763 return 0;
5764
5765 for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
5766 if (elt->is_const && CONSTANT_P (elt->exp))
5767 return elt->exp;
5768 }
5769
5770 return 0;
5771 }
5772 \f
5773 /* Assuming that X is an rtx (e.g., MEM, REG or SUBREG) for a fixed-point
5774 number, return an rtx (MEM, SUBREG, or CONST_INT) that refers to the
5775 least-significant part of X.
5776 MODE specifies how big a part of X to return.
5777
5778 If the requested operation cannot be done, 0 is returned.
5779
5780 This is similar to gen_lowpart in emit-rtl.c. */
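/* For instance (a sketch): the QImode low part of (mem:SI addr) is
   (mem:QI addr) on a little-endian target but (mem:QI (plus addr 3)) on
   a big-endian one, keeping the address just past the data unchanged.  */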
5781
5782 rtx
5783 gen_lowpart_if_possible (mode, x)
5784 enum machine_mode mode;
5785 register rtx x;
5786 {
5787 rtx result = gen_lowpart_common (mode, x);
5788
5789 if (result)
5790 return result;
5791 else if (GET_CODE (x) == MEM)
5792 {
5793 /* This is the only other case we handle. */
5794 register int offset = 0;
5795 rtx new;
5796
5797 if (WORDS_BIG_ENDIAN)
5798 offset = (MAX (GET_MODE_SIZE (GET_MODE (x)), UNITS_PER_WORD)
5799 - MAX (GET_MODE_SIZE (mode), UNITS_PER_WORD));
5800 if (BYTES_BIG_ENDIAN)
5801 /* Adjust the address so that the address-after-the-data is
5802 unchanged. */
5803 offset -= (MIN (UNITS_PER_WORD, GET_MODE_SIZE (mode))
5804 - MIN (UNITS_PER_WORD, GET_MODE_SIZE (GET_MODE (x))));
5805 new = gen_rtx (MEM, mode, plus_constant (XEXP (x, 0), offset));
5806 if (! memory_address_p (mode, XEXP (new, 0)))
5807 return 0;
5808 MEM_VOLATILE_P (new) = MEM_VOLATILE_P (x);
5809 RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (x);
5810 MEM_IN_STRUCT_P (new) = MEM_IN_STRUCT_P (x);
5811 return new;
5812 }
5813 else
5814 return 0;
5815 }
5816 \f
5817 /* Given INSN, a jump insn, TAKEN indicates if we are following the "taken"
5818 branch. It will be zero if not.
5819
5820 In certain cases, this can cause us to add an equivalence. For example,
5821 if we are following the taken case of
5822 if (i == 2)
5823 we can add the fact that `i' and '2' are now equivalent.
5824
5825 In any case, we can record that this comparison was passed. If the same
5826 comparison is seen later, we will know its value. */
5827
5828 static void
5829 record_jump_equiv (insn, taken)
5830 rtx insn;
5831 int taken;
5832 {
5833 int cond_known_true;
5834 rtx op0, op1;
5835 enum machine_mode mode, mode0, mode1;
5836 int reversed_nonequality = 0;
5837 enum rtx_code code;
5838
5839 /* Ensure this is the right kind of insn. */
5840 if (! condjump_p (insn) || simplejump_p (insn))
5841 return;
5842
5843 /* See if this jump condition is known true or false. */
5844 if (taken)
5845 cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 2) == pc_rtx);
5846 else
5847 cond_known_true = (XEXP (SET_SRC (PATTERN (insn)), 1) == pc_rtx);
5848
5849 /* Get the type of comparison being done and the operands being compared.
5850 If we had to reverse a non-equality condition, record that fact so we
5851 know that it isn't valid for floating-point. */
5852 code = GET_CODE (XEXP (SET_SRC (PATTERN (insn)), 0));
5853 op0 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 0), insn);
5854 op1 = fold_rtx (XEXP (XEXP (SET_SRC (PATTERN (insn)), 0), 1), insn);
5855
5856 code = find_comparison_args (code, &op0, &op1, &mode0, &mode1);
5857 if (! cond_known_true)
5858 {
5859 reversed_nonequality = (code != EQ && code != NE);
5860 code = reverse_condition (code);
5861 }
5862
5863 /* The mode is the mode of the non-constant. */
5864 mode = mode0;
5865 if (mode1 != VOIDmode)
5866 mode = mode1;
5867
5868 record_jump_cond (code, mode, op0, op1, reversed_nonequality);
5869 }
5870
5871 /* We know that comparison CODE applied to OP0 and OP1 in MODE is true.
5872 REVERSED_NONEQUALITY is nonzero if CODE had to be swapped.
5873 Make any useful entries we can with that information. Called from
5874 above function and called recursively. */
5875
5876 static void
5877 record_jump_cond (code, mode, op0, op1, reversed_nonequality)
5878 enum rtx_code code;
5879 enum machine_mode mode;
5880 rtx op0, op1;
5881 int reversed_nonequality;
5882 {
5883 unsigned op0_hash, op1_hash;
5884 int op0_in_memory, op0_in_struct, op1_in_memory, op1_in_struct;
5885 struct table_elt *op0_elt, *op1_elt;
5886
5887 /* If OP0 and OP1 are known equal, and either is a paradoxical SUBREG,
5888 we know that they are also equal in the smaller mode (this is also
5889 true for all smaller modes whether or not there is a SUBREG, but
5890 is not worth testing for with no SUBREG). */
5891
5892 /* Note that GET_MODE (op0) may not equal MODE. */
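/* For example (hypothetical pseudos): from an EQ between
   (subreg:SI (reg:QI 65) 0) and (reg:SI 66), we can also record that
   (reg:QI 65) equals the QImode low part of (reg:SI 66).  */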
5893 if (code == EQ && GET_CODE (op0) == SUBREG
5894 && (GET_MODE_SIZE (GET_MODE (op0))
5895 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
5896 {
5897 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
5898 rtx tem = gen_lowpart_if_possible (inner_mode, op1);
5899
5900 record_jump_cond (code, mode, SUBREG_REG (op0),
5901 tem ? tem : gen_rtx (SUBREG, inner_mode, op1, 0),
5902 reversed_nonequality);
5903 }
5904
5905 if (code == EQ && GET_CODE (op1) == SUBREG
5906 && (GET_MODE_SIZE (GET_MODE (op1))
5907 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
5908 {
5909 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
5910 rtx tem = gen_lowpart_if_possible (inner_mode, op0);
5911
5912 record_jump_cond (code, mode, SUBREG_REG (op1),
5913 tem ? tem : gen_rtx (SUBREG, inner_mode, op0, 0),
5914 reversed_nonequality);
5915 }
5916
5917 /* Similarly, if this is an NE comparison, and either is a SUBREG
5918 making a smaller mode, we know the whole thing is also NE. */
5919
5920 /* Note that GET_MODE (op0) may not equal MODE;
5921 if we test MODE instead, we can get an infinite recursion
5922 alternating between two modes each wider than MODE. */
5923
5924 if (code == NE && GET_CODE (op0) == SUBREG
5925 && subreg_lowpart_p (op0)
5926 && (GET_MODE_SIZE (GET_MODE (op0))
5927 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
5928 {
5929 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
5930 rtx tem = gen_lowpart_if_possible (inner_mode, op1);
5931
5932 record_jump_cond (code, mode, SUBREG_REG (op0),
5933 tem ? tem : gen_rtx (SUBREG, inner_mode, op1, 0),
5934 reversed_nonequality);
5935 }
5936
5937 if (code == NE && GET_CODE (op1) == SUBREG
5938 && subreg_lowpart_p (op1)
5939 && (GET_MODE_SIZE (GET_MODE (op1))
5940 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
5941 {
5942 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
5943 rtx tem = gen_lowpart_if_possible (inner_mode, op0);
5944
5945 record_jump_cond (code, mode, SUBREG_REG (op1),
5946 tem ? tem : gen_rtx (SUBREG, inner_mode, op0, 0),
5947 reversed_nonequality);
5948 }
5949
5950 /* Hash both operands. */
5951
5952 do_not_record = 0;
5953 hash_arg_in_memory = 0;
5954 hash_arg_in_struct = 0;
5955 op0_hash = HASH (op0, mode);
5956 op0_in_memory = hash_arg_in_memory;
5957 op0_in_struct = hash_arg_in_struct;
5958
5959 if (do_not_record)
5960 return;
5961
5962 do_not_record = 0;
5963 hash_arg_in_memory = 0;
5964 hash_arg_in_struct = 0;
5965 op1_hash = HASH (op1, mode);
5966 op1_in_memory = hash_arg_in_memory;
5967 op1_in_struct = hash_arg_in_struct;
5968
5969 if (do_not_record)
5970 return;
5971
5972 /* Look up both operands. */
5973 op0_elt = lookup (op0, op0_hash, mode);
5974 op1_elt = lookup (op1, op1_hash, mode);
5975
5976 /* If both operands are already equivalent or if they are not in the
5977 table but are identical, do nothing. */
5978 if ((op0_elt != 0 && op1_elt != 0
5979 && op0_elt->first_same_value == op1_elt->first_same_value)
5980 || op0 == op1 || rtx_equal_p (op0, op1))
5981 return;
5982
5983 /* If we aren't setting two things equal all we can do is save this
5984 comparison. Similarly if this is floating-point. In the latter
5985 case, OP1 might be zero and both -0.0 and 0.0 are equal to it.
5986 If we record the equality, we might inadvertently delete code
5987 whose intent was to change -0 to +0. */
5988
5989 if (code != EQ || FLOAT_MODE_P (GET_MODE (op0)))
5990 {
5991 /* If we reversed a floating-point comparison, if OP0 is not a
5992 register, or if OP1 is neither a register nor a constant, we can't
5993 do anything. */
5994
5995 if (GET_CODE (op1) != REG)
5996 op1 = equiv_constant (op1);
5997
5998 if ((reversed_nonequality && FLOAT_MODE_P (mode))
5999 || GET_CODE (op0) != REG || op1 == 0)
6000 return;
6001
6002 /* Put OP0 in the hash table if it isn't already. This gives it a
6003 new quantity number. */
6004 if (op0_elt == 0)
6005 {
6006 if (insert_regs (op0, NULL_PTR, 0))
6007 {
6008 rehash_using_reg (op0);
6009 op0_hash = HASH (op0, mode);
6010
6011 /* If OP0 is contained in OP1, this changes its hash code
6012 as well. Faster to rehash than to check, except
6013 for the simple case of a constant. */
6014 if (! CONSTANT_P (op1))
6015 op1_hash = HASH (op1, mode);
6016 }
6017
6018 op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
6019 op0_elt->in_memory = op0_in_memory;
6020 op0_elt->in_struct = op0_in_struct;
6021 }
6022
6023 qty_comparison_code[reg_qty[REGNO (op0)]] = code;
6024 if (GET_CODE (op1) == REG)
6025 {
6026 /* Look it up again--in case op0 and op1 are the same. */
6027 op1_elt = lookup (op1, op1_hash, mode);
6028
6029 /* Put OP1 in the hash table so it gets a new quantity number. */
6030 if (op1_elt == 0)
6031 {
6032 if (insert_regs (op1, NULL_PTR, 0))
6033 {
6034 rehash_using_reg (op1);
6035 op1_hash = HASH (op1, mode);
6036 }
6037
6038 op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
6039 op1_elt->in_memory = op1_in_memory;
6040 op1_elt->in_struct = op1_in_struct;
6041 }
6042
6043 qty_comparison_qty[reg_qty[REGNO (op0)]] = reg_qty[REGNO (op1)];
6044 qty_comparison_const[reg_qty[REGNO (op0)]] = 0;
6045 }
6046 else
6047 {
6048 qty_comparison_qty[reg_qty[REGNO (op0)]] = -1;
6049 qty_comparison_const[reg_qty[REGNO (op0)]] = op1;
6050 }
6051
6052 return;
6053 }
6054
6055 /* If either side is still missing an equivalence, make it now,
6056 then merge the equivalences. */
6057
6058 if (op0_elt == 0)
6059 {
6060 if (insert_regs (op0, NULL_PTR, 0))
6061 {
6062 rehash_using_reg (op0);
6063 op0_hash = HASH (op0, mode);
6064 }
6065
6066 op0_elt = insert (op0, NULL_PTR, op0_hash, mode);
6067 op0_elt->in_memory = op0_in_memory;
6068 op0_elt->in_struct = op0_in_struct;
6069 }
6070
6071 if (op1_elt == 0)
6072 {
6073 if (insert_regs (op1, NULL_PTR, 0))
6074 {
6075 rehash_using_reg (op1);
6076 op1_hash = HASH (op1, mode);
6077 }
6078
6079 op1_elt = insert (op1, NULL_PTR, op1_hash, mode);
6080 op1_elt->in_memory = op1_in_memory;
6081 op1_elt->in_struct = op1_in_struct;
6082 }
6083
6084 merge_equiv_classes (op0_elt, op1_elt);
6085 last_jump_equiv_class = op0_elt;
6086 }
6087 \f
6088 /* CSE processing for one instruction.
6089 First simplify sources and addresses of all assignments
6090 in the instruction, using previously-computed equivalent values.
6091 Then install the new sources and destinations in the table
6092 of available values.
6093
6094 If IN_LIBCALL_BLOCK is nonzero, don't record any equivalence made in
6095 the insn. */
6096
6097 /* Data on one SET contained in the instruction. */
6098
6099 struct set
6100 {
6101 /* The SET rtx itself. */
6102 rtx rtl;
6103 /* The SET_SRC of the rtx (the original value, if it is changing). */
6104 rtx src;
6105 /* The hash-table element for the SET_SRC of the SET. */
6106 struct table_elt *src_elt;
6107 /* Hash value for the SET_SRC. */
6108 unsigned src_hash;
6109 /* Hash value for the SET_DEST. */
6110 unsigned dest_hash;
6111 /* The SET_DEST, with SUBREG, etc., stripped. */
6112 rtx inner_dest;
6113 /* Place where the pointer to the INNER_DEST was found. */
6114 rtx *inner_dest_loc;
6115 /* Nonzero if the SET_SRC is in memory. */
6116 char src_in_memory;
6117 /* Nonzero if the SET_SRC is in a structure. */
6118 char src_in_struct;
6119 /* Nonzero if the SET_SRC contains something
6120 whose value cannot be predicted and understood. */
6121 char src_volatile;
6122 /* Original machine mode, in case it becomes a CONST_INT. */
6123 enum machine_mode mode;
6124 /* A constant equivalent for SET_SRC, if any. */
6125 rtx src_const;
6126 /* Hash value of constant equivalent for SET_SRC. */
6127 unsigned src_const_hash;
6128 /* Table entry for constant equivalent for SET_SRC, if any. */
6129 struct table_elt *src_const_elt;
6130 };
6131
6132 static void
6133 cse_insn (insn, in_libcall_block)
6134 rtx insn;
6135 int in_libcall_block;
6136 {
6137 register rtx x = PATTERN (insn);
6138 register int i;
6139 rtx tem;
6140 register int n_sets = 0;
6141
6142 /* Records what this insn does to set CC0. */
6143 rtx this_insn_cc0 = 0;
6144 enum machine_mode this_insn_cc0_mode;
6145 struct write_data writes_memory;
6146 static struct write_data init = {0, 0, 0, 0};
6147
6148 rtx src_eqv = 0;
6149 struct table_elt *src_eqv_elt = 0;
6150 int src_eqv_volatile;
6151 int src_eqv_in_memory;
6152 int src_eqv_in_struct;
6153 unsigned src_eqv_hash;
6154
6155 struct set *sets;
6156
6157 this_insn = insn;
6158 writes_memory = init;
6159
6160 /* Find all the SETs and CLOBBERs in this instruction.
6161 Record all the SETs in the array `set' and count them.
6162 Also determine whether there is a CLOBBER that invalidates
6163 all memory references, or all references at varying addresses. */
6164
6165 if (GET_CODE (insn) == CALL_INSN)
6166 {
6167 for (tem = CALL_INSN_FUNCTION_USAGE (insn); tem; tem = XEXP (tem, 1))
6168 if (GET_CODE (XEXP (tem, 0)) == CLOBBER)
6169 invalidate (SET_DEST (XEXP (tem, 0)), VOIDmode);
6170 }
6171
6172 if (GET_CODE (x) == SET)
6173 {
6174 sets = (struct set *) alloca (sizeof (struct set));
6175 sets[0].rtl = x;
6176
6177 /* Ignore SETs that are unconditional jumps.
6178 They never need cse processing, so this does not hurt.
6179 The reason is not efficiency but rather
6180 so that we can test at the end for instructions
6181 that have been simplified to unconditional jumps
6182 and not be misled by unchanged instructions
6183 that were unconditional jumps to begin with. */
6184 if (SET_DEST (x) == pc_rtx
6185 && GET_CODE (SET_SRC (x)) == LABEL_REF)
6186 ;
6187
6188 /* Don't count call-insns, (set (reg 0) (call ...)), as a set.
6189 The hard function value register is used only once, to copy to
6190 someplace else, so it isn't worth cse'ing (and on 80386 is unsafe)!
6191 Ensure we invalidate the destination register. On the 80386 no
6192 other code would invalidate it since it is a fixed_reg.
6193 We need not check the return of apply_change_group; see canon_reg. */
6194
6195 else if (GET_CODE (SET_SRC (x)) == CALL)
6196 {
6197 canon_reg (SET_SRC (x), insn);
6198 apply_change_group ();
6199 fold_rtx (SET_SRC (x), insn);
6200 invalidate (SET_DEST (x), VOIDmode);
6201 }
6202 else
6203 n_sets = 1;
6204 }
6205 else if (GET_CODE (x) == PARALLEL)
6206 {
6207 register int lim = XVECLEN (x, 0);
6208
6209 sets = (struct set *) alloca (lim * sizeof (struct set));
6210
6211 /* Find all regs explicitly clobbered in this insn,
6212 and ensure they are not replaced with any other regs
6213 elsewhere in this insn.
6214 When a reg that is clobbered is also used for input,
6215 we should presume that that is for a reason,
6216 and we should not substitute some other register
6217 which is not supposed to be clobbered.
6218 Therefore, this loop cannot be merged into the one below
6219 because a CALL may precede a CLOBBER and refer to the
6220 value clobbered. We must not let a canonicalization do
6221 anything in that case. */
6222 for (i = 0; i < lim; i++)
6223 {
6224 register rtx y = XVECEXP (x, 0, i);
6225 if (GET_CODE (y) == CLOBBER)
6226 {
6227 rtx clobbered = XEXP (y, 0);
6228
6229 if (GET_CODE (clobbered) == REG
6230 || GET_CODE (clobbered) == SUBREG)
6231 invalidate (clobbered, VOIDmode);
6232 else if (GET_CODE (clobbered) == STRICT_LOW_PART
6233 || GET_CODE (clobbered) == ZERO_EXTRACT)
6234 invalidate (XEXP (clobbered, 0), GET_MODE (clobbered));
6235 }
6236 }
6237
6238 for (i = 0; i < lim; i++)
6239 {
6240 register rtx y = XVECEXP (x, 0, i);
6241 if (GET_CODE (y) == SET)
6242 {
6243 /* As above, we ignore unconditional jumps and call-insns and
6244 ignore the result of apply_change_group. */
6245 if (GET_CODE (SET_SRC (y)) == CALL)
6246 {
6247 canon_reg (SET_SRC (y), insn);
6248 apply_change_group ();
6249 fold_rtx (SET_SRC (y), insn);
6250 invalidate (SET_DEST (y), VOIDmode);
6251 }
6252 else if (SET_DEST (y) == pc_rtx
6253 && GET_CODE (SET_SRC (y)) == LABEL_REF)
6254 ;
6255 else
6256 sets[n_sets++].rtl = y;
6257 }
6258 else if (GET_CODE (y) == CLOBBER)
6259 {
6260 /* If we clobber memory, take note of that,
6261 and canon the address.
6262 This does nothing when a register is clobbered
6263 because we have already invalidated the reg. */
6264 if (GET_CODE (XEXP (y, 0)) == MEM)
6265 {
6266 canon_reg (XEXP (y, 0), NULL_RTX);
6267 note_mem_written (XEXP (y, 0), &writes_memory);
6268 }
6269 }
6270 else if (GET_CODE (y) == USE
6271 && ! (GET_CODE (XEXP (y, 0)) == REG
6272 && REGNO (XEXP (y, 0)) < FIRST_PSEUDO_REGISTER))
6273 canon_reg (y, NULL_RTX);
6274 else if (GET_CODE (y) == CALL)
6275 {
6276 /* The result of apply_change_group can be ignored; see
6277 canon_reg. */
6278 canon_reg (y, insn);
6279 apply_change_group ();
6280 fold_rtx (y, insn);
6281 }
6282 }
6283 }
6284 else if (GET_CODE (x) == CLOBBER)
6285 {
6286 if (GET_CODE (XEXP (x, 0)) == MEM)
6287 {
6288 canon_reg (XEXP (x, 0), NULL_RTX);
6289 note_mem_written (XEXP (x, 0), &writes_memory);
6290 }
6291 }
6292
6293 /* Canonicalize a USE of a pseudo register or memory location. */
6294 else if (GET_CODE (x) == USE
6295 && ! (GET_CODE (XEXP (x, 0)) == REG
6296 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER))
6297 canon_reg (XEXP (x, 0), NULL_RTX);
6298 else if (GET_CODE (x) == CALL)
6299 {
6300 /* The result of apply_change_group can be ignored; see canon_reg. */
6301 canon_reg (x, insn);
6302 apply_change_group ();
6303 fold_rtx (x, insn);
6304 }
6305
6306 /* Store the equivalent value in SRC_EQV, if different, or if the DEST
6307 is a STRICT_LOW_PART. The latter condition is necessary because SRC_EQV
6308 is handled specially for this case, and if it isn't set, then there will
6309 be no equivalence for the destination. */
6310 if (n_sets == 1 && REG_NOTES (insn) != 0
6311 && (tem = find_reg_note (insn, REG_EQUAL, NULL_RTX)) != 0
6312 && (! rtx_equal_p (XEXP (tem, 0), SET_SRC (sets[0].rtl))
6313 || GET_CODE (SET_DEST (sets[0].rtl)) == STRICT_LOW_PART))
6314 src_eqv = canon_reg (XEXP (tem, 0), NULL_RTX);
6315
6316 /* Canonicalize sources and addresses of destinations.
6317 We do this in a separate pass to avoid problems when a MATCH_DUP is
6318 present in the insn pattern. In that case, we want to ensure that
6319 we don't break the duplicate nature of the pattern. So we will replace
6320 both operands at the same time. Otherwise, we would fail to find an
6321 equivalent substitution in the loop calling validate_change below.
6322
6323 We used to suppress canonicalization of DEST if it appears in SRC,
6324 but we don't do this any more. */
6325
6326 for (i = 0; i < n_sets; i++)
6327 {
6328 rtx dest = SET_DEST (sets[i].rtl);
6329 rtx src = SET_SRC (sets[i].rtl);
6330 rtx new = canon_reg (src, insn);
6331
6332 if ((GET_CODE (new) == REG && GET_CODE (src) == REG
6333 && ((REGNO (new) < FIRST_PSEUDO_REGISTER)
6334 != (REGNO (src) < FIRST_PSEUDO_REGISTER)))
6335 || insn_n_dups[recog_memoized (insn)] > 0)
6336 validate_change (insn, &SET_SRC (sets[i].rtl), new, 1);
6337 else
6338 SET_SRC (sets[i].rtl) = new;
6339
6340 if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
6341 {
6342 validate_change (insn, &XEXP (dest, 1),
6343 canon_reg (XEXP (dest, 1), insn), 1);
6344 validate_change (insn, &XEXP (dest, 2),
6345 canon_reg (XEXP (dest, 2), insn), 1);
6346 }
6347
6348 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
6349 || GET_CODE (dest) == ZERO_EXTRACT
6350 || GET_CODE (dest) == SIGN_EXTRACT)
6351 dest = XEXP (dest, 0);
6352
6353 if (GET_CODE (dest) == MEM)
6354 canon_reg (dest, insn);
6355 }
6356
6357 /* Now that we have done all the replacements, we can apply the change
6358 group and see if they all work. Note that this will cause some
6359 canonicalizations that would have worked individually not to be applied
6360 because some other canonicalization didn't work, but this should not
6361 occur often.
6362
6363 The result of apply_change_group can be ignored; see canon_reg. */
6364
6365 apply_change_group ();
6366
6367 /* Set sets[i].src_elt to the class each source belongs to.
6368 Detect assignments from or to volatile things
6369 and set sets[i].rtl to zero so they will be ignored
6370 in the rest of this function.
6371
6372 Nothing in this loop changes the hash table or the register chains. */
6373
6374 for (i = 0; i < n_sets; i++)
6375 {
6376 register rtx src, dest;
6377 register rtx src_folded;
6378 register struct table_elt *elt = 0, *p;
6379 enum machine_mode mode;
6380 rtx src_eqv_here;
6381 rtx src_const = 0;
6382 rtx src_related = 0;
6383 struct table_elt *src_const_elt = 0;
6384 int src_cost = 10000, src_eqv_cost = 10000, src_folded_cost = 10000;
6385 int src_related_cost = 10000, src_elt_cost = 10000;
6386 /* Set non-zero if we need to call force_const_mem with the
6387 contents of src_folded before using it. */
6388 int src_folded_force_flag = 0;
6389
6390 dest = SET_DEST (sets[i].rtl);
6391 src = SET_SRC (sets[i].rtl);
6392
6393 /* If SRC is a constant that has no machine mode,
6394 hash it with the destination's machine mode.
6395 This way we can keep different modes separate. */
6396
6397 mode = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
6398 sets[i].mode = mode;
6399
6400 if (src_eqv)
6401 {
6402 enum machine_mode eqvmode = mode;
6403 if (GET_CODE (dest) == STRICT_LOW_PART)
6404 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
6405 do_not_record = 0;
6406 hash_arg_in_memory = 0;
6407 hash_arg_in_struct = 0;
6408 src_eqv = fold_rtx (src_eqv, insn);
6409 src_eqv_hash = HASH (src_eqv, eqvmode);
6410
6411 /* Find the equivalence class for the equivalent expression. */
6412
6413 if (!do_not_record)
6414 src_eqv_elt = lookup (src_eqv, src_eqv_hash, eqvmode);
6415
6416 src_eqv_volatile = do_not_record;
6417 src_eqv_in_memory = hash_arg_in_memory;
6418 src_eqv_in_struct = hash_arg_in_struct;
6419 }
6420
6421 /* If this is a STRICT_LOW_PART assignment, src_eqv corresponds to the
6422 value of the INNER register, not the destination. So it is not
6423 a valid substitution for the source. But save it for later. */
6424 if (GET_CODE (dest) == STRICT_LOW_PART)
6425 src_eqv_here = 0;
6426 else
6427 src_eqv_here = src_eqv;
6428
6429 /* Simplify any foldable subexpressions in SRC. Then get the fully-
6430 simplified result, which may not necessarily be valid. */
6431 src_folded = fold_rtx (src, insn);
6432
6433 #if 0
6434 /* ??? This caused bad code to be generated for the m68k port with -O2.
6435 Suppose src is (CONST_INT -1), and that after truncation src_folded
6436 is (CONST_INT 3). Suppose src_folded is then used for src_const.
6437 At the end we will add src and src_const to the same equivalence
6438 class. We now have 3 and -1 on the same equivalence class. This
6439 causes later instructions to be mis-optimized. */
6440 /* If storing a constant in a bitfield, pre-truncate the constant
6441 so we will be able to record it later. */
6442 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
6443 || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
6444 {
6445 rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
6446
6447 if (GET_CODE (src) == CONST_INT
6448 && GET_CODE (width) == CONST_INT
6449 && INTVAL (width) < HOST_BITS_PER_WIDE_INT
6450 && (INTVAL (src) & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
6451 src_folded
6452 = GEN_INT (INTVAL (src) & (((HOST_WIDE_INT) 1
6453 << INTVAL (width)) - 1));
6454 }
6455 #endif
6456
6457 /* Compute SRC's hash code, and also notice if it
6458 should not be recorded at all. In that case,
6459 prevent any further processing of this assignment. */
6460 do_not_record = 0;
6461 hash_arg_in_memory = 0;
6462 hash_arg_in_struct = 0;
6463
6464 sets[i].src = src;
6465 sets[i].src_hash = HASH (src, mode);
6466 sets[i].src_volatile = do_not_record;
6467 sets[i].src_in_memory = hash_arg_in_memory;
6468 sets[i].src_in_struct = hash_arg_in_struct;
6469
6470 #if 0
6471 /* It is no longer clear why we used to do this, but it doesn't
6472 appear to still be needed. So let's try without it since this
6473 code hurts cse'ing widened ops. */
6474 /* If source is a perverse subreg (such as QI treated as an SI),
6475 treat it as volatile. It may do the work of an SI in one context
6476 where the extra bits are not being used, but cannot replace an SI
6477 in general. */
6478 if (GET_CODE (src) == SUBREG
6479 && (GET_MODE_SIZE (GET_MODE (src))
6480 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
6481 sets[i].src_volatile = 1;
6482 #endif
6483
6484 /* Locate all possible equivalent forms for SRC. Try to replace
6485 SRC in the insn with each cheaper equivalent.
6486
6487 We have the following types of equivalents: SRC itself, a folded
6488 version, a value given in a REG_EQUAL note, or a value related
6489 to a constant.
6490
6491 Each of these equivalents may be part of an additional class
6492 of equivalents (if more than one is in the table, they must be in
6493 the same class; we check for this).
6494
6495 If the source is volatile, we don't do any table lookups.
6496
6497 We note any constant equivalent for possible later use in a
6498 REG_NOTE. */
6499
6500 if (!sets[i].src_volatile)
6501 elt = lookup (src, sets[i].src_hash, mode);
6502
6503 sets[i].src_elt = elt;
6504
6505 if (elt && src_eqv_here && src_eqv_elt)
6506 {
6507 if (elt->first_same_value != src_eqv_elt->first_same_value)
6508 {
6509 /* The REG_EQUAL is indicating that two formerly distinct
6510 classes are now equivalent. So merge them. */
6511 merge_equiv_classes (elt, src_eqv_elt);
6512 src_eqv_hash = HASH (src_eqv, elt->mode);
6513 src_eqv_elt = lookup (src_eqv, src_eqv_hash, elt->mode);
6514 }
6515
6516 src_eqv_here = 0;
6517 }
6518
6519 else if (src_eqv_elt)
6520 elt = src_eqv_elt;
6521
6522 /* Try to find a constant somewhere and record it in `src_const'.
6523 Record its table element, if any, in `src_const_elt'. Look in
6524 any known equivalences first. (If the constant is not in the
6525 table, also set `sets[i].src_const_hash'). */
6526 if (elt)
6527 for (p = elt->first_same_value; p; p = p->next_same_value)
6528 if (p->is_const)
6529 {
6530 src_const = p->exp;
6531 src_const_elt = elt;
6532 break;
6533 }
6534
6535 if (src_const == 0
6536 && (CONSTANT_P (src_folded)
6537 /* Consider (minus (label_ref L1) (label_ref L2)) as
6538 "constant" here so we will record it. This allows us
6539 to fold switch statements when an ADDR_DIFF_VEC is used. */
6540 || (GET_CODE (src_folded) == MINUS
6541 && GET_CODE (XEXP (src_folded, 0)) == LABEL_REF
6542 && GET_CODE (XEXP (src_folded, 1)) == LABEL_REF)))
6543 src_const = src_folded, src_const_elt = elt;
6544 else if (src_const == 0 && src_eqv_here && CONSTANT_P (src_eqv_here))
6545 src_const = src_eqv_here, src_const_elt = src_eqv_elt;
6546
6547 /* If we don't know if the constant is in the table, get its
6548 hash code and look it up. */
6549 if (src_const && src_const_elt == 0)
6550 {
6551 sets[i].src_const_hash = HASH (src_const, mode);
6552 src_const_elt = lookup (src_const, sets[i].src_const_hash, mode);
6553 }
6554
6555 sets[i].src_const = src_const;
6556 sets[i].src_const_elt = src_const_elt;
6557
6558 /* If the constant and our source are both in the table, mark them as
6559 equivalent. Otherwise, if a constant is in the table but the source
6560 isn't, set ELT to it. */
6561 if (src_const_elt && elt
6562 && src_const_elt->first_same_value != elt->first_same_value)
6563 merge_equiv_classes (elt, src_const_elt);
6564 else if (src_const_elt && elt == 0)
6565 elt = src_const_elt;
6566
6567 /* See if there is a register linearly related to a constant
6568 equivalent of SRC. */
6569 if (src_const
6570 && (GET_CODE (src_const) == CONST
6571 || (src_const_elt && src_const_elt->related_value != 0)))
6572 {
6573 src_related = use_related_value (src_const, src_const_elt);
6574 if (src_related)
6575 {
6576 struct table_elt *src_related_elt
6577 = lookup (src_related, HASH (src_related, mode), mode);
6578 if (src_related_elt && elt)
6579 {
6580 if (elt->first_same_value
6581 != src_related_elt->first_same_value)
6582 /* This can occur when we previously saw a CONST
6583 involving a SYMBOL_REF and then see the SYMBOL_REF
6584 twice. Merge the involved classes. */
6585 merge_equiv_classes (elt, src_related_elt);
6586
6587 src_related = 0;
6588 src_related_elt = 0;
6589 }
6590 else if (src_related_elt && elt == 0)
6591 elt = src_related_elt;
6592 }
6593 }
6594
6595 /* See if we have a CONST_INT that is already in a register in a
6596 wider mode. */
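/* For example (hypothetical pseudos): if (const_int 3) was already
   loaded into (reg:SI 65) and this set needs it in HImode, the HImode
   low part of (reg:SI 65) is an equivalent that may be cheaper than
   re-materializing the constant.  */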
6597
6598 if (src_const && src_related == 0 && GET_CODE (src_const) == CONST_INT
6599 && GET_MODE_CLASS (mode) == MODE_INT
6600 && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
6601 {
6602 enum machine_mode wider_mode;
6603
6604 for (wider_mode = GET_MODE_WIDER_MODE (mode);
6605 GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD
6606 && src_related == 0;
6607 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
6608 {
6609 struct table_elt *const_elt
6610 = lookup (src_const, HASH (src_const, wider_mode), wider_mode);
6611
6612 if (const_elt == 0)
6613 continue;
6614
6615 for (const_elt = const_elt->first_same_value;
6616 const_elt; const_elt = const_elt->next_same_value)
6617 if (GET_CODE (const_elt->exp) == REG)
6618 {
6619 src_related = gen_lowpart_if_possible (mode,
6620 const_elt->exp);
6621 break;
6622 }
6623 }
6624 }
6625
6626 /* Another possibility is that we have an AND with a constant in
6627 a mode narrower than a word. If so, it might have been generated
6628 as part of an "if" which would narrow the AND. If we already
6629 have done the AND in a wider mode, we can use a SUBREG of that
6630 value. */
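/* For example (a sketch with hypothetical pseudos): when this insn
   computes (and:QI X (const_int 15)) and an earlier insn already put
   (and:SI Y (const_int 15)) into (reg:SI 67), where X is the low part
   of Y, the QImode low part of (reg:SI 67) can serve as SRC_RELATED.  */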
6631
6632 if (flag_expensive_optimizations && ! src_related
6633 && GET_CODE (src) == AND && GET_CODE (XEXP (src, 1)) == CONST_INT
6634 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6635 {
6636 enum machine_mode tmode;
6637 rtx new_and = gen_rtx (AND, VOIDmode, NULL_RTX, XEXP (src, 1));
6638
6639 for (tmode = GET_MODE_WIDER_MODE (mode);
6640 GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
6641 tmode = GET_MODE_WIDER_MODE (tmode))
6642 {
6643 rtx inner = gen_lowpart_if_possible (tmode, XEXP (src, 0));
6644 struct table_elt *larger_elt;
6645
6646 if (inner)
6647 {
6648 PUT_MODE (new_and, tmode);
6649 XEXP (new_and, 0) = inner;
6650 larger_elt = lookup (new_and, HASH (new_and, tmode), tmode);
6651 if (larger_elt == 0)
6652 continue;
6653
6654 for (larger_elt = larger_elt->first_same_value;
6655 larger_elt; larger_elt = larger_elt->next_same_value)
6656 if (GET_CODE (larger_elt->exp) == REG)
6657 {
6658 src_related
6659 = gen_lowpart_if_possible (mode, larger_elt->exp);
6660 break;
6661 }
6662
6663 if (src_related)
6664 break;
6665 }
6666 }
6667 }
6668
6669 #ifdef LOAD_EXTEND_OP
6670 /* See if a MEM has already been loaded with a widening operation;
6671 if it has, we can use a subreg of that. Many CISC machines
6672 also have such operations, but this is only likely to be
6673 beneficial on these machines. */
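/* For example (a sketch): on a target where LOAD_EXTEND_OP (QImode) is
   ZERO_EXTEND, a value earlier loaded as (zero_extend:SI (mem:QI addr))
   into (reg:SI 65) lets a new (mem:QI addr) use the QImode low part of
   (reg:SI 65) instead of reloading memory.  (Hypothetical pseudo.)  */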
6674
6675 if (flag_expensive_optimizations && src_related == 0
6676 && (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6677 && GET_MODE_CLASS (mode) == MODE_INT
6678 && GET_CODE (src) == MEM && ! do_not_record
6679 && LOAD_EXTEND_OP (mode) != NIL)
6680 {
6681 enum machine_mode tmode;
6682
6683 /* Set what we are trying to extend and the operation it might
6684 have been extended with. */
6685 PUT_CODE (memory_extend_rtx, LOAD_EXTEND_OP (mode));
6686 XEXP (memory_extend_rtx, 0) = src;
6687
6688 for (tmode = GET_MODE_WIDER_MODE (mode);
6689 GET_MODE_SIZE (tmode) <= UNITS_PER_WORD;
6690 tmode = GET_MODE_WIDER_MODE (tmode))
6691 {
6692 struct table_elt *larger_elt;
6693
6694 PUT_MODE (memory_extend_rtx, tmode);
6695 larger_elt = lookup (memory_extend_rtx,
6696 HASH (memory_extend_rtx, tmode), tmode);
6697 if (larger_elt == 0)
6698 continue;
6699
6700 for (larger_elt = larger_elt->first_same_value;
6701 larger_elt; larger_elt = larger_elt->next_same_value)
6702 if (GET_CODE (larger_elt->exp) == REG)
6703 {
6704 src_related = gen_lowpart_if_possible (mode,
6705 larger_elt->exp);
6706 break;
6707 }
6708
6709 if (src_related)
6710 break;
6711 }
6712 }
6713 #endif /* LOAD_EXTEND_OP */
6714
6715 if (src == src_folded)
6716 src_folded = 0;
6717
6718 /* At this point, ELT, if non-zero, points to a class of expressions
6719 equivalent to the source of this SET and SRC, SRC_EQV, SRC_FOLDED,
6720 and SRC_RELATED, if non-zero, each contain additional equivalent
6721 expressions. Prune these latter expressions by deleting expressions
6722 already in the equivalence class.
6723
6724 Check for an equivalent identical to the destination. If found,
6725 this is the preferred equivalent since it will likely lead to
6726 elimination of the insn. Indicate this by placing it in
6727 `src_related'. */
6728
6729 if (elt) elt = elt->first_same_value;
6730 for (p = elt; p; p = p->next_same_value)
6731 {
6732 enum rtx_code code = GET_CODE (p->exp);
6733
6734 /* If the expression is not valid, ignore it. Then we do not
6735 have to check for validity below. In most cases, we can use
6736 `rtx_equal_p', since canonicalization has already been done. */
6737 if (code != REG && ! exp_equiv_p (p->exp, p->exp, 1, 0))
6738 continue;
6739
6740 if (src && GET_CODE (src) == code && rtx_equal_p (src, p->exp))
6741 src = 0;
6742 else if (src_folded && GET_CODE (src_folded) == code
6743 && rtx_equal_p (src_folded, p->exp))
6744 src_folded = 0;
6745 else if (src_eqv_here && GET_CODE (src_eqv_here) == code
6746 && rtx_equal_p (src_eqv_here, p->exp))
6747 src_eqv_here = 0;
6748 else if (src_related && GET_CODE (src_related) == code
6749 && rtx_equal_p (src_related, p->exp))
6750 src_related = 0;
6751
6752 /* If this is the same as the destination of the insn, we want
6753 to prefer it. Copy it to src_related. The code below will
6754 then give it a negative cost. */
6755 if (GET_CODE (dest) == code && rtx_equal_p (p->exp, dest))
6756 src_related = dest;
6757
6758 }
6759
6760 /* Find the cheapest valid equivalent, trying all the available
6761 possibilities. Prefer items not in the hash table to ones
6762 that are when they are equal cost. Note that we can never
6763 worsen an insn as the current contents will also succeed.
6764 If we find an equivalent identical to the destination, use it as best,
6765 since this insn will probably be eliminated in that case. */
6766 if (src)
6767 {
6768 if (rtx_equal_p (src, dest))
6769 src_cost = -1;
6770 else
6771 src_cost = COST (src);
6772 }
6773
6774 if (src_eqv_here)
6775 {
6776 if (rtx_equal_p (src_eqv_here, dest))
6777 src_eqv_cost = -1;
6778 else
6779 src_eqv_cost = COST (src_eqv_here);
6780 }
6781
6782 if (src_folded)
6783 {
6784 if (rtx_equal_p (src_folded, dest))
6785 src_folded_cost = -1;
6786 else
6787 src_folded_cost = COST (src_folded);
6788 }
6789
6790 if (src_related)
6791 {
6792 if (rtx_equal_p (src_related, dest))
6793 src_related_cost = -1;
6794 else
6795 src_related_cost = COST (src_related);
6796 }
6797
6798 /* If this was an indirect jump insn, a known label will really be
6799 cheaper even though it looks more expensive. */
6800 if (dest == pc_rtx && src_const && GET_CODE (src_const) == LABEL_REF)
6801 src_folded = src_const, src_folded_cost = -1;
6802
6803 /* Terminate loop when replacement made. This must terminate since
6804 the current contents will be tested and will always be valid. */
6805 while (1)
6806 {
6807 rtx trial;
6808
6809 /* Skip invalid entries. */
6810 while (elt && GET_CODE (elt->exp) != REG
6811 && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
6812 elt = elt->next_same_value;
6813
6814 if (elt) src_elt_cost = elt->cost;
6815
6816 /* Find cheapest and skip it for the next time. For items
6817 of equal cost, use this order:
6818 src_folded, src, src_eqv, src_related and hash table entry. */
6819 if (src_folded_cost <= src_cost
6820 && src_folded_cost <= src_eqv_cost
6821 && src_folded_cost <= src_related_cost
6822 && src_folded_cost <= src_elt_cost)
6823 {
6824 trial = src_folded, src_folded_cost = 10000;
6825 if (src_folded_force_flag)
6826 trial = force_const_mem (mode, trial);
6827 }
6828 else if (src_cost <= src_eqv_cost
6829 && src_cost <= src_related_cost
6830 && src_cost <= src_elt_cost)
6831 trial = src, src_cost = 10000;
6832 else if (src_eqv_cost <= src_related_cost
6833 && src_eqv_cost <= src_elt_cost)
6834 trial = copy_rtx (src_eqv_here), src_eqv_cost = 10000;
6835 else if (src_related_cost <= src_elt_cost)
6836 trial = copy_rtx (src_related), src_related_cost = 10000;
6837 else
6838 {
6839 trial = copy_rtx (elt->exp);
6840 elt = elt->next_same_value;
6841 src_elt_cost = 10000;
6842 }
6843
6844 /* We don't normally have an insn matching (set (pc) (pc)), so
6845 check for this separately here. We will delete such an
6846 insn below.
6847
6848 Tablejump insns contain a USE of the table, so simply replacing
6849 the operand with the constant won't match. This is simply an
6850 unconditional branch, however, and is therefore valid. Just
6851 insert the substitution here and we will delete and re-emit
6852 the insn later. */
6853
6854 if (n_sets == 1 && dest == pc_rtx
6855 && (trial == pc_rtx
6856 || (GET_CODE (trial) == LABEL_REF
6857 && ! condjump_p (insn))))
6858 {
6859 /* If TRIAL is a label in front of a jump table, we are
6860 really falling through the switch (this is how casesi
6861 insns work), so we must branch around the table. */
6862 if (GET_CODE (trial) == CODE_LABEL
6863 && NEXT_INSN (trial) != 0
6864 && GET_CODE (NEXT_INSN (trial)) == JUMP_INSN
6865 && (GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_DIFF_VEC
6866 || GET_CODE (PATTERN (NEXT_INSN (trial))) == ADDR_VEC))
6867
6868 trial = gen_rtx (LABEL_REF, Pmode, get_label_after (trial));
6869
6870 SET_SRC (sets[i].rtl) = trial;
6871 cse_jumps_altered = 1;
6872 break;
6873 }
6874
6875 /* Look for a substitution that makes a valid insn. */
6876 else if (validate_change (insn, &SET_SRC (sets[i].rtl), trial, 0))
6877 {
6878 /* The result of apply_change_group can be ignored; see
6879 canon_reg. */
6880
6881 validate_change (insn, &SET_SRC (sets[i].rtl),
6882 canon_reg (SET_SRC (sets[i].rtl), insn),
6883 1);
6884 apply_change_group ();
6885 break;
6886 }
6887
6888 /* If we previously found constant pool entries for
6889 constants and this is a constant, try making a
6890 pool entry. Put it in src_folded unless we already have done
6891 this since that is where it likely came from. */
6892
6893 else if (constant_pool_entries_cost
6894 && CONSTANT_P (trial)
6895 && ! (GET_CODE (trial) == CONST
6896 && GET_CODE (XEXP (trial, 0)) == TRUNCATE)
6897 && (src_folded == 0
6898 || (GET_CODE (src_folded) != MEM
6899 && ! src_folded_force_flag))
6900 && GET_MODE_CLASS (mode) != MODE_CC)
6901 {
6902 src_folded_force_flag = 1;
6903 src_folded = trial;
6904 src_folded_cost = constant_pool_entries_cost;
6905 }
6906 }
6907
6908 src = SET_SRC (sets[i].rtl);
6909
6910 /* In general, it is good to have a SET with SET_SRC == SET_DEST.
6911 However, there is an important exception: If both are registers
6912 that are not the head of their equivalence class, replace SET_SRC
6913 with the head of the class. If we do not do this, we will have
6914 both registers live over a portion of the basic block. This way,
6915 their lifetimes will likely abut instead of overlapping. */
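/* For example (hypothetical pseudos): given (set (reg 70) (reg 70))
   where the head of reg 70's equivalence class is (reg 65), rewriting
   the source to (reg 65) lets reg 70's lifetime end here instead of
   overlapping reg 65's for the rest of the block.  */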
6916 if (GET_CODE (dest) == REG
6917 && REGNO_QTY_VALID_P (REGNO (dest))
6918 && qty_mode[reg_qty[REGNO (dest)]] == GET_MODE (dest)
6919 && qty_first_reg[reg_qty[REGNO (dest)]] != REGNO (dest)
6920 && GET_CODE (src) == REG && REGNO (src) == REGNO (dest)
6921 /* Don't do this if the original insn had a hard reg as
6922 SET_SRC. */
6923 && (GET_CODE (sets[i].src) != REG
6924 || REGNO (sets[i].src) >= FIRST_PSEUDO_REGISTER))
6925 /* We can't call canon_reg here because it won't do anything if
6926 SRC is a hard register. */
6927 {
6928 int first = qty_first_reg[reg_qty[REGNO (src)]];
6929
6930 src = SET_SRC (sets[i].rtl)
6931 = first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
6932 : gen_rtx (REG, GET_MODE (src), first);
6933
6934 /* If we had a constant that is cheaper than what we are now
6935 setting SRC to, use that constant. We ignored it when we
6936 thought we could make this into a no-op. */
6937 if (src_const && COST (src_const) < COST (src)
6938 && validate_change (insn, &SET_SRC (sets[i].rtl), src_const, 0))
6939 src = src_const;
6940 }
6941
6942 /* If we made a change, recompute SRC values. */
6943 if (src != sets[i].src)
6944 {
6945 do_not_record = 0;
6946 hash_arg_in_memory = 0;
6947 hash_arg_in_struct = 0;
6948 sets[i].src = src;
6949 sets[i].src_hash = HASH (src, mode);
6950 sets[i].src_volatile = do_not_record;
6951 sets[i].src_in_memory = hash_arg_in_memory;
6952 sets[i].src_in_struct = hash_arg_in_struct;
6953 sets[i].src_elt = lookup (src, sets[i].src_hash, mode);
6954 }
6955
6956 /* If this is a single SET, we are setting a register, and we have an
6957 equivalent constant, we want to add a REG_NOTE. We don't want
6958 to write a REG_EQUAL note for a constant pseudo since verifying that
6959 that pseudo hasn't been eliminated is a pain. Such a note also
6960 won't help anything. */
6961 if (n_sets == 1 && src_const && GET_CODE (dest) == REG
6962 && GET_CODE (src_const) != REG)
6963 {
6964 tem = find_reg_note (insn, REG_EQUAL, NULL_RTX);
6965
6966 /* Record the actual constant value in a REG_EQUAL note, making
6967 a new one if one does not already exist. */
6968 if (tem)
6969 XEXP (tem, 0) = src_const;
6970 else
6971 REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL,
6972 src_const, REG_NOTES (insn));
6973
6974 /* If storing a constant value in a register that
6975 previously held the constant value 0,
6976 record this fact with a REG_WAS_0 note on this insn.
6977
6978 Note that the *register* is required to have previously held 0,
6979 not just any register in the quantity and we must point to the
6980 insn that set that register to zero.
6981
6982 Rather than track each register individually, we just see if
6983 the last set for this quantity was for this register. */
6984
6985 if (REGNO_QTY_VALID_P (REGNO (dest))
6986 && qty_const[reg_qty[REGNO (dest)]] == const0_rtx)
6987 {
6988 /* See if we previously had a REG_WAS_0 note. */
6989 rtx note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
6990 rtx const_insn = qty_const_insn[reg_qty[REGNO (dest)]];
6991
6992 if ((tem = single_set (const_insn)) != 0
6993 && rtx_equal_p (SET_DEST (tem), dest))
6994 {
6995 if (note)
6996 XEXP (note, 0) = const_insn;
6997 else
6998 REG_NOTES (insn) = gen_rtx (INSN_LIST, REG_WAS_0,
6999 const_insn, REG_NOTES (insn));
7000 }
7001 }
7002 }
7003
7004 /* Now deal with the destination. */
7005 do_not_record = 0;
7006 sets[i].inner_dest_loc = &SET_DEST (sets[i].rtl);
7007
7008 /* Look within any SIGN_EXTRACT or ZERO_EXTRACT
7009 to the MEM or REG within it. */
7010 while (GET_CODE (dest) == SIGN_EXTRACT
7011 || GET_CODE (dest) == ZERO_EXTRACT
7012 || GET_CODE (dest) == SUBREG
7013 || GET_CODE (dest) == STRICT_LOW_PART)
7014 {
7015 sets[i].inner_dest_loc = &XEXP (dest, 0);
7016 dest = XEXP (dest, 0);
7017 }
7018
7019 sets[i].inner_dest = dest;
7020
7021 if (GET_CODE (dest) == MEM)
7022 {
7023 dest = fold_rtx (dest, insn);
7024
7025 /* Decide whether we invalidate everything in memory,
7026 or just things at non-fixed places.
7027 Writing a large aggregate must invalidate everything
7028 because we don't know how long it is. */
7029 note_mem_written (dest, &writes_memory);
7030 }
7031
7032 /* Compute the hash code of the destination now,
7033 before the effects of this instruction are recorded,
7034 since the register values used in the address computation
7035 are those before this instruction. */
7036 sets[i].dest_hash = HASH (dest, mode);
7037
7038 /* Don't enter a bit-field in the hash table
7039 because the value in it after the store
7040 may not equal what was stored, due to truncation. */
7041
7042 if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT
7043 || GET_CODE (SET_DEST (sets[i].rtl)) == SIGN_EXTRACT)
7044 {
7045 rtx width = XEXP (SET_DEST (sets[i].rtl), 1);
7046
7047 if (src_const != 0 && GET_CODE (src_const) == CONST_INT
7048 && GET_CODE (width) == CONST_INT
7049 && INTVAL (width) < HOST_BITS_PER_WIDE_INT
7050 && ! (INTVAL (src_const)
7051 & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
7052 /* Exception: if the value is constant,
7053 and it won't be truncated, record it. */
7054 ;
7055 else
7056 {
7057 /* This is chosen so that the destination will be invalidated
7058 but no new value will be recorded.
7059 We must invalidate because sometimes constant
7060 values can be recorded for bitfields. */
7061 sets[i].src_elt = 0;
7062 sets[i].src_volatile = 1;
7063 src_eqv = 0;
7064 src_eqv_elt = 0;
7065 }
7066 }
7067
7068 /* If only one set in a JUMP_INSN and it is now a no-op, we can delete
7069 the insn. */
7070 else if (n_sets == 1 && dest == pc_rtx && src == pc_rtx)
7071 {
7072 PUT_CODE (insn, NOTE);
7073 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
7074 NOTE_SOURCE_FILE (insn) = 0;
7075 cse_jumps_altered = 1;
7076 /* One less use of the label this insn used to jump to. */
7077 --LABEL_NUSES (JUMP_LABEL (insn));
7078 /* No more processing for this set. */
7079 sets[i].rtl = 0;
7080 }
7081
7082 /* If this SET is now setting PC to a label, we know it used to
7083 be a conditional or computed branch. So we see if we can follow
7084 it. If it was a computed branch, delete it and re-emit. */
7085 else if (dest == pc_rtx && GET_CODE (src) == LABEL_REF)
7086 {
7087 rtx p;
7088
7089 /* If this is not in the format for a simple branch and
7090 this is the only SET in it, re-emit it. */
7091 if (! simplejump_p (insn) && n_sets == 1)
7092 {
7093 rtx new = emit_jump_insn_before (gen_jump (XEXP (src, 0)), insn);
7094 JUMP_LABEL (new) = XEXP (src, 0);
7095 LABEL_NUSES (XEXP (src, 0))++;
7096 delete_insn (insn);
7097 insn = new;
7098 }
7099 else
7100 /* Otherwise, force rerecognition, since it probably had
7101 a different pattern before.
7102 This shouldn't really be necessary, since whatever
7103 changed the source value above should have done this.
7104 Until the right place is found, might as well do this here. */
7105 INSN_CODE (insn) = -1;
7106
7107 /* Now that we've converted this jump to an unconditional jump,
7108 there is dead code after it. Delete the dead code until we
7109 reach a BARRIER, the end of the function, or a label. Do
7110 not delete NOTEs except for NOTE_INSN_DELETED since later
7111 phases assume these notes are retained. */
7112
7113 p = insn;
7114
7115 while (NEXT_INSN (p) != 0
7116 && GET_CODE (NEXT_INSN (p)) != BARRIER
7117 && GET_CODE (NEXT_INSN (p)) != CODE_LABEL)
7118 {
7119 if (GET_CODE (NEXT_INSN (p)) != NOTE
7120 || NOTE_LINE_NUMBER (NEXT_INSN (p)) == NOTE_INSN_DELETED)
7121 delete_insn (NEXT_INSN (p));
7122 else
7123 p = NEXT_INSN (p);
7124 }
7125
7126 /* If we don't have a BARRIER immediately after INSN, put one there.
7127 Much code assumes that there are no NOTEs between a JUMP_INSN and
7128 BARRIER. */
7129
7130 if (NEXT_INSN (insn) == 0
7131 || GET_CODE (NEXT_INSN (insn)) != BARRIER)
7132 emit_barrier_before (NEXT_INSN (insn));
7133
7134 /* We might have two BARRIERs separated by notes. Delete the second
7135 one if so. */
7136
7137 if (p != insn && NEXT_INSN (p) != 0
7138 && GET_CODE (NEXT_INSN (p)) == BARRIER)
7139 delete_insn (NEXT_INSN (p));
7140
7141 cse_jumps_altered = 1;
7142 sets[i].rtl = 0;
7143 }
7144
7145 /* If destination is volatile, invalidate it and then do no further
7146 processing for this assignment. */
7147
7148 else if (do_not_record)
7149 {
7150 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7151 || GET_CODE (dest) == MEM)
7152 invalidate (dest, VOIDmode);
7153 else if (GET_CODE (dest) == STRICT_LOW_PART
7154 || GET_CODE (dest) == ZERO_EXTRACT)
7155 invalidate (XEXP (dest, 0), GET_MODE (dest));
7156 sets[i].rtl = 0;
7157 }
7158
7159 if (sets[i].rtl != 0 && dest != SET_DEST (sets[i].rtl))
7160 sets[i].dest_hash = HASH (SET_DEST (sets[i].rtl), mode);
7161
7162 #ifdef HAVE_cc0
7163 /* If setting CC0, record what it was set to, or a constant, if it
7164 is equivalent to a constant. If it is being set to a floating-point
7165 value, make a COMPARE with the appropriate constant of 0. If we
7166 don't do this, later code can interpret this as a test against
7167 const0_rtx, which can cause problems if we try to put it into an
7168 insn as a floating-point operand. */
7169 if (dest == cc0_rtx)
7170 {
7171 this_insn_cc0 = src_const && mode != VOIDmode ? src_const : src;
7172 this_insn_cc0_mode = mode;
7173 if (FLOAT_MODE_P (mode))
7174 this_insn_cc0 = gen_rtx (COMPARE, VOIDmode, this_insn_cc0,
7175 CONST0_RTX (mode));
7176 }
7177 #endif
7178 }
7179
7180 /* Now enter all non-volatile source expressions in the hash table
7181 if they are not already present.
7182 Record their equivalence classes in src_elt.
7183 This way we can insert the corresponding destinations into
7184 the same classes even if the actual sources are no longer in them
7185 (having been invalidated). */
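     /* E.g., for (set (reg 66) (plus (reg 66) (reg 67))) the PLUS is entered
        here, and (reg 66) will later be put in its class, even though the
        store itself invalidates the old (plus (reg 66) (reg 67)) entry
        (register numbers are arbitrary).  */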
7186
7187 if (src_eqv && src_eqv_elt == 0 && sets[0].rtl != 0 && ! src_eqv_volatile
7188 && ! rtx_equal_p (src_eqv, SET_DEST (sets[0].rtl)))
7189 {
7190 register struct table_elt *elt;
7191 register struct table_elt *classp = sets[0].src_elt;
7192 rtx dest = SET_DEST (sets[0].rtl);
7193 enum machine_mode eqvmode = GET_MODE (dest);
7194
7195 if (GET_CODE (dest) == STRICT_LOW_PART)
7196 {
7197 eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
7198 classp = 0;
7199 }
7200 if (insert_regs (src_eqv, classp, 0))
7201 {
7202 rehash_using_reg (src_eqv);
7203 src_eqv_hash = HASH (src_eqv, eqvmode);
7204 }
7205 elt = insert (src_eqv, classp, src_eqv_hash, eqvmode);
7206 elt->in_memory = src_eqv_in_memory;
7207 elt->in_struct = src_eqv_in_struct;
7208 src_eqv_elt = elt;
7209
7210 /* Check to see if src_eqv_elt is the same as a set source which
7211 does not yet have an elt, and if so set the elt of the set source
7212 to src_eqv_elt. */
7213 for (i = 0; i < n_sets; i++)
7214 if (sets[i].rtl && sets[i].src_elt == 0
7215 && rtx_equal_p (SET_SRC (sets[i].rtl), src_eqv))
7216 sets[i].src_elt = src_eqv_elt;
7217 }
7218
7219 for (i = 0; i < n_sets; i++)
7220 if (sets[i].rtl && ! sets[i].src_volatile
7221 && ! rtx_equal_p (SET_SRC (sets[i].rtl), SET_DEST (sets[i].rtl)))
7222 {
7223 if (GET_CODE (SET_DEST (sets[i].rtl)) == STRICT_LOW_PART)
7224 {
7225 /* REG_EQUAL in setting a STRICT_LOW_PART
7226 gives an equivalent for the entire destination register,
7227 not just for the subreg being stored in now.
7228 This is a more interesting equivalence, so we arrange later
7229 to treat the entire reg as the destination. */
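             /* E.g., for (set (strict_low_part (subreg:QI (reg:SI 80) 0)) ...)
                with a REG_EQUAL note, the note describes all of (reg:SI 80),
                not just the low byte (register number is arbitrary).  */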
7230 sets[i].src_elt = src_eqv_elt;
7231 sets[i].src_hash = src_eqv_hash;
7232 }
7233 else
7234 {
7235 /* Insert source and constant equivalent into hash table, if not
7236 already present. */
7237 register struct table_elt *classp = src_eqv_elt;
7238 register rtx src = sets[i].src;
7239 register rtx dest = SET_DEST (sets[i].rtl);
7240 enum machine_mode mode
7241 = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
7242
7243 if (sets[i].src_elt == 0)
7244 {
7245 register struct table_elt *elt;
7246
7247 /* Note that these insert_regs calls cannot remove
7248 any of the src_elt's, because they would have failed to
7249 match if not still valid. */
7250 if (insert_regs (src, classp, 0))
7251 {
7252 rehash_using_reg (src);
7253 sets[i].src_hash = HASH (src, mode);
7254 }
7255 elt = insert (src, classp, sets[i].src_hash, mode);
7256 elt->in_memory = sets[i].src_in_memory;
7257 elt->in_struct = sets[i].src_in_struct;
7258 sets[i].src_elt = classp = elt;
7259 }
7260
7261 if (sets[i].src_const && sets[i].src_const_elt == 0
7262 && src != sets[i].src_const
7263 && ! rtx_equal_p (sets[i].src_const, src))
7264 sets[i].src_elt = insert (sets[i].src_const, classp,
7265 sets[i].src_const_hash, mode);
7266 }
7267 }
7268 else if (sets[i].src_elt == 0)
7269 /* If we did not insert the source into the hash table (e.g., it was
7270 volatile), note the equivalence class for the REG_EQUAL value, if any,
7271 so that the destination goes into that class. */
7272 sets[i].src_elt = src_eqv_elt;
7273
7274 invalidate_from_clobbers (&writes_memory, x);
7275
7276 /* Some registers are invalidated by subroutine calls. Memory is
7277 invalidated by non-constant calls. */
7278
7279 if (GET_CODE (insn) == CALL_INSN)
7280 {
7281 static struct write_data everything = {0, 1, 1, 1};
7282
7283 if (! CONST_CALL_P (insn))
7284 invalidate_memory (&everything);
7285 invalidate_for_call ();
7286 }
7287
7288 /* Now invalidate everything set by this instruction.
7289 If a SUBREG or other funny destination is being set,
7290 sets[i].rtl is still nonzero, so here we invalidate the reg
7291 a part of which is being set. */
7292
7293 for (i = 0; i < n_sets; i++)
7294 if (sets[i].rtl)
7295 {
7296 /* We can't use the inner dest, because the mode associated with
7297 a ZERO_EXTRACT is significant. */
7298 register rtx dest = SET_DEST (sets[i].rtl);
7299
7300 /* Needed for registers to remove the register from its
7301 previous quantity's chain.
7302 Needed for memory if this is a nonvarying address, unless
7303 we have just done an invalidate_memory that covers even those. */
7304 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7305 || (GET_CODE (dest) == MEM && ! writes_memory.all
7306 && ! cse_rtx_addr_varies_p (dest)))
7307 invalidate (dest, VOIDmode);
7308 else if (GET_CODE (dest) == STRICT_LOW_PART
7309 || GET_CODE (dest) == ZERO_EXTRACT)
7310 invalidate (XEXP (dest, 0), GET_MODE (dest));
7311 }
7312
7313 /* Make sure registers mentioned in destinations
7314 are safe for use in an expression to be inserted.
7315 This removes from the hash table
7316 any invalid entry that refers to one of these registers.
7317
7318 We don't care about the return value from mention_regs because
7319 we are going to hash the SET_DEST values unconditionally. */
7320
7321 for (i = 0; i < n_sets; i++)
7322 if (sets[i].rtl && GET_CODE (SET_DEST (sets[i].rtl)) != REG)
7323 mention_regs (SET_DEST (sets[i].rtl));
7324
7325 /* We may have just removed some of the src_elt's from the hash table.
7326 So replace each one with the current head of the same class. */
7327
7328 for (i = 0; i < n_sets; i++)
7329 if (sets[i].rtl)
7330 {
7331 if (sets[i].src_elt && sets[i].src_elt->first_same_value == 0)
7332 /* If elt was removed, find current head of same class,
7333 or 0 if nothing remains of that class. */
7334 {
7335 register struct table_elt *elt = sets[i].src_elt;
7336
7337 while (elt && elt->prev_same_value)
7338 elt = elt->prev_same_value;
7339
7340 while (elt && elt->first_same_value == 0)
7341 elt = elt->next_same_value;
7342 sets[i].src_elt = elt ? elt->first_same_value : 0;
7343 }
7344 }
7345
7346 /* Now insert the destinations into their equivalence classes. */
7347
7348 for (i = 0; i < n_sets; i++)
7349 if (sets[i].rtl)
7350 {
7351 register rtx dest = SET_DEST (sets[i].rtl);
7352 register struct table_elt *elt;
7353
7354 /* Don't record value if we are not supposed to risk allocating
7355 floating-point values in registers that might be wider than
7356 memory. */
7357 if ((flag_float_store
7358 && GET_CODE (dest) == MEM
7359 && FLOAT_MODE_P (GET_MODE (dest)))
7360 /* Don't record values of destinations set inside a libcall block
7361 since we might delete the libcall. Things should have been set
7362 up so we won't want to reuse such a value, but we play it safe
7363 here. */
7364 || in_libcall_block
7365 /* If we didn't put a REG_EQUAL value or a source into the hash
7366 table, there is no point in recording DEST.
7367 || sets[i].src_elt == 0
7368 /* If DEST is a paradoxical SUBREG and SRC is a ZERO_EXTEND
7369 or SIGN_EXTEND, don't record DEST since it can cause
7370 some tracking to be wrong.
7371
7372 ??? Think about this more later. */
7373 || (GET_CODE (dest) == SUBREG
7374 && (GET_MODE_SIZE (GET_MODE (dest))
7375 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
7376 && (GET_CODE (sets[i].src) == SIGN_EXTEND
7377 || GET_CODE (sets[i].src) == ZERO_EXTEND)))
7378 continue;
7379
7380 /* STRICT_LOW_PART isn't part of the value BEING set,
7381 and neither is the SUBREG inside it.
7382 Note that in this case SETS[I].SRC_ELT is really SRC_EQV_ELT. */
7383 if (GET_CODE (dest) == STRICT_LOW_PART)
7384 dest = SUBREG_REG (XEXP (dest, 0));
7385
7386 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG)
7387 /* Registers must also be inserted into chains for quantities. */
7388 if (insert_regs (dest, sets[i].src_elt, 1))
7389 {
7390 /* If `insert_regs' changes something, the hash code must be
7391 recalculated. */
7392 rehash_using_reg (dest);
7393 sets[i].dest_hash = HASH (dest, GET_MODE (dest));
7394 }
7395
7396 elt = insert (dest, sets[i].src_elt,
7397 sets[i].dest_hash, GET_MODE (dest));
7398 elt->in_memory = (GET_CODE (sets[i].inner_dest) == MEM
7399 && (! RTX_UNCHANGING_P (sets[i].inner_dest)
7400 || FIXED_BASE_PLUS_P (XEXP (sets[i].inner_dest,
7401 0))));
7402
7403 if (elt->in_memory)
7404 {
7405 /* This implicitly assumes a whole struct
7406 need not have MEM_IN_STRUCT_P.
7407 But a whole struct is *supposed* to have MEM_IN_STRUCT_P. */
7408 elt->in_struct = (MEM_IN_STRUCT_P (sets[i].inner_dest)
7409 || sets[i].inner_dest != SET_DEST (sets[i].rtl));
7410 }
7411
7412 /* If we have (set (subreg:m1 (reg:m2 foo) 0) (bar:m1)), M1 is no
7413 narrower than M2, and both M1 and M2 are the same number of words,
7414 we are also doing (set (reg:m2 foo) (subreg:m2 (bar:m1) 0)) so
7415 make that equivalence as well.
7416
7417 However, BAR may have equivalences for which gen_lowpart_if_possible
7418 will produce a simpler value than it does when applied to BAR itself
7419 (e.g., if BAR was ZERO_EXTENDed from M2), so we will scan all
7420 BAR's equivalences. If we don't get a simplified form, make
7421 the SUBREG. It will not be used in an equivalence, but will
7422 cause two similar assignments to be detected.
7423
7424 Note the loop below will find SUBREG_REG (DEST) since we have
7425 already entered SRC and DEST of the SET in the table. */
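             Concrete case (modes chosen for a hypothetical 32-bit target):
             given (set (subreg:SI (reg:HI 100) 0) (reg:SI 101)), SImode and
             HImode both fit in one word, so we also record
             (reg:HI 100) == (subreg:HI (reg:SI 101) 0), preferring any
             simpler form gen_lowpart_if_possible can make from each
             equivalence of the source.  */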
7426
7427 if (GET_CODE (dest) == SUBREG
7428 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1)
7429 / UNITS_PER_WORD)
7430 == (GET_MODE_SIZE (GET_MODE (dest)) - 1) / UNITS_PER_WORD)
7431 && (GET_MODE_SIZE (GET_MODE (dest))
7432 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
7433 && sets[i].src_elt != 0)
7434 {
7435 enum machine_mode new_mode = GET_MODE (SUBREG_REG (dest));
7436 struct table_elt *elt, *classp = 0;
7437
7438 for (elt = sets[i].src_elt->first_same_value; elt;
7439 elt = elt->next_same_value)
7440 {
7441 rtx new_src = 0;
7442 unsigned src_hash;
7443 struct table_elt *src_elt;
7444
7445 /* Ignore invalid entries. */
7446 if (GET_CODE (elt->exp) != REG
7447 && ! exp_equiv_p (elt->exp, elt->exp, 1, 0))
7448 continue;
7449
7450 new_src = gen_lowpart_if_possible (new_mode, elt->exp);
7451 if (new_src == 0)
7452 new_src = gen_rtx (SUBREG, new_mode, elt->exp, 0);
7453
7454 src_hash = HASH (new_src, new_mode);
7455 src_elt = lookup (new_src, src_hash, new_mode);
7456
7457 /* Put the new source in the hash table if it isn't
7458 already. */
7459 if (src_elt == 0)
7460 {
7461 if (insert_regs (new_src, classp, 0))
7462 {
7463 rehash_using_reg (new_src);
7464 src_hash = HASH (new_src, new_mode);
7465 }
7466 src_elt = insert (new_src, classp, src_hash, new_mode);
7467 src_elt->in_memory = elt->in_memory;
7468 src_elt->in_struct = elt->in_struct;
7469 }
7470 else if (classp && classp != src_elt->first_same_value)
7471 /* Show that two things that we've seen before are
7472 actually the same. */
7473 merge_equiv_classes (src_elt, classp);
7474
7475 classp = src_elt->first_same_value;
7476 }
7477 }
7478 }
7479
7480 /* Special handling for (set REG0 REG1)
7481 where REG0 is the "cheapest", cheaper than REG1.
7482 After cse, REG1 will probably not be used in the sequel,
7483 so (if easily done) change this insn to (set REG1 REG0) and
7484 replace REG1 with REG0 in the previous insn that computed their value.
7485 Then REG1 will become a dead store and won't cloud the situation
7486 for later optimizations.
7487
7488 Do not make this change if REG1 is a hard register, because it will
7489 then be used in the sequel and we may be changing a two-operand insn
7490 into a three-operand insn.
7491
7492 Also do not do this if we are operating on a copy of INSN. */
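     Sketch of the transformation (register numbers are arbitrary):
         I1: (set (reg 101) (plus (reg 102) (reg 103)))
         I2: (set (reg 100) (reg 101))
     becomes
         I1: (set (reg 100) (plus (reg 102) (reg 103)))
         I2: (set (reg 101) (reg 100))
     so I2 is now a dead store if (reg 101) is otherwise unused.  */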
7493
7494 if (n_sets == 1 && sets[0].rtl && GET_CODE (SET_DEST (sets[0].rtl)) == REG
7495 && NEXT_INSN (PREV_INSN (insn)) == insn
7496 && GET_CODE (SET_SRC (sets[0].rtl)) == REG
7497 && REGNO (SET_SRC (sets[0].rtl)) >= FIRST_PSEUDO_REGISTER
7498 && REGNO_QTY_VALID_P (REGNO (SET_SRC (sets[0].rtl)))
7499 && (qty_first_reg[reg_qty[REGNO (SET_SRC (sets[0].rtl))]]
7500 == REGNO (SET_DEST (sets[0].rtl))))
7501 {
7502 rtx prev = PREV_INSN (insn);
7503 while (prev && GET_CODE (prev) == NOTE)
7504 prev = PREV_INSN (prev);
7505
7506 if (prev && GET_CODE (prev) == INSN && GET_CODE (PATTERN (prev)) == SET
7507 && SET_DEST (PATTERN (prev)) == SET_SRC (sets[0].rtl))
7508 {
7509 rtx dest = SET_DEST (sets[0].rtl);
7510 rtx note = find_reg_note (prev, REG_EQUIV, NULL_RTX);
7511
7512 validate_change (prev, & SET_DEST (PATTERN (prev)), dest, 1);
7513 validate_change (insn, & SET_DEST (sets[0].rtl),
7514 SET_SRC (sets[0].rtl), 1);
7515 validate_change (insn, & SET_SRC (sets[0].rtl), dest, 1);
7516 apply_change_group ();
7517
7518 /* If REG1 was equivalent to a constant, REG0 is not. */
7519 if (note)
7520 PUT_REG_NOTE_KIND (note, REG_EQUAL);
7521
7522 /* If there was a REG_WAS_0 note on PREV, remove it. Move
7523 any REG_WAS_0 note on INSN to PREV. */
7524 note = find_reg_note (prev, REG_WAS_0, NULL_RTX);
7525 if (note)
7526 remove_note (prev, note);
7527
7528 note = find_reg_note (insn, REG_WAS_0, NULL_RTX);
7529 if (note)
7530 {
7531 remove_note (insn, note);
7532 XEXP (note, 1) = REG_NOTES (prev);
7533 REG_NOTES (prev) = note;
7534 }
7535
7536 /* If INSN has a REG_EQUAL note, and this note mentions REG0,
7537 then we must delete it, because the value in REG0 has changed. */
7538 note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
7539 if (note && reg_mentioned_p (dest, XEXP (note, 0)))
7540 remove_note (insn, note);
7541 }
7542 }
7543
7544 /* If this is a conditional jump insn, record any known equivalences due to
7545 the condition being tested. */
7546
7547 last_jump_equiv_class = 0;
7548 if (GET_CODE (insn) == JUMP_INSN
7549 && n_sets == 1 && GET_CODE (x) == SET
7550 && GET_CODE (SET_SRC (x)) == IF_THEN_ELSE)
7551 record_jump_equiv (insn, 0);
7552
7553 #ifdef HAVE_cc0
7554 /* If the previous insn set CC0 and this insn no longer references CC0,
7555 delete the previous insn. Here we use the fact that nothing expects CC0
7556 to be valid over an insn, which is true until the final pass. */
7557 if (prev_insn && GET_CODE (prev_insn) == INSN
7558 && (tem = single_set (prev_insn)) != 0
7559 && SET_DEST (tem) == cc0_rtx
7560 && ! reg_mentioned_p (cc0_rtx, x))
7561 {
7562 PUT_CODE (prev_insn, NOTE);
7563 NOTE_LINE_NUMBER (prev_insn) = NOTE_INSN_DELETED;
7564 NOTE_SOURCE_FILE (prev_insn) = 0;
7565 }
7566
7567 prev_insn_cc0 = this_insn_cc0;
7568 prev_insn_cc0_mode = this_insn_cc0_mode;
7569 #endif
7570
7571 prev_insn = insn;
7572 }
7573 \f
7574 /* Store 1 in *WRITES_PTR for those categories of memory ref
7575 that must be invalidated when the expression WRITTEN is stored in.
7576 If WRITTEN is null, say everything must be invalidated. */
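 /* For example, a push through (pre_dec (reg sp)) sets only WRITES_PTR->sp;
    a BLKmode store invalidates everything; a store through a varying
    address sets "nonscalar", and also "all" unless the address looks like
    a structure or array element (QImode and AND addresses always get
    "all", for the aliasing reasons explained below).  */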
7577
7578 static void
7579 note_mem_written (written, writes_ptr)
7580 rtx written;
7581 struct write_data *writes_ptr;
7582 {
7583 static struct write_data everything = {0, 1, 1, 1};
7584
7585 if (written == 0)
7586 *writes_ptr = everything;
7587 else if (GET_CODE (written) == MEM)
7588 {
7589 /* Pushing or popping the stack invalidates just the stack pointer. */
7590 rtx addr = XEXP (written, 0);
7591 if ((GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
7592 || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
7593 && GET_CODE (XEXP (addr, 0)) == REG
7594 && REGNO (XEXP (addr, 0)) == STACK_POINTER_REGNUM)
7595 {
7596 writes_ptr->sp = 1;
7597 return;
7598 }
7599 else if (GET_MODE (written) == BLKmode)
7600 *writes_ptr = everything;
7601 else if (cse_rtx_addr_varies_p (written))
7602 {
7603 /* A varying address that is a sum indicates an array element,
7604 and that's just as good as a structure element
7605 in implying that we need not invalidate scalar variables.
7606 However, we must allow QImode aliasing of scalars, because the
7607 ANSI C standard allows character pointers to alias anything.
7608 We must also allow AND addresses, because they may generate
7609 accesses outside the object being referenced. This is used to
7610 generate aligned addresses from unaligned addresses, for instance,
7611 the alpha storeqi_unaligned pattern. */
7612 if (! ((MEM_IN_STRUCT_P (written)
7613 || GET_CODE (XEXP (written, 0)) == PLUS)
7614 && GET_MODE (written) != QImode
7615 && GET_CODE (XEXP (written, 0)) != AND))
7616 writes_ptr->all = 1;
7617 writes_ptr->nonscalar = 1;
7618 }
7619 writes_ptr->var = 1;
7620 }
7621 }
7622
7623 /* Perform invalidation on the basis of everything about an insn
7624 except for invalidating the actual places that are SET in it.
7625 This includes the places CLOBBERed, and anything that might
7626 alias with something that is SET or CLOBBERed.
7627
7628 W points to the writes_memory for this insn, a struct write_data
7629 saying which kinds of memory references must be invalidated.
7630 X is the pattern of the insn. */
7631
7632 static void
7633 invalidate_from_clobbers (w, x)
7634 struct write_data *w;
7635 rtx x;
7636 {
7637 /* If W->var is not set, W specifies no action.
7638 If W->all is set, this step gets all memory refs
7639 so they can be ignored in the rest of this function. */
7640 if (w->var)
7641 invalidate_memory (w);
7642
7643 if (w->sp)
7644 {
7645 if (reg_tick[STACK_POINTER_REGNUM] >= 0)
7646 reg_tick[STACK_POINTER_REGNUM]++;
7647
7648 /* This should be *very* rare. */
7649 if (TEST_HARD_REG_BIT (hard_regs_in_table, STACK_POINTER_REGNUM))
7650 invalidate (stack_pointer_rtx, VOIDmode);
7651 }
7652
7653 if (GET_CODE (x) == CLOBBER)
7654 {
7655 rtx ref = XEXP (x, 0);
7656 if (ref)
7657 {
7658 if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
7659 || (GET_CODE (ref) == MEM && ! w->all))
7660 invalidate (ref, VOIDmode);
7661 else if (GET_CODE (ref) == STRICT_LOW_PART
7662 || GET_CODE (ref) == ZERO_EXTRACT)
7663 invalidate (XEXP (ref, 0), GET_MODE (ref));
7664 }
7665 }
7666 else if (GET_CODE (x) == PARALLEL)
7667 {
7668 register int i;
7669 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
7670 {
7671 register rtx y = XVECEXP (x, 0, i);
7672 if (GET_CODE (y) == CLOBBER)
7673 {
7674 rtx ref = XEXP (y, 0);
7675 if (ref)
7676 {
7677 if (GET_CODE (ref) == REG || GET_CODE (ref) == SUBREG
7678 || (GET_CODE (ref) == MEM && !w->all))
7679 invalidate (ref, VOIDmode);
7680 else if (GET_CODE (ref) == STRICT_LOW_PART
7681 || GET_CODE (ref) == ZERO_EXTRACT)
7682 invalidate (XEXP (ref, 0), GET_MODE (ref));
7683 }
7684 }
7685 }
7686 }
7687 }
7688 \f
7689 /* Process X, part of the REG_NOTES of an insn. Look at any REG_EQUAL notes
7690 and replace any registers in them with either an equivalent constant
7691 or the canonical form of the register. If we are inside an address,
7692 only do this if the address remains valid.
7693
7694 OBJECT is 0 except when within a MEM in which case it is the MEM.
7695
7696 Return the replacement for X. */
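 /* For instance, a REG_EQUAL note of (plus:SI (reg 66) (const_int 4))
    becomes (plus:SI (const_int 8) (const_int 4)) when (reg 66)'s quantity
    is known to hold (const_int 8) (numbers are arbitrary).  */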
7697
7698 static rtx
7699 cse_process_notes (x, object)
7700 rtx x;
7701 rtx object;
7702 {
7703 enum rtx_code code = GET_CODE (x);
7704 char *fmt = GET_RTX_FORMAT (code);
7705 int i;
7706
7707 switch (code)
7708 {
7709 case CONST_INT:
7710 case CONST:
7711 case SYMBOL_REF:
7712 case LABEL_REF:
7713 case CONST_DOUBLE:
7714 case PC:
7715 case CC0:
7716 case LO_SUM:
7717 return x;
7718
7719 case MEM:
7720 XEXP (x, 0) = cse_process_notes (XEXP (x, 0), x);
7721 return x;
7722
7723 case EXPR_LIST:
7724 case INSN_LIST:
7725 if (REG_NOTE_KIND (x) == REG_EQUAL)
7726 XEXP (x, 0) = cse_process_notes (XEXP (x, 0), NULL_RTX);
7727 if (XEXP (x, 1))
7728 XEXP (x, 1) = cse_process_notes (XEXP (x, 1), NULL_RTX);
7729 return x;
7730
7731 case SIGN_EXTEND:
7732 case ZERO_EXTEND:
7733 case SUBREG:
7734 {
7735 rtx new = cse_process_notes (XEXP (x, 0), object);
7736 /* We don't substitute VOIDmode constants into these rtx,
7737 since they would impede folding. */
7738 if (GET_MODE (new) != VOIDmode)
7739 validate_change (object, &XEXP (x, 0), new, 0);
7740 return x;
7741 }
7742
7743 case REG:
7744 i = reg_qty[REGNO (x)];
7745
7746 /* Return a constant or a constant register. */
7747 if (REGNO_QTY_VALID_P (REGNO (x))
7748 && qty_const[i] != 0
7749 && (CONSTANT_P (qty_const[i])
7750 || GET_CODE (qty_const[i]) == REG))
7751 {
7752 rtx new = gen_lowpart_if_possible (GET_MODE (x), qty_const[i]);
7753 if (new)
7754 return new;
7755 }
7756
7757 /* Otherwise, canonicalize this register. */
7758 return canon_reg (x, NULL_RTX);
7759 }
7760
7761 for (i = 0; i < GET_RTX_LENGTH (code); i++)
7762 if (fmt[i] == 'e')
7763 validate_change (object, &XEXP (x, i),
7764 cse_process_notes (XEXP (x, i), object), 0);
7765
7766 return x;
7767 }
7768 \f
7769 /* Find common subexpressions between the end test of a loop and the beginning
7770 of the loop. LOOP_START is the CODE_LABEL at the start of a loop.
7771
7772 Often we have a loop where an expression in the exit test is used
7773 in the body of the loop. For example "while (*p) *q++ = *p++;".
7774 Because of the way we duplicate the loop exit test in front of the loop,
7775 however, we don't detect that common subexpression. This will be caught
7776 when global cse is implemented, but this is quite a common case.
7777
7778 This function handles the most common cases of these common expressions.
7779 It is called after we have processed the basic block ending with the
7780 NOTE_INSN_LOOP_END note that ends a loop and the previous JUMP_INSN
7781 jumps to a label used only once. */
7782
7783 static void
7784 cse_around_loop (loop_start)
7785 rtx loop_start;
7786 {
7787 rtx insn;
7788 int i;
7789 struct table_elt *p;
7790
7791 /* If the jump at the end of the loop doesn't go to the start, we don't
7792 do anything. */
7793 for (insn = PREV_INSN (loop_start);
7794 insn && (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) >= 0);
7795 insn = PREV_INSN (insn))
7796 ;
7797
7798 if (insn == 0
7799 || GET_CODE (insn) != NOTE
7800 || NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG)
7801 return;
7802
7803 /* If the last insn of the loop (the end test) was an NE comparison,
7804 we will interpret it as an EQ comparison, since we fell through
7805 the loop. Any equivalences resulting from that comparison are
7806 therefore not valid and must be invalidated. */
7807 if (last_jump_equiv_class)
7808 for (p = last_jump_equiv_class->first_same_value; p;
7809 p = p->next_same_value)
7810 if (GET_CODE (p->exp) == MEM || GET_CODE (p->exp) == REG
7811 || (GET_CODE (p->exp) == SUBREG
7812 && GET_CODE (SUBREG_REG (p->exp)) == REG))
7813 invalidate (p->exp, VOIDmode);
7814 else if (GET_CODE (p->exp) == STRICT_LOW_PART
7815 || GET_CODE (p->exp) == ZERO_EXTRACT)
7816 invalidate (XEXP (p->exp, 0), GET_MODE (p->exp));
7817
7818 /* Process insns starting after LOOP_START until we hit a CALL_INSN or
7819 a CODE_LABEL (we could handle a CALL_INSN, but it isn't worth it).
7820
7821 The only thing we do with SET_DEST is invalidate entries, so we
7822 can safely process each SET in order. It is slightly less efficient
7823 to do so, but we only want to handle the most common cases. */
7824
7825 for (insn = NEXT_INSN (loop_start);
7826 GET_CODE (insn) != CALL_INSN && GET_CODE (insn) != CODE_LABEL
7827 && ! (GET_CODE (insn) == NOTE
7828 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END);
7829 insn = NEXT_INSN (insn))
7830 {
7831 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7832 && (GET_CODE (PATTERN (insn)) == SET
7833 || GET_CODE (PATTERN (insn)) == CLOBBER))
7834 cse_set_around_loop (PATTERN (insn), insn, loop_start);
7835 else if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
7836 && GET_CODE (PATTERN (insn)) == PARALLEL)
7837 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
7838 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET
7839 || GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == CLOBBER)
7840 cse_set_around_loop (XVECEXP (PATTERN (insn), 0, i), insn,
7841 loop_start);
7842 }
7843 }
7844 \f
7845 /* Variable used for communications between the next two routines. */
7846
7847 static struct write_data skipped_writes_memory;
7848
7849 /* Process one SET of an insn that was skipped. We ignore CLOBBERs
7850 since they are done elsewhere. This function is called via note_stores. */
7851
7852 static void
7853 invalidate_skipped_set (dest, set)
7854 rtx set;
7855 rtx dest;
7856 {
7857 if (GET_CODE (dest) == MEM)
7858 note_mem_written (dest, &skipped_writes_memory);
7859
7860 /* There are times when an address can appear varying and be a PLUS
7861 during this scan when it would be a fixed address were we to know
7862 the proper equivalences. So promote "nonscalar" to be "all". */
7863 if (skipped_writes_memory.nonscalar)
7864 skipped_writes_memory.all = 1;
7865
7866 if (GET_CODE (set) == CLOBBER
7867 #ifdef HAVE_cc0
7868 || dest == cc0_rtx
7869 #endif
7870 || dest == pc_rtx)
7871 return;
7872
7873 if (GET_CODE (dest) == REG || GET_CODE (dest) == SUBREG
7874 || (! skipped_writes_memory.all && ! cse_rtx_addr_varies_p (dest)))
7875 invalidate (dest, VOIDmode);
7876 else if (GET_CODE (dest) == STRICT_LOW_PART
7877 || GET_CODE (dest) == ZERO_EXTRACT)
7878 invalidate (XEXP (dest, 0), GET_MODE (dest));
7879 }
7880
7881 /* Invalidate all insns from START up to the end of the function or the
7882 next label. This is called when we wish to CSE around a block that is
7883 conditionally executed. */
7884
7885 static void
7886 invalidate_skipped_block (start)
7887 rtx start;
7888 {
7889 rtx insn;
7890 static struct write_data init = {0, 0, 0, 0};
7891 static struct write_data everything = {0, 1, 1, 1};
7892
7893 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
7894 insn = NEXT_INSN (insn))
7895 {
7896 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
7897 continue;
7898
7899 skipped_writes_memory = init;
7900
7901 if (GET_CODE (insn) == CALL_INSN)
7902 {
7903 invalidate_for_call ();
7904 skipped_writes_memory = everything;
7905 }
7906
7907 note_stores (PATTERN (insn), invalidate_skipped_set);
7908 invalidate_from_clobbers (&skipped_writes_memory, PATTERN (insn));
7909 }
7910 }
7911 \f
7912 /* Used for communication between the following two routines; contains a
7913 value to be checked for modification. */
7914
7915 static rtx cse_check_loop_start_value;
7916
7917 /* If modifying X will modify the value in CSE_CHECK_LOOP_START_VALUE,
7918 indicate that fact by setting CSE_CHECK_LOOP_START_VALUE to 0. */
7919
7920 static void
7921 cse_check_loop_start (x, set)
7922 rtx x;
7923 rtx set;
7924 {
7925 if (cse_check_loop_start_value == 0
7926 || GET_CODE (x) == CC0 || GET_CODE (x) == PC)
7927 return;
7928
7929 if ((GET_CODE (x) == MEM && GET_CODE (cse_check_loop_start_value) == MEM)
7930 || reg_overlap_mentioned_p (x, cse_check_loop_start_value))
7931 cse_check_loop_start_value = 0;
7932 }
7933
7934 /* X is a SET or CLOBBER contained in INSN that was found near the start of
7935 a loop that starts with the label at LOOP_START.
7936
7937 If X is a SET, we see if its SET_SRC is currently in our hash table.
7938 If so, we see if it has a value equal to some register used only in the
7939 loop exit code (as marked by jump.c).
7940
7941 If those two conditions are true, we search backwards from the start of
7942 the loop to see if that same value was loaded into a register that still
7943 retains its value at the start of the loop.
7944
7945 If so, we insert an insn after the load to copy the destination of that
7946 load into the equivalent register and (try to) replace our SET_SRC with that
7947 register.
7948
7949 In any event, we invalidate whatever this SET or CLOBBER modifies. */
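 /* Sketch (register numbers are arbitrary): with (reg 90) marked
    REG_LOOP_TEST_P and known equal to INSN's SET_SRC, if an insn P before
    the loop does (set (reg 80) <that same source>), we emit
    (set (reg 90) (reg 80)) after P and rewrite INSN to use (reg 90).  */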
7950
7951 static void
7952 cse_set_around_loop (x, insn, loop_start)
7953 rtx x;
7954 rtx insn;
7955 rtx loop_start;
7956 {
7957 struct table_elt *src_elt;
7958 static struct write_data init = {0, 0, 0, 0};
7959 struct write_data writes_memory;
7960
7961 writes_memory = init;
7962
7963 /* If this is a SET, see if we can replace SET_SRC, but ignore SETs that
7964 are setting PC or CC0 or whose SET_SRC is already a register. */
7965 if (GET_CODE (x) == SET
7966 && GET_CODE (SET_DEST (x)) != PC && GET_CODE (SET_DEST (x)) != CC0
7967 && GET_CODE (SET_SRC (x)) != REG)
7968 {
7969 src_elt = lookup (SET_SRC (x),
7970 HASH (SET_SRC (x), GET_MODE (SET_DEST (x))),
7971 GET_MODE (SET_DEST (x)));
7972
7973 if (src_elt)
7974 for (src_elt = src_elt->first_same_value; src_elt;
7975 src_elt = src_elt->next_same_value)
7976 if (GET_CODE (src_elt->exp) == REG && REG_LOOP_TEST_P (src_elt->exp)
7977 && COST (src_elt->exp) < COST (SET_SRC (x)))
7978 {
7979 rtx p, set;
7980
7981 /* Look for an insn in front of LOOP_START that sets
7982 something in the desired mode to SET_SRC (x) before we hit
7983 a label or CALL_INSN. */
7984
7985 for (p = prev_nonnote_insn (loop_start);
7986 p && GET_CODE (p) != CALL_INSN
7987 && GET_CODE (p) != CODE_LABEL;
7988 p = prev_nonnote_insn (p))
7989 if ((set = single_set (p)) != 0
7990 && GET_CODE (SET_DEST (set)) == REG
7991 && GET_MODE (SET_DEST (set)) == src_elt->mode
7992 && rtx_equal_p (SET_SRC (set), SET_SRC (x)))
7993 {
7994 /* We now have to ensure that nothing between P
7995 and LOOP_START modified anything referenced in
7996 SET_SRC (x). We know that nothing within the loop
7997 can modify it, or we would have invalidated it in
7998 the hash table. */
7999 rtx q;
8000
8001 cse_check_loop_start_value = SET_SRC (x);
8002 for (q = p; q != loop_start; q = NEXT_INSN (q))
8003 if (GET_RTX_CLASS (GET_CODE (q)) == 'i')
8004 note_stores (PATTERN (q), cse_check_loop_start);
8005
8006 /* If nothing was changed and we can replace our
8007 SET_SRC, add an insn after P to copy its destination
8008 to what we will be replacing SET_SRC with. */
8009 if (cse_check_loop_start_value
8010 && validate_change (insn, &SET_SRC (x),
8011 src_elt->exp, 0))
8012 emit_insn_after (gen_move_insn (src_elt->exp,
8013 SET_DEST (set)),
8014 p);
8015 break;
8016 }
8017 }
8018 }
8019
8020 /* Now invalidate anything modified by X. */
8021 note_mem_written (SET_DEST (x), &writes_memory);
8022
8023 if (writes_memory.var)
8024 invalidate_memory (&writes_memory);
8025
8026 /* See comment on similar code in cse_insn for explanation of these
8027 tests. */
8028 if (GET_CODE (SET_DEST (x)) == REG || GET_CODE (SET_DEST (x)) == SUBREG
8029 || (GET_CODE (SET_DEST (x)) == MEM && ! writes_memory.all
8030 && ! cse_rtx_addr_varies_p (SET_DEST (x))))
8031 invalidate (SET_DEST (x), VOIDmode);
8032 else if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
8033 || GET_CODE (SET_DEST (x)) == ZERO_EXTRACT)
8034 invalidate (XEXP (SET_DEST (x), 0), GET_MODE (SET_DEST (x)));
8035 }
8036 \f
8037 /* Find the end of INSN's basic block and return its range,
8038 the total number of SETs in all the insns of the block, the last insn of the
8039 block, and the branch path.
8040
8041 The branch path indicates which branches should be followed. If a non-zero
8042 path size is specified, the block should be rescanned and a different set
8043 of branches will be taken. The branch path is only used if
8044 FLAG_CSE_FOLLOW_JUMPS or FLAG_CSE_SKIP_BLOCKS is non-zero.
8045
8046 DATA is a pointer to a struct cse_basic_block_data, defined below, that is
8047 used to describe the block. It is filled in with the information about
8048 the current block. The incoming structure's branch path, if any, is used
8049 to construct the output branch path. */
8050
8051 void
8052 cse_end_of_basic_block (insn, data, follow_jumps, after_loop, skip_blocks)
8053 rtx insn;
8054 struct cse_basic_block_data *data;
8055 int follow_jumps;
8056 int after_loop;
8057 int skip_blocks;
8058 {
8059 rtx p = insn, q;
8060 int nsets = 0;
8061 int low_cuid = INSN_CUID (insn), high_cuid = INSN_CUID (insn);
8062 rtx next = GET_RTX_CLASS (GET_CODE (insn)) == 'i' ? insn : next_real_insn (insn);
8063 int path_size = data->path_size;
8064 int path_entry = 0;
8065 int i;
8066
8067 /* Update the previous branch path, if any. If the last branch was
8068 previously TAKEN, mark it NOT_TAKEN. If it was previously NOT_TAKEN,
8069 shorten the path by one and look at the previous branch. We know that
8070 at least one branch must have been taken if PATH_SIZE is non-zero. */
8071 while (path_size > 0)
8072 {
8073 if (data->path[path_size - 1].status != NOT_TAKEN)
8074 {
8075 data->path[path_size - 1].status = NOT_TAKEN;
8076 break;
8077 }
8078 else
8079 path_size--;
8080 }
8081
8082 /* Scan to end of this basic block. */
8083 while (p && GET_CODE (p) != CODE_LABEL)
8084 {
8085 /* Don't cse out the end of a loop. This makes a difference
8086 only for the unusual loops that always execute at least once;
8087 all other loops have labels there so we will stop in any case.
8088 Cse'ing out the end of the loop is dangerous because it
8089 might cause an invariant expression inside the loop
8090 to be reused after the end of the loop. This would make it
8091 hard to move the expression out of the loop in loop.c,
8092 especially if it is one of several equivalent expressions
8093 and loop.c would like to eliminate it.
8094
8095 If we are running after loop.c has finished, we can ignore
8096 the NOTE_INSN_LOOP_END. */
8097
8098 if (! after_loop && GET_CODE (p) == NOTE
8099 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
8100 break;
8101
8102 /* Don't cse over a call to setjmp; on some machines (e.g. vax)
8103 the regs restored by the longjmp come from
8104 a later time than the setjmp. */
8105 if (GET_CODE (p) == NOTE
8106 && NOTE_LINE_NUMBER (p) == NOTE_INSN_SETJMP)
8107 break;
8108
8109 /* A PARALLEL can have lots of SETs in it,
8110 especially if it is really an ASM_OPERANDS. */
8111 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
8112 && GET_CODE (PATTERN (p)) == PARALLEL)
8113 nsets += XVECLEN (PATTERN (p), 0);
8114 else if (GET_CODE (p) != NOTE)
8115 nsets += 1;
8116
8117 /* Ignore insns made by CSE; they cannot affect the boundaries of
8118 the basic block. */
8119
8120 if (INSN_UID (p) <= max_uid && INSN_CUID (p) > high_cuid)
8121 high_cuid = INSN_CUID (p);
8122 if (INSN_UID (p) <= max_uid && INSN_CUID (p) < low_cuid)
8123 low_cuid = INSN_CUID (p);
8124
8125 /* See if this insn is in our branch path. If it is and we are to
8126 take it, do so. */
8127 if (path_entry < path_size && data->path[path_entry].branch == p)
8128 {
8129 if (data->path[path_entry].status != NOT_TAKEN)
8130 p = JUMP_LABEL (p);
8131
8132 /* Point to next entry in path, if any. */
8133 path_entry++;
8134 }
8135
8136 /* If this is a conditional jump, we can follow it if -fcse-follow-jumps
8137 was specified, we haven't reached our maximum path length, there are
8138 insns following the target of the jump, this is the only use of the
8139 jump label, and the target label is preceded by a BARRIER.
8140
8141 Alternatively, we can follow the jump if it branches around a
8142 block of code and there are no other branches into the block.
8143 In this case invalidate_skipped_block will be called to invalidate any
8144 registers set in the block when following the jump. */
8145
8146 else if ((follow_jumps || skip_blocks) && path_size < PATHLENGTH - 1
8147 && GET_CODE (p) == JUMP_INSN
8148 && GET_CODE (PATTERN (p)) == SET
8149 && GET_CODE (SET_SRC (PATTERN (p))) == IF_THEN_ELSE
8150 && LABEL_NUSES (JUMP_LABEL (p)) == 1
8151 && NEXT_INSN (JUMP_LABEL (p)) != 0)
8152 {
8153 for (q = PREV_INSN (JUMP_LABEL (p)); q; q = PREV_INSN (q))
8154 if ((GET_CODE (q) != NOTE
8155 || NOTE_LINE_NUMBER (q) == NOTE_INSN_LOOP_END
8156 || NOTE_LINE_NUMBER (q) == NOTE_INSN_SETJMP)
8157 && (GET_CODE (q) != CODE_LABEL || LABEL_NUSES (q) != 0))
8158 break;
8159
8160 /* If we ran into a BARRIER, this code is an extension of the
8161 basic block when the branch is taken. */
8162 if (follow_jumps && q != 0 && GET_CODE (q) == BARRIER)
8163 {
8164 /* Don't allow ourselves to keep walking around an
8165 always-executed loop. */
8166 if (next_real_insn (q) == next)
8167 {
8168 p = NEXT_INSN (p);
8169 continue;
8170 }
8171
8172 /* Similarly, don't put a branch in our path more than once. */
8173 for (i = 0; i < path_entry; i++)
8174 if (data->path[i].branch == p)
8175 break;
8176
8177 if (i != path_entry)
8178 break;
8179
8180 data->path[path_entry].branch = p;
8181 data->path[path_entry++].status = TAKEN;
8182
8183 /* This branch now ends our path. It was possible that we
8184 didn't see this branch the last time around (when the
8185 insn in front of the target was a JUMP_INSN that was
8186 turned into a no-op). */
8187 path_size = path_entry;
8188
8189 p = JUMP_LABEL (p);
8190 /* Mark block so we won't scan it again later. */
8191 PUT_MODE (NEXT_INSN (p), QImode);
8192 }
8193 /* Detect a branch around a block of code. */
8194 else if (skip_blocks && q != 0 && GET_CODE (q) != CODE_LABEL)
8195 {
8196 register rtx tmp;
8197
8198 if (next_real_insn (q) == next)
8199 {
8200 p = NEXT_INSN (p);
8201 continue;
8202 }
8203
8204 for (i = 0; i < path_entry; i++)
8205 if (data->path[i].branch == p)
8206 break;
8207
8208 if (i != path_entry)
8209 break;
8210
8211 /* This is no_labels_between_p (p, q) with an added check for
8212 reaching the end of a function (in case Q precedes P). */
8213 for (tmp = NEXT_INSN (p); tmp && tmp != q; tmp = NEXT_INSN (tmp))
8214 if (GET_CODE (tmp) == CODE_LABEL)
8215 break;
8216
8217 if (tmp == q)
8218 {
8219 data->path[path_entry].branch = p;
8220 data->path[path_entry++].status = AROUND;
8221
8222 path_size = path_entry;
8223
8224 p = JUMP_LABEL (p);
8225 /* Mark block so we won't scan it again later. */
8226 PUT_MODE (NEXT_INSN (p), QImode);
8227 }
8228 }
8229 }
8230 p = NEXT_INSN (p);
8231 }
8232
8233 data->low_cuid = low_cuid;
8234 data->high_cuid = high_cuid;
8235 data->nsets = nsets;
8236 data->last = p;
8237
8238 /* If none of the jumps in the path are taken, set our path length to zero
8239 so a rescan won't be done. */
8240 for (i = path_size - 1; i >= 0; i--)
8241 if (data->path[i].status != NOT_TAKEN)
8242 break;
8243
8244 if (i == -1)
8245 data->path_size = 0;
8246 else
8247 data->path_size = path_size;
8248
8249 /* End the current branch path. */
8250 data->path[path_size].branch = 0;
8251 }
8252 \f
8253 /* Perform cse on the instructions of a function.
8254 F is the first instruction.
8255 NREGS is one plus the highest pseudo-reg number used in the instruction chain.
8256
8257 AFTER_LOOP is 1 if this is the cse call done after loop optimization
8258 (only if -frerun-cse-after-loop).
8259
8260 Returns 1 if jump_optimize should be redone due to simplifications
8261 in conditional jump instructions. */
8262
8263 int
8264 cse_main (f, nregs, after_loop, file)
8265 rtx f;
8266 int nregs;
8267 int after_loop;
8268 FILE *file;
8269 {
8270 struct cse_basic_block_data val;
8271 register rtx insn = f;
8272 register int i;
8273
8274 cse_jumps_altered = 0;
8275 recorded_label_ref = 0;
8276 constant_pool_entries_cost = 0;
8277 val.path_size = 0;
8278
8279 init_recog ();
8280
8281 max_reg = nregs;
8282
8283 all_minus_one = (int *) alloca (nregs * sizeof (int));
8284 consec_ints = (int *) alloca (nregs * sizeof (int));
8285
8286 for (i = 0; i < nregs; i++)
8287 {
8288 all_minus_one[i] = -1;
8289 consec_ints[i] = i;
8290 }
8291
8292 reg_next_eqv = (int *) alloca (nregs * sizeof (int));
8293 reg_prev_eqv = (int *) alloca (nregs * sizeof (int));
8294 reg_qty = (int *) alloca (nregs * sizeof (int));
8295 reg_in_table = (int *) alloca (nregs * sizeof (int));
8296 reg_tick = (int *) alloca (nregs * sizeof (int));
8297
8298 #ifdef LOAD_EXTEND_OP
8299
8300 /* Allocate scratch rtl here. cse_insn will fill in the memory reference
8301 and change the code and mode as appropriate. */
8302 memory_extend_rtx = gen_rtx (ZERO_EXTEND, VOIDmode, 0);
8303 #endif
8304
8305 /* Discard all the free elements of the previous function
8306 since they are allocated in the temporary obstack. */
8307 bzero ((char *) table, sizeof table);
8308 free_element_chain = 0;
8309 n_elements_made = 0;
8310
8311 /* Find the largest uid. */
8312
8313 max_uid = get_max_uid ();
8314 uid_cuid = (int *) alloca ((max_uid + 1) * sizeof (int));
8315 bzero ((char *) uid_cuid, (max_uid + 1) * sizeof (int));
8316
8317 /* Compute the mapping from uids to cuids.
8318 CUIDs are numbers assigned to insns, like uids,
8319 except that cuids increase monotonically through the code.
8320 Don't assign cuids to line-number NOTEs, so that the distance in cuids
8321 between two insns is not affected by -g. */
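 /* For example, an insn, a following line-number note, and the next insn
    receive cuids N, N, and N+1 respectively, so the presence of the note
    does not change the cuid distance between the two insns.  */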
8322
8323 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
8324 {
8325 if (GET_CODE (insn) != NOTE
8326 || NOTE_LINE_NUMBER (insn) < 0)
8327 INSN_CUID (insn) = ++i;
8328 else
8329 /* Give a line number note the same cuid as preceding insn. */
8330 INSN_CUID (insn) = i;
8331 }
8332
8333 /* Initialize which registers are clobbered by calls. */
8334
8335 CLEAR_HARD_REG_SET (regs_invalidated_by_call);
8336
8337 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
8338 if ((call_used_regs[i]
8339 /* Used to check !fixed_regs[i] here, but that isn't safe;
8340 fixed regs are still call-clobbered, and sched can get
8341 confused if they can "live across calls".
8342
8343 The frame pointer is always preserved across calls. The arg
8344 pointer is if it is fixed. The stack pointer usually is, unless
8345 RETURN_POPS_ARGS, in which case an explicit CLOBBER
8346 will be present. If we are generating PIC code, the PIC offset
8347 table register is preserved across calls. */
8348
8349 && i != STACK_POINTER_REGNUM
8350 && i != FRAME_POINTER_REGNUM
8351 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
8352 && i != HARD_FRAME_POINTER_REGNUM
8353 #endif
8354 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
8355 && ! (i == ARG_POINTER_REGNUM && fixed_regs[i])
8356 #endif
8357 #if defined (PIC_OFFSET_TABLE_REGNUM) && !defined (PIC_OFFSET_TABLE_REG_CALL_CLOBBERED)
8358 && ! (i == PIC_OFFSET_TABLE_REGNUM && flag_pic)
8359 #endif
8360 )
8361 || global_regs[i])
8362 SET_HARD_REG_BIT (regs_invalidated_by_call, i);
8363
8364 /* Loop over basic blocks.
8365 Compute the maximum number of qty's needed for each basic block
8366 (which is 2 for each SET). */
8367 insn = f;
8368 while (insn)
8369 {
8370 cse_end_of_basic_block (insn, &val, flag_cse_follow_jumps, after_loop,
8371 flag_cse_skip_blocks);
8372
8373 /* If this basic block was already processed or has no sets, skip it. */
8374 if (val.nsets == 0 || GET_MODE (insn) == QImode)
8375 {
8376 PUT_MODE (insn, VOIDmode);
8377 insn = (val.last ? NEXT_INSN (val.last) : 0);
8378 val.path_size = 0;
8379 continue;
8380 }
8381
8382 cse_basic_block_start = val.low_cuid;
8383 cse_basic_block_end = val.high_cuid;
8384 max_qty = val.nsets * 2;
8385
8386 if (file)
8387 fprintf (file, ";; Processing block from %d to %d, %d sets.\n",
8388 INSN_UID (insn), val.last ? INSN_UID (val.last) : 0,
8389 val.nsets);
8390
8391 /* Make MAX_QTY bigger to give us room to optimize
8392 past the end of this basic block, if that should prove useful. */
8393 if (max_qty < 500)
8394 max_qty = 500;
8395
8396 max_qty += max_reg;
8397
8398 /* If this basic block is being extended by following certain jumps,
8399 (see `cse_end_of_basic_block'), we reprocess the code from the start.
8400 Otherwise, we start after this basic block. */
8401 if (val.path_size > 0)
8402 cse_basic_block (insn, val.last, val.path, 0);
8403 else
8404 {
8405 int old_cse_jumps_altered = cse_jumps_altered;
8406 rtx temp;
8407
8408 /* When cse changes a conditional jump to an unconditional
8409 jump, we want to reprocess the block, since it will give
8410 us a new branch path to investigate. */
8411 cse_jumps_altered = 0;
8412 temp = cse_basic_block (insn, val.last, val.path, ! after_loop);
8413 if (cse_jumps_altered == 0
8414 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
8415 insn = temp;
8416
8417 cse_jumps_altered |= old_cse_jumps_altered;
8418 }
8419
8420 #ifdef USE_C_ALLOCA
8421 alloca (0);
8422 #endif
8423 }
8424
8425 /* Tell refers_to_mem_p that qty_const info is not available. */
8426 qty_const = 0;
8427
8428 if (max_elements_made < n_elements_made)
8429 max_elements_made = n_elements_made;
8430
8431 return cse_jumps_altered || recorded_label_ref;
8432 }
8433
8434 /* Process a single basic block. FROM and TO are the limits of the basic
8435 block. NEXT_BRANCH points to the branch path when following jumps or
8436 a null path when not following jumps.
8437
8438 AROUND_LOOP is non-zero if we are to try to cse around to the start of a
8439 loop. This is true when we are being called for the last time on a
8440 block and this CSE pass is before loop.c. */
8441
8442 static rtx
8443 cse_basic_block (from, to, next_branch, around_loop)
8444 register rtx from, to;
8445 struct branch_path *next_branch;
8446 int around_loop;
8447 {
8448 register rtx insn;
8449 int to_usage = 0;
8450 int in_libcall_block = 0;
8451
8452 /* Each of these arrays is undefined before max_reg, so only allocate
8453 the space actually needed and adjust the start below. */
8454
8455 qty_first_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8456 qty_last_reg = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8457 qty_mode = (enum machine_mode *) alloca ((max_qty - max_reg) * sizeof (enum machine_mode));
8458 qty_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8459 qty_const_insn = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8460 qty_comparison_code
8461 = (enum rtx_code *) alloca ((max_qty - max_reg) * sizeof (enum rtx_code));
8462 qty_comparison_qty = (int *) alloca ((max_qty - max_reg) * sizeof (int));
8463 qty_comparison_const = (rtx *) alloca ((max_qty - max_reg) * sizeof (rtx));
8464
8465 qty_first_reg -= max_reg;
8466 qty_last_reg -= max_reg;
8467 qty_mode -= max_reg;
8468 qty_const -= max_reg;
8469 qty_const_insn -= max_reg;
8470 qty_comparison_code -= max_reg;
8471 qty_comparison_qty -= max_reg;
8472 qty_comparison_const -= max_reg;
8473
8474 new_basic_block ();
8475
8476 /* TO might be a label. If so, protect it from being deleted. */
8477 if (to != 0 && GET_CODE (to) == CODE_LABEL)
8478 ++LABEL_NUSES (to);
8479
8480 for (insn = from; insn != to; insn = NEXT_INSN (insn))
8481 {
8482 register enum rtx_code code;
8483
8484 /* See if this is a branch that is part of the path. If so, and it is
8485 to be taken, do so. */
8486 if (next_branch->branch == insn)
8487 {
8488 enum taken status = next_branch++->status;
8489 if (status != NOT_TAKEN)
8490 {
8491 if (status == TAKEN)
8492 record_jump_equiv (insn, 1);
8493 else
8494 invalidate_skipped_block (NEXT_INSN (insn));
8495
8496 /* Set the last insn as the jump insn; it doesn't affect cc0.
8497 Then follow this branch. */
8498 #ifdef HAVE_cc0
8499 prev_insn_cc0 = 0;
8500 #endif
8501 prev_insn = insn;
8502 insn = JUMP_LABEL (insn);
8503 continue;
8504 }
8505 }
8506
8507 code = GET_CODE (insn);
8508 if (GET_MODE (insn) == QImode)
8509 PUT_MODE (insn, VOIDmode);
8510
8511 if (GET_RTX_CLASS (code) == 'i')
8512 {
8513 /* Process notes first so we have all notes in canonical forms when
8514 looking for duplicate operations. */
8515
8516 if (REG_NOTES (insn))
8517 REG_NOTES (insn) = cse_process_notes (REG_NOTES (insn), NULL_RTX);
8518
8519 /* Track when we are inside a LIBCALL block. Inside such a block,
8520 we do not want to record destinations. The last insn of a
8521 LIBCALL block is not considered to be part of the block, since
8522 its destination is the result of the block and hence should be
8523 recorded. */
8524
8525 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
8526 in_libcall_block = 1;
8527 else if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
8528 in_libcall_block = 0;
8529
8530 cse_insn (insn, in_libcall_block);
8531 }
8532
8533 /* If INSN is now an unconditional jump, skip to the end of our
8534 basic block by pretending that we just did the last insn in the
8535 basic block. If we are jumping to the end of our block, show
8536 that we can have one usage of TO. */
8537
8538 if (simplejump_p (insn))
8539 {
8540 if (to == 0)
8541 return 0;
8542
8543 if (JUMP_LABEL (insn) == to)
8544 to_usage = 1;
8545
8546 /* Maybe TO was deleted because the jump is unconditional.
8547 If so, there is nothing left in this basic block. */
8548 /* ??? Perhaps it would be smarter to set TO
8549 to whatever follows this insn,
8550 and pretend the basic block had always ended here. */
8551 if (INSN_DELETED_P (to))
8552 break;
8553
8554 insn = PREV_INSN (to);
8555 }
8556
8557 /* See if it is ok to keep on going past the label
8558 which used to end our basic block. Remember that we incremented
8559 the count of that label, so we decrement it here. If we made
8560 a jump unconditional, TO_USAGE will be one; in that case, we don't
8561 want to count the use in that jump. */
8562
8563 if (to != 0 && NEXT_INSN (insn) == to
8564 && GET_CODE (to) == CODE_LABEL && --LABEL_NUSES (to) == to_usage)
8565 {
8566 struct cse_basic_block_data val;
8567 rtx prev;
8568
8569 insn = NEXT_INSN (to);
8570
8571 if (LABEL_NUSES (to) == 0)
8572 insn = delete_insn (to);
8573
8574 /* If TO was the last insn in the function, we are done. */
8575 if (insn == 0)
8576 return 0;
8577
8578 /* If TO was preceded by a BARRIER we are done with this block
8579 because it has no continuation. */
8580 prev = prev_nonnote_insn (to);
8581 if (prev && GET_CODE (prev) == BARRIER)
8582 return insn;
8583
8584 /* Find the end of the following block. Note that we won't be
8585 following branches in this case. */
8586 to_usage = 0;
8587 val.path_size = 0;
8588 cse_end_of_basic_block (insn, &val, 0, 0, 0);
8589
8590 /* If the tables we allocated have enough space left
8591 to handle all the SETs in the next basic block,
8592 continue through it. Otherwise, return,
8593 and that block will be scanned individually. */
8594 if (val.nsets * 2 + next_qty > max_qty)
8595 break;
8596
8597 cse_basic_block_start = val.low_cuid;
8598 cse_basic_block_end = val.high_cuid;
8599 to = val.last;
8600
8601 /* Prevent TO from being deleted if it is a label. */
8602 if (to != 0 && GET_CODE (to) == CODE_LABEL)
8603 ++LABEL_NUSES (to);
8604
8605 /* Back up so we process the first insn in the extension. */
8606 insn = PREV_INSN (insn);
8607 }
8608 }
8609
8610 if (next_qty > max_qty)
8611 abort ();
8612
8613 /* If we are running before loop.c, we stopped on a NOTE_INSN_LOOP_END, and
8614 the previous insn is the only insn that branches to the head of a loop,
8615 we can cse into the loop. Don't do this if we changed the jump
8616 structure of a loop unless we aren't going to be following jumps. */
8617
8618 if ((cse_jumps_altered == 0
8619 || (flag_cse_follow_jumps == 0 && flag_cse_skip_blocks == 0))
8620 && around_loop && to != 0
8621 && GET_CODE (to) == NOTE && NOTE_LINE_NUMBER (to) == NOTE_INSN_LOOP_END
8622 && GET_CODE (PREV_INSN (to)) == JUMP_INSN
8623 && JUMP_LABEL (PREV_INSN (to)) != 0
8624 && LABEL_NUSES (JUMP_LABEL (PREV_INSN (to))) == 1)
8625 cse_around_loop (JUMP_LABEL (PREV_INSN (to)));
8626
8627 return to ? NEXT_INSN (to) : 0;
8628 }
8629 \f
8630 /* Count the number of times registers are used (not set) in X.
8631 COUNTS is an array in which we accumulate the count, INCR is how much
8632 we count each register usage.
8633
8634 Don't count a usage of DEST, which is the SET_DEST of a SET which
8635 contains X in its SET_SRC. This is because such a SET does not
8636 modify the liveness of DEST. */
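 /* For example, in (set (reg 65) (plus (reg 65) (reg 66))) only the use of
    (reg 66) is counted: the (reg 65) inside SET_SRC matches DEST, so the
    insn can still be deleted when nothing else uses (reg 65)
    (register numbers are arbitrary).  */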
8637
8638 static void
8639 count_reg_usage (x, counts, dest, incr)
8640 rtx x;
8641 int *counts;
8642 rtx dest;
8643 int incr;
8644 {
8645 enum rtx_code code;
8646 char *fmt;
8647 int i, j;
8648
8649 if (x == 0)
8650 return;
8651
8652 switch (code = GET_CODE (x))
8653 {
8654 case REG:
8655 if (x != dest)
8656 counts[REGNO (x)] += incr;
8657 return;
8658
8659 case PC:
8660 case CC0:
8661 case CONST:
8662 case CONST_INT:
8663 case CONST_DOUBLE:
8664 case SYMBOL_REF:
8665 case LABEL_REF:
8666 case CLOBBER:
8667 return;
8668
8669 case SET:
8670 /* Unless we are setting a REG, count everything in SET_DEST. */
8671 if (GET_CODE (SET_DEST (x)) != REG)
8672 count_reg_usage (SET_DEST (x), counts, NULL_RTX, incr);
8673
8674 /* If SRC has side-effects, then we can't delete this insn, so the
8675 usage of SET_DEST inside SRC counts.
8676
8677 ??? Strictly-speaking, we might be preserving this insn
8678 because some other SET has side-effects, but that's hard
8679 to do and can't happen now. */
8680 count_reg_usage (SET_SRC (x), counts,
8681 side_effects_p (SET_SRC (x)) ? NULL_RTX : SET_DEST (x),
8682 incr);
8683 return;
8684
8685 case CALL_INSN:
8686 count_reg_usage (CALL_INSN_FUNCTION_USAGE (x), counts, NULL_RTX, incr);
8687
8688 /* ... falls through ... */
8689 case INSN:
8690 case JUMP_INSN:
8691 count_reg_usage (PATTERN (x), counts, NULL_RTX, incr);
8692
8693 /* Things used in a REG_EQUAL note aren't dead since loop may try to
8694 use them. */
8695
8696 count_reg_usage (REG_NOTES (x), counts, NULL_RTX, incr);
8697 return;
8698
8699 case EXPR_LIST:
8700 case INSN_LIST:
8701 if (REG_NOTE_KIND (x) == REG_EQUAL
8702 || GET_CODE (XEXP (x,0)) == USE)
8703 count_reg_usage (XEXP (x, 0), counts, NULL_RTX, incr);
8704 count_reg_usage (XEXP (x, 1), counts, NULL_RTX, incr);
8705 return;
8706 }
8707
8708 fmt = GET_RTX_FORMAT (code);
8709 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8710 {
8711 if (fmt[i] == 'e')
8712 count_reg_usage (XEXP (x, i), counts, dest, incr);
8713 else if (fmt[i] == 'E')
8714 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8715 count_reg_usage (XVECEXP (x, i, j), counts, dest, incr);
8716 }
8717 }
8718 \f
8719 /* Scan all the insns and delete any that are dead; i.e., they set a register
8720 that is never used or they copy a register to itself.
8721
8722 This is used to remove insns made obviously dead by cse. It improves the
8723 heuristics in loop since it won't try to move dead invariants out of loops
8724 or make givs for dead quantities. The remaining passes of the compilation
8725 are also sped up. */
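 /* For example, (set (reg 70) (reg 70)) is always dead, and an insn
    (set (reg 71) <side-effect-free source>) becomes dead once counts[71]
    drops to zero, unless it sits inside a libcall block (assuming both
    registers are pseudos; the numbers are arbitrary).  */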
8726
8727 void
8728 delete_dead_from_cse (insns, nreg)
8729 rtx insns;
8730 int nreg;
8731 {
8732 int *counts = (int *) alloca (nreg * sizeof (int));
8733 rtx insn, prev;
8734 rtx tem;
8735 int i;
8736 int in_libcall = 0;
8737
8738 /* First count the number of times each register is used. */
8739 bzero ((char *) counts, sizeof (int) * nreg);
8740 for (insn = next_real_insn (insns); insn; insn = next_real_insn (insn))
8741 count_reg_usage (insn, counts, NULL_RTX, 1);
8742
8743 /* Go from the last insn to the first and delete insns that only set unused
8744 registers or copy a register to itself. As we delete an insn, remove
8745 usage counts for registers it uses. */
8746 for (insn = prev_real_insn (get_last_insn ()); insn; insn = prev)
8747 {
8748 int live_insn = 0;
8749
8750 prev = prev_real_insn (insn);
8751
8752 /* Don't delete any insns that are part of a libcall block.
8753 Flow or loop might get confused if we did that. Remember
8754 that we are scanning backwards. */
8755 if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
8756 in_libcall = 1;
8757
8758 if (in_libcall)
8759 live_insn = 1;
8760 else if (GET_CODE (PATTERN (insn)) == SET)
8761 {
8762 if (GET_CODE (SET_DEST (PATTERN (insn))) == REG
8763 && SET_DEST (PATTERN (insn)) == SET_SRC (PATTERN (insn)))
8764 ;
8765
8766 #ifdef HAVE_cc0
8767 else if (GET_CODE (SET_DEST (PATTERN (insn))) == CC0
8768 && ! side_effects_p (SET_SRC (PATTERN (insn)))
8769 && ((tem = next_nonnote_insn (insn)) == 0
8770 || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
8771 || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
8772 ;
8773 #endif
8774 else if (GET_CODE (SET_DEST (PATTERN (insn))) != REG
8775 || REGNO (SET_DEST (PATTERN (insn))) < FIRST_PSEUDO_REGISTER
8776 || counts[REGNO (SET_DEST (PATTERN (insn)))] != 0
8777 || side_effects_p (SET_SRC (PATTERN (insn))))
8778 live_insn = 1;
8779 }
8780 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
8781 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
8782 {
8783 rtx elt = XVECEXP (PATTERN (insn), 0, i);
8784
8785 if (GET_CODE (elt) == SET)
8786 {
8787 if (GET_CODE (SET_DEST (elt)) == REG
8788 && SET_DEST (elt) == SET_SRC (elt))
8789 ;
8790
8791 #ifdef HAVE_cc0
8792 else if (GET_CODE (SET_DEST (elt)) == CC0
8793 && ! side_effects_p (SET_SRC (elt))
8794 && ((tem = next_nonnote_insn (insn)) == 0
8795 || GET_RTX_CLASS (GET_CODE (tem)) != 'i'
8796 || ! reg_referenced_p (cc0_rtx, PATTERN (tem))))
8797 ;
8798 #endif
8799 else if (GET_CODE (SET_DEST (elt)) != REG
8800 || REGNO (SET_DEST (elt)) < FIRST_PSEUDO_REGISTER
8801 || counts[REGNO (SET_DEST (elt))] != 0
8802 || side_effects_p (SET_SRC (elt)))
8803 live_insn = 1;
8804 }
8805 else if (GET_CODE (elt) != CLOBBER && GET_CODE (elt) != USE)
8806 live_insn = 1;
8807 }
8808 else
8809 live_insn = 1;
8810
8811 /* If this is a dead insn, delete it and show that registers in it aren't
8812 being used. */
8813
8814 if (! live_insn)
8815 {
8816 count_reg_usage (insn, counts, NULL_RTX, -1);
8817 delete_insn (insn);
8818 }
8819
8820 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
8821 in_libcall = 0;
8822 }
8823 }