/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not, see
<http://www.gnu.org/licenses/>.  */

/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's high likelihood of success.

   We check (with modified_between_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.  */
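
/* Illustrative sketch (not part of the original sources): for a two-insn
   combination, the substitution described above might proceed as follows,
   with hypothetical insn UIDs and pseudo-register numbers:

       (insn 10 ... (set (reg 100) (mult:SI (reg 101) (reg 102))))
       (insn 11 ... (set (reg 103) (plus:SI (reg 100) (reg 104))))

   If LOG_LINKS of insn 11 point back at insn 10 and (reg 100) dies in
   insn 11, combine substitutes the SET_SRC of insn 10 for (reg 100):

       (insn 11 ... (set (reg 103) (plus:SI (mult:SI (reg 101) (reg 102))
                                            (reg 104))))

   If the machine description recognizes the result (say, as a
   multiply-accumulate pattern), insn 10 is deleted and the new pattern
   is installed in insn 11.  */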
62
63 #include "config.h"
64 #include "system.h"
65 #include "coretypes.h"
66 #include "backend.h"
67 #include "target.h"
68 #include "rtl.h"
69 #include "tree.h"
70 #include "cfghooks.h"
71 #include "predict.h"
72 #include "df.h"
73 #include "memmodel.h"
74 #include "tm_p.h"
75 #include "optabs.h"
76 #include "regs.h"
77 #include "emit-rtl.h"
78 #include "recog.h"
79 #include "cgraph.h"
80 #include "stor-layout.h"
81 #include "cfgrtl.h"
82 #include "cfgcleanup.h"
83 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
84 #include "explow.h"
85 #include "insn-attr.h"
86 #include "rtlhooks-def.h"
87 #include "expr.h"
88 #include "tree-pass.h"
89 #include "valtrack.h"
90 #include "rtl-iter.h"
91 #include "print-rtl.h"
92 #include "function-abi.h"
93 #include "rtlanal.h"
94
95 /* Number of attempts to combine instructions in this function. */
96
97 static int combine_attempts;
98
99 /* Number of attempts that got as far as substitution in this function. */
100
101 static int combine_merges;
102
103 /* Number of instructions combined with added SETs in this function. */
104
105 static int combine_extras;
106
107 /* Number of instructions combined in this function. */
108
109 static int combine_successes;
110
111 /* Totals over entire compilation. */
112
113 static int total_attempts, total_merges, total_extras, total_successes;
114
115 /* combine_instructions may try to replace the right hand side of the
116 second instruction with the value of an associated REG_EQUAL note
117 before throwing it at try_combine. That is problematic when there
118 is a REG_DEAD note for a register used in the old right hand side
119 and can cause distribute_notes to do wrong things. This is the
120 second instruction if it has been so modified, null otherwise. */
121
122 static rtx_insn *i2mod;
123
124 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
125
126 static rtx i2mod_old_rhs;
127
128 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
129
130 static rtx i2mod_new_rhs;
131 \f
132 struct reg_stat_type {
133 /* Record last point of death of (hard or pseudo) register n. */
134 rtx_insn *last_death;
135
136 /* Record last point of modification of (hard or pseudo) register n. */
137 rtx_insn *last_set;
138
139 /* The next group of fields allows the recording of the last value assigned
140 to (hard or pseudo) register n. We use this information to see if an
141 operation being processed is redundant given a prior operation performed
142 on the register. For example, an `and' with a constant is redundant if
143 all the zero bits are already known to be turned off.
144
145 We use an approach similar to that used by cse, but change it in the
146 following ways:
147
148 (1) We do not want to reinitialize at each label.
149 (2) It is useful, but not critical, to know the actual value assigned
150 to a register. Often just its form is helpful.
151
152 Therefore, we maintain the following fields:
153
154 last_set_value the last value assigned
155 last_set_label records the value of label_tick when the
156 register was assigned
157 last_set_table_tick records the value of label_tick when a
158 value using the register is assigned
159 last_set_invalid set to nonzero when it is not valid
160 to use the value of this register in some
161 register's value
162
163 To understand the usage of these tables, it is important to understand
164 the distinction between the value in last_set_value being valid and
165 the register being validly contained in some other expression in the
166 table.
167
168 (The next two parameters are out of date).
169
170 reg_stat[i].last_set_value is valid if it is nonzero, and either
171 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
172
173 Register I may validly appear in any expression returned for the value
174 of another register if reg_n_sets[i] is 1. It may also appear in the
175 value for register J if reg_stat[j].last_set_invalid is zero, or
176 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
177
178 If an expression is found in the table containing a register which may
179 not validly appear in an expression, the register is replaced by
180 something that won't match, (clobber (const_int 0)). */
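
  /* A concrete instance of the redundancy test mentioned above (an
     illustration, not part of the original sources): if nonzero_bits for
     (reg 100) is known to be 0x00ff, then a later
     (and:SI (reg 100) (const_int 0xff)) is redundant, because every bit
     the AND could clear is already known to be zero, so the AND can be
     simplified away.  */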
181
182 /* Record last value assigned to (hard or pseudo) register n. */
183
184 rtx last_set_value;
185
186 /* Record the value of label_tick when an expression involving register n
187 is placed in last_set_value. */
188
189 int last_set_table_tick;
190
191 /* Record the value of label_tick when the value for register n is placed in
192 last_set_value. */
193
194 int last_set_label;
195
196 /* These fields are maintained in parallel with last_set_value and are
197 used to store the mode in which the register was last set, the bits
198 that were known to be zero when it was last set, and the number of
199 sign bits copies it was known to have when it was last set. */
200
201 unsigned HOST_WIDE_INT last_set_nonzero_bits;
202 char last_set_sign_bit_copies;
203 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
204
205 /* Set nonzero if references to register n in expressions should not be
206 used. last_set_invalid is set nonzero when this register is being
207 assigned to and last_set_table_tick == label_tick. */
208
209 char last_set_invalid;
210
211 /* Some registers that are set more than once and used in more than one
212 basic block are nevertheless always set in similar ways. For example,
213 a QImode register may be loaded from memory in two places on a machine
214 where byte loads zero extend.
215
216 We record in the following fields if a register has some leading bits
217 that are always equal to the sign bit, and what we know about the
218 nonzero bits of a register, specifically which bits are known to be
219 zero.
220
221 If an entry is zero, it means that we don't know anything special. */
222
223 unsigned char sign_bit_copies;
224
225 unsigned HOST_WIDE_INT nonzero_bits;
226
227 /* Record the value of the label_tick when the last truncation
228 happened. The field truncated_to_mode is only valid if
229 truncation_label == label_tick. */
230
231 int truncation_label;
232
233 /* Record the last truncation seen for this register. If truncation
234 is not a nop to this mode we might be able to save an explicit
235 truncation if we know that value already contains a truncated
236 value. */
237
238 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
239 };
240
241
242 static vec<reg_stat_type> reg_stat;
243
244 /* One plus the highest pseudo for which we track REG_N_SETS.
245 regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
246 but during combine_split_insns new pseudos can be created. As we don't have
247 updated DF information in that case, it is hard to initialize the array
248 after growing. The combiner only cares about REG_N_SETS (regno) == 1,
249 so instead of growing the arrays, just assume all newly created pseudos
250 during combine might be set multiple times. */
251
252 static unsigned int reg_n_sets_max;
253
254 /* Record the luid of the last insn that invalidated memory
255 (anything that writes memory, and subroutine calls, but not pushes). */
256
257 static int mem_last_set;
258
259 /* Record the luid of the last CALL_INSN
260 so we can tell whether a potential combination crosses any calls. */
261
262 static int last_call_luid;
263
264 /* When `subst' is called, this is the insn that is being modified
265 (by combining in a previous insn). The PATTERN of this insn
266 is still the old pattern partially modified and it should not be
267 looked at, but this may be used to examine the successors of the insn
268 to judge whether a simplification is valid. */
269
270 static rtx_insn *subst_insn;
271
272 /* This is the lowest LUID that `subst' is currently dealing with.
273 get_last_value will not return a value if the register was set at or
274 after this LUID. If not for this mechanism, we could get confused if
275 I2 or I1 in try_combine were an insn that used the old value of a register
276 to obtain a new value. In that case, we might erroneously get the
277 new value of the register when we wanted the old one. */
278
279 static int subst_low_luid;
280
281 /* This contains any hard registers that are used in newpat; reg_dead_at_p
282 must consider all these registers to be always live. */
283
284 static HARD_REG_SET newpat_used_regs;
285
286 /* This is an insn to which a LOG_LINKS entry has been added. If this
287 insn is the earlier than I2 or I3, combine should rescan starting at
288 that location. */
289
290 static rtx_insn *added_links_insn;
291
292 /* And similarly, for notes. */
293
294 static rtx_insn *added_notes_insn;
295
296 /* Basic block in which we are performing combines. */
297 static basic_block this_basic_block;
298 static bool optimize_this_for_speed_p;
299
\f
/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;

/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link {
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;

static inline int
insn_uid_check (const_rtx insn)
{
  int uid = INSN_UID (insn);
  gcc_checking_assert (uid <= max_uid_known);
  return uid;
}

#define INSN_COST(INSN)  (uid_insn_cost[insn_uid_check (INSN)])
#define LOG_LINKS(INSN)  (uid_log_links[insn_uid_check (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN)                      \
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)

/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;

/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
                                          sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}

/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static scalar_int_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;

\f
/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; int regno; struct insn_link **l; } where;
};

/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;

/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;

static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
                                         scalar_int_mode,
                                         unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
                                                scalar_int_mode,
                                                unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
                          rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
                              int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
                            rtx, unsigned HOST_WIDE_INT, int, int, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
                              unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
                              scalar_int_mode, unsigned HOST_WIDE_INT, int);
static rtx force_to_mode (rtx, machine_mode,
                          unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
                                     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
                                   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
                            HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
                                 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
                                             rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);
\f
485
486 /* It is not safe to use ordinary gen_lowpart in combine.
487 See comments in gen_lowpart_for_combine. */
488 #undef RTL_HOOKS_GEN_LOWPART
489 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
490
491 /* Our implementation of gen_lowpart never emits a new pseudo. */
492 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
493 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
494
495 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
496 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
497
498 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
499 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
500
501 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
502 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
503
504 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
505
506 \f
507 /* Convenience wrapper for the canonicalize_comparison target hook.
508 Target hooks cannot use enum rtx_code. */
509 static inline void
510 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
511 bool op0_preserve_value)
512 {
513 int code_int = (int)*code;
514 targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
515 *code = (enum rtx_code)code_int;
516 }
517
518 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
519 PATTERN cannot be split. Otherwise, it returns an insn sequence.
520 This is a wrapper around split_insns which ensures that the
521 reg_stat vector is made larger if the splitter creates a new
522 register. */
523
524 static rtx_insn *
525 combine_split_insns (rtx pattern, rtx_insn *insn)
526 {
527 rtx_insn *ret;
528 unsigned int nregs;
529
530 ret = split_insns (pattern, insn);
531 nregs = max_reg_num ();
532 if (nregs > reg_stat.length ())
533 reg_stat.safe_grow_cleared (nregs, true);
534 return ret;
535 }
536
537 /* This is used by find_single_use to locate an rtx in LOC that
538 contains exactly one use of DEST, which is typically a REG.
539 It returns a pointer to the innermost rtx expression
540 containing DEST. Appearances of DEST that are being used to
541 totally replace it are not counted. */
542
543 static rtx *
544 find_single_use_1 (rtx dest, rtx *loc)
545 {
546 rtx x = *loc;
547 enum rtx_code code = GET_CODE (x);
548 rtx *result = NULL;
549 rtx *this_result;
550 int i;
551 const char *fmt;
552
553 switch (code)
554 {
555 case CONST:
556 case LABEL_REF:
557 case SYMBOL_REF:
558 CASE_CONST_ANY:
559 case CLOBBER:
560 return 0;
561
562 case SET:
563 /* If the destination is anything other than PC, a REG or a SUBREG
564 of a REG that occupies all of the REG, the insn uses DEST if
565 it is mentioned in the destination or the source. Otherwise, we
566 need just check the source. */
567 if (GET_CODE (SET_DEST (x)) != PC
568 && !REG_P (SET_DEST (x))
569 && ! (GET_CODE (SET_DEST (x)) == SUBREG
570 && REG_P (SUBREG_REG (SET_DEST (x)))
571 && !read_modify_subreg_p (SET_DEST (x))))
572 break;
573
574 return find_single_use_1 (dest, &SET_SRC (x));
575
576 case MEM:
577 case SUBREG:
578 return find_single_use_1 (dest, &XEXP (x, 0));
579
580 default:
581 break;
582 }
583
584 /* If it wasn't one of the common cases above, check each expression and
585 vector of this code. Look for a unique usage of DEST. */
586
587 fmt = GET_RTX_FORMAT (code);
588 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
589 {
590 if (fmt[i] == 'e')
591 {
592 if (dest == XEXP (x, i)
593 || (REG_P (dest) && REG_P (XEXP (x, i))
594 && REGNO (dest) == REGNO (XEXP (x, i))))
595 this_result = loc;
596 else
597 this_result = find_single_use_1 (dest, &XEXP (x, i));
598
599 if (result == NULL)
600 result = this_result;
601 else if (this_result)
602 /* Duplicate usage. */
603 return NULL;
604 }
605 else if (fmt[i] == 'E')
606 {
607 int j;
608
609 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
610 {
611 if (XVECEXP (x, i, j) == dest
612 || (REG_P (dest)
613 && REG_P (XVECEXP (x, i, j))
614 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
615 this_result = loc;
616 else
617 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
618
619 if (result == NULL)
620 result = this_result;
621 else if (this_result)
622 return NULL;
623 }
624 }
625 }
626
627 return result;
628 }
629
630
631 /* See if DEST, produced in INSN, is used only a single time in the
632 sequel. If so, return a pointer to the innermost rtx expression in which
633 it is used.
634
635 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
636
637 Otherwise, we find the single use by finding an insn that has a
638 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
639 only referenced once in that insn, we know that it must be the first
640 and last insn referencing DEST. */
641
642 static rtx *
643 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
644 {
645 basic_block bb;
646 rtx_insn *next;
647 rtx *result;
648 struct insn_link *link;
649
650 if (!REG_P (dest))
651 return 0;
652
653 bb = BLOCK_FOR_INSN (insn);
654 for (next = NEXT_INSN (insn);
655 next && BLOCK_FOR_INSN (next) == bb;
656 next = NEXT_INSN (next))
657 if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
658 {
659 FOR_EACH_LOG_LINK (link, next)
660 if (link->insn == insn && link->regno == REGNO (dest))
661 break;
662
663 if (link)
664 {
665 result = find_single_use_1 (dest, &PATTERN (next));
666 if (ploc)
667 *ploc = next;
668 return result;
669 }
670 }
671
672 return 0;
673 }
674 \f
675 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
676 insn. The substitution can be undone by undo_all. If INTO is already
677 set to NEWVAL, do not record this change. Because computing NEWVAL might
678 also call SUBST, we have to compute it before we put anything into
679 the undo table. */
680
681 static void
682 do_SUBST (rtx *into, rtx newval)
683 {
684 struct undo *buf;
685 rtx oldval = *into;
686
687 if (oldval == newval)
688 return;
689
690 /* We'd like to catch as many invalid transformations here as
691 possible. Unfortunately, there are way too many mode changes
692 that are perfectly valid, so we'd waste too much effort for
693 little gain doing the checks here. Focus on catching invalid
694 transformations involving integer constants. */
695 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
696 && CONST_INT_P (newval))
697 {
698 /* Sanity check that we're replacing oldval with a CONST_INT
699 that is a valid sign-extension for the original mode. */
700 gcc_assert (INTVAL (newval)
701 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
702
703 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
704 CONST_INT is not valid, because after the replacement, the
705 original mode would be gone. Unfortunately, we can't tell
706 when do_SUBST is called to replace the operand thereof, so we
707 perform this test on oldval instead, checking whether an
708 invalid replacement took place before we got here. */
709 gcc_assert (!(GET_CODE (oldval) == SUBREG
710 && CONST_INT_P (SUBREG_REG (oldval))));
711 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
712 && CONST_INT_P (XEXP (oldval, 0))));
713 }
714
715 if (undobuf.frees)
716 buf = undobuf.frees, undobuf.frees = buf->next;
717 else
718 buf = XNEW (struct undo);
719
720 buf->kind = UNDO_RTX;
721 buf->where.r = into;
722 buf->old_contents.r = oldval;
723 *into = newval;
724
725 buf->next = undobuf.undos, undobuf.undos = buf;
726 }
727
728 #define SUBST(INTO, NEWVAL) do_SUBST (&(INTO), (NEWVAL))
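
/* Illustrative usage (a sketch, not part of the original sources): a
   typical caller rewrites part of a pattern in place and relies on the
   undo machinery if recognition later fails:

       SUBST (SET_SRC (pat), new_src);
       ...
       if (!recognized)       <-- hypothetical condition
         undo_all ();

   Every change is recorded on undobuf.undos, so undo_all can walk that
   list and store each old_contents back through the saved where pointer,
   leaving the original insn untouched.  */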

/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))

/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
subst_mode (int regno, machine_mode newval)
{
  struct undo *buf;
  rtx reg = regno_reg_rtx[regno];
  machine_mode oldval = GET_MODE (reg);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.regno = regno;
  buf->old_contents.m = oldval;
  adjust_reg_mode (reg, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link *oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->where.l = into;
  buf->old_contents.l = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
\f
/* Subroutine of try_combine.  Determine whether the replacement patterns
   NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
   than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
   that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
   undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
   of all the instructions can be estimated and the replacements are more
   expensive than the original sequence.  */

static bool
combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
                       rtx newpat, rtx newi2pat, rtx newotherpat)
{
  int i0_cost, i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Look up the original insn_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      if (i0)
        {
          i0_cost = INSN_COST (i0);
          old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
                      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
        }
      else
        {
          old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
                      ? i1_cost + i2_cost + i3_cost : 0);
          i0_cost = 0;
        }
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = i0_cost = 0;
    }

  /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
     correct that.  */
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
    old_cost -= i1_cost;

  /* Calculate the replacement insn_costs.  */
  rtx tmp = PATTERN (i3);
  PATTERN (i3) = newpat;
  int tmpi = INSN_CODE (i3);
  INSN_CODE (i3) = -1;
  new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
  PATTERN (i3) = tmp;
  INSN_CODE (i3) = tmpi;
  if (newi2pat)
    {
      tmp = PATTERN (i2);
      PATTERN (i2) = newi2pat;
      tmpi = INSN_CODE (i2);
      INSN_CODE (i2) = -1;
      new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
      PATTERN (i2) = tmp;
      INSN_CODE (i2) = tmpi;
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
                 ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      tmp = PATTERN (undobuf.other_insn);
      PATTERN (undobuf.other_insn) = newotherpat;
      tmpi = INSN_CODE (undobuf.other_insn);
      INSN_CODE (undobuf.other_insn) = -1;
      new_other_cost = insn_cost (undobuf.other_insn,
                                  optimize_this_for_speed_p);
      PATTERN (undobuf.other_insn) = tmp;
      INSN_CODE (undobuf.other_insn) = tmpi;
      if (old_other_cost > 0 && new_other_cost > 0)
        {
          old_cost += old_other_cost;
          new_cost += new_other_cost;
        }
      else
        old_cost = 0;
    }

  /* Disallow this combination if both new_cost and old_cost are greater than
     zero, and new_cost is greater than old_cost.  */
  int reject = old_cost > 0 && new_cost > old_cost;

  if (dump_file)
    {
      fprintf (dump_file, "%s combination of insns ",
               reject ? "rejecting" : "allowing");
      if (i0)
        fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
        fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
        fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
        fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
        fprintf (dump_file, "replacement costs %d + %d = %d\n",
                 new_i2_cost, new_i3_cost, new_cost);
      else
        fprintf (dump_file, "replacement cost %d\n", new_cost);
    }

  if (reject)
    return false;

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    {
      INSN_COST (i1) = 0;
      if (i0)
        INSN_COST (i0) = 0;
    }

  return true;
}
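
/* For reference, an illustrative dump produced by the fprintf calls above
   (insn UIDs and costs are hypothetical):

       allowing combination of insns 12 and 14
       original costs 4 + 8 = 12
       replacement cost 8

   A combination is rejected only when both totals are known (positive)
   and the replacement is strictly more expensive.  */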

/* Delete any insns that copy a register to itself.
   Return true if the CFG was changed.  */

static bool
delete_noop_moves (void)
{
  rtx_insn *insn, *next;
  basic_block bb;

  bool edges_deleted = false;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
        {
          next = NEXT_INSN (insn);
          if (INSN_P (insn) && noop_move_p (insn))
            {
              if (dump_file)
                fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));

              edges_deleted |= delete_insn_and_edges (insn);
            }
        }
    }

  return edges_deleted;
}

\f
/* Return false if we do not want to (or cannot) combine DEF.  */
static bool
can_combine_def_p (df_ref def)
{
  /* Do not consider if it is pre/post modification in MEM.  */
  if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
    return false;

  unsigned int regno = DF_REF_REGNO (def);

  /* Do not combine frame pointer adjustments.  */
  if ((regno == FRAME_POINTER_REGNUM
       && (!reload_completed || frame_pointer_needed))
      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
          && regno == HARD_FRAME_POINTER_REGNUM
          && (!reload_completed || frame_pointer_needed))
      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
          && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
    return false;

  return true;
}

/* Return false if we do not want to (or cannot) combine USE.  */
static bool
can_combine_use_p (df_ref use)
{
  /* Do not consider the usage of the stack pointer by function call.  */
  if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
    return false;

  return true;
}

/* Fill in log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx_insn **next_use;
  rtx_insn *insn;
  df_ref def, use;

  next_use = XCNEWVEC (rtx_insn *, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.

     There are a few cases below when we do not consider the definition or
     usage -- these are taken from what the original flow.c did.  Don't ask
     me why it is done this way; I don't know and if it works, I don't want
     to know.  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
        {
          if (!NONDEBUG_INSN_P (insn))
            continue;

          /* Log links are created only once.  */
          gcc_assert (!LOG_LINKS (insn));

          FOR_EACH_INSN_DEF (def, insn)
            {
              unsigned int regno = DF_REF_REGNO (def);
              rtx_insn *use_insn;

              if (!next_use[regno])
                continue;

              if (!can_combine_def_p (def))
                continue;

              use_insn = next_use[regno];
              next_use[regno] = NULL;

              if (BLOCK_FOR_INSN (use_insn) != bb)
                continue;

              /* flow.c claimed:

                 We don't build a LOG_LINK for hard registers contained
                 in ASM_OPERANDs.  If these registers get replaced,
                 we might wind up changing the semantics of the insn,
                 even if reload can make what appear to be valid
                 assignments later.  */
              if (regno < FIRST_PSEUDO_REGISTER
                  && asm_noperands (PATTERN (use_insn)) >= 0)
                continue;

              /* Don't add duplicate links between instructions.  */
              struct insn_link *links;
              FOR_EACH_LOG_LINK (links, use_insn)
                if (insn == links->insn && regno == links->regno)
                  break;

              if (!links)
                LOG_LINKS (use_insn)
                  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
            }

          FOR_EACH_INSN_USE (use, insn)
            if (can_combine_use_p (use))
              next_use[DF_REF_REGNO (use)] = insn;
        }
    }

  free (next_use);
}
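
/* As an illustration (not part of the original sources), consider a block
   containing, with hypothetical UIDs and pseudo 100:

       insn 7:  (set (reg 100) ...)
       insn 9:  ... uses (reg 100), with no intervening set ...

   Walking the block in reverse records insn 9 in next_use[100]; when the
   walk reaches the def in insn 7, LOG_LINKS (insn 9) gains a link
   {insn = insn 7, regno = 100}, which later lets try_combine consider
   merging insn 7 into insn 9.  */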

/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  return false;
}
\f
/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the CFG was changed (e.g. if the combiner has
   turned an indirect jump instruction into a direct jump).  */
static int
combine_instructions (rtx_insn *f, unsigned int nregs)
{
  rtx_insn *insn, *next;
  struct insn_link *links, *nextlinks;
  rtx_insn *first;
  basic_block last_bb;

  int new_direct_jump_p = 0;

  for (first = f; first && !NONDEBUG_INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  reg_stat.safe_grow_cleared (nregs, true);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
  gcc_obstack_init (&insn_link_obstack);

  nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;
  label_tick = label_tick_ebb_start = 1;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use them while searching
     for what bits are known to be set.  */

  setup_incoming_promotions (first);
  /* Allow the entry block and the first block to fall into the same EBB.
     Conceptually the incoming promotions are assigned to the entry block.  */
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  create_log_links ();
  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
          || single_pred (this_basic_block) != last_bb)
        label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      FOR_BB_INSNS (this_basic_block, insn)
        if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
          {
            rtx links;

            subst_low_luid = DF_INSN_LUID (insn);
            subst_insn = insn;

            note_stores (insn, set_nonzero_bits_and_sign_copies, insn);
            record_dead_and_set_regs (insn);

            if (AUTO_INC_DEC)
              for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
                if (REG_NOTE_KIND (links) == REG_INC)
                  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
                                                    insn);

            /* Record the current insn_cost of this instruction.  */
            INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
            if (dump_file)
              {
                fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
                dump_insn_slim (dump_file, insn);
              }
          }
    }

  nonzero_sign_valid = 1;

  /* Now scan all the insns in forward order.  */
  label_tick = label_tick_ebb_start = 1;
  init_reg_last ();
  setup_incoming_promotions (first);
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  int max_combine = param_max_combine_insns;

  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      rtx_insn *last_combined_insn = NULL;

      /* Ignore instruction combination in basic blocks that are going to
         be removed as unreachable anyway.  See PR82386.  */
      if (EDGE_COUNT (this_basic_block->preds) == 0)
        continue;

      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
          || single_pred (this_basic_block) != last_bb)
        label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      rtl_profile_for_bb (this_basic_block);
      for (insn = BB_HEAD (this_basic_block);
           insn != NEXT_INSN (BB_END (this_basic_block));
           insn = next ? next : NEXT_INSN (insn))
        {
          next = 0;
          if (!NONDEBUG_INSN_P (insn))
            continue;

          while (last_combined_insn
                 && (!NONDEBUG_INSN_P (last_combined_insn)
                     || last_combined_insn->deleted ()))
            last_combined_insn = PREV_INSN (last_combined_insn);
          if (last_combined_insn == NULL_RTX
              || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
              || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
            last_combined_insn = insn;

          /* See if we know about function return values before this
             insn based upon SUBREG flags.  */
          check_promoted_subreg (insn, PATTERN (insn));

          /* See if we can find hardregs and subregs of pseudos in
             narrower modes.  This could help turning TRUNCATEs
             into SUBREGs.  */
          note_uses (&PATTERN (insn), record_truncated_values, NULL);

          /* Try this insn with each insn it links back to.  */

          FOR_EACH_LOG_LINK (links, insn)
            if ((next = try_combine (insn, links->insn, NULL,
                                     NULL, &new_direct_jump_p,
                                     last_combined_insn)) != 0)
              {
                statistics_counter_event (cfun, "two-insn combine", 1);
                goto retry;
              }

          /* Try each sequence of three linked insns ending with this one.  */

          if (max_combine >= 3)
            FOR_EACH_LOG_LINK (links, insn)
              {
                rtx_insn *link = links->insn;

                /* If the linked insn has been replaced by a note, then there
                   is no point in pursuing this chain any further.  */
                if (NOTE_P (link))
                  continue;

                FOR_EACH_LOG_LINK (nextlinks, link)
                  if ((next = try_combine (insn, link, nextlinks->insn,
                                           NULL, &new_direct_jump_p,
                                           last_combined_insn)) != 0)
                    {
                      statistics_counter_event (cfun, "three-insn combine", 1);
                      goto retry;
                    }
              }

          /* Try combining an insn with two different insns whose results it
             uses.  */
          if (max_combine >= 3)
            FOR_EACH_LOG_LINK (links, insn)
              for (nextlinks = links->next; nextlinks;
                   nextlinks = nextlinks->next)
                if ((next = try_combine (insn, links->insn,
                                         nextlinks->insn, NULL,
                                         &new_direct_jump_p,
                                         last_combined_insn)) != 0)
                  {
                    statistics_counter_event (cfun, "three-insn combine", 1);
                    goto retry;
                  }

          /* Try four-instruction combinations.  */
          if (max_combine >= 4)
            FOR_EACH_LOG_LINK (links, insn)
              {
                struct insn_link *next1;
                rtx_insn *link = links->insn;

                /* If the linked insn has been replaced by a note, then there
                   is no point in pursuing this chain any further.  */
                if (NOTE_P (link))
                  continue;

                FOR_EACH_LOG_LINK (next1, link)
                  {
                    rtx_insn *link1 = next1->insn;
                    if (NOTE_P (link1))
                      continue;
                    /* I0 -> I1 -> I2 -> I3.  */
                    FOR_EACH_LOG_LINK (nextlinks, link1)
                      if ((next = try_combine (insn, link, link1,
                                               nextlinks->insn,
                                               &new_direct_jump_p,
                                               last_combined_insn)) != 0)
                        {
                          statistics_counter_event (cfun, "four-insn combine", 1);
                          goto retry;
                        }
                    /* I0, I1 -> I2, I2 -> I3.  */
                    for (nextlinks = next1->next; nextlinks;
                         nextlinks = nextlinks->next)
                      if ((next = try_combine (insn, link, link1,
                                               nextlinks->insn,
                                               &new_direct_jump_p,
                                               last_combined_insn)) != 0)
                        {
                          statistics_counter_event (cfun, "four-insn combine", 1);
                          goto retry;
                        }
                  }

                for (next1 = links->next; next1; next1 = next1->next)
                  {
                    rtx_insn *link1 = next1->insn;
                    if (NOTE_P (link1))
                      continue;
                    /* I0 -> I2; I1, I2 -> I3.  */
                    FOR_EACH_LOG_LINK (nextlinks, link)
                      if ((next = try_combine (insn, link, link1,
                                               nextlinks->insn,
                                               &new_direct_jump_p,
                                               last_combined_insn)) != 0)
                        {
                          statistics_counter_event (cfun, "four-insn combine", 1);
                          goto retry;
                        }
                    /* I0 -> I1; I1, I2 -> I3.  */
                    FOR_EACH_LOG_LINK (nextlinks, link1)
                      if ((next = try_combine (insn, link, link1,
                                               nextlinks->insn,
                                               &new_direct_jump_p,
                                               last_combined_insn)) != 0)
                        {
                          statistics_counter_event (cfun, "four-insn combine", 1);
                          goto retry;
                        }
                  }
              }

          /* Try this insn with each REG_EQUAL note it links back to.  */
          FOR_EACH_LOG_LINK (links, insn)
            {
              rtx set, note;
              rtx_insn *temp = links->insn;
              if ((set = single_set (temp)) != 0
                  && (note = find_reg_equal_equiv_note (temp)) != 0
                  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
                  && ! side_effects_p (SET_SRC (set))
                  /* Avoid using a register that may already have been
                     marked dead by an earlier instruction.  */
                  && ! unmentioned_reg_p (note, SET_SRC (set))
                  && (GET_MODE (note) == VOIDmode
                      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
                      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
                         && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
                             || (GET_MODE (XEXP (SET_DEST (set), 0))
                                 == GET_MODE (note))))))
                {
                  /* Temporarily replace the set's source with the
                     contents of the REG_EQUAL note.  The insn will
                     be deleted or recognized by try_combine.  */
                  rtx orig_src = SET_SRC (set);
                  rtx orig_dest = SET_DEST (set);
                  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
                    SET_DEST (set) = XEXP (SET_DEST (set), 0);
                  SET_SRC (set) = note;
                  i2mod = temp;
                  i2mod_old_rhs = copy_rtx (orig_src);
                  i2mod_new_rhs = copy_rtx (note);
                  next = try_combine (insn, i2mod, NULL, NULL,
                                      &new_direct_jump_p,
                                      last_combined_insn);
                  i2mod = NULL;
                  if (next)
                    {
                      statistics_counter_event (cfun, "insn-with-note combine", 1);
                      goto retry;
                    }
                  SET_SRC (set) = orig_src;
                  SET_DEST (set) = orig_dest;
                }
            }

          if (!NOTE_P (insn))
            record_dead_and_set_regs (insn);

        retry:
          ;
        }
    }

  default_rtl_profile ();
  clear_bb_flags ();
  new_direct_jump_p |= purge_all_dead_edges ();
  new_direct_jump_p |= delete_noop_moves ();

  /* Clean up.  */
  obstack_free (&insn_link_obstack, NULL);
  free (uid_log_links);
  free (uid_insn_cost);
  reg_stat.release ();

  {
    struct undo *undo, *next;
    for (undo = undobuf.frees; undo; undo = next)
      {
        next = undo->next;
        free (undo);
      }
    undobuf.frees = 0;
  }

  total_attempts += combine_attempts;
  total_merges += combine_merges;
  total_extras += combine_extras;
  total_successes += combine_successes;

  nonzero_sign_valid = 0;
  rtl_hooks = general_rtl_hooks;

  /* Make recognizer allow volatile MEMs again.  */
  init_recog ();

  return new_direct_jump_p;
}

/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  FOR_EACH_VEC_ELT (reg_stat, i, p)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}
\f
/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx_insn *first)
{
  tree arg;
  bool strictly_local = false;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = DECL_CHAIN (arg))
    {
      rtx x, reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
        continue;

      /* Determine, if possible, whether all call sites of the current
         function lie within the current compilation unit.  (This does
         take into account the exporting of a function via taking its
         address, and so forth.)  */
      strictly_local
        = cgraph_node::local_info_node (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
         (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
         TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed,
         see assign_parm_setup_reg in function.cc.  */
      mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
                                     TREE_TYPE (cfun->decl), 0);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when:
         (a) A mode promotion has occurred;  */
      if (mode1 == mode3)
        continue;
      /* (b) The mode of the register is the same as the mode of
             the argument as it is passed;  */
      if (mode3 != mode4)
        continue;
      /* (c) There's no language level extension;  */
      if (mode1 == mode2)
        ;
      /* (c.1) All callers are from the current compilation unit.  If that's
         the case we don't have to rely on an ABI, we only have to know
         what we're generating right now, and we know that we will do the
         mode1 to mode2 promotion with the given sign.  */
      else if (!strictly_local)
        continue;
      /* (c.2) The combination of the two promotions is useful.  This is
         true when the signs match, or if the first promotion is unsigned.
         In the latter case, (sign_extend (zero_extend x)) is the same as
         (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
      else if (uns1)
        uns3 = true;
      else if (uns3)
        continue;

      /* Record that the value was promoted from mode1 to mode3,
         so that any sign extension at the head of the current
         function may be eliminated.  */
      x = gen_rtx_CLOBBER (mode1, const0_rtx);
      x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
      record_value_for_reg (reg, first, x);
    }
}

/* If MODE has a precision lower than PREC and SRC is a non-negative constant
   that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
   because some machines (maybe most) will actually do the sign-extension and
   this is the conservative approach.

   ??? For 2.5, try to tighten up the MD files in this regard instead of this
   kludge.  */

static rtx
sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
{
  scalar_int_mode int_mode;
  if (CONST_INT_P (src)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) < prec
      && INTVAL (src) > 0
      && val_signbit_known_set_p (int_mode, INTVAL (src)))
    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));

  return src;
}
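
/* Worked example (an illustration, not part of the original sources): with
   MODE == HImode (16 bits) and PREC == 32, SRC == (const_int 0x8000) is
   positive as a host integer but has the HImode sign bit set, so the
   function returns

       GEN_INT (0x8000 | ~0xffff)

   i.e. (const_int -32768), matching what a sign-extending load of the
   16-bit immediate would leave in a wider register.  */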

/* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
   and SET.  */

static void
update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
                           rtx x)
{
  rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
  unsigned HOST_WIDE_INT bits = 0;
  rtx reg_equal = NULL, src = SET_SRC (set);
  unsigned int num = 0;

  if (reg_equal_note)
    reg_equal = XEXP (reg_equal_note, 0);

  if (SHORT_IMMEDIATES_SIGN_EXTEND)
    {
      src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
      if (reg_equal)
        reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
    }

  /* Don't call nonzero_bits if it cannot change anything.  */
  if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
    {
      machine_mode mode = GET_MODE (x);
      if (GET_MODE_CLASS (mode) == MODE_INT
          && HWI_COMPUTABLE_MODE_P (mode))
        mode = nonzero_bits_mode;
      bits = nonzero_bits (src, mode);
      if (reg_equal && bits)
        bits &= nonzero_bits (reg_equal, mode);
      rsp->nonzero_bits |= bits;
    }

  /* Don't call num_sign_bit_copies if it cannot change anything.  */
  if (rsp->sign_bit_copies != 1)
    {
      num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
      if (reg_equal && maybe_ne (num, GET_MODE_PRECISION (GET_MODE (x))))
        {
          unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
          if (num == 0 || numeq > num)
            num = numeq;
        }
      if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
        rsp->sign_bit_copies = num;
    }
}

/* Called via note_stores.  If X is a pseudo that is narrower than
   HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.

   If we are setting only a portion of X and we can't figure out what
   portion, assume all bits will be used since we don't know what will
   be happening.

   Similarly, set how many bits of X are known to be copies of the sign bit
   at all locations in the function.  This is the smallest number implied
   by any set of X.  */

static void
set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
{
  rtx_insn *insn = (rtx_insn *) data;
  scalar_int_mode mode;

  if (REG_P (x)
      && REGNO (x) >= FIRST_PSEUDO_REGISTER
      /* If this register is undefined at the start of the file, we can't
         say what its contents were.  */
      && ! REGNO_REG_SET_P
           (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
      && is_a <scalar_int_mode> (GET_MODE (x), &mode)
      && HWI_COMPUTABLE_MODE_P (mode))
    {
      reg_stat_type *rsp = &reg_stat[REGNO (x)];

      if (set == 0 || GET_CODE (set) == CLOBBER)
        {
          rsp->nonzero_bits = GET_MODE_MASK (mode);
          rsp->sign_bit_copies = 1;
          return;
        }

      /* If this register is being initialized using itself, and the
         register is uninitialized in this basic block, and there are
         no LOG_LINKS which set the register, then part of the
         register is uninitialized.  In that case we can't assume
         anything about the number of nonzero bits.

         ??? We could do better if we checked this in
         reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
         could avoid making assumptions about the insn which initially
         sets the register, while still using the information in other
         insns.  We would have to be careful to check every insn
         involved in the combination.  */

      if (insn
          && reg_referenced_p (x, PATTERN (insn))
          && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
                               REGNO (x)))
        {
          struct insn_link *link;

          FOR_EACH_LOG_LINK (link, insn)
            if (dead_or_set_p (link->insn, x))
              break;
          if (!link)
            {
              rsp->nonzero_bits = GET_MODE_MASK (mode);
              rsp->sign_bit_copies = 1;
              return;
            }
        }

      /* If this is a complex assignment, see if we can convert it into a
         simple assignment.  */
      set = expand_field_assignment (set);

      /* If this is a simple assignment, or we have a paradoxical SUBREG,
         set what we know about X.  */

      if (SET_DEST (set) == x
          || (paradoxical_subreg_p (SET_DEST (set))
              && SUBREG_REG (SET_DEST (set)) == x))
        update_rsp_from_reg_equal (rsp, insn, set, x);
      else
        {
          rsp->nonzero_bits = GET_MODE_MASK (mode);
          rsp->sign_bit_copies = 1;
        }
    }
}
1714 \f
1715 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1716 optionally insns that were previously combined into I3 or that will be
1717 combined into the merger of INSN and I3. The order is PRED, PRED2,
1718 INSN, SUCC, SUCC2, I3.
1719
1720 Return 0 if the combination is not allowed for any reason.
1721
1722 If the combination is allowed, *PDEST will be set to the single
1723 destination of INSN and *PSRC to the single source, and this function
1724 will return 1. */
1725
1726 static int
1727 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1728 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1729 rtx *pdest, rtx *psrc)
1730 {
1731 int i;
1732 const_rtx set = 0;
1733 rtx src, dest;
1734 rtx_insn *p;
1735 rtx link;
1736 bool all_adjacent = true;
1737 int (*is_volatile_p) (const_rtx);
1738
1739 if (succ)
1740 {
1741 if (succ2)
1742 {
1743 if (next_active_insn (succ2) != i3)
1744 all_adjacent = false;
1745 if (next_active_insn (succ) != succ2)
1746 all_adjacent = false;
1747 }
1748 else if (next_active_insn (succ) != i3)
1749 all_adjacent = false;
1750 if (next_active_insn (insn) != succ)
1751 all_adjacent = false;
1752 }
1753 else if (next_active_insn (insn) != i3)
1754 all_adjacent = false;
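/* E.g. for a three-insn combination in which INSN and SUCC both feed I3,
   all_adjacent stays true only if INSN, SUCC and I3 are consecutive
   active insns; any unrelated insn in between forces the stricter
   reg_used_between_p and modified_between_p checks further down.  */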
1755
1756 /* Can combine only if previous insn is a SET of a REG or a SUBREG,
1757 or a PARALLEL consisting of such a SET and CLOBBERs.
1758
1759 If INSN has CLOBBER parallel parts, ignore them for our processing.
1760 By definition, these happen during the execution of the insn. When it
1761 is merged with another insn, all bets are off. If they are, in fact,
1762 needed and aren't also supplied in I3, they may be added by
1763 recog_for_combine. Otherwise, it won't match.
1764
1765 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1766 note.
1767
1768 Get the source and destination of INSN. If more than one, can't
1769 combine. */
1770
1771 if (GET_CODE (PATTERN (insn)) == SET)
1772 set = PATTERN (insn);
1773 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1774 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1775 {
1776 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1777 {
1778 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1779
1780 switch (GET_CODE (elt))
1781 {
1782 /* This is important to combine floating point insns
1783 for the SH4 port. */
1784 case USE:
1785 /* Combining an isolated USE doesn't make sense.
1786 We depend here on combinable_i3pat to reject them. */
1787 /* The code below this loop only verifies that the inputs of
1788 the SET in INSN do not change. We call reg_set_between_p
1789 to verify that the REG in the USE does not change between
1790 I3 and INSN.
1791 If the USE in INSN was for a pseudo register, the matching
1792 insn pattern will likely match any register; combining this
1793 with any other USE would only be safe if we knew that the
1794 used registers have identical values, or if there was
1795 something to tell them apart, e.g. different modes. For
1796 now, we forgo such complicated tests and simply disallow
1797 combining of USES of pseudo registers with any other USE. */
1798 if (REG_P (XEXP (elt, 0))
1799 && GET_CODE (PATTERN (i3)) == PARALLEL)
1800 {
1801 rtx i3pat = PATTERN (i3);
1802 int i = XVECLEN (i3pat, 0) - 1;
1803 unsigned int regno = REGNO (XEXP (elt, 0));
1804
1805 do
1806 {
1807 rtx i3elt = XVECEXP (i3pat, 0, i);
1808
1809 if (GET_CODE (i3elt) == USE
1810 && REG_P (XEXP (i3elt, 0))
1811 && (REGNO (XEXP (i3elt, 0)) == regno
1812 ? reg_set_between_p (XEXP (elt, 0),
1813 PREV_INSN (insn), i3)
1814 : regno >= FIRST_PSEUDO_REGISTER))
1815 return 0;
1816 }
1817 while (--i >= 0);
1818 }
1819 break;
1820
1821 /* We can ignore CLOBBERs. */
1822 case CLOBBER:
1823 break;
1824
1825 case SET:
1826 /* Ignore SETs whose result isn't used but not those that
1827 have side-effects. */
1828 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1829 && insn_nothrow_p (insn)
1830 && !side_effects_p (elt))
1831 break;
1832
1833 /* If we have already found a SET, this is a second one and
1834 so we cannot combine with this insn. */
1835 if (set)
1836 return 0;
1837
1838 set = elt;
1839 break;
1840
1841 default:
1842 /* Anything else means we can't combine. */
1843 return 0;
1844 }
1845 }
1846
1847 if (set == 0
1848 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1849 so don't do anything with it. */
1850 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1851 return 0;
1852 }
1853 else
1854 return 0;
1855
1856 if (set == 0)
1857 return 0;
1858
1859 /* The simplification in expand_field_assignment may call back to
1860 get_last_value, so set a safeguard here. */
1861 subst_low_luid = DF_INSN_LUID (insn);
1862
1863 set = expand_field_assignment (set);
1864 src = SET_SRC (set), dest = SET_DEST (set);
1865
1866 /* Do not eliminate a user-specified register if it is used in an
1867 asm input, because doing so may break the register asm usage
1868 defined in the GCC manual.
1869 Be aware that this may cover more cases than we expect, but this
1870 should be harmless. */
1871 if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1872 && extract_asm_operands (PATTERN (i3)))
1873 return 0;
1874
1875 /* Don't eliminate a store in the stack pointer. */
1876 if (dest == stack_pointer_rtx
1877 /* Don't combine with an insn that sets a register to itself if it has
1878 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1879 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1880 /* Can't merge an ASM_OPERANDS. */
1881 || GET_CODE (src) == ASM_OPERANDS
1882 /* Can't merge a function call. */
1883 || GET_CODE (src) == CALL
1884 /* Don't eliminate a function call argument. */
1885 || (CALL_P (i3)
1886 && (find_reg_fusage (i3, USE, dest)
1887 || (REG_P (dest)
1888 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1889 && global_regs[REGNO (dest)])))
1890 /* Don't substitute into an incremented register. */
1891 || FIND_REG_INC_NOTE (i3, dest)
1892 || (succ && FIND_REG_INC_NOTE (succ, dest))
1893 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1894 /* Don't substitute into a non-local goto, this confuses CFG. */
1895 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1896 /* Make sure that DEST is not used after INSN but before SUCC, or
1897 after SUCC and before SUCC2, or after SUCC2 but before I3. */
1898 || (!all_adjacent
1899 && ((succ2
1900 && (reg_used_between_p (dest, succ2, i3)
1901 || reg_used_between_p (dest, succ, succ2)))
1902 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
1903 || (!succ2 && !succ && reg_used_between_p (dest, insn, i3))
1904 || (succ
1905 /* SUCC and SUCC2 can be split halves from a PARALLEL; in
1906 that case SUCC is not in the insn stream, so use SUCC2
1907 instead for this test. */
1908 && reg_used_between_p (dest, insn,
1909 succ2
1910 && INSN_UID (succ) == INSN_UID (succ2)
1911 ? succ2 : succ))))
1912 /* Make sure that the value that is to be substituted for the register
1913 does not use any registers whose values alter in between. However,
1914 if the insns are adjacent, a use can't cross a set even though we
1915 think it might (this can happen for a sequence of insns each setting
1916 the same destination; last_set of that register might point to
1917 a NOTE). If INSN has a REG_EQUIV note, the register is always
1918 equivalent to the memory so the substitution is valid even if there
1919 are intervening stores. Also, don't move a volatile asm or
1920 UNSPEC_VOLATILE across any other insns. */
1921 || (! all_adjacent
1922 && (((!MEM_P (src)
1923 || ! find_reg_note (insn, REG_EQUIV, src))
1924 && modified_between_p (src, insn, i3))
1925 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1926 || GET_CODE (src) == UNSPEC_VOLATILE))
1927 /* Don't combine across a CALL_INSN, because that would possibly
1928 change whether the life span of some REGs crosses calls or not,
1929 and it is a pain to update that information.
1930 Exception: if source is a constant, moving it later can't hurt.
1931 Accept that as a special case. */
1932 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
1933 return 0;
1934
1935 /* DEST must be a REG. */
1936 if (REG_P (dest))
1937 {
1938 /* If register alignment is being enforced for multi-word items in all
1939 cases except for parameters, it is possible to have a register copy
1940 insn referencing a hard register that is not allowed to contain the
1941 mode being copied and which would not be valid as an operand of most
1942 insns. Eliminate this problem by not combining with such an insn.
1943
1944 Also, on some machines we don't want to extend the life of a hard
1945 register. */
1946
1947 if (REG_P (src)
1948 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
1949 && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
1950 /* Don't extend the life of a hard register unless it is
1951 user variable (if we have few registers) or it can't
1952 fit into the desired register (meaning something special
1953 is going on).
1954 Also avoid substituting a return register into I3, because
1955 reload can't handle a conflict with constraints of other
1956 inputs. */
1957 || (REGNO (src) < FIRST_PSEUDO_REGISTER
1958 && !targetm.hard_regno_mode_ok (REGNO (src),
1959 GET_MODE (src)))))
1960 return 0;
1961 }
1962 else
1963 return 0;
1964
1965
1966 if (GET_CODE (PATTERN (i3)) == PARALLEL)
1967 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
1968 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
1969 {
1970 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
1971
1972 /* If the clobber represents an earlyclobber operand, we must not
1973 substitute an expression containing the clobbered register.
1974 As we do not analyze the constraint strings here, we have to
1975 make the conservative assumption. However, if the register is
1976 a fixed hard reg, the clobber cannot represent any operand;
1977 we leave it up to the machine description to either accept or
1978 reject use-and-clobber patterns. */
1979 if (!REG_P (reg)
1980 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
1981 || !fixed_regs[REGNO (reg)])
1982 if (reg_overlap_mentioned_p (reg, src))
1983 return 0;
1984 }
1985
1986 /* If INSN contains anything volatile, or is an `asm' (whether volatile
1987 or not), reject, unless nothing volatile comes between it and I3. */
1988
1989 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
1990 {
1991 /* Make sure neither succ nor succ2 contains a volatile reference. */
1992 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
1993 return 0;
1994 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
1995 return 0;
1996 /* We'll check insns between INSN and I3 below. */
1997 }
1998
1999 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2000 to be an explicit register variable, and was chosen for a reason. */
2001
2002 if (GET_CODE (src) == ASM_OPERANDS
2003 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2004 return 0;
2005
2006 /* If INSN contains volatile references (specifically volatile MEMs),
2007 we cannot combine across any other volatile references.
2008 Even if INSN doesn't contain volatile references, any intervening
2009 volatile insn might affect machine state. */
2010
2011 is_volatile_p = volatile_refs_p (PATTERN (insn))
2012 ? volatile_refs_p
2013 : volatile_insn_p;
2014
2015 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2016 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2017 return 0;
2018
2019 /* If INSN contains an autoincrement or autodecrement, make sure that
2020 register is not used between there and I3, and not already used in
2021 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2022 Also insist that I3 not be a jump if using LRA; if it were one
2023 and the incremented register were spilled, we would lose.
2024 Reload handles this correctly. */
2025
2026 if (AUTO_INC_DEC)
2027 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2028 if (REG_NOTE_KIND (link) == REG_INC
2029 && ((JUMP_P (i3) && targetm.lra_p ())
2030 || reg_used_between_p (XEXP (link, 0), insn, i3)
2031 || (pred != NULL_RTX
2032 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2033 || (pred2 != NULL_RTX
2034 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2035 || (succ != NULL_RTX
2036 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2037 || (succ2 != NULL_RTX
2038 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2039 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2040 return 0;
2041
2042 /* If we get here, we have passed all the tests and the combination is
2043 to be allowed. */
2044
2045 *pdest = dest;
2046 *psrc = src;
2047
2048 return 1;
2049 }
2050 \f
2051 /* LOC is the location within I3 that contains its pattern or the component
2052 of a PARALLEL of the pattern. We validate that it is valid for combining.
2053
2054 One problem is that if I3 modifies its output, as opposed to replacing it
2055 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST, as
2056 doing so would produce an insn that is not equivalent to the original insns.
2057
2058 Consider:
2059
2060 (set (reg:DI 101) (reg:DI 100))
2061 (set (subreg:SI (reg:DI 101) 0) <foo>)
2062
2063 This is NOT equivalent to:
2064
2065 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2066 (set (reg:DI 101) (reg:DI 100))])
2067
2068 Not only does this modify 100 (in which case it might still be valid
2069 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2070
2071 We can also run into a problem if I2 sets a register that I1
2072 uses and I1 gets directly substituted into I3 (not via I2). In that
2073 case, we would be getting the wrong value of I2DEST into I3, so we
2074 must reject the combination. This case occurs when I2 and I1 both
2075 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2076 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2077 of a SET must prevent combination from occurring. The same situation
2078 can occur for I0, in which case I0_NOT_IN_SRC is set.
2079
2080 Before doing the above check, we first try to expand a field assignment
2081 into a set of logical operations.
2082
2083 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2084 we place a register that is both set and used within I3. If more than one
2085 such register is detected, we fail.
2086
2087 Return 1 if the combination is valid, zero otherwise. */
2088
2089 static int
2090 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2091 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2092 {
2093 rtx x = *loc;
2094
2095 if (GET_CODE (x) == SET)
2096 {
2097 rtx set = x;
2098 rtx dest = SET_DEST (set);
2099 rtx src = SET_SRC (set);
2100 rtx inner_dest = dest;
2101 rtx subdest;
2102
2103 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2104 || GET_CODE (inner_dest) == SUBREG
2105 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2106 inner_dest = XEXP (inner_dest, 0);
2107
2108 /* Check for the case where I3 modifies its output, as discussed
2109 above. We don't want to prevent pseudos from being combined
2110 into the address of a MEM, so only prevent the combination if
2111 i1 or i2 set the same MEM. */
2112 if ((inner_dest != dest &&
2113 (!MEM_P (inner_dest)
2114 || rtx_equal_p (i2dest, inner_dest)
2115 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2116 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2117 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2118 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2119 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2120
2121 /* This is the same test done in can_combine_p except we can't test
2122 all_adjacent; we don't have to, since this instruction will stay
2123 in place, thus we are not considering increasing the lifetime of
2124 INNER_DEST.
2125
2126 Also, if this insn sets a function argument, combining it with
2127 something that might need a spill could clobber a previous
2128 function argument; the all_adjacent test in can_combine_p also
2129 checks this; here, we do a more specific test for this case. */
2130
2131 || (REG_P (inner_dest)
2132 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2133 && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
2134 GET_MODE (inner_dest)))
2135 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2136 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2137 return 0;
2138
2139 /* If DEST is used in I3, it is being killed in this insn, so
2140 record that for later. We have to consider paradoxical
2141 subregs here, since they kill the whole register, but we
2142 ignore partial subregs, STRICT_LOW_PART, etc.
2143 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2144 STACK_POINTER_REGNUM, since these are always considered to be
2145 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2146 subdest = dest;
2147 if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
2148 subdest = SUBREG_REG (subdest);
2149 if (pi3dest_killed
2150 && REG_P (subdest)
2151 && reg_referenced_p (subdest, PATTERN (i3))
2152 && REGNO (subdest) != FRAME_POINTER_REGNUM
2153 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2154 || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2155 && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2156 || (REGNO (subdest) != ARG_POINTER_REGNUM
2157 || ! fixed_regs [REGNO (subdest)]))
2158 && REGNO (subdest) != STACK_POINTER_REGNUM)
2159 {
2160 if (*pi3dest_killed)
2161 return 0;
2162
2163 *pi3dest_killed = subdest;
2164 }
2165 }
2166
2167 else if (GET_CODE (x) == PARALLEL)
2168 {
2169 int i;
2170
2171 for (i = 0; i < XVECLEN (x, 0); i++)
2172 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2173 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2174 return 0;
2175 }
2176
2177 return 1;
2178 }
2179 \f
2180 /* Return 1 if X is an arithmetic expression that contains a multiplication
2181 or division. We don't count multiplications by powers of two here. */
2182
2183 static int
2184 contains_muldiv (rtx x)
2185 {
2186 switch (GET_CODE (x))
2187 {
2188 case MOD: case DIV: case UMOD: case UDIV:
2189 return 1;
2190
2191 case MULT:
2192 return ! (CONST_INT_P (XEXP (x, 1))
2193 && pow2p_hwi (UINTVAL (XEXP (x, 1))));
2194 default:
2195 if (BINARY_P (x))
2196 return contains_muldiv (XEXP (x, 0))
2197 || contains_muldiv (XEXP (x, 1));
2198
2199 if (UNARY_P (x))
2200 return contains_muldiv (XEXP (x, 0));
2201
2202 return 0;
2203 }
2204 }
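/* For example, contains_muldiv returns 1 for
   (plus:SI (mult:SI (reg:SI 65) (const_int 3)) (reg:SI 66))
   but 0 for (mult:SI (reg:SI 65) (const_int 4)), since multiplying by a
   power of two is really a shift.  (Register numbers are made up.)  */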
2205 \f
2206 /* Determine whether INSN can be used in a combination. Return nonzero if
2207 not. This is used in try_combine to detect early some cases where we
2208 can't perform combinations. */
2209
2210 static int
2211 cant_combine_insn_p (rtx_insn *insn)
2212 {
2213 rtx set;
2214 rtx src, dest;
2215
2216 /* If this isn't really an insn, we can't do anything.
2217 This can occur when flow deletes an insn that it has merged into an
2218 auto-increment address. */
2219 if (!NONDEBUG_INSN_P (insn))
2220 return 1;
2221
2222 /* Never combine loads and stores involving hard regs that are likely
2223 to be spilled. The register allocator can usually handle such
2224 reg-reg moves by tying. If we allow the combiner to make
2225 substitutions of likely-spilled regs, reload might die.
2226 As an exception, we allow combinations involving fixed regs; these are
2227 not available to the register allocator so there's no risk involved. */
2228
2229 set = single_set (insn);
2230 if (! set)
2231 return 0;
2232 src = SET_SRC (set);
2233 dest = SET_DEST (set);
2234 if (GET_CODE (src) == SUBREG)
2235 src = SUBREG_REG (src);
2236 if (GET_CODE (dest) == SUBREG)
2237 dest = SUBREG_REG (dest);
2238 if (REG_P (src) && REG_P (dest)
2239 && ((HARD_REGISTER_P (src)
2240 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2241 #ifdef LEAF_REGISTERS
2242 && ! LEAF_REGISTERS [REGNO (src)])
2243 #else
2244 )
2245 #endif
2246 || (HARD_REGISTER_P (dest)
2247 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2248 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2249 return 1;
2250
2251 return 0;
2252 }
2253
2254 struct likely_spilled_retval_info
2255 {
2256 unsigned regno, nregs;
2257 unsigned mask;
2258 };
2259
2260 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2261 hard registers that are known to be written to / clobbered in full. */
2262 static void
2263 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2264 {
2265 struct likely_spilled_retval_info *const info =
2266 (struct likely_spilled_retval_info *) data;
2267 unsigned regno, nregs;
2268 unsigned new_mask;
2269
2270 if (!REG_P (XEXP (set, 0)))
2271 return;
2272 regno = REGNO (x);
2273 if (regno >= info->regno + info->nregs)
2274 return;
2275 nregs = REG_NREGS (x);
2276 if (regno + nregs <= info->regno)
2277 return;
2278 new_mask = (2U << (nregs - 1)) - 1;
2279 if (regno < info->regno)
2280 new_mask >>= info->regno - regno;
2281 else
2282 new_mask <<= regno - info->regno;
2283 info->mask &= ~new_mask;
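  /* Worked example with made-up numbers: for info->regno == 10 and
     info->nregs == 4 the mask covers hard regs 10..13.  A store to a
     two-register value at regno == 12 gives new_mask == 3, shifted left
     by 12 - 10 == 2 to 0xc, so the bits for regs 12 and 13 are cleared.  */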
2284 }
2285
2286 /* Return nonzero iff part of the return value is live during INSN, and
2287 it is likely spilled. This can happen when more than one insn is needed
2288 to copy the return value, e.g. when we consider combining into the
2289 second copy insn for a complex value. */
2290
2291 static int
2292 likely_spilled_retval_p (rtx_insn *insn)
2293 {
2294 rtx_insn *use = BB_END (this_basic_block);
2295 rtx reg;
2296 rtx_insn *p;
2297 unsigned regno, nregs;
2298 /* We assume here that no machine mode needs more than
2299 32 hard registers when the value overlaps with a register
2300 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2301 unsigned mask;
2302 struct likely_spilled_retval_info info;
2303
2304 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2305 return 0;
2306 reg = XEXP (PATTERN (use), 0);
2307 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2308 return 0;
2309 regno = REGNO (reg);
2310 nregs = REG_NREGS (reg);
2311 if (nregs == 1)
2312 return 0;
2313 mask = (2U << (nregs - 1)) - 1;
2314
2315 /* Disregard parts of the return value that are set later. */
2316 info.regno = regno;
2317 info.nregs = nregs;
2318 info.mask = mask;
2319 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2320 if (INSN_P (p))
2321 note_stores (p, likely_spilled_retval_1, &info);
2322 mask = info.mask;
2323
2324 /* Check if any of the (probably) live return value registers is
2325 likely spilled. */
2326 nregs--;
2327 do
2328 {
2329 if ((mask & 1 << nregs)
2330 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2331 return 1;
2332 } while (nregs--);
2333 return 0;
2334 }
2335
2336 /* Adjust INSN after we made a change to its destination.
2337
2338 Changing the destination can invalidate notes that say something about
2339 the results of the insn and a LOG_LINK pointing to the insn. */
2340
2341 static void
2342 adjust_for_new_dest (rtx_insn *insn)
2343 {
2344 /* For notes, be conservative and simply remove them. */
2345 remove_reg_equal_equiv_notes (insn, true);
2346
2347 /* The new insn will have a destination that was previously the destination
2348 of an insn just above it. Call distribute_links to make a LOG_LINK from
2349 the next use of that destination. */
2350
2351 rtx set = single_set (insn);
2352 gcc_assert (set);
2353
2354 rtx reg = SET_DEST (set);
2355
2356 while (GET_CODE (reg) == ZERO_EXTRACT
2357 || GET_CODE (reg) == STRICT_LOW_PART
2358 || GET_CODE (reg) == SUBREG)
2359 reg = XEXP (reg, 0);
2360 gcc_assert (REG_P (reg));
2361
2362 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2363
2364 df_insn_rescan (insn);
2365 }
2366
2367 /* Return TRUE if combine can reuse reg X in mode MODE.
2368 ADDED_SETS is nonzero if the original set is still required. */
2369 static bool
2370 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2371 {
2372 unsigned int regno;
2373
2374 if (!REG_P (x))
2375 return false;
2376
2377 /* Don't change between modes with different underlying register sizes,
2378 since this could lead to invalid subregs. */
2379 if (maybe_ne (REGMODE_NATURAL_SIZE (mode),
2380 REGMODE_NATURAL_SIZE (GET_MODE (x))))
2381 return false;
2382
2383 regno = REGNO (x);
2384 /* Allow hard registers if the new mode is legal, and occupies no more
2385 registers than the old mode. */
2386 if (regno < FIRST_PSEUDO_REGISTER)
2387 return (targetm.hard_regno_mode_ok (regno, mode)
2388 && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
2389
2390 /* Or a pseudo that is only used once. */
2391 return (regno < reg_n_sets_max
2392 && REG_N_SETS (regno) == 1
2393 && !added_sets
2394 && !REG_USERVAR_P (x));
2395 }
2396
2397
2398 /* Check whether X, the destination of a set, refers to part of
2399 the register specified by REG. */
2400
2401 static bool
2402 reg_subword_p (rtx x, rtx reg)
2403 {
2404 /* Check that reg is an integer mode register. */
2405 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2406 return false;
2407
2408 if (GET_CODE (x) == STRICT_LOW_PART
2409 || GET_CODE (x) == ZERO_EXTRACT)
2410 x = XEXP (x, 0);
2411
2412 return GET_CODE (x) == SUBREG
2413 && !paradoxical_subreg_p (x)
2414 && SUBREG_REG (x) == reg
2415 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2416 }
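/* For instance (made-up registers), this returns true when X is
   (strict_low_part (subreg:HI (reg:SI 70) 0)) and REG is (reg:SI 70),
   but false for a paradoxical destination such as
   (subreg:DI (reg:SI 70) 0).  */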
2417
2418 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2419 by an arbitrary number of CLOBBERs. */
2420 static bool
2421 is_parallel_of_n_reg_sets (rtx pat, int n)
2422 {
2423 if (GET_CODE (pat) != PARALLEL)
2424 return false;
2425
2426 int len = XVECLEN (pat, 0);
2427 if (len < n)
2428 return false;
2429
2430 int i;
2431 for (i = 0; i < n; i++)
2432 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2433 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2434 return false;
2435 for ( ; i < len; i++)
2436 switch (GET_CODE (XVECEXP (pat, 0, i)))
2437 {
2438 case CLOBBER:
2439 if (XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2440 return false;
2441 break;
2442 default:
2443 return false;
2444 }
2445 return true;
2446 }
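/* A made-up example, for N == 2:
   (parallel [(set (reg:CC 17) (compare:CC ...))
              (set (reg:SI 65) (plus:SI ...))
              (clobber (scratch:SI))])
   qualifies: the first two elements are SETs of registers and trailing
   CLOBBERs are allowed.  A PARALLEL containing (clobber (const_int 0))
   is rejected.  */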
2447
2448 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2449 CLOBBERs), can be split into individual SETs in that order, without
2450 changing semantics. */
2451 static bool
2452 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2453 {
2454 if (!insn_nothrow_p (insn))
2455 return false;
2456
2457 rtx pat = PATTERN (insn);
2458
2459 int i, j;
2460 for (i = 0; i < n; i++)
2461 {
2462 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2463 return false;
2464
2465 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2466
2467 for (j = i + 1; j < n; j++)
2468 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2469 return false;
2470 }
2471
2472 return true;
2473 }
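/* For instance,
   (parallel [(set (reg:SI 65) (reg:SI 66))
              (set (reg:SI 67) (reg:SI 65))])
   cannot be split in order: the second SET reads reg 65, which the
   first SET writes.  (Register numbers are made up.)  */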
2474
2475 /* Return whether X is just a single_set, with the source
2476 a general_operand. */
2477 static bool
2478 is_just_move (rtx_insn *x)
2479 {
2480 rtx set = single_set (x);
2481 if (!set)
2482 return false;
2483
2484 return general_operand (SET_SRC (set), VOIDmode);
2485 }
2486
2487 /* Callback function to count autoincs. */
2488
2489 static int
2490 count_auto_inc (rtx, rtx, rtx, rtx, rtx, void *arg)
2491 {
2492 (*((int *) arg))++;
2493
2494 return 0;
2495 }
2496
2497 /* Try to combine the insns I0, I1 and I2 into I3.
2498 Here I0, I1 and I2 appear earlier than I3.
2499 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2500 I3.
2501
2502 If we are combining more than two insns and the resulting insn is not
2503 recognized, try splitting it into two insns. If that happens, I2 and I3
2504 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2505 Otherwise, I0, I1 and I2 are pseudo-deleted.
2506
2507 Return 0 if the combination does not work. Then nothing is changed.
2508 If we did the combination, return the insn at which combine should
2509 resume scanning.
2510
2511 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2512 new direct jump instruction.
2513
2514 LAST_COMBINED_INSN is either I3, or some insn after I3 that was
2515 passed as I3 to an earlier try_combine within the same basic
2516 block. */
2517
2518 static rtx_insn *
2519 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2520 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2521 {
2522 /* New patterns for I3 and I2, respectively. */
2523 rtx newpat, newi2pat = 0;
2524 rtvec newpat_vec_with_clobbers = 0;
2525 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2526 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2527 dead. */
2528 int added_sets_0, added_sets_1, added_sets_2;
2529 /* Total number of SETs to put into I3. */
2530 int total_sets;
2531 /* Nonzero if I2's or I1's body now appears in I3. */
2532 int i2_is_used = 0, i1_is_used = 0;
2533 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2534 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2535 /* Contains I3 if the destination of I3 is used in its source, which means
2536 that the old life of I3 is being killed. If that usage is placed into
2537 I2 and not in I3, a REG_DEAD note must be made. */
2538 rtx i3dest_killed = 0;
2539 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2540 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2541 /* Copy of SET_SRC of I1 and I0, if needed. */
2542 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2543 /* Set if I2DEST was reused as a scratch register. */
2544 bool i2scratch = false;
2545 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2546 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2547 /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC. */
2548 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2549 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2550 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2551 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2552 /* Notes that must be added to REG_NOTES in I3 and I2. */
2553 rtx new_i3_notes, new_i2_notes;
2554 /* Notes that we substituted I3 into I2 instead of the normal case. */
2555 int i3_subst_into_i2 = 0;
2556 /* Notes that I1, I2 or I3 is a MULT operation. */
2557 int have_mult = 0;
2558 int swap_i2i3 = 0;
2559 int split_i2i3 = 0;
2560 int changed_i3_dest = 0;
2561 bool i2_was_move = false, i3_was_move = false;
2562 int n_auto_inc = 0;
2563
2564 int maxreg;
2565 rtx_insn *temp_insn;
2566 rtx temp_expr;
2567 struct insn_link *link;
2568 rtx other_pat = 0;
2569 rtx new_other_notes;
2570 int i;
2571 scalar_int_mode dest_mode, temp_mode;
2572 bool has_non_call_exception = false;
2573
2574 /* Immediately return if any of I0, I1 or I2 are the same insn (I3 can
2575 never be). */
2576 if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2577 return 0;
2578
2579 /* Only try four-insn combinations when there's high likelihood of
2580 success. Look for simple insns, such as loads of constants or
2581 binary operations involving a constant. */
2582 if (i0)
2583 {
2584 int i;
2585 int ngood = 0;
2586 int nshift = 0;
2587 rtx set0, set3;
2588
2589 if (!flag_expensive_optimizations)
2590 return 0;
2591
2592 for (i = 0; i < 4; i++)
2593 {
2594 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2595 rtx set = single_set (insn);
2596 rtx src;
2597 if (!set)
2598 continue;
2599 src = SET_SRC (set);
2600 if (CONSTANT_P (src))
2601 {
2602 ngood += 2;
2603 break;
2604 }
2605 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2606 ngood++;
2607 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2608 || GET_CODE (src) == LSHIFTRT)
2609 nshift++;
2610 }
2611
2612 /* If I0 loads from memory and I3 stores to the same memory, then I1 and I2
2613 are likely manipulating its value. Ideally we'll be able to combine
2614 all four insns into a bitfield insertion of some kind.
2615
2616 Note the source in I0 might be inside a sign/zero extension and the
2617 memory modes in I0 and I3 might be different. So extract the address
2618 from the destination of I3 and search for it in the source of I0.
2619
2620 In the event that there's a match but the source/dest do not actually
2621 refer to the same memory, the worst that happens is we try some
2622 combinations that we wouldn't have otherwise. */
2623 if ((set0 = single_set (i0))
2624 /* Ensure the source of SET0 is a MEM, possibly buried inside
2625 an extension. */
2626 && (GET_CODE (SET_SRC (set0)) == MEM
2627 || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2628 || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2629 && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2630 && (set3 = single_set (i3))
2631 /* Ensure the destination of SET3 is a MEM. */
2632 && GET_CODE (SET_DEST (set3)) == MEM
2633 /* Would it be better to extract the base address for the MEM
2634 in SET3 and look for that? I don't have cases where it matters
2635 but I could envision such cases. */
2636 && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2637 ngood += 2;
2638
2639 if (ngood < 2 && nshift < 2)
2640 return 0;
2641 }
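  /* A sketch of the heuristic above: one insn whose source is a constant
     immediately yields ngood == 2; otherwise two binary operations with
     a constant operand (ngood) or two shifts (nshift) are needed, and
     the matching I0-load/I3-store case likewise adds 2.  Anything less
     abandons the four-insn attempt up front.  */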
2642
2643 /* Exit early if one of the insns involved can't be used for
2644 combinations. */
2645 if (CALL_P (i2)
2646 || (i1 && CALL_P (i1))
2647 || (i0 && CALL_P (i0))
2648 || cant_combine_insn_p (i3)
2649 || cant_combine_insn_p (i2)
2650 || (i1 && cant_combine_insn_p (i1))
2651 || (i0 && cant_combine_insn_p (i0))
2652 || likely_spilled_retval_p (i3))
2653 return 0;
2654
2655 combine_attempts++;
2656 undobuf.other_insn = 0;
2657
2658 /* Reset the hard register usage information. */
2659 CLEAR_HARD_REG_SET (newpat_used_regs);
2660
2661 if (dump_file && (dump_flags & TDF_DETAILS))
2662 {
2663 if (i0)
2664 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2665 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2666 else if (i1)
2667 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2668 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2669 else
2670 fprintf (dump_file, "\nTrying %d -> %d:\n",
2671 INSN_UID (i2), INSN_UID (i3));
2672
2673 if (i0)
2674 dump_insn_slim (dump_file, i0);
2675 if (i1)
2676 dump_insn_slim (dump_file, i1);
2677 dump_insn_slim (dump_file, i2);
2678 dump_insn_slim (dump_file, i3);
2679 }
2680
2681 /* If multiple insns feed into one of I2 or I3, they can be in any
2682 order. To simplify the code below, reorder them in sequence. */
2683 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2684 std::swap (i0, i2);
2685 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2686 std::swap (i0, i1);
2687 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2688 std::swap (i1, i2);
2689
2690 added_links_insn = 0;
2691 added_notes_insn = 0;
2692
2693 /* First check for one important special case that the code below will
2694 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2695 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2696 we may be able to replace that destination with the destination of I3.
2697 This occurs in the common code where we compute both a quotient and
2698 remainder into a structure, in which case we want to do the computation
2699 directly into the structure to avoid register-register copies.
2700
2701 Note that this case handles both multiple sets in I2 and also cases
2702 where I2 has a number of CLOBBERs inside the PARALLEL.
2703
2704 We make very conservative checks below and only try to handle the
2705 most common cases of this. For example, we only handle the case
2706 where I2 and I3 are adjacent to avoid making difficult register
2707 usage tests. */
2708
2709 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2710 && REG_P (SET_SRC (PATTERN (i3)))
2711 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2712 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2713 && GET_CODE (PATTERN (i2)) == PARALLEL
2714 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2715 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2716 below would need to check what is inside (and reg_overlap_mentioned_p
2717 doesn't support those codes anyway). Don't allow those destinations;
2718 the resulting insn isn't likely to be recognized anyway. */
2719 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2720 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2721 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2722 SET_DEST (PATTERN (i3)))
2723 && next_active_insn (i2) == i3)
2724 {
2725 rtx p2 = PATTERN (i2);
2726
2727 /* Make sure that the destination of I3,
2728 which we are going to substitute into one output of I2,
2729 is not used within another output of I2. We must avoid making this:
2730 (parallel [(set (mem (reg 69)) ...)
2731 (set (reg 69) ...)])
2732 which is not well-defined as to order of actions.
2733 (Besides, reload can't handle output reloads for this.)
2734
2735 The problem can also happen if the dest of I3 is a memory ref,
2736 if another dest in I2 is an indirect memory ref.
2737
2738 Neither can this PARALLEL be an asm. We do not allow combining
2739 that usually (see can_combine_p), so do not here either. */
2740 bool ok = true;
2741 for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2742 {
2743 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2744 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2745 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2746 SET_DEST (XVECEXP (p2, 0, i))))
2747 ok = false;
2748 else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2749 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2750 ok = false;
2751 }
2752
2753 if (ok)
2754 for (i = 0; i < XVECLEN (p2, 0); i++)
2755 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2756 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2757 {
2758 combine_merges++;
2759
2760 subst_insn = i3;
2761 subst_low_luid = DF_INSN_LUID (i2);
2762
2763 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2764 i2src = SET_SRC (XVECEXP (p2, 0, i));
2765 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2766 i2dest_killed = dead_or_set_p (i2, i2dest);
2767
2768 /* Replace the dest in I2 with our dest and make the resulting
2769 insn the new pattern for I3. Then skip to where we validate
2770 the pattern. Everything was set up above. */
2771 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2772 newpat = p2;
2773 i3_subst_into_i2 = 1;
2774 goto validate_replacement;
2775 }
2776 }
2777
2778 /* If I2 is setting a pseudo to a constant and I3 is setting some
2779 sub-part of it to another constant, merge them by making a new
2780 constant. */
2781 if (i1 == 0
2782 && (temp_expr = single_set (i2)) != 0
2783 && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
2784 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2785 && GET_CODE (PATTERN (i3)) == SET
2786 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2787 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2788 {
2789 rtx dest = SET_DEST (PATTERN (i3));
2790 rtx temp_dest = SET_DEST (temp_expr);
2791 int offset = -1;
2792 int width = 0;
2793
2794 if (GET_CODE (dest) == ZERO_EXTRACT)
2795 {
2796 if (CONST_INT_P (XEXP (dest, 1))
2797 && CONST_INT_P (XEXP (dest, 2))
2798 && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
2799 &dest_mode))
2800 {
2801 width = INTVAL (XEXP (dest, 1));
2802 offset = INTVAL (XEXP (dest, 2));
2803 dest = XEXP (dest, 0);
2804 if (BITS_BIG_ENDIAN)
2805 offset = GET_MODE_PRECISION (dest_mode) - width - offset;
2806 }
2807 }
2808 else
2809 {
2810 if (GET_CODE (dest) == STRICT_LOW_PART)
2811 dest = XEXP (dest, 0);
2812 if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
2813 {
2814 width = GET_MODE_PRECISION (dest_mode);
2815 offset = 0;
2816 }
2817 }
2818
2819 if (offset >= 0)
2820 {
2821 /* If this is the low part, we're done. */
2822 if (subreg_lowpart_p (dest))
2823 ;
2824 /* Handle the case where inner is twice the size of outer. */
2825 else if (GET_MODE_PRECISION (temp_mode)
2826 == 2 * GET_MODE_PRECISION (dest_mode))
2827 offset += GET_MODE_PRECISION (dest_mode);
2828 /* Otherwise give up for now. */
2829 else
2830 offset = -1;
2831 }
2832
2833 if (offset >= 0)
2834 {
2835 rtx inner = SET_SRC (PATTERN (i3));
2836 rtx outer = SET_SRC (temp_expr);
2837
2838 wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
2839 rtx_mode_t (inner, dest_mode),
2840 offset, width);
2841
2842 combine_merges++;
2843 subst_insn = i3;
2844 subst_low_luid = DF_INSN_LUID (i2);
2845 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2846 i2dest = temp_dest;
2847 i2dest_killed = dead_or_set_p (i2, i2dest);
2848
2849 /* Replace the source in I2 with the new constant and make the
2850 resulting insn the new pattern for I3. Then skip to where we
2851 validate the pattern. Everything was set up above. */
2852 SUBST (SET_SRC (temp_expr),
2853 immed_wide_int_const (o, temp_mode));
2854
2855 newpat = PATTERN (i2);
2856
2857 /* The dest of I3 has been replaced with the dest of I2. */
2858 changed_i3_dest = 1;
2859 goto validate_replacement;
2860 }
2861 }
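  /* A made-up example of the constant merging above, on a 64-bit
     little-endian target: if I2 is
	(set (reg:DI 90) (const_int 0x1111222233334444))
     and I3 is
	(set (subreg:SI (reg:DI 90) 0) (const_int 0x5555)),
     wi::insert at offset 0 and width 32 yields the single constant
     0x1111222200005555, which becomes the new source of I2's set.  */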
2862
2863 /* If we have no I1 and I2 looks like:
2864 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2865 (set Y OP)])
2866 make up a dummy I1 that is
2867 (set Y OP)
2868 and change I2 to be
2869 (set (reg:CC X) (compare:CC Y (const_int 0)))
2870
2871 (We can ignore any trailing CLOBBERs.)
2872
2873 This undoes a previous combination and allows us to match a branch-and-
2874 decrement insn. */
2875
2876 if (i1 == 0
2877 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2878 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2879 == MODE_CC)
2880 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2881 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2882 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2883 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
2884 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2885 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2886 {
2887 /* We make I1 with the same INSN_UID as I2. This gives it
2888 the same DF_INSN_LUID for value tracking. Our fake I1 will
2889 never appear in the insn stream so giving it the same INSN_UID
2890 as I2 will not cause a problem. */
2891
2892 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2893 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
2894 -1, NULL_RTX);
2895 INSN_UID (i1) = INSN_UID (i2);
2896
2897 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2898 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2899 SET_DEST (PATTERN (i1)));
2900 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
2901 SUBST_LINK (LOG_LINKS (i2),
2902 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
2903 }
2904
2905 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
2906 make those two SETs separate I1 and I2 insns, and make an I0 that is
2907 the original I1. */
2908 if (i0 == 0
2909 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2910 && can_split_parallel_of_n_reg_sets (i2, 2)
2911 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2912 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)
2913 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2914 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2915 {
2916 /* If there is no I1, there is no I0 either. */
2917 i0 = i1;
2918
2919 /* We make I1 with the same INSN_UID as I2. This gives it
2920 the same DF_INSN_LUID for value tracking. Our fake I1 will
2921 never appear in the insn stream so giving it the same INSN_UID
2922 as I2 will not cause a problem. */
2923
2924 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2925 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
2926 -1, NULL_RTX);
2927 INSN_UID (i1) = INSN_UID (i2);
2928
2929 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
2930 }
2931
2932 /* Verify that I2 and maybe I1 and I0 can be combined into I3. */
2933 if (!can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src))
2934 {
2935 if (dump_file && (dump_flags & TDF_DETAILS))
2936 fprintf (dump_file, "Can't combine i2 into i3\n");
2937 undo_all ();
2938 return 0;
2939 }
2940 if (i1 && !can_combine_p (i1, i3, i0, NULL, i2, NULL, &i1dest, &i1src))
2941 {
2942 if (dump_file && (dump_flags & TDF_DETAILS))
2943 fprintf (dump_file, "Can't combine i1 into i3\n");
2944 undo_all ();
2945 return 0;
2946 }
2947 if (i0 && !can_combine_p (i0, i3, NULL, NULL, i1, i2, &i0dest, &i0src))
2948 {
2949 if (dump_file && (dump_flags & TDF_DETAILS))
2950 fprintf (dump_file, "Can't combine i0 into i3\n");
2951 undo_all ();
2952 return 0;
2953 }
2954
2955 /* With non-call exceptions we can end up trying to combine multiple
2956 insns with possible EH side effects. Make sure we can combine
2957 that into a single insn, which means there must be at most one insn
2958 in the combination with an EH side effect. */
2959 if (cfun->can_throw_non_call_exceptions)
2960 {
2961 if (find_reg_note (i3, REG_EH_REGION, NULL_RTX)
2962 || find_reg_note (i2, REG_EH_REGION, NULL_RTX)
2963 || (i1 && find_reg_note (i1, REG_EH_REGION, NULL_RTX))
2964 || (i0 && find_reg_note (i0, REG_EH_REGION, NULL_RTX)))
2965 {
2966 has_non_call_exception = true;
2967 if (insn_could_throw_p (i3)
2968 + insn_could_throw_p (i2)
2969 + (i1 ? insn_could_throw_p (i1) : 0)
2970 + (i0 ? insn_could_throw_p (i0) : 0) > 1)
2971 {
2972 if (dump_file && (dump_flags & TDF_DETAILS))
2973 fprintf (dump_file, "Can't combine multiple insns with EH "
2974 "side-effects\n");
2975 undo_all ();
2976 return 0;
2977 }
2978 }
2979 }
2980
2981 /* Record whether i2 and i3 are trivial moves. */
2982 i2_was_move = is_just_move (i2);
2983 i3_was_move = is_just_move (i3);
2984
2985 /* Record whether I2DEST is used in I2SRC and similarly for the other
2986 cases. Knowing this will help in register status updating below. */
2987 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
2988 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
2989 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
2990 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
2991 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
2992 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
2993 i2dest_killed = dead_or_set_p (i2, i2dest);
2994 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
2995 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
2996
2997 /* For the earlier insns, determine which of the subsequent ones they
2998 feed. */
2999 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
3000 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
3001 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
3002 : (!reg_overlap_mentioned_p (i1dest, i0dest)
3003 && reg_overlap_mentioned_p (i0dest, i2src))));
3004
3005 /* Ensure that I3's pattern can be the destination of combines. */
3006 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3007 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3008 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3009 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3010 &i3dest_killed))
3011 {
3012 undo_all ();
3013 return 0;
3014 }
3015
3016 /* See if any of the insns is a MULT operation. Unless one is, we will
3017 reject a combination that introduces one, since it must be slower. Be
3018 conservative here. */
3019 if (GET_CODE (i2src) == MULT
3020 || (i1 != 0 && GET_CODE (i1src) == MULT)
3021 || (i0 != 0 && GET_CODE (i0src) == MULT)
3022 || (GET_CODE (PATTERN (i3)) == SET
3023 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3024 have_mult = 1;
3025
3026 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3027 We used to do this EXCEPT in one case: I3 has a post-inc in an
3028 output operand. However, that exception can give rise to insns like
3029 mov r3,(r3)+
3030 which is a famous insn on the PDP-11 where the value of r3 used as the
3031 source was model-dependent. Avoid this sort of thing. */
3032
3033 #if 0
3034 if (!(GET_CODE (PATTERN (i3)) == SET
3035 && REG_P (SET_SRC (PATTERN (i3)))
3036 && MEM_P (SET_DEST (PATTERN (i3)))
3037 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3038 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3039 /* It's not the exception. */
3040 #endif
3041 if (AUTO_INC_DEC)
3042 {
3043 rtx link;
3044 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3045 if (REG_NOTE_KIND (link) == REG_INC
3046 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3047 || (i1 != 0
3048 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3049 {
3050 undo_all ();
3051 return 0;
3052 }
3053 }
3054
3055 /* See if the SETs in I1 or I2 need to be kept around in the merged
3056 instruction: whenever the value set there is still needed past I3.
3057 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3058
3059 For the SET in I1, we have two cases: if I1 and I2 independently feed
3060 into I3, the set in I1 needs to be kept around unless I1DEST dies
3061 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3062 in I1 needs to be kept around unless I1DEST dies or is set in either
3063 I2 or I3. The same considerations apply to I0. */
3064
3065 added_sets_2 = !dead_or_set_p (i3, i2dest);
3066
3067 if (i1)
3068 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3069 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3070 else
3071 added_sets_1 = 0;
3072
3073 if (i0)
3074 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3075 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3076 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3077 && dead_or_set_p (i2, i0dest)));
3078 else
3079 added_sets_0 = 0;
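  /* For instance (hypothetical pseudos): if I1 is (set (reg 70) X), I3
     uses reg 70, and reg 70 is neither dead nor set again by I3 (nor by
     I2 when I1 feeds I2), then added_sets_1 is nonzero and the merged
     pattern must re-emit (set (reg 70) X) alongside the combined result,
     since later insns may still read reg 70.  */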
3080
3081 /* We are about to copy insns for the case where they need to be kept
3082 around. Check that they can be copied in the merged instruction. */
3083
3084 if (targetm.cannot_copy_insn_p
3085 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3086 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3087 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3088 {
3089 undo_all ();
3090 return 0;
3091 }
3092
3093 /* We cannot safely duplicate volatile references in any case. */
3094
3095 if ((added_sets_2 && volatile_refs_p (PATTERN (i2)))
3096 || (added_sets_1 && volatile_refs_p (PATTERN (i1)))
3097 || (added_sets_0 && volatile_refs_p (PATTERN (i0))))
3098 {
3099 undo_all ();
3100 return 0;
3101 }
3102
3103 /* Count how many auto_inc expressions there were in the original insns;
3104 we need to have the same number in the resulting patterns. */
3105
3106 if (i0)
3107 for_each_inc_dec (PATTERN (i0), count_auto_inc, &n_auto_inc);
3108 if (i1)
3109 for_each_inc_dec (PATTERN (i1), count_auto_inc, &n_auto_inc);
3110 for_each_inc_dec (PATTERN (i2), count_auto_inc, &n_auto_inc);
3111 for_each_inc_dec (PATTERN (i3), count_auto_inc, &n_auto_inc);
3112
3113 /* If the set in I2 needs to be kept around, we must make a copy of
3114 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3115 PATTERN (I2), we are only substituting for the original I1DEST, not into
3116 an already-substituted copy. This also prevents making self-referential
3117 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3118 I2DEST. */
3119
3120 if (added_sets_2)
3121 {
3122 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3123 i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3124 else
3125 i2pat = copy_rtx (PATTERN (i2));
3126 }
3127
3128 if (added_sets_1)
3129 {
3130 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3131 i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3132 else
3133 i1pat = copy_rtx (PATTERN (i1));
3134 }
3135
3136 if (added_sets_0)
3137 {
3138 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3139 i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3140 else
3141 i0pat = copy_rtx (PATTERN (i0));
3142 }
3143
3144 combine_merges++;
3145
3146 /* Substitute in the latest insn for the regs set by the earlier ones. */
3147
3148 maxreg = max_reg_num ();
3149
3150 subst_insn = i3;
3151
3152 /* Many machines have insns that can both perform an
3153 arithmetic operation and set the condition code. These operations will
3154 be represented as a PARALLEL with the first element of the vector
3155 being a COMPARE of an arithmetic operation with the constant zero.
3156 The second element of the vector will set some pseudo to the result
3157 of the same arithmetic operation. If we simplify the COMPARE, we won't
3158 match such a pattern and so will generate an extra insn. Here we test
3159 for this case, where both the comparison and the operation result are
3160 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3161 I2SRC. Later we will make the PARALLEL that contains I2. */
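  /* A sketch of the shape being matched, with made-up registers: I2 is
	(set (reg:SI 70) (plus:SI (reg:SI 71) (reg:SI 72)))
     and I3 is
	(set (reg:CC 17) (compare:CC (reg:SI 70) (const_int 0))).
     Substituting I2SRC for I2DEST in I3 gives
	(set (reg:CC 17) (compare:CC (plus:SI (reg:SI 71) (reg:SI 72))
				     (const_int 0)))
     and, if reg 70 is still needed, the PARALLEL built later re-adds
     (set (reg:SI 70) (plus:SI (reg:SI 71) (reg:SI 72))).  */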
3162
3163 if (i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3164 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3165 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3166 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3167 {
3168 rtx newpat_dest;
3169 rtx *cc_use_loc = NULL;
3170 rtx_insn *cc_use_insn = NULL;
3171 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3172 machine_mode compare_mode, orig_compare_mode;
3173 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3174 scalar_int_mode mode;
3175
3176 newpat = PATTERN (i3);
3177 newpat_dest = SET_DEST (newpat);
3178 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3179
3180 if (undobuf.other_insn == 0
3181 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3182 &cc_use_insn)))
3183 {
3184 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3185 if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3186 compare_code = simplify_compare_const (compare_code, mode,
3187 op0, &op1);
3188 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3189 }
3190
3191 /* Do the rest only if op1 is const0_rtx, which may be the
3192 result of simplification. */
3193 if (op1 == const0_rtx)
3194 {
3195 /* If a single use of the CC is found, prepare to modify it
3196 when SELECT_CC_MODE returns a new CC-class mode, or when
3197 the above simplify_compare_const() returned a new comparison
3198 operator. undobuf.other_insn is assigned the CC use insn
3199 when modifying it. */
3200 if (cc_use_loc)
3201 {
3202 #ifdef SELECT_CC_MODE
3203 machine_mode new_mode
3204 = SELECT_CC_MODE (compare_code, op0, op1);
3205 if (new_mode != orig_compare_mode
3206 && can_change_dest_mode (SET_DEST (newpat),
3207 added_sets_2, new_mode))
3208 {
3209 unsigned int regno = REGNO (newpat_dest);
3210 compare_mode = new_mode;
3211 if (regno < FIRST_PSEUDO_REGISTER)
3212 newpat_dest = gen_rtx_REG (compare_mode, regno);
3213 else
3214 {
3215 subst_mode (regno, compare_mode);
3216 newpat_dest = regno_reg_rtx[regno];
3217 }
3218 }
3219 #endif
3220 /* Cases for modifying the CC-using comparison. */
3221 if (compare_code != orig_compare_code
3222 /* ??? Do we need to verify the zero rtx? */
3223 && XEXP (*cc_use_loc, 1) == const0_rtx)
3224 {
3225 /* Replace cc_use_loc with entire new RTX. */
3226 SUBST (*cc_use_loc,
3227 gen_rtx_fmt_ee (compare_code, GET_MODE (*cc_use_loc),
3228 newpat_dest, const0_rtx));
3229 undobuf.other_insn = cc_use_insn;
3230 }
3231 else if (compare_mode != orig_compare_mode)
3232 {
3233 /* Just replace the CC reg with a new mode. */
3234 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3235 undobuf.other_insn = cc_use_insn;
3236 }
3237 }
3238
3239 /* Now we modify the current newpat:
3240 First, SET_DEST(newpat) is updated if the CC mode has been
3241 altered. For targets without SELECT_CC_MODE, this should be
3242 optimized away. */
3243 if (compare_mode != orig_compare_mode)
3244 SUBST (SET_DEST (newpat), newpat_dest);
3245 /* This is always done to propagate i2src into newpat. */
3246 SUBST (SET_SRC (newpat),
3247 gen_rtx_COMPARE (compare_mode, op0, op1));
3248 /* Create new version of i2pat if needed; the below PARALLEL
3249 creation needs this to work correctly. */
3250 if (! rtx_equal_p (i2src, op0))
3251 i2pat = gen_rtx_SET (i2dest, op0);
3252 i2_is_used = 1;
3253 }
3254 }
3255
3256 if (i2_is_used == 0)
3257 {
3258 /* It is possible that the source of I2 or I1 may be performing
3259 an unneeded operation, such as a ZERO_EXTEND of something
3260 that is known to have the high part zero. Handle that case
3261 by letting subst look at the inner insns.
3262
3263 Another way to do this would be to have a function that tries
3264 to simplify a single insn instead of merging two or more
3265 insns. We don't do this because of the potential of infinite
3266 loops and because of the potential extra memory required.
3267 However, doing it the way we are is a bit of a kludge and
3268 doesn't catch all cases.
3269
3270 But only do this if -fexpensive-optimizations since it slows
3271 things down and doesn't usually win.
3272
3273 This is not done in the COMPARE case above because the
3274 unmodified I2PAT is used in the PARALLEL and so a pattern
3275 with a modified I2SRC would not match. */
3276
3277 if (flag_expensive_optimizations)
3278 {
3279 /* Pass pc_rtx so no substitutions are done, just
3280 simplifications. */
3281 if (i1)
3282 {
3283 subst_low_luid = DF_INSN_LUID (i1);
3284 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3285 }
3286
3287 subst_low_luid = DF_INSN_LUID (i2);
3288 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3289 }
3290
3291 n_occurrences = 0; /* `subst' counts here */
3292 subst_low_luid = DF_INSN_LUID (i2);
3293
3294 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3295 copy of I2SRC each time we substitute it, in order to avoid creating
3296 self-referential RTL when we will be substituting I1SRC for I1DEST
3297 later. Likewise if I0 feeds into I2, either directly or indirectly
3298 through I1, and I0DEST is in I0SRC. */
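/* For instance, with I1 (set r1 (plus:SI r1 (const_int 1))) feeding I2:
   if every use of I2SRC in NEWPAT shared a single rtx, the later
   replacement of r1 by (plus:SI r1 (const_int 1)) would rewrite all of
   them at once and could leave an expression that contains itself.  */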
3299 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3300 (i1_feeds_i2_n && i1dest_in_i1src)
3301 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3302 && i0dest_in_i0src));
3303 substed_i2 = 1;
3304
3305 /* Record whether I2's body now appears within I3's body. */
3306 i2_is_used = n_occurrences;
3307 }
3308
3309 /* If we already got a failure, don't try to do more. Otherwise, try to
3310 substitute I1 if we have it. */
3311
3312 if (i1 && GET_CODE (newpat) != CLOBBER)
3313 {
3314 /* Before we can do this substitution, we must redo the test done
3315 above (see detailed comments there) that ensures I1DEST isn't
3316 mentioned in any SETs in NEWPAT that are field assignments. */
3317 if (!combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3318 0, 0, 0))
3319 {
3320 undo_all ();
3321 return 0;
3322 }
3323
3324 n_occurrences = 0;
3325 subst_low_luid = DF_INSN_LUID (i1);
3326
3327 /* If the following substitution will modify I1SRC, make a copy of it
3328 for the case where it is substituted for I1DEST in I2PAT later. */
3329 if (added_sets_2 && i1_feeds_i2_n)
3330 i1src_copy = copy_rtx (i1src);
3331
3332 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3333 copy of I1SRC each time we substitute it, in order to avoid creating
3334 self-referential RTL when we will be substituting I0SRC for I0DEST
3335 later. */
3336 newpat = subst (newpat, i1dest, i1src, 0, 0,
3337 i0_feeds_i1_n && i0dest_in_i0src);
3338 substed_i1 = 1;
3339
3340 /* Record whether I1's body now appears within I3's body. */
3341 i1_is_used = n_occurrences;
3342 }
3343
3344 /* Likewise for I0 if we have it. */
3345
3346 if (i0 && GET_CODE (newpat) != CLOBBER)
3347 {
3348 if (!combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3349 0, 0, 0))
3350 {
3351 undo_all ();
3352 return 0;
3353 }
3354
3355 /* If the following substitution will modify I0SRC, make a copy of it
3356 for the case where it is substituted for I0DEST in I1PAT later. */
3357 if (added_sets_1 && i0_feeds_i1_n)
3358 i0src_copy = copy_rtx (i0src);
3359 /* And a copy for I0DEST in I2PAT substitution. */
3360 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3361 || (i0_feeds_i2_n)))
3362 i0src_copy2 = copy_rtx (i0src);
3363
3364 n_occurrences = 0;
3365 subst_low_luid = DF_INSN_LUID (i0);
3366 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3367 substed_i0 = 1;
3368 }
3369
3370 if (n_auto_inc)
3371 {
3372 int new_n_auto_inc = 0;
3373 for_each_inc_dec (newpat, count_auto_inc, &new_n_auto_inc);
3374
3375 if (n_auto_inc != new_n_auto_inc)
3376 {
3377 if (dump_file && (dump_flags & TDF_DETAILS))
3378 fprintf (dump_file, "Number of auto_inc expressions changed\n");
3379 undo_all ();
3380 return 0;
3381 }
3382 }
3383
3384 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3385 to count all the ways that I2SRC and I1SRC can be used. */
3386 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3387 && i2_is_used + added_sets_2 > 1)
3388 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3389 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3390 > 1))
3391 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3392 && (n_occurrences + added_sets_0
3393 + (added_sets_1 && i0_feeds_i1_n)
3394 + (added_sets_2 && i0_feeds_i2_n)
3395 > 1))
3396 /* Fail if we tried to make a new register. */
3397 || max_reg_num () != maxreg
3398 /* Fail if we couldn't do something and have a CLOBBER. */
3399 || GET_CODE (newpat) == CLOBBER
3400 /* Fail if this new pattern is a MULT and we didn't have one before
3401 at the outer level. */
3402 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3403 && ! have_mult))
3404 {
3405 undo_all ();
3406 return 0;
3407 }
3408
3409 /* If the actions of the earlier insns must be kept
3410 in addition to substituting them into the latest one,
3411 we must make a new PARALLEL for the latest insn
3412 to hold the additional SETs. */
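/* For example, when I2DEST is still live after I3 (ADDED_SETS_2), the
   result takes the shape
     (parallel [<I3 pattern with I2SRC substituted>
                (set I2DEST I2SRC)])
   so that I2's assignment is preserved alongside the combined insn.  */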
3413
3414 if (added_sets_0 || added_sets_1 || added_sets_2)
3415 {
3416 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3417 combine_extras++;
3418
3419 if (GET_CODE (newpat) == PARALLEL)
3420 {
3421 rtvec old = XVEC (newpat, 0);
3422 total_sets = XVECLEN (newpat, 0) + extra_sets;
3423 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3424 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3425 sizeof (old->elem[0]) * old->num_elem);
3426 }
3427 else
3428 {
3429 rtx old = newpat;
3430 total_sets = 1 + extra_sets;
3431 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3432 XVECEXP (newpat, 0, 0) = old;
3433 }
3434
3435 if (added_sets_0)
3436 XVECEXP (newpat, 0, --total_sets) = i0pat;
3437
3438 if (added_sets_1)
3439 {
3440 rtx t = i1pat;
3441 if (i0_feeds_i1_n)
3442 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3443
3444 XVECEXP (newpat, 0, --total_sets) = t;
3445 }
3446 if (added_sets_2)
3447 {
3448 rtx t = i2pat;
3449 if (i1_feeds_i2_n)
3450 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3451 i0_feeds_i1_n && i0dest_in_i0src);
3452 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3453 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3454
3455 XVECEXP (newpat, 0, --total_sets) = t;
3456 }
3457 }
3458
3459 validate_replacement:
3460
3461 /* Note which hard regs this insn has as inputs. */
3462 mark_used_regs_combine (newpat);
3463
3464 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3465 consider splitting this pattern, we might need these clobbers. */
3466 if (i1 && GET_CODE (newpat) == PARALLEL
3467 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3468 {
3469 int len = XVECLEN (newpat, 0);
3470
3471 newpat_vec_with_clobbers = rtvec_alloc (len);
3472 for (i = 0; i < len; i++)
3473 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3474 }
3475
3476 /* We have recognized nothing yet. */
3477 insn_code_number = -1;
3478
3479 /* See if this is a PARALLEL of two SETs where one SET's destination is
3480 a register that is unused and this isn't marked as an instruction that
3481 might trap in an EH region. In that case, we just need the other SET.
3482 We prefer this over the PARALLEL.
3483
3484 This can occur when simplifying a divmod insn. We *must* test for this
3485 case here because the code below that splits two independent SETs doesn't
3486 handle this case correctly when it updates the register status.
3487
3488 It's pointless doing this if we originally had two sets, one from
3489 i3, and one from i2. Combining then splitting the parallel results
3490 in the original i2 again plus an invalid insn (which we delete).
3491 The net effect is only to move instructions around, which makes
3492 debug info less accurate.
3493
3494 If the remaining SET came from I2 its destination should not be used
3495 between I2 and I3. See PR82024. */
3496
3497 if (!(added_sets_2 && i1 == 0)
3498 && is_parallel_of_n_reg_sets (newpat, 2)
3499 && asm_noperands (newpat) < 0)
3500 {
3501 rtx set0 = XVECEXP (newpat, 0, 0);
3502 rtx set1 = XVECEXP (newpat, 0, 1);
3503 rtx oldpat = newpat;
3504
3505 if (((REG_P (SET_DEST (set1))
3506 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3507 || (GET_CODE (SET_DEST (set1)) == SUBREG
3508 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3509 && insn_nothrow_p (i3)
3510 && !side_effects_p (SET_SRC (set1)))
3511 {
3512 newpat = set0;
3513 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3514 }
3515
3516 else if (((REG_P (SET_DEST (set0))
3517 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3518 || (GET_CODE (SET_DEST (set0)) == SUBREG
3519 && find_reg_note (i3, REG_UNUSED,
3520 SUBREG_REG (SET_DEST (set0)))))
3521 && insn_nothrow_p (i3)
3522 && !side_effects_p (SET_SRC (set0)))
3523 {
3524 rtx dest = SET_DEST (set1);
3525 if (GET_CODE (dest) == SUBREG)
3526 dest = SUBREG_REG (dest);
3527 if (!reg_used_between_p (dest, i2, i3))
3528 {
3529 newpat = set1;
3530 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3531
3532 if (insn_code_number >= 0)
3533 changed_i3_dest = 1;
3534 }
3535 }
3536
3537 if (insn_code_number < 0)
3538 newpat = oldpat;
3539 }
3540
3541 /* Is the result of combination a valid instruction? */
3542 if (insn_code_number < 0)
3543 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3544
3545 /* If we were combining three insns and the result is a simple SET
3546 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3547 insns. There are two ways to do this. It can be split using a
3548 machine-specific method (like when you have an addition of a large
3549 constant) or by combine in the function find_split_point. */
3550
3551 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3552 && asm_noperands (newpat) < 0)
3553 {
3554 rtx parallel, *split;
3555 rtx_insn *m_split_insn;
3556
3557 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3558 use I2DEST as a scratch register will help. In the latter case,
3559 convert I2DEST to the mode of the source of NEWPAT if we can. */
3560
3561 m_split_insn = combine_split_insns (newpat, i3);
3562
3563 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3564 inputs of NEWPAT. */
3565
3566 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3567 possible to try that as a scratch reg. This would require adding
3568 more code to make it work though. */
3569
3570 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3571 {
3572 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3573
3574 /* ??? Reusing i2dest without resetting the reg_stat entry for it
3575 (temporarily, until we are committed to this instruction
3576 combination) does not work: for example, any call to nonzero_bits
3577 on the register (from a splitter in the MD file, for example)
3578 will get the old information, which is invalid.
3579
3580 Since nowadays we can create registers during combine just fine,
3581 we should just create a new one here, not reuse i2dest. */
3582
3583 /* First try to split using the original register as a
3584 scratch register. */
3585 parallel = gen_rtx_PARALLEL (VOIDmode,
3586 gen_rtvec (2, newpat,
3587 gen_rtx_CLOBBER (VOIDmode,
3588 i2dest)));
3589 m_split_insn = combine_split_insns (parallel, i3);
3590
3591 /* If that didn't work, try changing the mode of I2DEST if
3592 we can. */
3593 if (m_split_insn == 0
3594 && new_mode != GET_MODE (i2dest)
3595 && new_mode != VOIDmode
3596 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3597 {
3598 machine_mode old_mode = GET_MODE (i2dest);
3599 rtx ni2dest;
3600
3601 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3602 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3603 else
3604 {
3605 subst_mode (REGNO (i2dest), new_mode);
3606 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3607 }
3608
3609 parallel = (gen_rtx_PARALLEL
3610 (VOIDmode,
3611 gen_rtvec (2, newpat,
3612 gen_rtx_CLOBBER (VOIDmode,
3613 ni2dest))));
3614 m_split_insn = combine_split_insns (parallel, i3);
3615
3616 if (m_split_insn == 0
3617 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3618 {
3619 struct undo *buf;
3620
3621 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3622 buf = undobuf.undos;
3623 undobuf.undos = buf->next;
3624 buf->next = undobuf.frees;
3625 undobuf.frees = buf;
3626 }
3627 }
3628
3629 i2scratch = m_split_insn != 0;
3630 }
3631
3632 /* If recog_for_combine has discarded clobbers, try to use them
3633 again for the split. */
3634 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3635 {
3636 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3637 m_split_insn = combine_split_insns (parallel, i3);
3638 }
3639
3640 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3641 {
3642 rtx m_split_pat = PATTERN (m_split_insn);
3643 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3644 if (insn_code_number >= 0)
3645 newpat = m_split_pat;
3646 }
3647 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3648 && (next_nonnote_nondebug_insn (i2) == i3
3649 || !modified_between_p (PATTERN (m_split_insn), i2, i3)))
3650 {
3651 rtx i2set, i3set;
3652 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3653 newi2pat = PATTERN (m_split_insn);
3654
3655 i3set = single_set (NEXT_INSN (m_split_insn));
3656 i2set = single_set (m_split_insn);
3657
3658 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3659
3660 /* If I2 or I3 has multiple SETs, we won't know how to track
3661 register status, so don't use these insns. If I2's destination
3662 is used between I2 and I3, we also can't use these insns. */
3663
3664 if (i2_code_number >= 0 && i2set && i3set
3665 && (next_nonnote_nondebug_insn (i2) == i3
3666 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3667 insn_code_number = recog_for_combine (&newi3pat, i3,
3668 &new_i3_notes);
3669 if (insn_code_number >= 0)
3670 newpat = newi3pat;
3671
3672 /* It is possible that both insns now set the destination of I3.
3673 If so, we must show an extra use of it. */
3674
3675 if (insn_code_number >= 0)
3676 {
3677 rtx new_i3_dest = SET_DEST (i3set);
3678 rtx new_i2_dest = SET_DEST (i2set);
3679
3680 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3681 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3682 || GET_CODE (new_i3_dest) == SUBREG)
3683 new_i3_dest = XEXP (new_i3_dest, 0);
3684
3685 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3686 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3687 || GET_CODE (new_i2_dest) == SUBREG)
3688 new_i2_dest = XEXP (new_i2_dest, 0);
3689
3690 if (REG_P (new_i3_dest)
3691 && REG_P (new_i2_dest)
3692 && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3693 && REGNO (new_i2_dest) < reg_n_sets_max)
3694 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3695 }
3696 }
3697
3698 /* If we can split it and use I2DEST, go ahead and see if that
3699 helps things be recognized. Verify that none of the registers
3700 are set between I2 and I3. */
3701 if (insn_code_number < 0
3702 && (split = find_split_point (&newpat, i3, false)) != 0
3703 /* We need I2DEST in the proper mode. If it is a hard register
3704 or the only use of a pseudo, we can change its mode.
3705 Make sure we don't change a hard register to have a mode that
3706 isn't valid for it, or change the number of registers. */
3707 && (GET_MODE (*split) == GET_MODE (i2dest)
3708 || GET_MODE (*split) == VOIDmode
3709 || can_change_dest_mode (i2dest, added_sets_2,
3710 GET_MODE (*split)))
3711 && (next_nonnote_nondebug_insn (i2) == i3
3712 || !modified_between_p (*split, i2, i3))
3713 /* We can't overwrite I2DEST if its value is still used by
3714 NEWPAT. */
3715 && ! reg_referenced_p (i2dest, newpat)
3716 /* We should not split a possibly trapping part when we
3717 care about non-call EH and have REG_EH_REGION notes
3718 to distribute. */
3719 && ! (cfun->can_throw_non_call_exceptions
3720 && has_non_call_exception
3721 && may_trap_p (*split)))
3722 {
3723 rtx newdest = i2dest;
3724 enum rtx_code split_code = GET_CODE (*split);
3725 machine_mode split_mode = GET_MODE (*split);
3726 bool subst_done = false;
3727 newi2pat = NULL_RTX;
3728
3729 i2scratch = true;
3730
3731 /* *SPLIT may be part of I2SRC, so make sure we have the
3732 original expression around for later debug processing.
3733 We should not need I2SRC any more in other cases. */
3734 if (MAY_HAVE_DEBUG_BIND_INSNS)
3735 i2src = copy_rtx (i2src);
3736 else
3737 i2src = NULL;
3738
3739 /* Get NEWDEST as a register in the proper mode. We have already
3740 validated that we can do this. */
3741 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3742 {
3743 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3744 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3745 else
3746 {
3747 subst_mode (REGNO (i2dest), split_mode);
3748 newdest = regno_reg_rtx[REGNO (i2dest)];
3749 }
3750 }
3751
3752 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3753 an ASHIFT. This can occur if it was inside a PLUS and hence
3754 appeared to be a memory address. This is a kludge. */
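/* E.g. (mult:SI (reg X) (const_int 8)) is rewritten here as
   (ashift:SI (reg X) (const_int 3)), since 8 is 1 << 3.  */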
3755 if (split_code == MULT
3756 && CONST_INT_P (XEXP (*split, 1))
3757 && INTVAL (XEXP (*split, 1)) > 0
3758 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3759 {
3760 rtx i_rtx = gen_int_shift_amount (split_mode, i);
3761 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3762 XEXP (*split, 0), i_rtx));
3763 /* Update split_code because we may not have a multiply
3764 anymore. */
3765 split_code = GET_CODE (*split);
3766 }
3767
3768 /* Similarly for (plus (mult FOO (const_int pow2))). */
3769 if (split_code == PLUS
3770 && GET_CODE (XEXP (*split, 0)) == MULT
3771 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3772 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3773 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3774 {
3775 rtx nsplit = XEXP (*split, 0);
3776 rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i);
3777 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3778 XEXP (nsplit, 0),
3779 i_rtx));
3780 /* Update split_code because we may not have a multiply
3781 anymore. */
3782 split_code = GET_CODE (*split);
3783 }
3784
3785 #ifdef INSN_SCHEDULING
3786 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3787 be written as a ZERO_EXTEND. */
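/* Concretely, (subreg:SI (mem:QI a) 0) is rewritten as
   (zero_extend:SI (mem:QI a)) (or a SIGN_EXTEND, see below).  */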
3788 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3789 {
3790 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3791 what it really is. */
3792 if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3793 == SIGN_EXTEND)
3794 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3795 SUBREG_REG (*split)));
3796 else
3797 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3798 SUBREG_REG (*split)));
3799 }
3800 #endif
3801
3802 /* Attempt to split binary operators using arithmetic identities. */
3803 if (BINARY_P (SET_SRC (newpat))
3804 && split_mode == GET_MODE (SET_SRC (newpat))
3805 && ! side_effects_p (SET_SRC (newpat)))
3806 {
3807 rtx setsrc = SET_SRC (newpat);
3808 machine_mode mode = GET_MODE (setsrc);
3809 enum rtx_code code = GET_CODE (setsrc);
3810 rtx src_op0 = XEXP (setsrc, 0);
3811 rtx src_op1 = XEXP (setsrc, 1);
3812
3813 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3814 if (rtx_equal_p (src_op0, src_op1))
3815 {
3816 newi2pat = gen_rtx_SET (newdest, src_op0);
3817 SUBST (XEXP (setsrc, 0), newdest);
3818 SUBST (XEXP (setsrc, 1), newdest);
3819 subst_done = true;
3820 }
3821 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3822 else if ((code == PLUS || code == MULT)
3823 && GET_CODE (src_op0) == code
3824 && GET_CODE (XEXP (src_op0, 0)) == code
3825 && (INTEGRAL_MODE_P (mode)
3826 || (FLOAT_MODE_P (mode)
3827 && flag_unsafe_math_optimizations)))
3828 {
3829 rtx p = XEXP (XEXP (src_op0, 0), 0);
3830 rtx q = XEXP (XEXP (src_op0, 0), 1);
3831 rtx r = XEXP (src_op0, 1);
3832 rtx s = src_op1;
3833
3834 /* Split both "((X op Y) op X) op Y" and
3835 "((X op Y) op Y) op X" as "T op T" where T is
3836 "X op Y". */
3837 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3838 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3839 {
3840 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3841 SUBST (XEXP (setsrc, 0), newdest);
3842 SUBST (XEXP (setsrc, 1), newdest);
3843 subst_done = true;
3844 }
3845 /* Split "((X op X) op Y) op Y" as "T op T" where
3846 T is "X op Y". */
3847 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3848 {
3849 rtx tmp = simplify_gen_binary (code, mode, p, r);
3850 newi2pat = gen_rtx_SET (newdest, tmp);
3851 SUBST (XEXP (setsrc, 0), newdest);
3852 SUBST (XEXP (setsrc, 1), newdest);
3853 subst_done = true;
3854 }
3855 }
3856 }
3857
3858 if (!subst_done)
3859 {
3860 newi2pat = gen_rtx_SET (newdest, *split);
3861 SUBST (*split, newdest);
3862 }
3863
3864 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3865
3866 /* recog_for_combine might have added CLOBBERs to newi2pat.
3867 Make sure NEWPAT does not depend on the clobbered regs. */
3868 if (GET_CODE (newi2pat) == PARALLEL)
3869 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3870 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3871 {
3872 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3873 if (reg_overlap_mentioned_p (reg, newpat))
3874 {
3875 undo_all ();
3876 return 0;
3877 }
3878 }
3879
3880 /* If the split point was a MULT and we didn't have one before,
3881 don't use one now. */
3882 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3883 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3884 }
3885 }
3886
3887 /* Check for a case where we loaded from memory in a narrow mode and
3888 then sign extended it, but we need both registers. In that case,
3889 we have a PARALLEL with both loads from the same memory location.
3890 We can split this into a load from memory followed by a register-register
3891 copy. This saves at least one insn, more if register allocation can
3892 eliminate the copy.
3893
3894 We cannot do this if the destination of the first assignment is a
3895 condition code register. We eliminate this case by making sure
3896 the SET_DEST and SET_SRC have the same mode.
3897
3898 We cannot do this if the destination of the second assignment is
3899 a register that we have already assumed is zero-extended. Similarly
3900 for a SUBREG of such a register. */
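/* Sketch of the transformation: a PARALLEL such as
     [(set A (sign_extend:SI (mem:HI m)))
      (set B (mem:HI m))]
   becomes NEWI2PAT = (set A (sign_extend:SI (mem:HI m))), with NEWPAT
   loading B from the low part of A rather than from memory again.  */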
3901
3902 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3903 && GET_CODE (newpat) == PARALLEL
3904 && XVECLEN (newpat, 0) == 2
3905 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3906 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3907 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3908 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3909 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3910 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3911 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3912 && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3)
3913 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3914 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3915 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3916 (REG_P (temp_expr)
3917 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3918 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3919 BITS_PER_WORD)
3920 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3921 HOST_BITS_PER_INT)
3922 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3923 != GET_MODE_MASK (word_mode))))
3924 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3925 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3926 (REG_P (temp_expr)
3927 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3928 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3929 BITS_PER_WORD)
3930 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3931 HOST_BITS_PER_INT)
3932 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3933 != GET_MODE_MASK (word_mode)))))
3934 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3935 SET_SRC (XVECEXP (newpat, 0, 1)))
3936 && ! find_reg_note (i3, REG_UNUSED,
3937 SET_DEST (XVECEXP (newpat, 0, 0))))
3938 {
3939 rtx ni2dest;
3940
3941 newi2pat = XVECEXP (newpat, 0, 0);
3942 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3943 newpat = XVECEXP (newpat, 0, 1);
3944 SUBST (SET_SRC (newpat),
3945 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3946 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3947
3948 if (i2_code_number >= 0)
3949 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3950
3951 if (insn_code_number >= 0)
3952 swap_i2i3 = 1;
3953 }
3954
3955 /* Similarly, check for a case where we have a PARALLEL of two independent
3956 SETs but we started with three insns. In this case, we can do the sets
3957 as two separate insns. This case occurs when some SET allows two
3958 other insns to combine, but the destination of that SET is still live.
3959
3960 Also do this if we started with two insns and (at least) one of the
3961 resulting sets is a noop; this noop will be deleted later.
3962
3963 Also do this if we started with two insns neither of which was a simple
3964 move. */
3965
3966 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
3967 && GET_CODE (newpat) == PARALLEL
3968 && XVECLEN (newpat, 0) == 2
3969 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3970 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3971 && (i1
3972 || set_noop_p (XVECEXP (newpat, 0, 0))
3973 || set_noop_p (XVECEXP (newpat, 0, 1))
3974 || (!i2_was_move && !i3_was_move))
3975 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3976 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3977 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3978 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3979 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3980 XVECEXP (newpat, 0, 0))
3981 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3982 XVECEXP (newpat, 0, 1))
3983 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3984 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3985 {
3986 rtx set0 = XVECEXP (newpat, 0, 0);
3987 rtx set1 = XVECEXP (newpat, 0, 1);
3988
3989 /* Normally, it doesn't matter which of the two is done first, but
3990 one which uses any regs/memory set in between i2 and i3 can't
3991 be first. The PARALLEL might also have been pre-existing in i3,
3992 so we need to make sure that we won't wrongly hoist a SET to i2
3993 that would conflict with a death note present in there, or would
3994 have its dest modified between i2 and i3. */
3995 if (!modified_between_p (SET_SRC (set1), i2, i3)
3996 && !(REG_P (SET_DEST (set1))
3997 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
3998 && !(GET_CODE (SET_DEST (set1)) == SUBREG
3999 && find_reg_note (i2, REG_DEAD,
4000 SUBREG_REG (SET_DEST (set1))))
4001 && !modified_between_p (SET_DEST (set1), i2, i3)
4002 /* If I3 is a jump, ensure that set0 is a jump so that
4003 we do not create invalid RTL. */
4004 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
4005 )
4006 {
4007 newi2pat = set1;
4008 newpat = set0;
4009 }
4010 else if (!modified_between_p (SET_SRC (set0), i2, i3)
4011 && !(REG_P (SET_DEST (set0))
4012 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
4013 && !(GET_CODE (SET_DEST (set0)) == SUBREG
4014 && find_reg_note (i2, REG_DEAD,
4015 SUBREG_REG (SET_DEST (set0))))
4016 && !modified_between_p (SET_DEST (set0), i2, i3)
4017 /* If I3 is a jump, ensure that set1 is a jump so that
4018 we do not create invalid RTL. */
4019 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
4020 )
4021 {
4022 newi2pat = set0;
4023 newpat = set1;
4024 }
4025 else
4026 {
4027 undo_all ();
4028 return 0;
4029 }
4030
4031 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4032
4033 if (i2_code_number >= 0)
4034 {
4035 /* recog_for_combine might have added CLOBBERs to newi2pat.
4036 Make sure NEWPAT does not depend on the clobbered regs. */
4037 if (GET_CODE (newi2pat) == PARALLEL)
4038 {
4039 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4040 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4041 {
4042 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4043 if (reg_overlap_mentioned_p (reg, newpat))
4044 {
4045 undo_all ();
4046 return 0;
4047 }
4048 }
4049 }
4050
4051 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4052
4053 /* Likewise, recog_for_combine might have added clobbers to NEWPAT.
4054 Checking that SET0's SET_DEST and SET1's SET_DEST aren't
4055 mentioned or clobbered ensures NEWI2PAT's SET_DEST is live. */
4056 if (insn_code_number >= 0 && GET_CODE (newpat) == PARALLEL)
4057 {
4058 for (i = XVECLEN (newpat, 0) - 1; i >= 0; i--)
4059 if (GET_CODE (XVECEXP (newpat, 0, i)) == CLOBBER)
4060 {
4061 rtx reg = XEXP (XVECEXP (newpat, 0, i), 0);
4062 if (reg_overlap_mentioned_p (reg, SET_DEST (set0))
4063 || reg_overlap_mentioned_p (reg, SET_DEST (set1)))
4064 {
4065 undo_all ();
4066 return 0;
4067 }
4068 }
4069 }
4070
4071 if (insn_code_number >= 0)
4072 split_i2i3 = 1;
4073 }
4074 }
4075
4076 /* If it still isn't recognized, fail and change things back the way they
4077 were. */
4078 if ((insn_code_number < 0
4079 /* Is the result a reasonable ASM_OPERANDS? */
4080 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4081 {
4082 undo_all ();
4083 return 0;
4084 }
4085
4086 /* If we had to change another insn, make sure it is valid also. */
4087 if (undobuf.other_insn)
4088 {
4089 CLEAR_HARD_REG_SET (newpat_used_regs);
4090
4091 other_pat = PATTERN (undobuf.other_insn);
4092 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4093 &new_other_notes);
4094
4095 if (other_code_number < 0 && ! check_asm_operands (other_pat))
4096 {
4097 undo_all ();
4098 return 0;
4099 }
4100 }
4101
4102 /* Only allow this combination if insn_cost reports that the
4103 replacement instructions are cheaper than the originals. */
4104 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4105 {
4106 undo_all ();
4107 return 0;
4108 }
4109
4110 if (MAY_HAVE_DEBUG_BIND_INSNS)
4111 {
4112 struct undo *undo;
4113
4114 for (undo = undobuf.undos; undo; undo = undo->next)
4115 if (undo->kind == UNDO_MODE)
4116 {
4117 rtx reg = regno_reg_rtx[undo->where.regno];
4118 machine_mode new_mode = GET_MODE (reg);
4119 machine_mode old_mode = undo->old_contents.m;
4120
4121 /* Temporarily revert to the old mode. */
4122 adjust_reg_mode (reg, old_mode);
4123
4124 if (reg == i2dest && i2scratch)
4125 {
4126 /* If we used i2dest as a scratch register with a
4127 different mode, substitute it for the original
4128 i2src while its original mode is temporarily
4129 restored, and then clear i2scratch so that we don't
4130 do it again later. */
4131 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4132 this_basic_block);
4133 i2scratch = false;
4134 /* Put back the new mode. */
4135 adjust_reg_mode (reg, new_mode);
4136 }
4137 else
4138 {
4139 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4140 rtx_insn *first, *last;
4141
4142 if (reg == i2dest)
4143 {
4144 first = i2;
4145 last = last_combined_insn;
4146 }
4147 else
4148 {
4149 first = i3;
4150 last = undobuf.other_insn;
4151 gcc_assert (last);
4152 if (DF_INSN_LUID (last)
4153 < DF_INSN_LUID (last_combined_insn))
4154 last = last_combined_insn;
4155 }
4156
4157 /* We're dealing with a reg that changed mode but not
4158 meaning, so we want to turn it into a subreg for
4159 the new mode. However, because of REG sharing and
4160 because its mode had already changed, we have to do
4161 it in two steps. First, replace any debug uses of
4162 reg, with its original mode temporarily restored,
4163 with this copy we have created; then, replace the
4164 copy with the SUBREG of the original shared reg,
4165 once again changed to the new mode. */
4166 propagate_for_debug (first, last, reg, tempreg,
4167 this_basic_block);
4168 adjust_reg_mode (reg, new_mode);
4169 propagate_for_debug (first, last, tempreg,
4170 lowpart_subreg (old_mode, reg, new_mode),
4171 this_basic_block);
4172 }
4173 }
4174 }
4175
4176 /* If we will be able to accept this, we have made a
4177 change to the destination of I3. This requires us to
4178 do a few adjustments. */
4179
4180 if (changed_i3_dest)
4181 {
4182 PATTERN (i3) = newpat;
4183 adjust_for_new_dest (i3);
4184 }
4185
4186 /* We now know that we can do this combination. Merge the insns and
4187 update the status of registers and LOG_LINKS. */
4188
4189 if (undobuf.other_insn)
4190 {
4191 rtx note, next;
4192
4193 PATTERN (undobuf.other_insn) = other_pat;
4194
4195 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4196 ensure that they are still valid. Then add any non-duplicate
4197 notes added by recog_for_combine. */
4198 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4199 {
4200 next = XEXP (note, 1);
4201
4202 if ((REG_NOTE_KIND (note) == REG_DEAD
4203 && !reg_referenced_p (XEXP (note, 0),
4204 PATTERN (undobuf.other_insn)))
4205 || (REG_NOTE_KIND (note) == REG_UNUSED
4206 && !reg_set_p (XEXP (note, 0),
4207 PATTERN (undobuf.other_insn)))
4208 /* Simply drop any REG_EQUAL or REG_EQUIV note, since it may no
4209 longer be valid for other_insn. It might be possible to record
4210 that the CC register has changed and discard only those notes,
4211 but in practice that is an unnecessary complication with no
4212 meaningful improvement.
4213
4214 See PR78559. */
4215 || REG_NOTE_KIND (note) == REG_EQUAL
4216 || REG_NOTE_KIND (note) == REG_EQUIV)
4217 remove_note (undobuf.other_insn, note);
4218 }
4219
4220 distribute_notes (new_other_notes, undobuf.other_insn,
4221 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4222 NULL_RTX);
4223 }
4224
4225 if (swap_i2i3)
4226 {
4227 /* I3 now uses what used to be its destination and which is now
4228 I2's destination. This requires us to do a few adjustments. */
4229 PATTERN (i3) = newpat;
4230 adjust_for_new_dest (i3);
4231 }
4232
4233 if (swap_i2i3 || split_i2i3)
4234 {
4235 /* We might need a LOG_LINK from I3 to I2. But then we used to
4236 have one, so we still will.
4237
4238 However, some later insn might be using I2's dest and have
4239 a LOG_LINK pointing at I3. We should change it to point at
4240 I2 instead. */
4241
4242 /* newi2pat is usually a SET here; however, recog_for_combine might
4243 have added some clobbers. */
4244 rtx x = newi2pat;
4245 if (GET_CODE (x) == PARALLEL)
4246 x = XVECEXP (newi2pat, 0, 0);
4247
4248 if (REG_P (SET_DEST (x))
4249 || (GET_CODE (SET_DEST (x)) == SUBREG
4250 && REG_P (SUBREG_REG (SET_DEST (x)))))
4251 {
4252 unsigned int regno = reg_or_subregno (SET_DEST (x));
4253
4254 bool done = false;
4255 for (rtx_insn *insn = NEXT_INSN (i3);
4256 !done
4257 && insn
4258 && INSN_P (insn)
4259 && BLOCK_FOR_INSN (insn) == this_basic_block;
4260 insn = NEXT_INSN (insn))
4261 {
4262 if (DEBUG_INSN_P (insn))
4263 continue;
4264 struct insn_link *link;
4265 FOR_EACH_LOG_LINK (link, insn)
4266 if (link->insn == i3 && link->regno == regno)
4267 {
4268 link->insn = i2;
4269 done = true;
4270 break;
4271 }
4272 }
4273 }
4274 }
4275
4276 {
4277 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4278 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4279 rtx midnotes = 0;
4280 int from_luid;
4281 /* Compute which registers we expect to eliminate. newi2pat may be setting
4282 either i3dest or i2dest, so we must check it. */
4283 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4284 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4285 || !i2dest_killed
4286 ? 0 : i2dest);
4287 /* For i1, we need to compute both local elimination and global
4288 elimination information with respect to newi2pat because i1dest
4289 may be the same as i3dest, in which case newi2pat may be setting
4290 i1dest. Global information is used when distributing REG_DEAD
4291 note for i2 and i3, in which case it does matter if newi2pat sets
4292 i1dest or not.
4293
4294 Local information is used when distributing REG_DEAD note for i1,
4295 in which case it doesn't matter if newi2pat sets i1dest or not.
4296 See PR62151, if we have four insns combination:
4297 i0: r0 <- i0src
4298 i1: r1 <- i1src (using r0)
4299 REG_DEAD (r0)
4300 i2: r0 <- i2src (using r1)
4301 i3: r3 <- i3src (using r0)
4302 ix: using r0
4303 From i1's point of view, r0 is eliminated, no matter if it is set
4304 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4305 should be discarded.
4306
4307 Note local information only affects cases in forms like "I1->I2->I3",
4308 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4309 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4310 i0dest anyway. */
4311 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4312 || !i1dest_killed
4313 ? 0 : i1dest);
4314 rtx elim_i1 = (local_elim_i1 == 0
4315 || (newi2pat && reg_set_p (i1dest, newi2pat))
4316 ? 0 : i1dest);
4317 /* Same case as i1. */
4318 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4319 ? 0 : i0dest);
4320 rtx elim_i0 = (local_elim_i0 == 0
4321 || (newi2pat && reg_set_p (i0dest, newi2pat))
4322 ? 0 : i0dest);
4323
4324 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4325 clear them. */
4326 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4327 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4328 if (i1)
4329 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4330 if (i0)
4331 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4332
4333 /* Ensure that we do not have something that should not be shared but
4334 occurs multiple times in the new insns. Check this by first
4335 resetting all the `used' flags and then copying anything that is shared. */
4336
4337 reset_used_flags (i3notes);
4338 reset_used_flags (i2notes);
4339 reset_used_flags (i1notes);
4340 reset_used_flags (i0notes);
4341 reset_used_flags (newpat);
4342 reset_used_flags (newi2pat);
4343 if (undobuf.other_insn)
4344 reset_used_flags (PATTERN (undobuf.other_insn));
4345
4346 i3notes = copy_rtx_if_shared (i3notes);
4347 i2notes = copy_rtx_if_shared (i2notes);
4348 i1notes = copy_rtx_if_shared (i1notes);
4349 i0notes = copy_rtx_if_shared (i0notes);
4350 newpat = copy_rtx_if_shared (newpat);
4351 newi2pat = copy_rtx_if_shared (newi2pat);
4352 if (undobuf.other_insn)
4353 reset_used_flags (PATTERN (undobuf.other_insn));
4354
4355 INSN_CODE (i3) = insn_code_number;
4356 PATTERN (i3) = newpat;
4357
4358 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4359 {
4360 for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4361 link = XEXP (link, 1))
4362 {
4363 if (substed_i2)
4364 {
4365 /* I2SRC must still be meaningful at this point. Some
4366 splitting operations can invalidate I2SRC, but those
4367 operations do not apply to calls. */
4368 gcc_assert (i2src);
4369 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4370 i2dest, i2src);
4371 }
4372 if (substed_i1)
4373 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4374 i1dest, i1src);
4375 if (substed_i0)
4376 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4377 i0dest, i0src);
4378 }
4379 }
4380
4381 if (undobuf.other_insn)
4382 INSN_CODE (undobuf.other_insn) = other_code_number;
4383
4384 /* We had one special case above where I2 had more than one set and
4385 we replaced a destination of one of those sets with the destination
4386 of I3. In that case, we have to update LOG_LINKS of insns later
4387 in this basic block. Note that this (expensive) case is rare.
4388
4389 Also, in this case, we must pretend that all REG_NOTEs for I2
4390 actually came from I3, so that REG_UNUSED notes from I2 will be
4391 properly handled. */
4392
4393 if (i3_subst_into_i2)
4394 {
4395 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4396 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4397 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4398 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4399 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4400 && ! find_reg_note (i2, REG_UNUSED,
4401 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4402 for (temp_insn = NEXT_INSN (i2);
4403 temp_insn
4404 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4405 || BB_HEAD (this_basic_block) != temp_insn);
4406 temp_insn = NEXT_INSN (temp_insn))
4407 if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4408 FOR_EACH_LOG_LINK (link, temp_insn)
4409 if (link->insn == i2)
4410 link->insn = i3;
4411
4412 if (i3notes)
4413 {
4414 rtx link = i3notes;
4415 while (XEXP (link, 1))
4416 link = XEXP (link, 1);
4417 XEXP (link, 1) = i2notes;
4418 }
4419 else
4420 i3notes = i2notes;
4421 i2notes = 0;
4422 }
4423
4424 LOG_LINKS (i3) = NULL;
4425 REG_NOTES (i3) = 0;
4426 LOG_LINKS (i2) = NULL;
4427 REG_NOTES (i2) = 0;
4428
4429 if (newi2pat)
4430 {
4431 if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch)
4432 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4433 this_basic_block);
4434 INSN_CODE (i2) = i2_code_number;
4435 PATTERN (i2) = newi2pat;
4436 }
4437 else
4438 {
4439 if (MAY_HAVE_DEBUG_BIND_INSNS && i2src)
4440 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4441 this_basic_block);
4442 SET_INSN_DELETED (i2);
4443 }
4444
4445 if (i1)
4446 {
4447 LOG_LINKS (i1) = NULL;
4448 REG_NOTES (i1) = 0;
4449 if (MAY_HAVE_DEBUG_BIND_INSNS)
4450 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4451 this_basic_block);
4452 SET_INSN_DELETED (i1);
4453 }
4454
4455 if (i0)
4456 {
4457 LOG_LINKS (i0) = NULL;
4458 REG_NOTES (i0) = 0;
4459 if (MAY_HAVE_DEBUG_BIND_INSNS)
4460 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4461 this_basic_block);
4462 SET_INSN_DELETED (i0);
4463 }
4464
4465 /* Get death notes for everything that is now used in either I3 or
4466 I2 and used to die in a previous insn. If we built two new
4467 patterns, move from I1 to I2 then I2 to I3 so that we get the
4468 proper movement on registers that I2 modifies. */
4469
4470 if (i0)
4471 from_luid = DF_INSN_LUID (i0);
4472 else if (i1)
4473 from_luid = DF_INSN_LUID (i1);
4474 else
4475 from_luid = DF_INSN_LUID (i2);
4476 if (newi2pat)
4477 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4478 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4479
4480 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4481 if (i3notes)
4482 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4483 elim_i2, elim_i1, elim_i0);
4484 if (i2notes)
4485 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4486 elim_i2, elim_i1, elim_i0);
4487 if (i1notes)
4488 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4489 elim_i2, local_elim_i1, local_elim_i0);
4490 if (i0notes)
4491 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4492 elim_i2, elim_i1, local_elim_i0);
4493 if (midnotes)
4494 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4495 elim_i2, elim_i1, elim_i0);
4496
4497 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4498 know these are REG_UNUSED and want them to go to the desired insn,
4499 so we always pass it as i3. */
4500
4501 if (newi2pat && new_i2_notes)
4502 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4503 NULL_RTX);
4504
4505 if (new_i3_notes)
4506 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4507 NULL_RTX);
4508
4509 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4510 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4511 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4512 in that case, it might delete I2. Similarly for I2 and I1.
4513 Show an additional death due to the REG_DEAD note we make here. If
4514 we discard it in distribute_notes, we will decrement it again. */
4515
4516 if (i3dest_killed)
4517 {
4518 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4519 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4520 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4521 elim_i1, elim_i0);
4522 else
4523 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4524 elim_i2, elim_i1, elim_i0);
4525 }
4526
4527 if (i2dest_in_i2src)
4528 {
4529 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4530 if (newi2pat && reg_set_p (i2dest, newi2pat))
4531 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4532 NULL_RTX, NULL_RTX);
4533 else
4534 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4535 NULL_RTX, NULL_RTX, NULL_RTX);
4536 }
4537
4538 if (i1dest_in_i1src)
4539 {
4540 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4541 if (newi2pat && reg_set_p (i1dest, newi2pat))
4542 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4543 NULL_RTX, NULL_RTX);
4544 else
4545 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4546 NULL_RTX, NULL_RTX, NULL_RTX);
4547 }
4548
4549 if (i0dest_in_i0src)
4550 {
4551 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4552 if (newi2pat && reg_set_p (i0dest, newi2pat))
4553 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4554 NULL_RTX, NULL_RTX);
4555 else
4556 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4557 NULL_RTX, NULL_RTX, NULL_RTX);
4558 }
4559
4560 distribute_links (i3links);
4561 distribute_links (i2links);
4562 distribute_links (i1links);
4563 distribute_links (i0links);
4564
4565 if (REG_P (i2dest))
4566 {
4567 struct insn_link *link;
4568 rtx_insn *i2_insn = 0;
4569 rtx i2_val = 0, set;
4570
4571 /* The insn that used to set this register doesn't exist, and
4572 this life of the register may not exist either. See if one of
4573 I3's links points to an insn that sets I2DEST. If it does,
4574 that is now the last known value for I2DEST. If we don't update
4575 this and I2 set the register to a value that depended on its old
4576 contents, we will get confused. If this insn is used, things
4577 will be set correctly in combine_instructions. */
4578 FOR_EACH_LOG_LINK (link, i3)
4579 if ((set = single_set (link->insn)) != 0
4580 && rtx_equal_p (i2dest, SET_DEST (set)))
4581 i2_insn = link->insn, i2_val = SET_SRC (set);
4582
4583 record_value_for_reg (i2dest, i2_insn, i2_val);
4584
4585 /* If the reg formerly set in I2 died only once and that was in I3,
4586 zero its use count so it won't make `reload' do any work. */
4587 if (! added_sets_2
4588 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4589 && ! i2dest_in_i2src
4590 && REGNO (i2dest) < reg_n_sets_max)
4591 INC_REG_N_SETS (REGNO (i2dest), -1);
4592 }
4593
4594 if (i1 && REG_P (i1dest))
4595 {
4596 struct insn_link *link;
4597 rtx_insn *i1_insn = 0;
4598 rtx i1_val = 0, set;
4599
4600 FOR_EACH_LOG_LINK (link, i3)
4601 if ((set = single_set (link->insn)) != 0
4602 && rtx_equal_p (i1dest, SET_DEST (set)))
4603 i1_insn = link->insn, i1_val = SET_SRC (set);
4604
4605 record_value_for_reg (i1dest, i1_insn, i1_val);
4606
4607 if (! added_sets_1
4608 && ! i1dest_in_i1src
4609 && REGNO (i1dest) < reg_n_sets_max)
4610 INC_REG_N_SETS (REGNO (i1dest), -1);
4611 }
4612
4613 if (i0 && REG_P (i0dest))
4614 {
4615 struct insn_link *link;
4616 rtx_insn *i0_insn = 0;
4617 rtx i0_val = 0, set;
4618
4619 FOR_EACH_LOG_LINK (link, i3)
4620 if ((set = single_set (link->insn)) != 0
4621 && rtx_equal_p (i0dest, SET_DEST (set)))
4622 i0_insn = link->insn, i0_val = SET_SRC (set);
4623
4624 record_value_for_reg (i0dest, i0_insn, i0_val);
4625
4626 if (! added_sets_0
4627 && ! i0dest_in_i0src
4628 && REGNO (i0dest) < reg_n_sets_max)
4629 INC_REG_N_SETS (REGNO (i0dest), -1);
4630 }
4631
4632 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4633 been made to this insn. The order is important, because newi2pat
4634 can affect nonzero_bits of newpat. */
4635 if (newi2pat)
4636 note_pattern_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4637 note_pattern_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4638 }
4639
4640 if (undobuf.other_insn != NULL_RTX)
4641 {
4642 if (dump_file)
4643 {
4644 fprintf (dump_file, "modifying other_insn ");
4645 dump_insn_slim (dump_file, undobuf.other_insn);
4646 }
4647 df_insn_rescan (undobuf.other_insn);
4648 }
4649
4650 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4651 {
4652 if (dump_file)
4653 {
4654 fprintf (dump_file, "modifying insn i0 ");
4655 dump_insn_slim (dump_file, i0);
4656 }
4657 df_insn_rescan (i0);
4658 }
4659
4660 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4661 {
4662 if (dump_file)
4663 {
4664 fprintf (dump_file, "modifying insn i1 ");
4665 dump_insn_slim (dump_file, i1);
4666 }
4667 df_insn_rescan (i1);
4668 }
4669
4670 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4671 {
4672 if (dump_file)
4673 {
4674 fprintf (dump_file, "modifying insn i2 ");
4675 dump_insn_slim (dump_file, i2);
4676 }
4677 df_insn_rescan (i2);
4678 }
4679
4680 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4681 {
4682 if (dump_file)
4683 {
4684 fprintf (dump_file, "modifying insn i3 ");
4685 dump_insn_slim (dump_file, i3);
4686 }
4687 df_insn_rescan (i3);
4688 }
4689
4690 /* Set new_direct_jump_p if a new return or simple jump instruction
4691 has been created. Adjust the CFG accordingly. */
4692 if (returnjump_p (i3) || any_uncondjump_p (i3))
4693 {
4694 *new_direct_jump_p = 1;
4695 mark_jump_label (PATTERN (i3), i3, 0);
4696 update_cfg_for_uncondjump (i3);
4697 }
4698
4699 if (undobuf.other_insn != NULL_RTX
4700 && (returnjump_p (undobuf.other_insn)
4701 || any_uncondjump_p (undobuf.other_insn)))
4702 {
4703 *new_direct_jump_p = 1;
4704 update_cfg_for_uncondjump (undobuf.other_insn);
4705 }
4706
4707 if (GET_CODE (PATTERN (i3)) == TRAP_IF
4708 && XEXP (PATTERN (i3), 0) == const1_rtx)
4709 {
4710 basic_block bb = BLOCK_FOR_INSN (i3);
4711 gcc_assert (bb);
4712 remove_edge (split_block (bb, i3));
4713 emit_barrier_after_bb (bb);
4714 *new_direct_jump_p = 1;
4715 }
4716
4717 if (undobuf.other_insn
4718 && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4719 && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4720 {
4721 basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4722 gcc_assert (bb);
4723 remove_edge (split_block (bb, undobuf.other_insn));
4724 emit_barrier_after_bb (bb);
4725 *new_direct_jump_p = 1;
4726 }
4727
4728 /* A no-op set may also require cleaning up the CFG, if it comes
4729 from the simplification of a jump. */
4730 if (JUMP_P (i3)
4731 && GET_CODE (newpat) == SET
4732 && SET_SRC (newpat) == pc_rtx
4733 && SET_DEST (newpat) == pc_rtx)
4734 {
4735 *new_direct_jump_p = 1;
4736 update_cfg_for_uncondjump (i3);
4737 }
4738
4739 if (undobuf.other_insn != NULL_RTX
4740 && JUMP_P (undobuf.other_insn)
4741 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4742 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4743 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4744 {
4745 *new_direct_jump_p = 1;
4746 update_cfg_for_uncondjump (undobuf.other_insn);
4747 }
4748
4749 combine_successes++;
4750 undo_commit ();
4751
4752 rtx_insn *ret = newi2pat ? i2 : i3;
4753 if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret))
4754 ret = added_links_insn;
4755 if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret))
4756 ret = added_notes_insn;
4757
4758 return ret;
4759 }
4760 \f
4761 /* Get a marker for undoing to the current state. */
4762
4763 static void *
4764 get_undo_marker (void)
4765 {
4766 return undobuf.undos;
4767 }
4768
4769 /* Undo the modifications up to the marker. */
4770
4771 static void
4772 undo_to_marker (void *marker)
4773 {
4774 struct undo *undo, *next;
4775
4776 for (undo = undobuf.undos; undo != marker; undo = next)
4777 {
4778 gcc_assert (undo);
4779
4780 next = undo->next;
4781 switch (undo->kind)
4782 {
4783 case UNDO_RTX:
4784 *undo->where.r = undo->old_contents.r;
4785 break;
4786 case UNDO_INT:
4787 *undo->where.i = undo->old_contents.i;
4788 break;
4789 case UNDO_MODE:
4790 adjust_reg_mode (regno_reg_rtx[undo->where.regno],
4791 undo->old_contents.m);
4792 break;
4793 case UNDO_LINKS:
4794 *undo->where.l = undo->old_contents.l;
4795 break;
4796 default:
4797 gcc_unreachable ();
4798 }
4799
4800 undo->next = undobuf.frees;
4801 undobuf.frees = undo;
4802 }
4803
4804 undobuf.undos = (struct undo *) marker;
4805 }
4806
4807 /* Undo all the modifications recorded in undobuf. */
4808
4809 static void
4810 undo_all (void)
4811 {
4812 undo_to_marker (0);
4813 }
4814
4815 /* We've committed to accepting the changes we made. Move all
4816 of the undos to the free list. */
4817
4818 static void
4819 undo_commit (void)
4820 {
4821 struct undo *undo, *next;
4822
4823 for (undo = undobuf.undos; undo; undo = next)
4824 {
4825 next = undo->next;
4826 undo->next = undobuf.frees;
4827 undobuf.frees = undo;
4828 }
4829 undobuf.undos = 0;
4830 }
4831 \f
4832 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4833 where we have an arithmetic expression and return that point. LOC will
4834 be inside INSN.
4835
4836 try_combine will call this function to see if an insn can be split into
4837 two insns. */
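/* For example, try_combine calls
     split = find_split_point (&newpat, i3, false);
   and, if a point is found, moves *SPLIT into a new I2 that sets a
   scratch register which NEWPAT then uses in its place.  */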
4838
4839 static rtx *
4840 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4841 {
4842 rtx x = *loc;
4843 enum rtx_code code = GET_CODE (x);
4844 rtx *split;
4845 unsigned HOST_WIDE_INT len = 0;
4846 HOST_WIDE_INT pos = 0;
4847 int unsignedp = 0;
4848 rtx inner = NULL_RTX;
4849 scalar_int_mode mode, inner_mode;
4850
4851 /* First special-case some codes. */
4852 switch (code)
4853 {
4854 case SUBREG:
4855 #ifdef INSN_SCHEDULING
4856 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4857 point. */
4858 if (MEM_P (SUBREG_REG (x)))
4859 return loc;
4860 #endif
4861 return find_split_point (&SUBREG_REG (x), insn, false);
4862
4863 case MEM:
4864 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4865 using LO_SUM and HIGH. */
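/* E.g. (mem (symbol_ref "x")) becomes
   (mem (lo_sum (high (symbol_ref "x")) (symbol_ref "x")))
   and the HIGH subexpression is returned as the split point.  */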
4866 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4867 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4868 {
4869 machine_mode address_mode = get_address_mode (x);
4870
4871 SUBST (XEXP (x, 0),
4872 gen_rtx_LO_SUM (address_mode,
4873 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4874 XEXP (x, 0)));
4875 return &XEXP (XEXP (x, 0), 0);
4876 }
4877
4878 /* If we have a PLUS whose second operand is a constant and the
4879 address is not valid, perhaps we can split it up using
4880 the machine-specific way to split large constants. We use
4881 the first pseudo-reg (one of the virtual regs) as a placeholder;
4882 it will not remain in the result. */
4883 if (GET_CODE (XEXP (x, 0)) == PLUS
4884 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4885 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4886 MEM_ADDR_SPACE (x)))
4887 {
4888 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4889 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4890 subst_insn);
4891
4892 /* This should have produced two insns, each of which sets our
4893 placeholder. If the source of the second is a valid address,
4894 we can put both sources together and make a split point
4895 in the middle. */
4896
4897 if (seq
4898 && NEXT_INSN (seq) != NULL_RTX
4899 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4900 && NONJUMP_INSN_P (seq)
4901 && GET_CODE (PATTERN (seq)) == SET
4902 && SET_DEST (PATTERN (seq)) == reg
4903 && ! reg_mentioned_p (reg,
4904 SET_SRC (PATTERN (seq)))
4905 && NONJUMP_INSN_P (NEXT_INSN (seq))
4906 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4907 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4908 && memory_address_addr_space_p
4909 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4910 MEM_ADDR_SPACE (x)))
4911 {
4912 rtx src1 = SET_SRC (PATTERN (seq));
4913 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4914
4915 /* Replace the placeholder in SRC2 with SRC1. If we can
4916 find where in SRC2 it was placed, that can become our
4917 split point and we can replace this address with SRC2.
4918 Just try two obvious places. */
4919
4920 src2 = replace_rtx (src2, reg, src1);
4921 split = 0;
4922 if (XEXP (src2, 0) == src1)
4923 split = &XEXP (src2, 0);
4924 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4925 && XEXP (XEXP (src2, 0), 0) == src1)
4926 split = &XEXP (XEXP (src2, 0), 0);
4927
4928 if (split)
4929 {
4930 SUBST (XEXP (x, 0), src2);
4931 return split;
4932 }
4933 }
4934
4935 /* If that didn't work and we have a nested plus, like:
4936 ((REG1 * CONST1) + REG2) + CONST2, where (REG1 + REG2) + CONST2
4937 is a valid address, try to split (REG1 * CONST1). */
4938 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
4939 && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
4940 && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
4941 && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SUBREG
4942 && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
4943 0), 0)))))
4944 {
4945 rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 0);
4946 XEXP (XEXP (XEXP (x, 0), 0), 0) = reg;
4947 if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4948 MEM_ADDR_SPACE (x)))
4949 {
4950 XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
4951 return &XEXP (XEXP (XEXP (x, 0), 0), 0);
4952 }
4953 XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
4954 }
4955 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
4956 && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
4957 && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
4958 && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SUBREG
4959 && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
4960 0), 1)))))
4961 {
4962 rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 1);
4963 XEXP (XEXP (XEXP (x, 0), 0), 1) = reg;
4964 if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4965 MEM_ADDR_SPACE (x)))
4966 {
4967 XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
4968 return &XEXP (XEXP (XEXP (x, 0), 0), 1);
4969 }
4970 XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
4971 }
4972
4973 /* If that didn't work, perhaps the first operand is complex and
4974 needs to be computed separately, so make a split point there.
4975 This will occur on machines that just support REG + CONST
4976 and have a constant moved through some previous computation. */
4977 if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4978 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4979 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4980 return &XEXP (XEXP (x, 0), 0);
4981 }
4982
4983 /* If we have a PLUS whose first operand is complex, try computing it
4984 separately by making a split there. */
4985 if (GET_CODE (XEXP (x, 0)) == PLUS
4986 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4987 MEM_ADDR_SPACE (x))
4988 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4989 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4990 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4991 return &XEXP (XEXP (x, 0), 0);
4992 break;
4993
4994 case SET:
4995 /* See if we can split SET_SRC as it stands. */
4996 split = find_split_point (&SET_SRC (x), insn, true);
4997 if (split && split != &SET_SRC (x))
4998 return split;
4999
5000 /* See if we can split SET_DEST as it stands. */
5001 split = find_split_point (&SET_DEST (x), insn, false);
5002 if (split && split != &SET_DEST (x))
5003 return split;
5004
5005 /* See if this is a bitfield assignment with everything constant. If
5006 so, this is an IOR of an AND, so split it into that. */
5007 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
5008 && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
5009 &inner_mode)
5010 && HWI_COMPUTABLE_MODE_P (inner_mode)
5011 && CONST_INT_P (XEXP (SET_DEST (x), 1))
5012 && CONST_INT_P (XEXP (SET_DEST (x), 2))
5013 && CONST_INT_P (SET_SRC (x))
5014 && ((INTVAL (XEXP (SET_DEST (x), 1))
5015 + INTVAL (XEXP (SET_DEST (x), 2)))
5016 <= GET_MODE_PRECISION (inner_mode))
5017 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
5018 {
5019 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
5020 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
5021 rtx dest = XEXP (SET_DEST (x), 0);
5022 unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << len) - 1;
5023 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x)) & mask;
5024 rtx or_mask;
5025
5026 if (BITS_BIG_ENDIAN)
5027 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5028
5029 or_mask = gen_int_mode (src << pos, inner_mode);
5030 if (src == mask)
5031 SUBST (SET_SRC (x),
5032 simplify_gen_binary (IOR, inner_mode, dest, or_mask));
5033 else
5034 {
5035 rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
5036 SUBST (SET_SRC (x),
5037 simplify_gen_binary (IOR, inner_mode,
5038 simplify_gen_binary (AND, inner_mode,
5039 dest, negmask),
5040 or_mask));
5041 }
5042
5043 SUBST (SET_DEST (x), dest);
5044
5045 split = find_split_point (&SET_SRC (x), insn, true);
5046 if (split && split != &SET_SRC (x))
5047 return split;
5048 }
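/* Editorial sketch of the mask arithmetic used above, with hypothetical
names; assumes LEN < 32 and the BITS_BIG_ENDIAN adjustment already done.  */
#if 0
static unsigned int
example_store_bitfield (unsigned int dest, unsigned int src,
			unsigned int pos, unsigned int len)
{
  unsigned int mask = (1U << len) - 1;
  /* Clear the field with the AND, then IOR in the shifted value --
     the same IOR-of-an-AND shape the transformation builds.  */
  return (dest & ~(mask << pos)) | ((src & mask) << pos);
}
#endif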
5049
5050 /* Otherwise, see if this is an operation that we can split into two.
5051 If so, try to split that. */
5052 code = GET_CODE (SET_SRC (x));
5053
5054 switch (code)
5055 {
5056 case AND:
5057 /* If we are AND'ing with a large constant that has only a single
5058 bit set and the result is used only in a context where we need
5059 to know whether it is zero or nonzero, replace it with a bit
5060 extraction. This will avoid the large constant, which might
5061 have taken more than one insn to make. If the constant were
5062 not a valid argument to the AND but took only one insn to make,
5063 this is no worse, but if it took more than one insn, it will
5064 be better. */
5065
5066 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5067 && REG_P (XEXP (SET_SRC (x), 0))
5068 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5069 && REG_P (SET_DEST (x))
5070 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5071 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5072 && XEXP (*split, 0) == SET_DEST (x)
5073 && XEXP (*split, 1) == const0_rtx)
5074 {
5075 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5076 XEXP (SET_SRC (x), 0),
5077 pos, NULL_RTX, 1, 1, 0, 0);
5078 if (extraction != 0)
5079 {
5080 SUBST (SET_SRC (x), extraction);
5081 return find_split_point (loc, insn, false);
5082 }
5083 }
5084 break;
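/* Editorial sketch, hypothetical example: testing a single high bit
against zero via a one-bit extraction instead of the large AND mask.
Bit 30 satisfies the position >= 7 check above.  */
#if 0
static int
example_single_bit_test (unsigned int x)
{
  /* (x & 0x40000000) != 0 needs the big constant; the equivalent
     one-bit extraction does not.  */
  return (x >> 30) & 1;
}
#endif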
5085
5086 case NE:
5087 /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
5088 is known to be on, this can be converted into a NEG of a shift. */
5089 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5090 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5091 && ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0),
5092 GET_MODE (XEXP (SET_SRC (x),
5093 0))))) >= 1))
5094 {
5095 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5096 rtx pos_rtx = gen_int_shift_amount (mode, pos);
5097 SUBST (SET_SRC (x),
5098 gen_rtx_NEG (mode,
5099 gen_rtx_LSHIFTRT (mode,
5100 XEXP (SET_SRC (x), 0),
5101 pos_rtx)));
5102
5103 split = find_split_point (&SET_SRC (x), insn, true);
5104 if (split && split != &SET_SRC (x))
5105 return split;
5106 }
5107 break;
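/* Editorial sketch: with STORE_FLAG_VALUE == -1 and bit I the only
possibly-nonzero bit of X, (ne X 0) is 0 or -1, i.e. the NEG of the
logical shift.  Hypothetical 32-bit example with I == 3.  */
#if 0
static int
example_ne_as_neg_of_shift (unsigned int x)
{
  /* X is either 0 or 8 here; -(x >> 3) is 0 or -1 accordingly.  */
  return -(int) (x >> 3);
}
#endif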
5108
5109 case SIGN_EXTEND:
5110 inner = XEXP (SET_SRC (x), 0);
5111
5112 /* We can't optimize if either mode is a partial integer
5113 mode as we don't know how many bits are significant
5114 in those modes. */
5115 if (!is_int_mode (GET_MODE (inner), &inner_mode)
5116 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5117 break;
5118
5119 pos = 0;
5120 len = GET_MODE_PRECISION (inner_mode);
5121 unsignedp = 0;
5122 break;
5123
5124 case SIGN_EXTRACT:
5125 case ZERO_EXTRACT:
5126 if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5127 &inner_mode)
5128 && CONST_INT_P (XEXP (SET_SRC (x), 1))
5129 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5130 {
5131 inner = XEXP (SET_SRC (x), 0);
5132 len = INTVAL (XEXP (SET_SRC (x), 1));
5133 pos = INTVAL (XEXP (SET_SRC (x), 2));
5134
5135 if (BITS_BIG_ENDIAN)
5136 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5137 unsignedp = (code == ZERO_EXTRACT);
5138 }
5139 break;
5140
5141 default:
5142 break;
5143 }
5144
5145 if (len
5146 && known_subrange_p (pos, len,
5147 0, GET_MODE_PRECISION (GET_MODE (inner)))
5148 && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5149 {
5150 /* For unsigned, we have a choice of a shift followed by an
5151 AND or two shifts. Use two shifts for field sizes where the
5152 constant might be too large. We assume here that we can
5153 always at least get 8-bit constants in an AND insn, which is
5154 true for every current RISC. */
5155
5156 if (unsignedp && len <= 8)
5157 {
5158 unsigned HOST_WIDE_INT mask
5159 = (HOST_WIDE_INT_1U << len) - 1;
5160 rtx pos_rtx = gen_int_shift_amount (mode, pos);
5161 SUBST (SET_SRC (x),
5162 gen_rtx_AND (mode,
5163 gen_rtx_LSHIFTRT
5164 (mode, gen_lowpart (mode, inner), pos_rtx),
5165 gen_int_mode (mask, mode)));
5166
5167 split = find_split_point (&SET_SRC (x), insn, true);
5168 if (split && split != &SET_SRC (x))
5169 return split;
5170 }
5171 else
5172 {
5173 int left_bits = GET_MODE_PRECISION (mode) - len - pos;
5174 int right_bits = GET_MODE_PRECISION (mode) - len;
5175 SUBST (SET_SRC (x),
5176 gen_rtx_fmt_ee
5177 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5178 gen_rtx_ASHIFT (mode,
5179 gen_lowpart (mode, inner),
5180 gen_int_shift_amount (mode, left_bits)),
5181 gen_int_shift_amount (mode, right_bits)));
5182
5183 split = find_split_point (&SET_SRC (x), insn, true);
5184 if (split && split != &SET_SRC (x))
5185 return split;
5186 }
5187 }
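/* Editorial sketch of the two strategies, using hypothetical helpers and
assuming 32-bit operands and arithmetic right shifts of signed values.  */
#if 0
static unsigned int
example_extract_unsigned (unsigned int x, unsigned int pos, unsigned int len)
{
  return (x >> pos) & ((1U << len) - 1);	/* shift + AND */
}

static int
example_extract_signed (int x, int pos, int len)
{
  /* Left shift to put the field's top bit in the sign bit, then
     arithmetic right shift to extend it: the two-shift form above.  */
  return (x << (32 - len - pos)) >> (32 - len);
}
#endif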
5188
5189 /* See if this is a simple operation with a constant as the second
5190 operand. It might be that this constant is out of range and hence
5191 could be used as a split point. */
5192 if (BINARY_P (SET_SRC (x))
5193 && CONSTANT_P (XEXP (SET_SRC (x), 1))
5194 && (OBJECT_P (XEXP (SET_SRC (x), 0))
5195 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5196 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5197 return &XEXP (SET_SRC (x), 1);
5198
5199 /* Finally, see if this is a simple operation with its first operand
5200 not in a register. The operation might require this operand in a
5201 register, so return it as a split point. We can always do this
5202 because if the first operand were another operation, we would have
5203 already found it as a split point. */
5204 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5205 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5206 return &XEXP (SET_SRC (x), 0);
5207
5208 return 0;
5209
5210 case AND:
5211 case IOR:
5212 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5213 it is better to write this as (not (ior A B)) so we can split it.
5214 Similarly for IOR. */
5215 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5216 {
5217 SUBST (*loc,
5218 gen_rtx_NOT (GET_MODE (x),
5219 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5220 GET_MODE (x),
5221 XEXP (XEXP (x, 0), 0),
5222 XEXP (XEXP (x, 1), 0))));
5223 return find_split_point (loc, insn, set_src);
5224 }
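/* Editorial note: this rewrite is just De Morgan's law.  Sketch:  */
#if 0
static unsigned int
example_nor (unsigned int a, unsigned int b)
{
  /* ~a & ~b == ~(a | b); the rewritten form exposes the inner IOR
     as a split point when the target lacks a NOR insn.  */
  return ~(a | b);
}
#endif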
5225
5226 /* Many RISC machines have a large set of logical insns. If the
5227 second operand is a NOT, put it first so we will try to split the
5228 other operand first. */
5229 if (GET_CODE (XEXP (x, 1)) == NOT)
5230 {
5231 rtx tem = XEXP (x, 0);
5232 SUBST (XEXP (x, 0), XEXP (x, 1));
5233 SUBST (XEXP (x, 1), tem);
5234 }
5235 break;
5236
5237 case PLUS:
5238 case MINUS:
5239 /* Canonicalization can produce (minus A (mult B C)), where C is a
5240 constant. It may be better to try splitting (plus (mult B -C) A)
5241 instead if this isn't a multiply by a power of two. */
5242 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5243 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5244 && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5245 {
5246 machine_mode mode = GET_MODE (x);
5247 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5248 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5249 SUBST (*loc, gen_rtx_PLUS (mode,
5250 gen_rtx_MULT (mode,
5251 XEXP (XEXP (x, 1), 0),
5252 gen_int_mode (other_int,
5253 mode)),
5254 XEXP (x, 0)));
5255 return find_split_point (loc, insn, set_src);
5256 }
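/* Editorial sketch: a - b*c == b*(-c) + a, with -c truncated to the
mode by trunc_int_for_mode above.  Hypothetical example with c == 5.  */
#if 0
static int
example_minus_mult (int a, int b)
{
  return b * -5 + a;	/* == a - b * 5 */
}
#endif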
5257
5258 /* Split at a multiply-accumulate instruction. However, if this is
5259 the SET_SRC, we likely do not have such an instruction and it's
5260 worthless to try this split. */
5261 if (!set_src
5262 && (GET_CODE (XEXP (x, 0)) == MULT
5263 || (GET_CODE (XEXP (x, 0)) == ASHIFT
5264 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5265 return loc;
5266
5267 default:
5268 break;
5269 }
5270
5271 /* Otherwise, select our actions depending on our rtx class. */
5272 switch (GET_RTX_CLASS (code))
5273 {
5274 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5275 case RTX_TERNARY:
5276 split = find_split_point (&XEXP (x, 2), insn, false);
5277 if (split)
5278 return split;
5279 /* fall through */
5280 case RTX_BIN_ARITH:
5281 case RTX_COMM_ARITH:
5282 case RTX_COMPARE:
5283 case RTX_COMM_COMPARE:
5284 split = find_split_point (&XEXP (x, 1), insn, false);
5285 if (split)
5286 return split;
5287 /* fall through */
5288 case RTX_UNARY:
5289 /* Some machines have (and (shift ...) ...) insns. If X is not
5290 an AND, but XEXP (X, 0) is, use it as our split point. */
5291 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5292 return &XEXP (x, 0);
5293
5294 split = find_split_point (&XEXP (x, 0), insn, false);
5295 if (split)
5296 return split;
5297 return loc;
5298
5299 default:
5300 /* Otherwise, we don't have a split point. */
5301 return 0;
5302 }
5303 }
5304 \f
5305 /* Throughout X, replace FROM with TO, and return the result.
5306 The result is TO if X is FROM;
5307 otherwise the result is X, but its contents may have been modified.
5308 If they were modified, a record was made in undobuf so that
5309 undo_all will (among other things) return X to its original state.
5310
5311 If the number of changes necessary is too large to record for undoing,
5312 the excess changes are not made, so the result is invalid.
5313 The changes already made can still be undone.
5314 undobuf.num_undo is incremented for such changes, so by testing it
5315 the caller can tell whether the result is valid.
5316
5317 `n_occurrences' is incremented each time FROM is replaced.
5318
5319 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5320
5321 IN_COND is nonzero if we are at the top level of a condition.
5322
5323 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5324 by copying if `n_occurrences' is nonzero. */
5325
5326 static rtx
5327 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5328 {
5329 enum rtx_code code = GET_CODE (x);
5330 machine_mode op0_mode = VOIDmode;
5331 const char *fmt;
5332 int len, i;
5333 rtx new_rtx;
5334
5335 /* Two expressions are equal if they are identical copies of a shared
5336 RTX or if they are both registers with the same register number
5337 and mode. */
5338
5339 #define COMBINE_RTX_EQUAL_P(X,Y) \
5340 ((X) == (Y) \
5341 || (REG_P (X) && REG_P (Y) \
5342 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5343
5344 /* Do not substitute into clobbers of regs -- this will never result in
5345 valid RTL. */
5346 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5347 return x;
5348
5349 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5350 {
5351 n_occurrences++;
5352 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5353 }
5354
5355 /* If X and FROM are the same register but different modes, they
5356 will not have been seen as equal above. However, the log links code
5357 will make a LOG_LINKS entry for that case. If we do nothing, we
5358 will try to rerecognize our original insn and, when it succeeds,
5359 we will delete the feeding insn, which is incorrect.
5360
5361 So force this insn not to match in this (rare) case. */
5362 if (! in_dest && code == REG && REG_P (from)
5363 && reg_overlap_mentioned_p (x, from))
5364 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5365
5366 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5367 of which may contain things that can be combined. */
5368 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5369 return x;
5370
5371 /* It is possible to have a subexpression appear twice in the insn.
5372 Suppose that FROM is a register that appears within TO.
5373 Then, after that subexpression has been scanned once by `subst',
5374 the second time it is scanned, TO may be found. If we were
5375 to scan TO here, we would find FROM within it and create a
5376 self-referential rtl structure, which is completely wrong. */
5377 if (COMBINE_RTX_EQUAL_P (x, to))
5378 return to;
5379
5380 /* Parallel asm_operands need special attention because all of the
5381 inputs are shared across the arms. Furthermore, unsharing the
5382 rtl results in recognition failures. Failure to handle this case
5383 specially can result in circular rtl.
5384
5385 Solve this by doing a normal pass across the first entry of the
5386 parallel, and only processing the SET_DESTs of the subsequent
5387 entries. Ug. */
5388
5389 if (code == PARALLEL
5390 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5391 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5392 {
5393 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5394
5395 /* If this substitution failed, this whole thing fails. */
5396 if (GET_CODE (new_rtx) == CLOBBER
5397 && XEXP (new_rtx, 0) == const0_rtx)
5398 return new_rtx;
5399
5400 SUBST (XVECEXP (x, 0, 0), new_rtx);
5401
5402 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5403 {
5404 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5405
5406 if (!REG_P (dest) && GET_CODE (dest) != PC)
5407 {
5408 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5409
5410 /* If this substitution failed, this whole thing fails. */
5411 if (GET_CODE (new_rtx) == CLOBBER
5412 && XEXP (new_rtx, 0) == const0_rtx)
5413 return new_rtx;
5414
5415 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5416 }
5417 }
5418 }
5419 else
5420 {
5421 len = GET_RTX_LENGTH (code);
5422 fmt = GET_RTX_FORMAT (code);
5423
5424 /* We don't need to process a SET_DEST that is a register or PC, so
5425 set up to skip this common case. All other cases where we want
5426 to suppress replacing something inside a SET_SRC are handled via
5427 the IN_DEST operand. */
5428 if (code == SET
5429 && (REG_P (SET_DEST (x))
5430 || GET_CODE (SET_DEST (x)) == PC))
5431 fmt = "ie";
5432
5433 /* Trying to simplify the operands of a widening MULT is not likely
5434 to create RTL matching a machine insn. */
5435 if (code == MULT
5436 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5437 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5438 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5439 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5440 && REG_P (XEXP (XEXP (x, 0), 0))
5441 && REG_P (XEXP (XEXP (x, 1), 0))
5442 && from == to)
5443 return x;
5444
5445
5446 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5447 constant. */
5448 if (fmt[0] == 'e')
5449 op0_mode = GET_MODE (XEXP (x, 0));
5450
5451 for (i = 0; i < len; i++)
5452 {
5453 if (fmt[i] == 'E')
5454 {
5455 int j;
5456 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5457 {
5458 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5459 {
5460 new_rtx = (unique_copy && n_occurrences
5461 ? copy_rtx (to) : to);
5462 n_occurrences++;
5463 }
5464 else
5465 {
5466 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5467 unique_copy);
5468
5469 /* If this substitution failed, this whole thing
5470 fails. */
5471 if (GET_CODE (new_rtx) == CLOBBER
5472 && XEXP (new_rtx, 0) == const0_rtx)
5473 return new_rtx;
5474 }
5475
5476 SUBST (XVECEXP (x, i, j), new_rtx);
5477 }
5478 }
5479 else if (fmt[i] == 'e')
5480 {
5481 /* If this is a register being set, ignore it. */
5482 new_rtx = XEXP (x, i);
5483 if (in_dest
5484 && i == 0
5485 && (((code == SUBREG || code == ZERO_EXTRACT)
5486 && REG_P (new_rtx))
5487 || code == STRICT_LOW_PART))
5488 ;
5489
5490 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5491 {
5492 /* In general, don't install a subreg involving two
5493 modes not tieable. It can worsen register
5494 allocation, and can even make invalid reload
5495 insns, since the reg inside may need to be copied
5496 from in the outside mode, and that may be invalid
5497 if it is an fp reg copied in integer mode.
5498
5499 We allow an exception to this: It is valid if
5500 it is inside another SUBREG and the mode of that
5501 SUBREG and the mode of the inside of TO are
5502 tieable. */
5503
5504 if (GET_CODE (to) == SUBREG
5505 && !targetm.modes_tieable_p (GET_MODE (to),
5506 GET_MODE (SUBREG_REG (to)))
5507 && ! (code == SUBREG
5508 && (targetm.modes_tieable_p
5509 (GET_MODE (x), GET_MODE (SUBREG_REG (to))))))
5510 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5511
5512 if (code == SUBREG
5513 && REG_P (to)
5514 && REGNO (to) < FIRST_PSEUDO_REGISTER
5515 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5516 SUBREG_BYTE (x),
5517 GET_MODE (x)) < 0)
5518 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5519
5520 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5521 n_occurrences++;
5522 }
5523 else
5524 /* If we are in a SET_DEST, suppress most cases unless we
5525 have gone inside a MEM, in which case we want to
5526 simplify the address. We assume here that things that
5527 are actually part of the destination have their inner
5528 parts in the first expression. This is true for SUBREG,
5529 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5530 things aside from REG and MEM that should appear in a
5531 SET_DEST. */
5532 new_rtx = subst (XEXP (x, i), from, to,
5533 (((in_dest
5534 && (code == SUBREG || code == STRICT_LOW_PART
5535 || code == ZERO_EXTRACT))
5536 || code == SET)
5537 && i == 0),
5538 code == IF_THEN_ELSE && i == 0,
5539 unique_copy);
5540
5541 /* If we found that we will have to reject this combination,
5542 indicate that by returning the CLOBBER ourselves, rather than
5543 an expression containing it. This will speed things up as
5544 well as prevent accidents where two CLOBBERs are considered
5545 to be equal, thus producing an incorrect simplification. */
5546
5547 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5548 return new_rtx;
5549
5550 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5551 {
5552 machine_mode mode = GET_MODE (x);
5553
5554 x = simplify_subreg (GET_MODE (x), new_rtx,
5555 GET_MODE (SUBREG_REG (x)),
5556 SUBREG_BYTE (x));
5557 if (! x)
5558 x = gen_rtx_CLOBBER (mode, const0_rtx);
5559 }
5560 else if (CONST_SCALAR_INT_P (new_rtx)
5561 && (GET_CODE (x) == ZERO_EXTEND
5562 || GET_CODE (x) == SIGN_EXTEND
5563 || GET_CODE (x) == FLOAT
5564 || GET_CODE (x) == UNSIGNED_FLOAT))
5565 {
5566 x = simplify_unary_operation (GET_CODE (x), GET_MODE (x),
5567 new_rtx,
5568 GET_MODE (XEXP (x, 0)));
5569 if (!x)
5570 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5571 }
5572 /* CONST_INTs shouldn't be substituted into PRE_DEC, PRE_MODIFY
5573 etc. arguments, otherwise we can ICE before trying to recog
5574 it. See PR104446. */
5575 else if (CONST_SCALAR_INT_P (new_rtx)
5576 && GET_RTX_CLASS (GET_CODE (x)) == RTX_AUTOINC)
5577 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5578 else
5579 SUBST (XEXP (x, i), new_rtx);
5580 }
5581 }
5582 }
5583
5584 /* Check if we are loading something from the constant pool via float
5585 extension; in this case we would undo the compress_float_constant
5586 optimization and degenerate the constant load to an immediate value. */
5587 if (GET_CODE (x) == FLOAT_EXTEND
5588 && MEM_P (XEXP (x, 0))
5589 && MEM_READONLY_P (XEXP (x, 0)))
5590 {
5591 rtx tmp = avoid_constant_pool_reference (x);
5592 if (x != tmp)
5593 return x;
5594 }
5595
5596 /* Try to simplify X. If the simplification changed the code, it is likely
5597 that further simplification will help, so loop, but limit the number
5598 of repetitions that will be performed. */
5599
5600 for (i = 0; i < 4; i++)
5601 {
5602 /* If X is sufficiently simple, don't bother trying to do anything
5603 with it. */
5604 if (code != CONST_INT && code != REG && code != CLOBBER)
5605 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5606
5607 if (GET_CODE (x) == code)
5608 break;
5609
5610 code = GET_CODE (x);
5611
5612 /* We no longer know the original mode of operand 0 since we
5613 have changed the form of X. */
5614 op0_mode = VOIDmode;
5615 }
5616
5617 return x;
5618 }
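/* Editorial sketch, not from the source: callers detect the failure
convention described above by checking for the (clobber (const_int 0))
sentinel that subst returns on a rejected combination.  PAT, FROM and
TO here are hypothetical.  */
#if 0
  rtx res = subst (pat, from, to, 0, 0, 0);
  if (GET_CODE (res) == CLOBBER && XEXP (res, 0) == const0_rtx)
    return 0;	/* Substitution failed; give up on this combination.  */
#endif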
5619 \f
5620 /* If X is a commutative operation whose operands are not in the canonical
5621 order, use substitutions to swap them. */
5622
5623 static void
5624 maybe_swap_commutative_operands (rtx x)
5625 {
5626 if (COMMUTATIVE_ARITH_P (x)
5627 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5628 {
5629 rtx temp = XEXP (x, 0);
5630 SUBST (XEXP (x, 0), XEXP (x, 1));
5631 SUBST (XEXP (x, 1), temp);
5632 }
5633 }
5634
5635 /* Simplify X, a piece of RTL. We just operate on the expression at the
5636 outer level; call `subst' to simplify recursively. Return the new
5637 expression.
5638
5639 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5640 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5641 of a condition. */
5642
5643 static rtx
5644 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5645 int in_cond)
5646 {
5647 enum rtx_code code = GET_CODE (x);
5648 machine_mode mode = GET_MODE (x);
5649 scalar_int_mode int_mode;
5650 rtx temp;
5651 int i;
5652
5653 /* If this is a commutative operation, put a constant last and a complex
5654 expression first. We don't need to do this for comparisons here. */
5655 maybe_swap_commutative_operands (x);
5656
5657 /* Try to fold this expression in case we have constants that weren't
5658 present before. */
5659 temp = 0;
5660 switch (GET_RTX_CLASS (code))
5661 {
5662 case RTX_UNARY:
5663 if (op0_mode == VOIDmode)
5664 op0_mode = GET_MODE (XEXP (x, 0));
5665 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5666 break;
5667 case RTX_COMPARE:
5668 case RTX_COMM_COMPARE:
5669 {
5670 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5671 if (cmp_mode == VOIDmode)
5672 {
5673 cmp_mode = GET_MODE (XEXP (x, 1));
5674 if (cmp_mode == VOIDmode)
5675 cmp_mode = op0_mode;
5676 }
5677 temp = simplify_relational_operation (code, mode, cmp_mode,
5678 XEXP (x, 0), XEXP (x, 1));
5679 }
5680 break;
5681 case RTX_COMM_ARITH:
5682 case RTX_BIN_ARITH:
5683 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5684 break;
5685 case RTX_BITFIELD_OPS:
5686 case RTX_TERNARY:
5687 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5688 XEXP (x, 1), XEXP (x, 2));
5689 break;
5690 default:
5691 break;
5692 }
5693
5694 if (temp)
5695 {
5696 x = temp;
5697 code = GET_CODE (temp);
5698 op0_mode = VOIDmode;
5699 mode = GET_MODE (temp);
5700 }
5701
5702 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5703 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5704 things. Check for cases where both arms are testing the same
5705 condition.
5706
5707 Don't do anything if all operands are very simple. */
5708
5709 if ((BINARY_P (x)
5710 && ((!OBJECT_P (XEXP (x, 0))
5711 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5712 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5713 || (!OBJECT_P (XEXP (x, 1))
5714 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5715 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5716 || (UNARY_P (x)
5717 && (!OBJECT_P (XEXP (x, 0))
5718 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5719 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5720 {
5721 rtx cond, true_rtx, false_rtx;
5722
5723 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5724 if (cond != 0
5725 /* If everything is a comparison, what we have is highly unlikely
5726 to be simpler, so don't use it. */
5727 && ! (COMPARISON_P (x)
5728 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx)))
5729 /* Similarly, if we end up with one of the expressions the same
5730 as the original, it is certainly not simpler. */
5731 && ! rtx_equal_p (x, true_rtx)
5732 && ! rtx_equal_p (x, false_rtx))
5733 {
5734 rtx cop1 = const0_rtx;
5735 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5736
5737 if (cond_code == NE && COMPARISON_P (cond))
5738 return x;
5739
5740 /* Simplify the alternative arms; this may collapse the true and
5741 false arms to store-flag values. Be careful to use copy_rtx
5742 here since true_rtx or false_rtx might share RTL with x as a
5743 result of the if_then_else_cond call above. */
5744 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5745 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5746
5747 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5748 is unlikely to be simpler. */
5749 if (general_operand (true_rtx, VOIDmode)
5750 && general_operand (false_rtx, VOIDmode))
5751 {
5752 enum rtx_code reversed;
5753
5754 /* Restarting if we generate a store-flag expression will cause
5755 us to loop. Just drop through in this case. */
5756
5757 /* If the result values are STORE_FLAG_VALUE and zero, we can
5758 just make the comparison operation. */
5759 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5760 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5761 cond, cop1);
5762 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5763 && ((reversed = reversed_comparison_code_parts
5764 (cond_code, cond, cop1, NULL))
5765 != UNKNOWN))
5766 x = simplify_gen_relational (reversed, mode, VOIDmode,
5767 cond, cop1);
5768
5769 /* Likewise, we can make the negate of a comparison operation
5770 if the result values are - STORE_FLAG_VALUE and zero. */
5771 else if (CONST_INT_P (true_rtx)
5772 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5773 && false_rtx == const0_rtx)
5774 x = simplify_gen_unary (NEG, mode,
5775 simplify_gen_relational (cond_code,
5776 mode, VOIDmode,
5777 cond, cop1),
5778 mode);
5779 else if (CONST_INT_P (false_rtx)
5780 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5781 && true_rtx == const0_rtx
5782 && ((reversed = reversed_comparison_code_parts
5783 (cond_code, cond, cop1, NULL))
5784 != UNKNOWN))
5785 x = simplify_gen_unary (NEG, mode,
5786 simplify_gen_relational (reversed,
5787 mode, VOIDmode,
5788 cond, cop1),
5789 mode);
5790
5791 code = GET_CODE (x);
5792 op0_mode = VOIDmode;
5793 }
5794 }
5795 }
5796
5797 /* First see if we can apply the inverse distributive law. */
5798 if (code == PLUS || code == MINUS
5799 || code == AND || code == IOR || code == XOR)
5800 {
5801 x = apply_distributive_law (x);
5802 code = GET_CODE (x);
5803 op0_mode = VOIDmode;
5804 }
5805
5806 /* If CODE is an associative operation not otherwise handled, see if we
5807 can associate some operands. This can win if they are constants or
5808 if they are logically related (i.e. (a & b) & a). */
5809 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5810 || code == AND || code == IOR || code == XOR
5811 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5812 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5813 || (flag_associative_math && FLOAT_MODE_P (mode))))
5814 {
5815 if (GET_CODE (XEXP (x, 0)) == code)
5816 {
5817 rtx other = XEXP (XEXP (x, 0), 0);
5818 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5819 rtx inner_op1 = XEXP (x, 1);
5820 rtx inner;
5821
5822 /* Make sure we pass the constant operand if any as the second
5823 one if this is a commutative operation. */
5824 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5825 std::swap (inner_op0, inner_op1);
5826 inner = simplify_binary_operation (code == MINUS ? PLUS
5827 : code == DIV ? MULT
5828 : code,
5829 mode, inner_op0, inner_op1);
5830
5831 /* For commutative operations, try the other pair if that one
5832 didn't simplify. */
5833 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5834 {
5835 other = XEXP (XEXP (x, 0), 1);
5836 inner = simplify_binary_operation (code, mode,
5837 XEXP (XEXP (x, 0), 0),
5838 XEXP (x, 1));
5839 }
5840
5841 if (inner)
5842 return simplify_gen_binary (code, mode, other, inner);
5843 }
5844 }
5845
5846 /* A little bit of algebraic simplification here. */
5847 switch (code)
5848 {
5849 case MEM:
5850 /* Ensure that our address has any ASHIFTs converted to MULT in case
5851 address-recognizing predicates are called later. */
5852 temp = make_compound_operation (XEXP (x, 0), MEM);
5853 SUBST (XEXP (x, 0), temp);
5854 break;
5855
5856 case SUBREG:
5857 if (op0_mode == VOIDmode)
5858 op0_mode = GET_MODE (SUBREG_REG (x));
5859
5860 /* See if this can be moved to simplify_subreg. */
5861 if (CONSTANT_P (SUBREG_REG (x))
5862 && known_eq (subreg_lowpart_offset (mode, op0_mode), SUBREG_BYTE (x))
5863 /* Don't call gen_lowpart if the inner mode
5864 is VOIDmode and we cannot simplify it, as SUBREG without
5865 inner mode is invalid. */
5866 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5867 || gen_lowpart_common (mode, SUBREG_REG (x))))
5868 return gen_lowpart (mode, SUBREG_REG (x));
5869
5870 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5871 break;
5872 {
5873 rtx temp;
5874 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5875 SUBREG_BYTE (x));
5876 if (temp)
5877 return temp;
5878
5879 /* If op is known to have all lower bits zero, the result is zero. */
5880 scalar_int_mode int_mode, int_op0_mode;
5881 if (!in_dest
5882 && is_a <scalar_int_mode> (mode, &int_mode)
5883 && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
5884 && (GET_MODE_PRECISION (int_mode)
5885 < GET_MODE_PRECISION (int_op0_mode))
5886 && known_eq (subreg_lowpart_offset (int_mode, int_op0_mode),
5887 SUBREG_BYTE (x))
5888 && HWI_COMPUTABLE_MODE_P (int_op0_mode)
5889 && ((nonzero_bits (SUBREG_REG (x), int_op0_mode)
5890 & GET_MODE_MASK (int_mode)) == 0)
5891 && !side_effects_p (SUBREG_REG (x)))
5892 return CONST0_RTX (int_mode);
5893 }
5894
5895 /* Don't change the mode of the MEM if that would change the meaning
5896 of the address. */
5897 if (MEM_P (SUBREG_REG (x))
5898 && (MEM_VOLATILE_P (SUBREG_REG (x))
5899 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5900 MEM_ADDR_SPACE (SUBREG_REG (x)))))
5901 return gen_rtx_CLOBBER (mode, const0_rtx);
5902
5903 /* Note that we cannot do any narrowing for non-constants since
5904 we might have been counting on using the fact that some bits were
5905 zero. We now do this in the SET. */
5906
5907 break;
5908
5909 case NEG:
5910 temp = expand_compound_operation (XEXP (x, 0));
5911
5912 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5913 replaced by (lshiftrt X C). This will convert
5914 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
5915
5916 if (GET_CODE (temp) == ASHIFTRT
5917 && CONST_INT_P (XEXP (temp, 1))
5918 && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
5919 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5920 INTVAL (XEXP (temp, 1)));
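/* Editorial sketch: for a 32-bit value, -(x >> 31) with an arithmetic
shift equals the logical shift (unsigned) x >> 31 -- both extract the
sign bit as 0 or 1.  Assumes arithmetic right shifts of signed ints.  */
#if 0
static int
example_neg_of_ashiftrt (int x)
{
  return (int) ((unsigned int) x >> 31);	/* == -(x >> 31) */
}
#endif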
5921
5922 /* If X has only a single bit that might be nonzero, say, bit I, convert
5923 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5924 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5925 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5926 or a SUBREG of one since we'd be making the expression more
5927 complex if it was just a register. */
5928
5929 if (!REG_P (temp)
5930 && ! (GET_CODE (temp) == SUBREG
5931 && REG_P (SUBREG_REG (temp)))
5932 && is_a <scalar_int_mode> (mode, &int_mode)
5933 && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
5934 {
5935 rtx temp1 = simplify_shift_const
5936 (NULL_RTX, ASHIFTRT, int_mode,
5937 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
5938 GET_MODE_PRECISION (int_mode) - 1 - i),
5939 GET_MODE_PRECISION (int_mode) - 1 - i);
5940
5941 /* If all we did was surround TEMP with the two shifts, we
5942 haven't improved anything, so don't use it. Otherwise,
5943 we are better off with TEMP1. */
5944 if (GET_CODE (temp1) != ASHIFTRT
5945 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5946 || XEXP (XEXP (temp1, 0), 0) != temp)
5947 return temp1;
5948 }
5949 break;
5950
5951 case TRUNCATE:
5952 /* We can't handle truncation to a partial integer mode here
5953 because we don't know the real bitsize of the partial
5954 integer mode. */
5955 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5956 break;
5957
5958 if (HWI_COMPUTABLE_MODE_P (mode))
5959 SUBST (XEXP (x, 0),
5960 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5961 GET_MODE_MASK (mode), 0));
5962
5963 /* We can truncate a constant value and return it. */
5964 {
5965 poly_int64 c;
5966 if (poly_int_rtx_p (XEXP (x, 0), &c))
5967 return gen_int_mode (c, mode);
5968 }
5969
5970 /* Similarly to what we do in simplify-rtx.cc, a truncate of a register
5971 whose value is a comparison can be replaced with a subreg if
5972 STORE_FLAG_VALUE permits. */
5973 if (HWI_COMPUTABLE_MODE_P (mode)
5974 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5975 && (temp = get_last_value (XEXP (x, 0)))
5976 && COMPARISON_P (temp)
5977 && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (XEXP (x, 0))))
5978 return gen_lowpart (mode, XEXP (x, 0));
5979 break;
5980
5981 case CONST:
5982 /* (const (const X)) can become (const X). Do it this way rather than
5983 returning the inner CONST since CONST can be shared with a
5984 REG_EQUAL note. */
5985 if (GET_CODE (XEXP (x, 0)) == CONST)
5986 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5987 break;
5988
5989 case LO_SUM:
5990 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5991 can add in an offset. find_split_point will split this address up
5992 again if it doesn't match. */
5993 if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
5994 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5995 return XEXP (x, 1);
5996 break;
5997
5998 case PLUS:
5999 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
6000 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
6001 bit-field and can be replaced by either a sign_extend or a
6002 sign_extract. The `and' may be a zero_extend and the two
6003 <c>, -<c> constants may be reversed. */
6004 if (GET_CODE (XEXP (x, 0)) == XOR
6005 && is_a <scalar_int_mode> (mode, &int_mode)
6006 && CONST_INT_P (XEXP (x, 1))
6007 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
6008 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
6009 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
6010 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
6011 && HWI_COMPUTABLE_MODE_P (int_mode)
6012 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
6013 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
6014 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
6015 == (HOST_WIDE_INT_1U << (i + 1)) - 1))
6016 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
6017 && known_eq ((GET_MODE_PRECISION
6018 (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))),
6019 (unsigned int) i + 1))))
6020 return simplify_shift_const
6021 (NULL_RTX, ASHIFTRT, int_mode,
6022 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6023 XEXP (XEXP (XEXP (x, 0), 0), 0),
6024 GET_MODE_PRECISION (int_mode) - (i + 1)),
6025 GET_MODE_PRECISION (int_mode) - (i + 1));
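/* Editorial sketch of the idiom being recognized: XOR'ing an N-bit field
with its sign bit and then subtracting that bit sign-extends the field.
Hypothetical helper with 32-bit assumptions.  */
#if 0
static int
example_sign_extend_field (unsigned int x, unsigned int n)
{
  unsigned int mask = (1U << n) - 1;	/* pow2 - 1 */
  unsigned int sign = 1U << (n - 1);	/* <c>      */
  return (int) (((x & mask) ^ sign) - sign);
}
#endif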
6026
6027 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
6028 can become (ashiftrt (ashift (xor x 1) C) C) where C is
6029 the bitsize of the mode - 1. This allows simplification of
6030 "a = (b & 8) == 0;" */
6031 if (XEXP (x, 1) == constm1_rtx
6032 && !REG_P (XEXP (x, 0))
6033 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
6034 && REG_P (SUBREG_REG (XEXP (x, 0))))
6035 && is_a <scalar_int_mode> (mode, &int_mode)
6036 && nonzero_bits (XEXP (x, 0), int_mode) == 1)
6037 return simplify_shift_const
6038 (NULL_RTX, ASHIFTRT, int_mode,
6039 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6040 gen_rtx_XOR (int_mode, XEXP (x, 0),
6041 const1_rtx),
6042 GET_MODE_PRECISION (int_mode) - 1),
6043 GET_MODE_PRECISION (int_mode) - 1);
6044
6045 /* If we are adding two things that have no bits in common, convert
6046 the addition into an IOR. This will often be further simplified,
6047 for example in cases like ((a & 1) + (a & 2)), which can
6048 become a & 3. */
6049
6050 if (HWI_COMPUTABLE_MODE_P (mode)
6051 && (nonzero_bits (XEXP (x, 0), mode)
6052 & nonzero_bits (XEXP (x, 1), mode)) == 0)
6053 {
6054 /* Try to simplify the expression further. */
6055 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
6056 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
6057
6058 /* If we could, great. If not, do not go ahead with the IOR
6059 replacement, since PLUS appears in many special purpose
6060 address arithmetic instructions. */
6061 if (GET_CODE (temp) != CLOBBER
6062 && (GET_CODE (temp) != IOR
6063 || ((XEXP (temp, 0) != XEXP (x, 0)
6064 || XEXP (temp, 1) != XEXP (x, 1))
6065 && (XEXP (temp, 0) != XEXP (x, 1)
6066 || XEXP (temp, 1) != XEXP (x, 0)))))
6067 return temp;
6068 }
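/* Editorial example: (a & 1) and (a & 2) share no nonzero bits, so the
PLUS is an IOR, which then folds further.  */
#if 0
static unsigned int
example_disjoint_plus (unsigned int a)
{
  return (a & 1) | (a & 2);	/* == (a & 1) + (a & 2) == a & 3 */
}
#endif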
6069
6070 /* Canonicalize x + x into x << 1. */
6071 if (GET_MODE_CLASS (mode) == MODE_INT
6072 && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
6073 && !side_effects_p (XEXP (x, 0)))
6074 return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
6075
6076 break;
6077
6078 case MINUS:
6079 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
6080 (and <foo> (const_int pow2-1)) */
6081 if (is_a <scalar_int_mode> (mode, &int_mode)
6082 && GET_CODE (XEXP (x, 1)) == AND
6083 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
6084 && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6085 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6086 return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
6087 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
6088 break;
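/* Editorial example with pow2 == 8: the AND clears the low three bits,
so the subtraction keeps exactly those bits.  */
#if 0
static unsigned int
example_minus_and (unsigned int x)
{
  return x & 7;		/* == x - (x & -8) */
}
#endif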
6089
6090 case MULT:
6091 /* If we have (mult (plus A B) C), apply the distributive law and then
6092 the inverse distributive law to see if things simplify. This
6093 occurs mostly in addresses, often when unrolling loops. */
6094
6095 if (GET_CODE (XEXP (x, 0)) == PLUS)
6096 {
6097 rtx result = distribute_and_simplify_rtx (x, 0);
6098 if (result)
6099 return result;
6100 }
6101
6102 /* Try to simplify a*(b/c) as (a*b)/c. */
6103 if (FLOAT_MODE_P (mode) && flag_associative_math
6104 && GET_CODE (XEXP (x, 0)) == DIV)
6105 {
6106 rtx tem = simplify_binary_operation (MULT, mode,
6107 XEXP (XEXP (x, 0), 0),
6108 XEXP (x, 1));
6109 if (tem)
6110 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6111 }
6112 break;
6113
6114 case UDIV:
6115 /* If this is a divide by a power of two, treat it as a shift if
6116 its first operand is a shift. */
6117 if (is_a <scalar_int_mode> (mode, &int_mode)
6118 && CONST_INT_P (XEXP (x, 1))
6119 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6120 && (GET_CODE (XEXP (x, 0)) == ASHIFT
6121 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6122 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6123 || GET_CODE (XEXP (x, 0)) == ROTATE
6124 || GET_CODE (XEXP (x, 0)) == ROTATERT))
6125 return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
6126 XEXP (x, 0), i);
6127 break;
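/* Editorial example: an unsigned divide by 2**i is a logical right
shift, and rewriting it here lets it merge with the shift feeding it.  */
#if 0
static unsigned int
example_udiv_pow2 (unsigned int x)
{
  return (x << 2) >> 4;		/* == (x << 2) / 16 */
}
#endif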
6128
6129 case EQ: case NE:
6130 case GT: case GTU: case GE: case GEU:
6131 case LT: case LTU: case LE: case LEU:
6132 case UNEQ: case LTGT:
6133 case UNGT: case UNGE:
6134 case UNLT: case UNLE:
6135 case UNORDERED: case ORDERED:
6136 /* If the first operand is a condition code, we can't do anything
6137 with it. */
6138 if (GET_CODE (XEXP (x, 0)) == COMPARE
6139 || GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC)
6140 {
6141 rtx op0 = XEXP (x, 0);
6142 rtx op1 = XEXP (x, 1);
6143 enum rtx_code new_code;
6144
6145 if (GET_CODE (op0) == COMPARE)
6146 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6147
6148 /* Simplify our comparison, if possible. */
6149 new_code = simplify_comparison (code, &op0, &op1);
6150
6151 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6152 if only the low-order bit is possibly nonzero in X (such as when
6153 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
6154 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
6155 known to be either 0 or -1, NE becomes a NEG and EQ becomes
6156 (plus X 1).
6157
6158 Remove any ZERO_EXTRACT we made when thinking this was a
6159 comparison. It may now be simpler to use, e.g., an AND. If a
6160 ZERO_EXTRACT is indeed appropriate, it will be placed back by
6161 the call to make_compound_operation in the SET case.
6162
6163 Don't apply these optimizations if the caller would
6164 prefer a comparison rather than a value.
6165 E.g., for the condition in an IF_THEN_ELSE most targets need
6166 an explicit comparison. */
6167
6168 if (in_cond)
6169 ;
6170
6171 else if (STORE_FLAG_VALUE == 1
6172 && new_code == NE
6173 && is_int_mode (mode, &int_mode)
6174 && op1 == const0_rtx
6175 && int_mode == GET_MODE (op0)
6176 && nonzero_bits (op0, int_mode) == 1)
6177 return gen_lowpart (int_mode,
6178 expand_compound_operation (op0));
6179
6180 else if (STORE_FLAG_VALUE == 1
6181 && new_code == NE
6182 && is_int_mode (mode, &int_mode)
6183 && op1 == const0_rtx
6184 && int_mode == GET_MODE (op0)
6185 && (num_sign_bit_copies (op0, int_mode)
6186 == GET_MODE_PRECISION (int_mode)))
6187 {
6188 op0 = expand_compound_operation (op0);
6189 return simplify_gen_unary (NEG, int_mode,
6190 gen_lowpart (int_mode, op0),
6191 int_mode);
6192 }
6193
6194 else if (STORE_FLAG_VALUE == 1
6195 && new_code == EQ
6196 && is_int_mode (mode, &int_mode)
6197 && op1 == const0_rtx
6198 && int_mode == GET_MODE (op0)
6199 && nonzero_bits (op0, int_mode) == 1)
6200 {
6201 op0 = expand_compound_operation (op0);
6202 return simplify_gen_binary (XOR, int_mode,
6203 gen_lowpart (int_mode, op0),
6204 const1_rtx);
6205 }
6206
6207 else if (STORE_FLAG_VALUE == 1
6208 && new_code == EQ
6209 && is_int_mode (mode, &int_mode)
6210 && op1 == const0_rtx
6211 && int_mode == GET_MODE (op0)
6212 && (num_sign_bit_copies (op0, int_mode)
6213 == GET_MODE_PRECISION (int_mode)))
6214 {
6215 op0 = expand_compound_operation (op0);
6216 return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
6217 }
6218
6219 /* If STORE_FLAG_VALUE is -1, we have cases similar to
6220 those above. */
6221 if (in_cond)
6222 ;
6223
6224 else if (STORE_FLAG_VALUE == -1
6225 && new_code == NE
6226 && is_int_mode (mode, &int_mode)
6227 && op1 == const0_rtx
6228 && int_mode == GET_MODE (op0)
6229 && (num_sign_bit_copies (op0, int_mode)
6230 == GET_MODE_PRECISION (int_mode)))
6231 return gen_lowpart (int_mode, expand_compound_operation (op0));
6232
6233 else if (STORE_FLAG_VALUE == -1
6234 && new_code == NE
6235 && is_int_mode (mode, &int_mode)
6236 && op1 == const0_rtx
6237 && int_mode == GET_MODE (op0)
6238 && nonzero_bits (op0, int_mode) == 1)
6239 {
6240 op0 = expand_compound_operation (op0);
6241 return simplify_gen_unary (NEG, int_mode,
6242 gen_lowpart (int_mode, op0),
6243 int_mode);
6244 }
6245
6246 else if (STORE_FLAG_VALUE == -1
6247 && new_code == EQ
6248 && is_int_mode (mode, &int_mode)
6249 && op1 == const0_rtx
6250 && int_mode == GET_MODE (op0)
6251 && (num_sign_bit_copies (op0, int_mode)
6252 == GET_MODE_PRECISION (int_mode)))
6253 {
6254 op0 = expand_compound_operation (op0);
6255 return simplify_gen_unary (NOT, int_mode,
6256 gen_lowpart (int_mode, op0),
6257 int_mode);
6258 }
6259
6260 /* If X is 0/1, (eq X 0) is X-1. */
6261 else if (STORE_FLAG_VALUE == -1
6262 && new_code == EQ
6263 && is_int_mode (mode, &int_mode)
6264 && op1 == const0_rtx
6265 && int_mode == GET_MODE (op0)
6266 && nonzero_bits (op0, int_mode) == 1)
6267 {
6268 op0 = expand_compound_operation (op0);
6269 return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
6270 }
6271
6272 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6273 one bit that might be nonzero, we can convert (ne x 0) to
6274 (ashift x c) where C puts the bit in the sign bit. Remove any
6275 AND with STORE_FLAG_VALUE when we are done, since we are only
6276 going to test the sign bit. */
6277 if (new_code == NE
6278 && is_int_mode (mode, &int_mode)
6279 && HWI_COMPUTABLE_MODE_P (int_mode)
6280 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
6281 && op1 == const0_rtx
6282 && int_mode == GET_MODE (op0)
6283 && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
6284 {
6285 x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6286 expand_compound_operation (op0),
6287 GET_MODE_PRECISION (int_mode) - 1 - i);
6288 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6289 return XEXP (x, 0);
6290 else
6291 return x;
6292 }
6293
6294 /* If the code changed, return a whole new comparison.
6295 We also need to avoid using SUBST in cases where
6296 simplify_comparison has widened a comparison with a CONST_INT,
6297 since in that case the wider CONST_INT may fail the sanity
6298 checks in do_SUBST. */
6299 if (new_code != code
6300 || (CONST_INT_P (op1)
6301 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6302 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6303 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6304
6305 /* Otherwise, keep this operation, but maybe change its operands.
6306 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
6307 SUBST (XEXP (x, 0), op0);
6308 SUBST (XEXP (x, 1), op1);
6309 }
6310 break;
6311
6312 case IF_THEN_ELSE:
6313 return simplify_if_then_else (x);
6314
6315 case ZERO_EXTRACT:
6316 case SIGN_EXTRACT:
6317 case ZERO_EXTEND:
6318 case SIGN_EXTEND:
6319 /* If we are processing SET_DEST, we are done. */
6320 if (in_dest)
6321 return x;
6322
6323 return expand_compound_operation (x);
6324
6325 case SET:
6326 return simplify_set (x);
6327
6328 case AND:
6329 case IOR:
6330 return simplify_logical (x);
6331
6332 case ASHIFT:
6333 case LSHIFTRT:
6334 case ASHIFTRT:
6335 case ROTATE:
6336 case ROTATERT:
6337 /* If this is a shift by a constant amount, simplify it. */
6338 if (CONST_INT_P (XEXP (x, 1)))
6339 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6340 INTVAL (XEXP (x, 1)));
6341
6342 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6343 SUBST (XEXP (x, 1),
6344 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6345 (HOST_WIDE_INT_1U
6346 << exact_log2 (GET_MODE_UNIT_BITSIZE
6347 (GET_MODE (x))))
6348 - 1,
6349 0));
6350 break;
6351 case VEC_SELECT:
6352 {
6353 rtx trueop0 = XEXP (x, 0);
6354 mode = GET_MODE (trueop0);
6355 rtx trueop1 = XEXP (x, 1);
6356 /* If we select a low-part subreg, return that. */
6357 if (vec_series_lowpart_p (GET_MODE (x), mode, trueop1))
6358 {
6359 rtx new_rtx = lowpart_subreg (GET_MODE (x), trueop0, mode);
6360 if (new_rtx != NULL_RTX)
6361 return new_rtx;
6362 }
6363 }
6364 break;
6365 default:
6366 break;
6367 }
6368
6369 return x;
6370 }
6371 \f
6372 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6373
6374 static rtx
6375 simplify_if_then_else (rtx x)
6376 {
6377 machine_mode mode = GET_MODE (x);
6378 rtx cond = XEXP (x, 0);
6379 rtx true_rtx = XEXP (x, 1);
6380 rtx false_rtx = XEXP (x, 2);
6381 enum rtx_code true_code = GET_CODE (cond);
6382 int comparison_p = COMPARISON_P (cond);
6383 rtx temp;
6384 int i;
6385 enum rtx_code false_code;
6386 rtx reversed;
6387 scalar_int_mode int_mode, inner_mode;
6388
6389 /* Simplify storing of the truth value. */
6390 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6391 return simplify_gen_relational (true_code, mode, VOIDmode,
6392 XEXP (cond, 0), XEXP (cond, 1));
6393
6394 /* Also when the truth value has to be reversed. */
6395 if (comparison_p
6396 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6397 && (reversed = reversed_comparison (cond, mode)))
6398 return reversed;
6399
6400 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6401 in it is being compared against certain values. Get the true and false
6402 comparisons and see if that says anything about the value of each arm. */
6403
6404 if (comparison_p
6405 && ((false_code = reversed_comparison_code (cond, NULL))
6406 != UNKNOWN)
6407 && REG_P (XEXP (cond, 0)))
6408 {
6409 HOST_WIDE_INT nzb;
6410 rtx from = XEXP (cond, 0);
6411 rtx true_val = XEXP (cond, 1);
6412 rtx false_val = true_val;
6413 int swapped = 0;
6414
6415 /* If FALSE_CODE is EQ, swap the codes and arms. */
6416
6417 if (false_code == EQ)
6418 {
6419 swapped = 1, true_code = EQ, false_code = NE;
6420 std::swap (true_rtx, false_rtx);
6421 }
6422
6423 scalar_int_mode from_mode;
6424 if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
6425 {
6426 /* If we are comparing against zero and the expression being
6427 tested has only a single bit that might be nonzero, that is
6428 its value when it is not equal to zero. Similarly if it is
6429 known to be -1 or 0. */
6430 if (true_code == EQ
6431 && true_val == const0_rtx
6432 && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
6433 {
6434 false_code = EQ;
6435 false_val = gen_int_mode (nzb, from_mode);
6436 }
6437 else if (true_code == EQ
6438 && true_val == const0_rtx
6439 && (num_sign_bit_copies (from, from_mode)
6440 == GET_MODE_PRECISION (from_mode)))
6441 {
6442 false_code = EQ;
6443 false_val = constm1_rtx;
6444 }
6445 }
6446
6447 /* Now simplify an arm if we know the value of the register in the
6448 branch and it is used in the arm. Be careful due to the potential
6449 of locally-shared RTL. */
6450
6451 if (reg_mentioned_p (from, true_rtx))
6452 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6453 from, true_val),
6454 pc_rtx, pc_rtx, 0, 0, 0);
6455 if (reg_mentioned_p (from, false_rtx))
6456 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6457 from, false_val),
6458 pc_rtx, pc_rtx, 0, 0, 0);
6459
6460 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6461 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6462
6463 true_rtx = XEXP (x, 1);
6464 false_rtx = XEXP (x, 2);
6465 true_code = GET_CODE (cond);
6466 }
6467
6468 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6469 reversed, do so to avoid needing two sets of patterns for
6470 subtract-and-branch insns. Similarly if we have a constant in the true
6471 arm, the false arm is the same as the first operand of the comparison, or
6472 the false arm is more complicated than the true arm. */
6473
6474 if (comparison_p
6475 && reversed_comparison_code (cond, NULL) != UNKNOWN
6476 && (true_rtx == pc_rtx
6477 || (CONSTANT_P (true_rtx)
6478 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6479 || true_rtx == const0_rtx
6480 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6481 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6482 && !OBJECT_P (false_rtx))
6483 || reg_mentioned_p (true_rtx, false_rtx)
6484 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6485 {
6486 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6487 SUBST (XEXP (x, 1), false_rtx);
6488 SUBST (XEXP (x, 2), true_rtx);
6489
6490 std::swap (true_rtx, false_rtx);
6491 cond = XEXP (x, 0);
6492
6493 /* It is possible that the conditional has been simplified out. */
6494 true_code = GET_CODE (cond);
6495 comparison_p = COMPARISON_P (cond);
6496 }
6497
6498 /* If the two arms are identical, we don't need the comparison. */
6499
6500 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6501 return true_rtx;
6502
6503 /* Convert a == b ? b : a to "a". */
6504 if (true_code == EQ && ! side_effects_p (cond)
6505 && !HONOR_NANS (mode)
6506 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6507 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6508 return false_rtx;
6509 else if (true_code == NE && ! side_effects_p (cond)
6510 && !HONOR_NANS (mode)
6511 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6512 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6513 return true_rtx;
6514
6515 /* Look for cases where we have (abs x) or (neg (abs X)). */
6516
6517 if (GET_MODE_CLASS (mode) == MODE_INT
6518 && comparison_p
6519 && XEXP (cond, 1) == const0_rtx
6520 && GET_CODE (false_rtx) == NEG
6521 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6522 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6523 && ! side_effects_p (true_rtx))
6524 switch (true_code)
6525 {
6526 case GT:
6527 case GE:
6528 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6529 case LT:
6530 case LE:
6531 return
6532 simplify_gen_unary (NEG, mode,
6533 simplify_gen_unary (ABS, mode, true_rtx, mode),
6534 mode);
6535 default:
6536 break;
6537 }
6538
6539 /* Look for MIN or MAX. */
6540
6541 if ((! FLOAT_MODE_P (mode)
6542 || (flag_unsafe_math_optimizations
6543 && !HONOR_NANS (mode)
6544 && !HONOR_SIGNED_ZEROS (mode)))
6545 && comparison_p
6546 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6547 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6548 && ! side_effects_p (cond))
6549 switch (true_code)
6550 {
6551 case GE:
6552 case GT:
6553 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6554 case LE:
6555 case LT:
6556 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6557 case GEU:
6558 case GTU:
6559 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6560 case LEU:
6561 case LTU:
6562 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6563 default:
6564 break;
6565 }
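/* Editorial example of the mapping: a conditional selecting the smaller
of the two compared values is a signed MIN; the GEU/GTU/LEU/LTU rows
give the unsigned forms.  */
#if 0
static int
example_smin (int a, int b)
{
  return a < b ? a : b;		/* recognized as (smin a b) */
}
#endif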
6566
6567 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6568 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6569 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6570 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6571 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6572 neither 1 nor -1, but it isn't worth checking for. */
6573
6574 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6575 && comparison_p
6576 && is_int_mode (mode, &int_mode)
6577 && ! side_effects_p (x))
6578 {
6579 rtx t = make_compound_operation (true_rtx, SET);
6580 rtx f = make_compound_operation (false_rtx, SET);
6581 rtx cond_op0 = XEXP (cond, 0);
6582 rtx cond_op1 = XEXP (cond, 1);
6583 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6584 scalar_int_mode m = int_mode;
6585 rtx z = 0, c1 = NULL_RTX;
6586
6587 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6588 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6589 || GET_CODE (t) == ASHIFT
6590 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6591 && rtx_equal_p (XEXP (t, 0), f))
6592 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6593
6594 /* If an identity-zero op is commutative, check whether there
6595 would be a match if we swapped the operands. */
6596 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6597 || GET_CODE (t) == XOR)
6598 && rtx_equal_p (XEXP (t, 1), f))
6599 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6600 else if (GET_CODE (t) == SIGN_EXTEND
6601 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6602 && (GET_CODE (XEXP (t, 0)) == PLUS
6603 || GET_CODE (XEXP (t, 0)) == MINUS
6604 || GET_CODE (XEXP (t, 0)) == IOR
6605 || GET_CODE (XEXP (t, 0)) == XOR
6606 || GET_CODE (XEXP (t, 0)) == ASHIFT
6607 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6608 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6609 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6610 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6611 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6612 && (num_sign_bit_copies (f, GET_MODE (f))
6613 > (unsigned int)
6614 (GET_MODE_PRECISION (int_mode)
6615 - GET_MODE_PRECISION (inner_mode))))
6616 {
6617 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6618 extend_op = SIGN_EXTEND;
6619 m = inner_mode;
6620 }
6621 else if (GET_CODE (t) == SIGN_EXTEND
6622 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6623 && (GET_CODE (XEXP (t, 0)) == PLUS
6624 || GET_CODE (XEXP (t, 0)) == IOR
6625 || GET_CODE (XEXP (t, 0)) == XOR)
6626 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6627 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6628 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6629 && (num_sign_bit_copies (f, GET_MODE (f))
6630 > (unsigned int)
6631 (GET_MODE_PRECISION (int_mode)
6632 - GET_MODE_PRECISION (inner_mode))))
6633 {
6634 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6635 extend_op = SIGN_EXTEND;
6636 m = inner_mode;
6637 }
6638 else if (GET_CODE (t) == ZERO_EXTEND
6639 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6640 && (GET_CODE (XEXP (t, 0)) == PLUS
6641 || GET_CODE (XEXP (t, 0)) == MINUS
6642 || GET_CODE (XEXP (t, 0)) == IOR
6643 || GET_CODE (XEXP (t, 0)) == XOR
6644 || GET_CODE (XEXP (t, 0)) == ASHIFT
6645 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6646 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6647 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6648 && HWI_COMPUTABLE_MODE_P (int_mode)
6649 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6650 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6651 && ((nonzero_bits (f, GET_MODE (f))
6652 & ~GET_MODE_MASK (inner_mode))
6653 == 0))
6654 {
6655 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6656 extend_op = ZERO_EXTEND;
6657 m = inner_mode;
6658 }
6659 else if (GET_CODE (t) == ZERO_EXTEND
6660 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6661 && (GET_CODE (XEXP (t, 0)) == PLUS
6662 || GET_CODE (XEXP (t, 0)) == IOR
6663 || GET_CODE (XEXP (t, 0)) == XOR)
6664 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6665 && HWI_COMPUTABLE_MODE_P (int_mode)
6666 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6667 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6668 && ((nonzero_bits (f, GET_MODE (f))
6669 & ~GET_MODE_MASK (inner_mode))
6670 == 0))
6671 {
6672 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6673 extend_op = ZERO_EXTEND;
6674 m = inner_mode;
6675 }
6676
6677 if (z)
6678 {
6679 machine_mode cm = m;
6680 if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT)
6681 && GET_MODE (c1) != VOIDmode)
6682 cm = GET_MODE (c1);
6683 temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
6684 cond_op0, cond_op1),
6685 pc_rtx, pc_rtx, 0, 0, 0);
6686 temp = simplify_gen_binary (MULT, cm, temp,
6687 simplify_gen_binary (MULT, cm, c1,
6688 const_true_rtx));
6689 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6690 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6691
6692 if (extend_op != UNKNOWN)
6693 temp = simplify_gen_unary (extend_op, int_mode, temp, m);
6694
6695 return temp;
6696 }
6697 }
6698
6699 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6700 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6701 negation of a single bit, we can convert this operation to a shift. We
6702 can actually do this more generally, but it doesn't seem worth it. */
6703
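  /* For example, if nonzero_bits says A can only be 0 or 1, then
     (if_then_else (ne a (const_int 0)) (const_int 8) (const_int 0))
     becomes (ashift a (const_int 3)).  */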
6704 if (true_code == NE
6705 && is_a <scalar_int_mode> (mode, &int_mode)
6706 && XEXP (cond, 1) == const0_rtx
6707 && false_rtx == const0_rtx
6708 && CONST_INT_P (true_rtx)
6709 && ((nonzero_bits (XEXP (cond, 0), int_mode) == 1
6710 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6711 || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
6712 == GET_MODE_PRECISION (int_mode))
6713 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6714 return
6715 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6716 gen_lowpart (int_mode, XEXP (cond, 0)), i);
6717
6718 /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6719 non-zero bit in A is C1. */
6720 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6721 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6722 && is_a <scalar_int_mode> (mode, &int_mode)
6723 && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
6724 && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
6725 == nonzero_bits (XEXP (cond, 0), inner_mode)
6726 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
6727 {
6728 rtx val = XEXP (cond, 0);
6729 if (inner_mode == int_mode)
6730 return val;
6731 else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
6732 return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
6733 }
6734
6735 return x;
6736 }
6737 \f
6738 /* Simplify X, a SET expression. Return the new expression. */
6739
6740 static rtx
6741 simplify_set (rtx x)
6742 {
6743 rtx src = SET_SRC (x);
6744 rtx dest = SET_DEST (x);
6745 machine_mode mode
6746 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6747 rtx_insn *other_insn;
6748 rtx *cc_use;
6749 scalar_int_mode int_mode;
6750
6751 /* (set (pc) (return)) gets written as (return). */
6752 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6753 return src;
6754
6755 /* Now that we know for sure which bits of SRC we are using, see if we can
6756 simplify the expression for the object knowing that we only need the
6757 low-order bits. */
6758
6759 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6760 {
6761 src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6762 SUBST (SET_SRC (x), src);
6763 }
6764
6765 /* If the source is a COMPARE, look for the use of the comparison result
6766 and try to simplify it unless we already have used undobuf.other_insn. */
6767 if ((GET_MODE_CLASS (mode) == MODE_CC || GET_CODE (src) == COMPARE)
6768 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6769 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6770 && COMPARISON_P (*cc_use)
6771 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6772 {
6773 enum rtx_code old_code = GET_CODE (*cc_use);
6774 enum rtx_code new_code;
6775 rtx op0, op1, tmp;
6776 int other_changed = 0;
6777 rtx inner_compare = NULL_RTX;
6778 machine_mode compare_mode = GET_MODE (dest);
6779
6780 if (GET_CODE (src) == COMPARE)
6781 {
6782 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6783 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6784 {
6785 inner_compare = op0;
6786 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6787 }
6788 }
6789 else
6790 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6791
6792 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6793 op0, op1);
6794 if (!tmp)
6795 new_code = old_code;
6796 else if (!CONSTANT_P (tmp))
6797 {
6798 new_code = GET_CODE (tmp);
6799 op0 = XEXP (tmp, 0);
6800 op1 = XEXP (tmp, 1);
6801 }
6802 else
6803 {
6804 rtx pat = PATTERN (other_insn);
6805 undobuf.other_insn = other_insn;
6806 SUBST (*cc_use, tmp);
6807
6808 /* Attempt to simplify CC user. */
6809 if (GET_CODE (pat) == SET)
6810 {
6811 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6812 if (new_rtx != NULL_RTX)
6813 SUBST (SET_SRC (pat), new_rtx);
6814 }
6815
6816 /* Convert X into a no-op move. */
6817 SUBST (SET_DEST (x), pc_rtx);
6818 SUBST (SET_SRC (x), pc_rtx);
6819 return x;
6820 }
6821
6822 /* Simplify our comparison, if possible. */
6823 new_code = simplify_comparison (new_code, &op0, &op1);
6824
6825 #ifdef SELECT_CC_MODE
6826 /* If this machine has CC modes other than CCmode, check to see if we
6827 need to use a different CC mode here. */
6828 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6829 compare_mode = GET_MODE (op0);
6830 else if (inner_compare
6831 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6832 && new_code == old_code
6833 && op0 == XEXP (inner_compare, 0)
6834 && op1 == XEXP (inner_compare, 1))
6835 compare_mode = GET_MODE (inner_compare);
6836 else
6837 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6838
6839 /* If the mode changed, we have to change SET_DEST, the mode in the
6840 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6841 a hard register, just build new versions with the proper mode. If it
6842 is a pseudo, we lose unless it is the only time we set the pseudo, in
6843 which case we can safely change its mode. */
6844 if (compare_mode != GET_MODE (dest))
6845 {
6846 if (can_change_dest_mode (dest, 0, compare_mode))
6847 {
6848 unsigned int regno = REGNO (dest);
6849 rtx new_dest;
6850
6851 if (regno < FIRST_PSEUDO_REGISTER)
6852 new_dest = gen_rtx_REG (compare_mode, regno);
6853 else
6854 {
6855 subst_mode (regno, compare_mode);
6856 new_dest = regno_reg_rtx[regno];
6857 }
6858
6859 SUBST (SET_DEST (x), new_dest);
6860 SUBST (XEXP (*cc_use, 0), new_dest);
6861 other_changed = 1;
6862
6863 dest = new_dest;
6864 }
6865 }
6866 #endif /* SELECT_CC_MODE */
6867
6868 /* If the code changed, we have to build a new comparison in
6869 undobuf.other_insn. */
6870 if (new_code != old_code)
6871 {
6872 int other_changed_previously = other_changed;
6873 unsigned HOST_WIDE_INT mask;
6874 rtx old_cc_use = *cc_use;
6875
6876 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6877 dest, const0_rtx));
6878 other_changed = 1;
6879
6880 /* If the only change we made was to change an EQ into an NE or
6881 vice versa, OP0 has only one bit that might be nonzero, and OP1
6882 is zero, check if changing the user of the condition code will
6883 produce a valid insn. If it won't, we can keep the original code
6884 in that insn by surrounding our operation with an XOR. */
6885
6886 if (((old_code == NE && new_code == EQ)
6887 || (old_code == EQ && new_code == NE))
6888 && ! other_changed_previously && op1 == const0_rtx
6889 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6890 && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
6891 {
6892 rtx pat = PATTERN (other_insn), note = 0;
6893
6894 if ((recog_for_combine (&pat, other_insn, &note) < 0
6895 && ! check_asm_operands (pat)))
6896 {
6897 *cc_use = old_cc_use;
6898 other_changed = 0;
6899
6900 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
6901 gen_int_mode (mask,
6902 GET_MODE (op0)));
6903 }
6904 }
6905 }
6906
6907 if (other_changed)
6908 undobuf.other_insn = other_insn;
6909
6910 /* Don't generate a compare of a CC with 0, just use that CC. */
6911 if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6912 {
6913 SUBST (SET_SRC (x), op0);
6914 src = SET_SRC (x);
6915 }
6916 /* Otherwise, if we didn't previously have the same COMPARE we
6917 want, create it from scratch. */
6918 else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
6919 || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6920 {
6921 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6922 src = SET_SRC (x);
6923 }
6924 }
6925 else
6926 {
6927 /* Get SET_SRC in a form where we have placed back any
6928 compound expressions. Then do the checks below. */
6929 src = make_compound_operation (src, SET);
6930 SUBST (SET_SRC (x), src);
6931 }
6932
6933 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6934 and X being a REG or (subreg (reg)), we may be able to convert this to
6935 (set (subreg:m2 x) (op)).
6936
6937 We can always do this if M1 is narrower than M2 because that means that
6938 we only care about the low bits of the result.
6939
6940 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6941 perform a narrower operation than requested since the high-order bits will
6942 be undefined. On machines where it is defined, this transformation is safe
6943 as long as M1 and M2 have the same number of words. */
6944
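  /* For instance, on a target with 64-bit words,
     (set x:SI (subreg:SI (plus:DI y z) 0)) can become
     (set (subreg:DI x) (plus:DI y z)).  */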
6945 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6946 && !OBJECT_P (SUBREG_REG (src))
6947 && (known_equal_after_align_up
6948 (GET_MODE_SIZE (GET_MODE (src)),
6949 GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))),
6950 UNITS_PER_WORD))
6951 && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
6952 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6953 && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
6954 GET_MODE (SUBREG_REG (src)),
6955 GET_MODE (src)))
6956 && (REG_P (dest)
6957 || (GET_CODE (dest) == SUBREG
6958 && REG_P (SUBREG_REG (dest)))))
6959 {
6960 SUBST (SET_DEST (x),
6961 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6962 dest));
6963 SUBST (SET_SRC (x), SUBREG_REG (src));
6964
6965 src = SET_SRC (x), dest = SET_DEST (x);
6966 }
6967
6968 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6969 would require a paradoxical subreg. Replace the subreg with a
6970 zero_extend to avoid the reload that would otherwise be required.
6971 Don't do this unless we have a scalar integer mode, otherwise the
6972 transformation is incorrect. */
6973
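  /* For example, on a target whose byte loads zero-extend,
     (subreg:SI (mem:QI addr) 0) is rewritten as
     (zero_extend:SI (mem:QI addr)).  */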
6974 enum rtx_code extend_op;
6975 if (paradoxical_subreg_p (src)
6976 && MEM_P (SUBREG_REG (src))
6977 && SCALAR_INT_MODE_P (GET_MODE (src))
6978 && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
6979 {
6980 SUBST (SET_SRC (x),
6981 gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
6982
6983 src = SET_SRC (x);
6984 }
6985
6986 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6987 are comparing an item known to be 0 or -1 against 0, use a logical
6988 operation instead. Check for one of the arms being an IOR of the other
6989 arm with some value. We compute three terms to be IOR'ed together. In
6990 practice, at most two will be nonzero. Then we do the IORs. */
6991
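  /* With A known to be 0 or -1, the basic rewrite is
     (if_then_else (ne a (const_int 0)) b c)
     -> (ior (and a b) (and (not a) c)).  */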
6992 if (GET_CODE (dest) != PC
6993 && GET_CODE (src) == IF_THEN_ELSE
6994 && is_int_mode (GET_MODE (src), &int_mode)
6995 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6996 && XEXP (XEXP (src, 0), 1) == const0_rtx
6997 && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
6998 && (!HAVE_conditional_move
6999 || ! can_conditionally_move_p (int_mode))
7000 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
7001 == GET_MODE_PRECISION (int_mode))
7002 && ! side_effects_p (src))
7003 {
7004 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
7005 ? XEXP (src, 1) : XEXP (src, 2));
7006 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
7007 ? XEXP (src, 2) : XEXP (src, 1));
7008 rtx term1 = const0_rtx, term2, term3;
7009
7010 if (GET_CODE (true_rtx) == IOR
7011 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
7012 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
7013 else if (GET_CODE (true_rtx) == IOR
7014 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
7015 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
7016 else if (GET_CODE (false_rtx) == IOR
7017 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
7018 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
7019 else if (GET_CODE (false_rtx) == IOR
7020 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
7021 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
7022
7023 term2 = simplify_gen_binary (AND, int_mode,
7024 XEXP (XEXP (src, 0), 0), true_rtx);
7025 term3 = simplify_gen_binary (AND, int_mode,
7026 simplify_gen_unary (NOT, int_mode,
7027 XEXP (XEXP (src, 0), 0),
7028 int_mode),
7029 false_rtx);
7030
7031 SUBST (SET_SRC (x),
7032 simplify_gen_binary (IOR, int_mode,
7033 simplify_gen_binary (IOR, int_mode,
7034 term1, term2),
7035 term3));
7036
7037 src = SET_SRC (x);
7038 }
7039
7040 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
7041 whole thing fail. */
7042 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
7043 return src;
7044 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
7045 return dest;
7046 else
7047 /* Convert this into a field assignment operation, if possible. */
7048 return make_field_assignment (x);
7049 }
7050 \f
7051 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
7052 result. */
7053
7054 static rtx
7055 simplify_logical (rtx x)
7056 {
7057 rtx op0 = XEXP (x, 0);
7058 rtx op1 = XEXP (x, 1);
7059 scalar_int_mode mode;
7060
7061 switch (GET_CODE (x))
7062 {
7063 case AND:
7064 /* We can call simplify_and_const_int only if we don't lose
7065 any (sign) bits when converting INTVAL (op1) to
7066 "unsigned HOST_WIDE_INT". */
7067 if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
7068 && CONST_INT_P (op1)
7069 && (HWI_COMPUTABLE_MODE_P (mode)
7070 || INTVAL (op1) > 0))
7071 {
7072 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
7073 if (GET_CODE (x) != AND)
7074 return x;
7075
7076 op0 = XEXP (x, 0);
7077 op1 = XEXP (x, 1);
7078 }
7079
7080 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
7081 apply the distributive law and then the inverse distributive
7082 law to see if things simplify. */
7083 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
7084 {
7085 rtx result = distribute_and_simplify_rtx (x, 0);
7086 if (result)
7087 return result;
7088 }
7089 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
7090 {
7091 rtx result = distribute_and_simplify_rtx (x, 1);
7092 if (result)
7093 return result;
7094 }
7095 break;
7096
7097 case IOR:
7098 /* If we have (ior (and A B) C), apply the distributive law and then
7099 the inverse distributive law to see if things simplify. */
7100
7101 if (GET_CODE (op0) == AND)
7102 {
7103 rtx result = distribute_and_simplify_rtx (x, 0);
7104 if (result)
7105 return result;
7106 }
7107
7108 if (GET_CODE (op1) == AND)
7109 {
7110 rtx result = distribute_and_simplify_rtx (x, 1);
7111 if (result)
7112 return result;
7113 }
7114 break;
7115
7116 default:
7117 gcc_unreachable ();
7118 }
7119
7120 return x;
7121 }
7122 \f
7123 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7124 operations" because they can be replaced with two more basic operations.
7125 ZERO_EXTEND is also considered "compound" because it can be replaced with
7126 an AND operation, which is simpler, though only one operation.
7127
7128 The function expand_compound_operation is called with an rtx expression
7129 and will convert it to the appropriate shifts and AND operations,
7130 simplifying at each stage.
7131
7132 The function make_compound_operation is called to convert an expression
7133 consisting of shifts and ANDs into the equivalent compound expression.
7134 It is the inverse of this function, loosely speaking. */
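/* For example, (zero_extend:SI (subreg:QI (reg:SI x) 0)) typically expands
   here to (and:SI (reg:SI x) (const_int 255)), while a SIGN_EXTRACT
   generally becomes an ASHIFT followed by an ASHIFTRT.  */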
7135
7136 static rtx
7137 expand_compound_operation (rtx x)
7138 {
7139 unsigned HOST_WIDE_INT pos = 0, len;
7140 int unsignedp = 0;
7141 unsigned int modewidth;
7142 rtx tem;
7143 scalar_int_mode inner_mode;
7144
7145 switch (GET_CODE (x))
7146 {
7147 case ZERO_EXTEND:
7148 unsignedp = 1;
7149 /* FALLTHRU */
7150 case SIGN_EXTEND:
7151 /* We can't necessarily use a const_int for a multiword mode;
7152 it depends on implicitly extending the value.
7153 Since we don't know the right way to extend it,
7154 we can't tell whether the implicit way is right.
7155
7156 Even for a mode that is no wider than a const_int,
7157 we can't win, because we need to sign extend one of its bits through
7158 the rest of it, and we don't know which bit. */
7159 if (CONST_INT_P (XEXP (x, 0)))
7160 return x;
7161
7162 /* Reject modes that aren't scalar integers because turning vector
7163 or complex modes into shifts causes problems. */
7164 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7165 return x;
7166
7167 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7168 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
7169 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7170 reloaded. If not for that, MEMs would very rarely be safe.
7171
7172 Reject modes bigger than a word, because we might not be able
7173 to reference a two-register group starting with an arbitrary register
7174 (and currently gen_lowpart might crash for a SUBREG). */
7175
7176 if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7177 return x;
7178
7179 len = GET_MODE_PRECISION (inner_mode);
7180 /* If the inner object has VOIDmode (the only way this can happen
7181 is if it is an ASM_OPERANDS), we can't do anything since we don't
7182 know how much masking to do. */
7183 if (len == 0)
7184 return x;
7185
7186 break;
7187
7188 case ZERO_EXTRACT:
7189 unsignedp = 1;
7190
7191 /* fall through */
7192
7193 case SIGN_EXTRACT:
7194 /* If the operand is a CLOBBER, just return it. */
7195 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7196 return XEXP (x, 0);
7197
7198 if (!CONST_INT_P (XEXP (x, 1))
7199 || !CONST_INT_P (XEXP (x, 2)))
7200 return x;
7201
7202 /* Reject modes that aren't scalar integers because turning vector
7203 or complex modes into shifts causes problems. */
7204 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7205 return x;
7206
7207 len = INTVAL (XEXP (x, 1));
7208 pos = INTVAL (XEXP (x, 2));
7209
7210 /* The extraction should stay within the object being extracted; fail otherwise. */
7211 if (len + pos > GET_MODE_PRECISION (inner_mode))
7212 return x;
7213
7214 if (BITS_BIG_ENDIAN)
7215 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7216
7217 break;
7218
7219 default:
7220 return x;
7221 }
7222
7223 /* We've rejected non-scalar operations by now. */
7224 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
7225
7226 /* Convert sign extension to zero extension, if we know that the high
7227 bit is not set, as this is easier to optimize. It will be converted
7228 back to a cheaper alternative in make_extraction. */
7229 if (GET_CODE (x) == SIGN_EXTEND
7230 && HWI_COMPUTABLE_MODE_P (mode)
7231 && ((nonzero_bits (XEXP (x, 0), inner_mode)
7232 & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
7233 == 0))
7234 {
7235 rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7236 rtx temp2 = expand_compound_operation (temp);
7237
7238 /* Make sure this is a profitable operation. */
7239 if (set_src_cost (x, mode, optimize_this_for_speed_p)
7240 > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7241 return temp2;
7242 else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7243 > set_src_cost (temp, mode, optimize_this_for_speed_p))
7244 return temp;
7245 else
7246 return x;
7247 }
7248
7249 /* We can optimize some special cases of ZERO_EXTEND. */
7250 if (GET_CODE (x) == ZERO_EXTEND)
7251 {
7252 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7253 know that the last value didn't have any inappropriate bits
7254 set. */
7255 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7256 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7257 && HWI_COMPUTABLE_MODE_P (mode)
7258 && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
7259 & ~GET_MODE_MASK (inner_mode)) == 0)
7260 return XEXP (XEXP (x, 0), 0);
7261
7262 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7263 if (GET_CODE (XEXP (x, 0)) == SUBREG
7264 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7265 && subreg_lowpart_p (XEXP (x, 0))
7266 && HWI_COMPUTABLE_MODE_P (mode)
7267 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
7268 & ~GET_MODE_MASK (inner_mode)) == 0)
7269 return SUBREG_REG (XEXP (x, 0));
7270
7271 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7272 is a comparison and STORE_FLAG_VALUE permits. This is like
7273 the first case, but it works even when MODE is larger
7274 than HOST_WIDE_INT. */
7275 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7276 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7277 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7278 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7279 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7280 return XEXP (XEXP (x, 0), 0);
7281
7282 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7283 if (GET_CODE (XEXP (x, 0)) == SUBREG
7284 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7285 && subreg_lowpart_p (XEXP (x, 0))
7286 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7287 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7288 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7289 return SUBREG_REG (XEXP (x, 0));
7290
7291 }
7292
7293 /* If we reach here, we want to return a pair of shifts. The inner
7294 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7295 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7296 logical depending on the value of UNSIGNEDP.
7297
7298 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7299 converted into an AND of a shift.
7300
7301 We must check for the case where the left shift would have a negative
7302 count. This can happen in a case like (x >> 31) & 255 on machines
7303 that can't shift by a constant. On those machines, we would first
7304 combine the shift with the AND to produce a variable-position
7305 extraction. Then the constant of 31 would be substituted in
7306 to produce such a position. */
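  /* For a concrete SImode case, (sign_extract:SI x (const_int 8)
     (const_int 4)) becomes (ashiftrt:SI (ashift:SI x (const_int 20))
     (const_int 24)): 32 - 4 - 8 = 20 for the inner shift and
     32 - 8 = 24 for the outer one.  */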
7307
7308 modewidth = GET_MODE_PRECISION (mode);
7309 if (modewidth >= pos + len)
7310 {
7311 tem = gen_lowpart (mode, XEXP (x, 0));
7312 if (!tem || GET_CODE (tem) == CLOBBER)
7313 return x;
7314 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7315 tem, modewidth - pos - len);
7316 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7317 mode, tem, modewidth - len);
7318 }
7319 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7320 {
7321 tem = simplify_shift_const (NULL_RTX, LSHIFTRT, inner_mode,
7322 XEXP (x, 0), pos);
7323 tem = gen_lowpart (mode, tem);
7324 if (!tem || GET_CODE (tem) == CLOBBER)
7325 return x;
7326 tem = simplify_and_const_int (NULL_RTX, mode, tem,
7327 (HOST_WIDE_INT_1U << len) - 1);
7328 }
7329 else
7330 /* Any other cases we can't handle. */
7331 return x;
7332
7333 /* If we couldn't do this for some reason, return the original
7334 expression. */
7335 if (GET_CODE (tem) == CLOBBER)
7336 return x;
7337
7338 return tem;
7339 }
7340 \f
7341 /* X is a SET which contains an assignment of one object into
7342 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7343 or certain SUBREGS). If possible, convert it into a series of
7344 logical operations.
7345
7346 We half-heartedly support variable positions, but do not at all
7347 support variable lengths. */
7348
7349 static const_rtx
7350 expand_field_assignment (const_rtx x)
7351 {
7352 rtx inner;
7353 rtx pos; /* Always counts from low bit. */
7354 int len, inner_len;
7355 rtx mask, cleared, masked;
7356 scalar_int_mode compute_mode;
7357
7358 /* Loop until we find something we can't simplify. */
7359 while (1)
7360 {
7361 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7362 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7363 {
7364 rtx x0 = XEXP (SET_DEST (x), 0);
7365 if (!GET_MODE_PRECISION (GET_MODE (x0)).is_constant (&len))
7366 break;
7367 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7368 pos = gen_int_mode (subreg_lsb (XEXP (SET_DEST (x), 0)),
7369 MAX_MODE_INT);
7370 }
7371 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7372 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7373 {
7374 inner = XEXP (SET_DEST (x), 0);
7375 if (!GET_MODE_PRECISION (GET_MODE (inner)).is_constant (&inner_len))
7376 break;
7377
7378 len = INTVAL (XEXP (SET_DEST (x), 1));
7379 pos = XEXP (SET_DEST (x), 2);
7380
7381 /* A constant position should stay within the width of INNER. */
7382 if (CONST_INT_P (pos) && INTVAL (pos) + len > inner_len)
7383 break;
7384
7385 if (BITS_BIG_ENDIAN)
7386 {
7387 if (CONST_INT_P (pos))
7388 pos = GEN_INT (inner_len - len - INTVAL (pos));
7389 else if (GET_CODE (pos) == MINUS
7390 && CONST_INT_P (XEXP (pos, 1))
7391 && INTVAL (XEXP (pos, 1)) == inner_len - len)
7392 /* If position is ADJUST - X, new position is X. */
7393 pos = XEXP (pos, 0);
7394 else
7395 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7396 gen_int_mode (inner_len - len,
7397 GET_MODE (pos)),
7398 pos);
7399 }
7400 }
7401
7402 /* If the destination is a subreg that overwrites the whole of the inner
7403 register, we can move the subreg to the source. */
7404 else if (GET_CODE (SET_DEST (x)) == SUBREG
7405 /* We need SUBREGs to compute nonzero_bits properly. */
7406 && nonzero_sign_valid
7407 && !read_modify_subreg_p (SET_DEST (x)))
7408 {
7409 x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7410 gen_lowpart
7411 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7412 SET_SRC (x)));
7413 continue;
7414 }
7415 else
7416 break;
7417
7418 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7419 inner = SUBREG_REG (inner);
7420
7421 /* Don't attempt bitwise arithmetic on non scalar integer modes. */
7422 if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7423 {
7424 /* Don't do anything for vector or complex integral types. */
7425 if (! FLOAT_MODE_P (GET_MODE (inner)))
7426 break;
7427
7428 /* Try to find an integral mode to pun with. */
7429 if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
7430 .exists (&compute_mode))
7431 break;
7432
7433 inner = gen_lowpart (compute_mode, inner);
7434 }
7435
7436 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7437 if (len >= HOST_BITS_PER_WIDE_INT)
7438 break;
7439
7440 /* Don't try to compute in too wide unsupported modes. */
7441 if (!targetm.scalar_mode_supported_p (compute_mode))
7442 break;
7443
7444 /* Now compute the equivalent expression. Make a copy of INNER
7445 for the SET_DEST in case it is a MEM into which we will substitute;
7446 we don't want shared RTL in that case. */
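      /* In C terms, with mask == (1 << len) - 1, this computes
         (inner & ~(mask << pos)) | ((src & mask) << pos).  */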
7447 mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7448 compute_mode);
7449 cleared = simplify_gen_binary (AND, compute_mode,
7450 simplify_gen_unary (NOT, compute_mode,
7451 simplify_gen_binary (ASHIFT,
7452 compute_mode,
7453 mask, pos),
7454 compute_mode),
7455 inner);
7456 masked = simplify_gen_binary (ASHIFT, compute_mode,
7457 simplify_gen_binary (
7458 AND, compute_mode,
7459 gen_lowpart (compute_mode, SET_SRC (x)),
7460 mask),
7461 pos);
7462
7463 x = gen_rtx_SET (copy_rtx (inner),
7464 simplify_gen_binary (IOR, compute_mode,
7465 cleared, masked));
7466 }
7467
7468 return x;
7469 }
7470 \f
7471 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7472 it is an RTX that represents the (variable) starting position; otherwise,
7473 POS is the (constant) starting bit position. Both are counted from the LSB.
7474
7475 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7476
7477 IN_DEST is nonzero if this is a reference in the destination of a SET.
7478 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7479 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7480 be used.
7481
7482 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7483 ZERO_EXTRACT should be built even for bits starting at bit 0.
7484
7485 MODE is the desired mode of the result (if IN_DEST == 0).
7486
7487 The result is an RTX for the extraction or NULL_RTX if the target
7488 can't handle it. */
7489
7490 static rtx
7491 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7492 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7493 int in_dest, int in_compare)
7494 {
7495 /* This mode describes the size of the storage area
7496 to fetch the overall value from. Within that, we
7497 ignore the POS lowest bits, etc. */
7498 machine_mode is_mode = GET_MODE (inner);
7499 machine_mode inner_mode;
7500 scalar_int_mode wanted_inner_mode;
7501 scalar_int_mode wanted_inner_reg_mode = word_mode;
7502 scalar_int_mode pos_mode = word_mode;
7503 machine_mode extraction_mode = word_mode;
7504 rtx new_rtx = 0;
7505 rtx orig_pos_rtx = pos_rtx;
7506 HOST_WIDE_INT orig_pos;
7507
7508 if (pos_rtx && CONST_INT_P (pos_rtx))
7509 pos = INTVAL (pos_rtx), pos_rtx = 0;
7510
7511 if (GET_CODE (inner) == SUBREG
7512 && subreg_lowpart_p (inner)
7513 && (paradoxical_subreg_p (inner)
7514 /* If trying or potentially trying to extract
7515 bits outside of is_mode, don't look through
7516 non-paradoxical SUBREGs. See PR82192. */
7517 || (pos_rtx == NULL_RTX
7518 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))))
7519 {
7520 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7521 consider just the QI as the memory to extract from.
7522 The subreg adds or removes high bits; its mode is
7523 irrelevant to the meaning of this extraction,
7524 since POS and LEN count from the lsb. */
7525 if (MEM_P (SUBREG_REG (inner)))
7526 is_mode = GET_MODE (SUBREG_REG (inner));
7527 inner = SUBREG_REG (inner);
7528 }
7529 else if (GET_CODE (inner) == ASHIFT
7530 && CONST_INT_P (XEXP (inner, 1))
7531 && pos_rtx == 0 && pos == 0
7532 && len > UINTVAL (XEXP (inner, 1)))
7533 {
7534 /* We're extracting the least significant bits of an rtx
7535 (ashift X (const_int C)), where LEN > C. Extract the
7536 least significant (LEN - C) bits of X, giving an rtx
7537 whose mode is MODE, then shift it left C times. */
7538 new_rtx = make_extraction (mode, XEXP (inner, 0),
7539 0, 0, len - INTVAL (XEXP (inner, 1)),
7540 unsignedp, in_dest, in_compare);
7541 if (new_rtx != 0)
7542 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7543 }
7544 else if (GET_CODE (inner) == MULT
7545 && CONST_INT_P (XEXP (inner, 1))
7546 && pos_rtx == 0 && pos == 0)
7547 {
7548 /* We're extracting the least significant bits of an rtx
7549 (mult X (const_int 2^C)), where LEN > C. Extract the
7550 least significant (LEN - C) bits of X, giving an rtx
7551 whose mode is MODE, then multiply it by 2^C. */
7552 const HOST_WIDE_INT shift_amt = exact_log2 (INTVAL (XEXP (inner, 1)));
7553 if (IN_RANGE (shift_amt, 1, len - 1))
7554 {
7555 new_rtx = make_extraction (mode, XEXP (inner, 0),
7556 0, 0, len - shift_amt,
7557 unsignedp, in_dest, in_compare);
7558 if (new_rtx)
7559 return gen_rtx_MULT (mode, new_rtx, XEXP (inner, 1));
7560 }
7561 }
7562 else if (GET_CODE (inner) == TRUNCATE
7563 /* If trying or potentially trying to extract
7564 bits outside of is_mode, don't look through
7565 TRUNCATE. See PR82192. */
7566 && pos_rtx == NULL_RTX
7567 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))
7568 inner = XEXP (inner, 0);
7569
7570 inner_mode = GET_MODE (inner);
7571
7572 /* See if this can be done without an extraction. We never can if the
7573 width of the field is not the same as that of some integer mode. For
7574 registers, we can only avoid the extraction if the position is at the
7575 low-order bit and this is either not in the destination or we have the
7576 appropriate STRICT_LOW_PART operation available.
7577
7578 For MEM, we can avoid an extract if the field starts on an appropriate
7579 boundary and we can change the mode of the memory reference. */
7580
7581 scalar_int_mode tmode;
7582 if (int_mode_for_size (len, 1).exists (&tmode)
7583 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7584 && !MEM_P (inner)
7585 && (pos == 0 || REG_P (inner))
7586 && (inner_mode == tmode
7587 || !REG_P (inner)
7588 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7589 || reg_truncated_to_mode (tmode, inner))
7590 && (! in_dest
7591 || (REG_P (inner)
7592 && have_insn_for (STRICT_LOW_PART, tmode))))
7593 || (MEM_P (inner) && pos_rtx == 0
7594 && (pos
7595 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7596 : BITS_PER_UNIT)) == 0
7597 /* We can't do this if we are widening INNER_MODE (it
7598 may not be aligned, for one thing). */
7599 && !paradoxical_subreg_p (tmode, inner_mode)
7600 && known_le (pos + len, GET_MODE_PRECISION (is_mode))
7601 && (inner_mode == tmode
7602 || (! mode_dependent_address_p (XEXP (inner, 0),
7603 MEM_ADDR_SPACE (inner))
7604 && ! MEM_VOLATILE_P (inner))))))
7605 {
7606 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7607 field. If the original and current mode are the same, we need not
7608 adjust the offset. Otherwise, we do if bytes are big-endian.
7609
7610 If INNER is not a MEM, get a piece consisting of just the field
7611 of interest (in this case POS % BITS_PER_WORD must be 0). */
7612
7613 if (MEM_P (inner))
7614 {
7615 poly_int64 offset;
7616
7617 /* POS counts from lsb, but make OFFSET count in memory order. */
7618 if (BYTES_BIG_ENDIAN)
7619 offset = bits_to_bytes_round_down (GET_MODE_PRECISION (is_mode)
7620 - len - pos);
7621 else
7622 offset = pos / BITS_PER_UNIT;
7623
7624 new_rtx = adjust_address_nv (inner, tmode, offset);
7625 }
7626 else if (REG_P (inner))
7627 {
7628 if (tmode != inner_mode)
7629 {
7630 /* We can't call gen_lowpart in a DEST since we
7631 always want a SUBREG (see below) and it would sometimes
7632 return a new hard register. */
7633 if (pos || in_dest)
7634 {
7635 poly_uint64 offset
7636 = subreg_offset_from_lsb (tmode, inner_mode, pos);
7637
7638 /* Avoid creating invalid subregs, for example when
7639 simplifying (x>>32)&255. */
7640 if (!validate_subreg (tmode, inner_mode, inner, offset))
7641 return NULL_RTX;
7642
7643 new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
7644 }
7645 else
7646 new_rtx = gen_lowpart (tmode, inner);
7647 }
7648 else
7649 new_rtx = inner;
7650 }
7651 else
7652 new_rtx = force_to_mode (inner, tmode,
7653 len >= HOST_BITS_PER_WIDE_INT
7654 ? HOST_WIDE_INT_M1U
7655 : (HOST_WIDE_INT_1U << len) - 1, 0);
7656
7657 /* If this extraction is going into the destination of a SET,
7658 make a STRICT_LOW_PART unless we made a MEM. */
7659
7660 if (in_dest)
7661 return (MEM_P (new_rtx) ? new_rtx
7662 : (GET_CODE (new_rtx) != SUBREG
7663 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7664 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7665
7666 if (mode == tmode)
7667 return new_rtx;
7668
7669 if (CONST_SCALAR_INT_P (new_rtx))
7670 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7671 mode, new_rtx, tmode);
7672
7673 /* If we know that no extraneous bits are set, and that the high
7674 bit is not set, convert the extraction to the cheaper of
7675 sign and zero extension, which are equivalent in these cases. */
7676 if (flag_expensive_optimizations
7677 && (HWI_COMPUTABLE_MODE_P (tmode)
7678 && ((nonzero_bits (new_rtx, tmode)
7679 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7680 == 0)))
7681 {
7682 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7683 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7684
7685 /* Prefer ZERO_EXTENSION, since it gives more information to
7686 backends. */
7687 if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7688 <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7689 return temp;
7690 return temp1;
7691 }
7692
7693 /* Otherwise, sign- or zero-extend unless we already are in the
7694 proper mode. */
7695
7696 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7697 mode, new_rtx));
7698 }
7699
7700 /* Unless this is a COMPARE or we have a funny memory reference,
7701 don't do anything with zero-extending field extracts starting at
7702 the low-order bit since they are simple AND operations. */
7703 if (pos_rtx == 0 && pos == 0 && ! in_dest
7704 && ! in_compare && unsignedp)
7705 return 0;
7706
7707 /* If INNER is a MEM, reject this if we would be spanning bytes or
7708 if the position is not a constant and the length is not 1. In all
7709 other cases, we would only be going outside our object in cases when
7710 an original shift would have been undefined. */
7711 if (MEM_P (inner)
7712 && ((pos_rtx == 0 && maybe_gt (pos + len, GET_MODE_PRECISION (is_mode)))
7713 || (pos_rtx != 0 && len != 1)))
7714 return 0;
7715
7716 enum extraction_pattern pattern = (in_dest ? EP_insv
7717 : unsignedp ? EP_extzv : EP_extv);
7718
7719 /* If INNER is not from memory, we want it to have the mode of a register
7720 extraction pattern's structure operand, or word_mode if there is no
7721 such pattern. The same applies to extraction_mode and pos_mode
7722 and their respective operands.
7723
7724 For memory, assume that the desired extraction_mode and pos_mode
7725 are the same as for a register operation, since at present we don't
7726 have named patterns for aligned memory structures. */
7727 class extraction_insn insn;
7728 unsigned int inner_size;
7729 if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
7730 && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
7731 {
7732 wanted_inner_reg_mode = insn.struct_mode.require ();
7733 pos_mode = insn.pos_mode;
7734 extraction_mode = insn.field_mode;
7735 }
7736
7737 /* Never narrow an object, since that might not be safe. */
7738
7739 if (mode != VOIDmode
7740 && partial_subreg_p (extraction_mode, mode))
7741 extraction_mode = mode;
7742
7743 /* Punt if len is too large for extraction_mode. */
7744 if (maybe_gt (len, GET_MODE_PRECISION (extraction_mode)))
7745 return NULL_RTX;
7746
7747 if (!MEM_P (inner))
7748 wanted_inner_mode = wanted_inner_reg_mode;
7749 else
7750 {
7751 /* Be careful not to go beyond the extracted object and maintain the
7752 natural alignment of the memory. */
7753 wanted_inner_mode = smallest_int_mode_for_size (len);
7754 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7755 > GET_MODE_BITSIZE (wanted_inner_mode))
7756 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7757 }
7758
7759 orig_pos = pos;
7760
7761 if (BITS_BIG_ENDIAN)
7762 {
7763 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7764 BITS_BIG_ENDIAN style. If position is constant, compute new
7765 position. Otherwise, build subtraction.
7766 Note that POS is relative to the mode of the original argument.
7767 If it's a MEM we need to recompute POS relative to that.
7768 However, if we're extracting from (or inserting into) a register,
7769 we want to recompute POS relative to wanted_inner_mode. */
7770 int width;
7771 if (!MEM_P (inner))
7772 width = GET_MODE_BITSIZE (wanted_inner_mode);
7773 else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
7774 return NULL_RTX;
7775
7776 if (pos_rtx == 0)
7777 pos = width - len - pos;
7778 else
7779 pos_rtx
7780 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7781 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7782 pos_rtx);
7783 /* POS may be less than 0 now, but we check for that below.
7784 Note that it can only be less than 0 if !MEM_P (inner). */
7785 }
7786
7787 /* If INNER has a wider mode, and this is a constant extraction, try to
7788 make it smaller and adjust the byte to point to the byte containing
7789 the value. */
7790 if (wanted_inner_mode != VOIDmode
7791 && inner_mode != wanted_inner_mode
7792 && ! pos_rtx
7793 && partial_subreg_p (wanted_inner_mode, is_mode)
7794 && MEM_P (inner)
7795 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7796 && ! MEM_VOLATILE_P (inner))
7797 {
7798 poly_int64 offset = 0;
7799
7800 /* The computations below will be correct if the machine is big
7801 endian in both bits and bytes or little endian in bits and bytes.
7802 If it is mixed, we must adjust. */
7803
7804 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7805 adjust OFFSET to compensate. */
7806 if (BYTES_BIG_ENDIAN
7807 && paradoxical_subreg_p (is_mode, inner_mode))
7808 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7809
7810 /* We can now move to the desired byte. */
7811 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7812 * GET_MODE_SIZE (wanted_inner_mode);
7813 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7814
7815 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7816 && is_mode != wanted_inner_mode)
7817 offset = (GET_MODE_SIZE (is_mode)
7818 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7819
7820 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7821 }
7822
7823 /* If INNER is not memory, get it into the proper mode. If we are changing
7824 its mode, POS must be a constant and smaller than the size of the new
7825 mode. */
7826 else if (!MEM_P (inner))
7827 {
7828 /* On the LHS, don't create paradoxical subregs implicitly truncating
7829 the register unless TARGET_TRULY_NOOP_TRUNCATION. */
7830 if (in_dest
7831 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7832 wanted_inner_mode))
7833 return NULL_RTX;
7834
7835 if (GET_MODE (inner) != wanted_inner_mode
7836 && (pos_rtx != 0
7837 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7838 return NULL_RTX;
7839
7840 if (orig_pos < 0)
7841 return NULL_RTX;
7842
7843 inner = force_to_mode (inner, wanted_inner_mode,
7844 pos_rtx
7845 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7846 ? HOST_WIDE_INT_M1U
7847 : (((HOST_WIDE_INT_1U << len) - 1)
7848 << orig_pos),
7849 0);
7850 }
7851
7852 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7853 have to zero extend. Otherwise, we can just use a SUBREG.
7854
7855 We dealt with constant rtxes earlier, so pos_rtx cannot
7856 have VOIDmode at this point. */
7857 if (pos_rtx != 0
7858 && (GET_MODE_SIZE (pos_mode)
7859 > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7860 {
7861 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7862 GET_MODE (pos_rtx));
7863
7864 /* If we know that no extraneous bits are set, and that the high
7865 bit is not set, convert the extraction to the cheaper one - either
7866 SIGN_EXTENSION or ZERO_EXTENSION, which are equivalent in these
7867 cases. */
7868 if (flag_expensive_optimizations
7869 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7870 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7871 & ~(((unsigned HOST_WIDE_INT)
7872 GET_MODE_MASK (GET_MODE (pos_rtx)))
7873 >> 1))
7874 == 0)))
7875 {
7876 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7877 GET_MODE (pos_rtx));
7878
7879 /* Prefer ZERO_EXTENSION, since it gives more information to
7880 backends. */
7881 if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7882 < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7883 temp = temp1;
7884 }
7885 pos_rtx = temp;
7886 }
7887
7888 /* Make POS_RTX unless we already have it and it is correct. If we don't
7889 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7890 be a CONST_INT. */
7891 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7892 pos_rtx = orig_pos_rtx;
7893
7894 else if (pos_rtx == 0)
7895 pos_rtx = GEN_INT (pos);
7896
7897 /* Make the required operation. See if we can use an existing rtx. */
7898 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7899 extraction_mode, inner, GEN_INT (len), pos_rtx);
7900 if (! in_dest)
7901 new_rtx = gen_lowpart (mode, new_rtx);
7902
7903 return new_rtx;
7904 }
7905 \f
7906 /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
7907 can be commuted with any other operations in X. Return X without
7908 that shift if so. */
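/* For example, with COUNT == 2,
   (plus (ashift x (const_int 3)) (const_int 12)) yields
   (plus (ashift x (const_int 1)) (const_int 3)), i.e. the original
   value shifted right by COUNT.  */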
7909
7910 static rtx
7911 extract_left_shift (scalar_int_mode mode, rtx x, int count)
7912 {
7913 enum rtx_code code = GET_CODE (x);
7914 rtx tem;
7915
7916 switch (code)
7917 {
7918 case ASHIFT:
7919 /* This is the shift itself. If it is wide enough, we will return
7920 either the value being shifted if the shift count is equal to
7921 COUNT or a shift for the difference. */
7922 if (CONST_INT_P (XEXP (x, 1))
7923 && INTVAL (XEXP (x, 1)) >= count)
7924 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7925 INTVAL (XEXP (x, 1)) - count);
7926 break;
7927
7928 case NEG: case NOT:
7929 if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7930 return simplify_gen_unary (code, mode, tem, mode);
7931
7932 break;
7933
7934 case PLUS: case IOR: case XOR: case AND:
7935 /* If we can safely shift this constant and we find the inner shift,
7936 make a new operation. */
7937 if (CONST_INT_P (XEXP (x, 1))
7938 && (UINTVAL (XEXP (x, 1))
7939 & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
7940 && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7941 {
7942 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
7943 return simplify_gen_binary (code, mode, tem,
7944 gen_int_mode (val, mode));
7945 }
7946 break;
7947
7948 default:
7949 break;
7950 }
7951
7952 return 0;
7953 }
7954 \f
7955 /* Subroutine of make_compound_operation. *X_PTR is the rtx at the current
7956 level of the expression and MODE is its mode. IN_CODE is as for
7957 make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE
7958 that should be used when recursing on operands of *X_PTR.
7959
7960 There are two possible actions:
7961
7962 - Return null. This tells the caller to recurse on *X_PTR with IN_CODE
7963 equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
7964
7965 - Return a new rtx, which the caller returns directly. */
7966
7967 static rtx
7968 make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
7969 enum rtx_code in_code,
7970 enum rtx_code *next_code_ptr)
7971 {
7972 rtx x = *x_ptr;
7973 enum rtx_code next_code = *next_code_ptr;
7974 enum rtx_code code = GET_CODE (x);
7975 int mode_width = GET_MODE_PRECISION (mode);
7976 rtx rhs, lhs;
7977 rtx new_rtx = 0;
7978 int i;
7979 rtx tem;
7980 scalar_int_mode inner_mode;
7981 bool equality_comparison = false;
7982
7983 if (in_code == EQ)
7984 {
7985 equality_comparison = true;
7986 in_code = COMPARE;
7987 }
7988
7989 /* Process depending on the code of this operation. If NEW_RTX is set
7990 nonzero, it will be returned. */
7991
7992 switch (code)
7993 {
7994 case ASHIFT:
7995 /* Convert shifts by constants into multiplications if inside
7996 an address. */
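      /* E.g. within a MEM address, (ashift x (const_int 2)) becomes
         (mult x (const_int 4)).  */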
7997 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7998 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7999 && INTVAL (XEXP (x, 1)) >= 0)
8000 {
8001 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
8002 HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
8003
8004 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8005 if (GET_CODE (new_rtx) == NEG)
8006 {
8007 new_rtx = XEXP (new_rtx, 0);
8008 multval = -multval;
8009 }
8010 multval = trunc_int_for_mode (multval, mode);
8011 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
8012 }
8013 break;
8014
8015 case PLUS:
8016 lhs = XEXP (x, 0);
8017 rhs = XEXP (x, 1);
8018 lhs = make_compound_operation (lhs, next_code);
8019 rhs = make_compound_operation (rhs, next_code);
8020 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
8021 {
8022 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
8023 XEXP (lhs, 1));
8024 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8025 }
8026 else if (GET_CODE (lhs) == MULT
8027 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
8028 {
8029 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
8030 simplify_gen_unary (NEG, mode,
8031 XEXP (lhs, 1),
8032 mode));
8033 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8034 }
8035 else
8036 {
8037 SUBST (XEXP (x, 0), lhs);
8038 SUBST (XEXP (x, 1), rhs);
8039 }
8040 maybe_swap_commutative_operands (x);
8041 return x;
8042
8043 case MINUS:
8044 lhs = XEXP (x, 0);
8045 rhs = XEXP (x, 1);
8046 lhs = make_compound_operation (lhs, next_code);
8047 rhs = make_compound_operation (rhs, next_code);
8048 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
8049 {
8050 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
8051 XEXP (rhs, 1));
8052 return simplify_gen_binary (PLUS, mode, tem, lhs);
8053 }
8054 else if (GET_CODE (rhs) == MULT
8055 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
8056 {
8057 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
8058 simplify_gen_unary (NEG, mode,
8059 XEXP (rhs, 1),
8060 mode));
8061 return simplify_gen_binary (PLUS, mode, tem, lhs);
8062 }
8063 else
8064 {
8065 SUBST (XEXP (x, 0), lhs);
8066 SUBST (XEXP (x, 1), rhs);
8067 return x;
8068 }
8069
8070 case AND:
8071 /* If the second operand is not a constant, we can't do anything
8072 with it. */
8073 if (!CONST_INT_P (XEXP (x, 1)))
8074 break;
8075
8076 /* If the constant is a power of two minus one and the first operand
8077 is a logical right shift, make an extraction. */
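      /* E.g. (and (lshiftrt x (const_int 8)) (const_int 255)) is treated
         as an eight-bit field extraction such as
         (zero_extract x (const_int 8) (const_int 8)).  */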
8078 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8079 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8080 {
8081 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8082 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
8083 i, 1, 0, in_code == COMPARE);
8084 }
8085
8086 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
8087 else if (GET_CODE (XEXP (x, 0)) == SUBREG
8088 && subreg_lowpart_p (XEXP (x, 0))
8089 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
8090 &inner_mode)
8091 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
8092 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8093 {
8094 rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
8095 new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
8096 new_rtx = make_extraction (inner_mode, new_rtx, 0,
8097 XEXP (inner_x0, 1),
8098 i, 1, 0, in_code == COMPARE);
8099
8100 /* If we narrowed the mode when dropping the subreg, then we lose. */
8101 if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
8102 new_rtx = NULL;
8103
8104 /* If that didn't give anything, see if the AND simplifies on
8105 its own. */
8106 if (!new_rtx && i >= 0)
8107 {
8108 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8109 new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8110 0, in_code == COMPARE);
8111 }
8112 }
8113 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
8114 else if ((GET_CODE (XEXP (x, 0)) == XOR
8115 || GET_CODE (XEXP (x, 0)) == IOR)
8116 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8117 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8118 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8119 {
8120 /* Apply the distributive law, and then try to make extractions. */
8121 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8122 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8123 XEXP (x, 1)),
8124 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8125 XEXP (x, 1)));
8126 new_rtx = make_compound_operation (new_rtx, in_code);
8127 }
8128
8129 /* If we have (and (rotate X C) M) and C is larger than the number
8130 of bits in M, this is an extraction. */
8131
8132 else if (GET_CODE (XEXP (x, 0)) == ROTATE
8133 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8134 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8135 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8136 {
8137 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8138 new_rtx = make_extraction (mode, new_rtx,
8139 (GET_MODE_PRECISION (mode)
8140 - INTVAL (XEXP (XEXP (x, 0), 1))),
8141 NULL_RTX, i, 1, 0, in_code == COMPARE);
8142 }
8143
8144 /* On machines without logical shifts, if the operand of the AND is
8145 a logical shift and our mask turns off all the propagated sign
8146 bits, we can replace the logical shift with an arithmetic shift. */
8147 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8148 && !have_insn_for (LSHIFTRT, mode)
8149 && have_insn_for (ASHIFTRT, mode)
8150 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8151 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8152 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8153 && mode_width <= HOST_BITS_PER_WIDE_INT)
8154 {
8155 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8156
8157 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8158 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8159 SUBST (XEXP (x, 0),
8160 gen_rtx_ASHIFTRT (mode,
8161 make_compound_operation (XEXP (XEXP (x,
8162 0),
8163 0),
8164 next_code),
8165 XEXP (XEXP (x, 0), 1)));
8166 }
8167
8168 /* If the constant is one less than a power of two, this might be
8169 representable by an extraction even if no shift is present.
8170 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8171 we are in a COMPARE. */
8172 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8173 new_rtx = make_extraction (mode,
8174 make_compound_operation (XEXP (x, 0),
8175 next_code),
8176 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8177
8178 /* If we are in a comparison and this is an AND with a power of two,
8179 convert this into the appropriate bit extract. */
8180 else if (in_code == COMPARE
8181 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8182 && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8183 new_rtx = make_extraction (mode,
8184 make_compound_operation (XEXP (x, 0),
8185 next_code),
8186 i, NULL_RTX, 1, 1, 0, 1);
8187
8188 /* If the first operand is a paradoxical subreg of a register or memory and
8189 the constant (limited to the smaller mode) has only zero bits where
8190 the sub expression has known zero bits, this can be expressed as
8191 a zero_extend. */
8192 else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8193 {
8194 rtx sub;
8195
8196 sub = XEXP (XEXP (x, 0), 0);
8197 machine_mode sub_mode = GET_MODE (sub);
8198 int sub_width;
8199 if ((REG_P (sub) || MEM_P (sub))
8200 && GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
8201 && sub_width < mode_width)
8202 {
8203 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8204 unsigned HOST_WIDE_INT mask;
8205
8206 /* Original AND constant with all the known zero bits set. */
8207 mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8208 if ((mask & mode_mask) == mode_mask)
8209 {
8210 new_rtx = make_compound_operation (sub, next_code);
8211 new_rtx = make_extraction (mode, new_rtx, 0, 0, sub_width,
8212 1, 0, in_code == COMPARE);
8213 }
8214 }
8215 }
8216
8217 break;
8218
8219 case LSHIFTRT:
8220 /* If the sign bit is known to be zero, replace this with an
8221 arithmetic shift. */
8222 if (have_insn_for (ASHIFTRT, mode)
8223 && ! have_insn_for (LSHIFTRT, mode)
8224 && mode_width <= HOST_BITS_PER_WIDE_INT
8225 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
8226 {
8227 new_rtx = gen_rtx_ASHIFTRT (mode,
8228 make_compound_operation (XEXP (x, 0),
8229 next_code),
8230 XEXP (x, 1));
8231 break;
8232 }
8233
8234 /* fall through */
8235
8236 case ASHIFTRT:
8237 lhs = XEXP (x, 0);
8238 rhs = XEXP (x, 1);
8239
8240 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8241 this is a SIGN_EXTRACT. */
8242 if (CONST_INT_P (rhs)
8243 && GET_CODE (lhs) == ASHIFT
8244 && CONST_INT_P (XEXP (lhs, 1))
8245 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8246 && INTVAL (XEXP (lhs, 1)) >= 0
8247 && INTVAL (rhs) < mode_width)
8248 {
8249 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8250 new_rtx = make_extraction (mode, new_rtx,
8251 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8252 NULL_RTX, mode_width - INTVAL (rhs),
8253 code == LSHIFTRT, 0, in_code == COMPARE);
8254 break;
8255 }
8256
8257 /* See if we have operations between an ASHIFTRT and an ASHIFT.
8258 If so, try to merge the shifts into a SIGN_EXTEND. We could
8259 also do this for some cases of SIGN_EXTRACT, but it doesn't
8260 seem worth the effort; the case checked for occurs on Alpha. */
8261
8262 if (!OBJECT_P (lhs)
8263 && ! (GET_CODE (lhs) == SUBREG
8264 && (OBJECT_P (SUBREG_REG (lhs))))
8265 && CONST_INT_P (rhs)
8266 && INTVAL (rhs) >= 0
8267 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8268 && INTVAL (rhs) < mode_width
8269 && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
8270 new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
8271 next_code),
8272 0, NULL_RTX, mode_width - INTVAL (rhs),
8273 code == LSHIFTRT, 0, in_code == COMPARE);
8274
8275 break;
8276
8277 case SUBREG:
8278 /* Call ourselves recursively on the inner expression. If we are
8279 narrowing the object and it has a different RTL code from
8280 what it originally did, do this SUBREG as a force_to_mode. */
8281 {
8282 rtx inner = SUBREG_REG (x), simplified;
8283 enum rtx_code subreg_code = in_code;
8284
8285 /* If the SUBREG masks a logical right shift,
8286 make an extraction. */
8287 if (GET_CODE (inner) == LSHIFTRT
8288 && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
8289 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
8290 && CONST_INT_P (XEXP (inner, 1))
8291 && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
8292 && subreg_lowpart_p (x))
8293 {
8294 new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8295 int width = GET_MODE_PRECISION (inner_mode)
8296 - INTVAL (XEXP (inner, 1));
8297 if (width > mode_width)
8298 width = mode_width;
8299 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8300 width, 1, 0, in_code == COMPARE);
8301 break;
8302 }
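/* Example: (subreg:QI (lshiftrt:SI (reg) (const_int 8)) 0) selects
   bits 8..15 of the register, i.e. an eight-bit extraction at
   position 8; the computed width 32 - 8 == 24 is capped to the
   eight bits of QImode. */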
8303
8304 /* If in_code is COMPARE, it isn't always safe to pass it through
8305 to the recursive make_compound_operation call. */
8306 if (subreg_code == COMPARE
8307 && (!subreg_lowpart_p (x)
8308 || GET_CODE (inner) == SUBREG
8309 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8310 is (const_int 0), rather than
8311 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8312 Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8313 for non-equality comparisons against 0 is not equivalent
8314 to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0). */
8315 || (GET_CODE (inner) == AND
8316 && CONST_INT_P (XEXP (inner, 1))
8317 && partial_subreg_p (x)
8318 && exact_log2 (UINTVAL (XEXP (inner, 1)))
8319 >= GET_MODE_BITSIZE (mode) - 1)))
8320 subreg_code = SET;
8321
8322 tem = make_compound_operation (inner, subreg_code);
8323
8324 simplified
8325 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8326 if (simplified)
8327 tem = simplified;
8328
8329 if (GET_CODE (tem) != GET_CODE (inner)
8330 && partial_subreg_p (x)
8331 && subreg_lowpart_p (x))
8332 {
8333 rtx newer
8334 = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8335
8336 /* If we have something other than a SUBREG, we might have
8337 done an expansion, so rerun ourselves. */
8338 if (GET_CODE (newer) != SUBREG)
8339 newer = make_compound_operation (newer, in_code);
8340
8341 /* force_to_mode can expand compounds. If it just re-expanded
8342 the compound, use gen_lowpart to convert to the desired
8343 mode. */
8344 if (rtx_equal_p (newer, x)
8345 /* Likewise if it re-expanded the compound only partially.
8346 This happens for SUBREG of ZERO_EXTRACT if they extract
8347 the same number of bits. */
8348 || (GET_CODE (newer) == SUBREG
8349 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8350 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8351 && GET_CODE (inner) == AND
8352 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8353 return gen_lowpart (GET_MODE (x), tem);
8354
8355 return newer;
8356 }
8357
8358 if (simplified)
8359 return tem;
8360 }
8361 break;
8362
8363 default:
8364 break;
8365 }
8366
8367 if (new_rtx)
8368 *x_ptr = gen_lowpart (mode, new_rtx);
8369 *next_code_ptr = next_code;
8370 return NULL_RTX;
8371 }
8372
8373 /* Look at the expression rooted at X. Look for expressions
8374 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8375 Form these expressions.
8376
8377 Return the new rtx, usually just X.
8378
8379 Also, for machines like the VAX that don't have logical shift insns,
8380 try to convert logical to arithmetic shift operations in cases where
8381 they are equivalent. This undoes the canonicalizations to logical
8382 shifts done elsewhere.
8383
8384 We try, as much as possible, to re-use rtl expressions to save memory.
8385
8386 IN_CODE says what kind of expression we are processing. Normally, it is
8387 SET. In a memory address it is MEM. When processing the arguments of
8388 a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
8389 precisely it is an equality comparison against zero. */
8390
8391 rtx
8392 make_compound_operation (rtx x, enum rtx_code in_code)
8393 {
8394 enum rtx_code code = GET_CODE (x);
8395 const char *fmt;
8396 int i, j;
8397 enum rtx_code next_code;
8398 rtx new_rtx, tem;
8399
8400 /* Select the code to be used in recursive calls. Once we are inside an
8401 address, we stay there. If we have a comparison, set to COMPARE,
8402 but once inside, go back to our default of SET. */
8403
8404 next_code = (code == MEM ? MEM
8405 : ((code == COMPARE || COMPARISON_P (x))
8406 && XEXP (x, 1) == const0_rtx) ? COMPARE
8407 : in_code == COMPARE || in_code == EQ ? SET : in_code);
8408
8409 scalar_int_mode mode;
8410 if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
8411 {
8412 rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
8413 &next_code);
8414 if (new_rtx)
8415 return new_rtx;
8416 code = GET_CODE (x);
8417 }
8418
8419 /* Now recursively process each operand of this operation. We need to
8420 handle ZERO_EXTEND specially so that we don't lose track of the
8421 inner mode. */
8422 if (code == ZERO_EXTEND)
8423 {
8424 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8425 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8426 new_rtx, GET_MODE (XEXP (x, 0)));
8427 if (tem)
8428 return tem;
8429 SUBST (XEXP (x, 0), new_rtx);
8430 return x;
8431 }
8432
8433 fmt = GET_RTX_FORMAT (code);
8434 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8435 if (fmt[i] == 'e')
8436 {
8437 new_rtx = make_compound_operation (XEXP (x, i), next_code);
8438 SUBST (XEXP (x, i), new_rtx);
8439 }
8440 else if (fmt[i] == 'E')
8441 for (j = 0; j < XVECLEN (x, i); j++)
8442 {
8443 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8444 SUBST (XVECEXP (x, i, j), new_rtx);
8445 }
8446
8447 maybe_swap_commutative_operands (x);
8448 return x;
8449 }
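/* The shift-and-mask shapes recognized above all follow one scalar
   pattern.  A minimal sketch (illustration only, assuming a 32-bit
   word):

     // (and (lshiftrt W (const_int 24)) (const_int 255)) behaves as
     // (zero_extract W (const_int 8) (const_int 24)):
     static unsigned top_byte (unsigned w) { return (w >> 24) & 0xff; }
*/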
8450 \f
8451 /* Given M see if it is a value that would select a field of bits
8452 within an item, but not the entire word. Return -1 if not.
8453 Otherwise, return the starting position of the field, where 0 is the
8454 low-order bit.
8455
8456 *PLEN is set to the length of the field. */
8457
8458 static int
8459 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8460 {
8461 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8462 int pos = m ? ctz_hwi (m) : -1;
8463 int len = 0;
8464
8465 if (pos >= 0)
8466 /* Now shift off the low-order zero bits and see if we have a
8467 power of two minus 1. */
8468 len = exact_log2 ((m >> pos) + 1);
8469
8470 if (len <= 0)
8471 pos = -1;
8472
8473 *plen = len;
8474 return pos;
8475 }
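/* A standalone sketch of the decoding above (illustration only;
   __builtin_ctzll stands in for ctz_hwi and exact_log2):

     #include <stdbool.h>

     static bool
     decode_mask (unsigned long long m, int *pos, int *len)
     {
       if (m == 0)
         return false;
       int p = __builtin_ctzll (m);          // first 1 bit from the right
       unsigned long long f = (m >> p) + 1;  // power of 2 iff contiguous
       if (f & (f - 1))
         return false;
       *pos = p;
       *len = __builtin_ctzll (f);           // log2 of that power of 2
       return true;
     }

   A mask of 0x78 yields POS 3 and LEN 4; 0xff00ff is rejected
   because its set bits are not contiguous. */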
8476 \f
8477 /* If X refers to a register that equals REG in value, replace these
8478 references with REG. */
8479 static rtx
8480 canon_reg_for_combine (rtx x, rtx reg)
8481 {
8482 rtx op0, op1, op2;
8483 const char *fmt;
8484 int i;
8485 bool copied;
8486
8487 enum rtx_code code = GET_CODE (x);
8488 switch (GET_RTX_CLASS (code))
8489 {
8490 case RTX_UNARY:
8491 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8492 if (op0 != XEXP (x, 0))
8493 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8494 GET_MODE (reg));
8495 break;
8496
8497 case RTX_BIN_ARITH:
8498 case RTX_COMM_ARITH:
8499 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8500 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8501 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8502 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8503 break;
8504
8505 case RTX_COMPARE:
8506 case RTX_COMM_COMPARE:
8507 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8508 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8509 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8510 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8511 GET_MODE (op0), op0, op1);
8512 break;
8513
8514 case RTX_TERNARY:
8515 case RTX_BITFIELD_OPS:
8516 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8517 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8518 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8519 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8520 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8521 GET_MODE (op0), op0, op1, op2);
8522 /* FALLTHRU */
8523
8524 case RTX_OBJ:
8525 if (REG_P (x))
8526 {
8527 if (rtx_equal_p (get_last_value (reg), x)
8528 || rtx_equal_p (reg, get_last_value (x)))
8529 return reg;
8530 else
8531 break;
8532 }
8533
8534 /* fall through */
8535
8536 default:
8537 fmt = GET_RTX_FORMAT (code);
8538 copied = false;
8539 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8540 if (fmt[i] == 'e')
8541 {
8542 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8543 if (op != XEXP (x, i))
8544 {
8545 if (!copied)
8546 {
8547 copied = true;
8548 x = copy_rtx (x);
8549 }
8550 XEXP (x, i) = op;
8551 }
8552 }
8553 else if (fmt[i] == 'E')
8554 {
8555 int j;
8556 for (j = 0; j < XVECLEN (x, i); j++)
8557 {
8558 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8559 if (op != XVECEXP (x, i, j))
8560 {
8561 if (!copied)
8562 {
8563 copied = true;
8564 x = copy_rtx (x);
8565 }
8566 XVECEXP (x, i, j) = op;
8567 }
8568 }
8569 }
8570
8571 break;
8572 }
8573
8574 return x;
8575 }
8576
8577 /* Return X converted to MODE. If the value is already truncated to
8578 MODE we can just return a subreg even though in the general case we
8579 would need an explicit truncation. */
8580
8581 static rtx
8582 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8583 {
8584 if (!CONST_INT_P (x)
8585 && partial_subreg_p (mode, GET_MODE (x))
8586 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8587 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8588 {
8589 /* Bit-cast X into an integer mode. */
8590 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8591 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
8592 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
8593 x, GET_MODE (x));
8594 }
8595
8596 return gen_lowpart (mode, x);
8597 }
8598
8599 /* See if X can be simplified knowing that we will only refer to it in
8600 MODE and will only refer to those bits that are nonzero in MASK.
8601 If other bits are being computed or if masking operations are done
8602 that select a superset of the bits in MASK, they can sometimes be
8603 ignored.
8604
8605 Return a possibly simplified expression, but always convert X to
8606 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8607
8608 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8609 are all off in X. This is used when X will be complemented, by either
8610 NOT, NEG, or XOR. */
8611
8612 static rtx
8613 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8614 int just_select)
8615 {
8616 enum rtx_code code = GET_CODE (x);
8617 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8618 machine_mode op_mode;
8619 unsigned HOST_WIDE_INT nonzero;
8620
8621 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8622 code below will do the wrong thing since the mode of such an
8623 expression is VOIDmode.
8624
8625 Also do nothing if X is a CLOBBER; this can happen if X was
8626 the return value from a call to gen_lowpart. */
8627 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8628 return x;
8629
8630 /* We want to perform the operation in its present mode unless we know
8631 that the operation is valid in MODE, in which case we do the operation
8632 in MODE. */
8633 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8634 && have_insn_for (code, mode))
8635 ? mode : GET_MODE (x));
8636
8637 /* It is not valid to do a right-shift in a narrower mode
8638 than the one it came in with. */
8639 if ((code == LSHIFTRT || code == ASHIFTRT)
8640 && partial_subreg_p (mode, GET_MODE (x)))
8641 op_mode = GET_MODE (x);
8642
8643 /* Truncate MASK to fit OP_MODE. */
8644 if (op_mode)
8645 mask &= GET_MODE_MASK (op_mode);
8646
8647 /* Determine what bits of X are guaranteed to be (non)zero. */
8648 nonzero = nonzero_bits (x, mode);
8649
8650 /* If none of the bits in X are needed, return a zero. */
8651 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8652 x = const0_rtx;
8653
8654 /* If X is a CONST_INT, return a new one. Do this here since the
8655 test below will fail. */
8656 if (CONST_INT_P (x))
8657 {
8658 if (SCALAR_INT_MODE_P (mode))
8659 return gen_int_mode (INTVAL (x) & mask, mode);
8660 else
8661 {
8662 x = GEN_INT (INTVAL (x) & mask);
8663 return gen_lowpart_common (mode, x);
8664 }
8665 }
8666
8667 /* If X is narrower than MODE and we want all the bits in X's mode, just
8668 get X in the proper mode. */
8669 if (paradoxical_subreg_p (mode, GET_MODE (x))
8670 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8671 return gen_lowpart (mode, x);
8672
8673 /* We can ignore the effect of a SUBREG if it narrows the mode or
8674 if the constant masks to zero all the bits the mode doesn't have. */
8675 if (GET_CODE (x) == SUBREG
8676 && subreg_lowpart_p (x)
8677 && (partial_subreg_p (x)
8678 || (mask
8679 & GET_MODE_MASK (GET_MODE (x))
8680 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))) == 0))
8681 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8682
8683 scalar_int_mode int_mode, xmode;
8684 if (is_a <scalar_int_mode> (mode, &int_mode)
8685 && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
8686 /* OP_MODE is either MODE or XMODE, so it must be a scalar
8687 integer too. */
8688 return force_int_to_mode (x, int_mode, xmode,
8689 as_a <scalar_int_mode> (op_mode),
8690 mask, just_select);
8691
8692 return gen_lowpart_or_truncate (mode, x);
8693 }
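/* The payoff of the masking above, in plain C (illustration only):
   when a consumer looks only at the low byte,

     ((x & 0x0fff) + y) & 0xff  ==  (x + y) & 0xff

   for all x and y, because PLUS propagates carries only upward, so
   an inner AND that keeps a superset of the needed bits is redundant
   and can be peeled off. */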
8694
8695 /* Subroutine of force_to_mode that handles cases in which both X and
8696 the result are scalar integers. MODE is the mode of the result,
8697 XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
8698 is preferred for simplified versions of X. The other arguments
8699 are as for force_to_mode. */
8700
8701 static rtx
8702 force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
8703 scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
8704 int just_select)
8705 {
8706 enum rtx_code code = GET_CODE (x);
8707 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8708 unsigned HOST_WIDE_INT fuller_mask;
8709 rtx op0, op1, temp;
8710 poly_int64 const_op0;
8711
8712 /* When we have an arithmetic operation, or a shift whose count we
8713 do not know, we need to assume that all bits up to the highest-order
8714 bit in MASK will be needed. This is how we form such a mask. */
8715 if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8716 fuller_mask = HOST_WIDE_INT_M1U;
8717 else
8718 fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8719 - 1);
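/* Example: MASK == 0x14 (bits 2 and 4) gives floor_log2 (mask) == 4,
   so FULLER_MASK == 0x1f: a carry into bit 4 can originate in any of
   bits 0..3, so all of them must be preserved. */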
8720
8721 switch (code)
8722 {
8723 case CLOBBER:
8724 /* If X is a (clobber (const_int)), return it since we know we are
8725 generating something that won't match. */
8726 return x;
8727
8728 case SIGN_EXTEND:
8729 case ZERO_EXTEND:
8730 case ZERO_EXTRACT:
8731 case SIGN_EXTRACT:
8732 x = expand_compound_operation (x);
8733 if (GET_CODE (x) != code)
8734 return force_to_mode (x, mode, mask, next_select);
8735 break;
8736
8737 case TRUNCATE:
8738 /* Similarly for a truncate. */
8739 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8740
8741 case AND:
8742 /* If this is an AND with a constant, convert it into an AND
8743 whose constant is the AND of that constant with MASK. If it
8744 remains an AND of MASK, delete it since it is redundant. */
8745
8746 if (CONST_INT_P (XEXP (x, 1)))
8747 {
8748 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8749 mask & INTVAL (XEXP (x, 1)));
8750 xmode = op_mode;
8751
8752 /* If X is still an AND, see if it is an AND with a mask that
8753 is just some low-order bits. If so, and it is MASK, we don't
8754 need it. */
8755
8756 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8757 && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
8758 x = XEXP (x, 0);
8759
8760 /* If it remains an AND, try making another AND with the bits
8761 in the mode mask that aren't in MASK turned on. If the
8762 constant in the AND is wide enough, this might make a
8763 cheaper constant. */
8764
8765 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8766 && GET_MODE_MASK (xmode) != mask
8767 && HWI_COMPUTABLE_MODE_P (xmode))
8768 {
8769 unsigned HOST_WIDE_INT cval
8770 = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
8771 rtx y;
8772
8773 y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
8774 gen_int_mode (cval, xmode));
8775 if (set_src_cost (y, xmode, optimize_this_for_speed_p)
8776 < set_src_cost (x, xmode, optimize_this_for_speed_p))
8777 x = y;
8778 }
8779
8780 break;
8781 }
8782
8783 goto binop;
8784
8785 case PLUS:
8786 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8787 low-order bits (as in an alignment operation) and FOO is already
8788 aligned to that boundary, mask C1 to that boundary as well.
8789 This may eliminate that PLUS and, later, the AND. */
8790
8791 {
8792 unsigned int width = GET_MODE_PRECISION (mode);
8793 unsigned HOST_WIDE_INT smask = mask;
8794
8795 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8796 number, sign extend it. */
8797
8798 if (width < HOST_BITS_PER_WIDE_INT
8799 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8800 smask |= HOST_WIDE_INT_M1U << width;
8801
8802 if (CONST_INT_P (XEXP (x, 1))
8803 && pow2p_hwi (- smask)
8804 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8805 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8806 return force_to_mode (plus_constant (xmode, XEXP (x, 0),
8807 (INTVAL (XEXP (x, 1)) & smask)),
8808 mode, smask, next_select);
8809 }
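/* Example: (and (plus X (const_int 13)) (const_int -8)) with X known
   to be 8-byte aligned becomes (and (plus X (const_int 8))
   (const_int -8)); 13 & -8 == 8, and the discarded 5 can never carry
   into bit 3 when added to an 8-byte-aligned value. */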
8810
8811 /* fall through */
8812
8813 case MULT:
8814 /* Substituting into the operands of a widening MULT is not likely to
8815 create RTL matching a machine insn. */
8816 if (code == MULT
8817 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8818 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8819 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8820 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8821 && REG_P (XEXP (XEXP (x, 0), 0))
8822 && REG_P (XEXP (XEXP (x, 1), 0)))
8823 return gen_lowpart_or_truncate (mode, x);
8824
8825 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8826 most significant bit in MASK since carries from those bits will
8827 affect the bits we are interested in. */
8828 mask = fuller_mask;
8829 goto binop;
8830
8831 case MINUS:
8832 /* If X is (minus C Y) where C's least set bit is larger than any bit
8833 in the mask, then we may replace with (neg Y). */
8834 if (poly_int_rtx_p (XEXP (x, 0), &const_op0)
8835 && known_alignment (poly_uint64 (const_op0)) > mask)
8836 {
8837 x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
8838 return force_to_mode (x, mode, mask, next_select);
8839 }
8840
8841 /* Similarly, if C contains every bit in the fuller_mask, then we may
8842 replace with (not Y). */
8843 if (CONST_INT_P (XEXP (x, 0))
8844 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8845 {
8846 x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
8847 return force_to_mode (x, mode, mask, next_select);
8848 }
8849
8850 mask = fuller_mask;
8851 goto binop;
8852
8853 case IOR:
8854 case XOR:
8855 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8856 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8857 operation which may be a bitfield extraction. Ensure that the
8858 constant we form is not wider than the mode of X. */
8859
8860 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8861 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8862 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8863 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8864 && CONST_INT_P (XEXP (x, 1))
8865 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8866 + floor_log2 (INTVAL (XEXP (x, 1))))
8867 < GET_MODE_PRECISION (xmode))
8868 && (UINTVAL (XEXP (x, 1))
8869 & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
8870 {
8871 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8872 << INTVAL (XEXP (XEXP (x, 0), 1)),
8873 xmode);
8874 temp = simplify_gen_binary (GET_CODE (x), xmode,
8875 XEXP (XEXP (x, 0), 0), temp);
8876 x = simplify_gen_binary (LSHIFTRT, xmode, temp,
8877 XEXP (XEXP (x, 0), 1));
8878 return force_to_mode (x, mode, mask, next_select);
8879 }
8880
8881 binop:
8882 /* For most binary operations, just propagate into the operation and
8883 change the mode if we have an operation of that mode. */
8884
8885 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8886 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8887
8888 /* If we ended up truncating both operands, truncate the result of the
8889 operation instead. */
8890 if (GET_CODE (op0) == TRUNCATE
8891 && GET_CODE (op1) == TRUNCATE)
8892 {
8893 op0 = XEXP (op0, 0);
8894 op1 = XEXP (op1, 0);
8895 }
8896
8897 op0 = gen_lowpart_or_truncate (op_mode, op0);
8898 op1 = gen_lowpart_or_truncate (op_mode, op1);
8899
8900 if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8901 {
8902 x = simplify_gen_binary (code, op_mode, op0, op1);
8903 xmode = op_mode;
8904 }
8905 break;
8906
8907 case ASHIFT:
8908 /* For left shifts, do the same, but just for the first operand.
8909 However, we cannot do anything with shifts where we cannot
8910 guarantee that the counts are smaller than the size of the mode
8911 because such a count will have a different meaning in a
8912 wider mode. */
8913
8914 if (! (CONST_INT_P (XEXP (x, 1))
8915 && INTVAL (XEXP (x, 1)) >= 0
8916 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8917 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8918 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8919 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8920 break;
8921
8922 /* If the shift count is a constant and we can do arithmetic in
8923 the mode of the shift, refine which bits we need. Otherwise, use the
8924 conservative form of the mask. */
8925 if (CONST_INT_P (XEXP (x, 1))
8926 && INTVAL (XEXP (x, 1)) >= 0
8927 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8928 && HWI_COMPUTABLE_MODE_P (op_mode))
8929 mask >>= INTVAL (XEXP (x, 1));
8930 else
8931 mask = fuller_mask;
8932
8933 op0 = gen_lowpart_or_truncate (op_mode,
8934 force_to_mode (XEXP (x, 0), mode,
8935 mask, next_select));
8936
8937 if (op_mode != xmode || op0 != XEXP (x, 0))
8938 {
8939 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8940 xmode = op_mode;
8941 }
8942 break;
8943
8944 case LSHIFTRT:
8945 /* Here we can only do something if the shift count is a constant,
8946 this shift constant is valid for the host, and we can do arithmetic
8947 in OP_MODE. */
8948
8949 if (CONST_INT_P (XEXP (x, 1))
8950 && INTVAL (XEXP (x, 1)) >= 0
8951 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8952 && HWI_COMPUTABLE_MODE_P (op_mode))
8953 {
8954 rtx inner = XEXP (x, 0);
8955 unsigned HOST_WIDE_INT inner_mask;
8956
8957 /* Select the mask of the bits we need for the shift operand. */
8958 inner_mask = mask << INTVAL (XEXP (x, 1));
8959
8960 /* We can only change the mode of the shift if we can do arithmetic
8961 in the mode of the shift and INNER_MASK is no wider than the
8962 width of X's mode. */
8963 if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
8964 op_mode = xmode;
8965
8966 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8967
8968 if (xmode != op_mode || inner != XEXP (x, 0))
8969 {
8970 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8971 xmode = op_mode;
8972 }
8973 }
8974
8975 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8976 shift and AND produces only copies of the sign bit (C2 is one less
8977 than a power of two), we can do this with just a shift. */
8978
8979 if (GET_CODE (x) == LSHIFTRT
8980 && CONST_INT_P (XEXP (x, 1))
8981 /* The shift puts one of the sign bit copies in the least significant
8982 bit. */
8983 && ((INTVAL (XEXP (x, 1))
8984 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8985 >= GET_MODE_PRECISION (xmode))
8986 && pow2p_hwi (mask + 1)
8987 /* Number of bits left after the shift must be more than the mask
8988 needs. */
8989 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8990 <= GET_MODE_PRECISION (xmode))
8991 /* Must be more sign bit copies than the mask needs. */
8992 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8993 >= exact_log2 (mask + 1)))
8994 {
8995 int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
8996 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
8997 gen_int_shift_amount (xmode, nbits));
8998 }
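/* Example: if FOO has 30 sign-bit copies in SImode, then
   (and (lshiftrt FOO (const_int 2)) (const_int 7)) selects three
   bits that are all copies of the sign bit, so it equals
   (lshiftrt FOO (const_int 29)) and the AND disappears. */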
8999 goto shiftrt;
9000
9001 case ASHIFTRT:
9002 /* If we are just looking for the sign bit, we don't need this shift at
9003 all, even if it has a variable count. */
9004 if (val_signbit_p (xmode, mask))
9005 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9006
9007 /* If this is a shift by a constant, get a mask that contains those bits
9008 that are not copies of the sign bit. We then have two cases: If
9009 MASK only includes those bits, this can be a logical shift, which may
9010 allow simplifications. If MASK is a single-bit field not within
9011 those bits, we are requesting a copy of the sign bit and hence can
9012 shift the sign bit to the appropriate location. */
9013
9014 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
9015 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
9016 {
9017 unsigned HOST_WIDE_INT nonzero;
9018 int i;
9019
9020 /* If the considered data is wider than HOST_WIDE_INT, we can't
9021 represent a mask for all its bits in a single scalar.
9022 But we only care about the lower bits, so calculate these. */
9023
9024 if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
9025 {
9026 nonzero = HOST_WIDE_INT_M1U;
9027
9028 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
9029 is the number of bits a full-width mask would have set.
9030 We need only shift if these are fewer than nonzero can
9031 hold. If not, we must keep all bits set in nonzero. */
9032
9033 if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
9034 < HOST_BITS_PER_WIDE_INT)
9035 nonzero >>= INTVAL (XEXP (x, 1))
9036 + HOST_BITS_PER_WIDE_INT
9037 - GET_MODE_PRECISION (xmode);
9038 }
9039 else
9040 {
9041 nonzero = GET_MODE_MASK (xmode);
9042 nonzero >>= INTVAL (XEXP (x, 1));
9043 }
9044
9045 if ((mask & ~nonzero) == 0)
9046 {
9047 x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
9048 XEXP (x, 0), INTVAL (XEXP (x, 1)));
9049 if (GET_CODE (x) != ASHIFTRT)
9050 return force_to_mode (x, mode, mask, next_select);
9051 }
9052
9053 else if ((i = exact_log2 (mask)) >= 0)
9054 {
9055 x = simplify_shift_const
9056 (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
9057 GET_MODE_PRECISION (xmode) - 1 - i);
9058
9059 if (GET_CODE (x) != ASHIFTRT)
9060 return force_to_mode (x, mode, mask, next_select);
9061 }
9062 }
9063
9064 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
9065 even if the shift count isn't a constant. */
9066 if (mask == 1)
9067 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
9068
9069 shiftrt:
9070
9071 /* If this is a zero- or sign-extension operation that just affects bits
9072 we don't care about, remove it. Be sure the call above returned
9073 something that is still a shift. */
9074
9075 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
9076 && CONST_INT_P (XEXP (x, 1))
9077 && INTVAL (XEXP (x, 1)) >= 0
9078 && (INTVAL (XEXP (x, 1))
9079 <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
9080 && GET_CODE (XEXP (x, 0)) == ASHIFT
9081 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
9082 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
9083 next_select);
9084
9085 break;
9086
9087 case ROTATE:
9088 case ROTATERT:
9089 /* If the shift count is constant and we can do computations
9090 in the mode of X, compute where the bits we care about are.
9091 Otherwise, we can't do anything. Don't change the mode of
9092 the shift or propagate MODE into the shift, though. */
9093 if (CONST_INT_P (XEXP (x, 1))
9094 && INTVAL (XEXP (x, 1)) >= 0)
9095 {
9096 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
9097 xmode, gen_int_mode (mask, xmode),
9098 XEXP (x, 1));
9099 if (temp && CONST_INT_P (temp))
9100 x = simplify_gen_binary (code, xmode,
9101 force_to_mode (XEXP (x, 0), xmode,
9102 INTVAL (temp), next_select),
9103 XEXP (x, 1));
9104 }
9105 break;
9106
9107 case NEG:
9108 /* If we just want the low-order bit, the NEG isn't needed since it
9109 won't change the low-order bit. */
9110 if (mask == 1)
9111 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
9112
9113 /* We need any bits less significant than the most significant bit in
9114 MASK since carries from those bits will affect the bits we are
9115 interested in. */
9116 mask = fuller_mask;
9117 goto unop;
9118
9119 case NOT:
9120 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
9121 same as the XOR case above. Ensure that the constant we form is not
9122 wider than the mode of X. */
9123
9124 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
9125 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9126 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
9127 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
9128 < GET_MODE_PRECISION (xmode))
9129 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
9130 {
9131 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
9132 temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
9133 x = simplify_gen_binary (LSHIFTRT, xmode,
9134 temp, XEXP (XEXP (x, 0), 1));
9135
9136 return force_to_mode (x, mode, mask, next_select);
9137 }
9138
9139 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
9140 use the full mask inside the NOT. */
9141 mask = fuller_mask;
9142
9143 unop:
9144 op0 = gen_lowpart_or_truncate (op_mode,
9145 force_to_mode (XEXP (x, 0), mode, mask,
9146 next_select));
9147 if (op_mode != xmode || op0 != XEXP (x, 0))
9148 {
9149 x = simplify_gen_unary (code, op_mode, op0, op_mode);
9150 xmode = op_mode;
9151 }
9152 break;
9153
9154 case NE:
9155 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
9156 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
9157 which is equal to STORE_FLAG_VALUE. */
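/* Example with STORE_FLAG_VALUE == 1: if FOO is known to be 0 or 1,
   (ne FOO (const_int 0)) computes exactly FOO, so the comparison is
   dropped whenever MASK fits in the low bit. */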
9158 if ((mask & ~STORE_FLAG_VALUE) == 0
9159 && XEXP (x, 1) == const0_rtx
9160 && GET_MODE (XEXP (x, 0)) == mode
9161 && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9162 && (nonzero_bits (XEXP (x, 0), mode)
9163 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
9164 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9165
9166 break;
9167
9168 case IF_THEN_ELSE:
9169 /* We have no way of knowing if the IF_THEN_ELSE can itself be
9170 written in a narrower mode. We play it safe and do not do so. */
9171
9172 op0 = gen_lowpart_or_truncate (xmode,
9173 force_to_mode (XEXP (x, 1), mode,
9174 mask, next_select));
9175 op1 = gen_lowpart_or_truncate (xmode,
9176 force_to_mode (XEXP (x, 2), mode,
9177 mask, next_select));
9178 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
9179 x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
9180 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
9181 op0, op1);
9182 break;
9183
9184 default:
9185 break;
9186 }
9187
9188 /* Ensure we return a value of the proper mode. */
9189 return gen_lowpart_or_truncate (mode, x);
9190 }
9191 \f
9192 /* Return nonzero if X is an expression that has one of two values depending on
9193 whether some other value is zero or nonzero. In that case, we return the
9194 value that is being tested, *PTRUE is set to the value if the rtx being
9195 returned has a nonzero value, and *PFALSE is set to the other alternative.
9196
9197 If we return zero, we set *PTRUE and *PFALSE to X. */
9198
9199 static rtx
9200 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9201 {
9202 machine_mode mode = GET_MODE (x);
9203 enum rtx_code code = GET_CODE (x);
9204 rtx cond0, cond1, true0, true1, false0, false1;
9205 unsigned HOST_WIDE_INT nz;
9206 scalar_int_mode int_mode;
9207
9208 /* If we are comparing a value against zero, we are done. */
9209 if ((code == NE || code == EQ)
9210 && XEXP (x, 1) == const0_rtx)
9211 {
9212 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9213 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9214 return XEXP (x, 0);
9215 }
9216
9217 /* If this is a unary operation whose operand has one of two values, apply
9218 our opcode to compute those values. */
9219 else if (UNARY_P (x)
9220 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9221 {
9222 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9223 *pfalse = simplify_gen_unary (code, mode, false0,
9224 GET_MODE (XEXP (x, 0)));
9225 return cond0;
9226 }
9227
9228 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9229 make can't possibly match and would suppress other optimizations. */
9230 else if (code == COMPARE)
9231 ;
9232
9233 /* If this is a binary operation, see if either side has only one of two
9234 values. If either one does or if both do and they are conditional on
9235 the same value, compute the new true and false values. */
9236 else if (BINARY_P (x))
9237 {
9238 rtx op0 = XEXP (x, 0);
9239 rtx op1 = XEXP (x, 1);
9240 cond0 = if_then_else_cond (op0, &true0, &false0);
9241 cond1 = if_then_else_cond (op1, &true1, &false1);
9242
9243 if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
9244 && (REG_P (op0) || REG_P (op1)))
9245 {
9246 /* Try to enable a simplification by undoing work done by
9247 if_then_else_cond if it converted a REG into something more
9248 complex. */
9249 if (REG_P (op0))
9250 {
9251 cond0 = 0;
9252 true0 = false0 = op0;
9253 }
9254 else
9255 {
9256 cond1 = 0;
9257 true1 = false1 = op1;
9258 }
9259 }
9260
9261 if ((cond0 != 0 || cond1 != 0)
9262 && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
9263 {
9264 /* If if_then_else_cond returned zero, then true/false are the
9265 same rtl. We must copy one of them to prevent invalid rtl
9266 sharing. */
9267 if (cond0 == 0)
9268 true0 = copy_rtx (true0);
9269 else if (cond1 == 0)
9270 true1 = copy_rtx (true1);
9271
9272 if (COMPARISON_P (x))
9273 {
9274 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9275 true0, true1);
9276 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9277 false0, false1);
9278 }
9279 else
9280 {
9281 *ptrue = simplify_gen_binary (code, mode, true0, true1);
9282 *pfalse = simplify_gen_binary (code, mode, false0, false1);
9283 }
9284
9285 return cond0 ? cond0 : cond1;
9286 }
9287
9288 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9289 operands is zero when the other is nonzero, and vice-versa,
9290 and STORE_FLAG_VALUE is 1 or -1. */
9291
9292 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9293 && (code == PLUS || code == IOR || code == XOR || code == MINUS
9294 || code == UMAX)
9295 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9296 {
9297 rtx op0 = XEXP (XEXP (x, 0), 1);
9298 rtx op1 = XEXP (XEXP (x, 1), 1);
9299
9300 cond0 = XEXP (XEXP (x, 0), 0);
9301 cond1 = XEXP (XEXP (x, 1), 0);
9302
9303 if (COMPARISON_P (cond0)
9304 && COMPARISON_P (cond1)
9305 && SCALAR_INT_MODE_P (mode)
9306 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9307 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9308 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9309 || ((swap_condition (GET_CODE (cond0))
9310 == reversed_comparison_code (cond1, NULL))
9311 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9312 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9313 && ! side_effects_p (x))
9314 {
9315 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9316 *pfalse = simplify_gen_binary (MULT, mode,
9317 (code == MINUS
9318 ? simplify_gen_unary (NEG, mode,
9319 op1, mode)
9320 : op1),
9321 const_true_rtx);
9322 return cond0;
9323 }
9324 }
9325
9326 /* Similarly for MULT, AND and UMIN, except that for these the result
9327 is always zero. */
9328 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9329 && (code == MULT || code == AND || code == UMIN)
9330 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9331 {
9332 cond0 = XEXP (XEXP (x, 0), 0);
9333 cond1 = XEXP (XEXP (x, 1), 0);
9334
9335 if (COMPARISON_P (cond0)
9336 && COMPARISON_P (cond1)
9337 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9338 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9339 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9340 || ((swap_condition (GET_CODE (cond0))
9341 == reversed_comparison_code (cond1, NULL))
9342 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9343 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9344 && ! side_effects_p (x))
9345 {
9346 *ptrue = *pfalse = const0_rtx;
9347 return cond0;
9348 }
9349 }
9350 }
9351
9352 else if (code == IF_THEN_ELSE)
9353 {
9354 /* If we have IF_THEN_ELSE already, extract the condition and
9355 canonicalize it if it is NE or EQ. */
9356 cond0 = XEXP (x, 0);
9357 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9358 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9359 return XEXP (cond0, 0);
9360 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9361 {
9362 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9363 return XEXP (cond0, 0);
9364 }
9365 else
9366 return cond0;
9367 }
9368
9369 /* If X is a SUBREG, we can narrow both the true and false values
9370 of the inner expression, if there is a condition. */
9371 else if (code == SUBREG
9372 && (cond0 = if_then_else_cond (SUBREG_REG (x), &true0,
9373 &false0)) != 0)
9374 {
9375 true0 = simplify_gen_subreg (mode, true0,
9376 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9377 false0 = simplify_gen_subreg (mode, false0,
9378 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9379 if (true0 && false0)
9380 {
9381 *ptrue = true0;
9382 *pfalse = false0;
9383 return cond0;
9384 }
9385 }
9386
9387 /* If X is a constant, this isn't special and will cause confusion
9388 if we treat it as such. Likewise if it is equivalent to a constant. */
9389 else if (CONSTANT_P (x)
9390 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9391 ;
9392
9393 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9394 will be least confusing to the rest of the compiler. */
9395 else if (mode == BImode)
9396 {
9397 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9398 return x;
9399 }
9400
9401 /* If X is known to be either 0 or -1, those are the true and
9402 false values when testing X. */
9403 else if (x == constm1_rtx || x == const0_rtx
9404 || (is_a <scalar_int_mode> (mode, &int_mode)
9405 && (num_sign_bit_copies (x, int_mode)
9406 == GET_MODE_PRECISION (int_mode))))
9407 {
9408 *ptrue = constm1_rtx, *pfalse = const0_rtx;
9409 return x;
9410 }
9411
9412 /* Likewise for 0 or a single bit. */
9413 else if (HWI_COMPUTABLE_MODE_P (mode)
9414 && pow2p_hwi (nz = nonzero_bits (x, mode)))
9415 {
9416 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9417 return x;
9418 }
9419
9420 /* Otherwise fail; show no condition with true and false values the same. */
9421 *ptrue = *pfalse = x;
9422 return 0;
9423 }
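/* Example: for X == (ne (reg R) (const_int 0)) this returns (reg R)
   with *PTRUE == const_true_rtx and *PFALSE == const0_rtx; for a
   register known to be 0 or -1 it returns the register itself with
   *PTRUE == constm1_rtx and *PFALSE == const0_rtx. */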
9424 \f
9425 /* Return the value of expression X given the fact that condition COND
9426 is known to be true when applied to REG as its first operand and VAL
9427 as its second. X is known to not be shared and so can be modified in
9428 place.
9429
9430 We only handle the simplest cases, and specifically those cases that
9431 arise with IF_THEN_ELSE expressions. */
9432
9433 static rtx
9434 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9435 {
9436 enum rtx_code code = GET_CODE (x);
9437 const char *fmt;
9438 int i, j;
9439
9440 if (side_effects_p (x))
9441 return x;
9442
9443 /* If either operand of the condition is a floating point value,
9444 then we have to avoid collapsing an EQ comparison. */
9445 if (cond == EQ
9446 && rtx_equal_p (x, reg)
9447 && ! FLOAT_MODE_P (GET_MODE (x))
9448 && ! FLOAT_MODE_P (GET_MODE (val)))
9449 return val;
9450
9451 if (cond == UNEQ && rtx_equal_p (x, reg))
9452 return val;
9453
9454 /* If X is (abs REG) and we know something about REG's relationship
9455 with zero, we may be able to simplify this. */
9456
9457 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9458 switch (cond)
9459 {
9460 case GE: case GT: case EQ:
9461 return XEXP (x, 0);
9462 case LT: case LE:
9463 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9464 XEXP (x, 0),
9465 GET_MODE (XEXP (x, 0)));
9466 default:
9467 break;
9468 }
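/* Example: in the arm guarded by (ge R (const_int 0)), known_cond
   rewrites (abs R) to R; in the arm guarded by (lt R (const_int 0))
   it becomes (neg R). */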
9469
9470 /* The only other cases we handle are MIN, MAX, and comparisons if the
9471 operands are the same as REG and VAL. */
9472
9473 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9474 {
9475 if (rtx_equal_p (XEXP (x, 0), val))
9476 {
9477 std::swap (val, reg);
9478 cond = swap_condition (cond);
9479 }
9480
9481 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9482 {
9483 if (COMPARISON_P (x))
9484 {
9485 if (comparison_dominates_p (cond, code))
9486 return VECTOR_MODE_P (GET_MODE (x)) ? x : const_true_rtx;
9487
9488 code = reversed_comparison_code (x, NULL);
9489 if (code != UNKNOWN
9490 && comparison_dominates_p (cond, code))
9491 return CONST0_RTX (GET_MODE (x));
9492 else
9493 return x;
9494 }
9495 else if (code == SMAX || code == SMIN
9496 || code == UMIN || code == UMAX)
9497 {
9498 int unsignedp = (code == UMIN || code == UMAX);
9499
9500 /* Do not reverse the condition when it is NE or EQ.
9501 This is because we cannot conclude anything about
9502 the value of 'SMAX (x, y)' when x is not equal to y,
9503 but we can when x equals y. */
9504 if ((code == SMAX || code == UMAX)
9505 && ! (cond == EQ || cond == NE))
9506 cond = reverse_condition (cond);
9507
9508 switch (cond)
9509 {
9510 case GE: case GT:
9511 return unsignedp ? x : XEXP (x, 1);
9512 case LE: case LT:
9513 return unsignedp ? x : XEXP (x, 0);
9514 case GEU: case GTU:
9515 return unsignedp ? XEXP (x, 1) : x;
9516 case LEU: case LTU:
9517 return unsignedp ? XEXP (x, 0) : x;
9518 default:
9519 break;
9520 }
9521 }
9522 }
9523 }
9524 else if (code == SUBREG)
9525 {
9526 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9527 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9528
9529 if (SUBREG_REG (x) != r)
9530 {
9531 /* We must simplify subreg here, before we lose track of the
9532 original inner_mode. */
9533 new_rtx = simplify_subreg (GET_MODE (x), r,
9534 inner_mode, SUBREG_BYTE (x));
9535 if (new_rtx)
9536 return new_rtx;
9537 else
9538 SUBST (SUBREG_REG (x), r);
9539 }
9540
9541 return x;
9542 }
9543 /* We don't have to handle SIGN_EXTEND here, because even in the
9544 case of replacing something with a modeless CONST_INT, a
9545 CONST_INT is already (supposed to be) a valid sign extension for
9546 its narrower mode, which implies it's already properly
9547 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9548 story is different. */
9549 else if (code == ZERO_EXTEND)
9550 {
9551 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9552 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9553
9554 if (XEXP (x, 0) != r)
9555 {
9556 /* We must simplify the zero_extend here, before we lose
9557 track of the original inner_mode. */
9558 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9559 r, inner_mode);
9560 if (new_rtx)
9561 return new_rtx;
9562 else
9563 SUBST (XEXP (x, 0), r);
9564 }
9565
9566 return x;
9567 }
9568
9569 fmt = GET_RTX_FORMAT (code);
9570 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9571 {
9572 if (fmt[i] == 'e')
9573 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9574 else if (fmt[i] == 'E')
9575 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9576 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9577 cond, reg, val));
9578 }
9579
9580 return x;
9581 }
9582 \f
9583 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9584 assignment as a field assignment. */
9585
9586 static int
9587 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9588 {
9589 if (widen_x && GET_MODE (x) != GET_MODE (y))
9590 {
9591 if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
9592 return 0;
9593 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9594 return 0;
9595 x = adjust_address_nv (x, GET_MODE (y),
9596 byte_lowpart_offset (GET_MODE (y),
9597 GET_MODE (x)));
9598 }
9599
9600 if (x == y || rtx_equal_p (x, y))
9601 return 1;
9602
9603 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9604 return 0;
9605
9606 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9607 Note that all SUBREGs of MEM are paradoxical; otherwise they
9608 would have been rewritten. */
9609 if (MEM_P (x) && GET_CODE (y) == SUBREG
9610 && MEM_P (SUBREG_REG (y))
9611 && rtx_equal_p (SUBREG_REG (y),
9612 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9613 return 1;
9614
9615 if (MEM_P (y) && GET_CODE (x) == SUBREG
9616 && MEM_P (SUBREG_REG (x))
9617 && rtx_equal_p (SUBREG_REG (x),
9618 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9619 return 1;
9620
9621 /* We used to see if get_last_value of X and Y were the same but that's
9622 not correct. In one direction, we'll cause the assignment to have
9623 the wrong destination, and in the other direction, we'll import a register
9624 into this insn that might already have been dead. So fail if none of the
9625 above cases are true. */
9626 return 0;
9627 }
9628 \f
9629 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9630 Return that assignment if so.
9631
9632 We only handle the most common cases. */
9633
9634 static rtx
9635 make_field_assignment (rtx x)
9636 {
9637 rtx dest = SET_DEST (x);
9638 rtx src = SET_SRC (x);
9639 rtx assign;
9640 rtx rhs, lhs;
9641 HOST_WIDE_INT c1;
9642 HOST_WIDE_INT pos;
9643 unsigned HOST_WIDE_INT len;
9644 rtx other;
9645
9646 /* All the rules in this function are specific to scalar integers. */
9647 scalar_int_mode mode;
9648 if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9649 return x;
9650
9651 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9652 a clear of a one-bit field. We will have changed it to
9653 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9654 for a SUBREG. */
9655
9656 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9657 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9658 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9659 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9660 {
9661 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9662 1, 1, 1, 0);
9663 if (assign != 0)
9664 return gen_rtx_SET (assign, const0_rtx);
9665 return x;
9666 }
9667
9668 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9669 && subreg_lowpart_p (XEXP (src, 0))
9670 && partial_subreg_p (XEXP (src, 0))
9671 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9672 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9673 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9674 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9675 {
9676 assign = make_extraction (VOIDmode, dest, 0,
9677 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9678 1, 1, 1, 0);
9679 if (assign != 0)
9680 return gen_rtx_SET (assign, const0_rtx);
9681 return x;
9682 }
9683
9684 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9685 one-bit field. */
9686 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9687 && XEXP (XEXP (src, 0), 0) == const1_rtx
9688 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9689 {
9690 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9691 1, 1, 1, 0);
9692 if (assign != 0)
9693 return gen_rtx_SET (assign, const1_rtx);
9694 return x;
9695 }
9696
9697 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9698 SRC is an AND with all bits of that field set, then we can discard
9699 the AND. */
9700 if (GET_CODE (dest) == ZERO_EXTRACT
9701 && CONST_INT_P (XEXP (dest, 1))
9702 && GET_CODE (src) == AND
9703 && CONST_INT_P (XEXP (src, 1)))
9704 {
9705 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9706 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9707 unsigned HOST_WIDE_INT ze_mask;
9708
9709 if (width >= HOST_BITS_PER_WIDE_INT)
9710 ze_mask = -1;
9711 else
9712 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9713
9714 /* Complete overlap. We can remove the source AND. */
9715 if ((and_mask & ze_mask) == ze_mask)
9716 return gen_rtx_SET (dest, XEXP (src, 0));
9717
9718 /* Partial overlap. We can reduce the source AND. */
9719 if ((and_mask & ze_mask) != and_mask)
9720 {
9721 src = gen_rtx_AND (mode, XEXP (src, 0),
9722 gen_int_mode (and_mask & ze_mask, mode));
9723 return gen_rtx_SET (dest, src);
9724 }
9725 }
9726
9727 /* The other case we handle is assignments into a constant-position
9728 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9729 a mask that has all one bits except for a group of zero bits and
9730 OTHER is known to have zeros where C1 has ones, this is such an
9731 assignment. Compute the position and length from C1. Shift OTHER
9732 to the appropriate position, force it to the required mode, and
9733 make the extraction. Check for the AND in both operands. */
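/* Example in SImode: (ior (and DEST (const_int 0xffff00ff)) OTHER),
   with OTHER known to be zero outside bits 8..15, assigns
   (lshiftrt OTHER (const_int 8)) into (zero_extract DEST
   (const_int 8) (const_int 8)): ~C1 == 0x0000ff00 gives POS 8 and
   LEN 8. */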
9734
9735 /* One or more SUBREGs might obscure the constant-position field
9736 assignment. The first one we are likely to encounter is an outer
9737 narrowing SUBREG, which we can just strip for the purposes of
9738 identifying the constant-field assignment. */
9739 scalar_int_mode src_mode = mode;
9740 if (GET_CODE (src) == SUBREG
9741 && subreg_lowpart_p (src)
9742 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9743 src = SUBREG_REG (src);
9744
9745 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9746 return x;
9747
9748 rhs = expand_compound_operation (XEXP (src, 0));
9749 lhs = expand_compound_operation (XEXP (src, 1));
9750
9751 if (GET_CODE (rhs) == AND
9752 && CONST_INT_P (XEXP (rhs, 1))
9753 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9754 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9755 /* The second SUBREG that might get in the way is a paradoxical
9756 SUBREG around the first operand of the AND. We want to
9757 pretend the operand is as wide as the destination here. We
9758 do this by adjusting the MEM to wider mode for the sole
9759 purpose of the call to rtx_equal_for_field_assignment_p. Also
9760 note this trick only works for MEMs. */
9761 else if (GET_CODE (rhs) == AND
9762 && paradoxical_subreg_p (XEXP (rhs, 0))
9763 && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9764 && CONST_INT_P (XEXP (rhs, 1))
9765 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9766 dest, true))
9767 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9768 else if (GET_CODE (lhs) == AND
9769 && CONST_INT_P (XEXP (lhs, 1))
9770 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9771 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9772 /* The second SUBREG that might get in the way is a paradoxical
9773 SUBREG around the first operand of the AND. We want to
9774 pretend the operand is as wide as the destination here. We
9775 do this by adjusting the MEM to wider mode for the sole
9776 purpose of the call to rtx_equal_for_field_assignment_p. Also
9777 note this trick only works for MEMs. */
9778 else if (GET_CODE (lhs) == AND
9779 && paradoxical_subreg_p (XEXP (lhs, 0))
9780 && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9781 && CONST_INT_P (XEXP (lhs, 1))
9782 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9783 dest, true))
9784 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9785 else
9786 return x;
9787
9788 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9789 if (pos < 0
9790 || pos + len > GET_MODE_PRECISION (mode)
9791 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
9792 || (c1 & nonzero_bits (other, mode)) != 0)
9793 return x;
9794
9795 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9796 if (assign == 0)
9797 return x;
9798
9799 /* The mode to use for the source is the mode of the assignment, or of
9800 what is inside a possible STRICT_LOW_PART. */
9801 machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
9802 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9803
9804 /* Shift OTHER right POS places and make it the source, restricting it
9805 to the proper length and mode. */
9806
9807 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9808 src_mode, other, pos),
9809 dest);
9810 src = force_to_mode (src, new_mode,
9811 len >= HOST_BITS_PER_WIDE_INT
9812 ? HOST_WIDE_INT_M1U
9813 : (HOST_WIDE_INT_1U << len) - 1,
9814 0);
9815
9816 /* If SRC is masked by an AND that does not make a difference in
9817 the value being stored, strip it. */
9818 if (GET_CODE (assign) == ZERO_EXTRACT
9819 && CONST_INT_P (XEXP (assign, 1))
9820 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9821 && GET_CODE (src) == AND
9822 && CONST_INT_P (XEXP (src, 1))
9823 && UINTVAL (XEXP (src, 1))
9824 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9825 src = XEXP (src, 0);
9826
9827 return gen_rtx_SET (assign, src);
9828 }
9829 \f
9830 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9831 if so. */
9832
9833 static rtx
9834 apply_distributive_law (rtx x)
9835 {
9836 enum rtx_code code = GET_CODE (x);
9837 enum rtx_code inner_code;
9838 rtx lhs, rhs, other;
9839 rtx tem;
9840
9841 /* Distributivity is not true for floating point as it can change the
9842 value. So we don't do it unless -funsafe-math-optimizations. */
9843 if (FLOAT_MODE_P (GET_MODE (x))
9844 && ! flag_unsafe_math_optimizations)
9845 return x;
9846
9847 /* The outer operation can only be one of the following: */
9848 if (code != IOR && code != AND && code != XOR
9849 && code != PLUS && code != MINUS)
9850 return x;
9851
9852 lhs = XEXP (x, 0);
9853 rhs = XEXP (x, 1);
9854
9855 /* If either operand is a primitive we can't do anything, so get out
9856 fast. */
9857 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9858 return x;
9859
9860 lhs = expand_compound_operation (lhs);
9861 rhs = expand_compound_operation (rhs);
9862 inner_code = GET_CODE (lhs);
9863 if (inner_code != GET_CODE (rhs))
9864 return x;
9865
9866 /* See if the inner and outer operations distribute. */
9867 switch (inner_code)
9868 {
9869 case LSHIFTRT:
9870 case ASHIFTRT:
9871 case AND:
9872 case IOR:
9873 /* These all distribute except over PLUS. */
9874 if (code == PLUS || code == MINUS)
9875 return x;
9876 break;
9877
9878 case MULT:
9879 if (code != PLUS && code != MINUS)
9880 return x;
9881 break;
9882
9883 case ASHIFT:
9884 /* This is also a multiply, so it distributes over everything. */
9885 break;
9886
9887 /* This used to handle SUBREG, but this turned out to be counter-
9888 productive, since (subreg (op ...)) usually is not handled by
9889 insn patterns, and this "optimization" therefore transformed
9890 recognizable patterns into unrecognizable ones. Therefore the
9891 SUBREG case was removed from here.
9892
9893 It is possible that distributing SUBREG over arithmetic operations
9894 leads to an intermediate result that can then be optimized further,
9895 e.g. by moving the outer SUBREG to the other side of a SET as done
9896 in simplify_set. This seems to have been the original intent of
9897 handling SUBREGs here.
9898
9899 However, with current GCC this does not appear to actually happen,
9900 at least on major platforms. If some case is found where removing
9901 the SUBREG case here prevents follow-on optimizations, distributing
9902 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9903
9904 default:
9905 return x;
9906 }
9907
9908 /* Set LHS and RHS to the inner operands (A and B in the example
9909 above) and set OTHER to the common operand (C in the example).
9910 There is only one way to do this unless the inner operation is
9911 commutative. */
9912 if (COMMUTATIVE_ARITH_P (lhs)
9913 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9914 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9915 else if (COMMUTATIVE_ARITH_P (lhs)
9916 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9917 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9918 else if (COMMUTATIVE_ARITH_P (lhs)
9919 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9920 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9921 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9922 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9923 else
9924 return x;
9925
9926 /* Form the new inner operation, seeing if it simplifies first. */
9927 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9928
9929 /* There is one exception to the general way of distributing:
9930 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9931 if (code == XOR && inner_code == IOR)
9932 {
9933 inner_code = AND;
9934 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9935 }
9936
9937 /* We may be able to continue distributing the result, so call
9938 ourselves recursively on the inner operation before forming the
9939 outer operation, which we return. */
9940 return simplify_gen_binary (inner_code, GET_MODE (x),
9941 apply_distributive_law (tem), other);
9942 }
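
/* An illustration of the transformation above (hypothetical RTL, not
   taken from the sources): since ASHIFT distributes over IOR,

     (ior:SI (ashift:SI (reg:SI 100) (const_int 2))
             (ashift:SI (reg:SI 101) (const_int 2)))

   is rewritten as

     (ashift:SI (ior:SI (reg:SI 100) (reg:SI 101)) (const_int 2)),

   replacing two shifts with one.  Here OTHER binds to (const_int 2)
   and LHS/RHS to the two registers.  */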
9943
9944 /* See if X is of the form (* (+ A B) C), and if so convert to
9945 (+ (* A C) (* B C)) and try to simplify.
9946
9947 Most of the time, this results in no change. However, if some of
9948 the operands are the same or inverses of each other, simplifications
9949 will result.
9950
9951 For example, (and (ior A B) (not B)) can occur as the result of
9952 expanding a bit field assignment. When we apply the distributive
9953 law to this, we get (ior (and A (not B)) (and B (not B))),
9954 which then simplifies to (and A (not B)).
9955
9956 Note that we make no checks on the validity of applying the inverse
9957 distributive law; checking here would be pointless, since it can be
9958 done in the few places where this routine is called.
9959
9960 N is the index of the term that is decomposed (the arithmetic operation,
9961 i.e. (+ A B) in the first example above). !N is the index of the term that
9962 is distributed, i.e. of C in the first example above. */
9963 static rtx
9964 distribute_and_simplify_rtx (rtx x, int n)
9965 {
9966 machine_mode mode;
9967 enum rtx_code outer_code, inner_code;
9968 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9969
9970 /* Distributivity is not true for floating point as it can change the
9971 value. So we don't do it unless -funsafe-math-optimizations. */
9972 if (FLOAT_MODE_P (GET_MODE (x))
9973 && ! flag_unsafe_math_optimizations)
9974 return NULL_RTX;
9975
9976 decomposed = XEXP (x, n);
9977 if (!ARITHMETIC_P (decomposed))
9978 return NULL_RTX;
9979
9980 mode = GET_MODE (x);
9981 outer_code = GET_CODE (x);
9982 distributed = XEXP (x, !n);
9983
9984 inner_code = GET_CODE (decomposed);
9985 inner_op0 = XEXP (decomposed, 0);
9986 inner_op1 = XEXP (decomposed, 1);
9987
9988 /* Special case (and (xor B C) (not A)), which is equivalent to
9989 (xor (ior A B) (ior A C)) */
9990 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9991 {
9992 distributed = XEXP (distributed, 0);
9993 outer_code = IOR;
9994 }
9995
9996 if (n == 0)
9997 {
9998 /* Distribute the second term. */
9999 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
10000 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
10001 }
10002 else
10003 {
10004 /* Distribute the first term. */
10005 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
10006 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
10007 }
10008
10009 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
10010 new_op0, new_op1));
10011 if (GET_CODE (tmp) != outer_code
10012 && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
10013 < set_src_cost (x, mode, optimize_this_for_speed_p)))
10014 return tmp;
10015
10016 return NULL_RTX;
10017 }
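
/* A quick check of the special case above, reasoning bitwise on A:
   where a bit of A is 1, (and (xor B C) (not A)) is 0 and
   (xor (ior A B) (ior A C)) is (xor 1 1) == 0; where a bit of A is 0,
   both sides reduce to (xor B C).  So the two forms are equal.  */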
10018 \f
10019 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
10020 in MODE. Return an equivalent form, if different from (and VAROP
10021 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
10022
10023 static rtx
10024 simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
10025 unsigned HOST_WIDE_INT constop)
10026 {
10027 unsigned HOST_WIDE_INT nonzero;
10028 unsigned HOST_WIDE_INT orig_constop;
10029 rtx orig_varop;
10030 int i;
10031
10032 orig_varop = varop;
10033 orig_constop = constop;
10034 if (GET_CODE (varop) == CLOBBER)
10035 return NULL_RTX;
10036
10037 /* Simplify VAROP knowing that we will be only looking at some of the
10038 bits in it.
10039
10040 Note by passing in CONSTOP, we guarantee that the bits not set in
10041 CONSTOP are not significant and will never be examined. We must
10042 ensure that is the case by explicitly masking out those bits
10043 before returning. */
10044 varop = force_to_mode (varop, mode, constop, 0);
10045
10046 /* If VAROP is a CLOBBER, we will fail so return it. */
10047 if (GET_CODE (varop) == CLOBBER)
10048 return varop;
10049
10050 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
10051 to VAROP and return the new constant. */
10052 if (CONST_INT_P (varop))
10053 return gen_int_mode (INTVAL (varop) & constop, mode);
10054
10055 /* See what bits may be nonzero in VAROP. Unlike the general case of
10056 a call to nonzero_bits, here we don't care about bits outside
10057 MODE. */
10058
10059 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
10060
10061 /* Turn off all bits in the constant that are known to already be zero.
10062 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
10063 which is tested below. */
10064
10065 constop &= nonzero;
10066
10067 /* If we don't have any bits left, return zero. */
10068 if (constop == 0 && !side_effects_p (varop))
10069 return const0_rtx;
10070
10071 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
10072 a power of two, we can replace this with an ASHIFT. */
10073 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
10074 && (i = exact_log2 (constop)) >= 0)
10075 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
10076
10077 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
10078 or XOR, then try to apply the distributive law. This may eliminate
10079 operations if either branch can be simplified because of the AND.
10080 It may also make some cases more complex, but those cases probably
10081 won't match a pattern either with or without this. */
10082
10083 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
10084 {
10085 scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10086 return
10087 gen_lowpart
10088 (mode,
10089 apply_distributive_law
10090 (simplify_gen_binary (GET_CODE (varop), varop_mode,
10091 simplify_and_const_int (NULL_RTX, varop_mode,
10092 XEXP (varop, 0),
10093 constop),
10094 simplify_and_const_int (NULL_RTX, varop_mode,
10095 XEXP (varop, 1),
10096 constop))));
10097 }
10098
10099 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
10100 the AND and see if one of the operands simplifies to zero. If so, we
10101 may eliminate it. */
10102
10103 if (GET_CODE (varop) == PLUS
10104 && pow2p_hwi (constop + 1))
10105 {
10106 rtx o0, o1;
10107
10108 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
10109 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
10110 if (o0 == const0_rtx)
10111 return o1;
10112 if (o1 == const0_rtx)
10113 return o0;
10114 }
10115
10116 /* Make a SUBREG if necessary. If we can't make it, fail. */
10117 varop = gen_lowpart (mode, varop);
10118 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10119 return NULL_RTX;
10120
10121 /* If we are only masking insignificant bits, return VAROP. */
10122 if (constop == nonzero)
10123 return varop;
10124
10125 if (varop == orig_varop && constop == orig_constop)
10126 return NULL_RTX;
10127
10128 /* Otherwise, return an AND. */
10129 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
10130 }
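
/* A worked example with made-up values: simplifying
   (and:SI X (const_int 0xff)) where nonzero_bits (X, SImode) == 0x0f.
   CONSTOP starts as 0xff; "constop &= nonzero" reduces it to 0x0f,
   which then equals NONZERO, so the "constop == nonzero" test above
   returns VAROP and the AND disappears entirely.  */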
10131
10132
10133 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
10134 in MODE.
10135
10136 Return an equivalent form, if different from X. Otherwise, return X. If
10137 X is zero, we are to always construct the equivalent form. */
10138
10139 static rtx
10140 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
10141 unsigned HOST_WIDE_INT constop)
10142 {
10143 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10144 if (tem)
10145 return tem;
10146
10147 if (!x)
10148 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10149 gen_int_mode (constop, mode));
10150 if (GET_MODE (x) != mode)
10151 x = gen_lowpart (mode, x);
10152 return x;
10153 }
10154 \f
10155 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10156 We don't care about bits outside of those defined in MODE.
10157 We DO care about all the bits in MODE, even if XMODE is smaller than MODE.
10158
10159 For most X this is simply GET_MODE_MASK (MODE), but if X is
10160 a shift, AND, or zero_extract, we can do better. */
10161
10162 static rtx
10163 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
10164 scalar_int_mode mode,
10165 unsigned HOST_WIDE_INT *nonzero)
10166 {
10167 rtx tem;
10168 reg_stat_type *rsp;
10169
10170 /* If X is a register whose nonzero bits value is current, use it.
10171 Otherwise, if X is a register whose value we can find, use that
10172 value. Otherwise, use the previously-computed global nonzero bits
10173 for this register. */
10174
10175 rsp = &reg_stat[REGNO (x)];
10176 if (rsp->last_set_value != 0
10177 && (rsp->last_set_mode == mode
10178 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10179 && GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10180 && GET_MODE_CLASS (mode) == MODE_INT))
10181 && ((rsp->last_set_label >= label_tick_ebb_start
10182 && rsp->last_set_label < label_tick)
10183 || (rsp->last_set_label == label_tick
10184 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10185 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10186 && REGNO (x) < reg_n_sets_max
10187 && REG_N_SETS (REGNO (x)) == 1
10188 && !REGNO_REG_SET_P
10189 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10190 REGNO (x)))))
10191 {
10192 /* Note that, even if the precision of last_set_mode is lower than that
10193 of mode, record_value_for_reg invoked nonzero_bits on the register
10194 with nonzero_bits_mode (because last_set_mode is necessarily integral
10195 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10196 are all valid, hence in mode too since nonzero_bits_mode is defined
10197 to the largest HWI_COMPUTABLE_MODE_P mode. */
10198 *nonzero &= rsp->last_set_nonzero_bits;
10199 return NULL;
10200 }
10201
10202 tem = get_last_value (x);
10203 if (tem)
10204 {
10205 if (SHORT_IMMEDIATES_SIGN_EXTEND)
10206 tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10207
10208 return tem;
10209 }
10210
10211 if (nonzero_sign_valid && rsp->nonzero_bits)
10212 {
10213 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10214
10215 if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10216 /* We don't know anything about the upper bits. */
10217 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);
10218
10219 *nonzero &= mask;
10220 }
10221
10222 return NULL;
10223 }
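
/* Illustration of the widening adjustment above, with made-up values:
   if X is a QImode register with recorded nonzero bits 0x80 but is
   used in SImode, bits 8..31 are unknown, so MASK becomes
   0x80 | (0xffffffff ^ 0xff) == 0xffffff80 before being ANDed into
   *NONZERO.  */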
10224
10225 /* Given a reg X of mode XMODE, return the number of bits at the high-order
10226 end of X that are known to be equal to the sign bit. X will be used
10227 in mode MODE; the returned value will always be between 1 and the
10228 number of bits in MODE. */
10229
10230 static rtx
10231 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
10232 scalar_int_mode mode,
10233 unsigned int *result)
10234 {
10235 rtx tem;
10236 reg_stat_type *rsp;
10237
10238 rsp = &reg_stat[REGNO (x)];
10239 if (rsp->last_set_value != 0
10240 && rsp->last_set_mode == mode
10241 && ((rsp->last_set_label >= label_tick_ebb_start
10242 && rsp->last_set_label < label_tick)
10243 || (rsp->last_set_label == label_tick
10244 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10245 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10246 && REGNO (x) < reg_n_sets_max
10247 && REG_N_SETS (REGNO (x)) == 1
10248 && !REGNO_REG_SET_P
10249 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10250 REGNO (x)))))
10251 {
10252 *result = rsp->last_set_sign_bit_copies;
10253 return NULL;
10254 }
10255
10256 tem = get_last_value (x);
10257 if (tem != 0)
10258 return tem;
10259
10260 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10261 && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10262 *result = rsp->sign_bit_copies;
10263
10264 return NULL;
10265 }
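
/* For example (hypothetical), if the last recorded value of X was set
   by (ashiftrt:SI Y (const_int 24)), at least bits 7..31 are copies of
   the sign bit, so last_set_sign_bit_copies would be at least 25 and a
   user of X in SImode can rely on that many sign-bit copies.  */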
10266 \f
10267 /* Return the number of "extended" bits there are in X, when interpreted
10268 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
10269 unsigned quantities, this is the number of high-order zero bits.
10270 For signed quantities, this is the number of copies of the sign bit
10271 minus 1. In both cases, this function returns the number of "spare"
10272 bits. For example, if two quantities for which this function returns
10273 at least 1 are added, the addition is known not to overflow.
10274
10275 This function will always return 0 unless called during combine, which
10276 implies that it must be called from a define_split. */
10277
10278 unsigned int
10279 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10280 {
10281 if (nonzero_sign_valid == 0)
10282 return 0;
10283
10284 scalar_int_mode int_mode;
10285 return (unsignedp
10286 ? (is_a <scalar_int_mode> (mode, &int_mode)
10287 && HWI_COMPUTABLE_MODE_P (int_mode)
10288 ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
10289 - floor_log2 (nonzero_bits (x, int_mode)))
10290 : 0)
10291 : num_sign_bit_copies (x, mode) - 1);
10292 }
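
/* A numeric illustration: if X is known to fit in 8 bits in SImode
   (nonzero_bits == 0xff), the unsigned case yields
   32 - 1 - floor_log2 (0xff) == 31 - 7 == 24 "spare" bits, so adding
   two such quantities can be proven not to overflow SImode.  */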
10293
10294 /* This function is called from `simplify_shift_const' to merge two
10295 outer operations. Specifically, we have already found that we need
10296 to perform operation *POP0 with constant *PCONST0 at the outermost
10297 position. We would now like to also perform OP1 with constant CONST1
10298 (with *POP0 being done last).
10299
10300 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10301 the resulting operation. *PCOMP_P is set to 1 if we would need to
10302 complement the innermost operand, otherwise it is unchanged.
10303
10304 MODE is the mode in which the operation will be done. No bits outside
10305 the width of this mode matter. It is assumed that the width of this mode
10306 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10307
10308 If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS,
10309 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
10310 result is simply *PCONST0.
10311
10312 If the resulting operation cannot be expressed as one operation, we
10313 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
10314
10315 static int
10316 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0,
                  enum rtx_code op1, HOST_WIDE_INT const1,
                  machine_mode mode, int *pcomp_p)
10317 {
10318 enum rtx_code op0 = *pop0;
10319 HOST_WIDE_INT const0 = *pconst0;
10320
10321 const0 &= GET_MODE_MASK (mode);
10322 const1 &= GET_MODE_MASK (mode);
10323
10324 /* If OP0 is an AND, clear unimportant bits in CONST1. */
10325 if (op0 == AND)
10326 const1 &= const0;
10327
10328 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
10329 if OP0 is SET. */
10330
10331 if (op1 == UNKNOWN || op0 == SET)
10332 return 1;
10333
10334 else if (op0 == UNKNOWN)
10335 op0 = op1, const0 = const1;
10336
10337 else if (op0 == op1)
10338 {
10339 switch (op0)
10340 {
10341 case AND:
10342 const0 &= const1;
10343 break;
10344 case IOR:
10345 const0 |= const1;
10346 break;
10347 case XOR:
10348 const0 ^= const1;
10349 break;
10350 case PLUS:
10351 const0 += const1;
10352 break;
10353 case NEG:
10354 op0 = UNKNOWN;
10355 break;
10356 default:
10357 break;
10358 }
10359 }
10360
10361 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
10362 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10363 return 0;
10364
10365 /* If the two constants aren't the same, we can't do anything. The
10366 remaining six cases can all be done. */
10367 else if (const0 != const1)
10368 return 0;
10369
10370 else
10371 switch (op0)
10372 {
10373 case IOR:
10374 if (op1 == AND)
10375 /* (a & b) | b == b */
10376 op0 = SET;
10377 else /* op1 == XOR */
10378 /* (a ^ b) | b == a | b */
10379 {;}
10380 break;
10381
10382 case XOR:
10383 if (op1 == AND)
10384 /* (a & b) ^ b == (~a) & b */
10385 op0 = AND, *pcomp_p = 1;
10386 else /* op1 == IOR */
10387 /* (a | b) ^ b == a & ~b */
10388 op0 = AND, const0 = ~const0;
10389 break;
10390
10391 case AND:
10392 if (op1 == IOR)
10393 /* (a | b) & b == b */
10394 op0 = SET;
10395 else /* op1 == XOR */
10396 /* (a ^ b) & b == (~a) & b */
10397 *pcomp_p = 1;
10398 break;
10399 default:
10400 break;
10401 }
10402
10403 /* Check for NO-OP cases. */
10404 const0 &= GET_MODE_MASK (mode);
10405 if (const0 == 0
10406 && (op0 == IOR || op0 == XOR || op0 == PLUS))
10407 op0 = UNKNOWN;
10408 else if (const0 == 0 && op0 == AND)
10409 op0 = SET;
10410 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10411 && op0 == AND)
10412 op0 = UNKNOWN;
10413
10414 *pop0 = op0;
10415
10416 /* ??? Slightly redundant with the above mask, but not entirely.
10417 Moving this above means we'd have to sign-extend the mode mask
10418 for the final test. */
10419 if (op0 != UNKNOWN && op0 != NEG)
10420 *pconst0 = trunc_int_for_mode (const0, mode);
10421
10422 return 1;
10423 }
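
/* A worked instance of the table above (values invented): suppose
   *POP0 == XOR with *PCONST0 == 0xff, and we want to fold in
   OP1 == IOR with CONST1 == 0xff.  The constants match, so the
   XOR/IOR row applies: (a | b) ^ b == a & ~b, hence *POP0 becomes AND,
   *PCONST0 becomes ~0xff truncated to MODE, and 1 is returned.  */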
10424 \f
10425 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10426 the shift in. The original shift operation CODE is performed on OP in
10427 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10428 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10429 result of the shift is subject to operation OUTER_CODE with operand
10430 OUTER_CONST. */
10431
10432 static scalar_int_mode
10433 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10434 scalar_int_mode orig_mode, scalar_int_mode mode,
10435 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10436 {
10437 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10438
10439 /* In general we can't perform the shift in a wider mode for right shifts and rotates. */
10440 switch (code)
10441 {
10442 case ASHIFTRT:
10443 /* We can still widen if the bits brought in from the left are identical
10444 to the sign bit of ORIG_MODE. */
10445 if (num_sign_bit_copies (op, mode)
10446 > (unsigned) (GET_MODE_PRECISION (mode)
10447 - GET_MODE_PRECISION (orig_mode)))
10448 return mode;
10449 return orig_mode;
10450
10451 case LSHIFTRT:
10452 /* Similarly here but with zero bits. */
10453 if (HWI_COMPUTABLE_MODE_P (mode)
10454 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10455 return mode;
10456
10457 /* We can also widen if the bits brought in will be masked off. This
10458 operation is performed in ORIG_MODE. */
10459 if (outer_code == AND)
10460 {
10461 int care_bits = low_bitmask_len (orig_mode, outer_const);
10462
10463 if (care_bits >= 0
10464 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10465 return mode;
10466 }
10467 /* fall through */
10468
10469 case ROTATE:
10470 return orig_mode;
10471
10472 case ROTATERT:
10473 gcc_unreachable ();
10474
10475 default:
10476 return mode;
10477 }
10478 }
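
/* Illustration of the AND special case (invented modes and values):
   widening (lshiftrt:QI X 3) to SImode is normally unsafe, since bits
   8..31 of X would be shifted in.  But if the result is known to be
   masked by (and ... 0x1f), low_bitmask_len gives 5 care bits, and
   8 - 5 >= 3 shows every bit the wider shift could bring in lands at
   or above bit 5, outside the mask, so the wider mode is returned.  */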
10479
10480 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10481 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10482 if we cannot simplify it. Otherwise, return a simplified value.
10483
10484 The shift is normally computed in the widest mode we find in VAROP, as
10485 long as it isn't a different number of words than RESULT_MODE. Exceptions
10486 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10487
10488 static rtx
10489 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10490 rtx varop, int orig_count)
10491 {
10492 enum rtx_code orig_code = code;
10493 rtx orig_varop = varop;
10494 int count, log2;
10495 machine_mode mode = result_mode;
10496 machine_mode shift_mode;
10497 scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
10498 /* We form (outer_op (code varop count) (outer_const)). */
10499 enum rtx_code outer_op = UNKNOWN;
10500 HOST_WIDE_INT outer_const = 0;
10501 int complement_p = 0;
10502 rtx new_rtx, x;
10503
10504 /* Make sure to truncate the "natural" shift on the way in. We don't
10505 want to do this inside the loop as it makes it more difficult to
10506 combine shifts. */
10507 if (SHIFT_COUNT_TRUNCATED)
10508 orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10509
10510 /* If we were given an invalid count, don't do anything except exactly
10511 what was requested. */
10512
10513 if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10514 return NULL_RTX;
10515
10516 count = orig_count;
10517
10518 /* Unless one of the cases of the `switch' in this loop does a `continue',
10519 we will `break' the loop after the `switch'. */
10520
10521 while (count != 0)
10522 {
10523 /* If we have an operand of (clobber (const_int 0)), fail. */
10524 if (GET_CODE (varop) == CLOBBER)
10525 return NULL_RTX;
10526
10527 /* Convert ROTATERT to ROTATE. */
10528 if (code == ROTATERT)
10529 {
10530 unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10531 code = ROTATE;
10532 count = bitsize - count;
10533 }
10534
10535 shift_mode = result_mode;
10536 if (shift_mode != mode)
10537 {
10538 /* We only change the modes of scalar shifts. */
10539 int_mode = as_a <scalar_int_mode> (mode);
10540 int_result_mode = as_a <scalar_int_mode> (result_mode);
10541 shift_mode = try_widen_shift_mode (code, varop, count,
10542 int_result_mode, int_mode,
10543 outer_op, outer_const);
10544 }
10545
10546 scalar_int_mode shift_unit_mode
10547 = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
10548
10549 /* Handle cases where the count is greater than the size of the mode
10550 minus 1. For ASHIFT, use the size minus one as the count (this can
10551 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10552 take the count modulo the size. For other shifts, the result is
10553 zero.
10554
10555 Since these shifts are being produced by the compiler by combining
10556 multiple operations, each of which is defined, we know what the
10557 result is supposed to be. */
10558
10559 if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10560 {
10561 if (code == ASHIFTRT)
10562 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10563 else if (code == ROTATE || code == ROTATERT)
10564 count %= GET_MODE_PRECISION (shift_unit_mode);
10565 else
10566 {
10567 /* We can't simply return zero because there may be an
10568 outer op. */
10569 varop = const0_rtx;
10570 count = 0;
10571 break;
10572 }
10573 }
10574
10575 /* If we discovered we had to complement VAROP, leave. Making a NOT
10576 here would cause an infinite loop. */
10577 if (complement_p)
10578 break;
10579
10580 if (shift_mode == shift_unit_mode)
10581 {
10582 /* An arithmetic right shift of a quantity known to be -1 or 0
10583 is a no-op. */
10584 if (code == ASHIFTRT
10585 && (num_sign_bit_copies (varop, shift_unit_mode)
10586 == GET_MODE_PRECISION (shift_unit_mode)))
10587 {
10588 count = 0;
10589 break;
10590 }
10591
10592 /* If we are doing an arithmetic right shift and discarding all but
10593 the sign bit copies, this is equivalent to doing a shift by the
10594 bitsize minus one. Convert it into that shift because it will
10595 often allow other simplifications. */
10596
10597 if (code == ASHIFTRT
10598 && (count + num_sign_bit_copies (varop, shift_unit_mode)
10599 >= GET_MODE_PRECISION (shift_unit_mode)))
10600 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10601
10602 /* We simplify the tests below and elsewhere by converting
10603 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10604 `make_compound_operation' will convert it to an ASHIFTRT for
10605 those machines (such as VAX) that don't have an LSHIFTRT. */
10606 if (code == ASHIFTRT
10607 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10608 && val_signbit_known_clear_p (shift_unit_mode,
10609 nonzero_bits (varop,
10610 shift_unit_mode)))
10611 code = LSHIFTRT;
10612
10613 if (((code == LSHIFTRT
10614 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10615 && !(nonzero_bits (varop, shift_unit_mode) >> count))
10616 || (code == ASHIFT
10617 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10618 && !((nonzero_bits (varop, shift_unit_mode) << count)
10619 & GET_MODE_MASK (shift_unit_mode))))
10620 && !side_effects_p (varop))
10621 varop = const0_rtx;
10622 }
10623
10624 switch (GET_CODE (varop))
10625 {
10626 case SIGN_EXTEND:
10627 case ZERO_EXTEND:
10628 case SIGN_EXTRACT:
10629 case ZERO_EXTRACT:
10630 new_rtx = expand_compound_operation (varop);
10631 if (new_rtx != varop)
10632 {
10633 varop = new_rtx;
10634 continue;
10635 }
10636 break;
10637
10638 case MEM:
10639 /* The following rules apply only to scalars. */
10640 if (shift_mode != shift_unit_mode)
10641 break;
10642 int_mode = as_a <scalar_int_mode> (mode);
10643
10644 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10645 minus the width of a smaller mode, we can do this with a
10646 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10647 if ((code == ASHIFTRT || code == LSHIFTRT)
10648 && ! mode_dependent_address_p (XEXP (varop, 0),
10649 MEM_ADDR_SPACE (varop))
10650 && ! MEM_VOLATILE_P (varop)
10651 && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
10652 .exists (&tmode)))
10653 {
10654 new_rtx = adjust_address_nv (varop, tmode,
10655 BYTES_BIG_ENDIAN ? 0
10656 : count / BITS_PER_UNIT);
10657
10658 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10659 : ZERO_EXTEND, int_mode, new_rtx);
10660 count = 0;
10661 continue;
10662 }
10663 break;
10664
10665 case SUBREG:
10666 /* The following rules apply only to scalars. */
10667 if (shift_mode != shift_unit_mode)
10668 break;
10669 int_mode = as_a <scalar_int_mode> (mode);
10670 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10671
10672 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10673 the same number of words as what we've seen so far. Then store
10674 the widest mode in MODE. */
10675 if (subreg_lowpart_p (varop)
10676 && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
10677 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10678 && (CEIL (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD)
10679 == CEIL (GET_MODE_SIZE (int_mode), UNITS_PER_WORD))
10680 && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
10681 {
10682 varop = SUBREG_REG (varop);
10683 if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10684 mode = inner_mode;
10685 continue;
10686 }
10687 break;
10688
10689 case MULT:
10690 /* Some machines use MULT instead of ASHIFT because MULT
10691 is cheaper. But it is still better on those machines to
10692 merge two shifts into one. */
10693 if (CONST_INT_P (XEXP (varop, 1))
10694 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10695 {
10696 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10697 varop = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10698 XEXP (varop, 0), log2_rtx);
10699 continue;
10700 }
10701 break;
10702
10703 case UDIV:
10704 /* Similar, for when divides are cheaper. */
10705 if (CONST_INT_P (XEXP (varop, 1))
10706 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10707 {
10708 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10709 varop = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10710 XEXP (varop, 0), log2_rtx);
10711 continue;
10712 }
10713 break;
10714
10715 case ASHIFTRT:
10716 /* If we are extracting just the sign bit of an arithmetic
10717 right shift, that shift is not needed. However, the sign
10718 bit of a wider mode may be different from what would be
10719 interpreted as the sign bit in a narrower mode, so, if
10720 the result is narrower, don't discard the shift. */
10721 if (code == LSHIFTRT
10722 && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10723 && (GET_MODE_UNIT_BITSIZE (result_mode)
10724 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10725 {
10726 varop = XEXP (varop, 0);
10727 continue;
10728 }
10729
10730 /* fall through */
10731
10732 case LSHIFTRT:
10733 case ASHIFT:
10734 case ROTATE:
10735 /* The following rules apply only to scalars. */
10736 if (shift_mode != shift_unit_mode)
10737 break;
10738 int_mode = as_a <scalar_int_mode> (mode);
10739 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10740 int_result_mode = as_a <scalar_int_mode> (result_mode);
10741
10742 /* Here we have two nested shifts. The result is usually the
10743 AND of a new shift with a mask. We compute the result below. */
10744 if (CONST_INT_P (XEXP (varop, 1))
10745 && INTVAL (XEXP (varop, 1)) >= 0
10746 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
10747 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10748 && HWI_COMPUTABLE_MODE_P (int_mode))
10749 {
10750 enum rtx_code first_code = GET_CODE (varop);
10751 unsigned int first_count = INTVAL (XEXP (varop, 1));
10752 unsigned HOST_WIDE_INT mask;
10753 rtx mask_rtx;
10754
10755 /* We have one common special case. We can't do any merging if
10756 the inner code is an ASHIFTRT of a smaller mode. However, if
10757 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10758 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10759 we can convert it to
10760 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10761 This simplifies certain SIGN_EXTEND operations. */
10762 if (code == ASHIFT && first_code == ASHIFTRT
10763 && count == (GET_MODE_PRECISION (int_result_mode)
10764 - GET_MODE_PRECISION (int_varop_mode)))
10765 {
10766 /* C3 has the low-order C1 bits zero. */
10767
10768 mask = GET_MODE_MASK (int_mode)
10769 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10770
10771 varop = simplify_and_const_int (NULL_RTX, int_result_mode,
10772 XEXP (varop, 0), mask);
10773 varop = simplify_shift_const (NULL_RTX, ASHIFT,
10774 int_result_mode, varop, count);
10775 count = first_count;
10776 code = ASHIFTRT;
10777 continue;
10778 }
10779
10780 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10781 than C1 high-order bits equal to the sign bit, we can convert
10782 this to either an ASHIFT or an ASHIFTRT depending on the
10783 two counts.
10784
10785 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */
10786
10787 if (code == ASHIFTRT && first_code == ASHIFT
10788 && int_varop_mode == shift_unit_mode
10789 && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
10790 > first_count))
10791 {
10792 varop = XEXP (varop, 0);
10793 count -= first_count;
10794 if (count < 0)
10795 {
10796 count = -count;
10797 code = ASHIFT;
10798 }
10799
10800 continue;
10801 }
10802
10803 /* There are some cases we can't do. If CODE is ASHIFTRT,
10804 we can only do this if FIRST_CODE is also ASHIFTRT.
10805
10806 We can't do the case when CODE is ROTATE and FIRST_CODE is
10807 ASHIFTRT.
10808
10809 If the mode of this shift is not the mode of the outer shift,
10810 we can't do this if either shift is a right shift or ROTATE.
10811
10812 Finally, we can't do any of these if the mode is too wide
10813 unless the codes are the same.
10814
10815 Handle the case where the shift codes are the same
10816 first. */
10817
10818 if (code == first_code)
10819 {
10820 if (int_varop_mode != int_result_mode
10821 && (code == ASHIFTRT || code == LSHIFTRT
10822 || code == ROTATE))
10823 break;
10824
10825 count += first_count;
10826 varop = XEXP (varop, 0);
10827 continue;
10828 }
10829
10830 if (code == ASHIFTRT
10831 || (code == ROTATE && first_code == ASHIFTRT)
10832 || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10833 || (int_varop_mode != int_result_mode
10834 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10835 || first_code == ROTATE
10836 || code == ROTATE)))
10837 break;
10838
10839 /* To compute the mask to apply after the shift, shift the
10840 nonzero bits of the inner shift the same way the
10841 outer shift will. */
10842
10843 mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
10844 int_result_mode);
10845 rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
10846 mask_rtx
10847 = simplify_const_binary_operation (code, int_result_mode,
10848 mask_rtx, count_rtx);
10849
10850 /* Give up if we can't compute an outer operation to use. */
10851 if (mask_rtx == 0
10852 || !CONST_INT_P (mask_rtx)
10853 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10854 INTVAL (mask_rtx),
10855 int_result_mode, &complement_p))
10856 break;
10857
10858 /* If the shifts are in the same direction, we add the
10859 counts. Otherwise, we subtract them. */
10860 if ((code == ASHIFTRT || code == LSHIFTRT)
10861 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10862 count += first_count;
10863 else
10864 count -= first_count;
10865
10866 /* If COUNT is positive, the new shift is usually CODE,
10867 except for the two exceptions below, in which case it is
10868 FIRST_CODE. If the count is negative, FIRST_CODE should
10869 always be used. */
10870 if (count > 0
10871 && ((first_code == ROTATE && code == ASHIFT)
10872 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10873 code = first_code;
10874 else if (count < 0)
10875 code = first_code, count = -count;
10876
10877 varop = XEXP (varop, 0);
10878 continue;
10879 }
10880
10881 /* If we have (A << B << C) for any shift, we can convert this to
10882 (A << C << B). This wins if A is a constant. Only try this if
10883 B is not a constant. */
10884
10885 else if (GET_CODE (varop) == code
10886 && CONST_INT_P (XEXP (varop, 0))
10887 && !CONST_INT_P (XEXP (varop, 1)))
10888 {
10889 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10890 sure the result will be masked. See PR70222. */
10891 if (code == LSHIFTRT
10892 && int_mode != int_result_mode
10893 && !merge_outer_ops (&outer_op, &outer_const, AND,
10894 GET_MODE_MASK (int_result_mode)
10895 >> orig_count, int_result_mode,
10896 &complement_p))
10897 break;
10898 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing
10899 up outer sign extension (often left and right shift) is
10900 hardly more efficient than the original. See PR70429.
10901 Similarly punt for rotates with different modes.
10902 See PR97386. */
10903 if ((code == ASHIFTRT || code == ROTATE)
10904 && int_mode != int_result_mode)
10905 break;
10906
10907 rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
10908 rtx new_rtx = simplify_const_binary_operation (code, int_mode,
10909 XEXP (varop, 0),
10910 count_rtx);
10911 varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
10912 count = 0;
10913 continue;
10914 }
10915 break;
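
/* A worked instance of the mask computation above (invented RTL):
   merging (lshiftrt:SI (ashift:SI X 3) 2).  The directions differ, so
   COUNT becomes 2 - 3 == -1, which flips CODE to ASHIFT with COUNT 1.
   The nonzero bits of the inner shift, 0xfffffff8, shifted right by 2
   give the mask 0x3ffffffe that is queued as an outer AND, yielding
   (and:SI (ashift:SI X 1) (const_int 0x3ffffffe)).  */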
10916
10917 case NOT:
10918 /* The following rules apply only to scalars. */
10919 if (shift_mode != shift_unit_mode)
10920 break;
10921
10922 /* Make this fit the case below. */
10923 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10924 continue;
10925
10926 case IOR:
10927 case AND:
10928 case XOR:
10929 /* The following rules apply only to scalars. */
10930 if (shift_mode != shift_unit_mode)
10931 break;
10932 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10933 int_result_mode = as_a <scalar_int_mode> (result_mode);
10934
10935 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10936 with C the size of VAROP - 1 and the shift is logical if
10937 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10938 we have an (le X 0) operation. If we have an arithmetic shift
10939 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10940 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10941
10942 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10943 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10944 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10945 && (code == LSHIFTRT || code == ASHIFTRT)
10946 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
10947 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10948 {
10949 count = 0;
10950 varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
10951 const0_rtx);
10952
10953 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10954 varop = gen_rtx_NEG (int_varop_mode, varop);
10955
10956 continue;
10957 }
10958
10959 /* If we have (shift (logical)), move the logical to the outside
10960 to allow it to possibly combine with another logical and the
10961 shift to combine with another shift. This also canonicalizes to
10962 what a ZERO_EXTRACT looks like. Also, some machines have
10963 (and (shift)) insns. */
10964
10965 if (CONST_INT_P (XEXP (varop, 1))
10966 /* We can't do this if we have (ashiftrt (xor)) and the
10967 constant has its sign bit set in shift_unit_mode with
10968 shift_unit_mode wider than result_mode. */
10969 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10970 && int_result_mode != shift_unit_mode
10971 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10972 shift_unit_mode) < 0)
10973 && (new_rtx = simplify_const_binary_operation
10974 (code, int_result_mode,
10975 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
10976 gen_int_shift_amount (int_result_mode, count))) != 0
10977 && CONST_INT_P (new_rtx)
10978 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10979 INTVAL (new_rtx), int_result_mode,
10980 &complement_p))
10981 {
10982 varop = XEXP (varop, 0);
10983 continue;
10984 }
10985
10986 /* If we can't do that, try to simplify the shift in each arm of the
10987 logical expression, make a new logical expression, and apply
10988 the inverse distributive law. This also can't be done for
10989 (ashiftrt (xor)) where we've widened the shift and the constant
10990 changes the sign bit. */
10991 if (CONST_INT_P (XEXP (varop, 1))
10992 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10993 && int_result_mode != shift_unit_mode
10994 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10995 shift_unit_mode) < 0))
10996 {
10997 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
10998 XEXP (varop, 0), count);
10999 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
11000 XEXP (varop, 1), count);
11001
11002 varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
11003 lhs, rhs);
11004 varop = apply_distributive_law (varop);
11005
11006 count = 0;
11007 continue;
11008 }
11009 break;
11010
11011 case EQ:
11012 /* The following rules apply only to scalars. */
11013 if (shift_mode != shift_unit_mode)
11014 break;
11015 int_result_mode = as_a <scalar_int_mode> (result_mode);
11016
11017 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
11018 says that the sign bit can be tested, FOO has mode MODE, C is
11019 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
11020 that may be nonzero. */
11021 if (code == LSHIFTRT
11022 && XEXP (varop, 1) == const0_rtx
11023 && GET_MODE (XEXP (varop, 0)) == int_result_mode
11024 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11025 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11026 && STORE_FLAG_VALUE == -1
11027 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11028 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11029 int_result_mode, &complement_p))
11030 {
11031 varop = XEXP (varop, 0);
11032 count = 0;
11033 continue;
11034 }
11035 break;
11036
11037 case NEG:
11038 /* The following rules apply only to scalars. */
11039 if (shift_mode != shift_unit_mode)
11040 break;
11041 int_result_mode = as_a <scalar_int_mode> (result_mode);
11042
11043 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
11044 than the number of bits in the mode is equivalent to A. */
11045 if (code == LSHIFTRT
11046 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11047 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
11048 {
11049 varop = XEXP (varop, 0);
11050 count = 0;
11051 continue;
11052 }
11053
11054 /* NEG commutes with ASHIFT since it is multiplication. Move the
11055 NEG outside to allow shifts to combine. */
11056 if (code == ASHIFT
11057 && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
11058 int_result_mode, &complement_p))
11059 {
11060 varop = XEXP (varop, 0);
11061 continue;
11062 }
11063 break;
11064
11065 case PLUS:
11066 /* The following rules apply only to scalars. */
11067 if (shift_mode != shift_unit_mode)
11068 break;
11069 int_result_mode = as_a <scalar_int_mode> (result_mode);
11070
11071 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
11072 is one less than the number of bits in the mode is
11073 equivalent to (xor A 1). */
11074 if (code == LSHIFTRT
11075 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11076 && XEXP (varop, 1) == constm1_rtx
11077 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11078 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11079 int_result_mode, &complement_p))
11080 {
11081 count = 0;
11082 varop = XEXP (varop, 0);
11083 continue;
11084 }
11085
11086 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
11087 that might be nonzero in BAR are those being shifted out and those
11088 bits are known zero in FOO, we can replace the PLUS with FOO.
11089 Similarly in the other operand order. This code occurs when
11090 we are computing the size of a variable-size array. */
11091
11092 if ((code == ASHIFTRT || code == LSHIFTRT)
11093 && count < HOST_BITS_PER_WIDE_INT
11094 && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
11095 && (nonzero_bits (XEXP (varop, 1), int_result_mode)
11096 & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
11097 {
11098 varop = XEXP (varop, 0);
11099 continue;
11100 }
11101 else if ((code == ASHIFTRT || code == LSHIFTRT)
11102 && count < HOST_BITS_PER_WIDE_INT
11103 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11104 && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11105 >> count) == 0
11106 && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11107 & nonzero_bits (XEXP (varop, 1), int_result_mode)) == 0)
11108 {
11109 varop = XEXP (varop, 1);
11110 continue;
11111 }
11112
11113 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
11114 if (code == ASHIFT
11115 && CONST_INT_P (XEXP (varop, 1))
11116 && (new_rtx = simplify_const_binary_operation
11117 (ASHIFT, int_result_mode,
11118 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11119 gen_int_shift_amount (int_result_mode, count))) != 0
11120 && CONST_INT_P (new_rtx)
11121 && merge_outer_ops (&outer_op, &outer_const, PLUS,
11122 INTVAL (new_rtx), int_result_mode,
11123 &complement_p))
11124 {
11125 varop = XEXP (varop, 0);
11126 continue;
11127 }
11128
11129 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11130 signbit', and attempt to change the PLUS to an XOR and move it to
11131 the outer operation, as is done above in the AND/IOR/XOR case
11132 for logical shifts. See the logical handling above for the
11133 reasoning behind doing so. */
11134 if (code == LSHIFTRT
11135 && CONST_INT_P (XEXP (varop, 1))
11136 && mode_signbit_p (int_result_mode, XEXP (varop, 1))
11137 && (new_rtx = simplify_const_binary_operation
11138 (code, int_result_mode,
11139 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11140 gen_int_shift_amount (int_result_mode, count))) != 0
11141 && CONST_INT_P (new_rtx)
11142 && merge_outer_ops (&outer_op, &outer_const, XOR,
11143 INTVAL (new_rtx), int_result_mode,
11144 &complement_p))
11145 {
11146 varop = XEXP (varop, 0);
11147 continue;
11148 }
11149
11150 break;
11151
11152 case MINUS:
11153 /* The following rules apply only to scalars. */
11154 if (shift_mode != shift_unit_mode)
11155 break;
11156 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11157
11158 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
11159 with C the size of VAROP - 1 and the shift is logical if
11160 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11161 we have a (gt X 0) operation. If the shift is arithmetic with
11162 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11163 we have a (neg (gt X 0)) operation. */
11164
11165 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11166 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11167 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11168 && (code == LSHIFTRT || code == ASHIFTRT)
11169 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11170 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11171 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11172 {
11173 count = 0;
11174 varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
11175 const0_rtx);
11176
11177 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11178 varop = gen_rtx_NEG (int_varop_mode, varop);
11179
11180 continue;
11181 }
11182 break;
11183
11184 case TRUNCATE:
11185 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11186 if the truncate does not affect the value. */
11187 if (code == LSHIFTRT
11188 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11189 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11190 && (INTVAL (XEXP (XEXP (varop, 0), 1))
11191 >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11192 - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11193 {
11194 rtx varop_inner = XEXP (varop, 0);
11195 int new_count = count + INTVAL (XEXP (varop_inner, 1));
11196 rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner),
11197 new_count);
11198 varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11199 XEXP (varop_inner, 0),
11200 new_count_rtx);
11201 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
11202 count = 0;
11203 continue;
11204 }
11205 break;
11206
11207 default:
11208 break;
11209 }
11210
11211 break;
11212 }
11213
11214 shift_mode = result_mode;
11215 if (shift_mode != mode)
11216 {
11217 /* We only change the modes of scalar shifts. */
11218 int_mode = as_a <scalar_int_mode> (mode);
11219 int_result_mode = as_a <scalar_int_mode> (result_mode);
11220 shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
11221 int_mode, outer_op, outer_const);
11222 }
11223
11224 /* We have now finished analyzing the shift. The result should be
11225 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
11226 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11227 to the result of the shift. OUTER_CONST is the relevant constant,
11228 but we must turn off all bits turned off in the shift. */
11229
11230 if (outer_op == UNKNOWN
11231 && orig_code == code && orig_count == count
11232 && varop == orig_varop
11233 && shift_mode == GET_MODE (varop))
11234 return NULL_RTX;
11235
11236 /* Make a SUBREG if necessary. If we can't make it, fail. */
11237 varop = gen_lowpart (shift_mode, varop);
11238 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11239 return NULL_RTX;
11240
11241 /* If we have an outer operation and we just made a shift, it is
11242 possible that we could have simplified the shift were it not
11243 for the outer operation. So try to do the simplification
11244 recursively. */
11245
11246 if (outer_op != UNKNOWN)
11247 x = simplify_shift_const_1 (code, shift_mode, varop, count);
11248 else
11249 x = NULL_RTX;
11250
11251 if (x == NULL_RTX)
11252 x = simplify_gen_binary (code, shift_mode, varop,
11253 gen_int_shift_amount (shift_mode, count));
11254
11255 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11256 turn off all the bits that the shift would have turned off. */
11257 if (orig_code == LSHIFTRT && result_mode != shift_mode)
11258 /* We only change the modes of scalar shifts. */
11259 x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
11260 x, GET_MODE_MASK (result_mode) >> orig_count);
11261
11262 /* Do the remainder of the processing in RESULT_MODE. */
11263 x = gen_lowpart_or_truncate (result_mode, x);
11264
11265 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11266 operation. */
11267 if (complement_p)
11268 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11269
11270 if (outer_op != UNKNOWN)
11271 {
11272 int_result_mode = as_a <scalar_int_mode> (result_mode);
11273
11274 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11275 && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11276 outer_const = trunc_int_for_mode (outer_const, int_result_mode);
11277
11278 if (outer_op == AND)
11279 x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11280 else if (outer_op == SET)
11281 {
11282 /* This means that we have determined that the result is
11283 equivalent to a constant. This should be rare. */
11284 if (!side_effects_p (x))
11285 x = GEN_INT (outer_const);
11286 }
11287 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11288 x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11289 else
11290 x = simplify_gen_binary (outer_op, int_result_mode, x,
11291 GEN_INT (outer_const));
11292 }
11293
11294 return x;
11295 }
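
/* An end-to-end example (hypothetical input): with X known to be 0 or
   1, simplify_shift_const_1 (LSHIFTRT, SImode,
   (plus:SI X (const_int -1)), 31) hits the PLUS case: COUNT is the
   precision minus 1, XEXP (varop, 1) is constm1_rtx and
   nonzero_bits (X) == 1, so an outer XOR of 1 is queued and VAROP
   becomes X with COUNT 0.  The final result is
   (xor:SI X (const_int 1)); indeed X == 0 gives 0xffffffff >> 31 == 1
   and X == 1 gives 0 >> 31 == 0.  */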
11296
11297 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
11298 The result of the shift is RESULT_MODE. If we cannot simplify it,
11299 return X or, if it is NULL, synthesize the expression with
11300 simplify_gen_binary. Otherwise, return a simplified value.
11301
11302 The shift is normally computed in the widest mode we find in VAROP, as
11303 long as it isn't a different number of words than RESULT_MODE. Exceptions
11304 are ASHIFTRT and ROTATE, which are always done in their original mode. */
11305
11306 static rtx
11307 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11308 rtx varop, int count)
11309 {
11310 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11311 if (tem)
11312 return tem;
11313
11314 if (!x)
11315 x = simplify_gen_binary (code, GET_MODE (varop), varop,
11316 gen_int_shift_amount (GET_MODE (varop), count));
11317 if (GET_MODE (x) != result_mode)
11318 x = gen_lowpart (result_mode, x);
11319 return x;
11320 }
11321
11322 \f
11323 /* A subroutine of recog_for_combine. See there for arguments and
11324 return value. */
11325
11326 static int
11327 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11328 {
11329 rtx pat = *pnewpat;
11330 rtx pat_without_clobbers;
11331 int insn_code_number;
11332 int num_clobbers_to_add = 0;
11333 int i;
11334 rtx notes = NULL_RTX;
11335 rtx old_notes, old_pat;
11336 int old_icode;
11337
11338 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11339 we use to indicate that something didn't match. If we find such a
11340 thing, force rejection. */
11341 if (GET_CODE (pat) == PARALLEL)
11342 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11343 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11344 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11345 return -1;
11346
11347 old_pat = PATTERN (insn);
11348 old_notes = REG_NOTES (insn);
11349 PATTERN (insn) = pat;
11350 REG_NOTES (insn) = NULL_RTX;
11351
11352 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11353 if (dump_file && (dump_flags & TDF_DETAILS))
11354 {
11355 if (insn_code_number < 0)
11356 fputs ("Failed to match this instruction:\n", dump_file);
11357 else
11358 fputs ("Successfully matched this instruction:\n", dump_file);
11359 print_rtl_single (dump_file, pat);
11360 }
11361
11362 /* If the pattern wasn't recognized, we may previously have had an insn
11363 that clobbered some register as a side effect, but the combined
11364 insn doesn't need to do that. So try once more without the clobbers
11365 unless this represents an ASM insn. */
11366
11367 if (insn_code_number < 0 && ! check_asm_operands (pat)
11368 && GET_CODE (pat) == PARALLEL)
11369 {
11370 int pos;
11371
11372 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11373 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11374 {
11375 if (i != pos)
11376 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11377 pos++;
11378 }
11379
11380 SUBST_INT (XVECLEN (pat, 0), pos);
11381
11382 if (pos == 1)
11383 pat = XVECEXP (pat, 0, 0);
11384
11385 PATTERN (insn) = pat;
11386 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11387 if (dump_file && (dump_flags & TDF_DETAILS))
11388 {
11389 if (insn_code_number < 0)
11390 fputs ("Failed to match this instruction:\n", dump_file);
11391 else
11392 fputs ("Successfully matched this instruction:\n", dump_file);
11393 print_rtl_single (dump_file, pat);
11394 }
11395 }
11396
11397 pat_without_clobbers = pat;
11398
11399 PATTERN (insn) = old_pat;
11400 REG_NOTES (insn) = old_notes;
11401
11402 /* Recognize all no-op sets; these will be killed by a follow-up pass. */
11403 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11404 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11405
11406 /* If we had any clobbers to add, make a new pattern that contains
11407 them. Then check to make sure that all of them are dead. */
11408 if (num_clobbers_to_add)
11409 {
11410 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11411 rtvec_alloc (GET_CODE (pat) == PARALLEL
11412 ? (XVECLEN (pat, 0)
11413 + num_clobbers_to_add)
11414 : num_clobbers_to_add + 1));
11415
11416 if (GET_CODE (pat) == PARALLEL)
11417 for (i = 0; i < XVECLEN (pat, 0); i++)
11418 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11419 else
11420 XVECEXP (newpat, 0, 0) = pat;
11421
11422 add_clobbers (newpat, insn_code_number);
11423
11424 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11425 i < XVECLEN (newpat, 0); i++)
11426 {
11427 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11428 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11429 return -1;
11430 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11431 {
11432 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11433 notes = alloc_reg_note (REG_UNUSED,
11434 XEXP (XVECEXP (newpat, 0, i), 0), notes);
11435 }
11436 }
11437 pat = newpat;
11438 }
11439
11440 if (insn_code_number >= 0
11441 && insn_code_number != NOOP_MOVE_INSN_CODE)
11442 {
11443 old_pat = PATTERN (insn);
11444 old_notes = REG_NOTES (insn);
11445 old_icode = INSN_CODE (insn);
11446 PATTERN (insn) = pat;
11447 REG_NOTES (insn) = notes;
11448 INSN_CODE (insn) = insn_code_number;
11449
11450 /* Allow targets to reject combined insn. */
11451 if (!targetm.legitimate_combined_insn (insn))
11452 {
11453 if (dump_file && (dump_flags & TDF_DETAILS))
11454 fputs ("Instruction not appropriate for target.",
11455 dump_file);
11456
11457 /* Callers expect recog_for_combine to strip
11458 clobbers from the pattern on failure. */
11459 pat = pat_without_clobbers;
11460 notes = NULL_RTX;
11461
11462 insn_code_number = -1;
11463 }
11464
11465 PATTERN (insn) = old_pat;
11466 REG_NOTES (insn) = old_notes;
11467 INSN_CODE (insn) = old_icode;
11468 }
11469
11470 *pnewpat = pat;
11471 *pnotes = notes;
11472
11473 return insn_code_number;
11474 }
11475
11476 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11477 expressed as an AND and maybe an LSHIFTRT, to that formulation.
11478 Return whether anything was so changed. */
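/* Illustrative rewrites (hypothetical register numbers, and assuming
   BITS_BIG_ENDIAN is zero):

     (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 4))
       --> (and:SI (lshiftrt:SI (reg:SI 100) (const_int 4)) (const_int 255))

     (zero_extend:SI (subreg:QI (reg:SI 101) 0))
       --> (and:SI (reg:SI 101) (const_int 255))  */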
11479
11480 static bool
11481 change_zero_ext (rtx pat)
11482 {
11483 bool changed = false;
11484 rtx *src = &SET_SRC (pat);
11485
11486 subrtx_ptr_iterator::array_type array;
11487 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11488 {
11489 rtx x = **iter;
11490 scalar_int_mode mode, inner_mode;
11491 if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11492 continue;
11493 int size;
11494
11495 if (GET_CODE (x) == ZERO_EXTRACT
11496 && CONST_INT_P (XEXP (x, 1))
11497 && CONST_INT_P (XEXP (x, 2))
11498 && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
11499 && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11500 {
11501 size = INTVAL (XEXP (x, 1));
11502
11503 int start = INTVAL (XEXP (x, 2));
11504 if (BITS_BIG_ENDIAN)
11505 start = GET_MODE_PRECISION (inner_mode) - size - start;
11506
11507 if (start != 0)
11508 x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0),
11509 gen_int_shift_amount (inner_mode, start));
11510 else
11511 x = XEXP (x, 0);
11512
11513 if (mode != inner_mode)
11514 {
11515 if (REG_P (x) && HARD_REGISTER_P (x)
11516 && !can_change_dest_mode (x, 0, mode))
11517 continue;
11518
11519 x = gen_lowpart_SUBREG (mode, x);
11520 }
11521 }
11522 else if (GET_CODE (x) == ZERO_EXTEND
11523 && GET_CODE (XEXP (x, 0)) == SUBREG
11524 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11525 && !paradoxical_subreg_p (XEXP (x, 0))
11526 && subreg_lowpart_p (XEXP (x, 0)))
11527 {
11528 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11529 size = GET_MODE_PRECISION (inner_mode);
11530 x = SUBREG_REG (XEXP (x, 0));
11531 if (GET_MODE (x) != mode)
11532 {
11533 if (REG_P (x) && HARD_REGISTER_P (x)
11534 && !can_change_dest_mode (x, 0, mode))
11535 continue;
11536
11537 x = gen_lowpart_SUBREG (mode, x);
11538 }
11539 }
11540 else if (GET_CODE (x) == ZERO_EXTEND
11541 && REG_P (XEXP (x, 0))
11542 && HARD_REGISTER_P (XEXP (x, 0))
11543 && can_change_dest_mode (XEXP (x, 0), 0, mode))
11544 {
11545 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11546 size = GET_MODE_PRECISION (inner_mode);
11547 x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11548 }
11549 else
11550 continue;
11551
11552 if (!(GET_CODE (x) == LSHIFTRT
11553 && CONST_INT_P (XEXP (x, 1))
11554 && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11555 {
11556 wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11557 x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11558 }
11559
11560 SUBST (**iter, x);
11561 changed = true;
11562 }
11563
11564 if (changed)
11565 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11566 maybe_swap_commutative_operands (**iter);
11567
11568 rtx *dst = &SET_DEST (pat);
11569 scalar_int_mode mode;
11570 if (GET_CODE (*dst) == ZERO_EXTRACT
11571 && REG_P (XEXP (*dst, 0))
11572 && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11573 && CONST_INT_P (XEXP (*dst, 1))
11574 && CONST_INT_P (XEXP (*dst, 2)))
11575 {
11576 rtx reg = XEXP (*dst, 0);
11577 int width = INTVAL (XEXP (*dst, 1));
11578 int offset = INTVAL (XEXP (*dst, 2));
11579 int reg_width = GET_MODE_PRECISION (mode);
11580 if (BITS_BIG_ENDIAN)
11581 offset = reg_width - width - offset;
11582
11583 rtx x, y, z, w;
11584 wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11585 wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11586 x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11587 if (offset)
11588 y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11589 else
11590 y = SET_SRC (pat);
11591 z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11592 w = gen_rtx_IOR (mode, x, z);
11593 SUBST (SET_DEST (pat), reg);
11594 SUBST (SET_SRC (pat), w);
11595
11596 changed = true;
11597 }
11598
11599 return changed;
11600 }
11601
11602 /* Like recog, but we receive the address of a pointer to a new pattern.
11603 We try to match the rtx that the pointer points to.
11604 If that fails, we may try to modify or replace the pattern,
11605 storing the replacement into the same pointer object.
11606
11607 Modifications include deletion or addition of CLOBBERs. If the
11608 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11609 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11610 (and undo if that fails).
11611
11612 PNOTES is a pointer to a location where any REG_UNUSED notes added for
11613 the CLOBBERs are placed.
11614
11615 The value is the final insn code from the pattern ultimately matched,
11616 or -1. */
11617
11618 static int
11619 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11620 {
11621 rtx pat = *pnewpat;
11622 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11623 if (insn_code_number >= 0 || check_asm_operands (pat))
11624 return insn_code_number;
11625
11626 void *marker = get_undo_marker ();
11627 bool changed = false;
11628
11629 if (GET_CODE (pat) == SET)
11630 {
11631 /* For an unrecognized single set of a constant, try placing it in
11632 the constant pool, if this function already uses one. */
11633 rtx src = SET_SRC (pat);
11634 if (CONSTANT_P (src)
11635 && !CONST_INT_P (src)
11636 && crtl->uses_const_pool)
11637 {
11638 machine_mode mode = GET_MODE (src);
11639 if (mode == VOIDmode)
11640 mode = GET_MODE (SET_DEST (pat));
11641 src = force_const_mem (mode, src);
11642 if (src)
11643 {
11644 SUBST (SET_SRC (pat), src);
11645 changed = true;
11646 }
11647 }
11648 else
11649 changed = change_zero_ext (pat);
11650 }
11651 else if (GET_CODE (pat) == PARALLEL)
11652 {
11653 int i;
11654 for (i = 0; i < XVECLEN (pat, 0); i++)
11655 {
11656 rtx set = XVECEXP (pat, 0, i);
11657 if (GET_CODE (set) == SET)
11658 changed |= change_zero_ext (set);
11659 }
11660 }
11661
11662 if (changed)
11663 {
11664 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11665
11666 if (insn_code_number < 0)
11667 undo_to_marker (marker);
11668 }
11669
11670 return insn_code_number;
11671 }
11672 \f
11673 /* Like gen_lowpart_general but for use by combine. In combine it
11674 is not possible to create any new pseudoregs. However, it is
11675 safe to create invalid memory addresses, because combine will
11676 try to recognize them and all they will do is make the combine
11677 attempt fail.
11678
11679 If for some reason this cannot do its job, an rtx
11680 (clobber (const_int 0)) is returned.
11681 An insn containing that will not be recognized. */
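/* For example (hypothetical register number), asking for the QImode
   lowpart of (reg:SI 103) yields (subreg:QI (reg:SI 103) 0) on a
   little-endian target, while a request this function cannot honor
   comes back as (clobber (const_int 0)).  */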
11682
11683 static rtx
11684 gen_lowpart_for_combine (machine_mode omode, rtx x)
11685 {
11686 machine_mode imode = GET_MODE (x);
11687 rtx result;
11688
11689 if (omode == imode)
11690 return x;
11691
11692 /* We can only support OMODE being wider than a word if X is a
11693 constant integer or has a mode of the same size.  */
11694 if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD)
11695 && ! (CONST_SCALAR_INT_P (x)
11696 || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode))))
11697 goto fail;
11698
11699 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
11700 won't know what to do. So we will strip off the SUBREG here and
11701 process normally. */
11702 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11703 {
11704 x = SUBREG_REG (x);
11705
11706 /* For use in case we fall down into the address adjustments
11707 further below, we need to adjust IMODE, the known mode of X,
11708 since we just changed X.  */
11709 imode = GET_MODE (x);
11710
11711 if (imode == omode)
11712 return x;
11713 }
11714
11715 result = gen_lowpart_common (omode, x);
11716
11717 if (result)
11718 return result;
11719
11720 if (MEM_P (x))
11721 {
11722 /* Refuse to work on a volatile memory ref or one with a mode-dependent
11723 address. */
11724 if (MEM_VOLATILE_P (x)
11725 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11726 goto fail;
11727
11728 /* If we want to refer to something bigger than the original memref,
11729 generate a paradoxical subreg instead. That will force a reload
11730 of the original memref X. */
11731 if (paradoxical_subreg_p (omode, imode))
11732 return gen_rtx_SUBREG (omode, x, 0);
11733
11734 poly_int64 offset = byte_lowpart_offset (omode, imode);
11735 return adjust_address_nv (x, omode, offset);
11736 }
11737
11738 /* If X is a comparison operator, rewrite it in a new mode. This
11739 probably won't match, but may allow further simplifications. */
11740 else if (COMPARISON_P (x)
11741 && SCALAR_INT_MODE_P (imode)
11742 && SCALAR_INT_MODE_P (omode))
11743 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11744
11745 /* If we couldn't simplify X any other way, just enclose it in a
11746 SUBREG. Normally, this SUBREG won't match, but some patterns may
11747 include an explicit SUBREG or we may simplify it further in combine. */
11748 else
11749 {
11750 rtx res;
11751
11752 if (imode == VOIDmode)
11753 {
11754 imode = int_mode_for_mode (omode).require ();
11755 x = gen_lowpart_common (imode, x);
11756 if (x == NULL)
11757 goto fail;
11758 }
11759 res = lowpart_subreg (omode, x, imode);
11760 if (res)
11761 return res;
11762 }
11763
11764 fail:
11765 return gen_rtx_CLOBBER (omode, const0_rtx);
11766 }
11767 \f
11768 /* Try to simplify a comparison between OP0 and a constant OP1,
11769 where CODE is the comparison code that will be tested, into a
11770 (CODE OP0 const0_rtx) form.
11771
11772 The result is a possibly different comparison code to use.
11773 *POP1 may be updated. */
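/* An illustrative call: comparing (and:SI X (const_int 8)) against 8
   with EQ, where nonzero_bits says only bit 3 of the AND can be set,
   comes back as NE with *POP1 replaced by (const_int 0).  */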
11774
11775 static enum rtx_code
11776 simplify_compare_const (enum rtx_code code, machine_mode mode,
11777 rtx op0, rtx *pop1)
11778 {
11779 scalar_int_mode int_mode;
11780 HOST_WIDE_INT const_op = INTVAL (*pop1);
11781
11782 /* Get the constant we are comparing against and turn off all bits
11783 not on in our mode. */
11784 if (mode != VOIDmode)
11785 const_op = trunc_int_for_mode (const_op, mode);
11786
11787 /* If we are comparing against a constant power of two and the value
11788 being compared can only have that single bit nonzero (e.g., it was
11789 `and'ed with that bit), we can replace this with a comparison
11790 with zero. */
11791 if (const_op
11792 && (code == EQ || code == NE || code == GE || code == GEU
11793 || code == LT || code == LTU)
11794 && is_a <scalar_int_mode> (mode, &int_mode)
11795 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11796 && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11797 && (nonzero_bits (op0, int_mode)
11798 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
11799 {
11800 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11801 const_op = 0;
11802 }
11803
11804 /* Similarly, if we are comparing a value known to be either -1 or
11805 0 with -1, change it to the opposite comparison against zero. */
11806 if (const_op == -1
11807 && (code == EQ || code == NE || code == GT || code == LE
11808 || code == GEU || code == LTU)
11809 && is_a <scalar_int_mode> (mode, &int_mode)
11810 && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11811 {
11812 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11813 const_op = 0;
11814 }
11815
11816 /* Do some canonicalizations based on the comparison code. We prefer
11817 comparisons against zero and then prefer equality comparisons.
11818 If we can reduce the size of a constant, we will do that too. */
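/* For instance, (lt X (const_int 1)) canonicalizes to
   (le X (const_int 0)) via the LT case below, and
   (gtu X (const_int 0)) becomes the equality test
   (ne X (const_int 0)).  */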
11819 switch (code)
11820 {
11821 case LT:
11822 /* < C is equivalent to <= (C - 1).  */
11823 if (const_op > 0)
11824 {
11825 const_op -= 1;
11826 code = LE;
11827 /* ... fall through to LE case below. */
11828 gcc_fallthrough ();
11829 }
11830 else
11831 break;
11832
11833 case LE:
11834 /* <= C is equivalent to < (C + 1); we do this for C < 0.  */
11835 if (const_op < 0)
11836 {
11837 const_op += 1;
11838 code = LT;
11839 }
11840
11841 /* If we are doing a <= 0 comparison on a value known to have
11842 a zero sign bit, we can replace this with == 0. */
11843 else if (const_op == 0
11844 && is_a <scalar_int_mode> (mode, &int_mode)
11845 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11846 && (nonzero_bits (op0, int_mode)
11847 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11848 == 0)
11849 code = EQ;
11850 break;
11851
11852 case GE:
11853 /* >= C is equivalent to > (C - 1). */
11854 if (const_op > 0)
11855 {
11856 const_op -= 1;
11857 code = GT;
11858 /* ... fall through to GT below. */
11859 gcc_fallthrough ();
11860 }
11861 else
11862 break;
11863
11864 case GT:
11865 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11866 if (const_op < 0)
11867 {
11868 const_op += 1;
11869 code = GE;
11870 }
11871
11872 /* If we are doing a > 0 comparison on a value known to have
11873 a zero sign bit, we can replace this with != 0. */
11874 else if (const_op == 0
11875 && is_a <scalar_int_mode> (mode, &int_mode)
11876 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11877 && (nonzero_bits (op0, int_mode)
11878 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11879 == 0)
11880 code = NE;
11881 break;
11882
11883 case LTU:
11884 /* < C is equivalent to <= (C - 1). */
11885 if (const_op > 0)
11886 {
11887 const_op -= 1;
11888 code = LEU;
11889 /* ... fall through ... */
11890 gcc_fallthrough ();
11891 }
11892 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11893 else if (is_a <scalar_int_mode> (mode, &int_mode)
11894 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11895 && ((unsigned HOST_WIDE_INT) const_op
11896 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11897 {
11898 const_op = 0;
11899 code = GE;
11900 break;
11901 }
11902 else
11903 break;
11904
11905 case LEU:
11906 /* unsigned <= 0 is equivalent to == 0 */
11907 if (const_op == 0)
11908 code = EQ;
11909 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11910 else if (is_a <scalar_int_mode> (mode, &int_mode)
11911 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11912 && ((unsigned HOST_WIDE_INT) const_op
11913 == ((HOST_WIDE_INT_1U
11914 << (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
11915 {
11916 const_op = 0;
11917 code = GE;
11918 }
11919 break;
11920
11921 case GEU:
11922 /* >= C is equivalent to > (C - 1). */
11923 if (const_op > 1)
11924 {
11925 const_op -= 1;
11926 code = GTU;
11927 /* ... fall through ... */
11928 gcc_fallthrough ();
11929 }
11930
11931 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11932 else if (is_a <scalar_int_mode> (mode, &int_mode)
11933 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11934 && ((unsigned HOST_WIDE_INT) const_op
11935 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11936 {
11937 const_op = 0;
11938 code = LT;
11939 break;
11940 }
11941 else
11942 break;
11943
11944 case GTU:
11945 /* unsigned > 0 is equivalent to != 0 */
11946 if (const_op == 0)
11947 code = NE;
11948 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11949 else if (is_a <scalar_int_mode> (mode, &int_mode)
11950 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11951 && ((unsigned HOST_WIDE_INT) const_op
11952 == (HOST_WIDE_INT_1U
11953 << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
11954 {
11955 const_op = 0;
11956 code = LT;
11957 }
11958 break;
11959
11960 default:
11961 break;
11962 }
11963
11964 *pop1 = GEN_INT (const_op);
11965 return code;
11966 }
11967 \f
11968 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11969 comparison code that will be tested.
11970
11971 The result is a possibly different comparison code to use. *POP0 and
11972 *POP1 may be updated.
11973
11974 It is possible that we might detect that a comparison is either always
11975 true or always false. However, we do not perform general constant
11976 folding in combine, so this knowledge isn't useful. Such tautologies
11977 should have been detected earlier. Hence we ignore all such cases. */
11978
11979 static enum rtx_code
11980 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11981 {
11982 rtx op0 = *pop0;
11983 rtx op1 = *pop1;
11984 rtx tem, tem1;
11985 int i;
11986 scalar_int_mode mode, inner_mode, tmode;
11987 opt_scalar_int_mode tmode_iter;
11988
11989 /* Try a few ways of applying the same transformation to both operands. */
11990 while (1)
11991 {
11992 /* The test below this one won't handle SIGN_EXTENDs on machines
11993 without WORD_REGISTER_OPERATIONS, so check for them specially.  */
11994 if (!WORD_REGISTER_OPERATIONS
11995 && code != GTU && code != GEU && code != LTU && code != LEU
11996 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11997 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11998 && GET_CODE (XEXP (op1, 0)) == ASHIFT
11999 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
12000 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
12001 && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
12002 && (is_a <scalar_int_mode>
12003 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
12004 && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
12005 && CONST_INT_P (XEXP (op0, 1))
12006 && XEXP (op0, 1) == XEXP (op1, 1)
12007 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12008 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
12009 && (INTVAL (XEXP (op0, 1))
12010 == (GET_MODE_PRECISION (mode)
12011 - GET_MODE_PRECISION (inner_mode))))
12012 {
12013 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
12014 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
12015 }
12016
12017 /* If both operands are the same constant shift, see if we can ignore the
12018 shift. We can if the shift is a rotate or if the bits shifted out of
12019 this shift are known to be zero for both inputs and if the type of
12020 comparison is compatible with the shift. */
12021 if (GET_CODE (op0) == GET_CODE (op1)
12022 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
12023 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
12024 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
12025 && (code != GT && code != LT && code != GE && code != LE))
12026 || (GET_CODE (op0) == ASHIFTRT
12027 && (code != GTU && code != LTU
12028 && code != GEU && code != LEU)))
12029 && CONST_INT_P (XEXP (op0, 1))
12030 && INTVAL (XEXP (op0, 1)) >= 0
12031 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12032 && XEXP (op0, 1) == XEXP (op1, 1))
12033 {
12034 machine_mode mode = GET_MODE (op0);
12035 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12036 int shift_count = INTVAL (XEXP (op0, 1));
12037
12038 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
12039 mask &= (mask >> shift_count) << shift_count;
12040 else if (GET_CODE (op0) == ASHIFT)
12041 mask = (mask & (mask << shift_count)) >> shift_count;
12042
12043 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
12044 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
12045 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
12046 else
12047 break;
12048 }
12049
12050 /* If both operands are AND's of a paradoxical SUBREG by constant, the
12051 SUBREGs are of the same mode, and, in both cases, the AND would
12052 be redundant if the comparison was done in the narrower mode,
12053 do the comparison in the narrower mode (e.g., we are AND'ing with 1
12054 and the operand's possibly nonzero bits are 0xffffff01; in that case
12055 if we only care about QImode, we don't need the AND). This case
12056 occurs if the output mode of an scc insn is not SImode and
12057 STORE_FLAG_VALUE == 1 (e.g., the 386).
12058
12059 Similarly, check for a case where the AND's are ZERO_EXTEND
12060 operations from some narrower mode even though a SUBREG is not
12061 present. */
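/* A hypothetical instance of the second form: with EQ, comparing
   (and:SI (subreg:SI (reg:QI 104) 0) (const_int 255)) against
   (and:SI (subreg:SI (reg:QI 105) 0) (const_int 255)) is reduced to
   a QImode comparison of the two registers, both ANDs dropped.  */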
12062
12063 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
12064 && CONST_INT_P (XEXP (op0, 1))
12065 && CONST_INT_P (XEXP (op1, 1)))
12066 {
12067 rtx inner_op0 = XEXP (op0, 0);
12068 rtx inner_op1 = XEXP (op1, 0);
12069 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
12070 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
12071 int changed = 0;
12072
12073 if (paradoxical_subreg_p (inner_op0)
12074 && GET_CODE (inner_op1) == SUBREG
12075 && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0)))
12076 && (GET_MODE (SUBREG_REG (inner_op0))
12077 == GET_MODE (SUBREG_REG (inner_op1)))
12078 && ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
12079 GET_MODE (SUBREG_REG (inner_op0)))) == 0
12080 && ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
12081 GET_MODE (SUBREG_REG (inner_op1)))) == 0)
12082 {
12083 op0 = SUBREG_REG (inner_op0);
12084 op1 = SUBREG_REG (inner_op1);
12085
12086 /* The resulting comparison is always unsigned since we masked
12087 off the original sign bit. */
12088 code = unsigned_condition (code);
12089
12090 changed = 1;
12091 }
12092
12093 else if (c0 == c1)
12094 FOR_EACH_MODE_UNTIL (tmode,
12095 as_a <scalar_int_mode> (GET_MODE (op0)))
12096 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
12097 {
12098 op0 = gen_lowpart_or_truncate (tmode, inner_op0);
12099 op1 = gen_lowpart_or_truncate (tmode, inner_op1);
12100 code = unsigned_condition (code);
12101 changed = 1;
12102 break;
12103 }
12104
12105 if (! changed)
12106 break;
12107 }
12108
12109 /* If both operands are NOT, we can strip off the outer operation
12110 and adjust the comparison code for swapped operands; similarly for
12111 NEG, except that this must be an equality comparison. */
12112 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
12113 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
12114 && (code == EQ || code == NE)))
12115 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
12116
12117 else
12118 break;
12119 }
12120
12121 /* If the first operand is a constant, swap the operands and adjust the
12122 comparison code appropriately, but don't do this if the second operand
12123 is already a constant integer. */
12124 if (swap_commutative_operands_p (op0, op1))
12125 {
12126 std::swap (op0, op1);
12127 code = swap_condition (code);
12128 }
12129
12130 /* We now enter a loop during which we will try to simplify the comparison.
12131 For the most part, we are only concerned with comparisons with zero,
12132 but some things that are really comparisons with zero may not start
12133 out looking that way.  */
12134
12135 while (CONST_INT_P (op1))
12136 {
12137 machine_mode raw_mode = GET_MODE (op0);
12138 scalar_int_mode int_mode;
12139 int equality_comparison_p;
12140 int sign_bit_comparison_p;
12141 int unsigned_comparison_p;
12142 HOST_WIDE_INT const_op;
12143
12144 /* We only want to handle integral modes. This catches VOIDmode,
12145 CCmode, and the floating-point modes. An exception is that we
12146 can handle VOIDmode if OP0 is a COMPARE or a comparison
12147 operation. */
12148
12149 if (GET_MODE_CLASS (raw_mode) != MODE_INT
12150 && ! (raw_mode == VOIDmode
12151 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
12152 break;
12153
12154 /* Try to simplify the compare to constant, possibly changing the
12155 comparison op, and/or changing op1 to zero. */
12156 code = simplify_compare_const (code, raw_mode, op0, &op1);
12157 const_op = INTVAL (op1);
12158
12159 /* Compute some predicates to simplify code below. */
12160
12161 equality_comparison_p = (code == EQ || code == NE);
12162 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
12163 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
12164 || code == GEU);
12165
12166 /* If this is a sign bit comparison and we can do arithmetic in
12167 MODE, say that we will only be needing the sign bit of OP0. */
12168 if (sign_bit_comparison_p
12169 && is_a <scalar_int_mode> (raw_mode, &int_mode)
12170 && HWI_COMPUTABLE_MODE_P (int_mode))
12171 op0 = force_to_mode (op0, int_mode,
12172 HOST_WIDE_INT_1U
12173 << (GET_MODE_PRECISION (int_mode) - 1),
12174 0);
12175
12176 if (COMPARISON_P (op0))
12177 {
12178 /* We can't do anything if OP0 is a condition code value, rather
12179 than an actual data value. */
12180 if (const_op != 0
12181 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12182 break;
12183
12184 /* Get the two operands being compared. */
12185 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12186 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12187 else
12188 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12189
12190 /* Check for the cases where we simply want the result of the
12191 earlier test or the opposite of that result. */
12192 if (code == NE || code == EQ
12193 || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
12194 && (code == LT || code == GE)))
12195 {
12196 enum rtx_code new_code;
12197 if (code == LT || code == NE)
12198 new_code = GET_CODE (op0);
12199 else
12200 new_code = reversed_comparison_code (op0, NULL);
12201
12202 if (new_code != UNKNOWN)
12203 {
12204 code = new_code;
12205 op0 = tem;
12206 op1 = tem1;
12207 continue;
12208 }
12209 }
12210 break;
12211 }
12212
12213 if (raw_mode == VOIDmode)
12214 break;
12215 scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12216
12217 /* Now try cases based on the opcode of OP0. If none of the cases
12218 does a "continue", we exit this loop immediately after the
12219 switch. */
12220
12221 unsigned int mode_width = GET_MODE_PRECISION (mode);
12222 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12223 switch (GET_CODE (op0))
12224 {
12225 case ZERO_EXTRACT:
12226 /* If we are extracting a single bit from a variable position in
12227 a constant that has only a single bit set and are comparing it
12228 with zero, we can convert this into an equality comparison
12229 between the position and the location of the single bit. */
12230 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
12231 have already reduced the shift count modulo the word size. */
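/* Illustration: (eq (zero_extract (const_int 4) (const_int 1) POS)
   (const_int 0)) tests bit POS of the constant 4, so it becomes
   (ne POS (const_int 2)) when BITS_BIG_ENDIAN is zero.  */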
12232 if (!SHIFT_COUNT_TRUNCATED
12233 && CONST_INT_P (XEXP (op0, 0))
12234 && XEXP (op0, 1) == const1_rtx
12235 && equality_comparison_p && const_op == 0
12236 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12237 {
12238 if (BITS_BIG_ENDIAN)
12239 i = BITS_PER_WORD - 1 - i;
12240
12241 op0 = XEXP (op0, 2);
12242 op1 = GEN_INT (i);
12243 const_op = i;
12244
12245 /* Result is nonzero iff shift count is equal to I. */
12246 code = reverse_condition (code);
12247 continue;
12248 }
12249
12250 /* fall through */
12251
12252 case SIGN_EXTRACT:
12253 tem = expand_compound_operation (op0);
12254 if (tem != op0)
12255 {
12256 op0 = tem;
12257 continue;
12258 }
12259 break;
12260
12261 case NOT:
12262 /* If testing for equality, we can take the NOT of the constant. */
12263 if (equality_comparison_p
12264 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12265 {
12266 op0 = XEXP (op0, 0);
12267 op1 = tem;
12268 continue;
12269 }
12270
12271 /* If just looking at the sign bit, reverse the sense of the
12272 comparison. */
12273 if (sign_bit_comparison_p)
12274 {
12275 op0 = XEXP (op0, 0);
12276 code = (code == GE ? LT : GE);
12277 continue;
12278 }
12279 break;
12280
12281 case NEG:
12282 /* If testing for equality, we can take the NEG of the constant. */
12283 if (equality_comparison_p
12284 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12285 {
12286 op0 = XEXP (op0, 0);
12287 op1 = tem;
12288 continue;
12289 }
12290
12291 /* The remaining cases only apply to comparisons with zero. */
12292 if (const_op != 0)
12293 break;
12294
12295 /* When X is ABS or is known positive,
12296 (neg X) is < 0 if and only if X != 0. */
12297
12298 if (sign_bit_comparison_p
12299 && (GET_CODE (XEXP (op0, 0)) == ABS
12300 || (mode_width <= HOST_BITS_PER_WIDE_INT
12301 && (nonzero_bits (XEXP (op0, 0), mode)
12302 & (HOST_WIDE_INT_1U << (mode_width - 1)))
12303 == 0)))
12304 {
12305 op0 = XEXP (op0, 0);
12306 code = (code == LT ? NE : EQ);
12307 continue;
12308 }
12309
12310 /* If we have NEG of something whose two high-order bits are the
12311 same, we know that "(-a) < 0" is equivalent to "a > 0". */
12312 if (num_sign_bit_copies (op0, mode) >= 2)
12313 {
12314 op0 = XEXP (op0, 0);
12315 code = swap_condition (code);
12316 continue;
12317 }
12318 break;
12319
12320 case ROTATE:
12321 /* If we are testing equality and our count is a constant, we
12322 can perform the inverse operation on our RHS. */
12323 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12324 && (tem = simplify_binary_operation (ROTATERT, mode,
12325 op1, XEXP (op0, 1))) != 0)
12326 {
12327 op0 = XEXP (op0, 0);
12328 op1 = tem;
12329 continue;
12330 }
12331
12332 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12333 a particular bit. Convert it to an AND of a constant of that
12334 bit. This will be converted into a ZERO_EXTRACT. */
12335 if (const_op == 0 && sign_bit_comparison_p
12336 && CONST_INT_P (XEXP (op0, 1))
12337 && mode_width <= HOST_BITS_PER_WIDE_INT
12338 && UINTVAL (XEXP (op0, 1)) < mode_width)
12339 {
12340 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12341 (HOST_WIDE_INT_1U
12342 << (mode_width - 1
12343 - INTVAL (XEXP (op0, 1)))));
12344 code = (code == LT ? NE : EQ);
12345 continue;
12346 }
12347
12348 /* Fall through. */
12349
12350 case ABS:
12351 /* ABS is ignorable inside an equality comparison with zero. */
12352 if (const_op == 0 && equality_comparison_p)
12353 {
12354 op0 = XEXP (op0, 0);
12355 continue;
12356 }
12357 break;
12358
12359 case SIGN_EXTEND:
12360 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12361 (compare FOO CONST) if CONST fits in FOO's mode and we
12362 are either testing inequality or have an unsigned
12363 comparison with ZERO_EXTEND or a signed comparison with
12364 SIGN_EXTEND. But don't do it if we don't have a compare
12365 insn of the given mode, since we'd have to revert it
12366 later on, and then we wouldn't know whether to sign- or
12367 zero-extend. */
12368 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12369 && ! unsigned_comparison_p
12370 && HWI_COMPUTABLE_MODE_P (mode)
12371 && trunc_int_for_mode (const_op, mode) == const_op
12372 && have_insn_for (COMPARE, mode))
12373 {
12374 op0 = XEXP (op0, 0);
12375 continue;
12376 }
12377 break;
12378
12379 case SUBREG:
12380 /* Check for the case where we are comparing A - C1 with C2, that is
12381
12382 (subreg:MODE (plus (A) (-C1))) op (C2)
12383
12384 with C1 a constant, and try to lift the SUBREG, i.e. to do the
12385 comparison in the wider mode. One of the following two conditions
12386 must be true in order for this to be valid:
12387
12388 1. The mode extension results in the same bit pattern being added
12389 on both sides and the comparison is equality or unsigned. As
12390 C2 has been truncated to fit in MODE, the pattern can only be
12391 all 0s or all 1s.
12392
12393 2. The mode extension results in the sign bit being copied on
12394 each side.
12395
12396 The difficulty here is that we have predicates for A but not for
12397 (A - C1) so we need to check that C1 is within proper bounds so
12398 as to perturb A as little as possible.  */
12399
12400 if (mode_width <= HOST_BITS_PER_WIDE_INT
12401 && subreg_lowpart_p (op0)
12402 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
12403 &inner_mode)
12404 && GET_MODE_PRECISION (inner_mode) > mode_width
12405 && GET_CODE (SUBREG_REG (op0)) == PLUS
12406 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12407 {
12408 rtx a = XEXP (SUBREG_REG (op0), 0);
12409 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12410
12411 if ((c1 > 0
12412 && (unsigned HOST_WIDE_INT) c1
12413 < HOST_WIDE_INT_1U << (mode_width - 1)
12414 && (equality_comparison_p || unsigned_comparison_p)
12415 /* (A - C1) zero-extends if it is positive and sign-extends
12416 if it is negative, C2 both zero- and sign-extends. */
12417 && (((nonzero_bits (a, inner_mode)
12418 & ~GET_MODE_MASK (mode)) == 0
12419 && const_op >= 0)
12420 /* (A - C1) sign-extends if it is positive and 1-extends
12421 if it is negative, C2 both sign- and 1-extends. */
12422 || (num_sign_bit_copies (a, inner_mode)
12423 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12424 - mode_width)
12425 && const_op < 0)))
12426 || ((unsigned HOST_WIDE_INT) c1
12427 < HOST_WIDE_INT_1U << (mode_width - 2)
12428 /* (A - C1) always sign-extends, like C2. */
12429 && num_sign_bit_copies (a, inner_mode)
12430 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12431 - (mode_width - 1))))
12432 {
12433 op0 = SUBREG_REG (op0);
12434 continue;
12435 }
12436 }
12437
12438 /* If the inner mode is narrower and we are extracting the low part,
12439 we can treat the SUBREG as if it were a ZERO_EXTEND. */
12440 if (paradoxical_subreg_p (op0))
12441 ;
12442 else if (subreg_lowpart_p (op0)
12443 && GET_MODE_CLASS (mode) == MODE_INT
12444 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12445 && (code == NE || code == EQ)
12446 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12447 && !paradoxical_subreg_p (op0)
12448 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12449 & ~GET_MODE_MASK (mode)) == 0)
12450 {
12451 /* Remove outer subregs that don't do anything. */
12452 tem = gen_lowpart (inner_mode, op1);
12453
12454 if ((nonzero_bits (tem, inner_mode)
12455 & ~GET_MODE_MASK (mode)) == 0)
12456 {
12457 op0 = SUBREG_REG (op0);
12458 op1 = tem;
12459 continue;
12460 }
12461 break;
12462 }
12463 else
12464 break;
12465
12466 /* FALLTHROUGH */
12467
12468 case ZERO_EXTEND:
12469 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12470 && (unsigned_comparison_p || equality_comparison_p)
12471 && HWI_COMPUTABLE_MODE_P (mode)
12472 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12473 && const_op >= 0
12474 && have_insn_for (COMPARE, mode))
12475 {
12476 op0 = XEXP (op0, 0);
12477 continue;
12478 }
12479 break;
12480
12481 case PLUS:
12482 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
12483 this for equality comparisons due to pathological cases involving
12484 overflows. */
12485 if (equality_comparison_p
12486 && (tem = simplify_binary_operation (MINUS, mode,
12487 op1, XEXP (op0, 1))) != 0)
12488 {
12489 op0 = XEXP (op0, 0);
12490 op1 = tem;
12491 continue;
12492 }
12493
12494 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
12495 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12496 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12497 {
12498 op0 = XEXP (XEXP (op0, 0), 0);
12499 code = (code == LT ? EQ : NE);
12500 continue;
12501 }
12502 break;
12503
12504 case MINUS:
12505 /* We used to optimize signed comparisons against zero, but that
12506 was incorrect.  Unsigned comparisons against zero (GTU, LEU)
12507 arrive here as equality comparisons, while (GEU, LTU) are
12508 optimized away entirely.  No need to special-case them.  */
12509
12510 /* (eq (minus A B) C) -> (eq A (plus B C)) or
12511 (eq B (minus A C)), whichever simplifies. We can only do
12512 this for equality comparisons due to pathological cases involving
12513 overflows. */
12514 if (equality_comparison_p
12515 && (tem = simplify_binary_operation (PLUS, mode,
12516 XEXP (op0, 1), op1)) != 0)
12517 {
12518 op0 = XEXP (op0, 0);
12519 op1 = tem;
12520 continue;
12521 }
12522
12523 if (equality_comparison_p
12524 && (tem = simplify_binary_operation (MINUS, mode,
12525 XEXP (op0, 0), op1)) != 0)
12526 {
12527 op0 = XEXP (op0, 1);
12528 op1 = tem;
12529 continue;
12530 }
12531
12532 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12533 of bits in X minus 1, is one iff X > 0. */
12534 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12535 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12536 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12537 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12538 {
12539 op0 = XEXP (op0, 1);
12540 code = (code == GE ? LE : GT);
12541 continue;
12542 }
12543 break;
12544
12545 case XOR:
12546 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
12547 if C is zero or B is a constant. */
12548 if (equality_comparison_p
12549 && (tem = simplify_binary_operation (XOR, mode,
12550 XEXP (op0, 1), op1)) != 0)
12551 {
12552 op0 = XEXP (op0, 0);
12553 op1 = tem;
12554 continue;
12555 }
12556 break;
12557
12559 case IOR:
12560 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12561 iff X <= 0. */
12562 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12563 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12564 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12565 {
12566 op0 = XEXP (op0, 1);
12567 code = (code == GE ? GT : LE);
12568 continue;
12569 }
12570 break;
12571
12572 case AND:
12573 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
12574 will be converted to a ZERO_EXTRACT later. */
12575 if (const_op == 0 && equality_comparison_p
12576 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12577 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12578 {
12579 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12580 XEXP (XEXP (op0, 0), 1));
12581 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12582 continue;
12583 }
12584
12585 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12586 zero and X is a comparison and C1 and C2 describe only bits set
12587 in STORE_FLAG_VALUE, we can compare with X. */
12588 if (const_op == 0 && equality_comparison_p
12589 && mode_width <= HOST_BITS_PER_WIDE_INT
12590 && CONST_INT_P (XEXP (op0, 1))
12591 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12592 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12593 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12594 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12595 {
12596 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12597 << INTVAL (XEXP (XEXP (op0, 0), 1)));
12598 if ((~STORE_FLAG_VALUE & mask) == 0
12599 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12600 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12601 && COMPARISON_P (tem))))
12602 {
12603 op0 = XEXP (XEXP (op0, 0), 0);
12604 continue;
12605 }
12606 }
12607
12608 /* If we are doing an equality comparison of an AND of a bit equal
12609 to the sign bit, replace this with a LT or GE comparison of
12610 the underlying value. */
12611 if (equality_comparison_p
12612 && const_op == 0
12613 && CONST_INT_P (XEXP (op0, 1))
12614 && mode_width <= HOST_BITS_PER_WIDE_INT
12615 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12616 == HOST_WIDE_INT_1U << (mode_width - 1)))
12617 {
12618 op0 = XEXP (op0, 0);
12619 code = (code == EQ ? GE : LT);
12620 continue;
12621 }
12622
12623 /* If this AND operation is really a ZERO_EXTEND from a narrower
12624 mode, the constant fits within that mode, and this is either an
12625 equality or unsigned comparison, try to do this comparison in
12626 the narrower mode.
12627
12628 Note that in:
12629
12630 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12631 -> (ne:DI (reg:SI 4) (const_int 0))
12632
12633 unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is
12634 known to hold a value of the required mode the
12635 transformation is invalid. */
12636 if ((equality_comparison_p || unsigned_comparison_p)
12637 && CONST_INT_P (XEXP (op0, 1))
12638 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12639 & GET_MODE_MASK (mode))
12640 + 1)) >= 0
12641 && const_op >> i == 0
12642 && int_mode_for_size (i, 1).exists (&tmode))
12643 {
12644 op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12645 continue;
12646 }
12647
12648 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12649 fits in both M1 and M2 and the SUBREG is either paradoxical
12650 or represents the low part, permute the SUBREG and the AND
12651 and try again. */
12652 if (GET_CODE (XEXP (op0, 0)) == SUBREG
12653 && CONST_INT_P (XEXP (op0, 1)))
12654 {
12655 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12656 /* Require an integral mode, to avoid creating something like
12657 (AND:SF ...). */
12658 if ((is_a <scalar_int_mode>
12659 (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12660 /* It is unsafe to commute the AND into the SUBREG if the
12661 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12662 not defined. As originally written the upper bits
12663 have a defined value due to the AND operation.
12664 However, if we commute the AND inside the SUBREG then
12665 they no longer have defined values and the meaning of
12666 the code has been changed.
12667 Also C1 should not change value in the smaller mode,
12668 see PR67028 (a positive C1 can become negative in the
12669 smaller mode, so that the AND does no longer mask the
12670 upper bits). */
12671 && ((WORD_REGISTER_OPERATIONS
12672 && mode_width > GET_MODE_PRECISION (tmode)
12673 && mode_width <= BITS_PER_WORD
12674 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12675 || (mode_width <= GET_MODE_PRECISION (tmode)
12676 && subreg_lowpart_p (XEXP (op0, 0))))
12677 && mode_width <= HOST_BITS_PER_WIDE_INT
12678 && HWI_COMPUTABLE_MODE_P (tmode)
12679 && (c1 & ~mask) == 0
12680 && (c1 & ~GET_MODE_MASK (tmode)) == 0
12681 && c1 != mask
12682 && c1 != GET_MODE_MASK (tmode))
12683 {
12684 op0 = simplify_gen_binary (AND, tmode,
12685 SUBREG_REG (XEXP (op0, 0)),
12686 gen_int_mode (c1, tmode));
12687 op0 = gen_lowpart (mode, op0);
12688 continue;
12689 }
12690 }
12691
12692 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
12693 if (const_op == 0 && equality_comparison_p
12694 && XEXP (op0, 1) == const1_rtx
12695 && GET_CODE (XEXP (op0, 0)) == NOT)
12696 {
12697 op0 = simplify_and_const_int (NULL_RTX, mode,
12698 XEXP (XEXP (op0, 0), 0), 1);
12699 code = (code == NE ? EQ : NE);
12700 continue;
12701 }
12702
12703 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12704 (eq (and (lshiftrt X) 1) 0).
12705 Also handle the case where (not X) is expressed using xor. */
12706 if (const_op == 0 && equality_comparison_p
12707 && XEXP (op0, 1) == const1_rtx
12708 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12709 {
12710 rtx shift_op = XEXP (XEXP (op0, 0), 0);
12711 rtx shift_count = XEXP (XEXP (op0, 0), 1);
12712
12713 if (GET_CODE (shift_op) == NOT
12714 || (GET_CODE (shift_op) == XOR
12715 && CONST_INT_P (XEXP (shift_op, 1))
12716 && CONST_INT_P (shift_count)
12717 && HWI_COMPUTABLE_MODE_P (mode)
12718 && (UINTVAL (XEXP (shift_op, 1))
12719 == HOST_WIDE_INT_1U
12720 << INTVAL (shift_count))))
12721 {
12722 op0
12723 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12724 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12725 code = (code == NE ? EQ : NE);
12726 continue;
12727 }
12728 }
12729 break;
12730
12731 case ASHIFT:
12732 /* If we have (compare (ashift FOO N) (const_int C)) and
12733 the high order N bits of FOO (N+1 if an inequality comparison)
12734 are known to be zero, we can do this by comparing FOO with C
12735 shifted right N bits so long as the low-order N bits of C are
12736 zero. */
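/* E.g. (eq (ashift:SI FOO (const_int 2)) (const_int 20)) can become
   (eq FOO (const_int 5)), provided nonzero_bits shows the top two
   bits of FOO are clear.  */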
12737 if (CONST_INT_P (XEXP (op0, 1))
12738 && INTVAL (XEXP (op0, 1)) >= 0
12739 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12740 < HOST_BITS_PER_WIDE_INT)
12741 && (((unsigned HOST_WIDE_INT) const_op
12742 & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12743 - 1)) == 0)
12744 && mode_width <= HOST_BITS_PER_WIDE_INT
12745 && (nonzero_bits (XEXP (op0, 0), mode)
12746 & ~(mask >> (INTVAL (XEXP (op0, 1))
12747 + ! equality_comparison_p))) == 0)
12748 {
12749 /* We must perform a logical shift, not an arithmetic one,
12750 as we want the top N bits of C to be zero. */
12751 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12752
12753 temp >>= INTVAL (XEXP (op0, 1));
12754 op1 = gen_int_mode (temp, mode);
12755 op0 = XEXP (op0, 0);
12756 continue;
12757 }
12758
12759 /* If we are doing a sign bit comparison, it means we are testing
12760 a particular bit. Convert it to the appropriate AND. */
12761 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12762 && mode_width <= HOST_BITS_PER_WIDE_INT)
12763 {
12764 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12765 (HOST_WIDE_INT_1U
12766 << (mode_width - 1
12767 - INTVAL (XEXP (op0, 1)))));
12768 code = (code == LT ? NE : EQ);
12769 continue;
12770 }
12771
12772 /* If this is an equality comparison with zero and we are shifting
12773 the low bit to the sign bit, we can convert this to an AND of the
12774 low-order bit. */
12775 if (const_op == 0 && equality_comparison_p
12776 && CONST_INT_P (XEXP (op0, 1))
12777 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12778 {
12779 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12780 continue;
12781 }
12782 break;
12783
12784 case ASHIFTRT:
12785 /* If this is an equality comparison with zero, we can do this
12786 as a logical shift, which might be much simpler. */
12787 if (equality_comparison_p && const_op == 0
12788 && CONST_INT_P (XEXP (op0, 1)))
12789 {
12790 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12791 XEXP (op0, 0),
12792 INTVAL (XEXP (op0, 1)));
12793 continue;
12794 }
12795
12796 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12797 do the comparison in a narrower mode. */
12798 if (! unsigned_comparison_p
12799 && CONST_INT_P (XEXP (op0, 1))
12800 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12801 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12802 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12803 .exists (&tmode))
12804 && (((unsigned HOST_WIDE_INT) const_op
12805 + (GET_MODE_MASK (tmode) >> 1) + 1)
12806 <= GET_MODE_MASK (tmode)))
12807 {
12808 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12809 continue;
12810 }
12811
12812 /* Likewise if OP0 is a PLUS of a sign extension with a
12813 constant, which is usually represented with the PLUS
12814 between the shifts. */
12815 if (! unsigned_comparison_p
12816 && CONST_INT_P (XEXP (op0, 1))
12817 && GET_CODE (XEXP (op0, 0)) == PLUS
12818 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12819 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12820 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12821 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12822 .exists (&tmode))
12823 && (((unsigned HOST_WIDE_INT) const_op
12824 + (GET_MODE_MASK (tmode) >> 1) + 1)
12825 <= GET_MODE_MASK (tmode)))
12826 {
12827 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12828 rtx add_const = XEXP (XEXP (op0, 0), 1);
12829 rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
12830 add_const, XEXP (op0, 1));
12831
12832 op0 = simplify_gen_binary (PLUS, tmode,
12833 gen_lowpart (tmode, inner),
12834 new_const);
12835 continue;
12836 }
12837
12838 /* FALLTHROUGH */
12839 case LSHIFTRT:
12840 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12841 the low order N bits of FOO are known to be zero, we can do this
12842 by comparing FOO with C shifted left N bits so long as no
12843 overflow occurs.  Even if the low order N bits of FOO aren't known
12844 to be zero, we can use the same optimization when the comparison
12845 is >= or <, and for > or <= by setting all the low order N bits
12846 in the comparison constant.  */
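/* As an illustration, (geu (lshiftrt:SI FOO (const_int 2))
   (const_int 5)) becomes (geu FOO (const_int 20)): unsigned
   FOO >> 2 >= 5 holds exactly when FOO >= 20.  */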
12847 if (CONST_INT_P (XEXP (op0, 1))
12848 && INTVAL (XEXP (op0, 1)) > 0
12849 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12850 && mode_width <= HOST_BITS_PER_WIDE_INT
12851 && (((unsigned HOST_WIDE_INT) const_op
12852 + (GET_CODE (op0) != LSHIFTRT
12853 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12854 + 1)
12855 : 0))
12856 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12857 {
12858 unsigned HOST_WIDE_INT low_bits
12859 = (nonzero_bits (XEXP (op0, 0), mode)
12860 & ((HOST_WIDE_INT_1U
12861 << INTVAL (XEXP (op0, 1))) - 1));
12862 if (low_bits == 0 || !equality_comparison_p)
12863 {
12864 /* If the shift was logical, then we must make the condition
12865 unsigned. */
12866 if (GET_CODE (op0) == LSHIFTRT)
12867 code = unsigned_condition (code);
12868
12869 const_op = (unsigned HOST_WIDE_INT) const_op
12870 << INTVAL (XEXP (op0, 1));
12871 if (low_bits != 0
12872 && (code == GT || code == GTU
12873 || code == LE || code == LEU))
12874 const_op
12875 |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12876 op1 = GEN_INT (const_op);
12877 op0 = XEXP (op0, 0);
12878 continue;
12879 }
12880 }
12881
12882 /* If we are using this shift to extract just the sign bit, we
12883 can replace this with an LT or GE comparison. */
12884 if (const_op == 0
12885 && (equality_comparison_p || sign_bit_comparison_p)
12886 && CONST_INT_P (XEXP (op0, 1))
12887 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12888 {
12889 op0 = XEXP (op0, 0);
12890 code = (code == NE || code == GT ? LT : GE);
12891 continue;
12892 }
12893 break;
12894
12895 default:
12896 break;
12897 }
12898
12899 break;
12900 }
12901
12902 /* Now make any compound operations involved in this comparison. Then,
12903 check for an outermost SUBREG on OP0 that is not doing anything or is
12904 paradoxical. The latter transformation must only be performed when
12905 it is known that the "extra" bits will be the same in op0 and op1 or
12906 that they don't matter. There are three cases to consider:
12907
12908 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12909 care bits and we can assume they have any convenient value. So
12910 making the transformation is safe.
12911
12912 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
12913 In this case the upper bits of op0 are undefined. We should not make
12914 the simplification in that case as we do not know the contents of
12915 those bits.
12916
12917 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
12918 In that case we know those bits are zeros or ones. We must also be
12919 sure that they are the same as the upper bits of op1.
12920
12921 We can never remove a SUBREG for a non-equality comparison because
12922 the sign bit is in a different place in the underlying object. */
12923
12924 rtx_code op0_mco_code = SET;
12925 if (op1 == const0_rtx)
12926 op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
12927
12928 op0 = make_compound_operation (op0, op0_mco_code);
12929 op1 = make_compound_operation (op1, SET);
12930
12931 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12932 && is_int_mode (GET_MODE (op0), &mode)
12933 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12934 && (code == NE || code == EQ))
12935 {
12936 if (paradoxical_subreg_p (op0))
12937 {
12938 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
12939 implemented. */
12940 if (REG_P (SUBREG_REG (op0)))
12941 {
12942 op0 = SUBREG_REG (op0);
12943 op1 = gen_lowpart (inner_mode, op1);
12944 }
12945 }
12946 else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12947 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12948 & ~GET_MODE_MASK (mode)) == 0)
12949 {
12950 tem = gen_lowpart (inner_mode, op1);
12951
12952 if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
12953 op0 = SUBREG_REG (op0), op1 = tem;
12954 }
12955 }
12956
12957 /* We now do the opposite procedure: Some machines don't have compare
12958 insns in all modes. If OP0's mode is an integer mode smaller than a
12959 word and we can't do a compare in that mode, see if there is a larger
12960 mode for which we can do the compare. There are a number of cases in
12961 which we can use the wider mode. */
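/* For example, on a hypothetical target with no QImode compare insn,
   (eq (reg:QI 106) (const_int 0)) can be widened to
   (eq (zero_extend:SI (reg:QI 106)) (const_int 0)) once nonzero_bits
   confirms the value is zero-extended.  */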
12962
12963 if (is_int_mode (GET_MODE (op0), &mode)
12964 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
12965 && ! have_insn_for (COMPARE, mode))
12966 FOR_EACH_WIDER_MODE (tmode_iter, mode)
12967 {
12968 tmode = tmode_iter.require ();
12969 if (!HWI_COMPUTABLE_MODE_P (tmode))
12970 break;
12971 if (have_insn_for (COMPARE, tmode))
12972 {
12973 int zero_extended;
12974
12975 /* If this is a test for negative, we can make an explicit
12976 test of the sign bit. Test this first so we can use
12977 a paradoxical subreg to extend OP0. */
12978
12979 if (op1 == const0_rtx && (code == LT || code == GE)
12980 && HWI_COMPUTABLE_MODE_P (mode))
12981 {
12982 unsigned HOST_WIDE_INT sign
12983 = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
12984 op0 = simplify_gen_binary (AND, tmode,
12985 gen_lowpart (tmode, op0),
12986 gen_int_mode (sign, tmode));
12987 code = (code == LT) ? NE : EQ;
12988 break;
12989 }
12990
12991 /* If the only nonzero bits in OP0 and OP1 are those in the
12992 narrower mode and this is an equality or unsigned comparison,
12993 we can use the wider mode. Similarly for sign-extended
12994 values, in which case it is true for all comparisons. */
12995 zero_extended = ((code == EQ || code == NE
12996 || code == GEU || code == GTU
12997 || code == LEU || code == LTU)
12998 && (nonzero_bits (op0, tmode)
12999 & ~GET_MODE_MASK (mode)) == 0
13000 && ((CONST_INT_P (op1)
13001 || (nonzero_bits (op1, tmode)
13002 & ~GET_MODE_MASK (mode)) == 0)));
13003
13004 if (zero_extended
13005 || ((num_sign_bit_copies (op0, tmode)
13006 > (unsigned int) (GET_MODE_PRECISION (tmode)
13007 - GET_MODE_PRECISION (mode)))
13008 && (num_sign_bit_copies (op1, tmode)
13009 > (unsigned int) (GET_MODE_PRECISION (tmode)
13010 - GET_MODE_PRECISION (mode)))))
13011 {
13012 /* If OP0 is an AND and we don't have an AND in MODE either,
13013 make a new AND in the proper mode. */
13014 if (GET_CODE (op0) == AND
13015 && !have_insn_for (AND, mode))
13016 op0 = simplify_gen_binary (AND, tmode,
13017 gen_lowpart (tmode,
13018 XEXP (op0, 0)),
13019 gen_lowpart (tmode,
13020 XEXP (op0, 1)));
13021 else
13022 {
13023 if (zero_extended)
13024 {
13025 op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
13026 op0, mode);
13027 op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
13028 op1, mode);
13029 }
13030 else
13031 {
13032 op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
13033 op0, mode);
13034 op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
13035 op1, mode);
13036 }
13037 break;
13038 }
13039 }
13040 }
13041 }
13042
13043 /* We may have changed the comparison operands. Re-canonicalize. */
13044 if (swap_commutative_operands_p (op0, op1))
13045 {
13046 std::swap (op0, op1);
13047 code = swap_condition (code);
13048 }
13049
13050 /* If this machine only supports a subset of valid comparisons, see if we
13051 can convert an unsupported one into a supported one. */
13052 target_canonicalize_comparison (&code, &op0, &op1, 0);
13053
13054 *pop0 = op0;
13055 *pop1 = op1;
13056
13057 return code;
13058 }
13059 \f
13060 /* Utility function for record_value_for_reg.  Count the number of
13061 rtxs in X.  */
13062 static int
13063 count_rtxs (rtx x)
13064 {
13065 enum rtx_code code = GET_CODE (x);
13066 const char *fmt;
13067 int i, j, ret = 1;
13068
13069 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
13070 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
13071 {
13072 rtx x0 = XEXP (x, 0);
13073 rtx x1 = XEXP (x, 1);
13074
13075 if (x0 == x1)
13076 return 1 + 2 * count_rtxs (x0);
13077
13078 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
13079 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
13080 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13081 return 2 + 2 * count_rtxs (x0)
13082 + count_rtxs (x0 == XEXP (x1, 0)
13083 ? XEXP (x1, 1) : XEXP (x1, 0));
13084
13085 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
13086 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
13087 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13088 return 2 + 2 * count_rtxs (x1)
13089 + count_rtxs (x1 == XEXP (x0, 0)
13090 ? XEXP (x0, 1) : XEXP (x0, 0));
13091 }
13092
13093 fmt = GET_RTX_FORMAT (code);
13094 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13095 if (fmt[i] == 'e')
13096 ret += count_rtxs (XEXP (x, i));
13097 else if (fmt[i] == 'E')
13098 for (j = 0; j < XVECLEN (x, i); j++)
13099 ret += count_rtxs (XVECEXP (x, i, j));
13100
13101 return ret;
13102 }
13103 \f
13104 /* Utility function for the following routine.  Called when X is part of a value
13105 being stored into last_set_value. Sets last_set_table_tick
13106 for each register mentioned. Similar to mention_regs in cse.cc */
13107
13108 static void
13109 update_table_tick (rtx x)
13110 {
13111 enum rtx_code code = GET_CODE (x);
13112 const char *fmt = GET_RTX_FORMAT (code);
13113 int i, j;
13114
13115 if (code == REG)
13116 {
13117 unsigned int regno = REGNO (x);
13118 unsigned int endregno = END_REGNO (x);
13119 unsigned int r;
13120
13121 for (r = regno; r < endregno; r++)
13122 {
13123 reg_stat_type *rsp = &reg_stat[r];
13124 rsp->last_set_table_tick = label_tick;
13125 }
13126
13127 return;
13128 }
13129
13130 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13131 if (fmt[i] == 'e')
13132 {
13133 /* Check for identical subexpressions.  If x contains
13134 identical subexpressions we only have to traverse one of
13135 them.  */
13136 if (i == 0 && ARITHMETIC_P (x))
13137 {
13138 /* Note that at this point x1 has already been
13139 processed. */
13140 rtx x0 = XEXP (x, 0);
13141 rtx x1 = XEXP (x, 1);
13142
13143 /* If x0 and x1 are identical then there is no need to
13144 process x0. */
13145 if (x0 == x1)
13146 break;
13147
13148 /* If x0 is identical to a subexpression of x1 then while
13149 processing x1, x0 has already been processed. Thus we
13150 are done with x. */
13151 if (ARITHMETIC_P (x1)
13152 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13153 break;
13154
13155 /* If x1 is identical to a subexpression of x0 then we
13156 still have to process the rest of x0. */
13157 if (ARITHMETIC_P (x0)
13158 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13159 {
13160 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
13161 break;
13162 }
13163 }
13164
13165 update_table_tick (XEXP (x, i));
13166 }
13167 else if (fmt[i] == 'E')
13168 for (j = 0; j < XVECLEN (x, i); j++)
13169 update_table_tick (XVECEXP (x, i, j));
13170 }
13171
13172 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
13173 are saying that the register is clobbered and we no longer know its
13174 value. If INSN is zero, don't update reg_stat[].last_set; this is
13175 only permitted with VALUE also zero and is used to invalidate the
13176 register. */
13177
13178 static void
13179 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
13180 {
13181 unsigned int regno = REGNO (reg);
13182 unsigned int endregno = END_REGNO (reg);
13183 unsigned int i;
13184 reg_stat_type *rsp;
13185
13186 /* If VALUE contains REG and we have a previous value for REG, substitute
13187 the previous value. */
13188 if (value && insn && reg_overlap_mentioned_p (reg, value))
13189 {
13190 rtx tem;
13191
13192 /* Set things up so get_last_value is allowed to see anything set up to
13193 our insn. */
13194 subst_low_luid = DF_INSN_LUID (insn);
13195 tem = get_last_value (reg);
13196
13197 /* If TEM is simply a binary operation with two CLOBBERs as operands,
13198 it isn't going to be useful and will take a lot of time to process,
13199 so just use the CLOBBER. */
13200
13201 if (tem)
13202 {
13203 if (ARITHMETIC_P (tem)
13204 && GET_CODE (XEXP (tem, 0)) == CLOBBER
13205 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
13206 tem = XEXP (tem, 0);
13207 else if (count_occurrences (value, reg, 1) >= 2)
13208 {
13209 /* If there are two or more occurrences of REG in VALUE,
13210 prevent the value from growing too much. */
13211 if (count_rtxs (tem) > param_max_last_value_rtl)
13212 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
13213 }
13214
13215 value = replace_rtx (copy_rtx (value), reg, tem);
13216 }
13217 }
13218
13219 /* For each register modified, show we don't know its value, that
13220 we don't know about its bitwise content, that its value has been
13221 updated, and that we don't know the location of the death of the
13222 register. */
13223 for (i = regno; i < endregno; i++)
13224 {
13225 rsp = &reg_stat[i];
13226
13227 if (insn)
13228 rsp->last_set = insn;
13229
13230 rsp->last_set_value = 0;
13231 rsp->last_set_mode = VOIDmode;
13232 rsp->last_set_nonzero_bits = 0;
13233 rsp->last_set_sign_bit_copies = 0;
13234 rsp->last_death = 0;
13235 rsp->truncated_to_mode = VOIDmode;
13236 }
13237
13238 /* Mark registers that are being referenced in this value. */
13239 if (value)
13240 update_table_tick (value);
13241
13242 /* Now update the status of each register being set.
13243 If someone is using this register in this block, set this register
13244 to invalid since we will get confused between the two lives in this
13245 basic block. This makes using this register always invalid. In cse, we
13246 scan the table to invalidate all entries using this register, but this
13247 is too much work for us. */
13248
13249 for (i = regno; i < endregno; i++)
13250 {
13251 rsp = &reg_stat[i];
13252 rsp->last_set_label = label_tick;
13253 if (!insn
13254 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
13255 rsp->last_set_invalid = 1;
13256 else
13257 rsp->last_set_invalid = 0;
13258 }
13259
13260 /* The value being assigned might refer to X (like in "x++;"). In that
13261 case, we must replace it with (clobber (const_int 0)) to prevent
13262 infinite loops. */
13263 rsp = &reg_stat[regno];
13264 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
13265 {
13266 value = copy_rtx (value);
13267 if (!get_last_value_validate (&value, insn, label_tick, 1))
13268 value = 0;
13269 }
13270
13271 /* For the main register being modified, update the value, the mode, the
13272 nonzero bits, and the number of sign bit copies. */
13273
13274 rsp->last_set_value = value;
13275
13276 if (value)
13277 {
13278 machine_mode mode = GET_MODE (reg);
13279 subst_low_luid = DF_INSN_LUID (insn);
13280 rsp->last_set_mode = mode;
13281 if (GET_MODE_CLASS (mode) == MODE_INT
13282 && HWI_COMPUTABLE_MODE_P (mode))
13283 mode = nonzero_bits_mode;
13284 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
13285 rsp->last_set_sign_bit_copies
13286 = num_sign_bit_copies (value, GET_MODE (reg));
13287 }
13288 }
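
/* Illustrative example with hypothetical register numbers: for a
   self-referencing insn such as

     (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 1)))

   the value is first rewritten in terms of reg 100's previous
   contents via get_last_value; if that reference cannot be validated,
   it is replaced by (clobber (const_int 0)) so that the recorded
   value never mentions the register it describes.  */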
13289
13290 /* Called via note_stores from record_dead_and_set_regs to handle one
13291 SET or CLOBBER in an insn. DATA is the instruction in which the
13292 set is occurring. */
13293
13294 static void
13295 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13296 {
13297 rtx_insn *record_dead_insn = (rtx_insn *) data;
13298
13299 if (GET_CODE (dest) == SUBREG)
13300 dest = SUBREG_REG (dest);
13301
13302 if (!record_dead_insn)
13303 {
13304 if (REG_P (dest))
13305 record_value_for_reg (dest, NULL, NULL_RTX);
13306 return;
13307 }
13308
13309 if (REG_P (dest))
13310 {
13311 /* If we are setting the whole register, we know its value. Otherwise
13312 show that we don't know the value. We can handle a SUBREG if it's
13313 the low part, but we must be careful with paradoxical SUBREGs on
13314 RISC architectures because we cannot strip e.g. an extension around
13315 a load and record the naked load since the RTL middle-end considers
13316 that the upper bits are defined according to LOAD_EXTEND_OP. */
13317 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13318 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13319 else if (GET_CODE (setter) == SET
13320 && GET_CODE (SET_DEST (setter)) == SUBREG
13321 && SUBREG_REG (SET_DEST (setter)) == dest
13322 && known_le (GET_MODE_PRECISION (GET_MODE (dest)),
13323 BITS_PER_WORD)
13324 && subreg_lowpart_p (SET_DEST (setter)))
13325 record_value_for_reg (dest, record_dead_insn,
13326 WORD_REGISTER_OPERATIONS
13327 && word_register_operation_p (SET_SRC (setter))
13328 && paradoxical_subreg_p (SET_DEST (setter))
13329 ? SET_SRC (setter)
13330 : gen_lowpart (GET_MODE (dest),
13331 SET_SRC (setter)));
13332 else
13333 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13334 }
13335 else if (MEM_P (dest)
13336 /* Ignore pushes, they clobber nothing. */
13337 && ! push_operand (dest, GET_MODE (dest)))
13338 mem_last_set = DF_INSN_LUID (record_dead_insn);
13339 }
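
/* Illustrative case, hypothetical register numbers and assuming DImode
   is no wider than BITS_PER_WORD: a store through a lowpart SUBREG
   such as

     (set (subreg:SI (reg:DI 100) 0) (reg:SI 101))

   still records something useful for reg 100, namely
   gen_lowpart (DImode, (reg:SI 101)); only stores that fail the checks
   above make us forget the register's value entirely.  */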
13340
13341 /* Update the records of when each REG was most recently set or killed
13342 for the things done by INSN. This is the last thing done in processing
13343 INSN in the combiner loop.
13344
13345 We update reg_stat[], in particular fields last_set, last_set_value,
13346 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13347 last_death, and also the similar information mem_last_set (which insn
13348 most recently modified memory) and last_call_luid (which insn was the
13349 most recent subroutine call). */
13350
13351 static void
13352 record_dead_and_set_regs (rtx_insn *insn)
13353 {
13354 rtx link;
13355 unsigned int i;
13356
13357 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13358 {
13359 if (REG_NOTE_KIND (link) == REG_DEAD
13360 && REG_P (XEXP (link, 0)))
13361 {
13362 unsigned int regno = REGNO (XEXP (link, 0));
13363 unsigned int endregno = END_REGNO (XEXP (link, 0));
13364
13365 for (i = regno; i < endregno; i++)
13366 {
13367 reg_stat_type *rsp;
13368
13369 rsp = &reg_stat[i];
13370 rsp->last_death = insn;
13371 }
13372 }
13373 else if (REG_NOTE_KIND (link) == REG_INC)
13374 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13375 }
13376
13377 if (CALL_P (insn))
13378 {
13379 HARD_REG_SET callee_clobbers
13380 = insn_callee_abi (insn).full_and_partial_reg_clobbers ();
13381 hard_reg_set_iterator hrsi;
13382 EXECUTE_IF_SET_IN_HARD_REG_SET (callee_clobbers, 0, i, hrsi)
13383 {
13384 reg_stat_type *rsp;
13385
13386 /* ??? We could try to preserve some information from the last
13387 set of register I if the call doesn't actually clobber
13388 (reg:last_set_mode I), which might be true for ABIs with
13389 partial clobbers. However, it would be difficult to
13390 update last_set_nonzero_bits and last_sign_bit_copies
13391 to account for the part of I that actually was clobbered.
13392 It wouldn't help much anyway, since we rarely see this
13393 situation before RA. */
13394 rsp = &reg_stat[i];
13395 rsp->last_set_invalid = 1;
13396 rsp->last_set = insn;
13397 rsp->last_set_value = 0;
13398 rsp->last_set_mode = VOIDmode;
13399 rsp->last_set_nonzero_bits = 0;
13400 rsp->last_set_sign_bit_copies = 0;
13401 rsp->last_death = 0;
13402 rsp->truncated_to_mode = VOIDmode;
13403 }
13404
13405 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13406
13407 /* We can't combine into a call pattern. Remember, though, that
13408 the return value register is set at this LUID. We could
13409 still replace a register with the return value from the
13410 wrong subroutine call! */
13411 note_stores (insn, record_dead_and_set_regs_1, NULL_RTX);
13412 }
13413 else
13414 note_stores (insn, record_dead_and_set_regs_1, insn);
13415 }
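
/* For example, on a hypothetical target whose ABI clobbers hard
   register r0 across calls, reaching a CALL_INSN invalidates
   everything recorded about r0 -- last_set_value, nonzero bits and
   sign-bit copies -- and advances last_call_luid so that later
   substitutions will not move a value across the call.  */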
13416
13417 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13418 register present in the SUBREG, so for each such SUBREG go back and
13419 adjust nonzero and sign bit information of the registers that are
13420 known to have some zero/sign bits set.
13421
13422 This is needed because when combine blows the SUBREGs away, the
13423 information on zero/sign bits is lost and further combines can be
13424 missed because of that. */
13425
13426 static void
13427 record_promoted_value (rtx_insn *insn, rtx subreg)
13428 {
13429 struct insn_link *links;
13430 rtx set;
13431 unsigned int regno = REGNO (SUBREG_REG (subreg));
13432 machine_mode mode = GET_MODE (subreg);
13433
13434 if (!HWI_COMPUTABLE_MODE_P (mode))
13435 return;
13436
13437 for (links = LOG_LINKS (insn); links;)
13438 {
13439 reg_stat_type *rsp;
13440
13441 insn = links->insn;
13442 set = single_set (insn);
13443
13444 if (! set || !REG_P (SET_DEST (set))
13445 || REGNO (SET_DEST (set)) != regno
13446 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13447 {
13448 links = links->next;
13449 continue;
13450 }
13451
13452 rsp = &reg_stat[regno];
13453 if (rsp->last_set == insn)
13454 {
13455 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13456 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13457 }
13458
13459 if (REG_P (SET_SRC (set)))
13460 {
13461 regno = REGNO (SET_SRC (set));
13462 links = LOG_LINKS (insn);
13463 }
13464 else
13465 break;
13466 }
13467 }
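
/* Illustrative example, hypothetical promotion: if reg 100 is an
   SImode pseudo holding a zero-extended QImode value, a use such as

     (subreg:QI (reg:SI 100) 0)

   with SUBREG_PROMOTED_VAR_P set lets us mask the recorded
   last_set_nonzero_bits of reg 100 down to GET_MODE_MASK (QImode),
   recovering information that deleting the SUBREG would lose.  */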
13468
13469 /* Check if X, a register, is known to contain a value already
13470 truncated to MODE. In this case we can use a subreg to refer to
13471 the truncated value even though in the generic case we would need
13472 an explicit truncation. */
13473
13474 static bool
13475 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13476 {
13477 reg_stat_type *rsp = &reg_stat[REGNO (x)];
13478 machine_mode truncated = rsp->truncated_to_mode;
13479
13480 if (truncated == 0
13481 || rsp->truncation_label < label_tick_ebb_start)
13482 return false;
13483 if (!partial_subreg_p (mode, truncated))
13484 return true;
13485 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13486 return true;
13487 return false;
13488 }
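
/* Worked example, illustrative only: if reg 100 was recorded in this
   EBB as truncated to QImode, a request for HImode succeeds through
   the partial_subreg_p test -- a value already narrowed to QImode is
   also a valid HImode value -- so (subreg:HI (reg 100) 0) may stand
   in for an explicit TRUNCATE.  */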
13489
13490 /* If X is a hard reg or a subreg record the mode that the register is
13491 accessed in. For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
13492 able to turn a truncate into a subreg using this information. Return true
13493 if traversing X is complete. */
13494
13495 static bool
13496 record_truncated_value (rtx x)
13497 {
13498 machine_mode truncated_mode;
13499 reg_stat_type *rsp;
13500
13501 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13502 {
13503 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13504 truncated_mode = GET_MODE (x);
13505
13506 if (!partial_subreg_p (truncated_mode, original_mode))
13507 return true;
13508
13510 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13511 return true;
13512
13513 x = SUBREG_REG (x);
13514 }
13515 /* ??? For hard-regs we now record everything. We might be able to
13516 optimize this using last_set_mode. */
13517 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13518 truncated_mode = GET_MODE (x);
13519 else
13520 return false;
13521
13522 rsp = &reg_stat[REGNO (x)];
13523 if (rsp->truncated_to_mode == 0
13524 || rsp->truncation_label < label_tick_ebb_start
13525 || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
13526 {
13527 rsp->truncated_to_mode = truncated_mode;
13528 rsp->truncation_label = label_tick;
13529 }
13530
13531 return true;
13532 }
13533
13534 /* Callback for note_uses. Find hardregs and subregs of pseudos and
13535 the modes they are used in. This can help turning TRUNCATEs into
13536 SUBREGs. */
13537
13538 static void
13539 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13540 {
13541 subrtx_var_iterator::array_type array;
13542 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13543 if (record_truncated_value (*iter))
13544 iter.skip_subrtxes ();
13545 }
13546
13547 /* Scan X for promoted SUBREGs. For each one found,
13548 note what it implies to the registers used in it. */
13549
13550 static void
13551 check_promoted_subreg (rtx_insn *insn, rtx x)
13552 {
13553 if (GET_CODE (x) == SUBREG
13554 && SUBREG_PROMOTED_VAR_P (x)
13555 && REG_P (SUBREG_REG (x)))
13556 record_promoted_value (insn, x);
13557 else
13558 {
13559 const char *format = GET_RTX_FORMAT (GET_CODE (x));
13560 int i, j;
13561
13562 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13563 switch (format[i])
13564 {
13565 case 'e':
13566 check_promoted_subreg (insn, XEXP (x, i));
13567 break;
13568 case 'V':
13569 case 'E':
13570 if (XVEC (x, i) != 0)
13571 for (j = 0; j < XVECLEN (x, i); j++)
13572 check_promoted_subreg (insn, XVECEXP (x, i, j));
13573 break;
13574 }
13575 }
13576 }
13577 \f
13578 /* Verify that all the registers and memory references mentioned in *LOC are
13579 still valid. *LOC was part of a value set in INSN when label_tick was
13580 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
13581 the invalid references with (clobber (const_int 0)) and return 1. This
13582 replacement is useful because we often can get useful information about
13583 the form of a value (e.g., if it was produced by a shift that always
13584 produces -1 or 0) even though we don't know exactly what registers it
13585 was produced from. */
13586
13587 static int
13588 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13589 {
13590 rtx x = *loc;
13591 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13592 int len = GET_RTX_LENGTH (GET_CODE (x));
13593 int i, j;
13594
13595 if (REG_P (x))
13596 {
13597 unsigned int regno = REGNO (x);
13598 unsigned int endregno = END_REGNO (x);
13599 unsigned int j;
13600
13601 for (j = regno; j < endregno; j++)
13602 {
13603 reg_stat_type *rsp = &reg_stat[j];
13604 if (rsp->last_set_invalid
13605 /* If this is a pseudo-register that was only set once and not
13606 live at the beginning of the function, it is always valid. */
13607 || (! (regno >= FIRST_PSEUDO_REGISTER
13608 && regno < reg_n_sets_max
13609 && REG_N_SETS (regno) == 1
13610 && (!REGNO_REG_SET_P
13611 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13612 regno)))
13613 && rsp->last_set_label > tick))
13614 {
13615 if (replace)
13616 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13617 return replace;
13618 }
13619 }
13620
13621 return 1;
13622 }
13623 /* If this is a memory reference, make sure that there were no stores after
13624 it that might have clobbered the value. We don't have alias info, so we
13625 assume any store invalidates it. Moreover, we only have local UIDs, so
13626 we also assume that there were stores in the intervening basic blocks. */
13627 else if (MEM_P (x) && !MEM_READONLY_P (x)
13628 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13629 {
13630 if (replace)
13631 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13632 return replace;
13633 }
13634
13635 for (i = 0; i < len; i++)
13636 {
13637 if (fmt[i] == 'e')
13638 {
13639 /* Check for identical subexpressions. If x contains
13640 identical subexpressions we only have to traverse one of
13641 them. */
13642 if (i == 1 && ARITHMETIC_P (x))
13643 {
13644 /* Note that at this point x0 has already been checked
13645 and found valid. */
13646 rtx x0 = XEXP (x, 0);
13647 rtx x1 = XEXP (x, 1);
13648
13649 /* If x0 and x1 are identical then x is also valid. */
13650 if (x0 == x1)
13651 return 1;
13652
13653 /* If x1 is identical to a subexpression of x0 then
13654 while checking x0, x1 has already been checked. Thus
13655 it is valid and so is x. */
13656 if (ARITHMETIC_P (x0)
13657 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13658 return 1;
13659
13660 /* If x0 is identical to a subexpression of x1 then x is
13661 valid iff the rest of x1 is valid. */
13662 if (ARITHMETIC_P (x1)
13663 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13664 return
13665 get_last_value_validate (&XEXP (x1,
13666 x0 == XEXP (x1, 0) ? 1 : 0),
13667 insn, tick, replace);
13668 }
13669
13670 if (get_last_value_validate (&XEXP (x, i), insn, tick,
13671 replace) == 0)
13672 return 0;
13673 }
13674 else if (fmt[i] == 'E')
13675 for (j = 0; j < XVECLEN (x, i); j++)
13676 if (get_last_value_validate (&XVECEXP (x, i, j),
13677 insn, tick, replace) == 0)
13678 return 0;
13679 }
13680
13681 /* If we haven't found a reason for it to be invalid, it is valid. */
13682 return 1;
13683 }
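
/* Illustrative example: suppose the recorded value of some register is

     (ashiftrt:SI (reg:SI 101) (const_int 31))

   and reg 101 has been set again since.  With REPLACE nonzero the
   stale reference becomes

     (ashiftrt:SI (clobber (const_int 0)) (const_int 31))

   which still tells us the value is either -1 or 0, even though the
   source register can no longer be named.  */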
13684
13685 /* Get the last value assigned to X, if known. Some registers
13686 in the value may be replaced with (clobber (const_int 0)) if their value
13687 is no longer known reliably. */
13688
13689 static rtx
13690 get_last_value (const_rtx x)
13691 {
13692 unsigned int regno;
13693 rtx value;
13694 reg_stat_type *rsp;
13695
13696 /* If this is a non-paradoxical SUBREG, get the value of its operand and
13697 then convert it to the desired mode. If this is a paradoxical SUBREG,
13698 we cannot predict what values the "extra" bits might have. */
13699 if (GET_CODE (x) == SUBREG
13700 && subreg_lowpart_p (x)
13701 && !paradoxical_subreg_p (x)
13702 && (value = get_last_value (SUBREG_REG (x))) != 0)
13703 return gen_lowpart (GET_MODE (x), value);
13704
13705 if (!REG_P (x))
13706 return 0;
13707
13708 regno = REGNO (x);
13709 rsp = &reg_stat[regno];
13710 value = rsp->last_set_value;
13711
13712 /* If we don't have a value, or if it isn't for this basic block and
13713 it's either a hard register, set more than once, or it's live
13714 at the beginning of the function, return 0.
13715
13716 Because if it's not live at the beginning of the function then the reg
13717 is always set before being used (is never used without being set).
13718 And, if it's set only once, and it's always set before use, then all
13719 uses must have the same last value, even if it's not from this basic
13720 block. */
13721
13722 if (value == 0
13723 || (rsp->last_set_label < label_tick_ebb_start
13724 && (regno < FIRST_PSEUDO_REGISTER
13725 || regno >= reg_n_sets_max
13726 || REG_N_SETS (regno) != 1
13727 || REGNO_REG_SET_P
13728 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13729 return 0;
13730
13731 /* If the value was set in a later insn than the ones we are processing,
13732 we can't use it even if the register was only set once. */
13733 if (rsp->last_set_label == label_tick
13734 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13735 return 0;
13736
13737 /* If fewer bits were set than what we are asked for now, we cannot use
13738 the value. */
13739 if (maybe_lt (GET_MODE_PRECISION (rsp->last_set_mode),
13740 GET_MODE_PRECISION (GET_MODE (x))))
13741 return 0;
13742
13743 /* If the value has all its registers valid, return it. */
13744 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13745 return value;
13746
13747 /* Otherwise, make a copy and replace any invalid register with
13748 (clobber (const_int 0)). If that fails for some reason, return 0. */
13749
13750 value = copy_rtx (value);
13751 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13752 return value;
13753
13754 return 0;
13755 }
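
/* Illustrative example: right after

     (set (reg:SI 100) (const_int 7))

   get_last_value ((reg:SI 100)) returns (const_int 7); asking for a
   lowpart (subreg:HI (reg:SI 100) 0) returns the HImode lowpart of
   that constant, while a paradoxical SUBREG of reg 100 yields 0
   because the extra bits are unpredictable.  */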
13756 \f
13757 /* Define three variables used for communication between the following
13758 routines. */
13759
13760 static unsigned int reg_dead_regno, reg_dead_endregno;
13761 static int reg_dead_flag;
13762 rtx reg_dead_reg;
13763
13764 /* Function called via note_stores from reg_dead_at_p.
13765
13766 If DEST is within [reg_dead_regno, reg_dead_endregno), set
13767 reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET. */
13768
13769 static void
13770 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13771 {
13772 unsigned int regno, endregno;
13773
13774 if (!REG_P (dest))
13775 return;
13776
13777 regno = REGNO (dest);
13778 endregno = END_REGNO (dest);
13779 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13780 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13781 }
13782
13783 /* Return nonzero if REG is known to be dead at INSN.
13784
13785 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
13786 referencing REG, it is dead. If we hit a SET referencing REG, it is
13787 live. Otherwise, see if it is live or dead at the start of the basic
13788 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
13789 must be assumed to be always live. */
13790
13791 static int
13792 reg_dead_at_p (rtx reg, rtx_insn *insn)
13793 {
13794 basic_block block;
13795 unsigned int i;
13796
13797 /* Set variables for reg_dead_at_p_1. */
13798 reg_dead_regno = REGNO (reg);
13799 reg_dead_endregno = END_REGNO (reg);
13800 reg_dead_reg = reg;
13801
13802 reg_dead_flag = 0;
13803
13804 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
13805 we allow the machine description to decide whether use-and-clobber
13806 patterns are OK. */
13807 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13808 {
13809 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13810 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13811 return 0;
13812 }
13813
13814 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13815 beginning of basic block. */
13816 block = BLOCK_FOR_INSN (insn);
13817 for (;;)
13818 {
13819 if (INSN_P (insn))
13820 {
13821 if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13822 return 1;
13823
13824 note_stores (insn, reg_dead_at_p_1, NULL);
13825 if (reg_dead_flag)
13826 return reg_dead_flag == 1 ? 1 : 0;
13827
13828 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13829 return 1;
13830 }
13831
13832 if (insn == BB_HEAD (block))
13833 break;
13834
13835 insn = PREV_INSN (insn);
13836 }
13837
13838 /* Look at live-in sets for the basic block that we were in. */
13839 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13840 if (REGNO_REG_SET_P (df_get_live_in (block), i))
13841 return 0;
13842
13843 return 1;
13844 }
13845 \f
13846 /* Note hard registers in X that are used. */
13847
13848 static void
13849 mark_used_regs_combine (rtx x)
13850 {
13851 RTX_CODE code = GET_CODE (x);
13852 unsigned int regno;
13853 int i;
13854
13855 switch (code)
13856 {
13857 case LABEL_REF:
13858 case SYMBOL_REF:
13859 case CONST:
13860 CASE_CONST_ANY:
13861 case PC:
13862 case ADDR_VEC:
13863 case ADDR_DIFF_VEC:
13864 case ASM_INPUT:
13865 return;
13866
13867 case CLOBBER:
13868 /* If we are clobbering a MEM, mark any hard registers inside the
13869 address as used. */
13870 if (MEM_P (XEXP (x, 0)))
13871 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13872 return;
13873
13874 case REG:
13875 regno = REGNO (x);
13876 /* A hard reg in a wide mode may really be multiple registers.
13877 If so, mark all of them just like the first. */
13878 if (regno < FIRST_PSEUDO_REGISTER)
13879 {
13880 /* None of this applies to the stack, frame or arg pointers. */
13881 if (regno == STACK_POINTER_REGNUM
13882 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13883 && regno == HARD_FRAME_POINTER_REGNUM)
13884 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13885 && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13886 || regno == FRAME_POINTER_REGNUM)
13887 return;
13888
13889 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13890 }
13891 return;
13892
13893 case SET:
13894 {
13895 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13896 the address. */
13897 rtx testreg = SET_DEST (x);
13898
13899 while (GET_CODE (testreg) == SUBREG
13900 || GET_CODE (testreg) == ZERO_EXTRACT
13901 || GET_CODE (testreg) == STRICT_LOW_PART)
13902 testreg = XEXP (testreg, 0);
13903
13904 if (MEM_P (testreg))
13905 mark_used_regs_combine (XEXP (testreg, 0));
13906
13907 mark_used_regs_combine (SET_SRC (x));
13908 }
13909 return;
13910
13911 default:
13912 break;
13913 }
13914
13915 /* Recursively scan the operands of this expression. */
13916
13917 {
13918 const char *fmt = GET_RTX_FORMAT (code);
13919
13920 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13921 {
13922 if (fmt[i] == 'e')
13923 mark_used_regs_combine (XEXP (x, i));
13924 else if (fmt[i] == 'E')
13925 {
13926 int j;
13927
13928 for (j = 0; j < XVECLEN (x, i); j++)
13929 mark_used_regs_combine (XVECEXP (x, i, j));
13930 }
13931 }
13932 }
13933 }
13934 \f
13935 /* Remove register number REGNO from the dead registers list of INSN.
13936
13937 Return the note used to record the death, if there was one. */
13938
13939 rtx
13940 remove_death (unsigned int regno, rtx_insn *insn)
13941 {
13942 rtx note = find_regno_note (insn, REG_DEAD, regno);
13943
13944 if (note)
13945 remove_note (insn, note);
13946
13947 return note;
13948 }
13949
13950 /* For each register (hardware or pseudo) used within expression X, if its
13951 death is in an instruction with luid between FROM_LUID (inclusive) and
13952 TO_INSN (exclusive), put a REG_DEAD note for that register in the
13953 list headed by PNOTES.
13954
13955 That said, don't move registers killed by maybe_kill_insn.
13956
13957 This is done when X is being merged by combination into TO_INSN. These
13958 notes will then be distributed as needed. */
13959
13960 static void
13961 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
13962 rtx *pnotes)
13963 {
13964 const char *fmt;
13965 int len, i;
13966 enum rtx_code code = GET_CODE (x);
13967
13968 if (code == REG)
13969 {
13970 unsigned int regno = REGNO (x);
13971 rtx_insn *where_dead = reg_stat[regno].last_death;
13972
13973 /* If we do not know where the register died, it may still die between
13974 FROM_LUID and TO_INSN. If so, find it. This is PR83304. */
13975 if (!where_dead || DF_INSN_LUID (where_dead) >= DF_INSN_LUID (to_insn))
13976 {
13977 rtx_insn *insn = prev_real_nondebug_insn (to_insn);
13978 while (insn
13979 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (to_insn)
13980 && DF_INSN_LUID (insn) >= from_luid)
13981 {
13982 if (dead_or_set_regno_p (insn, regno))
13983 {
13984 if (find_regno_note (insn, REG_DEAD, regno))
13985 where_dead = insn;
13986 break;
13987 }
13988
13989 insn = prev_real_nondebug_insn (insn);
13990 }
13991 }
13992
13993 /* Don't move the register if it gets killed in between from and to. */
13994 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
13995 && ! reg_referenced_p (x, maybe_kill_insn))
13996 return;
13997
13998 if (where_dead
13999 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
14000 && DF_INSN_LUID (where_dead) >= from_luid
14001 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
14002 {
14003 rtx note = remove_death (regno, where_dead);
14004
14005 /* It is possible for the call above to return 0. This can occur
14006 when last_death points to I2 or I1 that we combined with.
14007 In that case make a new note.
14008
14009 We must also check for the case where X is a hard register
14010 and NOTE is a death note for a range of hard registers
14011 including X. In that case, we must put REG_DEAD notes for
14012 the remaining registers in place of NOTE. */
14013
14014 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
14015 && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
14016 {
14017 unsigned int deadregno = REGNO (XEXP (note, 0));
14018 unsigned int deadend = END_REGNO (XEXP (note, 0));
14019 unsigned int ourend = END_REGNO (x);
14020 unsigned int i;
14021
14022 for (i = deadregno; i < deadend; i++)
14023 if (i < regno || i >= ourend)
14024 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
14025 }
14026
14027 /* If we didn't find any note, or if we found a REG_DEAD note that
14028 covers only part of the given reg, and we have a multi-reg hard
14029 register, then to be safe we must check for REG_DEAD notes
14030 for each register other than the first. They could have
14031 their own REG_DEAD notes lying around. */
14032 else if ((note == 0
14033 || (note != 0
14034 && partial_subreg_p (GET_MODE (XEXP (note, 0)),
14035 GET_MODE (x))))
14036 && regno < FIRST_PSEUDO_REGISTER
14037 && REG_NREGS (x) > 1)
14038 {
14039 unsigned int ourend = END_REGNO (x);
14040 unsigned int i, offset;
14041 rtx oldnotes = 0;
14042
14043 if (note)
14044 offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
14045 else
14046 offset = 1;
14047
14048 for (i = regno + offset; i < ourend; i++)
14049 move_deaths (regno_reg_rtx[i],
14050 maybe_kill_insn, from_luid, to_insn, &oldnotes);
14051 }
14052
14053 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
14054 {
14055 XEXP (note, 1) = *pnotes;
14056 *pnotes = note;
14057 }
14058 else
14059 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
14060 }
14061
14062 return;
14063 }
14064
14065 else if (GET_CODE (x) == SET)
14066 {
14067 rtx dest = SET_DEST (x);
14068
14069 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
14070
14071 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
14072 that accesses one word of a multi-word item, some
14073 piece of every register in the expression is used by
14074 this insn, so remove any old death. */
14075 /* ??? So why do we test for equality of the sizes? */
14076
14077 if (GET_CODE (dest) == ZERO_EXTRACT
14078 || GET_CODE (dest) == STRICT_LOW_PART
14079 || (GET_CODE (dest) == SUBREG
14080 && !read_modify_subreg_p (dest)))
14081 {
14082 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
14083 return;
14084 }
14085
14086 /* If this is some other SUBREG, we know it replaces the entire
14087 value, so use that as the destination. */
14088 if (GET_CODE (dest) == SUBREG)
14089 dest = SUBREG_REG (dest);
14090
14091 /* If this is a MEM, adjust deaths of anything used in the address.
14092 For a REG (the only other possibility), the entire value is
14093 being replaced so the old value is not used in this insn. */
14094
14095 if (MEM_P (dest))
14096 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
14097 to_insn, pnotes);
14098 return;
14099 }
14100
14101 else if (GET_CODE (x) == CLOBBER)
14102 return;
14103
14104 len = GET_RTX_LENGTH (code);
14105 fmt = GET_RTX_FORMAT (code);
14106
14107 for (i = 0; i < len; i++)
14108 {
14109 if (fmt[i] == 'E')
14110 {
14111 int j;
14112 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
14113 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
14114 to_insn, pnotes);
14115 }
14116 else if (fmt[i] == 'e')
14117 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
14118 }
14119 }
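
/* Illustrative hard-register case, hypothetical numbering: if a
   REG_DEAD note covers the pair (reg:DI 0) -- hard regs 0 and 1 on a
   32-bit target -- and the expression being merged only uses
   (reg:SI 0), the wide note is removed and a fresh REG_DEAD note for
   hard reg 1 is left at the old death site, so the untouched half
   keeps an accurate death.  */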
14120 \f
14121 /* Return 1 if X is the target of a bit-field assignment in BODY, the
14122 pattern of an insn. X must be a REG. */
14123
14124 static int
14125 reg_bitfield_target_p (rtx x, rtx body)
14126 {
14127 int i;
14128
14129 if (GET_CODE (body) == SET)
14130 {
14131 rtx dest = SET_DEST (body);
14132 rtx target;
14133 unsigned int regno, tregno, endregno, endtregno;
14134
14135 if (GET_CODE (dest) == ZERO_EXTRACT)
14136 target = XEXP (dest, 0);
14137 else if (GET_CODE (dest) == STRICT_LOW_PART)
14138 target = SUBREG_REG (XEXP (dest, 0));
14139 else
14140 return 0;
14141
14142 if (GET_CODE (target) == SUBREG)
14143 target = SUBREG_REG (target);
14144
14145 if (!REG_P (target))
14146 return 0;
14147
14148 tregno = REGNO (target), regno = REGNO (x);
14149 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
14150 return target == x;
14151
14152 endtregno = end_hard_regno (GET_MODE (target), tregno);
14153 endregno = end_hard_regno (GET_MODE (x), regno);
14154
14155 return endregno > tregno && regno < endtregno;
14156 }
14157
14158 else if (GET_CODE (body) == PARALLEL)
14159 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
14160 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
14161 return 1;
14162
14163 return 0;
14164 }
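
/* Illustrative example: for the insn body

     (set (zero_extract:SI (reg:SI 100) (const_int 3) (const_int 8))
          (reg:SI 101))

   reg_bitfield_target_p ((reg:SI 100), body) returns 1: the
   destination is only partially written, so distribute_notes must not
   treat such an insn as a plain set when placing REG_DEAD notes.  */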
14165 \f
14166 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
14167 as appropriate. I3 and I2 are the insns resulting from the combination
14168 insns including FROM (I2 may be zero).
14169
14170 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
14171 not need REG_DEAD notes because they are being substituted for. This
14172 saves searching in the most common cases.
14173
14174 Each note in the list is either ignored or placed on some insns, depending
14175 on the type of note. */
14176
14177 static void
14178 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
14179 rtx elim_i2, rtx elim_i1, rtx elim_i0)
14180 {
14181 rtx note, next_note;
14182 rtx tem_note;
14183 rtx_insn *tem_insn;
14184
14185 for (note = notes; note; note = next_note)
14186 {
14187 rtx_insn *place = 0, *place2 = 0;
14188
14189 next_note = XEXP (note, 1);
14190 switch (REG_NOTE_KIND (note))
14191 {
14192 case REG_BR_PROB:
14193 case REG_BR_PRED:
14194 /* Doesn't matter much where we put this, as long as it's somewhere.
14195 It is preferable to keep these notes on branches, which is most
14196 likely to be i3. */
14197 place = i3;
14198 break;
14199
14200 case REG_NON_LOCAL_GOTO:
14201 if (JUMP_P (i3))
14202 place = i3;
14203 else
14204 {
14205 gcc_assert (i2 && JUMP_P (i2));
14206 place = i2;
14207 }
14208 break;
14209
14210 case REG_EH_REGION:
14211 {
14212 /* The landing pad handling needs to be kept in sync with the
14213 prerequisite checking in try_combine. */
14214 int lp_nr = INTVAL (XEXP (note, 0));
14215 /* A REG_EH_REGION note transferring control can only ever come
14216 from i3. */
14217 if (lp_nr > 0)
14218 gcc_assert (from_insn == i3);
14219 /* We are making sure there is a single effective REG_EH_REGION
14220 note and it's valid to put it on i3. */
14221 if (!insn_could_throw_p (from_insn))
14222 /* Throw away stray notes on insns that can never throw. */
14223 ;
14224 else
14225 {
14226 if (CALL_P (i3))
14227 place = i3;
14228 else
14229 {
14230 gcc_assert (cfun->can_throw_non_call_exceptions);
14231 /* If i3 can still trap preserve the note, otherwise we've
14232 combined things such that we can now prove that the
14233 instructions can't trap. Drop the note in this case. */
14234 if (may_trap_p (i3))
14235 place = i3;
14236 }
14237 }
14238 break;
14239 }
14240
14241 case REG_ARGS_SIZE:
14242 /* ??? How to distribute between i3-i1. Assume i3 contains the
14243 entire adjustment. Assert i3 contains at least some adjust. */
14244 if (!noop_move_p (i3))
14245 {
14246 poly_int64 old_size, args_size = get_args_size (note);
14247 /* fixup_args_size_notes looks at REG_NORETURN note,
14248 so ensure the note is placed there first. */
14249 if (CALL_P (i3))
14250 {
14251 rtx *np;
14252 for (np = &next_note; *np; np = &XEXP (*np, 1))
14253 if (REG_NOTE_KIND (*np) == REG_NORETURN)
14254 {
14255 rtx n = *np;
14256 *np = XEXP (n, 1);
14257 XEXP (n, 1) = REG_NOTES (i3);
14258 REG_NOTES (i3) = n;
14259 break;
14260 }
14261 }
14262 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
14263 /* For !ACCUMULATE_OUTGOING_ARGS, emit_call_1 adds a
14264 REG_ARGS_SIZE note to all noreturn calls; allow that here. */
14265 gcc_assert (maybe_ne (old_size, args_size)
14266 || (CALL_P (i3)
14267 && !ACCUMULATE_OUTGOING_ARGS
14268 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
14269 }
14270 break;
14271
14272 case REG_NORETURN:
14273 case REG_SETJMP:
14274 case REG_TM:
14275 case REG_CALL_DECL:
14276 case REG_UNTYPED_CALL:
14277 case REG_CALL_NOCF_CHECK:
14278 /* These notes must remain with the call. It should not be
14279 possible for both I2 and I3 to be a call. */
14280 if (CALL_P (i3))
14281 place = i3;
14282 else
14283 {
14284 gcc_assert (i2 && CALL_P (i2));
14285 place = i2;
14286 }
14287 break;
14288
14289 case REG_UNUSED:
14290 /* Any clobbers for i3 may still exist, and so we must process
14291 REG_UNUSED notes from that insn.
14292
14293 Any clobbers from i2 or i1 can only exist if they were added by
14294 recog_for_combine. In that case, recog_for_combine created the
14295 necessary REG_UNUSED notes. Trying to keep any original
14296 REG_UNUSED notes from these insns can cause incorrect output
14297 if it is for the same register as the original i3 dest.
14298 In that case, we will notice that the register is set in i3,
14299 and then add a REG_UNUSED note for the destination of i3, which
14300 is wrong. However, it is possible to have REG_UNUSED notes from
14301 i2 or i1 for registers which were both used and clobbered, so
14302 we keep notes from i2 or i1 if they will turn into REG_DEAD
14303 notes. */
14304
14305 /* If this register is set or clobbered between FROM_INSN and I3,
14306 we should not create a note for it. */
14307 if (reg_set_between_p (XEXP (note, 0), from_insn, i3))
14308 break;
14309
14310 /* If this register is set or clobbered in I3, put the note there
14311 unless there is one already. */
14312 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14313 {
14314 if (from_insn != i3)
14315 break;
14316
14317 if (! (REG_P (XEXP (note, 0))
14318 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
14319 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
14320 place = i3;
14321 }
14322 /* Otherwise, if this register is used by I3, then this register
14323 now dies here, so we must put a REG_DEAD note here unless there
14324 is one already. */
14325 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14326 && ! (REG_P (XEXP (note, 0))
14327 ? find_regno_note (i3, REG_DEAD,
14328 REGNO (XEXP (note, 0)))
14329 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
14330 {
14331 PUT_REG_NOTE_KIND (note, REG_DEAD);
14332 place = i3;
14333 }
14334
14335 /* A SET or CLOBBER of the REG_UNUSED reg has been removed,
14336 but we can't tell which at this point. We must reset any
14337 expectations we had about the value that was previously
14338 stored in the reg. ??? Ideally, we'd adjust REG_N_SETS
14339 and, if appropriate, restore its previous value, but we
14340 don't have enough information for that at this point. */
14341 else
14342 {
14343 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14344
14345 /* Otherwise, if this register is now referenced in i2
14346 then the register used to be modified in one of the
14347 original insns. If it was i3 (say, in an unused
14348 parallel), it's now completely gone, so the note can
14349 be discarded. But if it was modified in i2, i1 or i0
14350 and we still reference it in i2, then we're
14351 referencing the previous value, and since the
14352 register was modified and REG_UNUSED, we know that
14353 the previous value is now dead. So, if we only
14354 reference the register in i2, we change the note to
14355 REG_DEAD, to reflect the previous value. However, if
14356 we're also setting or clobbering the register as
14357 scratch, we know (because the register was not
14358 referenced in i3) that it's unused, just as it was
14359 unused before, and we place the note in i2. */
14360 if (from_insn != i3 && i2 && INSN_P (i2)
14361 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14362 {
14363 if (!reg_set_p (XEXP (note, 0), PATTERN (i2)))
14364 PUT_REG_NOTE_KIND (note, REG_DEAD);
14365 if (! (REG_P (XEXP (note, 0))
14366 ? find_regno_note (i2, REG_NOTE_KIND (note),
14367 REGNO (XEXP (note, 0)))
14368 : find_reg_note (i2, REG_NOTE_KIND (note),
14369 XEXP (note, 0))))
14370 place = i2;
14371 }
14372 }
14373
14374 break;
14375
14376 case REG_EQUAL:
14377 case REG_EQUIV:
14378 case REG_NOALIAS:
14379 /* These notes say something about results of an insn. We can
14380 only support them if they used to be on I3 in which case they
14381 remain on I3. Otherwise they are ignored.
14382
14383 If the note refers to an expression that is not a constant, we
14384 must also ignore the note since we cannot tell whether the
14385 equivalence is still true. It might be possible to do
14386 slightly better than this (we only have a problem if I2DEST
14387 or I1DEST is present in the expression), but it doesn't
14388 seem worth the trouble. */
14389
14390 if (from_insn == i3
14391 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
14392 place = i3;
14393 break;
14394
14395 case REG_INC:
14396 /* These notes say something about how a register is used. They must
14397 be present on any use of the register in I2 or I3. */
14398 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14399 place = i3;
14400
14401 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14402 {
14403 if (place)
14404 place2 = i2;
14405 else
14406 place = i2;
14407 }
14408 break;
14409
14410 case REG_LABEL_TARGET:
14411 case REG_LABEL_OPERAND:
14412 /* This can show up in several ways -- either directly in the
14413 pattern, or hidden off in the constant pool with (or without?)
14414 a REG_EQUAL note. */
14415 /* ??? Ignore the without-reg_equal-note problem for now. */
14416 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14417 || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
14418 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14419 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
14420 place = i3;
14421
14422 if (i2
14423 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14424 || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
14425 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14426 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
14427 {
14428 if (place)
14429 place2 = i2;
14430 else
14431 place = i2;
14432 }
14433
14434 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
14435 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
14436 there. */
14437 if (place && JUMP_P (place)
14438 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14439 && (JUMP_LABEL (place) == NULL
14440 || JUMP_LABEL (place) == XEXP (note, 0)))
14441 {
14442 rtx label = JUMP_LABEL (place);
14443
14444 if (!label)
14445 JUMP_LABEL (place) = XEXP (note, 0);
14446 else if (LABEL_P (label))
14447 LABEL_NUSES (label)--;
14448 }
14449
14450 if (place2 && JUMP_P (place2)
14451 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14452 && (JUMP_LABEL (place2) == NULL
14453 || JUMP_LABEL (place2) == XEXP (note, 0)))
14454 {
14455 rtx label = JUMP_LABEL (place2);
14456
14457 if (!label)
14458 JUMP_LABEL (place2) = XEXP (note, 0);
14459 else if (LABEL_P (label))
14460 LABEL_NUSES (label)--;
14461 place2 = 0;
14462 }
14463 break;
14464
14465 case REG_NONNEG:
14466 /* This note says something about the value of a register prior
14467 to the execution of an insn. It is too much trouble to see
14468 if the note is still correct in all situations. It is better
14469 to simply delete it. */
14470 break;
14471
14472 case REG_DEAD:
14473 /* If we replaced the right hand side of FROM_INSN with a
14474 REG_EQUAL note, the original use of the dying register
14475 will not have been combined into I3 and I2. In such cases,
14476 FROM_INSN is guaranteed to be the first of the combined
14477 instructions, so we simply need to search back before
14478 FROM_INSN for the previous use or set of this register,
14479 then alter the notes there appropriately.
14480
14481 If the register is used as an input in I3, it dies there.
14482 Similarly for I2, if it is nonzero and adjacent to I3.
14483
14484 If the register is not used as an input in either I3 or I2
14485 and it is not one of the registers we were supposed to eliminate,
14486 there are two possibilities. We might have a non-adjacent I2
14487 or we might have somehow eliminated an additional register
14488 from a computation. For example, we might have had A & B where
14489 we discover that B will always be zero. In this case we will
14490 eliminate the reference to A.
14491
14492 In both cases, we must search to see if we can find a previous
14493 use of A and put the death note there. */
14494
14495 if (from_insn
14496 && from_insn == i2mod
14497 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
14498 tem_insn = from_insn;
14499 else
14500 {
14501 if (from_insn
14502 && CALL_P (from_insn)
14503 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
14504 place = from_insn;
14505 else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14506 {
14507 /* If the new I2 sets the same register that is marked
14508 dead in the note, we do not in general know where to
14509 put the note. One important case we _can_ handle is
14510 when the note comes from I3. */
14511 if (from_insn == i3)
14512 place = i3;
14513 else
14514 break;
14515 }
14516 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14517 place = i3;
14518 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
14519 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14520 place = i2;
14521 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
14522 && !(i2mod
14523 && reg_overlap_mentioned_p (XEXP (note, 0),
14524 i2mod_old_rhs)))
14525 || rtx_equal_p (XEXP (note, 0), elim_i1)
14526 || rtx_equal_p (XEXP (note, 0), elim_i0))
14527 break;
14528 tem_insn = i3;
14529 }
14530
14531 if (place == 0)
14532 {
14533 basic_block bb = this_basic_block;
14534
14535 for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14536 {
14537 if (!NONDEBUG_INSN_P (tem_insn))
14538 {
14539 if (tem_insn == BB_HEAD (bb))
14540 break;
14541 continue;
14542 }
14543
14544 /* If the register is being set at TEM_INSN, see if that is all
14545 TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this
14546 into a REG_UNUSED note instead. Don't delete sets to
14547 global register vars. */
14548 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14549 || !global_regs[REGNO (XEXP (note, 0))])
14550 && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14551 {
14552 rtx set = single_set (tem_insn);
14553 rtx inner_dest = 0;
14554
14555 if (set != 0)
14556 for (inner_dest = SET_DEST (set);
14557 (GET_CODE (inner_dest) == STRICT_LOW_PART
14558 || GET_CODE (inner_dest) == SUBREG
14559 || GET_CODE (inner_dest) == ZERO_EXTRACT);
14560 inner_dest = XEXP (inner_dest, 0))
14561 ;
14562
14563 /* Verify that it was the set, and not a clobber that
14564 modified the register.
14565
14566 If we cannot delete the setter due to side
14567 effects, mark the user with an UNUSED note instead
14568 of deleting it. */
14569
14570 if (set != 0 && ! side_effects_p (SET_SRC (set))
14571 && rtx_equal_p (XEXP (note, 0), inner_dest))
14572 {
14573 /* Move the notes and links of TEM_INSN elsewhere.
14574 This might delete other dead insns recursively.
14575 First set the pattern to something that won't use
14576 any register. */
14577 rtx old_notes = REG_NOTES (tem_insn);
14578
14579 PATTERN (tem_insn) = pc_rtx;
14580 REG_NOTES (tem_insn) = NULL;
14581
14582 distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14583 NULL_RTX, NULL_RTX, NULL_RTX);
14584 distribute_links (LOG_LINKS (tem_insn));
14585
14586 unsigned int regno = REGNO (XEXP (note, 0));
14587 reg_stat_type *rsp = &reg_stat[regno];
14588 if (rsp->last_set == tem_insn)
14589 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14590
14591 SET_INSN_DELETED (tem_insn);
14592 if (tem_insn == i2)
14593 i2 = NULL;
14594 }
14595 else
14596 {
14597 PUT_REG_NOTE_KIND (note, REG_UNUSED);
14598
14599 /* If there isn't already a REG_UNUSED note, put one
14600 here. Do not place a REG_DEAD note, even if
14601 the register is also used here; that would not
14602 match the algorithm used in lifetime analysis
14603 and can cause the consistency check in the
14604 scheduler to fail. */
14605 if (! find_regno_note (tem_insn, REG_UNUSED,
14606 REGNO (XEXP (note, 0))))
14607 place = tem_insn;
14608 break;
14609 }
14610 }
14611 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14612 || (CALL_P (tem_insn)
14613 && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14614 {
14615 place = tem_insn;
14616
14617 /* If we are doing a 3->2 combination, and we have a
14618 register which formerly died in i3 and was not used
14619 by i2, which now no longer dies in i3 and is used in
14620 i2 but does not die in i2, and place is between i2
14621 and i3, then we may need to move a link from place to
14622 i2. */
14623 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14624 && from_insn
14625 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14626 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14627 {
14628 struct insn_link *links = LOG_LINKS (place);
14629 LOG_LINKS (place) = NULL;
14630 distribute_links (links);
14631 }
14632 break;
14633 }
14634
14635 if (tem_insn == BB_HEAD (bb))
14636 break;
14637 }
14638
14639 }
14640
14641 /* If the register is set or already dead at PLACE, we needn't do
14642 anything with this note if it is still a REG_DEAD note.
14643 We check here if it is set at all, not if it is totally replaced,
14644 which is what `dead_or_set_p' checks, so also check for it being
14645 set partially. */
14646
14647 if (place && REG_NOTE_KIND (note) == REG_DEAD)
14648 {
14649 unsigned int regno = REGNO (XEXP (note, 0));
14650 reg_stat_type *rsp = &reg_stat[regno];
14651
14652 if (dead_or_set_p (place, XEXP (note, 0))
14653 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14654 {
14655 /* Unless the register previously died in PLACE, clear
14656 last_death. [I no longer understand why this is
14657 being done.] */
14658 if (rsp->last_death != place)
14659 rsp->last_death = 0;
14660 place = 0;
14661 }
14662 else
14663 rsp->last_death = place;
14664
14665 /* If this is a death note for a hard reg that is occupying
14666 multiple registers, ensure that we are still using all
14667 parts of the object. If we find a piece of the object
14668 that is unused, we must arrange for an appropriate REG_DEAD
14669 note to be added for it. However, we can't just emit a USE
14670 and tag the note to it, since the register might actually
14671 be dead; so we recurse, and the recursive call then finds
14672 the previous insn that used this register. */
14673
14674 if (place && REG_NREGS (XEXP (note, 0)) > 1)
14675 {
14676 unsigned int endregno = END_REGNO (XEXP (note, 0));
14677 bool all_used = true;
14678 unsigned int i;
14679
14680 for (i = regno; i < endregno; i++)
14681 if ((! refers_to_regno_p (i, PATTERN (place))
14682 && ! find_regno_fusage (place, USE, i))
14683 || dead_or_set_regno_p (place, i))
14684 {
14685 all_used = false;
14686 break;
14687 }
14688
14689 if (! all_used)
14690 {
14691 /* Put only REG_DEAD notes for pieces that are
14692 not already dead or set. */
14693
14694 for (i = regno; i < endregno;
14695 i += hard_regno_nregs (i, reg_raw_mode[i]))
14696 {
14697 rtx piece = regno_reg_rtx[i];
14698 basic_block bb = this_basic_block;
14699
14700 if (! dead_or_set_p (place, piece)
14701 && ! reg_bitfield_target_p (piece,
14702 PATTERN (place)))
14703 {
14704 rtx new_note = alloc_reg_note (REG_DEAD, piece,
14705 NULL_RTX);
14706
14707 distribute_notes (new_note, place, place,
14708 NULL, NULL_RTX, NULL_RTX,
14709 NULL_RTX);
14710 }
14711 else if (! refers_to_regno_p (i, PATTERN (place))
14712 && ! find_regno_fusage (place, USE, i))
14713 for (tem_insn = PREV_INSN (place); ;
14714 tem_insn = PREV_INSN (tem_insn))
14715 {
14716 if (!NONDEBUG_INSN_P (tem_insn))
14717 {
14718 if (tem_insn == BB_HEAD (bb))
14719 break;
14720 continue;
14721 }
14722 if (dead_or_set_p (tem_insn, piece)
14723 || reg_bitfield_target_p (piece,
14724 PATTERN (tem_insn)))
14725 {
14726 add_reg_note (tem_insn, REG_UNUSED, piece);
14727 break;
14728 }
14729 }
14730 }
14731
14732 place = 0;
14733 }
14734 }
14735 }
14736 break;
14737
14738 default:
14739 /* Any other notes should not be present at this point in the
14740 compilation. */
14741 gcc_unreachable ();
14742 }
14743
14744 if (place)
14745 {
14746 XEXP (note, 1) = REG_NOTES (place);
14747 REG_NOTES (place) = note;
14748
14749 /* Set added_notes_insn to the earliest insn we added a note to. */
14750 if (added_notes_insn == 0
14751 || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place))
14752 added_notes_insn = place;
14753 }
14754
14755 if (place2)
14756 {
14757 add_shallow_copy_of_reg_note (place2, note);
14758
14759 /* Set added_notes_insn to the earliest insn we added a note to. */
14760 if (added_notes_insn == 0
14761 || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2))
14762 added_notes_insn = place2;
14763 }
14764 }
14765 }
14766 \f
14767 /* Similarly to above, distribute the LOG_LINKS that used to be present on
14768 I3, I2, and I1 to new locations. This is also called to add a link
14769 pointing at I3 when I3's destination is changed. */
14770
14771 static void
14772 distribute_links (struct insn_link *links)
14773 {
14774 struct insn_link *link, *next_link;
14775
14776 for (link = links; link; link = next_link)
14777 {
14778 rtx_insn *place = 0;
14779 rtx_insn *insn;
14780 rtx set, reg;
14781
14782 next_link = link->next;
14783
14784 /* If the insn that this link points to is a NOTE, ignore it. */
14785 if (NOTE_P (link->insn))
14786 continue;
14787
14788 set = 0;
14789 rtx pat = PATTERN (link->insn);
14790 if (GET_CODE (pat) == SET)
14791 set = pat;
14792 else if (GET_CODE (pat) == PARALLEL)
14793 {
14794 int i;
14795 for (i = 0; i < XVECLEN (pat, 0); i++)
14796 {
14797 set = XVECEXP (pat, 0, i);
14798 if (GET_CODE (set) != SET)
14799 continue;
14800
14801 reg = SET_DEST (set);
14802 while (GET_CODE (reg) == ZERO_EXTRACT
14803 || GET_CODE (reg) == STRICT_LOW_PART
14804 || GET_CODE (reg) == SUBREG)
14805 reg = XEXP (reg, 0);
14806
14807 if (!REG_P (reg))
14808 continue;
14809
14810 if (REGNO (reg) == link->regno)
14811 break;
14812 }
14813 if (i == XVECLEN (pat, 0))
14814 continue;
14815 }
14816 else
14817 continue;
14818
14819 reg = SET_DEST (set);
14820
14821 while (GET_CODE (reg) == ZERO_EXTRACT
14822 || GET_CODE (reg) == STRICT_LOW_PART
14823 || GET_CODE (reg) == SUBREG)
14824 reg = XEXP (reg, 0);
14825
14826 if (reg == pc_rtx)
14827 continue;
14828
14829 /* A LOG_LINK is defined as being placed on the first insn that uses
14830 a register and points to the insn that sets the register. Start
14831 searching at the next insn after the target of the link and stop
14832 when we reach a set of the register or the end of the basic block.
14833
14834 Note that this correctly handles the link that used to point from
14835 I3 to I2. Also note that not much searching is typically done here
14836 since most links don't point very far away. */
14837
14838 for (insn = NEXT_INSN (link->insn);
14839 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14840 || BB_HEAD (this_basic_block->next_bb) != insn));
14841 insn = NEXT_INSN (insn))
14842 if (DEBUG_INSN_P (insn))
14843 continue;
14844 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14845 {
14846 if (reg_referenced_p (reg, PATTERN (insn)))
14847 place = insn;
14848 break;
14849 }
14850 else if (CALL_P (insn)
14851 && find_reg_fusage (insn, USE, reg))
14852 {
14853 place = insn;
14854 break;
14855 }
14856 else if (INSN_P (insn) && reg_set_p (reg, insn))
14857 break;
14858
14859 /* If we found a place to put the link, place it there unless there
14860 is already a link to the same insn as LINK at that point. */
14861
14862 if (place)
14863 {
14864 struct insn_link *link2;
14865
14866 FOR_EACH_LOG_LINK (link2, place)
14867 if (link2->insn == link->insn && link2->regno == link->regno)
14868 break;
14869
14870 if (link2 == NULL)
14871 {
14872 link->next = LOG_LINKS (place);
14873 LOG_LINKS (place) = link;
14874
14875 /* Set added_links_insn to the earliest insn we added a
14876 link to. */
14877 if (added_links_insn == 0
14878 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14879 added_links_insn = place;
14880 }
14881 }
14882 }
14883 }
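
/* Illustrative example, hypothetical insns: if I2 set reg 100 and I3
   used it, the link lived on I3 and pointed at I2.  Once the
   combination folds the set into I3, the link is redistributed by
   scanning forward from the setter for the next insn that still uses
   reg 100, reattaching it there -- or dropping it if the register is
   set again first.  */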
\f
/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
          && !reg_mentioned_p (x, expr))
        return true;
    }
  return false;
}
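
/* Illustrative sketch of the predicate above (hypothetical RTL, not taken
   from a real dump).  With EXPR = (plus:SI (reg:SI 1) (reg:SI 2)):

       EQUIV = (plus:SI (reg:SI 1) (const_int 8))  -> false:
         every reg/mem in EQUIV also occurs in EXPR, so EQUIV is an
         acceptable specialization of EXPR;

       EQUIV = (plus:SI (reg:SI 3) (const_int 8))  -> true:
         (reg:SI 3) is unmentioned in EXPR, so EQUIV is rejected.  */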
\f
DEBUG_FUNCTION void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}
\f
/* Make pseudo-to-pseudo copies after every hard-reg-to-pseudo-copy, because
   the reg-to-reg copy can usefully combine with later instructions, but we
   do not want to combine the hard reg into later instructions, for that
   restricts register allocation.  */
static void
make_more_copies (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
        {
          if (!NONDEBUG_INSN_P (insn))
            continue;

          rtx set = single_set (insn);
          if (!set)
            continue;

          rtx dest = SET_DEST (set);
          if (!(REG_P (dest) && !HARD_REGISTER_P (dest)))
            continue;

          rtx src = SET_SRC (set);
          if (!(REG_P (src) && HARD_REGISTER_P (src)))
            continue;
          if (TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src)))
            continue;

          rtx new_reg = gen_reg_rtx (GET_MODE (dest));
          rtx_insn *new_insn = gen_move_insn (new_reg, src);
          SET_SRC (set) = new_reg;
          emit_insn_before (new_insn, insn);
          df_insn_rescan (insn);
        }
    }
}
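
/* Sketch of the transformation above (register numbers invented for
   illustration).  An incoming-argument copy such as

       (set (reg:SI 100) (reg:SI 0))       ; hard reg 0 -> pseudo 100

   becomes, after make_more_copies,

       (set (reg:SI 101) (reg:SI 0))       ; fresh pseudo shields hard reg
       (set (reg:SI 100) (reg:SI 101))     ; pseudo-to-pseudo copy

   The pseudo-to-pseudo copy may later be combined into users of pseudo
   100, while the hard register itself stays out of combined patterns and
   so does not constrain register allocation.  */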

/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  make_more_copies ();

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();

  int rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
      if (dom_info_available_p (CDI_DOMINATORS))
        free_dominance_info (CDI_DOMINATORS);
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      cleanup_cfg (0);
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}
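
/* Hedged example of why the jump labels may need rebuilding (the exact RTL
   is target-dependent and the pseudo number hypothetical).  Combining

       (set (reg:DI 100) (label_ref L1))
       (set (pc) (reg:DI 100))

   into

       (set (pc) (label_ref L1))

   turns an indirect jump into a direct one; combine_instructions reports
   this so that rebuild_jump_labels and cleanup_cfg above can refresh the
   JUMP_LABEL fields and the CFG.  */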

namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine

} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}
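
/* Usage note (a sketch, not a complete recipe): the pass manager creates
   this pass through make_pass_combine; the combine pass is listed in
   passes.def along the lines of

       NEXT_PASS (pass_combine);

   and the gate () above keeps it from running when optimization is
   disabled.  */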