[gcc.git] / gcc / loop.c
1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
3 1998, 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables.
26
27 Basic induction variables (BIVs) are pseudo registers that are set within
28 a loop only by incrementing or decrementing their value. General induction
29 variables (GIVs) are pseudo registers whose value is a linear function
30 of a basic induction variable. BIVs are recognized by `basic_induction_var';
31 GIVs by `general_induction_var'.
32
33 Once induction variables are identified, strength reduction is applied to the
34 general induction variables, and induction variable elimination is applied to
35 the basic induction variables.
36
37 It also finds cases where
38 a register is set within the loop by zero-extending a narrower value;
39 these are changed so that the register is zeroed once before the loop
40 and only the low part is copied within the loop.
41
42 Most of the complexity is in heuristics to decide when it is worth
43 while to do these things. */
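
/* Editorial illustration, not part of the pass itself: a small sketch of
   what the terms above mean on a hypothetical source loop.  In

       int sum (const int *a, int n)
       {
         int s = 0;
         int i;
         for (i = 0; i < n; i++)
           s += a[i];
         return s;
       }

   the counter `i' corresponds to a BIV: it is set only by adding a constant
   each iteration.  The address computed for `a[i]', a + i * sizeof (int),
   is a GIV, since it is a linear function of the BIV.  Strength reduction
   would replace the implied multiply by keeping a pointer that is advanced
   by sizeof (int) each iteration, and induction variable elimination may
   then rewrite the exit test in terms of that pointer so that `i' itself
   can be removed.  */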
44
45 #include "config.h"
46 #include "system.h"
47 #include "coretypes.h"
48 #include "tm.h"
49 #include "rtl.h"
50 #include "tm_p.h"
51 #include "function.h"
52 #include "expr.h"
53 #include "hard-reg-set.h"
54 #include "basic-block.h"
55 #include "insn-config.h"
56 #include "regs.h"
57 #include "recog.h"
58 #include "flags.h"
59 #include "real.h"
60 #include "loop.h"
61 #include "cselib.h"
62 #include "except.h"
63 #include "toplev.h"
64 #include "predict.h"
65 #include "insn-flags.h"
66 #include "optabs.h"
67 #include "cfgloop.h"
68 #include "ggc.h"
69
70 /* Not really meaningful values, but at least something. */
71 #ifndef SIMULTANEOUS_PREFETCHES
72 #define SIMULTANEOUS_PREFETCHES 3
73 #endif
74 #ifndef PREFETCH_BLOCK
75 #define PREFETCH_BLOCK 32
76 #endif
77 #ifndef HAVE_prefetch
78 #define HAVE_prefetch 0
79 #define CODE_FOR_prefetch 0
80 #define gen_prefetch(a,b,c) (gcc_unreachable(), NULL_RTX)
81 #endif
82
83 /* Give up the prefetch optimizations once we exceed a given threshold.
84 It is unlikely that we would be able to optimize something in a loop
85 with so many detected prefetches. */
86 #define MAX_PREFETCHES 100
87 /* The number of prefetch blocks that are beneficial to fetch at once before
88 a loop with a known (and low) iteration count. */
89 #define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
90 /* For very tiny loops it is not worthwhile to prefetch even before the loop,
91 since it is likely that the data are already in the cache. */
92 #define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
93
94 /* Parameterize some prefetch heuristics so they can be turned on and off
95 easily for performance testing on new architectures. These can be
96 defined in target-dependent files. */
97
98 /* Prefetch is worthwhile only when loads/stores are dense. */
99 #ifndef PREFETCH_ONLY_DENSE_MEM
100 #define PREFETCH_ONLY_DENSE_MEM 1
101 #endif
102
103 /* Define what we mean by "dense" loads and stores; this value divided by 256
104 is the minimum fraction of memory references that are worth prefetching. */
105 #ifndef PREFETCH_DENSE_MEM
106 #define PREFETCH_DENSE_MEM 220
107 #endif
108
109 /* Do not prefetch for a loop whose iteration count is known to be low. */
110 #ifndef PREFETCH_NO_LOW_LOOPCNT
111 #define PREFETCH_NO_LOW_LOOPCNT 1
112 #endif
113
114 /* Define what we mean by a "low" iteration count. */
115 #ifndef PREFETCH_LOW_LOOPCNT
116 #define PREFETCH_LOW_LOOPCNT 32
117 #endif
118
119 /* Do not prefetch for a loop that contains a function call; such a loop is
120 probably not an internal loop. */
121 #ifndef PREFETCH_NO_CALL
122 #define PREFETCH_NO_CALL 1
123 #endif
124
125 /* Do not prefetch accesses with an extreme stride. */
126 #ifndef PREFETCH_NO_EXTREME_STRIDE
127 #define PREFETCH_NO_EXTREME_STRIDE 1
128 #endif
129
130 /* Define what we mean by an "extreme" stride. */
131 #ifndef PREFETCH_EXTREME_STRIDE
132 #define PREFETCH_EXTREME_STRIDE 4096
133 #endif
134
135 /* Define a limit to how far apart indices can be and still be merged
136 into a single prefetch. */
137 #ifndef PREFETCH_EXTREME_DIFFERENCE
138 #define PREFETCH_EXTREME_DIFFERENCE 4096
139 #endif
140
141 /* Issue prefetch instructions before the loop to fetch data to be used
142 in the first few loop iterations. */
143 #ifndef PREFETCH_BEFORE_LOOP
144 #define PREFETCH_BEFORE_LOOP 1
145 #endif
146
147 /* Do not handle reversed order prefetches (negative stride). */
148 #ifndef PREFETCH_NO_REVERSE_ORDER
149 #define PREFETCH_NO_REVERSE_ORDER 1
150 #endif
151
152 /* Prefetch even if the GIV is in conditional code. */
153 #ifndef PREFETCH_CONDITIONAL
154 #define PREFETCH_CONDITIONAL 1
155 #endif
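
/* As noted above, a target can override any of these defaults in its
   target-dependent files.  A hypothetical sketch (the values below are
   made up for illustration, not taken from any real port):

       #define SIMULTANEOUS_PREFETCHES 8
       #define PREFETCH_BLOCK 64
       #define PREFETCH_LOW_LOOPCNT 16
       #define PREFETCH_CONDITIONAL 0

   would describe a machine that can keep eight prefetches in flight on
   64-byte lines, tolerates prefetching for somewhat shorter loops, and
   does not want prefetches emitted for conditionally executed givs.  */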
156
157 #define LOOP_REG_LIFETIME(LOOP, REGNO) \
158 ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))
159
160 #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
161 ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
162 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
163
164 #define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
165 ((REGNO) < FIRST_PSEUDO_REGISTER \
166 ? (int) hard_regno_nregs[(REGNO)][GET_MODE (SET_DEST)] : 1)
167
168
169 /* Vector mapping INSN_UIDs to luids.
170 The luids are like uids but always increase monotonically.
171 We use them to see whether a jump comes from outside a given loop. */
172
173 int *uid_luid;
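
/* For example, INSN_IN_RANGE_P and LOOP_REG_GLOBAL_P in this file compare
   luids: an insn (or a register's first or last use) lies inside a loop
   exactly when its luid falls between INSN_LUID (loop->start) and
   INSN_LUID (loop->end).  Raw uids cannot be used for this, since they
   reflect creation order rather than position in the insn stream.  */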
174
175 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
176 number the insn is contained in. */
177
178 struct loop **uid_loop;
179
180 /* 1 + largest uid of any insn. */
181
182 int max_uid_for_loop;
183
184 /* Number of loops detected in current function. Used as index to the
185 next few tables. */
186
187 static int max_loop_num;
188
189 /* Bound on pseudo register number before loop optimization.
190 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
191 unsigned int max_reg_before_loop;
192
193 /* The value to pass to the next call of reg_scan_update. */
194 static int loop_max_reg;
195 \f
196 /* During the analysis of a loop, a chain of `struct movable's
197 is made to record all the movable insns found.
198 Then the entire chain can be scanned to decide which to move. */
199
200 struct movable
201 {
202 rtx insn; /* A movable insn */
203 rtx set_src; /* The expression this reg is set from. */
204 rtx set_dest; /* The destination of this SET. */
205 rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST
206 of any registers used within the LIBCALL. */
207 int consec; /* Number of consecutive following insns
208 that must be moved with this one. */
209 unsigned int regno; /* The register it sets */
210 short lifetime; /* lifetime of that register;
211 may be adjusted when matching movables
212 that load the same value are found. */
213 short savings; /* Number of insns we can move for this reg,
214 including other movables that force this
215 or match this one. */
216 ENUM_BITFIELD(machine_mode) savemode : 8; /* Nonzero means it is a mode for
217 a low part that we should avoid changing when
218 clearing the rest of the reg. */
219 unsigned int cond : 1; /* 1 if only conditionally movable */
220 unsigned int force : 1; /* 1 means MUST move this insn */
221 unsigned int global : 1; /* 1 means reg is live outside this loop */
222 /* If PARTIAL is 1, GLOBAL means something different:
223 that the reg is live outside the range from where it is set
224 to the following label. */
225 unsigned int done : 1; /* 1 inhibits further processing of this */
226
227 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
228 In particular, moving it does not make it
229 invariant. */
230 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
231 load SRC, rather than copying INSN. */
232 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
233 first insn of a consecutive sets group. */
234 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
235 unsigned int insert_temp : 1; /* 1 means we copy to a new pseudo and replace
236 the original insn with a copy from that
237 pseudo, rather than deleting it. */
238 struct movable *match; /* First entry for same value */
239 struct movable *forces; /* An insn that must be moved if this is */
240 struct movable *next;
241 };
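
/* For instance, if scan_loop finds a loop-invariant set such as
   (insn 42 ... (set (reg 117) (mult:SI (reg 61) (const_int 4)))), the record
   it builds would look roughly like: insn = insn 42, set_dest = (reg 117),
   set_src = the MULT expression, regno = 117, consec = 0, cond = 0 unless
   the invariance was only conditional, and global = 1 only if reg 117 is
   also live outside the loop.  (The insn and register numbers here are
   invented for illustration.)  */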
242
243
244 FILE *loop_dump_stream;
245
246 /* Forward declarations. */
247
248 static void invalidate_loops_containing_label (rtx);
249 static void find_and_verify_loops (rtx, struct loops *);
250 static void mark_loop_jump (rtx, struct loop *);
251 static void prescan_loop (struct loop *);
252 static int reg_in_basic_block_p (rtx, rtx);
253 static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx);
254 static int labels_in_range_p (rtx, int);
255 static void count_one_set (struct loop_regs *, rtx, rtx, rtx *);
256 static void note_addr_stored (rtx, rtx, void *);
257 static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
258 static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
259 static rtx find_regs_nested (rtx, rtx);
260 static void scan_loop (struct loop*, int);
261 #if 0
262 static void replace_call_address (rtx, rtx, rtx);
263 #endif
264 static rtx skip_consec_insns (rtx, int);
265 static int libcall_benefit (rtx);
266 static rtx libcall_other_reg (rtx, rtx);
267 static void record_excess_regs (rtx, rtx, rtx *);
268 static void ignore_some_movables (struct loop_movables *);
269 static void force_movables (struct loop_movables *);
270 static void combine_movables (struct loop_movables *, struct loop_regs *);
271 static int num_unmoved_movables (const struct loop *);
272 static int regs_match_p (rtx, rtx, struct loop_movables *);
273 static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *,
274 struct loop_regs *);
275 static void add_label_notes (rtx, rtx);
276 static void move_movables (struct loop *loop, struct loop_movables *, int,
277 int);
278 static void loop_movables_add (struct loop_movables *, struct movable *);
279 static void loop_movables_free (struct loop_movables *);
280 static int count_nonfixed_reads (const struct loop *, rtx);
281 static void loop_bivs_find (struct loop *);
282 static void loop_bivs_init_find (struct loop *);
283 static void loop_bivs_check (struct loop *);
284 static void loop_givs_find (struct loop *);
285 static void loop_givs_check (struct loop *);
286 static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int);
287 static int loop_giv_reduce_benefit (struct loop *, struct iv_class *,
288 struct induction *, rtx);
289 static void loop_givs_dead_check (struct loop *, struct iv_class *);
290 static void loop_givs_reduce (struct loop *, struct iv_class *);
291 static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *);
292 static void loop_ivs_free (struct loop *);
293 static void strength_reduce (struct loop *, int);
294 static void find_single_use_in_loop (struct loop_regs *, rtx, rtx);
295 static int valid_initial_value_p (rtx, rtx, int, rtx);
296 static void find_mem_givs (const struct loop *, rtx, rtx, int, int);
297 static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx,
298 rtx, rtx *, int, int);
299 static void check_final_value (const struct loop *, struct induction *);
300 static void loop_ivs_dump (const struct loop *, FILE *, int);
301 static void loop_iv_class_dump (const struct iv_class *, FILE *, int);
302 static void loop_biv_dump (const struct induction *, FILE *, int);
303 static void loop_giv_dump (const struct induction *, FILE *, int);
304 static void record_giv (const struct loop *, struct induction *, rtx, rtx,
305 rtx, rtx, rtx, rtx, int, enum g_types, int, int,
306 rtx *);
307 static void update_giv_derive (const struct loop *, rtx);
308 static void check_ext_dependent_givs (const struct loop *, struct iv_class *);
309 static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
310 rtx, rtx, rtx *, rtx *, rtx **);
311 static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
312 static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *,
313 rtx *, rtx *, int, int *, enum machine_mode);
314 static int consec_sets_giv (const struct loop *, int, rtx, rtx, rtx, rtx *,
315 rtx *, rtx *, rtx *);
316 static int check_dbra_loop (struct loop *, int);
317 static rtx express_from_1 (rtx, rtx, rtx);
318 static rtx combine_givs_p (struct induction *, struct induction *);
319 static int cmp_combine_givs_stats (const void *, const void *);
320 static void combine_givs (struct loop_regs *, struct iv_class *);
321 static int product_cheap_p (rtx, rtx);
322 static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int,
323 int, int);
324 static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx,
325 struct iv_class *, int, basic_block, rtx);
326 static int last_use_this_basic_block (rtx, rtx);
327 static void record_initial (rtx, rtx, void *);
328 static void update_reg_last_use (rtx, rtx);
329 static rtx next_insn_in_loop (const struct loop *, rtx);
330 static void loop_regs_scan (const struct loop *, int);
331 static int count_insns_in_loop (const struct loop *);
332 static int find_mem_in_note_1 (rtx *, void *);
333 static rtx find_mem_in_note (rtx);
334 static void load_mems (const struct loop *);
335 static int insert_loop_mem (rtx *, void *);
336 static int replace_loop_mem (rtx *, void *);
337 static void replace_loop_mems (rtx, rtx, rtx, int);
338 static int replace_loop_reg (rtx *, void *);
339 static void replace_loop_regs (rtx insn, rtx, rtx);
340 static void note_reg_stored (rtx, rtx, void *);
341 static void try_copy_prop (const struct loop *, rtx, unsigned int);
342 static void try_swap_copy_prop (const struct loop *, rtx, unsigned int);
343 static rtx check_insn_for_givs (struct loop *, rtx, int, int);
344 static rtx check_insn_for_bivs (struct loop *, rtx, int, int);
345 static rtx gen_add_mult (rtx, rtx, rtx, rtx);
346 static void loop_regs_update (const struct loop *, rtx);
347 static int iv_add_mult_cost (rtx, rtx, rtx, rtx);
348
349 static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx);
350 static rtx loop_call_insn_emit_before (const struct loop *, basic_block,
351 rtx, rtx);
352 static rtx loop_call_insn_hoist (const struct loop *, rtx);
353 static rtx loop_insn_sink_or_swim (const struct loop *, rtx);
354
355 static void loop_dump_aux (const struct loop *, FILE *, int);
356 static void loop_delete_insns (rtx, rtx);
357 static HOST_WIDE_INT remove_constant_addition (rtx *);
358 static rtx gen_load_of_final_value (rtx, rtx);
359 void debug_ivs (const struct loop *);
360 void debug_iv_class (const struct iv_class *);
361 void debug_biv (const struct induction *);
362 void debug_giv (const struct induction *);
363 void debug_loop (const struct loop *);
364 void debug_loops (const struct loops *);
365
366 typedef struct loop_replace_args
367 {
368 rtx match;
369 rtx replacement;
370 rtx insn;
371 } loop_replace_args;
372
373 /* Nonzero iff INSN is between START and END, inclusive. */
374 #define INSN_IN_RANGE_P(INSN, START, END) \
375 (INSN_UID (INSN) < max_uid_for_loop \
376 && INSN_LUID (INSN) >= INSN_LUID (START) \
377 && INSN_LUID (INSN) <= INSN_LUID (END))
378
379 /* Indirect_jump_in_function is computed once per function. */
380 static int indirect_jump_in_function;
381 static int indirect_jump_in_function_p (rtx);
382
383 static int compute_luids (rtx, rtx, int);
384
385 static int biv_elimination_giv_has_0_offset (struct induction *,
386 struct induction *, rtx);
387 \f
388 /* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
389 copy the value of the strength reduced giv to its original register. */
390 static int copy_cost;
391
392 /* Cost of using a register, to normalize the benefits of a giv. */
393 static int reg_address_cost;
394
395 void
396 init_loop (void)
397 {
398 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
399
400 reg_address_cost = address_cost (reg, SImode);
401
402 copy_cost = COSTS_N_INSNS (1);
403 }
404 \f
405 /* Compute the mapping from uids to luids.
406 LUIDs are numbers assigned to insns, like uids,
407 except that luids increase monotonically through the code.
408 Start at insn START and stop just before END. Assign LUIDs
409 starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
410 static int
411 compute_luids (rtx start, rtx end, int prev_luid)
412 {
413 int i;
414 rtx insn;
415
416 for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
417 {
418 if (INSN_UID (insn) >= max_uid_for_loop)
419 continue;
420 /* Don't assign luids to line-number NOTEs, so that the distance in
421 luids between two insns is not affected by -g. */
422 if (!NOTE_P (insn)
423 || NOTE_LINE_NUMBER (insn) <= 0)
424 uid_luid[INSN_UID (insn)] = ++i;
425 else
426 /* Give a line number note the same luid as preceding insn. */
427 uid_luid[INSN_UID (insn)] = i;
428 }
429 return i + 1;
430 }
431 \f
432 /* Entry point of this file. Perform loop optimization
433 on the current function. F is the first insn of the function
434 and DUMPFILE is a stream for output of a trace of actions taken
435 (or 0 if none should be output). */
436
437 void
438 loop_optimize (rtx f, FILE *dumpfile, int flags)
439 {
440 rtx insn;
441 int i;
442 struct loops loops_data;
443 struct loops *loops = &loops_data;
444 struct loop_info *loops_info;
445
446 loop_dump_stream = dumpfile;
447
448 init_recog_no_volatile ();
449
450 max_reg_before_loop = max_reg_num ();
451 loop_max_reg = max_reg_before_loop;
452
453 regs_may_share = 0;
454
455 /* Count the number of loops. */
456
457 max_loop_num = 0;
458 for (insn = f; insn; insn = NEXT_INSN (insn))
459 {
460 if (NOTE_P (insn)
461 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
462 max_loop_num++;
463 }
464
465 /* Don't waste time if no loops. */
466 if (max_loop_num == 0)
467 return;
468
469 loops->num = max_loop_num;
470
471 /* Get size to use for tables indexed by uids.
472 Leave some space for labels allocated by find_and_verify_loops. */
473 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
474
475 uid_luid = xcalloc (max_uid_for_loop, sizeof (int));
476 uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *));
477
478 /* Allocate storage for array of loops. */
479 loops->array = xcalloc (loops->num, sizeof (struct loop));
480
481 /* Find and process each loop.
482 First, find them, and record them in order of their beginnings. */
483 find_and_verify_loops (f, loops);
484
485 /* Allocate and initialize auxiliary loop information. */
486 loops_info = xcalloc (loops->num, sizeof (struct loop_info));
487 for (i = 0; i < (int) loops->num; i++)
488 loops->array[i].aux = loops_info + i;
489
490 /* Now find all register lifetimes. This must be done after
491 find_and_verify_loops, because it might reorder the insns in the
492 function. */
493 reg_scan (f, max_reg_before_loop, 1);
494
495 /* This must occur after reg_scan so that registers created by gcse
496 will have entries in the register tables.
497
498 We could have added a call to reg_scan after gcse_main in toplev.c,
499 but moving this call to init_alias_analysis is more efficient. */
500 init_alias_analysis ();
501
502 /* See if we went too far. Note that get_max_uid already returns
503 one more than the maximum uid of all insns. */
504 gcc_assert (get_max_uid () <= max_uid_for_loop);
505
506 /* Now reset it to the actual size we need. See above. */
507 max_uid_for_loop = get_max_uid ();
508
509 /* find_and_verify_loops has already called compute_luids, but it
510 might have rearranged code afterwards, so we need to recompute
511 the luids now. */
512 compute_luids (f, NULL_RTX, 0);
513
514 /* Don't leave gaps in uid_luid for insns that have been
515 deleted. It is possible that the first or last insn
516 using some register has been deleted by cross-jumping.
517 Make sure that uid_luid for that former insn's uid
518 points to the general area where that insn used to be. */
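/* The first of the two loops below copies the first nonzero luid into
   uid_luid[0]; this ensures that the fill-forward loop that follows does
   not take the uid_luid[i] == 0 branch at i == 0 and so never indexes
   uid_luid[-1].  Entries for deleted insns then inherit the luid of the
   nearest earlier insn that still has one.  */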
519 for (i = 0; i < max_uid_for_loop; i++)
520 {
521 uid_luid[0] = uid_luid[i];
522 if (uid_luid[0] != 0)
523 break;
524 }
525 for (i = 0; i < max_uid_for_loop; i++)
526 if (uid_luid[i] == 0)
527 uid_luid[i] = uid_luid[i - 1];
528
529 /* Determine if the function has indirect jump. On some systems
530 this prevents low overhead loop instructions from being used. */
531 indirect_jump_in_function = indirect_jump_in_function_p (f);
532
533 /* Now scan the loops, last ones first, since this means inner ones are done
534 before outer ones. */
535 for (i = max_loop_num - 1; i >= 0; i--)
536 {
537 struct loop *loop = &loops->array[i];
538
539 if (! loop->invalid && loop->end)
540 {
541 scan_loop (loop, flags);
542 ggc_collect ();
543 }
544 }
545
546 end_alias_analysis ();
547
548 /* Clean up. */
549 for (i = 0; i < (int) loops->num; i++)
550 free (loops_info[i].mems);
551
552 free (uid_luid);
553 free (uid_loop);
554 free (loops_info);
555 free (loops->array);
556 }
557 \f
558 /* Returns the next insn, in execution order, after INSN. START and
559 END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
560 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
561 insn-stream; it is used with loops that are entered near the
562 bottom. */
563
564 static rtx
565 next_insn_in_loop (const struct loop *loop, rtx insn)
566 {
567 insn = NEXT_INSN (insn);
568
569 if (insn == loop->end)
570 {
571 if (loop->top)
572 /* Go to the top of the loop, and continue there. */
573 insn = loop->top;
574 else
575 /* We're done. */
576 insn = NULL_RTX;
577 }
578
579 if (insn == loop->scan_start)
580 /* We're done. */
581 insn = NULL_RTX;
582
583 return insn;
584 }
585
586 /* Find any register references hidden inside X and add them to
587 the dependency list DEPS. This is used to look inside CLOBBER (MEM ...)
588 patterns when checking whether a PARALLEL can be pulled out of a loop. */
589
590 static rtx
591 find_regs_nested (rtx deps, rtx x)
592 {
593 enum rtx_code code = GET_CODE (x);
594 if (code == REG)
595 deps = gen_rtx_EXPR_LIST (VOIDmode, x, deps);
596 else
597 {
598 const char *fmt = GET_RTX_FORMAT (code);
599 int i, j;
600 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
601 {
602 if (fmt[i] == 'e')
603 deps = find_regs_nested (deps, XEXP (x, i));
604 else if (fmt[i] == 'E')
605 for (j = 0; j < XVECLEN (x, i); j++)
606 deps = find_regs_nested (deps, XVECEXP (x, i, j));
607 }
608 }
609 return deps;
610 }
611
612 /* Optimize one loop described by LOOP. */
613
614 /* ??? Could also move memory writes out of loops if the destination address
615 is invariant, the source is invariant, the memory write is not volatile,
616 and if we can prove that no read inside the loop can read this address
617 before the write occurs. If there is a read of this address after the
618 write, then we can also mark the memory read as invariant. */
619
620 static void
621 scan_loop (struct loop *loop, int flags)
622 {
623 struct loop_info *loop_info = LOOP_INFO (loop);
624 struct loop_regs *regs = LOOP_REGS (loop);
625 int i;
626 rtx loop_start = loop->start;
627 rtx loop_end = loop->end;
628 rtx p;
629 /* 1 if we are scanning insns that could be executed zero times. */
630 int maybe_never = 0;
631 /* 1 if we are scanning insns that might never be executed
632 due to a subroutine call which might exit before they are reached. */
633 int call_passed = 0;
634 /* Number of insns in the loop. */
635 int insn_count;
636 int tem;
637 rtx temp, update_start, update_end;
638 /* The SET from an insn, if it is the only SET in the insn. */
639 rtx set, set1;
640 /* Chain describing insns movable in current loop. */
641 struct loop_movables *movables = LOOP_MOVABLES (loop);
642 /* Ratio of extra register life span we can justify
643 for saving an instruction. More if loop doesn't call subroutines
644 since in that case saving an insn makes more difference
645 and more registers are available. */
646 int threshold;
647 int in_libcall;
648
649 loop->top = 0;
650
651 movables->head = 0;
652 movables->last = 0;
653
654 /* Determine whether this loop starts with a jump down to a test at
655 the end. This will occur for a small number of loops with a test
656 that is too complex to duplicate in front of the loop.
657
658 We search for the first insn or label in the loop, skipping NOTEs.
659 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
660 (because we might have a loop executed only once that contains a
661 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
662 (in case we have a degenerate loop).
663
664 Note that if we mistakenly think that a loop is entered at the top
665 when, in fact, it is entered at the exit test, the only effect will be
666 slightly poorer optimization. Making the opposite error can generate
667 incorrect code. Since very few loops now start with a jump to the
668 exit test, the code here to detect that case is very conservative. */
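/* For a hypothetical source loop such as

       while (n > 0)
         n = f (n);

   whose exit test is not duplicated ahead of the body, the RTL may be laid
   out as: NOTE_INSN_LOOP_BEG, an unconditional jump to the exit test near
   NOTE_INSN_LOOP_END, then the label for the top of the body.  In that case
   LOOP->SCAN_START becomes the target of that jump and LOOP->TOP records
   the label that the end-test jumps back to; see the code below.  */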
669
670 for (p = NEXT_INSN (loop_start);
671 p != loop_end
672 && !LABEL_P (p) && ! INSN_P (p)
673 && (!NOTE_P (p)
674 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
675 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
676 p = NEXT_INSN (p))
677 ;
678
679 loop->scan_start = p;
680
681 /* If loop end is the end of the current function, then emit a
682 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
683 note insn. This is the position we use when sinking insns out of
684 the loop. */
685 if (NEXT_INSN (loop->end) != 0)
686 loop->sink = NEXT_INSN (loop->end);
687 else
688 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
689
690 /* Set up variables describing this loop. */
691 prescan_loop (loop);
692 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
693
694 /* If loop has a jump before the first label,
695 the true entry is the target of that jump.
696 Start scan from there.
697 But record in LOOP->TOP the place where the end-test jumps
698 back to so we can scan that after the end of the loop. */
699 if (JUMP_P (p)
700 /* Loop entry must be an unconditional jump (and not a RETURN). */
701 && any_uncondjump_p (p)
702 && JUMP_LABEL (p) != 0
703 /* Check to see whether the jump actually
704 jumps out of the loop (meaning it's no loop).
705 This case can happen for things like
706 do {..} while (0). If this label was generated previously
707 by loop, we can't tell anything about it and have to reject
708 the loop. */
709 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
710 {
711 loop->top = next_label (loop->scan_start);
712 loop->scan_start = JUMP_LABEL (p);
713 }
714
715 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
716 as required by loop_reg_used_before_p. So skip such loops. (This
717 test may never be true, but it's best to play it safe.)
718
719 Also, skip loops where we do not start scanning at a label. This
720 test also rejects loops starting with a JUMP_INSN that failed the
721 test above. */
722
723 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
724 || !LABEL_P (loop->scan_start))
725 {
726 if (loop_dump_stream)
727 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
728 INSN_UID (loop_start), INSN_UID (loop_end));
729 return;
730 }
731
732 /* Allocate extra space for REGs that might be created by load_mems.
733 We allocate a little extra slop as well, in the hopes that we
734 won't have to reallocate the regs array. */
735 loop_regs_scan (loop, loop_info->mems_idx + 16);
736 insn_count = count_insns_in_loop (loop);
737
738 if (loop_dump_stream)
739 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
740 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
741
742 /* Scan through the loop finding insns that are safe to move.
743 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
744 this reg will be considered invariant for subsequent insns.
745 We consider whether subsequent insns use the reg
746 in deciding whether it is worth actually moving.
747
748 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
749 and therefore it is possible that the insns we are scanning
750 would never be executed. At such times, we must make sure
751 that it is safe to execute the insn once instead of zero times.
752 When MAYBE_NEVER is 0, all insns will be executed at least once
753 so that is not a problem. */
754
755 for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
756 p != NULL_RTX;
757 p = next_insn_in_loop (loop, p))
758 {
759 if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
760 in_libcall--;
761 if (NONJUMP_INSN_P (p))
762 {
763 temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
764 if (temp)
765 in_libcall++;
766 if (! in_libcall
767 && (set = single_set (p))
768 && REG_P (SET_DEST (set))
769 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
770 && SET_DEST (set) != pic_offset_table_rtx
771 #endif
772 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
773 {
774 int tem1 = 0;
775 int tem2 = 0;
776 int move_insn = 0;
777 int insert_temp = 0;
778 rtx src = SET_SRC (set);
779 rtx dependencies = 0;
780
781 /* Figure out what to use as a source of this insn. If a
782 REG_EQUIV note is given or if a REG_EQUAL note with a
783 constant operand is specified, use it as the source and
784 mark that we should move this insn by calling
785 emit_move_insn rather that duplicating the insn.
786
787 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
788 note is present. */
789 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
790 if (temp)
791 src = XEXP (temp, 0), move_insn = 1;
792 else
793 {
794 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
795 if (temp && CONSTANT_P (XEXP (temp, 0)))
796 src = XEXP (temp, 0), move_insn = 1;
797 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
798 {
799 src = XEXP (temp, 0);
800 /* A libcall block can use regs that don't appear in
801 the equivalent expression. To move the libcall,
802 we must move those regs too. */
803 dependencies = libcall_other_reg (p, src);
804 }
805 }
806
807 /* For parallels, add any possible uses to the dependencies, as
808 we can't move the insn without resolving them first.
809 MEMs inside CLOBBERs may also reference registers; these
810 count as implicit uses. */
811 if (GET_CODE (PATTERN (p)) == PARALLEL)
812 {
813 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
814 {
815 rtx x = XVECEXP (PATTERN (p), 0, i);
816 if (GET_CODE (x) == USE)
817 dependencies
818 = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
819 dependencies);
820 else if (GET_CODE (x) == CLOBBER
821 && MEM_P (XEXP (x, 0)))
822 dependencies = find_regs_nested (dependencies,
823 XEXP (XEXP (x, 0), 0));
824 }
825 }
826
827 if (/* The register is used in basic blocks other
828 than the one where it is set (meaning that
829 something after this point in the loop might
830 depend on its value before the set). */
831 ! reg_in_basic_block_p (p, SET_DEST (set))
832 /* And the set is not guaranteed to be executed once
833 the loop starts, or the value before the set is
834 needed before the set occurs...
835
836 ??? Note we have quadratic behavior here, mitigated
837 by the fact that the previous test will often fail for
838 large loops. Rather than re-scanning the entire loop
839 each time for register usage, we should build tables
840 of the register usage and use them here instead. */
841 && (maybe_never
842 || loop_reg_used_before_p (loop, set, p)))
843 /* It is unsafe to move the set. However, it may be OK to
844 move the source into a new pseudo, and substitute a
845 reg-to-reg copy for the original insn.
846
847 This code used to consider it OK to move a set of a variable
848 which was not created by the user and not used in an exit
849 test.
850 That behavior is incorrect and was removed. */
851 insert_temp = 1;
852
853 /* Don't try to optimize a MODE_CC set with a constant
854 source. It probably will be combined with a conditional
855 jump. */
856 if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
857 && CONSTANT_P (src))
858 ;
859 /* Don't try to optimize a register that was made
860 by loop-optimization for an inner loop.
861 We don't know its life-span, so we can't compute
862 the benefit. */
863 else if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
864 ;
865 /* Don't move the source and add a reg-to-reg copy:
866 - with -Os (this certainly increases size),
867 - if the mode doesn't support copy operations (obviously),
868 - if the source is already a reg (the motion will gain nothing),
869 - if the source is a legitimate constant (likewise). */
870 else if (insert_temp
871 && (optimize_size
872 || ! can_copy_p (GET_MODE (SET_SRC (set)))
873 || REG_P (SET_SRC (set))
874 || (CONSTANT_P (SET_SRC (set))
875 && LEGITIMATE_CONSTANT_P (SET_SRC (set)))))
876 ;
877 else if ((tem = loop_invariant_p (loop, src))
878 && (dependencies == 0
879 || (tem2
880 = loop_invariant_p (loop, dependencies)) != 0)
881 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
882 || (tem1
883 = consec_sets_invariant_p
884 (loop, SET_DEST (set),
885 regs->array[REGNO (SET_DEST (set))].set_in_loop,
886 p)))
887 /* If the insn can cause a trap (such as divide by zero),
888 can't move it unless it's guaranteed to be executed
889 once loop is entered. Even a function call might
890 prevent the trap insn from being reached
891 (since it might exit!) */
892 && ! ((maybe_never || call_passed)
893 && may_trap_p (src)))
894 {
895 struct movable *m;
896 int regno = REGNO (SET_DEST (set));
897
898 /* A potential lossage arises when two insns
899 can be combined as long as they are both in the loop, but
900 we move one of them outside the loop. For large loops,
901 this can lose. The most common case of this is the address
902 of a function being called.
903
904 Therefore, if this register is marked as being used
905 exactly once if we are in a loop with calls
906 (a "large loop"), see if we can replace the usage of
907 this register with the source of this SET. If we can,
908 delete this insn.
909
910 Don't do this if P has a REG_RETVAL note or if we have
911 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
912
913 if (loop_info->has_call
914 && regs->array[regno].single_usage != 0
915 && regs->array[regno].single_usage != const0_rtx
916 && REGNO_FIRST_UID (regno) == INSN_UID (p)
917 && (REGNO_LAST_UID (regno)
918 == INSN_UID (regs->array[regno].single_usage))
919 && regs->array[regno].set_in_loop == 1
920 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
921 && ! side_effects_p (SET_SRC (set))
922 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
923 && (! SMALL_REGISTER_CLASSES
924 || (! (REG_P (SET_SRC (set))
925 && (REGNO (SET_SRC (set))
926 < FIRST_PSEUDO_REGISTER))))
927 && regno >= FIRST_PSEUDO_REGISTER
928 /* This test is not redundant; SET_SRC (set) might be
929 a call-clobbered register and the life of REGNO
930 might span a call. */
931 && ! modified_between_p (SET_SRC (set), p,
932 regs->array[regno].single_usage)
933 && no_labels_between_p (p,
934 regs->array[regno].single_usage)
935 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
936 regs->array[regno].single_usage))
937 {
938 /* Replace any usage in a REG_EQUAL note. Must copy
939 the new source, so that we don't get rtx sharing
940 between the SET_SOURCE and REG_NOTES of insn p. */
941 REG_NOTES (regs->array[regno].single_usage)
942 = (replace_rtx
943 (REG_NOTES (regs->array[regno].single_usage),
944 SET_DEST (set), copy_rtx (SET_SRC (set))));
945
946 delete_insn (p);
947 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
948 i++)
949 regs->array[regno+i].set_in_loop = 0;
950 continue;
951 }
952
953 m = xmalloc (sizeof (struct movable));
954 m->next = 0;
955 m->insn = p;
956 m->set_src = src;
957 m->dependencies = dependencies;
958 m->set_dest = SET_DEST (set);
959 m->force = 0;
960 m->consec
961 = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
962 m->done = 0;
963 m->forces = 0;
964 m->partial = 0;
965 m->move_insn = move_insn;
966 m->move_insn_first = 0;
967 m->insert_temp = insert_temp;
968 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
969 m->savemode = VOIDmode;
970 m->regno = regno;
971 /* Set M->cond if either loop_invariant_p
972 or consec_sets_invariant_p returned 2
973 (only conditionally invariant). */
974 m->cond = ((tem | tem1 | tem2) > 1);
975 m->global = LOOP_REG_GLOBAL_P (loop, regno);
976 m->match = 0;
977 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
978 m->savings = regs->array[regno].n_times_set;
979 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
980 m->savings += libcall_benefit (p);
981 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
982 regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
983 /* Add M to the end of the chain MOVABLES. */
984 loop_movables_add (movables, m);
985
986 if (m->consec > 0)
987 {
988 /* It is possible for the first instruction to have a
989 REG_EQUAL note but a non-invariant SET_SRC, so we must
990 remember the status of the first instruction in case
991 the last instruction doesn't have a REG_EQUAL note. */
992 m->move_insn_first = m->move_insn;
993
994 /* Skip this insn, not checking REG_LIBCALL notes. */
995 p = next_nonnote_insn (p);
996 /* Skip the consecutive insns, if there are any. */
997 p = skip_consec_insns (p, m->consec);
998 /* Back up to the last insn of the consecutive group. */
999 p = prev_nonnote_insn (p);
1000
1001 /* We must now reset m->move_insn, m->is_equiv, and
1002 possibly m->set_src to correspond to the effects of
1003 all the insns. */
1004 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
1005 if (temp)
1006 m->set_src = XEXP (temp, 0), m->move_insn = 1;
1007 else
1008 {
1009 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
1010 if (temp && CONSTANT_P (XEXP (temp, 0)))
1011 m->set_src = XEXP (temp, 0), m->move_insn = 1;
1012 else
1013 m->move_insn = 0;
1014
1015 }
1016 m->is_equiv
1017 = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
1018 }
1019 }
1020 /* If this register is always set within a STRICT_LOW_PART
1021 or set to zero, then its high bytes are constant.
1022 So clear them outside the loop and within the loop
1023 just load the low bytes.
1024 We must check that the machine has an instruction to do so.
1025 Also, if the value loaded into the register
1026 depends on the same register, this cannot be done. */
1027 else if (SET_SRC (set) == const0_rtx
1028 && NONJUMP_INSN_P (NEXT_INSN (p))
1029 && (set1 = single_set (NEXT_INSN (p)))
1030 && GET_CODE (set1) == SET
1031 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
1032 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
1033 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
1034 == SET_DEST (set))
1035 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
1036 {
1037 int regno = REGNO (SET_DEST (set));
1038 if (regs->array[regno].set_in_loop == 2)
1039 {
1040 struct movable *m;
1041 m = xmalloc (sizeof (struct movable));
1042 m->next = 0;
1043 m->insn = p;
1044 m->set_dest = SET_DEST (set);
1045 m->dependencies = 0;
1046 m->force = 0;
1047 m->consec = 0;
1048 m->done = 0;
1049 m->forces = 0;
1050 m->move_insn = 0;
1051 m->move_insn_first = 0;
1052 m->insert_temp = insert_temp;
1053 m->partial = 1;
1054 /* If the insn may not be executed on some cycles,
1055 we can't clear the whole reg; clear just high part.
1056 Not even if the reg is used only within this loop.
1057 Consider this:
1058 while (1)
1059 while (s != t) {
1060 if (foo ()) x = *s;
1061 use (x);
1062 }
1063 Clearing x before the inner loop could clobber a value
1064 being saved from the last time around the outer loop.
1065 However, if the reg is not used outside this loop
1066 and all uses of the register are in the same
1067 basic block as the store, there is no problem.
1068
1069 If this insn was made by loop, we don't know its
1070 INSN_LUID and hence must make a conservative
1071 assumption. */
1072 m->global = (INSN_UID (p) >= max_uid_for_loop
1073 || LOOP_REG_GLOBAL_P (loop, regno)
1074 || (labels_in_range_p
1075 (p, REGNO_FIRST_LUID (regno))));
1076 if (maybe_never && m->global)
1077 m->savemode = GET_MODE (SET_SRC (set1));
1078 else
1079 m->savemode = VOIDmode;
1080 m->regno = regno;
1081 m->cond = 0;
1082 m->match = 0;
1083 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1084 m->savings = 1;
1085 for (i = 0;
1086 i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
1087 i++)
1088 regs->array[regno+i].set_in_loop = -1;
1089 /* Add M to the end of the chain MOVABLES. */
1090 loop_movables_add (movables, m);
1091 }
1092 }
1093 }
1094 }
1095 /* Past a call insn, we get to insns which might not be executed
1096 because the call might exit. This matters for insns that trap.
1097 Constant and pure call insns always return, so they don't count. */
1098 else if (CALL_P (p) && ! CONST_OR_PURE_CALL_P (p))
1099 call_passed = 1;
1100 /* Past a label or a jump, we get to insns for which we
1101 can't count on whether or how many times they will be
1102 executed during each iteration. Therefore, we can
1103 only move out sets of trivial variables
1104 (those not used after the loop). */
1105 /* Similar code appears twice in strength_reduce. */
1106 else if ((LABEL_P (p) || JUMP_P (p))
1107 /* If we enter the loop in the middle, and scan around to the
1108 beginning, don't set maybe_never for that. This must be an
1109 unconditional jump, otherwise the code at the top of the
1110 loop might never be executed. Unconditional jumps are
1111 followed by a barrier then the loop_end. */
1112 && ! (JUMP_P (p) && JUMP_LABEL (p) == loop->top
1113 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1114 && any_uncondjump_p (p)))
1115 maybe_never = 1;
1116 }
1117
1118 /* If one movable subsumes another, ignore that other. */
1119
1120 ignore_some_movables (movables);
1121
1122 /* For each movable insn, see if the reg that it loads dies right into
1123 another conditionally movable insn (i.e. its last use is in that insn).
1124 If so, record that the second insn "forces" the first one,
1125 since the second can be moved only if the first is. */
1126
1127 force_movables (movables);
1128
1129 /* See if there are multiple movable insns that load the same value.
1130 If there are, make all but the first point at the first one
1131 through the `match' field, and add the priorities of them
1132 all together as the priority of the first. */
1133
1134 combine_movables (movables, regs);
1135
1136 /* Now consider each movable insn to decide whether it is worth moving.
1137 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1138
1139 For machines with few registers this increases code size, so do not
1140 move movables when optimizing for code size on such machines.
1141 (The 18 below is the value for i386.) */
1142
1143 if (!optimize_size
1144 || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
1145 {
1146 move_movables (loop, movables, threshold, insn_count);
1147
1148 /* Recalculate regs->array if move_movables has created new
1149 registers. */
1150 if (max_reg_num () > regs->num)
1151 {
1152 loop_regs_scan (loop, 0);
1153 for (update_start = loop_start;
1154 PREV_INSN (update_start)
1155 && !LABEL_P (PREV_INSN (update_start));
1156 update_start = PREV_INSN (update_start))
1157 ;
1158 update_end = NEXT_INSN (loop_end);
1159
1160 reg_scan_update (update_start, update_end, loop_max_reg);
1161 loop_max_reg = max_reg_num ();
1162 }
1163 }
1164
1165 /* Now candidates that still are negative are those not moved.
1166 Change regs->array[I].set_in_loop to indicate that those are not actually
1167 invariant. */
1168 for (i = 0; i < regs->num; i++)
1169 if (regs->array[i].set_in_loop < 0)
1170 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1171
1172 /* Now that we've moved some things out of the loop, we might be able to
1173 hoist even more memory references. */
1174 load_mems (loop);
1175
1176 /* Recalculate regs->array if load_mems has created new registers. */
1177 if (max_reg_num () > regs->num)
1178 loop_regs_scan (loop, 0);
1179
1180 for (update_start = loop_start;
1181 PREV_INSN (update_start)
1182 && !LABEL_P (PREV_INSN (update_start));
1183 update_start = PREV_INSN (update_start))
1184 ;
1185 update_end = NEXT_INSN (loop_end);
1186
1187 reg_scan_update (update_start, update_end, loop_max_reg);
1188 loop_max_reg = max_reg_num ();
1189
1190 if (flag_strength_reduce)
1191 {
1192 if (update_end && LABEL_P (update_end))
1193 /* Ensure our label doesn't go away. */
1194 LABEL_NUSES (update_end)++;
1195
1196 strength_reduce (loop, flags);
1197
1198 reg_scan_update (update_start, update_end, loop_max_reg);
1199 loop_max_reg = max_reg_num ();
1200
1201 if (update_end && LABEL_P (update_end)
1202 && --LABEL_NUSES (update_end) == 0)
1203 delete_related_insns (update_end);
1204 }
1205
1206
1207 /* The movable information was needed only through strength reduction;
free it now. */
1208 loop_movables_free (movables);
1209
1210 free (regs->array);
1211 regs->array = 0;
1212 regs->num = 0;
1213 }
1214 \f
1215 /* Add elements to *OUTPUT to record all the pseudo-regs
1216 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1217
1218 static void
1219 record_excess_regs (rtx in_this, rtx not_in_this, rtx *output)
1220 {
1221 enum rtx_code code;
1222 const char *fmt;
1223 int i;
1224
1225 code = GET_CODE (in_this);
1226
1227 switch (code)
1228 {
1229 case PC:
1230 case CC0:
1231 case CONST_INT:
1232 case CONST_DOUBLE:
1233 case CONST:
1234 case SYMBOL_REF:
1235 case LABEL_REF:
1236 return;
1237
1238 case REG:
1239 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1240 && ! reg_mentioned_p (in_this, not_in_this))
1241 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1242 return;
1243
1244 default:
1245 break;
1246 }
1247
1248 fmt = GET_RTX_FORMAT (code);
1249 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1250 {
1251 int j;
1252
1253 switch (fmt[i])
1254 {
1255 case 'E':
1256 for (j = 0; j < XVECLEN (in_this, i); j++)
1257 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1258 break;
1259
1260 case 'e':
1261 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1262 break;
1263 }
1264 }
1265 }
1266 \f
1267 /* Check what regs are referred to in the libcall block ending with INSN,
1268 aside from those mentioned in the equivalent value.
1269 If there are none, return 0.
1270 If there are one or more, return an EXPR_LIST containing all of them. */
1271
1272 static rtx
1273 libcall_other_reg (rtx insn, rtx equiv)
1274 {
1275 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1276 rtx p = XEXP (note, 0);
1277 rtx output = 0;
1278
1279 /* First, find all the regs used in the libcall block
1280 that are not mentioned as inputs to the result. */
1281
1282 while (p != insn)
1283 {
1284 if (INSN_P (p))
1285 record_excess_regs (PATTERN (p), equiv, &output);
1286 p = NEXT_INSN (p);
1287 }
1288
1289 return output;
1290 }
1291 \f
1292 /* Return 1 if all uses of REG
1293 are between INSN and the end of the basic block. */
1294
1295 static int
1296 reg_in_basic_block_p (rtx insn, rtx reg)
1297 {
1298 int regno = REGNO (reg);
1299 rtx p;
1300
1301 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1302 return 0;
1303
1304 /* Search this basic block for the already recorded last use of the reg. */
1305 for (p = insn; p; p = NEXT_INSN (p))
1306 {
1307 switch (GET_CODE (p))
1308 {
1309 case NOTE:
1310 break;
1311
1312 case INSN:
1313 case CALL_INSN:
1314 /* Ordinary insn: if this is the last use, we win. */
1315 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1316 return 1;
1317 break;
1318
1319 case JUMP_INSN:
1320 /* Jump insn: if this is the last use, we win. */
1321 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1322 return 1;
1323 /* Otherwise, it's the end of the basic block, so we lose. */
1324 return 0;
1325
1326 case CODE_LABEL:
1327 case BARRIER:
1328 /* It's the end of the basic block, so we lose. */
1329 return 0;
1330
1331 default:
1332 break;
1333 }
1334 }
1335
1336 /* The "last use" that was recorded can't be found after the first
1337 use. This can happen when the last use was deleted while
1338 processing an inner loop, this inner loop was then completely
1339 unrolled, and the outer loop is always exited after the inner loop,
1340 so that everything after the first use becomes a single basic block. */
1341 return 1;
1342 }
1343 \f
1344 /* Compute the benefit of eliminating the insns in the block whose
1345 last insn is LAST. This may be a group of insns used to compute a
1346 value directly or can contain a library call. */
1347
1348 static int
1349 libcall_benefit (rtx last)
1350 {
1351 rtx insn;
1352 int benefit = 0;
1353
1354 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1355 insn != last; insn = NEXT_INSN (insn))
1356 {
1357 if (CALL_P (insn))
1358 benefit += 10; /* Assume at least this many insns in a library
1359 routine. */
1360 else if (NONJUMP_INSN_P (insn)
1361 && GET_CODE (PATTERN (insn)) != USE
1362 && GET_CODE (PATTERN (insn)) != CLOBBER)
1363 benefit++;
1364 }
1365
1366 return benefit;
1367 }
1368 \f
1369 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1370
1371 static rtx
1372 skip_consec_insns (rtx insn, int count)
1373 {
1374 for (; count > 0; count--)
1375 {
1376 rtx temp;
1377
1378 /* If first insn of libcall sequence, skip to end. */
1379 /* Do this at start of loop, since INSN is guaranteed to
1380 be an insn here. */
1381 if (!NOTE_P (insn)
1382 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1383 insn = XEXP (temp, 0);
1384
1385 do
1386 insn = NEXT_INSN (insn);
1387 while (NOTE_P (insn));
1388 }
1389
1390 return insn;
1391 }
1392
1393 /* Ignore any movable whose insn falls within a libcall
1394 which is part of another movable.
1395 We make use of the fact that the movable for the libcall value
1396 was made later and so appears later on the chain. */
1397
1398 static void
1399 ignore_some_movables (struct loop_movables *movables)
1400 {
1401 struct movable *m, *m1;
1402
1403 for (m = movables->head; m; m = m->next)
1404 {
1405 /* Is this a movable for the value of a libcall? */
1406 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1407 if (note)
1408 {
1409 rtx insn;
1410 /* Check for earlier movables inside that range,
1411 and mark them invalid. We cannot use LUIDs here because
1412 insns created by loop.c for prior loops don't have LUIDs.
1413 Rather than reject all such insns from movables, we just
1414 explicitly check each insn in the libcall (since invariant
1415 libcalls aren't that common). */
1416 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1417 for (m1 = movables->head; m1 != m; m1 = m1->next)
1418 if (m1->insn == insn)
1419 m1->done = 1;
1420 }
1421 }
1422 }
1423
1424 /* For each movable insn, see if the reg that it loads dies right into
1425 another conditionally movable insn (i.e. its last use is in that insn).
1426 If so, record that the second insn "forces" the first one,
1427 since the second can be moved only if the first is. */
1428
1429 static void
1430 force_movables (struct loop_movables *movables)
1431 {
1432 struct movable *m, *m1;
1433
1434 for (m1 = movables->head; m1; m1 = m1->next)
1435 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1436 if (!m1->partial && !m1->done)
1437 {
1438 int regno = m1->regno;
1439 for (m = m1->next; m; m = m->next)
1440 /* ??? Could this be a bug? What if CSE caused the
1441 register of M1 to be used after this insn?
1442 Since CSE does not update regno_last_uid,
1443 this insn M->insn might not be where it dies.
1444 But very likely this doesn't matter; what matters is
1445 that M's reg is computed from M1's reg. */
1446 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1447 && !m->done)
1448 break;
1449 if (m != 0 && m->set_src == m1->set_dest
1450 /* If m->consec, m->set_src isn't valid. */
1451 && m->consec == 0)
1452 m = 0;
1453
1454 /* Increase the priority of moving the first insn
1455 since it permits the second to be moved as well.
1456 Likewise for insns already forced by the first insn. */
1457 if (m != 0)
1458 {
1459 struct movable *m2;
1460
1461 m->forces = m1;
1462 for (m2 = m1; m2; m2 = m2->forces)
1463 {
1464 m2->lifetime += m->lifetime;
1465 m2->savings += m->savings;
1466 }
1467 }
1468 }
1469 }
1470 \f
1471 /* Find invariant expressions that are equal and can be combined into
1472 one register. */
1473
1474 static void
1475 combine_movables (struct loop_movables *movables, struct loop_regs *regs)
1476 {
1477 struct movable *m;
1478 char *matched_regs = xmalloc (regs->num);
1479 enum machine_mode mode;
1480
1481 /* Regs that are set more than once are not allowed to match
1482 or be matched. I'm no longer sure why not. */
1483 /* Only pseudo registers are allowed to match or be matched,
1484 since move_movables does not validate the change. */
1485 /* Perhaps testing m->consec_sets would be more appropriate here? */
1486
1487 for (m = movables->head; m; m = m->next)
1488 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1489 && m->regno >= FIRST_PSEUDO_REGISTER
1490 && !m->insert_temp
1491 && !m->partial)
1492 {
1493 struct movable *m1;
1494 int regno = m->regno;
1495
1496 memset (matched_regs, 0, regs->num);
1497 matched_regs[regno] = 1;
1498
1499 /* We want later insns to match the first one. Don't make the first
1500 one match any later ones. So start this loop at m->next. */
1501 for (m1 = m->next; m1; m1 = m1->next)
1502 if (m != m1 && m1->match == 0
1503 && !m1->insert_temp
1504 && regs->array[m1->regno].n_times_set == 1
1505 && m1->regno >= FIRST_PSEUDO_REGISTER
1506 /* A reg used outside the loop mustn't be eliminated. */
1507 && !m1->global
1508 /* A reg used for zero-extending mustn't be eliminated. */
1509 && !m1->partial
1510 && (matched_regs[m1->regno]
1511 ||
1512 (
1513 /* Can combine regs with different modes loaded from the
1514 same constant only if the modes are the same or
1515 if both are integer modes with M wider or the same
1516 width as M1. The check for integer is redundant, but
1517 safe, since the only case of differing destination
1518 modes with equal sources is when both sources are
1519 VOIDmode, i.e., CONST_INT. */
1520 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1521 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1522 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1523 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1524 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1525 /* See if the source of M1 says it matches M. */
1526 && ((REG_P (m1->set_src)
1527 && matched_regs[REGNO (m1->set_src)])
1528 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1529 movables, regs))))
1530 && ((m->dependencies == m1->dependencies)
1531 || rtx_equal_p (m->dependencies, m1->dependencies)))
1532 {
1533 m->lifetime += m1->lifetime;
1534 m->savings += m1->savings;
1535 m1->done = 1;
1536 m1->match = m;
1537 matched_regs[m1->regno] = 1;
1538 }
1539 }
1540
1541 /* Now combine the regs used for zero-extension.
1542 This can be done for those not marked `global'
1543 provided their lives don't overlap. */
1544
1545 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1546 mode = GET_MODE_WIDER_MODE (mode))
1547 {
1548 struct movable *m0 = 0;
1549
1550 /* Combine all the registers for extension from mode MODE.
1551 Don't combine any that are used outside this loop. */
1552 for (m = movables->head; m; m = m->next)
1553 if (m->partial && ! m->global
1554 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1555 {
1556 struct movable *m1;
1557
1558 int first = REGNO_FIRST_LUID (m->regno);
1559 int last = REGNO_LAST_LUID (m->regno);
1560
1561 if (m0 == 0)
1562 {
1563 /* First one: don't check for overlap, just record it. */
1564 m0 = m;
1565 continue;
1566 }
1567
1568 /* Make sure they extend to the same mode.
1569 (Almost always true.) */
1570 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1571 continue;
1572
1573 /* We already have one: check for overlap with those
1574 already combined together. */
1575 for (m1 = movables->head; m1 != m; m1 = m1->next)
1576 if (m1 == m0 || (m1->partial && m1->match == m0))
1577 if (! (REGNO_FIRST_LUID (m1->regno) > last
1578 || REGNO_LAST_LUID (m1->regno) < first))
1579 goto overlap;
1580
1581 /* No overlap: we can combine this with the others. */
1582 m0->lifetime += m->lifetime;
1583 m0->savings += m->savings;
1584 m->done = 1;
1585 m->match = m0;
1586
1587 overlap:
1588 ;
1589 }
1590 }
1591
1592 /* Clean up. */
1593 free (matched_regs);
1594 }
1595
1596 /* Returns the number of movable instructions in LOOP that were not
1597 moved outside the loop. */
1598
1599 static int
1600 num_unmoved_movables (const struct loop *loop)
1601 {
1602 int num = 0;
1603 struct movable *m;
1604
1605 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1606 if (!m->done)
1607 ++num;
1608
1609 return num;
1610 }
1611
1612 \f
1613 /* Return 1 if regs X and Y will become the same if moved. */
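/* That is, both X and Y must be recorded movables, and either they match
the same (nonnull) head movable or one of them matches the other directly. */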
1614
1615 static int
1616 regs_match_p (rtx x, rtx y, struct loop_movables *movables)
1617 {
1618 unsigned int xn = REGNO (x);
1619 unsigned int yn = REGNO (y);
1620 struct movable *mx, *my;
1621
1622 for (mx = movables->head; mx; mx = mx->next)
1623 if (mx->regno == xn)
1624 break;
1625
1626 for (my = movables->head; my; my = my->next)
1627 if (my->regno == yn)
1628 break;
1629
1630 return (mx && my
1631 && ((mx->match == my->match && mx->match != 0)
1632 || mx->match == my
1633 || mx == my->match));
1634 }
1635
1636 /* Return 1 if X and Y are identical-looking rtx's.
1637 This is the Lisp function EQUAL for rtx arguments.
1638
1639 If two registers are matching movables or a movable register and an
1640 equivalent constant, consider them equal. */
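/* For instance, (plus:SI (reg:SI 100) (const_int 4)) and
(plus:SI (reg:SI 101) (const_int 4)) compare equal here when regs 100 and
101 are movables that match each other (see regs_match_p above), even
though plain rtx_equal_p would say they differ.  The register numbers are
of course only illustrative.  */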
1641
1642 static int
1643 rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables,
1644 struct loop_regs *regs)
1645 {
1646 int i;
1647 int j;
1648 struct movable *m;
1649 enum rtx_code code;
1650 const char *fmt;
1651
1652 if (x == y)
1653 return 1;
1654 if (x == 0 || y == 0)
1655 return 0;
1656
1657 code = GET_CODE (x);
1658
1659 /* If we have a register and a constant, they may sometimes be
1660 equal. */
1661 if (REG_P (x) && regs->array[REGNO (x)].set_in_loop == -2
1662 && CONSTANT_P (y))
1663 {
1664 for (m = movables->head; m; m = m->next)
1665 if (m->move_insn && m->regno == REGNO (x)
1666 && rtx_equal_p (m->set_src, y))
1667 return 1;
1668 }
1669 else if (REG_P (y) && regs->array[REGNO (y)].set_in_loop == -2
1670 && CONSTANT_P (x))
1671 {
1672 for (m = movables->head; m; m = m->next)
1673 if (m->move_insn && m->regno == REGNO (y)
1674 && rtx_equal_p (m->set_src, x))
1675 return 1;
1676 }
1677
1678 /* Otherwise, rtx's of different codes cannot be equal. */
1679 if (code != GET_CODE (y))
1680 return 0;
1681
1682 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1683 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1684
1685 if (GET_MODE (x) != GET_MODE (y))
1686 return 0;
1687
1688 /* These three types of rtx's can be compared nonrecursively. */
1689 if (code == REG)
1690 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1691
1692 if (code == LABEL_REF)
1693 return XEXP (x, 0) == XEXP (y, 0);
1694 if (code == SYMBOL_REF)
1695 return XSTR (x, 0) == XSTR (y, 0);
1696
1697 /* Compare the elements. If any pair of corresponding elements
1698 fail to match, return 0 for the whole thing.
1699
1700 fmt = GET_RTX_FORMAT (code);
1701 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1702 {
1703 switch (fmt[i])
1704 {
1705 case 'w':
1706 if (XWINT (x, i) != XWINT (y, i))
1707 return 0;
1708 break;
1709
1710 case 'i':
1711 if (XINT (x, i) != XINT (y, i))
1712 return 0;
1713 break;
1714
1715 case 'E':
1716 /* Two vectors must have the same length. */
1717 if (XVECLEN (x, i) != XVECLEN (y, i))
1718 return 0;
1719
1720 /* And the corresponding elements must match. */
1721 for (j = 0; j < XVECLEN (x, i); j++)
1722 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1723 movables, regs) == 0)
1724 return 0;
1725 break;
1726
1727 case 'e':
1728 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1729 == 0)
1730 return 0;
1731 break;
1732
1733 case 's':
1734 if (strcmp (XSTR (x, i), XSTR (y, i)))
1735 return 0;
1736 break;
1737
1738 case 'u':
1739 /* These are just backpointers, so they don't matter. */
1740 break;
1741
1742 case '0':
1743 break;
1744
1745 /* It is believed that rtx's at this level will never
1746 contain anything but integers and other rtx's,
1747 except for within LABEL_REFs and SYMBOL_REFs. */
1748 default:
1749 gcc_unreachable ();
1750 }
1751 }
1752 return 1;
1753 }
1754 \f
1755 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1756 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1757 references is incremented once for each added note. */
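/* This matters because move_movables re-emits SET_SRC as freshly generated
insns before the loop; without REG_LABEL notes and the LABEL_NUSES updates,
a label referenced only from such a computation could appear unused and be
removed by later passes.  */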
1758
1759 static void
1760 add_label_notes (rtx x, rtx insns)
1761 {
1762 enum rtx_code code = GET_CODE (x);
1763 int i, j;
1764 const char *fmt;
1765 rtx insn;
1766
1767 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1768 {
1769 /* This code used to ignore labels that referred to dispatch tables to
1770 avoid flow generating (slightly) worse code.
1771
1772 We no longer ignore such label references (see LABEL_REF handling in
1773 mark_jump_label for additional information). */
1774 for (insn = insns; insn; insn = NEXT_INSN (insn))
1775 if (reg_mentioned_p (XEXP (x, 0), insn))
1776 {
1777 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1778 REG_NOTES (insn));
1779 if (LABEL_P (XEXP (x, 0)))
1780 LABEL_NUSES (XEXP (x, 0))++;
1781 }
1782 }
1783
1784 fmt = GET_RTX_FORMAT (code);
1785 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1786 {
1787 if (fmt[i] == 'e')
1788 add_label_notes (XEXP (x, i), insns);
1789 else if (fmt[i] == 'E')
1790 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1791 add_label_notes (XVECEXP (x, i, j), insns);
1792 }
1793 }
1794 \f
1795 /* Scan MOVABLES, and move the insns that deserve to be moved.
1796 If two matching movables are combined, replace one reg with the
1797 other throughout. */
1798
1799 static void
1800 move_movables (struct loop *loop, struct loop_movables *movables,
1801 int threshold, int insn_count)
1802 {
1803 struct loop_regs *regs = LOOP_REGS (loop);
1804 int nregs = regs->num;
1805 rtx new_start = 0;
1806 struct movable *m;
1807 rtx p;
1808 rtx loop_start = loop->start;
1809 rtx loop_end = loop->end;
1810 /* Map of pseudo-register replacements to handle combining
1811 when we move several insns that load the same value
1812 into different pseudo-registers. */
1813 rtx *reg_map = xcalloc (nregs, sizeof (rtx));
1814 char *already_moved = xcalloc (nregs, sizeof (char));
1815
1816 for (m = movables->head; m; m = m->next)
1817 {
1818 /* Describe this movable insn. */
1819
1820 if (loop_dump_stream)
1821 {
1822 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1823 INSN_UID (m->insn), m->regno, m->lifetime);
1824 if (m->consec > 0)
1825 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1826 if (m->cond)
1827 fprintf (loop_dump_stream, "cond ");
1828 if (m->force)
1829 fprintf (loop_dump_stream, "force ");
1830 if (m->global)
1831 fprintf (loop_dump_stream, "global ");
1832 if (m->done)
1833 fprintf (loop_dump_stream, "done ");
1834 if (m->move_insn)
1835 fprintf (loop_dump_stream, "move-insn ");
1836 if (m->match)
1837 fprintf (loop_dump_stream, "matches %d ",
1838 INSN_UID (m->match->insn));
1839 if (m->forces)
1840 fprintf (loop_dump_stream, "forces %d ",
1841 INSN_UID (m->forces->insn));
1842 }
1843
1844 /* Ignore the insn if it's already done (it matched something else).
1845 Otherwise, see if it is now safe to move. */
1846
1847 if (!m->done
1848 && (! m->cond
1849 || (1 == loop_invariant_p (loop, m->set_src)
1850 && (m->dependencies == 0
1851 || 1 == loop_invariant_p (loop, m->dependencies))
1852 && (m->consec == 0
1853 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1854 m->consec + 1,
1855 m->insn))))
1856 && (! m->forces || m->forces->done))
1857 {
1858 int regno;
1859 rtx p;
1860 int savings = m->savings;
1861
1862 /* We have an insn that is safe to move.
1863 Compute its desirability. */
1864
1865 p = m->insn;
1866 regno = m->regno;
1867
1868 if (loop_dump_stream)
1869 fprintf (loop_dump_stream, "savings %d ", savings);
1870
1871 if (regs->array[regno].moved_once && loop_dump_stream)
1872 fprintf (loop_dump_stream, "halved since already moved ");
1873
1874 /* An insn MUST be moved if we already moved something else
1875 which is safe only if this one is moved too: that is,
1876 if already_moved[REGNO] is nonzero. */
1877
1878 /* An insn is desirable to move if the new lifetime of the
1879 register is no more than THRESHOLD times the old lifetime.
1880 If it's not desirable, it means the loop is so big
1881 that moving won't speed things up much,
1882 and it is liable to make register usage worse. */
1883
1884 /* It is also desirable to move if it can be moved at no
1885 extra cost because something else was already moved. */
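/* In the test below, threshold * savings * lifetime is compared against
insn_count (against twice insn_count if this register has already been
moved out of some other loop).  For example, with savings 1 and lifetime
10, the insn is hoisted only when the loop body contains at most
10 * threshold insns.  */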
1886
1887 if (already_moved[regno]
1888 || flag_move_all_movables
1889 || (threshold * savings * m->lifetime) >=
1890 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1891 || (m->forces && m->forces->done
1892 && regs->array[m->forces->regno].n_times_set == 1))
1893 {
1894 int count;
1895 struct movable *m1;
1896 rtx first = NULL_RTX;
1897 rtx newreg = NULL_RTX;
1898
1899 if (m->insert_temp)
1900 newreg = gen_reg_rtx (GET_MODE (m->set_dest));
1901
1902 /* Now move the insns that set the reg. */
1903
1904 if (m->partial && m->match)
1905 {
1906 rtx newpat, i1;
1907 rtx r1, r2;
1908 /* Find the end of this chain of matching regs.
1909 Thus, we load each reg in the chain from that one reg.
1910 And that reg is loaded with 0 directly,
1911 since it has ->match == 0. */
1912 for (m1 = m; m1->match; m1 = m1->match);
1913 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1914 SET_DEST (PATTERN (m1->insn)));
1915 i1 = loop_insn_hoist (loop, newpat);
1916
1917 /* Mark the moved, invariant reg as being allowed to
1918 share a hard reg with the other matching invariant. */
1919 REG_NOTES (i1) = REG_NOTES (m->insn);
1920 r1 = SET_DEST (PATTERN (m->insn));
1921 r2 = SET_DEST (PATTERN (m1->insn));
1922 regs_may_share
1923 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1924 gen_rtx_EXPR_LIST (VOIDmode, r2,
1925 regs_may_share));
1926 delete_insn (m->insn);
1927
1928 if (new_start == 0)
1929 new_start = i1;
1930
1931 if (loop_dump_stream)
1932 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1933 }
1934 /* If we are to re-generate the item being moved with a
1935 new move insn, first delete what we have and then emit
1936 the move insn before the loop. */
1937 else if (m->move_insn)
1938 {
1939 rtx i1, temp, seq;
1940
1941 for (count = m->consec; count >= 0; count--)
1942 {
1943 if (!NOTE_P (p))
1944 {
1945 /* If this is the first insn of a library call
1946 sequence, something is very wrong. */
1947 gcc_assert (!find_reg_note (p, REG_LIBCALL,
1948 NULL_RTX));
1949
1950 /* If this is the last insn of a libcall sequence,
1951 then delete every insn in the sequence except
1952 the last. The last insn is handled in the
1953 normal manner. */
1954 temp = find_reg_note (p, REG_RETVAL, NULL_RTX);
1955 if (temp)
1956 {
1957 temp = XEXP (temp, 0);
1958 while (temp != p)
1959 temp = delete_insn (temp);
1960 }
1961 }
1962
1963 temp = p;
1964 p = delete_insn (p);
1965
1966 /* simplify_giv_expr expects that it can walk the insns
1967 at m->insn forwards and see this old sequence we are
1968 tossing here. delete_insn does preserve the next
1969 pointers, but when we skip over a NOTE we must fix
1970 it up. Otherwise that code walks into the non-deleted
1971 insn stream. */
1972 while (p && NOTE_P (p))
1973 p = NEXT_INSN (temp) = NEXT_INSN (p);
1974
1975 if (m->insert_temp)
1976 {
1977 /* Replace the original insn with a move from
1978 our newly created temp. */
1979 start_sequence ();
1980 emit_move_insn (m->set_dest, newreg);
1981 seq = get_insns ();
1982 end_sequence ();
1983 emit_insn_before (seq, p);
1984 }
1985 }
1986
1987 start_sequence ();
1988 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
1989 m->set_src);
1990 seq = get_insns ();
1991 end_sequence ();
1992
1993 add_label_notes (m->set_src, seq);
1994
1995 i1 = loop_insn_hoist (loop, seq);
1996 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1997 set_unique_reg_note (i1,
1998 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1999 m->set_src);
2000
2001 if (loop_dump_stream)
2002 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
2003
2004 /* The more regs we move, the less we like moving them. */
2005 threshold -= 3;
2006 }
2007 else
2008 {
2009 for (count = m->consec; count >= 0; count--)
2010 {
2011 rtx i1, temp;
2012
2013 /* If first insn of libcall sequence, skip to end. */
2014 /* Do this at start of loop, since p is guaranteed to
2015 be an insn here. */
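/* The REG_LIBCALL note on the first insn of a libcall sequence points at
the last insn of the sequence, and the REG_RETVAL note on the last insn
points back at the first; those pointers are what we follow here and in
the REG_RETVAL case below.  */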
2016 if (!NOTE_P (p)
2017 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
2018 p = XEXP (temp, 0);
2019
2020 /* If last insn of libcall sequence, move all
2021 insns except the last before the loop. The last
2022 insn is handled in the normal manner. */
2023 if (!NOTE_P (p)
2024 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
2025 {
2026 rtx fn_address = 0;
2027 rtx fn_reg = 0;
2028 rtx fn_address_insn = 0;
2029
2030 first = 0;
2031 for (temp = XEXP (temp, 0); temp != p;
2032 temp = NEXT_INSN (temp))
2033 {
2034 rtx body;
2035 rtx n;
2036 rtx next;
2037
2038 if (NOTE_P (temp))
2039 continue;
2040
2041 body = PATTERN (temp);
2042
2043 /* Find the next insn after TEMP,
2044 not counting USE or NOTE insns. */
2045 for (next = NEXT_INSN (temp); next != p;
2046 next = NEXT_INSN (next))
2047 if (! (NONJUMP_INSN_P (next)
2048 && GET_CODE (PATTERN (next)) == USE)
2049 && !NOTE_P (next))
2050 break;
2051
2052 /* If that is the call, this may be the insn
2053 that loads the function address.
2054
2055 Extract the function address from the insn
2056 that loads it into a register.
2057 If this insn was cse'd, we get incorrect code.
2058
2059 So emit a new move insn that copies the
2060 function address into the register that the
2061 call insn will use. flow.c will delete any
2062 redundant stores that we have created. */
2063 if (CALL_P (next)
2064 && GET_CODE (body) == SET
2065 && REG_P (SET_DEST (body))
2066 && (n = find_reg_note (temp, REG_EQUAL,
2067 NULL_RTX)))
2068 {
2069 fn_reg = SET_SRC (body);
2070 if (!REG_P (fn_reg))
2071 fn_reg = SET_DEST (body);
2072 fn_address = XEXP (n, 0);
2073 fn_address_insn = temp;
2074 }
2075 /* We have the call insn.
2076 If it uses the register we suspect it might,
2077 load it with the correct address directly. */
2078 if (CALL_P (temp)
2079 && fn_address != 0
2080 && reg_referenced_p (fn_reg, body))
2081 loop_insn_emit_after (loop, 0, fn_address_insn,
2082 gen_move_insn
2083 (fn_reg, fn_address));
2084
2085 if (CALL_P (temp))
2086 {
2087 i1 = loop_call_insn_hoist (loop, body);
2088 /* Because the USAGE information potentially
2089 contains objects other than hard registers
2090 we need to copy it. */
2091 if (CALL_INSN_FUNCTION_USAGE (temp))
2092 CALL_INSN_FUNCTION_USAGE (i1)
2093 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2094 }
2095 else
2096 i1 = loop_insn_hoist (loop, body);
2097 if (first == 0)
2098 first = i1;
2099 if (temp == fn_address_insn)
2100 fn_address_insn = i1;
2101 REG_NOTES (i1) = REG_NOTES (temp);
2102 REG_NOTES (temp) = NULL;
2103 delete_insn (temp);
2104 }
2105 if (new_start == 0)
2106 new_start = first;
2107 }
2108 if (m->savemode != VOIDmode)
2109 {
2110 /* P sets REG to zero; but we should clear only
2111 the bits that are not covered by the mode
2112 m->savemode. */
2113 rtx reg = m->set_dest;
2114 rtx sequence;
2115 rtx tem;
2116
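/* The constant below is a mask with the low GET_MODE_BITSIZE (m->savemode)
bits set, so the AND clears only the high part of REG and leaves the bits
covered by m->savemode untouched.  */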
2117 start_sequence ();
2118 tem = expand_simple_binop
2119 (GET_MODE (reg), AND, reg,
2120 GEN_INT ((((HOST_WIDE_INT) 1
2121 << GET_MODE_BITSIZE (m->savemode)))
2122 - 1),
2123 reg, 1, OPTAB_LIB_WIDEN);
2124 gcc_assert (tem != 0);
2125 if (tem != reg)
2126 emit_move_insn (reg, tem);
2127 sequence = get_insns ();
2128 end_sequence ();
2129 i1 = loop_insn_hoist (loop, sequence);
2130 }
2131 else if (CALL_P (p))
2132 {
2133 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2134 /* Because the USAGE information potentially
2135 contains objects other than hard registers
2136 we need to copy it. */
2137 if (CALL_INSN_FUNCTION_USAGE (p))
2138 CALL_INSN_FUNCTION_USAGE (i1)
2139 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2140 }
2141 else if (count == m->consec && m->move_insn_first)
2142 {
2143 rtx seq;
2144 /* The SET_SRC might not be invariant, so we must
2145 use the REG_EQUAL note. */
2146 start_sequence ();
2147 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
2148 m->set_src);
2149 seq = get_insns ();
2150 end_sequence ();
2151
2152 add_label_notes (m->set_src, seq);
2153
2154 i1 = loop_insn_hoist (loop, seq);
2155 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2156 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2157 : REG_EQUAL, m->set_src);
2158 }
2159 else if (m->insert_temp)
2160 {
2161 rtx *reg_map2 = xcalloc (REGNO (newreg),
2162 sizeof (rtx));
2163 reg_map2[m->regno] = newreg;
2164
2165 i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
2166 replace_regs (i1, reg_map2, REGNO (newreg), 1);
2167 free (reg_map2);
2168 }
2169 else
2170 i1 = loop_insn_hoist (loop, PATTERN (p));
2171
2172 if (REG_NOTES (i1) == 0)
2173 {
2174 REG_NOTES (i1) = REG_NOTES (p);
2175 REG_NOTES (p) = NULL;
2176
2177 /* If there is a REG_EQUAL note present whose value
2178 is not loop invariant, then delete it, since it
2179 may cause problems with later optimization passes.
2180 It is possible for cse to create such notes
2181 like this as a result of record_jump_cond. */
2182
2183 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2184 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2185 remove_note (i1, temp);
2186 }
2187
2188 if (new_start == 0)
2189 new_start = i1;
2190
2191 if (loop_dump_stream)
2192 fprintf (loop_dump_stream, " moved to %d",
2193 INSN_UID (i1));
2194
2195 /* If library call, now fix the REG_NOTES that contain
2196 insn pointers, namely REG_LIBCALL on FIRST
2197 and REG_RETVAL on I1. */
2198 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2199 {
2200 XEXP (temp, 0) = first;
2201 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2202 XEXP (temp, 0) = i1;
2203 }
2204
2205 temp = p;
2206 delete_insn (p);
2207 p = NEXT_INSN (p);
2208
2209 /* simplify_giv_expr expects that it can walk the insns
2210 at m->insn forwards and see this old sequence we are
2211 tossing here. delete_insn does preserve the next
2212 pointers, but when we skip over a NOTE we must fix
2213 it up. Otherwise that code walks into the non-deleted
2214 insn stream. */
2215 while (p && NOTE_P (p))
2216 p = NEXT_INSN (temp) = NEXT_INSN (p);
2217
2218 if (m->insert_temp)
2219 {
2220 rtx seq;
2221 /* Replace the original insn with a move from
2222 our newly created temp. */
2223 start_sequence ();
2224 emit_move_insn (m->set_dest, newreg);
2225 seq = get_insns ();
2226 end_sequence ();
2227 emit_insn_before (seq, p);
2228 }
2229 }
2230
2231 /* The more regs we move, the less we like moving them. */
2232 threshold -= 3;
2233 }
2234
2235 m->done = 1;
2236
2237 if (!m->insert_temp)
2238 {
2239 /* Any other movable that loads the same register
2240 MUST be moved. */
2241 already_moved[regno] = 1;
2242
2243 /* This reg has been moved out of one loop. */
2244 regs->array[regno].moved_once = 1;
2245
2246 /* The reg set here is now invariant. */
2247 if (! m->partial)
2248 {
2249 int i;
2250 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2251 regs->array[regno+i].set_in_loop = 0;
2252 }
2253
2254 /* Change the length-of-life info for the register
2255 to say it lives at least the full length of this loop.
2256 This will help guide optimizations in outer loops. */
2257
2258 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2259 /* This is the old insn before all the moved insns.
2260 We can't use the moved insn because it is out of range
2261 in uid_luid. Only the old insns have luids. */
2262 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2263 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2264 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2265 }
2266
2267 /* Combine with this moved insn any other matching movables. */
2268
2269 if (! m->partial)
2270 for (m1 = movables->head; m1; m1 = m1->next)
2271 if (m1->match == m)
2272 {
2273 rtx temp;
2274
2275 /* Schedule the reg loaded by M1
2276 for replacement so that it shares the reg of M.
2277 If the modes differ (only possible in restricted
2278 circumstances), make a SUBREG.
2279
2280 Note this assumes that the target dependent files
2281 treat REG and SUBREG equally, including within
2282 GO_IF_LEGITIMATE_ADDRESS and in all the
2283 predicates since we never verify that replacing the
2284 original register with a SUBREG results in a
2285 recognizable insn. */
2286 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2287 reg_map[m1->regno] = m->set_dest;
2288 else
2289 reg_map[m1->regno]
2290 = gen_lowpart_common (GET_MODE (m1->set_dest),
2291 m->set_dest);
2292
2293 /* Get rid of the matching insn
2294 and prevent further processing of it. */
2295 m1->done = 1;
2296
2297 /* If library call, delete all insns. */
2298 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2299 NULL_RTX)))
2300 delete_insn_chain (XEXP (temp, 0), m1->insn);
2301 else
2302 delete_insn (m1->insn);
2303
2304 /* Any other movable that loads the same register
2305 MUST be moved. */
2306 already_moved[m1->regno] = 1;
2307
2308 /* The reg merged here is now invariant,
2309 if the reg it matches is invariant. */
2310 if (! m->partial)
2311 {
2312 int i;
2313 for (i = 0;
2314 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2315 i++)
2316 regs->array[m1->regno+i].set_in_loop = 0;
2317 }
2318 }
2319 }
2320 else if (loop_dump_stream)
2321 fprintf (loop_dump_stream, "not desirable");
2322 }
2323 else if (loop_dump_stream && !m->match)
2324 fprintf (loop_dump_stream, "not safe");
2325
2326 if (loop_dump_stream)
2327 fprintf (loop_dump_stream, "\n");
2328 }
2329
2330 if (new_start == 0)
2331 new_start = loop_start;
2332
2333 /* Go through all the instructions in the loop, making
2334 all the register substitutions scheduled in REG_MAP. */
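/* Resetting INSN_CODE to -1 below forces each modified insn to be
re-recognized the next time its insn code is needed.  */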
2335 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2336 if (INSN_P (p))
2337 {
2338 replace_regs (PATTERN (p), reg_map, nregs, 0);
2339 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2340 INSN_CODE (p) = -1;
2341 }
2342
2343 /* Clean up. */
2344 free (reg_map);
2345 free (already_moved);
2346 }
2347
2348
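/* Append the movable M to the end of the list in MOVABLES.  */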
2349 static void
2350 loop_movables_add (struct loop_movables *movables, struct movable *m)
2351 {
2352 if (movables->head == 0)
2353 movables->head = m;
2354 else
2355 movables->last->next = m;
2356 movables->last = m;
2357 }
2358
2359
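/* Free all the movables recorded in MOVABLES.  */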
2360 static void
2361 loop_movables_free (struct loop_movables *movables)
2362 {
2363 struct movable *m;
2364 struct movable *m_next;
2365
2366 for (m = movables->head; m; m = m_next)
2367 {
2368 m_next = m->next;
2369 free (m);
2370 }
2371 }
2372 \f
2373 #if 0
2374 /* Scan X and replace the address of any MEM in it with ADDR.
2375 REG is the address that MEM should have before the replacement. */
2376
2377 static void
2378 replace_call_address (rtx x, rtx reg, rtx addr)
2379 {
2380 enum rtx_code code;
2381 int i;
2382 const char *fmt;
2383
2384 if (x == 0)
2385 return;
2386 code = GET_CODE (x);
2387 switch (code)
2388 {
2389 case PC:
2390 case CC0:
2391 case CONST_INT:
2392 case CONST_DOUBLE:
2393 case CONST:
2394 case SYMBOL_REF:
2395 case LABEL_REF:
2396 case REG:
2397 return;
2398
2399 case SET:
2400 /* Short cut for very common case. */
2401 replace_call_address (XEXP (x, 1), reg, addr);
2402 return;
2403
2404 case CALL:
2405 /* Short cut for very common case. */
2406 replace_call_address (XEXP (x, 0), reg, addr);
2407 return;
2408
2409 case MEM:
2410 /* If this MEM uses a reg other than the one we expected,
2411 something is wrong. */
2412 gcc_assert (XEXP (x, 0) == reg);
2413 XEXP (x, 0) = addr;
2414 return;
2415
2416 default:
2417 break;
2418 }
2419
2420 fmt = GET_RTX_FORMAT (code);
2421 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2422 {
2423 if (fmt[i] == 'e')
2424 replace_call_address (XEXP (x, i), reg, addr);
2425 else if (fmt[i] == 'E')
2426 {
2427 int j;
2428 for (j = 0; j < XVECLEN (x, i); j++)
2429 replace_call_address (XVECEXP (x, i, j), reg, addr);
2430 }
2431 }
2432 }
2433 #endif
2434 \f
2435 /* Return the number of memory refs to addresses that vary
2436 in the rtx X. */
2437
2438 static int
2439 count_nonfixed_reads (const struct loop *loop, rtx x)
2440 {
2441 enum rtx_code code;
2442 int i;
2443 const char *fmt;
2444 int value;
2445
2446 if (x == 0)
2447 return 0;
2448
2449 code = GET_CODE (x);
2450 switch (code)
2451 {
2452 case PC:
2453 case CC0:
2454 case CONST_INT:
2455 case CONST_DOUBLE:
2456 case CONST:
2457 case SYMBOL_REF:
2458 case LABEL_REF:
2459 case REG:
2460 return 0;
2461
2462 case MEM:
2463 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2464 + count_nonfixed_reads (loop, XEXP (x, 0)));
2465
2466 default:
2467 break;
2468 }
2469
2470 value = 0;
2471 fmt = GET_RTX_FORMAT (code);
2472 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2473 {
2474 if (fmt[i] == 'e')
2475 value += count_nonfixed_reads (loop, XEXP (x, i));
2476 if (fmt[i] == 'E')
2477 {
2478 int j;
2479 for (j = 0; j < XVECLEN (x, i); j++)
2480 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2481 }
2482 }
2483 return value;
2484 }
2485 \f
2486 /* Scan a loop setting the elements `loops_enclosed',
2487 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2488 `unknown_address_altered', `unknown_constant_address_altered', and
2489 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2490 list `store_mems' in LOOP. */
2491
2492 static void
2493 prescan_loop (struct loop *loop)
2494 {
2495 int level = 1;
2496 rtx insn;
2497 struct loop_info *loop_info = LOOP_INFO (loop);
2498 rtx start = loop->start;
2499 rtx end = loop->end;
2500 /* The label after END. Jumping here is just like falling off the
2501 end of the loop. We use next_nonnote_insn instead of next_label
2502 as a hedge against the (pathological) case where some actual insn
2503 might end up between the two. */
2504 rtx exit_target = next_nonnote_insn (end);
2505
2506 loop_info->has_indirect_jump = indirect_jump_in_function;
2507 loop_info->pre_header_has_call = 0;
2508 loop_info->has_call = 0;
2509 loop_info->has_nonconst_call = 0;
2510 loop_info->has_prefetch = 0;
2511 loop_info->has_volatile = 0;
2512 loop_info->has_tablejump = 0;
2513 loop_info->has_multiple_exit_targets = 0;
2514 loop->level = 1;
2515
2516 loop_info->unknown_address_altered = 0;
2517 loop_info->unknown_constant_address_altered = 0;
2518 loop_info->store_mems = NULL_RTX;
2519 loop_info->first_loop_store_insn = NULL_RTX;
2520 loop_info->mems_idx = 0;
2521 loop_info->num_mem_sets = 0;
2522 /* If loop optimization runs twice, this was set on the first pass for the second. */
2523 loop_info->preconditioned = NOTE_PRECONDITIONED (end);
2524
2525 for (insn = start; insn && !LABEL_P (insn);
2526 insn = PREV_INSN (insn))
2527 {
2528 if (CALL_P (insn))
2529 {
2530 loop_info->pre_header_has_call = 1;
2531 break;
2532 }
2533 }
2534
2535 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2536 insn = NEXT_INSN (insn))
2537 {
2538 switch (GET_CODE (insn))
2539 {
2540 case NOTE:
2541 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2542 {
2543 ++level;
2544 /* Count number of loops contained in this one. */
2545 loop->level++;
2546 }
2547 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2548 --level;
2549 break;
2550
2551 case CALL_INSN:
2552 if (! CONST_OR_PURE_CALL_P (insn))
2553 {
2554 loop_info->unknown_address_altered = 1;
2555 loop_info->has_nonconst_call = 1;
2556 }
2557 else if (pure_call_p (insn))
2558 loop_info->has_nonconst_call = 1;
2559 loop_info->has_call = 1;
2560 if (can_throw_internal (insn))
2561 loop_info->has_multiple_exit_targets = 1;
2562 break;
2563
2564 case JUMP_INSN:
2565 if (! loop_info->has_multiple_exit_targets)
2566 {
2567 rtx set = pc_set (insn);
2568
2569 if (set)
2570 {
2571 rtx src = SET_SRC (set);
2572 rtx label1, label2;
2573
2574 if (GET_CODE (src) == IF_THEN_ELSE)
2575 {
2576 label1 = XEXP (src, 1);
2577 label2 = XEXP (src, 2);
2578 }
2579 else
2580 {
2581 label1 = src;
2582 label2 = NULL_RTX;
2583 }
2584
2585 do
2586 {
2587 if (label1 && label1 != pc_rtx)
2588 {
2589 if (GET_CODE (label1) != LABEL_REF)
2590 {
2591 /* Something tricky. */
2592 loop_info->has_multiple_exit_targets = 1;
2593 break;
2594 }
2595 else if (XEXP (label1, 0) != exit_target
2596 && LABEL_OUTSIDE_LOOP_P (label1))
2597 {
2598 /* A jump outside the current loop. */
2599 loop_info->has_multiple_exit_targets = 1;
2600 break;
2601 }
2602 }
2603
2604 label1 = label2;
2605 label2 = NULL_RTX;
2606 }
2607 while (label1);
2608 }
2609 else
2610 {
2611 /* A return, or something tricky. */
2612 loop_info->has_multiple_exit_targets = 1;
2613 }
2614 }
2615 /* Fall through. */
2616
2617 case INSN:
2618 if (volatile_refs_p (PATTERN (insn)))
2619 loop_info->has_volatile = 1;
2620
2621 if (JUMP_P (insn)
2622 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2623 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2624 loop_info->has_tablejump = 1;
2625
2626 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2627 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2628 loop_info->first_loop_store_insn = insn;
2629
2630 if (flag_non_call_exceptions && can_throw_internal (insn))
2631 loop_info->has_multiple_exit_targets = 1;
2632 break;
2633
2634 default:
2635 break;
2636 }
2637 }
2638
2639 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2640 if (/* An exception thrown by a called function might land us
2641 anywhere. */
2642 ! loop_info->has_nonconst_call
2643 /* We don't want loads for MEMs moved to a location before the
2644 one at which their stack memory becomes allocated. (Note
2645 that this is not a problem for malloc, etc., since those
2646 require actual function calls.) */
2647 && ! current_function_calls_alloca
2648 /* There are ways to leave the loop other than falling off the
2649 end. */
2650 && ! loop_info->has_multiple_exit_targets)
2651 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2652 insn = NEXT_INSN (insn))
2653 for_each_rtx (&insn, insert_loop_mem, loop_info);
2654
2655 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2656 that loop_invariant_p and load_mems can use true_dependence
2657 to determine what is really clobbered. */
2658 if (loop_info->unknown_address_altered)
2659 {
2660 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2661
2662 loop_info->store_mems
2663 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2664 }
2665 if (loop_info->unknown_constant_address_altered)
2666 {
2667 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2668 MEM_READONLY_P (mem) = 1;
2669 loop_info->store_mems
2670 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2671 }
2672 }
2673 \f
2674 /* Invalidate all loops containing LABEL. */
2675
2676 static void
2677 invalidate_loops_containing_label (rtx label)
2678 {
2679 struct loop *loop;
2680 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
2681 loop->invalid = 1;
2682 }
2683
2684 /* Scan the function looking for loops. Record the start and end of each loop.
2685 Also mark as invalid loops any loops that contain a setjmp or are branched
2686 to from outside the loop. */
2687
2688 static void
2689 find_and_verify_loops (rtx f, struct loops *loops)
2690 {
2691 rtx insn;
2692 rtx label;
2693 int num_loops;
2694 struct loop *current_loop;
2695 struct loop *next_loop;
2696 struct loop *loop;
2697
2698 num_loops = loops->num;
2699
2700 compute_luids (f, NULL_RTX, 0);
2701
2702 /* If there are jumps to undefined labels,
2703 treat them as jumps out of any/all loops.
2704 This also avoids writing past end of tables when there are no loops. */
2705 uid_loop[0] = NULL;
2706
2707 /* Find boundaries of loops, mark which loops are contained within
2708 loops, and invalidate loops that have setjmp. */
2709
2710 num_loops = 0;
2711 current_loop = NULL;
2712 for (insn = f; insn; insn = NEXT_INSN (insn))
2713 {
2714 if (NOTE_P (insn))
2715 switch (NOTE_LINE_NUMBER (insn))
2716 {
2717 case NOTE_INSN_LOOP_BEG:
2718 next_loop = loops->array + num_loops;
2719 next_loop->num = num_loops;
2720 num_loops++;
2721 next_loop->start = insn;
2722 next_loop->outer = current_loop;
2723 current_loop = next_loop;
2724 break;
2725
2726 case NOTE_INSN_LOOP_END:
2727 gcc_assert (current_loop);
2728
2729 current_loop->end = insn;
2730 current_loop = current_loop->outer;
2731 break;
2732
2733 default:
2734 break;
2735 }
2736
2737 if (CALL_P (insn)
2738 && find_reg_note (insn, REG_SETJMP, NULL))
2739 {
2740 /* In this case, we must invalidate our current loop and any
2741 enclosing loop. */
2742 for (loop = current_loop; loop; loop = loop->outer)
2743 {
2744 loop->invalid = 1;
2745 if (loop_dump_stream)
2746 fprintf (loop_dump_stream,
2747 "\nLoop at %d ignored due to setjmp.\n",
2748 INSN_UID (loop->start));
2749 }
2750 }
2751
2752 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2753 enclosing loop, but this doesn't matter. */
2754 uid_loop[INSN_UID (insn)] = current_loop;
2755 }
2756
2757 /* Any loop containing a label used in an initializer must be invalidated,
2758 because it can be jumped into from anywhere. */
2759 for (label = forced_labels; label; label = XEXP (label, 1))
2760 invalidate_loops_containing_label (XEXP (label, 0));
2761
2762 /* Any loop containing a label used for an exception handler must be
2763 invalidated, because it can be jumped into from anywhere. */
2764 for_each_eh_label (invalidate_loops_containing_label);
2765
2766 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2767 loop that it is not contained within, that loop is marked invalid.
2768 If any INSN or CALL_INSN uses a label's address, then the loop containing
2769 that label is marked invalid, because it could be jumped into from
2770 anywhere.
2771
2772 Also look for blocks of code ending in an unconditional branch that
2773 exits the loop. If such a block is surrounded by a conditional
2774 branch around the block, move the block elsewhere (see below) and
2775 invert the jump to point to the code block. This may eliminate a
2776 label in our loop and will simplify processing by both us and a
2777 possible second cse pass. */
2778
2779 for (insn = f; insn; insn = NEXT_INSN (insn))
2780 if (INSN_P (insn))
2781 {
2782 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2783
2784 if (NONJUMP_INSN_P (insn) || CALL_P (insn))
2785 {
2786 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2787 if (note)
2788 invalidate_loops_containing_label (XEXP (note, 0));
2789 }
2790
2791 if (!JUMP_P (insn))
2792 continue;
2793
2794 mark_loop_jump (PATTERN (insn), this_loop);
2795
2796 /* See if this is an unconditional branch outside the loop. */
2797 if (this_loop
2798 && (GET_CODE (PATTERN (insn)) == RETURN
2799 || (any_uncondjump_p (insn)
2800 && onlyjump_p (insn)
2801 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2802 != this_loop)))
2803 && get_max_uid () < max_uid_for_loop)
2804 {
2805 rtx p;
2806 rtx our_next = next_real_insn (insn);
2807 rtx last_insn_to_move = NEXT_INSN (insn);
2808 struct loop *dest_loop;
2809 struct loop *outer_loop = NULL;
2810
2811 /* Go backwards until we reach the start of the loop, a label,
2812 or a JUMP_INSN. */
2813 for (p = PREV_INSN (insn);
2814 !LABEL_P (p)
2815 && ! (NOTE_P (p)
2816 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2817 && !JUMP_P (p);
2818 p = PREV_INSN (p))
2819 ;
2820
2821 /* Check for the case where we have a jump to an inner nested
2822 loop, and do not perform the optimization in that case. */
2823
2824 if (JUMP_LABEL (insn))
2825 {
2826 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2827 if (dest_loop)
2828 {
2829 for (outer_loop = dest_loop; outer_loop;
2830 outer_loop = outer_loop->outer)
2831 if (outer_loop == this_loop)
2832 break;
2833 }
2834 }
2835
2836 /* Make sure that the target of P is within the current loop. */
2837
2838 if (JUMP_P (p) && JUMP_LABEL (p)
2839 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2840 outer_loop = this_loop;
2841
2842 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2843 we have a block of code to try to move.
2844
2845 We look backward and then forward from the target of INSN
2846 to find a BARRIER at the same loop depth as the target.
2847 If we find such a BARRIER, we make a new label for the start
2848 of the block, invert the jump in P and point it to that label,
2849 and move the block of code to the spot we found. */
2850
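/* Schematically, the transformation below turns

     p:          if (cond) goto cond_label;
                 ... block of insns ...
     insn:       goto <label outside the loop>;
     cond_label: ... rest of loop ...
 
   into
 
     p:          if (! cond) goto new_label;
     cond_label: ... rest of loop ...
 
   with the block re-emitted after a BARRIER outside the loop:
 
     new_label:  ... block of insns ...
     insn:       goto <label outside the loop>;
 
   so that the rarely executed exit path no longer occupies space inside
   the loop body.  (The layout shown is only illustrative.)  */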
2851 if (! outer_loop
2852 && JUMP_P (p)
2853 && JUMP_LABEL (p) != 0
2854 /* Just ignore jumps to labels that were never emitted.
2855 These always indicate compilation errors. */
2856 && INSN_UID (JUMP_LABEL (p)) != 0
2857 && any_condjump_p (p) && onlyjump_p (p)
2858 && next_real_insn (JUMP_LABEL (p)) == our_next
2859 /* If it's not safe to move the sequence, then we
2860 mustn't try. */
2861 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2862 &last_insn_to_move))
2863 {
2864 rtx target
2865 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2866 struct loop *target_loop = uid_loop[INSN_UID (target)];
2867 rtx loc, loc2;
2868 rtx tmp;
2869
2870 /* Search for possible garbage past the conditional jumps
2871 and look for the last barrier. */
2872 for (tmp = last_insn_to_move;
2873 tmp && !LABEL_P (tmp); tmp = NEXT_INSN (tmp))
2874 if (BARRIER_P (tmp))
2875 last_insn_to_move = tmp;
2876
2877 for (loc = target; loc; loc = PREV_INSN (loc))
2878 if (BARRIER_P (loc)
2879 /* Don't move things inside a tablejump. */
2880 && ((loc2 = next_nonnote_insn (loc)) == 0
2881 || !LABEL_P (loc2)
2882 || (loc2 = next_nonnote_insn (loc2)) == 0
2883 || !JUMP_P (loc2)
2884 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2885 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2886 && uid_loop[INSN_UID (loc)] == target_loop)
2887 break;
2888
2889 if (loc == 0)
2890 for (loc = target; loc; loc = NEXT_INSN (loc))
2891 if (BARRIER_P (loc)
2892 /* Don't move things inside a tablejump. */
2893 && ((loc2 = next_nonnote_insn (loc)) == 0
2894 || !LABEL_P (loc2)
2895 || (loc2 = next_nonnote_insn (loc2)) == 0
2896 || !JUMP_P (loc2)
2897 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2898 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2899 && uid_loop[INSN_UID (loc)] == target_loop)
2900 break;
2901
2902 if (loc)
2903 {
2904 rtx cond_label = JUMP_LABEL (p);
2905 rtx new_label = get_label_after (p);
2906
2907 /* Ensure our label doesn't go away. */
2908 LABEL_NUSES (cond_label)++;
2909
2910 /* Verify that uid_loop is large enough and that
2911 we can invert P. */
2912 if (invert_jump (p, new_label, 1))
2913 {
2914 rtx q, r;
2915 bool fail;
2916
2917 /* If no suitable BARRIER was found, create a suitable
2918 one before TARGET. Since TARGET is a fall through
2919 path, we'll need to insert a jump around our block
2920 and add a BARRIER before TARGET.
2921
2922 This creates an extra unconditional jump outside
2923 the loop. However, the benefits of removing rarely
2924 executed instructions from inside the loop usually
2925 outweighs the cost of the extra unconditional jump
2926 outside the loop. */
2927 if (loc == 0)
2928 {
2929 rtx temp;
2930
2931 temp = gen_jump (JUMP_LABEL (insn));
2932 temp = emit_jump_insn_before (temp, target);
2933 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2934 LABEL_NUSES (JUMP_LABEL (insn))++;
2935 loc = emit_barrier_before (target);
2936 }
2937
2938 /* Include the BARRIER after INSN and copy the
2939 block after LOC. */
2940 fail = squeeze_notes (&new_label, &last_insn_to_move);
2941 gcc_assert (!fail);
2942 reorder_insns (new_label, last_insn_to_move, loc);
2943
2944 /* All those insns are now in TARGET_LOOP. */
2945 for (q = new_label;
2946 q != NEXT_INSN (last_insn_to_move);
2947 q = NEXT_INSN (q))
2948 uid_loop[INSN_UID (q)] = target_loop;
2949
2950 /* The label jumped to by INSN is no longer a loop
2951 exit. Unless INSN does not have a label (e.g.,
2952 it is a RETURN insn), search loop->exit_labels
2953 to find its label_ref, and remove it. Also turn
2954 off LABEL_OUTSIDE_LOOP_P bit. */
2955 if (JUMP_LABEL (insn))
2956 {
2957 for (q = 0, r = this_loop->exit_labels;
2958 r;
2959 q = r, r = LABEL_NEXTREF (r))
2960 if (XEXP (r, 0) == JUMP_LABEL (insn))
2961 {
2962 LABEL_OUTSIDE_LOOP_P (r) = 0;
2963 if (q)
2964 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2965 else
2966 this_loop->exit_labels = LABEL_NEXTREF (r);
2967 break;
2968 }
2969
2970 for (loop = this_loop; loop && loop != target_loop;
2971 loop = loop->outer)
2972 loop->exit_count--;
2973
2974 /* If we didn't find it, then something is
2975 wrong. */
2976 gcc_assert (r);
2977 }
2978
2979 /* P is now a jump outside the loop, so it must be put
2980 in loop->exit_labels, and marked as such.
2981 The easiest way to do this is to just call
2982 mark_loop_jump again for P. */
2983 mark_loop_jump (PATTERN (p), this_loop);
2984
2985 /* If INSN now jumps to the insn after it,
2986 delete INSN. */
2987 if (JUMP_LABEL (insn) != 0
2988 && (next_real_insn (JUMP_LABEL (insn))
2989 == next_real_insn (insn)))
2990 delete_related_insns (insn);
2991 }
2992
2993 /* Continue the loop after where the conditional
2994 branch used to jump, since the only branch insn
2995 in the block (if it still remains) is an inter-loop
2996 branch and hence needs no processing. */
2997 insn = NEXT_INSN (cond_label);
2998
2999 if (--LABEL_NUSES (cond_label) == 0)
3000 delete_related_insns (cond_label);
3001
3002 /* This loop will be continued with NEXT_INSN (insn). */
3003 insn = PREV_INSN (insn);
3004 }
3005 }
3006 }
3007 }
3008 }
3009
3010 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
3011 loops it is contained in, mark the target loop invalid.
3012
3013 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
3014
3015 static void
3016 mark_loop_jump (rtx x, struct loop *loop)
3017 {
3018 struct loop *dest_loop;
3019 struct loop *outer_loop;
3020 int i;
3021
3022 switch (GET_CODE (x))
3023 {
3024 case PC:
3025 case USE:
3026 case CLOBBER:
3027 case REG:
3028 case MEM:
3029 case CONST_INT:
3030 case CONST_DOUBLE:
3031 case RETURN:
3032 return;
3033
3034 case CONST:
3035 /* There could be a label reference in here. */
3036 mark_loop_jump (XEXP (x, 0), loop);
3037 return;
3038
3039 case PLUS:
3040 case MINUS:
3041 case MULT:
3042 mark_loop_jump (XEXP (x, 0), loop);
3043 mark_loop_jump (XEXP (x, 1), loop);
3044 return;
3045
3046 case LO_SUM:
3047 /* This may refer to a LABEL_REF or SYMBOL_REF. */
3048 mark_loop_jump (XEXP (x, 1), loop);
3049 return;
3050
3051 case SIGN_EXTEND:
3052 case ZERO_EXTEND:
3053 mark_loop_jump (XEXP (x, 0), loop);
3054 return;
3055
3056 case LABEL_REF:
3057 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3058
3059 /* Link together all labels that branch outside the loop. This
3060 is used by final_[bg]iv_value and the loop unrolling code. Also
3061 mark this LABEL_REF so we know that this branch should predict
3062 false. */
3063
3064 /* A check to make sure the label is not in an inner nested loop,
3065 since this does not count as a loop exit. */
3066 if (dest_loop)
3067 {
3068 for (outer_loop = dest_loop; outer_loop;
3069 outer_loop = outer_loop->outer)
3070 if (outer_loop == loop)
3071 break;
3072 }
3073 else
3074 outer_loop = NULL;
3075
3076 if (loop && ! outer_loop)
3077 {
3078 LABEL_OUTSIDE_LOOP_P (x) = 1;
3079 LABEL_NEXTREF (x) = loop->exit_labels;
3080 loop->exit_labels = x;
3081
3082 for (outer_loop = loop;
3083 outer_loop && outer_loop != dest_loop;
3084 outer_loop = outer_loop->outer)
3085 outer_loop->exit_count++;
3086 }
3087
3088 /* If this is inside a loop, but not in the current loop or one enclosed
3089 by it, it invalidates at least one loop. */
3090
3091 if (! dest_loop)
3092 return;
3093
3094 /* We must invalidate every nested loop containing the target of this
3095 label, except those that also contain the jump insn. */
3096
3097 for (; dest_loop; dest_loop = dest_loop->outer)
3098 {
3099 /* Stop when we reach a loop that also contains the jump insn. */
3100 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3101 if (dest_loop == outer_loop)
3102 return;
3103
3104 /* If we get here, we know we need to invalidate a loop. */
3105 if (loop_dump_stream && ! dest_loop->invalid)
3106 fprintf (loop_dump_stream,
3107 "\nLoop at %d ignored due to multiple entry points.\n",
3108 INSN_UID (dest_loop->start));
3109
3110 dest_loop->invalid = 1;
3111 }
3112 return;
3113
3114 case SET:
3115 /* If this is not setting pc, ignore. */
3116 if (SET_DEST (x) == pc_rtx)
3117 mark_loop_jump (SET_SRC (x), loop);
3118 return;
3119
3120 case IF_THEN_ELSE:
3121 mark_loop_jump (XEXP (x, 1), loop);
3122 mark_loop_jump (XEXP (x, 2), loop);
3123 return;
3124
3125 case PARALLEL:
3126 case ADDR_VEC:
3127 for (i = 0; i < XVECLEN (x, 0); i++)
3128 mark_loop_jump (XVECEXP (x, 0, i), loop);
3129 return;
3130
3131 case ADDR_DIFF_VEC:
3132 for (i = 0; i < XVECLEN (x, 1); i++)
3133 mark_loop_jump (XVECEXP (x, 1, i), loop);
3134 return;
3135
3136 default:
3137 /* Strictly speaking this is not a jump into the loop, only a possible
3138 jump out of the loop. However, we have no way to link the destination
3139 of this jump onto the list of exit labels. To be safe we mark this
3140 loop and any containing loops as invalid. */
3141 if (loop)
3142 {
3143 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3144 {
3145 if (loop_dump_stream && ! outer_loop->invalid)
3146 fprintf (loop_dump_stream,
3147 "\nLoop at %d ignored due to unknown exit jump.\n",
3148 INSN_UID (outer_loop->start));
3149 outer_loop->invalid = 1;
3150 }
3151 }
3152 return;
3153 }
3154 }
3155 \f
3156 /* Return nonzero if there is a label in the range from
3157 insn INSN to and including the insn whose luid is END.
3158 INSN must have an assigned luid (i.e., it must not have
3159 been previously created by loop.c). */
3160
3161 static int
3162 labels_in_range_p (rtx insn, int end)
3163 {
3164 while (insn && INSN_LUID (insn) <= end)
3165 {
3166 if (LABEL_P (insn))
3167 return 1;
3168 insn = NEXT_INSN (insn);
3169 }
3170
3171 return 0;
3172 }
3173
3174 /* Record that a memory reference X is being set. */
3175
3176 static void
3177 note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED,
3178 void *data ATTRIBUTE_UNUSED)
3179 {
3180 struct loop_info *loop_info = data;
3181
3182 if (x == 0 || !MEM_P (x))
3183 return;
3184
3185 /* Count number of memory writes.
3186 This affects heuristics in strength_reduce. */
3187 loop_info->num_mem_sets++;
3188
3189 /* BLKmode MEM means all memory is clobbered. */
3190 if (GET_MODE (x) == BLKmode)
3191 {
3192 if (MEM_READONLY_P (x))
3193 loop_info->unknown_constant_address_altered = 1;
3194 else
3195 loop_info->unknown_address_altered = 1;
3196
3197 return;
3198 }
3199
3200 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3201 loop_info->store_mems);
3202 }
3203
3204 /* X is a value modified by an INSN that references a biv inside a loop
3205 exit test (i.e., X is somehow related to the value of the biv). If X
3206 is a pseudo that is used more than once, then the biv is (effectively)
3207 used more than once. DATA is a pointer to a loop_regs structure. */
3208
3209 static void
3210 note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data)
3211 {
3212 struct loop_regs *regs = (struct loop_regs *) data;
3213
3214 if (x == 0)
3215 return;
3216
3217 while (GET_CODE (x) == STRICT_LOW_PART
3218 || GET_CODE (x) == SIGN_EXTRACT
3219 || GET_CODE (x) == ZERO_EXTRACT
3220 || GET_CODE (x) == SUBREG)
3221 x = XEXP (x, 0);
3222
3223 if (!REG_P (x) || REGNO (x) < FIRST_PSEUDO_REGISTER)
3224 return;
3225
3226 /* If we do not have usage information, or if we know the register
3227 is used more than once, note that fact for check_dbra_loop. */
3228 if (REGNO (x) >= max_reg_before_loop
3229 || ! regs->array[REGNO (x)].single_usage
3230 || regs->array[REGNO (x)].single_usage == const0_rtx)
3231 regs->multiple_uses = 1;
3232 }
3233 \f
3234 /* Return nonzero if the rtx X is invariant over the current loop.
3235
3236 The value is 2 if we refer to something only conditionally invariant.
3237
3238 A memory ref is invariant if it is not volatile and does not conflict
3239 with anything stored in `loop_info->store_mems'. */
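/* A register whose set_in_loop count is negative has been provisionally
recognized as set by a movable insn (compare the -2 test in
rtx_equal_for_loop_p above); a reference to such a register is only
conditionally invariant, which is why 2 is returned for it below.  */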
3240
3241 int
3242 loop_invariant_p (const struct loop *loop, rtx x)
3243 {
3244 struct loop_info *loop_info = LOOP_INFO (loop);
3245 struct loop_regs *regs = LOOP_REGS (loop);
3246 int i;
3247 enum rtx_code code;
3248 const char *fmt;
3249 int conditional = 0;
3250 rtx mem_list_entry;
3251
3252 if (x == 0)
3253 return 1;
3254 code = GET_CODE (x);
3255 switch (code)
3256 {
3257 case CONST_INT:
3258 case CONST_DOUBLE:
3259 case SYMBOL_REF:
3260 case CONST:
3261 return 1;
3262
3263 case LABEL_REF:
3264 /* A LABEL_REF is normally invariant, however, if we are unrolling
3265 loops, and this label is inside the loop, then it isn't invariant.
3266 This is because each unrolled copy of the loop body will have
3267 a copy of this label. If this was invariant, then an insn loading
3268 the address of this label into a register might get moved outside
3269 the loop, and then each loop body would end up using the same label.
3270
3271 We don't know the loop bounds here though, so just fail for all
3272 labels. */
3273 if (flag_old_unroll_loops)
3274 return 0;
3275 else
3276 return 1;
3277
3278 case PC:
3279 case CC0:
3280 case UNSPEC_VOLATILE:
3281 return 0;
3282
3283 case REG:
3284 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3285 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3286 && ! current_function_has_nonlocal_goto)
3287 return 1;
3288
3289 if (LOOP_INFO (loop)->has_call
3290 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3291 return 0;
3292
3293 /* Out-of-range regs can occur when we are called from unrolling.
3294 These registers created by the unroller are set in the loop,
3295 hence are never invariant.
3296 Other out-of-range regs can be generated by load_mems; those that
3297 are written to in the loop are not invariant, while those that are
3298 not written to are invariant. It would be easy for load_mems
3299 to set n_times_set correctly for these registers; however, there
3300 is no easy way to distinguish them from registers created by the
3301 unroller. */
3302
3303 if (REGNO (x) >= (unsigned) regs->num)
3304 return 0;
3305
3306 if (regs->array[REGNO (x)].set_in_loop < 0)
3307 return 2;
3308
3309 return regs->array[REGNO (x)].set_in_loop == 0;
3310
3311 case MEM:
3312 /* Volatile memory references must be rejected. Do this before
3313 checking for read-only items, so that volatile read-only items
3314 will be rejected also. */
3315 if (MEM_VOLATILE_P (x))
3316 return 0;
3317
3318 /* See if there is any dependence between a store and this load. */
3319 mem_list_entry = loop_info->store_mems;
3320 while (mem_list_entry)
3321 {
3322 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3323 x, rtx_varies_p))
3324 return 0;
3325
3326 mem_list_entry = XEXP (mem_list_entry, 1);
3327 }
3328
3329 /* It's not invalidated by a store in memory
3330 but we must still verify the address is invariant. */
3331 break;
3332
3333 case ASM_OPERANDS:
3334 /* Don't mess with insns declared volatile. */
3335 if (MEM_VOLATILE_P (x))
3336 return 0;
3337 break;
3338
3339 default:
3340 break;
3341 }
3342
3343 fmt = GET_RTX_FORMAT (code);
3344 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3345 {
3346 if (fmt[i] == 'e')
3347 {
3348 int tem = loop_invariant_p (loop, XEXP (x, i));
3349 if (tem == 0)
3350 return 0;
3351 if (tem == 2)
3352 conditional = 1;
3353 }
3354 else if (fmt[i] == 'E')
3355 {
3356 int j;
3357 for (j = 0; j < XVECLEN (x, i); j++)
3358 {
3359 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3360 if (tem == 0)
3361 return 0;
3362 if (tem == 2)
3363 conditional = 1;
3364 }
3365
3366 }
3367 }
3368
3369 return 1 + conditional;
3370 }
3371 \f
3372 /* Return nonzero if all the insns in the loop that set REG
3373 are INSN and the immediately following insns,
3374 and if each of those insns sets REG in an invariant way
3375 (not counting uses of REG in them).
3376
3377 The value is 2 if some of these insns are only conditionally invariant.
3378
3379 We assume that INSN itself is the first set of REG
3380 and that its source is invariant. */
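/* N_SETS normally comes from the saturating set_in_loop counter
(count_one_set below stops incrementing at 127), so a value of 127 may
undercount the real number of sets; such a value is rejected outright.  */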
3381
3382 static int
3383 consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets,
3384 rtx insn)
3385 {
3386 struct loop_regs *regs = LOOP_REGS (loop);
3387 rtx p = insn;
3388 unsigned int regno = REGNO (reg);
3389 rtx temp;
3390 /* Number of sets we have to insist on finding after INSN. */
3391 int count = n_sets - 1;
3392 int old = regs->array[regno].set_in_loop;
3393 int value = 0;
3394 int this;
3395
3396 /* If N_SETS hit the limit, we can't rely on its value. */
3397 if (n_sets == 127)
3398 return 0;
3399
3400 regs->array[regno].set_in_loop = 0;
3401
3402 while (count > 0)
3403 {
3404 enum rtx_code code;
3405 rtx set;
3406
3407 p = NEXT_INSN (p);
3408 code = GET_CODE (p);
3409
3410 /* If library call, skip to end of it. */
3411 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3412 p = XEXP (temp, 0);
3413
3414 this = 0;
3415 if (code == INSN
3416 && (set = single_set (p))
3417 && REG_P (SET_DEST (set))
3418 && REGNO (SET_DEST (set)) == regno)
3419 {
3420 this = loop_invariant_p (loop, SET_SRC (set));
3421 if (this != 0)
3422 value |= this;
3423 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3424 {
3425 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3426 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3427 notes are OK. */
3428 this = (CONSTANT_P (XEXP (temp, 0))
3429 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3430 && loop_invariant_p (loop, XEXP (temp, 0))));
3431 if (this != 0)
3432 value |= this;
3433 }
3434 }
3435 if (this != 0)
3436 count--;
3437 else if (code != NOTE)
3438 {
3439 regs->array[regno].set_in_loop = old;
3440 return 0;
3441 }
3442 }
3443
3444 regs->array[regno].set_in_loop = old;
3445 /* If loop_invariant_p ever returned 2, we return 2. */
3446 return 1 + (value & 2);
3447 }
3448 \f
3449 /* Look at all uses (not sets) of registers in X. For each register, if
3450 INSN is its only use seen so far, set REGS->array[REGNO].single_usage to
3451 INSN; if the register was already used in a different insn, set it to const0_rtx. */
3452
3453 static void
3454 find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x)
3455 {
3456 enum rtx_code code = GET_CODE (x);
3457 const char *fmt = GET_RTX_FORMAT (code);
3458 int i, j;
3459
3460 if (code == REG)
3461 regs->array[REGNO (x)].single_usage
3462 = (regs->array[REGNO (x)].single_usage != 0
3463 && regs->array[REGNO (x)].single_usage != insn)
3464 ? const0_rtx : insn;
3465
3466 else if (code == SET)
3467 {
3468 /* Don't count SET_DEST if it is a plain REG: that is a set, not a use.
3469 Otherwise do count the registers appearing inside SET_DEST; a register
3470 that is only partially modified will never show up as a potential
3471 movable, so we don't care how its single_usage ends up being set. */
3472 if (!REG_P (SET_DEST (x)))
3473 find_single_use_in_loop (regs, insn, SET_DEST (x));
3474 find_single_use_in_loop (regs, insn, SET_SRC (x));
3475 }
3476 else
3477 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3478 {
3479 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3480 find_single_use_in_loop (regs, insn, XEXP (x, i));
3481 else if (fmt[i] == 'E')
3482 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3483 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3484 }
3485 }
3486 \f
3487 /* Count and record any set in X which is contained in INSN. Update
3488 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3489 in X. */
3490
3491 static void
3492 count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set)
3493 {
3494 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
3495 /* Don't move a reg that has an explicit clobber.
3496 It's not worth the pain to try to do it correctly. */
3497 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3498
3499 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3500 {
3501 rtx dest = SET_DEST (x);
3502 while (GET_CODE (dest) == SUBREG
3503 || GET_CODE (dest) == ZERO_EXTRACT
3504 || GET_CODE (dest) == SIGN_EXTRACT
3505 || GET_CODE (dest) == STRICT_LOW_PART)
3506 dest = XEXP (dest, 0);
3507 if (REG_P (dest))
3508 {
3509 int i;
3510 int regno = REGNO (dest);
3511 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3512 {
3513 /* If this is the first setting of this reg
3514 in current basic block, and it was set before,
3515 it must be set in two basic blocks, so it cannot
3516 be moved out of the loop. */
3517 if (regs->array[regno].set_in_loop > 0
3518 && last_set[regno] == 0)
3519 regs->array[regno+i].may_not_optimize = 1;
3520 /* If this is not the first setting in the current basic block,
3521 see if reg was used in between previous one and this.
3522 If so, neither one can be moved. */
3523 if (last_set[regno] != 0
3524 && reg_used_between_p (dest, last_set[regno], insn))
3525 regs->array[regno+i].may_not_optimize = 1;
3526 if (regs->array[regno+i].set_in_loop < 127)
3527 ++regs->array[regno+i].set_in_loop;
3528 last_set[regno+i] = insn;
3529 }
3530 }
3531 }
3532 }
3533 \f
3534 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3535 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3536 contained in insn INSN is used by any insn that precedes INSN in
3537 cyclic order starting from the loop entry point.
3538
3539 We don't want to use INSN_LUID here because if we restrict INSN to those
3540 that have a valid INSN_LUID, it means we cannot move an invariant out
3541 from an inner loop past two loops. */
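/* For example, in a loop entered at its exit test (so LOOP->SCAN_START lies
   near LOOP->END), an insn that appears textually after INSN may still
   execute before it; the scan below therefore walks forward from
   LOOP->SCAN_START in execution order and wraps from LOOP->END back to
   LOOP->START instead of comparing luids.  */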
3542
3543 static int
3544 loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn)
3545 {
3546 rtx reg = SET_DEST (set);
3547 rtx p;
3548
3549 /* Scan forward checking for register usage. If we hit INSN, we
3550 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3551 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3552 {
3553 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3554 return 1;
3555
3556 if (p == loop->end)
3557 p = loop->start;
3558 }
3559
3560 return 0;
3561 }
3562 \f
3563
3564 /* Information we collect about arrays that we might want to prefetch. */
3565 struct prefetch_info
3566 {
3567 struct iv_class *class; /* Class this prefetch is based on. */
3568 struct induction *giv; /* GIV this prefetch is based on. */
3569 rtx base_address; /* Start prefetching from this address plus
3570 index. */
3571 HOST_WIDE_INT index;
3572 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3573 iteration. */
3574 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3575 prefetch area in one iteration. */
3576 unsigned int total_bytes; /* Total bytes loop will access in this block.
3577 This is set only for loops with known
3578 iteration counts and is 0xffffffff
3579 otherwise. */
3580 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3581 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3582 unsigned int write : 1; /* 1 for read/write prefetches. */
3583 };
3584
3585 /* Data used by check_store function. */
3586 struct check_store_data
3587 {
3588 rtx mem_address;
3589 int mem_write;
3590 };
3591
3592 static void check_store (rtx, rtx, void *);
3593 static void emit_prefetch_instructions (struct loop *);
3594 static int rtx_equal_for_prefetch_p (rtx, rtx);
3595
3596 /* Set mem_write when mem_address is found. Used as callback to
3597 note_stores. */
3598 static void
3599 check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3600 {
3601 struct check_store_data *d = (struct check_store_data *) data;
3602
3603 if ((MEM_P (x)) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3604 d->mem_write = 1;
3605 }
3606 \f
3607 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3608 important to get some addresses combined. Later more sophisticated
3609 transformations can be added when necessary.
3610
3611 ??? The same trick of swapping operands is done at several other places.
3612 It would be nice to develop some common way to handle this. */
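/* For example, (plus:SI (reg 100) (reg 101)) is considered equal to
   (plus:SI (reg 101) (reg 100)), so two address givs that differ only in
   operand order can still be merged into a single prefetch.  (Illustrative
   registers only.)  */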
3613
3614 static int
3615 rtx_equal_for_prefetch_p (rtx x, rtx y)
3616 {
3617 int i;
3618 int j;
3619 enum rtx_code code = GET_CODE (x);
3620 const char *fmt;
3621
3622 if (x == y)
3623 return 1;
3624 if (code != GET_CODE (y))
3625 return 0;
3626
3627 if (COMMUTATIVE_ARITH_P (x))
3628 {
3629 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3630 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3631 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3632 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3633 }
3634
3635 /* Compare the elements. If any pair of corresponding elements fails to
3636 match, return 0 for the whole thing. */
3637
3638 fmt = GET_RTX_FORMAT (code);
3639 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3640 {
3641 switch (fmt[i])
3642 {
3643 case 'w':
3644 if (XWINT (x, i) != XWINT (y, i))
3645 return 0;
3646 break;
3647
3648 case 'i':
3649 if (XINT (x, i) != XINT (y, i))
3650 return 0;
3651 break;
3652
3653 case 'E':
3654 /* Two vectors must have the same length. */
3655 if (XVECLEN (x, i) != XVECLEN (y, i))
3656 return 0;
3657
3658 /* And the corresponding elements must match. */
3659 for (j = 0; j < XVECLEN (x, i); j++)
3660 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3661 XVECEXP (y, i, j)) == 0)
3662 return 0;
3663 break;
3664
3665 case 'e':
3666 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3667 return 0;
3668 break;
3669
3670 case 's':
3671 if (strcmp (XSTR (x, i), XSTR (y, i)))
3672 return 0;
3673 break;
3674
3675 case 'u':
3676 /* These are just backpointers, so they don't matter. */
3677 break;
3678
3679 case '0':
3680 break;
3681
3682 /* It is believed that rtx's at this level will never
3683 contain anything but integers and other rtx's,
3684 except within LABEL_REFs and SYMBOL_REFs. */
3685 default:
3686 gcc_unreachable ();
3687 }
3688 }
3689 return 1;
3690 }
3691 \f
3692 /* Remove constant addition value from the expression X (when present)
3693 and return it. */
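/* For example, given (plus (reg 100) (const_int 8)) this returns 8 and
   rewrites *X to (reg 100); given (const (plus (symbol_ref "a")
   (const_int 4))) it returns 4 and rewrites *X to the SYMBOL_REF.
   (Illustrative operands only.)  */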
3694
3695 static HOST_WIDE_INT
3696 remove_constant_addition (rtx *x)
3697 {
3698 HOST_WIDE_INT addval = 0;
3699 rtx exp = *x;
3700
3701 /* Avoid clobbering a shared CONST expression. */
3702 if (GET_CODE (exp) == CONST)
3703 {
3704 if (GET_CODE (XEXP (exp, 0)) == PLUS
3705 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3706 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3707 {
3708 *x = XEXP (XEXP (exp, 0), 0);
3709 return INTVAL (XEXP (XEXP (exp, 0), 1));
3710 }
3711 return 0;
3712 }
3713
3714 if (GET_CODE (exp) == CONST_INT)
3715 {
3716 addval = INTVAL (exp);
3717 *x = const0_rtx;
3718 }
3719
3720 /* For a PLUS expression, recurse on both operands. */
3721 else if (GET_CODE (exp) == PLUS)
3722 {
3723 addval += remove_constant_addition (&XEXP (exp, 0));
3724 addval += remove_constant_addition (&XEXP (exp, 1));
3725
3726 /* If one of the operands turned out to be constant, remove the resulting
3727 zero from the expression. */
3728 if (XEXP (exp, 0) == const0_rtx)
3729 *x = XEXP (exp, 1);
3730 else if (XEXP (exp, 1) == const0_rtx)
3731 *x = XEXP (exp, 0);
3732 }
3733
3734 return addval;
3735 }
3736
3737 /* Attempt to identify accesses to arrays that are most likely to cause cache
3738 misses, and emit prefetch instructions a few prefetch blocks forward.
3739
3740 To detect the arrays we use the GIV information that was collected by the
3741 strength reduction pass.
3742
3743 The prefetch instructions are generated after the GIV information is done
3744 and before the strength reduction process. The new GIVs are injected into
3745 the strength reduction tables, so the prefetch addresses are optimized as
3746 well.
3747
3748 GIVs are split into base address, stride, and constant addition values.
3749 GIVs with the same address, stride and close addition values are combined
3750 into a single prefetch. Writes to GIVs are also detected, so that a
3751 prefetch-for-write instruction can be used for the blocks we write to, on
3752 machines that support write prefetches.
3753
3754 Several heuristics are used to determine when to prefetch. They are
3755 controlled by defined symbols that can be overridden for each target. */
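/* As an illustrative example: for a loop walking an int array through an
   address giv of the form  base + 4*i + 32,  where the biv i steps by 1 each
   iteration, the giv is split into base address "base", a stride of 4 bytes
   per iteration, and the constant index 32.  Another giv  base + 4*i + 36
   differs only in the constant part and is merged into the same prefetch,
   provided the difference stays below PREFETCH_EXTREME_DIFFERENCE.  (The
   particular numbers are illustrative only.)  */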
3756
3757 static void
3758 emit_prefetch_instructions (struct loop *loop)
3759 {
3760 int num_prefetches = 0;
3761 int num_real_prefetches = 0;
3762 int num_real_write_prefetches = 0;
3763 int num_prefetches_before = 0;
3764 int num_write_prefetches_before = 0;
3765 int ahead = 0;
3766 int i;
3767 struct iv_class *bl;
3768 struct induction *iv;
3769 struct prefetch_info info[MAX_PREFETCHES];
3770 struct loop_ivs *ivs = LOOP_IVS (loop);
3771
3772 if (!HAVE_prefetch)
3773 return;
3774
3775 /* Consider only loops without calls. If a loop contains a call, it is
3776 probably slow enough already that prefetching would gain little. */
3777 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3778 {
3779 if (loop_dump_stream)
3780 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
3781
3782 return;
3783 }
3784
3785 /* Don't prefetch in loops known to have few iterations. */
3786 if (PREFETCH_NO_LOW_LOOPCNT
3787 && LOOP_INFO (loop)->n_iterations
3788 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3789 {
3790 if (loop_dump_stream)
3791 fprintf (loop_dump_stream,
3792 "Prefetch: ignoring loop: not enough iterations.\n");
3793 return;
3794 }
3795
3796 /* Search all induction variables and pick those interesting for the prefetch
3797 machinery. */
3798 for (bl = ivs->list; bl; bl = bl->next)
3799 {
3800 struct induction *biv = bl->biv, *biv1;
3801 int basestride = 0;
3802
3803 biv1 = biv;
3804
3805 /* Expect all BIVs to be executed in each iteration. This makes our
3806 analysis more conservative. */
3807 while (biv1)
3808 {
3809 /* Discard non-constant additions that we can't handle well yet, and
3810 BIVs that are executed multiple times; such BIVs ought to be
3811 handled in the nested loop. We accept not_every_iteration BIVs,
3812 since these only result in larger strides and make our
3813 heuristics more conservative. */
3814 if (GET_CODE (biv->add_val) != CONST_INT)
3815 {
3816 if (loop_dump_stream)
3817 {
3818 fprintf (loop_dump_stream,
3819 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
3820 REGNO (biv->src_reg), INSN_UID (biv->insn));
3821 print_rtl (loop_dump_stream, biv->add_val);
3822 fprintf (loop_dump_stream, "\n");
3823 }
3824 break;
3825 }
3826
3827 if (biv->maybe_multiple)
3828 {
3829 if (loop_dump_stream)
3830 {
3831 fprintf (loop_dump_stream,
3832 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
3833 REGNO (biv->src_reg), INSN_UID (biv->insn));
3834 print_rtl (loop_dump_stream, biv->add_val);
3835 fprintf (loop_dump_stream, "\n");
3836 }
3837 break;
3838 }
3839
3840 basestride += INTVAL (biv1->add_val);
3841 biv1 = biv1->next_iv;
3842 }
3843
3844 if (biv1 || !basestride)
3845 continue;
3846
3847 for (iv = bl->giv; iv; iv = iv->next_iv)
3848 {
3849 rtx address;
3850 rtx temp;
3851 HOST_WIDE_INT index = 0;
3852 int add = 1;
3853 HOST_WIDE_INT stride = 0;
3854 int stride_sign = 1;
3855 struct check_store_data d;
3856 const char *ignore_reason = NULL;
3857 int size = GET_MODE_SIZE (GET_MODE (iv));
3858
3859 /* See whether an induction variable is interesting to us and if
3860 not, report the reason. */
3861 if (iv->giv_type != DEST_ADDR)
3862 ignore_reason = "giv is not a destination address";
3863
3864 /* We are interested only in constant stride memory references
3865 in order to be able to compute density easily. */
3866 else if (GET_CODE (iv->mult_val) != CONST_INT)
3867 ignore_reason = "stride is not constant";
3868
3869 else
3870 {
3871 stride = INTVAL (iv->mult_val) * basestride;
3872 if (stride < 0)
3873 {
3874 stride = -stride;
3875 stride_sign = -1;
3876 }
3877
3878 /* On some targets, reversed order prefetches are not
3879 worthwhile. */
3880 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
3881 ignore_reason = "reversed order stride";
3882
3883 /* Prefetch of accesses with an extreme stride might not be
3884 worthwhile, either. */
3885 else if (PREFETCH_NO_EXTREME_STRIDE
3886 && stride > PREFETCH_EXTREME_STRIDE)
3887 ignore_reason = "extreme stride";
3888
3889 /* Ignore GIVs with varying add values; we can't predict the
3890 value for the next iteration. */
3891 else if (!loop_invariant_p (loop, iv->add_val))
3892 ignore_reason = "giv has varying add value";
3893
3894 /* Ignore GIVs in nested loops; they ought to have been
3895 handled already. */
3896 else if (iv->maybe_multiple)
3897 ignore_reason = "giv is in nested loop";
3898 }
3899
3900 if (ignore_reason != NULL)
3901 {
3902 if (loop_dump_stream)
3903 fprintf (loop_dump_stream,
3904 "Prefetch: ignoring giv at %d: %s.\n",
3905 INSN_UID (iv->insn), ignore_reason);
3906 continue;
3907 }
3908
3909 /* Determine the pointer to the basic array we are examining. It is
3910 the sum of the BIV's initial value and the GIV's add_val. */
3911 address = copy_rtx (iv->add_val);
3912 temp = copy_rtx (bl->initial_value);
3913
3914 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3915 index = remove_constant_addition (&address);
3916
3917 d.mem_write = 0;
3918 d.mem_address = *iv->location;
3919
3920 /* When the GIV is not always executed, we might be better off by
3921 not dirtying the cache pages. */
3922 if (PREFETCH_CONDITIONAL || iv->always_executed)
3923 note_stores (PATTERN (iv->insn), check_store, &d);
3924 else
3925 {
3926 if (loop_dump_stream)
3927 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
3928 INSN_UID (iv->insn), "in conditional code.");
3929 continue;
3930 }
3931
3932 /* Attempt to find another prefetch to the same array and see if we
3933 can merge this one. */
3934 for (i = 0; i < num_prefetches; i++)
3935 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3936 && stride == info[i].stride)
3937 {
3938 /* If both access the same array (the same location
3939 just with a small difference in constant indexes), merge
3940 the prefetches. Just do the later one; the earlier one will
3941 get prefetched from the previous iteration.
3942 The artificial threshold should not be too small,
3943 but also not bigger than the small portion of memory usually
3944 traversed by a single loop. */
3945 if (index >= info[i].index
3946 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
3947 {
3948 info[i].write |= d.mem_write;
3949 info[i].bytes_accessed += size;
3950 info[i].index = index;
3951 info[i].giv = iv;
3952 info[i].class = bl;
3953 info[num_prefetches].base_address = address;
3954 add = 0;
3955 break;
3956 }
3957
3958 if (index < info[i].index
3959 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
3960 {
3961 info[i].write |= d.mem_write;
3962 info[i].bytes_accessed += size;
3963 add = 0;
3964 break;
3965 }
3966 }
3967
3968 /* Merging failed. */
3969 if (add)
3970 {
3971 info[num_prefetches].giv = iv;
3972 info[num_prefetches].class = bl;
3973 info[num_prefetches].index = index;
3974 info[num_prefetches].stride = stride;
3975 info[num_prefetches].base_address = address;
3976 info[num_prefetches].write = d.mem_write;
3977 info[num_prefetches].bytes_accessed = size;
3978 num_prefetches++;
3979 if (num_prefetches >= MAX_PREFETCHES)
3980 {
3981 if (loop_dump_stream)
3982 fprintf (loop_dump_stream,
3983 "Maximal number of prefetches exceeded.\n");
3984 return;
3985 }
3986 }
3987 }
3988 }
3989
3990 for (i = 0; i < num_prefetches; i++)
3991 {
3992 int density;
3993
3994 /* Attempt to calculate the total number of bytes fetched by all
3995 iterations of the loop. Avoid overflow. */
3996 if (LOOP_INFO (loop)->n_iterations
3997 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
3998 >= LOOP_INFO (loop)->n_iterations))
3999 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4000 else
4001 info[i].total_bytes = 0xffffffff;
4002
4003 density = info[i].bytes_accessed * 100 / info[i].stride;
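      /* DENSITY is the percentage of each stride actually touched; e.g.
         accessing one 4-byte int out of every 16-byte stride gives a density
         of 25 (%).  The test below compares this against PREFETCH_DENSE_MEM,
         which is expressed as a fraction of 256, by cross-multiplying rather
         than dividing.  (Numbers are illustrative only.)  */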
4004
4005 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4006 if (PREFETCH_ONLY_DENSE_MEM)
4007 if (density * 256 > PREFETCH_DENSE_MEM * 100
4008 && (info[i].total_bytes / PREFETCH_BLOCK
4009 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4010 {
4011 info[i].prefetch_before_loop = 1;
4012 info[i].prefetch_in_loop
4013 = (info[i].total_bytes / PREFETCH_BLOCK
4014 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4015 }
4016 else
4017 {
4018 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4019 if (loop_dump_stream)
4020 fprintf (loop_dump_stream,
4021 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4022 INSN_UID (info[i].giv->insn), density);
4023 }
4024 else
4025 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4026
4027 /* Find how many prefetch instructions we'll use within the loop. */
4028 if (info[i].prefetch_in_loop != 0)
4029 {
4030 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4031 / PREFETCH_BLOCK);
4032 num_real_prefetches += info[i].prefetch_in_loop;
4033 if (info[i].write)
4034 num_real_write_prefetches += info[i].prefetch_in_loop;
4035 }
4036 }
4037
4038 /* Determine how many iterations ahead to prefetch within the loop, based
4039 on how many prefetches we currently expect to do within the loop. */
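/* For example, if the target sustains SIMULTANEOUS_PREFETCHES == 3 and we
   plan 2 prefetch insns per iteration, AHEAD becomes 3 / 2 == 1; with 4
   planned prefetch insns it would be 0, and prefetching within the loop is
   abandoned below.  (Illustrative values only.)  */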
4040 if (num_real_prefetches != 0)
4041 {
4042 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4043 {
4044 if (loop_dump_stream)
4045 fprintf (loop_dump_stream,
4046 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4047 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4048 num_real_prefetches = 0, num_real_write_prefetches = 0;
4049 }
4050 }
4051 /* We'll also use AHEAD to determine how many prefetch instructions to
4052 emit before a loop, so don't leave it zero. */
4053 if (ahead == 0)
4054 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
4055
4056 for (i = 0; i < num_prefetches; i++)
4057 {
4058 /* Update if we've decided not to prefetch anything within the loop. */
4059 if (num_real_prefetches == 0)
4060 info[i].prefetch_in_loop = 0;
4061
4062 /* Find how many prefetch instructions we'll use before the loop. */
4063 if (info[i].prefetch_before_loop != 0)
4064 {
4065 int n = info[i].total_bytes / PREFETCH_BLOCK;
4066 if (n > ahead)
4067 n = ahead;
4068 info[i].prefetch_before_loop = n;
4069 num_prefetches_before += n;
4070 if (info[i].write)
4071 num_write_prefetches_before += n;
4072 }
4073
4074 if (loop_dump_stream)
4075 {
4076 if (info[i].prefetch_in_loop == 0
4077 && info[i].prefetch_before_loop == 0)
4078 continue;
4079 fprintf (loop_dump_stream, "Prefetch insn: %d",
4080 INSN_UID (info[i].giv->insn));
4081 fprintf (loop_dump_stream,
4082 "; in loop: %d; before: %d; %s\n",
4083 info[i].prefetch_in_loop,
4084 info[i].prefetch_before_loop,
4085 info[i].write ? "read/write" : "read only");
4086 fprintf (loop_dump_stream,
4087 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4088 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4089 info[i].bytes_accessed, info[i].total_bytes);
4090 fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC
4091 "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ",
4092 info[i].index, info[i].stride);
4093 print_rtl (loop_dump_stream, info[i].base_address);
4094 fprintf (loop_dump_stream, "\n");
4095 }
4096 }
4097
4098 if (num_real_prefetches + num_prefetches_before > 0)
4099 {
4100 /* Record that this loop uses prefetch instructions. */
4101 LOOP_INFO (loop)->has_prefetch = 1;
4102
4103 if (loop_dump_stream)
4104 {
4105 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4106 num_real_prefetches, num_real_write_prefetches);
4107 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4108 num_prefetches_before, num_write_prefetches_before);
4109 }
4110 }
4111
4112 for (i = 0; i < num_prefetches; i++)
4113 {
4114 int y;
4115
4116 for (y = 0; y < info[i].prefetch_in_loop; y++)
4117 {
4118 rtx loc = copy_rtx (*info[i].giv->location);
4119 rtx insn;
4120 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4121 rtx before_insn = info[i].giv->insn;
4122 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4123 rtx seq;
4124
4125 /* We can save some effort by offsetting the address on
4126 architectures with offsettable memory references. */
4127 if (offsettable_address_p (0, VOIDmode, loc))
4128 loc = plus_constant (loc, bytes_ahead);
4129 else
4130 {
4131 rtx reg = gen_reg_rtx (Pmode);
4132 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4133 GEN_INT (bytes_ahead), reg,
4134 0, before_insn);
4135 loc = reg;
4136 }
4137
4138 start_sequence ();
4139 /* Make sure the address operand is valid for prefetch. */
4140 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4141 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4142 loc = force_reg (Pmode, loc);
4143 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4144 GEN_INT (3)));
4145 seq = get_insns ();
4146 end_sequence ();
4147 emit_insn_before (seq, before_insn);
4148
4149 /* Check all insns emitted and record the new GIV
4150 information. */
4151 insn = NEXT_INSN (prev_insn);
4152 while (insn != before_insn)
4153 {
4154 insn = check_insn_for_givs (loop, insn,
4155 info[i].giv->always_executed,
4156 info[i].giv->maybe_multiple);
4157 insn = NEXT_INSN (insn);
4158 }
4159 }
4160
4161 if (PREFETCH_BEFORE_LOOP)
4162 {
4163 /* Emit insns before the loop to fetch the first cache lines or,
4164 if we're not prefetching within the loop, everything we expect
4165 to need. */
4166 for (y = 0; y < info[i].prefetch_before_loop; y++)
4167 {
4168 rtx reg = gen_reg_rtx (Pmode);
4169 rtx loop_start = loop->start;
4170 rtx init_val = info[i].class->initial_value;
4171 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4172 info[i].giv->add_val,
4173 GEN_INT (y * PREFETCH_BLOCK));
4174
4175 /* Functions called by loop_iv_add_mult_emit_before expect a
4176 non-constant INIT_VAL to have the same mode as REG, which
4177 in this case we know to be Pmode. */
4178 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4179 {
4180 rtx seq;
4181
4182 start_sequence ();
4183 init_val = convert_to_mode (Pmode, init_val, 0);
4184 seq = get_insns ();
4185 end_sequence ();
4186 loop_insn_emit_before (loop, 0, loop_start, seq);
4187 }
4188 loop_iv_add_mult_emit_before (loop, init_val,
4189 info[i].giv->mult_val,
4190 add_val, reg, 0, loop_start);
4191 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4192 GEN_INT (3)),
4193 loop_start);
4194 }
4195 }
4196 }
4197
4198 return;
4199 }
4200 \f
4201 /* Communication with routines called via `note_stores'. */
4202
4203 static rtx note_insn;
4204
4205 /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */
4206
4207 static rtx addr_placeholder;
4208
4209 /* ??? Unfinished optimizations, and possible future optimizations,
4210 for the strength reduction code. */
4211
4212 /* ??? The interaction of biv elimination, and recognition of 'constant'
4213 bivs, may cause problems. */
4214
4215 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4216 performance problems.
4217
4218 Perhaps don't eliminate things that can be combined with an addressing
4219 mode. Find all givs that have the same biv, mult_val, and add_val;
4220 then for each giv, check to see if its only use dies in a following
4221 memory address. If so, generate a new memory address and check to see
4222 if it is valid. If it is valid, then store the modified memory address,
4223 otherwise, mark the giv as not done so that it will get its own iv. */
4224
4225 /* ??? Could try to optimize branches when it is known that a biv is always
4226 positive. */
4227
4228 /* ??? When replacing a biv in a compare insn, we should replace it with the
4229 closest giv so that an optimized branch can still be recognized by the
4230 combiner, e.g. the VAX acb insn. */
4231
4232 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4233 was rerun in loop_optimize whenever a register was added or moved.
4234 Also, some of the optimizations could be a little less conservative. */
4235 \f
4236 /* Scan the loop body and call FNCALL for each insn. In addition to the
4237 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
4238 callback.
4239
4240 NOT_EVERY_ITERATION is 1 if the current insn is not known to be executed at
4241 least once for every loop iteration except for the last one.
4242
4243 MAYBE_MULTIPLE is 1 if the current insn may be executed more than once per
4244 loop iteration.
4245 */
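/* For instance, insns that follow a branch which stays inside the loop (other
   than the jump back to the top) get NOT_EVERY_ITERATION set, and insns after
   a CODE_LABEL get MAYBE_MULTIPLE set unless every jump between that label
   and the end of the loop provably cannot bring control back to them.  */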
4246 void
4247 for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall)
4248 {
4249 int not_every_iteration = 0;
4250 int maybe_multiple = 0;
4251 int past_loop_latch = 0;
4252 rtx p;
4253
4254 /* If loop_scan_start points to the loop exit test, we have to be wary of
4255 subversive use of gotos inside expression statements. */
4256 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4257 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4258
4259 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4260 for (p = next_insn_in_loop (loop, loop->scan_start);
4261 p != NULL_RTX;
4262 p = next_insn_in_loop (loop, p))
4263 {
4264 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4265
4266 /* Past a CODE_LABEL, we get to insns that may be executed multiple
4267 times. The only way we can be sure that they can't is if every
4268 jump insn between here and the end of the loop either
4269 returns, exits the loop, is a jump to a location that is still
4270 behind the label, or is a jump to the loop start. */
4271
4272 if (LABEL_P (p))
4273 {
4274 rtx insn = p;
4275
4276 maybe_multiple = 0;
4277
4278 while (1)
4279 {
4280 insn = NEXT_INSN (insn);
4281 if (insn == loop->scan_start)
4282 break;
4283 if (insn == loop->end)
4284 {
4285 if (loop->top != 0)
4286 insn = loop->top;
4287 else
4288 break;
4289 if (insn == loop->scan_start)
4290 break;
4291 }
4292
4293 if (JUMP_P (insn)
4294 && GET_CODE (PATTERN (insn)) != RETURN
4295 && (!any_condjump_p (insn)
4296 || (JUMP_LABEL (insn) != 0
4297 && JUMP_LABEL (insn) != loop->scan_start
4298 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4299 {
4300 maybe_multiple = 1;
4301 break;
4302 }
4303 }
4304 }
4305
4306 /* Past a jump, we get to insns for which we can't count
4307 on whether they will be executed during each iteration. */
4308 /* This code appears twice in strength_reduce. There is also similar
4309 code in scan_loop. */
4310 if (JUMP_P (p)
4311 /* If we enter the loop in the middle, and scan around to the
4312 beginning, don't set not_every_iteration for that.
4313 This can be any kind of jump, since we want to know if insns
4314 will be executed if the loop is executed. */
4315 && !(JUMP_LABEL (p) == loop->top
4316 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4317 && any_uncondjump_p (p))
4318 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4319 {
4320 rtx label = 0;
4321
4322 /* If this is a jump outside the loop, then it also doesn't
4323 matter. Check to see if the target of this branch is on the
4324 loop->exit_labels list. */
4325
4326 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4327 if (XEXP (label, 0) == JUMP_LABEL (p))
4328 break;
4329
4330 if (!label)
4331 not_every_iteration = 1;
4332 }
4333
4334 /* Note if we pass a loop latch. If we do, then we cannot clear
4335 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4336 a loop since a jump before the last CODE_LABEL may have started
4337 a new loop iteration.
4338
4339 Note that LOOP_TOP is only set for rotated loops and we need
4340 this check for all loops, so compare against the CODE_LABEL
4341 which immediately follows LOOP_START. */
4342 if (JUMP_P (p)
4343 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4344 past_loop_latch = 1;
4345
4346 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4347 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4348 or not an insn is known to be executed each iteration of the
4349 loop, whether or not any iterations are known to occur.
4350
4351 Therefore, if we have just passed a label and have no more labels
4352 between here and the test insn of the loop, and we have not passed
4353 a jump to the top of the loop, then we know these insns will be
4354 executed each iteration. */
4355
4356 if (not_every_iteration
4357 && !past_loop_latch
4358 && LABEL_P (p)
4359 && no_labels_between_p (p, loop->end))
4360 not_every_iteration = 0;
4361 }
4362 }
4363 \f
4364 static void
4365 loop_bivs_find (struct loop *loop)
4366 {
4367 struct loop_regs *regs = LOOP_REGS (loop);
4368 struct loop_ivs *ivs = LOOP_IVS (loop);
4369 /* Temporary list pointers for traversing ivs->list. */
4370 struct iv_class *bl, **backbl;
4371
4372 ivs->list = 0;
4373
4374 for_each_insn_in_loop (loop, check_insn_for_bivs);
4375
4376 /* Scan ivs->list to remove all regs that proved not to be bivs.
4377 Make a sanity check against regs->n_times_set. */
4378 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4379 {
4380 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4381 /* Above happens if register modified by subreg, etc. */
4382 /* Make sure it is not recognized as a basic induction var: */
4383 || regs->array[bl->regno].n_times_set != bl->biv_count
4384 /* If never incremented, it is an invariant that we decided not to
4385 move, so leave it alone. */
4386 || ! bl->incremented)
4387 {
4388 if (loop_dump_stream)
4389 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4390 bl->regno,
4391 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4392 ? "not induction variable"
4393 : (! bl->incremented ? "never incremented"
4394 : "count error")));
4395
4396 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4397 *backbl = bl->next;
4398 }
4399 else
4400 {
4401 backbl = &bl->next;
4402
4403 if (loop_dump_stream)
4404 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4405 }
4406 }
4407 }
4408
4409
4410 /* Determine how BIVS are initialized by looking through the pre-header
4411 extended basic block. */
4412 static void
4413 loop_bivs_init_find (struct loop *loop)
4414 {
4415 struct loop_ivs *ivs = LOOP_IVS (loop);
4416 /* Temporary list pointers for traversing ivs->list. */
4417 struct iv_class *bl;
4418 int call_seen;
4419 rtx p;
4420
4421 /* Find initial value for each biv by searching backwards from loop_start,
4422 halting at first label. Also record any test condition. */
4423
4424 call_seen = 0;
4425 for (p = loop->start; p && !LABEL_P (p); p = PREV_INSN (p))
4426 {
4427 rtx test;
4428
4429 note_insn = p;
4430
4431 if (CALL_P (p))
4432 call_seen = 1;
4433
4434 if (INSN_P (p))
4435 note_stores (PATTERN (p), record_initial, ivs);
4436
4437 /* Record any test of a biv that branches around the loop if there is no
4438 store between it and the start of the loop. We only care about tests
4439 with constants and registers, and only certain of those. */
4440 if (JUMP_P (p)
4441 && JUMP_LABEL (p) != 0
4442 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4443 && (test = get_condition_for_loop (loop, p)) != 0
4444 && REG_P (XEXP (test, 0))
4445 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4446 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4447 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4448 && bl->init_insn == 0)
4449 {
4450 /* If an NE test, we have an initial value! */
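              /* For example, a guard like "if (i != 0) goto past_loop;"
                 placed just before the loop implies i == 0 whenever the loop
                 body is entered, so (set i 0) is recorded as the
                 initializing set.  (Illustrative variable name only.)  */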
4451 if (GET_CODE (test) == NE)
4452 {
4453 bl->init_insn = p;
4454 bl->init_set = gen_rtx_SET (VOIDmode,
4455 XEXP (test, 0), XEXP (test, 1));
4456 }
4457 else
4458 bl->initial_test = test;
4459 }
4460 }
4461 }
4462
4463
4464 /* Look at each biv and see if we can say anything better about its
4465 initial value from any initializing insns set up above. (This is done
4466 in two passes to avoid missing SETs in a PARALLEL.) */
4467 static void
4468 loop_bivs_check (struct loop *loop)
4469 {
4470 struct loop_ivs *ivs = LOOP_IVS (loop);
4471 /* Temporary list pointers for traversing ivs->list. */
4472 struct iv_class *bl;
4473 struct iv_class **backbl;
4474
4475 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4476 {
4477 rtx src;
4478 rtx note;
4479
4480 if (! bl->init_insn)
4481 continue;
4482
4483 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4484 is a constant, use the value of that. */
4485 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4486 && CONSTANT_P (XEXP (note, 0)))
4487 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4488 && CONSTANT_P (XEXP (note, 0))))
4489 src = XEXP (note, 0);
4490 else
4491 src = SET_SRC (bl->init_set);
4492
4493 if (loop_dump_stream)
4494 fprintf (loop_dump_stream,
4495 "Biv %d: initialized at insn %d: initial value ",
4496 bl->regno, INSN_UID (bl->init_insn));
4497
4498 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4499 || GET_MODE (src) == VOIDmode)
4500 && valid_initial_value_p (src, bl->init_insn,
4501 LOOP_INFO (loop)->pre_header_has_call,
4502 loop->start))
4503 {
4504 bl->initial_value = src;
4505
4506 if (loop_dump_stream)
4507 {
4508 print_simple_rtl (loop_dump_stream, src);
4509 fputc ('\n', loop_dump_stream);
4510 }
4511 }
4512 /* If we can't make it a giv,
4513 let the biv keep its initial value of "itself". */
4514 else if (loop_dump_stream)
4515 fprintf (loop_dump_stream, "is complex\n");
4516 }
4517 }
4518
4519
4520 /* Search the loop for general induction variables. */
4521
4522 static void
4523 loop_givs_find (struct loop* loop)
4524 {
4525 for_each_insn_in_loop (loop, check_insn_for_givs);
4526 }
4527
4528
4529 /* For each giv for which we still don't know whether or not it is
4530 replaceable, check to see if it is replaceable because its final value
4531 can be calculated. */
4532
4533 static void
4534 loop_givs_check (struct loop *loop)
4535 {
4536 struct loop_ivs *ivs = LOOP_IVS (loop);
4537 struct iv_class *bl;
4538
4539 for (bl = ivs->list; bl; bl = bl->next)
4540 {
4541 struct induction *v;
4542
4543 for (v = bl->giv; v; v = v->next_iv)
4544 if (! v->replaceable && ! v->not_replaceable)
4545 check_final_value (loop, v);
4546 }
4547 }
4548
4549
4550 /* Return nonzero if it is possible to eliminate the biv BL provided
4551 all givs are reduced. This is possible if either the reg is not
4552 used outside the loop, or we can compute what its final value will
4553 be. */
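/* For example, the counter in "for (i = 0; i < 100; i++)" is usually a
   candidate: either i is dead after the loop, or its final value (100 here)
   can be computed and stored after the loop.  (Illustrative source only.)  */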
4554
4555 static int
4556 loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl,
4557 int threshold, int insn_count)
4558 {
4559 /* For architectures with a decrement_and_branch_until_zero insn,
4560 don't do this if we put a REG_NONNEG note on the endtest for this
4561 biv. */
4562
4563 #ifdef HAVE_decrement_and_branch_until_zero
4564 if (bl->nonneg)
4565 {
4566 if (loop_dump_stream)
4567 fprintf (loop_dump_stream,
4568 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4569 return 0;
4570 }
4571 #endif
4572
4573 /* Check whether the biv is used outside the loop or has a final value.
4574 Compare against bl->init_insn rather than loop->start. We aren't
4575 concerned with any uses of the biv between init_insn and
4576 loop->start since these won't be affected by the value of the biv
4577 elsewhere in the function, so long as init_insn doesn't use the
4578 biv itself. */
4579
4580 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4581 && bl->init_insn
4582 && INSN_UID (bl->init_insn) < max_uid_for_loop
4583 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
4584 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4585 || (bl->final_value = final_biv_value (loop, bl)))
4586 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4587
4588 if (loop_dump_stream)
4589 {
4590 fprintf (loop_dump_stream,
4591 "Cannot eliminate biv %d.\n",
4592 bl->regno);
4593 fprintf (loop_dump_stream,
4594 "First use: insn %d, last use: insn %d.\n",
4595 REGNO_FIRST_UID (bl->regno),
4596 REGNO_LAST_UID (bl->regno));
4597 }
4598 return 0;
4599 }
4600
4601
4602 /* Reduce each giv of BL that we have decided to reduce. */
4603
4604 static void
4605 loop_givs_reduce (struct loop *loop, struct iv_class *bl)
4606 {
4607 struct induction *v;
4608
4609 for (v = bl->giv; v; v = v->next_iv)
4610 {
4611 struct induction *tv;
4612 if (! v->ignore && v->same == 0)
4613 {
4614 int auto_inc_opt = 0;
4615
4616 /* If the code for derived givs immediately below has already
4617 allocated a new_reg, we must keep it. */
4618 if (! v->new_reg)
4619 v->new_reg = gen_reg_rtx (v->mode);
4620
4621 #ifdef AUTO_INC_DEC
4622 /* If the target has auto-increment addressing modes, and
4623 this is an address giv, then try to put the increment
4624 immediately after its use, so that flow can create an
4625 auto-increment addressing mode. */
4626 /* Don't do this for loops entered at the bottom, to avoid
4627 this invalid transformation:
4628 jmp L; -> jmp L;
4629 TOP: TOP:
4630 use giv use giv
4631 L: inc giv
4632 inc biv L:
4633 test biv test giv
4634 cbr TOP cbr TOP
4635 */
4636 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4637 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4638 /* We don't handle reversed biv's because bl->biv->insn
4639 does not have a valid INSN_LUID. */
4640 && ! bl->reversed
4641 && v->always_executed && ! v->maybe_multiple
4642 && INSN_UID (v->insn) < max_uid_for_loop
4643 && !loop->top)
4644 {
4645 /* If other giv's have been combined with this one, then
4646 this will work only if all uses of the other giv's occur
4647 before this giv's insn. This is difficult to check.
4648
4649 We simplify this by looking for the common case where
4650 there is one DEST_REG giv, and this giv's insn is the
4651 last use of the dest_reg of that DEST_REG giv. If the
4652 increment occurs after the address giv, then we can
4653 perform the optimization. (Otherwise, the increment
4654 would have to go before other_giv, and we would not be
4655 able to combine it with the address giv to get an
4656 auto-inc address.) */
4657 if (v->combined_with)
4658 {
4659 struct induction *other_giv = 0;
4660
4661 for (tv = bl->giv; tv; tv = tv->next_iv)
4662 if (tv->same == v)
4663 {
4664 if (other_giv)
4665 break;
4666 else
4667 other_giv = tv;
4668 }
4669 if (! tv && other_giv
4670 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4671 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4672 == INSN_UID (v->insn))
4673 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4674 auto_inc_opt = 1;
4675 }
4676 /* Check for case where increment is before the address
4677 giv. Do this test in "loop order". */
4678 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4679 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4680 || (INSN_LUID (bl->biv->insn)
4681 > INSN_LUID (loop->scan_start))))
4682 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4683 && (INSN_LUID (loop->scan_start)
4684 < INSN_LUID (bl->biv->insn))))
4685 auto_inc_opt = -1;
4686 else
4687 auto_inc_opt = 1;
4688
4689 #ifdef HAVE_cc0
4690 {
4691 rtx prev;
4692
4693 /* We can't put an insn immediately after one setting
4694 cc0, or immediately before one using cc0. */
4695 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4696 || (auto_inc_opt == -1
4697 && (prev = prev_nonnote_insn (v->insn)) != 0
4698 && INSN_P (prev)
4699 && sets_cc0_p (PATTERN (prev))))
4700 auto_inc_opt = 0;
4701 }
4702 #endif
4703
4704 if (auto_inc_opt)
4705 v->auto_inc_opt = 1;
4706 }
4707 #endif
4708
4709 /* For each place where the biv is incremented, add an insn
4710 to increment the new, reduced reg for the giv. */
4711 for (tv = bl->biv; tv; tv = tv->next_iv)
4712 {
4713 rtx insert_before;
4714
4715 /* Skip if location is the same as a previous one. */
4716 if (tv->same)
4717 continue;
4718 if (! auto_inc_opt)
4719 insert_before = NEXT_INSN (tv->insn);
4720 else if (auto_inc_opt == 1)
4721 insert_before = NEXT_INSN (v->insn);
4722 else
4723 insert_before = v->insn;
4724
4725 if (tv->mult_val == const1_rtx)
4726 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4727 v->new_reg, v->new_reg,
4728 0, insert_before);
4729 else /* tv->mult_val == const0_rtx */
4730 /* A multiply is acceptable here
4731 since this is presumed to be seldom executed. */
4732 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4733 v->add_val, v->new_reg,
4734 0, insert_before);
4735 }
4736
4737 /* Add code at loop start to initialize giv's reduced reg. */
4738
4739 loop_iv_add_mult_hoist (loop,
4740 extend_value_for_giv (v, bl->initial_value),
4741 v->mult_val, v->add_val, v->new_reg);
4742 }
4743 }
4744 }
4745
4746
4747 /* Check for givs whose first use is their definition and whose
4748 last use is the definition of another giv. If so, it is likely
4749 dead and should not be used to derive another giv nor to
4750 eliminate a biv. */
4751
4752 static void
4753 loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl)
4754 {
4755 struct induction *v;
4756
4757 for (v = bl->giv; v; v = v->next_iv)
4758 {
4759 if (v->ignore
4760 || (v->same && v->same->ignore))
4761 continue;
4762
4763 if (v->giv_type == DEST_REG
4764 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4765 {
4766 struct induction *v1;
4767
4768 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4769 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4770 v->maybe_dead = 1;
4771 }
4772 }
4773 }
4774
4775
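/* Rescan the givs of BL and connect them to the rest of the loop body:
   propagate the "ignore" flag from combined givs, mark reduced registers
   that are known to be pointers, and for each reduced giv either substitute
   its new register directly (DEST_ADDR givs via validate_change, replaceable
   givs via REG_MAP) or emit a move from the new register into the original
   giv register.  Givs that depend on a reversed biv, or that have a known
   final value, also get the insns needed to make them correct after the
   loop.  */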
4776 static void
4777 loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map)
4778 {
4779 struct induction *v;
4780
4781 for (v = bl->giv; v; v = v->next_iv)
4782 {
4783 if (v->same && v->same->ignore)
4784 v->ignore = 1;
4785
4786 if (v->ignore)
4787 continue;
4788
4789 /* Update expression if this was combined, in case other giv was
4790 replaced. */
4791 if (v->same)
4792 v->new_reg = replace_rtx (v->new_reg,
4793 v->same->dest_reg, v->same->new_reg);
4794
4795 /* See if this register is known to be a pointer to something. If
4796 so, see if we can find the alignment. First see if there is a
4797 destination register that is a pointer. If so, this shares the
4798 alignment too. Next see if we can deduce anything from the
4799 computational information. If not, and this is a DEST_ADDR
4800 giv, at least we know that it's a pointer, though we don't know
4801 the alignment. */
4802 if (REG_P (v->new_reg)
4803 && v->giv_type == DEST_REG
4804 && REG_POINTER (v->dest_reg))
4805 mark_reg_pointer (v->new_reg,
4806 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4807 else if (REG_P (v->new_reg)
4808 && REG_POINTER (v->src_reg))
4809 {
4810 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4811
4812 if (align == 0
4813 || GET_CODE (v->add_val) != CONST_INT
4814 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4815 align = 0;
4816
4817 mark_reg_pointer (v->new_reg, align);
4818 }
4819 else if (REG_P (v->new_reg)
4820 && REG_P (v->add_val)
4821 && REG_POINTER (v->add_val))
4822 {
4823 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4824
4825 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4826 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4827 align = 0;
4828
4829 mark_reg_pointer (v->new_reg, align);
4830 }
4831 else if (REG_P (v->new_reg) && v->giv_type == DEST_ADDR)
4832 mark_reg_pointer (v->new_reg, 0);
4833
4834 if (v->giv_type == DEST_ADDR)
4835 /* Store reduced reg as the address in the memref where we found
4836 this giv. */
4837 validate_change (v->insn, v->location, v->new_reg, 0);
4838 else if (v->replaceable)
4839 {
4840 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4841 }
4842 else
4843 {
4844 rtx original_insn = v->insn;
4845 rtx note;
4846
4847 /* Not replaceable; emit an insn to set the original giv reg from
4848 the reduced giv, same as above. */
4849 v->insn = loop_insn_emit_after (loop, 0, original_insn,
4850 gen_move_insn (v->dest_reg,
4851 v->new_reg));
4852
4853 /* The original insn may have a REG_EQUAL note. This note is
4854 now incorrect and may result in invalid substitutions later.
4855 The original insn is dead, but may be part of a libcall
4856 sequence, which doesn't seem worth the bother of handling. */
4857 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
4858 if (note)
4859 remove_note (original_insn, note);
4860 }
4861
4862 /* When a loop is reversed, givs which depend on the reversed
4863 biv, and which are live outside the loop, must be set to their
4864 correct final value. This insn is only needed if the giv is
4865 not replaceable. The correct final value is the same as the
4866 value that the giv starts the reversed loop with. */
4867 if (bl->reversed && ! v->replaceable)
4868 loop_iv_add_mult_sink (loop,
4869 extend_value_for_giv (v, bl->initial_value),
4870 v->mult_val, v->add_val, v->dest_reg);
4871 else if (v->final_value)
4872 loop_insn_sink_or_swim (loop,
4873 gen_load_of_final_value (v->dest_reg,
4874 v->final_value));
4875
4876 if (loop_dump_stream)
4877 {
4878 fprintf (loop_dump_stream, "giv at %d reduced to ",
4879 INSN_UID (v->insn));
4880 print_simple_rtl (loop_dump_stream, v->new_reg);
4881 fprintf (loop_dump_stream, "\n");
4882 }
4883 }
4884 }
4885
4886
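/* Estimate the benefit of strength-reducing giv V of class BL.  Start from
   V->benefit, subtract a copy cost if the giv is a user variable that is
   neither replaceable nor attached to an eliminable biv, subtract the cost
   of the add insns that each biv increment will require, and, when
   auto-increment addressing could absorb those adds, give that cost back.
   TEST_REG is a scratch register used only for cost estimation.  */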
4887 static int
4888 loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED,
4889 struct iv_class *bl, struct induction *v,
4890 rtx test_reg)
4891 {
4892 int add_cost;
4893 int benefit;
4894
4895 benefit = v->benefit;
4896 PUT_MODE (test_reg, v->mode);
4897 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4898 test_reg, test_reg);
4899
4900 /* Reduce benefit if not replaceable, since we will insert a
4901 move-insn to replace the insn that calculates this giv. Don't do
4902 this unless the giv is a user variable, since it will often be
4903 marked non-replaceable because of the duplication of the exit
4904 code outside the loop. In such a case, the copies we insert are
4905 dead and will be deleted. So they don't have a cost. Similar
4906 situations exist. */
4907 /* ??? The new final_[bg]iv_value code does a much better job of
4908 finding replaceable giv's, and hence this code may no longer be
4909 necessary. */
4910 if (! v->replaceable && ! bl->eliminable
4911 && REG_USERVAR_P (v->dest_reg))
4912 benefit -= copy_cost;
4913
4914 /* Decrease the benefit to count the add-insns that we will insert
4915 to increment the reduced reg for the giv. ??? This can
4916 overestimate the run-time cost of the additional insns, e.g. if
4917 there are multiple basic blocks that increment the biv, but only
4918 one of these blocks is executed during each iteration. There is
4919 no good way to detect cases like this with the current structure
4920 of the loop optimizer. This code is more accurate for
4921 determining code size than run-time benefits. */
4922 benefit -= add_cost * bl->biv_count;
4923
4924 /* Decide whether to strength-reduce this giv or to leave the code
4925 unchanged (recompute it from the biv each time it is used). This
4926 decision can be made independently for each giv. */
4927
4928 #ifdef AUTO_INC_DEC
4929 /* Attempt to guess whether autoincrement will handle some of the
4930 new add insns; if so, increase BENEFIT (undo the subtraction of
4931 add_cost that was done above). */
4932 if (v->giv_type == DEST_ADDR
4933 /* Increasing the benefit is risky, since this is only a guess.
4934 Avoid increasing register pressure in cases where there would
4935 be no other benefit from reducing this giv. */
4936 && benefit > 0
4937 && GET_CODE (v->mult_val) == CONST_INT)
4938 {
4939 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4940
4941 if (HAVE_POST_INCREMENT
4942 && INTVAL (v->mult_val) == size)
4943 benefit += add_cost * bl->biv_count;
4944 else if (HAVE_PRE_INCREMENT
4945 && INTVAL (v->mult_val) == size)
4946 benefit += add_cost * bl->biv_count;
4947 else if (HAVE_POST_DECREMENT
4948 && -INTVAL (v->mult_val) == size)
4949 benefit += add_cost * bl->biv_count;
4950 else if (HAVE_PRE_DECREMENT
4951 && -INTVAL (v->mult_val) == size)
4952 benefit += add_cost * bl->biv_count;
4953 }
4954 #endif
4955
4956 return benefit;
4957 }
4958
4959
4960 /* Free IV structures for LOOP. */
4961
4962 static void
4963 loop_ivs_free (struct loop *loop)
4964 {
4965 struct loop_ivs *ivs = LOOP_IVS (loop);
4966 struct iv_class *iv = ivs->list;
4967
4968 free (ivs->regs);
4969
4970 while (iv)
4971 {
4972 struct iv_class *next = iv->next;
4973 struct induction *induction;
4974 struct induction *next_induction;
4975
4976 for (induction = iv->biv; induction; induction = next_induction)
4977 {
4978 next_induction = induction->next_iv;
4979 free (induction);
4980 }
4981 for (induction = iv->giv; induction; induction = next_induction)
4982 {
4983 next_induction = induction->next_iv;
4984 free (induction);
4985 }
4986
4987 free (iv);
4988 iv = next;
4989 }
4990 }
4991
4992
4993 /* Perform strength reduction and induction variable elimination.
4994
4995 Pseudo registers created during this function will be beyond the
4996 last valid index in several tables including
4997 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
4998 problem here, because the added registers cannot be givs outside of
4999 their loop, and hence will never be reconsidered. But scan_loop
5000 must check regnos to make sure they are in bounds. */
5001
5002 static void
5003 strength_reduce (struct loop *loop, int flags)
5004 {
5005 struct loop_info *loop_info = LOOP_INFO (loop);
5006 struct loop_regs *regs = LOOP_REGS (loop);
5007 struct loop_ivs *ivs = LOOP_IVS (loop);
5008 rtx p;
5009 /* Temporary list pointer for traversing ivs->list. */
5010 struct iv_class *bl;
5011 /* Ratio of extra register life span we can justify
5012 for saving an instruction. It is larger if the loop doesn't call
5013 subroutines, since in that case saving an insn makes more difference
5014 and more registers are available. */
5015 /* ??? could set this to last value of threshold in move_movables */
5016 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
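  /* For example, on a target with 29 allocatable registers this gives a
     threshold of 2 * (3 + 29) == 64 for a call-free loop, and half that when
     the loop contains calls.  (The register count is illustrative only.)  */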
5017 /* Map of pseudo-register replacements. */
5018 rtx *reg_map = NULL;
5019 int reg_map_size;
5020 int unrolled_insn_copies = 0;
5021 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
5022 int insn_count = count_insns_in_loop (loop);
5023
5024 addr_placeholder = gen_reg_rtx (Pmode);
5025
5026 ivs->n_regs = max_reg_before_loop;
5027 ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv));
5028
5029 /* Find all BIVs in loop. */
5030 loop_bivs_find (loop);
5031
5032 /* Exit if there are no bivs. */
5033 if (! ivs->list)
5034 {
5035 /* We can still unroll the loop anyway, but indicate that there is no
5036 strength reduction info available. */
5037 if (flags & LOOP_UNROLL)
5038 unroll_loop (loop, insn_count, 0);
5039
5040 loop_ivs_free (loop);
5041 return;
5042 }
5043
5044 /* Determine how BIVS are initialized by looking through the pre-header
5045 extended basic block. */
5046 loop_bivs_init_find (loop);
5047
5048 /* Look at each biv and see if we can say anything better about its
5049 initial value from any initializing insns set up above. */
5050 loop_bivs_check (loop);
5051
5052 /* Search the loop for general induction variables. */
5053 loop_givs_find (loop);
5054
5055 /* Try to calculate and save the number of loop iterations. This is
5056 set to zero if the actual number cannot be calculated. This must
5057 be called after all giv's have been identified, since otherwise it may
5058 fail if the iteration variable is a giv. */
5059 loop_iterations (loop);
5060
5061 #ifdef HAVE_prefetch
5062 if (flags & LOOP_PREFETCH)
5063 emit_prefetch_instructions (loop);
5064 #endif
5065
5066 /* Now for each giv for which we still don't know whether or not it is
5067 replaceable, check to see if it is replaceable because its final value
5068 can be calculated. This must be done after loop_iterations is called,
5069 so that final_giv_value will work correctly. */
5070 loop_givs_check (loop);
5071
5072 /* Try to prove that the loop counter variable (if any) is always
5073 nonnegative; if so, record that fact with a REG_NONNEG note
5074 so that "decrement and branch until zero" insn can be used. */
5075 check_dbra_loop (loop, insn_count);
5076
5077 /* Create reg_map to hold substitutions for replaceable giv regs.
5078 Some givs might have been made from biv increments, so look at
5079 ivs->reg_iv_type for a suitable size. */
5080 reg_map_size = ivs->n_regs;
5081 reg_map = xcalloc (reg_map_size, sizeof (rtx));
5082
5083 /* Examine each iv class for feasibility of strength reduction/induction
5084 variable elimination. */
5085
5086 for (bl = ivs->list; bl; bl = bl->next)
5087 {
5088 struct induction *v;
5089 int benefit;
5090
5091 /* Test whether it will be possible to eliminate this biv
5092 provided all givs are reduced. */
5093 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
5094
5095 /* This will be true at the end, if all givs which depend on this
5096 biv have been strength reduced.
5097 We can't (currently) eliminate the biv unless this is so. */
5098 bl->all_reduced = 1;
5099
5100 /* Check each extension dependent giv in this class to see if its
5101 root biv is safe from wrapping in the interior mode. */
5102 check_ext_dependent_givs (loop, bl);
5103
5104 /* Combine all giv's for this iv_class. */
5105 combine_givs (regs, bl);
5106
5107 for (v = bl->giv; v; v = v->next_iv)
5108 {
5109 struct induction *tv;
5110
5111 if (v->ignore || v->same)
5112 continue;
5113
5114 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
5115
5116 /* If an insn is not to be strength reduced, then set its ignore
5117 flag, and clear bl->all_reduced. */
5118
5119 /* A giv that depends on a reversed biv must be reduced if it is
5120 used after the loop exit; otherwise, it would have the wrong
5121 value after the loop exit. To keep it simple, just reduce all
5122 such givs whether or not we know they are used after the loop
5123 exit. */
5124
5125 if (! flag_reduce_all_givs
5126 && v->lifetime * threshold * benefit < insn_count
5127 && ! bl->reversed)
5128 {
5129 if (loop_dump_stream)
5130 fprintf (loop_dump_stream,
5131 "giv of insn %d not worth while, %d vs %d.\n",
5132 INSN_UID (v->insn),
5133 v->lifetime * threshold * benefit, insn_count);
5134 v->ignore = 1;
5135 bl->all_reduced = 0;
5136 }
5137 else
5138 {
5139 /* Check that we can increment the reduced giv without a
5140 multiply insn. If not, reject it. */
5141
5142 for (tv = bl->biv; tv; tv = tv->next_iv)
5143 if (tv->mult_val == const1_rtx
5144 && ! product_cheap_p (tv->add_val, v->mult_val))
5145 {
5146 if (loop_dump_stream)
5147 fprintf (loop_dump_stream,
5148 "giv of insn %d: would need a multiply.\n",
5149 INSN_UID (v->insn));
5150 v->ignore = 1;
5151 bl->all_reduced = 0;
5152 break;
5153 }
5154 }
5155 }
5156
5157 /* Check for givs whose first use is their definition and whose
5158 last use is the definition of another giv. If so, it is likely
5159 dead and should not be used to derive another giv nor to
5160 eliminate a biv. */
5161 loop_givs_dead_check (loop, bl);
5162
5163 /* Reduce each giv that we decided to reduce. */
5164 loop_givs_reduce (loop, bl);
5165
5166 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5167 as not reduced.
5168
5169 For each giv register that can be reduced now: if replaceable,
5170 substitute reduced reg wherever the old giv occurs;
5171 else add new move insn "giv_reg = reduced_reg". */
5172 loop_givs_rescan (loop, bl, reg_map);
5173
5174 /* All the givs based on the biv bl have been reduced if they
5175 merit it. */
5176
5177 /* For each giv not marked as maybe dead that has been combined with a
5178 second giv, clear any "maybe dead" mark on that second giv.
5179 v->new_reg will either be or refer to the register of the giv it
5180 combined with.
5181
5182 Doing this clearing avoids problems in biv elimination where
5183 a giv's new_reg is a complex value that can't be put in the
5184 insn but the giv combined with (with a reg as new_reg) is
5185 marked maybe_dead. Since the register will be used in either
5186 case, we'd prefer it be used from the simpler giv. */
5187
5188 for (v = bl->giv; v; v = v->next_iv)
5189 if (! v->maybe_dead && v->same)
5190 v->same->maybe_dead = 0;
5191
5192 /* Try to eliminate the biv, if it is a candidate.
5193 This won't work if ! bl->all_reduced,
5194 since the givs we planned to use might not have been reduced.
5195
5196 We have to be careful that we didn't initially think we could
5197 eliminate this biv because of a giv that we now think may be
5198 dead and shouldn't be used as a biv replacement.
5199
5200 Also, there is the possibility that we may have a giv that looks
5201 like it can be used to eliminate a biv, but the resulting insn
5202 isn't valid. This can happen, for example, on the 88k, where a
5203 JUMP_INSN can compare a register only with zero. Attempts to
5204 replace it with a compare with a constant will fail.
5205
5206 Note that in cases where this call fails, we may have replaced some
5207 of the occurrences of the biv with a giv, but no harm was done in
5208 doing so in the rare cases where it can occur. */
5209
5210 if (bl->all_reduced == 1 && bl->eliminable
5211 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
5212 {
5213 /* ?? If we created a new test to bypass the loop entirely,
5214 or otherwise drop straight in, based on this test, then
5215 we might want to rewrite it also. This way some later
5216 pass has more hope of removing the initialization of this
5217 biv entirely. */
5218
5219 /* If final_value != 0, then the biv may be used after loop end
5220 and we must emit an insn to set it just in case.
5221
5222 Reversed bivs already have an insn after the loop setting their
5223 value, so we don't need another one. We can't calculate the
5224 proper final value for such a biv here anyways. */
5225 if (bl->final_value && ! bl->reversed)
5226 loop_insn_sink_or_swim (loop,
5227 gen_load_of_final_value (bl->biv->dest_reg,
5228 bl->final_value));
5229
5230 if (loop_dump_stream)
5231 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5232 bl->regno);
5233 }
5234 /* See above note wrt final_value. But since we couldn't eliminate
5235 the biv, we must set the value after the loop instead of before. */
5236 else if (bl->final_value && ! bl->reversed)
5237 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
5238 bl->final_value));
5239 }
5240
5241 /* Go through all the instructions in the loop, making all the
5242 register substitutions scheduled in REG_MAP. */
5243
5244 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
5245 if (INSN_P (p))
5246 {
5247 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5248 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5249 INSN_CODE (p) = -1;
5250 }
5251
5252 if (loop_info->n_iterations > 0)
5253 {
5254 /* When we completely unroll a loop we will likely not need the increment
5255 of the loop BIV and we will not need the conditional branch at the
5256 end of the loop. */
5257 unrolled_insn_copies = insn_count - 2;
5258
5259 #ifdef HAVE_cc0
5260 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5261 need the comparison before the conditional branch at the end of the
5262 loop. */
5263 unrolled_insn_copies -= 1;
5264 #endif
5265
5266 /* We'll need one copy for each loop iteration. */
5267 unrolled_insn_copies *= loop_info->n_iterations;
5268
5269 /* A little slop to account for the ability to remove initialization
5270 code, better CSE, and other secondary benefits of completely
5271 unrolling some loops. */
5272 unrolled_insn_copies -= 1;
5273
5274 /* Clamp the value. */
5275 if (unrolled_insn_copies < 0)
5276 unrolled_insn_copies = 0;
5277 }
5278
5279 /* Unroll loops from within strength reduction so that we can use the
5280 induction variable information that strength_reduce has already
5281 collected. Always unroll loops that would be as small or smaller
5282 unrolled than when rolled. */
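
  /* A rough worked example of the heuristic above (illustrative numbers
     only): with insn_count == 10, loop_info->n_iterations == 4 and no
     HAVE_cc0 adjustment, unrolled_insn_copies is (10 - 2) * 4 - 1 == 31,
     which exceeds insn_count, so the loop is not unrolled automatically;
     only an explicit LOOP_UNROLL request would unroll it.  */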
5283 if ((flags & LOOP_UNROLL)
5284 || ((flags & LOOP_AUTO_UNROLL)
5285 && loop_info->n_iterations > 0
5286 && unrolled_insn_copies <= insn_count))
5287 unroll_loop (loop, insn_count, 1);
5288
5289 if (loop_dump_stream)
5290 fprintf (loop_dump_stream, "\n");
5291
5292 loop_ivs_free (loop);
5293 if (reg_map)
5294 free (reg_map);
5295 }
5296 \f
5297 /* Record all basic induction variables calculated in the insn. */
5298 static rtx
5299 check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration,
5300 int maybe_multiple)
5301 {
5302 struct loop_ivs *ivs = LOOP_IVS (loop);
5303 rtx set;
5304 rtx dest_reg;
5305 rtx inc_val;
5306 rtx mult_val;
5307 rtx *location;
5308
5309 if (NONJUMP_INSN_P (p)
5310 && (set = single_set (p))
5311 && REG_P (SET_DEST (set)))
5312 {
5313 dest_reg = SET_DEST (set);
5314 if (REGNO (dest_reg) < max_reg_before_loop
5315 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5316 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5317 {
5318 if (basic_induction_var (loop, SET_SRC (set),
5319 GET_MODE (SET_SRC (set)),
5320 dest_reg, p, &inc_val, &mult_val,
5321 &location))
5322 {
5323 /* It is a possible basic induction variable.
5324 Create and initialize an induction structure for it. */
5325
5326 struct induction *v = xmalloc (sizeof (struct induction));
5327
5328 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
5329 not_every_iteration, maybe_multiple);
5330 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
5331 }
5332 else if (REGNO (dest_reg) < ivs->n_regs)
5333 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5334 }
5335 }
5336 return p;
5337 }
5338 \f
5339 /* Record all givs calculated in the insn.
5340 A register is a giv if: it is only set once, it is a function of a
5341 biv and a constant (or invariant), and it is not a biv. */
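
/* As an illustration (target-dependent RTL details omitted): in a loop like

       for (i = 0; i < n; i++)
	 x = i * 4 + 10;

   the register holding `x' would typically be recorded as a giv with
   mult_val == (const_int 4) and add_val == (const_int 10), provided `i'
   was already recognized as a biv.  */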
5342 static rtx
5343 check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration,
5344 int maybe_multiple)
5345 {
5346 struct loop_regs *regs = LOOP_REGS (loop);
5347
5348 rtx set;
5349 /* Look for a general induction variable in a register. */
5350 if (NONJUMP_INSN_P (p)
5351 && (set = single_set (p))
5352 && REG_P (SET_DEST (set))
5353 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
5354 {
5355 rtx src_reg;
5356 rtx dest_reg;
5357 rtx add_val;
5358 rtx mult_val;
5359 rtx ext_val;
5360 int benefit;
5361 rtx regnote = 0;
5362 rtx last_consec_insn;
5363
5364 dest_reg = SET_DEST (set);
5365 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5366 return p;
5367
5368 if (/* SET_SRC is a giv. */
5369 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5370 &mult_val, &ext_val, 0, &benefit, VOIDmode)
5371 /* Equivalent expression is a giv. */
5372 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5373 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5374 &add_val, &mult_val, &ext_val, 0,
5375 &benefit, VOIDmode)))
5376 /* Don't try to handle any regs made by loop optimization.
5377 We have nothing on them in regno_first_uid, etc. */
5378 && REGNO (dest_reg) < max_reg_before_loop
5379 /* Don't recognize a BASIC_INDUCT_VAR here. */
5380 && dest_reg != src_reg
5381 /* This must be the only place where the register is set. */
5382 && (regs->array[REGNO (dest_reg)].n_times_set == 1
5383 /* or all sets must be consecutive and make a giv. */
5384 || (benefit = consec_sets_giv (loop, benefit, p,
5385 src_reg, dest_reg,
5386 &add_val, &mult_val, &ext_val,
5387 &last_consec_insn))))
5388 {
5389 struct induction *v = xmalloc (sizeof (struct induction));
5390
5391 /* If this is a library call, increase benefit. */
5392 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5393 benefit += libcall_benefit (p);
5394
5395 /* Skip the consecutive insns, if there are any. */
5396 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
5397 p = last_consec_insn;
5398
5399 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5400 ext_val, benefit, DEST_REG, not_every_iteration,
5401 maybe_multiple, (rtx*) 0);
5402
5403 }
5404 }
5405
5406 /* Look for givs which are memory addresses. */
5407 if (NONJUMP_INSN_P (p))
5408 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5409 maybe_multiple);
5410
5411 /* Update the status of whether giv can derive other givs. This can
5412 change when we pass a label or an insn that updates a biv. */
5413 if (INSN_P (p) || LABEL_P (p))
5414 update_giv_derive (loop, p);
5415 return p;
5416 }
5417 \f
5418 /* Return 1 if X is a valid source for an initial value (or as value being
5419 compared against in an initial test).
5420
5421 X must be either a register or constant and must not be clobbered between
5422 the current insn and the start of the loop.
5423
5424 INSN is the insn containing X. */
5425
5426 static int
5427 valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start)
5428 {
5429 if (CONSTANT_P (x))
5430 return 1;
5431
5432 /* Only consider pseudos we know about initialized in insns whose luids
5433 we know. */
5434 if (!REG_P (x)
5435 || REGNO (x) >= max_reg_before_loop)
5436 return 0;
5437
5438 /* Don't use a call-clobbered register across a call which clobbers it. On
5439 some machines, don't use any hard registers at all. */
5440 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5441 && (SMALL_REGISTER_CLASSES
5442 || (call_used_regs[REGNO (x)] && call_seen)))
5443 return 0;
5444
5445 /* Don't use registers that have been clobbered before the start of the
5446 loop. */
5447 if (reg_set_between_p (x, insn, loop_start))
5448 return 0;
5449
5450 return 1;
5451 }
5452 \f
5453 /* Scan X for memory refs and check each memory address
5454 as a possible giv. INSN is the insn whose pattern X comes from.
5455 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5456 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5457 more than once in each loop iteration. */
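
/* For example, a memory reference whose address has the (simplified) form

       (mem:SI (plus:SI (reg biv) (const_int 8)))

   would typically be recorded as a DEST_ADDR giv with mult_val == const1_rtx
   and add_val == (const_int 8); the exact RTL depends on the target.  */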
5458
5459 static void
5460 find_mem_givs (const struct loop *loop, rtx x, rtx insn,
5461 int not_every_iteration, int maybe_multiple)
5462 {
5463 int i, j;
5464 enum rtx_code code;
5465 const char *fmt;
5466
5467 if (x == 0)
5468 return;
5469
5470 code = GET_CODE (x);
5471 switch (code)
5472 {
5473 case REG:
5474 case CONST_INT:
5475 case CONST:
5476 case CONST_DOUBLE:
5477 case SYMBOL_REF:
5478 case LABEL_REF:
5479 case PC:
5480 case CC0:
5481 case ADDR_VEC:
5482 case ADDR_DIFF_VEC:
5483 case USE:
5484 case CLOBBER:
5485 return;
5486
5487 case MEM:
5488 {
5489 rtx src_reg;
5490 rtx add_val;
5491 rtx mult_val;
5492 rtx ext_val;
5493 int benefit;
5494
5495 /* This code used to disable creating GIVs with mult_val == 1 and
5496 add_val == 0. However, this leads to lost optimizations when
5497 it comes time to combine a set of related DEST_ADDR GIVs, since
5498 this one would not be seen. */
5499
5500 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5501 &mult_val, &ext_val, 1, &benefit,
5502 GET_MODE (x)))
5503 {
5504 /* Found one; record it. */
5505 struct induction *v = xmalloc (sizeof (struct induction));
5506
5507 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5508 add_val, ext_val, benefit, DEST_ADDR,
5509 not_every_iteration, maybe_multiple, &XEXP (x, 0));
5510
5511 v->mem = x;
5512 }
5513 }
5514 return;
5515
5516 default:
5517 break;
5518 }
5519
5520 /* Recursively scan the subexpressions for other mem refs. */
5521
5522 fmt = GET_RTX_FORMAT (code);
5523 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5524 if (fmt[i] == 'e')
5525 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5526 maybe_multiple);
5527 else if (fmt[i] == 'E')
5528 for (j = 0; j < XVECLEN (x, i); j++)
5529 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5530 maybe_multiple);
5531 }
5532 \f
5533 /* Fill in the data about one biv update.
5534 V is the `struct induction' in which we record the biv. (It is
5535 allocated by the caller, with alloca.)
5536 INSN is the insn that sets it.
5537 DEST_REG is the biv's reg.
5538
5539 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5540 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5541 being set to INC_VAL.
5542
5543 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5544 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5545 can be executed more than once per iteration. If MAYBE_MULTIPLE
5546 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5547 executed exactly once per iteration. */
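
/* For example (illustrative values only): an update `i = i + 4' is recorded
   with MULT_VAL == const1_rtx and INC_VAL == (const_int 4), whereas an
   assignment of a loop invariant such as `i = 10' is recorded with
   MULT_VAL == const0_rtx and INC_VAL == (const_int 10).  */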
5548
5549 static void
5550 record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg,
5551 rtx inc_val, rtx mult_val, rtx *location,
5552 int not_every_iteration, int maybe_multiple)
5553 {
5554 struct loop_ivs *ivs = LOOP_IVS (loop);
5555 struct iv_class *bl;
5556
5557 v->insn = insn;
5558 v->src_reg = dest_reg;
5559 v->dest_reg = dest_reg;
5560 v->mult_val = mult_val;
5561 v->add_val = inc_val;
5562 v->ext_dependent = NULL_RTX;
5563 v->location = location;
5564 v->mode = GET_MODE (dest_reg);
5565 v->always_computable = ! not_every_iteration;
5566 v->always_executed = ! not_every_iteration;
5567 v->maybe_multiple = maybe_multiple;
5568 v->same = 0;
5569
5570 /* Add this to the reg's iv_class, creating a class
5571 if this is the first incrementation of the reg. */
5572
5573 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
5574 if (bl == 0)
5575 {
5576 /* Create and initialize new iv_class. */
5577
5578 bl = xmalloc (sizeof (struct iv_class));
5579
5580 bl->regno = REGNO (dest_reg);
5581 bl->biv = 0;
5582 bl->giv = 0;
5583 bl->biv_count = 0;
5584 bl->giv_count = 0;
5585
5586 /* Set initial value to the reg itself. */
5587 bl->initial_value = dest_reg;
5588 bl->final_value = 0;
5589 /* We haven't seen the initializing insn yet. */
5590 bl->init_insn = 0;
5591 bl->init_set = 0;
5592 bl->initial_test = 0;
5593 bl->incremented = 0;
5594 bl->eliminable = 0;
5595 bl->nonneg = 0;
5596 bl->reversed = 0;
5597 bl->total_benefit = 0;
5598
5599 /* Add this class to ivs->list. */
5600 bl->next = ivs->list;
5601 ivs->list = bl;
5602
5603 /* Put it in the array of biv register classes. */
5604 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
5605 }
5606 else
5607 {
5608 /* Check if location is the same as a previous one. */
5609 struct induction *induction;
5610 for (induction = bl->biv; induction; induction = induction->next_iv)
5611 if (location == induction->location)
5612 {
5613 v->same = induction;
5614 break;
5615 }
5616 }
5617
5618 /* Update IV_CLASS entry for this biv. */
5619 v->next_iv = bl->biv;
5620 bl->biv = v;
5621 bl->biv_count++;
5622 if (mult_val == const1_rtx)
5623 bl->incremented = 1;
5624
5625 if (loop_dump_stream)
5626 loop_biv_dump (v, loop_dump_stream, 0);
5627 }
5628 \f
5629 /* Fill in the data about one giv.
5630 V is the `struct induction' in which we record the giv. (It is
5631 allocated by the caller, with alloca.)
5632 INSN is the insn that sets it.
5633 BENEFIT estimates the savings from deleting this insn.
5634 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5635 into a register or is used as a memory address.
5636
5637 SRC_REG is the biv reg which the giv is computed from.
5638 DEST_REG is the giv's reg (if the giv is stored in a reg).
5639 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5640 LOCATION points to the place where this giv's value appears in INSN. */
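
/* For instance (illustrative values): for an insn computing `r = i * 4 + 8',
   where `i' is a biv, this would be called with SRC_REG == i, DEST_REG == r,
   MULT_VAL == (const_int 4), ADD_VAL == (const_int 8) and TYPE == DEST_REG.  */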
5641
5642 static void
5643 record_giv (const struct loop *loop, struct induction *v, rtx insn,
5644 rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val,
5645 rtx ext_val, int benefit, enum g_types type,
5646 int not_every_iteration, int maybe_multiple, rtx *location)
5647 {
5648 struct loop_ivs *ivs = LOOP_IVS (loop);
5649 struct induction *b;
5650 struct iv_class *bl;
5651 rtx set = single_set (insn);
5652 rtx temp;
5653
5654 /* Attempt to prove constantness of the values. Don't let simplify_rtx
5655 undo the MULT canonicalization that we performed earlier. */
5656 temp = simplify_rtx (add_val);
5657 if (temp
5658 && ! (GET_CODE (add_val) == MULT
5659 && GET_CODE (temp) == ASHIFT))
5660 add_val = temp;
5661
5662 v->insn = insn;
5663 v->src_reg = src_reg;
5664 v->giv_type = type;
5665 v->dest_reg = dest_reg;
5666 v->mult_val = mult_val;
5667 v->add_val = add_val;
5668 v->ext_dependent = ext_val;
5669 v->benefit = benefit;
5670 v->location = location;
5671 v->cant_derive = 0;
5672 v->combined_with = 0;
5673 v->maybe_multiple = maybe_multiple;
5674 v->maybe_dead = 0;
5675 v->derive_adjustment = 0;
5676 v->same = 0;
5677 v->ignore = 0;
5678 v->new_reg = 0;
5679 v->final_value = 0;
5680 v->same_insn = 0;
5681 v->auto_inc_opt = 0;
5682 v->unrolled = 0;
5683 v->shared = 0;
5684
5685 /* The v->always_computable field is used in update_giv_derive, to
5686 determine whether a giv can be used to derive another giv. For a
5687 DEST_REG giv, INSN computes a new value for the giv, so its value
5688 isn't computable if INSN isn't executed every iteration.
5689 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5690 it does not compute a new value. Hence the value is always computable
5691 regardless of whether INSN is executed each iteration. */
5692
5693 if (type == DEST_ADDR)
5694 v->always_computable = 1;
5695 else
5696 v->always_computable = ! not_every_iteration;
5697
5698 v->always_executed = ! not_every_iteration;
5699
5700 if (type == DEST_ADDR)
5701 {
5702 v->mode = GET_MODE (*location);
5703 v->lifetime = 1;
5704 }
5705 else /* type == DEST_REG */
5706 {
5707 v->mode = GET_MODE (SET_DEST (set));
5708
5709 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
5710
5711 /* If the lifetime is zero, it means that this register is
5712 really a dead store. So mark this as a giv that can be
5713 ignored. This will not prevent the biv from being eliminated. */
5714 if (v->lifetime == 0)
5715 v->ignore = 1;
5716
5717 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5718 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
5719 }
5720
5721 /* Add the giv to the class of givs computed from one biv. */
5722
5723 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
5724 gcc_assert (bl); /* Fatal error, biv missing for this giv? */
5725 v->next_iv = bl->giv;
5726 bl->giv = v;
5727 /* Don't count DEST_ADDR. This is supposed to count the number of
5728 insns that calculate givs. */
5729 if (type == DEST_REG)
5730 bl->giv_count++;
5731 bl->total_benefit += benefit;
5732
5733 if (type == DEST_ADDR)
5734 {
5735 v->replaceable = 1;
5736 v->not_replaceable = 0;
5737 }
5738 else
5739 {
5740 /* The giv can be replaced outright by the reduced register only if all
5741 of the following conditions are true:
5742 - the insn that sets the giv is always executed on any iteration
5743 on which the giv is used at all
5744 (there are two ways to deduce this:
5745 either the insn is executed on every iteration,
5746 or all uses follow that insn in the same basic block),
5747 - the giv is not used outside the loop
5748 - no assignments to the biv occur during the giv's lifetime. */
5749
5750 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5751 /* Previous line always fails if INSN was moved by loop opt. */
5752 && REGNO_LAST_LUID (REGNO (dest_reg))
5753 < INSN_LUID (loop->end)
5754 && (! not_every_iteration
5755 || last_use_this_basic_block (dest_reg, insn)))
5756 {
5757 /* Now check that there are no assignments to the biv within the
5758 giv's lifetime. This requires two separate checks. */
5759
5760 /* Check each biv update, and fail if any are between the first
5761 and last use of the giv.
5762
5763 If this loop contains an inner loop that was unrolled, then
5764 the insn modifying the biv may have been emitted by the loop
5765 unrolling code, and hence does not have a valid luid. Just
5766 mark the biv as not replaceable in this case. It is not very
5767 useful as a biv, because it is used in two different loops.
5768 It is very unlikely that we would be able to optimize the giv
5769 using this biv anyways. */
5770
5771 v->replaceable = 1;
5772 v->not_replaceable = 0;
5773 for (b = bl->biv; b; b = b->next_iv)
5774 {
5775 if (INSN_UID (b->insn) >= max_uid_for_loop
5776 || ((INSN_LUID (b->insn)
5777 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5778 && (INSN_LUID (b->insn)
5779 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5780 {
5781 v->replaceable = 0;
5782 v->not_replaceable = 1;
5783 break;
5784 }
5785 }
5786
5787 /* If there are any backwards branches that go from after the
5788 biv update to before it, then this giv is not replaceable. */
5789 if (v->replaceable)
5790 for (b = bl->biv; b; b = b->next_iv)
5791 if (back_branch_in_range_p (loop, b->insn))
5792 {
5793 v->replaceable = 0;
5794 v->not_replaceable = 1;
5795 break;
5796 }
5797 }
5798 else
5799 {
5800 /* May still be replaceable; we don't have enough info here to
5801 decide. */
5802 v->replaceable = 0;
5803 v->not_replaceable = 0;
5804 }
5805 }
5806
5807 /* Record whether the add_val contains a const_int, for later use by
5808 combine_givs. */
5809 {
5810 rtx tem = add_val;
5811
5812 v->no_const_addval = 1;
5813 if (tem == const0_rtx)
5814 ;
5815 else if (CONSTANT_P (add_val))
5816 v->no_const_addval = 0;
5817 if (GET_CODE (tem) == PLUS)
5818 {
5819 while (1)
5820 {
5821 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5822 tem = XEXP (tem, 0);
5823 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5824 tem = XEXP (tem, 1);
5825 else
5826 break;
5827 }
5828 if (CONSTANT_P (XEXP (tem, 1)))
5829 v->no_const_addval = 0;
5830 }
5831 }
5832
5833 if (loop_dump_stream)
5834 loop_giv_dump (v, loop_dump_stream, 0);
5835 }
5836
5837 /* All this does is determine whether a giv can be made replaceable because
5838 its final value can be calculated. This code can not be part of record_giv
5839 above, because final_giv_value requires that the number of loop iterations
5840 be known, and that can not be accurately calculated until after all givs
5841 have been identified. */
5842
5843 static void
5844 check_final_value (const struct loop *loop, struct induction *v)
5845 {
5846 rtx final_value = 0;
5847
5848 /* DEST_ADDR givs will never reach here, because they are always marked
5849 replaceable above in record_giv. */
5850
5851 /* The giv can be replaced outright by the reduced register only if all
5852 of the following conditions are true:
5853 - the insn that sets the giv is always executed on any iteration
5854 on which the giv is used at all
5855 (there are two ways to deduce this:
5856 either the insn is executed on every iteration,
5857 or all uses follow that insn in the same basic block),
5858 - its final value can be calculated (this condition is different
5859 than the one above in record_giv)
5860 - it's not used before it's set
5861 - no assignments to the biv occur during the giv's lifetime. */
5862
5863 #if 0
5864 /* This is only called now when replaceable is known to be false. */
5865 /* Clear replaceable, so that it won't confuse final_giv_value. */
5866 v->replaceable = 0;
5867 #endif
5868
5869 if ((final_value = final_giv_value (loop, v))
5870 && (v->always_executed
5871 || last_use_this_basic_block (v->dest_reg, v->insn)))
5872 {
5873 int biv_increment_seen = 0, before_giv_insn = 0;
5874 rtx p = v->insn;
5875 rtx last_giv_use;
5876
5877 v->replaceable = 1;
5878 v->not_replaceable = 0;
5879
5880 /* When trying to determine whether or not a biv increment occurs
5881 during the lifetime of the giv, we can ignore uses of the variable
5882 outside the loop because final_value is true. Hence we can not
5883 use regno_last_uid and regno_first_uid as above in record_giv. */
5884
5885 /* Search the loop to determine whether any assignments to the
5886 biv occur during the giv's lifetime. Start with the insn
5887 that sets the giv, and search around the loop until we come
5888 back to that insn again.
5889
5890 Also fail if there is a jump within the giv's lifetime that jumps
5891 to somewhere outside the lifetime but still within the loop. This
5892 catches spaghetti code where the execution order is not linear, and
5893 hence the above test fails. Here we assume that the giv lifetime
5894 does not extend from one iteration of the loop to the next, so as
5895 to make the test easier. Since the lifetime isn't known yet,
5896 this requires two loops. See also record_giv above. */
5897
5898 last_giv_use = v->insn;
5899
5900 while (1)
5901 {
5902 p = NEXT_INSN (p);
5903 if (p == loop->end)
5904 {
5905 before_giv_insn = 1;
5906 p = NEXT_INSN (loop->start);
5907 }
5908 if (p == v->insn)
5909 break;
5910
5911 if (INSN_P (p))
5912 {
5913 /* It is possible for the BIV increment to use the GIV if we
5914 have a cycle. Thus we must be sure to check each insn for
5915 both BIV and GIV uses, and we must check for BIV uses
5916 first. */
5917
5918 if (! biv_increment_seen
5919 && reg_set_p (v->src_reg, PATTERN (p)))
5920 biv_increment_seen = 1;
5921
5922 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5923 {
5924 if (biv_increment_seen || before_giv_insn)
5925 {
5926 v->replaceable = 0;
5927 v->not_replaceable = 1;
5928 break;
5929 }
5930 last_giv_use = p;
5931 }
5932 }
5933 }
5934
5935 /* Now that the lifetime of the giv is known, check for branches
5936 from within the lifetime to outside the lifetime if it is still
5937 replaceable. */
5938
5939 if (v->replaceable)
5940 {
5941 p = v->insn;
5942 while (1)
5943 {
5944 p = NEXT_INSN (p);
5945 if (p == loop->end)
5946 p = NEXT_INSN (loop->start);
5947 if (p == last_giv_use)
5948 break;
5949
5950 if (JUMP_P (p) && JUMP_LABEL (p)
5951 && LABEL_NAME (JUMP_LABEL (p))
5952 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
5953 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
5954 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
5955 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
5956 {
5957 v->replaceable = 0;
5958 v->not_replaceable = 1;
5959
5960 if (loop_dump_stream)
5961 fprintf (loop_dump_stream,
5962 "Found branch outside giv lifetime.\n");
5963
5964 break;
5965 }
5966 }
5967 }
5968
5969 /* If it is replaceable, then save the final value. */
5970 if (v->replaceable)
5971 v->final_value = final_value;
5972 }
5973
5974 if (loop_dump_stream && v->replaceable)
5975 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5976 INSN_UID (v->insn), REGNO (v->dest_reg));
5977 }
5978 \f
5979 /* Update the status of whether a giv can derive other givs.
5980
5981 We need to do something special if there is or may be an update to the biv
5982 between the time the giv is defined and the time it is used to derive
5983 another giv.
5984
5985 In addition, a giv that is only conditionally set is not allowed to
5986 derive another giv once a label has been passed.
5987
5988 The cases we look at are when a label or an update to a biv is passed. */
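
/* A sketch of the conditional case: a giv computed only under a condition
   inside the loop body may not have been recomputed by the time control
   passes the join label, so it must not be used to derive further givs.

       for (i = 0; i < n; i++)
	 {
	   if (c)
	     g = i * 4;		-- conditionally computed giv
	   ...			-- past the label, g may hold a stale value
	 }  */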
5989
5990 static void
5991 update_giv_derive (const struct loop *loop, rtx p)
5992 {
5993 struct loop_ivs *ivs = LOOP_IVS (loop);
5994 struct iv_class *bl;
5995 struct induction *biv, *giv;
5996 rtx tem;
5997 int dummy;
5998
5999 /* Search all IV classes, then all bivs, and finally all givs.
6000
6001 There are three cases we are concerned with. First we have the situation
6002 of a giv that is only updated conditionally. In that case, it may not
6003 derive any givs after a label is passed.
6004
6005 The second case is when a biv update occurs, or may occur, after the
6006 definition of a giv. For certain biv updates (see below) that are
6007 known to occur between the giv definition and use, we can adjust the
6008 giv definition. For others, or when the biv update is conditional,
6009 we must prevent the giv from deriving any other givs. There are two
6010 sub-cases within this case.
6011
6012 If this is a label, we are concerned with any biv update that is done
6013 conditionally, since it may be done after the giv is defined followed by
6014 a branch here (actually, we need to pass both a jump and a label, but
6015 this extra tracking doesn't seem worth it).
6016
6017 If this is a jump, we are concerned about any biv update that may be
6018 executed multiple times. We are actually only concerned about
6019 backward jumps, but it is probably not worth performing the test
6020 on the jump again here.
6021
6022 If this is a biv update, we must adjust the giv status to show that a
6023 subsequent biv update was performed. If this adjustment cannot be done,
6024 the giv cannot derive further givs. */
6025
6026 for (bl = ivs->list; bl; bl = bl->next)
6027 for (biv = bl->biv; biv; biv = biv->next_iv)
6028 if (LABEL_P (p) || JUMP_P (p)
6029 || biv->insn == p)
6030 {
6031 /* Skip if location is the same as a previous one. */
6032 if (biv->same)
6033 continue;
6034
6035 for (giv = bl->giv; giv; giv = giv->next_iv)
6036 {
6037 /* If cant_derive is already true, there is no point in
6038 checking all of these conditions again. */
6039 if (giv->cant_derive)
6040 continue;
6041
6042 /* If this giv is conditionally set and we have passed a label,
6043 it cannot derive anything. */
6044 if (LABEL_P (p) && ! giv->always_computable)
6045 giv->cant_derive = 1;
6046
6047 /* Skip givs that have mult_val == 0, since
6048 they are really invariants. Also skip those that are
6049 replaceable, since we know their lifetime doesn't contain
6050 any biv update. */
6051 else if (giv->mult_val == const0_rtx || giv->replaceable)
6052 continue;
6053
6054 /* The only way we can allow this giv to derive another
6055 is if this is a biv increment and we can form the product
6056 of biv->add_val and giv->mult_val. In this case, we will
6057 be able to compute a compensation. */
6058 else if (biv->insn == p)
6059 {
6060 rtx ext_val_dummy;
6061
6062 tem = 0;
6063 if (biv->mult_val == const1_rtx)
6064 tem = simplify_giv_expr (loop,
6065 gen_rtx_MULT (giv->mode,
6066 biv->add_val,
6067 giv->mult_val),
6068 &ext_val_dummy, &dummy);
6069
6070 if (tem && giv->derive_adjustment)
6071 tem = simplify_giv_expr
6072 (loop,
6073 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6074 &ext_val_dummy, &dummy);
6075
6076 if (tem)
6077 giv->derive_adjustment = tem;
6078 else
6079 giv->cant_derive = 1;
6080 }
6081 else if ((LABEL_P (p) && ! biv->always_computable)
6082 || (JUMP_P (p) && biv->maybe_multiple))
6083 giv->cant_derive = 1;
6084 }
6085 }
6086 }
6087 \f
6088 /* Check whether an insn is an increment legitimate for a basic induction var.
6089 X is the source of insn P, or a part of it.
6090 MODE is the mode in which X should be interpreted.
6091
6092 DEST_REG is the putative biv, also the destination of the insn.
6093 We accept patterns of these forms:
6094 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6095 REG = INVARIANT + REG
6096
6097 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6098 store the additive term into *INC_VAL, and store the place where
6099 we found the additive term into *LOCATION.
6100
6101 If X is an assignment of an invariant into DEST_REG, we set
6102 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6103
6104 We also want to detect a BIV when it corresponds to a variable
6105 whose mode was promoted. In that case, an increment
6106 of the variable may be a PLUS that adds a SUBREG of that variable to
6107 an invariant and then sign- or zero-extends the result of the PLUS
6108 into the variable.
6109
6110 Most GIVs in such cases will be in the promoted mode, since that is
6111 probably the natural computation mode (and almost certainly the mode
6112 used for addresses) on the machine. So we view the pseudo-reg containing
6113 the variable as the BIV, as if it were simply incremented.
6114
6115 Note that treating the entire pseudo as a BIV will result in making
6116 simple increments to any GIVs based on it. However, if the variable
6117 overflows in its declared mode but not its promoted mode, the result will
6118 be incorrect. This is acceptable if the variable is signed, since
6119 overflows in such cases are undefined, but not if it is unsigned, since
6120 those overflows are defined. So we only check for SIGN_EXTEND and
6121 not ZERO_EXTEND.
6122
6123 If we cannot find a biv, we return 0. */
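
/* For illustration, typical accepted forms (shown as simplified RTL):

       (set (reg i) (plus (reg i) (const_int 4)))
	   -- *MULT_VAL becomes const1_rtx, *INC_VAL (const_int 4);

       (set (reg i) (const_int 0))
	   -- *MULT_VAL becomes const0_rtx, *INC_VAL (const_int 0).

   Real patterns may also involve SUBREGs of promoted variables or sign
   extensions, as described above.  */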
6124
6125 static int
6126 basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode,
6127 rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val,
6128 rtx **location)
6129 {
6130 enum rtx_code code;
6131 rtx *argp, arg;
6132 rtx insn, set = 0, last, inc;
6133
6134 code = GET_CODE (x);
6135 *location = NULL;
6136 switch (code)
6137 {
6138 case PLUS:
6139 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6140 || (GET_CODE (XEXP (x, 0)) == SUBREG
6141 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6142 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6143 {
6144 argp = &XEXP (x, 1);
6145 }
6146 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6147 || (GET_CODE (XEXP (x, 1)) == SUBREG
6148 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6149 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6150 {
6151 argp = &XEXP (x, 0);
6152 }
6153 else
6154 return 0;
6155
6156 arg = *argp;
6157 if (loop_invariant_p (loop, arg) != 1)
6158 return 0;
6159
6160 /* convert_modes can emit new instructions, e.g. when arg is a loop
6161 invariant MEM and dest_reg has a different mode.
6162 These instructions would be emitted after the end of the function
6163 and then *inc_val would be an uninitialized pseudo.
6164 Detect this and bail in this case.
6165 Other ways to solve this would be to introduce a convert_modes
6166 variant that is allowed to fail but not to emit new instructions,
6167 to emit these instructions before the loop start and let them be
6168 garbage collected if *inc_val is never used, or to save the
6169 *inc_val initialization sequence generated here and emit it at
6170 some suitable place when *inc_val is actually going to be used. */
6171 last = get_last_insn ();
6172 inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6173 if (get_last_insn () != last)
6174 {
6175 delete_insns_since (last);
6176 return 0;
6177 }
6178
6179 *inc_val = inc;
6180 *mult_val = const1_rtx;
6181 *location = argp;
6182 return 1;
6183
6184 case SUBREG:
6185 /* If what's inside the SUBREG is a BIV, then look inside the SUBREG. This will
6186 handle addition of promoted variables.
6187 ??? The comment at the start of this function is wrong: promoted
6188 variable increments don't look like it says they do. */
6189 return basic_induction_var (loop, SUBREG_REG (x),
6190 GET_MODE (SUBREG_REG (x)),
6191 dest_reg, p, inc_val, mult_val, location);
6192
6193 case REG:
6194 /* If this register is assigned in a previous insn, look at its
6195 source, but don't go outside the loop or past a label. */
6196
6197 /* If this sets a register to itself, we would repeat any previous
6198 biv increment if we applied this strategy blindly. */
6199 if (rtx_equal_p (dest_reg, x))
6200 return 0;
6201
6202 insn = p;
6203 while (1)
6204 {
6205 rtx dest;
6206 do
6207 {
6208 insn = PREV_INSN (insn);
6209 }
6210 while (insn && NOTE_P (insn)
6211 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6212
6213 if (!insn)
6214 break;
6215 set = single_set (insn);
6216 if (set == 0)
6217 break;
6218 dest = SET_DEST (set);
6219 if (dest == x
6220 || (GET_CODE (dest) == SUBREG
6221 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6222 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6223 && SUBREG_REG (dest) == x))
6224 return basic_induction_var (loop, SET_SRC (set),
6225 (GET_MODE (SET_SRC (set)) == VOIDmode
6226 ? GET_MODE (x)
6227 : GET_MODE (SET_SRC (set))),
6228 dest_reg, insn,
6229 inc_val, mult_val, location);
6230
6231 while (GET_CODE (dest) == SIGN_EXTRACT
6232 || GET_CODE (dest) == ZERO_EXTRACT
6233 || GET_CODE (dest) == SUBREG
6234 || GET_CODE (dest) == STRICT_LOW_PART)
6235 dest = XEXP (dest, 0);
6236 if (dest == x)
6237 break;
6238 }
6239 /* Fall through. */
6240
6241 /* Can accept constant setting of biv only when inside innermost loop.
6242 Otherwise, a biv of an inner loop may be incorrectly recognized
6243 as a biv of the outer loop,
6244 causing code to be moved INTO the inner loop. */
6245 case MEM:
6246 if (loop_invariant_p (loop, x) != 1)
6247 return 0;
6248 case CONST_INT:
6249 case SYMBOL_REF:
6250 case CONST:
6251 /* convert_modes aborts if we try to convert to or from CCmode, so just
6252 exclude that case. It is very unlikely that a condition code value
6253 would be a useful iterator anyways. convert_modes aborts if we try to
6254 convert a float mode to non-float or vice versa too. */
6255 if (loop->level == 1
6256 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
6257 && GET_MODE_CLASS (mode) != MODE_CC)
6258 {
6259 /* Possible bug here? Perhaps we don't know the mode of X. */
6260 last = get_last_insn ();
6261 inc = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6262 if (get_last_insn () != last)
6263 {
6264 delete_insns_since (last);
6265 return 0;
6266 }
6267
6268 *inc_val = inc;
6269 *mult_val = const0_rtx;
6270 return 1;
6271 }
6272 else
6273 return 0;
6274
6275 case SIGN_EXTEND:
6276 /* Ignore this BIV if signed arithmetic overflow is defined. */
6277 if (flag_wrapv)
6278 return 0;
6279 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6280 dest_reg, p, inc_val, mult_val, location);
6281
6282 case ASHIFTRT:
6283 /* Similar, since this can be a sign extension. */
6284 for (insn = PREV_INSN (p);
6285 (insn && NOTE_P (insn)
6286 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6287 insn = PREV_INSN (insn))
6288 ;
6289
6290 if (insn)
6291 set = single_set (insn);
6292
6293 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6294 && set && SET_DEST (set) == XEXP (x, 0)
6295 && GET_CODE (XEXP (x, 1)) == CONST_INT
6296 && INTVAL (XEXP (x, 1)) >= 0
6297 && GET_CODE (SET_SRC (set)) == ASHIFT
6298 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6299 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6300 GET_MODE (XEXP (x, 0)),
6301 dest_reg, insn, inc_val, mult_val,
6302 location);
6303 return 0;
6304
6305 default:
6306 return 0;
6307 }
6308 }
6309 \f
6310 /* A general induction variable (giv) is any quantity that is a linear
6311 function of a basic induction variable,
6312 i.e. giv = biv * mult_val + add_val.
6313 The coefficients can be any loop invariant quantity.
6314 A giv need not be computed directly from the biv;
6315 it can be computed by way of other givs. */
6316
6317 /* Determine whether X computes a giv.
6318 If it does, return a nonzero value
6319 which is the benefit from eliminating the computation of X;
6320 set *SRC_REG to the register of the biv that it is computed from;
6321 set *ADD_VAL and *MULT_VAL to the coefficients,
6322 such that the value of X is biv * mult + add; */
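
/* For example, if X simplifies to

       (plus (mult (reg biv) (const_int 4)) (const_int 8))

   then *SRC_REG is set to the biv's register, *MULT_VAL to (const_int 4)
   and *ADD_VAL to (const_int 8).  (A sketch; the switch below lists the
   exact forms handled.)  */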
6323
6324 static int
6325 general_induction_var (const struct loop *loop, rtx x, rtx *src_reg,
6326 rtx *add_val, rtx *mult_val, rtx *ext_val,
6327 int is_addr, int *pbenefit,
6328 enum machine_mode addr_mode)
6329 {
6330 struct loop_ivs *ivs = LOOP_IVS (loop);
6331 rtx orig_x = x;
6332
6333 /* If this is an invariant, forget it, it isn't a giv. */
6334 if (loop_invariant_p (loop, x) == 1)
6335 return 0;
6336
6337 *pbenefit = 0;
6338 *ext_val = NULL_RTX;
6339 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6340 if (x == 0)
6341 return 0;
6342
6343 switch (GET_CODE (x))
6344 {
6345 case USE:
6346 case CONST_INT:
6347 /* Since this is now an invariant and wasn't before, it must be a giv
6348 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6349 with. */
6350 *src_reg = ivs->list->biv->dest_reg;
6351 *mult_val = const0_rtx;
6352 *add_val = x;
6353 break;
6354
6355 case REG:
6356 /* This is equivalent to a BIV. */
6357 *src_reg = x;
6358 *mult_val = const1_rtx;
6359 *add_val = const0_rtx;
6360 break;
6361
6362 case PLUS:
6363 /* Either (plus (biv) (invar)) or
6364 (plus (mult (biv) (invar_1)) (invar_2)). */
6365 if (GET_CODE (XEXP (x, 0)) == MULT)
6366 {
6367 *src_reg = XEXP (XEXP (x, 0), 0);
6368 *mult_val = XEXP (XEXP (x, 0), 1);
6369 }
6370 else
6371 {
6372 *src_reg = XEXP (x, 0);
6373 *mult_val = const1_rtx;
6374 }
6375 *add_val = XEXP (x, 1);
6376 break;
6377
6378 case MULT:
6379 /* ADD_VAL is zero. */
6380 *src_reg = XEXP (x, 0);
6381 *mult_val = XEXP (x, 1);
6382 *add_val = const0_rtx;
6383 break;
6384
6385 default:
6386 gcc_unreachable ();
6387 }
6388
6389 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
6390 unless they are CONST_INTs). */
6391 if (GET_CODE (*add_val) == USE)
6392 *add_val = XEXP (*add_val, 0);
6393 if (GET_CODE (*mult_val) == USE)
6394 *mult_val = XEXP (*mult_val, 0);
6395
6396 if (is_addr)
6397 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6398 else
6399 *pbenefit += rtx_cost (orig_x, SET);
6400
6401 /* Always return true if this is a giv so it will be detected as such,
6402 even if the benefit is zero or negative. This allows elimination
6403 of bivs that might otherwise not be eliminated. */
6404 return 1;
6405 }
6406 \f
6407 /* Given an expression, X, try to form it as a linear function of a biv.
6408 We will canonicalize it to be of the form
6409 (plus (mult (BIV) (invar_1))
6410 (invar_2))
6411 with possible degeneracies.
6412
6413 The invariant expressions must each be of a form that can be used as a
6414 machine operand. We surround them with a USE rtx (a hack, but localized
6415 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6416 routine; it is the caller's responsibility to strip them.
6417
6418 If no such canonicalization is possible (i.e., two bivs are used or an
6419 expression that is neither invariant nor a biv or giv), this routine
6420 returns 0.
6421
6422 For a nonzero return, the result will have a code of CONST_INT, USE,
6423 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6424
6425 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
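
/* A small example of the canonicalization: an expression equivalent to
   `i * 4 - 3', with `i' a biv, would be returned as

       (plus (mult (reg i) (const_int 4)) (const_int -3))

   since MINUS is rewritten as addition of a multiple of -1 and the
   constant parts are then folded.  */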
6426
6427 static rtx sge_plus (enum machine_mode, rtx, rtx);
6428 static rtx sge_plus_constant (rtx, rtx);
6429
6430 static rtx
6431 simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
6432 {
6433 struct loop_ivs *ivs = LOOP_IVS (loop);
6434 struct loop_regs *regs = LOOP_REGS (loop);
6435 enum machine_mode mode = GET_MODE (x);
6436 rtx arg0, arg1;
6437 rtx tem;
6438
6439 /* If this is not an integer mode, or if we cannot do arithmetic in this
6440 mode, this can't be a giv. */
6441 if (mode != VOIDmode
6442 && (GET_MODE_CLASS (mode) != MODE_INT
6443 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6444 return NULL_RTX;
6445
6446 switch (GET_CODE (x))
6447 {
6448 case PLUS:
6449 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6450 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6451 if (arg0 == 0 || arg1 == 0)
6452 return NULL_RTX;
6453
6454 /* Put constant last, CONST_INT last if both constant. */
6455 if ((GET_CODE (arg0) == USE
6456 || GET_CODE (arg0) == CONST_INT)
6457 && ! ((GET_CODE (arg0) == USE
6458 && GET_CODE (arg1) == USE)
6459 || GET_CODE (arg1) == CONST_INT))
6460 tem = arg0, arg0 = arg1, arg1 = tem;
6461
6462 /* Handle addition of zero, then addition of an invariant. */
6463 if (arg1 == const0_rtx)
6464 return arg0;
6465 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6466 switch (GET_CODE (arg0))
6467 {
6468 case CONST_INT:
6469 case USE:
6470 /* Adding two invariants must result in an invariant, so enclose
6471 addition operation inside a USE and return it. */
6472 if (GET_CODE (arg0) == USE)
6473 arg0 = XEXP (arg0, 0);
6474 if (GET_CODE (arg1) == USE)
6475 arg1 = XEXP (arg1, 0);
6476
6477 if (GET_CODE (arg0) == CONST_INT)
6478 tem = arg0, arg0 = arg1, arg1 = tem;
6479 if (GET_CODE (arg1) == CONST_INT)
6480 tem = sge_plus_constant (arg0, arg1);
6481 else
6482 tem = sge_plus (mode, arg0, arg1);
6483
6484 if (GET_CODE (tem) != CONST_INT)
6485 tem = gen_rtx_USE (mode, tem);
6486 return tem;
6487
6488 case REG:
6489 case MULT:
6490 /* biv + invar or mult + invar. Return sum. */
6491 return gen_rtx_PLUS (mode, arg0, arg1);
6492
6493 case PLUS:
6494 /* (a + invar_1) + invar_2. Associate. */
6495 return
6496 simplify_giv_expr (loop,
6497 gen_rtx_PLUS (mode,
6498 XEXP (arg0, 0),
6499 gen_rtx_PLUS (mode,
6500 XEXP (arg0, 1),
6501 arg1)),
6502 ext_val, benefit);
6503
6504 default:
6505 gcc_unreachable ();
6506 }
6507
6508 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6509 MULT to reduce cases. */
6510 if (REG_P (arg0))
6511 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6512 if (REG_P (arg1))
6513 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6514
6515 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6516 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6517 Recurse to associate the second PLUS. */
6518 if (GET_CODE (arg1) == MULT)
6519 tem = arg0, arg0 = arg1, arg1 = tem;
6520
6521 if (GET_CODE (arg1) == PLUS)
6522 return
6523 simplify_giv_expr (loop,
6524 gen_rtx_PLUS (mode,
6525 gen_rtx_PLUS (mode, arg0,
6526 XEXP (arg1, 0)),
6527 XEXP (arg1, 1)),
6528 ext_val, benefit);
6529
6530 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6531 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6532 return NULL_RTX;
6533
6534 if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
6535 return NULL_RTX;
6536
6537 return simplify_giv_expr (loop,
6538 gen_rtx_MULT (mode,
6539 XEXP (arg0, 0),
6540 gen_rtx_PLUS (mode,
6541 XEXP (arg0, 1),
6542 XEXP (arg1, 1))),
6543 ext_val, benefit);
6544
6545 case MINUS:
6546 /* Handle "a - b" as "a + b * (-1)". */
6547 return simplify_giv_expr (loop,
6548 gen_rtx_PLUS (mode,
6549 XEXP (x, 0),
6550 gen_rtx_MULT (mode,
6551 XEXP (x, 1),
6552 constm1_rtx)),
6553 ext_val, benefit);
6554
6555 case MULT:
6556 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6557 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6558 if (arg0 == 0 || arg1 == 0)
6559 return NULL_RTX;
6560
6561 /* Put constant last, CONST_INT last if both constant. */
6562 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6563 && GET_CODE (arg1) != CONST_INT)
6564 tem = arg0, arg0 = arg1, arg1 = tem;
6565
6566 /* If second argument is not now constant, not giv. */
6567 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6568 return NULL_RTX;
6569
6570 /* Handle multiply by 0 or 1. */
6571 if (arg1 == const0_rtx)
6572 return const0_rtx;
6573
6574 else if (arg1 == const1_rtx)
6575 return arg0;
6576
6577 switch (GET_CODE (arg0))
6578 {
6579 case REG:
6580 /* biv * invar. Done. */
6581 return gen_rtx_MULT (mode, arg0, arg1);
6582
6583 case CONST_INT:
6584 /* Product of two constants. */
6585 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6586
6587 case USE:
6588 /* invar * invar is a giv, but attempt to simplify it somehow. */
6589 if (GET_CODE (arg1) != CONST_INT)
6590 return NULL_RTX;
6591
6592 arg0 = XEXP (arg0, 0);
6593 if (GET_CODE (arg0) == MULT)
6594 {
6595 /* (invar_0 * invar_1) * invar_2. Associate. */
6596 return simplify_giv_expr (loop,
6597 gen_rtx_MULT (mode,
6598 XEXP (arg0, 0),
6599 gen_rtx_MULT (mode,
6600 XEXP (arg0,
6601 1),
6602 arg1)),
6603 ext_val, benefit);
6604 }
6605 /* Propagate the MULT expressions to the innermost nodes. */
6606 else if (GET_CODE (arg0) == PLUS)
6607 {
6608 /* (invar_0 + invar_1) * invar_2. Distribute. */
6609 return simplify_giv_expr (loop,
6610 gen_rtx_PLUS (mode,
6611 gen_rtx_MULT (mode,
6612 XEXP (arg0,
6613 0),
6614 arg1),
6615 gen_rtx_MULT (mode,
6616 XEXP (arg0,
6617 1),
6618 arg1)),
6619 ext_val, benefit);
6620 }
6621 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6622
6623 case MULT:
6624 /* (a * invar_1) * invar_2. Associate. */
6625 return simplify_giv_expr (loop,
6626 gen_rtx_MULT (mode,
6627 XEXP (arg0, 0),
6628 gen_rtx_MULT (mode,
6629 XEXP (arg0, 1),
6630 arg1)),
6631 ext_val, benefit);
6632
6633 case PLUS:
6634 /* (a + invar_1) * invar_2. Distribute. */
6635 return simplify_giv_expr (loop,
6636 gen_rtx_PLUS (mode,
6637 gen_rtx_MULT (mode,
6638 XEXP (arg0, 0),
6639 arg1),
6640 gen_rtx_MULT (mode,
6641 XEXP (arg0, 1),
6642 arg1)),
6643 ext_val, benefit);
6644
6645 default:
6646 gcc_unreachable ();
6647 }
6648
6649 case ASHIFT:
6650 /* Shift by constant is multiply by power of two. */
6651 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6652 return 0;
6653
6654 return
6655 simplify_giv_expr (loop,
6656 gen_rtx_MULT (mode,
6657 XEXP (x, 0),
6658 GEN_INT ((HOST_WIDE_INT) 1
6659 << INTVAL (XEXP (x, 1)))),
6660 ext_val, benefit);
6661
6662 case NEG:
6663 /* "-a" is "a * (-1)" */
6664 return simplify_giv_expr (loop,
6665 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6666 ext_val, benefit);
6667
6668 case NOT:
6669 /* "~a" is "-a - 1". Silly, but easy. */
6670 return simplify_giv_expr (loop,
6671 gen_rtx_MINUS (mode,
6672 gen_rtx_NEG (mode, XEXP (x, 0)),
6673 const1_rtx),
6674 ext_val, benefit);
6675
6676 case USE:
6677 /* Already in proper form for invariant. */
6678 return x;
6679
6680 case SIGN_EXTEND:
6681 case ZERO_EXTEND:
6682 case TRUNCATE:
6683 /* Conditionally recognize extensions of simple IVs. After we've
6684 computed loop traversal counts and verified the range of the
6685 source IV, we'll reevaluate this as a GIV. */
6686 if (*ext_val == NULL_RTX)
6687 {
6688 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6689 if (arg0 && *ext_val == NULL_RTX && REG_P (arg0))
6690 {
6691 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6692 return arg0;
6693 }
6694 }
6695 goto do_default;
6696
6697 case REG:
6698 /* If this is a new register, we can't deal with it. */
6699 if (REGNO (x) >= max_reg_before_loop)
6700 return 0;
6701
6702 /* Check for biv or giv. */
6703 switch (REG_IV_TYPE (ivs, REGNO (x)))
6704 {
6705 case BASIC_INDUCT:
6706 return x;
6707 case GENERAL_INDUCT:
6708 {
6709 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6710
6711 /* Form expression from giv and add benefit. Ensure this giv
6712 can derive another and subtract any needed adjustment if so. */
6713
6714 /* Increasing the benefit here is risky. The only case in which it
6715 is arguably correct is if this is the only use of V. In other
6716 cases, this will artificially inflate the benefit of the current
6717 giv, and lead to suboptimal code. Thus, it is disabled, since
6718 potentially not reducing an only marginally beneficial giv is
6719 less harmful than reducing many givs that are not really
6720 beneficial. */
6721 {
6722 rtx single_use = regs->array[REGNO (x)].single_usage;
6723 if (single_use && single_use != const0_rtx)
6724 *benefit += v->benefit;
6725 }
6726
6727 if (v->cant_derive)
6728 return 0;
6729
6730 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6731 v->src_reg, v->mult_val),
6732 v->add_val);
6733
6734 if (v->derive_adjustment)
6735 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6736 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6737 if (*ext_val)
6738 {
6739 if (!v->ext_dependent)
6740 return arg0;
6741 }
6742 else
6743 {
6744 *ext_val = v->ext_dependent;
6745 return arg0;
6746 }
6747 return 0;
6748 }
6749
6750 default:
6751 do_default:
6752 /* If it isn't an induction variable, and it is invariant, we
6753 may be able to simplify things further by looking through
6754 the bits we just moved outside the loop. */
6755 if (loop_invariant_p (loop, x) == 1)
6756 {
6757 struct movable *m;
6758 struct loop_movables *movables = LOOP_MOVABLES (loop);
6759
6760 for (m = movables->head; m; m = m->next)
6761 if (rtx_equal_p (x, m->set_dest))
6762 {
6763 /* Ok, we found a match. Substitute and simplify. */
6764
6765 /* If we match another movable, we must use that, as
6766 this one is going away. */
6767 if (m->match)
6768 return simplify_giv_expr (loop, m->match->set_dest,
6769 ext_val, benefit);
6770
6771 /* If consec is nonzero, this is a member of a group of
6772 instructions that were moved together. We handle this
6773 case only to the point of seeking to the last insn and
6774 looking for a REG_EQUAL. Fail if we don't find one. */
6775 if (m->consec != 0)
6776 {
6777 int i = m->consec;
6778 tem = m->insn;
6779 do
6780 {
6781 tem = NEXT_INSN (tem);
6782 }
6783 while (--i > 0);
6784
6785 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6786 if (tem)
6787 tem = XEXP (tem, 0);
6788 }
6789 else
6790 {
6791 tem = single_set (m->insn);
6792 if (tem)
6793 tem = SET_SRC (tem);
6794 }
6795
6796 if (tem)
6797 {
6798 /* What we are most interested in is pointer
6799 arithmetic on invariants -- only take
6800 patterns we may be able to do something with. */
6801 if (GET_CODE (tem) == PLUS
6802 || GET_CODE (tem) == MULT
6803 || GET_CODE (tem) == ASHIFT
6804 || GET_CODE (tem) == CONST_INT
6805 || GET_CODE (tem) == SYMBOL_REF)
6806 {
6807 tem = simplify_giv_expr (loop, tem, ext_val,
6808 benefit);
6809 if (tem)
6810 return tem;
6811 }
6812 else if (GET_CODE (tem) == CONST
6813 && GET_CODE (XEXP (tem, 0)) == PLUS
6814 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6815 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6816 {
6817 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6818 ext_val, benefit);
6819 if (tem)
6820 return tem;
6821 }
6822 }
6823 break;
6824 }
6825 }
6826 break;
6827 }
6828
6829 /* Fall through to general case. */
6830 default:
6831 /* If invariant, return as USE (unless CONST_INT).
6832 Otherwise, not giv. */
6833 if (GET_CODE (x) == USE)
6834 x = XEXP (x, 0);
6835
6836 if (loop_invariant_p (loop, x) == 1)
6837 {
6838 if (GET_CODE (x) == CONST_INT)
6839 return x;
6840 if (GET_CODE (x) == CONST
6841 && GET_CODE (XEXP (x, 0)) == PLUS
6842 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6843 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6844 x = XEXP (x, 0);
6845 return gen_rtx_USE (mode, x);
6846 }
6847 else
6848 return 0;
6849 }
6850 }
6851
6852 /* This routine folds invariants such that there is only ever one
6853 CONST_INT in the summation. It is only used by simplify_giv_expr. */
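
/* For instance, folding (plus (reg X) (const_int 3)) with (const_int 4)
   yields (plus (reg X) (const_int 7)), so a single CONST_INT remains in
   the sum.  */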
6854
6855 static rtx
6856 sge_plus_constant (rtx x, rtx c)
6857 {
6858 if (GET_CODE (x) == CONST_INT)
6859 return GEN_INT (INTVAL (x) + INTVAL (c));
6860 else if (GET_CODE (x) != PLUS)
6861 return gen_rtx_PLUS (GET_MODE (x), x, c);
6862 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6863 {
6864 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6865 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6866 }
6867 else if (GET_CODE (XEXP (x, 0)) == PLUS
6868 || GET_CODE (XEXP (x, 1)) != PLUS)
6869 {
6870 return gen_rtx_PLUS (GET_MODE (x),
6871 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6872 }
6873 else
6874 {
6875 return gen_rtx_PLUS (GET_MODE (x),
6876 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6877 }
6878 }
6879
6880 static rtx
6881 sge_plus (enum machine_mode mode, rtx x, rtx y)
6882 {
6883 while (GET_CODE (y) == PLUS)
6884 {
6885 rtx a = XEXP (y, 0);
6886 if (GET_CODE (a) == CONST_INT)
6887 x = sge_plus_constant (x, a);
6888 else
6889 x = gen_rtx_PLUS (mode, x, a);
6890 y = XEXP (y, 1);
6891 }
6892 if (GET_CODE (y) == CONST_INT)
6893 x = sge_plus_constant (x, y);
6894 else
6895 x = gen_rtx_PLUS (mode, x, y);
6896 return x;
6897 }
6898 \f
6899 /* Help detect a giv that is calculated by several consecutive insns;
6900 for example,
6901 giv = biv * M
6902 giv = giv + A
6903 The caller has already identified the first insn P as having a giv as dest;
6904 we check that all other insns that set the same register follow
6905 immediately after P, that they alter nothing else,
6906 and that the result of the last is still a giv.
6907
6908 The value is 0 if the reg set in P is not really a giv.
6909 Otherwise, the value is the amount gained by eliminating
6910 all the consecutive insns that compute the value.
6911
6912 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6913 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6914
6915 The coefficients of the ultimate giv value are stored in
6916 *MULT_VAL and *ADD_VAL. */
6917
6918 static int
6919 consec_sets_giv (const struct loop *loop, int first_benefit, rtx p,
6920 rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val,
6921 rtx *ext_val, rtx *last_consec_insn)
6922 {
6923 struct loop_ivs *ivs = LOOP_IVS (loop);
6924 struct loop_regs *regs = LOOP_REGS (loop);
6925 int count;
6926 enum rtx_code code;
6927 int benefit;
6928 rtx temp;
6929 rtx set;
6930
6931 /* Indicate that this is a giv so that we can update the value produced in
6932 each insn of the multi-insn sequence.
6933
6934 This induction structure will be used only by the call to
6935 general_induction_var below, so we can allocate it on our stack.
6936 If this is a giv, our caller will replace the induct var entry with
6937 a new induction structure. */
6938 struct induction *v;
6939
6940 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
6941 return 0;
6942
6943 v = alloca (sizeof (struct induction));
6944 v->src_reg = src_reg;
6945 v->mult_val = *mult_val;
6946 v->add_val = *add_val;
6947 v->benefit = first_benefit;
6948 v->cant_derive = 0;
6949 v->derive_adjustment = 0;
6950 v->ext_dependent = NULL_RTX;
6951
6952 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
6953 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
6954
6955 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
6956
6957 while (count > 0)
6958 {
6959 p = NEXT_INSN (p);
6960 code = GET_CODE (p);
6961
6962 /* If libcall, skip to end of call sequence. */
6963 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
6964 p = XEXP (temp, 0);
6965
6966 if (code == INSN
6967 && (set = single_set (p))
6968 && REG_P (SET_DEST (set))
6969 && SET_DEST (set) == dest_reg
6970 && (general_induction_var (loop, SET_SRC (set), &src_reg,
6971 add_val, mult_val, ext_val, 0,
6972 &benefit, VOIDmode)
6973 /* Giv created by equivalent expression. */
6974 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
6975 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
6976 add_val, mult_val, ext_val, 0,
6977 &benefit, VOIDmode)))
6978 && src_reg == v->src_reg)
6979 {
6980 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
6981 benefit += libcall_benefit (p);
6982
6983 count--;
6984 v->mult_val = *mult_val;
6985 v->add_val = *add_val;
6986 v->benefit += benefit;
6987 }
6988 else if (code != NOTE)
6989 {
6990 /* Allow insns that set something other than this giv to a
6991 constant. Such insns are needed on machines which cannot
6992 include long constants and should not disqualify a giv. */
6993 if (code == INSN
6994 && (set = single_set (p))
6995 && SET_DEST (set) != dest_reg
6996 && CONSTANT_P (SET_SRC (set)))
6997 continue;
6998
6999 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7000 return 0;
7001 }
7002 }
7003
7004 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7005 *last_consec_insn = p;
7006 return v->benefit;
7007 }
7008 \f
7009 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7010 represented by G1. If no such expression can be found, or it is clear that
7011 it cannot possibly be a valid address, 0 is returned.
7012
7013 To perform the computation, we note that
7014 G1 = x * v + a and
7015 G2 = y * v + b
7016 where `v' is the biv.
7017
7018 So G2 = (y/x) * G1 + (b - a*y/x).
7019
7020 Note that MULT = y/x.
7021
7022 Update: A and B are now allowed to be additive expressions such that
7023 B contains all variables in A. That is, computing B-A will not require
7024 subtracting variables. */
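/* Illustrative example: with G1 = 2*v + 4 and G2 = 6*v + 10, MULT is 3 and
express_from_1 (4, 10, 3) reduces the addends to -2, giving G2 = 3*G1 - 2.  */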
7025
7026 static rtx
7027 express_from_1 (rtx a, rtx b, rtx mult)
7028 {
7029 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
7030
7031 if (mult == const0_rtx)
7032 return b;
7033
7034 /* If MULT is not 1, we cannot handle A with non-constants, since we
7035 would then be required to subtract multiples of the registers in A.
7036 This is theoretically possible, and may even apply to some Fortran
7037 constructs, but it is a lot of work and we do not attempt it here. */
7038
7039 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
7040 return NULL_RTX;
7041
7042 /* In general these structures are sorted top to bottom (down the PLUS
7043 chain), but not left to right across the PLUS. If B is a higher
7044 order giv than A, we can strip one level and recurse. If A is higher
7045 order, we'll eventually bail out, but won't know that until the end.
7046 If they are the same, we'll strip one level around this loop. */
7047
7048 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7049 {
7050 rtx ra, rb, oa, ob, tmp;
7051
7052 ra = XEXP (a, 0), oa = XEXP (a, 1);
7053 if (GET_CODE (ra) == PLUS)
7054 tmp = ra, ra = oa, oa = tmp;
7055
7056 rb = XEXP (b, 0), ob = XEXP (b, 1);
7057 if (GET_CODE (rb) == PLUS)
7058 tmp = rb, rb = ob, ob = tmp;
7059
7060 if (rtx_equal_p (ra, rb))
7061 /* We matched: remove one reg completely. */
7062 a = oa, b = ob;
7063 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7064 /* An alternate match. */
7065 a = oa, b = rb;
7066 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7067 /* An alternate match. */
7068 a = ra, b = ob;
7069 else
7070 {
7071 /* Indicates an extra register in B. Strip one level from B and
7072 recurse, hoping B was the higher order expression. */
7073 ob = express_from_1 (a, ob, mult);
7074 if (ob == NULL_RTX)
7075 return NULL_RTX;
7076 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7077 }
7078 }
7079
7080 /* Here we are at the last level of A, go through the cases hoping to
7081 get rid of everything but a constant. */
7082
7083 if (GET_CODE (a) == PLUS)
7084 {
7085 rtx ra, oa;
7086
7087 ra = XEXP (a, 0), oa = XEXP (a, 1);
7088 if (rtx_equal_p (oa, b))
7089 oa = ra;
7090 else if (!rtx_equal_p (ra, b))
7091 return NULL_RTX;
7092
7093 if (GET_CODE (oa) != CONST_INT)
7094 return NULL_RTX;
7095
7096 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7097 }
7098 else if (GET_CODE (a) == CONST_INT)
7099 {
7100 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7101 }
7102 else if (CONSTANT_P (a))
7103 {
7104 enum machine_mode mode_a = GET_MODE (a);
7105 enum machine_mode mode_b = GET_MODE (b);
7106 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7107 return simplify_gen_binary (MINUS, mode, b, a);
7108 }
7109 else if (GET_CODE (b) == PLUS)
7110 {
7111 if (rtx_equal_p (a, XEXP (b, 0)))
7112 return XEXP (b, 1);
7113 else if (rtx_equal_p (a, XEXP (b, 1)))
7114 return XEXP (b, 0);
7115 else
7116 return NULL_RTX;
7117 }
7118 else if (rtx_equal_p (a, b))
7119 return const0_rtx;
7120
7121 return NULL_RTX;
7122 }
7123
7124 rtx
7125 express_from (struct induction *g1, struct induction *g2)
7126 {
7127 rtx mult, add;
7128
7129 /* The value that G1 will be multiplied by must be a constant integer. Also,
7130 the only chance we have of getting a valid address is if y/x (see above
7131 for notation) is also an integer. */
7132 if (GET_CODE (g1->mult_val) == CONST_INT
7133 && GET_CODE (g2->mult_val) == CONST_INT)
7134 {
7135 if (g1->mult_val == const0_rtx
7136 || (g1->mult_val == constm1_rtx
7137 && INTVAL (g2->mult_val)
7138 == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1))
7139 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7140 return NULL_RTX;
7141 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7142 }
7143 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7144 mult = const1_rtx;
7145 else
7146 {
7147 /* ??? Find out if one is a multiple of the other? */
7148 return NULL_RTX;
7149 }
7150
7151 add = express_from_1 (g1->add_val, g2->add_val, mult);
7152 if (add == NULL_RTX)
7153 {
7154 /* Failed. If we've got a multiplication factor between G1 and G2,
7155 scale G1's addend and try again. */
7156 if (INTVAL (mult) > 1)
7157 {
7158 rtx g1_add_val = g1->add_val;
7159 if (GET_CODE (g1_add_val) == MULT
7160 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7161 {
7162 HOST_WIDE_INT m;
7163 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7164 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7165 XEXP (g1_add_val, 0), GEN_INT (m));
7166 }
7167 else
7168 {
7169 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7170 mult);
7171 }
7172
7173 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7174 }
7175 }
7176 if (add == NULL_RTX)
7177 return NULL_RTX;
7178
7179 /* Form simplified final result. */
7180 if (mult == const0_rtx)
7181 return add;
7182 else if (mult == const1_rtx)
7183 mult = g1->dest_reg;
7184 else
7185 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7186
7187 if (add == const0_rtx)
7188 return mult;
7189 else
7190 {
7191 if (GET_CODE (add) == PLUS
7192 && CONSTANT_P (XEXP (add, 1)))
7193 {
7194 rtx tem = XEXP (add, 1);
7195 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7196 add = tem;
7197 }
7198
7199 return gen_rtx_PLUS (g2->mode, mult, add);
7200 }
7201 }
7202 \f
7203 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7204 represented by G1. This indicates that G2 should be combined with G1 and
7205 that G2 can use (either directly or via an address expression) a register
7206 used to represent G1. */
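/* Illustrative example: if G1 = 4*i and G2 = 4*i + 8, express_from yields
(plus G1 8); a DEST_ADDR G2 can then address through G1's register plus 8
instead of needing a reduced register of its own.  */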
7207
7208 static rtx
7209 combine_givs_p (struct induction *g1, struct induction *g2)
7210 {
7211 rtx comb, ret;
7212
7213 /* With the introduction of ext dependent givs, we must care for modes.
7214 G2 must not use a wider mode than G1. */
7215 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7216 return NULL_RTX;
7217
7218 ret = comb = express_from (g1, g2);
7219 if (comb == NULL_RTX)
7220 return NULL_RTX;
7221 if (g1->mode != g2->mode)
7222 ret = gen_lowpart (g2->mode, comb);
7223
7224 /* If these givs are identical, they can be combined. We use the results
7225 of express_from because the addends are not in a canonical form, so
7226 rtx_equal_p is a weaker test. */
7227 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7228 combination to be the other way round. */
7229 if (comb == g1->dest_reg
7230 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7231 {
7232 return ret;
7233 }
7234
7235 /* If G2 can be expressed as a function of G1 and that function is valid
7236 as an address and no more expensive than using a register for G2,
7237 the expression of G2 in terms of G1 can be used. */
7238 if (ret != NULL_RTX
7239 && g2->giv_type == DEST_ADDR
7240 && memory_address_p (GET_MODE (g2->mem), ret))
7241 return ret;
7242
7243 return NULL_RTX;
7244 }
7245 \f
7246 /* Check each extension dependent giv in this class to see if its
7247 root biv is safe from wrapping in the interior mode, which would
7248 make the giv illegal. */
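/* Illustrative example: a HImode biv that starts at 0, increments by 1 and
runs for a known 100 iterations never leaves the range of HImode, so givs
such as (zero_extend:SI biv) or (sign_extend:SI biv) remain valid.  A biv
whose range might wrap in the narrow mode forces such givs to be ignored.  */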
7249
7250 static void
7251 check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl)
7252 {
7253 struct loop_info *loop_info = LOOP_INFO (loop);
7254 int ze_ok = 0, se_ok = 0, info_ok = 0;
7255 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7256 HOST_WIDE_INT start_val;
7257 unsigned HOST_WIDE_INT u_end_val = 0;
7258 unsigned HOST_WIDE_INT u_start_val = 0;
7259 rtx incr = pc_rtx;
7260 struct induction *v;
7261
7262 /* Make sure the iteration data is available. We must have
7263 constants in order to be certain of no overflow. */
7264 if (loop_info->n_iterations > 0
7265 && bl->initial_value
7266 && GET_CODE (bl->initial_value) == CONST_INT
7267 && (incr = biv_total_increment (bl))
7268 && GET_CODE (incr) == CONST_INT
7269 /* Make sure the host can represent the arithmetic. */
7270 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7271 {
7272 unsigned HOST_WIDE_INT abs_incr, total_incr;
7273 HOST_WIDE_INT s_end_val;
7274 int neg_incr;
7275
7276 info_ok = 1;
7277 start_val = INTVAL (bl->initial_value);
7278 u_start_val = start_val;
7279
7280 neg_incr = 0, abs_incr = INTVAL (incr);
7281 if (INTVAL (incr) < 0)
7282 neg_incr = 1, abs_incr = -abs_incr;
7283 total_incr = abs_incr * loop_info->n_iterations;
7284
7285 /* Check for host arithmetic overflow. */
7286 if (total_incr / loop_info->n_iterations == abs_incr)
7287 {
7288 unsigned HOST_WIDE_INT u_max;
7289 HOST_WIDE_INT s_max;
7290
7291 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7292 s_end_val = u_end_val;
7293 u_max = GET_MODE_MASK (biv_mode);
7294 s_max = u_max >> 1;
7295
7296 /* Check zero extension of biv ok. */
7297 if (start_val >= 0
7298 /* Check for host arithmetic overflow. */
7299 && (neg_incr
7300 ? u_end_val < u_start_val
7301 : u_end_val > u_start_val)
7302 /* Check for target arithmetic overflow. */
7303 && (neg_incr
7304 ? 1 /* taken care of with host overflow */
7305 : u_end_val <= u_max))
7306 {
7307 ze_ok = 1;
7308 }
7309
7310 /* Check sign extension of biv ok. */
7311 /* ??? While it is true that overflow with signed and pointer
7312 arithmetic is undefined, I fear too many programmers don't
7313 keep this fact in mind -- myself included on occasion.
7314 So leave the signed overflow optimizations alone. */
7315 if (start_val >= -s_max - 1
7316 /* Check for host arithmetic overflow. */
7317 && (neg_incr
7318 ? s_end_val < start_val
7319 : s_end_val > start_val)
7320 /* Check for target arithmetic overflow. */
7321 && (neg_incr
7322 ? s_end_val >= -s_max - 1
7323 : s_end_val <= s_max))
7324 {
7325 se_ok = 1;
7326 }
7327 }
7328 }
7329
7330 /* If we know the BIV is compared at run-time against an
7331 invariant value, and the increment is +/- 1, we may also
7332 be able to prove that the BIV cannot overflow. */
7333 else if (bl->biv->src_reg == loop_info->iteration_var
7334 && loop_info->comparison_value
7335 && loop_invariant_p (loop, loop_info->comparison_value)
7336 && (incr = biv_total_increment (bl))
7337 && GET_CODE (incr) == CONST_INT)
7338 {
7339 /* If the increment is +1, and the exit test is a <,
7340 the BIV cannot overflow. (For <=, we have the
7341 problematic case that the comparison value might
7342 be the maximum value of the range.) */
7343 if (INTVAL (incr) == 1)
7344 {
7345 if (loop_info->comparison_code == LT)
7346 se_ok = ze_ok = 1;
7347 else if (loop_info->comparison_code == LTU)
7348 ze_ok = 1;
7349 }
7350
7351 /* Likewise for increment -1 and exit test >. */
7352 if (INTVAL (incr) == -1)
7353 {
7354 if (loop_info->comparison_code == GT)
7355 se_ok = ze_ok = 1;
7356 else if (loop_info->comparison_code == GTU)
7357 ze_ok = 1;
7358 }
7359 }
7360
7361 /* Invalidate givs that fail the tests. */
7362 for (v = bl->giv; v; v = v->next_iv)
7363 if (v->ext_dependent)
7364 {
7365 enum rtx_code code = GET_CODE (v->ext_dependent);
7366 int ok = 0;
7367
7368 switch (code)
7369 {
7370 case SIGN_EXTEND:
7371 ok = se_ok;
7372 break;
7373 case ZERO_EXTEND:
7374 ok = ze_ok;
7375 break;
7376
7377 case TRUNCATE:
7378 /* We don't know whether this value is being used as either
7379 signed or unsigned, so to safely truncate we must satisfy
7380 both. The initial check here verifies the BIV itself;
7381 once that is successful we may check its range wrt the
7382 derived GIV. This works only if we were able to determine
7383 constant start and end values above. */
7384 if (se_ok && ze_ok && info_ok)
7385 {
7386 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7387 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7388
7389 /* We know from the above that both endpoints are nonnegative,
7390 and that there is no wrapping. Verify that both endpoints
7391 are within the (signed) range of the outer mode. */
7392 if (u_start_val <= max && u_end_val <= max)
7393 ok = 1;
7394 }
7395 break;
7396
7397 default:
7398 gcc_unreachable ();
7399 }
7400
7401 if (ok)
7402 {
7403 if (loop_dump_stream)
7404 {
7405 fprintf (loop_dump_stream,
7406 "Verified ext dependent giv at %d of reg %d\n",
7407 INSN_UID (v->insn), bl->regno);
7408 }
7409 }
7410 else
7411 {
7412 if (loop_dump_stream)
7413 {
7414 const char *why;
7415
7416 if (info_ok)
7417 why = "biv iteration values overflowed";
7418 else
7419 {
7420 if (incr == pc_rtx)
7421 incr = biv_total_increment (bl);
7422 if (incr == const1_rtx)
7423 why = "biv iteration info incomplete; incr by 1";
7424 else
7425 why = "biv iteration info incomplete";
7426 }
7427
7428 fprintf (loop_dump_stream,
7429 "Failed ext dependent giv at %d, %s\n",
7430 INSN_UID (v->insn), why);
7431 }
7432 v->ignore = 1;
7433 bl->all_reduced = 0;
7434 }
7435 }
7436 }
7437
7438 /* Generate a version of VALUE in a mode appropriate for initializing V. */
7439
7440 rtx
7441 extend_value_for_giv (struct induction *v, rtx value)
7442 {
7443 rtx ext_dep = v->ext_dependent;
7444
7445 if (! ext_dep)
7446 return value;
7447
7448 /* Recall that check_ext_dependent_givs verified that the known bounds
7449 of a biv did not overflow or wrap with respect to the extension for
7450 the giv. Therefore, constants need no additional adjustment. */
7451 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7452 return value;
7453
7454 /* Otherwise, we must adjust the value to compensate for the
7455 differing modes of the biv and the giv. */
7456 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
7457 }
7458 \f
7459 struct combine_givs_stats
7460 {
7461 int giv_number;
7462 int total_benefit;
7463 };
7464
7465 static int
7466 cmp_combine_givs_stats (const void *xp, const void *yp)
7467 {
7468 const struct combine_givs_stats * const x =
7469 (const struct combine_givs_stats *) xp;
7470 const struct combine_givs_stats * const y =
7471 (const struct combine_givs_stats *) yp;
7472 int d;
7473 d = y->total_benefit - x->total_benefit;
7474 /* Stabilize the sort. */
7475 if (!d)
7476 d = x->giv_number - y->giv_number;
7477 return d;
7478 }
7479
7480 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7481 any other. If so, point SAME to the giv combined with and set NEW_REG to
7482 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7483 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
7484
7485 static void
7486 combine_givs (struct loop_regs *regs, struct iv_class *bl)
7487 {
7488 /* Additional benefit to add for being combined multiple times. */
7489 const int extra_benefit = 3;
7490
7491 struct induction *g1, *g2, **giv_array;
7492 int i, j, k, giv_count;
7493 struct combine_givs_stats *stats;
7494 rtx *can_combine;
7495
7496 /* Count givs, because bl->giv_count is incorrect here. */
7497 giv_count = 0;
7498 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7499 if (!g1->ignore)
7500 giv_count++;
7501
7502 giv_array = alloca (giv_count * sizeof (struct induction *));
7503 i = 0;
7504 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7505 if (!g1->ignore)
7506 giv_array[i++] = g1;
7507
7508 stats = xcalloc (giv_count, sizeof (*stats));
7509 can_combine = xcalloc (giv_count, giv_count * sizeof (rtx));
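/* can_combine is a giv_count x giv_count matrix: entry [i*giv_count + j]
holds the rtx expressing giv J in terms of giv I's destination register,
or zero if the two cannot be combined.  */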
7510
7511 for (i = 0; i < giv_count; i++)
7512 {
7513 int this_benefit;
7514 rtx single_use;
7515
7516 g1 = giv_array[i];
7517 stats[i].giv_number = i;
7518
7519 /* If a DEST_REG GIV is used only once, do not allow it to combine
7520 with anything, for in doing so we will gain nothing that cannot
7521 be had by simply letting the GIV with which we would have combined
7522 be reduced on its own. The loss shows up in particular with
7523 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7524 be seen elsewhere as well. */
7525 if (g1->giv_type == DEST_REG
7526 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7527 && single_use != const0_rtx)
7528 continue;
7529
7530 this_benefit = g1->benefit;
7531 /* Add an additional weight for zero addends. */
7532 if (g1->no_const_addval)
7533 this_benefit += 1;
7534
7535 for (j = 0; j < giv_count; j++)
7536 {
7537 rtx this_combine;
7538
7539 g2 = giv_array[j];
7540 if (g1 != g2
7541 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7542 {
7543 can_combine[i * giv_count + j] = this_combine;
7544 this_benefit += g2->benefit + extra_benefit;
7545 }
7546 }
7547 stats[i].total_benefit = this_benefit;
7548 }
7549
7550 /* Iterate, combining until we can't. */
7551 restart:
7552 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7553
7554 if (loop_dump_stream)
7555 {
7556 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7557 for (k = 0; k < giv_count; k++)
7558 {
7559 g1 = giv_array[stats[k].giv_number];
7560 if (!g1->combined_with && !g1->same)
7561 fprintf (loop_dump_stream, " {%d, %d}",
7562 INSN_UID (giv_array[stats[k].giv_number]->insn),
7563 stats[k].total_benefit);
7564 }
7565 putc ('\n', loop_dump_stream);
7566 }
7567
7568 for (k = 0; k < giv_count; k++)
7569 {
7570 int g1_add_benefit = 0;
7571
7572 i = stats[k].giv_number;
7573 g1 = giv_array[i];
7574
7575 /* If it has already been combined, skip. */
7576 if (g1->combined_with || g1->same)
7577 continue;
7578
7579 for (j = 0; j < giv_count; j++)
7580 {
7581 g2 = giv_array[j];
7582 if (g1 != g2 && can_combine[i * giv_count + j]
7583 /* If it has already been combined, skip. */
7584 && ! g2->same && ! g2->combined_with)
7585 {
7586 int l;
7587
7588 g2->new_reg = can_combine[i * giv_count + j];
7589 g2->same = g1;
7590 /* For the destination, we may now replace a register with a mem
7591 expression. This changes the costs considerably, so add the
7592 compensation. */
7593 if (g2->giv_type == DEST_ADDR)
7594 g2->benefit = (g2->benefit + reg_address_cost
7595 - address_cost (g2->new_reg,
7596 GET_MODE (g2->mem)));
7597 g1->combined_with++;
7598 g1->lifetime += g2->lifetime;
7599
7600 g1_add_benefit += g2->benefit;
7601
7602 /* ??? The new final_[bg]iv_value code does a much better job
7603 of finding replaceable giv's, and hence this code may no
7604 longer be necessary. */
7605 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7606 g1_add_benefit -= copy_cost;
7607
7608 /* To help optimize the next set of combinations, remove
7609 this giv from the benefits of other potential mates. */
7610 for (l = 0; l < giv_count; ++l)
7611 {
7612 int m = stats[l].giv_number;
7613 if (can_combine[m * giv_count + j])
7614 stats[l].total_benefit -= g2->benefit + extra_benefit;
7615 }
7616
7617 if (loop_dump_stream)
7618 fprintf (loop_dump_stream,
7619 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7620 INSN_UID (g2->insn), INSN_UID (g1->insn),
7621 g1->benefit, g1_add_benefit, g1->lifetime);
7622 }
7623 }
7624
7625 /* To help optimize the next set of combinations, remove
7626 this giv from the benefits of other potential mates. */
7627 if (g1->combined_with)
7628 {
7629 for (j = 0; j < giv_count; ++j)
7630 {
7631 int m = stats[j].giv_number;
7632 if (can_combine[m * giv_count + i])
7633 stats[j].total_benefit -= g1->benefit + extra_benefit;
7634 }
7635
7636 g1->benefit += g1_add_benefit;
7637
7638 /* We've finished with this giv, and everything it touched.
7639 Restart the combination so that proper weights for the
7640 rest of the givs are properly taken into account. */
7641 /* ??? Ideally we would compact the arrays at this point, so
7642 as to not cover old ground. But sanely compacting
7643 can_combine is tricky. */
7644 goto restart;
7645 }
7646 }
7647
7648 /* Clean up. */
7649 free (stats);
7650 free (can_combine);
7651 }
7652 \f
7653 /* Generate sequence for REG = B * M + A. B is the initial value of
7654 the basic induction variable, M a multiplicative constant, A an
7655 additive constant and REG the destination register. */
7656
7657 static rtx
7658 gen_add_mult (rtx b, rtx m, rtx a, rtx reg)
7659 {
7660 rtx seq;
7661 rtx result;
7662
7663 start_sequence ();
7664 /* Use unsigned arithmetic. */
7665 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7666 if (reg != result)
7667 emit_move_insn (reg, result);
7668 seq = get_insns ();
7669 end_sequence ();
7670
7671 return seq;
7672 }
7673
7674
7675 /* Update registers created in insn sequence SEQ. */
7676
7677 static void
7678 loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq)
7679 {
7680 rtx insn;
7681
7682 /* Update register info for alias analysis. */
7683
7684 insn = seq;
7685 while (insn != NULL_RTX)
7686 {
7687 rtx set = single_set (insn);
7688
7689 if (set && REG_P (SET_DEST (set)))
7690 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7691
7692 insn = NEXT_INSN (insn);
7693 }
7694 }
7695
7696
7697 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. B
7698 is the initial value of the basic induction variable, M a
7699 multiplicative constant, A an additive constant and REG the
7700 destination register. */
7701
7702 void
7703 loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a,
7704 rtx reg, basic_block before_bb, rtx before_insn)
7705 {
7706 rtx seq;
7707
7708 if (! before_insn)
7709 {
7710 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7711 return;
7712 }
7713
7714 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7715 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7716
7717 /* Increase the lifetime of any invariants moved further in code. */
7718 update_reg_last_use (a, before_insn);
7719 update_reg_last_use (b, before_insn);
7720 update_reg_last_use (m, before_insn);
7721
7722 /* It is possible that the expansion created lots of new registers.
7723 Iterate over the sequence we just created and record them all. We
7724 must do this before inserting the sequence. */
7725 loop_regs_update (loop, seq);
7726
7727 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7728 }
7729
7730
7731 /* Emit insns after the loop's end to set REG = B * M + A. B is the
7732 initial value of the basic induction variable, M a multiplicative
7733 constant, A an additive constant and REG the destination
7734 register. */
7735
7736 void
7737 loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
7738 {
7739 rtx seq;
7740
7741 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7742 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7743
7744 /* Increase the lifetime of any invariants moved further in code.
7745 ??? Is this really necessary? */
7746 update_reg_last_use (a, loop->sink);
7747 update_reg_last_use (b, loop->sink);
7748 update_reg_last_use (m, loop->sink);
7749
7750 /* It is possible that the expansion created lots of new registers.
7751 Iterate over the sequence we just created and record them all. We
7752 must do this before inserting the sequence. */
7753 loop_regs_update (loop, seq);
7754
7755 loop_insn_sink (loop, seq);
7756 }
7757
7758
7759 /* Emit insns in the loop pre-header to set REG = B * M + A. B is the initial
7760 value of the basic induction variable, M a multiplicative constant,
7761 A an additive constant and REG the destination register. */
7762
7763 void
7764 loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
7765 {
7766 rtx seq;
7767
7768 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7769 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7770
7771 /* It is possible that the expansion created lots of new registers.
7772 Iterate over the sequence we just created and record them all. We
7773 must do this before inserting the sequence. */
7774 loop_regs_update (loop, seq);
7775
7776 loop_insn_hoist (loop, seq);
7777 }
7778
7779
7780
7781 /* Similar to gen_add_mult, but compute cost rather than generating
7782 sequence. */
7783
7784 static int
7785 iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg)
7786 {
7787 int cost = 0;
7788 rtx last, result;
7789
7790 start_sequence ();
7791 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7792 if (reg != result)
7793 emit_move_insn (reg, result);
7794 last = get_last_insn ();
7795 while (last)
7796 {
7797 rtx t = single_set (last);
7798 if (t)
7799 cost += rtx_cost (SET_SRC (t), SET);
7800 last = PREV_INSN (last);
7801 }
7802 end_sequence ();
7803 return cost;
7804 }
7805 \f
7806 /* Test whether A * B can be computed without
7807 an actual multiply insn. Value is 1 if so.
7808
7809 ??? This function stinks because it generates a ton of wasted RTL
7810 ??? and as a result fragments GC memory to no end. There are other
7811 ??? places in the compiler which are invoked a lot and do the same
7812 ??? thing, generate wasted RTL just to see if something is possible. */
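/* Illustrative example: on most targets a multiply by 5 expands to a shift
and an add, which this test accepts, while a multiply by a large prime
typically needs a real mult insn or a libcall and is rejected.  */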
7813
7814 static int
7815 product_cheap_p (rtx a, rtx b)
7816 {
7817 rtx tmp;
7818 int win, n_insns;
7819
7820 /* If only one is constant, make it B. */
7821 if (GET_CODE (a) == CONST_INT)
7822 tmp = a, a = b, b = tmp;
7823
7824 /* If first constant, both constant, so don't need multiply. */
7825 if (GET_CODE (a) == CONST_INT)
7826 return 1;
7827
7828 /* If second not constant, neither is constant, so would need multiply. */
7829 if (GET_CODE (b) != CONST_INT)
7830 return 0;
7831
7832 /* One operand is constant, so might not need multiply insn. Generate the
7833 code for the multiply and see if a call or multiply, or long sequence
7834 of insns is generated. */
7835
7836 start_sequence ();
7837 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7838 tmp = get_insns ();
7839 end_sequence ();
7840
7841 win = 1;
7842 if (INSN_P (tmp))
7843 {
7844 n_insns = 0;
7845 while (tmp != NULL_RTX)
7846 {
7847 rtx next = NEXT_INSN (tmp);
7848
7849 if (++n_insns > 3
7850 || !NONJUMP_INSN_P (tmp)
7851 || (GET_CODE (PATTERN (tmp)) == SET
7852 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
7853 || (GET_CODE (PATTERN (tmp)) == PARALLEL
7854 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
7855 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
7856 {
7857 win = 0;
7858 break;
7859 }
7860
7861 tmp = next;
7862 }
7863 }
7864 else if (GET_CODE (tmp) == SET
7865 && GET_CODE (SET_SRC (tmp)) == MULT)
7866 win = 0;
7867 else if (GET_CODE (tmp) == PARALLEL
7868 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7869 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7870 win = 0;
7871
7872 return win;
7873 }
7874 \f
7875 /* Check to see if loop can be terminated by a "decrement and branch until
7876 zero" instruction. If so, add a REG_NONNEG note to the branch insn.
7877 Also try reversing an increment loop to a decrement loop
7878 to see if the optimization can be performed.
7879 Value is nonzero if optimization was performed. */
7880
7881 /* This is useful even if the architecture doesn't have such an insn,
7882 because it might change a loop which increments from 0 to n into a loop
7883 which decrements from n to 0. A loop that decrements to zero is usually
7884 faster than one that increments from zero. */
7885
7886 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7887 such as approx_final_value, biv_total_increment, loop_iterations, and
7888 final_[bg]iv_value. */
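/* Illustrative example: when the biv is used only for counting, a loop of
the form "for (i = 0; i < n; i++) body;" is rewritten here roughly as
"for (i = n - 1; i >= 0; i--) body;", so that the exit test becomes a
comparison against zero.  */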
7889
7890 static int
7891 check_dbra_loop (struct loop *loop, int insn_count)
7892 {
7893 struct loop_info *loop_info = LOOP_INFO (loop);
7894 struct loop_regs *regs = LOOP_REGS (loop);
7895 struct loop_ivs *ivs = LOOP_IVS (loop);
7896 struct iv_class *bl;
7897 rtx reg;
7898 enum machine_mode mode;
7899 rtx jump_label;
7900 rtx final_value;
7901 rtx start_value;
7902 rtx new_add_val;
7903 rtx comparison;
7904 rtx before_comparison;
7905 rtx p;
7906 rtx jump;
7907 rtx first_compare;
7908 int compare_and_branch;
7909 rtx loop_start = loop->start;
7910 rtx loop_end = loop->end;
7911
7912 /* If last insn is a conditional branch, and the insn before tests a
7913 register value, try to optimize it. Otherwise, we can't do anything. */
7914
7915 jump = PREV_INSN (loop_end);
7916 comparison = get_condition_for_loop (loop, jump);
7917 if (comparison == 0)
7918 return 0;
7919 if (!onlyjump_p (jump))
7920 return 0;
7921
7922 /* Try to compute whether the compare/branch at the loop end is one or
7923 two instructions. */
7924 get_condition (jump, &first_compare, false, true);
7925 if (first_compare == jump)
7926 compare_and_branch = 1;
7927 else if (first_compare == prev_nonnote_insn (jump))
7928 compare_and_branch = 2;
7929 else
7930 return 0;
7931
7932 {
7933 /* If more than one condition is present to control the loop, then
7934 do not proceed, as this function does not know how to rewrite
7935 loop tests with more than one condition.
7936
7937 Look backwards from the first insn in the last comparison
7938 sequence and see if we've got another comparison sequence. */
7939
7940 rtx jump1;
7941 if ((jump1 = prev_nonnote_insn (first_compare))
7942 && JUMP_P (jump1))
7943 return 0;
7944 }
7945
7946 /* Check all of the bivs to see if the compare uses one of them.
7947 Skip biv's set more than once because we can't guarantee that
7948 it will be zero on the last iteration. Also skip if the biv is
7949 used between its update and the test insn. */
7950
7951 for (bl = ivs->list; bl; bl = bl->next)
7952 {
7953 if (bl->biv_count == 1
7954 && ! bl->biv->maybe_multiple
7955 && bl->biv->dest_reg == XEXP (comparison, 0)
7956 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
7957 first_compare))
7958 break;
7959 }
7960
7961 /* Try swapping the comparison to identify a suitable biv. */
7962 if (!bl)
7963 for (bl = ivs->list; bl; bl = bl->next)
7964 if (bl->biv_count == 1
7965 && ! bl->biv->maybe_multiple
7966 && bl->biv->dest_reg == XEXP (comparison, 1)
7967 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
7968 first_compare))
7969 {
7970 comparison = gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)),
7971 VOIDmode,
7972 XEXP (comparison, 1),
7973 XEXP (comparison, 0));
7974 break;
7975 }
7976
7977 if (! bl)
7978 return 0;
7979
7980 /* Look for the case where the basic induction variable is always
7981 nonnegative, and equals zero on the last iteration.
7982 In this case, add a reg_note REG_NONNEG, which allows the
7983 m68k DBRA instruction to be used. */
7984
7985 if (((GET_CODE (comparison) == GT && XEXP (comparison, 1) == constm1_rtx)
7986 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
7987 && GET_CODE (bl->biv->add_val) == CONST_INT
7988 && INTVAL (bl->biv->add_val) < 0)
7989 {
7990 /* Initial value must be greater than 0, and
7991 init_val % -dec_value == 0 to ensure that it equals zero on
7992 the last iteration. */
7993
7994 if (GET_CODE (bl->initial_value) == CONST_INT
7995 && INTVAL (bl->initial_value) > 0
7996 && (INTVAL (bl->initial_value)
7997 % (-INTVAL (bl->biv->add_val))) == 0)
7998 {
7999 /* Register always nonnegative, add REG_NOTE to branch. */
8000 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8001 REG_NOTES (jump)
8002 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8003 REG_NOTES (jump));
8004 bl->nonneg = 1;
8005
8006 return 1;
8007 }
8008
8009 /* If the decrement is 1 and the value was tested as >= 0 before
8010 the loop, then we can safely optimize. */
8011 for (p = loop_start; p; p = PREV_INSN (p))
8012 {
8013 if (LABEL_P (p))
8014 break;
8015 if (!JUMP_P (p))
8016 continue;
8017
8018 before_comparison = get_condition_for_loop (loop, p);
8019 if (before_comparison
8020 && XEXP (before_comparison, 0) == bl->biv->dest_reg
8021 && (GET_CODE (before_comparison) == LT
8022 || GET_CODE (before_comparison) == LTU)
8023 && XEXP (before_comparison, 1) == const0_rtx
8024 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
8025 && INTVAL (bl->biv->add_val) == -1)
8026 {
8027 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8028 REG_NOTES (jump)
8029 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8030 REG_NOTES (jump));
8031 bl->nonneg = 1;
8032
8033 return 1;
8034 }
8035 }
8036 }
8037 else if (GET_CODE (bl->biv->add_val) == CONST_INT
8038 && INTVAL (bl->biv->add_val) > 0)
8039 {
8040 /* Try to change inc to dec, so can apply above optimization. */
8041 /* Can do this if:
8042 all registers modified are induction variables or invariant,
8043 all memory references have non-overlapping addresses
8044 (obviously true if only one write)
8045 allow 2 insns for the compare/jump at the end of the loop. */
8046 /* Also, we must avoid any instructions which use both the reversed
8047 biv and another biv. Such instructions will fail if the loop is
8048 reversed. We meet this condition by requiring that either
8049 no_use_except_counting is true, or else that there is only
8050 one biv. */
8051 int num_nonfixed_reads = 0;
8052 /* 1 if the iteration var is used only to count iterations. */
8053 int no_use_except_counting = 0;
8054 /* 1 if the loop has no memory store, or it has a single memory store
8055 which is reversible. */
8056 int reversible_mem_store = 1;
8057
8058 if (bl->giv_count == 0
8059 && !loop->exit_count
8060 && !loop_info->has_multiple_exit_targets)
8061 {
8062 rtx bivreg = regno_reg_rtx[bl->regno];
8063 struct iv_class *blt;
8064
8065 /* If there are no givs for this biv, and the only exit is the
8066 fall through at the end of the loop, then
8067 see if perhaps there are no uses except to count. */
8068 no_use_except_counting = 1;
8069 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8070 if (INSN_P (p))
8071 {
8072 rtx set = single_set (p);
8073
8074 if (set && REG_P (SET_DEST (set))
8075 && REGNO (SET_DEST (set)) == bl->regno)
8076 /* An insn that sets the biv is okay. */
8077 ;
8078 else if (!reg_mentioned_p (bivreg, PATTERN (p)))
8079 /* An insn that doesn't mention the biv is okay. */
8080 ;
8081 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8082 || p == prev_nonnote_insn (loop_end))
8083 {
8084 /* If either of these insns uses the biv and sets a pseudo
8085 that has more than one usage, then the biv has uses
8086 other than counting since it's used to derive a value
8087 that is used more than one time. */
8088 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8089 regs);
8090 if (regs->multiple_uses)
8091 {
8092 no_use_except_counting = 0;
8093 break;
8094 }
8095 }
8096 else
8097 {
8098 no_use_except_counting = 0;
8099 break;
8100 }
8101 }
8102
8103 /* A biv has uses besides counting if it is used to set
8104 another biv. */
8105 for (blt = ivs->list; blt; blt = blt->next)
8106 if (blt->init_set
8107 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8108 {
8109 no_use_except_counting = 0;
8110 break;
8111 }
8112 }
8113
8114 if (no_use_except_counting)
8115 /* No need to worry about MEMs. */
8116 ;
8117 else if (loop_info->num_mem_sets <= 1)
8118 {
8119 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8120 if (INSN_P (p))
8121 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8122
8123 /* If the loop has a single store, and the destination address is
8124 invariant, then we can't reverse the loop, because this address
8125 might then have the wrong value at loop exit.
8126 This would work if the source were invariant also; however, in that
8127 case the insn should have been moved out of the loop. */
8128
8129 if (loop_info->num_mem_sets == 1)
8130 {
8131 struct induction *v;
8132
8133 /* If we could prove that each of the memory locations
8134 written to was different, then we could reverse the
8135 store -- but we don't presently have any way of
8136 knowing that. */
8137 reversible_mem_store = 0;
8138
8139 /* If the store depends on a register that is set after the
8140 store, it depends on the initial value, and is thus not
8141 reversible. */
8142 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8143 {
8144 if (v->giv_type == DEST_REG
8145 && reg_mentioned_p (v->dest_reg,
8146 PATTERN (loop_info->first_loop_store_insn))
8147 && loop_insn_first_p (loop_info->first_loop_store_insn,
8148 v->insn))
8149 reversible_mem_store = 0;
8150 }
8151 }
8152 }
8153 else
8154 return 0;
8155
8156 /* This code only acts for innermost loops. Also it simplifies
8157 the memory address check by only reversing loops with
8158 zero or one memory access.
8159 Two memory accesses could involve parts of the same array,
8160 and that can't be reversed.
8161 If the biv is used only for counting, then we don't need to worry
8162 about all these things. */
8163
8164 if ((num_nonfixed_reads <= 1
8165 && ! loop_info->has_nonconst_call
8166 && ! loop_info->has_prefetch
8167 && ! loop_info->has_volatile
8168 && reversible_mem_store
8169 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8170 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8171 && (bl == ivs->list && bl->next == 0))
8172 || (no_use_except_counting && ! loop_info->has_prefetch))
8173 {
8174 rtx tem;
8175
8176 /* Loop can be reversed. */
8177 if (loop_dump_stream)
8178 fprintf (loop_dump_stream, "Can reverse loop\n");
8179
8180 /* Now check other conditions:
8181
8182 The increment must be a constant, as must the initial value,
8183 and the comparison code must be LT.
8184
8185 This test can probably be improved since +/- 1 in the constant
8186 can be obtained by changing LT to LE and vice versa; this is
8187 confusing. */
8188
8189 if (comparison
8190 /* for constants, LE gets turned into LT */
8191 && (GET_CODE (comparison) == LT
8192 || (GET_CODE (comparison) == LE
8193 && no_use_except_counting)
8194 || GET_CODE (comparison) == LTU))
8195 {
8196 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8197 rtx initial_value, comparison_value;
8198 int nonneg = 0;
8199 enum rtx_code cmp_code;
8200 int comparison_const_width;
8201 unsigned HOST_WIDE_INT comparison_sign_mask;
8202 bool keep_first_compare;
8203
8204 add_val = INTVAL (bl->biv->add_val);
8205 comparison_value = XEXP (comparison, 1);
8206 if (GET_MODE (comparison_value) == VOIDmode)
8207 comparison_const_width
8208 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8209 else
8210 comparison_const_width
8211 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8212 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8213 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8214 comparison_sign_mask
8215 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8216
8217 /* If the comparison value is not a loop invariant, then we
8218 can not reverse this loop.
8219
8220 ??? If the insns which initialize the comparison value as
8221 a whole compute an invariant result, then we could move
8222 them out of the loop and proceed with loop reversal. */
8223 if (! loop_invariant_p (loop, comparison_value))
8224 return 0;
8225
8226 if (GET_CODE (comparison_value) == CONST_INT)
8227 comparison_val = INTVAL (comparison_value);
8228 initial_value = bl->initial_value;
8229
8230 /* Normalize the initial value if it is an integer and
8231 has no other use except as a counter. This will allow
8232 a few more loops to be reversed. */
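/* Illustrative example: with initial value 3, comparison value 10 and
increment 4, the loop is renormalized to count 0, 4 against a rounded-up
limit of 8; both forms execute exactly twice.  */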
8233 if (no_use_except_counting
8234 && GET_CODE (comparison_value) == CONST_INT
8235 && GET_CODE (initial_value) == CONST_INT)
8236 {
8237 comparison_val = comparison_val - INTVAL (bl->initial_value);
8238 /* The code below requires comparison_val to be a multiple
8239 of add_val in order to do the loop reversal, so
8240 round up comparison_val to a multiple of add_val.
8241 Since comparison_value is constant, we know that the
8242 current comparison code is LT. */
8243 comparison_val = comparison_val + add_val - 1;
8244 comparison_val
8245 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8246 /* We postpone overflow checks for COMPARISON_VAL here;
8247 even if there is an overflow, we might still be able to
8248 reverse the loop, if converting the loop exit test to
8249 NE is possible. */
8250 initial_value = const0_rtx;
8251 }
8252
8253 /* First check if we can do a vanilla loop reversal. */
8254 if (initial_value == const0_rtx
8255 && GET_CODE (comparison_value) == CONST_INT
8256 /* Now do postponed overflow checks on COMPARISON_VAL. */
8257 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8258 & comparison_sign_mask))
8259 {
8260 /* Register will always be nonnegative, with value
8261 0 on last iteration */
8262 add_adjust = add_val;
8263 nonneg = 1;
8264 cmp_code = GE;
8265 }
8266 else
8267 return 0;
8268
8269 if (GET_CODE (comparison) == LE)
8270 add_adjust -= add_val;
8271
8272 /* If the initial value is not zero, or if the comparison
8273 value is not an exact multiple of the increment, then we
8274 can not reverse this loop. */
8275 if (initial_value == const0_rtx
8276 && GET_CODE (comparison_value) == CONST_INT)
8277 {
8278 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8279 return 0;
8280 }
8281 else
8282 {
8283 if (! no_use_except_counting || add_val != 1)
8284 return 0;
8285 }
8286
8287 final_value = comparison_value;
8288
8289 /* Reset these in case we normalized the initial value
8290 and comparison value above. */
8291 if (GET_CODE (comparison_value) == CONST_INT
8292 && GET_CODE (initial_value) == CONST_INT)
8293 {
8294 comparison_value = GEN_INT (comparison_val);
8295 final_value
8296 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8297 }
8298 bl->initial_value = initial_value;
8299
8300 /* Save some info needed to produce the new insns. */
8301 reg = bl->biv->dest_reg;
8302 mode = GET_MODE (reg);
8303 jump_label = condjump_label (PREV_INSN (loop_end));
8304 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
8305
8306 /* Set start_value; if this is not a CONST_INT, we need
8307 to generate a SUB.
8308 Initialize biv to start_value before loop start.
8309 The old initializing insn will be deleted as a
8310 dead store by flow.c. */
8311 if (initial_value == const0_rtx
8312 && GET_CODE (comparison_value) == CONST_INT)
8313 {
8314 start_value
8315 = gen_int_mode (comparison_val - add_adjust, mode);
8316 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
8317 }
8318 else if (GET_CODE (initial_value) == CONST_INT)
8319 {
8320 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8321 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8322
8323 if (add_insn == 0)
8324 return 0;
8325
8326 start_value
8327 = gen_rtx_PLUS (mode, comparison_value, offset);
8328 loop_insn_hoist (loop, add_insn);
8329 if (GET_CODE (comparison) == LE)
8330 final_value = gen_rtx_PLUS (mode, comparison_value,
8331 GEN_INT (add_val));
8332 }
8333 else if (! add_adjust)
8334 {
8335 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8336 initial_value);
8337
8338 if (sub_insn == 0)
8339 return 0;
8340 start_value
8341 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8342 loop_insn_hoist (loop, sub_insn);
8343 }
8344 else
8345 /* We could handle the other cases too, but it'll be
8346 better to have a testcase first. */
8347 return 0;
8348
8349 /* We may not have a single insn which can increment a reg, so
8350 create a sequence to hold all the insns from expand_inc. */
8351 start_sequence ();
8352 expand_inc (reg, new_add_val);
8353 tem = get_insns ();
8354 end_sequence ();
8355
8356 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8357 delete_insn (bl->biv->insn);
8358
8359 /* Update biv info to reflect its new status. */
8360 bl->biv->insn = p;
8361 bl->initial_value = start_value;
8362 bl->biv->add_val = new_add_val;
8363
8364 /* Update loop info. */
8365 loop_info->initial_value = reg;
8366 loop_info->initial_equiv_value = reg;
8367 loop_info->final_value = const0_rtx;
8368 loop_info->final_equiv_value = const0_rtx;
8369 loop_info->comparison_value = const0_rtx;
8370 loop_info->comparison_code = cmp_code;
8371 loop_info->increment = new_add_val;
8372
8373 /* Inc LABEL_NUSES so that delete_insn will
8374 not delete the label. */
8375 LABEL_NUSES (XEXP (jump_label, 0))++;
8376
8377 /* If we have a separate comparison insn that does more
8378 than just set cc0, the result of the comparison might
8379 be used outside the loop. */
8380 keep_first_compare = (compare_and_branch == 2
8381 #ifdef HAVE_cc0
8382 && sets_cc0_p (first_compare) <= 0
8383 #endif
8384 );
8385
8386 /* Emit an insn after the end of the loop to set the biv's
8387 proper exit value if it is used anywhere outside the loop. */
8388 if (keep_first_compare
8389 || (REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8390 || ! bl->init_insn
8391 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8392 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
8393
8394 if (keep_first_compare)
8395 loop_insn_sink (loop, PATTERN (first_compare));
8396
8397 /* Delete compare/branch at end of loop. */
8398 delete_related_insns (PREV_INSN (loop_end));
8399 if (compare_and_branch == 2)
8400 delete_related_insns (first_compare);
8401
8402 /* Add new compare/branch insn at end of loop. */
8403 start_sequence ();
8404 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8405 mode, 0,
8406 XEXP (jump_label, 0));
8407 tem = get_insns ();
8408 end_sequence ();
8409 emit_jump_insn_before (tem, loop_end);
8410
8411 for (tem = PREV_INSN (loop_end);
8412 tem && !JUMP_P (tem);
8413 tem = PREV_INSN (tem))
8414 ;
8415
8416 if (tem)
8417 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8418
8419 if (nonneg)
8420 {
8421 if (tem)
8422 {
8423 /* Increment of LABEL_NUSES done above. */
8424 /* Register is now always nonnegative,
8425 so add REG_NONNEG note to the branch. */
8426 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8427 REG_NOTES (tem));
8428 }
8429 bl->nonneg = 1;
8430 }
8431
8432 /* No insn may reference both the reversed and another biv or it
8433 will fail (see comment near the top of the loop reversal
8434 code).
8435 Earlier on, we have verified that the biv has no use except
8436 counting, or it is the only biv in this function.
8437 However, the code that computes no_use_except_counting does
8438 not verify reg notes. It's possible to have an insn that
8439 references another biv, and has a REG_EQUAL note with an
8440 expression based on the reversed biv. To avoid this case,
8441 remove all REG_EQUAL notes based on the reversed biv
8442 here. */
8443 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8444 if (INSN_P (p))
8445 {
8446 rtx *pnote;
8447 rtx set = single_set (p);
8448 /* If this is a set of a GIV based on the reversed biv, any
8449 REG_EQUAL notes should still be correct. */
8450 if (! set
8451 || !REG_P (SET_DEST (set))
8452 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8453 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8454 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8455 for (pnote = &REG_NOTES (p); *pnote;)
8456 {
8457 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8458 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8459 XEXP (*pnote, 0)))
8460 *pnote = XEXP (*pnote, 1);
8461 else
8462 pnote = &XEXP (*pnote, 1);
8463 }
8464 }
8465
8466 /* Mark that this biv has been reversed. Each giv which depends
8467 on this biv, and which is also live past the end of the loop
8468 will have to be fixed up. */
8469
8470 bl->reversed = 1;
8471
8472 if (loop_dump_stream)
8473 {
8474 fprintf (loop_dump_stream, "Reversed loop");
8475 if (bl->nonneg)
8476 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8477 else
8478 fprintf (loop_dump_stream, "\n");
8479 }
8480
8481 return 1;
8482 }
8483 }
8484 }
8485
8486 return 0;
8487 }
8488 \f
8489 /* Verify whether the biv BL appears to be eliminable,
8490 based on the insns in the loop that refer to it.
8491
8492 If ELIMINATE_P is nonzero, actually do the elimination.
8493
8494 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8495 determine whether invariant insns should be placed inside or at the
8496 start of the loop. */
8497
8498 static int
8499 maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl,
8500 int eliminate_p, int threshold, int insn_count)
8501 {
8502 struct loop_ivs *ivs = LOOP_IVS (loop);
8503 rtx reg = bl->biv->dest_reg;
8504 rtx p;
8505
8506 /* Scan all insns in the loop, stopping if we find one that uses the
8507 biv in a way that we cannot eliminate. */
8508
8509 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8510 {
8511 enum rtx_code code = GET_CODE (p);
8512 basic_block where_bb = 0;
8513 rtx where_insn = threshold >= insn_count ? 0 : p;
8514 rtx note;
8515
8516 /* If this is a libcall that sets a giv, skip ahead to its end. */
8517 if (INSN_P (p))
8518 {
8519 note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8520
8521 if (note)
8522 {
8523 rtx last = XEXP (note, 0);
8524 rtx set = single_set (last);
8525
8526 if (set && REG_P (SET_DEST (set)))
8527 {
8528 unsigned int regno = REGNO (SET_DEST (set));
8529
8530 if (regno < ivs->n_regs
8531 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8532 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8533 p = last;
8534 }
8535 }
8536 }
8537
8538 /* Closely examine the insn if the biv is mentioned. */
8539 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8540 && reg_mentioned_p (reg, PATTERN (p))
8541 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8542 eliminate_p, where_bb, where_insn))
8543 {
8544 if (loop_dump_stream)
8545 fprintf (loop_dump_stream,
8546 "Cannot eliminate biv %d: biv used in insn %d.\n",
8547 bl->regno, INSN_UID (p));
8548 break;
8549 }
8550
8551 /* If we are eliminating, kill REG_EQUAL notes mentioning the biv. */
8552 if (eliminate_p
8553 && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
8554 && reg_mentioned_p (reg, XEXP (note, 0)))
8555 remove_note (p, note);
8556 }
8557
8558 if (p == loop->end)
8559 {
8560 if (loop_dump_stream)
8561 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8562 bl->regno, eliminate_p ? "was" : "can be");
8563 return 1;
8564 }
8565
8566 return 0;
8567 }
8568 \f
8569 /* INSN and REFERENCE are instructions in the same insn chain.
8570 Return nonzero if INSN is first. */
8571
8572 int
8573 loop_insn_first_p (rtx insn, rtx reference)
8574 {
8575 rtx p, q;
8576
8577 for (p = insn, q = reference;;)
8578 {
8579 /* Start with test for not first so that INSN == REFERENCE yields not
8580 first. */
8581 if (q == insn || ! p)
8582 return 0;
8583 if (p == reference || ! q)
8584 return 1;
8585
8586 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8587 previous insn, hence the <= comparison below does not work if
8588 P is a note. */
8589 if (INSN_UID (p) < max_uid_for_loop
8590 && INSN_UID (q) < max_uid_for_loop
8591 && !NOTE_P (p))
8592 return INSN_LUID (p) <= INSN_LUID (q);
8593
8594 if (INSN_UID (p) >= max_uid_for_loop
8595 || NOTE_P (p))
8596 p = NEXT_INSN (p);
8597 if (INSN_UID (q) >= max_uid_for_loop)
8598 q = NEXT_INSN (q);
8599 }
8600 }
8601
8602 /* We are trying to eliminate BIV in INSN using GIV. Return nonzero if
8603 the offset that we have to take into account due to auto-increment /
8604 giv derivation is zero. */
8605 static int
8606 biv_elimination_giv_has_0_offset (struct induction *biv,
8607 struct induction *giv, rtx insn)
8608 {
8609 /* If the giv V had the auto-inc address optimization applied
8610 to it, and INSN occurs between the giv insn and the biv
8611 insn, then we'd have to adjust the value used here.
8612 This is rare, so we don't bother to make this possible. */
8613 if (giv->auto_inc_opt
8614 && ((loop_insn_first_p (giv->insn, insn)
8615 && loop_insn_first_p (insn, biv->insn))
8616 || (loop_insn_first_p (biv->insn, insn)
8617 && loop_insn_first_p (insn, giv->insn))))
8618 return 0;
8619
8620 return 1;
8621 }
8622
8623 /* If BL appears in X (part of the pattern of INSN), see if we can
8624 eliminate its use. If so, return 1. If not, return 0.
8625
8626 If BIV does not appear in X, return 1.
8627
8628 If ELIMINATE_P is nonzero, actually do the elimination.
8629 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8630 Depending on how many items have been moved out of the loop, it
8631 will either be before INSN (when WHERE_INSN is nonzero) or at the
8632 start of the loop (when WHERE_INSN is zero). */
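/* Illustrative example: a test (lt biv 10) can be rewritten in terms of a
reduced giv REG = 4*biv + BASE (BASE a symbolic or pointer value) by
computing TEM = 4*10 + BASE separately and turning the test into
(lt REG TEM); the comparison cases below perform this kind of rewrite.  */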
8633
8634 static int
8635 maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
8636 struct iv_class *bl, int eliminate_p,
8637 basic_block where_bb, rtx where_insn)
8638 {
8639 enum rtx_code code = GET_CODE (x);
8640 rtx reg = bl->biv->dest_reg;
8641 enum machine_mode mode = GET_MODE (reg);
8642 struct induction *v;
8643 rtx arg, tem;
8644 #ifdef HAVE_cc0
8645 rtx new;
8646 #endif
8647 int arg_operand;
8648 const char *fmt;
8649 int i, j;
8650
8651 switch (code)
8652 {
8653 case REG:
8654 /* If we haven't already been able to do something with this BIV,
8655 we can't eliminate it. */
8656 if (x == reg)
8657 return 0;
8658 return 1;
8659
8660 case SET:
8661 /* If this sets the BIV, it is not a problem. */
8662 if (SET_DEST (x) == reg)
8663 return 1;
8664
8665 /* If this is an insn that defines a giv, it is also ok because
8666 it will go away when the giv is reduced. */
8667 for (v = bl->giv; v; v = v->next_iv)
8668 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8669 return 1;
8670
8671 #ifdef HAVE_cc0
8672 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8673 {
8674 /* Can replace with any giv that was reduced and
8675 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8676 Require a constant for MULT_VAL, so we know it's nonzero.
8677 ??? We disable this optimization to avoid potential
8678 overflows. */
8679
8680 for (v = bl->giv; v; v = v->next_iv)
8681 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8682 && v->add_val == const0_rtx
8683 && ! v->ignore && ! v->maybe_dead && v->always_computable
8684 && v->mode == mode
8685 && 0)
8686 {
8687 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8688 continue;
8689
8690 if (! eliminate_p)
8691 return 1;
8692
8693 /* If the giv has the opposite direction of change,
8694 then reverse the comparison. */
8695 if (INTVAL (v->mult_val) < 0)
8696 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8697 const0_rtx, v->new_reg);
8698 else
8699 new = v->new_reg;
8700
8701 /* We can probably test that giv's reduced reg. */
8702 if (validate_change (insn, &SET_SRC (x), new, 0))
8703 return 1;
8704 }
8705
8706 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8707 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8708 Require a constant for MULT_VAL, so we know it's nonzero.
8709 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8710 overflow problem. */
8711
8712 for (v = bl->giv; v; v = v->next_iv)
8713 if (GET_CODE (v->mult_val) == CONST_INT
8714 && v->mult_val != const0_rtx
8715 && ! v->ignore && ! v->maybe_dead && v->always_computable
8716 && v->mode == mode
8717 && (GET_CODE (v->add_val) == SYMBOL_REF
8718 || GET_CODE (v->add_val) == LABEL_REF
8719 || GET_CODE (v->add_val) == CONST
8720 || (REG_P (v->add_val)
8721 && REG_POINTER (v->add_val))))
8722 {
8723 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8724 continue;
8725
8726 if (! eliminate_p)
8727 return 1;
8728
8729 /* If the giv has the opposite direction of change,
8730 then reverse the comparison. */
8731 if (INTVAL (v->mult_val) < 0)
8732 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8733 v->new_reg);
8734 else
8735 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8736 copy_rtx (v->add_val));
8737
8738 /* Replace biv with the giv's reduced register. */
8739 update_reg_last_use (v->add_val, insn);
8740 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8741 return 1;
8742
8743 /* Insn doesn't support that constant or invariant. Copy it
8744 into a register (it will be a loop invariant.) */
8745 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8746
8747 loop_insn_emit_before (loop, 0, where_insn,
8748 gen_move_insn (tem,
8749 copy_rtx (v->add_val)));
8750
8751 /* Substitute the new register for its invariant value in
8752 the compare expression. */
8753 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8754 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8755 return 1;
8756 }
8757 }
8758 #endif
8759 break;
8760
8761 case COMPARE:
8762 case EQ: case NE:
8763 case GT: case GE: case GTU: case GEU:
8764 case LT: case LE: case LTU: case LEU:
8765 /* See if either argument is the biv. */
8766 if (XEXP (x, 0) == reg)
8767 arg = XEXP (x, 1), arg_operand = 1;
8768 else if (XEXP (x, 1) == reg)
8769 arg = XEXP (x, 0), arg_operand = 0;
8770 else
8771 break;
8772
8773 if (CONSTANT_P (arg))
8774 {
8775 /* First try to replace with any giv that has constant positive
8776 mult_val and constant add_val. We might be able to support
8777 negative mult_val, but it seems complex to do it in general. */
8778
8779 for (v = bl->giv; v; v = v->next_iv)
8780 if (GET_CODE (v->mult_val) == CONST_INT
8781 && INTVAL (v->mult_val) > 0
8782 && (GET_CODE (v->add_val) == SYMBOL_REF
8783 || GET_CODE (v->add_val) == LABEL_REF
8784 || GET_CODE (v->add_val) == CONST
8785 || (REG_P (v->add_val)
8786 && REG_POINTER (v->add_val)))
8787 && ! v->ignore && ! v->maybe_dead && v->always_computable
8788 && v->mode == mode)
8789 {
8790 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8791 continue;
8792
8793 /* Don't eliminate if the linear combination that makes up
8794 the giv overflows when it is applied to ARG. */
8795 if (GET_CODE (arg) == CONST_INT)
8796 {
8797 rtx add_val;
8798
8799 if (GET_CODE (v->add_val) == CONST_INT)
8800 add_val = v->add_val;
8801 else
8802 add_val = const0_rtx;
8803
8804 if (const_mult_add_overflow_p (arg, v->mult_val,
8805 add_val, mode, 1))
8806 continue;
8807 }
8808
8809 if (! eliminate_p)
8810 return 1;
8811
8812 /* Replace biv with the giv's reduced reg. */
8813 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8814
8815 /* If all constants are actually constant integers and
8816 the derived constant can be directly placed in the COMPARE,
8817 do so. */
8818 if (GET_CODE (arg) == CONST_INT
8819 && GET_CODE (v->add_val) == CONST_INT)
8820 {
8821 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
8822 v->add_val, mode, 1);
8823 }
8824 else
8825 {
8826 /* Otherwise, load it into a register. */
8827 tem = gen_reg_rtx (mode);
8828 loop_iv_add_mult_emit_before (loop, arg,
8829 v->mult_val, v->add_val,
8830 tem, where_bb, where_insn);
8831 }
8832
8833 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8834
8835 if (apply_change_group ())
8836 return 1;
8837 }
8838
8839 /* Look for giv with positive constant mult_val and nonconst add_val.
8840 Insert insns to calculate new compare value.
8841 ??? Turn this off due to possible overflow. */
8842
8843 for (v = bl->giv; v; v = v->next_iv)
8844 if (GET_CODE (v->mult_val) == CONST_INT
8845 && INTVAL (v->mult_val) > 0
8846 && ! v->ignore && ! v->maybe_dead && v->always_computable
8847 && v->mode == mode
8848 && 0)
8849 {
8850 rtx tem;
8851
8852 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8853 continue;
8854
8855 if (! eliminate_p)
8856 return 1;
8857
8858 tem = gen_reg_rtx (mode);
8859
8860 /* Replace biv with giv's reduced register. */
8861 validate_change (insn, &XEXP (x, 1 - arg_operand),
8862 v->new_reg, 1);
8863
8864 /* Compute value to compare against. */
8865 loop_iv_add_mult_emit_before (loop, arg,
8866 v->mult_val, v->add_val,
8867 tem, where_bb, where_insn);
8868 /* Use it in this insn. */
8869 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8870 if (apply_change_group ())
8871 return 1;
8872 }
8873 }
8874 else if (REG_P (arg) || MEM_P (arg))
8875 {
8876 if (loop_invariant_p (loop, arg) == 1)
8877 {
8878 /* Look for giv with constant positive mult_val and nonconst
8879 add_val. Insert insns to compute new compare value.
8880 ??? Turn this off due to possible overflow. */
8881
8882 for (v = bl->giv; v; v = v->next_iv)
8883 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8884 && ! v->ignore && ! v->maybe_dead && v->always_computable
8885 && v->mode == mode
8886 && 0)
8887 {
8888 rtx tem;
8889
8890 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8891 continue;
8892
8893 if (! eliminate_p)
8894 return 1;
8895
8896 tem = gen_reg_rtx (mode);
8897
8898 /* Replace biv with giv's reduced register. */
8899 validate_change (insn, &XEXP (x, 1 - arg_operand),
8900 v->new_reg, 1);
8901
8902 /* Compute value to compare against. */
8903 loop_iv_add_mult_emit_before (loop, arg,
8904 v->mult_val, v->add_val,
8905 tem, where_bb, where_insn);
8906 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8907 if (apply_change_group ())
8908 return 1;
8909 }
8910 }
8911
8912 /* This code has problems. Basically, while deciding whether we will
8913 eliminate BL, we can't know whether a particular giv
8914 of ARG will be reduced. If it isn't going to be reduced,
8915 we can't eliminate BL. We can try forcing it to be reduced,
8916 but that can generate poor code.
8917
8918 The problem is that the benefit of reducing TV, below, should
8919 be increased if BL can actually be eliminated, but this means
8920 we might have to do a topological sort of the order in which
8921 we try to process bivs. It doesn't seem worthwhile to do
8922 this sort of thing now. */
8923
8924 #if 0
8925 /* Otherwise the reg compared with had better be a biv. */
8926 if (!REG_P (arg)
8927 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
8928 return 0;
8929
8930 /* Look for a pair of givs, one for each biv,
8931 with identical coefficients. */
8932 for (v = bl->giv; v; v = v->next_iv)
8933 {
8934 struct induction *tv;
8935
8936 if (v->ignore || v->maybe_dead || v->mode != mode)
8937 continue;
8938
8939 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
8940 tv = tv->next_iv)
8941 if (! tv->ignore && ! tv->maybe_dead
8942 && rtx_equal_p (tv->mult_val, v->mult_val)
8943 && rtx_equal_p (tv->add_val, v->add_val)
8944 && tv->mode == mode)
8945 {
8946 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8947 continue;
8948
8949 if (! eliminate_p)
8950 return 1;
8951
8952 /* Replace biv with its giv's reduced reg. */
8953 XEXP (x, 1 - arg_operand) = v->new_reg;
8954 /* Replace other operand with the other giv's
8955 reduced reg. */
8956 XEXP (x, arg_operand) = tv->new_reg;
8957 return 1;
8958 }
8959 }
8960 #endif
8961 }
8962
8963 /* If we get here, the biv can't be eliminated. */
8964 return 0;
8965
8966 case MEM:
8967 /* If this address is a DEST_ADDR giv, it doesn't matter if the
8968 biv is used in it, since it will be replaced. */
8969 for (v = bl->giv; v; v = v->next_iv)
8970 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
8971 return 1;
8972 break;
8973
8974 default:
8975 break;
8976 }
8977
8978 /* See if any subexpression fails elimination. */
8979 fmt = GET_RTX_FORMAT (code);
8980 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8981 {
8982 switch (fmt[i])
8983 {
8984 case 'e':
8985 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
8986 eliminate_p, where_bb, where_insn))
8987 return 0;
8988 break;
8989
8990 case 'E':
8991 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8992 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
8993 eliminate_p, where_bb, where_insn))
8994 return 0;
8995 break;
8996 }
8997 }
8998
8999 return 1;
9000 }
9001 \f
9002 /* Return nonzero if the last use of REG
9003 is in an insn following INSN in the same basic block. */
9004
9005 static int
9006 last_use_this_basic_block (rtx reg, rtx insn)
9007 {
9008 rtx n;
9009 for (n = insn;
9010 n && !LABEL_P (n) && !JUMP_P (n);
9011 n = NEXT_INSN (n))
9012 {
9013 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
9014 return 1;
9015 }
9016 return 0;
9017 }
9018 \f
9019 /* Called via `note_stores' to record the initial value of a biv. Here we
9020 just record the location of the set and process it later. */
9021
9022 static void
9023 record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
9024 {
9025 struct loop_ivs *ivs = (struct loop_ivs *) data;
9026 struct iv_class *bl;
9027
9028 if (!REG_P (dest)
9029 || REGNO (dest) >= ivs->n_regs
9030 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
9031 return;
9032
9033 bl = REG_IV_CLASS (ivs, REGNO (dest));
9034
9035 /* If this is the first set found, record it. */
9036 if (bl->init_insn == 0)
9037 {
9038 bl->init_insn = note_insn;
9039 bl->init_set = set;
9040 }
9041 }
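
/* Illustrative sketch, not part of the original pass: how a caller would
   typically drive record_initial above.  It is passed to note_stores
   together with the loop's ivs structure; the file-scope variable
   note_insn is assumed to identify the insn currently being scanned, as
   the "process it later" comment above describes.  The function name is
   hypothetical.  */

static void
sketch_record_biv_initial_values (rtx insn, struct loop_ivs *ivs)
{
  /* Remember which insn the recorded SETs belong to ...  */
  note_insn = insn;
  /* ... then let note_stores call record_initial for each store.  */
  note_stores (PATTERN (insn), record_initial, (void *) ivs);
}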
9042 \f
9043 /* If any of the registers in X are "old" and currently have a last use earlier
9044 than INSN, update them to have a last use of INSN. Their actual last use
9045 will be the previous insn but it will not have a valid uid_luid so we can't
9046 use it. X must be a source expression only. */
9047
9048 static void
9049 update_reg_last_use (rtx x, rtx insn)
9050 {
9051 /* Check for the case where INSN does not have a valid luid. In this case,
9052 there is no need to modify the regno_last_uid, as this can only happen
9053 when code is inserted after the loop_end to set a pseudo's final value,
9054 and hence this insn will never be the last use of x.
9055 ??? This comment is not correct. See, for example, loop_givs_reduce.
9056 This may insert an insn before another new insn. */
9057 if (REG_P (x) && REGNO (x) < max_reg_before_loop
9058 && INSN_UID (insn) < max_uid_for_loop
9059 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
9060 {
9061 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
9062 }
9063 else
9064 {
9065 int i, j;
9066 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
9067 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9068 {
9069 if (fmt[i] == 'e')
9070 update_reg_last_use (XEXP (x, i), insn);
9071 else if (fmt[i] == 'E')
9072 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9073 update_reg_last_use (XVECEXP (x, i, j), insn);
9074 }
9075 }
9076 }
9077 \f
9078 /* Given an insn INSN and condition COND, return the condition in a
9079 canonical form to simplify testing by callers. Specifically:
9080
9081 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9082 (2) Both operands will be machine operands; (cc0) will have been replaced.
9083 (3) If an operand is a constant, it will be the second operand.
9084 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9085 for GE, GEU, and LEU.
9086
9087 If the condition cannot be understood, or is an inequality floating-point
9088 comparison which needs to be reversed, 0 will be returned.
9089
9090 If REVERSE is nonzero, then reverse the condition prior to canonizing it.
9091
9092 If EARLIEST is nonzero, it is a pointer to a place where the earliest
9093 insn used in locating the condition was found. If a replacement test
9094 of the condition is desired, it should be placed in front of that
9095 insn and we will be sure that the inputs are still valid.
9096
9097 If WANT_REG is nonzero, we wish the condition to be relative to that
9098 register, if possible. Therefore, do not canonicalize the condition
9099 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
9100 to be a compare to a CC mode register.
9101
9102 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
9103 and at INSN. */
9104
9105 rtx
9106 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
9107 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
9108 {
9109 enum rtx_code code;
9110 rtx prev = insn;
9111 rtx set;
9112 rtx tem;
9113 rtx op0, op1;
9114 int reverse_code = 0;
9115 enum machine_mode mode;
9116
9117 code = GET_CODE (cond);
9118 mode = GET_MODE (cond);
9119 op0 = XEXP (cond, 0);
9120 op1 = XEXP (cond, 1);
9121
9122 if (reverse)
9123 code = reversed_comparison_code (cond, insn);
9124 if (code == UNKNOWN)
9125 return 0;
9126
9127 if (earliest)
9128 *earliest = insn;
9129
9130 /* If we are comparing a register with zero, see if the register is set
9131 in the previous insn to a COMPARE or a comparison operation. Perform
9132 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9133 in cse.c. */
9134
9135 while ((GET_RTX_CLASS (code) == RTX_COMPARE
9136 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
9137 && op1 == CONST0_RTX (GET_MODE (op0))
9138 && op0 != want_reg)
9139 {
9140 /* Set nonzero when we find something of interest. */
9141 rtx x = 0;
9142
9143 #ifdef HAVE_cc0
9144 /* If comparison with cc0, import actual comparison from compare
9145 insn. */
9146 if (op0 == cc0_rtx)
9147 {
9148 if ((prev = prev_nonnote_insn (prev)) == 0
9149 || !NONJUMP_INSN_P (prev)
9150 || (set = single_set (prev)) == 0
9151 || SET_DEST (set) != cc0_rtx)
9152 return 0;
9153
9154 op0 = SET_SRC (set);
9155 op1 = CONST0_RTX (GET_MODE (op0));
9156 if (earliest)
9157 *earliest = prev;
9158 }
9159 #endif
9160
9161 /* If this is a COMPARE, pick up the two things being compared. */
9162 if (GET_CODE (op0) == COMPARE)
9163 {
9164 op1 = XEXP (op0, 1);
9165 op0 = XEXP (op0, 0);
9166 continue;
9167 }
9168 else if (!REG_P (op0))
9169 break;
9170
9171 /* Go back to the previous insn. Stop if it is not an INSN. We also
9172 stop if it isn't a single set or if it has a REG_INC note because
9173 we don't want to bother dealing with it. */
9174
9175 if ((prev = prev_nonnote_insn (prev)) == 0
9176 || !NONJUMP_INSN_P (prev)
9177 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9178 break;
9179
9180 set = set_of (op0, prev);
9181
9182 if (set
9183 && (GET_CODE (set) != SET
9184 || !rtx_equal_p (SET_DEST (set), op0)))
9185 break;
9186
9187 /* If this is setting OP0, get what it sets it to if it looks
9188 relevant. */
9189 if (set)
9190 {
9191 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9192 #ifdef FLOAT_STORE_FLAG_VALUE
9193 REAL_VALUE_TYPE fsfv;
9194 #endif
9195
9196 /* ??? We may not combine comparisons done in a CCmode with
9197 comparisons not done in a CCmode. This is to aid targets
9198 like Alpha that have an IEEE compliant EQ instruction, and
9199 a non-IEEE compliant BEQ instruction. The use of CCmode is
9200 actually artificial, simply to prevent the combination, but
9201 should not affect other platforms.
9202
9203 However, we must allow VOIDmode comparisons to match either
9204 CCmode or non-CCmode comparison, because some ports have
9205 modeless comparisons inside branch patterns.
9206
9207 ??? This mode check should perhaps look more like the mode check
9208 in simplify_comparison in combine. */
9209
9210 if ((GET_CODE (SET_SRC (set)) == COMPARE
9211 || (((code == NE
9212 || (code == LT
9213 && GET_MODE_CLASS (inner_mode) == MODE_INT
9214 && (GET_MODE_BITSIZE (inner_mode)
9215 <= HOST_BITS_PER_WIDE_INT)
9216 && (STORE_FLAG_VALUE
9217 & ((HOST_WIDE_INT) 1
9218 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9219 #ifdef FLOAT_STORE_FLAG_VALUE
9220 || (code == LT
9221 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9222 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9223 REAL_VALUE_NEGATIVE (fsfv)))
9224 #endif
9225 ))
9226 && COMPARISON_P (SET_SRC (set))))
9227 && (((GET_MODE_CLASS (mode) == MODE_CC)
9228 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9229 || mode == VOIDmode || inner_mode == VOIDmode))
9230 x = SET_SRC (set);
9231 else if (((code == EQ
9232 || (code == GE
9233 && (GET_MODE_BITSIZE (inner_mode)
9234 <= HOST_BITS_PER_WIDE_INT)
9235 && GET_MODE_CLASS (inner_mode) == MODE_INT
9236 && (STORE_FLAG_VALUE
9237 & ((HOST_WIDE_INT) 1
9238 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9239 #ifdef FLOAT_STORE_FLAG_VALUE
9240 || (code == GE
9241 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9242 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9243 REAL_VALUE_NEGATIVE (fsfv)))
9244 #endif
9245 ))
9246 && COMPARISON_P (SET_SRC (set))
9247 && (((GET_MODE_CLASS (mode) == MODE_CC)
9248 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9249 || mode == VOIDmode || inner_mode == VOIDmode))
9250
9251 {
9252 reverse_code = 1;
9253 x = SET_SRC (set);
9254 }
9255 else
9256 break;
9257 }
9258
9259 else if (reg_set_p (op0, prev))
9260 /* If this sets OP0, but not directly, we have to give up. */
9261 break;
9262
9263 if (x)
9264 {
9265 /* If the caller is expecting the condition to be valid at INSN,
9266 make sure X doesn't change before INSN. */
9267 if (valid_at_insn_p)
9268 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
9269 break;
9270 if (COMPARISON_P (x))
9271 code = GET_CODE (x);
9272 if (reverse_code)
9273 {
9274 code = reversed_comparison_code (x, prev);
9275 if (code == UNKNOWN)
9276 return 0;
9277 reverse_code = 0;
9278 }
9279
9280 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9281 if (earliest)
9282 *earliest = prev;
9283 }
9284 }
9285
9286 /* If constant is first, put it last. */
9287 if (CONSTANT_P (op0))
9288 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9289
9290 /* If OP0 is the result of a comparison, we weren't able to find what
9291 was really being compared, so fail. */
9292 if (!allow_cc_mode
9293 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9294 return 0;
9295
9296 /* Canonicalize any ordered comparison with integers involving equality
9297 if we can do computations in the relevant mode and we do not
9298 overflow. */
9299
9300 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
9301 && GET_CODE (op1) == CONST_INT
9302 && GET_MODE (op0) != VOIDmode
9303 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9304 {
9305 HOST_WIDE_INT const_val = INTVAL (op1);
9306 unsigned HOST_WIDE_INT uconst_val = const_val;
9307 unsigned HOST_WIDE_INT max_val
9308 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9309
9310 switch (code)
9311 {
9312 case LE:
9313 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9314 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
9315 break;
9316
9317 /* When cross-compiling, const_val might be sign-extended from
9318 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
9319 case GE:
9320 if ((HOST_WIDE_INT) (const_val & max_val)
9321 != (((HOST_WIDE_INT) 1
9322 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9323 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
9324 break;
9325
9326 case LEU:
9327 if (uconst_val < max_val)
9328 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
9329 break;
9330
9331 case GEU:
9332 if (uconst_val != 0)
9333 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
9334 break;
9335
9336 default:
9337 break;
9338 }
9339 }
9340
9341 /* Never return CC0; return zero instead. */
9342 if (CC0_P (op0))
9343 return 0;
9344
9345 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
9346 }
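
/* Illustrative sketch, not part of the original code: the constant
   adjustment that rule (4) in the comment before canonicalize_condition
   performs for the unsigned cases, shown in isolation.  OP1 is assumed
   to be a CONST_INT that already fits in the mode of OP0; the function
   name is hypothetical.  */

static rtx
sketch_canonicalize_unsigned (enum rtx_code *code, rtx op0, rtx op1)
{
  unsigned HOST_WIDE_INT uconst_val = INTVAL (op1);
  unsigned HOST_WIDE_INT max_val
    = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

  /* (LEU x 7) becomes (LTU x 8), unless 7 is already the mode maximum.  */
  if (*code == LEU && uconst_val < max_val)
    {
      *code = LTU;
      op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
    }
  /* (GEU x 7) becomes (GTU x 6), unless the constant is zero.  */
  else if (*code == GEU && uconst_val != 0)
    {
      *code = GTU;
      op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
    }
  return op1;
}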
9347
9348 /* Given a jump insn JUMP, return the condition that will cause it to branch
9349 to its JUMP_LABEL. If the condition cannot be understood, or is an
9350 inequality floating-point comparison which needs to be reversed, 0 will
9351 be returned.
9352
9353 If EARLIEST is nonzero, it is a pointer to a place where the earliest
9354 insn used in locating the condition was found. If a replacement test
9355 of the condition is desired, it should be placed in front of that
9356 insn and we will be sure that the inputs are still valid. If EARLIEST
9357 is null, the returned condition will be valid at INSN.
9358
9359 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
9360 compare CC mode register.
9361
9362 VALID_AT_INSN_P is the same as for canonicalize_condition. */
9363
9364 rtx
9365 get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
9366 {
9367 rtx cond;
9368 int reverse;
9369 rtx set;
9370
9371 /* If this is not a standard conditional jump, we can't parse it. */
9372 if (!JUMP_P (jump)
9373 || ! any_condjump_p (jump))
9374 return 0;
9375 set = pc_set (jump);
9376
9377 cond = XEXP (SET_SRC (set), 0);
9378
9379 /* If this branches to JUMP_LABEL when the condition is false, reverse
9380 the condition. */
9381 reverse
9382 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9383 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9384
9385 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
9386 allow_cc_mode, valid_at_insn_p);
9387 }
9388
9389 /* Similar to above routine, except that we also put an invariant last
9390 unless both operands are invariants. */
9391
9392 rtx
9393 get_condition_for_loop (const struct loop *loop, rtx x)
9394 {
9395 rtx comparison = get_condition (x, (rtx*) 0, false, true);
9396
9397 if (comparison == 0
9398 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9399 || loop_invariant_p (loop, XEXP (comparison, 1)))
9400 return comparison;
9401
9402 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9403 XEXP (comparison, 1), XEXP (comparison, 0));
9404 }
9405
9406 /* Scan the function and determine whether it has indirect (computed) jumps.
9407
9408 This is taken mostly from flow.c; similar code exists elsewhere
9409 in the compiler. It may be useful to put this into rtlanal.c. */
9410 static int
9411 indirect_jump_in_function_p (rtx start)
9412 {
9413 rtx insn;
9414
9415 for (insn = start; insn; insn = NEXT_INSN (insn))
9416 if (computed_jump_p (insn))
9417 return 1;
9418
9419 return 0;
9420 }
9421
9422 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9423 documentation for LOOP_MEMS for the definition of `appropriate'.
9424 This function is called from prescan_loop via for_each_rtx. */
9425
9426 static int
9427 insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED)
9428 {
9429 struct loop_info *loop_info = data;
9430 int i;
9431 rtx m = *mem;
9432
9433 if (m == NULL_RTX)
9434 return 0;
9435
9436 switch (GET_CODE (m))
9437 {
9438 case MEM:
9439 break;
9440
9441 case CLOBBER:
9442 /* We're not interested in MEMs that are only clobbered. */
9443 return -1;
9444
9445 case CONST_DOUBLE:
9446 /* We're not interested in the MEM associated with a
9447 CONST_DOUBLE, so there's no need to traverse into this. */
9448 return -1;
9449
9450 case EXPR_LIST:
9451 /* We're not interested in any MEMs that only appear in notes. */
9452 return -1;
9453
9454 default:
9455 /* This is not a MEM. */
9456 return 0;
9457 }
9458
9459 /* See if we've already seen this MEM. */
9460 for (i = 0; i < loop_info->mems_idx; ++i)
9461 if (rtx_equal_p (m, loop_info->mems[i].mem))
9462 {
9463 if (MEM_VOLATILE_P (m) && !MEM_VOLATILE_P (loop_info->mems[i].mem))
9464 loop_info->mems[i].mem = m;
9465 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9466 /* The modes of the two memory accesses are different. If
9467 this happens, something tricky is going on, and we just
9468 don't optimize accesses to this MEM. */
9469 loop_info->mems[i].optimize = 0;
9470
9471 return 0;
9472 }
9473
9474 /* Resize the array, if necessary. */
9475 if (loop_info->mems_idx == loop_info->mems_allocated)
9476 {
9477 if (loop_info->mems_allocated != 0)
9478 loop_info->mems_allocated *= 2;
9479 else
9480 loop_info->mems_allocated = 32;
9481
9482 loop_info->mems = xrealloc (loop_info->mems,
9483 loop_info->mems_allocated * sizeof (loop_mem_info));
9484 }
9485
9486 /* Actually insert the MEM. */
9487 loop_info->mems[loop_info->mems_idx].mem = m;
9488 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9489 because we can't put it in a register. We still store it in the
9490 table, though, so that if we see the same address later, but in a
9491 non-BLK mode, we'll not think we can optimize it at that point. */
9492 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9493 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9494 ++loop_info->mems_idx;
9495
9496 return 0;
9497 }
9498
9499
9500 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9501
9502 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9503 register that is modified by an insn between FROM and TO. If the
9504 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9505 more, stop incrementing it, to avoid overflow.
9506
9507 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9508 register I is used, if it is only used once. Otherwise, it is set
9509 to 0 (for no uses) or const0_rtx for more than one use. This
9510 parameter may be zero, in which case this processing is not done.
9511
9512 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9513 optimize register I. */
9514
9515 static void
9516 loop_regs_scan (const struct loop *loop, int extra_size)
9517 {
9518 struct loop_regs *regs = LOOP_REGS (loop);
9519 int old_nregs;
9520 /* last_set[n] is nonzero iff reg n has been set in the current
9521 basic block. In that case, it is the insn that last set reg n. */
9522 rtx *last_set;
9523 rtx insn;
9524 int i;
9525
9526 old_nregs = regs->num;
9527 regs->num = max_reg_num ();
9528
9529 /* Grow the regs array if not allocated or too small. */
9530 if (regs->num >= regs->size)
9531 {
9532 regs->size = regs->num + extra_size;
9533
9534 regs->array = xrealloc (regs->array, regs->size * sizeof (*regs->array));
9535
9536 /* Zero the new elements. */
9537 memset (regs->array + old_nregs, 0,
9538 (regs->size - old_nregs) * sizeof (*regs->array));
9539 }
9540
9541 /* Clear previously scanned fields but do not clear n_times_set. */
9542 for (i = 0; i < old_nregs; i++)
9543 {
9544 regs->array[i].set_in_loop = 0;
9545 regs->array[i].may_not_optimize = 0;
9546 regs->array[i].single_usage = NULL_RTX;
9547 }
9548
9549 last_set = xcalloc (regs->num, sizeof (rtx));
9550
9551 /* Scan the loop, recording register usage. */
9552 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9553 insn = NEXT_INSN (insn))
9554 {
9555 if (INSN_P (insn))
9556 {
9557 /* Record registers that have exactly one use. */
9558 find_single_use_in_loop (regs, insn, PATTERN (insn));
9559
9560 /* Include uses in REG_EQUAL notes. */
9561 if (REG_NOTES (insn))
9562 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9563
9564 if (GET_CODE (PATTERN (insn)) == SET
9565 || GET_CODE (PATTERN (insn)) == CLOBBER)
9566 count_one_set (regs, insn, PATTERN (insn), last_set);
9567 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9568 {
9569 int i;
9570 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9571 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9572 last_set);
9573 }
9574 }
9575
9576 if (LABEL_P (insn) || JUMP_P (insn))
9577 memset (last_set, 0, regs->num * sizeof (rtx));
9578
9579 /* Invalidate all registers used for function argument passing.
9580 We check rtx_varies_p for the same reason as below, to allow
9581 optimizing PIC calculations. */
9582 if (CALL_P (insn))
9583 {
9584 rtx link;
9585 for (link = CALL_INSN_FUNCTION_USAGE (insn);
9586 link;
9587 link = XEXP (link, 1))
9588 {
9589 rtx op, reg;
9590
9591 if (GET_CODE (op = XEXP (link, 0)) == USE
9592 && REG_P (reg = XEXP (op, 0))
9593 && rtx_varies_p (reg, 1))
9594 regs->array[REGNO (reg)].may_not_optimize = 1;
9595 }
9596 }
9597 }
9598
9599 /* Invalidate all hard registers clobbered by calls. With one exception:
9600 a call-clobbered PIC register is still function-invariant for our
9601 purposes, since we can hoist any PIC calculations out of the loop.
9602 Thus the call to rtx_varies_p. */
9603 if (LOOP_INFO (loop)->has_call)
9604 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9605 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9606 && rtx_varies_p (regno_reg_rtx[i], 1))
9607 {
9608 regs->array[i].may_not_optimize = 1;
9609 regs->array[i].set_in_loop = 1;
9610 }
9611
9612 #ifdef AVOID_CCMODE_COPIES
9613 /* Don't try to move insns which set CC registers if we should not
9614 create CCmode register copies. */
9615 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9616 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9617 regs->array[i].may_not_optimize = 1;
9618 #endif
9619
9620 /* Set regs->array[I].n_times_set for the new registers. */
9621 for (i = old_nregs; i < regs->num; i++)
9622 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9623
9624 free (last_set);
9625 }
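
/* Illustrative sketch, not part of the original pass: the saturating
   update convention for SET_IN_LOOP described in the comment before
   loop_regs_scan, shown on its own.  The real bookkeeping is done by
   count_one_set; this helper name is hypothetical.  */

static void
sketch_note_reg_set (struct loop_regs *regs, unsigned int regno)
{
  /* Stop counting at 127 so the counter cannot overflow.  */
  if (regs->array[regno].set_in_loop < 127)
    regs->array[regno].set_in_loop++;
}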
9626
9627 /* Returns the number of real INSNs in the LOOP. */
9628
9629 static int
9630 count_insns_in_loop (const struct loop *loop)
9631 {
9632 int count = 0;
9633 rtx insn;
9634
9635 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9636 insn = NEXT_INSN (insn))
9637 if (INSN_P (insn))
9638 ++count;
9639
9640 return count;
9641 }
9642
9643 /* Move MEMs into registers for the duration of the loop. */
9644
9645 static void
9646 load_mems (const struct loop *loop)
9647 {
9648 struct loop_info *loop_info = LOOP_INFO (loop);
9649 struct loop_regs *regs = LOOP_REGS (loop);
9650 int maybe_never = 0;
9651 int i;
9652 rtx p, prev_ebb_head;
9653 rtx label = NULL_RTX;
9654 rtx end_label;
9655 /* Nonzero if the next instruction may never be executed. */
9656 int next_maybe_never = 0;
9657 unsigned int last_max_reg = max_reg_num ();
9658
9659 if (loop_info->mems_idx == 0)
9660 return;
9661
9662 /* We cannot use next_label here because it skips over normal insns. */
9663 end_label = next_nonnote_insn (loop->end);
9664 if (end_label && !LABEL_P (end_label))
9665 end_label = NULL_RTX;
9666
9667 /* Check to see if it's possible that some instructions in the loop are
9668 never executed. Also check if there is a goto out of the loop other
9669 than right after the end of the loop. */
9670 for (p = next_insn_in_loop (loop, loop->scan_start);
9671 p != NULL_RTX;
9672 p = next_insn_in_loop (loop, p))
9673 {
9674 if (LABEL_P (p))
9675 maybe_never = 1;
9676 else if (JUMP_P (p)
9677 /* If we enter the loop in the middle, and scan
9678 around to the beginning, don't set maybe_never
9679 for that. This must be an unconditional jump,
9680 otherwise the code at the top of the loop might
9681 never be executed. Unconditional jumps are
9682 followed by a barrier and then the loop end. */
9683 && ! (JUMP_P (p)
9684 && JUMP_LABEL (p) == loop->top
9685 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9686 && any_uncondjump_p (p)))
9687 {
9688 /* If this is a jump outside of the loop but not right
9689 after the end of the loop, we would have to emit new fixup
9690 sequences for each such label. */
9691 if (/* If we can't tell where control might go when this
9692 JUMP_INSN is executed, we must be conservative. */
9693 !JUMP_LABEL (p)
9694 || (JUMP_LABEL (p) != end_label
9695 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9696 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9697 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9698 return;
9699
9700 if (!any_condjump_p (p))
9701 /* Something complicated. */
9702 maybe_never = 1;
9703 else
9704 /* If there are any more instructions in the loop, they
9705 might not be reached. */
9706 next_maybe_never = 1;
9707 }
9708 else if (next_maybe_never)
9709 maybe_never = 1;
9710 }
9711
9712 /* Find start of the extended basic block that enters the loop. */
9713 for (p = loop->start;
9714 PREV_INSN (p) && !LABEL_P (p);
9715 p = PREV_INSN (p))
9716 ;
9717 prev_ebb_head = p;
9718
9719 cselib_init (true);
9720
9721 /* Build table of mems that get set to constant values before the
9722 loop. */
9723 for (; p != loop->start; p = NEXT_INSN (p))
9724 cselib_process_insn (p);
9725
9726 /* Actually move the MEMs. */
9727 for (i = 0; i < loop_info->mems_idx; ++i)
9728 {
9729 regset_head load_copies;
9730 regset_head store_copies;
9731 int written = 0;
9732 rtx reg;
9733 rtx mem = loop_info->mems[i].mem;
9734 rtx mem_list_entry;
9735
9736 if (MEM_VOLATILE_P (mem)
9737 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9738 /* There's no telling whether or not MEM is modified. */
9739 loop_info->mems[i].optimize = 0;
9740
9741 /* Go through the MEMs written to in the loop to see if this
9742 one is aliased by one of them. */
9743 mem_list_entry = loop_info->store_mems;
9744 while (mem_list_entry)
9745 {
9746 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9747 written = 1;
9748 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9749 mem, rtx_varies_p))
9750 {
9751 /* MEM is indeed aliased by this store. */
9752 loop_info->mems[i].optimize = 0;
9753 break;
9754 }
9755 mem_list_entry = XEXP (mem_list_entry, 1);
9756 }
9757
9758 if (flag_float_store && written
9759 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9760 loop_info->mems[i].optimize = 0;
9761
9762 /* If this MEM is written to, we must be sure that there
9763 are no reads from another MEM that aliases this one. */
9764 if (loop_info->mems[i].optimize && written)
9765 {
9766 int j;
9767
9768 for (j = 0; j < loop_info->mems_idx; ++j)
9769 {
9770 if (j == i)
9771 continue;
9772 else if (true_dependence (mem,
9773 VOIDmode,
9774 loop_info->mems[j].mem,
9775 rtx_varies_p))
9776 {
9777 /* It's not safe to hoist loop_info->mems[i] out of
9778 the loop because writes to it might not be
9779 seen by reads from loop_info->mems[j]. */
9780 loop_info->mems[i].optimize = 0;
9781 break;
9782 }
9783 }
9784 }
9785
9786 if (maybe_never && may_trap_p (mem))
9787 /* We can't access the MEM outside the loop; it might
9788 cause a trap that wouldn't have happened otherwise. */
9789 loop_info->mems[i].optimize = 0;
9790
9791 if (!loop_info->mems[i].optimize)
9792 /* We thought we were going to lift this MEM out of the
9793 loop, but later discovered that we could not. */
9794 continue;
9795
9796 INIT_REG_SET (&load_copies);
9797 INIT_REG_SET (&store_copies);
9798
9799 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9800 order to keep scan_loop from moving stores to this MEM
9801 out of the loop just because this REG is neither a
9802 user-variable nor used in the loop test. */
9803 reg = gen_reg_rtx (GET_MODE (mem));
9804 REG_USERVAR_P (reg) = 1;
9805 loop_info->mems[i].reg = reg;
9806
9807 /* Now, replace all references to the MEM with the
9808 corresponding pseudos. */
9809 maybe_never = 0;
9810 for (p = next_insn_in_loop (loop, loop->scan_start);
9811 p != NULL_RTX;
9812 p = next_insn_in_loop (loop, p))
9813 {
9814 if (INSN_P (p))
9815 {
9816 rtx set;
9817
9818 set = single_set (p);
9819
9820 /* See if this copies the mem into a register that isn't
9821 modified afterwards. We'll try to do copy propagation
9822 a little further on. */
9823 if (set
9824 /* @@@ This test is _way_ too conservative. */
9825 && ! maybe_never
9826 && REG_P (SET_DEST (set))
9827 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9828 && REGNO (SET_DEST (set)) < last_max_reg
9829 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9830 && rtx_equal_p (SET_SRC (set), mem))
9831 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9832
9833 /* See if this copies the mem from a register that isn't
9834 modified afterwards. We'll try to remove the
9835 redundant copy later on by doing a little register
9836 renaming and copy propagation. This will help
9837 to untangle things for the BIV detection code. */
9838 if (set
9839 && ! maybe_never
9840 && REG_P (SET_SRC (set))
9841 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9842 && REGNO (SET_SRC (set)) < last_max_reg
9843 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9844 && rtx_equal_p (SET_DEST (set), mem))
9845 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9846
9847 /* If this is a call which uses / clobbers this memory
9848 location, we must not change the interface here. */
9849 if (CALL_P (p)
9850 && reg_mentioned_p (loop_info->mems[i].mem,
9851 CALL_INSN_FUNCTION_USAGE (p)))
9852 {
9853 cancel_changes (0);
9854 loop_info->mems[i].optimize = 0;
9855 break;
9856 }
9857 else
9858 /* Replace the memory reference with the shadow register. */
9859 replace_loop_mems (p, loop_info->mems[i].mem,
9860 loop_info->mems[i].reg, written);
9861 }
9862
9863 if (LABEL_P (p)
9864 || JUMP_P (p))
9865 maybe_never = 1;
9866 }
9867
9868 if (! loop_info->mems[i].optimize)
9869 ; /* We found we couldn't do the replacement, so do nothing. */
9870 else if (! apply_change_group ())
9871 /* We couldn't replace all occurrences of the MEM. */
9872 loop_info->mems[i].optimize = 0;
9873 else
9874 {
9875 /* Load the memory immediately before LOOP->START, which is
9876 the NOTE_LOOP_BEG. */
9877 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9878 rtx set;
9879 rtx best = mem;
9880 int j;
9881 struct elt_loc_list *const_equiv = 0;
9882
9883 if (e)
9884 {
9885 struct elt_loc_list *equiv;
9886 struct elt_loc_list *best_equiv = 0;
9887 for (equiv = e->locs; equiv; equiv = equiv->next)
9888 {
9889 if (CONSTANT_P (equiv->loc))
9890 const_equiv = equiv;
9891 else if (REG_P (equiv->loc)
9892 /* Extending hard register lifetimes causes a crash
9893 on SRC targets. Doing so on non-SRC targets is
9894 probably also not a good idea, since we most
9895 probably have a pseudo-register equivalence as
9896 well. */
9897 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9898 best_equiv = equiv;
9899 }
9900 /* Use the constant equivalence if that is cheap enough. */
9901 if (! best_equiv)
9902 best_equiv = const_equiv;
9903 else if (const_equiv
9904 && (rtx_cost (const_equiv->loc, SET)
9905 <= rtx_cost (best_equiv->loc, SET)))
9906 {
9907 best_equiv = const_equiv;
9908 const_equiv = 0;
9909 }
9910
9911 /* If best_equiv is nonzero, we know that MEM is set to a
9912 constant or register before the loop. We will use this
9913 knowledge to initialize the shadow register with that
9914 constant or reg rather than by loading from MEM. */
9915 if (best_equiv)
9916 best = copy_rtx (best_equiv->loc);
9917 }
9918
9919 set = gen_move_insn (reg, best);
9920 set = loop_insn_hoist (loop, set);
9921 if (REG_P (best))
9922 {
9923 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9924 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9925 {
9926 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9927 break;
9928 }
9929 }
9930
9931 if (const_equiv)
9932 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
9933
9934 if (written)
9935 {
9936 if (label == NULL_RTX)
9937 {
9938 label = gen_label_rtx ();
9939 emit_label_after (label, loop->end);
9940 }
9941
9942 /* Store the memory immediately after END, which is
9943 the NOTE_LOOP_END. */
9944 set = gen_move_insn (copy_rtx (mem), reg);
9945 loop_insn_emit_after (loop, 0, label, set);
9946 }
9947
9948 if (loop_dump_stream)
9949 {
9950 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
9951 REGNO (reg), (written ? "r/w" : "r/o"));
9952 print_rtl (loop_dump_stream, mem);
9953 fputc ('\n', loop_dump_stream);
9954 }
9955
9956 /* Attempt a bit of copy propagation. This helps untangle the
9957 data flow, and enables {basic,general}_induction_var to find
9958 more bivs/givs. */
9959 EXECUTE_IF_SET_IN_REG_SET
9960 (&load_copies, FIRST_PSEUDO_REGISTER, j,
9961 {
9962 try_copy_prop (loop, reg, j);
9963 });
9964 CLEAR_REG_SET (&load_copies);
9965
9966 EXECUTE_IF_SET_IN_REG_SET
9967 (&store_copies, FIRST_PSEUDO_REGISTER, j,
9968 {
9969 try_swap_copy_prop (loop, reg, j);
9970 });
9971 CLEAR_REG_SET (&store_copies);
9972 }
9973 }
9974
9975 /* Now, we need to replace all references to the previous exit
9976 label with the new one. */
9977 if (label != NULL_RTX && end_label != NULL_RTX)
9978 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
9979 if (JUMP_P (p) && JUMP_LABEL (p) == end_label)
9980 redirect_jump (p, label, false);
9981
9982 cselib_finish ();
9983 }
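
/* Illustrative sketch, not part of the original pass: the shape of the
   replacement load_mems performs for a read/write MEM, using the emit
   helpers defined later in this file.  REG, MEM and LABEL are assumed
   to have been set up as in load_mems above; the function name is
   hypothetical.  */

static void
sketch_hoist_mem (const struct loop *loop, rtx reg, rtx mem, rtx label)
{
  /* Load the shadow register once before the loop ...  */
  loop_insn_hoist (loop, gen_move_insn (reg, copy_rtx (mem)));

  /* ... and, because the MEM is also written inside the loop, store
     the register back once after the loop end.  */
  loop_insn_emit_after (loop, 0, label, gen_move_insn (copy_rtx (mem), reg));
}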
9984
9985 /* For communication between note_reg_stored and its caller. */
9986 struct note_reg_stored_arg
9987 {
9988 int set_seen;
9989 rtx reg;
9990 };
9991
9992 /* Called via note_stores, record in SET_SEEN whether X, which is written,
9993 is equal to ARG. */
9994 static void
9995 note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg)
9996 {
9997 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
9998 if (t->reg == x)
9999 t->set_seen = 1;
10000 }
10001
10002 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
10003 There must be exactly one insn that sets this pseudo; it will be
10004 deleted if all replacements succeed and we can prove that the register
10005 is not used after the loop. */
10006
10007 static void
10008 try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
10009 {
10010 /* This is the reg that we are copying from. */
10011 rtx reg_rtx = regno_reg_rtx[regno];
10012 rtx init_insn = 0;
10013 rtx insn;
10014 /* These help keep track of whether we replaced all uses of the reg. */
10015 int replaced_last = 0;
10016 int store_is_first = 0;
10017
10018 for (insn = next_insn_in_loop (loop, loop->scan_start);
10019 insn != NULL_RTX;
10020 insn = next_insn_in_loop (loop, insn))
10021 {
10022 rtx set;
10023
10024 /* Only substitute within one extended basic block from the initializing
10025 insn. */
10026 if (LABEL_P (insn) && init_insn)
10027 break;
10028
10029 if (! INSN_P (insn))
10030 continue;
10031
10032 /* Is this the initializing insn? */
10033 set = single_set (insn);
10034 if (set
10035 && REG_P (SET_DEST (set))
10036 && REGNO (SET_DEST (set)) == regno)
10037 {
10038 gcc_assert (!init_insn);
10039
10040 init_insn = insn;
10041 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
10042 store_is_first = 1;
10043 }
10044
10045 /* Only substitute after seeing the initializing insn. */
10046 if (init_insn && insn != init_insn)
10047 {
10048 struct note_reg_stored_arg arg;
10049
10050 replace_loop_regs (insn, reg_rtx, replacement);
10051 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
10052 replaced_last = 1;
10053
10054 /* Stop replacing when REPLACEMENT is modified. */
10055 arg.reg = replacement;
10056 arg.set_seen = 0;
10057 note_stores (PATTERN (insn), note_reg_stored, &arg);
10058 if (arg.set_seen)
10059 {
10060 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
10061
10062 /* It is possible that we've turned a previously valid REG_EQUAL note
10063 invalid: we changed REGNO to REPLACEMENT, and since REPLACEMENT,
10064 unlike REGNO, is modified here, the note takes on a different meaning. */
10065 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
10066 remove_note (insn, note);
10067 break;
10068 }
10069 }
10070 }
10071 gcc_assert (init_insn);
10072 if (apply_change_group ())
10073 {
10074 if (loop_dump_stream)
10075 fprintf (loop_dump_stream, " Replaced reg %d", regno);
10076 if (store_is_first && replaced_last)
10077 {
10078 rtx first;
10079 rtx retval_note;
10080
10081 /* Assume we're just deleting INIT_INSN. */
10082 first = init_insn;
10083 /* Look for REG_RETVAL note. If we're deleting the end of
10084 the libcall sequence, the whole sequence can go. */
10085 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10086 /* If we found a REG_RETVAL note, find the first instruction
10087 in the sequence. */
10088 if (retval_note)
10089 first = XEXP (retval_note, 0);
10090
10091 /* Delete the instructions. */
10092 loop_delete_insns (first, init_insn);
10093 }
10094 if (loop_dump_stream)
10095 fprintf (loop_dump_stream, ".\n");
10096 }
10097 }
10098
10099 /* Replace all the instructions from FIRST up to and including LAST
10100 with NOTE_INSN_DELETED notes. */
10101
10102 static void
10103 loop_delete_insns (rtx first, rtx last)
10104 {
10105 while (1)
10106 {
10107 if (loop_dump_stream)
10108 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10109 INSN_UID (first));
10110 delete_insn (first);
10111
10112 /* If this was the LAST instruction we're supposed to delete,
10113 we're done. */
10114 if (first == last)
10115 break;
10116
10117 first = NEXT_INSN (first);
10118 }
10119 }
10120
10121 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10122 loop LOOP if the order of the sets of these registers can be
10123 swapped. There must be exactly one insn within the loop that sets
10124 this pseudo followed immediately by a move insn that sets
10125 REPLACEMENT with REGNO. */
10126 static void
10127 try_swap_copy_prop (const struct loop *loop, rtx replacement,
10128 unsigned int regno)
10129 {
10130 rtx insn;
10131 rtx set = NULL_RTX;
10132 unsigned int new_regno;
10133
10134 new_regno = REGNO (replacement);
10135
10136 for (insn = next_insn_in_loop (loop, loop->scan_start);
10137 insn != NULL_RTX;
10138 insn = next_insn_in_loop (loop, insn))
10139 {
10140 /* Search for the insn that copies REGNO to NEW_REGNO. */
10141 if (INSN_P (insn)
10142 && (set = single_set (insn))
10143 && REG_P (SET_DEST (set))
10144 && REGNO (SET_DEST (set)) == new_regno
10145 && REG_P (SET_SRC (set))
10146 && REGNO (SET_SRC (set)) == regno)
10147 break;
10148 }
10149
10150 if (insn != NULL_RTX)
10151 {
10152 rtx prev_insn;
10153 rtx prev_set;
10154
10155 /* Some DEF-USE info would come in handy here to make this
10156 function more general. For now, just check the previous insn
10157 which is the most likely candidate for setting REGNO. */
10158
10159 prev_insn = PREV_INSN (insn);
10160
10161 if (INSN_P (insn)
10162 && (prev_set = single_set (prev_insn))
10163 && REG_P (SET_DEST (prev_set))
10164 && REGNO (SET_DEST (prev_set)) == regno)
10165 {
10166 /* We have:
10167 (set (reg regno) (expr))
10168 (set (reg new_regno) (reg regno))
10169
10170 so try converting this to:
10171 (set (reg new_regno) (expr))
10172 (set (reg regno) (reg new_regno))
10173
10174 The former construct is often generated when a global
10175 variable used for an induction variable is shadowed by a
10176 register (NEW_REGNO). The latter construct improves the
10177 chances of GIV replacement and BIV elimination. */
10178
10179 validate_change (prev_insn, &SET_DEST (prev_set),
10180 replacement, 1);
10181 validate_change (insn, &SET_DEST (set),
10182 SET_SRC (set), 1);
10183 validate_change (insn, &SET_SRC (set),
10184 replacement, 1);
10185
10186 if (apply_change_group ())
10187 {
10188 if (loop_dump_stream)
10189 fprintf (loop_dump_stream,
10190 " Swapped set of reg %d at %d with reg %d at %d.\n",
10191 regno, INSN_UID (insn),
10192 new_regno, INSN_UID (prev_insn));
10193
10194 /* Update first use of REGNO. */
10195 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10196 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10197
10198 /* Now perform copy propagation to hopefully
10199 remove all uses of REGNO within the loop. */
10200 try_copy_prop (loop, replacement, regno);
10201 }
10202 }
10203 }
10204 }
10205
10206 /* Worker function for find_mem_in_note, called via for_each_rtx. */
10207
10208 static int
10209 find_mem_in_note_1 (rtx *x, void *data)
10210 {
10211 if (*x != NULL_RTX && MEM_P (*x))
10212 {
10213 rtx *res = (rtx *) data;
10214 *res = *x;
10215 return 1;
10216 }
10217 return 0;
10218 }
10219
10220 /* Returns the first MEM found in NOTE by depth-first search. */
10221
10222 static rtx
10223 find_mem_in_note (rtx note)
10224 {
10225 if (note && for_each_rtx (&note, find_mem_in_note_1, &note))
10226 return note;
10227 return NULL_RTX;
10228 }
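
/* Illustrative sketch, not part of the original pass: the for_each_rtx
   callback contract that the workers above rely on.  Returning 0
   continues the traversal, a negative value skips the sub-rtxes of *X,
   and a positive value stops the walk and is returned to the caller.
   The counter callback below is hypothetical.  */

static int
sketch_count_mems (rtx *x, void *data)
{
  int *count = (int *) data;

  if (*x != NULL_RTX && MEM_P (*x))
    {
      ++*count;
      /* Do not traverse into the address of this MEM.  */
      return -1;
    }
  return 0;
}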
10229
10230 /* Replace MEM with its associated pseudo register. This function is
10231 called from load_mems via for_each_rtx. DATA is actually a pointer
10232 to a structure describing the instruction currently being scanned
10233 and the MEM we are currently replacing. */
10234
10235 static int
10236 replace_loop_mem (rtx *mem, void *data)
10237 {
10238 loop_replace_args *args = (loop_replace_args *) data;
10239 rtx m = *mem;
10240
10241 if (m == NULL_RTX)
10242 return 0;
10243
10244 switch (GET_CODE (m))
10245 {
10246 case MEM:
10247 break;
10248
10249 case CONST_DOUBLE:
10250 /* We're not interested in the MEM associated with a
10251 CONST_DOUBLE, so there's no need to traverse into one. */
10252 return -1;
10253
10254 default:
10255 /* This is not a MEM. */
10256 return 0;
10257 }
10258
10259 if (!rtx_equal_p (args->match, m))
10260 /* This is not the MEM we are currently replacing. */
10261 return 0;
10262
10263 /* Actually replace the MEM. */
10264 validate_change (args->insn, mem, args->replacement, 1);
10265
10266 return 0;
10267 }
10268
10269 static void
10270 replace_loop_mems (rtx insn, rtx mem, rtx reg, int written)
10271 {
10272 loop_replace_args args;
10273
10274 args.insn = insn;
10275 args.match = mem;
10276 args.replacement = reg;
10277
10278 for_each_rtx (&insn, replace_loop_mem, &args);
10279
10280 /* If we hoist a mem write out of the loop, then REG_EQUAL
10281 notes referring to the mem are no longer valid. */
10282 if (written)
10283 {
10284 rtx note, sub;
10285 rtx *link;
10286
10287 for (link = &REG_NOTES (insn); (note = *link); link = &XEXP (note, 1))
10288 {
10289 if (REG_NOTE_KIND (note) == REG_EQUAL
10290 && (sub = find_mem_in_note (note))
10291 && true_dependence (mem, VOIDmode, sub, rtx_varies_p))
10292 {
10293 /* Remove the note. */
10294 validate_change (NULL_RTX, link, XEXP (note, 1), 1);
10295 break;
10296 }
10297 }
10298 }
10299 }
10300
10301 /* Replace one register with another. Called through for_each_rtx; PX points
10302 to the rtx being scanned. DATA is actually a pointer to
10303 a structure of arguments. */
10304
10305 static int
10306 replace_loop_reg (rtx *px, void *data)
10307 {
10308 rtx x = *px;
10309 loop_replace_args *args = (loop_replace_args *) data;
10310
10311 if (x == NULL_RTX)
10312 return 0;
10313
10314 if (x == args->match)
10315 validate_change (args->insn, px, args->replacement, 1);
10316
10317 return 0;
10318 }
10319
10320 static void
10321 replace_loop_regs (rtx insn, rtx reg, rtx replacement)
10322 {
10323 loop_replace_args args;
10324
10325 args.insn = insn;
10326 args.match = reg;
10327 args.replacement = replacement;
10328
10329 for_each_rtx (&insn, replace_loop_reg, &args);
10330 }
10331 \f
10332 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10333 (ignored in the interim). */
10334
10335 static rtx
10336 loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED,
10337 basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn,
10338 rtx pattern)
10339 {
10340 return emit_insn_after (pattern, where_insn);
10341 }
10342
10343
10344 /* If WHERE_INSN is nonzero, emit insn for PATTERN before WHERE_INSN
10345 in basic block WHERE_BB (ignored in the interim) within the loop;
10346 otherwise hoist PATTERN into the loop pre-header. */
10347
10348 rtx
10349 loop_insn_emit_before (const struct loop *loop,
10350 basic_block where_bb ATTRIBUTE_UNUSED,
10351 rtx where_insn, rtx pattern)
10352 {
10353 if (! where_insn)
10354 return loop_insn_hoist (loop, pattern);
10355 return emit_insn_before (pattern, where_insn);
10356 }
10357
10358
10359 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10360 WHERE_BB (ignored in the interim) within the loop. */
10361
10362 static rtx
10363 loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED,
10364 basic_block where_bb ATTRIBUTE_UNUSED,
10365 rtx where_insn, rtx pattern)
10366 {
10367 return emit_call_insn_before (pattern, where_insn);
10368 }
10369
10370
10371 /* Hoist insn for PATTERN into the loop pre-header. */
10372
10373 rtx
10374 loop_insn_hoist (const struct loop *loop, rtx pattern)
10375 {
10376 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10377 }
10378
10379
10380 /* Hoist call insn for PATTERN into the loop pre-header. */
10381
10382 static rtx
10383 loop_call_insn_hoist (const struct loop *loop, rtx pattern)
10384 {
10385 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10386 }
10387
10388
10389 /* Sink insn for PATTERN after the loop end. */
10390
10391 rtx
10392 loop_insn_sink (const struct loop *loop, rtx pattern)
10393 {
10394 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10395 }
10396
10397 /* bl->final_value can be either a general_operand or a PLUS of a
10398 general_operand and a constant. Emit a sequence of instructions to load it into REG. */
10399 static rtx
10400 gen_load_of_final_value (rtx reg, rtx final_value)
10401 {
10402 rtx seq;
10403 start_sequence ();
10404 final_value = force_operand (final_value, reg);
10405 if (final_value != reg)
10406 emit_move_insn (reg, final_value);
10407 seq = get_insns ();
10408 end_sequence ();
10409 return seq;
10410 }
10411
10412 /* If the loop has multiple exits, emit insn for PATTERN before the
10413 loop to ensure that it will always be executed no matter how the
10414 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10415 since this is slightly more efficient. */
10416
10417 static rtx
10418 loop_insn_sink_or_swim (const struct loop *loop, rtx pattern)
10419 {
10420 if (loop->exit_count)
10421 return loop_insn_hoist (loop, pattern);
10422 else
10423 return loop_insn_sink (loop, pattern);
10424 }
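
/* Illustrative usage sketch, not from the original source: giving an
   eliminated register its final value with the two helpers above, so
   the assignment is seen on every path out of the loop.  REG and
   FINAL_VALUE are assumed to come from an iv_class as elsewhere in
   this file; the function name is hypothetical.  */

static void
sketch_set_final_value (const struct loop *loop, rtx reg, rtx final_value)
{
  /* With multiple exits this hoists the load before the loop;
     otherwise it sinks it after the loop end.  */
  loop_insn_sink_or_swim (loop, gen_load_of_final_value (reg, final_value));
}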
10425 \f
10426 static void
10427 loop_ivs_dump (const struct loop *loop, FILE *file, int verbose)
10428 {
10429 struct iv_class *bl;
10430 int iv_num = 0;
10431
10432 if (! loop || ! file)
10433 return;
10434
10435 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10436 iv_num++;
10437
10438 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10439
10440 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10441 {
10442 loop_iv_class_dump (bl, file, verbose);
10443 fputc ('\n', file);
10444 }
10445 }
10446
10447
10448 static void
10449 loop_iv_class_dump (const struct iv_class *bl, FILE *file,
10450 int verbose ATTRIBUTE_UNUSED)
10451 {
10452 struct induction *v;
10453 rtx incr;
10454 int i;
10455
10456 if (! bl || ! file)
10457 return;
10458
10459 fprintf (file, "IV class for reg %d, benefit %d\n",
10460 bl->regno, bl->total_benefit);
10461
10462 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10463 if (bl->initial_value)
10464 {
10465 fprintf (file, ", init val: ");
10466 print_simple_rtl (file, bl->initial_value);
10467 }
10468 if (bl->initial_test)
10469 {
10470 fprintf (file, ", init test: ");
10471 print_simple_rtl (file, bl->initial_test);
10472 }
10473 fputc ('\n', file);
10474
10475 if (bl->final_value)
10476 {
10477 fprintf (file, " Final val: ");
10478 print_simple_rtl (file, bl->final_value);
10479 fputc ('\n', file);
10480 }
10481
10482 if ((incr = biv_total_increment (bl)))
10483 {
10484 fprintf (file, " Total increment: ");
10485 print_simple_rtl (file, incr);
10486 fputc ('\n', file);
10487 }
10488
10489 /* List the increments. */
10490 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10491 {
10492 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10493 print_simple_rtl (file, v->add_val);
10494 fputc ('\n', file);
10495 }
10496
10497 /* List the givs. */
10498 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10499 {
10500 fprintf (file, " Giv%d: insn %d, benefit %d, ",
10501 i, INSN_UID (v->insn), v->benefit);
10502 if (v->giv_type == DEST_ADDR)
10503 print_simple_rtl (file, v->mem);
10504 else
10505 print_simple_rtl (file, single_set (v->insn));
10506 fputc ('\n', file);
10507 }
10508 }
10509
10510
10511 static void
10512 loop_biv_dump (const struct induction *v, FILE *file, int verbose)
10513 {
10514 if (! v || ! file)
10515 return;
10516
10517 fprintf (file,
10518 "Biv %d: insn %d",
10519 REGNO (v->dest_reg), INSN_UID (v->insn));
10520 fprintf (file, " const ");
10521 print_simple_rtl (file, v->add_val);
10522
10523 if (verbose && v->final_value)
10524 {
10525 fputc ('\n', file);
10526 fprintf (file, " final ");
10527 print_simple_rtl (file, v->final_value);
10528 }
10529
10530 fputc ('\n', file);
10531 }
10532
10533
10534 static void
10535 loop_giv_dump (const struct induction *v, FILE *file, int verbose)
10536 {
10537 if (! v || ! file)
10538 return;
10539
10540 if (v->giv_type == DEST_REG)
10541 fprintf (file, "Giv %d: insn %d",
10542 REGNO (v->dest_reg), INSN_UID (v->insn));
10543 else
10544 fprintf (file, "Dest address: insn %d",
10545 INSN_UID (v->insn));
10546
10547 fprintf (file, " src reg %d benefit %d",
10548 REGNO (v->src_reg), v->benefit);
10549 fprintf (file, " lifetime %d",
10550 v->lifetime);
10551
10552 if (v->replaceable)
10553 fprintf (file, " replaceable");
10554
10555 if (v->no_const_addval)
10556 fprintf (file, " ncav");
10557
10558 if (v->ext_dependent)
10559 {
10560 switch (GET_CODE (v->ext_dependent))
10561 {
10562 case SIGN_EXTEND:
10563 fprintf (file, " ext se");
10564 break;
10565 case ZERO_EXTEND:
10566 fprintf (file, " ext ze");
10567 break;
10568 case TRUNCATE:
10569 fprintf (file, " ext tr");
10570 break;
10571 default:
10572 gcc_unreachable ();
10573 }
10574 }
10575
10576 fputc ('\n', file);
10577 fprintf (file, " mult ");
10578 print_simple_rtl (file, v->mult_val);
10579
10580 fputc ('\n', file);
10581 fprintf (file, " add ");
10582 print_simple_rtl (file, v->add_val);
10583
10584 if (verbose && v->final_value)
10585 {
10586 fputc ('\n', file);
10587 fprintf (file, " final ");
10588 print_simple_rtl (file, v->final_value);
10589 }
10590
10591 fputc ('\n', file);
10592 }
10593
10594
10595 void
10596 debug_ivs (const struct loop *loop)
10597 {
10598 loop_ivs_dump (loop, stderr, 1);
10599 }
10600
10601
10602 void
10603 debug_iv_class (const struct iv_class *bl)
10604 {
10605 loop_iv_class_dump (bl, stderr, 1);
10606 }
10607
10608
10609 void
10610 debug_biv (const struct induction *v)
10611 {
10612 loop_biv_dump (v, stderr, 1);
10613 }
10614
10615
10616 void
10617 debug_giv (const struct induction *v)
10618 {
10619 loop_giv_dump (v, stderr, 1);
10620 }
10621
10622
10623 #define LOOP_BLOCK_NUM_1(INSN) \
10624 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
10625
10626 /* The notes do not have an assigned block, so look at the next insn. */
10627 #define LOOP_BLOCK_NUM(INSN) \
10628 ((INSN) ? (NOTE_P (INSN) \
10629 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
10630 : LOOP_BLOCK_NUM_1 (INSN)) \
10631 : -1)
10632
10633 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
10634
10635 static void
10636 loop_dump_aux (const struct loop *loop, FILE *file,
10637 int verbose ATTRIBUTE_UNUSED)
10638 {
10639 rtx label;
10640
10641 if (! loop || ! file || !BB_HEAD (loop->first))
10642 return;
10643
10644 /* Print diagnostics to compare our concept of a loop with
10645 what the loop notes say. */
10646 if (! PREV_INSN (BB_HEAD (loop->first))
10647 || !NOTE_P (PREV_INSN (BB_HEAD (loop->first)))
10648 || NOTE_LINE_NUMBER (PREV_INSN (BB_HEAD (loop->first)))
10649 != NOTE_INSN_LOOP_BEG)
10650 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
10651 INSN_UID (PREV_INSN (BB_HEAD (loop->first))));
10652 if (! NEXT_INSN (BB_END (loop->last))
10653 || !NOTE_P (NEXT_INSN (BB_END (loop->last)))
10654 || NOTE_LINE_NUMBER (NEXT_INSN (BB_END (loop->last)))
10655 != NOTE_INSN_LOOP_END)
10656 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
10657 INSN_UID (NEXT_INSN (BB_END (loop->last))));
10658
10659 if (loop->start)
10660 {
10661 fprintf (file,
10662 ";; start %d (%d), end %d (%d)\n",
10663 LOOP_BLOCK_NUM (loop->start),
10664 LOOP_INSN_UID (loop->start),
10665 LOOP_BLOCK_NUM (loop->end),
10666 LOOP_INSN_UID (loop->end));
10667 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10668 LOOP_BLOCK_NUM (loop->top),
10669 LOOP_INSN_UID (loop->top),
10670 LOOP_BLOCK_NUM (loop->scan_start),
10671 LOOP_INSN_UID (loop->scan_start));
10672 fprintf (file, ";; exit_count %d", loop->exit_count);
10673 if (loop->exit_count)
10674 {
10675 fputs (", labels:", file);
10676 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
10677 {
10678 fprintf (file, " %d ",
10679 LOOP_INSN_UID (XEXP (label, 0)));
10680 }
10681 }
10682 fputs ("\n", file);
10683 }
10684 }
10685
10686 /* Call this function from the debugger to dump LOOP. */
10687
10688 void
10689 debug_loop (const struct loop *loop)
10690 {
10691 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
10692 }
10693
10694 /* Call this function from the debugger to dump LOOPS. */
10695
10696 void
10697 debug_loops (const struct loops *loops)
10698 {
10699 flow_loops_dump (loops, stderr, loop_dump_aux, 1);
10700 }