1 /* Perform various loop optimizations, including strength reduction.
2 Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
3 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22 /* This is the loop optimization pass of the compiler.
23 It finds invariant computations within loops and moves them
24 to the beginning of the loop. Then it identifies basic and
25 general induction variables. Strength reduction is applied to the general
26 induction variables, and induction variable elimination is applied to
27 the basic induction variables.
28
29 It also finds cases where
30 a register is set within the loop by zero-extending a narrower value
31 and changes these to zero the entire register once before the loop
32 and merely copy the low part within the loop.
33
34 Most of the complexity is in heuristics to decide when it is
35 worthwhile to do these things.  */
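/* As a rough illustration (not code from this file): in a source loop such as

     for (i = 0; i < n; i++)
       a[i] = x * y;

   the product x * y is loop-invariant and is hoisted so it is computed
   once before the loop; i is a basic induction variable, and the address
   of a[i] is a general induction variable that strength reduction can
   turn into a simple pointer increment.  */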
36
37 #include "config.h"
38 #include "system.h"
39 #include "rtl.h"
40 #include "tm_p.h"
41 #include "obstack.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "hard-reg-set.h"
45 #include "basic-block.h"
46 #include "insn-config.h"
47 #include "regs.h"
48 #include "recog.h"
49 #include "flags.h"
50 #include "real.h"
51 #include "loop.h"
52 #include "cselib.h"
53 #include "except.h"
54 #include "toplev.h"
55 #include "predict.h"
56 #include "insn-flags.h"
57 #include "optabs.h"
58
59 /* Not really meaningful values, but at least something. */
60 #ifndef SIMULTANEOUS_PREFETCHES
61 #define SIMULTANEOUS_PREFETCHES 3
62 #endif
63 #ifndef PREFETCH_BLOCK
64 #define PREFETCH_BLOCK 32
65 #endif
66 #ifndef HAVE_prefetch
67 #define HAVE_prefetch 0
68 #define CODE_FOR_prefetch 0
69 #define gen_prefetch(a,b,c) (abort(), NULL_RTX)
70 #endif
71
72 /* Give up the prefetch optimizations once we exceed a given threshold.
73 It is unlikely that we would be able to optimize something in a loop
74 with so many detected prefetches. */
75 #define MAX_PREFETCHES 100
76 /* The number of prefetch blocks that are beneficial to fetch at once before
77 a loop with a known (and low) iteration count. */
78 #define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
79 /* For very tiny loops it is not worthwhile to prefetch even before the loop,
80 since it is likely that the data are already in the cache. */
81 #define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2
82
83 /* Parameterize some prefetch heuristics so they can be turned on and off
84 easily for performance testing on new architectures.  These can be
85 defined in target-dependent files. */
86
87 /* Prefetch is worthwhile only when loads/stores are dense. */
88 #ifndef PREFETCH_ONLY_DENSE_MEM
89 #define PREFETCH_ONLY_DENSE_MEM 1
90 #endif
91
92 /* Define what we mean by "dense" loads and stores; this value divided by 256
93 is the minimum fraction of memory references that is worth prefetching; 220/256 is roughly 86%.  */
94 #ifndef PREFETCH_DENSE_MEM
95 #define PREFETCH_DENSE_MEM 220
96 #endif
97
98 /* Do not prefetch for a loop whose iteration count is known to be low. */
99 #ifndef PREFETCH_NO_LOW_LOOPCNT
100 #define PREFETCH_NO_LOW_LOOPCNT 1
101 #endif
102
103 /* Define what we mean by a "low" iteration count. */
104 #ifndef PREFETCH_LOW_LOOPCNT
105 #define PREFETCH_LOW_LOOPCNT 32
106 #endif
107
108 /* Do not prefetch for a loop that contains a function call; such a loop is
109 probably not an internal loop. */
110 #ifndef PREFETCH_NO_CALL
111 #define PREFETCH_NO_CALL 1
112 #endif
113
114 /* Do not prefetch accesses with an extreme stride. */
115 #ifndef PREFETCH_NO_EXTREME_STRIDE
116 #define PREFETCH_NO_EXTREME_STRIDE 1
117 #endif
118
119 /* Define what we mean by an "extreme" stride. */
120 #ifndef PREFETCH_EXTREME_STRIDE
121 #define PREFETCH_EXTREME_STRIDE 4096
122 #endif
123
124 /* Define a limit to how far apart indices can be and still be merged
125 into a single prefetch. */
126 #ifndef PREFETCH_EXTREME_DIFFERENCE
127 #define PREFETCH_EXTREME_DIFFERENCE 4096
128 #endif
129
130 /* Issue prefetch instructions before the loop to fetch data to be used
131 in the first few loop iterations. */
132 #ifndef PREFETCH_BEFORE_LOOP
133 #define PREFETCH_BEFORE_LOOP 1
134 #endif
135
136 /* Do not handle reversed order prefetches (negative stride). */
137 #ifndef PREFETCH_NO_REVERSE_ORDER
138 #define PREFETCH_NO_REVERSE_ORDER 1
139 #endif
140
141 /* Prefetch even if the GIV is in conditional code. */
142 #ifndef PREFETCH_CONDITIONAL
143 #define PREFETCH_CONDITIONAL 1
144 #endif
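/* A port can tune these heuristics in its target-dependent files; as a
   sketch (hypothetical values, not taken from any real target):

     #define SIMULTANEOUS_PREFETCHES 8
     #define PREFETCH_BLOCK 64
     #define PREFETCH_LOW_LOOPCNT 16

   Any macro left undefined falls back to the defaults above.  */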
145
146 #define LOOP_REG_LIFETIME(LOOP, REGNO) \
147 ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))
148
149 #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
150 ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
151 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))
152
153 #define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
154 ((REGNO) < FIRST_PSEUDO_REGISTER \
155 ? HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)
156
157
158 /* Vector mapping INSN_UIDs to luids.
159 The luids are like uids but always increase monotonically.
160 We use them to see whether a jump comes from outside a given loop. */
161
162 int *uid_luid;
163
164 /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
165 number the insn is contained in. */
166
167 struct loop **uid_loop;
168
169 /* 1 + largest uid of any insn. */
170
171 int max_uid_for_loop;
172
173 /* 1 + luid of last insn. */
174
175 static int max_luid;
176
177 /* Number of loops detected in current function. Used as index to the
178 next few tables. */
179
180 static int max_loop_num;
181
182 /* Bound on pseudo register number before loop optimization.
183 A pseudo has valid regscan info if its number is < max_reg_before_loop. */
184 unsigned int max_reg_before_loop;
185
186 /* The value to pass to the next call of reg_scan_update. */
187 static int loop_max_reg;
188 \f
189 /* During the analysis of a loop, a chain of `struct movable's
190 is made to record all the movable insns found.
191 Then the entire chain can be scanned to decide which to move. */
192
193 struct movable
194 {
195 rtx insn; /* A movable insn */
196 rtx set_src; /* The expression this reg is set from. */
197 rtx set_dest; /* The destination of this SET. */
198 rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST
199 of any registers used within the LIBCALL. */
200 int consec; /* Number of consecutive following insns
201 that must be moved with this one. */
202 unsigned int regno; /* The register it sets */
203 short lifetime; /* lifetime of that register;
204 may be adjusted when matching movables
205 that load the same value are found. */
206 short savings; /* Number of insns we can move for this reg,
207 including other movables that force this
208 or match this one. */
209 unsigned int cond : 1; /* 1 if only conditionally movable */
210 unsigned int force : 1; /* 1 means MUST move this insn */
211 unsigned int global : 1; /* 1 means reg is live outside this loop */
212 /* If PARTIAL is 1, GLOBAL means something different:
213 that the reg is live outside the range from where it is set
214 to the following label. */
215 unsigned int done : 1; /* 1 inhibits further processing of this */
216
217 unsigned int partial : 1; /* 1 means this reg is used for zero-extending.
218 In particular, moving it does not make it
219 invariant. */
220 unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
221 load SRC, rather than copying INSN. */
222 unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
223 first insn of a consecutive sets group. */
224 unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */
225 enum machine_mode savemode; /* Nonzero means it is a mode for a low part
226 that we should avoid changing when clearing
227 the rest of the reg. */
228 struct movable *match; /* First entry for same value */
229 struct movable *forces; /* An insn that must be moved if this is */
230 struct movable *next;
231 };
232
233
234 FILE *loop_dump_stream;
235
236 /* Forward declarations. */
237
238 static void invalidate_loops_containing_label PARAMS ((rtx));
239 static void find_and_verify_loops PARAMS ((rtx, struct loops *));
240 static void mark_loop_jump PARAMS ((rtx, struct loop *));
241 static void prescan_loop PARAMS ((struct loop *));
242 static int reg_in_basic_block_p PARAMS ((rtx, rtx));
243 static int consec_sets_invariant_p PARAMS ((const struct loop *,
244 rtx, int, rtx));
245 static int labels_in_range_p PARAMS ((rtx, int));
246 static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *));
247 static void note_addr_stored PARAMS ((rtx, rtx, void *));
248 static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *));
249 static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx));
250 static void scan_loop PARAMS ((struct loop*, int));
251 #if 0
252 static void replace_call_address PARAMS ((rtx, rtx, rtx));
253 #endif
254 static rtx skip_consec_insns PARAMS ((rtx, int));
255 static int libcall_benefit PARAMS ((rtx));
256 static void ignore_some_movables PARAMS ((struct loop_movables *));
257 static void force_movables PARAMS ((struct loop_movables *));
258 static void combine_movables PARAMS ((struct loop_movables *,
259 struct loop_regs *));
260 static int num_unmoved_movables PARAMS ((const struct loop *));
261 static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *));
262 static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *,
263 struct loop_regs *));
264 static void add_label_notes PARAMS ((rtx, rtx));
265 static void move_movables PARAMS ((struct loop *loop, struct loop_movables *,
266 int, int));
267 static void loop_movables_add PARAMS((struct loop_movables *,
268 struct movable *));
269 static void loop_movables_free PARAMS((struct loop_movables *));
270 static int count_nonfixed_reads PARAMS ((const struct loop *, rtx));
271 static void loop_bivs_find PARAMS((struct loop *));
272 static void loop_bivs_init_find PARAMS((struct loop *));
273 static void loop_bivs_check PARAMS((struct loop *));
274 static void loop_givs_find PARAMS((struct loop *));
275 static void loop_givs_check PARAMS((struct loop *));
276 static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *,
277 int, int));
278 static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *,
279 struct induction *, rtx));
280 static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *));
281 static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *));
282 static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *,
283 rtx *));
284 static void loop_ivs_free PARAMS((struct loop *));
285 static void strength_reduce PARAMS ((struct loop *, int));
286 static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx));
287 static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx));
288 static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int));
289 static void record_biv PARAMS ((struct loop *, struct induction *,
290 rtx, rtx, rtx, rtx, rtx *,
291 int, int));
292 static void check_final_value PARAMS ((const struct loop *,
293 struct induction *));
294 static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int));
295 static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int));
296 static void loop_biv_dump PARAMS((const struct induction *, FILE *, int));
297 static void loop_giv_dump PARAMS((const struct induction *, FILE *, int));
298 static void record_giv PARAMS ((const struct loop *, struct induction *,
299 rtx, rtx, rtx, rtx, rtx, rtx, int,
300 enum g_types, int, int, rtx *));
301 static void update_giv_derive PARAMS ((const struct loop *, rtx));
302 static void check_ext_dependent_givs PARAMS ((struct iv_class *,
303 struct loop_info *));
304 static int basic_induction_var PARAMS ((const struct loop *, rtx,
305 enum machine_mode, rtx, rtx,
306 rtx *, rtx *, rtx **));
307 static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *));
308 static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *,
309 rtx *, rtx *, rtx *, int, int *,
310 enum machine_mode));
311 static int consec_sets_giv PARAMS ((const struct loop *, int, rtx,
312 rtx, rtx, rtx *, rtx *, rtx *, rtx *));
313 static int check_dbra_loop PARAMS ((struct loop *, int));
314 static rtx express_from_1 PARAMS ((rtx, rtx, rtx));
315 static rtx combine_givs_p PARAMS ((struct induction *, struct induction *));
316 static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR));
317 static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *));
318 static int product_cheap_p PARAMS ((rtx, rtx));
319 static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *,
320 int, int, int));
321 static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx,
322 struct iv_class *, int,
323 basic_block, rtx));
324 static int last_use_this_basic_block PARAMS ((rtx, rtx));
325 static void record_initial PARAMS ((rtx, rtx, void *));
326 static void update_reg_last_use PARAMS ((rtx, rtx));
327 static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx));
328 static void loop_regs_scan PARAMS ((const struct loop *, int));
329 static int count_insns_in_loop PARAMS ((const struct loop *));
330 static void load_mems PARAMS ((const struct loop *));
331 static int insert_loop_mem PARAMS ((rtx *, void *));
332 static int replace_loop_mem PARAMS ((rtx *, void *));
333 static void replace_loop_mems PARAMS ((rtx, rtx, rtx));
334 static int replace_loop_reg PARAMS ((rtx *, void *));
335 static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx));
336 static void note_reg_stored PARAMS ((rtx, rtx, void *));
337 static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int));
338 static void try_swap_copy_prop PARAMS ((const struct loop *, rtx,
339 unsigned int));
340 static int replace_label PARAMS ((rtx *, void *));
341 static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int));
342 static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int));
343 static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx));
344 static void loop_regs_update PARAMS ((const struct loop *, rtx));
345 static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx));
346
347 static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block,
348 rtx, rtx));
349 static rtx loop_call_insn_emit_before PARAMS((const struct loop *,
350 basic_block, rtx, rtx));
351 static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx));
352 static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx));
353
354 static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int));
355 static void loop_delete_insns PARAMS ((rtx, rtx));
356 static HOST_WIDE_INT remove_constant_addition PARAMS ((rtx *));
357 static rtx gen_load_of_final_value PARAMS ((rtx, rtx));
358 void debug_ivs PARAMS ((const struct loop *));
359 void debug_iv_class PARAMS ((const struct iv_class *));
360 void debug_biv PARAMS ((const struct induction *));
361 void debug_giv PARAMS ((const struct induction *));
362 void debug_loop PARAMS ((const struct loop *));
363 void debug_loops PARAMS ((const struct loops *));
364
365 typedef struct rtx_pair
366 {
367 rtx r1;
368 rtx r2;
369 } rtx_pair;
370
371 typedef struct loop_replace_args
372 {
373 rtx match;
374 rtx replacement;
375 rtx insn;
376 } loop_replace_args;
377
378 /* Nonzero iff INSN is between START and END, inclusive. */
379 #define INSN_IN_RANGE_P(INSN, START, END) \
380 (INSN_UID (INSN) < max_uid_for_loop \
381 && INSN_LUID (INSN) >= INSN_LUID (START) \
382 && INSN_LUID (INSN) <= INSN_LUID (END))
383
384 /* Indirect_jump_in_function is computed once per function. */
385 static int indirect_jump_in_function;
386 static int indirect_jump_in_function_p PARAMS ((rtx));
387
388 static int compute_luids PARAMS ((rtx, rtx, int));
389
390 static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *,
391 struct induction *,
392 rtx));
393 \f
394 /* Benefit penalty, if a giv is not replaceable, i.e. we must emit an insn to
395 copy the value of the strength-reduced giv to its original register.  */
396 static int copy_cost;
397
398 /* Cost of using a register, to normalize the benefits of a giv. */
399 static int reg_address_cost;
400
401 void
402 init_loop ()
403 {
404 rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
405
406 reg_address_cost = address_cost (reg, SImode);
407
408 copy_cost = COSTS_N_INSNS (1);
409 }
410 \f
411 /* Compute the mapping from uids to luids.
412 LUIDs are numbers assigned to insns, like uids,
413 except that luids increase monotonically through the code.
414 Start at insn START and stop just before END. Assign LUIDs
415 starting with PREV_LUID + 1. Return the last assigned LUID + 1. */
416 static int
417 compute_luids (start, end, prev_luid)
418 rtx start, end;
419 int prev_luid;
420 {
421 int i;
422 rtx insn;
423
424 for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
425 {
426 if (INSN_UID (insn) >= max_uid_for_loop)
427 continue;
428 /* Don't assign luids to line-number NOTEs, so that the distance in
429 luids between two insns is not affected by -g. */
430 if (GET_CODE (insn) != NOTE
431 || NOTE_LINE_NUMBER (insn) <= 0)
432 uid_luid[INSN_UID (insn)] = ++i;
433 else
434 /* Give a line number note the same luid as the preceding insn.  */
435 uid_luid[INSN_UID (insn)] = i;
436 }
437 return i + 1;
438 }
439 \f
440 /* Entry point of this file. Perform loop optimization
441 on the current function. F is the first insn of the function
442 and DUMPFILE is a stream for output of a trace of actions taken
443 (or 0 if none should be output). */
444
445 void
446 loop_optimize (f, dumpfile, flags)
447 /* f is the first instruction of a chain of insns for one function */
448 rtx f;
449 FILE *dumpfile;
450 int flags;
451 {
452 rtx insn;
453 int i;
454 struct loops loops_data;
455 struct loops *loops = &loops_data;
456 struct loop_info *loops_info;
457
458 loop_dump_stream = dumpfile;
459
460 init_recog_no_volatile ();
461
462 max_reg_before_loop = max_reg_num ();
463 loop_max_reg = max_reg_before_loop;
464
465 regs_may_share = 0;
466
467 /* Count the number of loops. */
468
469 max_loop_num = 0;
470 for (insn = f; insn; insn = NEXT_INSN (insn))
471 {
472 if (GET_CODE (insn) == NOTE
473 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
474 max_loop_num++;
475 }
476
477 /* Don't waste time if no loops. */
478 if (max_loop_num == 0)
479 return;
480
481 loops->num = max_loop_num;
482
483 /* Get size to use for tables indexed by uids.
484 Leave some space for labels allocated by find_and_verify_loops. */
485 max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;
486
487 uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
488 uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
489 sizeof (struct loop *));
490
491 /* Allocate storage for array of loops. */
492 loops->array = (struct loop *)
493 xcalloc (loops->num, sizeof (struct loop));
494
495 /* Find and process each loop.
496 First, find them, and record them in order of their beginnings. */
497 find_and_verify_loops (f, loops);
498
499 /* Allocate and initialize auxiliary loop information. */
500 loops_info = xcalloc (loops->num, sizeof (struct loop_info));
501 for (i = 0; i < loops->num; i++)
502 loops->array[i].aux = loops_info + i;
503
504 /* Now find all register lifetimes. This must be done after
505 find_and_verify_loops, because it might reorder the insns in the
506 function. */
507 reg_scan (f, max_reg_before_loop, 1);
508
509 /* This must occur after reg_scan so that registers created by gcse
510 will have entries in the register tables.
511
512 We could have added a call to reg_scan after gcse_main in toplev.c,
513 but moving this call to init_alias_analysis is more efficient. */
514 init_alias_analysis ();
515
517 /* See if we went too far.  Note that get_max_uid already returns
518 one more than the maximum uid of all insns.  */
518 if (get_max_uid () > max_uid_for_loop)
519 abort ();
520 /* Now reset it to the actual size we need. See above. */
521 max_uid_for_loop = get_max_uid ();
522
523 /* find_and_verify_loops has already called compute_luids, but it
524 might have rearranged code afterwards, so we need to recompute
525 the luids now. */
526 max_luid = compute_luids (f, NULL_RTX, 0);
527
528 /* Don't leave gaps in uid_luid for insns that have been
529 deleted. It is possible that the first or last insn
530 using some register has been deleted by cross-jumping.
531 Make sure that uid_luid for that former insn's uid
532 points to the general area where that insn used to be. */
533 for (i = 0; i < max_uid_for_loop; i++)
534 {
535 uid_luid[0] = uid_luid[i];
536 if (uid_luid[0] != 0)
537 break;
538 }
539 for (i = 0; i < max_uid_for_loop; i++)
540 if (uid_luid[i] == 0)
541 uid_luid[i] = uid_luid[i - 1];
542
543 /* Determine if the function has an indirect jump.  On some systems
544 this prevents low overhead loop instructions from being used. */
545 indirect_jump_in_function = indirect_jump_in_function_p (f);
546
547 /* Now scan the loops, last ones first, since this means inner ones are done
548 before outer ones. */
549 for (i = max_loop_num - 1; i >= 0; i--)
550 {
551 struct loop *loop = &loops->array[i];
552
553 if (! loop->invalid && loop->end)
554 scan_loop (loop, flags);
555 }
556
557 end_alias_analysis ();
558
559 /* Clean up. */
560 free (uid_luid);
561 free (uid_loop);
562 free (loops_info);
563 free (loops->array);
564 }
565 \f
566 /* Returns the next insn, in execution order, after INSN. START and
567 END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
568 respectively. LOOP->TOP, if non-NULL, is the top of the loop in the
569 insn-stream; it is used with loops that are entered near the
570 bottom. */
571
572 static rtx
573 next_insn_in_loop (loop, insn)
574 const struct loop *loop;
575 rtx insn;
576 {
577 insn = NEXT_INSN (insn);
578
579 if (insn == loop->end)
580 {
581 if (loop->top)
582 /* Go to the top of the loop, and continue there. */
583 insn = loop->top;
584 else
585 /* We're done. */
586 insn = NULL_RTX;
587 }
588
589 if (insn == loop->scan_start)
590 /* We're done. */
591 insn = NULL_RTX;
592
593 return insn;
594 }
595
596 /* Optimize one loop described by LOOP. */
597
598 /* ??? Could also move memory writes out of loops if the destination address
599 is invariant, the source is invariant, the memory write is not volatile,
600 and if we can prove that no read inside the loop can read this address
601 before the write occurs. If there is a read of this address after the
602 write, then we can also mark the memory read as invariant. */
603
604 static void
605 scan_loop (loop, flags)
606 struct loop *loop;
607 int flags;
608 {
609 struct loop_info *loop_info = LOOP_INFO (loop);
610 struct loop_regs *regs = LOOP_REGS (loop);
611 int i;
612 rtx loop_start = loop->start;
613 rtx loop_end = loop->end;
614 rtx p;
615 /* 1 if we are scanning insns that could be executed zero times. */
616 int maybe_never = 0;
617 /* 1 if we are scanning insns that might never be executed
618 due to a subroutine call which might exit before they are reached. */
619 int call_passed = 0;
620 /* Jump insn that enters the loop, or 0 if control drops in. */
621 rtx loop_entry_jump = 0;
622 /* Number of insns in the loop. */
623 int insn_count;
624 int tem;
625 rtx temp, update_start, update_end;
626 /* The SET from an insn, if it is the only SET in the insn. */
627 rtx set, set1;
628 /* Chain describing insns movable in current loop. */
629 struct loop_movables *movables = LOOP_MOVABLES (loop);
630 /* Ratio of extra register life span we can justify
631 for saving an instruction. More if loop doesn't call subroutines
632 since in that case saving an insn makes more difference
633 and more registers are available. */
634 int threshold;
635 /* Nonzero if we are scanning instructions in a sub-loop. */
636 int loop_depth = 0;
637
638 loop->top = 0;
639
640 movables->head = 0;
641 movables->last = 0;
642
643 /* Determine whether this loop starts with a jump down to a test at
644 the end. This will occur for a small number of loops with a test
645 that is too complex to duplicate in front of the loop.
646
647 We search for the first insn or label in the loop, skipping NOTEs.
648 However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
649 (because we might have a loop executed only once that contains a
650 loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
651 (in case we have a degenerate loop).
652
653 Note that if we mistakenly think that a loop is entered at the top
654 when, in fact, it is entered at the exit test, the only effect will be
655 slightly poorer optimization. Making the opposite error can generate
656 incorrect code. Since very few loops now start with a jump to the
657 exit test, the code here to detect that case is very conservative. */
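   /* Rough sketch (assumed layout, for orientation only) of a loop that is
      entered by jumping down to its exit test:

	NOTE_INSN_LOOP_BEG
	(jump_insn ... -> EXIT_TEST_LABEL)	; the entry jump detected below
	TOP_LABEL:				; becomes loop->top
	  ... loop body ...
	EXIT_TEST_LABEL:			; becomes loop->scan_start
	  ... compare and conditional jump back to TOP_LABEL ...
	NOTE_INSN_LOOP_END  */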
658
659 for (p = NEXT_INSN (loop_start);
660 p != loop_end
661 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
662 && (GET_CODE (p) != NOTE
663 || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
664 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
665 p = NEXT_INSN (p))
666 ;
667
668 loop->scan_start = p;
669
670 /* If loop end is the end of the current function, then emit a
671 NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
672 note insn. This is the position we use when sinking insns out of
673 the loop. */
674 if (NEXT_INSN (loop->end) != 0)
675 loop->sink = NEXT_INSN (loop->end);
676 else
677 loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);
678
679 /* Set up variables describing this loop. */
680 prescan_loop (loop);
681 threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);
682
683 /* If loop has a jump before the first label,
684 the true entry is the target of that jump.
685 Start scan from there.
686 But record in LOOP->TOP the place where the end-test jumps
687 back to so we can scan that after the end of the loop. */
688 if (GET_CODE (p) == JUMP_INSN)
689 {
690 loop_entry_jump = p;
691
692 /* Loop entry must be an unconditional jump (and not a RETURN).  */
693 if (any_uncondjump_p (p)
694 && JUMP_LABEL (p) != 0
695 /* Check to see whether the jump actually
696 jumps out of the loop (meaning it's no loop).
697 This case can happen for things like
698 do {..} while (0). If this label was generated previously
699 by loop, we can't tell anything about it and have to reject
700 the loop. */
701 && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
702 {
703 loop->top = next_label (loop->scan_start);
704 loop->scan_start = JUMP_LABEL (p);
705 }
706 }
707
708 /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
709 as required by loop_reg_used_before_p. So skip such loops. (This
710 test may never be true, but it's best to play it safe.)
711
712 Also, skip loops where we do not start scanning at a label. This
713 test also rejects loops starting with a JUMP_INSN that failed the
714 test above. */
715
716 if (INSN_UID (loop->scan_start) >= max_uid_for_loop
717 || GET_CODE (loop->scan_start) != CODE_LABEL)
718 {
719 if (loop_dump_stream)
720 fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
721 INSN_UID (loop_start), INSN_UID (loop_end));
722 return;
723 }
724
725 /* Allocate extra space for REGs that might be created by load_mems.
726 We allocate a little extra slop as well, in the hopes that we
727 won't have to reallocate the regs array. */
728 loop_regs_scan (loop, loop_info->mems_idx + 16);
729 insn_count = count_insns_in_loop (loop);
730
731 if (loop_dump_stream)
732 {
733 fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
734 INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
735 if (loop->cont)
736 fprintf (loop_dump_stream, "Continue at insn %d.\n",
737 INSN_UID (loop->cont));
738 }
739
740 /* Scan through the loop finding insns that are safe to move.
741 Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
742 this reg will be considered invariant for subsequent insns.
743 We consider whether subsequent insns use the reg
744 in deciding whether it is worth actually moving.
745
746 MAYBE_NEVER is nonzero if we have passed a conditional jump insn
747 and therefore it is possible that the insns we are scanning
748 would never be executed. At such times, we must make sure
749 that it is safe to execute the insn once instead of zero times.
750 When MAYBE_NEVER is 0, all insns will be executed at least once
751 so that is not a problem. */
752
753 for (p = next_insn_in_loop (loop, loop->scan_start);
754 p != NULL_RTX;
755 p = next_insn_in_loop (loop, p))
756 {
757 if (GET_CODE (p) == INSN
758 && (set = single_set (p))
759 && GET_CODE (SET_DEST (set)) == REG
760 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
761 && SET_DEST (set) != pic_offset_table_rtx
762 #endif
763 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
764 {
765 int tem1 = 0;
766 int tem2 = 0;
767 int move_insn = 0;
768 rtx src = SET_SRC (set);
769 rtx dependencies = 0;
770
771 /* Figure out what to use as a source of this insn. If a REG_EQUIV
772 note is given or if a REG_EQUAL note with a constant operand is
773 specified, use it as the source and mark that we should move
774 this insn by calling emit_move_insn rather than duplicating the
775 insn.
776
777 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
778 is present. */
779 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
780 if (temp)
781 src = XEXP (temp, 0), move_insn = 1;
782 else
783 {
784 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
785 if (temp && CONSTANT_P (XEXP (temp, 0)))
786 src = XEXP (temp, 0), move_insn = 1;
787 if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
788 {
789 src = XEXP (temp, 0);
790 /* A libcall block can use regs that don't appear in
791 the equivalent expression. To move the libcall,
792 we must move those regs too. */
793 dependencies = libcall_other_reg (p, src);
794 }
795 }
796
797 /* For parallels, add any possible uses to the dependencies, as we can't move
798 the insn without resolving them first. */
799 if (GET_CODE (PATTERN (p)) == PARALLEL)
800 {
801 for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
802 {
803 rtx x = XVECEXP (PATTERN (p), 0, i);
804 if (GET_CODE (x) == USE)
805 dependencies = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0), dependencies);
806 }
807 }
808
809 /* Don't try to optimize a register that was made
810 by loop-optimization for an inner loop.
811 We don't know its life-span, so we can't compute the benefit. */
812 if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
813 ;
814 else if (/* The register is used in basic blocks other
815 than the one where it is set (meaning that
816 something after this point in the loop might
817 depend on its value before the set). */
818 ! reg_in_basic_block_p (p, SET_DEST (set))
819 /* And the set is not guaranteed to be executed once
820 the loop starts, or the value before the set is
821 needed before the set occurs...
822
823 ??? Note we have quadratic behaviour here, mitigated
824 by the fact that the previous test will often fail for
825 large loops. Rather than re-scanning the entire loop
826 each time for register usage, we should build tables
827 of the register usage and use them here instead. */
828 && (maybe_never
829 || loop_reg_used_before_p (loop, set, p)))
830 /* It is unsafe to move the set.
831
832 This code used to consider it OK to move a set of a variable
833 which was not created by the user and not used in an exit test.
834 That behavior is incorrect and was removed. */
835 ;
836 else if ((tem = loop_invariant_p (loop, src))
837 && (dependencies == 0
838 || (tem2 = loop_invariant_p (loop, dependencies)) != 0)
839 && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
840 || (tem1
841 = consec_sets_invariant_p
842 (loop, SET_DEST (set),
843 regs->array[REGNO (SET_DEST (set))].set_in_loop,
844 p)))
845 /* If the insn can cause a trap (such as divide by zero),
846 can't move it unless it's guaranteed to be executed
847 once loop is entered. Even a function call might
848 prevent the trap insn from being reached
849 (since it might exit!) */
850 && ! ((maybe_never || call_passed)
851 && may_trap_p (src)))
852 {
853 struct movable *m;
854 int regno = REGNO (SET_DEST (set));
855
856 /* A potential lossage is the case where two insns
857 can be combined as long as they are both in the loop, but
858 we move one of them outside the loop. For large loops,
859 this can lose. The most common case of this is the address
860 of a function being called.
861
862 Therefore, if this register is marked as being used exactly
863 once if we are in a loop with calls (a "large loop"), see if
864 we can replace the usage of this register with the source
865 of this SET. If we can, delete this insn.
866
867 Don't do this if P has a REG_RETVAL note or if we have
868 SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */
869
870 if (loop_info->has_call
871 && regs->array[regno].single_usage != 0
872 && regs->array[regno].single_usage != const0_rtx
873 && REGNO_FIRST_UID (regno) == INSN_UID (p)
874 && (REGNO_LAST_UID (regno)
875 == INSN_UID (regs->array[regno].single_usage))
876 && regs->array[regno].set_in_loop == 1
877 && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
878 && ! side_effects_p (SET_SRC (set))
879 && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
880 && (! SMALL_REGISTER_CLASSES
881 || (! (GET_CODE (SET_SRC (set)) == REG
882 && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
883 /* This test is not redundant; SET_SRC (set) might be
884 a call-clobbered register and the life of REGNO
885 might span a call. */
886 && ! modified_between_p (SET_SRC (set), p,
887 regs->array[regno].single_usage)
888 && no_labels_between_p (p, regs->array[regno].single_usage)
889 && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
890 regs->array[regno].single_usage))
891 {
892 /* Replace any usage in a REG_EQUAL note. Must copy the
893 new source, so that we don't get rtx sharing between the
894 SET_SOURCE and REG_NOTES of insn p. */
895 REG_NOTES (regs->array[regno].single_usage)
896 = replace_rtx (REG_NOTES (regs->array[regno].single_usage),
897 SET_DEST (set), copy_rtx (SET_SRC (set)));
898
899 delete_insn (p);
900 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
901 regs->array[regno+i].set_in_loop = 0;
902 continue;
903 }
904
905 m = (struct movable *) xmalloc (sizeof (struct movable));
906 m->next = 0;
907 m->insn = p;
908 m->set_src = src;
909 m->dependencies = dependencies;
910 m->set_dest = SET_DEST (set);
911 m->force = 0;
912 m->consec = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
913 m->done = 0;
914 m->forces = 0;
915 m->partial = 0;
916 m->move_insn = move_insn;
917 m->move_insn_first = 0;
918 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
919 m->savemode = VOIDmode;
920 m->regno = regno;
921 /* Set M->cond if either loop_invariant_p
922 or consec_sets_invariant_p returned 2
923 (only conditionally invariant). */
924 m->cond = ((tem | tem1 | tem2) > 1);
925 m->global = LOOP_REG_GLOBAL_P (loop, regno);
926 m->match = 0;
927 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
928 m->savings = regs->array[regno].n_times_set;
929 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
930 m->savings += libcall_benefit (p);
931 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
932 regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
933 /* Add M to the end of the chain MOVABLES. */
934 loop_movables_add (movables, m);
935
936 if (m->consec > 0)
937 {
938 /* It is possible for the first instruction to have a
939 REG_EQUAL note but a non-invariant SET_SRC, so we must
940 remember the status of the first instruction in case
941 the last instruction doesn't have a REG_EQUAL note. */
942 m->move_insn_first = m->move_insn;
943
944 /* Skip this insn, not checking REG_LIBCALL notes. */
945 p = next_nonnote_insn (p);
946 /* Skip the consecutive insns, if there are any. */
947 p = skip_consec_insns (p, m->consec);
948 /* Back up to the last insn of the consecutive group. */
949 p = prev_nonnote_insn (p);
950
951 /* We must now reset m->move_insn, m->is_equiv, and possibly
952 m->set_src to correspond to the effects of all the
953 insns. */
954 temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
955 if (temp)
956 m->set_src = XEXP (temp, 0), m->move_insn = 1;
957 else
958 {
959 temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
960 if (temp && CONSTANT_P (XEXP (temp, 0)))
961 m->set_src = XEXP (temp, 0), m->move_insn = 1;
962 else
963 m->move_insn = 0;
964
965 }
966 m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
967 }
968 }
969 /* If this register is always set within a STRICT_LOW_PART
970 or set to zero, then its high bytes are constant.
971 So clear them outside the loop and within the loop
972 just load the low bytes.
973 We must check that the machine has an instruction to do so.
974 Also, if the value loaded into the register
975 depends on the same register, this cannot be done. */
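	 /* A minimal sketch of the two-insn pattern handled here (modes
	    chosen only for illustration):

	      (set (reg:SI R) (const_int 0))
	      (set (strict_low_part (subreg:QI (reg:SI R) 0)) ...)

	    The clear of R can be hoisted out of the loop; only the
	    low-part store remains inside it.  */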
976 else if (SET_SRC (set) == const0_rtx
977 && GET_CODE (NEXT_INSN (p)) == INSN
978 && (set1 = single_set (NEXT_INSN (p)))
979 && GET_CODE (set1) == SET
980 && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
981 && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
982 && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
983 == SET_DEST (set))
984 && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
985 {
986 int regno = REGNO (SET_DEST (set));
987 if (regs->array[regno].set_in_loop == 2)
988 {
989 struct movable *m;
990 m = (struct movable *) xmalloc (sizeof (struct movable));
991 m->next = 0;
992 m->insn = p;
993 m->set_dest = SET_DEST (set);
994 m->dependencies = 0;
995 m->force = 0;
996 m->consec = 0;
997 m->done = 0;
998 m->forces = 0;
999 m->move_insn = 0;
1000 m->move_insn_first = 0;
1001 m->partial = 1;
1002 /* If the insn may not be executed on some cycles,
1003 we can't clear the whole reg; clear just high part.
1004 Not even if the reg is used only within this loop.
1005 Consider this:
1006 while (1)
1007 while (s != t) {
1008 if (foo ()) x = *s;
1009 use (x);
1010 }
1011 Clearing x before the inner loop could clobber a value
1012 being saved from the last time around the outer loop.
1013 However, if the reg is not used outside this loop
1014 and all uses of the register are in the same
1015 basic block as the store, there is no problem.
1016
1017 If this insn was made by loop, we don't know its
1018 INSN_LUID and hence must make a conservative
1019 assumption. */
1020 m->global = (INSN_UID (p) >= max_uid_for_loop
1021 || LOOP_REG_GLOBAL_P (loop, regno)
1022 || (labels_in_range_p
1023 (p, REGNO_FIRST_LUID (regno))));
1024 if (maybe_never && m->global)
1025 m->savemode = GET_MODE (SET_SRC (set1));
1026 else
1027 m->savemode = VOIDmode;
1028 m->regno = regno;
1029 m->cond = 0;
1030 m->match = 0;
1031 m->lifetime = LOOP_REG_LIFETIME (loop, regno);
1032 m->savings = 1;
1033 for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
1034 regs->array[regno+i].set_in_loop = -1;
1035 /* Add M to the end of the chain MOVABLES. */
1036 loop_movables_add (movables, m);
1037 }
1038 }
1039 }
1040 /* Past a call insn, we get to insns which might not be executed
1041 because the call might exit. This matters for insns that trap.
1042 Constant and pure call insns always return, so they don't count. */
1043 else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
1044 call_passed = 1;
1045 /* Past a label or a jump, we get to insns for which we
1046 can't count on whether or how many times they will be
1047 executed during each iteration. Therefore, we can
1048 only move out sets of trivial variables
1049 (those not used after the loop). */
1050 /* Similar code appears twice in strength_reduce. */
1051 else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
1052 /* If we enter the loop in the middle, and scan around to the
1053 beginning, don't set maybe_never for that. This must be an
1054 unconditional jump, otherwise the code at the top of the
1055 loop might never be executed. Unconditional jumps are
1056 followed by a barrier then the loop_end. */
1057 && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
1058 && NEXT_INSN (NEXT_INSN (p)) == loop_end
1059 && any_uncondjump_p (p)))
1060 maybe_never = 1;
1061 else if (GET_CODE (p) == NOTE)
1062 {
1063 /* At the virtual top of a converted loop, insns are again known to
1064 be executed: logically, the loop begins here even though the exit
1065 code has been duplicated. */
1066 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
1067 maybe_never = call_passed = 0;
1068 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
1069 loop_depth++;
1070 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
1071 loop_depth--;
1072 }
1073 }
1074
1075 /* If one movable subsumes another, ignore that other. */
1076
1077 ignore_some_movables (movables);
1078
1079 /* For each movable insn, see if the reg that it loads
1080 has its last use (dies) in another conditionally movable insn.
1081 If so, record that the second insn "forces" the first one,
1082 since the second can be moved only if the first is. */
1083
1084 force_movables (movables);
1085
1086 /* See if there are multiple movable insns that load the same value.
1087 If there are, make all but the first point at the first one
1088 through the `match' field, and add the priorities of them
1089 all together as the priority of the first. */
1090
1091 combine_movables (movables, regs);
1092
1093 /* Now consider each movable insn to decide whether it is worth moving.
1094 Store 0 in regs->array[I].set_in_loop for each reg I that is moved.
1095
1096 Generally this increases code size, so do not move movables when
1097 optimizing for code size. */
1098
1099 if (! optimize_size)
1100 {
1101 move_movables (loop, movables, threshold, insn_count);
1102
1103 /* Recalculate regs->array if move_movables has created new
1104 registers. */
1105 if (max_reg_num () > regs->num)
1106 {
1107 loop_regs_scan (loop, 0);
1108 for (update_start = loop_start;
1109 PREV_INSN (update_start)
1110 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1111 update_start = PREV_INSN (update_start))
1112 ;
1113 update_end = NEXT_INSN (loop_end);
1114
1115 reg_scan_update (update_start, update_end, loop_max_reg);
1116 loop_max_reg = max_reg_num ();
1117 }
1118 }
1119
1120 /* Now candidates that still are negative are those not moved.
1121 Change regs->array[I].set_in_loop to indicate that those are not actually
1122 invariant. */
1123 for (i = 0; i < regs->num; i++)
1124 if (regs->array[i].set_in_loop < 0)
1125 regs->array[i].set_in_loop = regs->array[i].n_times_set;
1126
1127 /* Now that we've moved some things out of the loop, we might be able to
1128 hoist even more memory references. */
1129 load_mems (loop);
1130
1131 /* Recalculate regs->array if load_mems has created new registers. */
1132 if (max_reg_num () > regs->num)
1133 loop_regs_scan (loop, 0);
1134
1135 for (update_start = loop_start;
1136 PREV_INSN (update_start)
1137 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
1138 update_start = PREV_INSN (update_start))
1139 ;
1140 update_end = NEXT_INSN (loop_end);
1141
1142 reg_scan_update (update_start, update_end, loop_max_reg);
1143 loop_max_reg = max_reg_num ();
1144
1145 if (flag_strength_reduce)
1146 {
1147 if (update_end && GET_CODE (update_end) == CODE_LABEL)
1148 /* Ensure our label doesn't go away. */
1149 LABEL_NUSES (update_end)++;
1150
1151 strength_reduce (loop, flags);
1152
1153 reg_scan_update (update_start, update_end, loop_max_reg);
1154 loop_max_reg = max_reg_num ();
1155
1156 if (update_end && GET_CODE (update_end) == CODE_LABEL
1157 && --LABEL_NUSES (update_end) == 0)
1158 delete_related_insns (update_end);
1159 }
1160
1161
1162 /* The movable information was needed through strength reduction; free it now.  */
1163 loop_movables_free (movables);
1164
1165 free (regs->array);
1166 regs->array = 0;
1167 regs->num = 0;
1168 }
1169 \f
1170 /* Add elements to *OUTPUT to record all the pseudo-regs
1171 mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */
1172
1173 void
1174 record_excess_regs (in_this, not_in_this, output)
1175 rtx in_this, not_in_this;
1176 rtx *output;
1177 {
1178 enum rtx_code code;
1179 const char *fmt;
1180 int i;
1181
1182 code = GET_CODE (in_this);
1183
1184 switch (code)
1185 {
1186 case PC:
1187 case CC0:
1188 case CONST_INT:
1189 case CONST_DOUBLE:
1190 case CONST:
1191 case SYMBOL_REF:
1192 case LABEL_REF:
1193 return;
1194
1195 case REG:
1196 if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
1197 && ! reg_mentioned_p (in_this, not_in_this))
1198 *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
1199 return;
1200
1201 default:
1202 break;
1203 }
1204
1205 fmt = GET_RTX_FORMAT (code);
1206 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1207 {
1208 int j;
1209
1210 switch (fmt[i])
1211 {
1212 case 'E':
1213 for (j = 0; j < XVECLEN (in_this, i); j++)
1214 record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
1215 break;
1216
1217 case 'e':
1218 record_excess_regs (XEXP (in_this, i), not_in_this, output);
1219 break;
1220 }
1221 }
1222 }
1223 \f
1224 /* Check what regs are referred to in the libcall block ending with INSN,
1225 aside from those mentioned in the equivalent value.
1226 If there are none, return 0.
1227 If there are one or more, return an EXPR_LIST containing all of them. */
1228
1229 rtx
1230 libcall_other_reg (insn, equiv)
1231 rtx insn, equiv;
1232 {
1233 rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
1234 rtx p = XEXP (note, 0);
1235 rtx output = 0;
1236
1237 /* First, find all the regs used in the libcall block
1238 that are not mentioned as inputs to the result. */
1239
1240 while (p != insn)
1241 {
1242 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
1243 || GET_CODE (p) == CALL_INSN)
1244 record_excess_regs (PATTERN (p), equiv, &output);
1245 p = NEXT_INSN (p);
1246 }
1247
1248 return output;
1249 }
1250 \f
1251 /* Return 1 if all uses of REG
1252 are between INSN and the end of the basic block. */
1253
1254 static int
1255 reg_in_basic_block_p (insn, reg)
1256 rtx insn, reg;
1257 {
1258 int regno = REGNO (reg);
1259 rtx p;
1260
1261 if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
1262 return 0;
1263
1264 /* Search this basic block for the already recorded last use of the reg. */
1265 for (p = insn; p; p = NEXT_INSN (p))
1266 {
1267 switch (GET_CODE (p))
1268 {
1269 case NOTE:
1270 break;
1271
1272 case INSN:
1273 case CALL_INSN:
1274 /* Ordinary insn: if this is the last use, we win. */
1275 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1276 return 1;
1277 break;
1278
1279 case JUMP_INSN:
1280 /* Jump insn: if this is the last use, we win. */
1281 if (REGNO_LAST_UID (regno) == INSN_UID (p))
1282 return 1;
1283 /* Otherwise, it's the end of the basic block, so we lose. */
1284 return 0;
1285
1286 case CODE_LABEL:
1287 case BARRIER:
1288 /* It's the end of the basic block, so we lose. */
1289 return 0;
1290
1291 default:
1292 break;
1293 }
1294 }
1295
1296 /* The "last use" that was recorded can't be found after the first
1297 use. This can happen when the last use was deleted while
1298 processing an inner loop, this inner loop was then completely
1299 unrolled, and the outer loop is always exited after the inner loop,
1300 so that everything after the first use becomes a single basic block. */
1301 return 1;
1302 }
1303 \f
1304 /* Compute the benefit of eliminating the insns in the block whose
1305 last insn is LAST. This may be a group of insns used to compute a
1306 value directly or can contain a library call. */
1307
1308 static int
1309 libcall_benefit (last)
1310 rtx last;
1311 {
1312 rtx insn;
1313 int benefit = 0;
1314
1315 for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
1316 insn != last; insn = NEXT_INSN (insn))
1317 {
1318 if (GET_CODE (insn) == CALL_INSN)
1319 benefit += 10; /* Assume at least this many insns in a library
1320 routine. */
1321 else if (GET_CODE (insn) == INSN
1322 && GET_CODE (PATTERN (insn)) != USE
1323 && GET_CODE (PATTERN (insn)) != CLOBBER)
1324 benefit++;
1325 }
1326
1327 return benefit;
1328 }
1329 \f
1330 /* Skip COUNT insns from INSN, counting library calls as 1 insn. */
1331
1332 static rtx
1333 skip_consec_insns (insn, count)
1334 rtx insn;
1335 int count;
1336 {
1337 for (; count > 0; count--)
1338 {
1339 rtx temp;
1340
1341 /* If first insn of libcall sequence, skip to end. */
1342 /* Do this at start of loop, since INSN is guaranteed to
1343 be an insn here. */
1344 if (GET_CODE (insn) != NOTE
1345 && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
1346 insn = XEXP (temp, 0);
1347
1348 do
1349 insn = NEXT_INSN (insn);
1350 while (GET_CODE (insn) == NOTE);
1351 }
1352
1353 return insn;
1354 }
1355
1356 /* Ignore any movable whose insn falls within a libcall
1357 which is part of another movable.
1358 We make use of the fact that the movable for the libcall value
1359 was made later and so appears later on the chain. */
1360
1361 static void
1362 ignore_some_movables (movables)
1363 struct loop_movables *movables;
1364 {
1365 struct movable *m, *m1;
1366
1367 for (m = movables->head; m; m = m->next)
1368 {
1369 /* Is this a movable for the value of a libcall? */
1370 rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
1371 if (note)
1372 {
1373 rtx insn;
1374 /* Check for earlier movables inside that range,
1375 and mark them invalid. We cannot use LUIDs here because
1376 insns created by loop.c for prior loops don't have LUIDs.
1377 Rather than reject all such insns from movables, we just
1378 explicitly check each insn in the libcall (since invariant
1379 libcalls aren't that common). */
1380 for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
1381 for (m1 = movables->head; m1 != m; m1 = m1->next)
1382 if (m1->insn == insn)
1383 m1->done = 1;
1384 }
1385 }
1386 }
1387
1388 /* For each movable insn, see if the reg that it loads
1389 has its last use (dies) in another conditionally movable insn.
1390 If so, record that the second insn "forces" the first one,
1391 since the second can be moved only if the first is. */
1392
1393 static void
1394 force_movables (movables)
1395 struct loop_movables *movables;
1396 {
1397 struct movable *m, *m1;
1398
1399 for (m1 = movables->head; m1; m1 = m1->next)
1400 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1401 if (!m1->partial && !m1->done)
1402 {
1403 int regno = m1->regno;
1404 for (m = m1->next; m; m = m->next)
1405 /* ??? Could this be a bug? What if CSE caused the
1406 register of M1 to be used after this insn?
1407 Since CSE does not update regno_last_uid,
1408 this insn M->insn might not be where it dies.
1409 But very likely this doesn't matter; what matters is
1410 that M's reg is computed from M1's reg. */
1411 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
1412 && !m->done)
1413 break;
1414 if (m != 0 && m->set_src == m1->set_dest
1415 /* If m->consec, m->set_src isn't valid. */
1416 && m->consec == 0)
1417 m = 0;
1418
1419 /* Increase the priority of moving the first insn
1420 since it permits the second to be moved as well. */
1421 if (m != 0)
1422 {
1423 m->forces = m1;
1424 m1->lifetime += m->lifetime;
1425 m1->savings += m->savings;
1426 }
1427 }
1428 }
1429 \f
1430 /* Find invariant expressions that are equal and can be combined into
1431 one register. */
1432
1433 static void
1434 combine_movables (movables, regs)
1435 struct loop_movables *movables;
1436 struct loop_regs *regs;
1437 {
1438 struct movable *m;
1439 char *matched_regs = (char *) xmalloc (regs->num);
1440 enum machine_mode mode;
1441
1442 /* Regs that are set more than once are not allowed to match
1443 or be matched. I'm no longer sure why not. */
1444 /* Only pseudo registers are allowed to match or be matched,
1445 since move_movables does not validate the change. */
1446 /* Perhaps testing m->consec_sets would be more appropriate here? */
1447
1448 for (m = movables->head; m; m = m->next)
1449 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
1450 && m->regno >= FIRST_PSEUDO_REGISTER
1451 && !m->partial)
1452 {
1453 struct movable *m1;
1454 int regno = m->regno;
1455
1456 memset (matched_regs, 0, regs->num);
1457 matched_regs[regno] = 1;
1458
1459 /* We want later insns to match the first one. Don't make the first
1460 one match any later ones. So start this loop at m->next. */
1461 for (m1 = m->next; m1; m1 = m1->next)
1462 if (m != m1 && m1->match == 0
1463 && regs->array[m1->regno].n_times_set == 1
1464 && m1->regno >= FIRST_PSEUDO_REGISTER
1465 /* A reg used outside the loop mustn't be eliminated. */
1466 && !m1->global
1467 /* A reg used for zero-extending mustn't be eliminated. */
1468 && !m1->partial
1469 && (matched_regs[m1->regno]
1470 ||
1471 (
1472 /* Can combine regs with different modes loaded from the
1473 same constant only if the modes are the same or
1474 if both are integer modes with M wider or the same
1475 width as M1. The check for integer is redundant, but
1476 safe, since the only case of differing destination
1477 modes with equal sources is when both sources are
1478 VOIDmode, i.e., CONST_INT. */
1479 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1480 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1481 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1482 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1483 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1484 /* See if the source of M1 says it matches M. */
1485 && ((GET_CODE (m1->set_src) == REG
1486 && matched_regs[REGNO (m1->set_src)])
1487 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1488 movables, regs))))
1489 && ((m->dependencies == m1->dependencies)
1490 || rtx_equal_p (m->dependencies, m1->dependencies)))
1491 {
1492 m->lifetime += m1->lifetime;
1493 m->savings += m1->savings;
1494 m1->done = 1;
1495 m1->match = m;
1496 matched_regs[m1->regno] = 1;
1497 }
1498 }
1499
1500 /* Now combine the regs used for zero-extension.
1501 This can be done for those not marked `global'
1502 provided their lives don't overlap. */
1503
1504 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1505 mode = GET_MODE_WIDER_MODE (mode))
1506 {
1507 struct movable *m0 = 0;
1508
1509 /* Combine all the registers for extension from mode MODE.
1510 Don't combine any that are used outside this loop. */
1511 for (m = movables->head; m; m = m->next)
1512 if (m->partial && ! m->global
1513 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1514 {
1515 struct movable *m1;
1516
1517 int first = REGNO_FIRST_LUID (m->regno);
1518 int last = REGNO_LAST_LUID (m->regno);
1519
1520 if (m0 == 0)
1521 {
1522 /* First one: don't check for overlap, just record it. */
1523 m0 = m;
1524 continue;
1525 }
1526
1527 /* Make sure they extend to the same mode.
1528 (Almost always true.) */
1529 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1530 continue;
1531
1532 /* We already have one: check for overlap with those
1533 already combined together. */
1534 for (m1 = movables->head; m1 != m; m1 = m1->next)
1535 if (m1 == m0 || (m1->partial && m1->match == m0))
1536 if (! (REGNO_FIRST_LUID (m1->regno) > last
1537 || REGNO_LAST_LUID (m1->regno) < first))
1538 goto overlap;
1539
1540 /* No overlap: we can combine this with the others. */
1541 m0->lifetime += m->lifetime;
1542 m0->savings += m->savings;
1543 m->done = 1;
1544 m->match = m0;
1545
1546 overlap:
1547 ;
1548 }
1549 }
1550
1551 /* Clean up. */
1552 free (matched_regs);
1553 }
1554
1555 /* Returns the number of movable instructions in LOOP that were not
1556 moved outside the loop. */
1557
1558 static int
1559 num_unmoved_movables (loop)
1560 const struct loop *loop;
1561 {
1562 int num = 0;
1563 struct movable *m;
1564
1565 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1566 if (!m->done)
1567 ++num;
1568
1569 return num;
1570 }
1571
1572 \f
1573 /* Return 1 if regs X and Y will become the same if moved. */
1574
1575 static int
1576 regs_match_p (x, y, movables)
1577 rtx x, y;
1578 struct loop_movables *movables;
1579 {
1580 unsigned int xn = REGNO (x);
1581 unsigned int yn = REGNO (y);
1582 struct movable *mx, *my;
1583
1584 for (mx = movables->head; mx; mx = mx->next)
1585 if (mx->regno == xn)
1586 break;
1587
1588 for (my = movables->head; my; my = my->next)
1589 if (my->regno == yn)
1590 break;
1591
1592 return (mx && my
1593 && ((mx->match == my->match && mx->match != 0)
1594 || mx->match == my
1595 || mx == my->match));
1596 }
1597
1598 /* Return 1 if X and Y are identical-looking rtx's.
1599 This is the Lisp function EQUAL for rtx arguments.
1600
1601 If two registers are matching movables or a movable register and an
1602 equivalent constant, consider them equal. */
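/* Illustrative example (a sketch, not from the original sources): if
   pseudo 100 is recorded as a movable whose insn will be re-generated
   (move_insn) from the source (const_int 4), and its set_in_loop count
   is -2 (marked as equivalent to a constant), then comparing

     (reg:SI 100)   and   (const_int 4)

   here yields 1 even though rtx_equal_p would yield 0.  Likewise, two
   REGs with different numbers compare equal when regs_match_p says
   they will become the same register once moved.  */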
1603
1604 static int
1605 rtx_equal_for_loop_p (x, y, movables, regs)
1606 rtx x, y;
1607 struct loop_movables *movables;
1608 struct loop_regs *regs;
1609 {
1610 int i;
1611 int j;
1612 struct movable *m;
1613 enum rtx_code code;
1614 const char *fmt;
1615
1616 if (x == y)
1617 return 1;
1618 if (x == 0 || y == 0)
1619 return 0;
1620
1621 code = GET_CODE (x);
1622
1623 /* If we have a register and a constant, they may sometimes be
1624 equal. */
1625 if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
1626 && CONSTANT_P (y))
1627 {
1628 for (m = movables->head; m; m = m->next)
1629 if (m->move_insn && m->regno == REGNO (x)
1630 && rtx_equal_p (m->set_src, y))
1631 return 1;
1632 }
1633 else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
1634 && CONSTANT_P (x))
1635 {
1636 for (m = movables->head; m; m = m->next)
1637 if (m->move_insn && m->regno == REGNO (y)
1638 && rtx_equal_p (m->set_src, x))
1639 return 1;
1640 }
1641
1642 /* Otherwise, rtx's of different codes cannot be equal. */
1643 if (code != GET_CODE (y))
1644 return 0;
1645
1646 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1647 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1648
1649 if (GET_MODE (x) != GET_MODE (y))
1650 return 0;
1651
1652 /* These three types of rtx's can be compared nonrecursively. */
1653 if (code == REG)
1654 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1655
1656 if (code == LABEL_REF)
1657 return XEXP (x, 0) == XEXP (y, 0);
1658 if (code == SYMBOL_REF)
1659 return XSTR (x, 0) == XSTR (y, 0);
1660
1661 /* Compare the elements. If any pair of corresponding elements
1662 fail to match, return 0 for the whole thing. */
1663
1664 fmt = GET_RTX_FORMAT (code);
1665 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1666 {
1667 switch (fmt[i])
1668 {
1669 case 'w':
1670 if (XWINT (x, i) != XWINT (y, i))
1671 return 0;
1672 break;
1673
1674 case 'i':
1675 if (XINT (x, i) != XINT (y, i))
1676 return 0;
1677 break;
1678
1679 case 'E':
1680 /* Two vectors must have the same length. */
1681 if (XVECLEN (x, i) != XVECLEN (y, i))
1682 return 0;
1683
1684 /* And the corresponding elements must match. */
1685 for (j = 0; j < XVECLEN (x, i); j++)
1686 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1687 movables, regs) == 0)
1688 return 0;
1689 break;
1690
1691 case 'e':
1692 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1693 == 0)
1694 return 0;
1695 break;
1696
1697 case 's':
1698 if (strcmp (XSTR (x, i), XSTR (y, i)))
1699 return 0;
1700 break;
1701
1702 case 'u':
1703 /* These are just backpointers, so they don't matter. */
1704 break;
1705
1706 case '0':
1707 break;
1708
1709 /* It is believed that rtx's at this level will never
1710 contain anything but integers and other rtx's,
1711 except for within LABEL_REFs and SYMBOL_REFs. */
1712 default:
1713 abort ();
1714 }
1715 }
1716 return 1;
1717 }
1718 \f
1719 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
1720 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
1721 references is incremented once for each added note. */
1722
1723 static void
1724 add_label_notes (x, insns)
1725 rtx x;
1726 rtx insns;
1727 {
1728 enum rtx_code code = GET_CODE (x);
1729 int i, j;
1730 const char *fmt;
1731 rtx insn;
1732
1733 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
1734 {
1735 /* This code used to ignore labels that referred to dispatch tables to
1736 avoid flow generating (slightly) worse code.
1737
1738 We no longer ignore such label references (see LABEL_REF handling in
1739 mark_jump_label for additional information). */
1740 for (insn = insns; insn; insn = NEXT_INSN (insn))
1741 if (reg_mentioned_p (XEXP (x, 0), insn))
1742 {
1743 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
1744 REG_NOTES (insn));
1745 if (LABEL_P (XEXP (x, 0)))
1746 LABEL_NUSES (XEXP (x, 0))++;
1747 }
1748 }
1749
1750 fmt = GET_RTX_FORMAT (code);
1751 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1752 {
1753 if (fmt[i] == 'e')
1754 add_label_notes (XEXP (x, i), insns);
1755 else if (fmt[i] == 'E')
1756 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1757 add_label_notes (XVECEXP (x, i, j), insns);
1758 }
1759 }
1760 \f
1761 /* Scan MOVABLES, and move the insns that deserve to be moved.
1762 If two matching movables are combined, replace one reg with the
1763 other throughout. */
1764
1765 static void
1766 move_movables (loop, movables, threshold, insn_count)
1767 struct loop *loop;
1768 struct loop_movables *movables;
1769 int threshold;
1770 int insn_count;
1771 {
1772 struct loop_regs *regs = LOOP_REGS (loop);
1773 int nregs = regs->num;
1774 rtx new_start = 0;
1775 struct movable *m;
1776 rtx p;
1777 rtx loop_start = loop->start;
1778 rtx loop_end = loop->end;
1779 /* Map of pseudo-register replacements to handle combining
1780 when we move several insns that load the same value
1781 into different pseudo-registers. */
1782 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1783 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
1784
1785 for (m = movables->head; m; m = m->next)
1786 {
1787 /* Describe this movable insn. */
1788
1789 if (loop_dump_stream)
1790 {
1791 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1792 INSN_UID (m->insn), m->regno, m->lifetime);
1793 if (m->consec > 0)
1794 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1795 if (m->cond)
1796 fprintf (loop_dump_stream, "cond ");
1797 if (m->force)
1798 fprintf (loop_dump_stream, "force ");
1799 if (m->global)
1800 fprintf (loop_dump_stream, "global ");
1801 if (m->done)
1802 fprintf (loop_dump_stream, "done ");
1803 if (m->move_insn)
1804 fprintf (loop_dump_stream, "move-insn ");
1805 if (m->match)
1806 fprintf (loop_dump_stream, "matches %d ",
1807 INSN_UID (m->match->insn));
1808 if (m->forces)
1809 fprintf (loop_dump_stream, "forces %d ",
1810 INSN_UID (m->forces->insn));
1811 }
1812
1813 /* Ignore the insn if it's already done (it matched something else).
1814 Otherwise, see if it is now safe to move. */
1815
1816 if (!m->done
1817 && (! m->cond
1818 || (1 == loop_invariant_p (loop, m->set_src)
1819 && (m->dependencies == 0
1820 || 1 == loop_invariant_p (loop, m->dependencies))
1821 && (m->consec == 0
1822 || 1 == consec_sets_invariant_p (loop, m->set_dest,
1823 m->consec + 1,
1824 m->insn))))
1825 && (! m->forces || m->forces->done))
1826 {
1827 int regno;
1828 rtx p;
1829 int savings = m->savings;
1830
1831 /* We have an insn that is safe to move.
1832 Compute its desirability. */
1833
1834 p = m->insn;
1835 regno = m->regno;
1836
1837 if (loop_dump_stream)
1838 fprintf (loop_dump_stream, "savings %d ", savings);
1839
1840 if (regs->array[regno].moved_once && loop_dump_stream)
1841 fprintf (loop_dump_stream, "halved since already moved ");
1842
1843 /* An insn MUST be moved if we already moved something else
1844 which is safe only if this one is moved too: that is,
1845 if already_moved[REGNO] is nonzero. */
1846
1847 /* An insn is desirable to move if the new lifetime of the
1848 register is no more than THRESHOLD times the old lifetime.
1849 If it's not desirable, it means the loop is so big
1850 that moving won't speed things up much,
1851 and it is liable to make register usage worse. */
1852
1853 /* It is also desirable to move if it can be moved at no
1854 extra cost because something else was already moved. */
1855
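                 /* Worked example of the test below (illustrative numbers only):
                    with threshold == 3, savings == 2, m->lifetime == 10 and
                    insn_count == 40, we have 3 * 2 * 10 == 60 >= 40, so the
                    insn is considered worth moving.  If this register had
                    already been moved out of another loop (moved_once), the
                    right-hand side doubles to 80 and the same insn would fail
                    the test.  */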
1856 if (already_moved[regno]
1857 || flag_move_all_movables
1858 || (threshold * savings * m->lifetime) >=
1859 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
1860 || (m->forces && m->forces->done
1861 && regs->array[m->forces->regno].n_times_set == 1))
1862 {
1863 int count;
1864 struct movable *m1;
1865 rtx first = NULL_RTX;
1866
1867 /* Now move the insns that set the reg. */
1868
1869 if (m->partial && m->match)
1870 {
1871 rtx newpat, i1;
1872 rtx r1, r2;
1873 /* Find the end of this chain of matching regs.
1874 Thus, we load each reg in the chain from that one reg.
1875 And that reg is loaded with 0 directly,
1876 since it has ->match == 0. */
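                     /* Sketch with hypothetical movables: if A->match == B,
                        B->match == C and C->match == 0, the loop below stops
                        at C; we then emit "A's dest = C's dest" before the
                        loop, and C itself is the movable whose register is
                        loaded with 0 directly.  */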
1877 for (m1 = m; m1->match; m1 = m1->match);
1878 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1879 SET_DEST (PATTERN (m1->insn)));
1880 i1 = loop_insn_hoist (loop, newpat);
1881
1882 /* Mark the moved, invariant reg as being allowed to
1883 share a hard reg with the other matching invariant. */
1884 REG_NOTES (i1) = REG_NOTES (m->insn);
1885 r1 = SET_DEST (PATTERN (m->insn));
1886 r2 = SET_DEST (PATTERN (m1->insn));
1887 regs_may_share
1888 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1889 gen_rtx_EXPR_LIST (VOIDmode, r2,
1890 regs_may_share));
1891 delete_insn (m->insn);
1892
1893 if (new_start == 0)
1894 new_start = i1;
1895
1896 if (loop_dump_stream)
1897 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1898 }
1899 /* If we are to re-generate the item being moved with a
1900 new move insn, first delete what we have and then emit
1901 the move insn before the loop. */
1902 else if (m->move_insn)
1903 {
1904 rtx i1, temp, seq;
1905
1906 for (count = m->consec; count >= 0; count--)
1907 {
1908 /* If this is the first insn of a library call sequence,
1909 skip to the end. */
1910 if (GET_CODE (p) != NOTE
1911 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1912 p = XEXP (temp, 0);
1913
1914 /* If this is the last insn of a libcall sequence, then
1915 delete every insn in the sequence except the last.
1916 The last insn is handled in the normal manner. */
1917 if (GET_CODE (p) != NOTE
1918 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1919 {
1920 temp = XEXP (temp, 0);
1921 while (temp != p)
1922 temp = delete_insn (temp);
1923 }
1924
1925 temp = p;
1926 p = delete_insn (p);
1927
1928 /* simplify_giv_expr expects that it can walk the insns
1929 at m->insn forwards and see this old sequence we are
1930 tossing here. delete_insn does preserve the next
1931 pointers, but when we skip over a NOTE we must fix
1932 it up. Otherwise that code walks into the non-deleted
1933 insn stream. */
1934 while (p && GET_CODE (p) == NOTE)
1935 p = NEXT_INSN (temp) = NEXT_INSN (p);
1936 }
1937
1938 start_sequence ();
1939 emit_move_insn (m->set_dest, m->set_src);
1940 seq = get_insns ();
1941 end_sequence ();
1942
1943 add_label_notes (m->set_src, seq);
1944
1945 i1 = loop_insn_hoist (loop, seq);
1946 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
1947 set_unique_reg_note (i1,
1948 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1949 m->set_src);
1950
1951 if (loop_dump_stream)
1952 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1953
1954 /* The more regs we move, the less we like moving them. */
1955 threshold -= 3;
1956 }
1957 else
1958 {
1959 for (count = m->consec; count >= 0; count--)
1960 {
1961 rtx i1, temp;
1962
1963 /* If first insn of libcall sequence, skip to end. */
1964 /* Do this at start of loop, since p is guaranteed to
1965 be an insn here. */
1966 if (GET_CODE (p) != NOTE
1967 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1968 p = XEXP (temp, 0);
1969
1970 /* If last insn of libcall sequence, move all
1971 insns except the last before the loop. The last
1972 insn is handled in the normal manner. */
1973 if (GET_CODE (p) != NOTE
1974 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1975 {
1976 rtx fn_address = 0;
1977 rtx fn_reg = 0;
1978 rtx fn_address_insn = 0;
1979
1980 first = 0;
1981 for (temp = XEXP (temp, 0); temp != p;
1982 temp = NEXT_INSN (temp))
1983 {
1984 rtx body;
1985 rtx n;
1986 rtx next;
1987
1988 if (GET_CODE (temp) == NOTE)
1989 continue;
1990
1991 body = PATTERN (temp);
1992
1993 /* Find the next insn after TEMP,
1994 not counting USE or NOTE insns. */
1995 for (next = NEXT_INSN (temp); next != p;
1996 next = NEXT_INSN (next))
1997 if (! (GET_CODE (next) == INSN
1998 && GET_CODE (PATTERN (next)) == USE)
1999 && GET_CODE (next) != NOTE)
2000 break;
2001
2002 /* If that is the call, this may be the insn
2003 that loads the function address.
2004
2005 Extract the function address from the insn
2006 that loads it into a register.
2007 If this insn was cse'd, we get incorrect code.
2008
2009 So emit a new move insn that copies the
2010 function address into the register that the
2011 call insn will use. flow.c will delete any
2012 redundant stores that we have created. */
2013 if (GET_CODE (next) == CALL_INSN
2014 && GET_CODE (body) == SET
2015 && GET_CODE (SET_DEST (body)) == REG
2016 && (n = find_reg_note (temp, REG_EQUAL,
2017 NULL_RTX)))
2018 {
2019 fn_reg = SET_SRC (body);
2020 if (GET_CODE (fn_reg) != REG)
2021 fn_reg = SET_DEST (body);
2022 fn_address = XEXP (n, 0);
2023 fn_address_insn = temp;
2024 }
2025 /* We have the call insn.
2026 If it uses the register we suspect it might,
2027 load it with the correct address directly. */
2028 if (GET_CODE (temp) == CALL_INSN
2029 && fn_address != 0
2030 && reg_referenced_p (fn_reg, body))
2031 loop_insn_emit_after (loop, 0, fn_address_insn,
2032 gen_move_insn
2033 (fn_reg, fn_address));
2034
2035 if (GET_CODE (temp) == CALL_INSN)
2036 {
2037 i1 = loop_call_insn_hoist (loop, body);
2038 /* Because the USAGE information potentially
2039 contains objects other than hard registers
2040 we need to copy it. */
2041 if (CALL_INSN_FUNCTION_USAGE (temp))
2042 CALL_INSN_FUNCTION_USAGE (i1)
2043 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
2044 }
2045 else
2046 i1 = loop_insn_hoist (loop, body);
2047 if (first == 0)
2048 first = i1;
2049 if (temp == fn_address_insn)
2050 fn_address_insn = i1;
2051 REG_NOTES (i1) = REG_NOTES (temp);
2052 REG_NOTES (temp) = NULL;
2053 delete_insn (temp);
2054 }
2055 if (new_start == 0)
2056 new_start = first;
2057 }
2058 if (m->savemode != VOIDmode)
2059 {
2060 /* P sets REG to zero; but we should clear only
2061 the bits that are not covered by the mode
2062 m->savemode. */
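                     /* For example (illustrative only): if m->savemode is
                        QImode, the mask built below is (1 << 8) - 1 == 0xff,
                        so the AND keeps the low byte of REG and clears the
                        rest, instead of clearing the whole register as the
                        deleted insn P did.  */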
2063 rtx reg = m->set_dest;
2064 rtx sequence;
2065 rtx tem;
2066
2067 start_sequence ();
2068 tem = expand_simple_binop
2069 (GET_MODE (reg), AND, reg,
2070 GEN_INT ((((HOST_WIDE_INT) 1
2071 << GET_MODE_BITSIZE (m->savemode)))
2072 - 1),
2073 reg, 1, OPTAB_LIB_WIDEN);
2074 if (tem == 0)
2075 abort ();
2076 if (tem != reg)
2077 emit_move_insn (reg, tem);
2078 sequence = get_insns ();
2079 end_sequence ();
2080 i1 = loop_insn_hoist (loop, sequence);
2081 }
2082 else if (GET_CODE (p) == CALL_INSN)
2083 {
2084 i1 = loop_call_insn_hoist (loop, PATTERN (p));
2085 /* Because the USAGE information potentially
2086 contains objects other than hard registers
2087 we need to copy it. */
2088 if (CALL_INSN_FUNCTION_USAGE (p))
2089 CALL_INSN_FUNCTION_USAGE (i1)
2090 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
2091 }
2092 else if (count == m->consec && m->move_insn_first)
2093 {
2094 rtx seq;
2095 /* The SET_SRC might not be invariant, so we must
2096 use the REG_EQUAL note. */
2097 start_sequence ();
2098 emit_move_insn (m->set_dest, m->set_src);
2099 seq = get_insns ();
2100 end_sequence ();
2101
2102 add_label_notes (m->set_src, seq);
2103
2104 i1 = loop_insn_hoist (loop, seq);
2105 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2106 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2107 : REG_EQUAL, m->set_src);
2108 }
2109 else
2110 i1 = loop_insn_hoist (loop, PATTERN (p));
2111
2112 if (REG_NOTES (i1) == 0)
2113 {
2114 REG_NOTES (i1) = REG_NOTES (p);
2115 REG_NOTES (p) = NULL;
2116
2117 /* If there is a REG_EQUAL note present whose value
2118 is not loop invariant, then delete it, since it
2119 may cause problems with later optimization passes.
2120 It is possible for cse to create such notes
2121 like this as a result of record_jump_cond. */
2122
2123 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2124 && ! loop_invariant_p (loop, XEXP (temp, 0)))
2125 remove_note (i1, temp);
2126 }
2127
2128 if (new_start == 0)
2129 new_start = i1;
2130
2131 if (loop_dump_stream)
2132 fprintf (loop_dump_stream, " moved to %d",
2133 INSN_UID (i1));
2134
2135 /* If library call, now fix the REG_NOTES that contain
2136 insn pointers, namely REG_LIBCALL on FIRST
2137 and REG_RETVAL on I1. */
2138 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2139 {
2140 XEXP (temp, 0) = first;
2141 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2142 XEXP (temp, 0) = i1;
2143 }
2144
2145 temp = p;
2146 delete_insn (p);
2147 p = NEXT_INSN (p);
2148
2149 /* simplify_giv_expr expects that it can walk the insns
2150 at m->insn forwards and see this old sequence we are
2151 tossing here. delete_insn does preserve the next
2152 pointers, but when we skip over a NOTE we must fix
2153 it up. Otherwise that code walks into the non-deleted
2154 insn stream. */
2155 while (p && GET_CODE (p) == NOTE)
2156 p = NEXT_INSN (temp) = NEXT_INSN (p);
2157 }
2158
2159 /* The more regs we move, the less we like moving them. */
2160 threshold -= 3;
2161 }
2162
2163 /* Any other movable that loads the same register
2164 MUST be moved. */
2165 already_moved[regno] = 1;
2166
2167 /* This reg has been moved out of one loop. */
2168 regs->array[regno].moved_once = 1;
2169
2170 /* The reg set here is now invariant. */
2171 if (! m->partial)
2172 {
2173 int i;
2174 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2175 regs->array[regno+i].set_in_loop = 0;
2176 }
2177
2178 m->done = 1;
2179
2180 /* Change the length-of-life info for the register
2181 to say it lives at least the full length of this loop.
2182 This will help guide optimizations in outer loops. */
2183
2184 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2185 /* This is the old insn before all the moved insns.
2186 We can't use the moved insn because it is out of range
2187 in uid_luid. Only the old insns have luids. */
2188 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2189 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2190 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2191
2192 /* Combine with this moved insn any other matching movables. */
2193
2194 if (! m->partial)
2195 for (m1 = movables->head; m1; m1 = m1->next)
2196 if (m1->match == m)
2197 {
2198 rtx temp;
2199
2200 /* Schedule the reg loaded by M1
2201 for replacement so that it shares the reg of M.
2202 If the modes differ (only possible in restricted
2203 circumstances), make a SUBREG.
2204
2205 Note this assumes that the target dependent files
2206 treat REG and SUBREG equally, including within
2207 GO_IF_LEGITIMATE_ADDRESS and in all the
2208 predicates since we never verify that replacing the
2209 original register with a SUBREG results in a
2210 recognizable insn. */
2211 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2212 reg_map[m1->regno] = m->set_dest;
2213 else
2214 reg_map[m1->regno]
2215 = gen_lowpart_common (GET_MODE (m1->set_dest),
2216 m->set_dest);
2217
2218 /* Get rid of the matching insn
2219 and prevent further processing of it. */
2220 m1->done = 1;
2221
2222 /* if library call, delete all insns. */
2223 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2224 NULL_RTX)))
2225 delete_insn_chain (XEXP (temp, 0), m1->insn);
2226 else
2227 delete_insn (m1->insn);
2228
2229 /* Any other movable that loads the same register
2230 MUST be moved. */
2231 already_moved[m1->regno] = 1;
2232
2233 /* The reg merged here is now invariant,
2234 if the reg it matches is invariant. */
2235 if (! m->partial)
2236 {
2237 int i;
2238 for (i = 0;
2239 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
2240 i++)
2241 regs->array[m1->regno+i].set_in_loop = 0;
2242 }
2243 }
2244 }
2245 else if (loop_dump_stream)
2246 fprintf (loop_dump_stream, "not desirable");
2247 }
2248 else if (loop_dump_stream && !m->match)
2249 fprintf (loop_dump_stream, "not safe");
2250
2251 if (loop_dump_stream)
2252 fprintf (loop_dump_stream, "\n");
2253 }
2254
2255 if (new_start == 0)
2256 new_start = loop_start;
2257
2258 /* Go through all the instructions in the loop, making
2259 all the register substitutions scheduled in REG_MAP. */
2260 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
2261 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2262 || GET_CODE (p) == CALL_INSN)
2263 {
2264 replace_regs (PATTERN (p), reg_map, nregs, 0);
2265 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
2266 INSN_CODE (p) = -1;
2267 }
2268
2269 /* Clean up. */
2270 free (reg_map);
2271 free (already_moved);
2272 }
2273
2274
2275 static void
2276 loop_movables_add (movables, m)
2277 struct loop_movables *movables;
2278 struct movable *m;
2279 {
2280 if (movables->head == 0)
2281 movables->head = m;
2282 else
2283 movables->last->next = m;
2284 movables->last = m;
2285 }
2286
2287
2288 static void
2289 loop_movables_free (movables)
2290 struct loop_movables *movables;
2291 {
2292 struct movable *m;
2293 struct movable *m_next;
2294
2295 for (m = movables->head; m; m = m_next)
2296 {
2297 m_next = m->next;
2298 free (m);
2299 }
2300 }
2301 \f
2302 #if 0
2303 /* Scan X and replace the address of any MEM in it with ADDR.
2304 REG is the address that MEM should have before the replacement. */
2305
2306 static void
2307 replace_call_address (x, reg, addr)
2308 rtx x, reg, addr;
2309 {
2310 enum rtx_code code;
2311 int i;
2312 const char *fmt;
2313
2314 if (x == 0)
2315 return;
2316 code = GET_CODE (x);
2317 switch (code)
2318 {
2319 case PC:
2320 case CC0:
2321 case CONST_INT:
2322 case CONST_DOUBLE:
2323 case CONST:
2324 case SYMBOL_REF:
2325 case LABEL_REF:
2326 case REG:
2327 return;
2328
2329 case SET:
2330 /* Short cut for very common case. */
2331 replace_call_address (XEXP (x, 1), reg, addr);
2332 return;
2333
2334 case CALL:
2335 /* Short cut for very common case. */
2336 replace_call_address (XEXP (x, 0), reg, addr);
2337 return;
2338
2339 case MEM:
2340 /* If this MEM uses a reg other than the one we expected,
2341 something is wrong. */
2342 if (XEXP (x, 0) != reg)
2343 abort ();
2344 XEXP (x, 0) = addr;
2345 return;
2346
2347 default:
2348 break;
2349 }
2350
2351 fmt = GET_RTX_FORMAT (code);
2352 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2353 {
2354 if (fmt[i] == 'e')
2355 replace_call_address (XEXP (x, i), reg, addr);
2356 else if (fmt[i] == 'E')
2357 {
2358 int j;
2359 for (j = 0; j < XVECLEN (x, i); j++)
2360 replace_call_address (XVECEXP (x, i, j), reg, addr);
2361 }
2362 }
2363 }
2364 #endif
2365 \f
2366 /* Return the number of memory refs to addresses that vary
2367 in the rtx X. */
2368
2369 static int
2370 count_nonfixed_reads (loop, x)
2371 const struct loop *loop;
2372 rtx x;
2373 {
2374 enum rtx_code code;
2375 int i;
2376 const char *fmt;
2377 int value;
2378
2379 if (x == 0)
2380 return 0;
2381
2382 code = GET_CODE (x);
2383 switch (code)
2384 {
2385 case PC:
2386 case CC0:
2387 case CONST_INT:
2388 case CONST_DOUBLE:
2389 case CONST:
2390 case SYMBOL_REF:
2391 case LABEL_REF:
2392 case REG:
2393 return 0;
2394
2395 case MEM:
2396 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2397 + count_nonfixed_reads (loop, XEXP (x, 0)));
2398
2399 default:
2400 break;
2401 }
2402
2403 value = 0;
2404 fmt = GET_RTX_FORMAT (code);
2405 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2406 {
2407 if (fmt[i] == 'e')
2408 value += count_nonfixed_reads (loop, XEXP (x, i));
2409 if (fmt[i] == 'E')
2410 {
2411 int j;
2412 for (j = 0; j < XVECLEN (x, i); j++)
2413 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
2414 }
2415 }
2416 return value;
2417 }
2418 \f
2419 /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
2420 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2421 `unknown_address_altered', `unknown_constant_address_altered', and
2422 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2423 list `store_mems' in LOOP. */
2424
2425 static void
2426 prescan_loop (loop)
2427 struct loop *loop;
2428 {
2429 int level = 1;
2430 rtx insn;
2431 struct loop_info *loop_info = LOOP_INFO (loop);
2432 rtx start = loop->start;
2433 rtx end = loop->end;
2434 /* The label after END. Jumping here is just like falling off the
2435 end of the loop. We use next_nonnote_insn instead of next_label
2436 as a hedge against the (pathological) case where some actual insn
2437 might end up between the two. */
2438 rtx exit_target = next_nonnote_insn (end);
2439
2440 loop_info->has_indirect_jump = indirect_jump_in_function;
2441 loop_info->pre_header_has_call = 0;
2442 loop_info->has_call = 0;
2443 loop_info->has_nonconst_call = 0;
2444 loop_info->has_prefetch = 0;
2445 loop_info->has_volatile = 0;
2446 loop_info->has_tablejump = 0;
2447 loop_info->has_multiple_exit_targets = 0;
2448 loop->level = 1;
2449
2450 loop_info->unknown_address_altered = 0;
2451 loop_info->unknown_constant_address_altered = 0;
2452 loop_info->store_mems = NULL_RTX;
2453 loop_info->first_loop_store_insn = NULL_RTX;
2454 loop_info->mems_idx = 0;
2455 loop_info->num_mem_sets = 0;
2456
2457
2458 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
2459 insn = PREV_INSN (insn))
2460 {
2461 if (GET_CODE (insn) == CALL_INSN)
2462 {
2463 loop_info->pre_header_has_call = 1;
2464 break;
2465 }
2466 }
2467
2468 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2469 insn = NEXT_INSN (insn))
2470 {
2471 switch (GET_CODE (insn))
2472 {
2473 case NOTE:
2474 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2475 {
2476 ++level;
2477 /* Count number of loops contained in this one. */
2478 loop->level++;
2479 }
2480 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2481 --level;
2482 break;
2483
2484 case CALL_INSN:
2485 if (! CONST_OR_PURE_CALL_P (insn))
2486 {
2487 loop_info->unknown_address_altered = 1;
2488 loop_info->has_nonconst_call = 1;
2489 }
2490 else if (pure_call_p (insn))
2491 loop_info->has_nonconst_call = 1;
2492 loop_info->has_call = 1;
2493 if (can_throw_internal (insn))
2494 loop_info->has_multiple_exit_targets = 1;
2495 break;
2496
2497 case JUMP_INSN:
2498 if (! loop_info->has_multiple_exit_targets)
2499 {
2500 rtx set = pc_set (insn);
2501
2502 if (set)
2503 {
2504 rtx src = SET_SRC (set);
2505 rtx label1, label2;
2506
2507 if (GET_CODE (src) == IF_THEN_ELSE)
2508 {
2509 label1 = XEXP (src, 1);
2510 label2 = XEXP (src, 2);
2511 }
2512 else
2513 {
2514 label1 = src;
2515 label2 = NULL_RTX;
2516 }
2517
2518 do
2519 {
2520 if (label1 && label1 != pc_rtx)
2521 {
2522 if (GET_CODE (label1) != LABEL_REF)
2523 {
2524 /* Something tricky. */
2525 loop_info->has_multiple_exit_targets = 1;
2526 break;
2527 }
2528 else if (XEXP (label1, 0) != exit_target
2529 && LABEL_OUTSIDE_LOOP_P (label1))
2530 {
2531 /* A jump outside the current loop. */
2532 loop_info->has_multiple_exit_targets = 1;
2533 break;
2534 }
2535 }
2536
2537 label1 = label2;
2538 label2 = NULL_RTX;
2539 }
2540 while (label1);
2541 }
2542 else
2543 {
2544 /* A return, or something tricky. */
2545 loop_info->has_multiple_exit_targets = 1;
2546 }
2547 }
2548 /* FALLTHRU */
2549
2550 case INSN:
2551 if (volatile_refs_p (PATTERN (insn)))
2552 loop_info->has_volatile = 1;
2553
2554 if (GET_CODE (insn) == JUMP_INSN
2555 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2556 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
2557 loop_info->has_tablejump = 1;
2558
2559 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2560 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2561 loop_info->first_loop_store_insn = insn;
2562
2563 if (flag_non_call_exceptions && can_throw_internal (insn))
2564 loop_info->has_multiple_exit_targets = 1;
2565 break;
2566
2567 default:
2568 break;
2569 }
2570 }
2571
2572 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2573 if (/* An exception thrown by a called function might land us
2574 anywhere. */
2575 ! loop_info->has_nonconst_call
2576 /* We don't want loads for MEMs moved to a location before the
2577 one at which their stack memory becomes allocated. (Note
2578 that this is not a problem for malloc, etc., since those
2579 require actual function calls.) */
2580 && ! current_function_calls_alloca
2581 /* There are ways to leave the loop other than falling off the
2582 end. */
2583 && ! loop_info->has_multiple_exit_targets)
2584 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2585 insn = NEXT_INSN (insn))
2586 for_each_rtx (&insn, insert_loop_mem, loop_info);
2587
2588 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2589 that loop_invariant_p and load_mems can use true_dependence
2590 to determine what is really clobbered. */
2591 if (loop_info->unknown_address_altered)
2592 {
2593 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2594
2595 loop_info->store_mems
2596 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2597 }
2598 if (loop_info->unknown_constant_address_altered)
2599 {
2600 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2601
2602 RTX_UNCHANGING_P (mem) = 1;
2603 loop_info->store_mems
2604 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
2605 }
2606 }
2607 \f
2608 /* Invalidate all loops containing LABEL. */
2609
2610 static void
2611 invalidate_loops_containing_label (label)
2612 rtx label;
2613 {
2614 struct loop *loop;
2615 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
2616 loop->invalid = 1;
2617 }
2618
2619 /* Scan the function looking for loops. Record the start and end of each loop.
2620 Also mark as invalid loops any loops that contain a setjmp or are branched
2621 to from outside the loop. */
2622
2623 static void
2624 find_and_verify_loops (f, loops)
2625 rtx f;
2626 struct loops *loops;
2627 {
2628 rtx insn;
2629 rtx label;
2630 int num_loops;
2631 struct loop *current_loop;
2632 struct loop *next_loop;
2633 struct loop *loop;
2634
2635 num_loops = loops->num;
2636
2637 compute_luids (f, NULL_RTX, 0);
2638
2639 /* If there are jumps to undefined labels,
2640 treat them as jumps out of any/all loops.
2641 This also avoids writing past end of tables when there are no loops. */
2642 uid_loop[0] = NULL;
2643
2644 /* Find boundaries of loops, mark which loops are contained within
2645 loops, and invalidate loops that have setjmp. */
2646
2647 num_loops = 0;
2648 current_loop = NULL;
2649 for (insn = f; insn; insn = NEXT_INSN (insn))
2650 {
2651 if (GET_CODE (insn) == NOTE)
2652 switch (NOTE_LINE_NUMBER (insn))
2653 {
2654 case NOTE_INSN_LOOP_BEG:
2655 next_loop = loops->array + num_loops;
2656 next_loop->num = num_loops;
2657 num_loops++;
2658 next_loop->start = insn;
2659 next_loop->outer = current_loop;
2660 current_loop = next_loop;
2661 break;
2662
2663 case NOTE_INSN_LOOP_CONT:
2664 current_loop->cont = insn;
2665 break;
2666
2667 case NOTE_INSN_LOOP_VTOP:
2668 current_loop->vtop = insn;
2669 break;
2670
2671 case NOTE_INSN_LOOP_END:
2672 if (! current_loop)
2673 abort ();
2674
2675 current_loop->end = insn;
2676 current_loop = current_loop->outer;
2677 break;
2678
2679 default:
2680 break;
2681 }
2682
2683 if (GET_CODE (insn) == CALL_INSN
2684 && find_reg_note (insn, REG_SETJMP, NULL))
2685 {
2686 /* In this case, we must invalidate our current loop and any
2687 enclosing loop. */
2688 for (loop = current_loop; loop; loop = loop->outer)
2689 {
2690 loop->invalid = 1;
2691 if (loop_dump_stream)
2692 fprintf (loop_dump_stream,
2693 "\nLoop at %d ignored due to setjmp.\n",
2694 INSN_UID (loop->start));
2695 }
2696 }
2697
2698 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2699 enclosing loop, but this doesn't matter. */
2700 uid_loop[INSN_UID (insn)] = current_loop;
2701 }
2702
2703 /* Any loop containing a label used in an initializer must be invalidated,
2704 because it can be jumped into from anywhere. */
2705 for (label = forced_labels; label; label = XEXP (label, 1))
2706 invalidate_loops_containing_label (XEXP (label, 0));
2707
2708 /* Any loop containing a label used for an exception handler must be
2709 invalidated, because it can be jumped into from anywhere. */
2710 for_each_eh_label (invalidate_loops_containing_label);
2711
2712 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2713 loop that it is not contained within, that loop is marked invalid.
2714 If any INSN or CALL_INSN uses a label's address, then the loop containing
2715 that label is marked invalid, because it could be jumped into from
2716 anywhere.
2717
2718 Also look for blocks of code ending in an unconditional branch that
2719 exits the loop. If such a block is surrounded by a conditional
2720 branch around the block, move the block elsewhere (see below) and
2721 invert the jump to point to the code block. This may eliminate a
2722 label in our loop and will simplify processing by both us and a
2723 possible second cse pass. */
2724
2725 for (insn = f; insn; insn = NEXT_INSN (insn))
2726 if (INSN_P (insn))
2727 {
2728 struct loop *this_loop = uid_loop[INSN_UID (insn)];
2729
2730 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2731 {
2732 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2733 if (note)
2734 invalidate_loops_containing_label (XEXP (note, 0));
2735 }
2736
2737 if (GET_CODE (insn) != JUMP_INSN)
2738 continue;
2739
2740 mark_loop_jump (PATTERN (insn), this_loop);
2741
2742 /* See if this is an unconditional branch outside the loop. */
2743 if (this_loop
2744 && (GET_CODE (PATTERN (insn)) == RETURN
2745 || (any_uncondjump_p (insn)
2746 && onlyjump_p (insn)
2747 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2748 != this_loop)))
2749 && get_max_uid () < max_uid_for_loop)
2750 {
2751 rtx p;
2752 rtx our_next = next_real_insn (insn);
2753 rtx last_insn_to_move = NEXT_INSN (insn);
2754 struct loop *dest_loop;
2755 struct loop *outer_loop = NULL;
2756
2757 /* Go backwards until we reach the start of the loop, a label,
2758 or a JUMP_INSN. */
2759 for (p = PREV_INSN (insn);
2760 GET_CODE (p) != CODE_LABEL
2761 && ! (GET_CODE (p) == NOTE
2762 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2763 && GET_CODE (p) != JUMP_INSN;
2764 p = PREV_INSN (p))
2765 ;
2766
2767 /* Check for the case where we have a jump to an inner nested
2768 loop, and do not perform the optimization in that case. */
2769
2770 if (JUMP_LABEL (insn))
2771 {
2772 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2773 if (dest_loop)
2774 {
2775 for (outer_loop = dest_loop; outer_loop;
2776 outer_loop = outer_loop->outer)
2777 if (outer_loop == this_loop)
2778 break;
2779 }
2780 }
2781
2782 /* Make sure that the target of P is within the current loop. */
2783
2784 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2785 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2786 outer_loop = this_loop;
2787
2788 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2789 we have a block of code to try to move.
2790
2791 We look backward and then forward from the target of INSN
2792 to find a BARRIER at the same loop depth as the target.
2793 If we find such a BARRIER, we make a new label for the start
2794 of the block, invert the jump in P and point it to that label,
2795 and move the block of code to the spot we found. */
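      /* A rough before/after sketch of that transformation, with
         hypothetical labels:

           before (inside the loop):
             p:     if (cond) goto skip
             moved: ...rarely executed block...
             insn:  jump out of the loop
             skip:  rest of the loop body

           after:
             p:     if (!cond) goto moved
             skip:  rest of the loop body
             ...
             ;; past a BARRIER at the target's loop depth:
             moved: ...rarely executed block...
             insn:  jump out of the loop

         so the rarely executed block that exits the loop no longer
         sits in the loop body.  */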
2796
2797 if (! outer_loop
2798 && GET_CODE (p) == JUMP_INSN
2799 && JUMP_LABEL (p) != 0
2800 /* Just ignore jumps to labels that were never emitted.
2801 These always indicate compilation errors. */
2802 && INSN_UID (JUMP_LABEL (p)) != 0
2803 && any_condjump_p (p) && onlyjump_p (p)
2804 && next_real_insn (JUMP_LABEL (p)) == our_next
2805 /* If it's not safe to move the sequence, then we
2806 mustn't try. */
2807 && insns_safe_to_move_p (p, NEXT_INSN (insn),
2808 &last_insn_to_move))
2809 {
2810 rtx target
2811 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2812 struct loop *target_loop = uid_loop[INSN_UID (target)];
2813 rtx loc, loc2;
2814 rtx tmp;
2815
2816 /* Search for possible garbage past the conditional jumps
2817 and look for the last barrier. */
2818 for (tmp = last_insn_to_move;
2819 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2820 if (GET_CODE (tmp) == BARRIER)
2821 last_insn_to_move = tmp;
2822
2823 for (loc = target; loc; loc = PREV_INSN (loc))
2824 if (GET_CODE (loc) == BARRIER
2825 /* Don't move things inside a tablejump. */
2826 && ((loc2 = next_nonnote_insn (loc)) == 0
2827 || GET_CODE (loc2) != CODE_LABEL
2828 || (loc2 = next_nonnote_insn (loc2)) == 0
2829 || GET_CODE (loc2) != JUMP_INSN
2830 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2831 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2832 && uid_loop[INSN_UID (loc)] == target_loop)
2833 break;
2834
2835 if (loc == 0)
2836 for (loc = target; loc; loc = NEXT_INSN (loc))
2837 if (GET_CODE (loc) == BARRIER
2838 /* Don't move things inside a tablejump. */
2839 && ((loc2 = next_nonnote_insn (loc)) == 0
2840 || GET_CODE (loc2) != CODE_LABEL
2841 || (loc2 = next_nonnote_insn (loc2)) == 0
2842 || GET_CODE (loc2) != JUMP_INSN
2843 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2844 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
2845 && uid_loop[INSN_UID (loc)] == target_loop)
2846 break;
2847
2848 if (loc)
2849 {
2850 rtx cond_label = JUMP_LABEL (p);
2851 rtx new_label = get_label_after (p);
2852
2853 /* Ensure our label doesn't go away. */
2854 LABEL_NUSES (cond_label)++;
2855
2856 /* Verify that uid_loop is large enough and that
2857 we can invert P. */
2858 if (invert_jump (p, new_label, 1))
2859 {
2860 rtx q, r;
2861
2862 /* If no suitable BARRIER was found, create a suitable
2863 one before TARGET. Since TARGET is a fall through
2864 path, we'll need to insert a jump around our block
2865 and add a BARRIER before TARGET.
2866
2867 This creates an extra unconditional jump outside
2868 the loop. However, the benefits of removing rarely
2869 executed instructions from inside the loop usually
2870 outweigh the cost of the extra unconditional jump
2871 outside the loop. */
2872 if (loc == 0)
2873 {
2874 rtx temp;
2875
2876 temp = gen_jump (JUMP_LABEL (insn));
2877 temp = emit_jump_insn_before (temp, target);
2878 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2879 LABEL_NUSES (JUMP_LABEL (insn))++;
2880 loc = emit_barrier_before (target);
2881 }
2882
2883 /* Include the BARRIER after INSN and copy the
2884 block after LOC. */
2885 if (squeeze_notes (&new_label, &last_insn_to_move))
2886 abort ();
2887 reorder_insns (new_label, last_insn_to_move, loc);
2888
2889 /* All those insns are now in TARGET_LOOP. */
2890 for (q = new_label;
2891 q != NEXT_INSN (last_insn_to_move);
2892 q = NEXT_INSN (q))
2893 uid_loop[INSN_UID (q)] = target_loop;
2894
2895 /* The label jumped to by INSN is no longer a loop
2896 exit. Unless INSN does not have a label (e.g.,
2897 it is a RETURN insn), search loop->exit_labels
2898 to find its label_ref, and remove it. Also turn
2899 off LABEL_OUTSIDE_LOOP_P bit. */
2900 if (JUMP_LABEL (insn))
2901 {
2902 for (q = 0, r = this_loop->exit_labels;
2903 r;
2904 q = r, r = LABEL_NEXTREF (r))
2905 if (XEXP (r, 0) == JUMP_LABEL (insn))
2906 {
2907 LABEL_OUTSIDE_LOOP_P (r) = 0;
2908 if (q)
2909 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2910 else
2911 this_loop->exit_labels = LABEL_NEXTREF (r);
2912 break;
2913 }
2914
2915 for (loop = this_loop; loop && loop != target_loop;
2916 loop = loop->outer)
2917 loop->exit_count--;
2918
2919 /* If we didn't find it, then something is
2920 wrong. */
2921 if (! r)
2922 abort ();
2923 }
2924
2925 /* P is now a jump outside the loop, so it must be put
2926 in loop->exit_labels, and marked as such.
2927 The easiest way to do this is to just call
2928 mark_loop_jump again for P. */
2929 mark_loop_jump (PATTERN (p), this_loop);
2930
2931 /* If INSN now jumps to the insn after it,
2932 delete INSN. */
2933 if (JUMP_LABEL (insn) != 0
2934 && (next_real_insn (JUMP_LABEL (insn))
2935 == next_real_insn (insn)))
2936 delete_related_insns (insn);
2937 }
2938
2939 /* Continue the loop after where the conditional
2940 branch used to jump, since the only branch insn
2941 in the block (if it still remains) is an inter-loop
2942 branch and hence needs no processing. */
2943 insn = NEXT_INSN (cond_label);
2944
2945 if (--LABEL_NUSES (cond_label) == 0)
2946 delete_related_insns (cond_label);
2947
2948 /* This loop will be continued with NEXT_INSN (insn). */
2949 insn = PREV_INSN (insn);
2950 }
2951 }
2952 }
2953 }
2954 }
2955
2956 /* If any label in X jumps to a loop different from LOOP_NUM and any of the
2957 loops it is contained in, mark the target loop invalid.
2958
2959 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2960
2961 static void
2962 mark_loop_jump (x, loop)
2963 rtx x;
2964 struct loop *loop;
2965 {
2966 struct loop *dest_loop;
2967 struct loop *outer_loop;
2968 int i;
2969
2970 switch (GET_CODE (x))
2971 {
2972 case PC:
2973 case USE:
2974 case CLOBBER:
2975 case REG:
2976 case MEM:
2977 case CONST_INT:
2978 case CONST_DOUBLE:
2979 case RETURN:
2980 return;
2981
2982 case CONST:
2983 /* There could be a label reference in here. */
2984 mark_loop_jump (XEXP (x, 0), loop);
2985 return;
2986
2987 case PLUS:
2988 case MINUS:
2989 case MULT:
2990 mark_loop_jump (XEXP (x, 0), loop);
2991 mark_loop_jump (XEXP (x, 1), loop);
2992 return;
2993
2994 case LO_SUM:
2995 /* This may refer to a LABEL_REF or SYMBOL_REF. */
2996 mark_loop_jump (XEXP (x, 1), loop);
2997 return;
2998
2999 case SIGN_EXTEND:
3000 case ZERO_EXTEND:
3001 mark_loop_jump (XEXP (x, 0), loop);
3002 return;
3003
3004 case LABEL_REF:
3005 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
3006
3007 /* Link together all labels that branch outside the loop. This
3008 is used by final_[bg]iv_value and the loop unrolling code. Also
3009 mark this LABEL_REF so we know that this branch should predict
3010 false. */
3011
3012 /* A check to make sure the label is not in an inner nested loop,
3013 since this does not count as a loop exit. */
3014 if (dest_loop)
3015 {
3016 for (outer_loop = dest_loop; outer_loop;
3017 outer_loop = outer_loop->outer)
3018 if (outer_loop == loop)
3019 break;
3020 }
3021 else
3022 outer_loop = NULL;
3023
3024 if (loop && ! outer_loop)
3025 {
3026 LABEL_OUTSIDE_LOOP_P (x) = 1;
3027 LABEL_NEXTREF (x) = loop->exit_labels;
3028 loop->exit_labels = x;
3029
3030 for (outer_loop = loop;
3031 outer_loop && outer_loop != dest_loop;
3032 outer_loop = outer_loop->outer)
3033 outer_loop->exit_count++;
3034 }
3035
3036 /* If this is inside a loop, but not in the current loop or one enclosed
3037 by it, it invalidates at least one loop. */
3038
3039 if (! dest_loop)
3040 return;
3041
3042 /* We must invalidate every nested loop containing the target of this
3043 label, except those that also contain the jump insn. */
3044
3045 for (; dest_loop; dest_loop = dest_loop->outer)
3046 {
3047 /* Stop when we reach a loop that also contains the jump insn. */
3048 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3049 if (dest_loop == outer_loop)
3050 return;
3051
3052 /* If we get here, we know we need to invalidate a loop. */
3053 if (loop_dump_stream && ! dest_loop->invalid)
3054 fprintf (loop_dump_stream,
3055 "\nLoop at %d ignored due to multiple entry points.\n",
3056 INSN_UID (dest_loop->start));
3057
3058 dest_loop->invalid = 1;
3059 }
3060 return;
3061
3062 case SET:
3063 /* If this is not setting pc, ignore. */
3064 if (SET_DEST (x) == pc_rtx)
3065 mark_loop_jump (SET_SRC (x), loop);
3066 return;
3067
3068 case IF_THEN_ELSE:
3069 mark_loop_jump (XEXP (x, 1), loop);
3070 mark_loop_jump (XEXP (x, 2), loop);
3071 return;
3072
3073 case PARALLEL:
3074 case ADDR_VEC:
3075 for (i = 0; i < XVECLEN (x, 0); i++)
3076 mark_loop_jump (XVECEXP (x, 0, i), loop);
3077 return;
3078
3079 case ADDR_DIFF_VEC:
3080 for (i = 0; i < XVECLEN (x, 1); i++)
3081 mark_loop_jump (XVECEXP (x, 1, i), loop);
3082 return;
3083
3084 default:
3085 /* Strictly speaking this is not a jump into the loop, only a possible
3086 jump out of the loop. However, we have no way to link the destination
3087 of this jump onto the list of exit labels. To be safe we mark this
3088 loop and any containing loops as invalid. */
3089 if (loop)
3090 {
3091 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
3092 {
3093 if (loop_dump_stream && ! outer_loop->invalid)
3094 fprintf (loop_dump_stream,
3095 "\nLoop at %d ignored due to unknown exit jump.\n",
3096 INSN_UID (outer_loop->start));
3097 outer_loop->invalid = 1;
3098 }
3099 }
3100 return;
3101 }
3102 }
3103 \f
3104 /* Return nonzero if there is a label in the range from
3105 insn INSN to and including the insn whose luid is END.
3106 INSN must have an assigned luid (i.e., it must not have
3107 been previously created by loop.c). */
3108
3109 static int
3110 labels_in_range_p (insn, end)
3111 rtx insn;
3112 int end;
3113 {
3114 while (insn && INSN_LUID (insn) <= end)
3115 {
3116 if (GET_CODE (insn) == CODE_LABEL)
3117 return 1;
3118 insn = NEXT_INSN (insn);
3119 }
3120
3121 return 0;
3122 }
3123
3124 /* Record that a memory reference X is being set. */
3125
3126 static void
3127 note_addr_stored (x, y, data)
3128 rtx x;
3129 rtx y ATTRIBUTE_UNUSED;
3130 void *data ATTRIBUTE_UNUSED;
3131 {
3132 struct loop_info *loop_info = data;
3133
3134 if (x == 0 || GET_CODE (x) != MEM)
3135 return;
3136
3137 /* Count number of memory writes.
3138 This affects heuristics in strength_reduce. */
3139 loop_info->num_mem_sets++;
3140
3141 /* BLKmode MEM means all memory is clobbered. */
3142 if (GET_MODE (x) == BLKmode)
3143 {
3144 if (RTX_UNCHANGING_P (x))
3145 loop_info->unknown_constant_address_altered = 1;
3146 else
3147 loop_info->unknown_address_altered = 1;
3148
3149 return;
3150 }
3151
3152 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
3153 loop_info->store_mems);
3154 }
3155
3156 /* X is a value modified by an INSN that references a biv inside a loop
3157 exit test (i.e., X is somehow related to the value of the biv). If X
3158 is a pseudo that is used more than once, then the biv is (effectively)
3159 used more than once. DATA is a pointer to a loop_regs structure. */
3160
3161 static void
3162 note_set_pseudo_multiple_uses (x, y, data)
3163 rtx x;
3164 rtx y ATTRIBUTE_UNUSED;
3165 void *data;
3166 {
3167 struct loop_regs *regs = (struct loop_regs *) data;
3168
3169 if (x == 0)
3170 return;
3171
3172 while (GET_CODE (x) == STRICT_LOW_PART
3173 || GET_CODE (x) == SIGN_EXTRACT
3174 || GET_CODE (x) == ZERO_EXTRACT
3175 || GET_CODE (x) == SUBREG)
3176 x = XEXP (x, 0);
3177
3178 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3179 return;
3180
3181 /* If we do not have usage information, or if we know the register
3182 is used more than once, note that fact for check_dbra_loop. */
3183 if (REGNO (x) >= max_reg_before_loop
3184 || ! regs->array[REGNO (x)].single_usage
3185 || regs->array[REGNO (x)].single_usage == const0_rtx)
3186 regs->multiple_uses = 1;
3187 }
3188 \f
3189 /* Return nonzero if the rtx X is invariant over the current loop.
3190
3191 The value is 2 if we refer to something only conditionally invariant.
3192
3193 A memory ref is invariant if it is not volatile and does not conflict
3194 with anything stored in `loop_info->store_mems'. */
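/* Illustrative examples of the return value (a sketch only):
   - (const_int 4), a SYMBOL_REF, or the frame pointer (absent a
     nonlocal goto) give 1;
   - a pseudo whose regs->array[...].set_in_loop count is negative
     gives 2, i.e. "only conditionally invariant", and so does any
     expression containing such a register;
   - a volatile MEM, or a MEM that may conflict with an entry of
     loop_info->store_mems, gives 0.  */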
3195
3196 int
3197 loop_invariant_p (loop, x)
3198 const struct loop *loop;
3199 rtx x;
3200 {
3201 struct loop_info *loop_info = LOOP_INFO (loop);
3202 struct loop_regs *regs = LOOP_REGS (loop);
3203 int i;
3204 enum rtx_code code;
3205 const char *fmt;
3206 int conditional = 0;
3207 rtx mem_list_entry;
3208
3209 if (x == 0)
3210 return 1;
3211 code = GET_CODE (x);
3212 switch (code)
3213 {
3214 case CONST_INT:
3215 case CONST_DOUBLE:
3216 case SYMBOL_REF:
3217 case CONST:
3218 return 1;
3219
3220 case LABEL_REF:
3221 /* A LABEL_REF is normally invariant, however, if we are unrolling
3222 loops, and this label is inside the loop, then it isn't invariant.
3223 This is because each unrolled copy of the loop body will have
3224 a copy of this label. If this was invariant, then an insn loading
3225 the address of this label into a register might get moved outside
3226 the loop, and then each loop body would end up using the same label.
3227
3228 We don't know the loop bounds here though, so just fail for all
3229 labels. */
3230 if (flag_unroll_loops)
3231 return 0;
3232 else
3233 return 1;
3234
3235 case PC:
3236 case CC0:
3237 case UNSPEC_VOLATILE:
3238 return 0;
3239
3240 case REG:
3241 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3242 since the reg might be set by initialization within the loop. */
3243
3244 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3245 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
3246 && ! current_function_has_nonlocal_goto)
3247 return 1;
3248
3249 if (LOOP_INFO (loop)->has_call
3250 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3251 return 0;
3252
3253 if (regs->array[REGNO (x)].set_in_loop < 0)
3254 return 2;
3255
3256 return regs->array[REGNO (x)].set_in_loop == 0;
3257
3258 case MEM:
3259 /* Volatile memory references must be rejected. Do this before
3260 checking for read-only items, so that volatile read-only items
3261 will be rejected also. */
3262 if (MEM_VOLATILE_P (x))
3263 return 0;
3264
3265 /* See if there is any dependence between a store and this load. */
3266 mem_list_entry = loop_info->store_mems;
3267 while (mem_list_entry)
3268 {
3269 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3270 x, rtx_varies_p))
3271 return 0;
3272
3273 mem_list_entry = XEXP (mem_list_entry, 1);
3274 }
3275
3276 /* It's not invalidated by a store in memory
3277 but we must still verify the address is invariant. */
3278 break;
3279
3280 case ASM_OPERANDS:
3281 /* Don't mess with insns declared volatile. */
3282 if (MEM_VOLATILE_P (x))
3283 return 0;
3284 break;
3285
3286 default:
3287 break;
3288 }
3289
3290 fmt = GET_RTX_FORMAT (code);
3291 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3292 {
3293 if (fmt[i] == 'e')
3294 {
3295 int tem = loop_invariant_p (loop, XEXP (x, i));
3296 if (tem == 0)
3297 return 0;
3298 if (tem == 2)
3299 conditional = 1;
3300 }
3301 else if (fmt[i] == 'E')
3302 {
3303 int j;
3304 for (j = 0; j < XVECLEN (x, i); j++)
3305 {
3306 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
3307 if (tem == 0)
3308 return 0;
3309 if (tem == 2)
3310 conditional = 1;
3311 }
3312
3313 }
3314 }
3315
3316 return 1 + conditional;
3317 }
3318 \f
3319 /* Return nonzero if all the insns in the loop that set REG
3320 are INSN and the immediately following insns,
3321 and if each of those insns sets REG in an invariant way
3322 (not counting uses of REG in them).
3323
3324 The value is 2 if some of these insns are only conditionally invariant.
3325
3326 We assume that INSN itself is the first set of REG
3327 and that its source is invariant. */
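/* A hedged example (made-up insns): if INSN is
     (set (reg 100) (const_int 0))
   and the next set of reg 100 is
     (set (reg 100) (plus (reg 100) (reg 101)))
   where reg 101 is loop invariant, then calling this with n_sets == 2
   returns nonzero: the use of reg 100 in the second set is ignored
   because its set_in_loop count is temporarily cleared below.  */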
3328
3329 static int
3330 consec_sets_invariant_p (loop, reg, n_sets, insn)
3331 const struct loop *loop;
3332 int n_sets;
3333 rtx reg, insn;
3334 {
3335 struct loop_regs *regs = LOOP_REGS (loop);
3336 rtx p = insn;
3337 unsigned int regno = REGNO (reg);
3338 rtx temp;
3339 /* Number of sets we have to insist on finding after INSN. */
3340 int count = n_sets - 1;
3341 int old = regs->array[regno].set_in_loop;
3342 int value = 0;
3343 int this;
3344
3345 /* If N_SETS hit the limit, we can't rely on its value. */
3346 if (n_sets == 127)
3347 return 0;
3348
3349 regs->array[regno].set_in_loop = 0;
3350
3351 while (count > 0)
3352 {
3353 enum rtx_code code;
3354 rtx set;
3355
3356 p = NEXT_INSN (p);
3357 code = GET_CODE (p);
3358
3359 /* If library call, skip to end of it. */
3360 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3361 p = XEXP (temp, 0);
3362
3363 this = 0;
3364 if (code == INSN
3365 && (set = single_set (p))
3366 && GET_CODE (SET_DEST (set)) == REG
3367 && REGNO (SET_DEST (set)) == regno)
3368 {
3369 this = loop_invariant_p (loop, SET_SRC (set));
3370 if (this != 0)
3371 value |= this;
3372 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
3373 {
3374 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3375 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3376 notes are OK. */
3377 this = (CONSTANT_P (XEXP (temp, 0))
3378 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3379 && loop_invariant_p (loop, XEXP (temp, 0))));
3380 if (this != 0)
3381 value |= this;
3382 }
3383 }
3384 if (this != 0)
3385 count--;
3386 else if (code != NOTE)
3387 {
3388 regs->array[regno].set_in_loop = old;
3389 return 0;
3390 }
3391 }
3392
3393 regs->array[regno].set_in_loop = old;
3394 /* If loop_invariant_p ever returned 2, we return 2. */
3395 return 1 + (value & 2);
3396 }
3397
3398 #if 0
3399 /* I don't think this condition is sufficient to allow INSN
3400 to be moved, so we no longer test it. */
3401
3402 /* Return 1 if all insns in the basic block of INSN and following INSN
3403 that set REG are invariant according to TABLE. */
3404
3405 static int
3406 all_sets_invariant_p (reg, insn, table)
3407 rtx reg, insn;
3408 short *table;
3409 {
3410 rtx p = insn;
3411 int regno = REGNO (reg);
3412
3413 while (1)
3414 {
3415 enum rtx_code code;
3416 p = NEXT_INSN (p);
3417 code = GET_CODE (p);
3418 if (code == CODE_LABEL || code == JUMP_INSN)
3419 return 1;
3420 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3421 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3422 && REGNO (SET_DEST (PATTERN (p))) == regno)
3423 {
3424 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
3425 return 0;
3426 }
3427 }
3428 }
3429 #endif /* 0 */
3430 \f
3431 /* Look at all uses (not sets) of registers in X. For each, if it is
3432 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3433 a different insn, set USAGE[REGNO] to const0_rtx. */
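/* Sketch of the resulting encoding (not stated this way in the
   original sources, but it follows from the code below): after the
   loop has been scanned,
     regs->array[r].single_usage == 0           means reg r was never used,
     regs->array[r].single_usage == some insn   means all of its uses are
                                                in that one insn,
     regs->array[r].single_usage == const0_rtx  means it is used in more
                                                than one insn.  */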
3434
3435 static void
3436 find_single_use_in_loop (regs, insn, x)
3437 struct loop_regs *regs;
3438 rtx insn;
3439 rtx x;
3440 {
3441 enum rtx_code code = GET_CODE (x);
3442 const char *fmt = GET_RTX_FORMAT (code);
3443 int i, j;
3444
3445 if (code == REG)
3446 regs->array[REGNO (x)].single_usage
3447 = (regs->array[REGNO (x)].single_usage != 0
3448 && regs->array[REGNO (x)].single_usage != insn)
3449 ? const0_rtx : insn;
3450
3451 else if (code == SET)
3452 {
3453 /* Don't count SET_DEST if it is a REG; otherwise count things
3454 in SET_DEST because if a register is partially modified, it won't
3455 show up as a potential movable so we don't care how USAGE is set
3456 for it. */
3457 if (GET_CODE (SET_DEST (x)) != REG)
3458 find_single_use_in_loop (regs, insn, SET_DEST (x));
3459 find_single_use_in_loop (regs, insn, SET_SRC (x));
3460 }
3461 else
3462 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3463 {
3464 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3465 find_single_use_in_loop (regs, insn, XEXP (x, i));
3466 else if (fmt[i] == 'E')
3467 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3468 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
3469 }
3470 }
3471 \f
3472 /* Count and record any set in X which is contained in INSN. Update
3473 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3474 in X. */
3475
3476 static void
3477 count_one_set (regs, insn, x, last_set)
3478 struct loop_regs *regs;
3479 rtx insn, x;
3480 rtx *last_set;
3481 {
3482 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3483 /* Don't move a reg that has an explicit clobber.
3484 It's not worth the pain to try to do it correctly. */
3485 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
3486
3487 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3488 {
3489 rtx dest = SET_DEST (x);
3490 while (GET_CODE (dest) == SUBREG
3491 || GET_CODE (dest) == ZERO_EXTRACT
3492 || GET_CODE (dest) == SIGN_EXTRACT
3493 || GET_CODE (dest) == STRICT_LOW_PART)
3494 dest = XEXP (dest, 0);
3495 if (GET_CODE (dest) == REG)
3496 {
3497 int i;
3498 int regno = REGNO (dest);
3499 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
3500 {
3501 /* If this is the first setting of this reg
3502 in the current basic block, and it was set before,
3503 it must be set in two basic blocks, so it cannot
3504 be moved out of the loop. */
3505 if (regs->array[regno].set_in_loop > 0
3506 && last_set == 0)
3507 regs->array[regno+i].may_not_optimize = 1;
3508 /* If this is not the first setting in the current basic block,
3509 see if the reg was used between the previous one and this one.
3510 If so, neither one can be moved.
3511 if (last_set[regno] != 0
3512 && reg_used_between_p (dest, last_set[regno], insn))
3513 regs->array[regno+i].may_not_optimize = 1;
3514 if (regs->array[regno+i].set_in_loop < 127)
3515 ++regs->array[regno+i].set_in_loop;
3516 last_set[regno+i] = insn;
3517 }
3518 }
3519 }
3520 }
3521 \f
3522 /* Given a loop that is bounded by LOOP->START and LOOP->END and that
3523 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3524 contained in insn INSN is used by any insn that precedes INSN in
3525 cyclic order starting from the loop entry point.
3526
3527 We don't want to use INSN_LUID here because if we restrict INSN to those
3528 that have a valid INSN_LUID, it means we cannot move an invariant out
3529 from an inner loop past two loops. */
3530
3531 static int
3532 loop_reg_used_before_p (loop, set, insn)
3533 const struct loop *loop;
3534 rtx set, insn;
3535 {
3536 rtx reg = SET_DEST (set);
3537 rtx p;
3538
3539 /* Scan forward checking for register usage. If we hit INSN, we
3540 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3541 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
3542 {
3543 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
3544 return 1;
3545
3546 if (p == loop->end)
3547 p = loop->start;
3548 }
3549
3550 return 0;
3551 }
3552 \f
3553
3554 /* Information we collect about arrays that we might want to prefetch. */
3555 struct prefetch_info
3556 {
3557 struct iv_class *class; /* Class this prefetch is based on. */
3558 struct induction *giv; /* GIV this prefetch is based on. */
3559 rtx base_address; /* Start prefetching from this address plus
3560 index. */
3561 HOST_WIDE_INT index;
3562 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3563 iteration. */
3564 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
3565 prefetch area in one iteration. */
3566 unsigned int total_bytes; /* Total bytes loop will access in this block.
3567 This is set only for loops with known
3568 iteration counts and is 0xffffffff
3569 otherwise. */
3570 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3571 int prefetch_before_loop; /* Number of prefetch insns before loop. */
3572 unsigned int write : 1; /* 1 for read/write prefetches. */
3573 };
3574
3575 /* Data used by check_store function. */
3576 struct check_store_data
3577 {
3578 rtx mem_address;
3579 int mem_write;
3580 };
3581
3582 static void check_store PARAMS ((rtx, rtx, void *));
3583 static void emit_prefetch_instructions PARAMS ((struct loop *));
3584 static int rtx_equal_for_prefetch_p PARAMS ((rtx, rtx));
3585
3586 /* Set mem_write when mem_address is found. Used as callback to
3587 note_stores. */
3588 static void
3589 check_store (x, pat, data)
3590 rtx x, pat ATTRIBUTE_UNUSED;
3591 void *data;
3592 {
3593 struct check_store_data *d = (struct check_store_data *) data;
3594
3595 if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3596 d->mem_write = 1;
3597 }
3598 \f
3599 /* Like rtx_equal_p, but attempts to swap commutative operands. This is
3600 important for getting some addresses combined. More sophisticated
3601 transformations can be added later when necessary.
3602
3603 ??? The same operand-swapping trick is used in several other places.
3604 It would be nice to develop a common way to handle this. */
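/* Thus, for example, (plus (reg 100) (reg 101)) and (plus (reg 101) (reg 100))
   compare equal here even though a plain structural comparison would reject
   them, which lets two accesses to the same array whose addresses were
   canonicalized differently share a single prefetch.  */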
3605
3606 static int
3607 rtx_equal_for_prefetch_p (x, y)
3608 rtx x, y;
3609 {
3610 int i;
3611 int j;
3612 enum rtx_code code = GET_CODE (x);
3613 const char *fmt;
3614
3615 if (x == y)
3616 return 1;
3617 if (code != GET_CODE (y))
3618 return 0;
3619
3620 code = GET_CODE (x);
3621
3622 if (GET_RTX_CLASS (code) == 'c')
3623 {
3624 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3625 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3626 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3627 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3628 }
3629 /* Compare the elements. If any pair of corresponding elements fails to
3630 match, return 0 for the whole thing. */
3631
3632 fmt = GET_RTX_FORMAT (code);
3633 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3634 {
3635 switch (fmt[i])
3636 {
3637 case 'w':
3638 if (XWINT (x, i) != XWINT (y, i))
3639 return 0;
3640 break;
3641
3642 case 'i':
3643 if (XINT (x, i) != XINT (y, i))
3644 return 0;
3645 break;
3646
3647 case 'E':
3648 /* Two vectors must have the same length. */
3649 if (XVECLEN (x, i) != XVECLEN (y, i))
3650 return 0;
3651
3652 /* And the corresponding elements must match. */
3653 for (j = 0; j < XVECLEN (x, i); j++)
3654 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3655 XVECEXP (y, i, j)) == 0)
3656 return 0;
3657 break;
3658
3659 case 'e':
3660 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3661 return 0;
3662 break;
3663
3664 case 's':
3665 if (strcmp (XSTR (x, i), XSTR (y, i)))
3666 return 0;
3667 break;
3668
3669 case 'u':
3670 /* These are just backpointers, so they don't matter. */
3671 break;
3672
3673 case '0':
3674 break;
3675
3676 /* It is believed that rtx's at this level will never
3677 contain anything but integers and other rtx's,
3678 except for within LABEL_REFs and SYMBOL_REFs. */
3679 default:
3680 abort ();
3681 }
3682 }
3683 return 1;
3684 }
3685 \f
3686 /* Remove constant addition value from the expression X (when present)
3687 and return it. */
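/* For example, given *X == (plus (reg 100) (const_int 16)) this returns 16
   and leaves *X == (reg 100); given a plain (const_int 8) it returns 8 and
   leaves *X == const0_rtx.  */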
3688
3689 static HOST_WIDE_INT
3690 remove_constant_addition (x)
3691 rtx *x;
3692 {
3693 HOST_WIDE_INT addval = 0;
3694 rtx exp = *x;
3695
3696 /* Avoid clobbering a shared CONST expression. */
3697 if (GET_CODE (exp) == CONST)
3698 {
3699 if (GET_CODE (XEXP (exp, 0)) == PLUS
3700 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3701 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3702 {
3703 *x = XEXP (XEXP (exp, 0), 0);
3704 return INTVAL (XEXP (XEXP (exp, 0), 1));
3705 }
3706 return 0;
3707 }
3708
3709 if (GET_CODE (exp) == CONST_INT)
3710 {
3711 addval = INTVAL (exp);
3712 *x = const0_rtx;
3713 }
3714
3715 /* For a PLUS expression, recurse on both operands. */
3716 else if (GET_CODE (exp) == PLUS)
3717 {
3718 addval += remove_constant_addition (&XEXP (exp, 0));
3719 addval += remove_constant_addition (&XEXP (exp, 1));
3720
3721 /* If either operand was constant, remove the resulting extra zero from the
3722 expression. */
3723 if (XEXP (exp, 0) == const0_rtx)
3724 *x = XEXP (exp, 1);
3725 else if (XEXP (exp, 1) == const0_rtx)
3726 *x = XEXP (exp, 0);
3727 }
3728
3729 return addval;
3730 }
3731
3732 /* Attempt to identify accesses to arrays that are most likely to cause cache
3733 misses, and emit prefetch instructions a few prefetch blocks forward.
3734
3735 To detect the arrays we use the GIV information that was collected by the
3736 strength reduction pass.
3737
3738 The prefetch instructions are generated after the GIV information is done
3739 and before the strength reduction process. The new GIVs are injected into
3740 the strength reduction tables, so the prefetch addresses are optimized as
3741 well.
3742
3743 GIVs are split into base address, stride, and constant addition values.
3744 GIVs with the same address, stride, and close addition values are combined
3745 into a single prefetch. Writes through GIVs are also detected, so that
3746 read/write prefetches can be used for the blocks we write to, on machines
3747 that support write prefetches.
3748
3749 Several heuristics are used to determine when to prefetch. They are
3750 controlled by defined symbols that can be overridden for each target. */
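/* As a rough example, for a loop like

   for (i = 0; i < n; i++)
   sum += a[i];

   the address giv for a[i] decomposes into base address `a', a stride of
   `sizeof (a[0])' bytes per iteration, and a constant addition of 0. The pass
   then emits the equivalent of prefetching the giv's address plus
   AHEAD * PREFETCH_BLOCK bytes inside the loop (and a few prefetches of the
   first cache lines before the loop), where AHEAD is computed below from
   SIMULTANEOUS_PREFETCHES and the number of prefetch insns needed.  */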
3751
3752 static void
3753 emit_prefetch_instructions (loop)
3754 struct loop *loop;
3755 {
3756 int num_prefetches = 0;
3757 int num_real_prefetches = 0;
3758 int num_real_write_prefetches = 0;
3759 int num_prefetches_before = 0;
3760 int num_write_prefetches_before = 0;
3761 int ahead = 0;
3762 int i;
3763 struct iv_class *bl;
3764 struct induction *iv;
3765 struct prefetch_info info[MAX_PREFETCHES];
3766 struct loop_ivs *ivs = LOOP_IVS (loop);
3767
3768 if (!HAVE_prefetch)
3769 return;
3770
3771 /* Consider only loops without calls. When a call is made, the loop is
3772 probably slow enough already to hide the memory latency. */
3773 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3774 {
3775 if (loop_dump_stream)
3776 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
3777
3778 return;
3779 }
3780
3781 /* Don't prefetch in loops known to have few iterations. */
3782 if (PREFETCH_NO_LOW_LOOPCNT
3783 && LOOP_INFO (loop)->n_iterations
3784 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3785 {
3786 if (loop_dump_stream)
3787 fprintf (loop_dump_stream,
3788 "Prefetch: ignoring loop: not enough iterations.\n");
3789 return;
3790 }
3791
3792 /* Search all induction variables and pick those that are interesting to the
3793 prefetch machinery. */
3794 for (bl = ivs->list; bl; bl = bl->next)
3795 {
3796 struct induction *biv = bl->biv, *biv1;
3797 int basestride = 0;
3798
3799 biv1 = biv;
3800
3801 /* Expect all BIVs to be executed in each iteration. This makes our
3802 analysis more conservative. */
3803 while (biv1)
3804 {
3805 /* Discard non-constant additions that we can't handle well yet, and
3806 BIVs that are executed multiple times; such BIVs ought to be
3807 handled in the nested loop. We accept not_every_iteration BIVs,
3808 since these only result in larger strides and make our
3809 heuristics more conservative. */
3810 if (GET_CODE (biv->add_val) != CONST_INT)
3811 {
3812 if (loop_dump_stream)
3813 {
3814 fprintf (loop_dump_stream,
3815 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
3816 REGNO (biv->src_reg), INSN_UID (biv->insn));
3817 print_rtl (loop_dump_stream, biv->add_val);
3818 fprintf (loop_dump_stream, "\n");
3819 }
3820 break;
3821 }
3822
3823 if (biv->maybe_multiple)
3824 {
3825 if (loop_dump_stream)
3826 {
3827 fprintf (loop_dump_stream,
3828 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
3829 REGNO (biv->src_reg), INSN_UID (biv->insn));
3830 print_rtl (loop_dump_stream, biv->add_val);
3831 fprintf (loop_dump_stream, "\n");
3832 }
3833 break;
3834 }
3835
3836 basestride += INTVAL (biv1->add_val);
3837 biv1 = biv1->next_iv;
3838 }
3839
3840 if (biv1 || !basestride)
3841 continue;
3842
3843 for (iv = bl->giv; iv; iv = iv->next_iv)
3844 {
3845 rtx address;
3846 rtx temp;
3847 HOST_WIDE_INT index = 0;
3848 int add = 1;
3849 HOST_WIDE_INT stride = 0;
3850 int stride_sign = 1;
3851 struct check_store_data d;
3852 const char *ignore_reason = NULL;
3853 int size = GET_MODE_SIZE (GET_MODE (iv));
3854
3855 /* See whether an induction variable is interesting to us and if
3856 not, report the reason. */
3857 if (iv->giv_type != DEST_ADDR)
3858 ignore_reason = "giv is not a destination address";
3859
3860 /* We are interested only in constant stride memory references
3861 in order to be able to compute density easily. */
3862 else if (GET_CODE (iv->mult_val) != CONST_INT)
3863 ignore_reason = "stride is not constant";
3864
3865 else
3866 {
3867 stride = INTVAL (iv->mult_val) * basestride;
3868 if (stride < 0)
3869 {
3870 stride = -stride;
3871 stride_sign = -1;
3872 }
3873
3874 /* On some targets, reversed order prefetches are not
3875 worthwhile. */
3876 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
3877 ignore_reason = "reversed order stride";
3878
3879 /* Prefetch of accesses with an extreme stride might not be
3880 worthwhile, either. */
3881 else if (PREFETCH_NO_EXTREME_STRIDE
3882 && stride > PREFETCH_EXTREME_STRIDE)
3883 ignore_reason = "extreme stride";
3884
3885 /* Ignore GIVs with varying add values; we can't predict the
3886 value for the next iteration. */
3887 else if (!loop_invariant_p (loop, iv->add_val))
3888 ignore_reason = "giv has varying add value";
3889
3890 /* Ignore GIVs in the nested loops; they ought to have been
3891 handled already. */
3892 else if (iv->maybe_multiple)
3893 ignore_reason = "giv is in nested loop";
3894 }
3895
3896 if (ignore_reason != NULL)
3897 {
3898 if (loop_dump_stream)
3899 fprintf (loop_dump_stream,
3900 "Prefetch: ignoring giv at %d: %s.\n",
3901 INSN_UID (iv->insn), ignore_reason);
3902 continue;
3903 }
3904
3905 /* Determine the pointer to the basic array we are examining. It is
3906 the sum of the BIV's initial value and the GIV's add_val. */
3907 address = copy_rtx (iv->add_val);
3908 temp = copy_rtx (bl->initial_value);
3909
3910 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3911 index = remove_constant_addition (&address);
3912
3913 d.mem_write = 0;
3914 d.mem_address = *iv->location;
3915
3916 /* When the GIV is not always executed, we might be better off by
3917 not dirtying the cache pages. */
3918 if (PREFETCH_CONDITIONAL || iv->always_executed)
3919 note_stores (PATTERN (iv->insn), check_store, &d);
3920 else
3921 {
3922 if (loop_dump_stream)
3923 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
3924 INSN_UID (iv->insn), "in conditional code.");
3925 continue;
3926 }
3927
3928 /* Attempt to find another prefetch to the same array and see if we
3929 can merge this one. */
3930 for (i = 0; i < num_prefetches; i++)
3931 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3932 && stride == info[i].stride)
3933 {
3934 /* If both accesses are to the same array (the same location,
3935 differing only by a small constant index), merge the prefetches.
3936 Just prefetch the later one; the earlier one will get prefetched
3937 by a previous iteration.
3938 The artificial threshold should not be too small, but also not
3939 bigger than the small portion of memory usually traversed by a
3940 single loop. */
3941 if (index >= info[i].index
3942 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
3943 {
3944 info[i].write |= d.mem_write;
3945 info[i].bytes_accessed += size;
3946 info[i].index = index;
3947 info[i].giv = iv;
3948 info[i].class = bl;
3949 info[i].base_address = address;
3950 add = 0;
3951 break;
3952 }
3953
3954 if (index < info[i].index
3955 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
3956 {
3957 info[i].write |= d.mem_write;
3958 info[i].bytes_accessed += size;
3959 add = 0;
3960 break;
3961 }
3962 }
3963
3964 /* Merging failed. */
3965 if (add)
3966 {
3967 info[num_prefetches].giv = iv;
3968 info[num_prefetches].class = bl;
3969 info[num_prefetches].index = index;
3970 info[num_prefetches].stride = stride;
3971 info[num_prefetches].base_address = address;
3972 info[num_prefetches].write = d.mem_write;
3973 info[num_prefetches].bytes_accessed = size;
3974 num_prefetches++;
3975 if (num_prefetches >= MAX_PREFETCHES)
3976 {
3977 if (loop_dump_stream)
3978 fprintf (loop_dump_stream,
3979 "Maximal number of prefetches exceeded.\n");
3980 return;
3981 }
3982 }
3983 }
3984 }
3985
3986 for (i = 0; i < num_prefetches; i++)
3987 {
3988 int density;
3989
3990 /* Attempt to calculate the total number of bytes fetched by all
3991 iterations of the loop. Avoid overflow. */
3992 if (LOOP_INFO (loop)->n_iterations
3993 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
3994 >= LOOP_INFO (loop)->n_iterations))
3995 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
3996 else
3997 info[i].total_bytes = 0xffffffff;
3998
3999 density = info[i].bytes_accessed * 100 / info[i].stride;
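/* DENSITY is the percentage of each stride's bytes that the loop actually
   touches: e.g. accessing one 4-byte element out of every 64-byte stride
   gives a density of 6 (percent).  */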
4000
4001 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4002 if (PREFETCH_ONLY_DENSE_MEM)
4003 if (density * 256 > PREFETCH_DENSE_MEM * 100
4004 && (info[i].total_bytes / PREFETCH_BLOCK
4005 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
4006 {
4007 info[i].prefetch_before_loop = 1;
4008 info[i].prefetch_in_loop
4009 = (info[i].total_bytes / PREFETCH_BLOCK
4010 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
4011 }
4012 else
4013 {
4014 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4015 if (loop_dump_stream)
4016 fprintf (loop_dump_stream,
4017 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4018 INSN_UID (info[i].giv->insn), density);
4019 }
4020 else
4021 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
4022
4023 /* Find how many prefetch instructions we'll use within the loop. */
4024 if (info[i].prefetch_in_loop != 0)
4025 {
4026 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
4027 / PREFETCH_BLOCK);
4028 num_real_prefetches += info[i].prefetch_in_loop;
4029 if (info[i].write)
4030 num_real_write_prefetches += info[i].prefetch_in_loop;
4031 }
4032 }
4033
4034 /* Determine how many iterations ahead to prefetch within the loop, based
4035 on how many prefetches we currently expect to do within the loop. */
4036 if (num_real_prefetches != 0)
4037 {
4038 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4039 {
4040 if (loop_dump_stream)
4041 fprintf (loop_dump_stream,
4042 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4043 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4044 num_real_prefetches = 0, num_real_write_prefetches = 0;
4045 }
4046 }
4047 /* We'll also use AHEAD to determine how many prefetch instructions to
4048 emit before a loop, so don't leave it zero. */
4049 if (ahead == 0)
4050 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
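/* For example, on a hypothetical target with SIMULTANEOUS_PREFETCHES of 4,
   a loop needing 2 prefetch insns per iteration gets AHEAD == 2, so its
   prefetches start 2 * PREFETCH_BLOCK bytes beyond the current accesses;
   with 5 or more prefetch insns AHEAD would compute to 0, in-loop
   prefetching is abandoned, and AHEAD falls back to
   PREFETCH_BLOCKS_BEFORE_LOOP_MAX for the before-loop prefetches.  */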
4051
4052 for (i = 0; i < num_prefetches; i++)
4053 {
4054 /* Update if we've decided not to prefetch anything within the loop. */
4055 if (num_real_prefetches == 0)
4056 info[i].prefetch_in_loop = 0;
4057
4058 /* Find how many prefetch instructions we'll use before the loop. */
4059 if (info[i].prefetch_before_loop != 0)
4060 {
4061 int n = info[i].total_bytes / PREFETCH_BLOCK;
4062 if (n > ahead)
4063 n = ahead;
4064 info[i].prefetch_before_loop = n;
4065 num_prefetches_before += n;
4066 if (info[i].write)
4067 num_write_prefetches_before += n;
4068 }
4069
4070 if (loop_dump_stream)
4071 {
4072 if (info[i].prefetch_in_loop == 0
4073 && info[i].prefetch_before_loop == 0)
4074 continue;
4075 fprintf (loop_dump_stream, "Prefetch insn: %d",
4076 INSN_UID (info[i].giv->insn));
4077 fprintf (loop_dump_stream,
4078 "; in loop: %d; before: %d; %s\n",
4079 info[i].prefetch_in_loop,
4080 info[i].prefetch_before_loop,
4081 info[i].write ? "read/write" : "read only");
4082 fprintf (loop_dump_stream,
4083 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4084 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4085 info[i].bytes_accessed, info[i].total_bytes);
4086 fprintf (loop_dump_stream, " index: ");
4087 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].index);
4088 fprintf (loop_dump_stream, "; stride: ");
4089 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, info[i].stride);
4090 fprintf (loop_dump_stream, "; address: ");
4091 print_rtl (loop_dump_stream, info[i].base_address);
4092 fprintf (loop_dump_stream, "\n");
4093 }
4094 }
4095
4096 if (num_real_prefetches + num_prefetches_before > 0)
4097 {
4098 /* Record that this loop uses prefetch instructions. */
4099 LOOP_INFO (loop)->has_prefetch = 1;
4100
4101 if (loop_dump_stream)
4102 {
4103 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4104 num_real_prefetches, num_real_write_prefetches);
4105 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4106 num_prefetches_before, num_write_prefetches_before);
4107 }
4108 }
4109
4110 for (i = 0; i < num_prefetches; i++)
4111 {
4112 int y;
4113
4114 for (y = 0; y < info[i].prefetch_in_loop; y++)
4115 {
4116 rtx loc = copy_rtx (*info[i].giv->location);
4117 rtx insn;
4118 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4119 rtx before_insn = info[i].giv->insn;
4120 rtx prev_insn = PREV_INSN (info[i].giv->insn);
4121 rtx seq;
4122
4123 /* We can save some effort by offsetting the address on
4124 architectures with offsettable memory references. */
4125 if (offsettable_address_p (0, VOIDmode, loc))
4126 loc = plus_constant (loc, bytes_ahead);
4127 else
4128 {
4129 rtx reg = gen_reg_rtx (Pmode);
4130 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
4131 GEN_INT (bytes_ahead), reg,
4132 0, before_insn);
4133 loc = reg;
4134 }
4135
4136 start_sequence ();
4137 /* Make sure the address operand is valid for prefetch. */
4138 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4139 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4140 loc = force_reg (Pmode, loc);
4141 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4142 GEN_INT (3)));
4143 seq = get_insns ();
4144 end_sequence ();
4145 emit_insn_before (seq, before_insn);
4146
4147 /* Check all insns emitted and record the new GIV
4148 information. */
4149 insn = NEXT_INSN (prev_insn);
4150 while (insn != before_insn)
4151 {
4152 insn = check_insn_for_givs (loop, insn,
4153 info[i].giv->always_executed,
4154 info[i].giv->maybe_multiple);
4155 insn = NEXT_INSN (insn);
4156 }
4157 }
4158
4159 if (PREFETCH_BEFORE_LOOP)
4160 {
4161 /* Emit insns before the loop to fetch the first cache lines or,
4162 if we're not prefetching within the loop, everything we expect
4163 to need. */
4164 for (y = 0; y < info[i].prefetch_before_loop; y++)
4165 {
4166 rtx reg = gen_reg_rtx (Pmode);
4167 rtx loop_start = loop->start;
4168 rtx init_val = info[i].class->initial_value;
4169 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4170 info[i].giv->add_val,
4171 GEN_INT (y * PREFETCH_BLOCK));
4172
4173 /* Functions called by LOOP_IV_ADD_EMIT_BEFORE expect a
4174 non-constant INIT_VAL to have the same mode as REG, which
4175 in this case we know to be Pmode. */
4176 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
4177 init_val = convert_to_mode (Pmode, init_val, 0);
4178 loop_iv_add_mult_emit_before (loop, init_val,
4179 info[i].giv->mult_val,
4180 add_val, reg, 0, loop_start);
4181 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4182 GEN_INT (3)),
4183 loop_start);
4184 }
4185 }
4186 }
4187
4188 return;
4189 }
4190 \f
4191 /* A "basic induction variable" or biv is a pseudo reg that is set
4192 (within this loop) only by incrementing or decrementing it. */
4193 /* A "general induction variable" or giv is a pseudo reg whose
4194 value is a linear function of a biv. */
4195
4196 /* Bivs are recognized by `basic_induction_var';
4197 Givs by `general_induction_var'. */
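/* For example, in a loop like

   for (i = 0; i < n; i++)
   a[i] = 0;

   the counter `i' is a biv: it is set only by adding a constant to itself.
   The address of a[i], computed as `a + i * sizeof (a[0])', is a giv: a
   linear function of the biv.  */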
4198
4199 /* Communication with routines called via `note_stores'. */
4200
4201 static rtx note_insn;
4202
4203 /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
4204
4205 static rtx addr_placeholder;
4206
4207 /* ??? Unfinished optimizations, and possible future optimizations,
4208 for the strength reduction code. */
4209
4210 /* ??? The interaction of biv elimination, and recognition of 'constant'
4211 bivs, may cause problems. */
4212
4213 /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4214 performance problems.
4215
4216 Perhaps don't eliminate things that can be combined with an addressing
4217 mode. Find all givs that have the same biv, mult_val, and add_val;
4218 then for each giv, check to see if its only use dies in a following
4219 memory address. If so, generate a new memory address and check to see
4220 if it is valid. If it is valid, then store the modified memory address,
4221 otherwise, mark the giv as not done so that it will get its own iv. */
4222
4223 /* ??? Could try to optimize branches when it is known that a biv is always
4224 positive. */
4225
4226 /* ??? When replacing a biv in a compare insn, we should replace it with the
4227 closest giv so that an optimized branch can still be recognized by the
4228 combiner, e.g. the VAX acb insn. */
4229
4230 /* ??? Many of the checks involving uid_luid could be simplified if regscan
4231 was rerun in loop_optimize whenever a register was added or moved.
4232 Also, some of the optimizations could be a little less conservative. */
4233 \f
4234 /* Scan the loop body and call FNCALL for each insn. In addition to the
4235 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the
4236 callback.
4237
4238 NOT_EVERY_ITERATION is 1 if the current insn is not known to be executed at
4239 least once for every loop iteration except for the last one.
4240
4241 MAYBE_MULTIPLE is 1 if the current insn may be executed more than once during
4242 a single loop iteration.
4243 */
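/* For instance, insns in the body of an `if' inside the loop are typically
   scanned with NOT_EVERY_ITERATION set, while insns of a nested loop (more
   generally, insns after a CODE_LABEL that a later branch can jump back to)
   are scanned with MAYBE_MULTIPLE set.  */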
4244 void
4245 for_each_insn_in_loop (loop, fncall)
4246 struct loop *loop;
4247 loop_insn_callback fncall;
4248 {
4249 int not_every_iteration = 0;
4250 int maybe_multiple = 0;
4251 int past_loop_latch = 0;
4252 int loop_depth = 0;
4253 rtx p;
4254
4255 /* If loop_scan_start points to the loop exit test, we have to be wary of
4256 subversive use of gotos inside expression statements. */
4257 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4258 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
4259
4260 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
4261 for (p = next_insn_in_loop (loop, loop->scan_start);
4262 p != NULL_RTX;
4263 p = next_insn_in_loop (loop, p))
4264 {
4265 p = fncall (loop, p, not_every_iteration, maybe_multiple);
4266
4267 /* Past CODE_LABEL, we get to insns that may be executed multiple
4268 times. The only way we can be sure that they can't is if every
4269 jump insn between here and the end of the loop either
4270 returns, exits the loop, is a jump to a location that is still
4271 behind the label, or is a jump to the loop start. */
4272
4273 if (GET_CODE (p) == CODE_LABEL)
4274 {
4275 rtx insn = p;
4276
4277 maybe_multiple = 0;
4278
4279 while (1)
4280 {
4281 insn = NEXT_INSN (insn);
4282 if (insn == loop->scan_start)
4283 break;
4284 if (insn == loop->end)
4285 {
4286 if (loop->top != 0)
4287 insn = loop->top;
4288 else
4289 break;
4290 if (insn == loop->scan_start)
4291 break;
4292 }
4293
4294 if (GET_CODE (insn) == JUMP_INSN
4295 && GET_CODE (PATTERN (insn)) != RETURN
4296 && (!any_condjump_p (insn)
4297 || (JUMP_LABEL (insn) != 0
4298 && JUMP_LABEL (insn) != loop->scan_start
4299 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
4300 {
4301 maybe_multiple = 1;
4302 break;
4303 }
4304 }
4305 }
4306
4307 /* Past a jump, we get to insns for which we can't count
4308 on whether they will be executed during each iteration. */
4309 /* This code appears twice in strength_reduce. There is also similar
4310 code in scan_loop. */
4311 if (GET_CODE (p) == JUMP_INSN
4312 /* If we enter the loop in the middle, and scan around to the
4313 beginning, don't set not_every_iteration for that.
4314 This can be any kind of jump, since we want to know if insns
4315 will be executed if the loop is executed. */
4316 && !(JUMP_LABEL (p) == loop->top
4317 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4318 && any_uncondjump_p (p))
4319 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
4320 {
4321 rtx label = 0;
4322
4323 /* If this is a jump outside the loop, then it also doesn't
4324 matter. Check to see if the target of this branch is on the
4325 loop->exit_labels list. */
4326
4327 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
4328 if (XEXP (label, 0) == JUMP_LABEL (p))
4329 break;
4330
4331 if (!label)
4332 not_every_iteration = 1;
4333 }
4334
4335 else if (GET_CODE (p) == NOTE)
4336 {
4337 /* At the virtual top of a converted loop, insns are again known to
4338 be executed each iteration: logically, the loop begins here
4339 even though the exit code has been duplicated.
4340
4341 Insns are also again known to be executed each iteration at
4342 the LOOP_CONT note. */
4343 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4344 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4345 && loop_depth == 0)
4346 not_every_iteration = 0;
4347 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4348 loop_depth++;
4349 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4350 loop_depth--;
4351 }
4352
4353 /* Note if we pass a loop latch. If we do, then we can not clear
4354 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4355 a loop since a jump before the last CODE_LABEL may have started
4356 a new loop iteration.
4357
4358 Note that LOOP_TOP is only set for rotated loops and we need
4359 this check for all loops, so compare against the CODE_LABEL
4360 which immediately follows LOOP_START. */
4361 if (GET_CODE (p) == JUMP_INSN
4362 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
4363 past_loop_latch = 1;
4364
4365 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4366 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4367 or not an insn is known to be executed each iteration of the
4368 loop, whether or not any iterations are known to occur.
4369
4370 Therefore, if we have just passed a label and have no more labels
4371 between here and the test insn of the loop, and we have not passed
4372 a jump to the top of the loop, then we know these insns will be
4373 executed each iteration. */
4374
4375 if (not_every_iteration
4376 && !past_loop_latch
4377 && GET_CODE (p) == CODE_LABEL
4378 && no_labels_between_p (p, loop->end)
4379 && loop_insn_first_p (p, loop->cont))
4380 not_every_iteration = 0;
4381 }
4382 }
4383 \f
4384 static void
4385 loop_bivs_find (loop)
4386 struct loop *loop;
4387 {
4388 struct loop_regs *regs = LOOP_REGS (loop);
4389 struct loop_ivs *ivs = LOOP_IVS (loop);
4390 /* Temporary list pointers for traversing ivs->list. */
4391 struct iv_class *bl, **backbl;
4392
4393 ivs->list = 0;
4394
4395 for_each_insn_in_loop (loop, check_insn_for_bivs);
4396
4397 /* Scan ivs->list to remove all regs that proved not to be bivs.
4398 Make a sanity check against regs->n_times_set. */
4399 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
4400 {
4401 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4402 /* Above happens if register modified by subreg, etc. */
4403 /* Make sure it is not recognized as a basic induction var: */
4404 || regs->array[bl->regno].n_times_set != bl->biv_count
4405 /* If never incremented, it is invariant that we decided not to
4406 move. So leave it alone. */
4407 || ! bl->incremented)
4408 {
4409 if (loop_dump_stream)
4410 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
4411 bl->regno,
4412 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
4413 ? "not induction variable"
4414 : (! bl->incremented ? "never incremented"
4415 : "count error")));
4416
4417 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
4418 *backbl = bl->next;
4419 }
4420 else
4421 {
4422 backbl = &bl->next;
4423
4424 if (loop_dump_stream)
4425 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
4426 }
4427 }
4428 }
4429
4430
4431 /* Determine how BIVs are initialized by looking through the pre-header
4432 extended basic block. */
4433 static void
4434 loop_bivs_init_find (loop)
4435 struct loop *loop;
4436 {
4437 struct loop_ivs *ivs = LOOP_IVS (loop);
4438 /* Temporary list pointers for traversing ivs->list. */
4439 struct iv_class *bl;
4440 int call_seen;
4441 rtx p;
4442
4443 /* Find initial value for each biv by searching backwards from loop_start,
4444 halting at first label. Also record any test condition. */
4445
4446 call_seen = 0;
4447 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
4448 {
4449 rtx test;
4450
4451 note_insn = p;
4452
4453 if (GET_CODE (p) == CALL_INSN)
4454 call_seen = 1;
4455
4456 if (INSN_P (p))
4457 note_stores (PATTERN (p), record_initial, ivs);
4458
4459 /* Record any test of a biv that branches around the loop if no store
4460 between it and the start of loop. We only care about tests with
4461 constants and registers and only certain of those. */
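/* For example, when a loop is guarded by `if (x == 10)', the branch emitted
   around the loop tests `x != 10'; falling through into the loop then implies
   x == 10, so if `x' is a biv we can record 10 as its initial value via
   init_set below. Tests with other comparison codes are only remembered as
   initial_test.  */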
4462 if (GET_CODE (p) == JUMP_INSN
4463 && JUMP_LABEL (p) != 0
4464 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
4465 && (test = get_condition_for_loop (loop, p)) != 0
4466 && GET_CODE (XEXP (test, 0)) == REG
4467 && REGNO (XEXP (test, 0)) < max_reg_before_loop
4468 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
4469 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
4470 && bl->init_insn == 0)
4471 {
4472 /* If an NE test, we have an initial value! */
4473 if (GET_CODE (test) == NE)
4474 {
4475 bl->init_insn = p;
4476 bl->init_set = gen_rtx_SET (VOIDmode,
4477 XEXP (test, 0), XEXP (test, 1));
4478 }
4479 else
4480 bl->initial_test = test;
4481 }
4482 }
4483 }
4484
4485
4486 /* Look at each biv and see if we can say anything better about its
4487 initial value from any initializing insns set up above. (This is done
4488 in two passes to avoid missing SETs in a PARALLEL.) */
4489 static void
4490 loop_bivs_check (loop)
4491 struct loop *loop;
4492 {
4493 struct loop_ivs *ivs = LOOP_IVS (loop);
4494 /* Temporary list pointers for traversing ivs->list. */
4495 struct iv_class *bl;
4496 struct iv_class **backbl;
4497
4498 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
4499 {
4500 rtx src;
4501 rtx note;
4502
4503 if (! bl->init_insn)
4504 continue;
4505
4506 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4507 is a constant, use that value. */
4508 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4509 && CONSTANT_P (XEXP (note, 0)))
4510 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4511 && CONSTANT_P (XEXP (note, 0))))
4512 src = XEXP (note, 0);
4513 else
4514 src = SET_SRC (bl->init_set);
4515
4516 if (loop_dump_stream)
4517 fprintf (loop_dump_stream,
4518 "Biv %d: initialized at insn %d: initial value ",
4519 bl->regno, INSN_UID (bl->init_insn));
4520
4521 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4522 || GET_MODE (src) == VOIDmode)
4523 && valid_initial_value_p (src, bl->init_insn,
4524 LOOP_INFO (loop)->pre_header_has_call,
4525 loop->start))
4526 {
4527 bl->initial_value = src;
4528
4529 if (loop_dump_stream)
4530 {
4531 print_simple_rtl (loop_dump_stream, src);
4532 fputc ('\n', loop_dump_stream);
4533 }
4534 }
4535 /* If we can't make it a giv,
4536 let biv keep initial value of "itself". */
4537 else if (loop_dump_stream)
4538 fprintf (loop_dump_stream, "is complex\n");
4539 }
4540 }
4541
4542
4543 /* Search the loop for general induction variables. */
4544
4545 static void
4546 loop_givs_find (loop)
4547 struct loop* loop;
4548 {
4549 for_each_insn_in_loop (loop, check_insn_for_givs);
4550 }
4551
4552
4553 /* For each giv for which we still don't know whether or not it is
4554 replaceable, check to see if it is replaceable because its final value
4555 can be calculated. */
4556
4557 static void
4558 loop_givs_check (loop)
4559 struct loop *loop;
4560 {
4561 struct loop_ivs *ivs = LOOP_IVS (loop);
4562 struct iv_class *bl;
4563
4564 for (bl = ivs->list; bl; bl = bl->next)
4565 {
4566 struct induction *v;
4567
4568 for (v = bl->giv; v; v = v->next_iv)
4569 if (! v->replaceable && ! v->not_replaceable)
4570 check_final_value (loop, v);
4571 }
4572 }
4573
4574
4575 /* Return non-zero if it is possible to eliminate the biv BL provided
4576 all givs are reduced. This is possible if either the reg is not
4577 used outside the loop, or we can compute what its final value will
4578 be. */
4579
4580 static int
4581 loop_biv_eliminable_p (loop, bl, threshold, insn_count)
4582 struct loop *loop;
4583 struct iv_class *bl;
4584 int threshold;
4585 int insn_count;
4586 {
4587 /* For architectures with a decrement_and_branch_until_zero insn,
4588 don't do this if we put a REG_NONNEG note on the endtest for this
4589 biv. */
4590
4591 #ifdef HAVE_decrement_and_branch_until_zero
4592 if (bl->nonneg)
4593 {
4594 if (loop_dump_stream)
4595 fprintf (loop_dump_stream,
4596 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4597 return 0;
4598 }
4599 #endif
4600
4601 /* Check that biv is used outside loop or if it has a final value.
4602 Compare against bl->init_insn rather than loop->start. We aren't
4603 concerned with any uses of the biv between init_insn and
4604 loop->start since these won't be affected by the value of the biv
4605 elsewhere in the function, so long as init_insn doesn't use the
4606 biv itself. */
4607
4608 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4609 && bl->init_insn
4610 && INSN_UID (bl->init_insn) < max_uid_for_loop
4611 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
4612 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4613 || (bl->final_value = final_biv_value (loop, bl)))
4614 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4615
4616 if (loop_dump_stream)
4617 {
4618 fprintf (loop_dump_stream,
4619 "Cannot eliminate biv %d.\n",
4620 bl->regno);
4621 fprintf (loop_dump_stream,
4622 "First use: insn %d, last use: insn %d.\n",
4623 REGNO_FIRST_UID (bl->regno),
4624 REGNO_LAST_UID (bl->regno));
4625 }
4626 return 0;
4627 }
4628
4629
4630 /* Reduce each giv of BL that we have decided to reduce. */
4631
4632 static void
4633 loop_givs_reduce (loop, bl)
4634 struct loop *loop;
4635 struct iv_class *bl;
4636 {
4637 struct induction *v;
4638
4639 for (v = bl->giv; v; v = v->next_iv)
4640 {
4641 struct induction *tv;
4642 if (! v->ignore && v->same == 0)
4643 {
4644 int auto_inc_opt = 0;
4645
4646 /* If the code for derived givs immediately below has already
4647 allocated a new_reg, we must keep it. */
4648 if (! v->new_reg)
4649 v->new_reg = gen_reg_rtx (v->mode);
4650
4651 #ifdef AUTO_INC_DEC
4652 /* If the target has auto-increment addressing modes, and
4653 this is an address giv, then try to put the increment
4654 immediately after its use, so that flow can create an
4655 auto-increment addressing mode. */
4656 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4657 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4658 /* We don't handle reversed biv's because bl->biv->insn
4659 does not have a valid INSN_LUID. */
4660 && ! bl->reversed
4661 && v->always_executed && ! v->maybe_multiple
4662 && INSN_UID (v->insn) < max_uid_for_loop)
4663 {
4664 /* If other giv's have been combined with this one, then
4665 this will work only if all uses of the other giv's occur
4666 before this giv's insn. This is difficult to check.
4667
4668 We simplify this by looking for the common case where
4669 there is one DEST_REG giv, and this giv's insn is the
4670 last use of the dest_reg of that DEST_REG giv. If the
4671 increment occurs after the address giv, then we can
4672 perform the optimization. (Otherwise, the increment
4673 would have to go before other_giv, and we would not be
4674 able to combine it with the address giv to get an
4675 auto-inc address.) */
4676 if (v->combined_with)
4677 {
4678 struct induction *other_giv = 0;
4679
4680 for (tv = bl->giv; tv; tv = tv->next_iv)
4681 if (tv->same == v)
4682 {
4683 if (other_giv)
4684 break;
4685 else
4686 other_giv = tv;
4687 }
4688 if (! tv && other_giv
4689 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4690 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4691 == INSN_UID (v->insn))
4692 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4693 auto_inc_opt = 1;
4694 }
4695 /* Check for case where increment is before the address
4696 giv. Do this test in "loop order". */
4697 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4698 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4699 || (INSN_LUID (bl->biv->insn)
4700 > INSN_LUID (loop->scan_start))))
4701 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4702 && (INSN_LUID (loop->scan_start)
4703 < INSN_LUID (bl->biv->insn))))
4704 auto_inc_opt = -1;
4705 else
4706 auto_inc_opt = 1;
4707
4708 #ifdef HAVE_cc0
4709 {
4710 rtx prev;
4711
4712 /* We can't put an insn immediately after one setting
4713 cc0, or immediately before one using cc0. */
4714 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4715 || (auto_inc_opt == -1
4716 && (prev = prev_nonnote_insn (v->insn)) != 0
4717 && INSN_P (prev)
4718 && sets_cc0_p (PATTERN (prev))))
4719 auto_inc_opt = 0;
4720 }
4721 #endif
4722
4723 if (auto_inc_opt)
4724 v->auto_inc_opt = 1;
4725 }
4726 #endif
4727
4728 /* For each place where the biv is incremented, add an insn
4729 to increment the new, reduced reg for the giv. */
4730 for (tv = bl->biv; tv; tv = tv->next_iv)
4731 {
4732 rtx insert_before;
4733
4734 if (! auto_inc_opt)
4735 insert_before = tv->insn;
4736 else if (auto_inc_opt == 1)
4737 insert_before = NEXT_INSN (v->insn);
4738 else
4739 insert_before = v->insn;
4740
4741 if (tv->mult_val == const1_rtx)
4742 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4743 v->new_reg, v->new_reg,
4744 0, insert_before);
4745 else /* tv->mult_val == const0_rtx */
4746 /* A multiply is acceptable here
4747 since this is presumed to be seldom executed. */
4748 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4749 v->add_val, v->new_reg,
4750 0, insert_before);
4751 }
4752
4753 /* Add code at loop start to initialize giv's reduced reg. */
4754
4755 loop_iv_add_mult_hoist (loop,
4756 extend_value_for_giv (v, bl->initial_value),
4757 v->mult_val, v->add_val, v->new_reg);
4758 }
4759 }
4760 }
4761
4762
4763 /* Check for givs whose first use is their definition and whose
4764 last use is the definition of another giv. If so, it is likely
4765 dead and should not be used to derive another giv nor to
4766 eliminate a biv. */
4767
4768 static void
4769 loop_givs_dead_check (loop, bl)
4770 struct loop *loop ATTRIBUTE_UNUSED;
4771 struct iv_class *bl;
4772 {
4773 struct induction *v;
4774
4775 for (v = bl->giv; v; v = v->next_iv)
4776 {
4777 if (v->ignore
4778 || (v->same && v->same->ignore))
4779 continue;
4780
4781 if (v->giv_type == DEST_REG
4782 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4783 {
4784 struct induction *v1;
4785
4786 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4787 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4788 v->maybe_dead = 1;
4789 }
4790 }
4791 }
4792
4793
4794 static void
4795 loop_givs_rescan (loop, bl, reg_map)
4796 struct loop *loop;
4797 struct iv_class *bl;
4798 rtx *reg_map;
4799 {
4800 struct induction *v;
4801
4802 for (v = bl->giv; v; v = v->next_iv)
4803 {
4804 if (v->same && v->same->ignore)
4805 v->ignore = 1;
4806
4807 if (v->ignore)
4808 continue;
4809
4810 /* Update expression if this was combined, in case other giv was
4811 replaced. */
4812 if (v->same)
4813 v->new_reg = replace_rtx (v->new_reg,
4814 v->same->dest_reg, v->same->new_reg);
4815
4816 /* See if this register is known to be a pointer to something. If
4817 so, see if we can find the alignment. First see if there is a
4818 destination register that is a pointer. If so, this shares the
4819 alignment too. Next see if we can deduce anything from the
4820 computational information. If not, and this is a DEST_ADDR
4821 giv, at least we know that it's a pointer, though we don't know
4822 the alignment. */
4823 if (GET_CODE (v->new_reg) == REG
4824 && v->giv_type == DEST_REG
4825 && REG_POINTER (v->dest_reg))
4826 mark_reg_pointer (v->new_reg,
4827 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4828 else if (GET_CODE (v->new_reg) == REG
4829 && REG_POINTER (v->src_reg))
4830 {
4831 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4832
4833 if (align == 0
4834 || GET_CODE (v->add_val) != CONST_INT
4835 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4836 align = 0;
4837
4838 mark_reg_pointer (v->new_reg, align);
4839 }
4840 else if (GET_CODE (v->new_reg) == REG
4841 && GET_CODE (v->add_val) == REG
4842 && REG_POINTER (v->add_val))
4843 {
4844 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4845
4846 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4847 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4848 align = 0;
4849
4850 mark_reg_pointer (v->new_reg, align);
4851 }
4852 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4853 mark_reg_pointer (v->new_reg, 0);
4854
4855 if (v->giv_type == DEST_ADDR)
4856 /* Store reduced reg as the address in the memref where we found
4857 this giv. */
4858 validate_change (v->insn, v->location, v->new_reg, 0);
4859 else if (v->replaceable)
4860 {
4861 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4862 }
4863 else
4864 {
4865 rtx original_insn = v->insn;
4866 rtx note;
4867
4868 /* Not replaceable; emit an insn to set the original giv reg from
4869 the reduced giv, same as above. */
4870 v->insn = loop_insn_emit_after (loop, 0, original_insn,
4871 gen_move_insn (v->dest_reg,
4872 v->new_reg));
4873
4874 /* The original insn may have a REG_EQUAL note. This note is
4875 now incorrect and may result in invalid substitutions later.
4876 The original insn is dead, but may be part of a libcall
4877 sequence, which doesn't seem worth the bother of handling. */
4878 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
4879 if (note)
4880 remove_note (original_insn, note);
4881 }
4882
4883 /* When a loop is reversed, givs which depend on the reversed
4884 biv, and which are live outside the loop, must be set to their
4885 correct final value. This insn is only needed if the giv is
4886 not replaceable. The correct final value is the same as the
4887 value that the giv starts the reversed loop with. */
4888 if (bl->reversed && ! v->replaceable)
4889 loop_iv_add_mult_sink (loop,
4890 extend_value_for_giv (v, bl->initial_value),
4891 v->mult_val, v->add_val, v->dest_reg);
4892 else if (v->final_value)
4893 loop_insn_sink_or_swim (loop,
4894 gen_load_of_final_value (v->dest_reg,
4895 v->final_value));
4896
4897 if (loop_dump_stream)
4898 {
4899 fprintf (loop_dump_stream, "giv at %d reduced to ",
4900 INSN_UID (v->insn));
4901 print_simple_rtl (loop_dump_stream, v->new_reg);
4902 fprintf (loop_dump_stream, "\n");
4903 }
4904 }
4905 }
4906
4907
4908 static int
4909 loop_giv_reduce_benefit (loop, bl, v, test_reg)
4910 struct loop *loop ATTRIBUTE_UNUSED;
4911 struct iv_class *bl;
4912 struct induction *v;
4913 rtx test_reg;
4914 {
4915 int add_cost;
4916 int benefit;
4917
4918 benefit = v->benefit;
4919 PUT_MODE (test_reg, v->mode);
4920 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4921 test_reg, test_reg);
4922
4923 /* Reduce benefit if not replaceable, since we will insert a
4924 move-insn to replace the insn that calculates this giv. Don't do
4925 this unless the giv is a user variable, since it will often be
4926 marked non-replaceable because of the duplication of the exit
4927 code outside the loop. In such a case, the copies we insert are
4928 dead and will be deleted. So they don't have a cost. Similar
4929 situations exist. */
4930 /* ??? The new final_[bg]iv_value code does a much better job of
4931 finding replaceable giv's, and hence this code may no longer be
4932 necessary. */
4933 if (! v->replaceable && ! bl->eliminable
4934 && REG_USERVAR_P (v->dest_reg))
4935 benefit -= copy_cost;
4936
4937 /* Decrease the benefit to count the add-insns that we will insert
4938 to increment the reduced reg for the giv. ??? This can
4939 overestimate the run-time cost of the additional insns, e.g. if
4940 there are multiple basic blocks that increment the biv, but only
4941 one of these blocks is executed during each iteration. There is
4942 no good way to detect cases like this with the current structure
4943 of the loop optimizer. This code is more accurate for
4944 determining code size than run-time benefits. */
4945 benefit -= add_cost * bl->biv_count;
4946
4947 /* Decide whether to strength-reduce this giv or to leave the code
4948 unchanged (recompute it from the biv each time it is used). This
4949 decision can be made independently for each giv. */
4950
4951 #ifdef AUTO_INC_DEC
4952 /* Attempt to guess whether autoincrement will handle some of the
4953 new add insns; if so, increase BENEFIT (undo the subtraction of
4954 add_cost that was done above). */
4955 if (v->giv_type == DEST_ADDR
4956 /* Increasing the benefit is risky, since this is only a guess.
4957 Avoid increasing register pressure in cases where there would
4958 be no other benefit from reducing this giv. */
4959 && benefit > 0
4960 && GET_CODE (v->mult_val) == CONST_INT)
4961 {
4962 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4963
4964 if (HAVE_POST_INCREMENT
4965 && INTVAL (v->mult_val) == size)
4966 benefit += add_cost * bl->biv_count;
4967 else if (HAVE_PRE_INCREMENT
4968 && INTVAL (v->mult_val) == size)
4969 benefit += add_cost * bl->biv_count;
4970 else if (HAVE_POST_DECREMENT
4971 && -INTVAL (v->mult_val) == size)
4972 benefit += add_cost * bl->biv_count;
4973 else if (HAVE_PRE_DECREMENT
4974 && -INTVAL (v->mult_val) == size)
4975 benefit += add_cost * bl->biv_count;
4976 }
4977 #endif
4978
4979 return benefit;
4980 }
4981
4982
4983 /* Free IV structures for LOOP. */
4984
4985 static void
4986 loop_ivs_free (loop)
4987 struct loop *loop;
4988 {
4989 struct loop_ivs *ivs = LOOP_IVS (loop);
4990 struct iv_class *iv = ivs->list;
4991
4992 free (ivs->regs);
4993
4994 while (iv)
4995 {
4996 struct iv_class *next = iv->next;
4997 struct induction *induction;
4998 struct induction *next_induction;
4999
5000 for (induction = iv->biv; induction; induction = next_induction)
5001 {
5002 next_induction = induction->next_iv;
5003 free (induction);
5004 }
5005 for (induction = iv->giv; induction; induction = next_induction)
5006 {
5007 next_induction = induction->next_iv;
5008 free (induction);
5009 }
5010
5011 free (iv);
5012 iv = next;
5013 }
5014 }
5015
5016
5017 /* Perform strength reduction and induction variable elimination.
5018
5019 Pseudo registers created during this function will be beyond the
5020 last valid index in several tables including
5021 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
5022 problem here, because the added registers cannot be givs outside of
5023 their loop, and hence will never be reconsidered. But scan_loop
5024 must check regnos to make sure they are in bounds. */
5025
5026 static void
5027 strength_reduce (loop, flags)
5028 struct loop *loop;
5029 int flags;
5030 {
5031 struct loop_info *loop_info = LOOP_INFO (loop);
5032 struct loop_regs *regs = LOOP_REGS (loop);
5033 struct loop_ivs *ivs = LOOP_IVS (loop);
5034 rtx p;
5035 /* Temporary list pointer for traversing ivs->list. */
5036 struct iv_class *bl;
5037 /* Ratio of extra register life span we can justify
5038 for saving an instruction. More if loop doesn't call subroutines
5039 since in that case saving an insn makes more difference
5040 and more registers are available. */
5041 /* ??? could set this to last value of threshold in move_movables */
5042 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
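/* For example, with, say, 29 non-fixed registers THRESHOLD is 64 for a loop
   without calls and 32 for one with calls; a giv is then normally reduced
   only when lifetime * threshold * benefit is at least the loop's insn count
   (unless flag_reduce_all_givs or a reversed biv forces the reduction; see
   below).  */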
5043 /* Map of pseudo-register replacements. */
5044 rtx *reg_map = NULL;
5045 int reg_map_size;
5046 int unrolled_insn_copies = 0;
5047 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
5048 int insn_count = count_insns_in_loop (loop);
5049
5050 addr_placeholder = gen_reg_rtx (Pmode);
5051
5052 ivs->n_regs = max_reg_before_loop;
5053 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
5054
5055 /* Find all BIVs in loop. */
5056 loop_bivs_find (loop);
5057
5058 /* Exit if there are no bivs. */
5059 if (! ivs->list)
5060 {
5061 /* We can still unroll the loop anyway, but indicate that there is no
5062 strength reduction info available. */
5063 if (flags & LOOP_UNROLL)
5064 unroll_loop (loop, insn_count, 0);
5065
5066 loop_ivs_free (loop);
5067 return;
5068 }
5069
5070 /* Determine how BIVs are initialized by looking through the pre-header
5071 extended basic block. */
5072 loop_bivs_init_find (loop);
5073
5074 /* Look at each biv and see if we can say anything better about its
5075 initial value from any initializing insns set up above. */
5076 loop_bivs_check (loop);
5077
5078 /* Search the loop for general induction variables. */
5079 loop_givs_find (loop);
5080
5081 /* Try to calculate and save the number of loop iterations. This is
5082 set to zero if the actual number can not be calculated. This must
5083 be called after all giv's have been identified, since otherwise it may
5084 fail if the iteration variable is a giv. */
5085 loop_iterations (loop);
5086
5087 #ifdef HAVE_prefetch
5088 if (flags & LOOP_PREFETCH)
5089 emit_prefetch_instructions (loop);
5090 #endif
5091
5092 /* Now for each giv for which we still don't know whether or not it is
5093 replaceable, check to see if it is replaceable because its final value
5094 can be calculated. This must be done after loop_iterations is called,
5095 so that final_giv_value will work correctly. */
5096 loop_givs_check (loop);
5097
5098 /* Try to prove that the loop counter variable (if any) is always
5099 nonnegative; if so, record that fact with a REG_NONNEG note
5100 so that "decrement and branch until zero" insn can be used. */
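/* For example, a counting loop such as `for (i = 0; i < 100; i++)' may be
   rewritten so that `i' counts down from 100 to 0, allowing the exit test to
   become a single decrement-and-branch-until-zero instruction on targets that
   provide one.  */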
5101 check_dbra_loop (loop, insn_count);
5102
5103 /* Create reg_map to hold substitutions for replaceable giv regs.
5104 Some givs might have been made from biv increments, so look at
5105 ivs->reg_iv_type for a suitable size. */
5106 reg_map_size = ivs->n_regs;
5107 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
5108
5109 /* Examine each iv class for feasibility of strength reduction/induction
5110 variable elimination. */
5111
5112 for (bl = ivs->list; bl; bl = bl->next)
5113 {
5114 struct induction *v;
5115 int benefit;
5116
5117 /* Test whether it will be possible to eliminate this biv
5118 provided all givs are reduced. */
5119 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
5120
5121 /* This will be true at the end, if all givs which depend on this
5122 biv have been strength reduced.
5123 We can't (currently) eliminate the biv unless this is so. */
5124 bl->all_reduced = 1;
5125
5126 /* Check each extension dependent giv in this class to see if its
5127 root biv is safe from wrapping in the interior mode. */
5128 check_ext_dependent_givs (bl, loop_info);
5129
5130 /* Combine all giv's for this iv_class. */
5131 combine_givs (regs, bl);
5132
5133 for (v = bl->giv; v; v = v->next_iv)
5134 {
5135 struct induction *tv;
5136
5137 if (v->ignore || v->same)
5138 continue;
5139
5140 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
5141
5142 /* If an insn is not to be strength reduced, then set its ignore
5143 flag, and clear bl->all_reduced. */
5144
5145 /* A giv that depends on a reversed biv must be reduced if it is
5146 used after the loop exit; otherwise, it would have the wrong
5147 value after the loop exit. To keep things simple, just reduce all
5148 such giv's whether or not we know they are used after the loop
5149 exit. */
5150
5151 if (! flag_reduce_all_givs
5152 && v->lifetime * threshold * benefit < insn_count
5153 && ! bl->reversed)
5154 {
5155 if (loop_dump_stream)
5156 fprintf (loop_dump_stream,
5157 "giv of insn %d not worth while, %d vs %d.\n",
5158 INSN_UID (v->insn),
5159 v->lifetime * threshold * benefit, insn_count);
5160 v->ignore = 1;
5161 bl->all_reduced = 0;
5162 }
5163 else
5164 {
5165 /* Check that we can increment the reduced giv without a
5166 multiply insn. If not, reject it. */
5167
5168 for (tv = bl->biv; tv; tv = tv->next_iv)
5169 if (tv->mult_val == const1_rtx
5170 && ! product_cheap_p (tv->add_val, v->mult_val))
5171 {
5172 if (loop_dump_stream)
5173 fprintf (loop_dump_stream,
5174 "giv of insn %d: would need a multiply.\n",
5175 INSN_UID (v->insn));
5176 v->ignore = 1;
5177 bl->all_reduced = 0;
5178 break;
5179 }
5180 }
5181 }
5182
5183 /* Check for givs whose first use is their definition and whose
5184 last use is the definition of another giv. If so, it is likely
5185 dead and should not be used to derive another giv nor to
5186 eliminate a biv. */
5187 loop_givs_dead_check (loop, bl);
5188
5189 /* Reduce each giv that we decided to reduce. */
5190 loop_givs_reduce (loop, bl);
5191
5192 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5193 as not reduced.
5194
5195 For each giv register that can be reduced now: if replaceable,
5196 substitute reduced reg wherever the old giv occurs;
5197 else add new move insn "giv_reg = reduced_reg". */
5198 loop_givs_rescan (loop, bl, reg_map);
5199
5200 /* All the givs based on the biv bl have been reduced if they
5201 merit it. */
5202
5203 /* For each giv not marked as maybe dead that has been combined with a
5204 second giv, clear any "maybe dead" mark on that second giv.
5205 v->new_reg will either be or refer to the register of the giv it
5206 combined with.
5207
5208 Doing this clearing avoids problems in biv elimination where
5209 a giv's new_reg is a complex value that can't be put in the
5210 insn but the giv combined with (with a reg as new_reg) is
5211 marked maybe_dead. Since the register will be used in either
5212 case, we'd prefer it be used from the simpler giv. */
5213
5214 for (v = bl->giv; v; v = v->next_iv)
5215 if (! v->maybe_dead && v->same)
5216 v->same->maybe_dead = 0;
5217
5218 /* Try to eliminate the biv, if it is a candidate.
5219 This won't work if ! bl->all_reduced,
5220 since the givs we planned to use might not have been reduced.
5221
5222 We have to be careful that we didn't initially think we could
5223 eliminate this biv because of a giv that we now think may be
5224 dead and shouldn't be used as a biv replacement.
5225
5226 Also, there is the possibility that we may have a giv that looks
5227 like it can be used to eliminate a biv, but the resulting insn
5228 isn't valid. This can happen, for example, on the 88k, where a
5229 JUMP_INSN can compare a register only with zero. Attempts to
5230 replace it with a compare with a constant will fail.
5231
5232 Note that in cases where this call fails, we may have replaced some
5233 of the occurrences of the biv with a giv, but no harm was done in
5234 doing so in the rare cases where it can occur. */
5235
5236 if (bl->all_reduced == 1 && bl->eliminable
5237 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
5238 {
5239 /* ?? If we created a new test to bypass the loop entirely,
5240 or otherwise drop straight in, based on this test, then
5241 we might want to rewrite it also. This way some later
5242 pass has more hope of removing the initialization of this
5243 biv entirely. */
5244
5245 /* If final_value != 0, then the biv may be used after loop end
5246 and we must emit an insn to set it just in case.
5247
5248 Reversed bivs already have an insn after the loop setting their
5249 value, so we don't need another one. We can't calculate the
5250 proper final value for such a biv here anyways. */
5251 if (bl->final_value && ! bl->reversed)
5252 loop_insn_sink_or_swim (loop,
5253 gen_load_of_final_value (bl->biv->dest_reg,
5254 bl->final_value));
5255
5256 if (loop_dump_stream)
5257 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5258 bl->regno);
5259 }
5260 /* See above note wrt final_value. But since we couldn't eliminate
5261 the biv, we must set the value after the loop instead of before. */
5262 else if (bl->final_value && ! bl->reversed)
5263 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
5264 bl->final_value));
5265 }
5266
5267 /* Go through all the instructions in the loop, making all the
5268 register substitutions scheduled in REG_MAP. */
5269
5270 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
5271 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5272 || GET_CODE (p) == CALL_INSN)
5273 {
5274 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5275 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
5276 INSN_CODE (p) = -1;
5277 }
5278
5279 if (loop_info->n_iterations > 0)
5280 {
5281 /* When we completely unroll a loop we will likely not need the increment
5282 of the loop BIV and we will not need the conditional branch at the
5283 end of the loop. */
5284 unrolled_insn_copies = insn_count - 2;
5285
5286 #ifdef HAVE_cc0
5287 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5288 need the comparison before the conditional branch at the end of the
5289 loop. */
5290 unrolled_insn_copies -= 1;
5291 #endif
5292
5293 /* We'll need one copy for each loop iteration. */
5294 unrolled_insn_copies *= loop_info->n_iterations;
5295
5296 /* A little slop to account for the ability to remove initialization
5297 code, better CSE, and other secondary benefits of completely
5298 unrolling some loops. */
5299 unrolled_insn_copies -= 1;
5300
5301 /* Clamp the value. */
5302 if (unrolled_insn_copies < 0)
5303 unrolled_insn_copies = 0;
5304 }
5305
5306 /* Unroll loops from within strength reduction so that we can use the
5307 induction variable information that strength_reduce has already
5308 collected. Always unroll loops that would be as small or smaller
5309 unrolled than when rolled. */
5310 if ((flags & LOOP_UNROLL)
5311 || ((flags & LOOP_AUTO_UNROLL)
5312 && loop_info->n_iterations > 0
5313 && unrolled_insn_copies <= insn_count))
5314 unroll_loop (loop, insn_count, 1);
5315
5316 #ifdef HAVE_doloop_end
5317 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
5318 doloop_optimize (loop);
5319 #endif /* HAVE_doloop_end */
5320
5321 /* In case number of iterations is known, drop branch prediction note
5322 in the branch. Do that only in second loop pass, as loop unrolling
5323 may change the number of iterations performed. */
5324 if (flags & LOOP_BCT)
5325 {
5326 unsigned HOST_WIDE_INT n
5327 = loop_info->n_iterations / loop_info->unroll_number;
5328 if (n > 1)
5329 predict_insn (PREV_INSN (loop->end), PRED_LOOP_ITERATIONS,
5330 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
5331 }
5332
5333 if (loop_dump_stream)
5334 fprintf (loop_dump_stream, "\n");
5335
5336 loop_ivs_free (loop);
5337 if (reg_map)
5338 free (reg_map);
5339 }
5340 \f
5341 /* Record all basic induction variables calculated in the insn. */
5342 static rtx
5343 check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple)
5344 struct loop *loop;
5345 rtx p;
5346 int not_every_iteration;
5347 int maybe_multiple;
5348 {
5349 struct loop_ivs *ivs = LOOP_IVS (loop);
5350 rtx set;
5351 rtx dest_reg;
5352 rtx inc_val;
5353 rtx mult_val;
5354 rtx *location;
5355
5356 if (GET_CODE (p) == INSN
5357 && (set = single_set (p))
5358 && GET_CODE (SET_DEST (set)) == REG)
5359 {
5360 dest_reg = SET_DEST (set);
5361 if (REGNO (dest_reg) < max_reg_before_loop
5362 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
5363 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
5364 {
5365 if (basic_induction_var (loop, SET_SRC (set),
5366 GET_MODE (SET_SRC (set)),
5367 dest_reg, p, &inc_val, &mult_val,
5368 &location))
5369 {
5370 /* It is a possible basic induction variable.
5371 Create and initialize an induction structure for it. */
5372
5373 struct induction *v
5374 = (struct induction *) xmalloc (sizeof (struct induction));
5375
5376 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
5377 not_every_iteration, maybe_multiple);
5378 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
5379 }
5380 else if (REGNO (dest_reg) < ivs->n_regs)
5381 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
5382 }
5383 }
5384 return p;
5385 }
5386 \f
5387 /* Record all givs calculated in the insn.
5388 A register is a giv if: it is only set once, it is a function of a
5389 biv and a constant (or invariant), and it is not a biv. */
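/* For instance (a sketch using hypothetical registers): if `i' is a biv,
 an insn such as `r = i * 4 + 16' makes `r' a giv, with src_reg `i',
 mult_val 4 and add_val 16. */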
5390 static rtx
5391 check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple)
5392 struct loop *loop;
5393 rtx p;
5394 int not_every_iteration;
5395 int maybe_multiple;
5396 {
5397 struct loop_regs *regs = LOOP_REGS (loop);
5398
5399 rtx set;
5400 /* Look for a general induction variable in a register. */
5401 if (GET_CODE (p) == INSN
5402 && (set = single_set (p))
5403 && GET_CODE (SET_DEST (set)) == REG
5404 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
5405 {
5406 rtx src_reg;
5407 rtx dest_reg;
5408 rtx add_val;
5409 rtx mult_val;
5410 rtx ext_val;
5411 int benefit;
5412 rtx regnote = 0;
5413 rtx last_consec_insn;
5414
5415 dest_reg = SET_DEST (set);
5416 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
5417 return p;
5418
5419 if (/* SET_SRC is a giv. */
5420 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
5421 &mult_val, &ext_val, 0, &benefit, VOIDmode)
5422 /* Equivalent expression is a giv. */
5423 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5424 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
5425 &add_val, &mult_val, &ext_val, 0,
5426 &benefit, VOIDmode)))
5427 /* Don't try to handle any regs made by loop optimization.
5428 We have nothing on them in regno_first_uid, etc. */
5429 && REGNO (dest_reg) < max_reg_before_loop
5430 /* Don't recognize a BASIC_INDUCT_VAR here. */
5431 && dest_reg != src_reg
5432 /* This must be the only place where the register is set. */
5433 && (regs->array[REGNO (dest_reg)].n_times_set == 1
5434 /* or all sets must be consecutive and make a giv. */
5435 || (benefit = consec_sets_giv (loop, benefit, p,
5436 src_reg, dest_reg,
5437 &add_val, &mult_val, &ext_val,
5438 &last_consec_insn))))
5439 {
5440 struct induction *v
5441 = (struct induction *) xmalloc (sizeof (struct induction));
5442
5443 /* If this is a library call, increase benefit. */
5444 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5445 benefit += libcall_benefit (p);
5446
5447 /* Skip the consecutive insns, if there are any. */
5448 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
5449 p = last_consec_insn;
5450
5451 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
5452 ext_val, benefit, DEST_REG, not_every_iteration,
5453 maybe_multiple, (rtx*) 0);
5454
5455 }
5456 }
5457
5458 #ifndef DONT_REDUCE_ADDR
5459 /* Look for givs which are memory addresses. */
5460 /* This resulted in worse code on a VAX 8600. I wonder if it
5461 still does. */
5462 if (GET_CODE (p) == INSN)
5463 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5464 maybe_multiple);
5465 #endif
5466
5467 /* Update the status of whether giv can derive other givs. This can
5468 change when we pass a label or an insn that updates a biv. */
5469 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5470 || GET_CODE (p) == CODE_LABEL)
5471 update_giv_derive (loop, p);
5472 return p;
5473 }
5474 \f
5475 /* Return 1 if X is a valid source for an initial value (or as value being
5476 compared against in an initial test).
5477
5478 X must be either a register or constant and must not be clobbered between
5479 the current insn and the start of the loop.
5480
5481 INSN is the insn containing X. */
5482
5483 static int
5484 valid_initial_value_p (x, insn, call_seen, loop_start)
5485 rtx x;
5486 rtx insn;
5487 int call_seen;
5488 rtx loop_start;
5489 {
5490 if (CONSTANT_P (x))
5491 return 1;
5492
5493 /* Only consider pseudos we know about initialized in insns whose luids
5494 we know. */
5495 if (GET_CODE (x) != REG
5496 || REGNO (x) >= max_reg_before_loop)
5497 return 0;
5498
5499 /* Don't use call-clobbered registers across a call which clobbers them. On
5500 some machines, don't use any hard registers at all. */
5501 if (REGNO (x) < FIRST_PSEUDO_REGISTER
5502 && (SMALL_REGISTER_CLASSES
5503 || (call_used_regs[REGNO (x)] && call_seen)))
5504 return 0;
5505
5506 /* Don't use registers that have been clobbered before the start of the
5507 loop. */
5508 if (reg_set_between_p (x, insn, loop_start))
5509 return 0;
5510
5511 return 1;
5512 }
5513 \f
5514 /* Scan X for memory refs and check each memory address
5515 as a possible giv. INSN is the insn whose pattern X comes from.
5516 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
5517 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
5518 more than once in each loop iteration. */
5519
5520 static void
5521 find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple)
5522 const struct loop *loop;
5523 rtx x;
5524 rtx insn;
5525 int not_every_iteration, maybe_multiple;
5526 {
5527 int i, j;
5528 enum rtx_code code;
5529 const char *fmt;
5530
5531 if (x == 0)
5532 return;
5533
5534 code = GET_CODE (x);
5535 switch (code)
5536 {
5537 case REG:
5538 case CONST_INT:
5539 case CONST:
5540 case CONST_DOUBLE:
5541 case SYMBOL_REF:
5542 case LABEL_REF:
5543 case PC:
5544 case CC0:
5545 case ADDR_VEC:
5546 case ADDR_DIFF_VEC:
5547 case USE:
5548 case CLOBBER:
5549 return;
5550
5551 case MEM:
5552 {
5553 rtx src_reg;
5554 rtx add_val;
5555 rtx mult_val;
5556 rtx ext_val;
5557 int benefit;
5558
5559 /* This code used to disable creating GIVs with mult_val == 1 and
5560 add_val == 0. However, this leads to lost optimizations when
5561 it comes time to combine a set of related DEST_ADDR GIVs, since
5562 this one would not be seen. */
5563
5564 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
5565 &mult_val, &ext_val, 1, &benefit,
5566 GET_MODE (x)))
5567 {
5568 /* Found one; record it. */
5569 struct induction *v
5570 = (struct induction *) xmalloc (sizeof (struct induction));
5571
5572 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
5573 add_val, ext_val, benefit, DEST_ADDR,
5574 not_every_iteration, maybe_multiple, &XEXP (x, 0));
5575
5576 v->mem = x;
5577 }
5578 }
5579 return;
5580
5581 default:
5582 break;
5583 }
5584
5585 /* Recursively scan the subexpressions for other mem refs. */
5586
5587 fmt = GET_RTX_FORMAT (code);
5588 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5589 if (fmt[i] == 'e')
5590 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5591 maybe_multiple);
5592 else if (fmt[i] == 'E')
5593 for (j = 0; j < XVECLEN (x, i); j++)
5594 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5595 maybe_multiple);
5596 }
5597 \f
5598 /* Fill in the data about one biv update.
5599 V is the `struct induction' in which we record the biv. (It is
5600 allocated by the caller, with alloca.)
5601 INSN is the insn that sets it.
5602 DEST_REG is the biv's reg.
5603
5604 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5605 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
5606 being set to INC_VAL.
5607
5608 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5609 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5610 can be executed more than once per iteration. If MAYBE_MULTIPLE
5611 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5612 executed exactly once per iteration. */
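/* For example (purely illustrative), each `i = i + 4' in the loop body is
 recorded as one biv update with mult_val == const1_rtx and inc_val == 4
 on the iv_class chain for `i', while a plain assignment such as `i = 10'
 is recorded with mult_val == const0_rtx and inc_val == 10. */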
5613
5614 static void
5615 record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location,
5616 not_every_iteration, maybe_multiple)
5617 struct loop *loop;
5618 struct induction *v;
5619 rtx insn;
5620 rtx dest_reg;
5621 rtx inc_val;
5622 rtx mult_val;
5623 rtx *location;
5624 int not_every_iteration;
5625 int maybe_multiple;
5626 {
5627 struct loop_ivs *ivs = LOOP_IVS (loop);
5628 struct iv_class *bl;
5629
5630 v->insn = insn;
5631 v->src_reg = dest_reg;
5632 v->dest_reg = dest_reg;
5633 v->mult_val = mult_val;
5634 v->add_val = inc_val;
5635 v->ext_dependent = NULL_RTX;
5636 v->location = location;
5637 v->mode = GET_MODE (dest_reg);
5638 v->always_computable = ! not_every_iteration;
5639 v->always_executed = ! not_every_iteration;
5640 v->maybe_multiple = maybe_multiple;
5641
5642 /* Add this to the reg's iv_class, creating a class
5643 if this is the first incrementation of the reg. */
5644
5645 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
5646 if (bl == 0)
5647 {
5648 /* Create and initialize new iv_class. */
5649
5650 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
5651
5652 bl->regno = REGNO (dest_reg);
5653 bl->biv = 0;
5654 bl->giv = 0;
5655 bl->biv_count = 0;
5656 bl->giv_count = 0;
5657
5658 /* Set initial value to the reg itself. */
5659 bl->initial_value = dest_reg;
5660 bl->final_value = 0;
5661 /* We haven't seen the initializing insn yet */
5662 bl->init_insn = 0;
5663 bl->init_set = 0;
5664 bl->initial_test = 0;
5665 bl->incremented = 0;
5666 bl->eliminable = 0;
5667 bl->nonneg = 0;
5668 bl->reversed = 0;
5669 bl->total_benefit = 0;
5670
5671 /* Add this class to ivs->list. */
5672 bl->next = ivs->list;
5673 ivs->list = bl;
5674
5675 /* Put it in the array of biv register classes. */
5676 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
5677 }
5678
5679 /* Update IV_CLASS entry for this biv. */
5680 v->next_iv = bl->biv;
5681 bl->biv = v;
5682 bl->biv_count++;
5683 if (mult_val == const1_rtx)
5684 bl->incremented = 1;
5685
5686 if (loop_dump_stream)
5687 loop_biv_dump (v, loop_dump_stream, 0);
5688 }
5689 \f
5690 /* Fill in the data about one giv.
5691 V is the `struct induction' in which we record the giv. (It is
5692 allocated by the caller, with alloca.)
5693 INSN is the insn that sets it.
5694 BENEFIT estimates the savings from deleting this insn.
5695 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5696 into a register or is used as a memory address.
5697
5698 SRC_REG is the biv reg which the giv is computed from.
5699 DEST_REG is the giv's reg (if the giv is stored in a reg).
5700 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5701 LOCATION points to the place where this giv's value appears in INSN. */
5702
5703 static void
5704 record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val,
5705 benefit, type, not_every_iteration, maybe_multiple, location)
5706 const struct loop *loop;
5707 struct induction *v;
5708 rtx insn;
5709 rtx src_reg;
5710 rtx dest_reg;
5711 rtx mult_val, add_val, ext_val;
5712 int benefit;
5713 enum g_types type;
5714 int not_every_iteration, maybe_multiple;
5715 rtx *location;
5716 {
5717 struct loop_ivs *ivs = LOOP_IVS (loop);
5718 struct induction *b;
5719 struct iv_class *bl;
5720 rtx set = single_set (insn);
5721 rtx temp;
5722
5723 /* Attempt to prove constantness of the values. Don't let simplify_rtx
5724 undo the MULT canonicalization that we performed earlier. */
5725 temp = simplify_rtx (add_val);
5726 if (temp
5727 && ! (GET_CODE (add_val) == MULT
5728 && GET_CODE (temp) == ASHIFT))
5729 add_val = temp;
5730
5731 v->insn = insn;
5732 v->src_reg = src_reg;
5733 v->giv_type = type;
5734 v->dest_reg = dest_reg;
5735 v->mult_val = mult_val;
5736 v->add_val = add_val;
5737 v->ext_dependent = ext_val;
5738 v->benefit = benefit;
5739 v->location = location;
5740 v->cant_derive = 0;
5741 v->combined_with = 0;
5742 v->maybe_multiple = maybe_multiple;
5743 v->maybe_dead = 0;
5744 v->derive_adjustment = 0;
5745 v->same = 0;
5746 v->ignore = 0;
5747 v->new_reg = 0;
5748 v->final_value = 0;
5749 v->same_insn = 0;
5750 v->auto_inc_opt = 0;
5751 v->unrolled = 0;
5752 v->shared = 0;
5753
5754 /* The v->always_computable field is used in update_giv_derive, to
5755 determine whether a giv can be used to derive another giv. For a
5756 DEST_REG giv, INSN computes a new value for the giv, so its value
5757 isn't computable if INSN isn't executed every iteration.
5758 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5759 it does not compute a new value. Hence the value is always computable
5760 regardless of whether INSN is executed each iteration. */
5761
5762 if (type == DEST_ADDR)
5763 v->always_computable = 1;
5764 else
5765 v->always_computable = ! not_every_iteration;
5766
5767 v->always_executed = ! not_every_iteration;
5768
5769 if (type == DEST_ADDR)
5770 {
5771 v->mode = GET_MODE (*location);
5772 v->lifetime = 1;
5773 }
5774 else /* type == DEST_REG */
5775 {
5776 v->mode = GET_MODE (SET_DEST (set));
5777
5778 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
5779
5780 /* If the lifetime is zero, it means that this register is
5781 really a dead store. So mark this as a giv that can be
5782 ignored. This will not prevent the biv from being eliminated. */
5783 if (v->lifetime == 0)
5784 v->ignore = 1;
5785
5786 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5787 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
5788 }
5789
5790 /* Add the giv to the class of givs computed from one biv. */
5791
5792 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
5793 if (bl)
5794 {
5795 v->next_iv = bl->giv;
5796 bl->giv = v;
5797 /* Don't count DEST_ADDR. This is supposed to count the number of
5798 insns that calculate givs. */
5799 if (type == DEST_REG)
5800 bl->giv_count++;
5801 bl->total_benefit += benefit;
5802 }
5803 else
5804 /* Fatal error, biv missing for this giv? */
5805 abort ();
5806
5807 if (type == DEST_ADDR)
5808 v->replaceable = 1;
5809 else
5810 {
5811 /* The giv can be replaced outright by the reduced register only if all
5812 of the following conditions are true:
5813 - the insn that sets the giv is always executed on any iteration
5814 on which the giv is used at all
5815 (there are two ways to deduce this:
5816 either the insn is executed on every iteration,
5817 or all uses follow that insn in the same basic block),
5818 - the giv is not used outside the loop
5819 - no assignments to the biv occur during the giv's lifetime. */
5820
5821 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
5822 /* Previous line always fails if INSN was moved by loop opt. */
5823 && REGNO_LAST_LUID (REGNO (dest_reg))
5824 < INSN_LUID (loop->end)
5825 && (! not_every_iteration
5826 || last_use_this_basic_block (dest_reg, insn)))
5827 {
5828 /* Now check that there are no assignments to the biv within the
5829 giv's lifetime. This requires two separate checks. */
5830
5831 /* Check each biv update, and fail if any are between the first
5832 and last use of the giv.
5833
5834 If this loop contains an inner loop that was unrolled, then
5835 the insn modifying the biv may have been emitted by the loop
5836 unrolling code, and hence does not have a valid luid. Just
5837 mark the biv as not replaceable in this case. It is not very
5838 useful as a biv, because it is used in two different loops.
5839 It is very unlikely that we would be able to optimize the giv
5840 using this biv anyways. */
5841
5842 v->replaceable = 1;
5843 for (b = bl->biv; b; b = b->next_iv)
5844 {
5845 if (INSN_UID (b->insn) >= max_uid_for_loop
5846 || ((INSN_LUID (b->insn)
5847 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5848 && (INSN_LUID (b->insn)
5849 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
5850 {
5851 v->replaceable = 0;
5852 v->not_replaceable = 1;
5853 break;
5854 }
5855 }
5856
5857 /* If there are any backwards branches that go from after the
5858 biv update to before it, then this giv is not replaceable. */
5859 if (v->replaceable)
5860 for (b = bl->biv; b; b = b->next_iv)
5861 if (back_branch_in_range_p (loop, b->insn))
5862 {
5863 v->replaceable = 0;
5864 v->not_replaceable = 1;
5865 break;
5866 }
5867 }
5868 else
5869 {
5870 /* May still be replaceable, we don't have enough info here to
5871 decide. */
5872 v->replaceable = 0;
5873 v->not_replaceable = 0;
5874 }
5875 }
5876
5877 /* Record whether the add_val contains a const_int, for later use by
5878 combine_givs. */
5879 {
5880 rtx tem = add_val;
5881
5882 v->no_const_addval = 1;
5883 if (tem == const0_rtx)
5884 ;
5885 else if (CONSTANT_P (add_val))
5886 v->no_const_addval = 0;
5887 if (GET_CODE (tem) == PLUS)
5888 {
5889 while (1)
5890 {
5891 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5892 tem = XEXP (tem, 0);
5893 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5894 tem = XEXP (tem, 1);
5895 else
5896 break;
5897 }
5898 if (CONSTANT_P (XEXP (tem, 1)))
5899 v->no_const_addval = 0;
5900 }
5901 }
5902
5903 if (loop_dump_stream)
5904 loop_giv_dump (v, loop_dump_stream, 0);
5905 }
5906
5907 /* All this does is determine whether a giv can be made replaceable because
5908 its final value can be calculated. This code can not be part of record_giv
5909 above, because final_giv_value requires that the number of loop iterations
5910 be known, and that can not be accurately calculated until after all givs
5911 have been identified. */
5912
5913 static void
5914 check_final_value (loop, v)
5915 const struct loop *loop;
5916 struct induction *v;
5917 {
5918 struct loop_ivs *ivs = LOOP_IVS (loop);
5919 struct iv_class *bl;
5920 rtx final_value = 0;
5921
5922 bl = REG_IV_CLASS (ivs, REGNO (v->src_reg));
5923
5924 /* DEST_ADDR givs will never reach here, because they are always marked
5925 replaceable above in record_giv. */
5926
5927 /* The giv can be replaced outright by the reduced register only if all
5928 of the following conditions are true:
5929 - the insn that sets the giv is always executed on any iteration
5930 on which the giv is used at all
5931 (there are two ways to deduce this:
5932 either the insn is executed on every iteration,
5933 or all uses follow that insn in the same basic block),
5934 - its final value can be calculated (this condition is different
5935 than the one above in record_giv)
5936 - it's not used before it's set
5937 - no assignments to the biv occur during the giv's lifetime. */
5938
5939 #if 0
5940 /* This is only called now when replaceable is known to be false. */
5941 /* Clear replaceable, so that it won't confuse final_giv_value. */
5942 v->replaceable = 0;
5943 #endif
5944
5945 if ((final_value = final_giv_value (loop, v))
5946 && (v->always_executed
5947 || last_use_this_basic_block (v->dest_reg, v->insn)))
5948 {
5949 int biv_increment_seen = 0, before_giv_insn = 0;
5950 rtx p = v->insn;
5951 rtx last_giv_use;
5952
5953 v->replaceable = 1;
5954
5955 /* When trying to determine whether or not a biv increment occurs
5956 during the lifetime of the giv, we can ignore uses of the variable
5957 outside the loop because final_value is true. Hence we can not
5958 use regno_last_uid and regno_first_uid as above in record_giv. */
5959
5960 /* Search the loop to determine whether any assignments to the
5961 biv occur during the giv's lifetime. Start with the insn
5962 that sets the giv, and search around the loop until we come
5963 back to that insn again.
5964
5965 Also fail if there is a jump within the giv's lifetime that jumps
5966 to somewhere outside the lifetime but still within the loop. This
5967 catches spaghetti code where the execution order is not linear, and
5968 hence the above test fails. Here we assume that the giv lifetime
5969 does not extend from one iteration of the loop to the next, so as
5970 to make the test easier. Since the lifetime isn't known yet,
5971 this requires two loops. See also record_giv above. */
5972
5973 last_giv_use = v->insn;
5974
5975 while (1)
5976 {
5977 p = NEXT_INSN (p);
5978 if (p == loop->end)
5979 {
5980 before_giv_insn = 1;
5981 p = NEXT_INSN (loop->start);
5982 }
5983 if (p == v->insn)
5984 break;
5985
5986 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5987 || GET_CODE (p) == CALL_INSN)
5988 {
5989 /* It is possible for the BIV increment to use the GIV if we
5990 have a cycle. Thus we must be sure to check each insn for
5991 both BIV and GIV uses, and we must check for BIV uses
5992 first. */
5993
5994 if (! biv_increment_seen
5995 && reg_set_p (v->src_reg, PATTERN (p)))
5996 biv_increment_seen = 1;
5997
5998 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5999 {
6000 if (biv_increment_seen || before_giv_insn)
6001 {
6002 v->replaceable = 0;
6003 v->not_replaceable = 1;
6004 break;
6005 }
6006 last_giv_use = p;
6007 }
6008 }
6009 }
6010
6011 /* Now that the lifetime of the giv is known, check for branches
6012 from within the lifetime to outside the lifetime if it is still
6013 replaceable. */
6014
6015 if (v->replaceable)
6016 {
6017 p = v->insn;
6018 while (1)
6019 {
6020 p = NEXT_INSN (p);
6021 if (p == loop->end)
6022 p = NEXT_INSN (loop->start);
6023 if (p == last_giv_use)
6024 break;
6025
6026 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
6027 && LABEL_NAME (JUMP_LABEL (p))
6028 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
6029 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
6030 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
6031 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
6032 {
6033 v->replaceable = 0;
6034 v->not_replaceable = 1;
6035
6036 if (loop_dump_stream)
6037 fprintf (loop_dump_stream,
6038 "Found branch outside giv lifetime.\n");
6039
6040 break;
6041 }
6042 }
6043 }
6044
6045 /* If it is replaceable, then save the final value. */
6046 if (v->replaceable)
6047 v->final_value = final_value;
6048 }
6049
6050 if (loop_dump_stream && v->replaceable)
6051 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
6052 INSN_UID (v->insn), REGNO (v->dest_reg));
6053 }
6054 \f
6055 /* Update the status of whether a giv can derive other givs.
6056
6057 We need to do something special if there is or may be an update to the biv
6058 between the time the giv is defined and the time it is used to derive
6059 another giv.
6060
6061 In addition, a giv that is only conditionally set is not allowed to
6062 derive another giv once a label has been passed.
6063
6064 The cases we look at are when a label or an update to a biv is passed. */
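/* As an illustrative sketch: suppose a giv is recorded as `3*b + 5' and a
 later biv update `b = b + 2' is passed. In terms of the updated `b', the
 giv's value is `3*b + 5 - 6'; the compensation 6 (biv->add_val times
 giv->mult_val) is what gets accumulated in derive_adjustment. */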
6065
6066 static void
6067 update_giv_derive (loop, p)
6068 const struct loop *loop;
6069 rtx p;
6070 {
6071 struct loop_ivs *ivs = LOOP_IVS (loop);
6072 struct iv_class *bl;
6073 struct induction *biv, *giv;
6074 rtx tem;
6075 int dummy;
6076
6077 /* Search all IV classes, then all bivs, and finally all givs.
6078
6079 There are three cases we are concerned with. First we have the situation
6080 of a giv that is only updated conditionally. In that case, it may not
6081 derive any givs after a label is passed.
6082
6083 The second case is when a biv update occurs, or may occur, after the
6084 definition of a giv. For certain biv updates (see below) that are
6085 known to occur between the giv definition and use, we can adjust the
6086 giv definition. For others, or when the biv update is conditional,
6087 we must prevent the giv from deriving any other givs. There are two
6088 sub-cases within this case.
6089
6090 If this is a label, we are concerned with any biv update that is done
6091 conditionally, since it may be done after the giv is defined followed by
6092 a branch here (actually, we need to pass both a jump and a label, but
6093 this extra tracking doesn't seem worth it).
6094
6095 If this is a jump, we are concerned about any biv update that may be
6096 executed multiple times. We are actually only concerned about
6097 backward jumps, but it is probably not worth performing the test
6098 on the jump again here.
6099
6100 If this is a biv update, we must adjust the giv status to show that a
6101 subsequent biv update was performed. If this adjustment cannot be done,
6102 the giv cannot derive further givs. */
6103
6104 for (bl = ivs->list; bl; bl = bl->next)
6105 for (biv = bl->biv; biv; biv = biv->next_iv)
6106 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
6107 || biv->insn == p)
6108 {
6109 for (giv = bl->giv; giv; giv = giv->next_iv)
6110 {
6111 /* If cant_derive is already true, there is no point in
6112 checking all of these conditions again. */
6113 if (giv->cant_derive)
6114 continue;
6115
6116 /* If this giv is conditionally set and we have passed a label,
6117 it cannot derive anything. */
6118 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
6119 giv->cant_derive = 1;
6120
6121 /* Skip givs that have mult_val == 0, since
6122 they are really invariants. Also skip those that are
6123 replaceable, since we know their lifetime doesn't contain
6124 any biv update. */
6125 else if (giv->mult_val == const0_rtx || giv->replaceable)
6126 continue;
6127
6128 /* The only way we can allow this giv to derive another
6129 is if this is a biv increment and we can form the product
6130 of biv->add_val and giv->mult_val. In this case, we will
6131 be able to compute a compensation. */
6132 else if (biv->insn == p)
6133 {
6134 rtx ext_val_dummy;
6135
6136 tem = 0;
6137 if (biv->mult_val == const1_rtx)
6138 tem = simplify_giv_expr (loop,
6139 gen_rtx_MULT (giv->mode,
6140 biv->add_val,
6141 giv->mult_val),
6142 &ext_val_dummy, &dummy);
6143
6144 if (tem && giv->derive_adjustment)
6145 tem = simplify_giv_expr
6146 (loop,
6147 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
6148 &ext_val_dummy, &dummy);
6149
6150 if (tem)
6151 giv->derive_adjustment = tem;
6152 else
6153 giv->cant_derive = 1;
6154 }
6155 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6156 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
6157 giv->cant_derive = 1;
6158 }
6159 }
6160 }
6161 \f
6162 /* Check whether an insn is an increment legitimate for a basic induction var.
6163 X is the source of insn P, or a part of it.
6164 MODE is the mode in which X should be interpreted.
6165
6166 DEST_REG is the putative biv, also the destination of the insn.
6167 We accept patterns of these forms:
6168 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
6169 REG = INVARIANT + REG
6170
6171 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
6172 store the additive term into *INC_VAL, and store the place where
6173 we found the additive term into *LOCATION.
6174
6175 If X is an assignment of an invariant into DEST_REG, we set
6176 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6177
6178 We also want to detect a BIV when it corresponds to a variable
6179 whose mode was promoted via PROMOTED_MODE. In that case, an increment
6180 of the variable may be a PLUS that adds a SUBREG of that variable to
6181 an invariant and then sign- or zero-extends the result of the PLUS
6182 into the variable.
6183
6184 Most GIVs in such cases will be in the promoted mode, since that is
6185 probably the natural computation mode (and almost certainly the mode
6186 used for addresses) on the machine. So we view the pseudo-reg containing
6187 the variable as the BIV, as if it were simply incremented.
6188
6189 Note that treating the entire pseudo as a BIV will result in making
6190 simple increments to any GIVs based on it. However, if the variable
6191 overflows in its declared mode but not its promoted mode, the result will
6192 be incorrect. This is acceptable if the variable is signed, since
6193 overflows in such cases are undefined, but not if it is unsigned, since
6194 those overflows are defined. So we only check for SIGN_EXTEND and
6195 not ZERO_EXTEND.
6196
6197 If we cannot find a biv, we return 0. */
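/* So, for example, `i = i + 4' matches the first form and yields
 *mult_val == const1_rtx and *inc_val == (const_int 4), while `i = n' with
 `n' loop-invariant (in the innermost loop) yields *mult_val == const0_rtx
 and *inc_val == n. (Hypothetical registers, for illustration only.) */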
6198
6199 static int
6200 basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location)
6201 const struct loop *loop;
6202 rtx x;
6203 enum machine_mode mode;
6204 rtx dest_reg;
6205 rtx p;
6206 rtx *inc_val;
6207 rtx *mult_val;
6208 rtx **location;
6209 {
6210 enum rtx_code code;
6211 rtx *argp, arg;
6212 rtx insn, set = 0;
6213
6214 code = GET_CODE (x);
6215 *location = NULL;
6216 switch (code)
6217 {
6218 case PLUS:
6219 if (rtx_equal_p (XEXP (x, 0), dest_reg)
6220 || (GET_CODE (XEXP (x, 0)) == SUBREG
6221 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6222 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
6223 {
6224 argp = &XEXP (x, 1);
6225 }
6226 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
6227 || (GET_CODE (XEXP (x, 1)) == SUBREG
6228 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6229 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
6230 {
6231 argp = &XEXP (x, 0);
6232 }
6233 else
6234 return 0;
6235
6236 arg = *argp;
6237 if (loop_invariant_p (loop, arg) != 1)
6238 return 0;
6239
6240 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
6241 *mult_val = const1_rtx;
6242 *location = argp;
6243 return 1;
6244
6245 case SUBREG:
6246 /* If what's inside the SUBREG is a BIV, then treat the SUBREG as the BIV. This will
6247 handle addition of promoted variables.
6248 ??? The comment at the start of this function is wrong: promoted
6249 variable increments don't look like it says they do. */
6250 return basic_induction_var (loop, SUBREG_REG (x),
6251 GET_MODE (SUBREG_REG (x)),
6252 dest_reg, p, inc_val, mult_val, location);
6253
6254 case REG:
6255 /* If this register is assigned in a previous insn, look at its
6256 source, but don't go outside the loop or past a label. */
6257
6258 /* If this sets a register to itself, we would repeat any previous
6259 biv increment if we applied this strategy blindly. */
6260 if (rtx_equal_p (dest_reg, x))
6261 return 0;
6262
6263 insn = p;
6264 while (1)
6265 {
6266 rtx dest;
6267 do
6268 {
6269 insn = PREV_INSN (insn);
6270 }
6271 while (insn && GET_CODE (insn) == NOTE
6272 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6273
6274 if (!insn)
6275 break;
6276 set = single_set (insn);
6277 if (set == 0)
6278 break;
6279 dest = SET_DEST (set);
6280 if (dest == x
6281 || (GET_CODE (dest) == SUBREG
6282 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6283 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6284 && SUBREG_REG (dest) == x))
6285 return basic_induction_var (loop, SET_SRC (set),
6286 (GET_MODE (SET_SRC (set)) == VOIDmode
6287 ? GET_MODE (x)
6288 : GET_MODE (SET_SRC (set))),
6289 dest_reg, insn,
6290 inc_val, mult_val, location);
6291
6292 while (GET_CODE (dest) == SIGN_EXTRACT
6293 || GET_CODE (dest) == ZERO_EXTRACT
6294 || GET_CODE (dest) == SUBREG
6295 || GET_CODE (dest) == STRICT_LOW_PART)
6296 dest = XEXP (dest, 0);
6297 if (dest == x)
6298 break;
6299 }
6300 /* Fall through. */
6301
6302 /* Can accept constant setting of biv only when inside innermost loop.
6303 Otherwise, a biv of an inner loop may be incorrectly recognized
6304 as a biv of the outer loop,
6305 causing code to be moved INTO the inner loop. */
6306 case MEM:
6307 if (loop_invariant_p (loop, x) != 1)
6308 return 0;
6309 case CONST_INT:
6310 case SYMBOL_REF:
6311 case CONST:
6312 /* convert_modes aborts if we try to convert to or from CCmode, so just
6313 exclude that case. It is very unlikely that a condition code value
6314 would be a useful iterator anyways. convert_modes aborts if we try to
6315 convert a float mode to non-float or vice versa too. */
6316 if (loop->level == 1
6317 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
6318 && GET_MODE_CLASS (mode) != MODE_CC)
6319 {
6320 /* Possible bug here? Perhaps we don't know the mode of X. */
6321 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
6322 *mult_val = const0_rtx;
6323 return 1;
6324 }
6325 else
6326 return 0;
6327
6328 case SIGN_EXTEND:
6329 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6330 dest_reg, p, inc_val, mult_val, location);
6331
6332 case ASHIFTRT:
6333 /* Similar, since this can be a sign extension. */
6334 for (insn = PREV_INSN (p);
6335 (insn && GET_CODE (insn) == NOTE
6336 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6337 insn = PREV_INSN (insn))
6338 ;
6339
6340 if (insn)
6341 set = single_set (insn);
6342
6343 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6344 && set && SET_DEST (set) == XEXP (x, 0)
6345 && GET_CODE (XEXP (x, 1)) == CONST_INT
6346 && INTVAL (XEXP (x, 1)) >= 0
6347 && GET_CODE (SET_SRC (set)) == ASHIFT
6348 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6349 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6350 GET_MODE (XEXP (x, 0)),
6351 dest_reg, insn, inc_val, mult_val,
6352 location);
6353 return 0;
6354
6355 default:
6356 return 0;
6357 }
6358 }
6359 \f
6360 /* A general induction variable (giv) is any quantity that is a linear
6361 function of a basic induction variable,
6362 i.e. giv = biv * mult_val + add_val.
6363 The coefficients can be any loop invariant quantity.
6364 A giv need not be computed directly from the biv;
6365 it can be computed by way of other givs. */
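/* For example, in a loop like `for (i = 0; i < n; i++) x[i] = 0;' the
 index `i' is a biv, and the address of `x[i]' (that is, `x + 4*i' with
 4-byte elements) is a giv with mult_val 4 and add_val `x'. (An
 illustrative sketch; the names are hypothetical.) */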
6366
6367 /* Determine whether X computes a giv.
6368 If it does, return a nonzero value
6369 which is the benefit from eliminating the computation of X;
6370 set *SRC_REG to the register of the biv that it is computed from;
6371 set *ADD_VAL and *MULT_VAL to the coefficients,
6372 such that the value of X is biv * mult + add; */
6373
6374 static int
6375 general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val,
6376 is_addr, pbenefit, addr_mode)
6377 const struct loop *loop;
6378 rtx x;
6379 rtx *src_reg;
6380 rtx *add_val;
6381 rtx *mult_val;
6382 rtx *ext_val;
6383 int is_addr;
6384 int *pbenefit;
6385 enum machine_mode addr_mode;
6386 {
6387 struct loop_ivs *ivs = LOOP_IVS (loop);
6388 rtx orig_x = x;
6389
6390 /* If this is an invariant, forget it, it isn't a giv. */
6391 if (loop_invariant_p (loop, x) == 1)
6392 return 0;
6393
6394 *pbenefit = 0;
6395 *ext_val = NULL_RTX;
6396 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
6397 if (x == 0)
6398 return 0;
6399
6400 switch (GET_CODE (x))
6401 {
6402 case USE:
6403 case CONST_INT:
6404 /* Since this is now an invariant and wasn't before, it must be a giv
6405 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6406 with. */
6407 *src_reg = ivs->list->biv->dest_reg;
6408 *mult_val = const0_rtx;
6409 *add_val = x;
6410 break;
6411
6412 case REG:
6413 /* This is equivalent to a BIV. */
6414 *src_reg = x;
6415 *mult_val = const1_rtx;
6416 *add_val = const0_rtx;
6417 break;
6418
6419 case PLUS:
6420 /* Either (plus (biv) (invar)) or
6421 (plus (mult (biv) (invar_1)) (invar_2)). */
6422 if (GET_CODE (XEXP (x, 0)) == MULT)
6423 {
6424 *src_reg = XEXP (XEXP (x, 0), 0);
6425 *mult_val = XEXP (XEXP (x, 0), 1);
6426 }
6427 else
6428 {
6429 *src_reg = XEXP (x, 0);
6430 *mult_val = const1_rtx;
6431 }
6432 *add_val = XEXP (x, 1);
6433 break;
6434
6435 case MULT:
6436 /* ADD_VAL is zero. */
6437 *src_reg = XEXP (x, 0);
6438 *mult_val = XEXP (x, 1);
6439 *add_val = const0_rtx;
6440 break;
6441
6442 default:
6443 abort ();
6444 }
6445
6446 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be one
6447 unless they are CONST_INT). */
6448 if (GET_CODE (*add_val) == USE)
6449 *add_val = XEXP (*add_val, 0);
6450 if (GET_CODE (*mult_val) == USE)
6451 *mult_val = XEXP (*mult_val, 0);
6452
6453 if (is_addr)
6454 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
6455 else
6456 *pbenefit += rtx_cost (orig_x, SET);
6457
6458 /* Always return true if this is a giv so it will be detected as such,
6459 even if the benefit is zero or negative. This allows elimination
6460 of bivs that might otherwise not be eliminated. */
6461 return 1;
6462 }
6463 \f
6464 /* Given an expression, X, try to form it as a linear function of a biv.
6465 We will canonicalize it to be of the form
6466 (plus (mult (BIV) (invar_1))
6467 (invar_2))
6468 with possible degeneracies.
6469
6470 The invariant expressions must each be of a form that can be used as a
6471 machine operand. We surround them with a USE rtx (a hack, but localized
6472 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6473 routine; it is the caller's responsibility to strip them.
6474
6475 If no such canonicalization is possible (i.e., two biv's are used or an
6476 expression that is neither invariant nor a biv or giv), this routine
6477 returns 0.
6478
6479 For a non-zero return, the result will have a code of CONST_INT, USE,
6480 REG (for a BIV), PLUS, or MULT. No other codes will occur.
6481
6482 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
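/* A rough sketch, with `i' a biv and `j' a loop invariant: the expression
 `i * 4 + (j + 8)' is canonicalized to
 (plus (mult (reg i) (const_int 4)) (use (plus (reg j) (const_int 8))))
 so invar_1 is 4 and invar_2 is the USE-wrapped invariant sum. */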
6483
6484 static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx));
6485 static rtx sge_plus_constant PARAMS ((rtx, rtx));
6486
6487 static rtx
6488 simplify_giv_expr (loop, x, ext_val, benefit)
6489 const struct loop *loop;
6490 rtx x;
6491 rtx *ext_val;
6492 int *benefit;
6493 {
6494 struct loop_ivs *ivs = LOOP_IVS (loop);
6495 struct loop_regs *regs = LOOP_REGS (loop);
6496 enum machine_mode mode = GET_MODE (x);
6497 rtx arg0, arg1;
6498 rtx tem;
6499
6500 /* If this is not an integer mode, or if we cannot do arithmetic in this
6501 mode, this can't be a giv. */
6502 if (mode != VOIDmode
6503 && (GET_MODE_CLASS (mode) != MODE_INT
6504 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
6505 return NULL_RTX;
6506
6507 switch (GET_CODE (x))
6508 {
6509 case PLUS:
6510 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6511 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6512 if (arg0 == 0 || arg1 == 0)
6513 return NULL_RTX;
6514
6515 /* Put constant last, CONST_INT last if both constant. */
6516 if ((GET_CODE (arg0) == USE
6517 || GET_CODE (arg0) == CONST_INT)
6518 && ! ((GET_CODE (arg0) == USE
6519 && GET_CODE (arg1) == USE)
6520 || GET_CODE (arg1) == CONST_INT))
6521 tem = arg0, arg0 = arg1, arg1 = tem;
6522
6523 /* Handle addition of zero, then addition of an invariant. */
6524 if (arg1 == const0_rtx)
6525 return arg0;
6526 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6527 switch (GET_CODE (arg0))
6528 {
6529 case CONST_INT:
6530 case USE:
6531 /* Adding two invariants must result in an invariant, so enclose
6532 addition operation inside a USE and return it. */
6533 if (GET_CODE (arg0) == USE)
6534 arg0 = XEXP (arg0, 0);
6535 if (GET_CODE (arg1) == USE)
6536 arg1 = XEXP (arg1, 0);
6537
6538 if (GET_CODE (arg0) == CONST_INT)
6539 tem = arg0, arg0 = arg1, arg1 = tem;
6540 if (GET_CODE (arg1) == CONST_INT)
6541 tem = sge_plus_constant (arg0, arg1);
6542 else
6543 tem = sge_plus (mode, arg0, arg1);
6544
6545 if (GET_CODE (tem) != CONST_INT)
6546 tem = gen_rtx_USE (mode, tem);
6547 return tem;
6548
6549 case REG:
6550 case MULT:
6551 /* biv + invar or mult + invar. Return sum. */
6552 return gen_rtx_PLUS (mode, arg0, arg1);
6553
6554 case PLUS:
6555 /* (a + invar_1) + invar_2. Associate. */
6556 return
6557 simplify_giv_expr (loop,
6558 gen_rtx_PLUS (mode,
6559 XEXP (arg0, 0),
6560 gen_rtx_PLUS (mode,
6561 XEXP (arg0, 1),
6562 arg1)),
6563 ext_val, benefit);
6564
6565 default:
6566 abort ();
6567 }
6568
6569 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6570 MULT to reduce cases. */
6571 if (GET_CODE (arg0) == REG)
6572 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
6573 if (GET_CODE (arg1) == REG)
6574 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
6575
6576 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6577 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6578 Recurse to associate the second PLUS. */
6579 if (GET_CODE (arg1) == MULT)
6580 tem = arg0, arg0 = arg1, arg1 = tem;
6581
6582 if (GET_CODE (arg1) == PLUS)
6583 return
6584 simplify_giv_expr (loop,
6585 gen_rtx_PLUS (mode,
6586 gen_rtx_PLUS (mode, arg0,
6587 XEXP (arg1, 0)),
6588 XEXP (arg1, 1)),
6589 ext_val, benefit);
6590
6591 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6592 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
6593 return NULL_RTX;
6594
6595 if (!rtx_equal_p (arg0, arg1))
6596 return NULL_RTX;
6597
6598 return simplify_giv_expr (loop,
6599 gen_rtx_MULT (mode,
6600 XEXP (arg0, 0),
6601 gen_rtx_PLUS (mode,
6602 XEXP (arg0, 1),
6603 XEXP (arg1, 1))),
6604 ext_val, benefit);
6605
6606 case MINUS:
6607 /* Handle "a - b" as "a + b * (-1)". */
6608 return simplify_giv_expr (loop,
6609 gen_rtx_PLUS (mode,
6610 XEXP (x, 0),
6611 gen_rtx_MULT (mode,
6612 XEXP (x, 1),
6613 constm1_rtx)),
6614 ext_val, benefit);
6615
6616 case MULT:
6617 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6618 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
6619 if (arg0 == 0 || arg1 == 0)
6620 return NULL_RTX;
6621
6622 /* Put constant last, CONST_INT last if both constant. */
6623 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6624 && GET_CODE (arg1) != CONST_INT)
6625 tem = arg0, arg0 = arg1, arg1 = tem;
6626
6627 /* If second argument is not now constant, not giv. */
6628 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
6629 return NULL_RTX;
6630
6631 /* Handle multiply by 0 or 1. */
6632 if (arg1 == const0_rtx)
6633 return const0_rtx;
6634
6635 else if (arg1 == const1_rtx)
6636 return arg0;
6637
6638 switch (GET_CODE (arg0))
6639 {
6640 case REG:
6641 /* biv * invar. Done. */
6642 return gen_rtx_MULT (mode, arg0, arg1);
6643
6644 case CONST_INT:
6645 /* Product of two constants. */
6646 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
6647
6648 case USE:
6649 /* invar * invar is a giv, but attempt to simplify it somehow. */
6650 if (GET_CODE (arg1) != CONST_INT)
6651 return NULL_RTX;
6652
6653 arg0 = XEXP (arg0, 0);
6654 if (GET_CODE (arg0) == MULT)
6655 {
6656 /* (invar_0 * invar_1) * invar_2. Associate. */
6657 return simplify_giv_expr (loop,
6658 gen_rtx_MULT (mode,
6659 XEXP (arg0, 0),
6660 gen_rtx_MULT (mode,
6661 XEXP (arg0,
6662 1),
6663 arg1)),
6664 ext_val, benefit);
6665 }
6666 /* Propagate the MULT expressions to the innermost nodes. */
6667 else if (GET_CODE (arg0) == PLUS)
6668 {
6669 /* (invar_0 + invar_1) * invar_2. Distribute. */
6670 return simplify_giv_expr (loop,
6671 gen_rtx_PLUS (mode,
6672 gen_rtx_MULT (mode,
6673 XEXP (arg0,
6674 0),
6675 arg1),
6676 gen_rtx_MULT (mode,
6677 XEXP (arg0,
6678 1),
6679 arg1)),
6680 ext_val, benefit);
6681 }
6682 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
6683
6684 case MULT:
6685 /* (a * invar_1) * invar_2. Associate. */
6686 return simplify_giv_expr (loop,
6687 gen_rtx_MULT (mode,
6688 XEXP (arg0, 0),
6689 gen_rtx_MULT (mode,
6690 XEXP (arg0, 1),
6691 arg1)),
6692 ext_val, benefit);
6693
6694 case PLUS:
6695 /* (a + invar_1) * invar_2. Distribute. */
6696 return simplify_giv_expr (loop,
6697 gen_rtx_PLUS (mode,
6698 gen_rtx_MULT (mode,
6699 XEXP (arg0, 0),
6700 arg1),
6701 gen_rtx_MULT (mode,
6702 XEXP (arg0, 1),
6703 arg1)),
6704 ext_val, benefit);
6705
6706 default:
6707 abort ();
6708 }
6709
6710 case ASHIFT:
6711 /* Shift by constant is multiply by power of two. */
6712 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6713 return 0;
6714
6715 return
6716 simplify_giv_expr (loop,
6717 gen_rtx_MULT (mode,
6718 XEXP (x, 0),
6719 GEN_INT ((HOST_WIDE_INT) 1
6720 << INTVAL (XEXP (x, 1)))),
6721 ext_val, benefit);
6722
6723 case NEG:
6724 /* "-a" is "a * (-1)" */
6725 return simplify_giv_expr (loop,
6726 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
6727 ext_val, benefit);
6728
6729 case NOT:
6730 /* "~a" is "-a - 1". Silly, but easy. */
6731 return simplify_giv_expr (loop,
6732 gen_rtx_MINUS (mode,
6733 gen_rtx_NEG (mode, XEXP (x, 0)),
6734 const1_rtx),
6735 ext_val, benefit);
6736
6737 case USE:
6738 /* Already in proper form for invariant. */
6739 return x;
6740
6741 case SIGN_EXTEND:
6742 case ZERO_EXTEND:
6743 case TRUNCATE:
6744 /* Conditionally recognize extensions of simple IVs. After we've
6745 computed loop traversal counts and verified the range of the
6746 source IV, we'll reevaluate this as a GIV. */
6747 if (*ext_val == NULL_RTX)
6748 {
6749 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6750 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
6751 {
6752 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6753 return arg0;
6754 }
6755 }
6756 goto do_default;
6757
6758 case REG:
6759 /* If this is a new register, we can't deal with it. */
6760 if (REGNO (x) >= max_reg_before_loop)
6761 return 0;
6762
6763 /* Check for biv or giv. */
6764 switch (REG_IV_TYPE (ivs, REGNO (x)))
6765 {
6766 case BASIC_INDUCT:
6767 return x;
6768 case GENERAL_INDUCT:
6769 {
6770 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
6771
6772 /* Form expression from giv and add benefit. Ensure this giv
6773 can derive another and subtract any needed adjustment if so. */
6774
6775 /* Increasing the benefit here is risky. The only case in which it
6776 is arguably correct is if this is the only use of V. In other
6777 cases, this will artificially inflate the benefit of the current
6778 giv, and lead to suboptimal code. Thus, it is disabled, since
6779 potentially not reducing an only marginally beneficial giv is
6780 less harmful than reducing many givs that are not really
6781 beneficial. */
6782 {
6783 rtx single_use = regs->array[REGNO (x)].single_usage;
6784 if (single_use && single_use != const0_rtx)
6785 *benefit += v->benefit;
6786 }
6787
6788 if (v->cant_derive)
6789 return 0;
6790
6791 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6792 v->src_reg, v->mult_val),
6793 v->add_val);
6794
6795 if (v->derive_adjustment)
6796 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
6797 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6798 if (*ext_val)
6799 {
6800 if (!v->ext_dependent)
6801 return arg0;
6802 }
6803 else
6804 {
6805 *ext_val = v->ext_dependent;
6806 return arg0;
6807 }
6808 return 0;
6809 }
6810
6811 default:
6812 do_default:
6813 /* If it isn't an induction variable, and it is invariant, we
6814 may be able to simplify things further by looking through
6815 the bits we just moved outside the loop. */
6816 if (loop_invariant_p (loop, x) == 1)
6817 {
6818 struct movable *m;
6819 struct loop_movables *movables = LOOP_MOVABLES (loop);
6820
6821 for (m = movables->head; m; m = m->next)
6822 if (rtx_equal_p (x, m->set_dest))
6823 {
6824 /* Ok, we found a match. Substitute and simplify. */
6825
6826 /* If we match another movable, we must use that, as
6827 this one is going away. */
6828 if (m->match)
6829 return simplify_giv_expr (loop, m->match->set_dest,
6830 ext_val, benefit);
6831
6832 /* If consec is non-zero, this is a member of a group of
6833 instructions that were moved together. We handle this
6834 case only to the point of seeking to the last insn and
6835 looking for a REG_EQUAL. Fail if we don't find one. */
6836 if (m->consec != 0)
6837 {
6838 int i = m->consec;
6839 tem = m->insn;
6840 do
6841 {
6842 tem = NEXT_INSN (tem);
6843 }
6844 while (--i > 0);
6845
6846 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6847 if (tem)
6848 tem = XEXP (tem, 0);
6849 }
6850 else
6851 {
6852 tem = single_set (m->insn);
6853 if (tem)
6854 tem = SET_SRC (tem);
6855 }
6856
6857 if (tem)
6858 {
6859 /* What we are most interested in is pointer
6860 arithmetic on invariants -- only take
6861 patterns we may be able to do something with. */
6862 if (GET_CODE (tem) == PLUS
6863 || GET_CODE (tem) == MULT
6864 || GET_CODE (tem) == ASHIFT
6865 || GET_CODE (tem) == CONST_INT
6866 || GET_CODE (tem) == SYMBOL_REF)
6867 {
6868 tem = simplify_giv_expr (loop, tem, ext_val,
6869 benefit);
6870 if (tem)
6871 return tem;
6872 }
6873 else if (GET_CODE (tem) == CONST
6874 && GET_CODE (XEXP (tem, 0)) == PLUS
6875 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6876 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
6877 {
6878 tem = simplify_giv_expr (loop, XEXP (tem, 0),
6879 ext_val, benefit);
6880 if (tem)
6881 return tem;
6882 }
6883 }
6884 break;
6885 }
6886 }
6887 break;
6888 }
6889
6890 /* Fall through to general case. */
6891 default:
6892 /* If invariant, return as USE (unless CONST_INT).
6893 Otherwise, not giv. */
6894 if (GET_CODE (x) == USE)
6895 x = XEXP (x, 0);
6896
6897 if (loop_invariant_p (loop, x) == 1)
6898 {
6899 if (GET_CODE (x) == CONST_INT)
6900 return x;
6901 if (GET_CODE (x) == CONST
6902 && GET_CODE (XEXP (x, 0)) == PLUS
6903 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6904 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6905 x = XEXP (x, 0);
6906 return gen_rtx_USE (mode, x);
6907 }
6908 else
6909 return 0;
6910 }
6911 }
6912
6913 /* This routine folds invariants such that there is only ever one
6914 CONST_INT in the summation. It is only used by simplify_giv_expr. */
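/* For example, sge_plus_constant applied to `(plus (reg) (const_int 4))'
 and `(const_int 3)' yields `(plus (reg) (const_int 7))'. */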
6915
6916 static rtx
6917 sge_plus_constant (x, c)
6918 rtx x, c;
6919 {
6920 if (GET_CODE (x) == CONST_INT)
6921 return GEN_INT (INTVAL (x) + INTVAL (c));
6922 else if (GET_CODE (x) != PLUS)
6923 return gen_rtx_PLUS (GET_MODE (x), x, c);
6924 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6925 {
6926 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6927 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6928 }
6929 else if (GET_CODE (XEXP (x, 0)) == PLUS
6930 || GET_CODE (XEXP (x, 1)) != PLUS)
6931 {
6932 return gen_rtx_PLUS (GET_MODE (x),
6933 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6934 }
6935 else
6936 {
6937 return gen_rtx_PLUS (GET_MODE (x),
6938 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6939 }
6940 }
6941
6942 static rtx
6943 sge_plus (mode, x, y)
6944 enum machine_mode mode;
6945 rtx x, y;
6946 {
6947 while (GET_CODE (y) == PLUS)
6948 {
6949 rtx a = XEXP (y, 0);
6950 if (GET_CODE (a) == CONST_INT)
6951 x = sge_plus_constant (x, a);
6952 else
6953 x = gen_rtx_PLUS (mode, x, a);
6954 y = XEXP (y, 1);
6955 }
6956 if (GET_CODE (y) == CONST_INT)
6957 x = sge_plus_constant (x, y);
6958 else
6959 x = gen_rtx_PLUS (mode, x, y);
6960 return x;
6961 }
6962 \f
6963 /* Help detect a giv that is calculated by several consecutive insns;
6964 for example,
6965 giv = biv * M
6966 giv = giv + A
6967 The caller has already identified the first insn P as having a giv as dest;
6968 we check that all other insns that set the same register follow
6969 immediately after P, that they alter nothing else,
6970 and that the result of the last is still a giv.
6971
6972 The value is 0 if the reg set in P is not really a giv.
6973 Otherwise, the value is the amount gained by eliminating
6974 all the consecutive insns that compute the value.
6975
6976 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6977 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6978
6979 The coefficients of the ultimate giv value are stored in
6980 *MULT_VAL and *ADD_VAL. */
6981
6982 static int
6983 consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg,
6984 add_val, mult_val, ext_val, last_consec_insn)
6985 const struct loop *loop;
6986 int first_benefit;
6987 rtx p;
6988 rtx src_reg;
6989 rtx dest_reg;
6990 rtx *add_val;
6991 rtx *mult_val;
6992 rtx *ext_val;
6993 rtx *last_consec_insn;
6994 {
6995 struct loop_ivs *ivs = LOOP_IVS (loop);
6996 struct loop_regs *regs = LOOP_REGS (loop);
6997 int count;
6998 enum rtx_code code;
6999 int benefit;
7000 rtx temp;
7001 rtx set;
7002
7003 /* Indicate that this is a giv so that we can update the value produced in
7004 each insn of the multi-insn sequence.
7005
7006 This induction structure will be used only by the call to
7007 general_induction_var below, so we can allocate it on our stack.
7008 If this is a giv, our caller will replace the induct var entry with
7009 a new induction structure. */
7010 struct induction *v;
7011
7012 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
7013 return 0;
7014
7015 v = (struct induction *) alloca (sizeof (struct induction));
7016 v->src_reg = src_reg;
7017 v->mult_val = *mult_val;
7018 v->add_val = *add_val;
7019 v->benefit = first_benefit;
7020 v->cant_derive = 0;
7021 v->derive_adjustment = 0;
7022 v->ext_dependent = NULL_RTX;
7023
7024 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
7025 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
7026
7027 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
7028
7029 while (count > 0)
7030 {
7031 p = NEXT_INSN (p);
7032 code = GET_CODE (p);
7033
7034 /* If libcall, skip to end of call sequence. */
7035 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
7036 p = XEXP (temp, 0);
7037
7038 if (code == INSN
7039 && (set = single_set (p))
7040 && GET_CODE (SET_DEST (set)) == REG
7041 && SET_DEST (set) == dest_reg
7042 && (general_induction_var (loop, SET_SRC (set), &src_reg,
7043 add_val, mult_val, ext_val, 0,
7044 &benefit, VOIDmode)
7045 /* Giv created by equivalent expression. */
7046 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
7047 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
7048 add_val, mult_val, ext_val, 0,
7049 &benefit, VOIDmode)))
7050 && src_reg == v->src_reg)
7051 {
7052 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
7053 benefit += libcall_benefit (p);
7054
7055 count--;
7056 v->mult_val = *mult_val;
7057 v->add_val = *add_val;
7058 v->benefit += benefit;
7059 }
7060 else if (code != NOTE)
7061 {
7062 /* Allow insns that set something other than this giv to a
7063 constant. Such insns are needed on machines which cannot
7064 include long constants and should not disqualify a giv. */
7065 if (code == INSN
7066 && (set = single_set (p))
7067 && SET_DEST (set) != dest_reg
7068 && CONSTANT_P (SET_SRC (set)))
7069 continue;
7070
7071 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7072 return 0;
7073 }
7074 }
7075
7076 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
7077 *last_consec_insn = p;
7078 return v->benefit;
7079 }
7080 \f
7081 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7082 represented by G1. If no such expression can be found, or it is clear that
7083 it cannot possibly be a valid address, 0 is returned.
7084
7085 To perform the computation, we note that
7086 G1 = x * v + a and
7087 G2 = y * v + b
7088 where `v' is the biv.
7089
7090 So G2 = (y/x) * G1 + (b - a*y/x).
7091
7092 Note that MULT = y/x.
7093
7094 Update: A and B are now allowed to be additive expressions such that
7095 B contains all variables in A. That is, computing B-A will not require
7096 subtracting variables. */
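/* A worked instance of the relation above (purely illustrative values):
   with x = 4, a = 1, y = 8 and b = 10 we get MULT = y/x = 2, and
   G2 = 2 * G1 + (10 - 1*2) = 2 * G1 + 8. */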
7097
7098 static rtx
7099 express_from_1 (a, b, mult)
7100 rtx a, b, mult;
7101 {
7102 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
7103
7104 if (mult == const0_rtx)
7105 return b;
7106
7107 /* If MULT is not 1, we cannot handle A with non-constants, since we
7108 would then be required to subtract multiples of the registers in A.
7109 This is theoretically possible, and may even apply to some Fortran
7110 constructs, but it is a lot of work and we do not attempt it here. */
7111
7112 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
7113 return NULL_RTX;
7114
7115 /* In general these structures are sorted top to bottom (down the PLUS
7116 chain), but not left to right across the PLUS. If B is a higher
7117 order giv than A, we can strip one level and recurse. If A is higher
7118 order, we'll eventually bail out, but won't know that until the end.
7119 If they are the same, we'll strip one level around this loop. */
7120
7121 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7122 {
7123 rtx ra, rb, oa, ob, tmp;
7124
7125 ra = XEXP (a, 0), oa = XEXP (a, 1);
7126 if (GET_CODE (ra) == PLUS)
7127 tmp = ra, ra = oa, oa = tmp;
7128
7129 rb = XEXP (b, 0), ob = XEXP (b, 1);
7130 if (GET_CODE (rb) == PLUS)
7131 tmp = rb, rb = ob, ob = tmp;
7132
7133 if (rtx_equal_p (ra, rb))
7134 /* We matched: remove one reg completely. */
7135 a = oa, b = ob;
7136 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7137 /* An alternate match. */
7138 a = oa, b = rb;
7139 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7140 /* An alternate match. */
7141 a = ra, b = ob;
7142 else
7143 {
7144 /* Indicates an extra register in B. Strip one level from B and
7145 recurse, hoping B was the higher order expression. */
7146 ob = express_from_1 (a, ob, mult);
7147 if (ob == NULL_RTX)
7148 return NULL_RTX;
7149 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7150 }
7151 }
7152
7153 /* Here we are at the last level of A, go through the cases hoping to
7154 get rid of everything but a constant. */
7155
7156 if (GET_CODE (a) == PLUS)
7157 {
7158 rtx ra, oa;
7159
7160 ra = XEXP (a, 0), oa = XEXP (a, 1);
7161 if (rtx_equal_p (oa, b))
7162 oa = ra;
7163 else if (!rtx_equal_p (ra, b))
7164 return NULL_RTX;
7165
7166 if (GET_CODE (oa) != CONST_INT)
7167 return NULL_RTX;
7168
7169 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7170 }
7171 else if (GET_CODE (a) == CONST_INT)
7172 {
7173 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7174 }
7175 else if (CONSTANT_P (a))
7176 {
7177 enum machine_mode mode_a = GET_MODE (a);
7178 enum machine_mode mode_b = GET_MODE (b);
7179 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7180 return simplify_gen_binary (MINUS, mode, b, a);
7181 }
7182 else if (GET_CODE (b) == PLUS)
7183 {
7184 if (rtx_equal_p (a, XEXP (b, 0)))
7185 return XEXP (b, 1);
7186 else if (rtx_equal_p (a, XEXP (b, 1)))
7187 return XEXP (b, 0);
7188 else
7189 return NULL_RTX;
7190 }
7191 else if (rtx_equal_p (a, b))
7192 return const0_rtx;
7193
7194 return NULL_RTX;
7195 }
7196
7197 rtx
7198 express_from (g1, g2)
7199 struct induction *g1, *g2;
7200 {
7201 rtx mult, add;
7202
7203 /* The value that G1 will be multiplied by must be a constant integer. Also,
7204 the only chance we have of getting a valid address is if y/x (see above
7205 for notation) is also an integer.
7206 if (GET_CODE (g1->mult_val) == CONST_INT
7207 && GET_CODE (g2->mult_val) == CONST_INT)
7208 {
7209 if (g1->mult_val == const0_rtx
7210 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7211 return NULL_RTX;
7212 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7213 }
7214 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7215 mult = const1_rtx;
7216 else
7217 {
7218 /* ??? Find out if the one is a multiple of the other? */
7219 return NULL_RTX;
7220 }
7221
7222 add = express_from_1 (g1->add_val, g2->add_val, mult);
7223 if (add == NULL_RTX)
7224 {
7225 /* Failed. If we've got a multiplication factor between G1 and G2,
7226 scale G1's addend and try again. */
7227 if (INTVAL (mult) > 1)
7228 {
7229 rtx g1_add_val = g1->add_val;
7230 if (GET_CODE (g1_add_val) == MULT
7231 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7232 {
7233 HOST_WIDE_INT m;
7234 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7235 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7236 XEXP (g1_add_val, 0), GEN_INT (m));
7237 }
7238 else
7239 {
7240 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7241 mult);
7242 }
7243
7244 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7245 }
7246 }
7247 if (add == NULL_RTX)
7248 return NULL_RTX;
7249
7250 /* Form simplified final result. */
7251 if (mult == const0_rtx)
7252 return add;
7253 else if (mult == const1_rtx)
7254 mult = g1->dest_reg;
7255 else
7256 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
7257
7258 if (add == const0_rtx)
7259 return mult;
7260 else
7261 {
7262 if (GET_CODE (add) == PLUS
7263 && CONSTANT_P (XEXP (add, 1)))
7264 {
7265 rtx tem = XEXP (add, 1);
7266 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7267 add = tem;
7268 }
7269
7270 return gen_rtx_PLUS (g2->mode, mult, add);
7271 }
7272 }
7273 \f
7274 /* Return an rtx, if any, that expresses giv G2 as a function of the register
7275 represented by G1. This indicates that G2 should be combined with G1 and
7276 that G2 can use (either directly or via an address expression) a register
7277 used to represent G1. */
7278
7279 static rtx
7280 combine_givs_p (g1, g2)
7281 struct induction *g1, *g2;
7282 {
7283 rtx comb, ret;
7284
7285 /* With the introduction of ext dependent givs, we must be careful about modes.
7286 G2 must not use a wider mode than G1. */
7287 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7288 return NULL_RTX;
7289
7290 ret = comb = express_from (g1, g2);
7291 if (comb == NULL_RTX)
7292 return NULL_RTX;
7293 if (g1->mode != g2->mode)
7294 ret = gen_lowpart (g2->mode, comb);
7295
7296 /* If these givs are identical, they can be combined. We use the results
7297 of express_from because the addends are not in a canonical form, so
7298 rtx_equal_p is a weaker test. */
7299 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7300 combination to be the other way round. */
7301 if (comb == g1->dest_reg
7302 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
7303 {
7304 return ret;
7305 }
7306
7307 /* If G2 can be expressed as a function of G1 and that function is valid
7308 as an address and no more expensive than using a register for G2,
7309 the expression of G2 in terms of G1 can be used. */
7310 if (ret != NULL_RTX
7311 && g2->giv_type == DEST_ADDR
7312 && memory_address_p (GET_MODE (g2->mem), ret)
7313 /* ??? Loses, especially with -fforce-addr, where *g2->location
7314 will always be a register, and so anything more complicated
7315 gets discarded. */
7316 #if 0
7317 #ifdef ADDRESS_COST
7318 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
7319 #else
7320 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
7321 #endif
7322 #endif
7323 )
7324 {
7325 return ret;
7326 }
7327
7328 return NULL_RTX;
7329 }
7330 \f
7331 /* Check each extension dependent giv in this class to see if its
7332 root biv is safe from wrapping in the interior mode, which would
7333 make the giv illegal. */
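/* A purely illustrative example: a QImode biv starting at 100 and
   incremented by 10 for 20 iterations reaches 300, which wraps past 255
   in the narrow mode; a zero-extended giv derived from it would then
   compute wrong values and must be invalidated by the checks below. */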
7334
7335 static void
7336 check_ext_dependent_givs (bl, loop_info)
7337 struct iv_class *bl;
7338 struct loop_info *loop_info;
7339 {
7340 int ze_ok = 0, se_ok = 0, info_ok = 0;
7341 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7342 HOST_WIDE_INT start_val;
7343 unsigned HOST_WIDE_INT u_end_val = 0;
7344 unsigned HOST_WIDE_INT u_start_val = 0;
7345 rtx incr = pc_rtx;
7346 struct induction *v;
7347
7348 /* Make sure the iteration data is available. We must have
7349 constants in order to be certain of no overflow. */
7350 /* ??? An unknown iteration count with an increment of +-1
7351 combined with friendly exit tests against an invariant
7352 value is also amenable to optimization. Not implemented. */
7353 if (loop_info->n_iterations > 0
7354 && bl->initial_value
7355 && GET_CODE (bl->initial_value) == CONST_INT
7356 && (incr = biv_total_increment (bl))
7357 && GET_CODE (incr) == CONST_INT
7358 /* Make sure the host can represent the arithmetic. */
7359 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7360 {
7361 unsigned HOST_WIDE_INT abs_incr, total_incr;
7362 HOST_WIDE_INT s_end_val;
7363 int neg_incr;
7364
7365 info_ok = 1;
7366 start_val = INTVAL (bl->initial_value);
7367 u_start_val = start_val;
7368
7369 neg_incr = 0, abs_incr = INTVAL (incr);
7370 if (INTVAL (incr) < 0)
7371 neg_incr = 1, abs_incr = -abs_incr;
7372 total_incr = abs_incr * loop_info->n_iterations;
7373
7374 /* Check for host arithmetic overflow. */
7375 if (total_incr / loop_info->n_iterations == abs_incr)
7376 {
7377 unsigned HOST_WIDE_INT u_max;
7378 HOST_WIDE_INT s_max;
7379
7380 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7381 s_end_val = u_end_val;
7382 u_max = GET_MODE_MASK (biv_mode);
7383 s_max = u_max >> 1;
7384
7385 /* Check zero extension of biv ok. */
7386 if (start_val >= 0
7387 /* Check for host arithmetic overflow. */
7388 && (neg_incr
7389 ? u_end_val < u_start_val
7390 : u_end_val > u_start_val)
7391 /* Check for target arithmetic overflow. */
7392 && (neg_incr
7393 ? 1 /* taken care of with host overflow */
7394 : u_end_val <= u_max))
7395 {
7396 ze_ok = 1;
7397 }
7398
7399 /* Check sign extension of biv ok. */
7400 /* ??? While it is true that overflow with signed and pointer
7401 arithmetic is undefined, I fear too many programmers don't
7402 keep this fact in mind -- myself included on occasion.
7403 So leave the signed overflow optimizations alone. */
7404 if (start_val >= -s_max - 1
7405 /* Check for host arithmetic overflow. */
7406 && (neg_incr
7407 ? s_end_val < start_val
7408 : s_end_val > start_val)
7409 /* Check for target arithmetic overflow. */
7410 && (neg_incr
7411 ? s_end_val >= -s_max - 1
7412 : s_end_val <= s_max))
7413 {
7414 se_ok = 1;
7415 }
7416 }
7417 }
7418
7419 /* Invalidate givs that fail the tests. */
7420 for (v = bl->giv; v; v = v->next_iv)
7421 if (v->ext_dependent)
7422 {
7423 enum rtx_code code = GET_CODE (v->ext_dependent);
7424 int ok = 0;
7425
7426 switch (code)
7427 {
7428 case SIGN_EXTEND:
7429 ok = se_ok;
7430 break;
7431 case ZERO_EXTEND:
7432 ok = ze_ok;
7433 break;
7434
7435 case TRUNCATE:
7436 /* We don't know whether this value is being used as either
7437 signed or unsigned, so to safely truncate we must satisfy
7438 both. The initial check here verifies the BIV itself;
7439 once that is successful we may check its range wrt the
7440 derived GIV. */
7441 if (se_ok && ze_ok)
7442 {
7443 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
7444 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7445
7446 /* We know from the above that both endpoints are nonnegative,
7447 and that there is no wrapping. Verify that both endpoints
7448 are within the (signed) range of the outer mode. */
7449 if (u_start_val <= max && u_end_val <= max)
7450 ok = 1;
7451 }
7452 break;
7453
7454 default:
7455 abort ();
7456 }
7457
7458 if (ok)
7459 {
7460 if (loop_dump_stream)
7461 {
7462 fprintf (loop_dump_stream,
7463 "Verified ext dependent giv at %d of reg %d\n",
7464 INSN_UID (v->insn), bl->regno);
7465 }
7466 }
7467 else
7468 {
7469 if (loop_dump_stream)
7470 {
7471 const char *why;
7472
7473 if (info_ok)
7474 why = "biv iteration values overflowed";
7475 else
7476 {
7477 if (incr == pc_rtx)
7478 incr = biv_total_increment (bl);
7479 if (incr == const1_rtx)
7480 why = "biv iteration info incomplete; incr by 1";
7481 else
7482 why = "biv iteration info incomplete";
7483 }
7484
7485 fprintf (loop_dump_stream,
7486 "Failed ext dependent giv at %d, %s\n",
7487 INSN_UID (v->insn), why);
7488 }
7489 v->ignore = 1;
7490 bl->all_reduced = 0;
7491 }
7492 }
7493 }
7494
7495 /* Generate a version of VALUE in a mode appropriate for initializing V. */
7496
7497 rtx
7498 extend_value_for_giv (v, value)
7499 struct induction *v;
7500 rtx value;
7501 {
7502 rtx ext_dep = v->ext_dependent;
7503
7504 if (! ext_dep)
7505 return value;
7506
7507 /* Recall that check_ext_dependent_givs verified that the known bounds
7508 of a biv did not overflow or wrap with respect to the extension for
7509 the giv. Therefore, constants need no additional adjustment. */
7510 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7511 return value;
7512
7513 /* Otherwise, we must adjust the value to compensate for the
7514 differing modes of the biv and the giv. */
7515 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
7516 }
7517 \f
7518 struct combine_givs_stats
7519 {
7520 int giv_number;
7521 int total_benefit;
7522 };
7523
7524 static int
7525 cmp_combine_givs_stats (xp, yp)
7526 const PTR xp;
7527 const PTR yp;
7528 {
7529 const struct combine_givs_stats * const x =
7530 (const struct combine_givs_stats *) xp;
7531 const struct combine_givs_stats * const y =
7532 (const struct combine_givs_stats *) yp;
7533 int d;
7534 d = y->total_benefit - x->total_benefit;
7535 /* Stabilize the sort. */
7536 if (!d)
7537 d = x->giv_number - y->giv_number;
7538 return d;
7539 }
7540
7541 /* Check all pairs of givs for iv_class BL and see if any can be combined with
7542 any other. If so, point SAME to the giv combined with and set NEW_REG to
7543 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7544 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
7545
7546 static void
7547 combine_givs (regs, bl)
7548 struct loop_regs *regs;
7549 struct iv_class *bl;
7550 {
7551 /* Additional benefit to add for being combined multiple times. */
7552 const int extra_benefit = 3;
7553
7554 struct induction *g1, *g2, **giv_array;
7555 int i, j, k, giv_count;
7556 struct combine_givs_stats *stats;
7557 rtx *can_combine;
7558
7559 /* Count givs, because bl->giv_count is incorrect here. */
7560 giv_count = 0;
7561 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7562 if (!g1->ignore)
7563 giv_count++;
7564
7565 giv_array
7566 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7567 i = 0;
7568 for (g1 = bl->giv; g1; g1 = g1->next_iv)
7569 if (!g1->ignore)
7570 giv_array[i++] = g1;
7571
7572 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
7573 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
7574
7575 for (i = 0; i < giv_count; i++)
7576 {
7577 int this_benefit;
7578 rtx single_use;
7579
7580 g1 = giv_array[i];
7581 stats[i].giv_number = i;
7582
7583 /* If a DEST_REG GIV is used only once, do not allow it to combine
7584 with anything, for in doing so we will gain nothing that cannot
7585 be had by simply letting the GIV with which we would have combined
7586 be reduced on its own. The lossage shows up in particular with
7587 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7588 be seen elsewhere as well. */
7589 if (g1->giv_type == DEST_REG
7590 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
7591 && single_use != const0_rtx)
7592 continue;
7593
7594 this_benefit = g1->benefit;
7595 /* Add an additional weight for zero addends. */
7596 if (g1->no_const_addval)
7597 this_benefit += 1;
7598
7599 for (j = 0; j < giv_count; j++)
7600 {
7601 rtx this_combine;
7602
7603 g2 = giv_array[j];
7604 if (g1 != g2
7605 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7606 {
7607 can_combine[i * giv_count + j] = this_combine;
7608 this_benefit += g2->benefit + extra_benefit;
7609 }
7610 }
7611 stats[i].total_benefit = this_benefit;
7612 }
7613
7614 /* Iterate, combining until we can't. */
7615 restart:
7616 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
7617
7618 if (loop_dump_stream)
7619 {
7620 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7621 for (k = 0; k < giv_count; k++)
7622 {
7623 g1 = giv_array[stats[k].giv_number];
7624 if (!g1->combined_with && !g1->same)
7625 fprintf (loop_dump_stream, " {%d, %d}",
7626 INSN_UID (giv_array[stats[k].giv_number]->insn),
7627 stats[k].total_benefit);
7628 }
7629 putc ('\n', loop_dump_stream);
7630 }
7631
7632 for (k = 0; k < giv_count; k++)
7633 {
7634 int g1_add_benefit = 0;
7635
7636 i = stats[k].giv_number;
7637 g1 = giv_array[i];
7638
7639 /* If it has already been combined, skip. */
7640 if (g1->combined_with || g1->same)
7641 continue;
7642
7643 for (j = 0; j < giv_count; j++)
7644 {
7645 g2 = giv_array[j];
7646 if (g1 != g2 && can_combine[i * giv_count + j]
7647 /* If it has already been combined, skip. */
7648 && ! g2->same && ! g2->combined_with)
7649 {
7650 int l;
7651
7652 g2->new_reg = can_combine[i * giv_count + j];
7653 g2->same = g1;
7654 /* For a DEST_ADDR giv, the replacement may now be a memory
7655 expression rather than a register. This changes the costs
7656 considerably, so add the compensation. */
7657 if (g2->giv_type == DEST_ADDR)
7658 g2->benefit = (g2->benefit + reg_address_cost
7659 - address_cost (g2->new_reg,
7660 GET_MODE (g2->mem)));
7661 g1->combined_with++;
7662 g1->lifetime += g2->lifetime;
7663
7664 g1_add_benefit += g2->benefit;
7665
7666 /* ??? The new final_[bg]iv_value code does a much better job
7667 of finding replaceable giv's, and hence this code may no
7668 longer be necessary. */
7669 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7670 g1_add_benefit -= copy_cost;
7671
7672 /* To help optimize the next set of combinations, remove
7673 this giv from the benefits of other potential mates. */
7674 for (l = 0; l < giv_count; ++l)
7675 {
7676 int m = stats[l].giv_number;
7677 if (can_combine[m * giv_count + j])
7678 stats[l].total_benefit -= g2->benefit + extra_benefit;
7679 }
7680
7681 if (loop_dump_stream)
7682 fprintf (loop_dump_stream,
7683 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7684 INSN_UID (g2->insn), INSN_UID (g1->insn),
7685 g1->benefit, g1_add_benefit, g1->lifetime);
7686 }
7687 }
7688
7689 /* To help optimize the next set of combinations, remove
7690 this giv from the benefits of other potential mates. */
7691 if (g1->combined_with)
7692 {
7693 for (j = 0; j < giv_count; ++j)
7694 {
7695 int m = stats[j].giv_number;
7696 if (can_combine[m * giv_count + i])
7697 stats[j].total_benefit -= g1->benefit + extra_benefit;
7698 }
7699
7700 g1->benefit += g1_add_benefit;
7701
7702 /* We've finished with this giv, and everything it touched.
7703 Restart the combination so that the weights for the
7704 rest of the givs are properly taken into account. */
7705 /* ??? Ideally we would compact the arrays at this point, so
7706 as to not cover old ground. But sanely compacting
7707 can_combine is tricky. */
7708 goto restart;
7709 }
7710 }
7711
7712 /* Clean up. */
7713 free (stats);
7714 free (can_combine);
7715 }
7716 \f
7717 /* Generate sequence for REG = B * M + A. */
7718
7719 static rtx
7720 gen_add_mult (b, m, a, reg)
7721 rtx b; /* initial value of basic induction variable */
7722 rtx m; /* multiplicative constant */
7723 rtx a; /* additive constant */
7724 rtx reg; /* destination register */
7725 {
7726 rtx seq;
7727 rtx result;
7728
7729 start_sequence ();
7730 /* Use unsigned arithmetic. */
7731 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7732 if (reg != result)
7733 emit_move_insn (reg, result);
7734 seq = get_insns ();
7735 end_sequence ();
7736
7737 return seq;
7738 }
7739
7740
7741 /* Update registers created in insn sequence SEQ. */
7742
7743 static void
7744 loop_regs_update (loop, seq)
7745 const struct loop *loop ATTRIBUTE_UNUSED;
7746 rtx seq;
7747 {
7748 rtx insn;
7749
7750 /* Update register info for alias analysis. */
7751
7752 if (seq == NULL_RTX)
7753 return;
7754
7755 if (INSN_P (seq))
7756 {
7757 insn = seq;
7758 while (insn != NULL_RTX)
7759 {
7760 rtx set = single_set (insn);
7761
7762 if (set && GET_CODE (SET_DEST (set)) == REG)
7763 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
7764
7765 insn = NEXT_INSN (insn);
7766 }
7767 }
7768 else if (GET_CODE (seq) == SET
7769 && GET_CODE (SET_DEST (seq)) == REG)
7770 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
7771 }
7772
7773
7774 /* Emit code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. */
7775
7776 void
7777 loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn)
7778 const struct loop *loop;
7779 rtx b; /* initial value of basic induction variable */
7780 rtx m; /* multiplicative constant */
7781 rtx a; /* additive constant */
7782 rtx reg; /* destination register */
7783 basic_block before_bb;
7784 rtx before_insn;
7785 {
7786 rtx seq;
7787
7788 if (! before_insn)
7789 {
7790 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7791 return;
7792 }
7793
7794 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7795 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7796
7797 /* Increase the lifetime of any invariants moved further in code. */
7798 update_reg_last_use (a, before_insn);
7799 update_reg_last_use (b, before_insn);
7800 update_reg_last_use (m, before_insn);
7801
7802 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7803
7804 /* It is possible that the expansion created lots of new registers.
7805 Iterate over the sequence we just created and record them all. */
7806 loop_regs_update (loop, seq);
7807 }
7808
7809
7810 /* Emit insns after the end of the loop to set REG = B * M + A. */
7811
7812 void
7813 loop_iv_add_mult_sink (loop, b, m, a, reg)
7814 const struct loop *loop;
7815 rtx b; /* initial value of basic induction variable */
7816 rtx m; /* multiplicative constant */
7817 rtx a; /* additive constant */
7818 rtx reg; /* destination register */
7819 {
7820 rtx seq;
7821
7822 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7823 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7824
7825 /* Increase the lifetime of any invariants moved further in code.
7826 ???? Is this really necessary? */
7827 update_reg_last_use (a, loop->sink);
7828 update_reg_last_use (b, loop->sink);
7829 update_reg_last_use (m, loop->sink);
7830
7831 loop_insn_sink (loop, seq);
7832
7833 /* It is possible that the expansion created lots of new registers.
7834 Iterate over the sequence we just created and record them all. */
7835 loop_regs_update (loop, seq);
7836 }
7837
7838
7839 /* Emit insns in the loop pre-header to set REG = B * M + A. */
7840
7841 void
7842 loop_iv_add_mult_hoist (loop, b, m, a, reg)
7843 const struct loop *loop;
7844 rtx b; /* initial value of basic induction variable */
7845 rtx m; /* multiplicative constant */
7846 rtx a; /* additive constant */
7847 rtx reg; /* destination register */
7848 {
7849 rtx seq;
7850
7851 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
7852 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
7853
7854 loop_insn_hoist (loop, seq);
7855
7856 /* It is possible that the expansion created lots of new registers.
7857 Iterate over the sequence we just created and record them all. */
7858 loop_regs_update (loop, seq);
7859 }
7860
7861
7862
7863 /* Similar to gen_add_mult, but compute cost rather than generating
7864 sequence. */
7865
7866 static int
7867 iv_add_mult_cost (b, m, a, reg)
7868 rtx b; /* initial value of basic induction variable */
7869 rtx m; /* multiplicative constant */
7870 rtx a; /* additive constant */
7871 rtx reg; /* destination register */
7872 {
7873 int cost = 0;
7874 rtx last, result;
7875
7876 start_sequence ();
7877 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
7878 if (reg != result)
7879 emit_move_insn (reg, result);
7880 last = get_last_insn ();
7881 while (last)
7882 {
7883 rtx t = single_set (last);
7884 if (t)
7885 cost += rtx_cost (SET_SRC (t), SET);
7886 last = PREV_INSN (last);
7887 }
7888 end_sequence ();
7889 return cost;
7890 }
7891 \f
7892 /* Test whether A * B can be computed without
7893 an actual multiply insn. Value is 1 if so.
7894
7895 ??? This function stinks because it generates a ton of wasted RTL
7896 ??? and as a result fragments GC memory to no end. There are other
7897 ??? places in the compiler which are invoked a lot and do the same
7898 ??? thing, generate wasted RTL just to see if something is possible. */
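/* Illustrative note: a multiply by a small power of two is normally
   expanded as a single shift and so counts as cheap here, whereas a
   constant multiplier that expands to a libcall or a long shift/add
   sequence does not; the actual decision is made by inspecting the
   insns generated below. */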
7899
7900 static int
7901 product_cheap_p (a, b)
7902 rtx a;
7903 rtx b;
7904 {
7905 rtx tmp;
7906 int win, n_insns;
7907
7908 /* If only one is constant, make it B. */
7909 if (GET_CODE (a) == CONST_INT)
7910 tmp = a, a = b, b = tmp;
7911
7912 /* If first constant, both constant, so don't need multiply. */
7913 if (GET_CODE (a) == CONST_INT)
7914 return 1;
7915
7916 /* If second not constant, neither is constant, so would need multiply. */
7917 if (GET_CODE (b) != CONST_INT)
7918 return 0;
7919
7920 /* One operand is constant, so might not need multiply insn. Generate the
7921 code for the multiply and see if a call or multiply, or long sequence
7922 of insns is generated. */
7923
7924 start_sequence ();
7925 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
7926 tmp = get_insns ();
7927 end_sequence ();
7928
7929 win = 1;
7930 if (INSN_P (tmp))
7931 {
7932 n_insns = 0;
7933 while (tmp != NULL_RTX)
7934 {
7935 rtx next = NEXT_INSN (tmp);
7936
7937 if (++n_insns > 3
7938 || GET_CODE (tmp) != INSN
7939 || (GET_CODE (PATTERN (tmp)) == SET
7940 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
7941 || (GET_CODE (PATTERN (tmp)) == PARALLEL
7942 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
7943 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
7944 {
7945 win = 0;
7946 break;
7947 }
7948
7949 tmp = next;
7950 }
7951 }
7952 else if (GET_CODE (tmp) == SET
7953 && GET_CODE (SET_SRC (tmp)) == MULT)
7954 win = 0;
7955 else if (GET_CODE (tmp) == PARALLEL
7956 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7957 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7958 win = 0;
7959
7960 return win;
7961 }
7962 \f
7963 /* Check to see if loop can be terminated by a "decrement and branch until
7964 zero" instruction. If so, add a REG_NONNEG note to the branch insn.
7965 Also try reversing an increment loop to a decrement loop
7966 to see if the optimization can be performed.
7967 Value is nonzero if optimization was performed. */
7968
7969 /* This is useful even if the architecture doesn't have such an insn,
7970 because it might change a loop which increments from 0 to n to a loop
7971 which decrements from n to 0. A loop that decrements to zero is usually
7972 faster than one that increments from zero. */
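/* At the source level the intended effect is roughly (an illustrative
   sketch only):

   for (i = 0; i < n; i++) body;

   becomes

   for (i = n; --i >= 0; ) body;

   so that the exit test becomes a compare against zero. */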
7973
7974 /* ??? This could be rewritten to use some of the loop unrolling procedures,
7975 such as approx_final_value, biv_total_increment, loop_iterations, and
7976 final_[bg]iv_value. */
7977
7978 static int
7979 check_dbra_loop (loop, insn_count)
7980 struct loop *loop;
7981 int insn_count;
7982 {
7983 struct loop_info *loop_info = LOOP_INFO (loop);
7984 struct loop_regs *regs = LOOP_REGS (loop);
7985 struct loop_ivs *ivs = LOOP_IVS (loop);
7986 struct iv_class *bl;
7987 rtx reg;
7988 rtx jump_label;
7989 rtx final_value;
7990 rtx start_value;
7991 rtx new_add_val;
7992 rtx comparison;
7993 rtx before_comparison;
7994 rtx p;
7995 rtx jump;
7996 rtx first_compare;
7997 int compare_and_branch;
7998 rtx loop_start = loop->start;
7999 rtx loop_end = loop->end;
8000
8001 /* If last insn is a conditional branch, and the insn before tests a
8002 register value, try to optimize it. Otherwise, we can't do anything. */
8003
8004 jump = PREV_INSN (loop_end);
8005 comparison = get_condition_for_loop (loop, jump);
8006 if (comparison == 0)
8007 return 0;
8008 if (!onlyjump_p (jump))
8009 return 0;
8010
8011 /* Try to compute whether the compare/branch at the loop end is one or
8012 two instructions. */
8013 get_condition (jump, &first_compare);
8014 if (first_compare == jump)
8015 compare_and_branch = 1;
8016 else if (first_compare == prev_nonnote_insn (jump))
8017 compare_and_branch = 2;
8018 else
8019 return 0;
8020
8021 {
8022 /* If more than one condition is present to control the loop, then
8023 do not proceed, as this function does not know how to rewrite
8024 loop tests with more than one condition.
8025
8026 Look backwards from the first insn in the last comparison
8027 sequence and see if we've got another comparison sequence. */
8028
8029 rtx jump1;
8030 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
8031 if (GET_CODE (jump1) == JUMP_INSN)
8032 return 0;
8033 }
8034
8035 /* Check all of the bivs to see if the compare uses one of them.
8036 Skip biv's set more than once because we can't guarantee that
8037 it will be zero on the last iteration. Also skip if the biv is
8038 used between its update and the test insn. */
8039
8040 for (bl = ivs->list; bl; bl = bl->next)
8041 {
8042 if (bl->biv_count == 1
8043 && ! bl->biv->maybe_multiple
8044 && bl->biv->dest_reg == XEXP (comparison, 0)
8045 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
8046 first_compare))
8047 break;
8048 }
8049
8050 if (! bl)
8051 return 0;
8052
8053 /* Look for the case where the basic induction variable is always
8054 nonnegative, and equals zero on the last iteration.
8055 In this case, add a reg_note REG_NONNEG, which allows the
8056 m68k DBRA instruction to be used. */
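/* Illustrative example: a biv initialized to 6 and decremented by 2
   each iteration satisfies 6 % 2 == 0, so it reaches exactly 0 on the
   last iteration and is never negative before that. */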
8057
8058 if (((GET_CODE (comparison) == GT
8059 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
8060 && INTVAL (XEXP (comparison, 1)) == -1)
8061 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
8062 && GET_CODE (bl->biv->add_val) == CONST_INT
8063 && INTVAL (bl->biv->add_val) < 0)
8064 {
8065 /* Initial value must be greater than 0,
8066 and init_val % -dec_value == 0 to ensure that it equals zero on
8067 the last iteration. */
8068
8069 if (GET_CODE (bl->initial_value) == CONST_INT
8070 && INTVAL (bl->initial_value) > 0
8071 && (INTVAL (bl->initial_value)
8072 % (-INTVAL (bl->biv->add_val))) == 0)
8073 {
8074 /* register always nonnegative, add REG_NOTE to branch */
8075 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8076 REG_NOTES (jump)
8077 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8078 REG_NOTES (jump));
8079 bl->nonneg = 1;
8080
8081 return 1;
8082 }
8083
8084 /* If the decrement is 1 and the value was tested as >= 0 before
8085 the loop, then we can safely optimize. */
8086 for (p = loop_start; p; p = PREV_INSN (p))
8087 {
8088 if (GET_CODE (p) == CODE_LABEL)
8089 break;
8090 if (GET_CODE (p) != JUMP_INSN)
8091 continue;
8092
8093 before_comparison = get_condition_for_loop (loop, p);
8094 if (before_comparison
8095 && XEXP (before_comparison, 0) == bl->biv->dest_reg
8096 && GET_CODE (before_comparison) == LT
8097 && XEXP (before_comparison, 1) == const0_rtx
8098 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
8099 && INTVAL (bl->biv->add_val) == -1)
8100 {
8101 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8102 REG_NOTES (jump)
8103 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8104 REG_NOTES (jump));
8105 bl->nonneg = 1;
8106
8107 return 1;
8108 }
8109 }
8110 }
8111 else if (GET_CODE (bl->biv->add_val) == CONST_INT
8112 && INTVAL (bl->biv->add_val) > 0)
8113 {
8114 /* Try to change inc to dec, so can apply above optimization. */
8115 /* Can do this if:
8116 all registers modified are induction variables or invariant,
8117 all memory references have non-overlapping addresses
8118 (obviously true if only one write)
8119 allow 2 insns for the compare/jump at the end of the loop. */
8120 /* Also, we must avoid any instructions which use both the reversed
8121 biv and another biv. Such instructions will fail if the loop is
8122 reversed. We meet this condition by requiring that either
8123 no_use_except_counting is true, or else that there is only
8124 one biv. */
8125 int num_nonfixed_reads = 0;
8126 /* 1 if the iteration var is used only to count iterations. */
8127 int no_use_except_counting = 0;
8128 /* 1 if the loop has no memory store, or it has a single memory store
8129 which is reversible. */
8130 int reversible_mem_store = 1;
8131
8132 if (bl->giv_count == 0
8133 && !loop->exit_count
8134 && !loop_info->has_multiple_exit_targets)
8135 {
8136 rtx bivreg = regno_reg_rtx[bl->regno];
8137 struct iv_class *blt;
8138
8139 /* If there are no givs for this biv, and the only exit is the
8140 fall through at the end of the loop, then
8141 see if perhaps there are no uses except to count. */
8142 no_use_except_counting = 1;
8143 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8144 if (INSN_P (p))
8145 {
8146 rtx set = single_set (p);
8147
8148 if (set && GET_CODE (SET_DEST (set)) == REG
8149 && REGNO (SET_DEST (set)) == bl->regno)
8150 /* An insn that sets the biv is okay. */
8151 ;
8152 else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8153 || p == prev_nonnote_insn (loop_end))
8154 && reg_mentioned_p (bivreg, PATTERN (p)))
8155 {
8156 /* If either of these insns uses the biv and sets a pseudo
8157 that has more than one usage, then the biv has uses
8158 other than counting since it's used to derive a value
8159 that is used more than one time. */
8160 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
8161 regs);
8162 if (regs->multiple_uses)
8163 {
8164 no_use_except_counting = 0;
8165 break;
8166 }
8167 }
8168 else if (reg_mentioned_p (bivreg, PATTERN (p)))
8169 {
8170 no_use_except_counting = 0;
8171 break;
8172 }
8173 }
8174
8175 /* A biv has uses besides counting if it is used to set
8176 another biv. */
8177 for (blt = ivs->list; blt; blt = blt->next)
8178 if (blt->init_set
8179 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
8180 {
8181 no_use_except_counting = 0;
8182 break;
8183 }
8184 }
8185
8186 if (no_use_except_counting)
8187 /* No need to worry about MEMs. */
8188 ;
8189 else if (loop_info->num_mem_sets <= 1)
8190 {
8191 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8192 if (INSN_P (p))
8193 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
8194
8195 /* If the loop has a single store, and the destination address is
8196 invariant, then we can't reverse the loop, because this address
8197 might then have the wrong value at loop exit.
8198 This would work if the source was invariant also, however, in that
8199 case, the insn should have been moved out of the loop. */
8200
8201 if (loop_info->num_mem_sets == 1)
8202 {
8203 struct induction *v;
8204
8205 /* If we could prove that each of the memory locations
8206 written to was different, then we could reverse the
8207 store -- but we don't presently have any way of
8208 knowing that. */
8209 reversible_mem_store = 0;
8210
8211 /* If the store depends on a register that is set after the
8212 store, it depends on the initial value, and is thus not
8213 reversible. */
8214 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8215 {
8216 if (v->giv_type == DEST_REG
8217 && reg_mentioned_p (v->dest_reg,
8218 PATTERN (loop_info->first_loop_store_insn))
8219 && loop_insn_first_p (loop_info->first_loop_store_insn,
8220 v->insn))
8221 reversible_mem_store = 0;
8222 }
8223 }
8224 }
8225 else
8226 return 0;
8227
8228 /* This code only acts for innermost loops. Also it simplifies
8229 the memory address check by only reversing loops with
8230 zero or one memory access.
8231 Two memory accesses could involve parts of the same array,
8232 and that can't be reversed.
8233 If the biv is used only for counting, then we don't need to worry
8234 about all these things. */
8235
8236 if ((num_nonfixed_reads <= 1
8237 && ! loop_info->has_nonconst_call
8238 && ! loop_info->has_prefetch
8239 && ! loop_info->has_volatile
8240 && reversible_mem_store
8241 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
8242 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
8243 && (bl == ivs->list && bl->next == 0))
8244 || (no_use_except_counting && ! loop_info->has_prefetch))
8245 {
8246 rtx tem;
8247
8248 /* Loop can be reversed. */
8249 if (loop_dump_stream)
8250 fprintf (loop_dump_stream, "Can reverse loop\n");
8251
8252 /* Now check other conditions:
8253
8254 The increment must be a constant, as must the initial value,
8255 and the comparison code must be LT.
8256
8257 This test can probably be improved since +/- 1 in the constant
8258 can be obtained by changing LT to LE and vice versa; this is
8259 confusing. */
8260
8261 if (comparison
8262 /* for constants, LE gets turned into LT */
8263 && (GET_CODE (comparison) == LT
8264 || (GET_CODE (comparison) == LE
8265 && no_use_except_counting)))
8266 {
8267 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
8268 rtx initial_value, comparison_value;
8269 int nonneg = 0;
8270 enum rtx_code cmp_code;
8271 int comparison_const_width;
8272 unsigned HOST_WIDE_INT comparison_sign_mask;
8273
8274 add_val = INTVAL (bl->biv->add_val);
8275 comparison_value = XEXP (comparison, 1);
8276 if (GET_MODE (comparison_value) == VOIDmode)
8277 comparison_const_width
8278 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8279 else
8280 comparison_const_width
8281 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
8282 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8283 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8284 comparison_sign_mask
8285 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
8286
8287 /* If the comparison value is not a loop invariant, then we
8288 can not reverse this loop.
8289
8290 ??? If the insns which initialize the comparison value as
8291 a whole compute an invariant result, then we could move
8292 them out of the loop and proceed with loop reversal. */
8293 if (! loop_invariant_p (loop, comparison_value))
8294 return 0;
8295
8296 if (GET_CODE (comparison_value) == CONST_INT)
8297 comparison_val = INTVAL (comparison_value);
8298 initial_value = bl->initial_value;
8299
8300 /* Normalize the initial value if it is an integer and
8301 has no other use except as a counter. This will allow
8302 a few more loops to be reversed. */
8303 if (no_use_except_counting
8304 && GET_CODE (comparison_value) == CONST_INT
8305 && GET_CODE (initial_value) == CONST_INT)
8306 {
8307 comparison_val = comparison_val - INTVAL (bl->initial_value);
8308 /* The code below requires comparison_val to be a multiple
8309 of add_val in order to do the loop reversal, so
8310 round up comparison_val to a multiple of add_val.
8311 Since comparison_value is constant, we know that the
8312 current comparison code is LT. */
8313 comparison_val = comparison_val + add_val - 1;
8314 comparison_val
8315 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8316 /* We postpone overflow checks for COMPARISON_VAL here;
8317 even if there is an overflow, we might still be able to
8318 reverse the loop, if converting the loop exit test to
8319 NE is possible. */
8320 initial_value = const0_rtx;
8321 }
8322
8323 /* First check if we can do a vanilla loop reversal. */
8324 if (initial_value == const0_rtx
8325 /* If we have a decrement_and_branch_on_count,
8326 prefer the NE test, since this will allow that
8327 instruction to be generated. Note that we must
8328 use a vanilla loop reversal if the biv is used to
8329 calculate a giv or has a non-counting use. */
8330 #if ! defined (HAVE_decrement_and_branch_until_zero) \
8331 && defined (HAVE_decrement_and_branch_on_count)
8332 && (! (add_val == 1 && loop->vtop
8333 && (bl->biv_count == 0
8334 || no_use_except_counting)))
8335 #endif
8336 && GET_CODE (comparison_value) == CONST_INT
8337 /* Now do postponed overflow checks on COMPARISON_VAL. */
8338 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8339 & comparison_sign_mask))
8340 {
8341 /* Register will always be nonnegative, with value
8342 0 on last iteration */
8343 add_adjust = add_val;
8344 nonneg = 1;
8345 cmp_code = GE;
8346 }
8347 else if (add_val == 1 && loop->vtop
8348 && (bl->biv_count == 0
8349 || no_use_except_counting))
8350 {
8351 add_adjust = 0;
8352 cmp_code = NE;
8353 }
8354 else
8355 return 0;
8356
8357 if (GET_CODE (comparison) == LE)
8358 add_adjust -= add_val;
8359
8360 /* If the initial value is not zero, or if the comparison
8361 value is not an exact multiple of the increment, then we
8362 can not reverse this loop. */
8363 if (initial_value == const0_rtx
8364 && GET_CODE (comparison_value) == CONST_INT)
8365 {
8366 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8367 return 0;
8368 }
8369 else
8370 {
8371 if (! no_use_except_counting || add_val != 1)
8372 return 0;
8373 }
8374
8375 final_value = comparison_value;
8376
8377 /* Reset these in case we normalized the initial value
8378 and comparison value above. */
8379 if (GET_CODE (comparison_value) == CONST_INT
8380 && GET_CODE (initial_value) == CONST_INT)
8381 {
8382 comparison_value = GEN_INT (comparison_val);
8383 final_value
8384 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8385 }
8386 bl->initial_value = initial_value;
8387
8388 /* Save some info needed to produce the new insns. */
8389 reg = bl->biv->dest_reg;
8390 jump_label = condjump_label (PREV_INSN (loop_end));
8391 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
8392
8393 /* Set start_value; if this is not a CONST_INT, we need
8394 to generate a SUB.
8395 Initialize biv to start_value before loop start.
8396 The old initializing insn will be deleted as a
8397 dead store by flow.c. */
8398 if (initial_value == const0_rtx
8399 && GET_CODE (comparison_value) == CONST_INT)
8400 {
8401 start_value = GEN_INT (comparison_val - add_adjust);
8402 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
8403 }
8404 else if (GET_CODE (initial_value) == CONST_INT)
8405 {
8406 enum machine_mode mode = GET_MODE (reg);
8407 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8408 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8409
8410 if (add_insn == 0)
8411 return 0;
8412
8413 start_value
8414 = gen_rtx_PLUS (mode, comparison_value, offset);
8415 loop_insn_hoist (loop, add_insn);
8416 if (GET_CODE (comparison) == LE)
8417 final_value = gen_rtx_PLUS (mode, comparison_value,
8418 GEN_INT (add_val));
8419 }
8420 else if (! add_adjust)
8421 {
8422 enum machine_mode mode = GET_MODE (reg);
8423 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8424 initial_value);
8425
8426 if (sub_insn == 0)
8427 return 0;
8428 start_value
8429 = gen_rtx_MINUS (mode, comparison_value, initial_value);
8430 loop_insn_hoist (loop, sub_insn);
8431 }
8432 else
8433 /* We could handle the other cases too, but it'll be
8434 better to have a testcase first. */
8435 return 0;
8436
8437 /* We may not have a single insn which can increment a reg, so
8438 create a sequence to hold all the insns from expand_inc. */
8439 start_sequence ();
8440 expand_inc (reg, new_add_val);
8441 tem = get_insns ();
8442 end_sequence ();
8443
8444 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
8445 delete_insn (bl->biv->insn);
8446
8447 /* Update biv info to reflect its new status. */
8448 bl->biv->insn = p;
8449 bl->initial_value = start_value;
8450 bl->biv->add_val = new_add_val;
8451
8452 /* Update loop info. */
8453 loop_info->initial_value = reg;
8454 loop_info->initial_equiv_value = reg;
8455 loop_info->final_value = const0_rtx;
8456 loop_info->final_equiv_value = const0_rtx;
8457 loop_info->comparison_value = const0_rtx;
8458 loop_info->comparison_code = cmp_code;
8459 loop_info->increment = new_add_val;
8460
8461 /* Inc LABEL_NUSES so that delete_insn will
8462 not delete the label. */
8463 LABEL_NUSES (XEXP (jump_label, 0))++;
8464
8465 /* Emit an insn after the end of the loop to set the biv's
8466 proper exit value if it is used anywhere outside the loop. */
8467 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
8468 || ! bl->init_insn
8469 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
8470 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
8471
8472 /* Delete compare/branch at end of loop. */
8473 delete_related_insns (PREV_INSN (loop_end));
8474 if (compare_and_branch == 2)
8475 delete_related_insns (first_compare);
8476
8477 /* Add new compare/branch insn at end of loop. */
8478 start_sequence ();
8479 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
8480 GET_MODE (reg), 0,
8481 XEXP (jump_label, 0));
8482 tem = get_insns ();
8483 end_sequence ();
8484 emit_jump_insn_before (tem, loop_end);
8485
8486 for (tem = PREV_INSN (loop_end);
8487 tem && GET_CODE (tem) != JUMP_INSN;
8488 tem = PREV_INSN (tem))
8489 ;
8490
8491 if (tem)
8492 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8493
8494 if (nonneg)
8495 {
8496 if (tem)
8497 {
8498 /* Increment of LABEL_NUSES done above. */
8499 /* Register is now always nonnegative,
8500 so add REG_NONNEG note to the branch. */
8501 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
8502 REG_NOTES (tem));
8503 }
8504 bl->nonneg = 1;
8505 }
8506
8507 /* No insn may reference both the reversed and another biv or it
8508 will fail (see comment near the top of the loop reversal
8509 code).
8510 Earlier on, we have verified that the biv has no use except
8511 counting, or it is the only biv in this function.
8512 However, the code that computes no_use_except_counting does
8513 not verify reg notes. It's possible to have an insn that
8514 references another biv, and has a REG_EQUAL note with an
8515 expression based on the reversed biv. To avoid this case,
8516 remove all REG_EQUAL notes based on the reversed biv
8517 here. */
8518 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
8519 if (INSN_P (p))
8520 {
8521 rtx *pnote;
8522 rtx set = single_set (p);
8523 /* If this is a set of a GIV based on the reversed biv, any
8524 REG_EQUAL notes should still be correct. */
8525 if (! set
8526 || GET_CODE (SET_DEST (set)) != REG
8527 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8528 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8529 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
8530 for (pnote = &REG_NOTES (p); *pnote;)
8531 {
8532 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8533 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8534 XEXP (*pnote, 0)))
8535 *pnote = XEXP (*pnote, 1);
8536 else
8537 pnote = &XEXP (*pnote, 1);
8538 }
8539 }
8540
8541 /* Mark that this biv has been reversed. Each giv which depends
8542 on this biv, and which is also live past the end of the loop
8543 will have to be fixed up. */
8544
8545 bl->reversed = 1;
8546
8547 if (loop_dump_stream)
8548 {
8549 fprintf (loop_dump_stream, "Reversed loop");
8550 if (bl->nonneg)
8551 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8552 else
8553 fprintf (loop_dump_stream, "\n");
8554 }
8555
8556 return 1;
8557 }
8558 }
8559 }
8560
8561 return 0;
8562 }
8563 \f
8564 /* Verify whether the biv BL appears to be eliminable,
8565 based on the insns in the loop that refer to it.
8566
8567 If ELIMINATE_P is non-zero, actually do the elimination.
8568
8569 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8570 determine whether invariant insns should be placed inside or at the
8571 start of the loop. */
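/* Illustrative example (hypothetical names): if the only remaining use
   of the biv `i' is a test such as `i < 100', and a reduced giv always
   holds `base + 4*i', the test can be rewritten as a comparison of that
   giv against `base + 400', after which the biv is dead and can be
   eliminated. */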
8572
8573 static int
8574 maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count)
8575 const struct loop *loop;
8576 struct iv_class *bl;
8577 int eliminate_p;
8578 int threshold, insn_count;
8579 {
8580 struct loop_ivs *ivs = LOOP_IVS (loop);
8581 rtx reg = bl->biv->dest_reg;
8582 rtx p;
8583
8584 /* Scan all insns in the loop, stopping if we find one that uses the
8585 biv in a way that we cannot eliminate. */
8586
8587 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
8588 {
8589 enum rtx_code code = GET_CODE (p);
8590 basic_block where_bb = 0;
8591 rtx where_insn = threshold >= insn_count ? 0 : p;
8592
8593 /* If this is a libcall that sets a giv, skip ahead to its end. */
8594 if (GET_RTX_CLASS (code) == 'i')
8595 {
8596 rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
8597
8598 if (note)
8599 {
8600 rtx last = XEXP (note, 0);
8601 rtx set = single_set (last);
8602
8603 if (set && GET_CODE (SET_DEST (set)) == REG)
8604 {
8605 unsigned int regno = REGNO (SET_DEST (set));
8606
8607 if (regno < ivs->n_regs
8608 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8609 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
8610 p = last;
8611 }
8612 }
8613 }
8614 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8615 && reg_mentioned_p (reg, PATTERN (p))
8616 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
8617 eliminate_p, where_bb, where_insn))
8618 {
8619 if (loop_dump_stream)
8620 fprintf (loop_dump_stream,
8621 "Cannot eliminate biv %d: biv used in insn %d.\n",
8622 bl->regno, INSN_UID (p));
8623 break;
8624 }
8625 }
8626
8627 if (p == loop->end)
8628 {
8629 if (loop_dump_stream)
8630 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8631 bl->regno, eliminate_p ? "was" : "can be");
8632 return 1;
8633 }
8634
8635 return 0;
8636 }
8637 \f
8638 /* INSN and REFERENCE are instructions in the same insn chain.
8639 Return non-zero if INSN is first. */
8640
8641 int
8642 loop_insn_first_p (insn, reference)
8643 rtx insn, reference;
8644 {
8645 rtx p, q;
8646
8647 for (p = insn, q = reference;;)
8648 {
8649 /* Start with test for not first so that INSN == REFERENCE yields not
8650 first. */
8651 if (q == insn || ! p)
8652 return 0;
8653 if (p == reference || ! q)
8654 return 1;
8655
8656 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8657 previous insn, hence the <= comparison below does not work if
8658 P is a note. */
8659 if (INSN_UID (p) < max_uid_for_loop
8660 && INSN_UID (q) < max_uid_for_loop
8661 && GET_CODE (p) != NOTE)
8662 return INSN_LUID (p) <= INSN_LUID (q);
8663
8664 if (INSN_UID (p) >= max_uid_for_loop
8665 || GET_CODE (p) == NOTE)
8666 p = NEXT_INSN (p);
8667 if (INSN_UID (q) >= max_uid_for_loop)
8668 q = NEXT_INSN (q);
8669 }
8670 }
8671
8672 /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if
8673 the offset that we have to take into account due to auto-increment /
8674 giv derivation is zero. */
8675 static int
8676 biv_elimination_giv_has_0_offset (biv, giv, insn)
8677 struct induction *biv, *giv;
8678 rtx insn;
8679 {
8680 /* If the giv V had the auto-inc address optimization applied
8681 to it, and INSN occurs between the giv insn and the biv
8682 insn, then we'd have to adjust the value used here.
8683 This is rare, so we don't bother to make this possible. */
8684 if (giv->auto_inc_opt
8685 && ((loop_insn_first_p (giv->insn, insn)
8686 && loop_insn_first_p (insn, biv->insn))
8687 || (loop_insn_first_p (biv->insn, insn)
8688 && loop_insn_first_p (insn, giv->insn))))
8689 return 0;
8690
8691 return 1;
8692 }
8693
8694 /* If BL appears in X (part of the pattern of INSN), see if we can
8695 eliminate its use. If so, return 1. If not, return 0.
8696
8697 If BIV does not appear in X, return 1.
8698
8699 If ELIMINATE_P is non-zero, actually do the elimination.
8700 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8701 Depending on how many items have been moved out of the loop, it
8702 will either be before INSN (when WHERE_INSN is non-zero) or at the
8703 start of the loop (when WHERE_INSN is zero). */
8704
8705 static int
8706 maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn)
8707 const struct loop *loop;
8708 rtx x, insn;
8709 struct iv_class *bl;
8710 int eliminate_p;
8711 basic_block where_bb;
8712 rtx where_insn;
8713 {
8714 enum rtx_code code = GET_CODE (x);
8715 rtx reg = bl->biv->dest_reg;
8716 enum machine_mode mode = GET_MODE (reg);
8717 struct induction *v;
8718 rtx arg, tem;
8719 #ifdef HAVE_cc0
8720 rtx new;
8721 #endif
8722 int arg_operand;
8723 const char *fmt;
8724 int i, j;
8725
8726 switch (code)
8727 {
8728 case REG:
8729 /* If we haven't already been able to do something with this BIV,
8730 we can't eliminate it. */
8731 if (x == reg)
8732 return 0;
8733 return 1;
8734
8735 case SET:
8736 /* If this sets the BIV, it is not a problem. */
8737 if (SET_DEST (x) == reg)
8738 return 1;
8739
8740 /* If this is an insn that defines a giv, it is also ok because
8741 it will go away when the giv is reduced. */
8742 for (v = bl->giv; v; v = v->next_iv)
8743 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8744 return 1;
8745
8746 #ifdef HAVE_cc0
8747 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8748 {
8749 /* Can replace with any giv that was reduced and
8750 that has (MULT_VAL != 0) and (ADD_VAL == 0).
8751 Require a constant for MULT_VAL, so we know it's nonzero.
8752 ??? We disable this optimization to avoid potential
8753 overflows. */
8754
8755 for (v = bl->giv; v; v = v->next_iv)
8756 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
8757 && v->add_val == const0_rtx
8758 && ! v->ignore && ! v->maybe_dead && v->always_computable
8759 && v->mode == mode
8760 && 0)
8761 {
8762 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8763 continue;
8764
8765 if (! eliminate_p)
8766 return 1;
8767
8768 /* If the giv has the opposite direction of change,
8769 then reverse the comparison. */
8770 if (INTVAL (v->mult_val) < 0)
8771 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8772 const0_rtx, v->new_reg);
8773 else
8774 new = v->new_reg;
8775
8776 /* We can probably test that giv's reduced reg. */
8777 if (validate_change (insn, &SET_SRC (x), new, 0))
8778 return 1;
8779 }
8780
8781 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8782 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
8783 Require a constant for MULT_VAL, so we know it's nonzero.
8784 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8785 overflow problem. */
8786
8787 for (v = bl->giv; v; v = v->next_iv)
8788 if (GET_CODE (v->mult_val) == CONST_INT
8789 && v->mult_val != const0_rtx
8790 && ! v->ignore && ! v->maybe_dead && v->always_computable
8791 && v->mode == mode
8792 && (GET_CODE (v->add_val) == SYMBOL_REF
8793 || GET_CODE (v->add_val) == LABEL_REF
8794 || GET_CODE (v->add_val) == CONST
8795 || (GET_CODE (v->add_val) == REG
8796 && REG_POINTER (v->add_val))))
8797 {
8798 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8799 continue;
8800
8801 if (! eliminate_p)
8802 return 1;
8803
8804 /* If the giv has the opposite direction of change,
8805 then reverse the comparison. */
8806 if (INTVAL (v->mult_val) < 0)
8807 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8808 v->new_reg);
8809 else
8810 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8811 copy_rtx (v->add_val));
8812
8813 /* Replace biv with the giv's reduced register. */
8814 update_reg_last_use (v->add_val, insn);
8815 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8816 return 1;
8817
8818 /* Insn doesn't support that constant or invariant. Copy it
8819 into a register (it will be a loop invariant.) */
8820 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8821
8822 loop_insn_emit_before (loop, 0, where_insn,
8823 gen_move_insn (tem,
8824 copy_rtx (v->add_val)));
8825
8826 /* Substitute the new register for its invariant value in
8827 the compare expression. */
8828 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8829 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8830 return 1;
8831 }
8832 }
8833 #endif
8834 break;
8835
8836 case COMPARE:
8837 case EQ: case NE:
8838 case GT: case GE: case GTU: case GEU:
8839 case LT: case LE: case LTU: case LEU:
8840 /* See if either argument is the biv. */
8841 if (XEXP (x, 0) == reg)
8842 arg = XEXP (x, 1), arg_operand = 1;
8843 else if (XEXP (x, 1) == reg)
8844 arg = XEXP (x, 0), arg_operand = 0;
8845 else
8846 break;
8847
8848 if (CONSTANT_P (arg))
8849 {
8850 /* First try to replace with any giv that has constant positive
8851 mult_val and constant add_val. We might be able to support
8852 negative mult_val, but it seems complex to do it in general. */
8853
8854 for (v = bl->giv; v; v = v->next_iv)
8855 if (GET_CODE (v->mult_val) == CONST_INT
8856 && INTVAL (v->mult_val) > 0
8857 && (GET_CODE (v->add_val) == SYMBOL_REF
8858 || GET_CODE (v->add_val) == LABEL_REF
8859 || GET_CODE (v->add_val) == CONST
8860 || (GET_CODE (v->add_val) == REG
8861 && REG_POINTER (v->add_val)))
8862 && ! v->ignore && ! v->maybe_dead && v->always_computable
8863 && v->mode == mode)
8864 {
8865 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8866 continue;
8867
8868 /* Don't eliminate if the linear combination that makes up
8869 the giv overflows when it is applied to ARG. */
8870 if (GET_CODE (arg) == CONST_INT)
8871 {
8872 rtx add_val;
8873
8874 if (GET_CODE (v->add_val) == CONST_INT)
8875 add_val = v->add_val;
8876 else
8877 add_val = const0_rtx;
8878
8879 if (const_mult_add_overflow_p (arg, v->mult_val,
8880 add_val, mode, 1))
8881 continue;
8882 }
8883
8884 if (! eliminate_p)
8885 return 1;
8886
8887 /* Replace biv with the giv's reduced reg. */
8888 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
8889
8890 /* If all constants are actually constant integers and
8891 the derived constant can be directly placed in the COMPARE,
8892 do so. */
8893 if (GET_CODE (arg) == CONST_INT
8894 && GET_CODE (v->add_val) == CONST_INT)
8895 {
8896 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
8897 v->add_val, mode, 1);
8898 }
8899 else
8900 {
8901 /* Otherwise, load it into a register. */
8902 tem = gen_reg_rtx (mode);
8903 loop_iv_add_mult_emit_before (loop, arg,
8904 v->mult_val, v->add_val,
8905 tem, where_bb, where_insn);
8906 }
8907
8908 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8909
8910 if (apply_change_group ())
8911 return 1;
8912 }
8913
8914 /* Look for giv with positive constant mult_val and nonconst add_val.
8915 Insert insns to calculate new compare value.
8916 ??? Turn this off due to possible overflow. */
8917
8918 for (v = bl->giv; v; v = v->next_iv)
8919 if (GET_CODE (v->mult_val) == CONST_INT
8920 && INTVAL (v->mult_val) > 0
8921 && ! v->ignore && ! v->maybe_dead && v->always_computable
8922 && v->mode == mode
8923 && 0)
8924 {
8925 rtx tem;
8926
8927 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8928 continue;
8929
8930 if (! eliminate_p)
8931 return 1;
8932
8933 tem = gen_reg_rtx (mode);
8934
8935 /* Replace biv with giv's reduced register. */
8936 validate_change (insn, &XEXP (x, 1 - arg_operand),
8937 v->new_reg, 1);
8938
8939 /* Compute value to compare against. */
8940 loop_iv_add_mult_emit_before (loop, arg,
8941 v->mult_val, v->add_val,
8942 tem, where_bb, where_insn);
8943 /* Use it in this insn. */
8944 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8945 if (apply_change_group ())
8946 return 1;
8947 }
8948 }
8949 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8950 {
8951 if (loop_invariant_p (loop, arg) == 1)
8952 {
8953 /* Look for giv with constant positive mult_val and nonconst
8954 add_val. Insert insns to compute new compare value.
8955 ??? Turn this off due to possible overflow. */
8956
8957 for (v = bl->giv; v; v = v->next_iv)
8958 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
8959 && ! v->ignore && ! v->maybe_dead && v->always_computable
8960 && v->mode == mode
8961 && 0)
8962 {
8963 rtx tem;
8964
8965 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
8966 continue;
8967
8968 if (! eliminate_p)
8969 return 1;
8970
8971 tem = gen_reg_rtx (mode);
8972
8973 /* Replace biv with giv's reduced register. */
8974 validate_change (insn, &XEXP (x, 1 - arg_operand),
8975 v->new_reg, 1);
8976
8977 /* Compute value to compare against. */
8978 loop_iv_add_mult_emit_before (loop, arg,
8979 v->mult_val, v->add_val,
8980 tem, where_bb, where_insn);
8981 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8982 if (apply_change_group ())
8983 return 1;
8984 }
8985 }
8986
8987 /* This code has problems. Basically, when deciding whether we
8988 will eliminate BL, you can't know whether a particular giv
8989 of ARG will be reduced. If it isn't going to be reduced,
8990 we can't eliminate BL. We can try forcing it to be reduced,
8991 but that can generate poor code.
8992
8993 The problem is that the benefit of reducing TV, below, should
8994 be increased if BL can actually be eliminated, but this means
8995 we might have to do a topological sort of the order in which
8996 we try to process bivs. It doesn't seem worthwhile to do
8997 this sort of thing now. */
8998
8999 #if 0
9000 /* Otherwise the reg compared with had better be a biv. */
9001 if (GET_CODE (arg) != REG
9002 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
9003 return 0;
9004
9005 /* Look for a pair of givs, one for each biv,
9006 with identical coefficients. */
9007 for (v = bl->giv; v; v = v->next_iv)
9008 {
9009 struct induction *tv;
9010
9011 if (v->ignore || v->maybe_dead || v->mode != mode)
9012 continue;
9013
9014 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
9015 tv = tv->next_iv)
9016 if (! tv->ignore && ! tv->maybe_dead
9017 && rtx_equal_p (tv->mult_val, v->mult_val)
9018 && rtx_equal_p (tv->add_val, v->add_val)
9019 && tv->mode == mode)
9020 {
9021 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
9022 continue;
9023
9024 if (! eliminate_p)
9025 return 1;
9026
9027 /* Replace biv with its giv's reduced reg. */
9028 XEXP (x, 1 - arg_operand) = v->new_reg;
9029 /* Replace other operand with the other giv's
9030 reduced reg. */
9031 XEXP (x, arg_operand) = tv->new_reg;
9032 return 1;
9033 }
9034 }
9035 #endif
9036 }
9037
9038 /* If we get here, the biv can't be eliminated. */
9039 return 0;
9040
9041 case MEM:
9042 /* If this address is a DEST_ADDR giv, it doesn't matter if the
9043 biv is used in it, since it will be replaced. */
9044 for (v = bl->giv; v; v = v->next_iv)
9045 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
9046 return 1;
9047 break;
9048
9049 default:
9050 break;
9051 }
9052
9053 /* See if any subexpression fails elimination. */
9054 fmt = GET_RTX_FORMAT (code);
9055 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9056 {
9057 switch (fmt[i])
9058 {
9059 case 'e':
9060 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
9061 eliminate_p, where_bb, where_insn))
9062 return 0;
9063 break;
9064
9065 case 'E':
9066 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9067 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
9068 eliminate_p, where_bb, where_insn))
9069 return 0;
9070 break;
9071 }
9072 }
9073
9074 return 1;
9075 }
9076 \f
9077 /* Return nonzero if the last use of REG
9078 is in an insn following INSN in the same basic block. */
9079
9080 static int
9081 last_use_this_basic_block (reg, insn)
9082 rtx reg;
9083 rtx insn;
9084 {
9085 rtx n;
9086 for (n = insn;
9087 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
9088 n = NEXT_INSN (n))
9089 {
9090 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
9091 return 1;
9092 }
9093 return 0;
9094 }
9095 \f
9096 /* Called via `note_stores' to record the initial value of a biv. Here we
9097 just record the location of the set and process it later. */
9098
9099 static void
9100 record_initial (dest, set, data)
9101 rtx dest;
9102 rtx set;
9103 void *data;
9104 {
9105 struct loop_ivs *ivs = (struct loop_ivs *) data;
9106 struct iv_class *bl;
9107
9108 if (GET_CODE (dest) != REG
9109 || REGNO (dest) >= ivs->n_regs
9110 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
9111 return;
9112
9113 bl = REG_IV_CLASS (ivs, REGNO (dest));
9114
9115 /* If this is the first set found, record it. */
9116 if (bl->init_insn == 0)
9117 {
9118 bl->init_insn = note_insn;
9119 bl->init_set = set;
9120 }
9121 }
9122 \f
9123 /* If any of the registers in X are "old" and currently have a last use earlier
9124 than INSN, update them to have a last use of INSN. Their actual last use
9125 will be the previous insn but it will not have a valid uid_luid so we can't
9126 use it. X must be a source expression only. */
9127
9128 static void
9129 update_reg_last_use (x, insn)
9130 rtx x;
9131 rtx insn;
9132 {
9133 /* Check for the case where INSN does not have a valid luid. In this case,
9134 there is no need to modify the regno_last_uid, as this can only happen
9135 when code is inserted after the loop_end to set a pseudo's final value,
9136 and hence this insn will never be the last use of x.
9137 ???? This comment is not correct. See for example loop_givs_reduce.
9138 This may insert an insn before another new insn. */
9139 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
9140 && INSN_UID (insn) < max_uid_for_loop
9141 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
9142 {
9143 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
9144 }
9145 else
9146 {
9147 int i, j;
9148 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
9149 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9150 {
9151 if (fmt[i] == 'e')
9152 update_reg_last_use (XEXP (x, i), insn);
9153 else if (fmt[i] == 'E')
9154 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9155 update_reg_last_use (XVECEXP (x, i, j), insn);
9156 }
9157 }
9158 }
9159 \f
9160 /* Given an insn INSN and condition COND, return the condition in a
9161 canonical form to simplify testing by callers. Specifically:
9162
9163 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9164 (2) Both operands will be machine operands; (cc0) will have been replaced.
9165 (3) If an operand is a constant, it will be the second operand.
9166 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
9167 for GE, GEU, and LEU.
9168
9169 If the condition cannot be understood, or is an inequality floating-point
9170 comparison which needs to be reversed, 0 will be returned.
9171
9172 If REVERSE is non-zero, then reverse the condition prior to canonicalizing it.
9173
9174 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9175 insn used in locating the condition was found. If a replacement test
9176 of the condition is desired, it should be placed in front of that
9177 insn and we will be sure that the inputs are still valid.
9178
9179 If WANT_REG is non-zero, we wish the condition to be relative to that
9180 register, if possible. Therefore, do not canonicalize the condition
9181 further. */
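/* As a purely illustrative example of rule (4), using a hypothetical register
   number: a condition such as (le (reg 100) (const_int 4)) comes back as
   (lt (reg 100) (const_int 5)), and (geu (reg 100) (const_int 1)) comes back
   as (gtu (reg 100) (const_int 0)). */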
9182
9183 rtx
9184 canonicalize_condition (insn, cond, reverse, earliest, want_reg)
9185 rtx insn;
9186 rtx cond;
9187 int reverse;
9188 rtx *earliest;
9189 rtx want_reg;
9190 {
9191 enum rtx_code code;
9192 rtx prev = insn;
9193 rtx set;
9194 rtx tem;
9195 rtx op0, op1;
9196 int reverse_code = 0;
9197 enum machine_mode mode;
9198
9199 code = GET_CODE (cond);
9200 mode = GET_MODE (cond);
9201 op0 = XEXP (cond, 0);
9202 op1 = XEXP (cond, 1);
9203
9204 if (reverse)
9205 code = reversed_comparison_code (cond, insn);
9206 if (code == UNKNOWN)
9207 return 0;
9208
9209 if (earliest)
9210 *earliest = insn;
9211
9212 /* If we are comparing a register with zero, see if the register is set
9213 in the previous insn to a COMPARE or a comparison operation. Perform
9214 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9215 in cse.c */
9216
9217 while (GET_RTX_CLASS (code) == '<'
9218 && op1 == CONST0_RTX (GET_MODE (op0))
9219 && op0 != want_reg)
9220 {
9221 /* Set non-zero when we find something of interest. */
9222 rtx x = 0;
9223
9224 #ifdef HAVE_cc0
9225 /* If comparison with cc0, import actual comparison from compare
9226 insn. */
9227 if (op0 == cc0_rtx)
9228 {
9229 if ((prev = prev_nonnote_insn (prev)) == 0
9230 || GET_CODE (prev) != INSN
9231 || (set = single_set (prev)) == 0
9232 || SET_DEST (set) != cc0_rtx)
9233 return 0;
9234
9235 op0 = SET_SRC (set);
9236 op1 = CONST0_RTX (GET_MODE (op0));
9237 if (earliest)
9238 *earliest = prev;
9239 }
9240 #endif
9241
9242 /* If this is a COMPARE, pick up the two things being compared. */
9243 if (GET_CODE (op0) == COMPARE)
9244 {
9245 op1 = XEXP (op0, 1);
9246 op0 = XEXP (op0, 0);
9247 continue;
9248 }
9249 else if (GET_CODE (op0) != REG)
9250 break;
9251
9252 /* Go back to the previous insn. Stop if it is not an INSN. We also
9253 stop if it isn't a single set or if it has a REG_INC note because
9254 we don't want to bother dealing with it. */
9255
9256 if ((prev = prev_nonnote_insn (prev)) == 0
9257 || GET_CODE (prev) != INSN
9258 || FIND_REG_INC_NOTE (prev, NULL_RTX))
9259 break;
9260
9261 set = set_of (op0, prev);
9262
9263 if (set
9264 && (GET_CODE (set) != SET
9265 || !rtx_equal_p (SET_DEST (set), op0)))
9266 break;
9267
9268 /* If this is setting OP0, get what it sets it to if it looks
9269 relevant. */
9270 if (set)
9271 {
9272 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
9273
9274 /* ??? We may not combine comparisons done in a CCmode with
9275 comparisons not done in a CCmode. This is to aid targets
9276 like Alpha that have an IEEE compliant EQ instruction, and
9277 a non-IEEE compliant BEQ instruction. The use of CCmode is
9278 actually artificial, simply to prevent the combination, but
9279 should not affect other platforms.
9280
9281 However, we must allow VOIDmode comparisons to match either
9282 CCmode or non-CCmode comparison, because some ports have
9283 modeless comparisons inside branch patterns.
9284
9285 ??? This mode check should perhaps look more like the mode check
9286 in simplify_comparison in combine. */
9287
9288 if ((GET_CODE (SET_SRC (set)) == COMPARE
9289 || (((code == NE
9290 || (code == LT
9291 && GET_MODE_CLASS (inner_mode) == MODE_INT
9292 && (GET_MODE_BITSIZE (inner_mode)
9293 <= HOST_BITS_PER_WIDE_INT)
9294 && (STORE_FLAG_VALUE
9295 & ((HOST_WIDE_INT) 1
9296 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9297 #ifdef FLOAT_STORE_FLAG_VALUE
9298 || (code == LT
9299 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9300 && (REAL_VALUE_NEGATIVE
9301 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9302 #endif
9303 ))
9304 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9305 && (((GET_MODE_CLASS (mode) == MODE_CC)
9306 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9307 || mode == VOIDmode || inner_mode == VOIDmode))
9308 x = SET_SRC (set);
9309 else if (((code == EQ
9310 || (code == GE
9311 && (GET_MODE_BITSIZE (inner_mode)
9312 <= HOST_BITS_PER_WIDE_INT)
9313 && GET_MODE_CLASS (inner_mode) == MODE_INT
9314 && (STORE_FLAG_VALUE
9315 & ((HOST_WIDE_INT) 1
9316 << (GET_MODE_BITSIZE (inner_mode) - 1))))
9317 #ifdef FLOAT_STORE_FLAG_VALUE
9318 || (code == GE
9319 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
9320 && (REAL_VALUE_NEGATIVE
9321 (FLOAT_STORE_FLAG_VALUE (inner_mode))))
9322 #endif
9323 ))
9324 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
9325 && (((GET_MODE_CLASS (mode) == MODE_CC)
9326 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9327 || mode == VOIDmode || inner_mode == VOIDmode))
9328
9329 {
9330 reverse_code = 1;
9331 x = SET_SRC (set);
9332 }
9333 else
9334 break;
9335 }
9336
9337 else if (reg_set_p (op0, prev))
9338 /* If this sets OP0, but not directly, we have to give up. */
9339 break;
9340
9341 if (x)
9342 {
9343 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9344 code = GET_CODE (x);
9345 if (reverse_code)
9346 {
9347 code = reversed_comparison_code (x, prev);
9348 if (code == UNKNOWN)
9349 return 0;
9350 reverse_code = 0;
9351 }
9352
9353 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9354 if (earliest)
9355 *earliest = prev;
9356 }
9357 }
9358
9359 /* If constant is first, put it last. */
9360 if (CONSTANT_P (op0))
9361 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9362
9363 /* If OP0 is the result of a comparison, we weren't able to find what
9364 was really being compared, so fail. */
9365 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9366 return 0;
9367
9368 /* Canonicalize any ordered comparison against an integer constant so that
9369 it no longer involves equality (LE becomes LT, etc.), provided we can do
9370 the computation in the relevant mode without overflow. */
9371
9372 if (GET_CODE (op1) == CONST_INT
9373 && GET_MODE (op0) != VOIDmode
9374 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
9375 {
9376 HOST_WIDE_INT const_val = INTVAL (op1);
9377 unsigned HOST_WIDE_INT uconst_val = const_val;
9378 unsigned HOST_WIDE_INT max_val
9379 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
9380
9381 switch (code)
9382 {
9383 case LE:
9384 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
9385 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
9386 break;
9387
9388 /* When cross-compiling, const_val might be sign-extended from
9389 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */
9390 case GE:
9391 if ((HOST_WIDE_INT) (const_val & max_val)
9392 != (((HOST_WIDE_INT) 1
9393 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
9394 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
9395 break;
9396
9397 case LEU:
9398 if (uconst_val < max_val)
9399 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
9400 break;
9401
9402 case GEU:
9403 if (uconst_val != 0)
9404 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
9405 break;
9406
9407 default:
9408 break;
9409 }
9410 }
9411
9412 #ifdef HAVE_cc0
9413 /* Never return CC0; return zero instead. */
9414 if (op0 == cc0_rtx)
9415 return 0;
9416 #endif
9417
9418 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
9419 }
9420
9421 /* Given a jump insn JUMP, return the condition that will cause it to branch
9422 to its JUMP_LABEL. If the condition cannot be understood, or is an
9423 inequality floating-point comparison which needs to be reversed, 0 will
9424 be returned.
9425
9426 If EARLIEST is non-zero, it is a pointer to a place where the earliest
9427 insn used in locating the condition was found. If a replacement test
9428 of the condition is desired, it should be placed in front of that
9429 insn and we will be sure that the inputs are still valid. */
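/* Illustrative sketch (hypothetical register and label numbers): for a
   conditional jump whose pattern is
       (set (pc) (if_then_else (ge (reg 67) (reg 68))
                               (label_ref 23) (pc)))
   this returns the canonicalized (ge (reg 67) (reg 68)); if the label
   instead sits in the "else" arm, so the branch is taken when the condition
   is false, the condition is reversed before canonicalization. */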
9430
9431 rtx
9432 get_condition (jump, earliest)
9433 rtx jump;
9434 rtx *earliest;
9435 {
9436 rtx cond;
9437 int reverse;
9438 rtx set;
9439
9440 /* If this is not a standard conditional jump, we can't parse it. */
9441 if (GET_CODE (jump) != JUMP_INSN
9442 || ! any_condjump_p (jump))
9443 return 0;
9444 set = pc_set (jump);
9445
9446 cond = XEXP (SET_SRC (set), 0);
9447
9448 /* If this branches to JUMP_LABEL when the condition is false, reverse
9449 the condition. */
9450 reverse
9451 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9452 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
9453
9454 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
9455 }
9456
9457 /* Similar to the above routine, except that we also put an invariant last
9458 unless both operands are invariants. */
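/* For example (illustrative register numbers): if the exit test comes back
   as (gt (reg 70) (reg 71)) where (reg 70) is loop-invariant and (reg 71)
   is not, the operands are swapped and (lt (reg 71) (reg 70)) is returned
   instead. */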
9459
9460 rtx
9461 get_condition_for_loop (loop, x)
9462 const struct loop *loop;
9463 rtx x;
9464 {
9465 rtx comparison = get_condition (x, (rtx*) 0);
9466
9467 if (comparison == 0
9468 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9469 || loop_invariant_p (loop, XEXP (comparison, 1)))
9470 return comparison;
9471
9472 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9473 XEXP (comparison, 1), XEXP (comparison, 0));
9474 }
9475
9476 /* Scan the function and determine whether it has indirect (computed) jumps.
9477
9478 This is taken mostly from flow.c; similar code exists elsewhere
9479 in the compiler. It may be useful to put this into rtlanal.c. */
9480 static int
9481 indirect_jump_in_function_p (start)
9482 rtx start;
9483 {
9484 rtx insn;
9485
9486 for (insn = start; insn; insn = NEXT_INSN (insn))
9487 if (computed_jump_p (insn))
9488 return 1;
9489
9490 return 0;
9491 }
9492
9493 /* Add MEM to the LOOP_MEMS array, if appropriate. See the
9494 documentation for LOOP_MEMS for the definition of `appropriate'.
9495 This function is called from prescan_loop via for_each_rtx. */
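/* A note on the return values used below, following the for_each_rtx
   conventions: returning -1 tells for_each_rtx not to traverse the
   sub-expressions of the current rtx, while returning 0 simply continues
   the walk. */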
9496
9497 static int
9498 insert_loop_mem (mem, data)
9499 rtx *mem;
9500 void *data;
9501 {
9502 struct loop_info *loop_info = data;
9503 int i;
9504 rtx m = *mem;
9505
9506 if (m == NULL_RTX)
9507 return 0;
9508
9509 switch (GET_CODE (m))
9510 {
9511 case MEM:
9512 break;
9513
9514 case CLOBBER:
9515 /* We're not interested in MEMs that are only clobbered. */
9516 return -1;
9517
9518 case CONST_DOUBLE:
9519 /* We're not interested in the MEM associated with a
9520 CONST_DOUBLE, so there's no need to traverse into this. */
9521 return -1;
9522
9523 case EXPR_LIST:
9524 /* We're not interested in any MEMs that only appear in notes. */
9525 return -1;
9526
9527 default:
9528 /* This is not a MEM. */
9529 return 0;
9530 }
9531
9532 /* See if we've already seen this MEM. */
9533 for (i = 0; i < loop_info->mems_idx; ++i)
9534 if (rtx_equal_p (m, loop_info->mems[i].mem))
9535 {
9536 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
9537 /* The modes of the two memory accesses are different. If
9538 this happens, something tricky is going on, and we just
9539 don't optimize accesses to this MEM. */
9540 loop_info->mems[i].optimize = 0;
9541
9542 return 0;
9543 }
9544
9545 /* Resize the array, if necessary. */
9546 if (loop_info->mems_idx == loop_info->mems_allocated)
9547 {
9548 if (loop_info->mems_allocated != 0)
9549 loop_info->mems_allocated *= 2;
9550 else
9551 loop_info->mems_allocated = 32;
9552
9553 loop_info->mems = (loop_mem_info *)
9554 xrealloc (loop_info->mems,
9555 loop_info->mems_allocated * sizeof (loop_mem_info));
9556 }
9557
9558 /* Actually insert the MEM. */
9559 loop_info->mems[loop_info->mems_idx].mem = m;
9560 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9561 because we can't put it in a register. We still store it in the
9562 table, though, so that if we see the same address later, but in a
9563 non-BLK mode, we'll not think we can optimize it at that point. */
9564 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9565 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9566 ++loop_info->mems_idx;
9567
9568 return 0;
9569 }
9570
9571
9572 /* Allocate REGS->ARRAY or reallocate it if it is too small.
9573
9574 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
9575 register that is modified by an insn between FROM and TO. If the
9576 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9577 more, stop incrementing it, to avoid overflow.
9578
9579 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9580 register I is used, if it is only used once. Otherwise, it is set
9581 to 0 (for no uses) or const0_rtx for more than one use. This
9582 parameter may be zero, in which case this processing is not done.
9583
9584 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
9585 optimize register I. */
9586
9587 static void
9588 loop_regs_scan (loop, extra_size)
9589 const struct loop *loop;
9590 int extra_size;
9591 {
9592 struct loop_regs *regs = LOOP_REGS (loop);
9593 int old_nregs;
9594 /* last_set[n] is nonzero iff reg n has been set in the current
9595 basic block. In that case, it is the insn that last set reg n. */
9596 rtx *last_set;
9597 rtx insn;
9598 int i;
9599
9600 old_nregs = regs->num;
9601 regs->num = max_reg_num ();
9602
9603 /* Grow the regs array if not allocated or too small. */
9604 if (regs->num >= regs->size)
9605 {
9606 regs->size = regs->num + extra_size;
9607
9608 regs->array = (struct loop_reg *)
9609 xrealloc (regs->array, regs->size * sizeof (*regs->array));
9610
9611 /* Zero the new elements. */
9612 memset (regs->array + old_nregs, 0,
9613 (regs->size - old_nregs) * sizeof (*regs->array));
9614 }
9615
9616 /* Clear previously scanned fields but do not clear n_times_set. */
9617 for (i = 0; i < old_nregs; i++)
9618 {
9619 regs->array[i].set_in_loop = 0;
9620 regs->array[i].may_not_optimize = 0;
9621 regs->array[i].single_usage = NULL_RTX;
9622 }
9623
9624 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
9625
9626 /* Scan the loop, recording register usage. */
9627 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9628 insn = NEXT_INSN (insn))
9629 {
9630 if (INSN_P (insn))
9631 {
9632 /* Record registers that have exactly one use. */
9633 find_single_use_in_loop (regs, insn, PATTERN (insn));
9634
9635 /* Include uses in REG_EQUAL notes. */
9636 if (REG_NOTES (insn))
9637 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
9638
9639 if (GET_CODE (PATTERN (insn)) == SET
9640 || GET_CODE (PATTERN (insn)) == CLOBBER)
9641 count_one_set (regs, insn, PATTERN (insn), last_set);
9642 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9643 {
9644 int i;
9645 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9646 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9647 last_set);
9648 }
9649 }
9650
9651 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
9652 memset (last_set, 0, regs->num * sizeof (rtx));
9653 }
9654
9655 /* Invalidate all hard registers clobbered by calls. With one exception:
9656 a call-clobbered PIC register is still function-invariant for our
9657 purposes, since we can hoist any PIC calculations out of the loop.
9658 Thus the call to rtx_varies_p. */
9659 if (LOOP_INFO (loop)->has_call)
9660 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9661 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9662 && rtx_varies_p (regno_reg_rtx[i], 1))
9663 {
9664 regs->array[i].may_not_optimize = 1;
9665 regs->array[i].set_in_loop = 1;
9666 }
9667
9668 #ifdef AVOID_CCMODE_COPIES
9669 /* Don't try to move insns which set CC registers if we should not
9670 create CCmode register copies. */
9671 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9672 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9673 regs->array[i].may_not_optimize = 1;
9674 #endif
9675
9676 /* Set regs->array[I].n_times_set for the new registers. */
9677 for (i = old_nregs; i < regs->num; i++)
9678 regs->array[i].n_times_set = regs->array[i].set_in_loop;
9679
9680 free (last_set);
9681 }
9682
9683 /* Returns the number of real INSNs in the LOOP. */
9684
9685 static int
9686 count_insns_in_loop (loop)
9687 const struct loop *loop;
9688 {
9689 int count = 0;
9690 rtx insn;
9691
9692 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9693 insn = NEXT_INSN (insn))
9694 if (INSN_P (insn))
9695 ++count;
9696
9697 return count;
9698 }
9699
9700 /* Move MEMs into registers for the duration of the loop. */
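/* In outline (an informal sketch of the code below): for each candidate MEM
   with a loop-invariant address that is judged safe to optimize, allocate a
   new pseudo to shadow it, replace every reference to the MEM inside the
   loop with that pseudo, initialize the pseudo from the MEM (or from a
   cheaper equivalent found by cselib) in the loop pre-header, and, if the
   MEM is written within the loop, store the pseudo back to the MEM just
   after the loop end. */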
9701
9702 static void
9703 load_mems (loop)
9704 const struct loop *loop;
9705 {
9706 struct loop_info *loop_info = LOOP_INFO (loop);
9707 struct loop_regs *regs = LOOP_REGS (loop);
9708 int maybe_never = 0;
9709 int i;
9710 rtx p, prev_ebb_head;
9711 rtx label = NULL_RTX;
9712 rtx end_label;
9713 /* Nonzero if the next instruction may never be executed. */
9714 int next_maybe_never = 0;
9715 unsigned int last_max_reg = max_reg_num ();
9716
9717 if (loop_info->mems_idx == 0)
9718 return;
9719
9720 /* We cannot use next_label here because it skips over normal insns. */
9721 end_label = next_nonnote_insn (loop->end);
9722 if (end_label && GET_CODE (end_label) != CODE_LABEL)
9723 end_label = NULL_RTX;
9724
9725 /* Check to see if it's possible that some instructions in the loop are
9726 never executed. Also check if there is a goto out of the loop other
9727 than right after the end of the loop. */
9728 for (p = next_insn_in_loop (loop, loop->scan_start);
9729 p != NULL_RTX;
9730 p = next_insn_in_loop (loop, p))
9731 {
9732 if (GET_CODE (p) == CODE_LABEL)
9733 maybe_never = 1;
9734 else if (GET_CODE (p) == JUMP_INSN
9735 /* If we enter the loop in the middle, and scan
9736 around to the beginning, don't set maybe_never
9737 for that. This must be an unconditional jump,
9738 otherwise the code at the top of the loop might
9739 never be executed. Unconditional jumps are
9740 followed by a barrier and then the loop end. */
9741 && ! (GET_CODE (p) == JUMP_INSN
9742 && JUMP_LABEL (p) == loop->top
9743 && NEXT_INSN (NEXT_INSN (p)) == loop->end
9744 && any_uncondjump_p (p)))
9745 {
9746 /* If this is a jump outside of the loop but not right
9747 after the end of the loop, we would have to emit new fixup
9748 sequences for each such label. */
9749 if (/* If we can't tell where control might go when this
9750 JUMP_INSN is executed, we must be conservative. */
9751 !JUMP_LABEL (p)
9752 || (JUMP_LABEL (p) != end_label
9753 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9754 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9755 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
9756 return;
9757
9758 if (!any_condjump_p (p))
9759 /* Something complicated. */
9760 maybe_never = 1;
9761 else
9762 /* If there are any more instructions in the loop, they
9763 might not be reached. */
9764 next_maybe_never = 1;
9765 }
9766 else if (next_maybe_never)
9767 maybe_never = 1;
9768 }
9769
9770 /* Find start of the extended basic block that enters the loop. */
9771 for (p = loop->start;
9772 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
9773 p = PREV_INSN (p))
9774 ;
9775 prev_ebb_head = p;
9776
9777 cselib_init ();
9778
9779 /* Build table of mems that get set to constant values before the
9780 loop. */
9781 for (; p != loop->start; p = NEXT_INSN (p))
9782 cselib_process_insn (p);
9783
9784 /* Actually move the MEMs. */
9785 for (i = 0; i < loop_info->mems_idx; ++i)
9786 {
9787 regset_head load_copies;
9788 regset_head store_copies;
9789 int written = 0;
9790 rtx reg;
9791 rtx mem = loop_info->mems[i].mem;
9792 rtx mem_list_entry;
9793
9794 if (MEM_VOLATILE_P (mem)
9795 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
9796 /* There's no telling whether or not MEM is modified. */
9797 loop_info->mems[i].optimize = 0;
9798
9799 /* Go through the MEMs written to in the loop to see if this
9800 one is aliased by one of them. */
9801 mem_list_entry = loop_info->store_mems;
9802 while (mem_list_entry)
9803 {
9804 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9805 written = 1;
9806 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9807 mem, rtx_varies_p))
9808 {
9809 /* MEM is indeed aliased by this store. */
9810 loop_info->mems[i].optimize = 0;
9811 break;
9812 }
9813 mem_list_entry = XEXP (mem_list_entry, 1);
9814 }
9815
9816 if (flag_float_store && written
9817 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
9818 loop_info->mems[i].optimize = 0;
9819
9820 /* If this MEM is written to, we must be sure that there
9821 are no reads from another MEM that aliases this one. */
9822 if (loop_info->mems[i].optimize && written)
9823 {
9824 int j;
9825
9826 for (j = 0; j < loop_info->mems_idx; ++j)
9827 {
9828 if (j == i)
9829 continue;
9830 else if (true_dependence (mem,
9831 VOIDmode,
9832 loop_info->mems[j].mem,
9833 rtx_varies_p))
9834 {
9835 /* It's not safe to hoist loop_info->mems[i] out of
9836 the loop because writes to it might not be
9837 seen by reads from loop_info->mems[j]. */
9838 loop_info->mems[i].optimize = 0;
9839 break;
9840 }
9841 }
9842 }
9843
9844 if (maybe_never && may_trap_p (mem))
9845 /* We can't access the MEM outside the loop; it might
9846 cause a trap that wouldn't have happened otherwise. */
9847 loop_info->mems[i].optimize = 0;
9848
9849 if (!loop_info->mems[i].optimize)
9850 /* We thought we were going to lift this MEM out of the
9851 loop, but later discovered that we could not. */
9852 continue;
9853
9854 INIT_REG_SET (&load_copies);
9855 INIT_REG_SET (&store_copies);
9856
9857 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9858 order to keep scan_loop from moving stores to this MEM
9859 out of the loop just because this REG is neither a
9860 user-variable nor used in the loop test. */
9861 reg = gen_reg_rtx (GET_MODE (mem));
9862 REG_USERVAR_P (reg) = 1;
9863 loop_info->mems[i].reg = reg;
9864
9865 /* Now, replace all references to the MEM with the
9866 corresponding pseudos. */
9867 maybe_never = 0;
9868 for (p = next_insn_in_loop (loop, loop->scan_start);
9869 p != NULL_RTX;
9870 p = next_insn_in_loop (loop, p))
9871 {
9872 if (INSN_P (p))
9873 {
9874 rtx set;
9875
9876 set = single_set (p);
9877
9878 /* See if this copies the mem into a register that isn't
9879 modified afterwards. We'll try to do copy propagation
9880 a little further on. */
9881 if (set
9882 /* @@@ This test is _way_ too conservative. */
9883 && ! maybe_never
9884 && GET_CODE (SET_DEST (set)) == REG
9885 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9886 && REGNO (SET_DEST (set)) < last_max_reg
9887 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9888 && rtx_equal_p (SET_SRC (set), mem))
9889 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9890
9891 /* See if this copies the mem from a register that isn't
9892 modified afterwards. We'll try to remove the
9893 redundant copy later on by doing a little register
9894 renaming and copy propagation. This will help
9895 to untangle things for the BIV detection code. */
9896 if (set
9897 && ! maybe_never
9898 && GET_CODE (SET_SRC (set)) == REG
9899 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9900 && REGNO (SET_SRC (set)) < last_max_reg
9901 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
9902 && rtx_equal_p (SET_DEST (set), mem))
9903 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9904
9905 /* If this is a call which uses / clobbers this memory
9906 location, we must not change the interface here. */
9907 if (GET_CODE (p) == CALL_INSN
9908 && reg_mentioned_p (loop_info->mems[i].mem,
9909 CALL_INSN_FUNCTION_USAGE (p)))
9910 {
9911 cancel_changes (0);
9912 loop_info->mems[i].optimize = 0;
9913 break;
9914 }
9915 else
9916 /* Replace the memory reference with the shadow register. */
9917 replace_loop_mems (p, loop_info->mems[i].mem,
9918 loop_info->mems[i].reg);
9919 }
9920
9921 if (GET_CODE (p) == CODE_LABEL
9922 || GET_CODE (p) == JUMP_INSN)
9923 maybe_never = 1;
9924 }
9925
9926 if (! loop_info->mems[i].optimize)
9927 ; /* We found we couldn't do the replacement, so do nothing. */
9928 else if (! apply_change_group ())
9929 /* We couldn't replace all occurrences of the MEM. */
9930 loop_info->mems[i].optimize = 0;
9931 else
9932 {
9933 /* Load the memory immediately before LOOP->START, which is
9934 the NOTE_LOOP_BEG. */
9935 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9936 rtx set;
9937 rtx best = mem;
9938 int j;
9939 struct elt_loc_list *const_equiv = 0;
9940
9941 if (e)
9942 {
9943 struct elt_loc_list *equiv;
9944 struct elt_loc_list *best_equiv = 0;
9945 for (equiv = e->locs; equiv; equiv = equiv->next)
9946 {
9947 if (CONSTANT_P (equiv->loc))
9948 const_equiv = equiv;
9949 else if (GET_CODE (equiv->loc) == REG
9950 /* Extending hard register lifetimes causes crashes
9951 on SRC targets. Doing so on non-SRC targets is
9952 probably not a good idea either, since we most
9953 likely have a pseudoregister equivalence as
9954 well. */
9955 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
9956 best_equiv = equiv;
9957 }
9958 /* Use the constant equivalence if that is cheap enough. */
9959 if (! best_equiv)
9960 best_equiv = const_equiv;
9961 else if (const_equiv
9962 && (rtx_cost (const_equiv->loc, SET)
9963 <= rtx_cost (best_equiv->loc, SET)))
9964 {
9965 best_equiv = const_equiv;
9966 const_equiv = 0;
9967 }
9968
9969 /* If best_equiv is nonzero, we know that MEM is set to a
9970 constant or register before the loop. We will use this
9971 knowledge to initialize the shadow register with that
9972 constant or reg rather than by loading from MEM. */
9973 if (best_equiv)
9974 best = copy_rtx (best_equiv->loc);
9975 }
9976
9977 set = gen_move_insn (reg, best);
9978 set = loop_insn_hoist (loop, set);
9979 if (REG_P (best))
9980 {
9981 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9982 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9983 {
9984 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9985 break;
9986 }
9987 }
9988
9989 if (const_equiv)
9990 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
9991
9992 if (written)
9993 {
9994 if (label == NULL_RTX)
9995 {
9996 label = gen_label_rtx ();
9997 emit_label_after (label, loop->end);
9998 }
9999
10000 /* Store the memory immediately after END, which is
10001 the NOTE_LOOP_END. */
10002 set = gen_move_insn (copy_rtx (mem), reg);
10003 loop_insn_emit_after (loop, 0, label, set);
10004 }
10005
10006 if (loop_dump_stream)
10007 {
10008 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
10009 REGNO (reg), (written ? "r/w" : "r/o"));
10010 print_rtl (loop_dump_stream, mem);
10011 fputc ('\n', loop_dump_stream);
10012 }
10013
10014 /* Attempt a bit of copy propagation. This helps untangle the
10015 data flow, and enables {basic,general}_induction_var to find
10016 more bivs/givs. */
10017 EXECUTE_IF_SET_IN_REG_SET
10018 (&load_copies, FIRST_PSEUDO_REGISTER, j,
10019 {
10020 try_copy_prop (loop, reg, j);
10021 });
10022 CLEAR_REG_SET (&load_copies);
10023
10024 EXECUTE_IF_SET_IN_REG_SET
10025 (&store_copies, FIRST_PSEUDO_REGISTER, j,
10026 {
10027 try_swap_copy_prop (loop, reg, j);
10028 });
10029 CLEAR_REG_SET (&store_copies);
10030 }
10031 }
10032
10033 if (label != NULL_RTX && end_label != NULL_RTX)
10034 {
10035 /* Now, we need to replace all references to the previous exit
10036 label with the new one. */
10037 rtx_pair rr;
10038 rr.r1 = end_label;
10039 rr.r2 = label;
10040
10041 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
10042 {
10043 for_each_rtx (&p, replace_label, &rr);
10044
10045 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
10046 field. This is not handled by for_each_rtx because it doesn't
10047 handle unprinted ('0') fields. We need to update JUMP_LABEL
10048 because the immediately following unroll pass will use it.
10049 replace_label would not work anyway, because that only handles
10050 LABEL_REFs. */
10051 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
10052 JUMP_LABEL (p) = label;
10053 }
10054 }
10055
10056 cselib_finish ();
10057 }
10058
10059 /* For communication between note_reg_stored and its caller. */
10060 struct note_reg_stored_arg
10061 {
10062 int set_seen;
10063 rtx reg;
10064 };
10065
10066 /* Called via note_stores, record in SET_SEEN whether X, which is written,
10067 is equal to ARG. */
10068 static void
10069 note_reg_stored (x, setter, arg)
10070 rtx x, setter ATTRIBUTE_UNUSED;
10071 void *arg;
10072 {
10073 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
10074 if (t->reg == x)
10075 t->set_seen = 1;
10076 }
10077
10078 /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
10079 There must be exactly one insn that sets this pseudo; it will be
10080 deleted if all replacements succeed and we can prove that the register
10081 is not used after the loop. */
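/* Illustrative example (hypothetical register numbers): if the only insn
   setting pseudo 80 inside the loop is
       (set (reg 80) (reg 90))
   and REPLACEMENT is (reg 90), then later uses of (reg 80) in the same
   extended basic block are rewritten to use (reg 90), and the initializing
   copy is deleted once we know (reg 80) is not needed after the loop. */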
10082
10083 static void
10084 try_copy_prop (loop, replacement, regno)
10085 const struct loop *loop;
10086 rtx replacement;
10087 unsigned int regno;
10088 {
10089 /* This is the reg that we are copying from. */
10090 rtx reg_rtx = regno_reg_rtx[regno];
10091 rtx init_insn = 0;
10092 rtx insn;
10093 /* These help keep track of whether we replaced all uses of the reg. */
10094 int replaced_last = 0;
10095 int store_is_first = 0;
10096
10097 for (insn = next_insn_in_loop (loop, loop->scan_start);
10098 insn != NULL_RTX;
10099 insn = next_insn_in_loop (loop, insn))
10100 {
10101 rtx set;
10102
10103 /* Only substitute within one extended basic block from the initializing
10104 insn. */
10105 if (GET_CODE (insn) == CODE_LABEL && init_insn)
10106 break;
10107
10108 if (! INSN_P (insn))
10109 continue;
10110
10111 /* Is this the initializing insn? */
10112 set = single_set (insn);
10113 if (set
10114 && GET_CODE (SET_DEST (set)) == REG
10115 && REGNO (SET_DEST (set)) == regno)
10116 {
10117 if (init_insn)
10118 abort ();
10119
10120 init_insn = insn;
10121 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
10122 store_is_first = 1;
10123 }
10124
10125 /* Only substitute after seeing the initializing insn. */
10126 if (init_insn && insn != init_insn)
10127 {
10128 struct note_reg_stored_arg arg;
10129
10130 replace_loop_regs (insn, reg_rtx, replacement);
10131 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
10132 replaced_last = 1;
10133
10134 /* Stop replacing when REPLACEMENT is modified. */
10135 arg.reg = replacement;
10136 arg.set_seen = 0;
10137 note_stores (PATTERN (insn), note_reg_stored, &arg);
10138 if (arg.set_seen)
10139 {
10140 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
10141
10142 /* It is possible that we've turned a previously valid REG_EQUAL note
10143 into an invalid one: we changed REGNO to REPLACEMENT, and since
10144 REPLACEMENT is modified here, the note may now mean something else. */
10145 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
10146 remove_note (insn, note);
10147 break;
10148 }
10149 }
10150 }
10151 if (! init_insn)
10152 abort ();
10153 if (apply_change_group ())
10154 {
10155 if (loop_dump_stream)
10156 fprintf (loop_dump_stream, " Replaced reg %d", regno);
10157 if (store_is_first && replaced_last)
10158 {
10159 rtx first;
10160 rtx retval_note;
10161
10162 /* Assume we're just deleting INIT_INSN. */
10163 first = init_insn;
10164 /* Look for REG_RETVAL note. If we're deleting the end of
10165 the libcall sequence, the whole sequence can go. */
10166 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10167 /* If we found a REG_RETVAL note, find the first instruction
10168 in the sequence. */
10169 if (retval_note)
10170 first = XEXP (retval_note, 0);
10171
10172 /* Delete the instructions. */
10173 loop_delete_insns (first, init_insn);
10174 }
10175 if (loop_dump_stream)
10176 fprintf (loop_dump_stream, ".\n");
10177 }
10178 }
10179
10180 /* Replace all the instructions from FIRST up to and including LAST
10181 with NOTE_INSN_DELETED notes. */
10182
10183 static void
10184 loop_delete_insns (first, last)
10185 rtx first;
10186 rtx last;
10187 {
10188 while (1)
10189 {
10190 if (loop_dump_stream)
10191 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10192 INSN_UID (first));
10193 delete_insn (first);
10194
10195 /* If this was the LAST instruction we're supposed to delete,
10196 we're done. */
10197 if (first == last)
10198 break;
10199
10200 first = NEXT_INSN (first);
10201 }
10202 }
10203
10204 /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10205 loop LOOP if the order of the sets of these registers can be
10206 swapped. There must be exactly one insn within the loop that sets
10207 this pseudo followed immediately by a move insn that sets
10208 REPLACEMENT with REGNO. */
10209 static void
10210 try_swap_copy_prop (loop, replacement, regno)
10211 const struct loop *loop;
10212 rtx replacement;
10213 unsigned int regno;
10214 {
10215 rtx insn;
10216 rtx set = NULL_RTX;
10217 unsigned int new_regno;
10218
10219 new_regno = REGNO (replacement);
10220
10221 for (insn = next_insn_in_loop (loop, loop->scan_start);
10222 insn != NULL_RTX;
10223 insn = next_insn_in_loop (loop, insn))
10224 {
10225 /* Search for the insn that copies REGNO to NEW_REGNO. */
10226 if (INSN_P (insn)
10227 && (set = single_set (insn))
10228 && GET_CODE (SET_DEST (set)) == REG
10229 && REGNO (SET_DEST (set)) == new_regno
10230 && GET_CODE (SET_SRC (set)) == REG
10231 && REGNO (SET_SRC (set)) == regno)
10232 break;
10233 }
10234
10235 if (insn != NULL_RTX)
10236 {
10237 rtx prev_insn;
10238 rtx prev_set;
10239
10240 /* Some DEF-USE info would come in handy here to make this
10241 function more general. For now, just check the previous insn
10242 which is the most likely candidate for setting REGNO. */
10243
10244 prev_insn = PREV_INSN (insn);
10245
10246 if (INSN_P (insn)
10247 && (prev_set = single_set (prev_insn))
10248 && GET_CODE (SET_DEST (prev_set)) == REG
10249 && REGNO (SET_DEST (prev_set)) == regno)
10250 {
10251 /* We have:
10252 (set (reg regno) (expr))
10253 (set (reg new_regno) (reg regno))
10254
10255 so try converting this to:
10256 (set (reg new_regno) (expr))
10257 (set (reg regno) (reg new_regno))
10258
10259 The former construct is often generated when a global
10260 variable used for an induction variable is shadowed by a
10261 register (NEW_REGNO). The latter construct improves the
10262 chances of GIV replacement and BIV elimination. */
10263
10264 validate_change (prev_insn, &SET_DEST (prev_set),
10265 replacement, 1);
10266 validate_change (insn, &SET_DEST (set),
10267 SET_SRC (set), 1);
10268 validate_change (insn, &SET_SRC (set),
10269 replacement, 1);
10270
10271 if (apply_change_group ())
10272 {
10273 if (loop_dump_stream)
10274 fprintf (loop_dump_stream,
10275 " Swapped set of reg %d at %d with reg %d at %d.\n",
10276 regno, INSN_UID (insn),
10277 new_regno, INSN_UID (prev_insn));
10278
10279 /* Update first use of REGNO. */
10280 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10281 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10282
10283 /* Now perform copy propagation to hopefully
10284 remove all uses of REGNO within the loop. */
10285 try_copy_prop (loop, replacement, regno);
10286 }
10287 }
10288 }
10289 }
10290
10291 /* Replace MEM with its associated pseudo register. This function is
10292 called from load_mems via for_each_rtx. DATA is actually a pointer
10293 to a structure describing the instruction currently being scanned
10294 and the MEM we are currently replacing. */
10295
10296 static int
10297 replace_loop_mem (mem, data)
10298 rtx *mem;
10299 void *data;
10300 {
10301 loop_replace_args *args = (loop_replace_args *) data;
10302 rtx m = *mem;
10303
10304 if (m == NULL_RTX)
10305 return 0;
10306
10307 switch (GET_CODE (m))
10308 {
10309 case MEM:
10310 break;
10311
10312 case CONST_DOUBLE:
10313 /* We're not interested in the MEM associated with a
10314 CONST_DOUBLE, so there's no need to traverse into one. */
10315 return -1;
10316
10317 default:
10318 /* This is not a MEM. */
10319 return 0;
10320 }
10321
10322 if (!rtx_equal_p (args->match, m))
10323 /* This is not the MEM we are currently replacing. */
10324 return 0;
10325
10326 /* Actually replace the MEM. */
10327 validate_change (args->insn, mem, args->replacement, 1);
10328
10329 return 0;
10330 }
10331
10332 static void
10333 replace_loop_mems (insn, mem, reg)
10334 rtx insn;
10335 rtx mem;
10336 rtx reg;
10337 {
10338 loop_replace_args args;
10339
10340 args.insn = insn;
10341 args.match = mem;
10342 args.replacement = reg;
10343
10344 for_each_rtx (&insn, replace_loop_mem, &args);
10345 }
10346
10347 /* Replace one register with another. Called through for_each_rtx; PX points
10348 to the rtx being scanned. DATA is actually a pointer to
10349 a structure of arguments. */
10350
10351 static int
10352 replace_loop_reg (px, data)
10353 rtx *px;
10354 void *data;
10355 {
10356 rtx x = *px;
10357 loop_replace_args *args = (loop_replace_args *) data;
10358
10359 if (x == NULL_RTX)
10360 return 0;
10361
10362 if (x == args->match)
10363 validate_change (args->insn, px, args->replacement, 1);
10364
10365 return 0;
10366 }
10367
10368 static void
10369 replace_loop_regs (insn, reg, replacement)
10370 rtx insn;
10371 rtx reg;
10372 rtx replacement;
10373 {
10374 loop_replace_args args;
10375
10376 args.insn = insn;
10377 args.match = reg;
10378 args.replacement = replacement;
10379
10380 for_each_rtx (&insn, replace_loop_reg, &args);
10381 }
10382
10383 /* Replace occurrences of the old exit label for the loop with the new
10384 one. DATA is an rtx_pair containing the old and new labels,
10385 respectively. */
10386
10387 static int
10388 replace_label (x, data)
10389 rtx *x;
10390 void *data;
10391 {
10392 rtx l = *x;
10393 rtx old_label = ((rtx_pair *) data)->r1;
10394 rtx new_label = ((rtx_pair *) data)->r2;
10395
10396 if (l == NULL_RTX)
10397 return 0;
10398
10399 if (GET_CODE (l) != LABEL_REF)
10400 return 0;
10401
10402 if (XEXP (l, 0) != old_label)
10403 return 0;
10404
10405 XEXP (l, 0) = new_label;
10406 ++LABEL_NUSES (new_label);
10407 --LABEL_NUSES (old_label);
10408
10409 return 0;
10410 }
10411 \f
10412 /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10413 (ignored in the interim). */
10414
10415 static rtx
10416 loop_insn_emit_after (loop, where_bb, where_insn, pattern)
10417 const struct loop *loop ATTRIBUTE_UNUSED;
10418 basic_block where_bb ATTRIBUTE_UNUSED;
10419 rtx where_insn;
10420 rtx pattern;
10421 {
10422 return emit_insn_after (pattern, where_insn);
10423 }
10424
10425
10426 /* If WHERE_INSN is non-zero emit insn for PATTERN before WHERE_INSN
10427 in basic block WHERE_BB (ignored in the interim) within the loop;
10428 otherwise, hoist PATTERN into the loop pre-header. */
10429
10430 rtx
10431 loop_insn_emit_before (loop, where_bb, where_insn, pattern)
10432 const struct loop *loop;
10433 basic_block where_bb ATTRIBUTE_UNUSED;
10434 rtx where_insn;
10435 rtx pattern;
10436 {
10437 if (! where_insn)
10438 return loop_insn_hoist (loop, pattern);
10439 return emit_insn_before (pattern, where_insn);
10440 }
10441
10442
10443 /* Emit call insn for PATTERN before WHERE_INSN in basic block
10444 WHERE_BB (ignored in the interim) within the loop. */
10445
10446 static rtx
10447 loop_call_insn_emit_before (loop, where_bb, where_insn, pattern)
10448 const struct loop *loop ATTRIBUTE_UNUSED;
10449 basic_block where_bb ATTRIBUTE_UNUSED;
10450 rtx where_insn;
10451 rtx pattern;
10452 {
10453 return emit_call_insn_before (pattern, where_insn);
10454 }
10455
10456
10457 /* Hoist insn for PATTERN into the loop pre-header. */
10458
10459 rtx
10460 loop_insn_hoist (loop, pattern)
10461 const struct loop *loop;
10462 rtx pattern;
10463 {
10464 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10465 }
10466
10467
10468 /* Hoist call insn for PATTERN into the loop pre-header. */
10469
10470 static rtx
10471 loop_call_insn_hoist (loop, pattern)
10472 const struct loop *loop;
10473 rtx pattern;
10474 {
10475 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10476 }
10477
10478
10479 /* Sink insn for PATTERN after the loop end. */
10480
10481 rtx
10482 loop_insn_sink (loop, pattern)
10483 const struct loop *loop;
10484 rtx pattern;
10485 {
10486 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10487 }
10488
10489 /* bl->final_value can be either a general_operand or a PLUS of a general_operand
10490 and a constant. Emit a sequence of instructions to load it into REG. */
10491 static rtx
10492 gen_load_of_final_value (reg, final_value)
10493 rtx reg, final_value;
10494 {
10495 rtx seq;
10496 start_sequence ();
10497 final_value = force_operand (final_value, reg);
10498 if (final_value != reg)
10499 emit_move_insn (reg, final_value);
10500 seq = get_insns ();
10501 end_sequence ();
10502 return seq;
10503 }
10504
10505 /* If the loop has multiple exits, emit insn for PATTERN before the
10506 loop to ensure that it will always be executed no matter how the
10507 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10508 since this is slightly more efficient. */
10509
10510 static rtx
10511 loop_insn_sink_or_swim (loop, pattern)
10512 const struct loop *loop;
10513 rtx pattern;
10514 {
10515 if (loop->exit_count)
10516 return loop_insn_hoist (loop, pattern);
10517 else
10518 return loop_insn_sink (loop, pattern);
10519 }
10520 \f
10521 static void
10522 loop_ivs_dump (loop, file, verbose)
10523 const struct loop *loop;
10524 FILE *file;
10525 int verbose;
10526 {
10527 struct iv_class *bl;
10528 int iv_num = 0;
10529
10530 if (! loop || ! file)
10531 return;
10532
10533 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10534 iv_num++;
10535
10536 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10537
10538 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10539 {
10540 loop_iv_class_dump (bl, file, verbose);
10541 fputc ('\n', file);
10542 }
10543 }
10544
10545
10546 static void
10547 loop_iv_class_dump (bl, file, verbose)
10548 const struct iv_class *bl;
10549 FILE *file;
10550 int verbose ATTRIBUTE_UNUSED;
10551 {
10552 struct induction *v;
10553 rtx incr;
10554 int i;
10555
10556 if (! bl || ! file)
10557 return;
10558
10559 fprintf (file, "IV class for reg %d, benefit %d\n",
10560 bl->regno, bl->total_benefit);
10561
10562 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10563 if (bl->initial_value)
10564 {
10565 fprintf (file, ", init val: ");
10566 print_simple_rtl (file, bl->initial_value);
10567 }
10568 if (bl->initial_test)
10569 {
10570 fprintf (file, ", init test: ");
10571 print_simple_rtl (file, bl->initial_test);
10572 }
10573 fputc ('\n', file);
10574
10575 if (bl->final_value)
10576 {
10577 fprintf (file, " Final val: ");
10578 print_simple_rtl (file, bl->final_value);
10579 fputc ('\n', file);
10580 }
10581
10582 if ((incr = biv_total_increment (bl)))
10583 {
10584 fprintf (file, " Total increment: ");
10585 print_simple_rtl (file, incr);
10586 fputc ('\n', file);
10587 }
10588
10589 /* List the increments. */
10590 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10591 {
10592 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10593 print_simple_rtl (file, v->add_val);
10594 fputc ('\n', file);
10595 }
10596
10597 /* List the givs. */
10598 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10599 {
10600 fprintf (file, " Giv%d: insn %d, benefit %d, ",
10601 i, INSN_UID (v->insn), v->benefit);
10602 if (v->giv_type == DEST_ADDR)
10603 print_simple_rtl (file, v->mem);
10604 else
10605 print_simple_rtl (file, single_set (v->insn));
10606 fputc ('\n', file);
10607 }
10608 }
10609
10610
10611 static void
10612 loop_biv_dump (v, file, verbose)
10613 const struct induction *v;
10614 FILE *file;
10615 int verbose;
10616 {
10617 if (! v || ! file)
10618 return;
10619
10620 fprintf (file,
10621 "Biv %d: insn %d",
10622 REGNO (v->dest_reg), INSN_UID (v->insn));
10623 fprintf (file, " const ");
10624 print_simple_rtl (file, v->add_val);
10625
10626 if (verbose && v->final_value)
10627 {
10628 fputc ('\n', file);
10629 fprintf (file, " final ");
10630 print_simple_rtl (file, v->final_value);
10631 }
10632
10633 fputc ('\n', file);
10634 }
10635
10636
10637 static void
10638 loop_giv_dump (v, file, verbose)
10639 const struct induction *v;
10640 FILE *file;
10641 int verbose;
10642 {
10643 if (! v || ! file)
10644 return;
10645
10646 if (v->giv_type == DEST_REG)
10647 fprintf (file, "Giv %d: insn %d",
10648 REGNO (v->dest_reg), INSN_UID (v->insn));
10649 else
10650 fprintf (file, "Dest address: insn %d",
10651 INSN_UID (v->insn));
10652
10653 fprintf (file, " src reg %d benefit %d",
10654 REGNO (v->src_reg), v->benefit);
10655 fprintf (file, " lifetime %d",
10656 v->lifetime);
10657
10658 if (v->replaceable)
10659 fprintf (file, " replaceable");
10660
10661 if (v->no_const_addval)
10662 fprintf (file, " ncav");
10663
10664 if (v->ext_dependent)
10665 {
10666 switch (GET_CODE (v->ext_dependent))
10667 {
10668 case SIGN_EXTEND:
10669 fprintf (file, " ext se");
10670 break;
10671 case ZERO_EXTEND:
10672 fprintf (file, " ext ze");
10673 break;
10674 case TRUNCATE:
10675 fprintf (file, " ext tr");
10676 break;
10677 default:
10678 abort ();
10679 }
10680 }
10681
10682 fputc ('\n', file);
10683 fprintf (file, " mult ");
10684 print_simple_rtl (file, v->mult_val);
10685
10686 fputc ('\n', file);
10687 fprintf (file, " add ");
10688 print_simple_rtl (file, v->add_val);
10689
10690 if (verbose && v->final_value)
10691 {
10692 fputc ('\n', file);
10693 fprintf (file, " final ");
10694 print_simple_rtl (file, v->final_value);
10695 }
10696
10697 fputc ('\n', file);
10698 }
10699
10700
10701 void
10702 debug_ivs (loop)
10703 const struct loop *loop;
10704 {
10705 loop_ivs_dump (loop, stderr, 1);
10706 }
10707
10708
10709 void
10710 debug_iv_class (bl)
10711 const struct iv_class *bl;
10712 {
10713 loop_iv_class_dump (bl, stderr, 1);
10714 }
10715
10716
10717 void
10718 debug_biv (v)
10719 const struct induction *v;
10720 {
10721 loop_biv_dump (v, stderr, 1);
10722 }
10723
10724
10725 void
10726 debug_giv (v)
10727 const struct induction *v;
10728 {
10729 loop_giv_dump (v, stderr, 1);
10730 }
10731
10732
10733 #define LOOP_BLOCK_NUM_1(INSN) \
10734 ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1)
10735
10736 /* The notes do not have an assigned block, so look at the next insn. */
10737 #define LOOP_BLOCK_NUM(INSN) \
10738 ((INSN) ? (GET_CODE (INSN) == NOTE \
10739 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
10740 : LOOP_BLOCK_NUM_1 (INSN)) \
10741 : -1)
10742
10743 #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
10744
10745 static void
10746 loop_dump_aux (loop, file, verbose)
10747 const struct loop *loop;
10748 FILE *file;
10749 int verbose ATTRIBUTE_UNUSED;
10750 {
10751 rtx label;
10752
10753 if (! loop || ! file)
10754 return;
10755
10756 /* Print diagnostics to compare our concept of a loop with
10757 what the loop notes say. */
10758 if (! PREV_INSN (loop->first->head)
10759 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
10760 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
10761 != NOTE_INSN_LOOP_BEG)
10762 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
10763 INSN_UID (PREV_INSN (loop->first->head)));
10764 if (! NEXT_INSN (loop->last->end)
10765 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
10766 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
10767 != NOTE_INSN_LOOP_END)
10768 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
10769 INSN_UID (NEXT_INSN (loop->last->end)));
10770
10771 if (loop->start)
10772 {
10773 fprintf (file,
10774 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
10775 LOOP_BLOCK_NUM (loop->start),
10776 LOOP_INSN_UID (loop->start),
10777 LOOP_BLOCK_NUM (loop->cont),
10778 LOOP_INSN_UID (loop->cont),
10779 LOOP_BLOCK_NUM (loop->cont),
10780 LOOP_INSN_UID (loop->cont),
10781 LOOP_BLOCK_NUM (loop->vtop),
10782 LOOP_INSN_UID (loop->vtop),
10783 LOOP_BLOCK_NUM (loop->end),
10784 LOOP_INSN_UID (loop->end));
10785 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10786 LOOP_BLOCK_NUM (loop->top),
10787 LOOP_INSN_UID (loop->top),
10788 LOOP_BLOCK_NUM (loop->scan_start),
10789 LOOP_INSN_UID (loop->scan_start));
10790 fprintf (file, ";; exit_count %d", loop->exit_count);
10791 if (loop->exit_count)
10792 {
10793 fputs (", labels:", file);
10794 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
10795 {
10796 fprintf (file, " %d ",
10797 LOOP_INSN_UID (XEXP (label, 0)));
10798 }
10799 }
10800 fputs ("\n", file);
10801
10802 /* This can happen when a marked loop appears as two nested loops,
10803 say from while (a || b) {}. The inner loop won't match
10804 the loop markers but the outer one will. */
10805 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
10806 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
10807 }
10808 }
10809
10810 /* Call this function from the debugger to dump LOOP. */
10811
10812 void
10813 debug_loop (loop)
10814 const struct loop *loop;
10815 {
10816 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
10817 }
10818
10819 /* Call this function from the debugger to dump LOOPS. */
10820
10821 void
10822 debug_loops (loops)
10823 const struct loops *loops;
10824 {
10825 flow_loops_dump (loops, stderr, loop_dump_aux, 1);
10826 }