/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 88, 89, 91-97, 1998 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */


/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to
   the general induction variables, and induction variable
   elimination is applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value and changes these to zero the
   entire register once before the loop and merely copy the low part
   within the loop.

   Most of the complexity is in heuristics to decide when it is
   worthwhile to do these things.  */
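
/* For illustration (a sketch, not from the original sources), the
   zero-extension transformation described above effectively rewrites

	loop:  (set (reg:SI r) (zero_extend:SI (mem:QI addr)))

   into

	       (set (reg:SI r) (const_int 0))	;; once, before the loop
	loop:  (set (strict_low_part (subreg:QI (reg:SI r) 0)) (mem:QI addr))

   so that only the low part is loaded on each iteration; see the
   STRICT_LOW_PART handling in scan_loop below.  */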

#include "config.h"
#include "system.h"
#include "rtl.h"
#include "obstack.h"
#include "expr.h"
#include "insn-config.h"
#include "insn-flags.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "except.h"
#include "toplev.h"

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but always increase monotonically.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;
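
/* For example (an illustration, not in the original sources): a jump
   comes from outside a loop exactly when the luid of the jump insn lies
   outside the range [INSN_LUID (loop_start), INSN_LUID (loop_end)];
   the INSN_IN_RANGE_P macro defined below encodes this range test.  */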

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

int *uid_loop_num;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;

/* For each loop, gives the containing loop number, -1 if none.  */

int *loop_outer_loop;

#ifdef HAIFA
/* The main output of analyze_loop_iterations is placed here.  */

int *loop_can_insert_bct;

/* For each loop, determines whether any of its inner loops has used
   the count register.  */

int *loop_used_count_register;

/* Loop parameters for arithmetic loops.  These loops have a loop variable
   which is initialized to loop_start_value, incremented in each iteration
   by "loop_increment".  At the end of the iteration the loop variable is
   compared to the loop_comparison_value (using loop_comparison_code).  */

rtx *loop_increment;
rtx *loop_comparison_value;
rtx *loop_start_value;
enum rtx_code *loop_comparison_code;
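
/* As an illustrative example (not in the original sources), a counted
   loop such as

	for (i = START; i < FINAL; i += STEP) ...

   would be described for loop number N roughly by loop_start_value[N]
   = START, loop_increment[N] = STEP, loop_comparison_value[N] = FINAL
   and loop_comparison_code[N] = LT, where START, FINAL and STEP are
   placeholders.  */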
#endif /* HAIFA */

/* For each loop, keep track of its unrolling factor.
   Potential values:
      0: unrolled
      1: not unrolled
     -1: completely unrolled
     >0: holds the exact unroll factor.  */
int *loop_unroll_factor;

/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;

/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include
   return instructions on this list.  */

rtx *loop_number_exit_labels;

/* Indexed by loop number, counts the number of LABEL_REFs on
   loop_number_exit_labels for this loop and all loops nested inside it.  */

int *loop_number_exit_count;

/* Holds the number of loop iterations.  It is zero if the number could
   not be calculated.  Must be unsigned since the number of iterations can
   be as high as 2^wordsize-1.  For loops with a wider iterator, this number
   will be zero if the number of loop iterations is too large for an
   unsigned integer to hold.  */

unsigned HOST_WIDE_INT loop_n_iterations;

/* Nonzero if there is a subroutine call in the current loop.  */

static int loop_has_call;

/* Nonzero if there is a volatile memory reference in the current
   loop.  */

static int loop_has_volatile;

/* loop_continue is the NOTE_INSN_LOOP_CONT of the current loop.
   A continue statement will generate a branch to
   NEXT_INSN (loop_continue).  */

static rtx loop_continue;

/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */

static varray_type n_times_set;
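
/* Worked example (illustration only): a reg set once inside the loop
   from a constant starts out with n_times_set == 1, is changed to -2
   while it is a candidate known equal to a constant, and ends up 0 if
   its set is actually moved out of the loop.  */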

/* Original value of n_times_set; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */

static varray_type n_times_used;

/* Indexed by register number, 1 indicates that the register
   cannot be moved or strength reduced.  */

static varray_type may_not_optimize;

/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;

/* Array of MEMs that are stored in this loop.  If there are too many to fit
   here, we just turn on unknown_address_altered.  */

#define NUM_STORES 30
static rtx loop_store_mems[NUM_STORES];

/* Index of first available slot in above array.  */
static int loop_store_mems_idx;

typedef struct loop_mem_info {
  rtx mem;	/* The MEM itself.  */
  rtx reg;	/* Corresponding pseudo, if any.  */
  int optimize;	/* Nonzero if we can optimize access to this MEM.  */
} loop_mem_info;

/* Array of MEMs that are used (read or written) in this loop, but
   cannot be aliased by anything in this loop, except perhaps
   themselves.  In other words, if loop_mems[i] is altered during the
   loop, it is altered by an expression that is rtx_equal_p to it.  */

static loop_mem_info *loop_mems;
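
/* Illustration (not in the original sources): a MEM whose address is a
   pseudo that never changes inside the loop can qualify, since every
   access to it in the loop goes through an rtx_equal_p address; a store
   through an unrelated pointer that might alias it would disqualify it.  */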

/* The index of the next available slot in LOOP_MEMS.  */

static int loop_mems_idx;

/* The number of elements allocated in LOOP_MEMS.  */

static int loop_mems_allocated;

/* Nonzero if we don't know what MEMs were changed in the current loop.
   This happens if the loop contains a call (in which case `loop_has_call'
   will also be set) or if we store into more than NUM_STORES MEMs.  */

static int unknown_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Number of loops contained within the current one, including itself.  */
static int loops_enclosed;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;

/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that array.  */

static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free
\f
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is a libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  int regno;			/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */

  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
				    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode;	/* Nonzero means it is a mode for a low part
				   that we should avoid changing when clearing
				   the rest of the reg.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this is */
  struct movable *next;
};

static struct movable *the_movables;

FILE *loop_dump_stream;

/* Forward declarations.  */

static void find_and_verify_loops PROTO((rtx));
static void mark_loop_jump PROTO((rtx, int));
static void prescan_loop PROTO((rtx, rtx));
static int reg_in_basic_block_p PROTO((rtx, rtx));
static int consec_sets_invariant_p PROTO((rtx, int, rtx));
static rtx libcall_other_reg PROTO((rtx, rtx));
static int labels_in_range_p PROTO((rtx, int));
static void count_loop_regs_set PROTO((rtx, rtx, varray_type, varray_type,
				       int *, int));
static void note_addr_stored PROTO((rtx, rtx));
static int loop_reg_used_before_p PROTO((rtx, rtx, rtx, rtx, rtx));
static void scan_loop PROTO((rtx, rtx, int, int));
#if 0
static void replace_call_address PROTO((rtx, rtx, rtx));
#endif
static rtx skip_consec_insns PROTO((rtx, int));
static int libcall_benefit PROTO((rtx));
static void ignore_some_movables PROTO((struct movable *));
static void force_movables PROTO((struct movable *));
static void combine_movables PROTO((struct movable *, int));
static int regs_match_p PROTO((rtx, rtx, struct movable *));
static int rtx_equal_for_loop_p PROTO((rtx, rtx, struct movable *));
static void add_label_notes PROTO((rtx, rtx));
static void move_movables PROTO((struct movable *, int, int, rtx, rtx, int));
static int count_nonfixed_reads PROTO((rtx));
static void strength_reduce PROTO((rtx, rtx, rtx, int, rtx, rtx, int, int));
static void find_single_use_in_loop PROTO((rtx, rtx, varray_type));
static int valid_initial_value_p PROTO((rtx, rtx, int, rtx));
static void find_mem_givs PROTO((rtx, rtx, int, rtx, rtx));
static void record_biv PROTO((struct induction *, rtx, rtx, rtx, rtx, int, int));
static void check_final_value PROTO((struct induction *, rtx, rtx));
static void record_giv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx, int, enum g_types, int, rtx *, rtx, rtx));
static void update_giv_derive PROTO((rtx));
static int basic_induction_var PROTO((rtx, enum machine_mode, rtx, rtx, rtx *, rtx *));
static rtx simplify_giv_expr PROTO((rtx, int *));
static int general_induction_var PROTO((rtx, rtx *, rtx *, rtx *, int, int *));
static int consec_sets_giv PROTO((int, rtx, rtx, rtx, rtx *, rtx *));
static int check_dbra_loop PROTO((rtx, int, rtx));
static rtx express_from_1 PROTO((rtx, rtx, rtx));
static rtx express_from PROTO((struct induction *, struct induction *));
static rtx combine_givs_p PROTO((struct induction *, struct induction *));
static void combine_givs PROTO((struct iv_class *));
static int product_cheap_p PROTO((rtx, rtx));
static int maybe_eliminate_biv PROTO((struct iv_class *, rtx, rtx, int, int, int));
static int maybe_eliminate_biv_1 PROTO((rtx, rtx, struct iv_class *, int, rtx));
static int last_use_this_basic_block PROTO((rtx, rtx));
static void record_initial PROTO((rtx, rtx));
static void update_reg_last_use PROTO((rtx, rtx));
static rtx next_insn_in_loop PROTO((rtx, rtx, rtx, rtx));
static void load_mems_and_recount_loop_regs_set PROTO((rtx, rtx, rtx,
						       rtx, varray_type,
						       int *));
static void load_mems PROTO((rtx, rtx, rtx, rtx));
static int insert_loop_mem PROTO((rtx *, void *));
static int replace_loop_mem PROTO((rtx *, void *));
static int replace_label PROTO((rtx *, void *));

typedef struct rtx_and_int {
  rtx r;
  int i;
} rtx_and_int;

typedef struct rtx_pair {
  rtx r1;
  rtx r2;
} rtx_pair;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END)	\
  (INSN_UID (INSN) < max_uid_for_loop		\
   && INSN_LUID (INSN) >= INSN_LUID (START)	\
   && INSN_LUID (INSN) <= INSN_LUID (END))

#ifdef HAIFA
/* This is extern from unroll.c.  */
extern void iteration_info PROTO((rtx, rtx *, rtx *, rtx, rtx));

/* Two main functions for implementing bct:
   the first is called before loop unrolling, the second after.  */
#ifdef HAVE_decrement_and_branch_on_count
static void analyze_loop_iterations PROTO((rtx, rtx));
static void insert_bct PROTO((rtx, rtx));

/* Auxiliary function that inserts the bct pattern into the loop.  */
static void instrument_loop_bct PROTO((rtx, rtx, rtx));
#endif /* HAVE_decrement_and_branch_on_count */
#endif /* HAIFA */

/* Indirect_jump_in_function is computed once per function.  */
int indirect_jump_in_function = 0;
static int indirect_jump_in_function_p PROTO((rtx));

\f
/* Relative gain of eliminating various kinds of operations.  */
static int add_cost;
#if 0
static int shift_cost;
static int mult_cost;
#endif

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;


void
init_loop ()
{
  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

#ifdef ADDRESS_COST
  reg_address_cost = ADDRESS_COST (reg);
#else
  reg_address_cost = rtx_cost (reg, MEM);
#endif

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  copy_cost = 2 * 2;

  /* Free the objects we just allocated.  */
  obfree (free_point);

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
}
\f
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile, unroll_p, bct_p)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int unroll_p, bct_p;
{
  register rtx insn;
  register int i;
  rtx last_insn;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
  bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));

  /* Allocate tables for recording each loop.  We set each entry, so they need
     not be zeroed.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));

  /* This is initialized by the unrolling code, so we go ahead
     and clear them just in case we are not performing loop
     unrolling.  */
  loop_unroll_factor = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_unroll_factor, max_loop_num * sizeof (int));

#ifdef HAIFA
  /* Allocate for BCT optimization.  */
  loop_can_insert_bct = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_can_insert_bct, max_loop_num * sizeof (int));

  loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));

  loop_increment = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_comparison_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_start_value = (rtx *) alloca (max_loop_num * sizeof (rtx));
  bzero ((char *) loop_increment, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_comparison_value, max_loop_num * sizeof (rtx));
  bzero ((char *) loop_start_value, max_loop_num * sizeof (rtx));

  loop_comparison_code
    = (enum rtx_code *) alloca (max_loop_num * sizeof (enum rtx_code));
  bzero ((char *) loop_comparison_code, max_loop_num * sizeof (enum rtx_code));
#endif /* HAIFA */

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid () + 1;

  /* Compute the mapping from uids to luids.
     LUIDs are numbers assigned to insns, like uids,
     except that luids increase monotonically through the code.
     Don't assign luids to line-number NOTEs, so that the distance in luids
     between two insns is not affected by -g.  */

  for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
    {
      last_insn = insn;
      if (GET_CODE (insn) != NOTE
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }

  max_luid = i + 1;

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();

  /* Determine if the function has indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num-1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
		 unroll_p, bct_p);

  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original one
     to one mapping will remain.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    unroll_block_trees ();

  end_alias_analysis ();
}
\f
/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP_TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (insn, start, end, loop_top)
     rtx insn;
     rtx start;
     rtx end;
     rtx loop_top;
{
  insn = NEXT_INSN (insn);

  if (insn == end)
    {
      if (loop_top)
	/* Go to the top of the loop, and continue there.  */
	insn = loop_top;
      else
	/* We're done.  */
	insn = NULL_RTX;
    }

  if (insn == start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}

/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (loop_start, end, unroll_p, bct_p)
     rtx loop_start, end;
     int unroll_p, bct_p;
{
  register int i;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */
  rtx loop_top = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */
  rtx scan_start;
  /* Number of insns in the loop.  */
  int insn_count;
  int in_libcall = 0;
  int tem;
  rtx temp;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* If we have calls, contains the insn in which a register was used
     if it was used exactly once; contains const0_rtx if it was used more
     than once.  */
  varray_type reg_single_usage = 0;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;
  int nregs;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
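
  /* For illustration (a sketch, not part of the original sources), such a
     rotated loop looks like:

	NOTE_INSN_LOOP_BEG
	(jump L2)		;; loop_entry_jump
	L1:			;; loop_top
	  ... loop body ...
	L2:			;; scan_start: the exit test
	(conditional jump L1)
	NOTE_INSN_LOOP_END  */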

  for (p = NEXT_INSN (loop_start);
       p != end
	 && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
	 && (GET_CODE (p) != NOTE
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  scan_start = p;

  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end);
  threshold = (loop_has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN)  */
      if (simplejump_p (p)
	  && JUMP_LABEL (p) != 0
	  /* Check to see whether the jump actually
	     jumps out of the loop (meaning it's no loop).
	     This case can happen for things like
	     do {..} while (0).  If this label was generated previously
	     by loop, we can't tell anything about it and have to reject
	     the loop.  */
	  && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, end))
	{
	  loop_top = next_label (scan_start);
	  scan_start = JUMP_LABEL (p);
	}
    }

  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (end));
      return;
    }

  /* Count number of times each reg is set during this loop.
     Set VARRAY_CHAR (may_not_optimize, I) if it is not safe to move out
     the setting of register I.  If this loop has calls, set
     VARRAY_RTX (reg_single_usage, I).  */

  /* Allocate extra space for REGS that might be created by
     load_mems.  We allocate a little extra slop as well, in the hopes
     that even after the moving of movables creates some new registers
     we won't have to reallocate these arrays.  However, we do grow
     the arrays, if necessary, in load_mems_and_recount_loop_regs_set.  */
  nregs = max_reg_num () + loop_mems_idx + 16;
  VARRAY_INT_INIT (n_times_set, nregs, "n_times_set");
  VARRAY_INT_INIT (n_times_used, nregs, "n_times_used");
  VARRAY_CHAR_INIT (may_not_optimize, nregs, "may_not_optimize");

  if (loop_has_call)
    VARRAY_RTX_INIT (reg_single_usage, nregs, "reg_single_usage");

  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
		       may_not_optimize, reg_single_usage, &insn_count, nregs);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      VARRAY_CHAR (may_not_optimize, i) = 1;
      VARRAY_INT (n_times_set, i) = 1;
    }

#ifdef AVOID_CCMODE_COPIES
  /* Don't try to move insns which set CC registers if we should not
     create CCmode register copies.  */
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
      VARRAY_CHAR (may_not_optimize, i) = 1;
#endif

  bcopy ((char *) &n_times_set->data,
	 (char *) &n_times_used->data, nregs * sizeof (int));

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	       INSN_UID (loop_start), INSN_UID (end), insn_count);
      if (loop_continue)
	fprintf (loop_dump_stream, "Continue at insn %d.\n",
		 INSN_UID (loop_continue));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set n_times_set negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
       p != NULL_RTX;
       p = next_insn_in_loop (p, scan_start, end, loop_top))
    {
      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
	  && find_reg_note (p, REG_LIBCALL, NULL_RTX))
	in_libcall = 1;
      else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
	       && find_reg_note (p, REG_RETVAL, NULL_RTX))
	in_libcall = 0;

      if (GET_CODE (p) == INSN
	  && (set = single_set (p))
	  && GET_CODE (SET_DEST (set)) == REG
	  && ! VARRAY_CHAR (may_not_optimize, REGNO (SET_DEST (set))))
	{
	  int tem1 = 0;
	  int tem2 = 0;
	  int move_insn = 0;
	  rtx src = SET_SRC (set);
	  rtx dependencies = 0;

	  /* Figure out what to use as a source of this insn.  If a REG_EQUIV
	     note is given or if a REG_EQUAL note with a constant operand is
	     specified, use it as the source and mark that we should move
	     this insn by calling emit_move_insn rather than duplicating the
	     insn.

	     Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
	     is present.  */
	  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	  if (temp)
	    src = XEXP (temp, 0), move_insn = 1;
	  else
	    {
	      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
	      if (temp && CONSTANT_P (XEXP (temp, 0)))
		src = XEXP (temp, 0), move_insn = 1;
	      if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		{
		  src = XEXP (temp, 0);
		  /* A libcall block can use regs that don't appear in
		     the equivalent expression.  To move the libcall,
		     we must move those regs too.  */
		  dependencies = libcall_other_reg (p, src);
		}
	    }

	  /* Don't try to optimize a register that was made
	     by loop-optimization for an inner loop.
	     We don't know its life-span, so we can't compute the benefit.  */
	  if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
	    ;
	  /* In order to move a register, we need to have one of three cases:
	     (1) it is used only in the same basic block as the set
	     (2) it is not a user variable and it is not used in the
		 exit test (this can cause the variable to be used
		 before it is set just like a user-variable).
	     (3) the set is guaranteed to be executed once the loop starts,
		 and the reg is not used until after that.  */
	  else if (! ((! maybe_never
		       && ! loop_reg_used_before_p (set, p, loop_start,
						    scan_start, end))
		      || (! REG_USERVAR_P (SET_DEST (set))
			  && ! REG_LOOP_TEST_P (SET_DEST (set)))
		      || reg_in_basic_block_p (p, SET_DEST (set))))
	    ;
	  else if ((tem = invariant_p (src))
		   && (dependencies == 0
		       || (tem2 = invariant_p (dependencies)) != 0)
		   && (VARRAY_INT (n_times_set,
				   REGNO (SET_DEST (set))) == 1
		       || (tem1
			   = consec_sets_invariant_p
			   (SET_DEST (set),
			    VARRAY_INT (n_times_set, REGNO (SET_DEST (set))),
			    p)))
		   /* If the insn can cause a trap (such as divide by zero),
		      can't move it unless it's guaranteed to be executed
		      once loop is entered.  Even a function call might
		      prevent the trap insn from being reached
		      (since it might exit!)  */
		   && ! ((maybe_never || call_passed)
			 && may_trap_p (src)))
	    {
	      register struct movable *m;
	      register int regno = REGNO (SET_DEST (set));

	      /* A potential lossage is where we have a case where two insns
		 can be combined as long as they are both in the loop, but
		 we move one of them outside the loop.  For large loops,
		 this can lose.  The most common case of this is the address
		 of a function being called.

		 Therefore, if this register is marked as being used exactly
		 once if we are in a loop with calls (a "large loop"), see if
		 we can replace the usage of this register with the source
		 of this SET.  If we can, delete this insn.

		 Don't do this if P has a REG_RETVAL note or if we have
		 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

	      if (reg_single_usage && VARRAY_RTX (reg_single_usage, regno) != 0
		  && VARRAY_RTX (reg_single_usage, regno) != const0_rtx
		  && REGNO_FIRST_UID (regno) == INSN_UID (p)
		  && (REGNO_LAST_UID (regno)
		      == INSN_UID (VARRAY_RTX (reg_single_usage, regno)))
		  && VARRAY_INT (n_times_set, regno) == 1
		  && ! side_effects_p (SET_SRC (set))
		  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
		  && (! SMALL_REGISTER_CLASSES
		      || (! (GET_CODE (SET_SRC (set)) == REG
			     && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
		  /* This test is not redundant; SET_SRC (set) might be
		     a call-clobbered register and the life of REGNO
		     might span a call.  */
		  && ! modified_between_p (SET_SRC (set), p,
					   VARRAY_RTX
					   (reg_single_usage, regno))
		  && no_labels_between_p (p, VARRAY_RTX (reg_single_usage, regno))
		  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					   VARRAY_RTX
					   (reg_single_usage, regno)))
		{
		  /* Replace any usage in a REG_EQUAL note.  Must copy the
		     new source, so that we don't get rtx sharing between the
		     SET_SOURCE and REG_NOTES of insn p.  */
		  REG_NOTES (VARRAY_RTX (reg_single_usage, regno))
		    = replace_rtx (REG_NOTES (VARRAY_RTX
					      (reg_single_usage, regno)),
				   SET_DEST (set), copy_rtx (SET_SRC (set)));

		  PUT_CODE (p, NOTE);
		  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
		  NOTE_SOURCE_FILE (p) = 0;
		  VARRAY_INT (n_times_set, regno) = 0;
		  continue;
		}

	      m = (struct movable *) alloca (sizeof (struct movable));
	      m->next = 0;
	      m->insn = p;
	      m->set_src = src;
	      m->dependencies = dependencies;
	      m->set_dest = SET_DEST (set);
	      m->force = 0;
	      m->consec = VARRAY_INT (n_times_set,
				      REGNO (SET_DEST (set))) - 1;
	      m->done = 0;
	      m->forces = 0;
	      m->partial = 0;
	      m->move_insn = move_insn;
	      m->move_insn_first = 0;
	      m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
	      m->savemode = VOIDmode;
	      m->regno = regno;
	      /* Set M->cond if either invariant_p or consec_sets_invariant_p
		 returned 2 (only conditionally invariant).  */
	      m->cond = ((tem | tem1 | tem2) > 1);
	      m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
			   || uid_luid[REGNO_FIRST_UID (regno)] < INSN_LUID (loop_start));
	      m->match = 0;
	      m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
			     - uid_luid[REGNO_FIRST_UID (regno)]);
	      m->savings = VARRAY_INT (n_times_used, regno);
	      if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		m->savings += libcall_benefit (p);
	      VARRAY_INT (n_times_set, regno) = move_insn ? -2 : -1;
	      /* Add M to the end of the chain MOVABLES.  */
	      if (movables == 0)
		movables = m;
	      else
		last_movable->next = m;
	      last_movable = m;

	      if (m->consec > 0)
		{
		  /* It is possible for the first instruction to have a
		     REG_EQUAL note but a non-invariant SET_SRC, so we must
		     remember the status of the first instruction in case
		     the last instruction doesn't have a REG_EQUAL note.  */
		  m->move_insn_first = m->move_insn;

		  /* Skip this insn, not checking REG_LIBCALL notes.  */
		  p = next_nonnote_insn (p);
		  /* Skip the consecutive insns, if there are any.  */
		  p = skip_consec_insns (p, m->consec);
		  /* Back up to the last insn of the consecutive group.  */
		  p = prev_nonnote_insn (p);

		  /* We must now reset m->move_insn, m->is_equiv, and possibly
		     m->set_src to correspond to the effects of all the
		     insns.  */
		  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		  if (temp)
		    m->set_src = XEXP (temp, 0), m->move_insn = 1;
		  else
		    {
		      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		      if (temp && CONSTANT_P (XEXP (temp, 0)))
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			m->move_insn = 0;

		    }
		  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		}
	    }
	  /* If this register is always set within a STRICT_LOW_PART
	     or set to zero, then its high bytes are constant.
	     So clear them outside the loop and within the loop
	     just load the low bytes.
	     We must check that the machine has an instruction to do so.
	     Also, if the value loaded into the register
	     depends on the same register, this cannot be done.  */
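	  /* Illustration (not in the original sources): the test below
	     recognizes an insn pair of the shape

		(set (reg:SI r) (const_int 0))
		(set (strict_low_part (subreg:QI (reg:SI r) 0)) ...)

	     which together implement a zero-extending load of the low
	     part into r.  */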
	  else if (SET_SRC (set) == const0_rtx
		   && GET_CODE (NEXT_INSN (p)) == INSN
		   && (set1 = single_set (NEXT_INSN (p)))
		   && GET_CODE (set1) == SET
		   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
		       == SET_DEST (set))
		   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
	    {
	      register int regno = REGNO (SET_DEST (set));
	      if (VARRAY_INT (n_times_set, regno) == 2)
		{
		  register struct movable *m;
		  m = (struct movable *) alloca (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_dest = SET_DEST (set);
		  m->dependencies = 0;
		  m->force = 0;
		  m->consec = 0;
		  m->done = 0;
		  m->forces = 0;
		  m->move_insn = 0;
		  m->move_insn_first = 0;
		  m->partial = 1;
		  /* If the insn may not be executed on some cycles,
		     we can't clear the whole reg; clear just high part.
		     Not even if the reg is used only within this loop.
		     Consider this:
		     while (1)
		       while (s != t) {
			 if (foo ()) x = *s;
			 use (x);
		       }
		     Clearing x before the inner loop could clobber a value
		     being saved from the last time around the outer loop.
		     However, if the reg is not used outside this loop
		     and all uses of the register are in the same
		     basic block as the store, there is no problem.

		     If this insn was made by loop, we don't know its
		     INSN_LUID and hence must make a conservative
		     assumption.  */
		  m->global = (INSN_UID (p) >= max_uid_for_loop
			       || (uid_luid[REGNO_LAST_UID (regno)]
				   > INSN_LUID (end))
			       || (uid_luid[REGNO_FIRST_UID (regno)]
				   < INSN_LUID (p))
			       || (labels_in_range_p
				   (p, uid_luid[REGNO_FIRST_UID (regno)])));
		  if (maybe_never && m->global)
		    m->savemode = GET_MODE (SET_SRC (set1));
		  else
		    m->savemode = VOIDmode;
		  m->regno = regno;
		  m->cond = 0;
		  m->match = 0;
		  m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
				 - uid_luid[REGNO_FIRST_UID (regno)]);
		  m->savings = 1;
		  VARRAY_INT (n_times_set, regno) = -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  if (movables == 0)
		    movables = m;
		  else
		    last_movable->next = m;
		  last_movable = m;
		}
	    }
	}
      /* Past a call insn, we get to insns which might not be executed
	 because the call might exit.  This matters for insns that trap.
	 Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
	 so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
	call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
	 can't count on whether or how many times they will be
	 executed during each iteration.  Therefore, we can
	 only move out sets of trivial variables
	 (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier then the loop end.  */
	       && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
		     && NEXT_INSN (NEXT_INSN (p)) == end
		     && simplejump_p (p)))
	maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed: logically, the loop begins here even though the exit
	     code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    maybe_never = call_passed = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg it loads
     dies right at another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in n_times_set for each reg that is moved.

     Generally this increases code size, so do not move movables when
     optimizing for code size.  */

  if (! optimize_size)
    move_movables (movables, threshold,
		   insn_count, loop_start, end, nregs);

  /* Now candidates that still are negative are those not moved.
     Change n_times_set to indicate that those are not actually invariant.  */
  for (i = 0; i < nregs; i++)
    if (VARRAY_INT (n_times_set, i) < 0)
      VARRAY_INT (n_times_set, i) = VARRAY_INT (n_times_used, i);

  /* Now that we've moved some things out of the loop, we are able to
     hoist even more memory references.  There's no need to pass
     reg_single_usage this time, since we're done with it.  */
  load_mems_and_recount_loop_regs_set (scan_start, end, loop_top,
				       loop_start, 0,
				       &insn_count);

  if (flag_strength_reduce)
    {
      the_movables = movables;
      strength_reduce (scan_start, end, loop_top,
		       insn_count, loop_start, end, unroll_p, bct_p);
    }

  VARRAY_FREE (n_times_set);
  VARRAY_FREE (n_times_used);
  VARRAY_FREE (may_not_optimize);
  VARRAY_FREE (reg_single_usage);
}
\f
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
	  && ! reg_mentioned_p (in_this, not_in_this))
	*output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
	{
	case 'E':
	  for (j = 0; j < XVECLEN (in_this, i); j++)
	    record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
	  break;

	case 'e':
	  record_excess_regs (XEXP (in_this, i), not_in_this, output);
	  break;
	}
    }
}
\f
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

static rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
	  || GET_CODE (p) == CALL_INSN)
	record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
\f
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
	{
	case NOTE:
	  break;

	case INSN:
	case CALL_INSN:
	  /* Ordinary insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  break;

	case JUMP_INSN:
	  /* Jump insn: if this is the last use, we win.  */
	  if (REGNO_LAST_UID (regno) == INSN_UID (p))
	    return 1;
	  /* Otherwise, it's the end of the basic block, so we lose.  */
	  return 0;

	case CODE_LABEL:
	case BARRIER:
	  /* It's the end of the basic block, so we lose.  */
	  return 0;

	default:
	  break;
	}
    }

  /* The "last use" doesn't follow the "first use"??  */
  abort ();
}
\f
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (last)
     rtx last;
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
	benefit += 10;		/* Assume at least this many insns in a
				   library routine.  */
      else if (GET_CODE (insn) == INSN
	       && GET_CODE (PATTERN (insn)) != USE
	       && GET_CODE (PATTERN (insn)) != CLOBBER)
	benefit++;
    }

  return benefit;
}
\f
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (insn, count)
     rtx insn;
     int count;
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
	 be an insn here.  */
      if (GET_CODE (insn) != NOTE
	  && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
	insn = XEXP (temp, 0);

      do insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}

/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (movables)
     struct movable *movables;
{
  register struct movable *m, *m1;

  for (m = movables; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
	{
	  rtx insn;
	  /* Check for earlier movables inside that range,
	     and mark them invalid.  We cannot use LUIDs here because
	     insns created by loop.c for prior loops don't have LUIDs.
	     Rather than reject all such insns from movables, we just
	     explicitly check each insn in the libcall (since invariant
	     libcalls aren't that common).  */
	  for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
	    for (m1 = movables; m1 != m; m1 = m1->next)
	      if (m1->insn == insn)
		m1->done = 1;
	}
    }
}

/* For each movable insn, see if the reg it loads
   dies right at another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */
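
/* Illustration (a sketch, not in the original sources): given

	insn A:  (set (reg a) <invariant>)
	...
	insn B:  (set (reg b) (... (reg a) ...))	;; last use of (reg a)

   where B is itself only conditionally movable, B "forces" A: B can be
   moved out of the loop only if A is, so A's lifetime and savings are
   increased by B's.  */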
1414
1415static void
1416force_movables (movables)
1417 struct movable *movables;
1418{
1419 register struct movable *m, *m1;
1420 for (m1 = movables; m1; m1 = m1->next)
1421 /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */
1422 if (!m1->partial && !m1->done)
1423 {
1424 int regno = m1->regno;
1425 for (m = m1->next; m; m = m->next)
1426 /* ??? Could this be a bug? What if CSE caused the
1427 register of M1 to be used after this insn?
1428 Since CSE does not update regno_last_uid,
1429 this insn M->insn might not be where it dies.
1430 But very likely this doesn't matter; what matters is
1431 that M's reg is computed from M1's reg. */
b1f21e0a 1432 if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
b4ad7b23
RS
1433 && !m->done)
1434 break;
1435 if (m != 0 && m->set_src == m1->set_dest
1436 /* If m->consec, m->set_src isn't valid. */
1437 && m->consec == 0)
1438 m = 0;
1439
1440 /* Increase the priority of the moving the first insn
1441 since it permits the second to be moved as well. */
1442 if (m != 0)
1443 {
1444 m->forces = m1;
1445 m1->lifetime += m->lifetime;
3875b31d 1446 m1->savings += m->savings;
b4ad7b23
RS
1447 }
1448 }
1449}
1450\f
1451/* Find invariant expressions that are equal and can be combined into
1452 one register. */
1453
1454static void
1455combine_movables (movables, nregs)
1456 struct movable *movables;
1457 int nregs;
1458{
1459 register struct movable *m;
1460 char *matched_regs = (char *) alloca (nregs);
1461 enum machine_mode mode;
1462
1463 /* Regs that are set more than once are not allowed to match
1464 or be matched. I'm no longer sure why not. */
1465 /* Perhaps testing m->consec_sets would be more appropriate here? */
1466
1467 for (m = movables; m; m = m->next)
8deb8e2c 1468 if (m->match == 0 && VARRAY_INT (n_times_used, m->regno) == 1 && !m->partial)
b4ad7b23
RS
1469 {
1470 register struct movable *m1;
1471 int regno = m->regno;
b4ad7b23
RS
1472
1473 bzero (matched_regs, nregs);
1474 matched_regs[regno] = 1;
1475
88016fb7
DE
1476 /* We want later insns to match the first one. Don't make the first
1477 one match any later ones. So start this loop at m->next. */
1478 for (m1 = m->next; m1; m1 = m1->next)
8deb8e2c 1479 if (m != m1 && m1->match == 0 && VARRAY_INT (n_times_used, m1->regno) == 1
1480 /* A reg used outside the loop mustn't be eliminated. */
1481 && !m1->global
1482 /* A reg used for zero-extending mustn't be eliminated. */
1483 && !m1->partial
1484 && (matched_regs[m1->regno]
1485 ||
1486 (
1487 /* Can combine regs with different modes loaded from the
1488 same constant only if the modes are the same or
1489 if both are integer modes with M wider or the same
1490 width as M1. The check for integer is redundant, but
1491 safe, since the only case of differing destination
1492 modes with equal sources is when both sources are
1493 VOIDmode, i.e., CONST_INT. */
1494 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1495 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1496 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1497 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1498 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1499 /* See if the source of M1 says it matches M. */
1500 && ((GET_CODE (m1->set_src) == REG
1501 && matched_regs[REGNO (m1->set_src)])
1502 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
1503 movables))))
1504 && ((m->dependencies == m1->dependencies)
1505 || rtx_equal_p (m->dependencies, m1->dependencies)))
1506 {
1507 m->lifetime += m1->lifetime;
1508 m->savings += m1->savings;
1509 m1->done = 1;
1510 m1->match = m;
1511 matched_regs[m1->regno] = 1;
1512 }
1513 }
1514
1515 /* Now combine the regs used for zero-extension.
1516 This can be done for those not marked `global'
1517 provided their lives don't overlap. */
1518
1519 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1520 mode = GET_MODE_WIDER_MODE (mode))
1521 {
1522 register struct movable *m0 = 0;
1523
1524 /* Combine all the registers for extension from mode MODE.
1525 Don't combine any that are used outside this loop. */
1526 for (m = movables; m; m = m->next)
1527 if (m->partial && ! m->global
1528 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1529 {
1530 register struct movable *m1;
1531 int first = uid_luid[REGNO_FIRST_UID (m->regno)];
1532 int last = uid_luid[REGNO_LAST_UID (m->regno)];
1533
1534 if (m0 == 0)
1535 {
1536 /* First one: don't check for overlap, just record it. */
1537 m0 = m;
1538 continue;
1539 }
1540
1541 /* Make sure they extend to the same mode.
1542 (Almost always true.) */
1543 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
1544 continue;
1545
1546 /* We already have one: check for overlap with those
1547 already combined together. */
1548 for (m1 = movables; m1 != m; m1 = m1->next)
1549 if (m1 == m0 || (m1->partial && m1->match == m0))
1550 if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last
1551 || uid_luid[REGNO_LAST_UID (m1->regno)] < first))
1552 goto overlap;
1553
1554 /* No overlap: we can combine this with the others. */
1555 m0->lifetime += m->lifetime;
1556 m0->savings += m->savings;
1557 m->done = 1;
1558 m->match = m0;
1559
1560 overlap: ;
1561 }
1562 }
1563}
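/* [Editor's sketch -- not part of loop.c.  The `overlap' test above,
   ! (first1 > last || last1 < first), is the usual check that two
   closed luid intervals intersect; only registers with disjoint
   lifetimes may be combined into one zero-extended register.] */
#if 0
static int
luid_ranges_overlap (first0, last0, first1, last1)
     int first0, last0, first1, last1;
{
  /* Two closed intervals intersect unless one lies wholly past the
     other.  E.g. [10,20] and [15,30] overlap; [10,20] and [21,30]
     do not, so the latter pair could share a register.  */
  return ! (first0 > last1 || last0 < first1);
}
#endif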
1564\f
1565/* Return 1 if regs X and Y will become the same if moved. */
1566
1567static int
1568regs_match_p (x, y, movables)
1569 rtx x, y;
1570 struct movable *movables;
1571{
1572 int xn = REGNO (x);
1573 int yn = REGNO (y);
1574 struct movable *mx, *my;
1575
1576 for (mx = movables; mx; mx = mx->next)
1577 if (mx->regno == xn)
1578 break;
1579
1580 for (my = movables; my; my = my->next)
1581 if (my->regno == yn)
1582 break;
1583
1584 return (mx && my
1585 && ((mx->match == my->match && mx->match != 0)
1586 || mx->match == my
1587 || mx == my->match));
1588}
1589
1590/* Return 1 if X and Y are identical-looking rtx's.
1591 This is the Lisp function EQUAL for rtx arguments.
1592
1593 If two registers are matching movables or a movable register and an
1594 equivalent constant, consider them equal. */
1595
1596static int
1597rtx_equal_for_loop_p (x, y, movables)
1598 rtx x, y;
1599 struct movable *movables;
1600{
1601 register int i;
1602 register int j;
1603 register struct movable *m;
1604 register enum rtx_code code;
1605 register char *fmt;
1606
1607 if (x == y)
1608 return 1;
1609 if (x == 0 || y == 0)
1610 return 0;
1611
1612 code = GET_CODE (x);
1613
1614 /* If we have a register and a constant, they may sometimes be
1615 equal. */
8deb8e2c 1616 if (GET_CODE (x) == REG && VARRAY_INT (n_times_set, REGNO (x)) == -2
b4ad7b23 1617 && CONSTANT_P (y))
1618 {
1619 for (m = movables; m; m = m->next)
1620 if (m->move_insn && m->regno == REGNO (x)
1621 && rtx_equal_p (m->set_src, y))
1622 return 1;
1623 }
8deb8e2c 1624 else if (GET_CODE (y) == REG && VARRAY_INT (n_times_set, REGNO (y)) == -2
b4ad7b23 1625 && CONSTANT_P (x))
1626 {
1627 for (m = movables; m; m = m->next)
1628 if (m->move_insn && m->regno == REGNO (y)
1629 && rtx_equal_p (m->set_src, x))
1630 return 1;
1631 }
1632
1633 /* Otherwise, rtx's of different codes cannot be equal. */
1634 if (code != GET_CODE (y))
1635 return 0;
1636
1637 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1638 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1639
1640 if (GET_MODE (x) != GET_MODE (y))
1641 return 0;
1642
1643 /* These three types of rtx's can be compared nonrecursively. */
1644 if (code == REG)
1645 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1646
1647 if (code == LABEL_REF)
1648 return XEXP (x, 0) == XEXP (y, 0);
1649 if (code == SYMBOL_REF)
1650 return XSTR (x, 0) == XSTR (y, 0);
1651
 1652 /* Compare the elements. If any pair of corresponding elements
 1653 fails to match, return 0 for the whole thing. */
1654
1655 fmt = GET_RTX_FORMAT (code);
1656 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1657 {
1658 switch (fmt[i])
1659 {
1660 case 'w':
1661 if (XWINT (x, i) != XWINT (y, i))
1662 return 0;
1663 break;
1664
1665 case 'i':
1666 if (XINT (x, i) != XINT (y, i))
1667 return 0;
1668 break;
1669
1670 case 'E':
1671 /* Two vectors must have the same length. */
1672 if (XVECLEN (x, i) != XVECLEN (y, i))
1673 return 0;
1674
1675 /* And the corresponding elements must match. */
1676 for (j = 0; j < XVECLEN (x, i); j++)
1677 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), movables) == 0)
1678 return 0;
1679 break;
1680
1681 case 'e':
1682 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0)
1683 return 0;
1684 break;
1685
1686 case 's':
1687 if (strcmp (XSTR (x, i), XSTR (y, i)))
1688 return 0;
1689 break;
1690
1691 case 'u':
1692 /* These are just backpointers, so they don't matter. */
1693 break;
1694
1695 case '0':
1696 break;
1697
1698 /* It is believed that rtx's at this level will never
1699 contain anything but integers and other rtx's,
1700 except for within LABEL_REFs and SYMBOL_REFs. */
1701 default:
1702 abort ();
1703 }
1704 }
1705 return 1;
1706}
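/* [Editor's note -- worked example, not part of loop.c.  The
   REG-vs-constant cases above let a movable register compare equal
   to the constant it is loaded from.  Assuming some movable records
   regno 100 with move_insn set and set_src (const_int 5), and reg
   100's n_times_set entry carries the -2 marker tested above, then

	rtx_equal_for_loop_p ((reg:SI 100), (const_int 5), movables)

   returns 1 even though the two rtx's have different codes.] */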
1707\f
1708/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
 1709 insns in INSNS which use that reference. */
1710
1711static void
1712add_label_notes (x, insns)
1713 rtx x;
1714 rtx insns;
1715{
1716 enum rtx_code code = GET_CODE (x);
7dcd3836 1717 int i, j;
1718 char *fmt;
1719 rtx insn;
1720
82d00367 1721 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
c160c628 1722 {
1723 /* This code used to ignore labels that referred to dispatch tables to
 1724 avoid flow generating (slightly) worse code.
1725
1726 We no longer ignore such label references (see LABEL_REF handling in
1727 mark_jump_label for additional information). */
1728 for (insn = insns; insn; insn = NEXT_INSN (insn))
1729 if (reg_mentioned_p (XEXP (x, 0), insn))
1730 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0),
1731 REG_NOTES (insn));
1732 }
1733
1734 fmt = GET_RTX_FORMAT (code);
1735 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1736 {
1737 if (fmt[i] == 'e')
1738 add_label_notes (XEXP (x, i), insns);
1739 else if (fmt[i] == 'E')
1740 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1741 add_label_notes (XVECEXP (x, i, j), insns);
1742 }
1743}
1744\f
1745/* Scan MOVABLES, and move the insns that deserve to be moved.
1746 If two matching movables are combined, replace one reg with the
1747 other throughout. */
1748
1749static void
1750move_movables (movables, threshold, insn_count, loop_start, end, nregs)
1751 struct movable *movables;
1752 int threshold;
1753 int insn_count;
1754 rtx loop_start;
1755 rtx end;
1756 int nregs;
1757{
1758 rtx new_start = 0;
1759 register struct movable *m;
1760 register rtx p;
1761 /* Map of pseudo-register replacements to handle combining
1762 when we move several insns that load the same value
1763 into different pseudo-registers. */
1764 rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx));
1765 char *already_moved = (char *) alloca (nregs);
1766
1767 bzero (already_moved, nregs);
4c9a05bc 1768 bzero ((char *) reg_map, nregs * sizeof (rtx));
1769
1770 num_movables = 0;
1771
1772 for (m = movables; m; m = m->next)
1773 {
1774 /* Describe this movable insn. */
1775
1776 if (loop_dump_stream)
1777 {
1778 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1779 INSN_UID (m->insn), m->regno, m->lifetime);
1780 if (m->consec > 0)
1781 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1782 if (m->cond)
1783 fprintf (loop_dump_stream, "cond ");
1784 if (m->force)
1785 fprintf (loop_dump_stream, "force ");
1786 if (m->global)
1787 fprintf (loop_dump_stream, "global ");
1788 if (m->done)
1789 fprintf (loop_dump_stream, "done ");
1790 if (m->move_insn)
1791 fprintf (loop_dump_stream, "move-insn ");
1792 if (m->match)
1793 fprintf (loop_dump_stream, "matches %d ",
1794 INSN_UID (m->match->insn));
1795 if (m->forces)
1796 fprintf (loop_dump_stream, "forces %d ",
1797 INSN_UID (m->forces->insn));
1798 }
1799
1800 /* Count movables. Value used in heuristics in strength_reduce. */
1801 num_movables++;
1802
1803 /* Ignore the insn if it's already done (it matched something else).
1804 Otherwise, see if it is now safe to move. */
1805
1806 if (!m->done
1807 && (! m->cond
1808 || (1 == invariant_p (m->set_src)
1809 && (m->dependencies == 0
1810 || 1 == invariant_p (m->dependencies))
1811 && (m->consec == 0
1812 || 1 == consec_sets_invariant_p (m->set_dest,
1813 m->consec + 1,
1814 m->insn))))
1815 && (! m->forces || m->forces->done))
1816 {
1817 register int regno;
1818 register rtx p;
1819 int savings = m->savings;
1820
1821 /* We have an insn that is safe to move.
1822 Compute its desirability. */
1823
1824 p = m->insn;
1825 regno = m->regno;
1826
1827 if (loop_dump_stream)
1828 fprintf (loop_dump_stream, "savings %d ", savings);
1829
1830 if (moved_once[regno])
1831 {
1832 insn_count *= 2;
1833
1834 if (loop_dump_stream)
1835 fprintf (loop_dump_stream, "halved since already moved ");
1836 }
1837
1838 /* An insn MUST be moved if we already moved something else
1839 which is safe only if this one is moved too: that is,
1840 if already_moved[REGNO] is nonzero. */
1841
1842 /* An insn is desirable to move if the new lifetime of the
1843 register is no more than THRESHOLD times the old lifetime.
1844 If it's not desirable, it means the loop is so big
1845 that moving won't speed things up much,
1846 and it is liable to make register usage worse. */
1847
1848 /* It is also desirable to move if it can be moved at no
1849 extra cost because something else was already moved. */
1850
1851 if (already_moved[regno]
e5eb27e5 1852 || flag_move_all_movables
1853 || (threshold * savings * m->lifetime) >= insn_count
1854 || (m->forces && m->forces->done
8deb8e2c 1855 && VARRAY_INT (n_times_used, m->forces->regno) == 1))
1856 {
1857 int count;
1858 register struct movable *m1;
1859 rtx first;
1860
1861 /* Now move the insns that set the reg. */
1862
1863 if (m->partial && m->match)
1864 {
1865 rtx newpat, i1;
1866 rtx r1, r2;
1867 /* Find the end of this chain of matching regs.
1868 Thus, we load each reg in the chain from that one reg.
1869 And that reg is loaded with 0 directly,
1870 since it has ->match == 0. */
1871 for (m1 = m; m1->match; m1 = m1->match);
1872 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1873 SET_DEST (PATTERN (m1->insn)));
1874 i1 = emit_insn_before (newpat, loop_start);
1875
1876 /* Mark the moved, invariant reg as being allowed to
1877 share a hard reg with the other matching invariant. */
1878 REG_NOTES (i1) = REG_NOTES (m->insn);
1879 r1 = SET_DEST (PATTERN (m->insn));
1880 r2 = SET_DEST (PATTERN (m1->insn));
1881 regs_may_share
1882 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1883 gen_rtx_EXPR_LIST (VOIDmode, r2,
1884 regs_may_share));
1885 delete_insn (m->insn);
1886
1887 if (new_start == 0)
1888 new_start = i1;
1889
1890 if (loop_dump_stream)
1891 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1892 }
1893 /* If we are to re-generate the item being moved with a
1894 new move insn, first delete what we have and then emit
1895 the move insn before the loop. */
1896 else if (m->move_insn)
1897 {
1898 rtx i1, temp;
1899
1900 for (count = m->consec; count >= 0; count--)
1901 {
1902 /* If this is the first insn of a library call sequence,
1903 skip to the end. */
1904 if (GET_CODE (p) != NOTE
5fd8383e 1905 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1906 p = XEXP (temp, 0);
1907
1908 /* If this is the last insn of a libcall sequence, then
1909 delete every insn in the sequence except the last.
1910 The last insn is handled in the normal manner. */
1911 if (GET_CODE (p) != NOTE
5fd8383e 1912 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1913 {
1914 temp = XEXP (temp, 0);
1915 while (temp != p)
1916 temp = delete_insn (temp);
1917 }
1918
9655bf95 1919 temp = p;
b4ad7b23 1920 p = delete_insn (p);
1921
1922 /* simplify_giv_expr expects that it can walk the insns
1923 at m->insn forwards and see this old sequence we are
1924 tossing here. delete_insn does preserve the next
1925 pointers, but when we skip over a NOTE we must fix
1926 it up. Otherwise that code walks into the non-deleted
1927 insn stream. */
dd202606 1928 while (p && GET_CODE (p) == NOTE)
9655bf95 1929 p = NEXT_INSN (temp) = NEXT_INSN (p);
1930 }
1931
1932 start_sequence ();
1933 emit_move_insn (m->set_dest, m->set_src);
c160c628 1934 temp = get_insns ();
1935 end_sequence ();
1936
1937 add_label_notes (m->set_src, temp);
1938
1939 i1 = emit_insns_before (temp, loop_start);
5fd8383e 1940 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
b4ad7b23 1941 REG_NOTES (i1)
1942 = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL,
1943 m->set_src, REG_NOTES (i1));
1944
1945 if (loop_dump_stream)
1946 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1947
1948 /* The more regs we move, the less we like moving them. */
1949 threshold -= 3;
1950 }
1951 else
1952 {
1953 for (count = m->consec; count >= 0; count--)
1954 {
1955 rtx i1, temp;
1956
0f41302f 1957 /* If first insn of libcall sequence, skip to end. */
1958 /* Do this at start of loop, since p is guaranteed to
1959 be an insn here. */
1960 if (GET_CODE (p) != NOTE
5fd8383e 1961 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
1962 p = XEXP (temp, 0);
1963
1964 /* If last insn of libcall sequence, move all
1965 insns except the last before the loop. The last
1966 insn is handled in the normal manner. */
1967 if (GET_CODE (p) != NOTE
5fd8383e 1968 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
1969 {
1970 rtx fn_address = 0;
1971 rtx fn_reg = 0;
1972 rtx fn_address_insn = 0;
1973
1974 first = 0;
1975 for (temp = XEXP (temp, 0); temp != p;
1976 temp = NEXT_INSN (temp))
1977 {
1978 rtx body;
1979 rtx n;
1980 rtx next;
1981
1982 if (GET_CODE (temp) == NOTE)
1983 continue;
1984
1985 body = PATTERN (temp);
1986
1987 /* Find the next insn after TEMP,
1988 not counting USE or NOTE insns. */
1989 for (next = NEXT_INSN (temp); next != p;
1990 next = NEXT_INSN (next))
1991 if (! (GET_CODE (next) == INSN
1992 && GET_CODE (PATTERN (next)) == USE)
1993 && GET_CODE (next) != NOTE)
1994 break;
1995
1996 /* If that is the call, this may be the insn
1997 that loads the function address.
1998
1999 Extract the function address from the insn
2000 that loads it into a register.
2001 If this insn was cse'd, we get incorrect code.
2002
2003 So emit a new move insn that copies the
2004 function address into the register that the
2005 call insn will use. flow.c will delete any
2006 redundant stores that we have created. */
2007 if (GET_CODE (next) == CALL_INSN
2008 && GET_CODE (body) == SET
2009 && GET_CODE (SET_DEST (body)) == REG
2010 && (n = find_reg_note (temp, REG_EQUAL,
2011 NULL_RTX)))
2012 {
2013 fn_reg = SET_SRC (body);
2014 if (GET_CODE (fn_reg) != REG)
2015 fn_reg = SET_DEST (body);
2016 fn_address = XEXP (n, 0);
2017 fn_address_insn = temp;
2018 }
2019 /* We have the call insn.
2020 If it uses the register we suspect it might,
2021 load it with the correct address directly. */
2022 if (GET_CODE (temp) == CALL_INSN
2023 && fn_address != 0
d9f8a199 2024 && reg_referenced_p (fn_reg, body))
2025 emit_insn_after (gen_move_insn (fn_reg,
2026 fn_address),
2027 fn_address_insn);
2028
2029 if (GET_CODE (temp) == CALL_INSN)
2030 {
2031 i1 = emit_call_insn_before (body, loop_start);
2032 /* Because the USAGE information potentially
2033 contains objects other than hard registers
2034 we need to copy it. */
8c4f5c09 2035 if (CALL_INSN_FUNCTION_USAGE (temp))
2036 CALL_INSN_FUNCTION_USAGE (i1)
2037 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
f97d29ce 2038 }
2039 else
2040 i1 = emit_insn_before (body, loop_start);
2041 if (first == 0)
2042 first = i1;
2043 if (temp == fn_address_insn)
2044 fn_address_insn = i1;
2045 REG_NOTES (i1) = REG_NOTES (temp);
2046 delete_insn (temp);
2047 }
2048 }
2049 if (m->savemode != VOIDmode)
2050 {
2051 /* P sets REG to zero; but we should clear only
2052 the bits that are not covered by the mode
2053 m->savemode. */
2054 rtx reg = m->set_dest;
2055 rtx sequence;
2056 rtx tem;
2057
2058 start_sequence ();
2059 tem = expand_binop
2060 (GET_MODE (reg), and_optab, reg,
2061 GEN_INT ((((HOST_WIDE_INT) 1
2062 << GET_MODE_BITSIZE (m->savemode)))
2063 - 1),
2064 reg, 1, OPTAB_LIB_WIDEN);
2065 if (tem == 0)
2066 abort ();
2067 if (tem != reg)
2068 emit_move_insn (reg, tem);
2069 sequence = gen_sequence ();
2070 end_sequence ();
2071 i1 = emit_insn_before (sequence, loop_start);
2072 }
2073 else if (GET_CODE (p) == CALL_INSN)
2074 {
2075 i1 = emit_call_insn_before (PATTERN (p), loop_start);
2076 /* Because the USAGE information potentially
2077 contains objects other than hard registers
2078 we need to copy it. */
8c4f5c09 2079 if (CALL_INSN_FUNCTION_USAGE (p))
2080 CALL_INSN_FUNCTION_USAGE (i1)
2081 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
f97d29ce 2082 }
2083 else if (count == m->consec && m->move_insn_first)
2084 {
2085 /* The SET_SRC might not be invariant, so we must
2086 use the REG_EQUAL note. */
2087 start_sequence ();
2088 emit_move_insn (m->set_dest, m->set_src);
2089 temp = get_insns ();
2090 end_sequence ();
2091
2092 add_label_notes (m->set_src, temp);
2093
2094 i1 = emit_insns_before (temp, loop_start);
2095 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
2096 REG_NOTES (i1)
2097 = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV
2098 : REG_EQUAL),
2099 m->set_src, REG_NOTES (i1));
2100 }
2101 else
2102 i1 = emit_insn_before (PATTERN (p), loop_start);
2103
2104 if (REG_NOTES (i1) == 0)
2105 {
2106 REG_NOTES (i1) = REG_NOTES (p);
b4ad7b23 2107
2108 /* If there is a REG_EQUAL note present whose value
2109 is not loop invariant, then delete it, since it
2110 may cause problems with later optimization passes.
2111 It is possible for cse to create such notes
2112 like this as a result of record_jump_cond. */
e6726b1f 2113
2114 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
2115 && ! invariant_p (XEXP (temp, 0)))
2116 remove_note (i1, temp);
2117 }
e6726b1f 2118
2119 if (new_start == 0)
2120 new_start = i1;
2121
2122 if (loop_dump_stream)
2123 fprintf (loop_dump_stream, " moved to %d",
2124 INSN_UID (i1));
2125
2126 /* If library call, now fix the REG_NOTES that contain
2127 insn pointers, namely REG_LIBCALL on FIRST
2128 and REG_RETVAL on I1. */
51723711 2129 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
2130 {
2131 XEXP (temp, 0) = first;
5fd8383e 2132 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
2133 XEXP (temp, 0) = i1;
2134 }
2135
9655bf95 2136 temp = p;
b4ad7b23 2137 delete_insn (p);
2138 p = NEXT_INSN (p);
2139
2140 /* simplify_giv_expr expects that it can walk the insns
2141 at m->insn forwards and see this old sequence we are
2142 tossing here. delete_insn does preserve the next
2143 pointers, but when we skip over a NOTE we must fix
2144 it up. Otherwise that code walks into the non-deleted
2145 insn stream. */
2146 while (p && GET_CODE (p) == NOTE)
2147 p = NEXT_INSN (temp) = NEXT_INSN (p);
2148 }
2149
2150 /* The more regs we move, the less we like moving them. */
2151 threshold -= 3;
2152 }
2153
2154 /* Any other movable that loads the same register
2155 MUST be moved. */
2156 already_moved[regno] = 1;
2157
2158 /* This reg has been moved out of one loop. */
2159 moved_once[regno] = 1;
2160
2161 /* The reg set here is now invariant. */
2162 if (! m->partial)
8deb8e2c 2163 VARRAY_INT (n_times_set, regno) = 0;
2164
2165 m->done = 1;
2166
2167 /* Change the length-of-life info for the register
2168 to say it lives at least the full length of this loop.
2169 This will help guide optimizations in outer loops. */
2170
b1f21e0a 2171 if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start))
2172 /* This is the old insn before all the moved insns.
2173 We can't use the moved insn because it is out of range
2174 in uid_luid. Only the old insns have luids. */
2175 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2176 if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end))
2177 REGNO_LAST_UID (regno) = INSN_UID (end);
2178
2179 /* Combine with this moved insn any other matching movables. */
2180
2181 if (! m->partial)
2182 for (m1 = movables; m1; m1 = m1->next)
2183 if (m1->match == m)
2184 {
2185 rtx temp;
2186
2187 /* Schedule the reg loaded by M1
 2188 for replacement so that it shares the reg of M.
 2189 If the modes differ (only possible in restricted
 2190 circumstances), make a SUBREG. */
2191 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2192 reg_map[m1->regno] = m->set_dest;
2193 else
2194 reg_map[m1->regno]
2195 = gen_lowpart_common (GET_MODE (m1->set_dest),
2196 m->set_dest);
2197
2198 /* Get rid of the matching insn
2199 and prevent further processing of it. */
2200 m1->done = 1;
2201
 2202 /* If library call, delete all insns except the last, which
 2203 is deleted below. */
2204 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2205 NULL_RTX)))
2206 {
2207 for (temp = XEXP (temp, 0); temp != m1->insn;
2208 temp = NEXT_INSN (temp))
2209 delete_insn (temp);
2210 }
2211 delete_insn (m1->insn);
2212
2213 /* Any other movable that loads the same register
2214 MUST be moved. */
2215 already_moved[m1->regno] = 1;
2216
2217 /* The reg merged here is now invariant,
2218 if the reg it matches is invariant. */
2219 if (! m->partial)
8deb8e2c 2220 VARRAY_INT (n_times_set, m1->regno) = 0;
2221 }
2222 }
2223 else if (loop_dump_stream)
2224 fprintf (loop_dump_stream, "not desirable");
2225 }
2226 else if (loop_dump_stream && !m->match)
2227 fprintf (loop_dump_stream, "not safe");
2228
2229 if (loop_dump_stream)
2230 fprintf (loop_dump_stream, "\n");
2231 }
2232
2233 if (new_start == 0)
2234 new_start = loop_start;
2235
2236 /* Go through all the instructions in the loop, making
2237 all the register substitutions scheduled in REG_MAP. */
2238 for (p = new_start; p != end; p = NEXT_INSN (p))
2239 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2240 || GET_CODE (p) == CALL_INSN)
2241 {
2242 replace_regs (PATTERN (p), reg_map, nregs, 0);
2243 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
da0c128e 2244 INSN_CODE (p) = -1;
2245 }
2246}
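/* [Editor's sketch -- not part of loop.c; the numbers are invented.
   The desirability test in move_movables reduces to the predicate
   below.  With threshold = 6, savings = 2, lifetime = 10 and
   insn_count = 100, we get 6 * 2 * 10 = 120 >= 100, so the insn is
   moved.  Had the reg already been moved out of another loop,
   insn_count is doubled to 200 and the same movable is rejected;
   each successful move also lowers threshold by 3, so later
   candidates must promise proportionally more.] */
#if 0
static int
move_desirable_p (threshold, savings, lifetime, insn_count)
     int threshold, savings, lifetime, insn_count;
{
  return threshold * savings * lifetime >= insn_count;
}
#endif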
2247\f
2248#if 0
2249/* Scan X and replace the address of any MEM in it with ADDR.
2250 REG is the address that MEM should have before the replacement. */
2251
2252static void
2253replace_call_address (x, reg, addr)
2254 rtx x, reg, addr;
2255{
2256 register enum rtx_code code;
2257 register int i;
2258 register char *fmt;
2259
2260 if (x == 0)
2261 return;
2262 code = GET_CODE (x);
2263 switch (code)
2264 {
2265 case PC:
2266 case CC0:
2267 case CONST_INT:
2268 case CONST_DOUBLE:
2269 case CONST:
2270 case SYMBOL_REF:
2271 case LABEL_REF:
2272 case REG:
2273 return;
2274
2275 case SET:
2276 /* Short cut for very common case. */
2277 replace_call_address (XEXP (x, 1), reg, addr);
2278 return;
2279
2280 case CALL:
2281 /* Short cut for very common case. */
2282 replace_call_address (XEXP (x, 0), reg, addr);
2283 return;
2284
2285 case MEM:
2286 /* If this MEM uses a reg other than the one we expected,
2287 something is wrong. */
2288 if (XEXP (x, 0) != reg)
2289 abort ();
2290 XEXP (x, 0) = addr;
2291 return;
2292
2293 default:
2294 break;
2295 }
2296
2297 fmt = GET_RTX_FORMAT (code);
2298 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2299 {
2300 if (fmt[i] == 'e')
2301 replace_call_address (XEXP (x, i), reg, addr);
2302 if (fmt[i] == 'E')
2303 {
2304 register int j;
2305 for (j = 0; j < XVECLEN (x, i); j++)
2306 replace_call_address (XVECEXP (x, i, j), reg, addr);
2307 }
2308 }
2309}
2310#endif
2311\f
2312/* Return the number of memory refs to addresses that vary
2313 in the rtx X. */
2314
2315static int
2316count_nonfixed_reads (x)
2317 rtx x;
2318{
2319 register enum rtx_code code;
2320 register int i;
2321 register char *fmt;
2322 int value;
2323
2324 if (x == 0)
2325 return 0;
2326
2327 code = GET_CODE (x);
2328 switch (code)
2329 {
2330 case PC:
2331 case CC0:
2332 case CONST_INT:
2333 case CONST_DOUBLE:
2334 case CONST:
2335 case SYMBOL_REF:
2336 case LABEL_REF:
2337 case REG:
2338 return 0;
2339
2340 case MEM:
2341 return ((invariant_p (XEXP (x, 0)) != 1)
2342 + count_nonfixed_reads (XEXP (x, 0)));
2343
2344 default:
2345 break;
2346 }
2347
2348 value = 0;
2349 fmt = GET_RTX_FORMAT (code);
2350 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2351 {
2352 if (fmt[i] == 'e')
2353 value += count_nonfixed_reads (XEXP (x, i));
2354 if (fmt[i] == 'E')
2355 {
2356 register int j;
2357 for (j = 0; j < XVECLEN (x, i); j++)
2358 value += count_nonfixed_reads (XVECEXP (x, i, j));
2359 }
2360 }
2361 return value;
2362}
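/* [Editor's note -- worked example, not part of loop.c.  In the MEM
   case above, (invariant_p (XEXP (x, 0)) != 1) contributes 1 not only
   for a varying address but also for a merely conditionally invariant
   one (invariant_p == 2), and the recursion then scans the address
   itself.  So with reg 100 varying,
	(mem (plus (mem (reg 100)) (const_int 4)))
   counts two nonfixed reads: one for each MEM.] */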
2363
2364\f
2365#if 0
2366/* P is an instruction that sets a register to the result of a ZERO_EXTEND.
2367 Replace it with an instruction to load just the low bytes
2368 if the machine supports such an instruction,
2369 and insert above LOOP_START an instruction to clear the register. */
2370
2371static void
2372constant_high_bytes (p, loop_start)
2373 rtx p, loop_start;
2374{
2375 register rtx new;
2376 register int insn_code_number;
2377
2378 /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...)))
2379 to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */
2380
2381 new = gen_rtx_SET (VOIDmode,
2382 gen_rtx_STRICT_LOW_PART (VOIDmode,
2383 gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)),
2384 SET_DEST (PATTERN (p)),
2385 0)),
2386 XEXP (SET_SRC (PATTERN (p)), 0));
2387 insn_code_number = recog (new, p);
2388
2389 if (insn_code_number)
2390 {
2391 register int i;
2392
2393 /* Clear destination register before the loop. */
2394 emit_insn_before (gen_rtx_SET (VOIDmode, SET_DEST (PATTERN (p)),
2395 const0_rtx),
2396 loop_start);
2397
2398 /* Inside the loop, just load the low part. */
2399 PATTERN (p) = new;
2400 }
2401}
2402#endif
2403\f
2404/* Scan a loop setting the variables `unknown_address_altered',
552bc76f 2405 `num_mem_sets', `loop_continue', `loops_enclosed', `loop_has_call',
2406 and `loop_has_volatile'. Also, fill in the arrays `loop_mems' and
2407 `loop_store_mems'. */
2408
2409static void
2410prescan_loop (start, end)
2411 rtx start, end;
2412{
2413 register int level = 1;
2414 rtx insn;
2415 int loop_has_multiple_exit_targets = 0;
2416 /* The label after END. Jumping here is just like falling off the
2417 end of the loop. We use next_nonnote_insn instead of next_label
2418 as a hedge against the (pathological) case where some actual insn
2419 might end up between the two. */
2420 rtx exit_target = next_nonnote_insn (end);
2421 if (exit_target == NULL_RTX || GET_CODE (exit_target) != CODE_LABEL)
2422 loop_has_multiple_exit_targets = 1;
2423
2424 unknown_address_altered = 0;
2425 loop_has_call = 0;
552bc76f 2426 loop_has_volatile = 0;
b4ad7b23 2427 loop_store_mems_idx = 0;
41a972a9 2428 loop_mems_idx = 0;
2429
2430 num_mem_sets = 0;
2431 loops_enclosed = 1;
2432 loop_continue = 0;
2433
2434 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2435 insn = NEXT_INSN (insn))
2436 {
2437 if (GET_CODE (insn) == NOTE)
2438 {
2439 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2440 {
2441 ++level;
2442 /* Count number of loops contained in this one. */
2443 loops_enclosed++;
2444 }
2445 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
2446 {
2447 --level;
2448 if (level == 0)
2449 {
2450 end = insn;
2451 break;
2452 }
2453 }
2454 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
2455 {
2456 if (level == 1)
2457 loop_continue = insn;
2458 }
2459 }
2460 else if (GET_CODE (insn) == CALL_INSN)
2461 {
2462 if (! CONST_CALL_P (insn))
2463 unknown_address_altered = 1;
2464 loop_has_call = 1;
2465 }
41a972a9 2466 else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
b4ad7b23 2467 {
2468 rtx label1 = NULL_RTX;
2469 rtx label2 = NULL_RTX;
2470
2471 if (volatile_refs_p (PATTERN (insn)))
2472 loop_has_volatile = 1;
2473
2474 note_stores (PATTERN (insn), note_addr_stored);
2475
2476 if (!loop_has_multiple_exit_targets
2477 && GET_CODE (insn) == JUMP_INSN
2478 && GET_CODE (PATTERN (insn)) == SET
2479 && SET_DEST (PATTERN (insn)) == pc_rtx)
552bc76f 2480 {
2481 if (GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE)
2482 {
2483 label1 = XEXP (SET_SRC (PATTERN (insn)), 1);
2484 label2 = XEXP (SET_SRC (PATTERN (insn)), 2);
2485 }
2486 else
2487 {
2488 label1 = SET_SRC (PATTERN (insn));
2489 }
2490
2491 do {
2492 if (label1 && label1 != pc_rtx)
2493 {
2494 if (GET_CODE (label1) != LABEL_REF)
2495 {
2496 /* Something tricky. */
2497 loop_has_multiple_exit_targets = 1;
2498 break;
2499 }
2500 else if (XEXP (label1, 0) != exit_target
2501 && LABEL_OUTSIDE_LOOP_P (label1))
2502 {
2503 /* A jump outside the current loop. */
2504 loop_has_multiple_exit_targets = 1;
2505 break;
2506 }
2507 }
552bc76f 2508
2509 label1 = label2;
2510 label2 = NULL_RTX;
2511 } while (label1);
552bc76f 2512 }
b4ad7b23 2513 }
2514 else if (GET_CODE (insn) == RETURN)
2515 loop_has_multiple_exit_targets = 1;
b4ad7b23 2516 }
2517
2518 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
2519 if (/* We can't tell what MEMs are aliased by what. */
2520 !unknown_address_altered
2521 /* An exception thrown by a called function might land us
2522 anywhere. */
2523 && !loop_has_call
2524 /* We don't want loads for MEMs moved to a location before the
2525 one at which their stack memory becomes allocated. (Note
2526 that this is not a problem for malloc, etc., since those
 2527 require actual function calls.) */
2528 && !current_function_calls_alloca
2529 /* There are ways to leave the loop other than falling off the
2530 end. */
2531 && !loop_has_multiple_exit_targets)
2532 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2533 insn = NEXT_INSN (insn))
2534 for_each_rtx (&insn, insert_loop_mem, 0);
2535}
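/* [Editor's sketch -- not part of loop.c.  The guard above, restated:
   LOOP_MEMS is only filled in when hoisting a MEM load cannot go wrong
   through aliasing, a call (which may throw), alloca, or an exit other
   than falling off the end.  As a predicate over the same flags
   (loop_has_multiple_exit_targets is local to prescan_loop): */
#if 0
static int
loop_mems_scan_ok_p (loop_has_multiple_exit_targets)
     int loop_has_multiple_exit_targets;
{
  return (! unknown_address_altered
	  && ! loop_has_call
	  && ! current_function_calls_alloca
	  && ! loop_has_multiple_exit_targets);
}
#endif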
2536\f
2537/* Scan the function looking for loops. Record the start and end of each loop.
2538 Also mark as invalid loops any loops that contain a setjmp or are branched
2539 to from outside the loop. */
2540
2541static void
2542find_and_verify_loops (f)
2543 rtx f;
2544{
034dabc9 2545 rtx insn, label;
2546 int current_loop = -1;
2547 int next_loop = -1;
2548 int loop;
2549
2550 /* If there are jumps to undefined labels,
2551 treat them as jumps out of any/all loops.
2552 This also avoids writing past end of tables when there are no loops. */
2553 uid_loop_num[0] = -1;
2554
2555 /* Find boundaries of loops, mark which loops are contained within
2556 loops, and invalidate loops that have setjmp. */
2557
2558 for (insn = f; insn; insn = NEXT_INSN (insn))
2559 {
2560 if (GET_CODE (insn) == NOTE)
2561 switch (NOTE_LINE_NUMBER (insn))
2562 {
2563 case NOTE_INSN_LOOP_BEG:
2564 loop_number_loop_starts[++next_loop] = insn;
2565 loop_number_loop_ends[next_loop] = 0;
2566 loop_outer_loop[next_loop] = current_loop;
2567 loop_invalid[next_loop] = 0;
2568 loop_number_exit_labels[next_loop] = 0;
353127c2 2569 loop_number_exit_count[next_loop] = 0;
2570 current_loop = next_loop;
2571 break;
2572
2573 case NOTE_INSN_SETJMP:
2574 /* In this case, we must invalidate our current loop and any
2575 enclosing loop. */
2576 for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop])
2577 {
2578 loop_invalid[loop] = 1;
2579 if (loop_dump_stream)
2580 fprintf (loop_dump_stream,
2581 "\nLoop at %d ignored due to setjmp.\n",
2582 INSN_UID (loop_number_loop_starts[loop]));
2583 }
2584 break;
2585
2586 case NOTE_INSN_LOOP_END:
2587 if (current_loop == -1)
2588 abort ();
2589
2590 loop_number_loop_ends[current_loop] = insn;
2591 current_loop = loop_outer_loop[current_loop];
2592 break;
2593
2594 default:
2595 break;
2596 }
2597
2598 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2599 enclosing loop, but this doesn't matter. */
2600 uid_loop_num[INSN_UID (insn)] = current_loop;
2601 }
2602
2603 /* Any loop containing a label used in an initializer must be invalidated,
2604 because it can be jumped into from anywhere. */
2605
2606 for (label = forced_labels; label; label = XEXP (label, 1))
2607 {
2608 int loop_num;
2609
2610 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2611 loop_num != -1;
2612 loop_num = loop_outer_loop[loop_num])
2613 loop_invalid[loop_num] = 1;
2614 }
2615
2616 /* Any loop containing a label used for an exception handler must be
2617 invalidated, because it can be jumped into from anywhere. */
2618
2619 for (label = exception_handler_labels; label; label = XEXP (label, 1))
2620 {
2621 int loop_num;
2622
2623 for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))];
2624 loop_num != -1;
2625 loop_num = loop_outer_loop[loop_num])
2626 loop_invalid[loop_num] = 1;
2627 }
2628
2629 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2630 loop that it is not contained within, that loop is marked invalid.
2631 If any INSN or CALL_INSN uses a label's address, then the loop containing
2632 that label is marked invalid, because it could be jumped into from
2633 anywhere.
2634
2635 Also look for blocks of code ending in an unconditional branch that
2636 exits the loop. If such a block is surrounded by a conditional
2637 branch around the block, move the block elsewhere (see below) and
2638 invert the jump to point to the code block. This may eliminate a
2639 label in our loop and will simplify processing by both us and a
2640 possible second cse pass. */
2641
2642 for (insn = f; insn; insn = NEXT_INSN (insn))
034dabc9 2643 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
2644 {
2645 int this_loop_num = uid_loop_num[INSN_UID (insn)];
2646
2647 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2648 {
2649 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2650 if (note)
2651 {
2652 int loop_num;
2653
2654 for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))];
2655 loop_num != -1;
2656 loop_num = loop_outer_loop[loop_num])
2657 loop_invalid[loop_num] = 1;
2658 }
2659 }
2660
2661 if (GET_CODE (insn) != JUMP_INSN)
2662 continue;
2663
2664 mark_loop_jump (PATTERN (insn), this_loop_num);
2665
2666 /* See if this is an unconditional branch outside the loop. */
2667 if (this_loop_num != -1
2668 && (GET_CODE (PATTERN (insn)) == RETURN
2669 || (simplejump_p (insn)
2670 && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))]
2671 != this_loop_num)))
2672 && get_max_uid () < max_uid_for_loop)
2673 {
2674 rtx p;
2675 rtx our_next = next_real_insn (insn);
2676 int dest_loop;
2677 int outer_loop = -1;
2678
2679 /* Go backwards until we reach the start of the loop, a label,
2680 or a JUMP_INSN. */
2681 for (p = PREV_INSN (insn);
2682 GET_CODE (p) != CODE_LABEL
2683 && ! (GET_CODE (p) == NOTE
2684 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2685 && GET_CODE (p) != JUMP_INSN;
2686 p = PREV_INSN (p))
2687 ;
2688
2689 /* Check for the case where we have a jump to an inner nested
2690 loop, and do not perform the optimization in that case. */
2691
fdccb6df 2692 if (JUMP_LABEL (insn))
edf711a4 2693 {
2694 dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))];
2695 if (dest_loop != -1)
2696 {
2697 for (outer_loop = dest_loop; outer_loop != -1;
2698 outer_loop = loop_outer_loop[outer_loop])
2699 if (outer_loop == this_loop_num)
2700 break;
2701 }
edf711a4 2702 }
edf711a4 2703
2704 /* Make sure that the target of P is within the current loop. */
2705
9a8e74f0 2706 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
2707 && uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num)
2708 outer_loop = this_loop_num;
2709
2710 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2711 we have a block of code to try to move.
2712
2713 We look backward and then forward from the target of INSN
2714 to find a BARRIER at the same loop depth as the target.
2715 If we find such a BARRIER, we make a new label for the start
2716 of the block, invert the jump in P and point it to that label,
2717 and move the block of code to the spot we found. */
2718
2719 if (outer_loop == -1
2720 && GET_CODE (p) == JUMP_INSN
2721 && JUMP_LABEL (p) != 0
2722 /* Just ignore jumps to labels that were never emitted.
2723 These always indicate compilation errors. */
2724 && INSN_UID (JUMP_LABEL (p)) != 0
2725 && condjump_p (p)
2726 && ! simplejump_p (p)
2727 && next_real_insn (JUMP_LABEL (p)) == our_next)
2728 {
2729 rtx target
2730 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
2731 int target_loop_num = uid_loop_num[INSN_UID (target)];
2732 rtx loc;
2733
2734 for (loc = target; loc; loc = PREV_INSN (loc))
2735 if (GET_CODE (loc) == BARRIER
2736 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2737 break;
2738
2739 if (loc == 0)
2740 for (loc = target; loc; loc = NEXT_INSN (loc))
2741 if (GET_CODE (loc) == BARRIER
2742 && uid_loop_num[INSN_UID (loc)] == target_loop_num)
2743 break;
2744
2745 if (loc)
2746 {
2747 rtx cond_label = JUMP_LABEL (p);
2748 rtx new_label = get_label_after (p);
2749
2750 /* Ensure our label doesn't go away. */
2751 LABEL_NUSES (cond_label)++;
2752
2753 /* Verify that uid_loop_num is large enough and that
0f41302f 2754 we can invert P. */
1c01e9df 2755 if (invert_jump (p, new_label))
2756 {
2757 rtx q, r;
2758
2759 /* If no suitable BARRIER was found, create a suitable
2760 one before TARGET. Since TARGET is a fall through
 2761 path, we'll need to insert a jump around our block
 2762 and add a BARRIER before TARGET.
2763
2764 This creates an extra unconditional jump outside
2765 the loop. However, the benefits of removing rarely
2766 executed instructions from inside the loop usually
 2767 outweigh the cost of the extra unconditional jump
2768 outside the loop. */
2769 if (loc == 0)
2770 {
2771 rtx temp;
2772
2773 temp = gen_jump (JUMP_LABEL (insn));
2774 temp = emit_jump_insn_before (temp, target);
2775 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2776 LABEL_NUSES (JUMP_LABEL (insn))++;
2777 loc = emit_barrier_before (target);
2778 }
2779
2780 /* Include the BARRIER after INSN and copy the
2781 block after LOC. */
915f619f 2782 new_label = squeeze_notes (new_label, NEXT_INSN (insn));
2783 reorder_insns (new_label, NEXT_INSN (insn), loc);
2784
2785 /* All those insns are now in TARGET_LOOP_NUM. */
2786 for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn));
2787 q = NEXT_INSN (q))
2788 uid_loop_num[INSN_UID (q)] = target_loop_num;
2789
2790 /* The label jumped to by INSN is no longer a loop exit.
2791 Unless INSN does not have a label (e.g., it is a
2792 RETURN insn), search loop_number_exit_labels to find
2793 its label_ref, and remove it. Also turn off
2794 LABEL_OUTSIDE_LOOP_P bit. */
2795 if (JUMP_LABEL (insn))
2796 {
2797 int loop_num;
2798
2799 for (q = 0,
2800 r = loop_number_exit_labels[this_loop_num];
2801 r; q = r, r = LABEL_NEXTREF (r))
2802 if (XEXP (r, 0) == JUMP_LABEL (insn))
2803 {
2804 LABEL_OUTSIDE_LOOP_P (r) = 0;
2805 if (q)
2806 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2807 else
2808 loop_number_exit_labels[this_loop_num]
2809 = LABEL_NEXTREF (r);
2810 break;
2811 }
2812
2813 for (loop_num = this_loop_num;
2814 loop_num != -1 && loop_num != target_loop_num;
2815 loop_num = loop_outer_loop[loop_num])
2816 loop_number_exit_count[loop_num]--;
2817
0f41302f 2818 /* If we didn't find it, then something is wrong. */
2819 if (! r)
2820 abort ();
2821 }
2822
2823 /* P is now a jump outside the loop, so it must be put
2824 in loop_number_exit_labels, and marked as such.
2825 The easiest way to do this is to just call
2826 mark_loop_jump again for P. */
2827 mark_loop_jump (PATTERN (p), this_loop_num);
2828
2829 /* If INSN now jumps to the insn after it,
2830 delete INSN. */
2831 if (JUMP_LABEL (insn) != 0
2832 && (next_real_insn (JUMP_LABEL (insn))
2833 == next_real_insn (insn)))
2834 delete_insn (insn);
2835 }
2836
2837 /* Continue the loop after where the conditional
2838 branch used to jump, since the only branch insn
2839 in the block (if it still remains) is an inter-loop
2840 branch and hence needs no processing. */
2841 insn = NEXT_INSN (cond_label);
2842
2843 if (--LABEL_NUSES (cond_label) == 0)
2844 delete_insn (cond_label);
2845
2846 /* This loop will be continued with NEXT_INSN (insn). */
2847 insn = PREV_INSN (insn);
2848 }
2849 }
2850 }
2851 }
2852}
2853
2854/* If any label in X jumps to a loop different from LOOP_NUM and any of the
2855 loops it is contained in, mark the target loop invalid.
2856
2857 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
2858
2859static void
2860mark_loop_jump (x, loop_num)
2861 rtx x;
2862 int loop_num;
2863{
2864 int dest_loop;
2865 int outer_loop;
2866 int i;
2867
2868 switch (GET_CODE (x))
2869 {
2870 case PC:
2871 case USE:
2872 case CLOBBER:
2873 case REG:
2874 case MEM:
2875 case CONST_INT:
2876 case CONST_DOUBLE:
2877 case RETURN:
2878 return;
2879
2880 case CONST:
2881 /* There could be a label reference in here. */
2882 mark_loop_jump (XEXP (x, 0), loop_num);
2883 return;
2884
2885 case PLUS:
2886 case MINUS:
2887 case MULT:
2888 mark_loop_jump (XEXP (x, 0), loop_num);
2889 mark_loop_jump (XEXP (x, 1), loop_num);
2890 return;
2891
2892 case SIGN_EXTEND:
2893 case ZERO_EXTEND:
2894 mark_loop_jump (XEXP (x, 0), loop_num);
2895 return;
2896
2897 case LABEL_REF:
2898 dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))];
2899
2900 /* Link together all labels that branch outside the loop. This
2901 is used by final_[bg]iv_value and the loop unrolling code. Also
2902 mark this LABEL_REF so we know that this branch should predict
2903 false. */
2904
2905 /* A check to make sure the label is not in an inner nested loop,
2906 since this does not count as a loop exit. */
2907 if (dest_loop != -1)
2908 {
2909 for (outer_loop = dest_loop; outer_loop != -1;
2910 outer_loop = loop_outer_loop[outer_loop])
2911 if (outer_loop == loop_num)
2912 break;
2913 }
2914 else
2915 outer_loop = -1;
2916
2917 if (loop_num != -1 && outer_loop == -1)
2918 {
2919 LABEL_OUTSIDE_LOOP_P (x) = 1;
2920 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2921 loop_number_exit_labels[loop_num] = x;
2922
2923 for (outer_loop = loop_num;
2924 outer_loop != -1 && outer_loop != dest_loop;
2925 outer_loop = loop_outer_loop[outer_loop])
2926 loop_number_exit_count[outer_loop]++;
2927 }
2928
2929 /* If this is inside a loop, but not in the current loop or one enclosed
2930 by it, it invalidates at least one loop. */
2931
2932 if (dest_loop == -1)
2933 return;
2934
2935 /* We must invalidate every nested loop containing the target of this
2936 label, except those that also contain the jump insn. */
2937
2938 for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop])
2939 {
2940 /* Stop when we reach a loop that also contains the jump insn. */
2941 for (outer_loop = loop_num; outer_loop != -1;
2942 outer_loop = loop_outer_loop[outer_loop])
2943 if (dest_loop == outer_loop)
2944 return;
2945
2946 /* If we get here, we know we need to invalidate a loop. */
2947 if (loop_dump_stream && ! loop_invalid[dest_loop])
2948 fprintf (loop_dump_stream,
2949 "\nLoop at %d ignored due to multiple entry points.\n",
2950 INSN_UID (loop_number_loop_starts[dest_loop]));
2951
2952 loop_invalid[dest_loop] = 1;
2953 }
2954 return;
2955
2956 case SET:
2957 /* If this is not setting pc, ignore. */
2958 if (SET_DEST (x) == pc_rtx)
2959 mark_loop_jump (SET_SRC (x), loop_num);
2960 return;
2961
2962 case IF_THEN_ELSE:
2963 mark_loop_jump (XEXP (x, 1), loop_num);
2964 mark_loop_jump (XEXP (x, 2), loop_num);
2965 return;
2966
2967 case PARALLEL:
2968 case ADDR_VEC:
2969 for (i = 0; i < XVECLEN (x, 0); i++)
2970 mark_loop_jump (XVECEXP (x, 0, i), loop_num);
2971 return;
2972
2973 case ADDR_DIFF_VEC:
2974 for (i = 0; i < XVECLEN (x, 1); i++)
2975 mark_loop_jump (XVECEXP (x, 1, i), loop_num);
2976 return;
2977
2978 default:
2979 /* Treat anything else (such as a symbol_ref)
2980 as a branch out of this loop, but not into any loop. */
2981
2982 if (loop_num != -1)
353127c2 2983 {
2984#ifdef HAIFA
2985 LABEL_OUTSIDE_LOOP_P (x) = 1;
2986 LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num];
2987#endif /* HAIFA */
2988
353127c2 2989 loop_number_exit_labels[loop_num] = x;
b6ccc3fb 2990
2991 for (outer_loop = loop_num; outer_loop != -1;
2992 outer_loop = loop_outer_loop[outer_loop])
2993 loop_number_exit_count[outer_loop]++;
2994 }
b6ccc3fb 2995 return;
2996 }
2997}
2998\f
2999/* Return nonzero if there is a label in the range from
 3000 insn INSN to and including the insn whose luid is END.
3001 INSN must have an assigned luid (i.e., it must not have
3002 been previously created by loop.c). */
3003
3004static int
3005labels_in_range_p (insn, end)
3006 rtx insn;
3007 int end;
3008{
3009 while (insn && INSN_LUID (insn) <= end)
3010 {
3011 if (GET_CODE (insn) == CODE_LABEL)
3012 return 1;
3013 insn = NEXT_INSN (insn);
3014 }
3015
3016 return 0;
3017}
3018
3019/* Record that a memory reference X is being set. */
3020
3021static void
693e265f 3022note_addr_stored (x, y)
b4ad7b23 3023 rtx x;
693e265f 3024 rtx y ATTRIBUTE_UNUSED;
3025{
3026 register int i;
3027
3028 if (x == 0 || GET_CODE (x) != MEM)
3029 return;
3030
3031 /* Count number of memory writes.
3032 This affects heuristics in strength_reduce. */
3033 num_mem_sets++;
3034
3035 /* BLKmode MEM means all memory is clobbered. */
3036 if (GET_MODE (x) == BLKmode)
3037 unknown_address_altered = 1;
3038
3039 if (unknown_address_altered)
3040 return;
3041
3042 for (i = 0; i < loop_store_mems_idx; i++)
3043 if (rtx_equal_p (XEXP (loop_store_mems[i], 0), XEXP (x, 0))
3044 && MEM_IN_STRUCT_P (x) == MEM_IN_STRUCT_P (loop_store_mems[i]))
3045 {
3046 /* We are storing at the same address as previously noted. Save the
3047 wider reference. */
3048 if (GET_MODE_SIZE (GET_MODE (x))
3049 > GET_MODE_SIZE (GET_MODE (loop_store_mems[i])))
3050 loop_store_mems[i] = x;
3051 break;
3052 }
3053
3054 if (i == NUM_STORES)
3055 unknown_address_altered = 1;
3056
3057 else if (i == loop_store_mems_idx)
3058 loop_store_mems[loop_store_mems_idx++] = x;
3059}
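/* [Editor's note -- worked example, not part of loop.c, on an assumed
   sequence of stores:
     (mem:HI (reg 100))   recorded in a fresh loop_store_mems slot;
     (mem:SI (reg 100))   same address, wider mode: the slot is
			  widened to the SI reference;
     (mem:BLK ...)	  BLKmode clobbers everything, so
			  unknown_address_altered is set.
   Overflowing the NUM_STORES table likewise gives up and sets
   unknown_address_altered.] */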
3060\f
3061/* Return nonzero if the rtx X is invariant over the current loop.
3062
3063 The value is 2 if we refer to something only conditionally invariant.
3064
3065 If `unknown_address_altered' is nonzero, no memory ref is invariant.
3066 Otherwise, a memory ref is invariant if it does not conflict with
3067 anything stored in `loop_store_mems'. */
3068
3069int
3070invariant_p (x)
3071 register rtx x;
3072{
3073 register int i;
3074 register enum rtx_code code;
3075 register char *fmt;
3076 int conditional = 0;
3077
3078 if (x == 0)
3079 return 1;
3080 code = GET_CODE (x);
3081 switch (code)
3082 {
3083 case CONST_INT:
3084 case CONST_DOUBLE:
3085 case SYMBOL_REF:
3086 case CONST:
3087 return 1;
3088
3089 case LABEL_REF:
3090 /* A LABEL_REF is normally invariant, however, if we are unrolling
3091 loops, and this label is inside the loop, then it isn't invariant.
3092 This is because each unrolled copy of the loop body will have
3093 a copy of this label. If this was invariant, then an insn loading
3094 the address of this label into a register might get moved outside
3095 the loop, and then each loop body would end up using the same label.
3096
3097 We don't know the loop bounds here though, so just fail for all
3098 labels. */
81797aba 3099 if (flag_unroll_loops)
3100 return 0;
3101 else
3102 return 1;
3103
3104 case PC:
3105 case CC0:
3106 case UNSPEC_VOLATILE:
3107 return 0;
3108
3109 case REG:
3110 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3111 since the reg might be set by initialization within the loop. */
3112
3113 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
3114 || x == arg_pointer_rtx)
3115 && ! current_function_has_nonlocal_goto)
b4ad7b23 3116 return 1;
1f027d54 3117
3118 if (loop_has_call
3119 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3120 return 0;
1f027d54 3121
8deb8e2c 3122 if (VARRAY_INT (n_times_set, REGNO (x)) < 0)
b4ad7b23 3123 return 2;
1f027d54 3124
8deb8e2c 3125 return VARRAY_INT (n_times_set, REGNO (x)) == 0;
3126
3127 case MEM:
3128 /* Volatile memory references must be rejected. Do this before
3129 checking for read-only items, so that volatile read-only items
3130 will be rejected also. */
3131 if (MEM_VOLATILE_P (x))
3132 return 0;
3133
3134 /* Read-only items (such as constants in a constant pool) are
3135 invariant if their address is. */
3136 if (RTX_UNCHANGING_P (x))
3137 break;
3138
3139 /* If we filled the table (or had a subroutine call), any location
3140 in memory could have been clobbered. */
667a4593 3141 if (unknown_address_altered)
3142 return 0;
3143
3144 /* See if there is any dependence between a store and this load. */
3145 for (i = loop_store_mems_idx - 1; i >= 0; i--)
9ae8ffe7 3146 if (true_dependence (loop_store_mems[i], VOIDmode, x, rtx_varies_p))
3147 return 0;
3148
3149 /* It's not invalidated by a store in memory
3150 but we must still verify the address is invariant. */
3151 break;
3152
3153 case ASM_OPERANDS:
3154 /* Don't mess with insns declared volatile. */
3155 if (MEM_VOLATILE_P (x))
3156 return 0;
3157 break;
3158
3159 default:
3160 break;
3161 }
3162
3163 fmt = GET_RTX_FORMAT (code);
3164 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3165 {
3166 if (fmt[i] == 'e')
3167 {
3168 int tem = invariant_p (XEXP (x, i));
3169 if (tem == 0)
3170 return 0;
3171 if (tem == 2)
3172 conditional = 1;
3173 }
3174 else if (fmt[i] == 'E')
3175 {
3176 register int j;
3177 for (j = 0; j < XVECLEN (x, i); j++)
3178 {
3179 int tem = invariant_p (XVECEXP (x, i, j));
3180 if (tem == 0)
3181 return 0;
3182 if (tem == 2)
3183 conditional = 1;
3184 }
3185
3186 }
3187 }
3188
3189 return 1 + conditional;
3190}
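/* [Editor's note -- worked example, not part of loop.c.  invariant_p
   is three-valued: 0 = varies, 1 = invariant, 2 = only conditionally
   invariant (a reg whose n_times_set entry is negative).  The final
   `return 1 + conditional' propagates the weaker answer upward; e.g.
   for (plus (reg 100) (const_int 4)) with reg 100 conditionally
   invariant:
     invariant_p ((const_int 4))  == 1,
     invariant_p ((reg 100))	  == 2, so conditional = 1,
     invariant_p (the whole PLUS) == 2.] */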
3191
3192\f
3193/* Return nonzero if all the insns in the loop that set REG
3194 are INSN and the immediately following insns,
3195 and if each of those insns sets REG in an invariant way
3196 (not counting uses of REG in them).
3197
3198 The value is 2 if some of these insns are only conditionally invariant.
3199
3200 We assume that INSN itself is the first set of REG
3201 and that its source is invariant. */
3202
3203static int
3204consec_sets_invariant_p (reg, n_sets, insn)
3205 int n_sets;
3206 rtx reg, insn;
3207{
3208 register rtx p = insn;
3209 register int regno = REGNO (reg);
3210 rtx temp;
3211 /* Number of sets we have to insist on finding after INSN. */
3212 int count = n_sets - 1;
8deb8e2c 3213 int old = VARRAY_INT (n_times_set, regno);
3214 int value = 0;
3215 int this;
3216
3217 /* If N_SETS hit the limit, we can't rely on its value. */
3218 if (n_sets == 127)
3219 return 0;
3220
8deb8e2c 3221 VARRAY_INT (n_times_set, regno) = 0;
3222
3223 while (count > 0)
3224 {
3225 register enum rtx_code code;
3226 rtx set;
3227
3228 p = NEXT_INSN (p);
3229 code = GET_CODE (p);
3230
38e01259 3231 /* If library call, skip to end of it. */
5fd8383e 3232 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3233 p = XEXP (temp, 0);
3234
3235 this = 0;
3236 if (code == INSN
3237 && (set = single_set (p))
3238 && GET_CODE (SET_DEST (set)) == REG
3239 && REGNO (SET_DEST (set)) == regno)
3240 {
3241 this = invariant_p (SET_SRC (set));
3242 if (this != 0)
3243 value |= this;
51723711 3244 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
b4ad7b23 3245 {
3246 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3247 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3248 notes are OK. */
3249 this = (CONSTANT_P (XEXP (temp, 0))
3250 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
3251 && invariant_p (XEXP (temp, 0))));
3252 if (this != 0)
3253 value |= this;
3254 }
3255 }
3256 if (this != 0)
3257 count--;
3258 else if (code != NOTE)
3259 {
8deb8e2c 3260 VARRAY_INT (n_times_set, regno) = old;
3261 return 0;
3262 }
3263 }
3264
8deb8e2c 3265 VARRAY_INT (n_times_set, regno) = old;
 3266 /* If invariant_p ever returned 2, the result is 3, not 1; callers test against exactly 1. */
3267 return 1 + (value & 2);
3268}
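/* [Editor's note -- not part of loop.c.  The accumulation above reuses
   the 0/1/2 encoding of invariant_p: OR-ing each set's result into
   `value' records in bit 1 whether any set was only conditionally
   invariant, so `1 + (value & 2)' yields 1 when every set was
   unconditionally invariant and 3 otherwise.  Callers such as
   move_movables compare the result against exactly 1.] */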
3269
3270#if 0
3271/* I don't think this condition is sufficient to allow INSN
3272 to be moved, so we no longer test it. */
3273
3274/* Return 1 if all insns in the basic block of INSN and following INSN
3275 that set REG are invariant according to TABLE. */
3276
3277static int
3278all_sets_invariant_p (reg, insn, table)
3279 rtx reg, insn;
3280 short *table;
3281{
3282 register rtx p = insn;
3283 register int regno = REGNO (reg);
3284
3285 while (1)
3286 {
3287 register enum rtx_code code;
3288 p = NEXT_INSN (p);
3289 code = GET_CODE (p);
3290 if (code == CODE_LABEL || code == JUMP_INSN)
3291 return 1;
3292 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3293 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3294 && REGNO (SET_DEST (PATTERN (p))) == regno)
3295 {
3296 if (!invariant_p (SET_SRC (PATTERN (p)), table))
3297 return 0;
3298 }
3299 }
3300}
3301#endif /* 0 */
3302\f
3303/* Look at all uses (not sets) of registers in X. For each, if it is
3304 the single use, set USAGE[REGNO] to INSN; if there was a previous use in
3305 a different insn, set USAGE[REGNO] to const0_rtx. */
3306
3307static void
3308find_single_use_in_loop (insn, x, usage)
3309 rtx insn;
3310 rtx x;
8deb8e2c 3311 varray_type usage;
3312{
3313 enum rtx_code code = GET_CODE (x);
3314 char *fmt = GET_RTX_FORMAT (code);
3315 int i, j;
3316
3317 if (code == REG)
3318 VARRAY_RTX (usage, REGNO (x))
3319 = (VARRAY_RTX (usage, REGNO (x)) != 0
3320 && VARRAY_RTX (usage, REGNO (x)) != insn)
3321 ? const0_rtx : insn;
3322
3323 else if (code == SET)
3324 {
3325 /* Don't count SET_DEST if it is a REG; otherwise count things
3326 in SET_DEST because if a register is partially modified, it won't
3327 show up as a potential movable so we don't care how USAGE is set
3328 for it. */
3329 if (GET_CODE (SET_DEST (x)) != REG)
3330 find_single_use_in_loop (insn, SET_DEST (x), usage);
3331 find_single_use_in_loop (insn, SET_SRC (x), usage);
3332 }
3333 else
3334 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3335 {
3336 if (fmt[i] == 'e' && XEXP (x, i) != 0)
3337 find_single_use_in_loop (insn, XEXP (x, i), usage);
3338 else if (fmt[i] == 'E')
3339 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3340 find_single_use_in_loop (insn, XVECEXP (x, i, j), usage);
3341 }
3342}
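/* Editor's sketch, not part of loop.c: the USAGE table above is a tiny
   three-state lattice per register -- 0 (no use seen), the insn itself
   (exactly one using insn), const0_rtx (used by more than one insn).
   The same update rule in miniature, with void * standing in for rtx:  */

static void
note_use_sketch (usage, regno, insn, many)
     void **usage;		/* per-regno state table */
     int regno;
     void *insn;		/* insn containing the use */
     void *many;		/* sentinel playing the role of const0_rtx */
{
  usage[regno] = (usage[regno] != 0 && usage[regno] != insn)
    ? many			/* a second, different insn: demote to "many" */
    : insn;			/* first use, or another use in the same insn */
}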
3343\f
3344/* Increment N_TIMES_SET at the index of each register
3345 that is modified by an insn between FROM and TO.
3346 If the value of an element of N_TIMES_SET becomes 127 or more,
3347 stop incrementing it, to avoid overflow.
3348
3349 Store in SINGLE_USAGE[I] the single insn in which register I is
3350 used, if it is only used once. Otherwise, it is set to 0 (for no
3351 uses) or const0_rtx for more than one use. This parameter may be zero,
3352 in which case this processing is not done.
3353
 3354 Store in *COUNT_PTR the number of actual instructions
 3355 in the loop. We use this to decide what is worth moving out. */
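/* Editor's note, not part of loop.c: the counts saturate so they fit in
   a signed char; once an entry reaches 127 it only means "127 or more".
   That is why consec_sets_invariant_p above gives up when it reads a
   count of 127 -- past the clamp the exact number of sets is lost.  */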
3356
3357/* last_set[n] is nonzero iff reg n has been set in the current basic block.
3358 In that case, it is the insn that last set reg n. */
3359
3360static void
3361count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs)
3362 register rtx from, to;
3363 varray_type may_not_move;
3364 varray_type single_usage;
3365 int *count_ptr;
3366 int nregs;
3367{
3368 register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx));
3369 register rtx insn;
3370 register int count = 0;
3371 register rtx dest;
3372
4c9a05bc 3373 bzero ((char *) last_set, nregs * sizeof (rtx));
3374 for (insn = from; insn != to; insn = NEXT_INSN (insn))
3375 {
3376 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
3377 {
3378 ++count;
3379
3380 /* If requested, record registers that have exactly one use. */
3381 if (single_usage)
3382 {
3383 find_single_use_in_loop (insn, PATTERN (insn), single_usage);
3384
3385 /* Include uses in REG_EQUAL notes. */
3386 if (REG_NOTES (insn))
3387 find_single_use_in_loop (insn, REG_NOTES (insn), single_usage);
3388 }
3389
3390 if (GET_CODE (PATTERN (insn)) == CLOBBER
3391 && GET_CODE (XEXP (PATTERN (insn), 0)) == REG)
3392 /* Don't move a reg that has an explicit clobber.
3393 We might do so sometimes, but it's not worth the pain. */
8deb8e2c 3394 VARRAY_CHAR (may_not_move, REGNO (XEXP (PATTERN (insn), 0))) = 1;
3395
3396 if (GET_CODE (PATTERN (insn)) == SET
3397 || GET_CODE (PATTERN (insn)) == CLOBBER)
3398 {
3399 dest = SET_DEST (PATTERN (insn));
3400 while (GET_CODE (dest) == SUBREG
3401 || GET_CODE (dest) == ZERO_EXTRACT
3402 || GET_CODE (dest) == SIGN_EXTRACT
3403 || GET_CODE (dest) == STRICT_LOW_PART)
3404 dest = XEXP (dest, 0);
3405 if (GET_CODE (dest) == REG)
3406 {
3407 register int regno = REGNO (dest);
3408 /* If this is the first setting of this reg
3409 in current basic block, and it was set before,
3410 it must be set in two basic blocks, so it cannot
3411 be moved out of the loop. */
3412 if (VARRAY_INT (n_times_set, regno) > 0
3413 && last_set[regno] == 0)
3414 VARRAY_CHAR (may_not_move, regno) = 1;
3415 /* If this is not first setting in current basic block,
3416 see if reg was used in between previous one and this.
3417 If so, neither one can be moved. */
3418 if (last_set[regno] != 0
3419 && reg_used_between_p (dest, last_set[regno], insn))
3420 VARRAY_CHAR (may_not_move, regno) = 1;
3421 if (VARRAY_INT (n_times_set, regno) < 127)
3422 ++VARRAY_INT (n_times_set, regno);
3423 last_set[regno] = insn;
3424 }
3425 }
3426 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3427 {
3428 register int i;
3429 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3430 {
3431 register rtx x = XVECEXP (PATTERN (insn), 0, i);
3432 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3433 /* Don't move a reg that has an explicit clobber.
3434 It's not worth the pain to try to do it correctly. */
8deb8e2c 3435 VARRAY_CHAR (may_not_move, REGNO (XEXP (x, 0))) = 1;
3436
3437 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3438 {
3439 dest = SET_DEST (x);
3440 while (GET_CODE (dest) == SUBREG
3441 || GET_CODE (dest) == ZERO_EXTRACT
3442 || GET_CODE (dest) == SIGN_EXTRACT
3443 || GET_CODE (dest) == STRICT_LOW_PART)
3444 dest = XEXP (dest, 0);
3445 if (GET_CODE (dest) == REG)
3446 {
3447 register int regno = REGNO (dest);
3448 if (VARRAY_INT (n_times_set, regno) > 0
3449 && last_set[regno] == 0)
3450 VARRAY_CHAR (may_not_move, regno) = 1;
3451 if (last_set[regno] != 0
3452 && reg_used_between_p (dest, last_set[regno], insn))
3453 VARRAY_CHAR (may_not_move, regno) = 1;
3454 if (VARRAY_INT (n_times_set, regno) < 127)
3455 ++VARRAY_INT (n_times_set, regno);
3456 last_set[regno] = insn;
3457 }
3458 }
3459 }
3460 }
3461 }
4c9a05bc 3462
b4ad7b23 3463 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
4c9a05bc 3464 bzero ((char *) last_set, nregs * sizeof (rtx));
3465 }
3466 *count_ptr = count;
3467}
3468\f
3469/* Given a loop that is bounded by LOOP_START and LOOP_END
3470 and that is entered at SCAN_START,
3471 return 1 if the register set in SET contained in insn INSN is used by
3472 any insn that precedes INSN in cyclic order starting
3473 from the loop entry point.
3474
3475 We don't want to use INSN_LUID here because if we restrict INSN to those
3476 that have a valid INSN_LUID, it means we cannot move an invariant out
3477 from an inner loop past two loops. */
3478
3479static int
3480loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end)
3481 rtx set, insn, loop_start, scan_start, loop_end;
3482{
3483 rtx reg = SET_DEST (set);
3484 rtx p;
3485
3486 /* Scan forward checking for register usage. If we hit INSN, we
3487 are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */
3488 for (p = scan_start; p != insn; p = NEXT_INSN (p))
3489 {
3490 if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
3491 && reg_overlap_mentioned_p (reg, PATTERN (p)))
3492 return 1;
3493
3494 if (p == loop_end)
3495 p = loop_start;
3496 }
3497
3498 return 0;
3499}
3500\f
3501/* A "basic induction variable" or biv is a pseudo reg that is set
3502 (within this loop) only by incrementing or decrementing it. */
3503/* A "general induction variable" or giv is a pseudo reg whose
3504 value is a linear function of a biv. */
3505
3506/* Bivs are recognized by `basic_induction_var';
45f97e2e 3507 Givs by `general_induction_var'. */
3508
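/* Editor's illustration -- not part of loop.c, assuming 4-byte ints.
   In a source loop like the following, `i' is a biv: it is set only by
   the increment.  The address of a[i], computed as a + i*4, is a giv
   with mult_val 4 and add_val `a'.  */

static int
biv_giv_example (a, n)
     int *a;
     int n;
{
  int sum = 0;
  int i;			/* biv: set only by i = i + 1 */
  for (i = 0; i < n; i++)
    sum += a[i];		/* address giv: a + i*4 */
  return sum;
}

/* After strength reduction the giv lives in a pointer bumped by 4 bytes
   per iteration -- roughly p = a before the loop, then sum += *p, p++ in
   the body -- so the multiply disappears; and if the end test is also
   rewritten against `p', the biv `i' can be eliminated entirely.  */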
3509/* Indexed by register number, indicates whether or not register is an
3510 induction variable, and if so what type. */
3511
3512enum iv_mode *reg_iv_type;
3513
3514/* Indexed by register number, contains pointer to `struct induction'
3515 if register is an induction variable. This holds general info for
3516 all induction variables. */
3517
3518struct induction **reg_iv_info;
3519
3520/* Indexed by register number, contains pointer to `struct iv_class'
3521 if register is a basic induction variable. This holds info describing
3522 the class (a related group) of induction variables that the biv belongs
3523 to. */
3524
3525struct iv_class **reg_biv_class;
3526
3527/* The head of a list which links together (via the next field)
3528 every iv class for the current loop. */
3529
3530struct iv_class *loop_iv_list;
3531
3532/* Communication with routines called via `note_stores'. */
3533
3534static rtx note_insn;
3535
3536/* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */
3537
3538static rtx addr_placeholder;
3539
3540/* ??? Unfinished optimizations, and possible future optimizations,
3541 for the strength reduction code. */
3542
b4ad7b23 3543/* ??? The interaction of biv elimination, and recognition of 'constant'
0f41302f 3544 bivs, may cause problems. */
3545
3546/* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
3547 performance problems.
3548
3549 Perhaps don't eliminate things that can be combined with an addressing
3550 mode. Find all givs that have the same biv, mult_val, and add_val;
3551 then for each giv, check to see if its only use dies in a following
3552 memory address. If so, generate a new memory address and check to see
3553 if it is valid. If it is valid, then store the modified memory address,
3554 otherwise, mark the giv as not done so that it will get its own iv. */
3555
3556/* ??? Could try to optimize branches when it is known that a biv is always
3557 positive. */
3558
 3559 /* ??? When replacing a biv in a compare insn, we should replace it with the
 3560 closest giv so that an optimized branch can still be recognized by the
 3561 combiner, e.g. the VAX acb insn. */
3562
3563/* ??? Many of the checks involving uid_luid could be simplified if regscan
3564 was rerun in loop_optimize whenever a register was added or moved.
3565 Also, some of the optimizations could be a little less conservative. */
3566\f
41a972a9 3567/* Perform strength reduction and induction variable elimination.
b4ad7b23 3568
41a972a9 3569 Pseudo registers created during this function will be beyond the last
3570 valid index in several tables including n_times_set and regno_last_uid.
3571 This does not cause a problem here, because the added registers cannot be
3572 givs outside of their loop, and hence will never be reconsidered.
3573 But scan_loop must check regnos to make sure they are in bounds.
3574
3575 SCAN_START is the first instruction in the loop, as the loop would
3576 actually be executed. END is the NOTE_INSN_LOOP_END. LOOP_TOP is
 3577 the first instruction in the loop, as it is laid out in the
3578 instruction stream. LOOP_START is the NOTE_INSN_LOOP_BEG. */
3579
3580static void
3581strength_reduce (scan_start, end, loop_top, insn_count,
5accd822 3582 loop_start, loop_end, unroll_p, bct_p)
3583 rtx scan_start;
3584 rtx end;
3585 rtx loop_top;
3586 int insn_count;
3587 rtx loop_start;
3588 rtx loop_end;
5accd822 3589 int unroll_p, bct_p;
3590{
3591 rtx p;
3592 rtx set;
3593 rtx inc_val;
3594 rtx mult_val;
3595 rtx dest_reg;
3596 /* This is 1 if current insn is not executed at least once for every loop
3597 iteration. */
3598 int not_every_iteration = 0;
3599 /* This is 1 if current insn may be executed more than once for every
3600 loop iteration. */
3601 int maybe_multiple = 0;
3602 /* Temporary list pointers for traversing loop_iv_list. */
3603 struct iv_class *bl, **backbl;
3604 /* Ratio of extra register life span we can justify
3605 for saving an instruction. More if loop doesn't call subroutines
3606 since in that case saving an insn makes more difference
3607 and more registers are available. */
3608 /* ??? could set this to last value of threshold in move_movables */
3609 int threshold = (loop_has_call ? 1 : 2) * (3 + n_non_fixed_regs);
3610 /* Map of pseudo-register replacements. */
3611 rtx *reg_map;
3612 int call_seen;
3613 rtx test;
3614 rtx end_insert_before;
5ea7a4ae 3615 int loop_depth = 0;
3616
3617 reg_iv_type = (enum iv_mode *) alloca (max_reg_before_loop
3618 * sizeof (enum iv_mode *));
3619 bzero ((char *) reg_iv_type, max_reg_before_loop * sizeof (enum iv_mode *));
3620 reg_iv_info = (struct induction **)
3621 alloca (max_reg_before_loop * sizeof (struct induction *));
3622 bzero ((char *) reg_iv_info, (max_reg_before_loop
3623 * sizeof (struct induction *)));
3624 reg_biv_class = (struct iv_class **)
3625 alloca (max_reg_before_loop * sizeof (struct iv_class *));
3626 bzero ((char *) reg_biv_class, (max_reg_before_loop
3627 * sizeof (struct iv_class *)));
3628
3629 loop_iv_list = 0;
3630 addr_placeholder = gen_reg_rtx (Pmode);
3631
3632 /* Save insn immediately after the loop_end. Insns inserted after loop_end
3633 must be put before this insn, so that they will appear in the right
b2586fe0 3634 order (i.e. loop order).
b4ad7b23 3635
3636 If loop_end is the end of the current function, then emit a
3637 NOTE_INSN_DELETED after loop_end and set end_insert_before to the
3638 dummy note insn. */
3639 if (NEXT_INSN (loop_end) != 0)
3640 end_insert_before = NEXT_INSN (loop_end);
3641 else
3642 end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end);
3643
3644 /* Scan through loop to find all possible bivs. */
3645
3646 for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
3647 p != NULL_RTX;
3648 p = next_insn_in_loop (p, scan_start, end, loop_top))
b4ad7b23 3649 {
3650 if (GET_CODE (p) == INSN
3651 && (set = single_set (p))
3652 && GET_CODE (SET_DEST (set)) == REG)
3653 {
3654 dest_reg = SET_DEST (set);
3655 if (REGNO (dest_reg) < max_reg_before_loop
3656 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
3657 && reg_iv_type[REGNO (dest_reg)] != NOT_BASIC_INDUCT)
3658 {
3659 if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)),
3660 dest_reg, p, &inc_val, &mult_val))
3661 {
3662 /* It is a possible basic induction variable.
3663 Create and initialize an induction structure for it. */
3664
3665 struct induction *v
3666 = (struct induction *) alloca (sizeof (struct induction));
3667
3668 record_biv (v, p, dest_reg, inc_val, mult_val,
7dcd3836 3669 not_every_iteration, maybe_multiple);
3670 reg_iv_type[REGNO (dest_reg)] = BASIC_INDUCT;
3671 }
3672 else if (REGNO (dest_reg) < max_reg_before_loop)
3673 reg_iv_type[REGNO (dest_reg)] = NOT_BASIC_INDUCT;
3674 }
3675 }
3676
3677 /* Past CODE_LABEL, we get to insns that may be executed multiple
3678 times. The only way we can be sure that they can't is if every
38e01259 3679 jump insn between here and the end of the loop either
3680 returns, exits the loop, is a forward jump, or is a jump
3681 to the loop start. */
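/* Editor's illustration, not part of loop.c: a conditional backward
   branch to a label inside the body, as in

       top:   ...
       again: x = x + 1;
              if (cond) goto again;
              if (i < n) goto top;

   can make the insns after `again' run more than once per iteration,
   which is what MAYBE_MULTIPLE records.  */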
3682
3683 if (GET_CODE (p) == CODE_LABEL)
3684 {
3685 rtx insn = p;
3686
3687 maybe_multiple = 0;
3688
3689 while (1)
3690 {
3691 insn = NEXT_INSN (insn);
3692 if (insn == scan_start)
3693 break;
3694 if (insn == end)
3695 {
3696 if (loop_top != 0)
f67ff5de 3697 insn = loop_top;
3698 else
3699 break;
3700 if (insn == scan_start)
3701 break;
3702 }
3703
3704 if (GET_CODE (insn) == JUMP_INSN
3705 && GET_CODE (PATTERN (insn)) != RETURN
3706 && (! condjump_p (insn)
3707 || (JUMP_LABEL (insn) != 0
8516af93 3708 && JUMP_LABEL (insn) != scan_start
3709 && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop
3710 || INSN_UID (insn) >= max_uid_for_loop
3711 || (INSN_LUID (JUMP_LABEL (insn))
3712 < INSN_LUID (insn))))))
3713 {
3714 maybe_multiple = 1;
3715 break;
3716 }
3717 }
3718 }
3719
3720 /* Past a jump, we get to insns for which we can't count
3721 on whether they will be executed during each iteration. */
3722 /* This code appears twice in strength_reduce. There is also similar
3723 code in scan_loop. */
3724 if (GET_CODE (p) == JUMP_INSN
3725 /* If we enter the loop in the middle, and scan around to the
3726 beginning, don't set not_every_iteration for that.
3727 This can be any kind of jump, since we want to know if insns
3728 will be executed if the loop is executed. */
8516af93 3729 && ! (JUMP_LABEL (p) == loop_top
3730 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
3731 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
3732 {
3733 rtx label = 0;
3734
3735 /* If this is a jump outside the loop, then it also doesn't
3736 matter. Check to see if the target of this branch is on the
 3737 loop_number_exit_labels list. */
3738
3739 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
3740 label;
3741 label = LABEL_NEXTREF (label))
3742 if (XEXP (label, 0) == JUMP_LABEL (p))
3743 break;
3744
3745 if (! label)
3746 not_every_iteration = 1;
3747 }
b4ad7b23 3748
3749 else if (GET_CODE (p) == NOTE)
3750 {
3751 /* At the virtual top of a converted loop, insns are again known to
3752 be executed each iteration: logically, the loop begins here
3753 even though the exit code has been duplicated. */
3754 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
3755 not_every_iteration = 0;
3756 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
3757 loop_depth++;
3758 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
3759 loop_depth--;
3760 }
3761
3762 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
3763 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
3764 or not an insn is known to be executed each iteration of the
3765 loop, whether or not any iterations are known to occur.
3766
3767 Therefore, if we have just passed a label and have no more labels
3768 between here and the test insn of the loop, we know these insns
8516af93 3769 will be executed each iteration. */
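/* Editor's illustration, not part of loop.c: in

       for (i = 0; i < n; i++)
         if (a[i] != 0)
           k = k + 1;

   the update of `k' sits behind a conditional jump, so from that jump
   until the next label NOT_EVERY_ITERATION stays 1, and updates seen
   there cannot be assumed to happen on every trip through the loop.  */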
3770
3771 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
3772 && no_labels_between_p (p, loop_end))
3773 not_every_iteration = 0;
3774 }
3775
3776 /* Scan loop_iv_list to remove all regs that proved not to be bivs.
3777 Make a sanity check against n_times_set. */
3778 for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next)
3779 {
3780 if (reg_iv_type[bl->regno] != BASIC_INDUCT
3781 /* Above happens if register modified by subreg, etc. */
3782 /* Make sure it is not recognized as a basic induction var: */
8deb8e2c 3783 || VARRAY_INT (n_times_set, bl->regno) != bl->biv_count
3784 /* If never incremented, it is invariant that we decided not to
3785 move. So leave it alone. */
3786 || ! bl->incremented)
3787 {
3788 if (loop_dump_stream)
3789 fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n",
3790 bl->regno,
3791 (reg_iv_type[bl->regno] != BASIC_INDUCT
3792 ? "not induction variable"
3793 : (! bl->incremented ? "never incremented"
3794 : "count error")));
3795
3796 reg_iv_type[bl->regno] = NOT_BASIC_INDUCT;
3797 *backbl = bl->next;
3798 }
3799 else
3800 {
3801 backbl = &bl->next;
3802
3803 if (loop_dump_stream)
3804 fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno);
3805 }
3806 }
3807
3808 /* Exit if there are no bivs. */
3809 if (! loop_iv_list)
3810 {
 3811 /* Can still unroll the loop anyway, but indicate that there is no
3812 strength reduction info available. */
81797aba 3813 if (unroll_p)
3814 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 0);
3815
3816 return;
3817 }
3818
3819 /* Find initial value for each biv by searching backwards from loop_start,
3820 halting at first label. Also record any test condition. */
3821
3822 call_seen = 0;
3823 for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
3824 {
3825 note_insn = p;
3826
3827 if (GET_CODE (p) == CALL_INSN)
3828 call_seen = 1;
3829
3830 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
3831 || GET_CODE (p) == CALL_INSN)
3832 note_stores (PATTERN (p), record_initial);
3833
3834 /* Record any test of a biv that branches around the loop if no store
3835 between it and the start of loop. We only care about tests with
3836 constants and registers and only certain of those. */
3837 if (GET_CODE (p) == JUMP_INSN
3838 && JUMP_LABEL (p) != 0
3839 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end)
3840 && (test = get_condition_for_loop (p)) != 0
3841 && GET_CODE (XEXP (test, 0)) == REG
3842 && REGNO (XEXP (test, 0)) < max_reg_before_loop
3843 && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0
3844 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start)
3845 && bl->init_insn == 0)
3846 {
3847 /* If an NE test, we have an initial value! */
3848 if (GET_CODE (test) == NE)
3849 {
3850 bl->init_insn = p;
3851 bl->init_set = gen_rtx_SET (VOIDmode,
3852 XEXP (test, 0), XEXP (test, 1));
3853 }
3854 else
3855 bl->initial_test = test;
3856 }
3857 }
3858
 3859 /* Look at each biv and see if we can say anything better about its
3860 initial value from any initializing insns set up above. (This is done
3861 in two passes to avoid missing SETs in a PARALLEL.) */
3862 for (bl = loop_iv_list; bl; bl = bl->next)
3863 {
3864 rtx src;
956d6950 3865 rtx note;
3866
3867 if (! bl->init_insn)
3868 continue;
3869
 3870 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
3871 is a constant, use the value of that. */
3872 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
3873 && CONSTANT_P (XEXP (note, 0)))
3874 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
3875 && CONSTANT_P (XEXP (note, 0))))
3876 src = XEXP (note, 0);
3877 else
3878 src = SET_SRC (bl->init_set);
3879
3880 if (loop_dump_stream)
3881 fprintf (loop_dump_stream,
3882 "Biv %d initialized at insn %d: initial value ",
3883 bl->regno, INSN_UID (bl->init_insn));
3884
3885 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
3886 || GET_MODE (src) == VOIDmode)
63d59526 3887 && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start))
3888 {
3889 bl->initial_value = src;
3890
3891 if (loop_dump_stream)
3892 {
3893 if (GET_CODE (src) == CONST_INT)
3894 {
3895 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (src));
3896 fputc ('\n', loop_dump_stream);
3897 }
3898 else
3899 {
3900 print_rtl (loop_dump_stream, src);
3901 fprintf (loop_dump_stream, "\n");
3902 }
3903 }
3904 }
3905 else
3906 {
 3907 /* Biv initial value is not a simple move,
d45cf215 3908 so let it keep the initial value of "itself". */
3909
3910 if (loop_dump_stream)
3911 fprintf (loop_dump_stream, "is complex\n");
3912 }
3913 }
3914
3915 /* Search the loop for general induction variables. */
3916
3917 /* A register is a giv if: it is only set once, it is a function of a
3918 biv and a constant (or invariant), and it is not a biv. */
3919
3920 not_every_iteration = 0;
5ea7a4ae 3921 loop_depth = 0;
3922 p = scan_start;
3923 while (1)
3924 {
3925 p = NEXT_INSN (p);
3926 /* At end of a straight-in loop, we are done.
3927 At end of a loop entered at the bottom, scan the top. */
3928 if (p == scan_start)
3929 break;
3930 if (p == end)
3931 {
3932 if (loop_top != 0)
f67ff5de 3933 p = loop_top;
3934 else
3935 break;
3936 if (p == scan_start)
3937 break;
3938 }
3939
3940 /* Look for a general induction variable in a register. */
3941 if (GET_CODE (p) == INSN
3942 && (set = single_set (p))
3943 && GET_CODE (SET_DEST (set)) == REG
8deb8e2c 3944 && ! VARRAY_CHAR (may_not_optimize, REGNO (SET_DEST (set))))
3945 {
3946 rtx src_reg;
3947 rtx add_val;
3948 rtx mult_val;
3949 int benefit;
3950 rtx regnote = 0;
3951
3952 dest_reg = SET_DEST (set);
3953 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
3954 continue;
3955
3956 if (/* SET_SRC is a giv. */
3957 (general_induction_var (SET_SRC (set), &src_reg, &add_val,
3958 &mult_val, 0, &benefit)
0f41302f 3959 /* Equivalent expression is a giv. */
5fd8383e 3960 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
3961 && general_induction_var (XEXP (regnote, 0), &src_reg,
3962 &add_val, &mult_val, 0,
3963 &benefit)))
3964 /* Don't try to handle any regs made by loop optimization.
3965 We have nothing on them in regno_first_uid, etc. */
3966 && REGNO (dest_reg) < max_reg_before_loop
3967 /* Don't recognize a BASIC_INDUCT_VAR here. */
3968 && dest_reg != src_reg
3969 /* This must be the only place where the register is set. */
8deb8e2c 3970 && (VARRAY_INT (n_times_set, REGNO (dest_reg)) == 1
0f41302f 3971 /* or all sets must be consecutive and make a giv. */
3972 || (benefit = consec_sets_giv (benefit, p,
3973 src_reg, dest_reg,
3974 &add_val, &mult_val))))
3975 {
3976 int count;
3977 struct induction *v
3978 = (struct induction *) alloca (sizeof (struct induction));
3979 rtx temp;
3980
3981 /* If this is a library call, increase benefit. */
5fd8383e 3982 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
3983 benefit += libcall_benefit (p);
3984
3985 /* Skip the consecutive insns, if there are any. */
8deb8e2c 3986 for (count = VARRAY_INT (n_times_set, REGNO (dest_reg)) - 1;
3987 count > 0; count--)
3988 {
3989 /* If first insn of libcall sequence, skip to end.
3990 Do this at start of loop, since INSN is guaranteed to
3991 be an insn here. */
3992 if (GET_CODE (p) != NOTE
5fd8383e 3993 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
3994 p = XEXP (temp, 0);
3995
3996 do p = NEXT_INSN (p);
3997 while (GET_CODE (p) == NOTE);
3998 }
3999
4000 record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit,
5fd8383e 4001 DEST_REG, not_every_iteration, NULL_PTR, loop_start,
4002 loop_end);
4003
4004 }
4005 }
4006
4007#ifndef DONT_REDUCE_ADDR
4008 /* Look for givs which are memory addresses. */
4009 /* This resulted in worse code on a VAX 8600. I wonder if it
4010 still does. */
4011 if (GET_CODE (p) == INSN)
4012 find_mem_givs (PATTERN (p), p, not_every_iteration, loop_start,
4013 loop_end);
4014#endif
4015
4016 /* Update the status of whether giv can derive other givs. This can
4017 change when we pass a label or an insn that updates a biv. */
4018 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4019 || GET_CODE (p) == CODE_LABEL)
4020 update_giv_derive (p);
4021
4022 /* Past a jump, we get to insns for which we can't count
4023 on whether they will be executed during each iteration. */
4024 /* This code appears twice in strength_reduce. There is also similar
4025 code in scan_loop. */
4026 if (GET_CODE (p) == JUMP_INSN
4027 /* If we enter the loop in the middle, and scan around to the
4028 beginning, don't set not_every_iteration for that.
4029 This can be any kind of jump, since we want to know if insns
4030 will be executed if the loop is executed. */
8516af93 4031 && ! (JUMP_LABEL (p) == loop_top
4032 && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p))
4033 || (NEXT_INSN (p) == loop_end && condjump_p (p)))))
4034 {
4035 rtx label = 0;
4036
4037 /* If this is a jump outside the loop, then it also doesn't
4038 matter. Check to see if the target of this branch is on the
4039 loop_number_exits_labels list. */
4040
4041 for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]];
4042 label;
4043 label = LABEL_NEXTREF (label))
4044 if (XEXP (label, 0) == JUMP_LABEL (p))
4045 break;
4046
4047 if (! label)
4048 not_every_iteration = 1;
4049 }
b4ad7b23 4050
4051 else if (GET_CODE (p) == NOTE)
4052 {
4053 /* At the virtual top of a converted loop, insns are again known to
4054 be executed each iteration: logically, the loop begins here
4055 even though the exit code has been duplicated. */
4056 if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
4057 not_every_iteration = 0;
4058 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4059 loop_depth++;
4060 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4061 loop_depth--;
4062 }
4063
4064 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
4065 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4066 or not an insn is known to be executed each iteration of the
4067 loop, whether or not any iterations are known to occur.
4068
4069 Therefore, if we have just passed a label and have no more labels
4070 between here and the test insn of the loop, we know these insns
4071 will be executed each iteration. */
4072
4073 if (not_every_iteration && GET_CODE (p) == CODE_LABEL
4074 && no_labels_between_p (p, loop_end))
4075 not_every_iteration = 0;
4076 }
4077
4078 /* Try to calculate and save the number of loop iterations. This is
 4079 set to zero if the actual number cannot be calculated. This must
4080 be called after all giv's have been identified, since otherwise it may
4081 fail if the iteration variable is a giv. */
4082
4083 loop_n_iterations = loop_iterations (loop_start, loop_end);
4084
4085 /* Now for each giv for which we still don't know whether or not it is
4086 replaceable, check to see if it is replaceable because its final value
4087 can be calculated. This must be done after loop_iterations is called,
4088 so that final_giv_value will work correctly. */
4089
4090 for (bl = loop_iv_list; bl; bl = bl->next)
4091 {
4092 struct induction *v;
4093
4094 for (v = bl->giv; v; v = v->next_iv)
4095 if (! v->replaceable && ! v->not_replaceable)
4096 check_final_value (v, loop_start, loop_end);
4097 }
4098
4099 /* Try to prove that the loop counter variable (if any) is always
4100 nonnegative; if so, record that fact with a REG_NONNEG note
4101 so that "decrement and branch until zero" insn can be used. */
4102 check_dbra_loop (loop_end, insn_count, loop_start);
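/* Editor's illustration, not part of loop.c: when it succeeds,
   check_dbra_loop can reverse a counted loop, turning

       for (i = 0; i < 100; i++)  body;
   into roughly
       for (i = 100; --i >= 0; )  body;

   so the end test becomes a single decrement-and-branch insn; the
   REG_NONNEG note records that the counter never goes negative.  */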
4103
4104#ifdef HAIFA
 4105 /* Record loop variables relevant for BCT optimization before unrolling
4106 the loop. Unrolling may update part of this information, and the
4107 correct data will be used for generating the BCT. */
4108#ifdef HAVE_decrement_and_branch_on_count
5accd822 4109 if (HAVE_decrement_and_branch_on_count && bct_p)
4110 analyze_loop_iterations (loop_start, loop_end);
4111#endif
4112#endif /* HAIFA */
4113
4114 /* Create reg_map to hold substitutions for replaceable giv regs. */
4115 reg_map = (rtx *) alloca (max_reg_before_loop * sizeof (rtx));
4116 bzero ((char *) reg_map, max_reg_before_loop * sizeof (rtx));
4117
4118 /* Examine each iv class for feasibility of strength reduction/induction
4119 variable elimination. */
4120
4121 for (bl = loop_iv_list; bl; bl = bl->next)
4122 {
4123 struct induction *v;
4124 int benefit;
4125 int all_reduced;
4126 rtx final_value = 0;
4127
4128 /* Test whether it will be possible to eliminate this biv
4129 provided all givs are reduced. This is possible if either
4130 the reg is not used outside the loop, or we can compute
4131 what its final value will be.
4132
4133 For architectures with a decrement_and_branch_until_zero insn,
4134 don't do this if we put a REG_NONNEG note on the endtest for
4135 this biv. */
4136
4137 /* Compare against bl->init_insn rather than loop_start.
4138 We aren't concerned with any uses of the biv between
4139 init_insn and loop_start since these won't be affected
4140 by the value of the biv elsewhere in the function, so
4141 long as init_insn doesn't use the biv itself.
4142 March 14, 1989 -- self@bayes.arc.nasa.gov */
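/* Editor's sketch, not part of loop.c: in source terms, once every use
   of the biv has been rewritten in terms of a reduced giv, e.g.

       for (i = 0; i < n; i++)  sum += *p++;
   can become
       for (end = a + n; p < end; )  sum += *p++;

   -- the end test is rewritten against the giv and the biv `i', with
   its increment, disappears.  That is the job of maybe_eliminate_biv
   below.  */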
4143
b1f21e0a 4144 if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end)
4145 && bl->init_insn
4146 && INSN_UID (bl->init_insn) < max_uid_for_loop
b1f21e0a 4147 && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn)
4148#ifdef HAVE_decrement_and_branch_until_zero
4149 && ! bl->nonneg
4150#endif
4151 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
4152 || ((final_value = final_biv_value (bl, loop_start, loop_end))
4153#ifdef HAVE_decrement_and_branch_until_zero
4154 && ! bl->nonneg
4155#endif
4156 ))
4157 bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0,
4158 threshold, insn_count);
4159 else
4160 {
4161 if (loop_dump_stream)
4162 {
4163 fprintf (loop_dump_stream,
4164 "Cannot eliminate biv %d.\n",
4165 bl->regno);
4166 fprintf (loop_dump_stream,
4167 "First use: insn %d, last use: insn %d.\n",
4168 REGNO_FIRST_UID (bl->regno),
4169 REGNO_LAST_UID (bl->regno));
4170 }
4171 }
4172
4173 /* Combine all giv's for this iv_class. */
4174 combine_givs (bl);
4175
4176 /* This will be true at the end, if all givs which depend on this
4177 biv have been strength reduced.
4178 We can't (currently) eliminate the biv unless this is so. */
4179 all_reduced = 1;
4180
4181 /* Check each giv in this class to see if we will benefit by reducing
4182 it. Skip giv's combined with others. */
4183 for (v = bl->giv; v; v = v->next_iv)
4184 {
4185 struct induction *tv;
4186
4187 if (v->ignore || v->same)
4188 continue;
4189
4190 benefit = v->benefit;
4191
4192 /* Reduce benefit if not replaceable, since we will insert
4193 a move-insn to replace the insn that calculates this giv.
4194 Don't do this unless the giv is a user variable, since it
4195 will often be marked non-replaceable because of the duplication
4196 of the exit code outside the loop. In such a case, the copies
4197 we insert are dead and will be deleted. So they don't have
4198 a cost. Similar situations exist. */
4199 /* ??? The new final_[bg]iv_value code does a much better job
4200 of finding replaceable giv's, and hence this code may no longer
4201 be necessary. */
4202 if (! v->replaceable && ! bl->eliminable
4203 && REG_USERVAR_P (v->dest_reg))
4204 benefit -= copy_cost;
4205
4206 /* Decrease the benefit to count the add-insns that we will
4207 insert to increment the reduced reg for the giv. */
4208 benefit -= add_cost * bl->biv_count;
4209
4210 /* Decide whether to strength-reduce this giv or to leave the code
4211 unchanged (recompute it from the biv each time it is used).
4212 This decision can be made independently for each giv. */
4213
4214#ifdef AUTO_INC_DEC
4215 /* Attempt to guess whether autoincrement will handle some of the
4216 new add insns; if so, increase BENEFIT (undo the subtraction of
4217 add_cost that was done above). */
4218 if (v->giv_type == DEST_ADDR
4219 && GET_CODE (v->mult_val) == CONST_INT)
4220 {
4221#if defined (HAVE_POST_INCREMENT) || defined (HAVE_PRE_INCREMENT)
4222 if (INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4223 benefit += add_cost * bl->biv_count;
4224#endif
4225#if defined (HAVE_POST_DECREMENT) || defined (HAVE_PRE_DECREMENT)
4226 if (-INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode))
4227 benefit += add_cost * bl->biv_count;
4228#endif
4229 }
4230#endif
4231
4232 /* If an insn is not to be strength reduced, then set its ignore
4233 flag, and clear all_reduced. */
4234
4235 /* A giv that depends on a reversed biv must be reduced if it is
 4236 used after the loop exit; otherwise, it would have the wrong
 4237 value after the loop exit. To make it simple, just reduce all
 4238 such giv's whether or not we know they are used after the loop
4239 exit. */
4240
4241 if ( ! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count
4242 && ! bl->reversed )
4243 {
4244 if (loop_dump_stream)
4245 fprintf (loop_dump_stream,
4246 "giv of insn %d not worth while, %d vs %d.\n",
4247 INSN_UID (v->insn),
4248 v->lifetime * threshold * benefit, insn_count);
4249 v->ignore = 1;
4250 all_reduced = 0;
4251 }
4252 else
4253 {
4254 /* Check that we can increment the reduced giv without a
4255 multiply insn. If not, reject it. */
4256
4257 for (tv = bl->biv; tv; tv = tv->next_iv)
4258 if (tv->mult_val == const1_rtx
4259 && ! product_cheap_p (tv->add_val, v->mult_val))
4260 {
4261 if (loop_dump_stream)
4262 fprintf (loop_dump_stream,
4263 "giv of insn %d: would need a multiply.\n",
4264 INSN_UID (v->insn));
4265 v->ignore = 1;
4266 all_reduced = 0;
4267 break;
4268 }
4269 }
4270 }
4271
4272 /* Reduce each giv that we decided to reduce. */
4273
4274 for (v = bl->giv; v; v = v->next_iv)
4275 {
4276 struct induction *tv;
4277 if (! v->ignore && v->same == 0)
4278 {
4279 int auto_inc_opt = 0;
4280
4281 v->new_reg = gen_reg_rtx (v->mode);
4282
4283#ifdef AUTO_INC_DEC
4284 /* If the target has auto-increment addressing modes, and
4285 this is an address giv, then try to put the increment
4286 immediately after its use, so that flow can create an
4287 auto-increment addressing mode. */
4288 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4289 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4290 /* We don't handle reversed biv's because bl->biv->insn
4291 does not have a valid INSN_LUID. */
4292 && ! bl->reversed
4293 && v->always_executed && ! v->maybe_multiple
4294 && INSN_UID (v->insn) < max_uid_for_loop)
4295 {
4296 /* If other giv's have been combined with this one, then
4297 this will work only if all uses of the other giv's occur
4298 before this giv's insn. This is difficult to check.
4299
4300 We simplify this by looking for the common case where
4301 there is one DEST_REG giv, and this giv's insn is the
4302 last use of the dest_reg of that DEST_REG giv. If the
38e01259 4303 increment occurs after the address giv, then we can
4304 perform the optimization. (Otherwise, the increment
4305 would have to go before other_giv, and we would not be
4306 able to combine it with the address giv to get an
4307 auto-inc address.) */
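/* Editor's sketch, not part of loop.c: the payoff is that placing the
   increment immediately after the use lets flow fuse

       ... = *p;   p = p + 4;
   into
       ... = *p++;

   on a target with post-increment addressing (HAVE_POST_INCREMENT).  */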
4308 if (v->combined_with)
4309 {
4310 struct induction *other_giv = 0;
4311
4312 for (tv = bl->giv; tv; tv = tv->next_iv)
4313 if (tv->same == v)
4314 {
4315 if (other_giv)
4316 break;
4317 else
4318 other_giv = tv;
4319 }
4320 if (! tv && other_giv
43243872 4321 && REGNO (other_giv->dest_reg) < max_reg_before_loop
b1f21e0a 4322 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4323 == INSN_UID (v->insn))
4324 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4325 auto_inc_opt = 1;
4326 }
38e01259 4327 /* Check for case where increment is before the address
4328 giv. Do this test in "loop order". */
4329 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4330 && (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4331 || (INSN_LUID (bl->biv->insn)
4332 > INSN_LUID (scan_start))))
4333 || (INSN_LUID (v->insn) < INSN_LUID (scan_start)
4334 && (INSN_LUID (scan_start)
4335 < INSN_LUID (bl->biv->insn))))
4336 auto_inc_opt = -1;
4337 else
4338 auto_inc_opt = 1;
4339
bb91b814 4340#ifdef HAVE_cc0
4341 {
4342 rtx prev;
4343
4344 /* We can't put an insn immediately after one setting
4345 cc0, or immediately before one using cc0. */
4346 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4347 || (auto_inc_opt == -1
4348 && (prev = prev_nonnote_insn (v->insn)) != 0
4349 && GET_RTX_CLASS (GET_CODE (prev)) == 'i'
4350 && sets_cc0_p (PATTERN (prev))))
4351 auto_inc_opt = 0;
4352 }
4353#endif
4354
4355 if (auto_inc_opt)
4356 v->auto_inc_opt = 1;
4357 }
4358#endif
4359
4360 /* For each place where the biv is incremented, add an insn
4361 to increment the new, reduced reg for the giv. */
4362 for (tv = bl->biv; tv; tv = tv->next_iv)
4363 {
4364 rtx insert_before;
4365
4366 if (! auto_inc_opt)
4367 insert_before = tv->insn;
4368 else if (auto_inc_opt == 1)
4369 insert_before = NEXT_INSN (v->insn);
4370 else
4371 insert_before = v->insn;
4372
4373 if (tv->mult_val == const1_rtx)
4374 emit_iv_add_mult (tv->add_val, v->mult_val,
8516af93 4375 v->new_reg, v->new_reg, insert_before);
4376 else /* tv->mult_val == const0_rtx */
4377 /* A multiply is acceptable here
4378 since this is presumed to be seldom executed. */
4379 emit_iv_add_mult (tv->add_val, v->mult_val,
8516af93 4380 v->add_val, v->new_reg, insert_before);
4381 }
4382
4383 /* Add code at loop start to initialize giv's reduced reg. */
4384
4385 emit_iv_add_mult (bl->initial_value, v->mult_val,
4386 v->add_val, v->new_reg, loop_start);
4387 }
4388 }
4389
4390 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
4391 as not reduced.
4392
4393 For each giv register that can be reduced now: if replaceable,
4394 substitute reduced reg wherever the old giv occurs;
4395 else add new move insn "giv_reg = reduced_reg".
4396
4397 Also check for givs whose first use is their definition and whose
4398 last use is the definition of another giv. If so, it is likely
4399 dead and should not be used to eliminate a biv. */
4400 for (v = bl->giv; v; v = v->next_iv)
4401 {
4402 if (v->same && v->same->ignore)
4403 v->ignore = 1;
4404
4405 if (v->ignore)
4406 continue;
4407
4408 if (v->giv_type == DEST_REG
b1f21e0a 4409 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4410 {
4411 struct induction *v1;
4412
4413 for (v1 = bl->giv; v1; v1 = v1->next_iv)
b1f21e0a 4414 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4415 v->maybe_dead = 1;
4416 }
4417
4418 /* Update expression if this was combined, in case other giv was
4419 replaced. */
4420 if (v->same)
4421 v->new_reg = replace_rtx (v->new_reg,
4422 v->same->dest_reg, v->same->new_reg);
4423
4424 if (v->giv_type == DEST_ADDR)
4425 /* Store reduced reg as the address in the memref where we found
4426 this giv. */
9abdca9c 4427 validate_change (v->insn, v->location, v->new_reg, 0);
4428 else if (v->replaceable)
4429 {
4430 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4431
4432#if 0
4433 /* I can no longer duplicate the original problem. Perhaps
4434 this is unnecessary now? */
4435
4436 /* Replaceable; it isn't strictly necessary to delete the old
4437 insn and emit a new one, because v->dest_reg is now dead.
4438
4439 However, especially when unrolling loops, the special
4440 handling for (set REG0 REG1) in the second cse pass may
4441 make v->dest_reg live again. To avoid this problem, emit
4442 an insn to set the original giv reg from the reduced giv.
4443 We can not delete the original insn, since it may be part
4444 of a LIBCALL, and the code in flow that eliminates dead
4445 libcalls will fail if it is deleted. */
4446 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4447 v->insn);
4448#endif
4449 }
4450 else
4451 {
4452 /* Not replaceable; emit an insn to set the original giv reg from
4453 the reduced giv, same as above. */
4454 emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg),
4455 v->insn);
4456 }
4457
4458 /* When a loop is reversed, givs which depend on the reversed
4459 biv, and which are live outside the loop, must be set to their
4460 correct final value. This insn is only needed if the giv is
4461 not replaceable. The correct final value is the same as the
4462 value that the giv starts the reversed loop with. */
4463 if (bl->reversed && ! v->replaceable)
4464 emit_iv_add_mult (bl->initial_value, v->mult_val,
4465 v->add_val, v->dest_reg, end_insert_before);
4466 else if (v->final_value)
4467 {
4468 rtx insert_before;
4469
4470 /* If the loop has multiple exits, emit the insn before the
4471 loop to ensure that it will always be executed no matter
4472 how the loop exits. Otherwise, emit the insn after the loop,
4473 since this is slightly more efficient. */
353127c2 4474 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4475 insert_before = loop_start;
4476 else
4477 insert_before = end_insert_before;
4478 emit_insn_before (gen_move_insn (v->dest_reg, v->final_value),
4479 insert_before);
4480
4481#if 0
4482 /* If the insn to set the final value of the giv was emitted
4483 before the loop, then we must delete the insn inside the loop
4484 that sets it. If this is a LIBCALL, then we must delete
4485 every insn in the libcall. Note, however, that
4486 final_giv_value will only succeed when there are multiple
4487 exits if the giv is dead at each exit, hence it does not
4488 matter that the original insn remains because it is dead
 4489 anyway. */
4490 /* Delete the insn inside the loop that sets the giv since
4491 the giv is now set before (or after) the loop. */
4492 delete_insn (v->insn);
4493#endif
4494 }
4495
4496 if (loop_dump_stream)
4497 {
4498 fprintf (loop_dump_stream, "giv at %d reduced to ",
4499 INSN_UID (v->insn));
4500 print_rtl (loop_dump_stream, v->new_reg);
4501 fprintf (loop_dump_stream, "\n");
4502 }
4503 }
4504
4505 /* All the givs based on the biv bl have been reduced if they
4506 merit it. */
4507
4508 /* For each giv not marked as maybe dead that has been combined with a
4509 second giv, clear any "maybe dead" mark on that second giv.
4510 v->new_reg will either be or refer to the register of the giv it
4511 combined with.
4512
4513 Doing this clearing avoids problems in biv elimination where a
4514 giv's new_reg is a complex value that can't be put in the insn but
4515 the giv combined with (with a reg as new_reg) is marked maybe_dead.
4516 Since the register will be used in either case, we'd prefer it be
4517 used from the simpler giv. */
4518
4519 for (v = bl->giv; v; v = v->next_iv)
4520 if (! v->maybe_dead && v->same)
4521 v->same->maybe_dead = 0;
4522
4523 /* Try to eliminate the biv, if it is a candidate.
4524 This won't work if ! all_reduced,
4525 since the givs we planned to use might not have been reduced.
4526
d45cf215 4527 We have to be careful that we didn't initially think we could eliminate
4528 this biv because of a giv that we now think may be dead and shouldn't
4529 be used as a biv replacement.
4530
4531 Also, there is the possibility that we may have a giv that looks
4532 like it can be used to eliminate a biv, but the resulting insn
4533 isn't valid. This can happen, for example, on the 88k, where a
4534 JUMP_INSN can compare a register only with zero. Attempts to
c5b7917e 4535 replace it with a compare with a constant will fail.
4536
4537 Note that in cases where this call fails, we may have replaced some
4538 of the occurrences of the biv with a giv, but no harm was done in
4539 doing so in the rare cases where it can occur. */
4540
4541 if (all_reduced == 1 && bl->eliminable
4542 && maybe_eliminate_biv (bl, loop_start, end, 1,
4543 threshold, insn_count))
4544
4545 {
4546 /* ?? If we created a new test to bypass the loop entirely,
4547 or otherwise drop straight in, based on this test, then
4548 we might want to rewrite it also. This way some later
4549 pass has more hope of removing the initialization of this
0f41302f 4550 biv entirely. */
4551
4552 /* If final_value != 0, then the biv may be used after loop end
4553 and we must emit an insn to set it just in case.
4554
4555 Reversed bivs already have an insn after the loop setting their
4556 value, so we don't need another one. We can't calculate the
0f41302f 4557 proper final value for such a biv here anyway. */
4558 if (final_value != 0 && ! bl->reversed)
4559 {
4560 rtx insert_before;
4561
4562 /* If the loop has multiple exits, emit the insn before the
4563 loop to ensure that it will always be executed no matter
4564 how the loop exits. Otherwise, emit the insn after the
4565 loop, since this is slightly more efficient. */
353127c2 4566 if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
4567 insert_before = loop_start;
4568 else
4569 insert_before = end_insert_before;
4570
4571 emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value),
4572 end_insert_before);
4573 }
4574
4575#if 0
4576 /* Delete all of the instructions inside the loop which set
 4577 the biv, as they are all dead. It is safe to delete them,
4578 because an insn setting a biv will never be part of a libcall. */
4579 /* However, deleting them will invalidate the regno_last_uid info,
4580 so keeping them around is more convenient. Final_biv_value
4581 will only succeed when there are multiple exits if the biv
4582 is dead at each exit, hence it does not matter that the original
 4583 insn remains, because it is dead anyway. */
4584 for (v = bl->biv; v; v = v->next_iv)
4585 delete_insn (v->insn);
4586#endif
4587
4588 if (loop_dump_stream)
4589 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
4590 bl->regno);
4591 }
4592 }
4593
4594 /* Go through all the instructions in the loop, making all the
4595 register substitutions scheduled in REG_MAP. */
4596
4597 for (p = loop_start; p != end; p = NEXT_INSN (p))
4598 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4599 || GET_CODE (p) == CALL_INSN)
4600 {
4601 replace_regs (PATTERN (p), reg_map, max_reg_before_loop, 0);
4602 replace_regs (REG_NOTES (p), reg_map, max_reg_before_loop, 0);
da0c128e 4603 INSN_CODE (p) = -1;
4604 }
4605
4606 /* Unroll loops from within strength reduction so that we can use the
4607 induction variable information that strength_reduce has already
4608 collected. */
4609
81797aba 4610 if (unroll_p)
4611 unroll_loop (loop_end, insn_count, loop_start, end_insert_before, 1);
4612
4613#ifdef HAIFA
 4614 /* Instrument the loop with a BCT insn. */
4615#ifdef HAVE_decrement_and_branch_on_count
5accd822 4616 if (HAVE_decrement_and_branch_on_count && bct_p)
4617 insert_bct (loop_start, loop_end);
4618#endif
4619#endif /* HAIFA */
4620
4621 if (loop_dump_stream)
4622 fprintf (loop_dump_stream, "\n");
4623}
4624\f
4625/* Return 1 if X is a valid source for an initial value (or as value being
4626 compared against in an initial test).
4627
4628 X must be either a register or constant and must not be clobbered between
4629 the current insn and the start of the loop.
4630
4631 INSN is the insn containing X. */
4632
4633static int
4634valid_initial_value_p (x, insn, call_seen, loop_start)
4635 rtx x;
4636 rtx insn;
4637 int call_seen;
4638 rtx loop_start;
4639{
4640 if (CONSTANT_P (x))
4641 return 1;
4642
d45cf215 4643 /* Only consider pseudos we know about, initialized in insns whose luids
4644 we know. */
4645 if (GET_CODE (x) != REG
4646 || REGNO (x) >= max_reg_before_loop)
4647 return 0;
4648
4649 /* Don't use call-clobbered registers across a call which clobbers it. On
4650 some machines, don't use any hard registers at all. */
4651 if (REGNO (x) < FIRST_PSEUDO_REGISTER
4652 && (SMALL_REGISTER_CLASSES
4653 || (call_used_regs[REGNO (x)] && call_seen)))
4654 return 0;
4655
4656 /* Don't use registers that have been clobbered before the start of the
4657 loop. */
4658 if (reg_set_between_p (x, insn, loop_start))
4659 return 0;
4660
4661 return 1;
4662}
4663\f
4664/* Scan X for memory refs and check each memory address
4665 as a possible giv. INSN is the insn whose pattern X comes from.
4666 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
4667 every loop iteration. */
4668
4669static void
4670find_mem_givs (x, insn, not_every_iteration, loop_start, loop_end)
4671 rtx x;
4672 rtx insn;
4673 int not_every_iteration;
4674 rtx loop_start, loop_end;
4675{
4676 register int i, j;
4677 register enum rtx_code code;
4678 register char *fmt;
4679
4680 if (x == 0)
4681 return;
4682
4683 code = GET_CODE (x);
4684 switch (code)
4685 {
4686 case REG:
4687 case CONST_INT:
4688 case CONST:
4689 case CONST_DOUBLE:
4690 case SYMBOL_REF:
4691 case LABEL_REF:
4692 case PC:
4693 case CC0:
4694 case ADDR_VEC:
4695 case ADDR_DIFF_VEC:
4696 case USE:
4697 case CLOBBER:
4698 return;
4699
4700 case MEM:
4701 {
4702 rtx src_reg;
4703 rtx add_val;
4704 rtx mult_val;
4705 int benefit;
4706
4707 /* This code used to disable creating GIVs with mult_val == 1 and
4708 add_val == 0. However, this leads to lost optimizations when
4709 it comes time to combine a set of related DEST_ADDR GIVs, since
4710 this one would not be seen. */
b4ad7b23 4711
4712 if (general_induction_var (XEXP (x, 0), &src_reg, &add_val,
4713 &mult_val, 1, &benefit))
4714 {
4715 /* Found one; record it. */
4716 struct induction *v
4717 = (struct induction *) oballoc (sizeof (struct induction));
4718
4719 record_giv (v, insn, src_reg, addr_placeholder, mult_val,
4720 add_val, benefit, DEST_ADDR, not_every_iteration,
4721 &XEXP (x, 0), loop_start, loop_end);
4722
4723 v->mem_mode = GET_MODE (x);
4724 }
b4ad7b23 4725 }
4726 return;
4727
4728 default:
4729 break;
4730 }
4731
4732 /* Recursively scan the subexpressions for other mem refs. */
4733
4734 fmt = GET_RTX_FORMAT (code);
4735 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4736 if (fmt[i] == 'e')
4737 find_mem_givs (XEXP (x, i), insn, not_every_iteration, loop_start,
4738 loop_end);
4739 else if (fmt[i] == 'E')
4740 for (j = 0; j < XVECLEN (x, i); j++)
4741 find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration,
4742 loop_start, loop_end);
4743}
4744\f
4745/* Fill in the data about one biv update.
4746 V is the `struct induction' in which we record the biv. (It is
4747 allocated by the caller, with alloca.)
4748 INSN is the insn that sets it.
4749 DEST_REG is the biv's reg.
4750
4751 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
4752 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
4753 being set to INC_VAL.
4754
 4755 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
4756 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
4757 can be executed more than once per iteration. If MAYBE_MULTIPLE
4758 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
4759 executed exactly once per iteration. */
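/* Editor's illustration, not part of loop.c: for an update of the form
   `i = i + 4', record_biv is called with mult_val == const1_rtx and
   inc_val == 4; for a plain reassignment `i = x' inside the loop it is
   called with mult_val == const0_rtx and inc_val == x.  MULT_VAL thus
   encodes "incremented" versus "set outright".  */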
4760
4761static void
4762record_biv (v, insn, dest_reg, inc_val, mult_val,
4763 not_every_iteration, maybe_multiple)
4764 struct induction *v;
4765 rtx insn;
4766 rtx dest_reg;
4767 rtx inc_val;
4768 rtx mult_val;
4769 int not_every_iteration;
7dcd3836 4770 int maybe_multiple;
4771{
4772 struct iv_class *bl;
4773
4774 v->insn = insn;
4775 v->src_reg = dest_reg;
4776 v->dest_reg = dest_reg;
4777 v->mult_val = mult_val;
4778 v->add_val = inc_val;
4779 v->mode = GET_MODE (dest_reg);
4780 v->always_computable = ! not_every_iteration;
8516af93 4781 v->always_executed = ! not_every_iteration;
7dcd3836 4782 v->maybe_multiple = maybe_multiple;
4783
4784 /* Add this to the reg's iv_class, creating a class
4785 if this is the first incrementation of the reg. */
4786
4787 bl = reg_biv_class[REGNO (dest_reg)];
4788 if (bl == 0)
4789 {
4790 /* Create and initialize new iv_class. */
4791
4792 bl = (struct iv_class *) oballoc (sizeof (struct iv_class));
4793
4794 bl->regno = REGNO (dest_reg);
4795 bl->biv = 0;
4796 bl->giv = 0;
4797 bl->biv_count = 0;
4798 bl->giv_count = 0;
4799
4800 /* Set initial value to the reg itself. */
4801 bl->initial_value = dest_reg;
c5b7917e 4802 /* We haven't seen the initializing insn yet. */
4803 bl->init_insn = 0;
4804 bl->init_set = 0;
4805 bl->initial_test = 0;
4806 bl->incremented = 0;
4807 bl->eliminable = 0;
4808 bl->nonneg = 0;
4809 bl->reversed = 0;
b5d27be7 4810 bl->total_benefit = 0;
4811
4812 /* Add this class to loop_iv_list. */
4813 bl->next = loop_iv_list;
4814 loop_iv_list = bl;
4815
4816 /* Put it in the array of biv register classes. */
4817 reg_biv_class[REGNO (dest_reg)] = bl;
4818 }
4819
4820 /* Update IV_CLASS entry for this biv. */
4821 v->next_iv = bl->biv;
4822 bl->biv = v;
4823 bl->biv_count++;
4824 if (mult_val == const1_rtx)
4825 bl->incremented = 1;
4826
4827 if (loop_dump_stream)
4828 {
4829 fprintf (loop_dump_stream,
4830 "Insn %d: possible biv, reg %d,",
4831 INSN_UID (insn), REGNO (dest_reg));
4832 if (GET_CODE (inc_val) == CONST_INT)
4833 {
4834 fprintf (loop_dump_stream, " const =");
4835 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (inc_val));
4836 fputc ('\n', loop_dump_stream);
4837 }
4838 else
4839 {
4840 fprintf (loop_dump_stream, " const = ");
4841 print_rtl (loop_dump_stream, inc_val);
4842 fprintf (loop_dump_stream, "\n");
4843 }
4844 }
4845}
4846\f
4847/* Fill in the data about one giv.
4848 V is the `struct induction' in which we record the giv. (It is
4849 allocated by the caller, with alloca.)
4850 INSN is the insn that sets it.
4851 BENEFIT estimates the savings from deleting this insn.
4852 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
4853 into a register or is used as a memory address.
4854
4855 SRC_REG is the biv reg which the giv is computed from.
4856 DEST_REG is the giv's reg (if the giv is stored in a reg).
4857 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
4858 LOCATION points to the place where this giv's value appears in INSN. */
4859
4860static void
4861record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit,
4862 type, not_every_iteration, location, loop_start, loop_end)
4863 struct induction *v;
4864 rtx insn;
4865 rtx src_reg;
4866 rtx dest_reg;
4867 rtx mult_val, add_val;
4868 int benefit;
4869 enum g_types type;
4870 int not_every_iteration;
4871 rtx *location;
4872 rtx loop_start, loop_end;
4873{
4874 struct induction *b;
4875 struct iv_class *bl;
4876 rtx set = single_set (insn);
4877
4878 v->insn = insn;
4879 v->src_reg = src_reg;
4880 v->giv_type = type;
4881 v->dest_reg = dest_reg;
4882 v->mult_val = mult_val;
4883 v->add_val = add_val;
4884 v->benefit = benefit;
4885 v->location = location;
4886 v->cant_derive = 0;
4887 v->combined_with = 0;
7dcd3836 4888 v->maybe_multiple = 0;
4889 v->maybe_dead = 0;
4890 v->derive_adjustment = 0;
4891 v->same = 0;
4892 v->ignore = 0;
4893 v->new_reg = 0;
4894 v->final_value = 0;
f415f7be 4895 v->same_insn = 0;
8516af93 4896 v->auto_inc_opt = 0;
4897 v->unrolled = 0;
4898 v->shared = 0;
4899
4900 /* The v->always_computable field is used in update_giv_derive, to
4901 determine whether a giv can be used to derive another giv. For a
4902 DEST_REG giv, INSN computes a new value for the giv, so its value
4903 isn't computable if INSN isn't executed every iteration.
4904 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
4905 it does not compute a new value. Hence the value is always computable
d45cf215 4906 regardless of whether INSN is executed each iteration. */
b4ad7b23
RS
4907
4908 if (type == DEST_ADDR)
4909 v->always_computable = 1;
4910 else
4911 v->always_computable = ! not_every_iteration;
4912
8516af93
JW
4913 v->always_executed = ! not_every_iteration;
4914
b4ad7b23
RS
4915 if (type == DEST_ADDR)
4916 {
4917 v->mode = GET_MODE (*location);
4918 v->lifetime = 1;
4919 v->times_used = 1;
4920 }
4921 else /* type == DEST_REG */
4922 {
4923 v->mode = GET_MODE (SET_DEST (set));
4924
b1f21e0a
MM
4925 v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]
4926 - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]);
b4ad7b23 4927
8deb8e2c 4928 v->times_used = VARRAY_INT (n_times_used, REGNO (dest_reg));
b4ad7b23
RS
4929
4930 /* If the lifetime is zero, it means that this register is
4931 really a dead store. So mark this as a giv that can be
0f41302f 4932 ignored. This will not prevent the biv from being eliminated. */
b4ad7b23
RS
4933 if (v->lifetime == 0)
4934 v->ignore = 1;
4935
4936 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
4937 reg_iv_info[REGNO (dest_reg)] = v;
4938 }
4939
4940 /* Add the giv to the class of givs computed from one biv. */
4941
4942 bl = reg_biv_class[REGNO (src_reg)];
4943 if (bl)
4944 {
4945 v->next_iv = bl->giv;
4946 bl->giv = v;
4947 /* Don't count DEST_ADDR. This is supposed to count the number of
4948 insns that calculate givs. */
4949 if (type == DEST_REG)
4950 bl->giv_count++;
4951 bl->total_benefit += benefit;
4952 }
4953 else
4954 /* Fatal error, biv missing for this giv? */
4955 abort ();
4956
4957 if (type == DEST_ADDR)
4958 v->replaceable = 1;
4959 else
4960 {
4961 /* The giv can be replaced outright by the reduced register only if all
4962 of the following conditions are true:
4963 - the insn that sets the giv is always executed on any iteration
4964 on which the giv is used at all
4965 (there are two ways to deduce this:
4966 either the insn is executed on every iteration,
4967 or all uses follow that insn in the same basic block),
4968 - the giv is not used outside the loop
4969 - no assignments to the biv occur during the giv's lifetime. */
4970
b1f21e0a 4971 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
b4ad7b23 4972 /* Previous line always fails if INSN was moved by loop opt. */
b1f21e0a 4973 && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end)
b4ad7b23
RS
4974 && (! not_every_iteration
4975 || last_use_this_basic_block (dest_reg, insn)))
4976 {
4977 /* Now check that there are no assignments to the biv within the
4978 giv's lifetime. This requires two separate checks. */
4979
4980 /* Check each biv update, and fail if any are between the first
4981 and last use of the giv.
4982
4983 If this loop contains an inner loop that was unrolled, then
4984 the insn modifying the biv may have been emitted by the loop
4985 unrolling code, and hence does not have a valid luid. Just
4986 mark the biv as not replaceable in this case. It is not very
4987 useful as a biv, because it is used in two different loops.
4988 It is very unlikely that we would be able to optimize the giv
4989 using this biv anyway. */
4990
4991 v->replaceable = 1;
4992 for (b = bl->biv; b; b = b->next_iv)
4993 {
4994 if (INSN_UID (b->insn) >= max_uid_for_loop
4995 || ((uid_luid[INSN_UID (b->insn)]
b1f21e0a 4996 >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))])
b4ad7b23 4997 && (uid_luid[INSN_UID (b->insn)]
b1f21e0a 4998 <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))])))
b4ad7b23
RS
4999 {
5000 v->replaceable = 0;
5001 v->not_replaceable = 1;
5002 break;
5003 }
5004 }
5005
5031afa7
JW
5006 /* If there are any backwards branches that go from after the
5007 biv update to before it, then this giv is not replaceable. */
b4ad7b23 5008 if (v->replaceable)
5031afa7
JW
5009 for (b = bl->biv; b; b = b->next_iv)
5010 if (back_branch_in_range_p (b->insn, loop_start, loop_end))
5011 {
5012 v->replaceable = 0;
5013 v->not_replaceable = 1;
5014 break;
5015 }
b4ad7b23
RS
5016 }
5017 else
5018 {
5019 /* May still be replaceable, we don't have enough info here to
5020 decide. */
5021 v->replaceable = 0;
5022 v->not_replaceable = 0;
5023 }
5024 }
5025
45f97e2e
RH
5026 /* Record whether the add_val contains a const_int, for later use by
5027 combine_givs. */
5028 {
5029 rtx tem = add_val;
5030
5031 v->no_const_addval = 1;
5032 if (tem == const0_rtx)
5033 ;
5034 else if (GET_CODE (tem) == CONST_INT)
5035 v->no_const_addval = 0;
5036 else if (GET_CODE (tem) == PLUS)
5037 {
5038 while (1)
5039 {
5040 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5041 tem = XEXP (tem, 0);
5042 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5043 tem = XEXP (tem, 1);
5044 else
5045 break;
5046 }
5047 if (GET_CODE (XEXP (tem, 1)) == CONST_INT)
5048 v->no_const_addval = 0;
5049 }
5050 }
5051
b4ad7b23
RS
5052 if (loop_dump_stream)
5053 {
5054 if (type == DEST_REG)
5055 fprintf (loop_dump_stream, "Insn %d: giv reg %d",
5056 INSN_UID (insn), REGNO (dest_reg));
5057 else
5058 fprintf (loop_dump_stream, "Insn %d: dest address",
5059 INSN_UID (insn));
5060
5061 fprintf (loop_dump_stream, " src reg %d benefit %d",
5062 REGNO (src_reg), v->benefit);
5063 fprintf (loop_dump_stream, " used %d lifetime %d",
5064 v->times_used, v->lifetime);
5065
5066 if (v->replaceable)
5067 fprintf (loop_dump_stream, " replaceable");
5068
45f97e2e
RH
5069 if (v->no_const_addval)
5070 fprintf (loop_dump_stream, " ncav");
5071
b4ad7b23 5072 if (GET_CODE (mult_val) == CONST_INT)
9ba7a303
JC
5073 {
5074 fprintf (loop_dump_stream, " mult ");
5075 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (mult_val));
5076 }
b4ad7b23
RS
5077 else
5078 {
5079 fprintf (loop_dump_stream, " mult ");
5080 print_rtl (loop_dump_stream, mult_val);
5081 }
5082
5083 if (GET_CODE (add_val) == CONST_INT)
9ba7a303
JC
5084 {
5085 fprintf (loop_dump_stream, " add ");
5086 fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (add_val));
5087 }
b4ad7b23
RS
5088 else
5089 {
5090 fprintf (loop_dump_stream, " add ");
5091 print_rtl (loop_dump_stream, add_val);
5092 }
5093 }
5094
5095 if (loop_dump_stream)
5096 fprintf (loop_dump_stream, "\n");
5097
5098}
5099
5100
5101/* All this does is determine whether a giv can be made replaceable because
5102 its final value can be calculated. This code can not be part of record_giv
5103 above, because final_giv_value requires that the number of loop iterations
5104 be known, and that can not be accurately calculated until after all givs
5105 have been identified. */
5106
5107static void
5108check_final_value (v, loop_start, loop_end)
5109 struct induction *v;
5110 rtx loop_start, loop_end;
5111{
5112 struct iv_class *bl;
5113 rtx final_value = 0;
b4ad7b23
RS
5114
5115 bl = reg_biv_class[REGNO (v->src_reg)];
5116
5117 /* DEST_ADDR givs will never reach here, because they are always marked
5118 replaceable above in record_giv. */
5119
5120 /* The giv can be replaced outright by the reduced register only if all
5121 of the following conditions are true:
5122 - the insn that sets the giv is always executed on any iteration
5123 on which the giv is used at all
5124 (there are two ways to deduce this:
5125 either the insn is executed on every iteration,
5126 or all uses follow that insn in the same basic block),
5127 - its final value can be calculated (this condition is different
5128 than the one above in record_giv)
5129 - no assignments to the biv occur during the giv's lifetime. */
5130
5131#if 0
5132 /* This is only called now when replaceable is known to be false. */
5133 /* Clear replaceable, so that it won't confuse final_giv_value. */
5134 v->replaceable = 0;
5135#endif
5136
5137 if ((final_value = final_giv_value (v, loop_start, loop_end))
5138 && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn)))
5139 {
5140 int biv_increment_seen = 0;
5141 rtx p = v->insn;
5142 rtx last_giv_use;
5143
5144 v->replaceable = 1;
5145
5146 /* When trying to determine whether or not a biv increment occurs
5147 during the lifetime of the giv, we can ignore uses of the variable
5148 outside the loop because final_value is true. Hence we can not
5149 use regno_last_uid and regno_first_uid as above in record_giv. */
5150
5151 /* Search the loop to determine whether any assignments to the
5152 biv occur during the giv's lifetime. Start with the insn
5153 that sets the giv, and search around the loop until we come
5154 back to that insn again.
5155
5156 Also fail if there is a jump within the giv's lifetime that jumps
5157 to somewhere outside the lifetime but still within the loop. This
5158 catches spaghetti code where the execution order is not linear, and
5159 hence the above test fails. Here we assume that the giv lifetime
5160 does not extend from one iteration of the loop to the next, so as
5161 to make the test easier. Since the lifetime isn't known yet,
5162 this requires two loops. See also record_giv above. */
5163
5164 last_giv_use = v->insn;
5165
5166 while (1)
5167 {
5168 p = NEXT_INSN (p);
5169 if (p == loop_end)
5170 p = NEXT_INSN (loop_start);
5171 if (p == v->insn)
5172 break;
5173
5174 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5175 || GET_CODE (p) == CALL_INSN)
5176 {
5177 if (biv_increment_seen)
5178 {
5179 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5180 {
5181 v->replaceable = 0;
5182 v->not_replaceable = 1;
5183 break;
5184 }
5185 }
c5da853f 5186 else if (reg_set_p (v->src_reg, PATTERN (p)))
b4ad7b23
RS
5187 biv_increment_seen = 1;
5188 else if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
5189 last_giv_use = p;
5190 }
5191 }
5192
5193 /* Now that the lifetime of the giv is known, check for branches
5194 from within the lifetime to outside the lifetime if it is still
5195 replaceable. */
5196
5197 if (v->replaceable)
5198 {
5199 p = v->insn;
5200 while (1)
5201 {
5202 p = NEXT_INSN (p);
5203 if (p == loop_end)
5204 p = NEXT_INSN (loop_start);
5205 if (p == last_giv_use)
5206 break;
5207
5208 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
5209 && LABEL_NAME (JUMP_LABEL (p))
6217f613
RK
5210 && ((INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop)
5211 || (INSN_UID (v->insn) >= max_uid_for_loop)
5212 || (INSN_UID (last_giv_use) >= max_uid_for_loop)
5213 || (INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (v->insn)
5214 && INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop_start))
b4ad7b23
RS
5215 || (INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (last_giv_use)
5216 && INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop_end))))
5217 {
5218 v->replaceable = 0;
5219 v->not_replaceable = 1;
5220
5221 if (loop_dump_stream)
5222 fprintf (loop_dump_stream,
5223 "Found branch outside giv lifetime.\n");
5224
5225 break;
5226 }
5227 }
5228 }
5229
5230 /* If it is replaceable, then save the final value. */
5231 if (v->replaceable)
5232 v->final_value = final_value;
5233 }
5234
5235 if (loop_dump_stream && v->replaceable)
5236 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
5237 INSN_UID (v->insn), REGNO (v->dest_reg));
5238}
5239\f
5240/* Update the status of whether a giv can derive other givs.
5241
5242 We need to do something special if there is or may be an update to the biv
5243 between the time the giv is defined and the time it is used to derive
5244 another giv.
5245
5246 In addition, a giv that is only conditionally set is not allowed to
5247 derive another giv once a label has been passed.
5248
5249 The cases we look at are when a label or an update to a biv is passed. */
5250
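/* For instance, if a giv G was recorded as G = B * 3 and the biv
   update B = B + 2 is then passed, any giv later derived from G must
   be compensated by 3 * 2 == 6; that product is what accumulates in
   derive_adjustment below.  If the product cannot be formed, we set
   cant_derive instead.  */
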
5251static void
5252update_giv_derive (p)
5253 rtx p;
5254{
5255 struct iv_class *bl;
5256 struct induction *biv, *giv;
5257 rtx tem;
5258 int dummy;
5259
5260 /* Search all IV classes, then all bivs, and finally all givs.
5261
7dcd3836 5262 There are three cases we are concerned with. First we have the situation
b4ad7b23
RS
5263 of a giv that is only updated conditionally. In that case, it may not
5264 derive any givs after a label is passed.
5265
5266 The second case is when a biv update occurs, or may occur, after the
5267 definition of a giv. For certain biv updates (see below) that are
5268 known to occur between the giv definition and use, we can adjust the
5269 giv definition. For others, or when the biv update is conditional,
5270 we must prevent the giv from deriving any other givs. There are two
5271 sub-cases within this case.
5272
5273 If this is a label, we are concerned with any biv update that is done
5274 conditionally, since it may be done after the giv is defined followed by
5275 a branch here (actually, we need to pass both a jump and a label, but
5276 this extra tracking doesn't seem worth it).
5277
7dcd3836
RK
5278 If this is a jump, we are concerned about any biv update that may be
5279 executed multiple times. We are actually only concerned about
5280 backward jumps, but it is probably not worth performing the test
5281 on the jump again here.
5282
5283 If this is a biv update, we must adjust the giv status to show that a
b4ad7b23
RS
5284 subsequent biv update was performed. If this adjustment cannot be done,
5285 the giv cannot derive further givs. */
5286
5287 for (bl = loop_iv_list; bl; bl = bl->next)
5288 for (biv = bl->biv; biv; biv = biv->next_iv)
7dcd3836
RK
5289 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
5290 || biv->insn == p)
b4ad7b23
RS
5291 {
5292 for (giv = bl->giv; giv; giv = giv->next_iv)
5293 {
5294 /* If cant_derive is already true, there is no point in
5295 checking all of these conditions again. */
5296 if (giv->cant_derive)
5297 continue;
5298
5299 /* If this giv is conditionally set and we have passed a label,
5300 it cannot derive anything. */
5301 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
5302 giv->cant_derive = 1;
5303
5304 /* Skip givs that have mult_val == 0, since
5305 they are really invariants. Also skip those that are
5306 replaceable, since we know their lifetime doesn't contain
5307 any biv update. */
5308 else if (giv->mult_val == const0_rtx || giv->replaceable)
5309 continue;
5310
5311 /* The only way we can allow this giv to derive another
5312 is if this is a biv increment and we can form the product
5313 of biv->add_val and giv->mult_val. In this case, we will
5314 be able to compute a compensation. */
5315 else if (biv->insn == p)
5316 {
c160c628
RK
5317 tem = 0;
5318
5319 if (biv->mult_val == const1_rtx)
38a448ca
RH
5320 tem = simplify_giv_expr (gen_rtx_MULT (giv->mode,
5321 biv->add_val,
5322 giv->mult_val),
c160c628
RK
5323 &dummy);
5324
5325 if (tem && giv->derive_adjustment)
38a448ca
RH
5326 tem = simplify_giv_expr (gen_rtx_PLUS (giv->mode, tem,
5327 giv->derive_adjustment),
c160c628
RK
5328 &dummy);
5329 if (tem)
b4ad7b23
RS
5330 giv->derive_adjustment = tem;
5331 else
5332 giv->cant_derive = 1;
5333 }
7dcd3836
RK
5334 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
5335 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
b4ad7b23
RS
5336 giv->cant_derive = 1;
5337 }
5338 }
5339}
5340\f
5341/* Check whether an insn is an increment legitimate for a basic induction var.
7056f7e8
RS
5342 X is the source of insn P, or a part of it.
5343 MODE is the mode in which X should be interpreted.
5344
b4ad7b23
RS
5345 DEST_REG is the putative biv, also the destination of the insn.
5346 We accept patterns of these forms:
09d7f5a5 5347 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
b4ad7b23 5348 REG = INVARIANT + REG
b4ad7b23
RS
5349
5350 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
5351 and store the additive term into *INC_VAL.
5352
5353 If X is an assignment of an invariant into DEST_REG, we set
5354 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
5355
09d7f5a5
RK
5356 We also want to detect a BIV when it corresponds to a variable
5357 whose mode was promoted via PROMOTED_MODE. In that case, an increment
5358 of the variable may be a PLUS that adds a SUBREG of that variable to
5359 an invariant and then sign- or zero-extends the result of the PLUS
5360 into the variable.
5361
5362 Most GIVs in such cases will be in the promoted mode, since that is
5363 probably the natural computation mode (and almost certainly the mode
5364 used for addresses) on the machine. So we view the pseudo-reg containing
5365 the variable as the BIV, as if it were simply incremented.
5366
5367 Note that treating the entire pseudo as a BIV will result in making
5368 simple increments to any GIVs based on it. However, if the variable
5369 overflows in its declared mode but not its promoted mode, the result will
5370 be incorrect. This is acceptable if the variable is signed, since
5371 overflows in such cases are undefined, but not if it is unsigned, since
5372 those overflows are defined. So we only check for SIGN_EXTEND and
5373 not ZERO_EXTEND.
5374
5375 If we cannot find a biv, we return 0. */
b4ad7b23
RS
5376
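/* For instance, in a loop such as

	for (i = 0; i < n; i++)
	  ...

   the increment of `i' has the form REG = REG + INVARIANT with
   INVARIANT == 1, so `i' is recognized as a biv with *MULT_VAL set to
   const1_rtx and *INC_VAL set to 1, while the initialization `i = 0'
   matches the invariant-assignment case and yields
   *MULT_VAL == const0_rtx with *INC_VAL == 0.  */
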
5377static int
7056f7e8 5378basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val)
b4ad7b23 5379 register rtx x;
7056f7e8 5380 enum machine_mode mode;
09d7f5a5 5381 rtx p;
b4ad7b23
RS
5382 rtx dest_reg;
5383 rtx *inc_val;
5384 rtx *mult_val;
5385{
5386 register enum rtx_code code;
5387 rtx arg;
09d7f5a5 5388 rtx insn, set = 0;
b4ad7b23
RS
5389
5390 code = GET_CODE (x);
5391 switch (code)
5392 {
5393 case PLUS:
45f97e2e 5394 if (rtx_equal_p (XEXP (x, 0), dest_reg)
09d7f5a5
RK
5395 || (GET_CODE (XEXP (x, 0)) == SUBREG
5396 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
5397 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
b4ad7b23 5398 arg = XEXP (x, 1);
45f97e2e 5399 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
09d7f5a5 5400 || (GET_CODE (XEXP (x, 1)) == SUBREG
b81fd0f4
RS
5401 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
5402 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
b4ad7b23
RS
5403 arg = XEXP (x, 0);
5404 else
5405 return 0;
5406
5407 if (invariant_p (arg) != 1)
5408 return 0;
5409
7056f7e8 5410 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
b4ad7b23
RS
5411 *mult_val = const1_rtx;
5412 return 1;
5413
09d7f5a5
RK
5414 case SUBREG:
5415 /* If this is a SUBREG for a promoted variable, check the inner
5416 value. */
5417 if (SUBREG_PROMOTED_VAR_P (x))
7056f7e8
RS
5418 return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)),
5419 dest_reg, p, inc_val, mult_val);
fe159061 5420 return 0;
b4ad7b23 5421
09d7f5a5 5422 case REG:
45f97e2e 5423 /* If this register is assigned in a previous insn, look at its
09d7f5a5
RK
5424 source, but don't go outside the loop or past a label. */
5425
45f97e2e
RH
5426 insn = p;
5427 while (1)
5428 {
5429 do {
5430 insn = PREV_INSN (insn);
5431 } while (insn && GET_CODE (insn) == NOTE
5432 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
09d7f5a5 5433
45f97e2e
RH
5434 if (!insn)
5435 break;
5436 set = single_set (insn);
5437 if (set == 0)
5438 break;
09d7f5a5 5439
45f97e2e
RH
5440 if ((SET_DEST (set) == x
5441 || (GET_CODE (SET_DEST (set)) == SUBREG
5442 && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
5443 <= UNITS_PER_WORD)
5444 && SUBREG_REG (SET_DEST (set)) == x))
5445 && basic_induction_var (SET_SRC (set),
5446 (GET_MODE (SET_SRC (set)) == VOIDmode
5447 ? GET_MODE (x)
5448 : GET_MODE (SET_SRC (set))),
5449 dest_reg, insn,
5450 inc_val, mult_val))
5451 return 1;
5452 }
0f41302f 5453 /* ... fall through ... */
b4ad7b23
RS
5454
5455 /* Can accept constant setting of biv only when inside the innermost loop.
5456 Otherwise, a biv of an inner loop may be incorrectly recognized
5457 as a biv of the outer loop,
5458 causing code to be moved INTO the inner loop. */
5459 case MEM:
b4ad7b23
RS
5460 if (invariant_p (x) != 1)
5461 return 0;
5462 case CONST_INT:
5463 case SYMBOL_REF:
5464 case CONST:
829002bb
BM
5465 /* convert_modes aborts if we try to convert to or from CCmode, so just
5466 exclude that case. It is very unlikely that a condition code value
5467 would be a useful iterator anyway. */
5468 if (loops_enclosed == 1
5469 && GET_MODE_CLASS (mode) != MODE_CC
5470 && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC)
b4ad7b23 5471 {
7056f7e8
RS
5472 /* Possible bug here? Perhaps we don't know the mode of X. */
5473 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
b4ad7b23
RS
5474 *mult_val = const0_rtx;
5475 return 1;
5476 }
5477 else
5478 return 0;
5479
09d7f5a5 5480 case SIGN_EXTEND:
7056f7e8
RS
5481 return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5482 dest_reg, p, inc_val, mult_val);
45f97e2e 5483
09d7f5a5
RK
5484 case ASHIFTRT:
5485 /* Similar, since this can be a sign extension. */
5486 for (insn = PREV_INSN (p);
5487 (insn && GET_CODE (insn) == NOTE
5488 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
5489 insn = PREV_INSN (insn))
5490 ;
5491
5492 if (insn)
5493 set = single_set (insn);
5494
5495 if (set && SET_DEST (set) == XEXP (x, 0)
5496 && GET_CODE (XEXP (x, 1)) == CONST_INT
5497 && INTVAL (XEXP (x, 1)) >= 0
5498 && GET_CODE (SET_SRC (set)) == ASHIFT
5499 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
7056f7e8
RS
5500 return basic_induction_var (XEXP (SET_SRC (set), 0),
5501 GET_MODE (XEXP (x, 0)),
5502 dest_reg, insn, inc_val, mult_val);
09d7f5a5
RK
5503 return 0;
5504
b4ad7b23
RS
5505 default:
5506 return 0;
5507 }
5508}
5509\f
5510/* A general induction variable (giv) is any quantity that is a linear
5511 function of a basic induction variable,
5512 i.e. giv = biv * mult_val + add_val.
5513 The coefficients can be any loop invariant quantity.
5514 A giv need not be computed directly from the biv;
5515 it can be computed by way of other givs. */
5516
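/* For instance, given source code such as

	for (i = 0; i < n; i++)
	  x[i] = 0;

   and assuming 4-byte array elements, the address computation
   `x + i*4' is a giv of the biv `i', with mult_val == 4 and
   add_val == the (loop-invariant) address of `x'.  */
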
5517/* Determine whether X computes a giv.
5518 If it does, return a nonzero value
5519 which is the benefit from eliminating the computation of X;
5520 set *SRC_REG to the register of the biv that it is computed from;
5521 set *ADD_VAL and *MULT_VAL to the coefficients,
5522 such that the value of X is biv * mult + add. */
5523
5524static int
45f97e2e 5525general_induction_var (x, src_reg, add_val, mult_val, is_addr, pbenefit)
b4ad7b23
RS
5526 rtx x;
5527 rtx *src_reg;
5528 rtx *add_val;
5529 rtx *mult_val;
45f97e2e
RH
5530 int is_addr;
5531 int *pbenefit;
b4ad7b23
RS
5532{
5533 rtx orig_x = x;
b4ad7b23
RS
5534 char *storage;
5535
5536 /* If this is an invariant, forget it, it isn't a giv. */
5537 if (invariant_p (x) == 1)
5538 return 0;
5539
5540 /* See if the expression could be a giv and get its form.
5541 Mark our place on the obstack in case we don't find a giv. */
5542 storage = (char *) oballoc (0);
45f97e2e
RH
5543 *pbenefit = 0;
5544 x = simplify_giv_expr (x, pbenefit);
b4ad7b23
RS
5545 if (x == 0)
5546 {
5547 obfree (storage);
5548 return 0;
5549 }
5550
5551 switch (GET_CODE (x))
5552 {
5553 case USE:
5554 case CONST_INT:
5555 /* Since this is now an invariant and wasn't before, it must be a giv
5556 with MULT_VAL == 0. It doesn't matter which BIV we associate this
5557 with. */
5558 *src_reg = loop_iv_list->biv->dest_reg;
5559 *mult_val = const0_rtx;
5560 *add_val = x;
5561 break;
5562
5563 case REG:
5564 /* This is equivalent to a BIV. */
5565 *src_reg = x;
5566 *mult_val = const1_rtx;
5567 *add_val = const0_rtx;
5568 break;
5569
5570 case PLUS:
5571 /* Either (plus (biv) (invar)) or
5572 (plus (mult (biv) (invar_1)) (invar_2)). */
5573 if (GET_CODE (XEXP (x, 0)) == MULT)
5574 {
5575 *src_reg = XEXP (XEXP (x, 0), 0);
5576 *mult_val = XEXP (XEXP (x, 0), 1);
5577 }
5578 else
5579 {
5580 *src_reg = XEXP (x, 0);
5581 *mult_val = const1_rtx;
5582 }
5583 *add_val = XEXP (x, 1);
5584 break;
5585
5586 case MULT:
5587 /* ADD_VAL is zero. */
5588 *src_reg = XEXP (x, 0);
5589 *mult_val = XEXP (x, 1);
5590 *add_val = const0_rtx;
5591 break;
5592
5593 default:
5594 abort ();
5595 }
5596
5597 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
5598 one unless they are CONST_INT). */
5599 if (GET_CODE (*add_val) == USE)
5600 *add_val = XEXP (*add_val, 0);
5601 if (GET_CODE (*mult_val) == USE)
5602 *mult_val = XEXP (*mult_val, 0);
5603
45f97e2e
RH
5604 if (is_addr)
5605 {
5606#ifdef ADDRESS_COST
5607 *pbenefit += ADDRESS_COST (orig_x) - reg_address_cost;
5608#else
5609 *pbenefit += rtx_cost (orig_x, MEM) - reg_address_cost;
5610#endif
5611 }
5612 else
5613 *pbenefit += rtx_cost (orig_x, SET);
b4ad7b23 5614
45f97e2e
RH
5615 /* Always return true if this is a giv so it will be detected as such,
5616 even if the benefit is zero or negative. This allows elimination
5617 of bivs that might otherwise not be eliminated. */
5618 return 1;
b4ad7b23
RS
5619}
5620\f
5621/* Given an expression, X, try to form it as a linear function of a biv.
5622 We will canonicalize it to be of the form
5623 (plus (mult (BIV) (invar_1))
5624 (invar_2))
c5b7917e 5625 with possible degeneracies.
b4ad7b23
RS
5626
5627 The invariant expressions must each be of a form that can be used as a
5628 machine operand. We surround them with a USE rtx (a hack, but localized
5629 and certainly unambiguous!) if not a CONST_INT for simplicity in this
5630 routine; it is the caller's responsibility to strip them.
5631
5632 If no such canonicalization is possible (i.e., two biv's are used or an
5633 expression that is neither invariant nor a biv or giv), this routine
5634 returns 0.
5635
5636 For a non-zero return, the result will have a code of CONST_INT, USE,
5637 REG (for a BIV), PLUS, or MULT. No other codes will occur.
5638
5639 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
5640
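/* For instance, if `i' is a biv, then (mult (plus i 3) 4) distributes
   and simplifies to

	(plus (mult i 4) 12)

   since (i + 3) * 4 == i*4 + 12; both invariant operands are
   CONST_INTs here, so no USE wrapper is needed.  */
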
45f97e2e
RH
5641static rtx sge_plus PROTO ((enum machine_mode, rtx, rtx));
5642static rtx sge_plus_constant PROTO ((rtx, rtx));
5643
b4ad7b23
RS
5644static rtx
5645simplify_giv_expr (x, benefit)
5646 rtx x;
5647 int *benefit;
5648{
5649 enum machine_mode mode = GET_MODE (x);
5650 rtx arg0, arg1;
5651 rtx tem;
5652
5653 /* If this is not an integer mode, or if we cannot do arithmetic in this
5654 mode, this can't be a giv. */
5655 if (mode != VOIDmode
5656 && (GET_MODE_CLASS (mode) != MODE_INT
5fd8383e 5657 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
45f97e2e 5658 return NULL_RTX;
b4ad7b23
RS
5659
5660 switch (GET_CODE (x))
5661 {
5662 case PLUS:
5663 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5664 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5665 if (arg0 == 0 || arg1 == 0)
45f97e2e 5666 return NULL_RTX;
b4ad7b23
RS
5667
5668 /* Put constant last, CONST_INT last if both constant. */
5669 if ((GET_CODE (arg0) == USE
5670 || GET_CODE (arg0) == CONST_INT)
45f97e2e
RH
5671 && ! ((GET_CODE (arg0) == USE
5672 && GET_CODE (arg1) == USE)
5673 || GET_CODE (arg1) == CONST_INT))
b4ad7b23
RS
5674 tem = arg0, arg0 = arg1, arg1 = tem;
5675
5676 /* Handle addition of zero, then addition of an invariant. */
5677 if (arg1 == const0_rtx)
5678 return arg0;
5679 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
5680 switch (GET_CODE (arg0))
5681 {
5682 case CONST_INT:
5683 case USE:
45f97e2e
RH
5684 /* Adding two invariants must result in an invariant, so enclose
5685 the addition operation inside a USE and return it. */
b4ad7b23
RS
5686 if (GET_CODE (arg0) == USE)
5687 arg0 = XEXP (arg0, 0);
da0af5a5
JL
5688 if (GET_CODE (arg1) == USE)
5689 arg1 = XEXP (arg1, 0);
5690
45f97e2e
RH
5691 if (GET_CODE (arg0) == CONST_INT)
5692 tem = arg0, arg0 = arg1, arg1 = tem;
5693 if (GET_CODE (arg1) == CONST_INT)
5694 tem = sge_plus_constant (arg0, arg1);
da0af5a5 5695 else
45f97e2e 5696 tem = sge_plus (mode, arg0, arg1);
b4ad7b23 5697
45f97e2e
RH
5698 if (GET_CODE (tem) != CONST_INT)
5699 tem = gen_rtx_USE (mode, tem);
b4ad7b23
RS
5700 return tem;
5701
5702 case REG:
5703 case MULT:
5704 /* biv + invar or mult + invar. Return sum. */
38a448ca 5705 return gen_rtx_PLUS (mode, arg0, arg1);
b4ad7b23
RS
5706
5707 case PLUS:
5708 /* (a + invar_1) + invar_2. Associate. */
45f97e2e
RH
5709 return simplify_giv_expr (
5710 gen_rtx_PLUS (mode, XEXP (arg0, 0),
5711 gen_rtx_PLUS (mode, XEXP (arg0, 1), arg1)),
5712 benefit);
b4ad7b23
RS
5713
5714 default:
5715 abort ();
5716 }
5717
5718 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
5719 MULT to reduce cases. */
5720 if (GET_CODE (arg0) == REG)
38a448ca 5721 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
b4ad7b23 5722 if (GET_CODE (arg1) == REG)
38a448ca 5723 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
b4ad7b23
RS
5724
5725 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
5726 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
5727 Recurse to associate the second PLUS. */
5728 if (GET_CODE (arg1) == MULT)
5729 tem = arg0, arg0 = arg1, arg1 = tem;
5730
5731 if (GET_CODE (arg1) == PLUS)
38a448ca
RH
5732 return simplify_giv_expr (gen_rtx_PLUS (mode,
5733 gen_rtx_PLUS (mode, arg0,
5734 XEXP (arg1, 0)),
5735 XEXP (arg1, 1)),
b4ad7b23
RS
5736 benefit);
5737
5738 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
5739 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
45f97e2e 5740 return NULL_RTX;
b4ad7b23 5741
45f97e2e
RH
5742 if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
5743 return NULL_RTX;
b4ad7b23 5744
38a448ca
RH
5745 return simplify_giv_expr (gen_rtx_MULT (mode,
5746 XEXP (arg0, 0),
5747 gen_rtx_PLUS (mode,
5748 XEXP (arg0, 1),
5749 XEXP (arg1, 1))),
b4ad7b23
RS
5750 benefit);
5751
5752 case MINUS:
0f41302f 5753 /* Handle "a - b" as "a + b * (-1)". */
38a448ca
RH
5754 return simplify_giv_expr (gen_rtx_PLUS (mode,
5755 XEXP (x, 0),
5756 gen_rtx_MULT (mode, XEXP (x, 1),
5757 constm1_rtx)),
b4ad7b23
RS
5758 benefit);
5759
5760 case MULT:
5761 arg0 = simplify_giv_expr (XEXP (x, 0), benefit);
5762 arg1 = simplify_giv_expr (XEXP (x, 1), benefit);
5763 if (arg0 == 0 || arg1 == 0)
45f97e2e 5764 return NULL_RTX;
b4ad7b23
RS
5765
5766 /* Put constant last, CONST_INT last if both constant. */
5767 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
5768 && GET_CODE (arg1) != CONST_INT)
5769 tem = arg0, arg0 = arg1, arg1 = tem;
5770
5771 /* If second argument is not now constant, not giv. */
5772 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
45f97e2e 5773 return NULL_RTX;
b4ad7b23
RS
5774
5775 /* Handle multiply by 0 or 1. */
5776 if (arg1 == const0_rtx)
5777 return const0_rtx;
5778
5779 else if (arg1 == const1_rtx)
5780 return arg0;
5781
5782 switch (GET_CODE (arg0))
5783 {
5784 case REG:
5785 /* biv * invar. Done. */
38a448ca 5786 return gen_rtx_MULT (mode, arg0, arg1);
b4ad7b23
RS
5787
5788 case CONST_INT:
5789 /* Product of two constants. */
5fd8383e 5790 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
b4ad7b23
RS
5791
5792 case USE:
45f97e2e
RH
5793 /* invar * invar. It is a giv, but very few of these will
5794 actually pay off, so limit to simple registers. */
5795 if (GET_CODE (arg1) != CONST_INT)
5796 return NULL_RTX;
5797
5798 arg0 = XEXP (arg0, 0);
5799 if (GET_CODE (arg0) == REG)
5800 tem = gen_rtx_MULT (mode, arg0, arg1);
5801 else if (GET_CODE (arg0) == MULT
5802 && GET_CODE (XEXP (arg0, 0)) == REG
5803 && GET_CODE (XEXP (arg0, 1)) == CONST_INT)
5804 {
5805 tem = gen_rtx_MULT (mode, XEXP (arg0, 0),
5806 GEN_INT (INTVAL (XEXP (arg0, 1))
5807 * INTVAL (arg1)));
5808 }
5809 else
5810 return NULL_RTX;
5811 return gen_rtx_USE (mode, tem);
b4ad7b23
RS
5812
5813 case MULT:
5814 /* (a * invar_1) * invar_2. Associate. */
38a448ca
RH
5815 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (arg0, 0),
5816 gen_rtx_MULT (mode,
5817 XEXP (arg0, 1),
5818 arg1)),
b4ad7b23
RS
5819 benefit);
5820
5821 case PLUS:
5822 /* (a + invar_1) * invar_2. Distribute. */
38a448ca
RH
5823 return simplify_giv_expr (gen_rtx_PLUS (mode,
5824 gen_rtx_MULT (mode,
5825 XEXP (arg0, 0),
5826 arg1),
5827 gen_rtx_MULT (mode,
5828 XEXP (arg0, 1),
5829 arg1)),
b4ad7b23
RS
5830 benefit);
5831
5832 default:
5833 abort ();
5834 }
5835
5836 case ASHIFT:
b4ad7b23
RS
5837 /* Shift by constant is multiply by power of two. */
5838 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5839 return 0;
5840
38a448ca
RH
5841 return simplify_giv_expr (gen_rtx_MULT (mode,
5842 XEXP (x, 0),
5843 GEN_INT ((HOST_WIDE_INT) 1
5844 << INTVAL (XEXP (x, 1)))),
b4ad7b23
RS
5845 benefit);
5846
5847 case NEG:
5848 /* "-a" is "a * (-1)" */
38a448ca 5849 return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
b4ad7b23
RS
5850 benefit);
5851
5852 case NOT:
5853 /* "~a" is "-a - 1". Silly, but easy. */
38a448ca
RH
5854 return simplify_giv_expr (gen_rtx_MINUS (mode,
5855 gen_rtx_NEG (mode, XEXP (x, 0)),
5856 const1_rtx),
b4ad7b23
RS
5857 benefit);
5858
5859 case USE:
5860 /* Already in proper form for invariant. */
5861 return x;
5862
5863 case REG:
5864 /* If this is a new register, we can't deal with it. */
5865 if (REGNO (x) >= max_reg_before_loop)
5866 return 0;
5867
5868 /* Check for biv or giv. */
5869 switch (reg_iv_type[REGNO (x)])
5870 {
5871 case BASIC_INDUCT:
5872 return x;
5873 case GENERAL_INDUCT:
5874 {
5875 struct induction *v = reg_iv_info[REGNO (x)];
5876
5877 /* Form expression from giv and add benefit. Ensure this giv
5878 can derive another and subtract any needed adjustment if so. */
5879 *benefit += v->benefit;
5880 if (v->cant_derive)
5881 return 0;
5882
38a448ca
RH
5883 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, v->src_reg,
5884 v->mult_val),
b4ad7b23
RS
5885 v->add_val);
5886 if (v->derive_adjustment)
38a448ca 5887 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
b4ad7b23
RS
5888 return simplify_giv_expr (tem, benefit);
5889 }
e9a25f70
JL
5890
5891 default:
45f97e2e
RH
5892 /* If it isn't an induction variable, and it is invariant, we
5893 may be able to simplify things further by looking through
5894 the bits we just moved outside the loop. */
5895 if (invariant_p (x) == 1)
5896 {
5897 struct movable *m;
5898
5899 for (m = the_movables; m ; m = m->next)
5900 if (rtx_equal_p (x, m->set_dest))
5901 {
5902 /* Ok, we found a match. Substitute and simplify. */
5903
5904 /* If we match another movable, we must use that, as
5905 this one is going away. */
5906 if (m->match)
5907 return simplify_giv_expr (m->match->set_dest, benefit);
5908
5909 /* If consec is non-zero, this is a member of a group of
5910 instructions that were moved together. We handle this
5911 case only to the point of seeking to the last insn and
5912 looking for a REG_EQUAL. Fail if we don't find one. */
5913 if (m->consec != 0)
5914 {
5915 int i = m->consec;
5916 tem = m->insn;
5917 do { tem = NEXT_INSN (tem); } while (--i > 0);
5918
5919 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
5920 if (tem)
5921 tem = XEXP (tem, 0);
5922 }
5923 else
5924 {
5925 tem = single_set (m->insn);
5926 if (tem)
5927 tem = SET_SRC (tem);
5928 }
5929
5930 if (tem)
5931 {
5932 /* What we are most interested in is pointer
5933 arithmetic on invariants -- only take
5934 patterns we may be able to do something with. */
5935 if (GET_CODE (tem) == PLUS
5936 || GET_CODE (tem) == MULT
5937 || GET_CODE (tem) == ASHIFT
5938 || GET_CODE (tem) == CONST_INT
5939 || GET_CODE (tem) == SYMBOL_REF)
5940 {
5941 tem = simplify_giv_expr (tem, benefit);
5942 if (tem)
5943 return tem;
5944 }
5945 else if (GET_CODE (tem) == CONST
5946 && GET_CODE (XEXP (tem, 0)) == PLUS
5947 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
5948 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
5949 {
5950 tem = simplify_giv_expr (XEXP (tem, 0), benefit);
5951 if (tem)
5952 return tem;
5953 }
5954 }
5955 break;
5956 }
5957 }
e9a25f70 5958 break;
b4ad7b23
RS
5959 }
5960
5961 /* Fall through to general case. */
5962 default:
5963 /* If invariant, return as USE (unless CONST_INT).
5964 Otherwise, not giv. */
5965 if (GET_CODE (x) == USE)
5966 x = XEXP (x, 0);
5967
5968 if (invariant_p (x) == 1)
5969 {
5970 if (GET_CODE (x) == CONST_INT)
5971 return x;
45f97e2e
RH
5972 if (GET_CODE (x) == CONST
5973 && GET_CODE (XEXP (x, 0)) == PLUS
5974 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
5975 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
5976 x = XEXP (x, 0);
5977 return gen_rtx_USE (mode, x);
b4ad7b23
RS
5978 }
5979 else
5980 return 0;
5981 }
5982}
45f97e2e
RH
5983
5984/* This routine folds invariants such that there is only ever one
5985 CONST_INT in the summation. It is only used by simplify_giv_expr. */
5986
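/* For instance, sge_plus_constant applied to (plus a (const_int 3))
   and (const_int 5) yields (plus a (const_int 8)) rather than nesting
   a second CONST_INT inside the sum.  */
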
5987static rtx
5988sge_plus_constant (x, c)
5989 rtx x, c;
5990{
5991 if (GET_CODE (x) == CONST_INT)
5992 return GEN_INT (INTVAL (x) + INTVAL (c));
5993 else if (GET_CODE (x) != PLUS)
5994 return gen_rtx_PLUS (GET_MODE (x), x, c);
5995 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5996 {
5997 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
5998 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
5999 }
6000 else if (GET_CODE (XEXP (x, 0)) == PLUS
6001 || GET_CODE (XEXP (x, 1)) != PLUS)
6002 {
6003 return gen_rtx_PLUS (GET_MODE (x),
6004 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6005 }
6006 else
6007 {
6008 return gen_rtx_PLUS (GET_MODE (x),
6009 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6010 }
6011}
6012
6013static rtx
6014sge_plus (mode, x, y)
6015 enum machine_mode mode;
6016 rtx x, y;
6017{
6018 while (GET_CODE (y) == PLUS)
6019 {
6020 rtx a = XEXP (y, 0);
6021 if (GET_CODE (a) == CONST_INT)
6022 x = sge_plus_constant (x, a);
6023 else
6024 x = gen_rtx_PLUS (mode, x, a);
6025 y = XEXP (y, 1);
6026 }
6027 if (GET_CODE (y) == CONST_INT)
6028 x = sge_plus_constant (x, y);
6029 else
6030 x = gen_rtx_PLUS (mode, x, y);
6031 return x;
6032}
b4ad7b23
RS
6033\f
6034/* Help detect a giv that is calculated by several consecutive insns;
6035 for example,
6036 giv = biv * M
6037 giv = giv + A
6038 The caller has already identified the first insn P as having a giv as dest;
6039 we check that all other insns that set the same register follow
6040 immediately after P, that they alter nothing else,
6041 and that the result of the last is still a giv.
6042
6043 The value is 0 if the reg set in P is not really a giv.
6044 Otherwise, the value is the amount gained by eliminating
6045 all the consecutive insns that compute the value.
6046
6047 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6048 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6049
6050 The coefficients of the ultimate giv value are stored in
6051 *MULT_VAL and *ADD_VAL. */
6052
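/* For instance, a source statement like `t = i * 4 + off' may be
   emitted as the two consecutive insns

	t = i * 4
	t = t + off

   and this routine verifies that the pair still computes a single
   giv, with *MULT_VAL == 4 and *ADD_VAL == off.  */
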
6053static int
6054consec_sets_giv (first_benefit, p, src_reg, dest_reg,
6055 add_val, mult_val)
6056 int first_benefit;
6057 rtx p;
6058 rtx src_reg;
6059 rtx dest_reg;
6060 rtx *add_val;
6061 rtx *mult_val;
6062{
6063 int count;
6064 enum rtx_code code;
6065 int benefit;
6066 rtx temp;
6067 rtx set;
6068
6069 /* Indicate that this is a giv so that we can update the value produced in
6070 each insn of the multi-insn sequence.
6071
6072 This induction structure will be used only by the call to
6073 general_induction_var below, so we can allocate it on our stack.
6074 If this is a giv, our caller will replace the induct var entry with
6075 a new induction structure. */
6076 struct induction *v
6077 = (struct induction *) alloca (sizeof (struct induction));
6078 v->src_reg = src_reg;
6079 v->mult_val = *mult_val;
6080 v->add_val = *add_val;
6081 v->benefit = first_benefit;
6082 v->cant_derive = 0;
6083 v->derive_adjustment = 0;
6084
6085 reg_iv_type[REGNO (dest_reg)] = GENERAL_INDUCT;
6086 reg_iv_info[REGNO (dest_reg)] = v;
6087
8deb8e2c 6088 count = VARRAY_INT (n_times_set, REGNO (dest_reg)) - 1;
b4ad7b23
RS
6089
6090 while (count > 0)
6091 {
6092 p = NEXT_INSN (p);
6093 code = GET_CODE (p);
6094
6095 /* If libcall, skip to end of call sequence. */
5fd8383e 6096 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
b4ad7b23
RS
6097 p = XEXP (temp, 0);
6098
6099 if (code == INSN
6100 && (set = single_set (p))
6101 && GET_CODE (SET_DEST (set)) == REG
6102 && SET_DEST (set) == dest_reg
45f97e2e
RH
6103 && (general_induction_var (SET_SRC (set), &src_reg,
6104 add_val, mult_val, 0, &benefit)
b4ad7b23 6105 /* Giv created by equivalent expression. */
5fd8383e 6106 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
45f97e2e
RH
6107 && general_induction_var (XEXP (temp, 0), &src_reg,
6108 add_val, mult_val, 0, &benefit)))
b4ad7b23
RS
6109 && src_reg == v->src_reg)
6110 {
5fd8383e 6111 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
b4ad7b23
RS
6112 benefit += libcall_benefit (p);
6113
6114 count--;
6115 v->mult_val = *mult_val;
6116 v->add_val = *add_val;
6117 v->benefit = benefit;
6118 }
6119 else if (code != NOTE)
6120 {
6121 /* Allow insns that set something other than this giv to a
6122 constant. Such insns are needed on machines which cannot
6123 include long constants and should not disqualify a giv. */
6124 if (code == INSN
6125 && (set = single_set (p))
6126 && SET_DEST (set) != dest_reg
6127 && CONSTANT_P (SET_SRC (set)))
6128 continue;
6129
6130 reg_iv_type[REGNO (dest_reg)] = UNKNOWN_INDUCT;
6131 return 0;
6132 }
6133 }
6134
6135 return v->benefit;
6136}
6137\f
6138/* Return an rtx, if any, that expresses giv G2 as a function of the register
6139 represented by G1. If no such expression can be found, or it is clear that
6140 it cannot possibly be a valid address, 0 is returned.
6141
6142 To perform the computation, we note that
45f97e2e
RH
6143 G1 = x * v + a and
6144 G2 = y * v + b
b4ad7b23
RS
6145 where `v' is the biv.
6146
45f97e2e
RH
6147 So G2 = (y/x) * G1 + (b - a*y/x).
6148
6149 Note that MULT = y/x.
6150
6151 Update: A and B are now allowed to be additive expressions such that
6152 B contains all variables in A. That is, computing B-A will not require
6153 subtracting variables. */
6154
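/* A worked instance of the above: if G1 = 4*v + 1 and G2 = 8*v + 6,
   then MULT = 8/4 == 2 and G2 = 2*G1 + 4, since 2*G1 == 8*v + 2.  */
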
6155static rtx
6156express_from_1 (a, b, mult)
6157 rtx a, b, mult;
6158{
6159 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
6160
6161 if (mult == const0_rtx)
6162 return b;
6163
6164 /* If MULT is not 1, we cannot handle A with non-constants, since we
6165 would then be required to subtract multiples of the registers in A.
6166 This is theoretically possible, and may even apply to some Fortran
6167 constructs, but it is a lot of work and we do not attempt it here. */
6168
6169 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
6170 return NULL_RTX;
6171
6172 /* In general these structures are sorted top to bottom (down the PLUS
6173 chain), but not left to right across the PLUS. If B is a higher
6174 order giv than A, we can strip one level and recurse. If A is higher
6175 order, we'll eventually bail out, but won't know that until the end.
6176 If they are the same, we'll strip one level around this loop. */
6177
6178 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
6179 {
6180 rtx ra, rb, oa, ob, tmp;
6181
6182 ra = XEXP (a, 0), oa = XEXP (a, 1);
6183 if (GET_CODE (ra) == PLUS)
6184 tmp = ra, ra = oa, oa = tmp;
6185
6186 rb = XEXP (b, 0), ob = XEXP (b, 1);
6187 if (GET_CODE (rb) == PLUS)
6188 tmp = rb, rb = ob, ob = tmp;
6189
6190 if (rtx_equal_p (ra, rb))
6191 /* We matched: remove one reg completely. */
6192 a = oa, b = ob;
6193 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
6194 /* An alternate match. */
6195 a = oa, b = rb;
6196 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
6197 /* An alternate match. */
6198 a = ra, b = ob;
6199 else
6200 {
6201 /* Indicates an extra register in B. Strip one level from B and
6202 recurse, hoping B was the higher order expression. */
6203 ob = express_from_1 (a, ob, mult);
6204 if (ob == NULL_RTX)
6205 return NULL_RTX;
6206 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
6207 }
6208 }
6209
6210 /* Here we are at the last level of A, go through the cases hoping to
6211 get rid of everything but a constant. */
6212
6213 if (GET_CODE (a) == PLUS)
6214 {
efe3eb65 6215 rtx ra, oa;
45f97e2e
RH
6216
6217 ra = XEXP (a, 0), oa = XEXP (a, 1);
6218 if (rtx_equal_p (oa, b))
6219 oa = ra;
6220 else if (!rtx_equal_p (ra, b))
6221 return NULL_RTX;
6222
6223 if (GET_CODE (oa) != CONST_INT)
6224 return NULL_RTX;
6225
6226 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
6227 }
6228 else if (GET_CODE (a) == CONST_INT)
6229 {
6230 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
6231 }
6232 else if (GET_CODE (b) == PLUS)
6233 {
6234 if (rtx_equal_p (a, XEXP (b, 0)))
6235 return XEXP (b, 1);
6236 else if (rtx_equal_p (a, XEXP (b, 1)))
6237 return XEXP (b, 0);
6238 else
6239 return NULL_RTX;
6240 }
6241 else if (rtx_equal_p (a, b))
6242 return const0_rtx;
6243
6244 return NULL_RTX;
6245}
b4ad7b23 6246
b4ad7b23
RS
6247static rtx
6248express_from (g1, g2)
6249 struct induction *g1, *g2;
6250{
6251 rtx mult, add;
6252
6253 /* The value that G1 will be multiplied by must be a constant integer. Also,
6254 the only chance we have of getting a valid address is if y/x (see above
6255 for notation) is also an integer. */
45f97e2e
RH
6256 if (GET_CODE (g1->mult_val) == CONST_INT
6257 && GET_CODE (g2->mult_val) == CONST_INT)
6258 {
6259 if (g1->mult_val == const0_rtx
6260 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
6261 return NULL_RTX;
6262 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
6263 }
6264 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
6265 mult = const1_rtx;
6266 else
6267 {
6268 /* ??? Find out if the one is a multiple of the other? */
6269 return NULL_RTX;
6270 }
b4ad7b23 6271
45f97e2e
RH
6272 add = express_from_1 (g1->add_val, g2->add_val, mult);
6273 if (add == NULL_RTX)
6274 return NULL_RTX;
b4ad7b23
RS
6275
6276 /* Form simplified final result. */
6277 if (mult == const0_rtx)
6278 return add;
6279 else if (mult == const1_rtx)
6280 mult = g1->dest_reg;
6281 else
38a448ca 6282 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
b4ad7b23
RS
6283
6284 if (add == const0_rtx)
6285 return mult;
6286 else
38a448ca 6287 return gen_rtx_PLUS (g2->mode, mult, add);
b4ad7b23 6288}
b4ad7b23
RS
6289\f
6290/* Determine whether giv G2 can be combined with G1. This means that G2
6291 can use (either directly or via an address expression) a register used
6292 to represent G1. If so, return an expression for G2 in terms of G1
6293 (just g1->dest_reg when the two givs are identical); else NULL_RTX. */
6294
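/* For instance, if G1 = 4*v and G2 = 4*v + 4 is used as an address,
   express_from yields G1 + 4; on a machine whose addressing modes
   allow a register plus a small displacement, memory_address_p
   accepts that expression and G2 can be combined with G1.  */
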
45f97e2e 6295static rtx
b4ad7b23
RS
6296combine_givs_p (g1, g2)
6297 struct induction *g1, *g2;
6298{
45f97e2e 6299 rtx tem = express_from (g1, g2);
b4ad7b23 6300
45f97e2e
RH
6301 /* If these givs are identical, they can be combined. We use the results
6302 of express_from because the addends are not in a canonical form, so
6303 rtx_equal_p is a weaker test. */
6304 if (tem == const0_rtx)
b4ad7b23 6305 {
45f97e2e 6306 return g1->dest_reg;
b4ad7b23
RS
6307 }
6308
b4ad7b23
RS
6309 /* If G2 can be expressed as a function of G1 and that function is valid
6310 as an address and no more expensive than using a register for G2,
6311 the expression of G2 in terms of G1 can be used. */
45f97e2e
RH
6312 if (tem != NULL_RTX
6313 && g2->giv_type == DEST_ADDR
b4ad7b23 6314 && memory_address_p (g2->mem_mode, tem)
45f97e2e
RH
6315 /* ??? Loses, especially with -fforce-addr, where *g2->location
6316 will always be a register, and so anything more complicated
6317 gets discarded. */
6318#if 0
6319#ifdef ADDRESS_COST
6320 && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location)
6321#else
6322 && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM)
6323#endif
6324#endif
6325 )
b4ad7b23 6326 {
45f97e2e 6327 return tem;
b4ad7b23 6328 }
b4ad7b23 6329
45f97e2e 6330 return NULL_RTX;
b4ad7b23
RS
6331}
6332\f
45f97e2e
RH
6333struct combine_givs_stats
6334{
6335 int giv_number;
6336 int total_benefit;
6337};
6338
6339static int
6340cmp_combine_givs_stats (x, y)
6341 struct combine_givs_stats *x, *y;
6342{
6343 int d;
6344 d = y->total_benefit - x->total_benefit;
6345 /* Stabilize the sort. */
6346 if (!d)
6347 d = x->giv_number - y->giv_number;
6348 return d;
6349}
6350
6351/* If one of these givs is a DEST_REG that was only used once, by the
6352 other giv, this is actually a single use. Return 0 if this is not
6353 the case, -1 if g1 is the DEST_REG involved, and 1 if it was g2. */
7027f90a
JW
6354
6355static int
45f97e2e
RH
6356combine_givs_used_once (g1, g2)
6357 struct induction *g1, *g2;
7027f90a 6358{
45f97e2e 6359 if (g1->giv_type == DEST_REG
8deb8e2c 6360 && VARRAY_INT (n_times_used, REGNO (g1->dest_reg)) == 1
45f97e2e
RH
6361 && reg_mentioned_p (g1->dest_reg, PATTERN (g2->insn)))
6362 return -1;
6363
6364 if (g2->giv_type == DEST_REG
8deb8e2c 6365 && VARRAY_INT (n_times_used, REGNO (g2->dest_reg)) == 1
45f97e2e
RH
6366 && reg_mentioned_p (g2->dest_reg, PATTERN (g1->insn)))
6367 return 1;
7027f90a
JW
6368
6369 return 0;
6370}
45f97e2e
RH
6371
6372static int
6373combine_givs_benefit_from (g1, g2)
6374 struct induction *g1, *g2;
6375{
6376 int tmp = combine_givs_used_once (g1, g2);
6377 if (tmp < 0)
6378 return 0;
6379 else if (tmp > 0)
6380 return g2->benefit - g1->benefit;
6381 else
6382 return g2->benefit;
6383}
7027f90a 6384
b4ad7b23
RS
6385/* Check all pairs of givs for iv_class BL and see if any can be combined with
6386 any other. If so, point SAME to the giv combined with and set NEW_REG to
6387 be an expression (in terms of the other giv's DEST_REG) equivalent to the
6388 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
6389
6390static void
6391combine_givs (bl)
6392 struct iv_class *bl;
6393{
29a82058 6394 struct induction *g1, *g2, **giv_array;
45f97e2e
RH
6395 int i, j, k, giv_count;
6396 struct combine_givs_stats *stats;
6397 rtx *can_combine;
b4ad7b23 6398
7027f90a
JW
6399 /* Count givs, because bl->giv_count is incorrect here. */
6400 giv_count = 0;
b4ad7b23 6401 for (g1 = bl->giv; g1; g1 = g1->next_iv)
45f97e2e
RH
6402 if (!g1->ignore)
6403 giv_count++;
7027f90a
JW
6404
6405 giv_array
6406 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
6407 i = 0;
6408 for (g1 = bl->giv; g1; g1 = g1->next_iv)
45f97e2e
RH
6409 if (!g1->ignore)
6410 giv_array[i++] = g1;
7027f90a 6411
45f97e2e 6412 stats = (struct combine_givs_stats *) alloca (giv_count * sizeof (*stats));
efe3eb65 6413 bzero ((char *) stats, giv_count * sizeof (*stats));
7027f90a 6414
45f97e2e 6415 can_combine = (rtx *) alloca (giv_count * giv_count * sizeof(rtx));
efe3eb65 6416 bzero ((char *) can_combine, giv_count * giv_count * sizeof(rtx));
7027f90a
JW
6417
6418 for (i = 0; i < giv_count; i++)
6419 {
45f97e2e
RH
6420 int this_benefit;
6421
7027f90a 6422 g1 = giv_array[i];
45f97e2e
RH
6423
6424 this_benefit = g1->benefit;
6425 /* Add an additional weight for zero addends. */
6426 if (g1->no_const_addval)
6427 this_benefit += 1;
6428 for (j = 0; j < giv_count; j++)
6429 {
6430 rtx this_combine;
6431
6432 g2 = giv_array[j];
6433 if (g1 != g2
6434 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
6435 {
6436 can_combine[i*giv_count + j] = this_combine;
6437 this_benefit += combine_givs_benefit_from (g1, g2);
6438 /* Add an additional weight for being reused more times. */
6439 this_benefit += 3;
6440 }
6441 }
6442 stats[i].giv_number = i;
6443 stats[i].total_benefit = this_benefit;
6444 }
6445
6446 /* Iterate, combining until we can't. */
6447restart:
6448 qsort (stats, giv_count, sizeof(*stats), cmp_combine_givs_stats);
6449
6450 if (loop_dump_stream)
6451 {
6452 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
6453 for (k = 0; k < giv_count; k++)
6454 {
6455 g1 = giv_array[stats[k].giv_number];
6456 if (!g1->combined_with && !g1->same)
6457 fprintf (loop_dump_stream, " {%d, %d}",
6458 INSN_UID (giv_array[stats[k].giv_number]->insn),
6459 stats[k].total_benefit);
6460 }
6461 putc ('\n', loop_dump_stream);
6462 }
6463
6464 for (k = 0; k < giv_count; k++)
6465 {
6466 int g1_add_benefit = 0;
6467
6468 i = stats[k].giv_number;
6469 g1 = giv_array[i];
6470
6471 /* If it has already been combined, skip. */
6472 if (g1->combined_with || g1->same)
6473 continue;
6474
6475 for (j = 0; j < giv_count; j++)
6476 {
6477 g2 = giv_array[j];
6478 if (g1 != g2 && can_combine[i*giv_count + j]
6479 /* If it has already been combined, skip. */
6480 && ! g2->same && ! g2->combined_with)
6481 {
6482 int l;
6483
6484 g2->new_reg = can_combine[i*giv_count + j];
6485 g2->same = g1;
6486 g1->combined_with = 1;
6487 if (!combine_givs_used_once (g1, g2))
6488 g1->times_used += 1;
6489 g1->lifetime += g2->lifetime;
6490
6491 g1_add_benefit += combine_givs_benefit_from (g1, g2);
6492
6493 /* ??? The new final_[bg]iv_value code does a much better job
6494 of finding replaceable giv's, and hence this code may no
6495 longer be necessary. */
6496 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
6497 g1_add_benefit -= copy_cost;
7027f90a 6498
45f97e2e
RH
6499 /* To help optimize the next set of combinations, remove
6500 this giv from the benefits of other potential mates. */
6501 for (l = 0; l < giv_count; ++l)
6502 {
6503 int m = stats[l].giv_number;
6504 if (can_combine[m*giv_count + j])
6505 {
6506 /* Remove additional weight for being reused. */
6507 stats[l].total_benefit -= 3 +
6508 combine_givs_benefit_from (giv_array[m], g2);
6509 }
6510 }
6511
6512 if (loop_dump_stream)
6513 fprintf (loop_dump_stream,
6514 "giv at %d combined with giv at %d\n",
6515 INSN_UID (g2->insn), INSN_UID (g1->insn));
6516 }
6517 }
6518
6519 /* To help optimize the next set of combinations, remove
6520 this giv from the benefits of other potential mates. */
6521 if (g1->combined_with)
6522 {
6523 for (j = 0; j < giv_count; ++j)
6524 {
6525 int m = stats[j].giv_number;
6526 if (can_combine[m*giv_count + i])
6527 {
6528 /* Remove additional weight for being reused. */
6529 stats[j].total_benefit -= 3 +
6530 combine_givs_benefit_from (giv_array[m], g1);
6531 }
6532 }
6533
6534 g1->benefit += g1_add_benefit;
6535
6536 /* We've finished with this giv, and everything it touched.
6537 Restart the combination so that proper weights for the
6538 rest of the givs are properly taken into account. */
6539 /* ??? Ideally we would compact the arrays at this point, so
6540 as to not cover old ground. But sanely compacting
6541 can_combine is tricky. */
6542 goto restart;
6543 }
7027f90a 6544 }
b4ad7b23
RS
6545}
6546\f
6547/* EMIT code before INSERT_BEFORE to set REG = B * M + A. */
6548
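/* A typical use (sketched here; see the strength-reduction code for
   the actual calls) initializes the register of a reduced giv before
   the loop:

	emit_iv_add_mult (bl->initial_value, v->mult_val,
			  v->add_val, v->new_reg, loop_start);

   so that v->new_reg enters the loop holding biv * mult + add.  */
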
6549void
6550emit_iv_add_mult (b, m, a, reg, insert_before)
6551 rtx b; /* initial value of basic induction variable */
6552 rtx m; /* multiplicative constant */
6553 rtx a; /* additive constant */
6554 rtx reg; /* destination register */
6555 rtx insert_before;
6556{
6557 rtx seq;
6558 rtx result;
6559
6560 /* Prevent unexpected sharing of these rtx. */
6561 a = copy_rtx (a);
6562 b = copy_rtx (b);
6563
0f41302f 6564 /* Increase the lifetime of any invariants moved further in code. */
b4ad7b23
RS
6565 update_reg_last_use (a, insert_before);
6566 update_reg_last_use (b, insert_before);
6567 update_reg_last_use (m, insert_before);
6568
6569 start_sequence ();
6570 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0);
6571 if (reg != result)
6572 emit_move_insn (reg, result);
6573 seq = gen_sequence ();
6574 end_sequence ();
6575
6576 emit_insn_before (seq, insert_before);
9ae8ffe7 6577
00116a7b
RH
6578 /* It is entirely possible that the expansion created lots of new
6579 registers. Iterate over the sequence we just created and
6580 record them all. */
6581
6582 if (GET_CODE (seq) == SEQUENCE)
6583 {
6584 int i;
6585 for (i = 0; i < XVECLEN (seq, 0); ++i)
6586 {
6587 rtx set = single_set (XVECEXP (seq, 0, i));
6588 if (set && GET_CODE (SET_DEST (set)) == REG)
6589 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
6590 }
6591 }
6592 else if (GET_CODE (seq) == SET
6593 && GET_CODE (SET_DEST (seq)) == REG)
6594 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
b4ad7b23
RS
6595}
6596\f
6597/* Test whether A * B can be computed without
6598 an actual multiply insn. Value is 1 if so. */
6599
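/* For instance, with B == 6 the expansion may be the shift-add pair

	t = a << 1;
	t += t << 1;

   (multiply by 2, then by 3), which is short and contains no MULT,
   so the value is 1.  A constant that forces a real multiply insn or
   a libcall makes the value 0; the outcome depends on what
   expand_mult emits for the target, so it is machine-specific.  */
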
6600static int
6601product_cheap_p (a, b)
6602 rtx a;
6603 rtx b;
6604{
6605 int i;
6606 rtx tmp;
6607 struct obstack *old_rtl_obstack = rtl_obstack;
6608 char *storage = (char *) obstack_alloc (&temp_obstack, 0);
6609 int win = 1;
6610
0f41302f 6611 /* If only one is constant, make it B. */
b4ad7b23
RS
6612 if (GET_CODE (a) == CONST_INT)
6613 tmp = a, a = b, b = tmp;
6614
6615 /* If first constant, both constant, so don't need multiply. */
6616 if (GET_CODE (a) == CONST_INT)
6617 return 1;
6618
6619 /* If second not constant, neither is constant, so would need multiply. */
6620 if (GET_CODE (b) != CONST_INT)
6621 return 0;
6622
6623 /* One operand is constant, so might not need multiply insn. Generate the
6624 code for the multiply and see if a call or multiply, or long sequence
6625 of insns is generated. */
6626
6627 rtl_obstack = &temp_obstack;
6628 start_sequence ();
5fd8383e 6629 expand_mult (GET_MODE (a), a, b, NULL_RTX, 0);
b4ad7b23
RS
6630 tmp = gen_sequence ();
6631 end_sequence ();
6632
6633 if (GET_CODE (tmp) == SEQUENCE)
6634 {
6635 if (XVEC (tmp, 0) == 0)
6636 win = 1;
6637 else if (XVECLEN (tmp, 0) > 3)
6638 win = 0;
6639 else
6640 for (i = 0; i < XVECLEN (tmp, 0); i++)
6641 {
6642 rtx insn = XVECEXP (tmp, 0, i);
6643
6644 if (GET_CODE (insn) != INSN
6645 || (GET_CODE (PATTERN (insn)) == SET
6646 && GET_CODE (SET_SRC (PATTERN (insn))) == MULT)
6647 || (GET_CODE (PATTERN (insn)) == PARALLEL
6648 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET
6649 && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT))
6650 {
6651 win = 0;
6652 break;
6653 }
6654 }
6655 }
6656 else if (GET_CODE (tmp) == SET
6657 && GET_CODE (SET_SRC (tmp)) == MULT)
6658 win = 0;
6659 else if (GET_CODE (tmp) == PARALLEL
6660 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
6661 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
6662 win = 0;
6663
6664 /* Free any storage we obtained in generating this multiply and restore rtl
6665 allocation to its normal obstack. */
6666 obstack_free (&temp_obstack, storage);
6667 rtl_obstack = old_rtl_obstack;
6668
6669 return win;
6670}
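/* Illustrative example, not part of the original file: on most targets
   a multiply by the constant 5 expands to a short shift-add sequence,

	t = a << 2;
	r = t + a;

   two insns and no MULT, so product_cheap_p returns 1.  A constant with
   no cheap shift-add decomposition would instead produce a real multiply
   insn, or a sequence longer than three insns, and the result would
   be 0.  */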
6671\f
6672/* Check to see if loop can be terminated by a "decrement and branch until
6673 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
6674 Also try reversing an increment loop to a decrement loop
6675 to see if the optimization can be performed.
6676 Value is nonzero if optimization was performed. */
6677
6678/* This is useful even if the architecture doesn't have such an insn,
6679 because it might change a loop which increments from 0 to n to a loop
6680 which decrements from n to 0. A loop that decrements to zero is usually
6681 faster than one that increments from zero. */
6682
6683/* ??? This could be rewritten to use some of the loop unrolling procedures,
6684 such as approx_final_value, biv_total_increment, loop_iterations, and
6685 final_[bg]iv_value. */
6686
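/* Illustrative sketch, not from the original sources: at the source
   level the reversal performed below corresponds to rewriting

	for (i = 0; i < n; i++)		-- body that never uses i --
   as
	for (i = n; --i >= 0; )		-- same body --

   so that the exit test compares against zero and, with the REG_NONNEG
   note added, can become a single decrement-and-branch instruction.  */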
6687static int
6688check_dbra_loop (loop_end, insn_count, loop_start)
6689 rtx loop_end;
6690 int insn_count;
6691 rtx loop_start;
6692{
6693 struct iv_class *bl;
6694 rtx reg;
6695 rtx jump_label;
6696 rtx final_value;
6697 rtx start_value;
6698 rtx new_add_val;
6699 rtx comparison;
6700 rtx before_comparison;
6701 rtx p;
6702 rtx jump;
6703 rtx first_compare;
6704 int compare_and_branch;
6705
6706 /* If last insn is a conditional branch, and the insn before tests a
6707 register value, try to optimize it. Otherwise, we can't do anything. */
6708
6709 jump = PREV_INSN (loop_end);
6710 comparison = get_condition_for_loop (jump);
6711 if (comparison == 0)
6712 return 0;
6713
6714 /* Try to compute whether the compare/branch at the loop end is one or
6715 two instructions. */
6716 get_condition (jump, &first_compare);
6717 if (first_compare == jump)
6718 compare_and_branch = 1;
6719 else if (first_compare == prev_nonnote_insn (jump))
6720 compare_and_branch = 2;
6721 else
6722 return 0;
6723
6724 /* Check all of the bivs to see if the compare uses one of them.
6725 Skip bivs set more than once because we can't guarantee that
6726 it will be zero on the last iteration. Also skip if the biv is
6727 used between its update and the test insn. */
6728
6729 for (bl = loop_iv_list; bl; bl = bl->next)
6730 {
6731 if (bl->biv_count == 1
6732 && bl->biv->dest_reg == XEXP (comparison, 0)
6733 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
6734 first_compare))
6735 break;
6736 }
6737
6738 if (! bl)
6739 return 0;
6740
6741 /* Look for the case where the basic induction variable is always
6742 nonnegative, and equals zero on the last iteration.
6743 In this case, add a reg_note REG_NONNEG, which allows the
6744 m68k DBRA instruction to be used. */
6745
6746 if (((GET_CODE (comparison) == GT
6747 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
6748 && INTVAL (XEXP (comparison, 1)) == -1)
6749 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
6750 && GET_CODE (bl->biv->add_val) == CONST_INT
6751 && INTVAL (bl->biv->add_val) < 0)
6752 {
6753 /* Initial value must be greater than 0,
6754 init_val % -dec_value == 0 to ensure that it equals zero on
6755 the last iteration */
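	  /* Worked example (illustrative): with initial value 12 and
	     add_val -4, 12 % 4 == 0 and the biv takes the values
	     12, 8, 4, 0, reaching exactly zero on the last iteration.
	     An initial value of 10 would step past zero (10, 6, 2, -2),
	     so the REG_NONNEG note would be unsafe there.  */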
6756
6757 if (GET_CODE (bl->initial_value) == CONST_INT
6758 && INTVAL (bl->initial_value) > 0
6759 && (INTVAL (bl->initial_value)
6760 % (-INTVAL (bl->biv->add_val))) == 0)
6761 {
6762 /* register always nonnegative, add REG_NOTE to branch */
6763 REG_NOTES (PREV_INSN (loop_end))
6764 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6765 REG_NOTES (PREV_INSN (loop_end)));
6766 bl->nonneg = 1;
6767
6768 return 1;
6769 }
6770
6771 /* If the decrement is 1 and the value was tested as >= 0 before
6772 the loop, then we can safely optimize. */
6773 for (p = loop_start; p; p = PREV_INSN (p))
6774 {
6775 if (GET_CODE (p) == CODE_LABEL)
6776 break;
6777 if (GET_CODE (p) != JUMP_INSN)
6778 continue;
6779
6780 before_comparison = get_condition_for_loop (p);
6781 if (before_comparison
6782 && XEXP (before_comparison, 0) == bl->biv->dest_reg
6783 && GET_CODE (before_comparison) == LT
6784 && XEXP (before_comparison, 1) == const0_rtx
6785 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
6786 && INTVAL (bl->biv->add_val) == -1)
6787 {
6788 REG_NOTES (PREV_INSN (loop_end))
6789 = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
6790 REG_NOTES (PREV_INSN (loop_end)));
6791 bl->nonneg = 1;
6792
6793 return 1;
6794 }
6795 }
6796 }
6797 else if (INTVAL (bl->biv->add_val) > 0)
6798 {
6799 /* Try to change inc to dec, so can apply above optimization. */
6800 /* Can do this if:
6801 all registers modified are induction variables or invariant,
6802 all memory references have non-overlapping addresses
6803 (obviously true if only one write)
6804 allow 2 insns for the compare/jump at the end of the loop. */
6805 /* Also, we must avoid any instructions which use both the reversed
6806 biv and another biv. Such instructions will fail if the loop is
6807 reversed. We meet this condition by requiring that either
6808 no_use_except_counting is true, or else that there is only
6809 one biv. */
6810 int num_nonfixed_reads = 0;
6811 /* 1 if the iteration var is used only to count iterations. */
6812 int no_use_except_counting = 0;
6813 /* 1 if the loop has no memory store, or it has a single memory store
6814 which is reversible. */
6815 int reversible_mem_store = 1;
6816
6817 if (bl->giv_count == 0
6818 && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]])
6819 {
6820 rtx bivreg = regno_reg_rtx[bl->regno];
6821
6822 /* If there are no givs for this biv, and the only exit is the
6823 fall through at the end of the loop, then
6824 see if perhaps there are no uses except to count. */
6825 no_use_except_counting = 1;
6826 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6827 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6828 {
6829 rtx set = single_set (p);
6830
6831 if (set && GET_CODE (SET_DEST (set)) == REG
6832 && REGNO (SET_DEST (set)) == bl->regno)
6833 /* An insn that sets the biv is okay. */
6834 ;
6835 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
6836 || p == prev_nonnote_insn (loop_end))
6837 /* Don't bother about the end test. */
6838 ;
6839 else if (reg_mentioned_p (bivreg, PATTERN (p)))
6840 {
6841 no_use_except_counting = 0;
6842 break;
6843 }
6844 }
6845 }
6846
6847 if (no_use_except_counting)
6848 ; /* no need to worry about MEMs. */
6849 else if (num_mem_sets <= 1)
6850 {
6851 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
6852 if (GET_RTX_CLASS (GET_CODE (p)) == 'i')
6853 num_nonfixed_reads += count_nonfixed_reads (PATTERN (p));
6854
6855 /* If the loop has a single store, and the destination address is
6856 invariant, then we can't reverse the loop, because this address
6857 might then have the wrong value at loop exit.
6858 This would work if the source was invariant also, however, in that
6859 case, the insn should have been moved out of the loop. */
6860
6861 if (num_mem_sets == 1)
6862 reversible_mem_store
6863 = (! unknown_address_altered
6864 && ! invariant_p (XEXP (loop_store_mems[0], 0)));
6865 }
6866 else
6867 return 0;
6868
6869 /* This code only acts for innermost loops. Also it simplifies
6870 the memory address check by only reversing loops with
6871 zero or one memory access.
6872 Two memory accesses could involve parts of the same array,
6873 and that can't be reversed.
6874 If the biv is used only for counting, then we don't need to worry
6875 about all these things. */
6876
6877 if ((num_nonfixed_reads <= 1
6878 && !loop_has_call
6879 && !loop_has_volatile
6880 && reversible_mem_store
6881 && (bl->giv_count + bl->biv_count + num_mem_sets
6882 + num_movables + compare_and_branch == insn_count)
6883 && (bl == loop_iv_list && bl->next == 0))
6884 || no_use_except_counting)
6885 {
6886 rtx tem;
6887
6888 /* Loop can be reversed. */
6889 if (loop_dump_stream)
6890 fprintf (loop_dump_stream, "Can reverse loop\n");
6891
6892 /* Now check other conditions:
6893
6894 The increment must be a constant, as must the initial value,
6895 and the comparison code must be LT.
6896
6897 This test can probably be improved since +/- 1 in the constant
6898 can be obtained by changing LT to LE and vice versa; this is
6899 confusing. */
6900
6901 if (comparison
6902 /* for constants, LE gets turned into LT */
6903 && (GET_CODE (comparison) == LT
6904 || (GET_CODE (comparison) == LE
6905 && no_use_except_counting)))
6906 {
6907 HOST_WIDE_INT add_val, add_adjust, comparison_val;
6908 rtx initial_value, comparison_value;
6909 int nonneg = 0;
6910 enum rtx_code cmp_code;
6911 int comparison_const_width;
6912 unsigned HOST_WIDE_INT comparison_sign_mask;
6913 rtx vtop;
6914
6915 add_val = INTVAL (bl->biv->add_val);
6916 comparison_value = XEXP (comparison, 1);
6917 comparison_const_width
6918 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 1)));
6919 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
6920 comparison_const_width = HOST_BITS_PER_WIDE_INT;
6921 comparison_sign_mask
6922 = (unsigned HOST_WIDE_INT)1 << (comparison_const_width - 1);
6923
6924 /* If the comparison value is not a loop invariant, then we
6925 can not reverse this loop.
6926
6927 ??? If the insns which initialize the comparison value as
6928 a whole compute an invariant result, then we could move
6929 them out of the loop and proceed with loop reversal. */
6930 if (!invariant_p (comparison_value))
6931 return 0;
6932
6933 if (GET_CODE (comparison_value) == CONST_INT)
6934 comparison_val = INTVAL (comparison_value);
6935 initial_value = bl->initial_value;
6936
6937 /* Normalize the initial value if it is an integer and
6938 has no other use except as a counter. This will allow
6939 a few more loops to be reversed. */
6940 if (no_use_except_counting
6941 && GET_CODE (comparison_value) == CONST_INT
6942 && GET_CODE (initial_value) == CONST_INT)
6943 {
6944 comparison_val = comparison_val - INTVAL (bl->initial_value);
6945 /* The code below requires comparison_val to be a multiple
6946 of add_val in order to do the loop reversal, so
6947 round up comparison_val to a multiple of add_val.
6948 Since comparison_value is constant, we know that the
6949 current comparison code is LT. */
6950 comparison_val = comparison_val + add_val - 1;
6951 comparison_val
6952 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
6953 /* We postpone overflow checks for COMPARISON_VAL here;
6954 even if there is an overflow, we might still be able to
6955 reverse the loop, if converting the loop exit test to
6956 NE is possible. */
6957 initial_value = const0_rtx;
6958 }
6959
6960 /* Check if there is a NOTE_INSN_LOOP_VTOP note. If there is,
6961 that means that this is a for or while style loop, with
6962 a loop exit test at the start. Thus, we can assume that
6963 the loop condition was true when the loop was entered.
6964 This allows us to change the loop exit condition to an
6965 equality test.
6966 We start at the end and search backwards for the previous
6967 NOTE. If there is no NOTE_INSN_LOOP_VTOP for this loop,
6968 the search will stop at the NOTE_INSN_LOOP_CONT. */
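	  /* Illustrative example, not from the original sources: a loop
	     compiled from "while (i < n) { ... i++; }" has its test
	     duplicated before the body, and NOTE_INSN_LOOP_VTOP marks
	     that duplicated test.  Because the body is entered only when
	     the condition already held, the biv cannot step past the
	     bound, and an equality (NE) exit test is safe.  */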
6969 vtop = loop_end;
6970 do
6971 vtop = PREV_INSN (vtop);
6972 while (GET_CODE (vtop) != NOTE
6973 || NOTE_LINE_NUMBER (vtop) > 0
6974 || NOTE_LINE_NUMBER (vtop) == NOTE_REPEATED_LINE_NUMBER
6975 || NOTE_LINE_NUMBER (vtop) == NOTE_INSN_DELETED);
6976 if (NOTE_LINE_NUMBER (vtop) != NOTE_INSN_LOOP_VTOP)
6977 vtop = NULL_RTX;
6978
6979 /* First check if we can do a vanilla loop reversal. */
6980 if (initial_value == const0_rtx
6981 /* If we have a decrement_and_branch_on_count, prefer
6982 the NE test, since this will allow that instruction to
6983 be generated. */
6984#if ! defined (HAVE_decrement_and_branch_until_zero) && defined (HAVE_decrement_and_branch_on_count)
6985 && (add_val != 1 || ! vtop)
6986#endif
6987 && GET_CODE (comparison_value) == CONST_INT
6988 /* Now do postponed overflow checks on COMPARISON_VAL. */
6989 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
6990 & comparison_sign_mask))
6991 {
6992 /* Register will always be nonnegative, with value
6993 0 on last iteration */
6994 add_adjust = add_val;
6995 nonneg = 1;
6996 cmp_code = GE;
6997 }
6998 else if (add_val == 1 && vtop)
6999 {
7000 add_adjust = 0;
7001 cmp_code = NE;
7002 }
7003 else
7004 return 0;
7005
7006 if (GET_CODE (comparison) == LE)
7007 add_adjust -= add_val;
7008
7009 /* If the initial value is not zero, or if the comparison
7010 value is not an exact multiple of the increment, then we
7011 can not reverse this loop. */
7012 if (initial_value == const0_rtx
7013 && GET_CODE (comparison_value) == CONST_INT)
7014 {
7015 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
7016 return 0;
7017 }
7018 else
7019 {
7020 if (! no_use_except_counting || add_val != 1)
7021 return 0;
7022 }
7023
7024 final_value = comparison_value;
7025
7026 /* Reset these in case we normalized the initial value
7027 and comparison value above. */
7028 if (GET_CODE (comparison_value) == CONST_INT
7029 && GET_CODE (initial_value) == CONST_INT)
7030 {
7031 comparison_value = GEN_INT (comparison_val);
7032 final_value
7033 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
7034 }
7035 bl->initial_value = initial_value;
7036
7037 /* Save some info needed to produce the new insns. */
7038 reg = bl->biv->dest_reg;
7039 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1);
7040 if (jump_label == pc_rtx)
7041 jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2);
7042 new_add_val = GEN_INT (- INTVAL (bl->biv->add_val));
7043
7044 /* Set start_value; if this is not a CONST_INT, we need
7045 to generate a SUB.
7046 Initialize biv to start_value before loop start.
7047 The old initializing insn will be deleted as a
7048 dead store by flow.c. */
7049 if (initial_value == const0_rtx
7050 && GET_CODE (comparison_value) == CONST_INT)
7051 {
7052 start_value = GEN_INT (comparison_val - add_adjust);
7053 emit_insn_before (gen_move_insn (reg, start_value),
7054 loop_start);
7055 }
7056 else if (GET_CODE (initial_value) == CONST_INT)
7057 {
7058 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
7059 enum machine_mode mode = GET_MODE (reg);
7060 enum insn_code icode
7061 = add_optab->handlers[(int) mode].insn_code;
7062 if (! (*insn_operand_predicate[icode][0]) (reg, mode)
7063 || ! ((*insn_operand_predicate[icode][1])
7064 (comparison_value, mode))
7065 || ! (*insn_operand_predicate[icode][2]) (offset, mode))
7066 return 0;
7067 start_value
7068 = gen_rtx_PLUS (mode, comparison_value, offset);
7069 emit_insn_before ((GEN_FCN (icode)
7070 (reg, comparison_value, offset)),
7071 loop_start);
7072 if (GET_CODE (comparison) == LE)
7073 final_value = gen_rtx_PLUS (mode, comparison_value,
7074 GEN_INT (add_val));
7075 }
7076 else if (! add_adjust)
7077 {
7078 enum machine_mode mode = GET_MODE (reg);
7079 enum insn_code icode
7080 = sub_optab->handlers[(int) mode].insn_code;
7081 if (! (*insn_operand_predicate[icode][0]) (reg, mode)
7082 || ! ((*insn_operand_predicate[icode][1])
7083 (comparison_value, mode))
7084 || ! ((*insn_operand_predicate[icode][2])
7085 (initial_value, mode)))
7086 return 0;
7087 start_value
7088 = gen_rtx_MINUS (mode, comparison_value, initial_value);
7089 emit_insn_before ((GEN_FCN (icode)
7090 (reg, comparison_value, initial_value)),
7091 loop_start);
7092 }
7093 else
7094 /* We could handle the other cases too, but it'll be
7095 better to have a testcase first. */
7096 return 0;
7097
7098 /* Add insn to decrement register, and delete insn
7099 that incremented the register. */
7100 p = emit_insn_before (gen_add2_insn (reg, new_add_val),
7101 bl->biv->insn);
7102 delete_insn (bl->biv->insn);
7103
7104 /* Update biv info to reflect its new status. */
7105 bl->biv->insn = p;
7106 bl->initial_value = start_value;
7107 bl->biv->add_val = new_add_val;
7108
7109 /* Inc LABEL_NUSES so that delete_insn will
7110 not delete the label. */
7111 LABEL_NUSES (XEXP (jump_label, 0)) ++;
7112
7113 /* Emit an insn after the end of the loop to set the biv's
7114 proper exit value if it is used anywhere outside the loop. */
7115 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
7116 || ! bl->init_insn
7117 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
7118 emit_insn_after (gen_move_insn (reg, final_value),
7119 loop_end);
7120
7121 /* Delete compare/branch at end of loop. */
7122 delete_insn (PREV_INSN (loop_end));
7123 if (compare_and_branch == 2)
7124 delete_insn (first_compare);
7125
7126 /* Add new compare/branch insn at end of loop. */
7127 start_sequence ();
7128 emit_cmp_insn (reg, const0_rtx, cmp_code, NULL_RTX,
7129 GET_MODE (reg), 0, 0);
7130 emit_jump_insn ((*bcc_gen_fctn[(int) cmp_code])
7131 (XEXP (jump_label, 0)));
7132 tem = gen_sequence ();
7133 end_sequence ();
7134 emit_jump_insn_before (tem, loop_end);
7135
7136 if (nonneg)
7137 {
7138 for (tem = PREV_INSN (loop_end);
7139 tem && GET_CODE (tem) != JUMP_INSN;
7140 tem = PREV_INSN (tem))
7141 ;
7142 if (tem)
7143 {
7144 JUMP_LABEL (tem) = XEXP (jump_label, 0);
7145
7146 /* Increment of LABEL_NUSES done above. */
7147 /* Register is now always nonnegative,
7148 so add REG_NONNEG note to the branch. */
7149 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX,
7150 REG_NOTES (tem));
7151 }
7152 bl->nonneg = 1;
7153 }
7154
7155 /* Mark that this biv has been reversed. Each giv which depends
7156 on this biv, and which is also live past the end of the loop
7157 will have to be fixed up. */
7158
7159 bl->reversed = 1;
7160
7161 if (loop_dump_stream)
7162 fprintf (loop_dump_stream,
7163 "Reversed loop and added reg_nonneg\n");
7164
7165 return 1;
7166 }
7167 }
7168 }
7169
7170 return 0;
7171}
7172\f
7173/* Verify whether the biv BL appears to be eliminable,
7174 based on the insns in the loop that refer to it.
7175 LOOP_START is the first insn of the loop, and END is the end insn.
7176
7177 If ELIMINATE_P is non-zero, actually do the elimination.
7178
7179 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
7180 determine whether invariant insns should be placed inside or at the
7181 start of the loop. */
7182
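/* Illustrative sketch, not from the original sources: if the loop has
   a biv i and a reduced giv g = 4*i + base, then a test such as
   "i < 100" can be rewritten as "g < 4*100 + base", after which i may
   have no remaining uses and its increment can be deleted.  That is
   the kind of rewrite maybe_eliminate_biv_1 attempts below.  */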
7183static int
7184maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count)
7185 struct iv_class *bl;
7186 rtx loop_start;
7187 rtx end;
7188 int eliminate_p;
7189 int threshold, insn_count;
7190{
7191 rtx reg = bl->biv->dest_reg;
7192 rtx p;
7193
7194 /* Scan all insns in the loop, stopping if we find one that uses the
7195 biv in a way that we cannot eliminate. */
7196
7197 for (p = loop_start; p != end; p = NEXT_INSN (p))
7198 {
7199 enum rtx_code code = GET_CODE (p);
7200 rtx where = threshold >= insn_count ? loop_start : p;
7201
7202 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
7203 && reg_mentioned_p (reg, PATTERN (p))
7204 && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where))
7205 {
7206 if (loop_dump_stream)
7207 fprintf (loop_dump_stream,
7208 "Cannot eliminate biv %d: biv used in insn %d.\n",
7209 bl->regno, INSN_UID (p));
7210 break;
7211 }
7212 }
7213
7214 if (p == end)
7215 {
7216 if (loop_dump_stream)
7217 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
7218 bl->regno, eliminate_p ? "was" : "can be");
7219 return 1;
7220 }
7221
7222 return 0;
7223}
7224\f
7225/* If BL appears in X (part of the pattern of INSN), see if we can
7226 eliminate its use. If so, return 1. If not, return 0.
7227
7228 If BIV does not appear in X, return 1.
7229
7230 If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates
7231 where extra insns should be added. Depending on how many items have been
7232 moved out of the loop, it will either be before INSN or at the start of
7233 the loop. */
7234
7235static int
7236maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where)
7237 rtx x, insn;
7238 struct iv_class *bl;
7239 int eliminate_p;
7240 rtx where;
7241{
7242 enum rtx_code code = GET_CODE (x);
7243 rtx reg = bl->biv->dest_reg;
7244 enum machine_mode mode = GET_MODE (reg);
7245 struct induction *v;
7246 rtx arg, tem;
7247#ifdef HAVE_cc0
7248 rtx new;
7249#endif
7250 int arg_operand;
7251 char *fmt;
7252 int i, j;
7253
7254 switch (code)
7255 {
7256 case REG:
7257 /* If we haven't already been able to do something with this BIV,
7258 we can't eliminate it. */
7259 if (x == reg)
7260 return 0;
7261 return 1;
7262
7263 case SET:
7264 /* If this sets the BIV, it is not a problem. */
7265 if (SET_DEST (x) == reg)
7266 return 1;
7267
7268 /* If this is an insn that defines a giv, it is also ok because
7269 it will go away when the giv is reduced. */
7270 for (v = bl->giv; v; v = v->next_iv)
7271 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
7272 return 1;
7273
7274#ifdef HAVE_cc0
7275 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
7276 {
7277 /* Can replace with any giv that was reduced and
7278 that has (MULT_VAL != 0) and (ADD_VAL == 0).
7279 Require a constant for MULT_VAL, so we know it's nonzero.
7280 ??? We disable this optimization to avoid potential
7281 overflows. */
7282
7283 for (v = bl->giv; v; v = v->next_iv)
7284 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
7285 && v->add_val == const0_rtx
7286 && ! v->ignore && ! v->maybe_dead && v->always_computable
7287 && v->mode == mode
7288 && 0)
7289 {
7290 /* If the giv V had the auto-inc address optimization applied
7291 to it, and INSN occurs between the giv insn and the biv
7292 insn, then we must adjust the value used here.
7293 This is rare, so we don't bother to do so. */
7294 if (v->auto_inc_opt
7295 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7296 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7297 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7298 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7299 continue;
7300
7301 if (! eliminate_p)
7302 return 1;
7303
7304 /* If the giv has the opposite direction of change,
7305 then reverse the comparison. */
7306 if (INTVAL (v->mult_val) < 0)
7307 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
7308 const0_rtx, v->new_reg);
7309 else
7310 new = v->new_reg;
7311
7312 /* We can probably test that giv's reduced reg. */
7313 if (validate_change (insn, &SET_SRC (x), new, 0))
7314 return 1;
7315 }
7316
7317 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
7318 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
7319 Require a constant for MULT_VAL, so we know it's nonzero.
7320 ??? Do this only if ADD_VAL is a pointer to avoid a potential
7321 overflow problem. */
7322
7323 for (v = bl->giv; v; v = v->next_iv)
7324 if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx
7325 && ! v->ignore && ! v->maybe_dead && v->always_computable
7326 && v->mode == mode
7327 && (GET_CODE (v->add_val) == SYMBOL_REF
7328 || GET_CODE (v->add_val) == LABEL_REF
7329 || GET_CODE (v->add_val) == CONST
7330 || (GET_CODE (v->add_val) == REG
7331 && REGNO_POINTER_FLAG (REGNO (v->add_val)))))
7332 {
7333 /* If the giv V had the auto-inc address optimization applied
7334 to it, and INSN occurs between the giv insn and the biv
7335 insn, then we must adjust the value used here.
7336 This is rare, so we don't bother to do so. */
7337 if (v->auto_inc_opt
7338 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7339 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7340 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7341 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7342 continue;
7343
7344 if (! eliminate_p)
7345 return 1;
7346
7347 /* If the giv has the opposite direction of change,
7348 then reverse the comparison. */
7349 if (INTVAL (v->mult_val) < 0)
7350 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
7351 v->new_reg);
7352 else
7353 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
7354 copy_rtx (v->add_val));
7355
7356 /* Replace biv with the giv's reduced register. */
7357 update_reg_last_use (v->add_val, insn);
7358 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
7359 return 1;
7360
7361 /* Insn doesn't support that constant or invariant. Copy it
7362 into a register (it will be a loop invariant.) */
7363 tem = gen_reg_rtx (GET_MODE (v->new_reg));
7364
7365 emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)),
7366 where);
7367
7368 /* Substitute the new register for its invariant value in
7369 the compare expression. */
7370 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
7371 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
7372 return 1;
7373 }
7374 }
7375#endif
7376 break;
7377
7378 case COMPARE:
7379 case EQ: case NE:
7380 case GT: case GE: case GTU: case GEU:
7381 case LT: case LE: case LTU: case LEU:
7382 /* See if either argument is the biv. */
7383 if (XEXP (x, 0) == reg)
7384 arg = XEXP (x, 1), arg_operand = 1;
7385 else if (XEXP (x, 1) == reg)
7386 arg = XEXP (x, 0), arg_operand = 0;
7387 else
7388 break;
7389
7390 if (CONSTANT_P (arg))
7391 {
7392 /* First try to replace with any giv that has constant positive
7393 mult_val and constant add_val. We might be able to support
7394 negative mult_val, but it seems complex to do it in general. */
7395
7396 for (v = bl->giv; v; v = v->next_iv)
7397 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
7398 && (GET_CODE (v->add_val) == SYMBOL_REF
7399 || GET_CODE (v->add_val) == LABEL_REF
7400 || GET_CODE (v->add_val) == CONST
7401 || (GET_CODE (v->add_val) == REG
7402 && REGNO_POINTER_FLAG (REGNO (v->add_val))))
7403 && ! v->ignore && ! v->maybe_dead && v->always_computable
7404 && v->mode == mode)
7405 {
7406 /* If the giv V had the auto-inc address optimization applied
7407 to it, and INSN occurs between the giv insn and the biv
7408 insn, then we must adjust the value used here.
7409 This is rare, so we don't bother to do so. */
7410 if (v->auto_inc_opt
7411 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7412 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7413 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7414 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7415 continue;
7416
7417 if (! eliminate_p)
7418 return 1;
7419
7420 /* Replace biv with the giv's reduced reg. */
7421 XEXP (x, 1-arg_operand) = v->new_reg;
7422
7423 /* If all constants are actually constant integers and
7424 the derived constant can be directly placed in the COMPARE,
7425 do so. */
7426 if (GET_CODE (arg) == CONST_INT
7427 && GET_CODE (v->mult_val) == CONST_INT
7428 && GET_CODE (v->add_val) == CONST_INT
7429 && validate_change (insn, &XEXP (x, arg_operand),
7430 GEN_INT (INTVAL (arg)
7431 * INTVAL (v->mult_val)
7432 + INTVAL (v->add_val)), 0))
7433 return 1;
7434
7435 /* Otherwise, load it into a register. */
7436 tem = gen_reg_rtx (mode);
7437 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
7438 if (validate_change (insn, &XEXP (x, arg_operand), tem, 0))
7439 return 1;
7440
7441 /* If that failed, put back the change we made above. */
7442 XEXP (x, 1-arg_operand) = reg;
7443 }
7444
7445 /* Look for giv with positive constant mult_val and nonconst add_val.
7446 Insert insns to calculate new compare value.
7447 ??? Turn this off due to possible overflow. */
7448
7449 for (v = bl->giv; v; v = v->next_iv)
7450 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
7451 && ! v->ignore && ! v->maybe_dead && v->always_computable
7452 && v->mode == mode
7453 && 0)
7454 {
7455 rtx tem;
7456
7457 /* If the giv V had the auto-inc address optimization applied
7458 to it, and INSN occurs between the giv insn and the biv
7459 insn, then we must adjust the value used here.
7460 This is rare, so we don't bother to do so. */
7461 if (v->auto_inc_opt
7462 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7463 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7464 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7465 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7466 continue;
7467
7468 if (! eliminate_p)
7469 return 1;
7470
7471 tem = gen_reg_rtx (mode);
7472
7473 /* Replace biv with giv's reduced register. */
7474 validate_change (insn, &XEXP (x, 1 - arg_operand),
7475 v->new_reg, 1);
7476
7477 /* Compute value to compare against. */
7478 emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where);
7479 /* Use it in this insn. */
7480 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
7481 if (apply_change_group ())
7482 return 1;
7483 }
7484 }
7485 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
7486 {
7487 if (invariant_p (arg) == 1)
7488 {
7489 /* Look for giv with constant positive mult_val and nonconst
7490 add_val. Insert insns to compute new compare value.
7491 ??? Turn this off due to possible overflow. */
b4ad7b23
RS
7492
7493 for (v = bl->giv; v; v = v->next_iv)
7494 if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0
7495 && ! v->ignore && ! v->maybe_dead && v->always_computable
7496 && v->mode == mode
7497 && 0)
7498 {
7499 rtx tem;
7500
7501 /* If the giv V had the auto-inc address optimization applied
7502 to it, and INSN occurs between the giv insn and the biv
7503 insn, then we must adjust the value used here.
7504 This is rare, so we don't bother to do so. */
7505 if (v->auto_inc_opt
7506 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7507 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7508 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7509 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7510 continue;
7511
7512 if (! eliminate_p)
7513 return 1;
7514
7515 tem = gen_reg_rtx (mode);
7516
7517 /* Replace biv with giv's reduced register. */
7518 validate_change (insn, &XEXP (x, 1 - arg_operand),
7519 v->new_reg, 1);
7520
7521 /* Compute value to compare against. */
7522 emit_iv_add_mult (arg, v->mult_val, v->add_val,
7523 tem, where);
7524 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
7525 if (apply_change_group ())
7526 return 1;
7527 }
7528 }
7529
7530 /* This code has problems. Basically, you can't know when
7531 seeing if we will eliminate BL, whether a particular giv
7532 of ARG will be reduced. If it isn't going to be reduced,
7533 we can't eliminate BL. We can try forcing it to be reduced,
7534 but that can generate poor code.
7535
7536 The problem is that the benefit of reducing TV, below should
7537 be increased if BL can actually be eliminated, but this means
7538 we might have to do a topological sort of the order in which
7539 we try to process biv. It doesn't seem worthwhile to do
7540 this sort of thing now. */
7541
7542#if 0
7543 /* Otherwise the reg compared with had better be a biv. */
7544 if (GET_CODE (arg) != REG
7545 || reg_iv_type[REGNO (arg)] != BASIC_INDUCT)
7546 return 0;
7547
7548 /* Look for a pair of givs, one for each biv,
7549 with identical coefficients. */
7550 for (v = bl->giv; v; v = v->next_iv)
7551 {
7552 struct induction *tv;
7553
7554 if (v->ignore || v->maybe_dead || v->mode != mode)
7555 continue;
7556
7557 for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv)
7558 if (! tv->ignore && ! tv->maybe_dead
7559 && rtx_equal_p (tv->mult_val, v->mult_val)
7560 && rtx_equal_p (tv->add_val, v->add_val)
7561 && tv->mode == mode)
7562 {
7563 /* If the giv V had the auto-inc address optimization applied
7564 to it, and INSN occurs between the giv insn and the biv
7565 insn, then we must adjust the value used here.
7566 This is rare, so we don't bother to do so. */
7567 if (v->auto_inc_opt
7568 && ((INSN_LUID (v->insn) < INSN_LUID (insn)
7569 && INSN_LUID (insn) < INSN_LUID (bl->biv->insn))
7570 || (INSN_LUID (v->insn) > INSN_LUID (insn)
7571 && INSN_LUID (insn) > INSN_LUID (bl->biv->insn))))
7572 continue;
7573
7574 if (! eliminate_p)
7575 return 1;
7576
7577 /* Replace biv with its giv's reduced reg. */
7578 XEXP (x, 1-arg_operand) = v->new_reg;
7579 /* Replace other operand with the other giv's
7580 reduced reg. */
7581 XEXP (x, arg_operand) = tv->new_reg;
7582 return 1;
7583 }
7584 }
7585#endif
7586 }
7587
7588 /* If we get here, the biv can't be eliminated. */
7589 return 0;
7590
7591 case MEM:
7592 /* If this address is a DEST_ADDR giv, it doesn't matter if the
7593 biv is used in it, since it will be replaced. */
7594 for (v = bl->giv; v; v = v->next_iv)
7595 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
7596 return 1;
7597 break;
7598
7599 default:
7600 break;
7601 }
7602
7603 /* See if any subexpression fails elimination. */
7604 fmt = GET_RTX_FORMAT (code);
7605 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7606 {
7607 switch (fmt[i])
7608 {
7609 case 'e':
7610 if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl,
7611 eliminate_p, where))
7612 return 0;
7613 break;
7614
7615 case 'E':
7616 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7617 if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl,
7618 eliminate_p, where))
7619 return 0;
7620 break;
7621 }
7622 }
7623
7624 return 1;
7625}
7626\f
7627/* Return nonzero if the last use of REG
7628 is in an insn following INSN in the same basic block. */
7629
7630static int
7631last_use_this_basic_block (reg, insn)
7632 rtx reg;
7633 rtx insn;
7634{
7635 rtx n;
7636 for (n = insn;
7637 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
7638 n = NEXT_INSN (n))
7639 {
7640 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
7641 return 1;
7642 }
7643 return 0;
7644}
7645\f
7646/* Called via `note_stores' to record the initial value of a biv. Here we
7647 just record the location of the set and process it later. */
7648
7649static void
7650record_initial (dest, set)
7651 rtx dest;
7652 rtx set;
7653{
7654 struct iv_class *bl;
7655
7656 if (GET_CODE (dest) != REG
7657 || REGNO (dest) >= max_reg_before_loop
7658 || reg_iv_type[REGNO (dest)] != BASIC_INDUCT)
7659 return;
7660
7661 bl = reg_biv_class[REGNO (dest)];
7662
7663 /* If this is the first set found, record it. */
7664 if (bl->init_insn == 0)
7665 {
7666 bl->init_insn = note_insn;
7667 bl->init_set = set;
7668 }
7669}
7670\f
7671/* If any of the registers in X are "old" and currently have a last use earlier
7672 than INSN, update them to have a last use of INSN. Their actual last use
7673 will be the previous insn but it will not have a valid uid_luid so we can't
7674 use it. */
7675
7676static void
7677update_reg_last_use (x, insn)
7678 rtx x;
7679 rtx insn;
7680{
7681 /* Check for the case where INSN does not have a valid luid. In this case,
7682 there is no need to modify the regno_last_uid, as this can only happen
7683 when code is inserted after the loop_end to set a pseudo's final value,
7684 and hence this insn will never be the last use of x. */
7685 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
7686 && INSN_UID (insn) < max_uid_for_loop
7687 && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)])
7688 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
7689 else
7690 {
7691 register int i, j;
7692 register char *fmt = GET_RTX_FORMAT (GET_CODE (x));
7693 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7694 {
7695 if (fmt[i] == 'e')
7696 update_reg_last_use (XEXP (x, i), insn);
7697 else if (fmt[i] == 'E')
7698 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7699 update_reg_last_use (XVECEXP (x, i, j), insn);
7700 }
7701 }
7702}
7703\f
7704/* Given a jump insn JUMP, return the condition that will cause it to branch
7705 to its JUMP_LABEL. If the condition cannot be understood, or is an
7706 inequality floating-point comparison which needs to be reversed, 0 will
7707 be returned.
7708
7709 If EARLIEST is non-zero, it is a pointer to a place where the earliest
7710 insn used in locating the condition was found. If a replacement test
7711 of the condition is desired, it should be placed in front of that
7712 insn and we will be sure that the inputs are still valid.
7713
7714 The condition will be returned in a canonical form to simplify testing by
7715 callers. Specifically:
7716
7717 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
7718 (2) Both operands will be machine operands; (cc0) will have been replaced.
7719 (3) If an operand is a constant, it will be the second operand.
7720 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
7721 for GE, GEU, and LEU. */
7722
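/* Example of the canonical form (illustrative): for a branch taken when
   "i <= 4" holds on an integer register i, get_condition returns
   (LT i 5) -- rule (4) above -- with the constant as the second operand,
   per rule (3).  */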
7723rtx
7724get_condition (jump, earliest)
7725 rtx jump;
7726 rtx *earliest;
7727{
7728 enum rtx_code code;
7729 rtx prev = jump;
7730 rtx set;
7731 rtx tem;
7732 rtx op0, op1;
7733 int reverse_code = 0;
7734 int did_reverse_condition = 0;
7735 enum machine_mode mode;
7736
7737 /* If this is not a standard conditional jump, we can't parse it. */
7738 if (GET_CODE (jump) != JUMP_INSN
7739 || ! condjump_p (jump) || simplejump_p (jump))
7740 return 0;
7741
7742 code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0));
7743 mode = GET_MODE (XEXP (SET_SRC (PATTERN (jump)), 0));
7744 op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0);
7745 op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1);
7746
7747 if (earliest)
7748 *earliest = jump;
7749
7750 /* If this branches to JUMP_LABEL when the condition is false, reverse
7751 the condition. */
7752 if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
7753 && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
7754 code = reverse_condition (code), did_reverse_condition ^= 1;
7755
7756 /* If we are comparing a register with zero, see if the register is set
7757 in the previous insn to a COMPARE or a comparison operation. Perform
7758 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
7759 in cse.c */
7760
7761 while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0)))
7762 {
7763 /* Set non-zero when we find something of interest. */
7764 rtx x = 0;
7765
7766#ifdef HAVE_cc0
7767 /* If comparison with cc0, import actual comparison from compare
7768 insn. */
7769 if (op0 == cc0_rtx)
7770 {
7771 if ((prev = prev_nonnote_insn (prev)) == 0
7772 || GET_CODE (prev) != INSN
7773 || (set = single_set (prev)) == 0
7774 || SET_DEST (set) != cc0_rtx)
7775 return 0;
7776
7777 op0 = SET_SRC (set);
7778 op1 = CONST0_RTX (GET_MODE (op0));
7779 if (earliest)
7780 *earliest = prev;
7781 }
7782#endif
7783
7784 /* If this is a COMPARE, pick up the two things being compared. */
7785 if (GET_CODE (op0) == COMPARE)
7786 {
7787 op1 = XEXP (op0, 1);
7788 op0 = XEXP (op0, 0);
7789 continue;
7790 }
7791 else if (GET_CODE (op0) != REG)
7792 break;
7793
7794 /* Go back to the previous insn. Stop if it is not an INSN. We also
7795 stop if it isn't a single set or if it has a REG_INC note because
7796 we don't want to bother dealing with it. */
7797
7798 if ((prev = prev_nonnote_insn (prev)) == 0
7799 || GET_CODE (prev) != INSN
7800 || FIND_REG_INC_NOTE (prev, 0)
7801 || (set = single_set (prev)) == 0)
7802 break;
7803
7804 /* If this is setting OP0, get what it sets it to if it looks
7805 relevant. */
7806 if (rtx_equal_p (SET_DEST (set), op0))
7807 {
7808 enum machine_mode inner_mode = GET_MODE (SET_SRC (set));
7809
7810 /* ??? We may not combine comparisons done in a CCmode with
7811 comparisons not done in a CCmode. This is to aid targets
7812 like Alpha that have an IEEE compliant EQ instruction, and
7813 a non-IEEE compliant BEQ instruction. The use of CCmode is
7814 actually artificial, simply to prevent the combination, but
7815 should not affect other platforms. */
7816
7817 if ((GET_CODE (SET_SRC (set)) == COMPARE
7818 || (((code == NE
7819 || (code == LT
7820 && GET_MODE_CLASS (inner_mode) == MODE_INT
7821 && (GET_MODE_BITSIZE (inner_mode)
7822 <= HOST_BITS_PER_WIDE_INT)
7823 && (STORE_FLAG_VALUE
7824 & ((HOST_WIDE_INT) 1
7825 << (GET_MODE_BITSIZE (inner_mode) - 1))))
b565a316
RK
7826#ifdef FLOAT_STORE_FLAG_VALUE
7827 || (code == LT
7828 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7829 && FLOAT_STORE_FLAG_VALUE < 0)
7830#endif
7831 ))
7832 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
7833 && ((GET_MODE_CLASS (mode) == MODE_CC)
7834 != (GET_MODE_CLASS (inner_mode) == MODE_CC)))
7835 x = SET_SRC (set);
7836 else if (((code == EQ
7837 || (code == GE
7838 && (GET_MODE_BITSIZE (inner_mode)
7839 <= HOST_BITS_PER_WIDE_INT)
b565a316
RK
7840 && GET_MODE_CLASS (inner_mode) == MODE_INT
7841 && (STORE_FLAG_VALUE
5fd8383e
RK
7842 & ((HOST_WIDE_INT) 1
7843 << (GET_MODE_BITSIZE (inner_mode) - 1))))
7844#ifdef FLOAT_STORE_FLAG_VALUE
7845 || (code == GE
7846 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
7847 && FLOAT_STORE_FLAG_VALUE < 0)
7848#endif
7849 ))
7850 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
7851 && ((GET_MODE_CLASS (mode) == MODE_CC)
7852 != (GET_MODE_CLASS (inner_mode) == MODE_CC)))
7853 {
7854 /* We might have reversed a LT to get a GE here. But this wasn't
7855 actually the comparison of data, so we don't flag that we
7856 have had to reverse the condition. */
7857 did_reverse_condition ^= 1;
7858 reverse_code = 1;
7859 x = SET_SRC (set);
7860 }
7861 else
7862 break;
7863 }
7864
7865 else if (reg_set_p (op0, prev))
7866 /* If this sets OP0, but not directly, we have to give up. */
7867 break;
7868
7869 if (x)
7870 {
7871 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
7872 code = GET_CODE (x);
7873 if (reverse_code)
7874 {
7875 code = reverse_condition (code);
7876 did_reverse_condition ^= 1;
7877 reverse_code = 0;
7878 }
7879
7880 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
7881 if (earliest)
7882 *earliest = prev;
7883 }
7884 }
7885
7886 /* If constant is first, put it last. */
7887 if (CONSTANT_P (op0))
7888 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
7889
7890 /* If OP0 is the result of a comparison, we weren't able to find what
7891 was really being compared, so fail. */
7892 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
7893 return 0;
7894
7895 /* Canonicalize any ordered comparison with integers involving equality
7896 if we can do computations in the relevant mode and we do not
7897 overflow. */
7898
7899 if (GET_CODE (op1) == CONST_INT
7900 && GET_MODE (op0) != VOIDmode
7901 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
7902 {
7903 HOST_WIDE_INT const_val = INTVAL (op1);
7904 unsigned HOST_WIDE_INT uconst_val = const_val;
7905 unsigned HOST_WIDE_INT max_val
7906 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
7907
7908 switch (code)
7909 {
7910 case LE:
7911 if (const_val != max_val >> 1)
7912 code = LT, op1 = GEN_INT (const_val + 1);
7913 break;
7914
7915 /* When cross-compiling, const_val might be sign-extended from
7916 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
7917 case GE:
7918 if ((const_val & max_val)
7919 != (((HOST_WIDE_INT) 1
7920 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
7921 code = GT, op1 = GEN_INT (const_val - 1);
7922 break;
7923
7924 case LEU:
7925 if (uconst_val < max_val)
7926 code = LTU, op1 = GEN_INT (uconst_val + 1);
7927 break;
7928
7929 case GEU:
7930 if (uconst_val != 0)
7931 code = GTU, op1 = GEN_INT (uconst_val - 1);
7932 break;
7933
7934 default:
7935 break;
7936 }
7937 }
7938
7939 /* If this was floating-point and we reversed anything other than an
7940 EQ or NE, return zero. */
7941 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
7942 && did_reverse_condition && code != NE && code != EQ
7943 && ! flag_fast_math
7944 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
7945 return 0;
7946
7947#ifdef HAVE_cc0
7948 /* Never return CC0; return zero instead. */
7949 if (op0 == cc0_rtx)
7950 return 0;
7951#endif
7952
7953 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
7954}
7955
7956/* Similar to above routine, except that we also put an invariant last
7957 unless both operands are invariants. */
7958
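/* Example (illustrative): given a loop exit test of the form (LT n i),
   where n is loop-invariant and i is the biv, this returns (GT i n),
   so callers can rely on the induction variable appearing first.  */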
7959rtx
7960get_condition_for_loop (x)
7961 rtx x;
7962{
7963 rtx comparison = get_condition (x, NULL_PTR);
7964
7965 if (comparison == 0
7966 || ! invariant_p (XEXP (comparison, 0))
7967 || invariant_p (XEXP (comparison, 1)))
7968 return comparison;
7969
7970 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
7971 XEXP (comparison, 1), XEXP (comparison, 0));
7972}
7973
7974#ifdef HAIFA
7975/* Analyze a loop in order to instrument it with the use of count register.
7976 loop_start and loop_end are the first and last insns of the loop.
7977 This function works in cooperation with insert_bct ().
7978 loop_can_insert_bct[loop_num] is set according to whether the optimization
7979 is applicable to the loop. When it is applicable, the following variables
7980 are also set:
7981 loop_start_value[loop_num]
7982 loop_comparison_value[loop_num]
7983 loop_increment[loop_num]
7984 loop_comparison_code[loop_num] */
7985
7986#ifdef HAVE_decrement_and_branch_on_count
7987static void
7988analyze_loop_iterations (loop_start, loop_end)
7989 rtx loop_start, loop_end;
7990{
7991 rtx comparison, comparison_value;
7992 rtx iteration_var, initial_value, increment;
7993 enum rtx_code comparison_code;
7994
7995 rtx last_loop_insn;
7996 rtx insn;
7997 int i;
7998
7999 /* loop_variable mode */
8000 enum machine_mode original_mode;
8001
8002 /* find the number of the loop */
8003 int loop_num = uid_loop_num [INSN_UID (loop_start)];
8004
8005 /* we change our mind only when we are sure that loop will be instrumented */
8006 loop_can_insert_bct[loop_num] = 0;
8007
8008 /* is the optimization suppressed? */
8009 if ( !flag_branch_on_count_reg )
8010 return;
8011
8012 /* make sure that count-reg is not in use */
8013 if (loop_used_count_register[loop_num]){
8014 if (loop_dump_stream)
8015 fprintf (loop_dump_stream,
8016 "analyze_loop_iterations %d: BCT instrumentation failed: count register already in use\n",
8017 loop_num);
8018 return;
8019 }
8020
8021 /* make sure that the function has no indirect jumps. */
8022 if (indirect_jump_in_function){
8023 if (loop_dump_stream)
8024 fprintf (loop_dump_stream,
8025 "analyze_loop_iterations %d: BCT instrumentation failed: indirect jump in function\n",
8026 loop_num);
8027 return;
8028 }
8029
8030 /* make sure that the last loop insn is a conditional jump */
8031 last_loop_insn = PREV_INSN (loop_end);
8032 if (GET_CODE (last_loop_insn) != JUMP_INSN || !condjump_p (last_loop_insn)) {
8033 if (loop_dump_stream)
8034 fprintf (loop_dump_stream,
8035 "analyze_loop_iterations %d: BCT instrumentation failed: invalid jump at loop end\n",
8036 loop_num);
8037 return;
8038 }
8039
8040 /* First find the iteration variable. If the last insn is a conditional
8041 branch, and the insn preceding it tests a register value, make that
8042 register the iteration variable. */
8043
8044 /* We used to use prev_nonnote_insn here, but that fails because it might
8045 accidentally get the branch for a contained loop if the branch for this
8046 loop was deleted. We can only trust branches immediately before the
8047 loop_end. */
8048
8049 comparison = get_condition_for_loop (last_loop_insn);
8050 /* ??? Get_condition may switch position of induction variable and
8051 invariant register when it canonicalizes the comparison. */
8052
8053 if (comparison == 0) {
8054 if (loop_dump_stream)
8055 fprintf (loop_dump_stream,
8056 "analyze_loop_iterations %d: BCT instrumentation failed: comparison not found\n",
8057 loop_num);
8058 return;
8059 }
8060
8061 comparison_code = GET_CODE (comparison);
8062 iteration_var = XEXP (comparison, 0);
8063 comparison_value = XEXP (comparison, 1);
8064
8065 original_mode = GET_MODE (iteration_var);
8066 if (GET_MODE_CLASS (original_mode) != MODE_INT
8067 || GET_MODE_SIZE (original_mode) != UNITS_PER_WORD) {
8068 if (loop_dump_stream)
8069 fprintf (loop_dump_stream,
8070 "analyze_loop_iterations %d: BCT Instrumentation failed: loop variable not integer\n",
8071 loop_num);
8072 return;
8073 }
8074
8075 /* get info about loop bounds and increment */
8076 iteration_info (iteration_var, &initial_value, &increment,
8077 loop_start, loop_end);
8078
8079 /* make sure that all required loop data were found */
8080 if (!(initial_value && increment && comparison_value
8081 && invariant_p (comparison_value) && invariant_p (increment)
8082 && ! indirect_jump_in_function))
8083 {
8084 if (loop_dump_stream) {
8085 fprintf (loop_dump_stream,
8086 "analyze_loop_iterations %d: BCT instrumentation failed because of wrong loop: ", loop_num);
8087 if (!(initial_value && increment && comparison_value)) {
8088 fprintf (loop_dump_stream, "\tbounds not available: ");
8089 if ( ! initial_value )
8090 fprintf (loop_dump_stream, "initial ");
8091 if ( ! increment )
8092 fprintf (loop_dump_stream, "increment ");
8093 if ( ! comparison_value )
8094 fprintf (loop_dump_stream, "comparison ");
8095 fprintf (loop_dump_stream, "\n");
8096 }
8097 if (!invariant_p (comparison_value) || !invariant_p (increment))
8098 fprintf (loop_dump_stream, "\tloop bounds not invariant\n");
8099 }
8100 return;
8101 }
8102
8103 /* make sure that the increment is constant */
8104 if (GET_CODE (increment) != CONST_INT) {
8105 if (loop_dump_stream)
8106 fprintf (loop_dump_stream,
8107 "analyze_loop_iterations %d: instrumentation failed: not arithmetic loop\n",
8108 loop_num);
8109 return;
8110 }
8111
8112 /* make sure that the loop contains neither a function call nor a tablejump.
8113 (the count register might be altered by the called function, and might
8114 be used for a tablejump). */
8115 for (insn = loop_start; insn && insn != loop_end; insn = NEXT_INSN (insn)) {
8116 if (GET_CODE (insn) == CALL_INSN){
8117 if (loop_dump_stream)
8118 fprintf (loop_dump_stream,
8119 "analyze_loop_iterations %d: BCT instrumentation failed: function call in the loop\n",
8120 loop_num);
8121 return;
8122 }
8123
8124 if (GET_CODE (insn) == JUMP_INSN
8125 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
8126 || GET_CODE (PATTERN (insn)) == ADDR_VEC)){
8127 if (loop_dump_stream)
8128 fprintf (loop_dump_stream,
8129 "analyze_loop_iterations %d: BCT instrumentation failed: computed branch in the loop\n",
8130 loop_num);
8131 return;
8132 }
8133 }
8134
8135 /* At this point, we are sure that the loop can be instrumented with BCT.
8136 Some of the loops, however, will not be instrumented - the final decision
8137 is taken by insert_bct () */
8138 if (loop_dump_stream)
8139 fprintf (loop_dump_stream,
8140 "analyze_loop_iterations: loop (luid =%d) can be BCT instrumented.\n",
8141 loop_num);
8142
8143 /* mark all enclosing loops that they cannot use count register */
8144 /* ???: In fact, since insert_bct may decide not to instrument this loop,
8145 marking here may prevent instrumenting an enclosing loop that could
8146 actually be instrumented. But since this is rare, it is safer to mark
8147 here in case the order of calling (analyze/insert)_bct would be changed. */
8148 for (i=loop_num; i != -1; i = loop_outer_loop[i])
8149 loop_used_count_register[i] = 1;
8150
8151 /* Set data structures which will be used by the instrumentation phase */
8152 loop_start_value[loop_num] = initial_value;
8153 loop_comparison_value[loop_num] = comparison_value;
8154 loop_increment[loop_num] = increment;
8155 loop_comparison_code[loop_num] = comparison_code;
8156 loop_can_insert_bct[loop_num] = 1;
8157}
8158
8159
8160/* instrument loop for insertion of bct instruction. We distinguish between
8161 loops with compile-time bounds and those with run-time bounds. The loop
8162 behaviour is analyzed according to the following characteristics/variables:
8163 ; Input variables:
8164 ; comparison-value: the value to which the iteration counter is compared.
8165 ; initial-value: iteration-counter initial value.
8166 ; increment: iteration-counter increment.
8167 ; Computed variables:
8168 ; increment-direction: the sign of the increment.
8169 ; compare-direction: '1' for LT, LE, '-1' for GT, GE, '0' for NE.
8170 ; range-direction: sign (comparison-value - initial-value)
8171 We give up on the following cases:
8172 ; loop variable overflow.
8173 ; run-time loop bounds with comparison code NE.
8174 */
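/* Illustrative example, not from the original sources: for a loop such
   as "for (i = 0; i < n; i += 4)" the inputs are initial-value 0,
   increment 4 and comparison code LT, giving increment-direction = 1
   and compare-direction = 1; since the two directions agree, the loop
   does not terminate by overflow and remains a candidate.  */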
8175
8176static void
8177insert_bct (loop_start, loop_end)
8178 rtx loop_start, loop_end;
8179{
8180 rtx initial_value, comparison_value, increment;
8181 enum rtx_code comparison_code;
8182
8183 int increment_direction, compare_direction;
8184 int unsigned_p = 0;
8185
8186 /* if the loop condition is <= or >=, the number of iteration
8187 is 1 more than the range of the bounds of the loop */
8188 int add_iteration = 0;
8189
8190 /* the only machine mode we work with - is the integer of the size that the
8191 machine has */
8192 enum machine_mode loop_var_mode = word_mode;
8193
8194 int loop_num = uid_loop_num [INSN_UID (loop_start)];
8195
8196 /* get loop-variables. No need to check that these are valid - already
8197 checked in analyze_loop_iterations (). */
8198 comparison_code = loop_comparison_code[loop_num];
8199 initial_value = loop_start_value[loop_num];
8200 comparison_value = loop_comparison_value[loop_num];
8201 increment = loop_increment[loop_num];
8202
8203 /* check analyze_loop_iterations decision for this loop. */
8204 if (! loop_can_insert_bct[loop_num]){
8205 if (loop_dump_stream)
8206 fprintf (loop_dump_stream,
8207 "insert_bct: [%d] - was decided not to instrument by analyze_loop_iterations ()\n",
8208 loop_num);
8209 return;
8210 }
8211
8212 /* It's impossible to instrument a completely unrolled loop. */
8213 if (loop_unroll_factor [loop_num] == -1)
8214 return;
8215
8216 /* make sure that the last loop insn is a conditional jump.
8217 This check is repeated from analyze_loop_iterations (),
8218 because unrolling might have changed that. */
8219 if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN
8220 || !condjump_p (PREV_INSN (loop_end))) {
8221 if (loop_dump_stream)
8222 fprintf (loop_dump_stream,
8223 "insert_bct: not instrumenting BCT because of invalid branch\n");
8224 return;
8225 }
8226
8227 /* fix increment in case loop was unrolled. */
237a9795
JL
8228 if (loop_unroll_factor [loop_num] > 1)
8229 increment = GEN_INT ( INTVAL (increment) * loop_unroll_factor [loop_num] );
8c660648
JL
8230
8231 /* determine properties and directions of the loop */
8232 increment_direction = (INTVAL (increment) > 0) ? 1:-1;
8233 switch ( comparison_code ) {
8234 case LEU:
8235 unsigned_p = 1;
8236 /* fallthrough */
8237 case LE:
8238 compare_direction = 1;
8239 add_iteration = 1;
8240 break;
8241 case GEU:
8242 unsigned_p = 1;
8243 /* fallthrough */
8244 case GE:
8245 compare_direction = -1;
8246 add_iteration = 1;
8247 break;
8248 case EQ:
8249 /* in this case we cannot know the number of iterations */
8250 if (loop_dump_stream)
8251 fprintf (loop_dump_stream,
8252 "insert_bct: %d: loop cannot be instrumented: == in condition\n",
8253 loop_num);
8254 return;
8255 case LTU:
8256 unsigned_p = 1;
8257 /* fallthrough */
8258 case LT:
8259 compare_direction = 1;
8260 break;
8261 case GTU:
8262 unsigned_p = 1;
8263 /* fallthrough */
8264 case GT:
8265 compare_direction = -1;
8266 break;
8267 case NE:
8268 compare_direction = 0;
8269 break;
8270 default:
8271 abort ();
8272 }
8273
8274
8275 /* make sure that the loop does not end by an overflow */
8276 if (compare_direction != increment_direction) {
8277 if (loop_dump_stream)
8278 fprintf (loop_dump_stream,
8279 "insert_bct: %d: loop cannot be instrumented: terminated by overflow\n",
8280 loop_num);
8281 return;
8282 }
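  /* Example (added for illustration): with increment = +1
     (increment_direction == 1) but a condition like `i > n'
     (compare_direction == -1), the counter moves away from the bound, so
     the loop could only terminate when `i' wraps around; rather than
     reason about overflow, we give up on such loops here.  */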

  /* Try to instrument the loop.  */

  /* Handle the simpler case, where the bounds are known at compile time.  */
  if (GET_CODE (initial_value) == CONST_INT
      && GET_CODE (comparison_value) == CONST_INT)
    {
      int n_iterations;
      int increment_value_abs = INTVAL (increment) * increment_direction;

      /* Check the relation between compare-val and initial-val.  */
      int difference = INTVAL (comparison_value) - INTVAL (initial_value);
      int range_direction = (difference > 0) ? 1 : -1;

      /* Make sure the loop executes enough iterations to gain from BCT.  */
      if (difference > -3 && difference < 3) {
	if (loop_dump_stream)
	  fprintf (loop_dump_stream,
		   "insert_bct: loop %d not BCT instrumented: too small iteration count.\n",
		   loop_num);
	return;
      }

      /* Make sure that the loop executes at least once.  */
      if ((range_direction == 1 && compare_direction == -1)
	  || (range_direction == -1 && compare_direction == 1))
	{
	  if (loop_dump_stream)
	    fprintf (loop_dump_stream,
		     "insert_bct: loop %d: does not iterate even once. Not instrumenting.\n",
		     loop_num);
	  return;
	}
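      /* Example (added for illustration): initial-value = 10 and
	 comparison-value = 0 with `<' gives range_direction == -1 but
	 compare_direction == 1; the bound lies behind the initial value,
	 so the body would never execute and BCT is pointless.  */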

      /* Make sure that the loop does not end by an overflow (with
	 compile-time bounds we must have an additional check for overflow,
	 because here we also support the compare code of NE).  */
      if (comparison_code == NE
	  && increment_direction != range_direction) {
	if (loop_dump_stream)
	  fprintf (loop_dump_stream,
		   "insert_bct (compile time bounds): %d: loop not instrumented: terminated by overflow\n",
		   loop_num);
	return;
      }

      /* Determine the number of iterations by:
	 ;
	 ;                  compare-val - initial-val + (increment -1) + additional-iteration
	 ;  num_iterations = -----------------------------------------------------------------
	 ;                                           increment
	 */
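      /* Worked example (added for illustration): a loop equivalent to
	 `for (i = 0; i <= 10; i += 2)' has difference = 10,
	 increment_value_abs = 2 and add_iteration = 1, so
	 n_iterations = (10 + (2 - 1) + 1) / 2 = 6, matching the six
	 executions with i = 0, 2, 4, 6, 8, 10.  */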
      difference = (range_direction > 0) ? difference : -difference;
#if 0
      fprintf (stderr, "difference is: %d\n", difference); /* @*/
      fprintf (stderr, "increment_value_abs is: %d\n", increment_value_abs); /* @*/
      fprintf (stderr, "add_iteration is: %d\n", add_iteration); /* @*/
      fprintf (stderr, "INTVAL (comparison_value) is: %d\n", INTVAL (comparison_value)); /* @*/
      fprintf (stderr, "INTVAL (initial_value) is: %d\n", INTVAL (initial_value)); /* @*/
#endif

      if (increment_value_abs == 0) {
	fprintf (stderr, "insert_bct: error: increment == 0 !!!\n");
	abort ();
      }
      n_iterations = (difference + increment_value_abs - 1 + add_iteration)
	/ increment_value_abs;

#if 0
      fprintf (stderr, "number of iterations is: %d\n", n_iterations); /* @*/
#endif
      instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations));

      /* Done with this loop.  */
      return;
    }

  /* Handle the more complex case, where the bounds are NOT known at compile
     time.  In this case we generate a run-time calculation of the number of
     iterations.  */

  /* With run-time bounds, if the compare is of the form `!=' we give up.  */
  if (comparison_code == NE) {
    if (loop_dump_stream)
      fprintf (loop_dump_stream,
	       "insert_bct: fail for loop %d: runtime bounds with != comparison\n",
	       loop_num);
    return;
  }

  else {
    /* We rely on the existence of a run-time guard to ensure that the
       loop executes at least once.  */
    rtx sequence;
    rtx iterations_num_reg;

    int increment_value_abs = INTVAL (increment) * increment_direction;

    /* Make sure that the increment is a power of two; otherwise (an
       expensive) divide is needed.  */
    if (exact_log2 (increment_value_abs) == -1)
      {
	if (loop_dump_stream)
	  fprintf (loop_dump_stream,
		   "insert_bct: not instrumenting BCT because the increment is not a power of 2\n");
	return;
      }

    /* Compute the number of iterations.  */
    start_sequence ();
    {
      rtx temp_reg;

      /* Again, the number of iterations is calculated by:
	 ;
	 ;                  compare-val - initial-val + (increment -1) + additional-iteration
	 ;  num_iterations = -----------------------------------------------------------------
	 ;                                           increment
	 */
      /* ??? Do we have to call copy_rtx here before passing rtx to
	 expand_binop?  */
      if (compare_direction > 0) {
	/* <, <= : the loop variable is increasing.  */
	temp_reg = expand_binop (loop_var_mode, sub_optab, comparison_value,
				 initial_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
      }
      else {
	temp_reg = expand_binop (loop_var_mode, sub_optab, initial_value,
				 comparison_value, NULL_RTX, 0, OPTAB_LIB_WIDEN);
      }

      if (increment_value_abs - 1 + add_iteration != 0)
	temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg,
				 GEN_INT (increment_value_abs - 1 + add_iteration),
				 NULL_RTX, 0, OPTAB_LIB_WIDEN);

      if (increment_value_abs != 1)
	{
	  /* ??? This will generate an expensive divide instruction for
	     most targets.  The original authors apparently expected this
	     to be a shift, since they test for power-of-2 divisors above,
	     but just naively generating a divide instruction will not give
	     a shift.  It happens to work for the PowerPC target because
	     the rs6000.md file has a divide pattern that emits shifts.
	     It will probably not work for any other target.  */
	  iterations_num_reg = expand_binop (loop_var_mode, sdiv_optab,
					     temp_reg,
					     GEN_INT (increment_value_abs),
					     NULL_RTX, 0, OPTAB_LIB_WIDEN);
	}
      else
	iterations_num_reg = temp_reg;
    }
    sequence = gen_sequence ();
    end_sequence ();
    emit_insn_before (sequence, loop_start);
    instrument_loop_bct (loop_start, loop_end, iterations_num_reg);
  }
}

/* Instrument the loop by inserting a BCT in it.  This is done in the
   following way:
   1. A new register is created and assigned the hard register number of
      the count register.
   2. In the head of the loop the new variable is initialized by the value
      passed in the loop_num_iterations parameter.
   3. At the end of the loop, a comparison of the register with 0 is
      generated.  The created comparison follows the pattern defined for
      the decrement_and_branch_on_count insn, so this insn will be generated
      in the assembly generation phase.
   4. The compare-and-branch on the old variable is deleted.  So, if the
      loop variable was not used elsewhere, it will be eliminated by
      data-flow analysis.  */
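/* Sketch of the transformation (added commentary, not part of the original
   code; `ctr' stands for the count register and all names are illustrative):

       before:                        after:
         i = init;                      i = init;
       top:                            ctr = n_iterations;
         body; i += increment;        top:
         if (i < n) goto top;           body; i += increment;
                                        if (--ctr != 0) goto top;

   The old compare-and-branch on `i' is deleted here; the increment of `i'
   becomes dead as well once data-flow analysis sees that `i' has no
   remaining uses.  */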

static void
instrument_loop_bct (loop_start, loop_end, loop_num_iterations)
     rtx loop_start, loop_end;
     rtx loop_num_iterations;
{
  rtx temp_reg1, temp_reg2;
  rtx start_label;

  rtx sequence;
  enum machine_mode loop_var_mode = word_mode;

  if (HAVE_decrement_and_branch_on_count)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "Loop: Inserting BCT\n");

      /* Discard the original jump that continues the loop.  The original
	 compare result may still be live, so it cannot be discarded
	 explicitly.  */
      delete_insn (PREV_INSN (loop_end));

      /* Insert the label which will delimit the start of the loop.  */
      start_label = gen_label_rtx ();
      emit_label_after (start_label, loop_start);

      /* Insert initialization of the count register into the loop header.  */
      start_sequence ();
      temp_reg1 = gen_reg_rtx (loop_var_mode);
      emit_insn (gen_move_insn (temp_reg1, loop_num_iterations));

      /* This will be the count register.  */
      temp_reg2 = gen_rtx_REG (loop_var_mode, COUNT_REGISTER_REGNUM);
      /* We have to move the value to the count register from a GPR
	 because the rtx pointed to by loop_num_iterations could contain
	 an expression which cannot be moved into the count register.  */
      emit_insn (gen_move_insn (temp_reg2, temp_reg1));

      sequence = gen_sequence ();
      end_sequence ();
      emit_insn_before (sequence, loop_start);

      /* Insert a new comparison on the count register instead of the
	 old one, generating the needed BCT pattern (that will be
	 later recognized by the assembly generation phase).  */
      emit_jump_insn_before (gen_decrement_and_branch_on_count (temp_reg2,
								start_label),
			     loop_end);
      LABEL_NUSES (start_label)++;
    }
}
#endif /* HAVE_decrement_and_branch_on_count */

#endif /* HAIFA */

/* Scan the function and determine whether it has indirect (computed) jumps.

   This is taken mostly from flow.c; similar code exists elsewhere
   in the compiler.  It may be useful to put this into rtlanal.c.  */

static int
indirect_jump_in_function_p (start)
     rtx start;
{
  rtx insn;

  for (insn = start; insn; insn = NEXT_INSN (insn))
    if (computed_jump_p (insn))
      return 1;

  return 0;
}

/* Add MEM to the LOOP_MEMS array, if appropriate.  See the
   documentation for LOOP_MEMS for the definition of `appropriate'.
   This function is called from prescan_loop via for_each_rtx.  */
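/* Note on the callback protocol (added commentary; this matches how the
   return values are used below, but see rtlanal.c for the authoritative
   contract of for_each_rtx): returning 0 lets the traversal continue into
   subexpressions, while returning -1 tells for_each_rtx to skip the
   subexpressions of the current expression, as done for CONST_DOUBLE
   here.  */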

static int
insert_loop_mem (mem, data)
     rtx *mem;
     void *data;
{
  int i;
  rtx m = *mem;

  if (m == NULL_RTX)
    return 0;

  switch (GET_CODE (m))
    {
    case MEM:
      break;

    case CONST_DOUBLE:
      /* We're not interested in the MEM associated with a
	 CONST_DOUBLE, so there's no need to traverse into this.  */
      return -1;

    default:
      /* This is not a MEM.  */
      return 0;
    }

  /* See if we've already seen this MEM.  */
  for (i = 0; i < loop_mems_idx; ++i)
    if (rtx_equal_p (m, loop_mems[i].mem))
      {
	if (GET_MODE (m) != GET_MODE (loop_mems[i].mem))
	  /* The modes of the two memory accesses are different.  If
	     this happens, something tricky is going on, and we just
	     don't optimize accesses to this MEM.  */
	  loop_mems[i].optimize = 0;

	return 0;
      }

  /* Resize the array, if necessary.  */
  if (loop_mems_idx == loop_mems_allocated)
    {
      if (loop_mems_allocated != 0)
	loop_mems_allocated *= 2;
      else
	loop_mems_allocated = 32;

      loop_mems = (loop_mem_info *)
	xrealloc (loop_mems,
		  loop_mems_allocated * sizeof (loop_mem_info));
    }

  /* Actually insert the MEM.  */
  loop_mems[loop_mems_idx].mem = m;
  /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
     because we can't put it in a register.  We still store it in the
     table, though, so that if we see the same address later, but in a
     non-BLK mode, we'll not think we can optimize it at that point.  */
  loop_mems[loop_mems_idx].optimize = (GET_MODE (m) != BLKmode);
  loop_mems[loop_mems_idx].reg = NULL_RTX;
  ++loop_mems_idx;

  return 0;
}

/* Like load_mems, but also ensures that N_TIMES_SET,
   MAY_NOT_OPTIMIZE, REG_SINGLE_USAGE, and INSN_COUNT have the correct
   values after load_mems.  */

static void
load_mems_and_recount_loop_regs_set (scan_start, end, loop_top, start,
				     reg_single_usage, insn_count)
     rtx scan_start;
     rtx end;
     rtx loop_top;
     rtx start;
     varray_type reg_single_usage;
     int *insn_count;
{
  int nregs = max_reg_num ();

  load_mems (scan_start, end, loop_top, start);

  /* Recalculate n_times_set and friends since load_mems may have
     created new registers.  */
  if (max_reg_num () > nregs)
    {
      int i;
      int old_nregs;

      old_nregs = nregs;
      nregs = max_reg_num ();

      if (nregs > n_times_set->num_elements)
	{
	  /* Grow all the arrays.  */
	  VARRAY_GROW (n_times_set, nregs);
	  VARRAY_GROW (n_times_used, nregs);
	  VARRAY_GROW (may_not_optimize, nregs);
	  if (reg_single_usage)
	    VARRAY_GROW (reg_single_usage, nregs);
	}
      /* Clear the arrays.  */
      bzero ((char *) &n_times_set->data, nregs * sizeof (int));
      bzero ((char *) &may_not_optimize->data, nregs * sizeof (char));
      if (reg_single_usage)
	bzero ((char *) &reg_single_usage->data, nregs * sizeof (rtx));

      count_loop_regs_set (loop_top ? loop_top : start, end,
			   may_not_optimize, reg_single_usage,
			   insn_count, nregs);

      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	{
	  VARRAY_CHAR (may_not_optimize, i) = 1;
	  VARRAY_INT (n_times_set, i) = 1;
	}

#ifdef AVOID_CCMODE_COPIES
      /* Don't try to move insns which set CC registers if we should not
	 create CCmode register copies.  */
      for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
	if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
	  VARRAY_CHAR (may_not_optimize, i) = 1;
#endif

      /* Set n_times_used for the new registers.  */
      bcopy ((char *) (&n_times_set->data.i[0] + old_nregs),
	     (char *) (&n_times_used->data.i[0] + old_nregs),
	     (nregs - old_nregs) * sizeof (int));
    }
}

/* Move MEMs into registers for the duration of the loop.  SCAN_START
   is the first instruction in the loop (as it is executed).  The
   other parameters are as for next_insn_in_loop.  */
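/* Conceptual effect (added commentary, not part of the original code):
   for a MEM whose address `p' is loop-invariant, the pass rewrites

       loop:  ... *p ...  *p = x;  ...

   into

       reg = *p;
       loop:  ... reg ...  reg = x;  ...
       *p = reg;

   where the final store is emitted only when the MEM is written inside
   the loop.  */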

static void
load_mems (scan_start, end, loop_top, start)
     rtx scan_start;
     rtx end;
     rtx loop_top;
     rtx start;
{
  int maybe_never = 0;
  int i;
  rtx p;
  rtx label = NULL_RTX;
  rtx end_label;

  if (loop_mems_idx > 0)
    {
      /* Nonzero if the next instruction may never be executed.  */
      int next_maybe_never = 0;

      /* Check to see if it's possible that some instructions in the
	 loop are never executed.  */
      for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
	   p != NULL_RTX && !maybe_never;
	   p = next_insn_in_loop (p, scan_start, end, loop_top))
	{
	  if (GET_CODE (p) == CODE_LABEL)
	    maybe_never = 1;
	  else if (GET_CODE (p) == JUMP_INSN
		   /* If we enter the loop in the middle, and scan
		      around to the beginning, don't set maybe_never
		      for that.  This must be an unconditional jump,
		      otherwise the code at the top of the loop might
		      never be executed.  Unconditional jumps are
		      followed by a barrier and then the loop end.  */
		   && ! (GET_CODE (p) == JUMP_INSN
			 && JUMP_LABEL (p) == loop_top
			 && NEXT_INSN (NEXT_INSN (p)) == end
			 && simplejump_p (p)))
	    {
	      if (!condjump_p (p))
		/* Something complicated.  */
		maybe_never = 1;
	      else
		/* If there are any more instructions in the loop, they
		   might not be reached.  */
		next_maybe_never = 1;
	    }
	  else if (next_maybe_never)
	    maybe_never = 1;
	}

      /* Actually move the MEMs.  */
      for (i = 0; i < loop_mems_idx; ++i)
	{
	  int j;
	  int written = 0;
	  rtx reg;
	  rtx mem = loop_mems[i].mem;

	  if (MEM_VOLATILE_P (mem)
	      || invariant_p (XEXP (mem, 0)) != 1)
	    /* There's no telling whether or not MEM is modified.  */
	    loop_mems[i].optimize = 0;

	  /* Go through the MEMs written to in the loop to see if this
	     one is aliased by one of them.  */
	  for (j = 0; j < loop_store_mems_idx; ++j)
	    {
	      if (rtx_equal_p (mem, loop_store_mems[j]))
		written = 1;
	      else if (true_dependence (loop_store_mems[j], VOIDmode,
					mem, rtx_varies_p))
		{
		  /* MEM is indeed aliased by this store.  */
		  loop_mems[i].optimize = 0;
		  break;
		}
	    }

	  /* If this MEM is written to, we must be sure that there
	     are no reads from another MEM that aliases this one.  */
	  if (loop_mems[i].optimize && written)
	    {
	      int j;

	      for (j = 0; j < loop_mems_idx; ++j)
		{
		  if (j == i)
		    continue;
		  else if (true_dependence (mem,
					    VOIDmode,
					    loop_mems[j].mem,
					    rtx_varies_p))
		    {
		      /* It's not safe to hoist loop_mems[i] out of
			 the loop because writes to it might not be
			 seen by reads from loop_mems[j].  */
		      loop_mems[i].optimize = 0;
		      break;
		    }
		}
	    }

	  if (maybe_never && may_trap_p (mem))
	    /* We can't access the MEM outside the loop; it might
	       cause a trap that wouldn't have happened otherwise.  */
	    loop_mems[i].optimize = 0;

	  if (!loop_mems[i].optimize)
	    /* We thought we were going to lift this MEM out of the
	       loop, but later discovered that we could not.  */
	    continue;

	  /* Allocate a pseudo for this MEM.  We set REG_USERVAR_P in
	     order to keep scan_loop from moving stores to this MEM
	     out of the loop just because this REG is neither a
	     user-variable nor used in the loop test.  */
	  reg = gen_reg_rtx (GET_MODE (mem));
	  REG_USERVAR_P (reg) = 1;
	  loop_mems[i].reg = reg;

	  /* Now, replace all references to the MEM with the
	     corresponding pseudos.  */
	  for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
	       p != NULL_RTX;
	       p = next_insn_in_loop (p, scan_start, end, loop_top))
	    {
	      rtx_and_int ri;
	      ri.r = p;
	      ri.i = i;
	      for_each_rtx (&p, replace_loop_mem, &ri);
	    }

	  if (!apply_change_group ())
	    /* We couldn't replace all occurrences of the MEM.  */
	    loop_mems[i].optimize = 0;
	  else
	    {
	      rtx set;

	      /* Load the memory immediately before START, which is
		 the NOTE_LOOP_BEG.  */
	      set = gen_rtx_SET (GET_MODE (reg), reg, mem);
	      emit_insn_before (set, start);

	      if (written)
		{
		  if (label == NULL_RTX)
		    {
		      /* We must compute the former
			 right-after-the-end label before we insert
			 the new one.  */
		      end_label = next_label (end);
		      label = gen_label_rtx ();
		      emit_label_after (label, end);
		    }

		  /* Store the memory immediately after END, which is
		     the NOTE_LOOP_END.  */
		  set = gen_rtx_SET (GET_MODE (reg), copy_rtx (mem), reg);
		  emit_insn_after (set, label);
		}

	      if (loop_dump_stream)
		{
		  fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
			   REGNO (reg), (written ? "r/w" : "r/o"));
		  print_rtl (loop_dump_stream, mem);
		  fputc ('\n', loop_dump_stream);
		}
	    }
	}
    }

  if (label != NULL_RTX)
    {
      /* Now, we need to replace all references to the previous exit
	 label with the new one.  */
      rtx_pair rr;
      rr.r1 = end_label;
      rr.r2 = label;

      for (p = start; p != end; p = NEXT_INSN (p))
	{
	  for_each_rtx (&p, replace_label, &rr);

	  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
	     field.  This is not handled by for_each_rtx because it doesn't
	     handle unprinted ('0') fields.  We need to update JUMP_LABEL
	     because the immediately following unroll pass will use it.
	     replace_label would not work anyways, because that only handles
	     LABEL_REFs.  */
	  if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label)
	    JUMP_LABEL (p) = label;
	}
    }
}

/* Replace MEM with its associated pseudo register.  This function is
   called from load_mems via for_each_rtx.  DATA is actually an
   rtx_and_int * describing the instruction currently being scanned
   and the MEM we are currently replacing.  */

static int
replace_loop_mem (mem, data)
     rtx *mem;
     void *data;
{
  rtx_and_int *ri;
  rtx insn;
  int i;
  rtx m = *mem;

  if (m == NULL_RTX)
    return 0;

  switch (GET_CODE (m))
    {
    case MEM:
      break;

    case CONST_DOUBLE:
      /* We're not interested in the MEM associated with a
	 CONST_DOUBLE, so there's no need to traverse into one.  */
      return -1;

    default:
      /* This is not a MEM.  */
      return 0;
    }

  ri = (rtx_and_int *) data;
  i = ri->i;

  if (!rtx_equal_p (loop_mems[i].mem, m))
    /* This is not the MEM we are currently replacing.  */
    return 0;

  insn = ri->r;

  /* Actually replace the MEM.  */
  validate_change (insn, mem, loop_mems[i].reg, 1);

  return 0;
}

/* Replace occurrences of the old exit label for the loop with the new
   one.  DATA is an rtx_pair containing the old and new labels,
   respectively.  */

static int
replace_label (x, data)
     rtx *x;
     void *data;
{
  rtx l = *x;
  rtx old_label = ((rtx_pair *) data)->r1;
  rtx new_label = ((rtx_pair *) data)->r2;

  if (l == NULL_RTX)
    return 0;

  if (GET_CODE (l) != LABEL_REF)
    return 0;

  if (XEXP (l, 0) != old_label)
    return 0;

  XEXP (l, 0) = new_label;
  ++LABEL_NUSES (new_label);
  --LABEL_NUSES (old_label);

  return 0;
}