/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 88, 89, 91-98, 1999 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */


/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the general
   induction variables, and induction variable elimination is applied to
   the basic induction variables.

   It also finds cases where
   a register is set within the loop by zero-extending a narrower value
   and changes these to zero the entire register once before the loop
   and merely copy the low part within the loop.

   Most of the complexity is in heuristics to decide when it is worth
   while to do these things.  */
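
/* As a rough illustration (a sketch in source terms, not code from this
   pass): the zero-extension transformation described above rewrites

       while (cond)
         x = (unsigned char) *p++;    -- sets all of x each iteration

   into the equivalent of

       x = 0;                         -- clear the whole reg once
       while (cond)
         set_low_byte (x, *p++);      -- hypothetical low-part-only store

   where set_low_byte stands in for a STRICT_LOW_PART set; the real
   transformation happens on RTL, not on C source.  */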

#include "config.h"
#include "system.h"
#include "rtl.h"
#include "obstack.h"
#include "function.h"
#include "expr.h"
#include "insn-config.h"
#include "insn-flags.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "except.h"
#include "toplev.h"

/* Information about the loop being processed used to compute
   the number of loop iterations for loop unrolling and doloop
   optimization.  */
static struct loop_info this_loop_info;

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but increase monotonically always.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

int *uid_loop_num;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* 1 + luid of last insn.  */

static int max_luid;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Indexed by loop number, contains the first and last insn of each loop.  */

static rtx *loop_number_loop_starts, *loop_number_loop_ends;

/* Likewise for the continue insn.  */
static rtx *loop_number_loop_cont;

/* The first code_label that is reached in every loop iteration.
   0 when not computed yet, initially const0_rtx if a jump couldn't be
   followed.
   Also set to 0 when there is no such label before the NOTE_INSN_LOOP_CONT
   of this loop, or in verify_dominator, if a jump couldn't be followed.  */
static rtx *loop_number_cont_dominator;

/* For each loop, gives the containing loop number, -1 if none.  */

int *loop_outer_loop;

#ifdef HAVE_decrement_and_branch_on_count
/* Records whether the resource is in use by an inner loop.  */

int *loop_used_count_register;
#endif  /* HAVE_decrement_and_branch_on_count */

/* Indexed by loop number, contains a nonzero value if the "loop" isn't
   really a loop (an insn outside the loop branches into it).  */

static char *loop_invalid;

/* Indexed by loop number, links together all LABEL_REFs which refer to
   code labels outside the loop.  Used by routines that need to know all
   loop exits, such as final_biv_value and final_giv_value.

   This does not include loop exits due to return instructions.  This is
   because all bivs and givs are pseudos, and hence must be dead after a
   return, so the presence of a return does not affect any of the
   optimizations that use this info.  It is simpler to just not include return
   instructions on this list.  */

rtx *loop_number_exit_labels;

/* Indexed by loop number, counts the number of LABEL_REFs on
   loop_number_exit_labels for this loop and all loops nested inside it.  */

int *loop_number_exit_count;

/* Indexed by register number, contains the number of times the reg
   is set during the loop being scanned.
   During code motion, a negative value indicates a reg that has been
   made a candidate; in particular -2 means that it is a candidate that
   we know is equal to a constant and -1 means that it is a candidate
   not known equal to a constant.
   After code motion, regs moved have 0 (which is accurate now)
   while the failed candidates have the original number of times set.

   Therefore, at all times, == 0 indicates an invariant register;
   < 0 a conditionally invariant one.  */

static varray_type set_in_loop;
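
/* For instance (an illustrative walk-through, not code from this file):
   a reg set once in the loop starts with a set_in_loop entry of 1; when
   it becomes a candidate the entry is marked -2 if its value is known
   equal to a constant, -1 otherwise.  If the insn is actually moved the
   entry ends up 0; for a failed candidate it is restored from
   n_times_set below.  */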

/* Original value of set_in_loop; same except that this value
   is not set negative for a reg whose sets have been made candidates
   and not set to 0 for a reg that is moved.  */

static varray_type n_times_set;

/* Indexed by register number, 1 indicates that the register
   cannot be moved or strength reduced.  */

static varray_type may_not_optimize;

/* Contains the insn in which a register was used if it was used
   exactly once; contains const0_rtx if it was used more than once.  */

static varray_type reg_single_usage;

/* Nonzero means reg N has already been moved out of one loop.
   This reduces the desire to move it out of another.  */

static char *moved_once;

/* List of MEMs that are stored in this loop.  */

static rtx loop_store_mems;

/* The insn where the first of these was found.  */
static rtx first_loop_store_insn;

typedef struct loop_mem_info {
  rtx mem;      /* The MEM itself.  */
  rtx reg;      /* Corresponding pseudo, if any.  */
  int optimize; /* Nonzero if we can optimize access to this MEM.  */
} loop_mem_info;

/* Array of MEMs that are used (read or written) in this loop, but
   cannot be aliased by anything in this loop, except perhaps
   themselves.  In other words, if loop_mems[i] is altered during the
   loop, it is altered by an expression that is rtx_equal_p to it.  */

static loop_mem_info *loop_mems;
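
/* A hypothetical example of such a MEM (not taken from this file): in

       while (--n)
         sum += 1;

   where sum lives in memory at a loop-invariant address, every access
   inside the loop uses an rtx_equal_p address, so the MEM can be entered
   in LOOP_MEMS and replaced by a pseudo for the duration of the loop.  */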

/* The index of the next available slot in LOOP_MEMS.  */

static int loop_mems_idx;

/* The number of elements allocated in LOOP_MEMS.  */

static int loop_mems_allocated;

/* Nonzero if we don't know what MEMs were changed in the current
   loop.  This happens if the loop contains a call (in which case
   `loop_info->has_call' will also be set) or if we store into more
   than NUM_STORES MEMs.  */

static int unknown_address_altered;

/* Count of movable (i.e. invariant) instructions discovered in the loop.  */
static int num_movables;

/* Count of memory write instructions discovered in the loop.  */
static int num_mem_sets;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
int max_reg_before_loop;

/* This obstack is used in product_cheap_p to allocate its rtl.  It
   may call gen_reg_rtx which, in turn, may reallocate regno_reg_rtx.
   If we used the same obstack that it did, we would be deallocating
   that array.  */

static struct obstack temp_obstack;

/* This is where the pointer to the obstack being used for RTL is stored.  */

extern struct obstack *rtl_obstack;

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free
\f
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;                   /* A movable insn.  */
  rtx set_src;                /* The expression this reg is set from.  */
  rtx set_dest;               /* The destination of this SET.  */
  rtx dependencies;           /* When INSN is a libcall, this is an EXPR_LIST
                                 of any registers used within the LIBCALL.  */
  int consec;                 /* Number of consecutive following insns
                                 that must be moved with this one.  */
  int regno;                  /* The register it sets.  */
  short lifetime;             /* lifetime of that register;
                                 may be adjusted when matching movables
                                 that load the same value are found.  */
  short savings;              /* Number of insns we can move for this reg,
                                 including other movables that force this
                                 or match this one.  */
  unsigned int cond : 1;      /* 1 if only conditionally movable.  */
  unsigned int force : 1;     /* 1 means MUST move this insn.  */
  unsigned int global : 1;    /* 1 means reg is live outside this loop.  */
                /* If PARTIAL is 1, GLOBAL means something different:
                   that the reg is live outside the range from where it is set
                   to the following label.  */
  unsigned int done : 1;      /* 1 inhibits further processing of this.  */

  unsigned int partial : 1;   /* 1 means this reg is used for zero-extending.
                                 In particular, moving it does not make it
                                 invariant.  */
  unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to
                                 load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
                                    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;  /* 1 means a REG_EQUIV is present on INSN.  */
  enum machine_mode savemode; /* Nonzero means it is a mode for a low part
                                 that we should avoid changing when clearing
                                 the rest of the reg.  */
  struct movable *match;      /* First entry for same value.  */
  struct movable *forces;     /* An insn that must be moved if this is.  */
  struct movable *next;
};

static struct movable *the_movables;

FILE *loop_dump_stream;

/* Forward declarations.  */

static void verify_dominator PROTO((int));
static void find_and_verify_loops PROTO((rtx));
static void mark_loop_jump PROTO((rtx, int));
static void prescan_loop PROTO((rtx, rtx, struct loop_info *));
static int reg_in_basic_block_p PROTO((rtx, rtx));
static int consec_sets_invariant_p PROTO((rtx, int, rtx));
static int labels_in_range_p PROTO((rtx, int));
static void count_one_set PROTO((rtx, rtx, varray_type, rtx *));

static void count_loop_regs_set PROTO((rtx, rtx, varray_type, varray_type,
                                       int *, int));
static void note_addr_stored PROTO((rtx, rtx));
static int loop_reg_used_before_p PROTO((rtx, rtx, rtx, rtx, rtx));
static void scan_loop PROTO((rtx, rtx, rtx, int, int));
#if 0
static void replace_call_address PROTO((rtx, rtx, rtx));
#endif
static rtx skip_consec_insns PROTO((rtx, int));
static int libcall_benefit PROTO((rtx));
static void ignore_some_movables PROTO((struct movable *));
static void force_movables PROTO((struct movable *));
static void combine_movables PROTO((struct movable *, int));
static int regs_match_p PROTO((rtx, rtx, struct movable *));
static int rtx_equal_for_loop_p PROTO((rtx, rtx, struct movable *));
static void add_label_notes PROTO((rtx, rtx));
static void move_movables PROTO((struct movable *, int, int, rtx, rtx, int));
static int count_nonfixed_reads PROTO((rtx));
static void strength_reduce PROTO((rtx, rtx, rtx, int, rtx, rtx,
                                   struct loop_info *, rtx, int, int));
static void find_single_use_in_loop PROTO((rtx, rtx, varray_type));
static int valid_initial_value_p PROTO((rtx, rtx, int, rtx));
static void find_mem_givs PROTO((rtx, rtx, int, int, rtx, rtx));
static void record_biv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx *,
                              int, int));
static void check_final_value PROTO((struct induction *, rtx, rtx,
                                     unsigned HOST_WIDE_INT));
static void record_giv PROTO((struct induction *, rtx, rtx, rtx, rtx, rtx,
                              int, enum g_types, int, int, rtx *, rtx, rtx));
static void update_giv_derive PROTO((rtx));
static int basic_induction_var PROTO((rtx, enum machine_mode, rtx, rtx,
                                      rtx *, rtx *, rtx **));
static rtx simplify_giv_expr PROTO((rtx, int *));
static int general_induction_var PROTO((rtx, rtx *, rtx *, rtx *, int, int *));
static int consec_sets_giv PROTO((int, rtx, rtx, rtx, rtx *, rtx *, rtx *));
static int check_dbra_loop PROTO((rtx, int, rtx, struct loop_info *));
static rtx express_from_1 PROTO((rtx, rtx, rtx));
static rtx combine_givs_p PROTO((struct induction *, struct induction *));
static void combine_givs PROTO((struct iv_class *));
struct recombine_givs_stats;
static int find_life_end PROTO((rtx, struct recombine_givs_stats *, rtx, rtx));
static void recombine_givs PROTO((struct iv_class *, rtx, rtx, int));
static int product_cheap_p PROTO((rtx, rtx));
static int maybe_eliminate_biv PROTO((struct iv_class *, rtx, rtx, int,
                                      int, int));
static int maybe_eliminate_biv_1 PROTO((rtx, rtx, struct iv_class *, int,
                                        rtx));
static int last_use_this_basic_block PROTO((rtx, rtx));
static void record_initial PROTO((rtx, rtx));
static void update_reg_last_use PROTO((rtx, rtx));
static rtx next_insn_in_loop PROTO((rtx, rtx, rtx, rtx));
static void load_mems_and_recount_loop_regs_set PROTO((rtx, rtx, rtx,
                                                       rtx, int *));
static void load_mems PROTO((rtx, rtx, rtx, rtx));
static int insert_loop_mem PROTO((rtx *, void *));
static int replace_loop_mem PROTO((rtx *, void *));
static int replace_label PROTO((rtx *, void *));

typedef struct rtx_and_int {
  rtx r;
  int i;
} rtx_and_int;

typedef struct rtx_pair {
  rtx r1;
  rtx r2;
} rtx_pair;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END)       \
  (INSN_UID (INSN) < max_uid_for_loop           \
   && INSN_LUID (INSN) >= INSN_LUID (START)     \
   && INSN_LUID (INSN) <= INSN_LUID (END))

#ifdef HAVE_decrement_and_branch_on_count
/* Test whether BCT applicable and safe.  */
static void insert_bct PROTO((rtx, rtx, struct loop_info *));

/* Auxiliary function that inserts the BCT pattern into the loop.  */
static void instrument_loop_bct PROTO((rtx, rtx, rtx));
#endif /* HAVE_decrement_and_branch_on_count */

/* Indirect_jump_in_function is computed once per function.  */
int indirect_jump_in_function = 0;
static int indirect_jump_in_function_p PROTO((rtx));

static int compute_luids PROTO((rtx, rtx, int));

static int biv_elimination_giv_has_0_offset PROTO((struct induction *,
                                                   struct induction *, rtx));
\f
/* Relative gain of eliminating various kinds of operations.  */
static int add_cost;
#if 0
static int shift_cost;
static int mult_cost;
#endif

/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;


void
init_loop ()
{
  char *free_point = (char *) oballoc (1);
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

#ifdef ADDRESS_COST
  reg_address_cost = ADDRESS_COST (reg);
#else
  reg_address_cost = rtx_cost (reg, MEM);
#endif

  /* We multiply by 2 to reconcile the difference in scale between
     these two ways of computing costs.  Otherwise the cost of a copy
     will be far less than the cost of an add.  */

  copy_cost = 2 * 2;

  /* Free the objects we just allocated.  */
  obfree (free_point);

  /* Initialize the obstack used for rtl in product_cheap_p.  */
  gcc_obstack_init (&temp_obstack);
}
\f
/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
static int
compute_luids (start, end, prev_luid)
     rtx start, end;
     int prev_luid;
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
        continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
         luids between two insns is not affected by -g.  */
      if (GET_CODE (insn) != NOTE
          || NOTE_LINE_NUMBER (insn) <= 0)
        uid_luid[INSN_UID (insn)] = ++i;
      else
        /* Give a line number note the same luid as preceding insn.  */
        uid_luid[INSN_UID (insn)] = i;
    }
  return i + 1;
}
\f
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (f, dumpfile, unroll_p, bct_p)
     /* f is the first instruction of a chain of insns for one function */
     rtx f;
     FILE *dumpfile;
     int unroll_p, bct_p;
{
  register rtx insn;
  register int i;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();

  moved_once = (char *) alloca (max_reg_before_loop);
  bzero (moved_once, max_reg_before_loop);

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
          && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
        max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) alloca (max_uid_for_loop * sizeof (int));
  uid_loop_num = (int *) alloca (max_uid_for_loop * sizeof (int));

  bzero ((char *) uid_luid, max_uid_for_loop * sizeof (int));
  bzero ((char *) uid_loop_num, max_uid_for_loop * sizeof (int));

  /* Allocate tables for recording each loop.  We set each entry, so they need
     not be zeroed.  */
  loop_number_loop_starts = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_ends = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_loop_cont = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_cont_dominator = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_outer_loop = (int *) alloca (max_loop_num * sizeof (int));
  loop_invalid = (char *) alloca (max_loop_num * sizeof (char));
  loop_number_exit_labels = (rtx *) alloca (max_loop_num * sizeof (rtx));
  loop_number_exit_count = (int *) alloca (max_loop_num * sizeof (int));

#ifdef HAVE_decrement_and_branch_on_count
  /* Allocate for BCT optimization.  */
  loop_used_count_register = (int *) alloca (max_loop_num * sizeof (int));
  bzero ((char *) loop_used_count_register, max_loop_num * sizeof (int));
#endif  /* HAVE_decrement_and_branch_on_count */

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f);

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_num (), 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it might
     have rearranged code afterwards, so we need to recompute the luids now.  */
  max_luid = compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
        break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Create a mapping from loops to BLOCK tree nodes.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    find_loop_tree_blocks ();

  /* Determine if the function has indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    if (! loop_invalid[i] && loop_number_loop_ends[i])
      scan_loop (loop_number_loop_starts[i], loop_number_loop_ends[i],
                 loop_number_loop_cont[i], unroll_p, bct_p);

  /* If debugging and unrolling loops, we must replicate the tree nodes
     corresponding to the blocks inside the loop, so that the original one
     to one mapping will remain.  */
  if (unroll_p && write_symbols != NO_DEBUG)
    unroll_block_trees ();

  end_alias_analysis ();
}
\f
/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP_TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (insn, start, end, loop_top)
     rtx insn;
     rtx start;
     rtx end;
     rtx loop_top;
{
  insn = NEXT_INSN (insn);

  if (insn == end)
    {
      if (loop_top)
        /* Go to the top of the loop, and continue there.  */
        insn = loop_top;
      else
        /* We're done.  */
        insn = NULL_RTX;
    }

  if (insn == start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}

/* Optimize one loop whose start is LOOP_START and end is END.
   LOOP_START is the NOTE_INSN_LOOP_BEG and END is the matching
   NOTE_INSN_LOOP_END.
   LOOP_CONT is the NOTE_INSN_LOOP_CONT.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (loop_start, end, loop_cont, unroll_p, bct_p)
     rtx loop_start, end, loop_cont;
     int unroll_p, bct_p;
{
  register int i;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* For a rotated loop that is entered near the bottom,
     this is the label at the top.  Otherwise it is zero.  */
  rtx loop_top = 0;
  /* Jump insn that enters the loop, or 0 if control drops in.  */
  rtx loop_entry_jump = 0;
  /* Place in the loop where control enters.  */
  rtx scan_start;
  /* Number of insns in the loop.  */
  int insn_count;
  int in_libcall = 0;
  int tem;
  rtx temp;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct movable *movables = 0;
  /* Last element in `movables' -- so we can add elements at the end.  */
  struct movable *last_movable = 0;
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;
  int nregs;
  struct loop_info *loop_info = &this_loop_info;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
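
  /* Sketch of the shape being detected (an illustrative source-level
     view, not code from this pass): a rotated "while (cond) body;" loop
     may be emitted as

         jump test
       top:
         body
       test:
         if (cond) jump top

     so the first real insn after NOTE_INSN_LOOP_BEG is the entry jump
     down to the exit test, and TOP is where each iteration restarts.  */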

  for (p = NEXT_INSN (loop_start);
       p != end
         && GET_CODE (p) != CODE_LABEL && GET_RTX_CLASS (GET_CODE (p)) != 'i'
         && (GET_CODE (p) != NOTE
             || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
                 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  scan_start = p;

  /* Set up variables describing this loop.  */
  prescan_loop (loop_start, end, loop_info);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP_TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN)
    {
      loop_entry_jump = p;

      /* Loop entry must be unconditional jump (and not a RETURN).  */
      if (simplejump_p (p)
          && JUMP_LABEL (p) != 0
          /* Check to see whether the jump actually
             jumps out of the loop (meaning it's no loop).
             This case can happen for things like
             do {..} while (0).  If this label was generated previously
             by loop, we can't tell anything about it and have to reject
             the loop.  */
          && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, end))
        {
          loop_top = next_label (scan_start);
          scan_start = JUMP_LABEL (p);
        }
    }

  /* If SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (scan_start) >= max_uid_for_loop
      || GET_CODE (scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
                 INSN_UID (loop_start), INSN_UID (end));
      return;
    }

  /* Count number of times each reg is set during this loop.
     Set VARRAY_CHAR (may_not_optimize, I) if it is not safe to move out
     the setting of register I.  Set VARRAY_RTX (reg_single_usage, I).  */

  /* Allocate extra space for REGS that might be created by
     load_mems.  We allocate a little extra slop as well, in the hopes
     that even after the moving of movables creates some new registers
     we won't have to reallocate these arrays.  However, we do grow
     the arrays, if necessary, in load_mems_and_recount_loop_regs_set.  */
  nregs = max_reg_num () + loop_mems_idx + 16;
  VARRAY_INT_INIT (set_in_loop, nregs, "set_in_loop");
  VARRAY_INT_INIT (n_times_set, nregs, "n_times_set");
  VARRAY_CHAR_INIT (may_not_optimize, nregs, "may_not_optimize");
  VARRAY_RTX_INIT (reg_single_usage, nregs, "reg_single_usage");

  count_loop_regs_set (loop_top ? loop_top : loop_start, end,
                       may_not_optimize, reg_single_usage, &insn_count, nregs);

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      VARRAY_CHAR (may_not_optimize, i) = 1;
      VARRAY_INT (set_in_loop, i) = 1;
    }

#ifdef AVOID_CCMODE_COPIES
  /* Don't try to move insns which set CC registers if we should not
     create CCmode register copies.  */
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
      VARRAY_CHAR (may_not_optimize, i) = 1;
#endif

  bcopy ((char *) &set_in_loop->data,
         (char *) &n_times_set->data, nregs * sizeof (int));

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
               INSN_UID (loop_start), INSN_UID (end), insn_count);
      if (loop_info->cont)
        fprintf (loop_dump_stream, "Continue at insn %d.\n",
                 INSN_UID (loop_info->cont));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set set_in_loop negative for the reg being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top);
       p != NULL_RTX;
       p = next_insn_in_loop (p, scan_start, end, loop_top))
    {
      if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
          && find_reg_note (p, REG_LIBCALL, NULL_RTX))
        in_libcall = 1;
      else if (GET_RTX_CLASS (GET_CODE (p)) == 'i'
               && find_reg_note (p, REG_RETVAL, NULL_RTX))
        in_libcall = 0;

      if (GET_CODE (p) == INSN
          && (set = single_set (p))
          && GET_CODE (SET_DEST (set)) == REG
          && ! VARRAY_CHAR (may_not_optimize, REGNO (SET_DEST (set))))
        {
          int tem1 = 0;
          int tem2 = 0;
          int move_insn = 0;
          rtx src = SET_SRC (set);
          rtx dependencies = 0;

          /* Figure out what to use as a source of this insn.  If a REG_EQUIV
             note is given or if a REG_EQUAL note with a constant operand is
             specified, use it as the source and mark that we should move
             this insn by calling emit_move_insn rather than duplicating the
             insn.

             Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note
             is present.  */
          temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
          if (temp)
            src = XEXP (temp, 0), move_insn = 1;
          else
            {
              temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
              if (temp && CONSTANT_P (XEXP (temp, 0)))
                src = XEXP (temp, 0), move_insn = 1;
              if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
                {
                  src = XEXP (temp, 0);
                  /* A libcall block can use regs that don't appear in
                     the equivalent expression.  To move the libcall,
                     we must move those regs too.  */
                  dependencies = libcall_other_reg (p, src);
                }
            }

          /* Don't try to optimize a register that was made
             by loop-optimization for an inner loop.
             We don't know its life-span, so we can't compute the benefit.  */
          if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
            ;
          else if (/* The register is used in basic blocks other
                      than the one where it is set (meaning that
                      something after this point in the loop might
                      depend on its value before the set).  */
                   ! reg_in_basic_block_p (p, SET_DEST (set))
                   /* And the set is not guaranteed to be executed once
                      the loop starts, or the value before the set is
                      needed before the set occurs...

                      ??? Note we have quadratic behaviour here, mitigated
                      by the fact that the previous test will often fail for
                      large loops.  Rather than re-scanning the entire loop
                      each time for register usage, we should build tables
                      of the register usage and use them here instead.  */
                   && (maybe_never
                       || loop_reg_used_before_p (set, p, loop_start,
                                                  scan_start, end)))
            /* It is unsafe to move the set.

               This code used to consider it OK to move a set of a variable
               which was not created by the user and not used in an exit test.
               That behavior is incorrect and was removed.  */
            ;
          else if ((tem = invariant_p (src))
                   && (dependencies == 0
                       || (tem2 = invariant_p (dependencies)) != 0)
                   && (VARRAY_INT (set_in_loop,
                                   REGNO (SET_DEST (set))) == 1
                       || (tem1
                           = consec_sets_invariant_p
                           (SET_DEST (set),
                            VARRAY_INT (set_in_loop, REGNO (SET_DEST (set))),
                            p)))
                   /* If the insn can cause a trap (such as divide by zero),
                      can't move it unless it's guaranteed to be executed
                      once loop is entered.  Even a function call might
                      prevent the trap insn from being reached
                      (since it might exit!)  */
                   && ! ((maybe_never || call_passed)
                         && may_trap_p (src)))
            {
              register struct movable *m;
              register int regno = REGNO (SET_DEST (set));

              /* A potential lossage is where we have a case where two insns
                 can be combined as long as they are both in the loop, but
                 we move one of them outside the loop.  For large loops,
                 this can lose.  The most common case of this is the address
                 of a function being called.

                 Therefore, if this register is marked as being used exactly
                 once if we are in a loop with calls (a "large loop"), see if
                 we can replace the usage of this register with the source
                 of this SET.  If we can, delete this insn.

                 Don't do this if P has a REG_RETVAL note or if we have
                 SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

              if (loop_info->has_call
                  && VARRAY_RTX (reg_single_usage, regno) != 0
                  && VARRAY_RTX (reg_single_usage, regno) != const0_rtx
                  && REGNO_FIRST_UID (regno) == INSN_UID (p)
                  && (REGNO_LAST_UID (regno)
                      == INSN_UID (VARRAY_RTX (reg_single_usage, regno)))
                  && VARRAY_INT (set_in_loop, regno) == 1
                  && ! side_effects_p (SET_SRC (set))
                  && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
                  && (! SMALL_REGISTER_CLASSES
                      || (! (GET_CODE (SET_SRC (set)) == REG
                             && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER)))
                  /* This test is not redundant; SET_SRC (set) might be
                     a call-clobbered register and the life of REGNO
                     might span a call.  */
                  && ! modified_between_p (SET_SRC (set), p,
                                           VARRAY_RTX
                                           (reg_single_usage, regno))
                  && no_labels_between_p (p, VARRAY_RTX (reg_single_usage,
                                                         regno))
                  && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
                                           VARRAY_RTX
                                           (reg_single_usage, regno)))
                {
                  /* Replace any usage in a REG_EQUAL note.  Must copy the
                     new source, so that we don't get rtx sharing between the
                     SET_SOURCE and REG_NOTES of insn p.  */
                  REG_NOTES (VARRAY_RTX (reg_single_usage, regno))
                    = replace_rtx (REG_NOTES (VARRAY_RTX
                                              (reg_single_usage, regno)),
                                   SET_DEST (set), copy_rtx (SET_SRC (set)));

                  PUT_CODE (p, NOTE);
                  NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED;
                  NOTE_SOURCE_FILE (p) = 0;
                  VARRAY_INT (set_in_loop, regno) = 0;
                  continue;
                }

              m = (struct movable *) alloca (sizeof (struct movable));
              m->next = 0;
              m->insn = p;
              m->set_src = src;
              m->dependencies = dependencies;
              m->set_dest = SET_DEST (set);
              m->force = 0;
              m->consec = VARRAY_INT (set_in_loop,
                                      REGNO (SET_DEST (set))) - 1;
              m->done = 0;
              m->forces = 0;
              m->partial = 0;
              m->move_insn = move_insn;
              m->move_insn_first = 0;
              m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
              m->savemode = VOIDmode;
              m->regno = regno;
              /* Set M->cond if either invariant_p or consec_sets_invariant_p
                 returned 2 (only conditionally invariant).  */
              m->cond = ((tem | tem1 | tem2) > 1);
              m->global = (uid_luid[REGNO_LAST_UID (regno)] > INSN_LUID (end)
                           || uid_luid[REGNO_FIRST_UID (regno)]
                              < INSN_LUID (loop_start));
              m->match = 0;
              m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                             - uid_luid[REGNO_FIRST_UID (regno)]);
              m->savings = VARRAY_INT (n_times_set, regno);
              if (find_reg_note (p, REG_RETVAL, NULL_RTX))
                m->savings += libcall_benefit (p);
              VARRAY_INT (set_in_loop, regno) = move_insn ? -2 : -1;
              /* Add M to the end of the chain MOVABLES.  */
              if (movables == 0)
                movables = m;
              else
                last_movable->next = m;
              last_movable = m;

              if (m->consec > 0)
                {
                  /* It is possible for the first instruction to have a
                     REG_EQUAL note but a non-invariant SET_SRC, so we must
                     remember the status of the first instruction in case
                     the last instruction doesn't have a REG_EQUAL note.  */
                  m->move_insn_first = m->move_insn;

                  /* Skip this insn, not checking REG_LIBCALL notes.  */
                  p = next_nonnote_insn (p);
                  /* Skip the consecutive insns, if there are any.  */
                  p = skip_consec_insns (p, m->consec);
                  /* Back up to the last insn of the consecutive group.  */
                  p = prev_nonnote_insn (p);

                  /* We must now reset m->move_insn, m->is_equiv, and possibly
                     m->set_src to correspond to the effects of all the
                     insns.  */
                  temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
                  if (temp)
                    m->set_src = XEXP (temp, 0), m->move_insn = 1;
                  else
                    {
                      temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
                      if (temp && CONSTANT_P (XEXP (temp, 0)))
                        m->set_src = XEXP (temp, 0), m->move_insn = 1;
                      else
                        m->move_insn = 0;
                    }
                  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
                }
            }
          /* If this register is always set within a STRICT_LOW_PART
             or set to zero, then its high bytes are constant.
             So clear them outside the loop and within the loop
             just load the low bytes.
             We must check that the machine has an instruction to do so.
             Also, if the value loaded into the register
             depends on the same register, this cannot be done.  */
          else if (SET_SRC (set) == const0_rtx
                   && GET_CODE (NEXT_INSN (p)) == INSN
                   && (set1 = single_set (NEXT_INSN (p)))
                   && GET_CODE (set1) == SET
                   && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
                   && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
                   && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
                       == SET_DEST (set))
                   && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
            {
              register int regno = REGNO (SET_DEST (set));
              if (VARRAY_INT (set_in_loop, regno) == 2)
                {
                  register struct movable *m;
                  m = (struct movable *) alloca (sizeof (struct movable));
                  m->next = 0;
                  m->insn = p;
                  m->set_dest = SET_DEST (set);
                  m->dependencies = 0;
                  m->force = 0;
                  m->consec = 0;
                  m->done = 0;
                  m->forces = 0;
                  m->move_insn = 0;
                  m->move_insn_first = 0;
                  m->partial = 1;
                  /* If the insn may not be executed on some cycles,
                     we can't clear the whole reg; clear just high part.
                     Not even if the reg is used only within this loop.
                     Consider this:
                     while (1)
                       while (s != t) {
                         if (foo ()) x = *s;
                         use (x);
                       }
                     Clearing x before the inner loop could clobber a value
                     being saved from the last time around the outer loop.
                     However, if the reg is not used outside this loop
                     and all uses of the register are in the same
                     basic block as the store, there is no problem.

                     If this insn was made by loop, we don't know its
                     INSN_LUID and hence must make a conservative
                     assumption.  */
                  m->global = (INSN_UID (p) >= max_uid_for_loop
                               || (uid_luid[REGNO_LAST_UID (regno)]
                                   > INSN_LUID (end))
                               || (uid_luid[REGNO_FIRST_UID (regno)]
                                   < INSN_LUID (p))
                               || (labels_in_range_p
                                   (p, uid_luid[REGNO_FIRST_UID (regno)])));
                  if (maybe_never && m->global)
                    m->savemode = GET_MODE (SET_SRC (set1));
                  else
                    m->savemode = VOIDmode;
                  m->regno = regno;
                  m->cond = 0;
                  m->match = 0;
                  m->lifetime = (uid_luid[REGNO_LAST_UID (regno)]
                                 - uid_luid[REGNO_FIRST_UID (regno)]);
                  m->savings = 1;
                  VARRAY_INT (set_in_loop, regno) = -1;
                  /* Add M to the end of the chain MOVABLES.  */
                  if (movables == 0)
                    movables = m;
                  else
                    last_movable->next = m;
                  last_movable = m;
                }
            }
        }
      /* Past a call insn, we get to insns which might not be executed
         because the call might exit.  This matters for insns that trap.
         Call insns inside a REG_LIBCALL/REG_RETVAL block always return,
         so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! in_libcall)
        call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
         can't count on whether or how many times they will be
         executed during each iteration.  Therefore, we can
         only move out sets of trivial variables
         (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
               /* If we enter the loop in the middle, and scan around to the
                  beginning, don't set maybe_never for that.  This must be an
                  unconditional jump, otherwise the code at the top of the
                  loop might never be executed.  Unconditional jumps are
                  followed by a barrier then the loop end.  */
               && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop_top
                     && NEXT_INSN (NEXT_INSN (p)) == end
                     && simplejump_p (p)))
        maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
        {
          /* At the virtual top of a converted loop, insns are again known to
             be executed: logically, the loop begins here even though the exit
             code has been duplicated.  */
          if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
            maybe_never = call_passed = 0;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
            loop_depth++;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
            loop_depth--;
        }
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads
     leads when it dies right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, nregs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in set_in_loop for each reg that is moved.

     Generally this increases code size, so do not move movables when
     optimizing for code size.  */

  if (! optimize_size)
    move_movables (movables, threshold,
                   insn_count, loop_start, end, nregs);

  /* Now candidates that still are negative are those not moved.
     Change set_in_loop to indicate that those are not actually invariant.  */
  for (i = 0; i < nregs; i++)
    if (VARRAY_INT (set_in_loop, i) < 0)
      VARRAY_INT (set_in_loop, i) = VARRAY_INT (n_times_set, i);

  /* Now that we've moved some things out of the loop, we might be able to
     hoist even more memory references.  */
  load_mems_and_recount_loop_regs_set (scan_start, end, loop_top,
                                       loop_start, &insn_count);

  if (flag_strength_reduce)
    {
      the_movables = movables;
      strength_reduce (scan_start, end, loop_top,
                       insn_count, loop_start, end,
                       loop_info, loop_cont, unroll_p, bct_p);
    }

  VARRAY_FREE (reg_single_usage);
  VARRAY_FREE (set_in_loop);
  VARRAY_FREE (n_times_set);
  VARRAY_FREE (may_not_optimize);
}
\f
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (in_this, not_in_this, output)
     rtx in_this, not_in_this;
     rtx *output;
{
  enum rtx_code code;
  const char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
          && ! reg_mentioned_p (in_this, not_in_this))
        *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
        {
        case 'E':
          for (j = 0; j < XVECLEN (in_this, i); j++)
            record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
          break;

        case 'e':
          record_excess_regs (XEXP (in_this, i), not_in_this, output);
          break;
        }
    }
}
\f
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

rtx
libcall_other_reg (insn, equiv)
     rtx insn, equiv;
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
          || GET_CODE (p) == CALL_INSN)
        record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
\f
1251 | /* Return 1 if all uses of REG | |
1252 | are between INSN and the end of the basic block. */ | |
1253 | ||
1254 | static int | |
1255 | reg_in_basic_block_p (insn, reg) | |
1256 | rtx insn, reg; | |
1257 | { | |
1258 | int regno = REGNO (reg); | |
1259 | rtx p; | |
1260 | ||
b1f21e0a | 1261 | if (REGNO_FIRST_UID (regno) != INSN_UID (insn)) |
b4ad7b23 RS |
1262 | return 0; |
1263 | ||
1264 | /* Search this basic block for the already recorded last use of the reg. */ | |
1265 | for (p = insn; p; p = NEXT_INSN (p)) | |
1266 | { | |
1267 | switch (GET_CODE (p)) | |
1268 | { | |
1269 | case NOTE: | |
1270 | break; | |
1271 | ||
1272 | case INSN: | |
1273 | case CALL_INSN: | |
1274 | /* Ordinary insn: if this is the last use, we win. */ | |
b1f21e0a | 1275 | if (REGNO_LAST_UID (regno) == INSN_UID (p)) |
b4ad7b23 RS |
1276 | return 1; |
1277 | break; | |
1278 | ||
1279 | case JUMP_INSN: | |
1280 | /* Jump insn: if this is the last use, we win. */ | |
b1f21e0a | 1281 | if (REGNO_LAST_UID (regno) == INSN_UID (p)) |
b4ad7b23 RS |
1282 | return 1; |
1283 | /* Otherwise, it's the end of the basic block, so we lose. */ | |
1284 | return 0; | |
1285 | ||
1286 | case CODE_LABEL: | |
1287 | case BARRIER: | |
1288 | /* It's the end of the basic block, so we lose. */ | |
1289 | return 0; | |
e9a25f70 JL |
1290 | |
1291 | default: | |
1292 | break; | |
b4ad7b23 RS |
1293 | } |
1294 | } | |
1295 | ||
1296 | /* The "last use" doesn't follow the "first use"?? */ | |
1297 | abort (); | |
1298 | } | |
1299 | \f | |
1300 | /* Compute the benefit of eliminating the insns in the block whose | |
1301 | last insn is LAST. This may be a group of insns used to compute a | |
1302 | value directly or can contain a library call. */ | |
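/* A rough illustration of the accounting done below: a block of three
   ordinary insns feeding one CALL_INSN scores 3 * 1 + 10 == 13, since
   each call is assumed to stand for at least ten insns inside the
   library routine.  */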
1303 | ||
1304 | static int | |
1305 | libcall_benefit (last) | |
1306 | rtx last; | |
1307 | { | |
1308 | rtx insn; | |
1309 | int benefit = 0; | |
1310 | ||
5fd8383e | 1311 | for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0); |
b4ad7b23 RS |
1312 | insn != last; insn = NEXT_INSN (insn)) |
1313 | { | |
1314 | if (GET_CODE (insn) == CALL_INSN) | |
1315 | benefit += 10; /* Assume at least this many insns in a library | |
0f41302f | 1316 | routine. */ |
b4ad7b23 RS |
1317 | else if (GET_CODE (insn) == INSN |
1318 | && GET_CODE (PATTERN (insn)) != USE | |
1319 | && GET_CODE (PATTERN (insn)) != CLOBBER) | |
1320 | benefit++; | |
1321 | } | |
1322 | ||
1323 | return benefit; | |
1324 | } | |
1325 | \f | |
1326 | /* Skip COUNT insns from INSN, counting library calls as 1 insn. */ | |
1327 | ||
1328 | static rtx | |
1329 | skip_consec_insns (insn, count) | |
1330 | rtx insn; | |
1331 | int count; | |
1332 | { | |
1333 | for (; count > 0; count--) | |
1334 | { | |
1335 | rtx temp; | |
1336 | ||
1337 | /* If first insn of libcall sequence, skip to end. */ | |
1338 | /* Do this at start of loop, since INSN is guaranteed to | |
1339 | be an insn here. */ | |
1340 | if (GET_CODE (insn) != NOTE | |
5fd8383e | 1341 | && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX))) |
b4ad7b23 RS |
1342 | insn = XEXP (temp, 0); |
1343 | ||
1344 | do insn = NEXT_INSN (insn); | |
1345 | while (GET_CODE (insn) == NOTE); | |
1346 | } | |
1347 | ||
1348 | return insn; | |
1349 | } | |
1350 | ||
1351 | /* Ignore any movable whose insn falls within a libcall | |
1352 | which is part of another movable. | |
1353 | We make use of the fact that the movable for the libcall value | |
1354 | was made later and so appears later on the chain. */ | |
1355 | ||
1356 | static void | |
1357 | ignore_some_movables (movables) | |
1358 | struct movable *movables; | |
1359 | { | |
1360 | register struct movable *m, *m1; | |
1361 | ||
1362 | for (m = movables; m; m = m->next) | |
1363 | { | |
1364 | /* Is this a movable for the value of a libcall? */ | |
5fd8383e | 1365 | rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX); |
b4ad7b23 RS |
1366 | if (note) |
1367 | { | |
1368 | rtx insn; | |
1369 | /* Check for earlier movables inside that range, | |
1370 | and mark them invalid. We cannot use LUIDs here because | |
1371 | insns created by loop.c for prior loops don't have LUIDs. | |
1372 | Rather than reject all such insns from movables, we just | |
1373 | explicitly check each insn in the libcall (since invariant | |
1374 | libcalls aren't that common). */ | |
1375 | for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn)) | |
1376 | for (m1 = movables; m1 != m; m1 = m1->next) | |
1377 | if (m1->insn == insn) | |
1378 | m1->done = 1; | |
1379 | } | |
1380 | } | |
1381 | } | |
1382 | ||
1383 | /* For each movable insn, see if the reg that it loads | |
1384 | dies right where it feeds into another conditionally movable insn. | |
1385 | If so, record that the second insn "forces" the first one, | |
1386 | since the second can be moved only if the first is. */ | |
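/* A hypothetical example (register numbers invented): insn A sets
   reg 101 to an invariant; insn B, the last use of reg 101, sets
   reg 102 = reg 101 + 4 but is only conditionally movable.  Then B
   "forces" A: moving B requires moving A, so A's priority absorbs
   B's lifetime and savings below.  */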
1387 | ||
1388 | static void | |
1389 | force_movables (movables) | |
1390 | struct movable *movables; | |
1391 | { | |
1392 | register struct movable *m, *m1; | |
1393 | for (m1 = movables; m1; m1 = m1->next) | |
1394 | /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */ | |
1395 | if (!m1->partial && !m1->done) | |
1396 | { | |
1397 | int regno = m1->regno; | |
1398 | for (m = m1->next; m; m = m->next) | |
1399 | /* ??? Could this be a bug? What if CSE caused the | |
1400 | register of M1 to be used after this insn? | |
1401 | Since CSE does not update regno_last_uid, | |
1402 | this insn M->insn might not be where it dies. | |
1403 | But very likely this doesn't matter; what matters is | |
1404 | that M's reg is computed from M1's reg. */ | |
b1f21e0a | 1405 | if (INSN_UID (m->insn) == REGNO_LAST_UID (regno) |
b4ad7b23 RS |
1406 | && !m->done) |
1407 | break; | |
1408 | if (m != 0 && m->set_src == m1->set_dest | |
1409 | /* If m->consec, m->set_src isn't valid. */ | |
1410 | && m->consec == 0) | |
1411 | m = 0; | |
1412 | ||
1413 | /* Increase the priority of moving the first insn | |
1414 | since it permits the second to be moved as well. */ | |
1415 | if (m != 0) | |
1416 | { | |
1417 | m->forces = m1; | |
1418 | m1->lifetime += m->lifetime; | |
3875b31d | 1419 | m1->savings += m->savings; |
b4ad7b23 RS |
1420 | } |
1421 | } | |
1422 | } | |
1423 | \f | |
1424 | /* Find invariant expressions that are equal and can be combined into | |
1425 | one register. */ | |
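/* For instance (a made-up case): two movables that each load
   (symbol_ref "table") into different pseudos can be merged so the
   loop references a single pseudo for that address.  */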
1426 | ||
1427 | static void | |
1428 | combine_movables (movables, nregs) | |
1429 | struct movable *movables; | |
1430 | int nregs; | |
1431 | { | |
1432 | register struct movable *m; | |
1433 | char *matched_regs = (char *) alloca (nregs); | |
1434 | enum machine_mode mode; | |
1435 | ||
1436 | /* Regs that are set more than once are not allowed to match | |
1437 | or be matched. I'm no longer sure why not. */ | |
1438 | /* Perhaps testing m->consec_sets would be more appropriate here? */ | |
1439 | ||
1440 | for (m = movables; m; m = m->next) | |
4b259e3f | 1441 | if (m->match == 0 && VARRAY_INT (n_times_set, m->regno) == 1 && !m->partial) |
b4ad7b23 RS |
1442 | { |
1443 | register struct movable *m1; | |
1444 | int regno = m->regno; | |
b4ad7b23 RS |
1445 | |
1446 | bzero (matched_regs, nregs); | |
1447 | matched_regs[regno] = 1; | |
1448 | ||
88016fb7 DE |
1449 | /* We want later insns to match the first one. Don't make the first |
1450 | one match any later ones. So start this loop at m->next. */ | |
1451 | for (m1 = m->next; m1; m1 = m1->next) | |
4b259e3f | 1452 | if (m != m1 && m1->match == 0 && VARRAY_INT (n_times_set, m1->regno) == 1 |
b4ad7b23 RS |
1453 | /* A reg used outside the loop mustn't be eliminated. */ |
1454 | && !m1->global | |
1455 | /* A reg used for zero-extending mustn't be eliminated. */ | |
1456 | && !m1->partial | |
1457 | && (matched_regs[m1->regno] | |
1458 | || | |
1459 | ( | |
1460 | /* Can combine regs with different modes loaded from the | |
1461 | same constant only if the modes are the same or | |
1462 | if both are integer modes with M wider or the same | |
1463 | width as M1. The check for integer is redundant, but | |
1464 | safe, since the only case of differing destination | |
1465 | modes with equal sources is when both sources are | |
1466 | VOIDmode, i.e., CONST_INT. */ | |
1467 | (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest) | |
1468 | || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT | |
1469 | && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT | |
1470 | && (GET_MODE_BITSIZE (GET_MODE (m->set_dest)) | |
1471 | >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest))))) | |
1472 | /* See if the source of M1 says it matches M. */ | |
1473 | && ((GET_CODE (m1->set_src) == REG | |
1474 | && matched_regs[REGNO (m1->set_src)]) | |
1475 | || rtx_equal_for_loop_p (m->set_src, m1->set_src, | |
1476 | movables)))) | |
1477 | && ((m->dependencies == m1->dependencies) | |
1478 | || rtx_equal_p (m->dependencies, m1->dependencies))) | |
1479 | { | |
1480 | m->lifetime += m1->lifetime; | |
1481 | m->savings += m1->savings; | |
1482 | m1->done = 1; | |
1483 | m1->match = m; | |
1484 | matched_regs[m1->regno] = 1; | |
1485 | } | |
1486 | } | |
1487 | ||
1488 | /* Now combine the regs used for zero-extension. | |
1489 | This can be done for those not marked `global' | |
1490 | provided their lives don't overlap. */ | |
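/* An invented sketch of what this achieves: if pseudo 101 and
   pseudo 105 are each used only to zero-extend HImode values, and
   the last use of 101 comes before the first use of 105, the two
   can share one register that is zeroed once before the loop.  */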
1491 | ||
1492 | for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode; | |
1493 | mode = GET_MODE_WIDER_MODE (mode)) | |
1494 | { | |
1495 | register struct movable *m0 = 0; | |
1496 | ||
1497 | /* Combine all the registers for extension from mode MODE. | |
1498 | Don't combine any that are used outside this loop. */ | |
1499 | for (m = movables; m; m = m->next) | |
1500 | if (m->partial && ! m->global | |
1501 | && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn))))) | |
1502 | { | |
1503 | register struct movable *m1; | |
b1f21e0a MM |
1504 | int first = uid_luid[REGNO_FIRST_UID (m->regno)]; |
1505 | int last = uid_luid[REGNO_LAST_UID (m->regno)]; | |
b4ad7b23 RS |
1506 | |
1507 | if (m0 == 0) | |
1508 | { | |
1509 | /* First one: don't check for overlap, just record it. */ | |
1510 | m0 = m; | |
1511 | continue; | |
1512 | } | |
1513 | ||
1514 | /* Make sure they extend to the same mode. | |
1515 | (Almost always true.) */ | |
1516 | if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest)) | |
1517 | continue; | |
1518 | ||
1519 | /* We already have one: check for overlap with those | |
1520 | already combined together. */ | |
1521 | for (m1 = movables; m1 != m; m1 = m1->next) | |
1522 | if (m1 == m0 || (m1->partial && m1->match == m0)) | |
b1f21e0a MM |
1523 | if (! (uid_luid[REGNO_FIRST_UID (m1->regno)] > last |
1524 | || uid_luid[REGNO_LAST_UID (m1->regno)] < first)) | |
b4ad7b23 RS |
1525 | goto overlap; |
1526 | ||
1527 | /* No overlap: we can combine this with the others. */ | |
1528 | m0->lifetime += m->lifetime; | |
1529 | m0->savings += m->savings; | |
1530 | m->done = 1; | |
1531 | m->match = m0; | |
1532 | ||
1533 | overlap: ; | |
1534 | } | |
1535 | } | |
1536 | } | |
1537 | \f | |
1538 | /* Return 1 if regs X and Y will become the same if moved. */ | |
1539 | ||
1540 | static int | |
1541 | regs_match_p (x, y, movables) | |
1542 | rtx x, y; | |
1543 | struct movable *movables; | |
1544 | { | |
1545 | int xn = REGNO (x); | |
1546 | int yn = REGNO (y); | |
1547 | struct movable *mx, *my; | |
1548 | ||
1549 | for (mx = movables; mx; mx = mx->next) | |
1550 | if (mx->regno == xn) | |
1551 | break; | |
1552 | ||
1553 | for (my = movables; my; my = my->next) | |
1554 | if (my->regno == yn) | |
1555 | break; | |
1556 | ||
1557 | return (mx && my | |
1558 | && ((mx->match == my->match && mx->match != 0) | |
1559 | || mx->match == my | |
1560 | || mx == my->match)); | |
1561 | } | |
1562 | ||
1563 | /* Return 1 if X and Y are identical-looking rtx's. | |
1564 | This is the Lisp function EQUAL for rtx arguments. | |
1565 | ||
1566 | If two registers are matching movables or a movable register and an | |
1567 | equivalent constant, consider them equal. */ | |
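/* For example (register number invented): if movable M will be
   re-generated (m->move_insn set) and loads reg 101 from
   (const_int 5), then (reg:SI 101) compares equal to (const_int 5)
   here.  */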
1568 | ||
1569 | static int | |
1570 | rtx_equal_for_loop_p (x, y, movables) | |
1571 | rtx x, y; | |
1572 | struct movable *movables; | |
1573 | { | |
1574 | register int i; | |
1575 | register int j; | |
1576 | register struct movable *m; | |
1577 | register enum rtx_code code; | |
6f7d635c | 1578 | register const char *fmt; |
b4ad7b23 RS |
1579 | |
1580 | if (x == y) | |
1581 | return 1; | |
1582 | if (x == 0 || y == 0) | |
1583 | return 0; | |
1584 | ||
1585 | code = GET_CODE (x); | |
1586 | ||
1587 | /* If we have a register and a constant, they may sometimes be | |
1588 | equal. */ | |
4b259e3f | 1589 | if (GET_CODE (x) == REG && VARRAY_INT (set_in_loop, REGNO (x)) == -2 |
b4ad7b23 | 1590 | && CONSTANT_P (y)) |
b1a0c816 JL |
1591 | { |
1592 | for (m = movables; m; m = m->next) | |
1593 | if (m->move_insn && m->regno == REGNO (x) | |
1594 | && rtx_equal_p (m->set_src, y)) | |
1595 | return 1; | |
1596 | } | |
4b259e3f | 1597 | else if (GET_CODE (y) == REG && VARRAY_INT (set_in_loop, REGNO (y)) == -2 |
b4ad7b23 | 1598 | && CONSTANT_P (x)) |
b1a0c816 JL |
1599 | { |
1600 | for (m = movables; m; m = m->next) | |
1601 | if (m->move_insn && m->regno == REGNO (y) | |
1602 | && rtx_equal_p (m->set_src, x)) | |
1603 | return 1; | |
1604 | } | |
b4ad7b23 RS |
1605 | |
1606 | /* Otherwise, rtx's of different codes cannot be equal. */ | |
1607 | if (code != GET_CODE (y)) | |
1608 | return 0; | |
1609 | ||
1610 | /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. | |
1611 | (REG:SI x) and (REG:HI x) are NOT equivalent. */ | |
1612 | ||
1613 | if (GET_MODE (x) != GET_MODE (y)) | |
1614 | return 0; | |
1615 | ||
1616 | /* These three types of rtx's can be compared nonrecursively. */ | |
1617 | if (code == REG) | |
1618 | return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables)); | |
1619 | ||
1620 | if (code == LABEL_REF) | |
1621 | return XEXP (x, 0) == XEXP (y, 0); | |
1622 | if (code == SYMBOL_REF) | |
1623 | return XSTR (x, 0) == XSTR (y, 0); | |
1624 | ||
1625 | /* Compare the elements. If any pair of corresponding elements | |
1626 | fail to match, return 0 for the whole thing. */ | |
1627 | ||
1628 | fmt = GET_RTX_FORMAT (code); | |
1629 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
1630 | { | |
1631 | switch (fmt[i]) | |
1632 | { | |
5fd8383e RK |
1633 | case 'w': |
1634 | if (XWINT (x, i) != XWINT (y, i)) | |
1635 | return 0; | |
1636 | break; | |
1637 | ||
b4ad7b23 RS |
1638 | case 'i': |
1639 | if (XINT (x, i) != XINT (y, i)) | |
1640 | return 0; | |
1641 | break; | |
1642 | ||
1643 | case 'E': | |
1644 | /* Two vectors must have the same length. */ | |
1645 | if (XVECLEN (x, i) != XVECLEN (y, i)) | |
1646 | return 0; | |
1647 | ||
1648 | /* And the corresponding elements must match. */ | |
1649 | for (j = 0; j < XVECLEN (x, i); j++) | |
1650 | if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), movables) == 0) | |
1651 | return 0; | |
1652 | break; | |
1653 | ||
1654 | case 'e': | |
1655 | if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables) == 0) | |
1656 | return 0; | |
1657 | break; | |
1658 | ||
1659 | case 's': | |
1660 | if (strcmp (XSTR (x, i), XSTR (y, i))) | |
1661 | return 0; | |
1662 | break; | |
1663 | ||
1664 | case 'u': | |
1665 | /* These are just backpointers, so they don't matter. */ | |
1666 | break; | |
1667 | ||
1668 | case '0': | |
1669 | break; | |
1670 | ||
1671 | /* It is believed that rtx's at this level will never | |
1672 | contain anything but integers and other rtx's, | |
1673 | except for within LABEL_REFs and SYMBOL_REFs. */ | |
1674 | default: | |
1675 | abort (); | |
1676 | } | |
1677 | } | |
1678 | return 1; | |
1679 | } | |
1680 | \f | |
c160c628 | 1681 | /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all |
3c748bb6 | 1682 | insns in INSNS which use the reference. */ |
c160c628 RK |
1683 | |
1684 | static void | |
1685 | add_label_notes (x, insns) | |
1686 | rtx x; | |
1687 | rtx insns; | |
1688 | { | |
1689 | enum rtx_code code = GET_CODE (x); | |
7dcd3836 | 1690 | int i, j; |
6f7d635c | 1691 | const char *fmt; |
c160c628 RK |
1692 | rtx insn; |
1693 | ||
82d00367 | 1694 | if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x)) |
c160c628 | 1695 | { |
6b3603c2 JL |
1696 | /* This code used to ignore labels that referred to dispatch tables to |
1697 | avoid flow generating (slightly) worse code. | |
1698 | ||
1699 | We no longer ignore such label references (see LABEL_REF handling in | |
1700 | mark_jump_label for additional information). */ | |
1701 | for (insn = insns; insn; insn = NEXT_INSN (insn)) | |
1702 | if (reg_mentioned_p (XEXP (x, 0), insn)) | |
1703 | REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0), | |
1704 | REG_NOTES (insn)); | |
c160c628 RK |
1705 | } |
1706 | ||
1707 | fmt = GET_RTX_FORMAT (code); | |
1708 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
7dcd3836 RK |
1709 | { |
1710 | if (fmt[i] == 'e') | |
1711 | add_label_notes (XEXP (x, i), insns); | |
1712 | else if (fmt[i] == 'E') | |
1713 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
1714 | add_label_notes (XVECEXP (x, i, j), insns); | |
1715 | } | |
c160c628 RK |
1716 | } |
1717 | \f | |
b4ad7b23 RS |
1718 | /* Scan MOVABLES, and move the insns that deserve to be moved. |
1719 | If two matching movables are combined, replace one reg with the | |
1720 | other throughout. */ | |
1721 | ||
1722 | static void | |
1723 | move_movables (movables, threshold, insn_count, loop_start, end, nregs) | |
1724 | struct movable *movables; | |
1725 | int threshold; | |
1726 | int insn_count; | |
1727 | rtx loop_start; | |
1728 | rtx end; | |
1729 | int nregs; | |
1730 | { | |
1731 | rtx new_start = 0; | |
1732 | register struct movable *m; | |
1733 | register rtx p; | |
1734 | /* Map of pseudo-register replacements to handle combining | |
1735 | when we move several insns that load the same value | |
1736 | into different pseudo-registers. */ | |
1737 | rtx *reg_map = (rtx *) alloca (nregs * sizeof (rtx)); | |
1738 | char *already_moved = (char *) alloca (nregs); | |
1739 | ||
1740 | bzero (already_moved, nregs); | |
4c9a05bc | 1741 | bzero ((char *) reg_map, nregs * sizeof (rtx)); |
b4ad7b23 RS |
1742 | |
1743 | num_movables = 0; | |
1744 | ||
1745 | for (m = movables; m; m = m->next) | |
1746 | { | |
1747 | /* Describe this movable insn. */ | |
1748 | ||
1749 | if (loop_dump_stream) | |
1750 | { | |
1751 | fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ", | |
1752 | INSN_UID (m->insn), m->regno, m->lifetime); | |
1753 | if (m->consec > 0) | |
1754 | fprintf (loop_dump_stream, "consec %d, ", m->consec); | |
1755 | if (m->cond) | |
1756 | fprintf (loop_dump_stream, "cond "); | |
1757 | if (m->force) | |
1758 | fprintf (loop_dump_stream, "force "); | |
1759 | if (m->global) | |
1760 | fprintf (loop_dump_stream, "global "); | |
1761 | if (m->done) | |
1762 | fprintf (loop_dump_stream, "done "); | |
1763 | if (m->move_insn) | |
1764 | fprintf (loop_dump_stream, "move-insn "); | |
1765 | if (m->match) | |
1766 | fprintf (loop_dump_stream, "matches %d ", | |
1767 | INSN_UID (m->match->insn)); | |
1768 | if (m->forces) | |
1769 | fprintf (loop_dump_stream, "forces %d ", | |
1770 | INSN_UID (m->forces->insn)); | |
1771 | } | |
1772 | ||
1773 | /* Count movables. Value used in heuristics in strength_reduce. */ | |
1774 | num_movables++; | |
1775 | ||
1776 | /* Ignore the insn if it's already done (it matched something else). | |
1777 | Otherwise, see if it is now safe to move. */ | |
1778 | ||
1779 | if (!m->done | |
1780 | && (! m->cond | |
1781 | || (1 == invariant_p (m->set_src) | |
1782 | && (m->dependencies == 0 | |
1783 | || 1 == invariant_p (m->dependencies)) | |
1784 | && (m->consec == 0 | |
1785 | || 1 == consec_sets_invariant_p (m->set_dest, | |
1786 | m->consec + 1, | |
1787 | m->insn)))) | |
1788 | && (! m->forces || m->forces->done)) | |
1789 | { | |
1790 | register int regno; | |
1791 | register rtx p; | |
1792 | int savings = m->savings; | |
1793 | ||
1794 | /* We have an insn that is safe to move. | |
1795 | Compute its desirability. */ | |
1796 | ||
1797 | p = m->insn; | |
1798 | regno = m->regno; | |
1799 | ||
1800 | if (loop_dump_stream) | |
1801 | fprintf (loop_dump_stream, "savings %d ", savings); | |
1802 | ||
877ca132 HB |
1803 | if (moved_once[regno] && loop_dump_stream) |
1804 | fprintf (loop_dump_stream, "halved since already moved "); | |
b4ad7b23 RS |
1805 | |
1806 | /* An insn MUST be moved if we already moved something else | |
1807 | which is safe only if this one is moved too: that is, | |
1808 | if already_moved[REGNO] is nonzero. */ | |
1809 | ||
1810 | /* An insn is desirable to move if the new lifetime of the | |
1811 | register is no more than THRESHOLD times the old lifetime. | |
1812 | If it's not desirable, it means the loop is so big | |
1813 | that moving won't speed things up much, | |
1814 | and it is liable to make register usage worse. */ | |
1815 | ||
1816 | /* It is also desirable to move if it can be moved at no | |
1817 | extra cost because something else was already moved. */ | |
1818 | ||
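/* A rough worked example of the test below (numbers invented for
   illustration only): with threshold == 3, savings == 2 and
   m->lifetime == 10, the product 3 * 2 * 10 == 60 covers an
   insn_count of 50, so the insn is moved; if moved_once[regno] is
   set, the requirement doubles to insn_count * 2 == 100 and the
   move is rejected as not desirable.  */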
1819 | if (already_moved[regno] | |
e5eb27e5 | 1820 | || flag_move_all_movables |
877ca132 HB |
1821 | || (threshold * savings * m->lifetime) >= |
1822 | (moved_once[regno] ? insn_count * 2 : insn_count) | |
b4ad7b23 | 1823 | || (m->forces && m->forces->done |
4b259e3f | 1824 | && VARRAY_INT (n_times_set, m->forces->regno) == 1)) |
b4ad7b23 RS |
1825 | { |
1826 | int count; | |
1827 | register struct movable *m1; | |
6a651371 | 1828 | rtx first = NULL_RTX; |
b4ad7b23 RS |
1829 | |
1830 | /* Now move the insns that set the reg. */ | |
1831 | ||
1832 | if (m->partial && m->match) | |
1833 | { | |
1834 | rtx newpat, i1; | |
1835 | rtx r1, r2; | |
1836 | /* Find the end of this chain of matching regs. | |
1837 | Thus, we load each reg in the chain from that one reg. | |
1838 | And that reg is loaded with 0 directly, | |
1839 | since it has ->match == 0. */ | |
1840 | for (m1 = m; m1->match; m1 = m1->match); | |
1841 | newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)), | |
1842 | SET_DEST (PATTERN (m1->insn))); | |
1843 | i1 = emit_insn_before (newpat, loop_start); | |
1844 | ||
1845 | /* Mark the moved, invariant reg as being allowed to | |
1846 | share a hard reg with the other matching invariant. */ | |
1847 | REG_NOTES (i1) = REG_NOTES (m->insn); | |
1848 | r1 = SET_DEST (PATTERN (m->insn)); | |
1849 | r2 = SET_DEST (PATTERN (m1->insn)); | |
38a448ca RH |
1850 | regs_may_share |
1851 | = gen_rtx_EXPR_LIST (VOIDmode, r1, | |
1852 | gen_rtx_EXPR_LIST (VOIDmode, r2, | |
1853 | regs_may_share)); | |
b4ad7b23 RS |
1854 | delete_insn (m->insn); |
1855 | ||
1856 | if (new_start == 0) | |
1857 | new_start = i1; | |
1858 | ||
1859 | if (loop_dump_stream) | |
1860 | fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1)); | |
1861 | } | |
1862 | /* If we are to re-generate the item being moved with a | |
1863 | new move insn, first delete what we have and then emit | |
1864 | the move insn before the loop. */ | |
1865 | else if (m->move_insn) | |
1866 | { | |
1867 | rtx i1, temp; | |
1868 | ||
1869 | for (count = m->consec; count >= 0; count--) | |
1870 | { | |
1871 | /* If this is the first insn of a library call sequence, | |
1872 | skip to the end. */ | |
1873 | if (GET_CODE (p) != NOTE | |
5fd8383e | 1874 | && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX))) |
b4ad7b23 RS |
1875 | p = XEXP (temp, 0); |
1876 | ||
1877 | /* If this is the last insn of a libcall sequence, then | |
1878 | delete every insn in the sequence except the last. | |
1879 | The last insn is handled in the normal manner. */ | |
1880 | if (GET_CODE (p) != NOTE | |
5fd8383e | 1881 | && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX))) |
b4ad7b23 RS |
1882 | { |
1883 | temp = XEXP (temp, 0); | |
1884 | while (temp != p) | |
1885 | temp = delete_insn (temp); | |
1886 | } | |
1887 | ||
9655bf95 | 1888 | temp = p; |
b4ad7b23 | 1889 | p = delete_insn (p); |
9655bf95 DM |
1890 | |
1891 | /* simplify_giv_expr expects that it can walk the insns | |
1892 | at m->insn forwards and see this old sequence we are | |
1893 | tossing here. delete_insn does preserve the next | |
1894 | pointers, but when we skip over a NOTE we must fix | |
1895 | it up. Otherwise that code walks into the non-deleted | |
1896 | insn stream. */ | |
dd202606 | 1897 | while (p && GET_CODE (p) == NOTE) |
9655bf95 | 1898 | p = NEXT_INSN (temp) = NEXT_INSN (p); |
b4ad7b23 RS |
1899 | } |
1900 | ||
1901 | start_sequence (); | |
1902 | emit_move_insn (m->set_dest, m->set_src); | |
c160c628 | 1903 | temp = get_insns (); |
b4ad7b23 RS |
1904 | end_sequence (); |
1905 | ||
c160c628 RK |
1906 | add_label_notes (m->set_src, temp); |
1907 | ||
1908 | i1 = emit_insns_before (temp, loop_start); | |
5fd8383e | 1909 | if (! find_reg_note (i1, REG_EQUAL, NULL_RTX)) |
b4ad7b23 | 1910 | REG_NOTES (i1) |
38a448ca RH |
1911 | = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL, |
1912 | m->set_src, REG_NOTES (i1)); | |
b4ad7b23 RS |
1913 | |
1914 | if (loop_dump_stream) | |
1915 | fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1)); | |
1916 | ||
1917 | /* The more regs we move, the less we like moving them. */ | |
1918 | threshold -= 3; | |
1919 | } | |
1920 | else | |
1921 | { | |
1922 | for (count = m->consec; count >= 0; count--) | |
1923 | { | |
1924 | rtx i1, temp; | |
1925 | ||
0f41302f | 1926 | /* If first insn of libcall sequence, skip to end. */ |
b4ad7b23 RS |
1927 | /* Do this at start of loop, since p is guaranteed to |
1928 | be an insn here. */ | |
1929 | if (GET_CODE (p) != NOTE | |
5fd8383e | 1930 | && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX))) |
b4ad7b23 RS |
1931 | p = XEXP (temp, 0); |
1932 | ||
1933 | /* If last insn of libcall sequence, move all | |
1934 | insns except the last before the loop. The last | |
1935 | insn is handled in the normal manner. */ | |
1936 | if (GET_CODE (p) != NOTE | |
5fd8383e | 1937 | && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX))) |
b4ad7b23 RS |
1938 | { |
1939 | rtx fn_address = 0; | |
1940 | rtx fn_reg = 0; | |
1941 | rtx fn_address_insn = 0; | |
1942 | ||
1943 | first = 0; | |
1944 | for (temp = XEXP (temp, 0); temp != p; | |
1945 | temp = NEXT_INSN (temp)) | |
1946 | { | |
1947 | rtx body; | |
1948 | rtx n; | |
1949 | rtx next; | |
1950 | ||
1951 | if (GET_CODE (temp) == NOTE) | |
1952 | continue; | |
1953 | ||
1954 | body = PATTERN (temp); | |
1955 | ||
1956 | /* Find the next insn after TEMP, | |
1957 | not counting USE or NOTE insns. */ | |
1958 | for (next = NEXT_INSN (temp); next != p; | |
1959 | next = NEXT_INSN (next)) | |
1960 | if (! (GET_CODE (next) == INSN | |
1961 | && GET_CODE (PATTERN (next)) == USE) | |
1962 | && GET_CODE (next) != NOTE) | |
1963 | break; | |
1964 | ||
1965 | /* If that is the call, this may be the insn | |
1966 | that loads the function address. | |
1967 | ||
1968 | Extract the function address from the insn | |
1969 | that loads it into a register. | |
1970 | If this insn was cse'd, we get incorrect code. | |
1971 | ||
1972 | So emit a new move insn that copies the | |
1973 | function address into the register that the | |
1974 | call insn will use. flow.c will delete any | |
1975 | redundant stores that we have created. */ | |
1976 | if (GET_CODE (next) == CALL_INSN | |
1977 | && GET_CODE (body) == SET | |
1978 | && GET_CODE (SET_DEST (body)) == REG | |
5fd8383e RK |
1979 | && (n = find_reg_note (temp, REG_EQUAL, |
1980 | NULL_RTX))) | |
b4ad7b23 RS |
1981 | { |
1982 | fn_reg = SET_SRC (body); | |
1983 | if (GET_CODE (fn_reg) != REG) | |
1984 | fn_reg = SET_DEST (body); | |
1985 | fn_address = XEXP (n, 0); | |
1986 | fn_address_insn = temp; | |
1987 | } | |
1988 | /* We have the call insn. | |
1989 | If it uses the register we suspect it might, | |
1990 | load it with the correct address directly. */ | |
1991 | if (GET_CODE (temp) == CALL_INSN | |
1992 | && fn_address != 0 | |
d9f8a199 | 1993 | && reg_referenced_p (fn_reg, body)) |
b4ad7b23 RS |
1994 | emit_insn_after (gen_move_insn (fn_reg, |
1995 | fn_address), | |
1996 | fn_address_insn); | |
1997 | ||
1998 | if (GET_CODE (temp) == CALL_INSN) | |
f97d29ce JW |
1999 | { |
2000 | i1 = emit_call_insn_before (body, loop_start); | |
2001 | /* Because the USAGE information potentially | |
2002 | contains objects other than hard registers | |
2003 | we need to copy it. */ | |
8c4f5c09 | 2004 | if (CALL_INSN_FUNCTION_USAGE (temp)) |
db3cf6fb MS |
2005 | CALL_INSN_FUNCTION_USAGE (i1) |
2006 | = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp)); | |
f97d29ce | 2007 | } |
b4ad7b23 RS |
2008 | else |
2009 | i1 = emit_insn_before (body, loop_start); | |
2010 | if (first == 0) | |
2011 | first = i1; | |
2012 | if (temp == fn_address_insn) | |
2013 | fn_address_insn = i1; | |
2014 | REG_NOTES (i1) = REG_NOTES (temp); | |
2015 | delete_insn (temp); | |
2016 | } | |
18985c91 R |
2017 | if (new_start == 0) |
2018 | new_start = first; | |
b4ad7b23 RS |
2019 | } |
2020 | if (m->savemode != VOIDmode) | |
2021 | { | |
2022 | /* P sets REG to zero; but we should clear only | |
2023 | the bits that are not covered by the mode | |
2024 | m->savemode. */ | |
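/* An illustrative case (modes assumed for the example): if REG is
   SImode and m->savemode is QImode, GET_MODE_BITSIZE gives 8, so
   the mask computed below is (1 << 8) - 1 == 0xff, and the AND
   clears every bit above the low byte while leaving the low byte
   intact.  */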
2025 | rtx reg = m->set_dest; | |
2026 | rtx sequence; | |
2027 | rtx tem; | |
2028 | ||
2029 | start_sequence (); | |
2030 | tem = expand_binop | |
2031 | (GET_MODE (reg), and_optab, reg, | |
5fd8383e RK |
2032 | GEN_INT ((((HOST_WIDE_INT) 1 |
2033 | << GET_MODE_BITSIZE (m->savemode))) | |
b4ad7b23 RS |
2034 | - 1), |
2035 | reg, 1, OPTAB_LIB_WIDEN); | |
2036 | if (tem == 0) | |
2037 | abort (); | |
2038 | if (tem != reg) | |
2039 | emit_move_insn (reg, tem); | |
2040 | sequence = gen_sequence (); | |
2041 | end_sequence (); | |
2042 | i1 = emit_insn_before (sequence, loop_start); | |
2043 | } | |
2044 | else if (GET_CODE (p) == CALL_INSN) | |
f97d29ce JW |
2045 | { |
2046 | i1 = emit_call_insn_before (PATTERN (p), loop_start); | |
2047 | /* Because the USAGE information potentially | |
2048 | contains objects other than hard registers | |
2049 | we need to copy it. */ | |
8c4f5c09 | 2050 | if (CALL_INSN_FUNCTION_USAGE (p)) |
db3cf6fb MS |
2051 | CALL_INSN_FUNCTION_USAGE (i1) |
2052 | = copy_rtx (CALL_INSN_FUNCTION_USAGE (p)); | |
f97d29ce | 2053 | } |
1a61c29f JW |
2054 | else if (count == m->consec && m->move_insn_first) |
2055 | { | |
2056 | /* The SET_SRC might not be invariant, so we must | |
2057 | use the REG_EQUAL note. */ | |
2058 | start_sequence (); | |
2059 | emit_move_insn (m->set_dest, m->set_src); | |
2060 | temp = get_insns (); | |
2061 | end_sequence (); | |
2062 | ||
2063 | add_label_notes (m->set_src, temp); | |
2064 | ||
2065 | i1 = emit_insns_before (temp, loop_start); | |
2066 | if (! find_reg_note (i1, REG_EQUAL, NULL_RTX)) | |
2067 | REG_NOTES (i1) | |
2068 | = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV | |
2069 | : REG_EQUAL), | |
2070 | m->set_src, REG_NOTES (i1)); | |
2071 | } | |
b4ad7b23 RS |
2072 | else |
2073 | i1 = emit_insn_before (PATTERN (p), loop_start); | |
2074 | ||
1a61c29f JW |
2075 | if (REG_NOTES (i1) == 0) |
2076 | { | |
2077 | REG_NOTES (i1) = REG_NOTES (p); | |
b4ad7b23 | 2078 | |
1a61c29f JW |
2079 | /* If there is a REG_EQUAL note present whose value |
2080 | is not loop invariant, then delete it, since it | |
2081 | may cause problems with later optimization passes. | |
2082 | It is possible for cse to create such notes | |
2083 | like this as a result of record_jump_cond. */ | |
e6726b1f | 2084 | |
1a61c29f JW |
2085 | if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX)) |
2086 | && ! invariant_p (XEXP (temp, 0))) | |
2087 | remove_note (i1, temp); | |
2088 | } | |
e6726b1f | 2089 | |
b4ad7b23 RS |
2090 | if (new_start == 0) |
2091 | new_start = i1; | |
2092 | ||
2093 | if (loop_dump_stream) | |
2094 | fprintf (loop_dump_stream, " moved to %d", | |
2095 | INSN_UID (i1)); | |
2096 | ||
b4ad7b23 RS |
2097 | /* If library call, now fix the REG_NOTES that contain |
2098 | insn pointers, namely REG_LIBCALL on FIRST | |
2099 | and REG_RETVAL on I1. */ | |
51723711 | 2100 | if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX))) |
b4ad7b23 RS |
2101 | { |
2102 | XEXP (temp, 0) = first; | |
5fd8383e | 2103 | temp = find_reg_note (first, REG_LIBCALL, NULL_RTX); |
b4ad7b23 RS |
2104 | XEXP (temp, 0) = i1; |
2105 | } | |
2106 | ||
9655bf95 | 2107 | temp = p; |
b4ad7b23 | 2108 | delete_insn (p); |
9655bf95 DM |
2109 | p = NEXT_INSN (p); |
2110 | ||
2111 | /* simplify_giv_expr expects that it can walk the insns | |
2112 | at m->insn forwards and see this old sequence we are | |
2113 | tossing here. delete_insn does preserve the next | |
2114 | pointers, but when we skip over a NOTE we must fix | |
2115 | it up. Otherwise that code walks into the non-deleted | |
2116 | insn stream. */ | |
2117 | while (p && GET_CODE (p) == NOTE) | |
2118 | p = NEXT_INSN (temp) = NEXT_INSN (p); | |
b4ad7b23 RS |
2119 | } |
2120 | ||
2121 | /* The more regs we move, the less we like moving them. */ | |
2122 | threshold -= 3; | |
2123 | } | |
2124 | ||
2125 | /* Any other movable that loads the same register | |
2126 | MUST be moved. */ | |
2127 | already_moved[regno] = 1; | |
2128 | ||
2129 | /* This reg has been moved out of one loop. */ | |
2130 | moved_once[regno] = 1; | |
2131 | ||
2132 | /* The reg set here is now invariant. */ | |
2133 | if (! m->partial) | |
4b259e3f | 2134 | VARRAY_INT (set_in_loop, regno) = 0; |
b4ad7b23 RS |
2135 | |
2136 | m->done = 1; | |
2137 | ||
2138 | /* Change the length-of-life info for the register | |
2139 | to say it lives at least the full length of this loop. | |
2140 | This will help guide optimizations in outer loops. */ | |
2141 | ||
b1f21e0a | 2142 | if (uid_luid[REGNO_FIRST_UID (regno)] > INSN_LUID (loop_start)) |
b4ad7b23 RS |
2143 | /* This is the old insn before all the moved insns. |
2144 | We can't use the moved insn because it is out of range | |
2145 | in uid_luid. Only the old insns have luids. */ | |
b1f21e0a MM |
2146 | REGNO_FIRST_UID (regno) = INSN_UID (loop_start); |
2147 | if (uid_luid[REGNO_LAST_UID (regno)] < INSN_LUID (end)) | |
2148 | REGNO_LAST_UID (regno) = INSN_UID (end); | |
b4ad7b23 RS |
2149 | |
2150 | /* Combine with this moved insn any other matching movables. */ | |
2151 | ||
2152 | if (! m->partial) | |
2153 | for (m1 = movables; m1; m1 = m1->next) | |
2154 | if (m1->match == m) | |
2155 | { | |
2156 | rtx temp; | |
2157 | ||
2158 | /* Schedule the reg loaded by M1 | |
2159 | for replacement so that it shares the reg of M. | |
2160 | If the modes differ (only possible in restricted | |
51f0646f JL |
2161 | circumstances), make a SUBREG. | |
2162 | ||
2163 | Note this assumes that the target dependent files | |
2164 | treat REG and SUBREG equally, including within | |
2165 | GO_IF_LEGITIMATE_ADDRESS and in all the | |
2166 | predicates since we never verify that replacing the | |
2167 | original register with a SUBREG results in a | |
2168 | recognizable insn. */ | |
b4ad7b23 RS |
2169 | if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)) |
2170 | reg_map[m1->regno] = m->set_dest; | |
2171 | else | |
2172 | reg_map[m1->regno] | |
2173 | = gen_lowpart_common (GET_MODE (m1->set_dest), | |
2174 | m->set_dest); | |
2175 | ||
2176 | /* Get rid of the matching insn | |
2177 | and prevent further processing of it. */ | |
2178 | m1->done = 1; | |
2179 | ||
2180 | /* If library call, delete all insns except the last, which | |
2181 | is deleted below. */ | |
51723711 KG |
2182 | if ((temp = find_reg_note (m1->insn, REG_RETVAL, |
2183 | NULL_RTX))) | |
b4ad7b23 RS |
2184 | { |
2185 | for (temp = XEXP (temp, 0); temp != m1->insn; | |
2186 | temp = NEXT_INSN (temp)) | |
2187 | delete_insn (temp); | |
2188 | } | |
2189 | delete_insn (m1->insn); | |
2190 | ||
2191 | /* Any other movable that loads the same register | |
2192 | MUST be moved. */ | |
2193 | already_moved[m1->regno] = 1; | |
2194 | ||
2195 | /* The reg merged here is now invariant, | |
2196 | if the reg it matches is invariant. */ | |
2197 | if (! m->partial) | |
4b259e3f | 2198 | VARRAY_INT (set_in_loop, m1->regno) = 0; |
b4ad7b23 RS |
2199 | } |
2200 | } | |
2201 | else if (loop_dump_stream) | |
2202 | fprintf (loop_dump_stream, "not desirable"); | |
2203 | } | |
2204 | else if (loop_dump_stream && !m->match) | |
2205 | fprintf (loop_dump_stream, "not safe"); | |
2206 | ||
2207 | if (loop_dump_stream) | |
2208 | fprintf (loop_dump_stream, "\n"); | |
2209 | } | |
2210 | ||
2211 | if (new_start == 0) | |
2212 | new_start = loop_start; | |
2213 | ||
2214 | /* Go through all the instructions in the loop, making | |
2215 | all the register substitutions scheduled in REG_MAP. */ | |
2216 | for (p = new_start; p != end; p = NEXT_INSN (p)) | |
2217 | if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN | |
2218 | || GET_CODE (p) == CALL_INSN) | |
2219 | { | |
2220 | replace_regs (PATTERN (p), reg_map, nregs, 0); | |
2221 | replace_regs (REG_NOTES (p), reg_map, nregs, 0); | |
da0c128e | 2222 | INSN_CODE (p) = -1; |
b4ad7b23 RS |
2223 | } |
2224 | } | |
2225 | \f | |
2226 | #if 0 | |
2227 | /* Scan X and replace the address of any MEM in it with ADDR. | |
2228 | REG is the address that MEM should have before the replacement. */ | |
2229 | ||
2230 | static void | |
2231 | replace_call_address (x, reg, addr) | |
2232 | rtx x, reg, addr; | |
2233 | { | |
2234 | register enum rtx_code code; | |
2235 | register int i; | |
6f7d635c | 2236 | register const char *fmt; |
b4ad7b23 RS |
2237 | |
2238 | if (x == 0) | |
2239 | return; | |
2240 | code = GET_CODE (x); | |
2241 | switch (code) | |
2242 | { | |
2243 | case PC: | |
2244 | case CC0: | |
2245 | case CONST_INT: | |
2246 | case CONST_DOUBLE: | |
2247 | case CONST: | |
2248 | case SYMBOL_REF: | |
2249 | case LABEL_REF: | |
2250 | case REG: | |
2251 | return; | |
2252 | ||
2253 | case SET: | |
2254 | /* Short cut for very common case. */ | |
2255 | replace_call_address (XEXP (x, 1), reg, addr); | |
2256 | return; | |
2257 | ||
2258 | case CALL: | |
2259 | /* Short cut for very common case. */ | |
2260 | replace_call_address (XEXP (x, 0), reg, addr); | |
2261 | return; | |
2262 | ||
2263 | case MEM: | |
2264 | /* If this MEM uses a reg other than the one we expected, | |
2265 | something is wrong. */ | |
2266 | if (XEXP (x, 0) != reg) | |
2267 | abort (); | |
2268 | XEXP (x, 0) = addr; | |
2269 | return; | |
e9a25f70 JL |
2270 | |
2271 | default: | |
2272 | break; | |
b4ad7b23 RS |
2273 | } |
2274 | ||
2275 | fmt = GET_RTX_FORMAT (code); | |
2276 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
2277 | { | |
2278 | if (fmt[i] == 'e') | |
2279 | replace_call_address (XEXP (x, i), reg, addr); | |
2280 | if (fmt[i] == 'E') | |
2281 | { | |
2282 | register int j; | |
2283 | for (j = 0; j < XVECLEN (x, i); j++) | |
2284 | replace_call_address (XVECEXP (x, i, j), reg, addr); | |
2285 | } | |
2286 | } | |
2287 | } | |
2288 | #endif | |
2289 | \f | |
2290 | /* Return the number of memory refs to addresses that vary | |
2291 | in the rtx X. */ | |
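/* Illustration: (mem (reg 100)) counts as one nonfixed read when
   reg 100 is not loop-invariant, while (mem (symbol_ref "x"))
   counts zero because its address never varies.  */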
2292 | ||
2293 | static int | |
2294 | count_nonfixed_reads (x) | |
2295 | rtx x; | |
2296 | { | |
2297 | register enum rtx_code code; | |
2298 | register int i; | |
6f7d635c | 2299 | register const char *fmt; |
b4ad7b23 RS |
2300 | int value; |
2301 | ||
2302 | if (x == 0) | |
2303 | return 0; | |
2304 | ||
2305 | code = GET_CODE (x); | |
2306 | switch (code) | |
2307 | { | |
2308 | case PC: | |
2309 | case CC0: | |
2310 | case CONST_INT: | |
2311 | case CONST_DOUBLE: | |
2312 | case CONST: | |
2313 | case SYMBOL_REF: | |
2314 | case LABEL_REF: | |
2315 | case REG: | |
2316 | return 0; | |
2317 | ||
2318 | case MEM: | |
2319 | return ((invariant_p (XEXP (x, 0)) != 1) | |
2320 | + count_nonfixed_reads (XEXP (x, 0))); | |
e9a25f70 JL |
2321 | |
2322 | default: | |
2323 | break; | |
b4ad7b23 RS |
2324 | } |
2325 | ||
2326 | value = 0; | |
2327 | fmt = GET_RTX_FORMAT (code); | |
2328 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
2329 | { | |
2330 | if (fmt[i] == 'e') | |
2331 | value += count_nonfixed_reads (XEXP (x, i)); | |
2332 | if (fmt[i] == 'E') | |
2333 | { | |
2334 | register int j; | |
2335 | for (j = 0; j < XVECLEN (x, i); j++) | |
2336 | value += count_nonfixed_reads (XVECEXP (x, i, j)); | |
2337 | } | |
2338 | } | |
2339 | return value; | |
2340 | } | |
2341 | ||
2342 | \f | |
2343 | #if 0 | |
2344 | /* P is an instruction that sets a register to the result of a ZERO_EXTEND. | |
2345 | Replace it with an instruction to load just the low bytes | |
2346 | if the machine supports such an instruction, | |
2347 | and insert above LOOP_START an instruction to clear the register. */ | |
2348 | ||
2349 | static void | |
2350 | constant_high_bytes (p, loop_start) | |
2351 | rtx p, loop_start; | |
2352 | { | |
2353 | register rtx new; | |
2354 | register int insn_code_number; | |
2355 | ||
2356 | /* Try to change (SET (REG ...) (ZERO_EXTEND (..:B ...))) | |
2357 | to (SET (STRICT_LOW_PART (SUBREG:B (REG...))) ...). */ | |
2358 | ||
c5c76735 JL |
2359 | new |
2360 | = gen_rtx_SET | |
2361 | (VOIDmode, | |
2362 | gen_rtx_STRICT_LOW_PART | |
2363 | (VOIDmode, | |
2364 | gen_rtx_SUBREG (GET_MODE (XEXP (SET_SRC (PATTERN (p)), 0)), | |
2365 | SET_DEST (PATTERN (p)), 0)), | |
2366 | XEXP (SET_SRC (PATTERN (p)), 0)); | |
2367 | ||
b4ad7b23 RS |
2368 | insn_code_number = recog (new, p); |
2369 | ||
2370 | if (insn_code_number) | |
2371 | { | |
2372 | register int i; | |
2373 | ||
2374 | /* Clear destination register before the loop. */ | |
c5c76735 JL |
2375 | emit_insn_before (gen_rtx_SET (VOIDmode, |
2376 | SET_DEST (PATTERN (p)), const0_rtx), | |
b4ad7b23 RS |
2377 | loop_start); |
2378 | ||
2379 | /* Inside the loop, just load the low part. */ | |
2380 | PATTERN (p) = new; | |
2381 | } | |
2382 | } | |
2383 | #endif | |
2384 | \f | |
3c748bb6 MH |
2385 | /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed', |
2386 | `has_call', `has_volatile', and `has_tablejump' within LOOP_INFO. | |
2387 | Set the global variables `unknown_address_altered' and | |
2388 | `num_mem_sets'. Also, fill in the array `loop_mems' and the list | |
2389 | `loop_store_mems'. */ | |
b4ad7b23 RS |
2390 | |
2391 | static void | |
3c748bb6 | 2392 | prescan_loop (start, end, loop_info) |
b4ad7b23 | 2393 | rtx start, end; |
3c748bb6 | 2394 | struct loop_info *loop_info; |
b4ad7b23 RS |
2395 | { |
2396 | register int level = 1; | |
41a972a9 | 2397 | rtx insn; |
41a972a9 MM |
2398 | /* The label after END. Jumping here is just like falling off the |
2399 | end of the loop. We use next_nonnote_insn instead of next_label | |
2400 | as a hedge against the (pathological) case where some actual insn | |
2401 | might end up between the two. */ | |
2402 | rtx exit_target = next_nonnote_insn (end); | |
3c748bb6 MH |
2403 | |
2404 | loop_info->num = uid_loop_num [INSN_UID (start)]; | |
2405 | loop_info->has_indirect_jump = indirect_jump_in_function; | |
2406 | loop_info->has_call = 0; | |
2407 | loop_info->has_volatile = 0; | |
2408 | loop_info->has_tablejump = 0; | |
2409 | loop_info->loops_enclosed = 1; | |
2410 | loop_info->has_multiple_exit_targets = 0; | |
2411 | loop_info->cont = 0; | |
2412 | loop_info->vtop = 0; | |
b4ad7b23 RS |
2413 | |
2414 | unknown_address_altered = 0; | |
5026a502 | 2415 | loop_store_mems = NULL_RTX; |
2d4fde68 | 2416 | first_loop_store_insn = NULL_RTX; |
41a972a9 | 2417 | loop_mems_idx = 0; |
b4ad7b23 | 2418 | num_mem_sets = 0; |
b4ad7b23 RS |
2419 | |
2420 | for (insn = NEXT_INSN (start); insn != NEXT_INSN (end); | |
2421 | insn = NEXT_INSN (insn)) | |
2422 | { | |
2423 | if (GET_CODE (insn) == NOTE) | |
2424 | { | |
2425 | if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG) | |
2426 | { | |
2427 | ++level; | |
2428 | /* Count number of loops contained in this one. */ | |
3c748bb6 | 2429 | loop_info->loops_enclosed++; |
b4ad7b23 RS |
2430 | } |
2431 | else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END) | |
2432 | { | |
2433 | --level; | |
2434 | if (level == 0) | |
2435 | { | |
2436 | end = insn; | |
2437 | break; | |
2438 | } | |
2439 | } | |
2440 | else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT) | |
2441 | { | |
2442 | if (level == 1) | |
3c748bb6 MH |
2443 | loop_info->cont = insn; |
2444 | } | |
2445 | else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_VTOP) | |
2446 | { | |
2447 | /* If there is a NOTE_INSN_LOOP_VTOP, then this is a for | |
2448 | or while style loop, with a loop exit test at the | |
2449 | start. Thus, we can assume that the loop condition | |
2450 | was true when the loop was entered. */ | |
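/* E.g. (a source-level sketch): for "while (n > 0) body;" the
   exit test is also emitted ahead of the loop top, so within the
   loop we may assume n > 0 held on entry.  */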
2451 | if (level == 1) | |
2452 | loop_info->vtop = insn; | |
b4ad7b23 RS |
2453 | } |
2454 | } | |
2455 | else if (GET_CODE (insn) == CALL_INSN) | |
2456 | { | |
9ae8ffe7 JL |
2457 | if (! CONST_CALL_P (insn)) |
2458 | unknown_address_altered = 1; | |
3c748bb6 | 2459 | loop_info->has_call = 1; |
b4ad7b23 | 2460 | } |
41a972a9 | 2461 | else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN) |
b4ad7b23 | 2462 | { |
41a972a9 MM |
2463 | rtx label1 = NULL_RTX; |
2464 | rtx label2 = NULL_RTX; | |
2465 | ||
2466 | if (volatile_refs_p (PATTERN (insn))) | |
3c748bb6 | 2467 | loop_info->has_volatile = 1; |
8c368ee2 DE |
2468 | |
2469 | if (GET_CODE (insn) == JUMP_INSN | |
2470 | && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC | |
2471 | || GET_CODE (PATTERN (insn)) == ADDR_VEC)) | |
3c748bb6 | 2472 | loop_info->has_tablejump = 1; |
41a972a9 MM |
2473 | |
2474 | note_stores (PATTERN (insn), note_addr_stored); | |
2d4fde68 R |
2475 | if (! first_loop_store_insn && loop_store_mems) |
2476 | first_loop_store_insn = insn; | |
41a972a9 | 2477 | |
3c748bb6 | 2478 | if (! loop_info->has_multiple_exit_targets |
41a972a9 MM |
2479 | && GET_CODE (insn) == JUMP_INSN |
2480 | && GET_CODE (PATTERN (insn)) == SET | |
2481 | && SET_DEST (PATTERN (insn)) == pc_rtx) | |
552bc76f | 2482 | { |
41a972a9 MM |
2483 | if (GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE) |
2484 | { | |
2485 | label1 = XEXP (SET_SRC (PATTERN (insn)), 1); | |
2486 | label2 = XEXP (SET_SRC (PATTERN (insn)), 2); | |
2487 | } | |
2488 | else | |
2489 | { | |
2490 | label1 = SET_SRC (PATTERN (insn)); | |
2491 | } | |
2492 | ||
2493 | do { | |
2494 | if (label1 && label1 != pc_rtx) | |
2495 | { | |
2496 | if (GET_CODE (label1) != LABEL_REF) | |
2497 | { | |
2498 | /* Something tricky. */ | |
3c748bb6 | 2499 | loop_info->has_multiple_exit_targets = 1; |
41a972a9 MM |
2500 | break; |
2501 | } | |
2502 | else if (XEXP (label1, 0) != exit_target | |
2503 | && LABEL_OUTSIDE_LOOP_P (label1)) | |
2504 | { | |
2505 | /* A jump outside the current loop. */ | |
3c748bb6 | 2506 | loop_info->has_multiple_exit_targets = 1; |
41a972a9 MM |
2507 | break; |
2508 | } | |
2509 | } | |
552bc76f | 2510 | |
41a972a9 MM |
2511 | label1 = label2; |
2512 | label2 = NULL_RTX; | |
2513 | } while (label1); | |
552bc76f | 2514 | } |
b4ad7b23 | 2515 | } |
41a972a9 | 2516 | else if (GET_CODE (insn) == RETURN) |
3c748bb6 | 2517 | loop_info->has_multiple_exit_targets = 1; |
b4ad7b23 | 2518 | } |
41a972a9 MM |
2519 | |
2520 | /* Now, rescan the loop, setting up the LOOP_MEMS array. */ | |
2521 | if (/* We can't tell what MEMs are aliased by what. */ | |
2522 | !unknown_address_altered | |
2523 | /* An exception thrown by a called function might land us | |
2524 | anywhere. */ | |
3c748bb6 | 2525 | && !loop_info->has_call |
41a972a9 MM |
2526 | /* We don't want loads for MEMs moved to a location before the |
2527 | one at which their stack memory becomes allocated. (Note | |
2528 | that this is not a problem for malloc, etc., since those | |
2529 | require actual function calls.) */ | |
2530 | && !current_function_calls_alloca | |
2531 | /* There are ways to leave the loop other than falling off the | |
2532 | end. */ | |
3c748bb6 | 2533 | && !loop_info->has_multiple_exit_targets) |
41a972a9 MM |
2534 | for (insn = NEXT_INSN (start); insn != NEXT_INSN (end); |
2535 | insn = NEXT_INSN (insn)) | |
2536 | for_each_rtx (&insn, insert_loop_mem, 0); | |
b4ad7b23 RS |
2537 | } |
2538 | \f | |
3ec2b590 R |
2539 | /* LOOP_NUMBER_CONT_DOMINATOR is now the last label between the loop start |
2540 | and the continue note that is the destination of a (cond)jump after | |
2541 | the continue note. If there is any (cond)jump between the loop start | |
2542 | and what we have so far as LOOP_NUMBER_CONT_DOMINATOR that has a | |
2543 | target between LOOP_NUMBER_CONT_DOMINATOR and the continue note, move | |
2544 | LOOP_NUMBER_CONT_DOMINATOR forward to that label; if a jump's | |
2545 | destination cannot be determined, clear LOOP_NUMBER_CONT_DOMINATOR. */ | |
2546 | ||
2547 | static void | |
2548 | verify_dominator (loop_number) | |
2549 | int loop_number; | |
2550 | { | |
2551 | rtx insn; | |
2552 | ||
2553 | if (! loop_number_cont_dominator[loop_number]) | |
2554 | /* This can happen for an empty loop, e.g. in | |
2555 | gcc.c-torture/compile/920410-2.c */ | |
2556 | return; | |
2557 | if (loop_number_cont_dominator[loop_number] == const0_rtx) | |
2558 | { | |
2559 | loop_number_cont_dominator[loop_number] = 0; | |
2560 | return; | |
2561 | } | |
2562 | for (insn = loop_number_loop_starts[loop_number]; | |
2563 | insn != loop_number_cont_dominator[loop_number]; | |
2564 | insn = NEXT_INSN (insn)) | |
2565 | { | |
2566 | if (GET_CODE (insn) == JUMP_INSN | |
2567 | && GET_CODE (PATTERN (insn)) != RETURN) | |
2568 | { | |
2569 | rtx label = JUMP_LABEL (insn); | |
8d22ad72 JL |
2570 | int label_luid; |
2571 | ||
2572 | /* If it is not a jump we can easily understand or for | |
2573 | which we do not have jump target information in the JUMP_LABEL | |
2574 | field (consider ADDR_VEC and ADDR_DIFF_VEC insns), then clear | |
2575 | LOOP_NUMBER_CONT_DOMINATOR. */ | |
2576 | if ((! condjump_p (insn) | |
2577 | && ! condjump_in_parallel_p (insn)) | |
2578 | || label == NULL_RTX) | |
3ec2b590 R |
2579 | { |
2580 | loop_number_cont_dominator[loop_number] = NULL_RTX; | |
2581 | return; | |
2582 | } | |
8d22ad72 JL |
2583 | |
2584 | label_luid = INSN_LUID (label); | |
3ec2b590 R |
2585 | if (label_luid < INSN_LUID (loop_number_loop_cont[loop_number]) |
2586 | && (label_luid | |
2587 | > INSN_LUID (loop_number_cont_dominator[loop_number]))) | |
2588 | loop_number_cont_dominator[loop_number] = label; | |
2589 | } | |
2590 | } | |
2591 | } | |
2592 | ||
b4ad7b23 RS |
2593 | /* Scan the function looking for loops. Record the start and end of each loop. |
2594 | Also mark as invalid loops any loops that contain a setjmp or are branched | |
2595 | to from outside the loop. */ | |
2596 | ||
2597 | static void | |
2598 | find_and_verify_loops (f) | |
2599 | rtx f; | |
2600 | { | |
034dabc9 | 2601 | rtx insn, label; |
b4ad7b23 RS |
2602 | int current_loop = -1; |
2603 | int next_loop = -1; | |
2604 | int loop; | |
2605 | ||
3ec2b590 R |
2606 | compute_luids (f, NULL_RTX, 0); |
2607 | ||
b4ad7b23 RS |
2608 | /* If there are jumps to undefined labels, |
2609 | treat them as jumps out of any/all loops. | |
2610 | This also avoids writing past end of tables when there are no loops. */ | |
2611 | uid_loop_num[0] = -1; | |
2612 | ||
2613 | /* Find boundaries of loops, mark which loops are contained within | |
2614 | loops, and invalidate loops that have setjmp. */ | |
2615 | ||
2616 | for (insn = f; insn; insn = NEXT_INSN (insn)) | |
2617 | { | |
2618 | if (GET_CODE (insn) == NOTE) | |
2619 | switch (NOTE_LINE_NUMBER (insn)) | |
2620 | { | |
2621 | case NOTE_INSN_LOOP_BEG: | |
2622 | loop_number_loop_starts[++next_loop] = insn; | |
2623 | loop_number_loop_ends[next_loop] = 0; | |
3ec2b590 R |
2624 | loop_number_loop_cont[next_loop] = 0; |
2625 | loop_number_cont_dominator[next_loop] = 0; | |
b4ad7b23 RS |
2626 | loop_outer_loop[next_loop] = current_loop; |
2627 | loop_invalid[next_loop] = 0; | |
2628 | loop_number_exit_labels[next_loop] = 0; | |
353127c2 | 2629 | loop_number_exit_count[next_loop] = 0; |
b4ad7b23 RS |
2630 | current_loop = next_loop; |
2631 | break; | |
2632 | ||
2633 | case NOTE_INSN_SETJMP: | |
2634 | /* In this case, we must invalidate our current loop and any | |
2635 | enclosing loop. */ | |
2636 | for (loop = current_loop; loop != -1; loop = loop_outer_loop[loop]) | |
2637 | { | |
2638 | loop_invalid[loop] = 1; | |
2639 | if (loop_dump_stream) | |
2640 | fprintf (loop_dump_stream, | |
2641 | "\nLoop at %d ignored due to setjmp.\n", | |
2642 | INSN_UID (loop_number_loop_starts[loop])); | |
2643 | } | |
2644 | break; | |
2645 | ||
3ec2b590 R |
2646 | case NOTE_INSN_LOOP_CONT: |
2647 | loop_number_loop_cont[current_loop] = insn; | |
2648 | break; | |
b4ad7b23 RS |
2649 | case NOTE_INSN_LOOP_END: |
2650 | if (current_loop == -1) | |
2651 | abort (); | |
2652 | ||
2653 | loop_number_loop_ends[current_loop] = insn; | |
3ec2b590 | 2654 | verify_dominator (current_loop); |
b4ad7b23 RS |
2655 | current_loop = loop_outer_loop[current_loop]; |
2656 | break; | |
2657 | ||
e9a25f70 JL |
2658 | default: |
2659 | break; | |
b4ad7b23 | 2660 | } |
3ec2b590 R |
2661 | /* If for any loop, this is a jump insn between the NOTE_INSN_LOOP_CONT |
2662 | and NOTE_INSN_LOOP_END notes, update loop_number_cont_dominator. */ | |
2663 | else if (GET_CODE (insn) == JUMP_INSN | |
2664 | && GET_CODE (PATTERN (insn)) != RETURN | |
2665 | && current_loop >= 0) | |
2666 | { | |
3c748bb6 | 2667 | int this_loop_num; |
3ec2b590 R |
2668 | rtx label = JUMP_LABEL (insn); |
2669 | ||
2670 | if (! condjump_p (insn) && ! condjump_in_parallel_p (insn)) | |
2671 | label = NULL_RTX; | |
2672 | ||
3c748bb6 | 2673 | this_loop_num = current_loop; |
3ec2b590 R |
2674 | do |
2675 | { | |
2676 | /* First see if we care about this loop. */ | |
3c748bb6 MH |
2677 | if (loop_number_loop_cont[this_loop_num] |
2678 | && loop_number_cont_dominator[this_loop_num] != const0_rtx) | |
3ec2b590 R |
2679 | { |
2680 | /* If the jump destination is not known, invalidate | |
2681 | loop_number_cont_dominator. */ | |
2682 | if (! label) | |
3c748bb6 | 2683 | loop_number_cont_dominator[this_loop_num] = const0_rtx; |
3ec2b590 R |
2684 | else |
2685 | /* Check if the destination is between loop start and | |
2686 | cont. */ | |
2687 | if ((INSN_LUID (label) | |
3c748bb6 | 2688 | < INSN_LUID (loop_number_loop_cont[this_loop_num])) |
3ec2b590 | 2689 | && (INSN_LUID (label) |
3c748bb6 | 2690 | > INSN_LUID (loop_number_loop_starts[this_loop_num])) |
3ec2b590 R |
2691 | /* And if there is no later destination already |
2692 | recorded. */ | |
3c748bb6 | 2693 | && (! loop_number_cont_dominator[this_loop_num] |
3ec2b590 R |
2694 | || (INSN_LUID (label) |
2695 | > INSN_LUID (loop_number_cont_dominator | |
3c748bb6 MH |
2696 | [this_loop_num])))) |
2697 | loop_number_cont_dominator[this_loop_num] = label; | |
3ec2b590 | 2698 | } |
3c748bb6 | 2699 | this_loop_num = loop_outer_loop[this_loop_num]; |
3ec2b590 | 2700 | } |
3c748bb6 | 2701 | while (this_loop_num >= 0); |
3ec2b590 | 2702 | } |
b4ad7b23 RS |
2703 | |
2704 | /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the | |
2705 | enclosing loop, but this doesn't matter. */ | |
2706 | uid_loop_num[INSN_UID (insn)] = current_loop; | |
2707 | } | |
2708 | ||
034dabc9 JW |
2709 | /* Any loop containing a label used in an initializer must be invalidated, |
2710 | because it can be jumped into from anywhere. */ | |
2711 | ||
2712 | for (label = forced_labels; label; label = XEXP (label, 1)) | |
2713 | { | |
2714 | int loop_num; | |
2715 | ||
2716 | for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))]; | |
2717 | loop_num != -1; | |
2718 | loop_num = loop_outer_loop[loop_num]) | |
2719 | loop_invalid[loop_num] = 1; | |
2720 | } | |
2721 | ||
6adb4e3a MS |
2722 | /* Any loop containing a label used for an exception handler must be |
2723 | invalidated, because it can be jumped into from anywhere. */ | |
2724 | ||
2725 | for (label = exception_handler_labels; label; label = XEXP (label, 1)) | |
2726 | { | |
2727 | int loop_num; | |
2728 | ||
2729 | for (loop_num = uid_loop_num[INSN_UID (XEXP (label, 0))]; | |
2730 | loop_num != -1; | |
2731 | loop_num = loop_outer_loop[loop_num]) | |
2732 | loop_invalid[loop_num] = 1; | |
2733 | } | |
2734 | ||
034dabc9 JW |
2735 | /* Now scan all insns in the function. If any JUMP_INSN branches into a |
2736 | loop that it is not contained within, that loop is marked invalid. | |
2737 | If any INSN or CALL_INSN uses a label's address, then the loop containing | |
2738 | that label is marked invalid, because it could be jumped into from | |
2739 | anywhere. | |
b4ad7b23 RS |
2740 | |
2741 | Also look for blocks of code ending in an unconditional branch that | |
2742 | exits the loop. If such a block is guarded by a conditional | |
2743 | branch around it, move the block elsewhere (see below) and | |
2744 | invert the jump to point to the code block. This may eliminate a | |
2745 | label in our loop and will simplify processing by both us and a | |
2746 | possible second cse pass. */ | |
2747 | ||
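/* Illustrative sketch (not from the original file): source code like the
   following typically compiles to exactly the shape handled below -- an
   unconditional exit jump guarded by a conditional branch around it.
   `more', `rare_error' and `work' are hypothetical.  */
#if 0
while (more ())
  {
    if (rare_error ())
      break;		/* unconditional jump out of the loop, guarded
			   by a conditional branch around it */
    work ();
  }
#endif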
2748 | for (insn = f; insn; insn = NEXT_INSN (insn)) | |
034dabc9 | 2749 | if (GET_RTX_CLASS (GET_CODE (insn)) == 'i') |
b4ad7b23 RS |
2750 | { |
2751 | int this_loop_num = uid_loop_num[INSN_UID (insn)]; | |
2752 | ||
034dabc9 JW |
2753 | if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN) |
2754 | { | |
2755 | rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX); | |
2756 | if (note) | |
2757 | { | |
2758 | int loop_num; | |
2759 | ||
2760 | for (loop_num = uid_loop_num[INSN_UID (XEXP (note, 0))]; | |
2761 | loop_num != -1; | |
2762 | loop_num = loop_outer_loop[loop_num]) | |
2763 | loop_invalid[loop_num] = 1; | |
2764 | } | |
2765 | } | |
2766 | ||
2767 | if (GET_CODE (insn) != JUMP_INSN) | |
2768 | continue; | |
2769 | ||
b4ad7b23 RS |
2770 | mark_loop_jump (PATTERN (insn), this_loop_num); |
2771 | ||
2772 | /* See if this is an unconditional branch outside the loop. */ | |
2773 | if (this_loop_num != -1 | |
2774 | && (GET_CODE (PATTERN (insn)) == RETURN | |
2775 | || (simplejump_p (insn) | |
2776 | && (uid_loop_num[INSN_UID (JUMP_LABEL (insn))] | |
1c01e9df TW |
2777 | != this_loop_num))) |
2778 | && get_max_uid () < max_uid_for_loop) | |
b4ad7b23 RS |
2779 | { |
2780 | rtx p; | |
2781 | rtx our_next = next_real_insn (insn); | |
fdccb6df RK |
2782 | int dest_loop; |
2783 | int outer_loop = -1; | |
b4ad7b23 RS |
2784 | |
2785 | /* Go backwards until we reach the start of the loop, a label, | |
2786 | or a JUMP_INSN. */ | |
2787 | for (p = PREV_INSN (insn); | |
2788 | GET_CODE (p) != CODE_LABEL | |
2789 | && ! (GET_CODE (p) == NOTE | |
2790 | && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG) | |
2791 | && GET_CODE (p) != JUMP_INSN; | |
2792 | p = PREV_INSN (p)) | |
2793 | ; | |
2794 | ||
edf711a4 RK |
2795 | /* Check for the case where we have a jump to an inner nested |
2796 | loop, and do not perform the optimization in that case. */ | |
2797 | ||
fdccb6df | 2798 | if (JUMP_LABEL (insn)) |
edf711a4 | 2799 | { |
fdccb6df RK |
2800 | dest_loop = uid_loop_num[INSN_UID (JUMP_LABEL (insn))]; |
2801 | if (dest_loop != -1) | |
2802 | { | |
2803 | for (outer_loop = dest_loop; outer_loop != -1; | |
2804 | outer_loop = loop_outer_loop[outer_loop]) | |
2805 | if (outer_loop == this_loop_num) | |
2806 | break; | |
2807 | } | |
edf711a4 | 2808 | } |
edf711a4 | 2809 | |
89724a5a RK |
2810 | /* Make sure that the target of P is within the current loop. */ |
2811 | ||
9a8e74f0 | 2812 | if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) |
89724a5a RK |
2813 | && uid_loop_num[INSN_UID (JUMP_LABEL (p))] != this_loop_num) |
2814 | outer_loop = this_loop_num; | |
2815 | ||
b4ad7b23 RS |
2816 | /* If we stopped on a JUMP_INSN to the next insn after INSN, |
2817 | we have a block of code to try to move. | |
2818 | ||
2819 | We look backward and then forward from the target of INSN | |
2820 | to find a BARRIER at the same loop depth as the target. | |
2821 | If we find such a BARRIER, we make a new label for the start | |
2822 | of the block, invert the jump in P and point it to that label, | |
2823 | and move the block of code to the spot we found. */ | |
2824 | ||
edf711a4 RK |
2825 | if (outer_loop == -1 |
2826 | && GET_CODE (p) == JUMP_INSN | |
c6096c5e RS |
2827 | && JUMP_LABEL (p) != 0 |
2828 | /* Just ignore jumps to labels that were never emitted. | |
2829 | These always indicate compilation errors. */ | |
2830 | && INSN_UID (JUMP_LABEL (p)) != 0 | |
2831 | && condjump_p (p) | |
2832 | && ! simplejump_p (p) | |
2833 | && next_real_insn (JUMP_LABEL (p)) == our_next) | |
b4ad7b23 RS |
2834 | { |
2835 | rtx target | |
2836 | = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn (); | |
2837 | int target_loop_num = uid_loop_num[INSN_UID (target)]; | |
2838 | rtx loc; | |
2839 | ||
2840 | for (loc = target; loc; loc = PREV_INSN (loc)) | |
2841 | if (GET_CODE (loc) == BARRIER | |
2842 | && uid_loop_num[INSN_UID (loc)] == target_loop_num) | |
2843 | break; | |
2844 | ||
2845 | if (loc == 0) | |
2846 | for (loc = target; loc; loc = NEXT_INSN (loc)) | |
2847 | if (GET_CODE (loc) == BARRIER | |
2848 | && uid_loop_num[INSN_UID (loc)] == target_loop_num) | |
2849 | break; | |
2850 | ||
2851 | if (loc) | |
2852 | { | |
2853 | rtx cond_label = JUMP_LABEL (p); | |
2854 | rtx new_label = get_label_after (p); | |
2855 | ||
2856 | /* Ensure our label doesn't go away. */ | |
2857 | LABEL_NUSES (cond_label)++; | |
2858 | ||
2859 | /* Verify that uid_loop_num is large enough and that | |
0f41302f | 2860 | we can invert P. */ |
1c01e9df | 2861 | if (invert_jump (p, new_label)) |
b4ad7b23 RS |
2862 | { |
2863 | rtx q, r; | |
2864 | ||
72ec635f JL |
2865 | /* If no suitable BARRIER was found, create a suitable |
2866 | one before TARGET. Since TARGET is a fall through | |
2867 | path, we'll need to insert a jump around our block | |
2868 | and add a BARRIER before TARGET. | |
2869 | ||
2870 | This creates an extra unconditional jump outside | |
2871 | the loop. However, the benefits of removing rarely | |
2872 | executed instructions from inside the loop usually | |
2873 | outweigh the cost of the extra unconditional jump | |
2874 | outside the loop. */ | |
2875 | if (loc == 0) | |
2876 | { | |
2877 | rtx temp; | |
2878 | ||
2879 | temp = gen_jump (JUMP_LABEL (insn)); | |
2880 | temp = emit_jump_insn_before (temp, target); | |
2881 | JUMP_LABEL (temp) = JUMP_LABEL (insn); | |
2882 | LABEL_NUSES (JUMP_LABEL (insn))++; | |
2883 | loc = emit_barrier_before (target); | |
2884 | } | |
2885 | ||
b4ad7b23 RS |
2886 | /* Include the BARRIER after INSN and copy the |
2887 | block after LOC. */ | |
915f619f | 2888 | new_label = squeeze_notes (new_label, NEXT_INSN (insn)); |
b4ad7b23 RS |
2889 | reorder_insns (new_label, NEXT_INSN (insn), loc); |
2890 | ||
2891 | /* All those insns are now in TARGET_LOOP_NUM. */ | |
2892 | for (q = new_label; q != NEXT_INSN (NEXT_INSN (insn)); | |
2893 | q = NEXT_INSN (q)) | |
2894 | uid_loop_num[INSN_UID (q)] = target_loop_num; | |
2895 | ||
2896 | /* The label jumped to by INSN is no longer a loop exit. | |
2897 | Unless INSN does not have a label (e.g., it is a | |
2898 | RETURN insn), search loop_number_exit_labels to find | |
2899 | its label_ref, and remove it. Also turn off | |
2900 | LABEL_OUTSIDE_LOOP_P bit. */ | |
2901 | if (JUMP_LABEL (insn)) | |
2902 | { | |
353127c2 RK |
2903 | int loop_num; |
2904 | ||
b4ad7b23 RS |
2905 | for (q = 0, |
2906 | r = loop_number_exit_labels[this_loop_num]; | |
2907 | r; q = r, r = LABEL_NEXTREF (r)) | |
2908 | if (XEXP (r, 0) == JUMP_LABEL (insn)) | |
2909 | { | |
2910 | LABEL_OUTSIDE_LOOP_P (r) = 0; | |
2911 | if (q) | |
2912 | LABEL_NEXTREF (q) = LABEL_NEXTREF (r); | |
2913 | else | |
2914 | loop_number_exit_labels[this_loop_num] | |
2915 | = LABEL_NEXTREF (r); | |
2916 | break; | |
2917 | } | |
2918 | ||
353127c2 RK |
2919 | for (loop_num = this_loop_num; |
2920 | loop_num != -1 && loop_num != target_loop_num; | |
2921 | loop_num = loop_outer_loop[loop_num]) | |
2922 | loop_number_exit_count[loop_num]--; | |
2923 | ||
0f41302f | 2924 | /* If we didn't find it, then something is wrong. */ |
b4ad7b23 RS |
2925 | if (! r) |
2926 | abort (); | |
2927 | } | |
2928 | ||
2929 | /* P is now a jump outside the loop, so it must be put | |
2930 | in loop_number_exit_labels, and marked as such. | |
2931 | The easiest way to do this is to just call | |
2932 | mark_loop_jump again for P. */ | |
2933 | mark_loop_jump (PATTERN (p), this_loop_num); | |
2934 | ||
2935 | /* If INSN now jumps to the insn after it, | |
2936 | delete INSN. */ | |
2937 | if (JUMP_LABEL (insn) != 0 | |
2938 | && (next_real_insn (JUMP_LABEL (insn)) | |
2939 | == next_real_insn (insn))) | |
2940 | delete_insn (insn); | |
2941 | } | |
2942 | ||
2943 | /* Continue the loop after where the conditional | |
2944 | branch used to jump, since the only branch insn | |
2945 | in the block (if it still remains) is an inter-loop | |
2946 | branch and hence needs no processing. */ | |
2947 | insn = NEXT_INSN (cond_label); | |
2948 | ||
2949 | if (--LABEL_NUSES (cond_label) == 0) | |
2950 | delete_insn (cond_label); | |
3ad0cfaf RK |
2951 | |
2952 | /* This loop will be continued with NEXT_INSN (insn). */ | |
2953 | insn = PREV_INSN (insn); | |
b4ad7b23 RS |
2954 | } |
2955 | } | |
2956 | } | |
2957 | } | |
2958 | } | |
2959 | ||
2960 | /* If any label in X jumps to a loop different from LOOP_NUM and any of the | |
2961 | loops it is contained in, mark the target loop invalid. | |
2962 | ||
2963 | For speed, we assume that X is part of a pattern of a JUMP_INSN. */ | |
2964 | ||
2965 | static void | |
2966 | mark_loop_jump (x, loop_num) | |
2967 | rtx x; | |
2968 | int loop_num; | |
2969 | { | |
2970 | int dest_loop; | |
2971 | int outer_loop; | |
2972 | int i; | |
2973 | ||
2974 | switch (GET_CODE (x)) | |
2975 | { | |
2976 | case PC: | |
2977 | case USE: | |
2978 | case CLOBBER: | |
2979 | case REG: | |
2980 | case MEM: | |
2981 | case CONST_INT: | |
2982 | case CONST_DOUBLE: | |
2983 | case RETURN: | |
2984 | return; | |
2985 | ||
2986 | case CONST: | |
2987 | /* There could be a label reference in here. */ | |
2988 | mark_loop_jump (XEXP (x, 0), loop_num); | |
2989 | return; | |
2990 | ||
2991 | case PLUS: | |
2992 | case MINUS: | |
2993 | case MULT: | |
b4ad7b23 RS |
2994 | mark_loop_jump (XEXP (x, 0), loop_num); |
2995 | mark_loop_jump (XEXP (x, 1), loop_num); | |
2996 | return; | |
2997 | ||
c4ae2725 JL |
2998 | case LO_SUM: |
2999 | /* This may refer to a LABEL_REF or SYMBOL_REF. */ | |
3000 | mark_loop_jump (XEXP (x, 1), loop_num); | |
3001 | return; | |
3002 | ||
b4ad7b23 RS |
3003 | case SIGN_EXTEND: |
3004 | case ZERO_EXTEND: | |
3005 | mark_loop_jump (XEXP (x, 0), loop_num); | |
3006 | return; | |
3007 | ||
3008 | case LABEL_REF: | |
3009 | dest_loop = uid_loop_num[INSN_UID (XEXP (x, 0))]; | |
3010 | ||
3011 | /* Link together all labels that branch outside the loop. This | |
3012 | is used by final_[bg]iv_value and the loop unrolling code. Also | |
3013 | mark this LABEL_REF so we know that this branch should predict | |
3014 | false. */ | |
3015 | ||
edf711a4 RK |
3016 | /* A check to make sure the label is not in an inner nested loop, |
3017 | since this does not count as a loop exit. */ | |
3018 | if (dest_loop != -1) | |
3019 | { | |
3020 | for (outer_loop = dest_loop; outer_loop != -1; | |
3021 | outer_loop = loop_outer_loop[outer_loop]) | |
3022 | if (outer_loop == loop_num) | |
3023 | break; | |
3024 | } | |
3025 | else | |
3026 | outer_loop = -1; | |
3027 | ||
3028 | if (loop_num != -1 && outer_loop == -1) | |
b4ad7b23 RS |
3029 | { |
3030 | LABEL_OUTSIDE_LOOP_P (x) = 1; | |
3031 | LABEL_NEXTREF (x) = loop_number_exit_labels[loop_num]; | |
3032 | loop_number_exit_labels[loop_num] = x; | |
353127c2 RK |
3033 | |
3034 | for (outer_loop = loop_num; | |
3035 | outer_loop != -1 && outer_loop != dest_loop; | |
3036 | outer_loop = loop_outer_loop[outer_loop]) | |
3037 | loop_number_exit_count[outer_loop]++; | |
b4ad7b23 RS |
3038 | } |
3039 | ||
3040 | /* If this is inside a loop, but not in the current loop or one enclosed | |
3041 | by it, it invalidates at least one loop. */ | |
3042 | ||
3043 | if (dest_loop == -1) | |
3044 | return; | |
3045 | ||
3046 | /* We must invalidate every nested loop containing the target of this | |
3047 | label, except those that also contain the jump insn. */ | |
3048 | ||
3049 | for (; dest_loop != -1; dest_loop = loop_outer_loop[dest_loop]) | |
3050 | { | |
3051 | /* Stop when we reach a loop that also contains the jump insn. */ | |
3052 | for (outer_loop = loop_num; outer_loop != -1; | |
3053 | outer_loop = loop_outer_loop[outer_loop]) | |
3054 | if (dest_loop == outer_loop) | |
3055 | return; | |
3056 | ||
3057 | /* If we get here, we know we need to invalidate a loop. */ | |
3058 | if (loop_dump_stream && ! loop_invalid[dest_loop]) | |
3059 | fprintf (loop_dump_stream, | |
3060 | "\nLoop at %d ignored due to multiple entry points.\n", | |
3061 | INSN_UID (loop_number_loop_starts[dest_loop])); | |
3062 | ||
3063 | loop_invalid[dest_loop] = 1; | |
3064 | } | |
3065 | return; | |
3066 | ||
3067 | case SET: | |
3068 | /* If this is not setting pc, ignore. */ | |
3069 | if (SET_DEST (x) == pc_rtx) | |
3070 | mark_loop_jump (SET_SRC (x), loop_num); | |
3071 | return; | |
3072 | ||
3073 | case IF_THEN_ELSE: | |
3074 | mark_loop_jump (XEXP (x, 1), loop_num); | |
3075 | mark_loop_jump (XEXP (x, 2), loop_num); | |
3076 | return; | |
3077 | ||
3078 | case PARALLEL: | |
3079 | case ADDR_VEC: | |
3080 | for (i = 0; i < XVECLEN (x, 0); i++) | |
3081 | mark_loop_jump (XVECEXP (x, 0, i), loop_num); | |
3082 | return; | |
3083 | ||
3084 | case ADDR_DIFF_VEC: | |
3085 | for (i = 0; i < XVECLEN (x, 1); i++) | |
3086 | mark_loop_jump (XVECEXP (x, 1, i), loop_num); | |
3087 | return; | |
3088 | ||
3089 | default: | |
c4ae2725 JL |
3090 | /* Strictly speaking this is not a jump into the loop, only a possible |
3091 | jump out of the loop. However, we have no way to link the destination | |
3092 | of this jump onto the list of exit labels. To be safe we mark this | |
3093 | loop and any containing loops as invalid. */ | |
b6ccc3fb | 3094 | if (loop_num != -1) |
353127c2 | 3095 | { |
353127c2 RK |
3096 | for (outer_loop = loop_num; outer_loop != -1; |
3097 | outer_loop = loop_outer_loop[outer_loop]) | |
c4ae2725 JL |
3098 | { |
3099 | if (loop_dump_stream && ! loop_invalid[outer_loop]) | |
3100 | fprintf (loop_dump_stream, | |
3101 | "\nLoop at %d ignored due to unknown exit jump.\n", | |
3102 | INSN_UID (loop_number_loop_starts[outer_loop])); | |
3103 | loop_invalid[outer_loop] = 1; | |
3104 | } | |
353127c2 | 3105 | } |
b6ccc3fb | 3106 | return; |
b4ad7b23 RS |
3107 | } |
3108 | } | |
3109 | \f | |
3110 | /* Return nonzero if there is a label in the range from | |
3111 | insn INSN to and including the insn whose luid is END. | |
3112 | INSN must have an assigned luid (i.e., it must not have | |
3113 | been previously created by loop.c). */ | |
3114 | ||
3115 | static int | |
3116 | labels_in_range_p (insn, end) | |
3117 | rtx insn; | |
3118 | int end; | |
3119 | { | |
3120 | while (insn && INSN_LUID (insn) <= end) | |
3121 | { | |
3122 | if (GET_CODE (insn) == CODE_LABEL) | |
3123 | return 1; | |
3124 | insn = NEXT_INSN (insn); | |
3125 | } | |
3126 | ||
3127 | return 0; | |
3128 | } | |
3129 | ||
3130 | /* Record that a memory reference X is being set. */ | |
3131 | ||
3132 | static void | |
693e265f | 3133 | note_addr_stored (x, y) |
b4ad7b23 | 3134 | rtx x; |
693e265f | 3135 | rtx y ATTRIBUTE_UNUSED; |
b4ad7b23 | 3136 | { |
b4ad7b23 RS |
3137 | if (x == 0 || GET_CODE (x) != MEM) |
3138 | return; | |
3139 | ||
3140 | /* Count number of memory writes. | |
3141 | This affects heuristics in strength_reduce. */ | |
3142 | num_mem_sets++; | |
3143 | ||
ca800983 RK |
3144 | /* BLKmode MEM means all memory is clobbered. */ |
3145 | if (GET_MODE (x) == BLKmode) | |
3146 | unknown_address_altered = 1; | |
3147 | ||
b4ad7b23 RS |
3148 | if (unknown_address_altered) |
3149 | return; | |
3150 | ||
5026a502 | 3151 | loop_store_mems = gen_rtx_EXPR_LIST (VOIDmode, x, loop_store_mems); |
b4ad7b23 RS |
3152 | } |
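/* Illustrative sketch (not from the original file): this callback is
   meant to be driven by note_stores over each insn pattern in the loop,
   in the same way record_initial is used further below.  */
#if 0
for (insn = loop_start; insn != loop_end; insn = NEXT_INSN (insn))
  if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
    note_stores (PATTERN (insn), note_addr_stored);
#endif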
3153 | \f | |
3154 | /* Return nonzero if the rtx X is invariant over the current loop. | |
3155 | ||
3156 | The value is 2 if we refer to something only conditionally invariant. | |
3157 | ||
3158 | If `unknown_address_altered' is nonzero, no memory ref is invariant. | |
3159 | Otherwise, a memory ref is invariant if it does not conflict with | |
3160 | anything stored in `loop_store_mems'. */ | |
3161 | ||
3162 | int | |
3163 | invariant_p (x) | |
3164 | register rtx x; | |
3165 | { | |
3166 | register int i; | |
3167 | register enum rtx_code code; | |
6f7d635c | 3168 | register const char *fmt; |
b4ad7b23 | 3169 | int conditional = 0; |
5026a502 | 3170 | rtx mem_list_entry; |
b4ad7b23 RS |
3171 | |
3172 | if (x == 0) | |
3173 | return 1; | |
3174 | code = GET_CODE (x); | |
3175 | switch (code) | |
3176 | { | |
3177 | case CONST_INT: | |
3178 | case CONST_DOUBLE: | |
3179 | case SYMBOL_REF: | |
3180 | case CONST: | |
3181 | return 1; | |
3182 | ||
3183 | case LABEL_REF: | |
3184 | /* A LABEL_REF is normally invariant, however, if we are unrolling | |
3185 | loops, and this label is inside the loop, then it isn't invariant. | |
3186 | This is because each unrolled copy of the loop body will have | |
3187 | a copy of this label. If this was invariant, then an insn loading | |
3188 | the address of this label into a register might get moved outside | |
3189 | the loop, and then each loop body would end up using the same label. | |
3190 | ||
3191 | We don't know the loop bounds here though, so just fail for all | |
3192 | labels. */ | |
81797aba | 3193 | if (flag_unroll_loops) |
b4ad7b23 RS |
3194 | return 0; |
3195 | else | |
3196 | return 1; | |
3197 | ||
3198 | case PC: | |
3199 | case CC0: | |
3200 | case UNSPEC_VOLATILE: | |
3201 | return 0; | |
3202 | ||
3203 | case REG: | |
3204 | /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid | |
3205 | since the reg might be set by initialization within the loop. */ | |
1f027d54 RK |
3206 | |
3207 | if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx | |
3208 | || x == arg_pointer_rtx) | |
3209 | && ! current_function_has_nonlocal_goto) | |
b4ad7b23 | 3210 | return 1; |
1f027d54 | 3211 | |
3c748bb6 | 3212 | if (this_loop_info.has_call |
b4ad7b23 RS |
3213 | && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)]) |
3214 | return 0; | |
1f027d54 | 3215 | |
4b259e3f | 3216 | if (VARRAY_INT (set_in_loop, REGNO (x)) < 0) |
b4ad7b23 | 3217 | return 2; |
1f027d54 | 3218 | |
4b259e3f | 3219 | return VARRAY_INT (set_in_loop, REGNO (x)) == 0; |
b4ad7b23 RS |
3220 | |
3221 | case MEM: | |
667a4593 JW |
3222 | /* Volatile memory references must be rejected. Do this before |
3223 | checking for read-only items, so that volatile read-only items | |
3224 | will be rejected also. */ | |
3225 | if (MEM_VOLATILE_P (x)) | |
3226 | return 0; | |
3227 | ||
b4ad7b23 RS |
3228 | /* Read-only items (such as constants in a constant pool) are |
3229 | invariant if their address is. */ | |
3230 | if (RTX_UNCHANGING_P (x)) | |
3231 | break; | |
3232 | ||
5026a502 JL |
3233 | /* If we had a subroutine call, any location in memory could have been |
3234 | clobbered. */ | |
667a4593 | 3235 | if (unknown_address_altered) |
b4ad7b23 RS |
3236 | return 0; |
3237 | ||
3238 | /* See if there is any dependence between a store and this load. */ | |
5026a502 JL |
3239 | mem_list_entry = loop_store_mems; |
3240 | while (mem_list_entry) | |
3241 | { | |
3242 | if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode, | |
3243 | x, rtx_varies_p)) | |
3244 | return 0; | |
3245 | mem_list_entry = XEXP (mem_list_entry, 1); | |
3246 | } | |
b4ad7b23 RS |
3247 | |
3248 | /* It's not invalidated by a store in memory | |
3249 | but we must still verify the address is invariant. */ | |
3250 | break; | |
3251 | ||
3252 | case ASM_OPERANDS: | |
3253 | /* Don't mess with insns declared volatile. */ | |
3254 | if (MEM_VOLATILE_P (x)) | |
3255 | return 0; | |
e9a25f70 JL |
3256 | break; |
3257 | ||
3258 | default: | |
3259 | break; | |
b4ad7b23 RS |
3260 | } |
3261 | ||
3262 | fmt = GET_RTX_FORMAT (code); | |
3263 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
3264 | { | |
3265 | if (fmt[i] == 'e') | |
3266 | { | |
3267 | int tem = invariant_p (XEXP (x, i)); | |
3268 | if (tem == 0) | |
3269 | return 0; | |
3270 | if (tem == 2) | |
3271 | conditional = 1; | |
3272 | } | |
3273 | else if (fmt[i] == 'E') | |
3274 | { | |
3275 | register int j; | |
3276 | for (j = 0; j < XVECLEN (x, i); j++) | |
3277 | { | |
3278 | int tem = invariant_p (XVECEXP (x, i, j)); | |
3279 | if (tem == 0) | |
3280 | return 0; | |
3281 | if (tem == 2) | |
3282 | conditional = 1; | |
3283 | } | |
3284 | ||
3285 | } | |
3286 | } | |
3287 | ||
3288 | return 1 + conditional; | |
3289 | } | |
3290 | ||
b4ad7b23 RS |
3291 | \f |
3292 | /* Return nonzero if all the insns in the loop that set REG | |
3293 | are INSN and the immediately following insns, | |
3294 | and if each of those insns sets REG in an invariant way | |
3295 | (not counting uses of REG in them). | |
3296 | ||
3297 | The value is 2 if some of these insns are only conditionally invariant. | |
3298 | ||
3299 | We assume that INSN itself is the first set of REG | |
3300 | and that its source is invariant. */ | |
3301 | ||
3302 | static int | |
3303 | consec_sets_invariant_p (reg, n_sets, insn) | |
3304 | int n_sets; | |
3305 | rtx reg, insn; | |
3306 | { | |
3307 | register rtx p = insn; | |
3308 | register int regno = REGNO (reg); | |
3309 | rtx temp; | |
3310 | /* Number of sets we have to insist on finding after INSN. */ | |
3311 | int count = n_sets - 1; | |
4b259e3f | 3312 | int old = VARRAY_INT (set_in_loop, regno); |
b4ad7b23 RS |
3313 | int value = 0; |
3314 | int this; | |
3315 | ||
3316 | /* If N_SETS hit the limit, we can't rely on its value. */ | |
3317 | if (n_sets == 127) | |
3318 | return 0; | |
3319 | ||
4b259e3f | 3320 | VARRAY_INT (set_in_loop, regno) = 0; |
b4ad7b23 RS |
3321 | |
3322 | while (count > 0) | |
3323 | { | |
3324 | register enum rtx_code code; | |
3325 | rtx set; | |
3326 | ||
3327 | p = NEXT_INSN (p); | |
3328 | code = GET_CODE (p); | |
3329 | ||
38e01259 | 3330 | /* If library call, skip to end of it. */ |
5fd8383e | 3331 | if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX))) |
b4ad7b23 RS |
3332 | p = XEXP (temp, 0); |
3333 | ||
3334 | this = 0; | |
3335 | if (code == INSN | |
3336 | && (set = single_set (p)) | |
3337 | && GET_CODE (SET_DEST (set)) == REG | |
3338 | && REGNO (SET_DEST (set)) == regno) | |
3339 | { | |
3340 | this = invariant_p (SET_SRC (set)); | |
3341 | if (this != 0) | |
3342 | value |= this; | |
51723711 | 3343 | else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))) |
b4ad7b23 | 3344 | { |
83d90aac JW |
3345 | /* If this is a libcall, then any invariant REG_EQUAL note is OK. |
3346 | If this is an ordinary insn, then only CONSTANT_P REG_EQUAL | |
3347 | notes are OK. */ | |
3348 | this = (CONSTANT_P (XEXP (temp, 0)) | |
3349 | || (find_reg_note (p, REG_RETVAL, NULL_RTX) | |
3350 | && invariant_p (XEXP (temp, 0)))); | |
b4ad7b23 RS |
3351 | if (this != 0) |
3352 | value |= this; | |
3353 | } | |
3354 | } | |
3355 | if (this != 0) | |
3356 | count--; | |
3357 | else if (code != NOTE) | |
3358 | { | |
4b259e3f | 3359 | VARRAY_INT (set_in_loop, regno) = old; |
b4ad7b23 RS |
3360 | return 0; |
3361 | } | |
3362 | } | |
3363 | ||
4b259e3f | 3364 | VARRAY_INT (set_in_loop, regno) = old; |
b4ad7b23 RS |
3365 | /* If invariant_p ever returned 2, we return 2. */ |
3366 | return 1 + (value & 2); | |
3367 | } | |
3368 | ||
3369 | #if 0 | |
3370 | /* I don't think this condition is sufficient to allow INSN | |
3371 | to be moved, so we no longer test it. */ | |
3372 | ||
3373 | /* Return 1 if all insns in the basic block of INSN and following INSN | |
3374 | that set REG are invariant according to TABLE. */ | |
3375 | ||
3376 | static int | |
3377 | all_sets_invariant_p (reg, insn, table) | |
3378 | rtx reg, insn; | |
3379 | short *table; | |
3380 | { | |
3381 | register rtx p = insn; | |
3382 | register int regno = REGNO (reg); | |
3383 | ||
3384 | while (1) | |
3385 | { | |
3386 | register enum rtx_code code; | |
3387 | p = NEXT_INSN (p); | |
3388 | code = GET_CODE (p); | |
3389 | if (code == CODE_LABEL || code == JUMP_INSN) | |
3390 | return 1; | |
3391 | if (code == INSN && GET_CODE (PATTERN (p)) == SET | |
3392 | && GET_CODE (SET_DEST (PATTERN (p))) == REG | |
3393 | && REGNO (SET_DEST (PATTERN (p))) == regno) | |
3394 | { | |
3395 | if (!invariant_p (SET_SRC (PATTERN (p)), table)) | |
3396 | return 0; | |
3397 | } | |
3398 | } | |
3399 | } | |
3400 | #endif /* 0 */ | |
3401 | \f | |
3402 | /* Look at all uses (not sets) of registers in X. For each, if it is | |
3403 | the single use, set USAGE[REGNO] to INSN; if there was a previous use in | |
3404 | a different insn, set USAGE[REGNO] to const0_rtx. */ | |
3405 | ||
3406 | static void | |
3407 | find_single_use_in_loop (insn, x, usage) | |
3408 | rtx insn; | |
3409 | rtx x; | |
8deb8e2c | 3410 | varray_type usage; |
b4ad7b23 RS |
3411 | { |
3412 | enum rtx_code code = GET_CODE (x); | |
6f7d635c | 3413 | const char *fmt = GET_RTX_FORMAT (code); |
b4ad7b23 RS |
3414 | int i, j; |
3415 | ||
3416 | if (code == REG) | |
8deb8e2c MM |
3417 | VARRAY_RTX (usage, REGNO (x)) |
3418 | = (VARRAY_RTX (usage, REGNO (x)) != 0 | |
3419 | && VARRAY_RTX (usage, REGNO (x)) != insn) | |
b4ad7b23 RS |
3420 | ? const0_rtx : insn; |
3421 | ||
3422 | else if (code == SET) | |
3423 | { | |
3424 | /* Don't count SET_DEST if it is a REG; otherwise count things | |
3425 | in SET_DEST because if a register is partially modified, it won't | |
3426 | show up as a potential movable so we don't care how USAGE is set | |
3427 | for it. */ | |
3428 | if (GET_CODE (SET_DEST (x)) != REG) | |
3429 | find_single_use_in_loop (insn, SET_DEST (x), usage); | |
3430 | find_single_use_in_loop (insn, SET_SRC (x), usage); | |
3431 | } | |
3432 | else | |
3433 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
3434 | { | |
3435 | if (fmt[i] == 'e' && XEXP (x, i) != 0) | |
3436 | find_single_use_in_loop (insn, XEXP (x, i), usage); | |
3437 | else if (fmt[i] == 'E') | |
3438 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
3439 | find_single_use_in_loop (insn, XVECEXP (x, i, j), usage); | |
3440 | } | |
3441 | } | |
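/* Illustrative sketch (not from the original file): how a caller can
   interpret the resulting USAGE entries after the scan.  */
#if 0
rtx u = VARRAY_RTX (single_usage, regno);

if (u == 0)
  ;	/* regno is never used in the loop */
else if (u == const0_rtx)
  ;	/* regno is used in more than one insn */
else
  ;	/* regno is used exactly once, in insn U */
#endif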
3442 | \f | |
a4c3ddd8 BS |
3443 | /* Count and record any set in X which is contained in INSN. Update |
3444 | MAY_NOT_MOVE and LAST_SET for any register set in X. */ | |
3445 | ||
3446 | static void | |
3447 | count_one_set (insn, x, may_not_move, last_set) | |
3448 | rtx insn, x; | |
3449 | varray_type may_not_move; | |
3450 | rtx *last_set; | |
3451 | { | |
3452 | if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG) | |
3453 | /* Don't move a reg that has an explicit clobber. | |
3454 | It's not worth the pain to try to do it correctly. */ | |
3455 | VARRAY_CHAR (may_not_move, REGNO (XEXP (x, 0))) = 1; | |
3456 | ||
3457 | if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER) | |
3458 | { | |
3459 | rtx dest = SET_DEST (x); | |
3460 | while (GET_CODE (dest) == SUBREG | |
3461 | || GET_CODE (dest) == ZERO_EXTRACT | |
3462 | || GET_CODE (dest) == SIGN_EXTRACT | |
3463 | || GET_CODE (dest) == STRICT_LOW_PART) | |
3464 | dest = XEXP (dest, 0); | |
3465 | if (GET_CODE (dest) == REG) | |
3466 | { | |
3467 | register int regno = REGNO (dest); | |
3468 | /* If this is the first setting of this reg | |
3469 | in current basic block, and it was set before, | |
3470 | it must be set in two basic blocks, so it cannot | |
3471 | be moved out of the loop. */ | |
4b259e3f | 3472 | if (VARRAY_INT (set_in_loop, regno) > 0 |
a4c3ddd8 BS |
3473 | && last_set[regno] == 0) |
3474 | VARRAY_CHAR (may_not_move, regno) = 1; | |
3475 | /* If this is not first setting in current basic block, | |
3476 | see if reg was used in between previous one and this. | |
3477 | If so, neither one can be moved. */ | |
3478 | if (last_set[regno] != 0 | |
3479 | && reg_used_between_p (dest, last_set[regno], insn)) | |
3480 | VARRAY_CHAR (may_not_move, regno) = 1; | |
4b259e3f R |
3481 | if (VARRAY_INT (set_in_loop, regno) < 127) |
3482 | ++VARRAY_INT (set_in_loop, regno); | |
a4c3ddd8 BS |
3483 | last_set[regno] = insn; |
3484 | } | |
3485 | } | |
3486 | } | |
3487 | ||
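/* Illustrative sketch (not from the original file): a register set in
   two basic blocks, as detected above, may not be moved out of the
   loop.  `p', `use', `x' and `y' are hypothetical.  */
#if 0
for (i = 0; i < n; i++)
  {
    r = x;		/* first set, in this basic block */
    if (p[i])
      r = y;		/* second set, in another basic block */
    use (r);
  }
#endif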
4b259e3f | 3488 | /* Increment SET_IN_LOOP at the index of each register |
b4ad7b23 | 3489 | that is modified by an insn between FROM and TO. |
4b259e3f | 3490 | If the value of an element of SET_IN_LOOP becomes 127 or more, |
b4ad7b23 RS |
3491 | stop incrementing it, to avoid overflow. |
3492 | ||
3493 | Store in SINGLE_USAGE[I] the single insn in which register I is | |
3494 | used, if it is only used once. Otherwise, it is set to 0 (for no | |
3495 | uses) or const0_rtx for more than one use. This parameter may be zero, | |
3496 | in which case this processing is not done. | |
3497 | ||
3498 | Store in *COUNT_PTR the number of actual instructions | |
3499 | in the loop. We use this to decide what is worth moving out. */ | |
3500 | ||
3501 | /* last_set[n] is nonzero iff reg n has been set in the current basic block. | |
3502 | In that case, it is the insn that last set reg n. */ | |
3503 | ||
3504 | static void | |
3505 | count_loop_regs_set (from, to, may_not_move, single_usage, count_ptr, nregs) | |
3506 | register rtx from, to; | |
8deb8e2c MM |
3507 | varray_type may_not_move; |
3508 | varray_type single_usage; | |
b4ad7b23 RS |
3509 | int *count_ptr; |
3510 | int nregs; | |
3511 | { | |
3512 | register rtx *last_set = (rtx *) alloca (nregs * sizeof (rtx)); | |
3513 | register rtx insn; | |
3514 | register int count = 0; | |
b4ad7b23 | 3515 | |
4c9a05bc | 3516 | bzero ((char *) last_set, nregs * sizeof (rtx)); |
b4ad7b23 RS |
3517 | for (insn = from; insn != to; insn = NEXT_INSN (insn)) |
3518 | { | |
3519 | if (GET_RTX_CLASS (GET_CODE (insn)) == 'i') | |
3520 | { | |
3521 | ++count; | |
3522 | ||
d6b44532 RH |
3523 | /* Record registers that have exactly one use. */ |
3524 | find_single_use_in_loop (insn, PATTERN (insn), single_usage); | |
b4ad7b23 | 3525 | |
d6b44532 RH |
3526 | /* Include uses in REG_EQUAL notes. */ |
3527 | if (REG_NOTES (insn)) | |
3528 | find_single_use_in_loop (insn, REG_NOTES (insn), single_usage); | |
b4ad7b23 | 3529 | |
b4ad7b23 RS |
3530 | if (GET_CODE (PATTERN (insn)) == SET |
3531 | || GET_CODE (PATTERN (insn)) == CLOBBER) | |
a4c3ddd8 | 3532 | count_one_set (insn, PATTERN (insn), may_not_move, last_set); |
b4ad7b23 RS |
3533 | else if (GET_CODE (PATTERN (insn)) == PARALLEL) |
3534 | { | |
3535 | register int i; | |
3536 | for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--) | |
a4c3ddd8 BS |
3537 | count_one_set (insn, XVECEXP (PATTERN (insn), 0, i), |
3538 | may_not_move, last_set); | |
b4ad7b23 RS |
3539 | } |
3540 | } | |
4c9a05bc | 3541 | |
b4ad7b23 | 3542 | if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN) |
4c9a05bc | 3543 | bzero ((char *) last_set, nregs * sizeof (rtx)); |
b4ad7b23 RS |
3544 | } |
3545 | *count_ptr = count; | |
3546 | } | |
3547 | \f | |
3548 | /* Given a loop that is bounded by LOOP_START and LOOP_END | |
3549 | and that is entered at SCAN_START, | |
3550 | return 1 if the register set in SET contained in insn INSN is used by | |
3551 | any insn that precedes INSN in cyclic order starting | |
3552 | from the loop entry point. | |
3553 | ||
3554 | We don't want to use INSN_LUID here because if we restrict INSN to those | |
3555 | that have a valid INSN_LUID, it means we cannot move an invariant out | |
3556 | from an inner loop past two loops. */ | |
3557 | ||
3558 | static int | |
3559 | loop_reg_used_before_p (set, insn, loop_start, scan_start, loop_end) | |
3560 | rtx set, insn, loop_start, scan_start, loop_end; | |
3561 | { | |
3562 | rtx reg = SET_DEST (set); | |
3563 | rtx p; | |
3564 | ||
3565 | /* Scan forward checking for register usage. If we hit INSN, we | |
3566 | are done. Otherwise, if we hit LOOP_END, wrap around to LOOP_START. */ | |
3567 | for (p = scan_start; p != insn; p = NEXT_INSN (p)) | |
3568 | { | |
3569 | if (GET_RTX_CLASS (GET_CODE (p)) == 'i' | |
3570 | && reg_overlap_mentioned_p (reg, PATTERN (p))) | |
3571 | return 1; | |
3572 | ||
3573 | if (p == loop_end) | |
3574 | p = loop_start; | |
3575 | } | |
3576 | ||
3577 | return 0; | |
3578 | } | |
3579 | \f | |
3580 | /* A "basic induction variable" or biv is a pseudo reg that is set | |
3581 | (within this loop) only by incrementing or decrementing it. */ | |
3582 | /* A "general induction variable" or giv is a pseudo reg whose | |
3583 | value is a linear function of a biv. */ | |
3584 | ||
3585 | /* Bivs are recognized by `basic_induction_var'; | |
45f97e2e | 3586 | Givs by `general_induction_var'. */ |
b4ad7b23 RS |
3587 | |
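/* Illustrative sketch (not from the original file): in a loop such as
   the one below, `i' is a biv (set only by adding a constant), while
   the address a + 4*i computed for `a[i]' is a giv -- a linear function
   of the biv.  Assumes 4-byte ints; `a' and `n' are hypothetical.  */
#if 0
int i;
for (i = 0; i < n; i++)
  a[i] = 0;	/* &a[i] == a + 4*i is a giv of the biv i */
#endif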
3588 | /* Indexed by register number, indicates whether or not register is an | |
3589 | induction variable, and if so what type. */ | |
3590 | ||
3ec2b590 | 3591 | varray_type reg_iv_type; |
b4ad7b23 RS |
3592 | |
3593 | /* Indexed by register number, contains pointer to `struct induction' | |
3594 | if register is an induction variable. This holds general info for | |
3595 | all induction variables. */ | |
3596 | ||
3ec2b590 | 3597 | varray_type reg_iv_info; |
b4ad7b23 RS |
3598 | |
3599 | /* Indexed by register number, contains pointer to `struct iv_class' | |
3600 | if register is a basic induction variable. This holds info describing | |
3601 | the class (a related group) of induction variables that the biv belongs | |
3602 | to. */ | |
3603 | ||
3604 | struct iv_class **reg_biv_class; | |
3605 | ||
3606 | /* The head of a list which links together (via the next field) | |
3607 | every iv class for the current loop. */ | |
3608 | ||
3609 | struct iv_class *loop_iv_list; | |
3610 | ||
3ec2b590 R |
3611 | /* Givs made from biv increments are always splittable for loop unrolling. |
3612 | Since there is no regscan info for them, we have to keep track of them | |
3613 | separately. */ | |
3614 | int first_increment_giv, last_increment_giv; | |
3615 | ||
b4ad7b23 RS |
3616 | /* Communication with routines called via `note_stores'. */ |
3617 | ||
3618 | static rtx note_insn; | |
3619 | ||
3620 | /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */ | |
3621 | ||
3622 | static rtx addr_placeholder; | |
3623 | ||
3624 | /* ??? Unfinished optimizations, and possible future optimizations, | |
3625 | for the strength reduction code. */ | |
3626 | ||
b4ad7b23 | 3627 | /* ??? The interaction of biv elimination, and recognition of 'constant' |
0f41302f | 3628 | bivs, may cause problems. */ |
b4ad7b23 RS |
3629 | |
3630 | /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause | |
3631 | performance problems. | |
3632 | ||
3633 | Perhaps don't eliminate things that can be combined with an addressing | |
3634 | mode. Find all givs that have the same biv, mult_val, and add_val; | |
3635 | then for each giv, check to see if its only use dies in a following | |
3636 | memory address. If so, generate a new memory address and check to see | |
3637 | if it is valid. If it is valid, then store the modified memory address, | |
3638 | otherwise, mark the giv as not done so that it will get its own iv. */ | |
3639 | ||
3640 | /* ??? Could try to optimize branches when it is known that a biv is always | |
3641 | positive. */ | |
3642 | ||
3643 | /* ??? When replacing a biv in a compare insn, we should replace it with the closest | |
3644 | giv so that an optimized branch can still be recognized by the combiner, | |
3645 | e.g. the VAX acb insn. */ | |
3646 | ||
3647 | /* ??? Many of the checks involving uid_luid could be simplified if regscan | |
3648 | was rerun in loop_optimize whenever a register was added or moved. | |
3649 | Also, some of the optimizations could be a little less conservative. */ | |
3650 | \f | |
41a972a9 | 3651 | /* Perform strength reduction and induction variable elimination. |
b4ad7b23 | 3652 | |
41a972a9 | 3653 | Pseudo registers created during this function will be beyond the last |
b4ad7b23 RS |
3654 | valid index in several tables including n_times_set and regno_last_uid. |
3655 | This does not cause a problem here, because the added registers cannot be | |
3656 | givs outside of their loop, and hence will never be reconsidered. | |
41a972a9 MM |
3657 | But scan_loop must check regnos to make sure they are in bounds. |
3658 | ||
3659 | SCAN_START is the first instruction in the loop, as the loop would | |
3660 | actually be executed. END is the NOTE_INSN_LOOP_END. LOOP_TOP is | |
3661 | the first instruction in the loop, as it is laid out in the | |
6dd49eb4 R |
3662 | instruction stream. LOOP_START is the NOTE_INSN_LOOP_BEG. |
3663 | LOOP_CONT is the NOTE_INSN_LOOP_CONT. */ | |
b4ad7b23 RS |
3664 | |
3665 | static void | |
3666 | strength_reduce (scan_start, end, loop_top, insn_count, | |
3c748bb6 | 3667 | loop_start, loop_end, loop_info, loop_cont, unroll_p, bct_p) |
b4ad7b23 RS |
3668 | rtx scan_start; |
3669 | rtx end; | |
3670 | rtx loop_top; | |
3671 | int insn_count; | |
3672 | rtx loop_start; | |
3673 | rtx loop_end; | |
3c748bb6 | 3674 | struct loop_info *loop_info; |
6dd49eb4 | 3675 | rtx loop_cont; |
d46965b9 | 3676 | int unroll_p, bct_p ATTRIBUTE_UNUSED; |
b4ad7b23 RS |
3677 | { |
3678 | rtx p; | |
3679 | rtx set; | |
3680 | rtx inc_val; | |
3681 | rtx mult_val; | |
3682 | rtx dest_reg; | |
3ec2b590 | 3683 | rtx *location; |
b4ad7b23 RS |
3684 | /* This is 1 if current insn is not executed at least once for every loop |
3685 | iteration. */ | |
3686 | int not_every_iteration = 0; | |
7dcd3836 RK |
3687 | /* This is 1 if current insn may be executed more than once for every |
3688 | loop iteration. */ | |
3689 | int maybe_multiple = 0; | |
ae188a87 JL |
3690 | /* This is 1 if we have past a branch back to the top of the loop |
3691 | (aka a loop latch). */ | |
3692 | int past_loop_latch = 0; | |
b4ad7b23 RS |
3693 | /* Temporary list pointers for traversing loop_iv_list. */ |
3694 | struct iv_class *bl, **backbl; | |
3695 | /* Ratio of extra register life span we can justify | |
3696 | for saving an instruction. More if loop doesn't call subroutines | |
3697 | since in that case saving an insn makes more difference | |
3698 | and more registers are available. */ | |
3699 | /* ??? could set this to last value of threshold in move_movables */ | |
3c748bb6 | 3700 | int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs); |
b4ad7b23 RS |
3701 | /* Map of pseudo-register replacements. */ |
3702 | rtx *reg_map; | |
97ec0ad8 | 3703 | int reg_map_size; |
b4ad7b23 RS |
3704 | int call_seen; |
3705 | rtx test; | |
3706 | rtx end_insert_before; | |
5ea7a4ae | 3707 | int loop_depth = 0; |
3ec2b590 | 3708 | int n_extra_increment; |
73049ebc | 3709 | int unrolled_insn_copies; |
b4ad7b23 | 3710 | |
5353610b R |
3711 | /* If scan_start points to the loop exit test, we have to be wary of |
3712 | subversive use of gotos inside expression statements. */ | |
3713 | if (prev_nonnote_insn (scan_start) != prev_nonnote_insn (loop_start)) | |
3714 | maybe_multiple = back_branch_in_range_p (scan_start, loop_start, loop_end); | |
3715 | ||
3ec2b590 R |
3716 | VARRAY_INT_INIT (reg_iv_type, max_reg_before_loop, "reg_iv_type"); |
3717 | VARRAY_GENERIC_PTR_INIT (reg_iv_info, max_reg_before_loop, "reg_iv_info"); | |
b4ad7b23 RS |
3718 | reg_biv_class = (struct iv_class **) |
3719 | alloca (max_reg_before_loop * sizeof (struct iv_class *)); | |
3720 | bzero ((char *) reg_biv_class, (max_reg_before_loop | |
3721 | * sizeof (struct iv_class *))); | |
3722 | ||
3723 | loop_iv_list = 0; | |
3724 | addr_placeholder = gen_reg_rtx (Pmode); | |
3725 | ||
3726 | /* Save insn immediately after the loop_end. Insns inserted after loop_end | |
3727 | must be put before this insn, so that they will appear in the right | |
b2586fe0 | 3728 | order (i.e. loop order). |
b4ad7b23 | 3729 | |
b2586fe0 JL |
3730 | If loop_end is the end of the current function, then emit a |
3731 | NOTE_INSN_DELETED after loop_end and set end_insert_before to the | |
3732 | dummy note insn. */ | |
3733 | if (NEXT_INSN (loop_end) != 0) | |
3734 | end_insert_before = NEXT_INSN (loop_end); | |
3735 | else | |
3736 | end_insert_before = emit_note_after (NOTE_INSN_DELETED, loop_end); | |
b4ad7b23 RS |
3737 | |
3738 | /* Scan through loop to find all possible bivs. */ | |
3739 | ||
41a972a9 MM |
3740 | for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top); |
3741 | p != NULL_RTX; | |
3742 | p = next_insn_in_loop (p, scan_start, end, loop_top)) | |
b4ad7b23 | 3743 | { |
b4ad7b23 RS |
3744 | if (GET_CODE (p) == INSN |
3745 | && (set = single_set (p)) | |
3746 | && GET_CODE (SET_DEST (set)) == REG) | |
3747 | { | |
3748 | dest_reg = SET_DEST (set); | |
3749 | if (REGNO (dest_reg) < max_reg_before_loop | |
3750 | && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER | |
3ec2b590 | 3751 | && REG_IV_TYPE (REGNO (dest_reg)) != NOT_BASIC_INDUCT) |
b4ad7b23 | 3752 | { |
7056f7e8 | 3753 | if (basic_induction_var (SET_SRC (set), GET_MODE (SET_SRC (set)), |
3ec2b590 R |
3754 | dest_reg, p, &inc_val, &mult_val, |
3755 | &location)) | |
b4ad7b23 RS |
3756 | { |
3757 | /* It is a possible basic induction variable. | |
3758 | Create and initialize an induction structure for it. */ | |
3759 | ||
3760 | struct induction *v | |
3761 | = (struct induction *) alloca (sizeof (struct induction)); | |
3762 | ||
3ec2b590 | 3763 | record_biv (v, p, dest_reg, inc_val, mult_val, location, |
7dcd3836 | 3764 | not_every_iteration, maybe_multiple); |
3ec2b590 | 3765 | REG_IV_TYPE (REGNO (dest_reg)) = BASIC_INDUCT; |
b4ad7b23 RS |
3766 | } |
3767 | else if (REGNO (dest_reg) < max_reg_before_loop) | |
3ec2b590 | 3768 | REG_IV_TYPE (REGNO (dest_reg)) = NOT_BASIC_INDUCT; |
b4ad7b23 RS |
3769 | } |
3770 | } | |
3771 | ||
7dcd3836 RK |
3772 | /* Past CODE_LABEL, we get to insns that may be executed multiple |
3773 | times. The only way we can be sure that they can't is if every | |
38e01259 | 3774 | jump insn between here and the end of the loop either |
5353610b R |
3775 | returns, exits the loop, is a jump to a location that is still |
3776 | behind the label, or is a jump to the loop start. */ | |
7dcd3836 RK |
3777 | |
3778 | if (GET_CODE (p) == CODE_LABEL) | |
3779 | { | |
3780 | rtx insn = p; | |
3781 | ||
3782 | maybe_multiple = 0; | |
3783 | ||
3784 | while (1) | |
3785 | { | |
3786 | insn = NEXT_INSN (insn); | |
3787 | if (insn == scan_start) | |
3788 | break; | |
3789 | if (insn == end) | |
3790 | { | |
3791 | if (loop_top != 0) | |
f67ff5de | 3792 | insn = loop_top; |
7dcd3836 RK |
3793 | else |
3794 | break; | |
3795 | if (insn == scan_start) | |
3796 | break; | |
3797 | } | |
3798 | ||
3799 | if (GET_CODE (insn) == JUMP_INSN | |
3800 | && GET_CODE (PATTERN (insn)) != RETURN | |
3801 | && (! condjump_p (insn) | |
3802 | || (JUMP_LABEL (insn) != 0 | |
8516af93 | 3803 | && JUMP_LABEL (insn) != scan_start |
1cb1fe66 | 3804 | && ! loop_insn_first_p (p, JUMP_LABEL (insn))))) |
8516af93 JW |
3805 | { |
3806 | maybe_multiple = 1; | |
3807 | break; | |
3808 | } | |
7dcd3836 RK |
3809 | } |
3810 | } | |
3811 | ||
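/* Illustrative sketch (not from the original file): past the label of
   an inner loop, insns may execute several times per iteration of the
   outer loop, which is what MAYBE_MULTIPLE records.  `step' and
   `retry' are hypothetical.  */
#if 0
for (i = 0; i < n; i++)
  do
    step ();	/* may run more than once per outer iteration */
  while (retry ());
#endif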
8516af93 JW |
3812 | /* Past a jump, we get to insns for which we can't count |
3813 | on whether they will be executed during each iteration. */ | |
3814 | /* This code appears twice in strength_reduce. There is also similar | |
3815 | code in scan_loop. */ | |
3816 | if (GET_CODE (p) == JUMP_INSN | |
b4ad7b23 RS |
3817 | /* If we enter the loop in the middle, and scan around to the |
3818 | beginning, don't set not_every_iteration for that. | |
3819 | This can be any kind of jump, since we want to know if insns | |
3820 | will be executed if the loop is executed. */ | |
8516af93 | 3821 | && ! (JUMP_LABEL (p) == loop_top |
b4ad7b23 RS |
3822 | && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p)) |
3823 | || (NEXT_INSN (p) == loop_end && condjump_p (p))))) | |
8516af93 JW |
3824 | { |
3825 | rtx label = 0; | |
3826 | ||
3827 | /* If this is a jump outside the loop, then it also doesn't | |
3828 | matter. Check to see if the target of this branch is on the | |
3829 | loop_number_exit_labels list. */ | |
3830 | ||
3831 | for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]]; | |
3832 | label; | |
3833 | label = LABEL_NEXTREF (label)) | |
3834 | if (XEXP (label, 0) == JUMP_LABEL (p)) | |
3835 | break; | |
3836 | ||
3837 | if (! label) | |
3838 | not_every_iteration = 1; | |
3839 | } | |
b4ad7b23 | 3840 | |
5ea7a4ae JW |
3841 | else if (GET_CODE (p) == NOTE) |
3842 | { | |
3843 | /* At the virtual top of a converted loop, insns are again known to | |
3844 | be executed each iteration: logically, the loop begins here | |
5f3db57e JL |
3845 | even though the exit code has been duplicated. |
3846 | ||
3847 | Insns are also again known to be executed each iteration at | |
3848 | the LOOP_CONT note. */ | |
3849 | if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP | |
3850 | || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT) | |
3851 | && loop_depth == 0) | |
5ea7a4ae JW |
3852 | not_every_iteration = 0; |
3853 | else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG) | |
3854 | loop_depth++; | |
3855 | else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END) | |
3856 | loop_depth--; | |
3857 | } | |
b4ad7b23 | 3858 | |
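/* Illustrative sketch (not from the original file): in the handling
   above, a conditional jump inside the loop means the insns after it
   are not known to execute on every iteration.  `skip' is
   hypothetical.  */
#if 0
for (i = 0; i < n; i++)
  {
    if (skip (i))
      continue;		/* jump within the loop */
    a[i] = x * y;	/* not executed every iteration */
  }
#endif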
ae188a87 JL |
3859 | /* Note if we pass a loop latch. If we do, then we can not clear |
3860 | NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in | |
3861 | a loop since a jump before the last CODE_LABEL may have started | |
3862 | a new loop iteration. | |
3863 | ||
3864 | Note that LOOP_TOP is only set for rotated loops and we need | |
3865 | this check for all loops, so compare against the CODE_LABEL | |
3866 | which immediately follows LOOP_START. */ | |
3867 | if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == NEXT_INSN (loop_start)) | |
3868 | past_loop_latch = 1; | |
3869 | ||
b4ad7b23 RS |
3870 | /* Unlike in the code motion pass where MAYBE_NEVER indicates that |
3871 | an insn may never be executed, NOT_EVERY_ITERATION indicates whether | |
3872 | or not an insn is known to be executed each iteration of the | |
3873 | loop, whether or not any iterations are known to occur. | |
3874 | ||
3875 | Therefore, if we have just passed a label and have no more labels | |
ae188a87 JL |
3876 | between here and the test insn of the loop, and we have not passed |
3877 | a jump to the top of the loop, then we know these insns will be | |
3878 | executed each iteration. */ | |
b4ad7b23 | 3879 | |
ae188a87 JL |
3880 | if (not_every_iteration |
3881 | && ! past_loop_latch | |
3882 | && GET_CODE (p) == CODE_LABEL | |
6dd49eb4 | 3883 | && no_labels_between_p (p, loop_end) |
1cb1fe66 | 3884 | && loop_insn_first_p (p, loop_cont)) |
b4ad7b23 RS |
3885 | not_every_iteration = 0; |
3886 | } | |
3887 | ||
3888 | /* Scan loop_iv_list to remove all regs that proved not to be bivs. | |
3889 | Make a sanity check against n_times_set. */ | |
3890 | for (backbl = &loop_iv_list, bl = *backbl; bl; bl = bl->next) | |
3891 | { | |
3ec2b590 | 3892 | if (REG_IV_TYPE (bl->regno) != BASIC_INDUCT |
b4ad7b23 RS |
3893 | /* Above happens if register modified by subreg, etc. */ |
3894 | /* Make sure it is not recognized as a basic induction var: */ | |
8deb8e2c | 3895 | || VARRAY_INT (n_times_set, bl->regno) != bl->biv_count |
b4ad7b23 RS |
3896 | /* If never incremented, it is invariant that we decided not to |
3897 | move. So leave it alone. */ | |
3898 | || ! bl->incremented) | |
3899 | { | |
3900 | if (loop_dump_stream) | |
3901 | fprintf (loop_dump_stream, "Reg %d: biv discarded, %s\n", | |
3902 | bl->regno, | |
3ec2b590 | 3903 | (REG_IV_TYPE (bl->regno) != BASIC_INDUCT |
b4ad7b23 RS |
3904 | ? "not induction variable" |
3905 | : (! bl->incremented ? "never incremented" | |
3906 | : "count error"))); | |
3907 | ||
3ec2b590 | 3908 | REG_IV_TYPE (bl->regno) = NOT_BASIC_INDUCT; |
b4ad7b23 RS |
3909 | *backbl = bl->next; |
3910 | } | |
3911 | else | |
3912 | { | |
3913 | backbl = &bl->next; | |
3914 | ||
3915 | if (loop_dump_stream) | |
3916 | fprintf (loop_dump_stream, "Reg %d: biv verified\n", bl->regno); | |
3917 | } | |
3918 | } | |
3919 | ||
3920 | /* Exit if there are no bivs. */ | |
3921 | if (! loop_iv_list) | |
3922 | { | |
3923 | /* Can still unroll the loop anyways, but indicate that there is no | |
3924 | strength reduction info available. */ | |
81797aba | 3925 | if (unroll_p) |
302670f3 MH |
3926 | unroll_loop (loop_end, insn_count, loop_start, end_insert_before, |
3927 | loop_info, 0); | |
b4ad7b23 RS |
3928 | |
3929 | return; | |
3930 | } | |
3931 | ||
3932 | /* Find initial value for each biv by searching backwards from loop_start, | |
3933 | halting at first label. Also record any test condition. */ | |
3934 | ||
3935 | call_seen = 0; | |
3936 | for (p = loop_start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p)) | |
3937 | { | |
3938 | note_insn = p; | |
3939 | ||
3940 | if (GET_CODE (p) == CALL_INSN) | |
3941 | call_seen = 1; | |
3942 | ||
3943 | if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN | |
3944 | || GET_CODE (p) == CALL_INSN) | |
3945 | note_stores (PATTERN (p), record_initial); | |
3946 | ||
3947 | /* Record any test of a biv that branches around the loop if no store | |
3948 | between it and the start of loop. We only care about tests with | |
3949 | constants and registers and only certain of those. */ | |
3950 | if (GET_CODE (p) == JUMP_INSN | |
3951 | && JUMP_LABEL (p) != 0 | |
3952 | && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop_end) | |
3953 | && (test = get_condition_for_loop (p)) != 0 | |
3954 | && GET_CODE (XEXP (test, 0)) == REG | |
3955 | && REGNO (XEXP (test, 0)) < max_reg_before_loop | |
3956 | && (bl = reg_biv_class[REGNO (XEXP (test, 0))]) != 0 | |
3957 | && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop_start) | |
3958 | && bl->init_insn == 0) | |
3959 | { | |
3960 | /* If an NE test, we have an initial value! */ | |
3961 | if (GET_CODE (test) == NE) | |
3962 | { | |
3963 | bl->init_insn = p; | |
38a448ca RH |
3964 | bl->init_set = gen_rtx_SET (VOIDmode, |
3965 | XEXP (test, 0), XEXP (test, 1)); | |
b4ad7b23 RS |
3966 | } |
3967 | else | |
3968 | bl->initial_test = test; | |
3969 | } | |
3970 | } | |
3971 | ||
3972 | /* Look at each biv and see if we can say anything better about its | |
3973 | initial value from any initializing insns set up above. (This is done | |
3974 | in two passes to avoid missing SETs in a PARALLEL.) */ | |
53dc05e4 | 3975 | for (backbl = &loop_iv_list; (bl = *backbl); backbl = &bl->next) |
b4ad7b23 RS |
3976 | { |
3977 | rtx src; | |
956d6950 | 3978 | rtx note; |
b4ad7b23 RS |
3979 | |
3980 | if (! bl->init_insn) | |
3981 | continue; | |
3982 | ||
956d6950 JL |
3983 | /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value |
3984 | is a constant, use the value of that. */ | |
3985 | if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL | |
3986 | && CONSTANT_P (XEXP (note, 0))) | |
3987 | || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL | |
3988 | && CONSTANT_P (XEXP (note, 0)))) | |
3989 | src = XEXP (note, 0); | |
3990 | else | |
3991 | src = SET_SRC (bl->init_set); | |
b4ad7b23 RS |
3992 | |
3993 | if (loop_dump_stream) | |
3994 | fprintf (loop_dump_stream, | |
3995 | "Biv %d initialized at insn %d: initial value ", | |
3996 | bl->regno, INSN_UID (bl->init_insn)); | |
3997 | ||
43a674af JW |
3998 | if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno]) |
3999 | || GET_MODE (src) == VOIDmode) | |
63d59526 | 4000 | && valid_initial_value_p (src, bl->init_insn, call_seen, loop_start)) |
b4ad7b23 RS |
4001 | { |
4002 | bl->initial_value = src; | |
4003 | ||
4004 | if (loop_dump_stream) | |
4005 | { | |
4006 | if (GET_CODE (src) == CONST_INT) | |
9ba7a303 JC |
4007 | { |
4008 | fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (src)); | |
4009 | fputc ('\n', loop_dump_stream); | |
4010 | } | |
b4ad7b23 RS |
4011 | else |
4012 | { | |
4013 | print_rtl (loop_dump_stream, src); | |
4014 | fprintf (loop_dump_stream, "\n"); | |
4015 | } | |
4016 | } | |
4017 | } | |
4018 | else | |
4019 | { | |
3ec2b590 | 4020 | struct iv_class *bl2 = 0; |
6a651371 | 4021 | rtx increment = NULL_RTX; |
3ec2b590 R |
4022 | |
4023 | /* Biv initial value is not a simple move. If it is the sum of | |
4024 | another biv and a constant, check if both bivs are incremented | |
4025 | in lockstep. Then we are actually looking at a giv. | |
4026 | For simplicity, we only handle the case where there is but a | |
4027 | single increment, and the register is not used elsewhere. */ | |
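/* Illustrative sketch (not from the original file): in source terms,
   `j' below starts as i + 4 and is incremented in lockstep with `i',
   so it is really the giv j = i + 4 rather than a second biv.  */
#if 0
j = i + 4;
for (; i < n; i++, j++)
  a[j] = b[i];
#endif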
4028 | if (bl->biv_count == 1 | |
4029 | && bl->regno < max_reg_before_loop | |
4030 | && uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end) | |
4031 | && GET_CODE (src) == PLUS | |
4032 | && GET_CODE (XEXP (src, 0)) == REG | |
4033 | && CONSTANT_P (XEXP (src, 1)) | |
4034 | && ((increment = biv_total_increment (bl, loop_start, loop_end)) | |
4035 | != NULL_RTX)) | |
4036 | { | |
4037 | int regno = REGNO (XEXP (src, 0)); | |
b4ad7b23 | 4038 | |
3ec2b590 R |
4039 | for (bl2 = loop_iv_list; bl2; bl2 = bl2->next) |
4040 | if (bl2->regno == regno) | |
4041 | break; | |
4042 | } | |
4043 | ||
4044 | /* Now, can we transform this biv into a giv? */ | |
4045 | if (bl2 | |
4046 | && bl2->biv_count == 1 | |
4047 | && rtx_equal_p (increment, | |
4048 | biv_total_increment (bl2, loop_start, loop_end)) | |
4049 | /* init_insn is only set to insns that are before loop_start | |
4050 | without any intervening labels. */ | |
4051 | && ! reg_set_between_p (bl2->biv->src_reg, | |
4052 | PREV_INSN (bl->init_insn), loop_start) | |
4053 | /* The register from BL2 must be set before the register from | |
4054 | BL is set, or we must be able to move the latter set after | |
4055 | the former set. Currently there can't be any labels | |
4056 | in-between when biv_total_increment returns nonzero both times | |
4057 | but we test it here in case some day some real cfg analysis | |
4058 | gets used to set always_computable. */ | |
1cb1fe66 | 4059 | && ((loop_insn_first_p (bl2->biv->insn, bl->biv->insn) |
3ec2b590 R |
4060 | && no_labels_between_p (bl2->biv->insn, bl->biv->insn)) |
4061 | || (! reg_used_between_p (bl->biv->src_reg, bl->biv->insn, | |
4062 | bl2->biv->insn) | |
4063 | && no_jumps_between_p (bl->biv->insn, bl2->biv->insn))) | |
4064 | && validate_change (bl->biv->insn, | |
4065 | &SET_SRC (single_set (bl->biv->insn)), | |
4066 | copy_rtx (src), 0)) | |
4067 | { | |
4068 | int loop_num = uid_loop_num[INSN_UID (loop_start)]; | |
4069 | rtx dominator = loop_number_cont_dominator[loop_num]; | |
3ec2b590 R |
4070 | rtx giv = bl->biv->src_reg; |
4071 | rtx giv_insn = bl->biv->insn; | |
4072 | rtx after_giv = NEXT_INSN (giv_insn); | |
4073 | ||
4074 | if (loop_dump_stream) | |
4075 | fprintf (loop_dump_stream, "is giv of biv %d\n", bl2->regno); | |
4076 | /* Let this giv be discovered by the generic code. */ | |
4077 | REG_IV_TYPE (bl->regno) = UNKNOWN_INDUCT; | |
e76d2376 | 4078 | reg_biv_class[bl->regno] = NULL_PTR; |
3ec2b590 R |
4079 | /* We can get better optimization if we can move the giv setting |
4080 | before the first giv use. */ | |
4081 | if (dominator | |
079a615d | 4082 | && ! loop_insn_first_p (dominator, scan_start) |
3ec2b590 R |
4083 | && ! reg_set_between_p (bl2->biv->src_reg, loop_start, |
4084 | dominator) | |
4085 | && ! reg_used_between_p (giv, loop_start, dominator) | |
4086 | && ! reg_used_between_p (giv, giv_insn, loop_end)) | |
4087 | { | |
4088 | rtx p; | |
22b4cc65 | 4089 | rtx next; |
3ec2b590 | 4090 | |
22b4cc65 | 4091 | for (next = NEXT_INSN (dominator); ; next = NEXT_INSN (next)) |
3ec2b590 | 4092 | { |
3ec2b590 R |
4093 | if ((GET_RTX_CLASS (GET_CODE (next)) == 'i' |
4094 | && (reg_mentioned_p (giv, PATTERN (next)) | |
4095 | || reg_set_p (bl2->biv->src_reg, next))) | |
4096 | || GET_CODE (next) == JUMP_INSN) | |
4097 | break; | |
4098 | #ifdef HAVE_cc0 | |
4099 | if (GET_RTX_CLASS (GET_CODE (next)) != 'i' | |
4100 | || ! sets_cc0_p (PATTERN (next))) | |
4101 | #endif | |
4102 | dominator = next; | |
4103 | } | |
4104 | if (loop_dump_stream) | |
4105 | fprintf (loop_dump_stream, "move after insn %d\n", | |
4106 | INSN_UID (dominator)); | |
4107 | /* Avoid problems with luids by actually moving the insn | |
4108 | and adjusting all luids in the range. */ | |
4109 | reorder_insns (giv_insn, giv_insn, dominator); | |
4110 | for (p = dominator; INSN_UID (p) >= max_uid_for_loop; ) | |
4111 | p = PREV_INSN (p); | |
4112 | compute_luids (giv_insn, after_giv, INSN_LUID (p)); | |
4113 | /* If the only purpose of the init insn is to initialize | |
4114 | this giv, delete it. */ | |
4115 | if (single_set (bl->init_insn) | |
4116 | && ! reg_used_between_p (giv, bl->init_insn, loop_start)) | |
4117 | delete_insn (bl->init_insn); | |
4118 | } | |
1cb1fe66 | 4119 | else if (! loop_insn_first_p (bl2->biv->insn, bl->biv->insn)) |
3ec2b590 R |
4120 | { |
4121 | rtx p = PREV_INSN (giv_insn); | |
4122 | while (INSN_UID (p) >= max_uid_for_loop) | |
4123 | p = PREV_INSN (p); | |
4124 | reorder_insns (giv_insn, giv_insn, bl2->biv->insn); | |
4125 | compute_luids (after_giv, NEXT_INSN (giv_insn), | |
4126 | INSN_LUID (p)); | |
4127 | } | |
4128 | /* Remove this biv from the chain. */ | |
4129 | if (bl->next) | |
e76d2376 R |
4130 | { |
4131 | /* We move the following giv from *bl->next into *bl. | |
4132 | We have to update reg_biv_class for that moved biv | |
4133 | to point to its new address. */ | |
4134 | *bl = *bl->next; | |
4135 | reg_biv_class[bl->regno] = bl; | |
4136 | } | |
3ec2b590 R |
4137 | else |
4138 | { | |
4139 | *backbl = 0; | |
4140 | break; | |
4141 | } | |
4142 | } | |
4143 | ||
4144 | /* If we can't make it a giv, | |
4145 | let biv keep initial value of "itself". */ | |
4146 | else if (loop_dump_stream) | |
b4ad7b23 RS |
4147 | fprintf (loop_dump_stream, "is complex\n"); |
4148 | } | |
4149 | } | |
4150 | ||
3ec2b590 R |
4151 | /* If a biv is unconditionally incremented several times in a row, convert |
4152 | all but the last increment into a giv. */ | |
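/* Schematically: given two unconditional increments

	i = i + 4;  ... uses of i ...  i = i + 4;

   the first one becomes a giv computed into a fresh register (call it T
   here for illustration), and the remaining increment absorbs its
   constant:

	t = i + 4;  ... uses of t ...  i = i + 8;

   so only the final increment is left as a biv update.  */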
4153 | ||
4154 | /* Get an upper bound for the number of registers | |
4155 | we might have after all bivs have been processed. */ | |
4156 | first_increment_giv = max_reg_num (); | |
4157 | for (n_extra_increment = 0, bl = loop_iv_list; bl; bl = bl->next) | |
4158 | n_extra_increment += bl->biv_count - 1; | |
6449b397 NC |
4159 | |
4160 | /* If the loop contains volatile memory references, do not allow any | |
4161 | replacements to take place, since this could lose the volatile markers. */ | |
3c748bb6 | 4162 | if (n_extra_increment && ! loop_info->has_volatile) |
3ec2b590 R |
4163 | { |
4164 | int nregs = first_increment_giv + n_extra_increment; | |
4165 | ||
4166 | /* Reallocate reg_iv_type and reg_iv_info. */ | |
4167 | VARRAY_GROW (reg_iv_type, nregs); | |
4168 | VARRAY_GROW (reg_iv_info, nregs); | |
4169 | ||
4170 | for (bl = loop_iv_list; bl; bl = bl->next) | |
4171 | { | |
4172 | struct induction **vp, *v, *next; | |
b72bdd84 R |
4173 | int biv_dead_after_loop = 0; |
4174 | ||
3ec2b590 R |
4175 | /* The biv increments lists are in reverse order. Fix this first. */ |
4176 | for (v = bl->biv, bl->biv = 0; v; v = next) | |
4177 | { | |
4178 | next = v->next_iv; | |
4179 | v->next_iv = bl->biv; | |
4180 | bl->biv = v; | |
4181 | } | |
b72bdd84 R |
4182 | |
4183 | /* We must guard against the case that an early exit between v->insn | |
4184 | and next->insn leaves the biv live after the loop, since that | |
4185 | would mean that we'd be missing an increment for the final | |
4186 | value. The following test to set biv_dead_after_loop is like | |
4187 | the first part of the test to set bl->eliminable. | |
4188 | We don't check here if we can calculate the final value, since | |
4189 | this can't succeed if we already know that there is a jump | |
4190 | between v->insn and next->insn, yet next->always_executed is | |
4191 | set and next->maybe_multiple is cleared. Such a combination | |
cc291433 | 4192 | implies that the jump destination is outside the loop. |
b72bdd84 R |
4193 | If we want to make this check more sophisticated, we should |
4194 | check each branch between v->insn and next->insn individually | |
cc291433 | 4195 | to see if the biv is dead at its destination. */ |
b72bdd84 R |
4196 | |
4197 | if (uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end) | |
4198 | && bl->init_insn | |
4199 | && INSN_UID (bl->init_insn) < max_uid_for_loop | |
4200 | && (uid_luid[REGNO_FIRST_UID (bl->regno)] | |
4201 | >= INSN_LUID (bl->init_insn)) | |
4202 | #ifdef HAVE_decrement_and_branch_until_zero | |
4203 | && ! bl->nonneg | |
4204 | #endif | |
4205 | && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set))) | |
4206 | biv_dead_after_loop = 1; | |
4207 | ||
3ec2b590 R |
4208 | for (vp = &bl->biv, next = *vp; v = next, next = v->next_iv;) |
4209 | { | |
4210 | HOST_WIDE_INT offset; | |
a47f48d8 | 4211 | rtx set, add_val, old_reg, dest_reg, last_use_insn, note; |
3ec2b590 | 4212 | int old_regno, new_regno; |
b72bdd84 | 4213 | |
3ec2b590 R |
4214 | if (! v->always_executed |
4215 | || v->maybe_multiple | |
4216 | || GET_CODE (v->add_val) != CONST_INT | |
4217 | || ! next->always_executed | |
4218 | || next->maybe_multiple | |
b72bdd84 | 4219 | || ! CONSTANT_P (next->add_val) |
2485126f R |
4220 | || v->mult_val != const1_rtx |
4221 | || next->mult_val != const1_rtx | |
b72bdd84 R |
4222 | || ! (biv_dead_after_loop |
4223 | || no_jumps_between_p (v->insn, next->insn))) | |
3ec2b590 R |
4224 | { |
4225 | vp = &v->next_iv; | |
4226 | continue; | |
4227 | } | |
4228 | offset = INTVAL (v->add_val); | |
4229 | set = single_set (v->insn); | |
4230 | add_val = plus_constant (next->add_val, offset); | |
4231 | old_reg = v->dest_reg; | |
4232 | dest_reg = gen_reg_rtx (v->mode); | |
4233 | ||
f56246be R |
4234 | /* Unlike reg_iv_type / reg_iv_info, the other three arrays |
4235 | have been allocated with some slop space, so we may not | |
4236 | actually need to reallocate them. If we do, the following | |
4237 | if statement will be executed just once in this loop. */ | |
3ec2b590 R |
4238 | if ((unsigned) max_reg_num () > n_times_set->num_elements) |
4239 | { | |
f56246be | 4240 | /* Grow all the remaining arrays. */ |
3ec2b590 R |
4241 | VARRAY_GROW (set_in_loop, nregs); |
4242 | VARRAY_GROW (n_times_set, nregs); | |
4243 | VARRAY_GROW (may_not_optimize, nregs); | |
a366a40a | 4244 | VARRAY_GROW (reg_single_usage, nregs); |
3ec2b590 R |
4245 | } |
4246 | ||
8e9fb571 | 4247 | if (! validate_change (next->insn, next->location, add_val, 0)) |
3ec2b590 R |
4248 | { |
4249 | vp = &v->next_iv; | |
4250 | continue; | |
4251 | } | |
8e9fb571 R |
4252 | |
4253 | /* Here we can try to eliminate the increment by combining | |
4254 | it into the uses. */ | |
4255 | ||
4256 | /* Set last_use_insn so that we can check against it. */ | |
4257 | ||
4258 | for (last_use_insn = v->insn, p = NEXT_INSN (v->insn); | |
4259 | p != next->insn; | |
4260 | p = next_insn_in_loop (p, scan_start, end, loop_top)) | |
4261 | { | |
8e9fb571 R |
4262 | if (GET_RTX_CLASS (GET_CODE (p)) != 'i') |
4263 | continue; | |
4264 | if (reg_mentioned_p (old_reg, PATTERN (p))) | |
4265 | { | |
4266 | last_use_insn = p; | |
4267 | } | |
4268 | } | |
4269 | ||
4270 | /* If we can't get the LUIDs for the insns, we can't | |
4271 | calculate the lifetime. This is likely from unrolling | |
4272 | of an inner loop, so there is little point in making this | |
4273 | a DEST_REG giv anyways. */ | |
4274 | if (INSN_UID (v->insn) >= max_uid_for_loop | |
4275 | || INSN_UID (last_use_insn) >= max_uid_for_loop | |
4276 | || ! validate_change (v->insn, &SET_DEST (set), dest_reg, 0)) | |
4277 | { | |
4278 | /* Change the increment at NEXT back to what it was. */ | |
4279 | if (! validate_change (next->insn, next->location, | |
4280 | next->add_val, 0)) | |
4281 | abort (); | |
4282 | vp = &v->next_iv; | |
4283 | continue; | |
4284 | } | |
3ec2b590 R |
4285 | next->add_val = add_val; |
4286 | v->dest_reg = dest_reg; | |
4287 | v->giv_type = DEST_REG; | |
4288 | v->location = &SET_SRC (set); | |
4289 | v->cant_derive = 0; | |
4290 | v->combined_with = 0; | |
4291 | v->maybe_dead = 0; | |
4292 | v->derive_adjustment = 0; | |
4293 | v->same = 0; | |
4294 | v->ignore = 0; | |
4295 | v->new_reg = 0; | |
4296 | v->final_value = 0; | |
4297 | v->same_insn = 0; | |
4298 | v->auto_inc_opt = 0; | |
4299 | v->unrolled = 0; | |
4300 | v->shared = 0; | |
4d87f7a7 | 4301 | v->derived_from = 0; |
3ec2b590 R |
4302 | v->always_computable = 1; |
4303 | v->always_executed = 1; | |
4304 | v->replaceable = 1; | |
4305 | v->no_const_addval = 0; | |
4306 | ||
4307 | old_regno = REGNO (old_reg); | |
4308 | new_regno = REGNO (dest_reg); | |
4309 | VARRAY_INT (set_in_loop, old_regno)--; | |
4310 | VARRAY_INT (set_in_loop, new_regno) = 1; | |
4311 | VARRAY_INT (n_times_set, old_regno)--; | |
4312 | VARRAY_INT (n_times_set, new_regno) = 1; | |
4313 | VARRAY_CHAR (may_not_optimize, new_regno) = 0; | |
4314 | ||
4315 | REG_IV_TYPE (new_regno) = GENERAL_INDUCT; | |
4316 | REG_IV_INFO (new_regno) = v; | |
a47f48d8 R |
4317 | |
4318 | /* If next->insn has a REG_EQUAL note that mentions OLD_REG, | |
4319 | it must be replaced. */ | |
4320 | note = find_reg_note (next->insn, REG_EQUAL, NULL_RTX); | |
4321 | if (note && reg_mentioned_p (old_reg, XEXP (note, 0))) | |
4322 | XEXP (note, 0) = copy_rtx (SET_SRC (single_set (next->insn))); | |
4323 | ||
3ec2b590 R |
4324 | /* Remove the increment from the list of biv increments, |
4325 | and record it as a giv. */ | |
4326 | *vp = next; | |
4327 | bl->biv_count--; | |
4328 | v->next_iv = bl->giv; | |
4329 | bl->giv = v; | |
4330 | bl->giv_count++; | |
4331 | v->benefit = rtx_cost (SET_SRC (set), SET); | |
4332 | bl->total_benefit += v->benefit; | |
4333 | ||
4334 | /* Now replace the biv with DEST_REG in all insns between | |
4335 | the replaced increment and the next increment, and | |
4336 | remember the last insn that needed a replacement. */ | |
4337 | for (last_use_insn = v->insn, p = NEXT_INSN (v->insn); | |
4338 | p != next->insn; | |
4339 | p = next_insn_in_loop (p, scan_start, end, loop_top)) | |
4340 | { | |
4341 | rtx note; | |
4342 | ||
4343 | if (GET_RTX_CLASS (GET_CODE (p)) != 'i') | |
4344 | continue; | |
4345 | if (reg_mentioned_p (old_reg, PATTERN (p))) | |
4346 | { | |
4347 | last_use_insn = p; | |
4348 | if (! validate_replace_rtx (old_reg, dest_reg, p)) | |
4349 | abort (); | |
4350 | } | |
4351 | for (note = REG_NOTES (p); note; note = XEXP (note, 1)) | |
4352 | { | |
4353 | if (GET_CODE (note) == EXPR_LIST) | |
4354 | XEXP (note, 0) | |
4355 | = replace_rtx (XEXP (note, 0), old_reg, dest_reg); | |
4356 | } | |
4357 | } | |
4358 | ||
4359 | v->last_use = last_use_insn; | |
4360 | v->lifetime = INSN_LUID (v->insn) - INSN_LUID (last_use_insn); | |
4361 | /* If the lifetime is zero, it means that this register is really | |
4362 | a dead store. So mark this as a giv that can be ignored. | |
4363 | This will not prevent the biv from being eliminated. */ | |
4364 | if (v->lifetime == 0) | |
4365 | v->ignore = 1; | |
1ccf8937 R |
4366 | |
4367 | if (loop_dump_stream) | |
4368 | fprintf (loop_dump_stream, | |
4369 | "Increment %d of biv %d converted to giv %d.\n\n", | |
4370 | INSN_UID (v->insn), old_regno, new_regno); | |
3ec2b590 R |
4371 | } |
4372 | } | |
4373 | } | |
4374 | last_increment_giv = max_reg_num () - 1; | |
4375 | ||
b4ad7b23 RS |
4376 | /* Search the loop for general induction variables. */ |
4377 | ||
4378 | /* A register is a giv if: it is only set once, it is a function of a | |
4379 | biv and a constant (or invariant), and it is not a biv. */ | |
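/* E.g. if I is a biv, an insn such as

	j = i * 4 + 10

   makes J a giv with src_reg I, mult_val 4 and add_val 10.  */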
4380 | ||
4381 | not_every_iteration = 0; | |
5ea7a4ae | 4382 | loop_depth = 0; |
c5c76735 | 4383 | maybe_multiple = 0; |
b4ad7b23 RS |
4384 | p = scan_start; |
4385 | while (1) | |
4386 | { | |
4387 | p = NEXT_INSN (p); | |
4388 | /* At end of a straight-in loop, we are done. | |
4389 | At end of a loop entered at the bottom, scan the top. */ | |
4390 | if (p == scan_start) | |
4391 | break; | |
4392 | if (p == end) | |
4393 | { | |
4394 | if (loop_top != 0) | |
f67ff5de | 4395 | p = loop_top; |
b4ad7b23 RS |
4396 | else |
4397 | break; | |
4398 | if (p == scan_start) | |
4399 | break; | |
4400 | } | |
4401 | ||
4402 | /* Look for a general induction variable in a register. */ | |
4403 | if (GET_CODE (p) == INSN | |
4404 | && (set = single_set (p)) | |
4405 | && GET_CODE (SET_DEST (set)) == REG | |
8deb8e2c | 4406 | && ! VARRAY_CHAR (may_not_optimize, REGNO (SET_DEST (set)))) |
b4ad7b23 RS |
4407 | { |
4408 | rtx src_reg; | |
4409 | rtx add_val; | |
4410 | rtx mult_val; | |
4411 | int benefit; | |
4412 | rtx regnote = 0; | |
a07516d3 | 4413 | rtx last_consec_insn; |
b4ad7b23 RS |
4414 | |
4415 | dest_reg = SET_DEST (set); | |
4416 | if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER) | |
4417 | continue; | |
4418 | ||
4419 | if (/* SET_SRC is a giv. */ | |
45f97e2e RH |
4420 | (general_induction_var (SET_SRC (set), &src_reg, &add_val, |
4421 | &mult_val, 0, &benefit) | |
0f41302f | 4422 | /* Equivalent expression is a giv. */ |
5fd8383e | 4423 | || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX)) |
45f97e2e RH |
4424 | && general_induction_var (XEXP (regnote, 0), &src_reg, |
4425 | &add_val, &mult_val, 0, | |
4426 | &benefit))) | |
b4ad7b23 RS |
4427 | /* Don't try to handle any regs made by loop optimization. |
4428 | We have nothing on them in regno_first_uid, etc. */ | |
4429 | && REGNO (dest_reg) < max_reg_before_loop | |
4430 | /* Don't recognize a BASIC_INDUCT_VAR here. */ | |
4431 | && dest_reg != src_reg | |
4432 | /* This must be the only place where the register is set. */ | |
8deb8e2c | 4433 | && (VARRAY_INT (n_times_set, REGNO (dest_reg)) == 1 |
0f41302f | 4434 | /* or all sets must be consecutive and make a giv. */ |
b4ad7b23 RS |
4435 | || (benefit = consec_sets_giv (benefit, p, |
4436 | src_reg, dest_reg, | |
a07516d3 R |
4437 | &add_val, &mult_val, |
4438 | &last_consec_insn)))) | |
b4ad7b23 | 4439 | { |
b4ad7b23 RS |
4440 | struct induction *v |
4441 | = (struct induction *) alloca (sizeof (struct induction)); | |
b4ad7b23 RS |
4442 | |
4443 | /* If this is a library call, increase benefit. */ | |
5fd8383e | 4444 | if (find_reg_note (p, REG_RETVAL, NULL_RTX)) |
b4ad7b23 RS |
4445 | benefit += libcall_benefit (p); |
4446 | ||
4447 | /* Skip the consecutive insns, if there are any. */ | |
a07516d3 R |
4448 | if (VARRAY_INT (n_times_set, REGNO (dest_reg)) != 1) |
4449 | p = last_consec_insn; | |
b4ad7b23 RS |
4450 | |
4451 | record_giv (v, p, src_reg, dest_reg, mult_val, add_val, benefit, | |
c5c76735 JL |
4452 | DEST_REG, not_every_iteration, maybe_multiple, |
4453 | NULL_PTR, loop_start, loop_end); | |
b4ad7b23 RS |
4454 | |
4455 | } | |
4456 | } | |
4457 | ||
4458 | #ifndef DONT_REDUCE_ADDR | |
4459 | /* Look for givs which are memory addresses. */ | |
4460 | /* This resulted in worse code on a VAX 8600. I wonder if it | |
4461 | still does. */ | |
4462 | if (GET_CODE (p) == INSN) | |
c5c76735 JL |
4463 | find_mem_givs (PATTERN (p), p, not_every_iteration, maybe_multiple, |
4464 | loop_start, loop_end); | |
b4ad7b23 RS |
4465 | #endif |
4466 | ||
4467 | /* Update the status of whether giv can derive other givs. This can | |
4468 | change when we pass a label or an insn that updates a biv. */ | |
7dcd3836 RK |
4469 | if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN |
4470 | || GET_CODE (p) == CODE_LABEL) | |
b4ad7b23 RS |
4471 | update_giv_derive (p); |
4472 | ||
c5c76735 JL |
4473 | /* Past CODE_LABEL, we get to insns that may be executed multiple |
4474 | times. The only way we can be sure that they can't is if every | |
4475 | jump insn between here and the end of the loop either | |
4476 | returns, exits the loop, is a forward jump, or is a jump | |
4477 | to the loop start. */ | |
4478 | ||
4479 | if (GET_CODE (p) == CODE_LABEL) | |
4480 | { | |
4481 | rtx insn = p; | |
4482 | ||
4483 | maybe_multiple = 0; | |
4484 | ||
4485 | while (1) | |
4486 | { | |
4487 | insn = NEXT_INSN (insn); | |
4488 | if (insn == scan_start) | |
4489 | break; | |
4490 | if (insn == end) | |
4491 | { | |
4492 | if (loop_top != 0) | |
4493 | insn = loop_top; | |
4494 | else | |
4495 | break; | |
4496 | if (insn == scan_start) | |
4497 | break; | |
4498 | } | |
4499 | ||
4500 | if (GET_CODE (insn) == JUMP_INSN | |
4501 | && GET_CODE (PATTERN (insn)) != RETURN | |
4502 | && (! condjump_p (insn) | |
4503 | || (JUMP_LABEL (insn) != 0 | |
4504 | && JUMP_LABEL (insn) != scan_start | |
4505 | && (INSN_UID (JUMP_LABEL (insn)) >= max_uid_for_loop | |
4506 | || INSN_UID (insn) >= max_uid_for_loop | |
4507 | || (INSN_LUID (JUMP_LABEL (insn)) | |
4508 | < INSN_LUID (insn)))))) | |
4509 | { | |
4510 | maybe_multiple = 1; | |
4511 | break; | |
4512 | } | |
4513 | } | |
4514 | } | |
4515 | ||
8516af93 JW |
4516 | /* Past a jump, we get to insns for which we can't count |
4517 | on whether they will be executed during each iteration. */ | |
4518 | /* This code appears twice in strength_reduce. There is also similar | |
4519 | code in scan_loop. */ | |
4520 | if (GET_CODE (p) == JUMP_INSN | |
4521 | /* If we enter the loop in the middle, and scan around to the | |
4522 | beginning, don't set not_every_iteration for that. | |
b4ad7b23 RS |
4523 | This can be any kind of jump, since we want to know if insns |
4524 | will be executed if the loop is executed. */ | |
8516af93 | 4525 | && ! (JUMP_LABEL (p) == loop_top |
b4ad7b23 RS |
4526 | && ((NEXT_INSN (NEXT_INSN (p)) == loop_end && simplejump_p (p)) |
4527 | || (NEXT_INSN (p) == loop_end && condjump_p (p))))) | |
8516af93 JW |
4528 | { |
4529 | rtx label = 0; | |
4530 | ||
4531 | /* If this is a jump outside the loop, then it also doesn't | |
4532 | matter. Check to see if the target of this branch is on the | |
4533 | loop_number_exit_labels list. */ | |
4534 | ||
4535 | for (label = loop_number_exit_labels[uid_loop_num[INSN_UID (loop_start)]]; | |
4536 | label; | |
4537 | label = LABEL_NEXTREF (label)) | |
4538 | if (XEXP (label, 0) == JUMP_LABEL (p)) | |
4539 | break; | |
4540 | ||
4541 | if (! label) | |
4542 | not_every_iteration = 1; | |
4543 | } | |
b4ad7b23 | 4544 | |
5ea7a4ae JW |
4545 | else if (GET_CODE (p) == NOTE) |
4546 | { | |
4547 | /* At the virtual top of a converted loop, insns are again known to | |
4548 | be executed each iteration: logically, the loop begins here | |
5f3db57e JL |
4549 | even though the exit code has been duplicated. |
4550 | ||
4551 | Insns are also again known to be executed each iteration at | |
4552 | the LOOP_CONT note. */ | |
4553 | if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP | |
4554 | || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT) | |
4555 | && loop_depth == 0) | |
5ea7a4ae JW |
4556 | not_every_iteration = 0; |
4557 | else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG) | |
4558 | loop_depth++; | |
4559 | else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END) | |
4560 | loop_depth--; | |
4561 | } | |
b4ad7b23 RS |
4562 | |
4563 | /* Unlike in the code motion pass where MAYBE_NEVER indicates that | |
4564 | an insn may never be executed, NOT_EVERY_ITERATION indicates whether | |
4565 | or not an insn is known to be executed each iteration of the | |
4566 | loop, whether or not any iterations are known to occur. | |
4567 | ||
4568 | Therefore, if we have just passed a label and have no more labels | |
4569 | between here and the test insn of the loop, we know these insns | |
4570 | will be executed each iteration. */ | |
4571 | ||
4572 | if (not_every_iteration && GET_CODE (p) == CODE_LABEL | |
6dd49eb4 | 4573 | && no_labels_between_p (p, loop_end) |
1cb1fe66 | 4574 | && loop_insn_first_p (p, loop_cont)) |
b4ad7b23 RS |
4575 | not_every_iteration = 0; |
4576 | } | |
4577 | ||
4578 | /* Try to calculate and save the number of loop iterations. This is | |
4579 | set to zero if the actual number can not be calculated. This must | |
4580 | be called after all giv's have been identified, since otherwise it may | |
4581 | fail if the iteration variable is a giv. */ | |
4582 | ||
302670f3 | 4583 | loop_iterations (loop_start, loop_end, loop_info); |
b4ad7b23 RS |
4584 | |
4585 | /* Now for each giv for which we still don't know whether or not it is | |
4586 | replaceable, check to see if it is replaceable because its final value | |
4587 | can be calculated. This must be done after loop_iterations is called, | |
4588 | so that final_giv_value will work correctly. */ | |
4589 | ||
4590 | for (bl = loop_iv_list; bl; bl = bl->next) | |
4591 | { | |
4592 | struct induction *v; | |
4593 | ||
4594 | for (v = bl->giv; v; v = v->next_iv) | |
4595 | if (! v->replaceable && ! v->not_replaceable) | |
302670f3 | 4596 | check_final_value (v, loop_start, loop_end, loop_info->n_iterations); |
b4ad7b23 RS |
4597 | } |
4598 | ||
4599 | /* Try to prove that the loop counter variable (if any) is always | |
4600 | nonnegative; if so, record that fact with a REG_NONNEG note | |
4601 | so that "decrement and branch until zero" insn can be used. */ | |
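/* Roughly: a loop such as

	for (i = 0; i < n; i++) ...

   whose counter is used only in the exit test may be reversed to run
   from N down to 0, so that the endtest can become a single
   decrement-and-branch-until-zero insn.  */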
5629b16c | 4602 | check_dbra_loop (loop_end, insn_count, loop_start, loop_info); |
b4ad7b23 | 4603 | |
97ec0ad8 R |
4604 | /* Create reg_map to hold substitutions for replaceable giv regs. |
4605 | Some givs might have been made from biv increments, so look at | |
4606 | reg_iv_type for a suitable size. */ | |
4607 | reg_map_size = reg_iv_type->num_elements; | |
4608 | reg_map = (rtx *) alloca (reg_map_size * sizeof (rtx)); | |
4609 | bzero ((char *) reg_map, reg_map_size * sizeof (rtx)); | |
b4ad7b23 RS |
4610 | |
4611 | /* Examine each iv class for feasibility of strength reduction/induction | |
4612 | variable elimination. */ | |
4613 | ||
4614 | for (bl = loop_iv_list; bl; bl = bl->next) | |
4615 | { | |
4616 | struct induction *v; | |
4617 | int benefit; | |
4618 | int all_reduced; | |
4619 | rtx final_value = 0; | |
3c748bb6 | 4620 | unsigned int nregs; |
b4ad7b23 RS |
4621 | |
4622 | /* Test whether it will be possible to eliminate this biv | |
4623 | provided all givs are reduced. This is possible if either | |
4624 | the reg is not used outside the loop, or we can compute | |
4625 | what its final value will be. | |
4626 | ||
4627 | For architectures with a decrement_and_branch_until_zero insn, | |
4628 | don't do this if we put a REG_NONNEG note on the endtest for | |
4629 | this biv. */ | |
4630 | ||
4631 | /* Compare against bl->init_insn rather than loop_start. | |
4632 | We aren't concerned with any uses of the biv between | |
4633 | init_insn and loop_start since these won't be affected | |
4634 | by the value of the biv elsewhere in the function, so | |
4635 | long as init_insn doesn't use the biv itself. | |
4636 | March 14, 1989 -- self@bayes.arc.nasa.gov */ | |
4637 | ||
b1f21e0a | 4638 | if ((uid_luid[REGNO_LAST_UID (bl->regno)] < INSN_LUID (loop_end) |
b4ad7b23 RS |
4639 | && bl->init_insn |
4640 | && INSN_UID (bl->init_insn) < max_uid_for_loop | |
b1f21e0a | 4641 | && uid_luid[REGNO_FIRST_UID (bl->regno)] >= INSN_LUID (bl->init_insn) |
b4ad7b23 RS |
4642 | #ifdef HAVE_decrement_and_branch_until_zero |
4643 | && ! bl->nonneg | |
4644 | #endif | |
4645 | && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set))) | |
302670f3 MH |
4646 | || ((final_value = final_biv_value (bl, loop_start, loop_end, |
4647 | loop_info->n_iterations)) | |
b4ad7b23 RS |
4648 | #ifdef HAVE_decrement_and_branch_until_zero |
4649 | && ! bl->nonneg | |
4650 | #endif | |
4651 | )) | |
4652 | bl->eliminable = maybe_eliminate_biv (bl, loop_start, end, 0, | |
4653 | threshold, insn_count); | |
4654 | else | |
4655 | { | |
4656 | if (loop_dump_stream) | |
4657 | { | |
4658 | fprintf (loop_dump_stream, | |
4659 | "Cannot eliminate biv %d.\n", | |
4660 | bl->regno); | |
4661 | fprintf (loop_dump_stream, | |
4662 | "First use: insn %d, last use: insn %d.\n", | |
b1f21e0a MM |
4663 | REGNO_FIRST_UID (bl->regno), |
4664 | REGNO_LAST_UID (bl->regno)); | |
b4ad7b23 RS |
4665 | } |
4666 | } | |
4667 | ||
4668 | /* Combine all giv's for this iv_class. */ | |
4669 | combine_givs (bl); | |
4670 | ||
4671 | /* This will be true at the end, if all givs which depend on this | |
4672 | biv have been strength reduced. | |
4673 | We can't (currently) eliminate the biv unless this is so. */ | |
4674 | all_reduced = 1; | |
4675 | ||
4676 | /* Check each giv in this class to see if we will benefit by reducing | |
4677 | it. Skip giv's combined with others. */ | |
4678 | for (v = bl->giv; v; v = v->next_iv) | |
4679 | { | |
4680 | struct induction *tv; | |
4681 | ||
4682 | if (v->ignore || v->same) | |
4683 | continue; | |
4684 | ||
4685 | benefit = v->benefit; | |
4686 | ||
4687 | /* Reduce benefit if not replaceable, since we will insert | |
4688 | a move-insn to replace the insn that calculates this giv. | |
4689 | Don't do this unless the giv is a user variable, since it | |
4690 | will often be marked non-replaceable because of the duplication | |
4691 | of the exit code outside the loop. In such a case, the copies | |
4692 | we insert are dead and will be deleted. So they don't have | |
4693 | a cost. Similar situations exist. */ | |
4694 | /* ??? The new final_[bg]iv_value code does a much better job | |
4695 | of finding replaceable giv's, and hence this code may no longer | |
4696 | be necessary. */ | |
4697 | if (! v->replaceable && ! bl->eliminable | |
4698 | && REG_USERVAR_P (v->dest_reg)) | |
4699 | benefit -= copy_cost; | |
4700 | ||
4701 | /* Decrease the benefit to count the add-insns that we will | |
4702 | insert to increment the reduced reg for the giv. */ | |
4703 | benefit -= add_cost * bl->biv_count; | |
4704 | ||
4705 | /* Decide whether to strength-reduce this giv or to leave the code | |
4706 | unchanged (recompute it from the biv each time it is used). | |
4707 | This decision can be made independently for each giv. */ | |
4708 | ||
ab162578 JL |
4709 | #ifdef AUTO_INC_DEC |
4710 | /* Attempt to guess whether autoincrement will handle some of the | |
4711 | new add insns; if so, increase BENEFIT (undo the subtraction of | |
4712 | add_cost that was done above). */ | |
4713 | if (v->giv_type == DEST_ADDR | |
4714 | && GET_CODE (v->mult_val) == CONST_INT) | |
4715 | { | |
940da324 JL |
4716 | if (HAVE_POST_INCREMENT |
4717 | && INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode)) | |
ab162578 | 4718 | benefit += add_cost * bl->biv_count; |
940da324 JL |
4719 | else if (HAVE_PRE_INCREMENT |
4720 | && INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode)) | |
4721 | benefit += add_cost * bl->biv_count; | |
4722 | else if (HAVE_POST_DECREMENT | |
4723 | && -INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode)) | |
4724 | benefit += add_cost * bl->biv_count; | |
4725 | else if (HAVE_PRE_DECREMENT | |
4726 | && -INTVAL (v->mult_val) == GET_MODE_SIZE (v->mem_mode)) | |
ab162578 | 4727 | benefit += add_cost * bl->biv_count; |
ab162578 JL |
4728 | } |
4729 | #endif | |
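	  /* E.g. a DEST_ADDR giv whose address advances by
	     GET_MODE_SIZE (v->mem_mode) each iteration matches a
	     post-increment addressing mode, so the add insns charged
	     against BENEFIT above are likely to come for free.  */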
b4ad7b23 RS |
4730 | |
4731 | /* If an insn is not to be strength reduced, then set its ignore | |
4732 | flag, and clear all_reduced. */ | |
4733 | ||
e6f6eb29 JW |
4734 | /* A giv that depends on a reversed biv must be reduced if it is |
4735 | used after the loop exit; otherwise, it would have the wrong | |
4736 | value after the loop exit. To make it simple, just reduce all | |
4737 | such giv's whether or not we know they are used after the loop | |
4738 | exit. */ | |
4739 | ||
e5eb27e5 JL |
4740 | if ( ! flag_reduce_all_givs && v->lifetime * threshold * benefit < insn_count |
4741 | && ! bl->reversed ) | |
b4ad7b23 RS |
4742 | { |
4743 | if (loop_dump_stream) | |
4744 | fprintf (loop_dump_stream, | |
4745 | "giv of insn %d not worth while, %d vs %d.\n", | |
4746 | INSN_UID (v->insn), | |
4747 | v->lifetime * threshold * benefit, insn_count); | |
4748 | v->ignore = 1; | |
4749 | all_reduced = 0; | |
4750 | } | |
4751 | else | |
4752 | { | |
4753 | /* Check that we can increment the reduced giv without a | |
4754 | multiply insn. If not, reject it. */ | |
4755 | ||
4756 | for (tv = bl->biv; tv; tv = tv->next_iv) | |
4757 | if (tv->mult_val == const1_rtx | |
4758 | && ! product_cheap_p (tv->add_val, v->mult_val)) | |
4759 | { | |
4760 | if (loop_dump_stream) | |
4761 | fprintf (loop_dump_stream, | |
4762 | "giv of insn %d: would need a multiply.\n", | |
4763 | INSN_UID (v->insn)); | |
4764 | v->ignore = 1; | |
4765 | all_reduced = 0; | |
4766 | break; | |
4767 | } | |
4768 | } | |
4769 | } | |
4770 | ||
8c354a41 R |
4771 | /* Check for givs whose first use is their definition and whose |
4772 | last use is the definition of another giv. If so, it is likely | |
4773 | dead and should not be used to derive another giv nor to | |
4774 | eliminate a biv. */ | |
4775 | for (v = bl->giv; v; v = v->next_iv) | |
4776 | { | |
4777 | if (v->ignore | |
4778 | || (v->same && v->same->ignore)) | |
4779 | continue; | |
4780 | ||
4781 | if (v->last_use) | |
4782 | { | |
4783 | struct induction *v1; | |
4784 | ||
4785 | for (v1 = bl->giv; v1; v1 = v1->next_iv) | |
4786 | if (v->last_use == v1->insn) | |
4787 | v->maybe_dead = 1; | |
4788 | } | |
4789 | else if (v->giv_type == DEST_REG | |
4790 | && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn)) | |
4791 | { | |
4792 | struct induction *v1; | |
4793 | ||
4794 | for (v1 = bl->giv; v1; v1 = v1->next_iv) | |
4795 | if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn)) | |
4796 | v->maybe_dead = 1; | |
4797 | } | |
4798 | } | |
4799 | ||
3ec2b590 | 4800 | /* Now that we know which givs will be reduced, try to rearrange the |
f56246be R |
4801 | combinations to reduce register pressure. |
4802 | recombine_givs calls find_life_end, which needs reg_iv_type and | |
4803 | reg_iv_info to be valid for all pseudos. We do the necessary | |
4804 | reallocation here since it allows us to check if there are still | |
4805 | more bivs to process. */ | |
4806 | nregs = max_reg_num (); | |
4807 | if (nregs > reg_iv_type->num_elements) | |
4808 | { | |
4809 | /* If there are still more bivs to process, allocate some slack | |
4810 | space so that we're not constantly reallocating these arrays. */ | |
4811 | if (bl->next) | |
4812 | nregs += nregs / 4; | |
4813 | /* Reallocate reg_iv_type and reg_iv_info. */ | |
4814 | VARRAY_GROW (reg_iv_type, nregs); | |
4815 | VARRAY_GROW (reg_iv_info, nregs); | |
4816 | } | |
53dc05e4 | 4817 | recombine_givs (bl, loop_start, loop_end, unroll_p); |
3ec2b590 | 4818 | |
b4ad7b23 RS |
4819 | /* Reduce each giv that we decided to reduce. */ |
4820 | ||
4821 | for (v = bl->giv; v; v = v->next_iv) | |
4822 | { | |
4823 | struct induction *tv; | |
4824 | if (! v->ignore && v->same == 0) | |
4825 | { | |
8516af93 JW |
4826 | int auto_inc_opt = 0; |
4827 | ||
743f9f5d R |
4828 | /* If the code for derived givs immediately below has already |
4829 | allocated a new_reg, we must keep it. */ | |
4830 | if (! v->new_reg) | |
4831 | v->new_reg = gen_reg_rtx (v->mode); | |
b4ad7b23 | 4832 | |
4d87f7a7 | 4833 | if (v->derived_from) |
3ec2b590 | 4834 | { |
743f9f5d R |
4835 | struct induction *d = v->derived_from; |
4836 | ||
4837 | /* In case d->dest_reg is not replaceable, we have | |
4838 | to replace it in v->insn now. */ | |
4839 | if (! d->new_reg) | |
4840 | d->new_reg = gen_reg_rtx (d->mode); | |
4841 | PATTERN (v->insn) | |
4842 | = replace_rtx (PATTERN (v->insn), d->dest_reg, d->new_reg); | |
3ec2b590 R |
4843 | PATTERN (v->insn) |
4844 | = replace_rtx (PATTERN (v->insn), v->dest_reg, v->new_reg); | |
1b786838 R |
4845 | /* For each place where the biv is incremented, add an |
4846 | insn to set the new, reduced reg for the giv. | |
4847 | We used to do this only for biv_count != 1, but | |
4848 | this fails when there is a giv after a single biv | |
4849 | increment, e.g. when the last giv was expressed as | |
4850 | pre-decrement. */ | |
4851 | for (tv = bl->biv; tv; tv = tv->next_iv) | |
3ec2b590 | 4852 | { |
1b786838 R |
4853 | /* We always emit reduced giv increments before the |
4854 | biv increment when bl->biv_count != 1. So by | |
4855 | emitting the add insns for derived givs after the | |
4856 | biv increment, they pick up the updated value of | |
4857 | the reduced giv. | |
4858 | If the reduced giv is processed with | |
4859 | auto_inc_opt == 1, then it is incremented earlier | |
4860 | than the biv, hence we'll still pick up the right | |
4861 | value. | |
4862 | If it's processed with auto_inc_opt == -1, | |
4863 | that implies that the biv increment is before the | |
4864 | first reduced giv's use. The derived giv's lifetime | |
4865 | is after the reduced giv's lifetime, hence in this | |
4866 | case, the biv increment doesn't matter. */ | |
4867 | emit_insn_after (copy_rtx (PATTERN (v->insn)), tv->insn); | |
3ec2b590 R |
4868 | } |
4869 | continue; | |
4870 | } | |
4871 | ||
8516af93 JW |
4872 | #ifdef AUTO_INC_DEC |
4873 | /* If the target has auto-increment addressing modes, and | |
4874 | this is an address giv, then try to put the increment | |
4875 | immediately after its use, so that flow can create an | |
4876 | auto-increment addressing mode. */ | |
4877 | if (v->giv_type == DEST_ADDR && bl->biv_count == 1 | |
085daa5a JW |
4878 | && bl->biv->always_executed && ! bl->biv->maybe_multiple |
4879 | /* We don't handle reversed biv's because bl->biv->insn | |
4880 | does not have a valid INSN_LUID. */ | |
4881 | && ! bl->reversed | |
f5963e61 JL |
4882 | && v->always_executed && ! v->maybe_multiple |
4883 | && INSN_UID (v->insn) < max_uid_for_loop) | |
8516af93 JW |
4884 | { |
4885 | /* If other giv's have been combined with this one, then | |
4886 | this will work only if all uses of the other giv's occur | |
4887 | before this giv's insn. This is difficult to check. | |
4888 | ||
4889 | We simplify this by looking for the common case where | |
4890 | there is one DEST_REG giv, and this giv's insn is the | |
4891 | last use of the dest_reg of that DEST_REG giv. If the | |
38e01259 | 4892 | increment occurs after the address giv, then we can |
8516af93 JW |
4893 | perform the optimization. (Otherwise, the increment |
4894 | would have to go before other_giv, and we would not be | |
4895 | able to combine it with the address giv to get an | |
4896 | auto-inc address.) */ | |
4897 | if (v->combined_with) | |
4898 | { | |
4899 | struct induction *other_giv = 0; | |
4900 | ||
4901 | for (tv = bl->giv; tv; tv = tv->next_iv) | |
4902 | if (tv->same == v) | |
4903 | { | |
4904 | if (other_giv) | |
4905 | break; | |
4906 | else | |
4907 | other_giv = tv; | |
4908 | } | |
4909 | if (! tv && other_giv | |
43243872 | 4910 | && REGNO (other_giv->dest_reg) < max_reg_before_loop |
b1f21e0a | 4911 | && (REGNO_LAST_UID (REGNO (other_giv->dest_reg)) |
8516af93 JW |
4912 | == INSN_UID (v->insn)) |
4913 | && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn)) | |
4914 | auto_inc_opt = 1; | |
4915 | } | |
38e01259 | 4916 | /* Check for case where increment is before the address |
72b0c616 RK |
4917 | giv. Do this test in "loop order". */ |
4918 | else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn) | |
4919 | && (INSN_LUID (v->insn) < INSN_LUID (scan_start) | |
4920 | || (INSN_LUID (bl->biv->insn) | |
4921 | > INSN_LUID (scan_start)))) | |
4922 | || (INSN_LUID (v->insn) < INSN_LUID (scan_start) | |
4923 | && (INSN_LUID (scan_start) | |
4924 | < INSN_LUID (bl->biv->insn)))) | |
8516af93 JW |
4925 | auto_inc_opt = -1; |
4926 | else | |
4927 | auto_inc_opt = 1; | |
4928 | ||
bb91b814 | 4929 | #ifdef HAVE_cc0 |
a7a4457e DE |
4930 | { |
4931 | rtx prev; | |
4932 | ||
4933 | /* We can't put an insn immediately after one setting | |
4934 | cc0, or immediately before one using cc0. */ | |
4935 | if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn))) | |
4936 | || (auto_inc_opt == -1 | |
4937 | && (prev = prev_nonnote_insn (v->insn)) != 0 | |
4938 | && GET_RTX_CLASS (GET_CODE (prev)) == 'i' | |
4939 | && sets_cc0_p (PATTERN (prev)))) | |
4940 | auto_inc_opt = 0; | |
4941 | } | |
bb91b814 JW |
4942 | #endif |
4943 | ||
8516af93 JW |
4944 | if (auto_inc_opt) |
4945 | v->auto_inc_opt = 1; | |
4946 | } | |
4947 | #endif | |
4948 | ||
4949 | /* For each place where the biv is incremented, add an insn | |
4950 | to increment the new, reduced reg for the giv. */ | |
b4ad7b23 RS |
4951 | for (tv = bl->biv; tv; tv = tv->next_iv) |
4952 | { | |
8516af93 JW |
4953 | rtx insert_before; |
4954 | ||
4955 | if (! auto_inc_opt) | |
4956 | insert_before = tv->insn; | |
4957 | else if (auto_inc_opt == 1) | |
4958 | insert_before = NEXT_INSN (v->insn); | |
4959 | else | |
4960 | insert_before = v->insn; | |
4961 | ||
b4ad7b23 RS |
4962 | if (tv->mult_val == const1_rtx) |
4963 | emit_iv_add_mult (tv->add_val, v->mult_val, | |
8516af93 | 4964 | v->new_reg, v->new_reg, insert_before); |
b4ad7b23 RS |
4965 | else /* tv->mult_val == const0_rtx */ |
4966 | /* A multiply is acceptable here | |
4967 | since this is presumed to be seldom executed. */ | |
4968 | emit_iv_add_mult (tv->add_val, v->mult_val, | |
8516af93 | 4969 | v->add_val, v->new_reg, insert_before); |
b4ad7b23 RS |
4970 | } |
4971 | ||
4972 | /* Add code at loop start to initialize giv's reduced reg. */ | |
4973 | ||
4974 | emit_iv_add_mult (bl->initial_value, v->mult_val, | |
4975 | v->add_val, v->new_reg, loop_start); | |
4976 | } | |
4977 | } | |
4978 | ||
4979 | /* Rescan all givs. If a giv is the same as a giv not reduced, mark it | |
4980 | as not reduced. | |
4981 | ||
4982 | For each giv register that can be reduced now: if replaceable, | |
4983 | substitute reduced reg wherever the old giv occurs; | |
8c354a41 | 4984 | else add new move insn "giv_reg = reduced_reg". */ |
b4ad7b23 | 4985 | |
b4ad7b23 RS |
4986 | for (v = bl->giv; v; v = v->next_iv) |
4987 | { | |
4988 | if (v->same && v->same->ignore) | |
4989 | v->ignore = 1; | |
4990 | ||
4991 | if (v->ignore) | |
4992 | continue; | |
4993 | ||
b4ad7b23 RS |
4994 | /* Update expression if this was combined, in case other giv was |
4995 | replaced. */ | |
4996 | if (v->same) | |
4997 | v->new_reg = replace_rtx (v->new_reg, | |
4998 | v->same->dest_reg, v->same->new_reg); | |
4999 | ||
5000 | if (v->giv_type == DEST_ADDR) | |
5001 | /* Store reduced reg as the address in the memref where we found | |
5002 | this giv. */ | |
9abdca9c | 5003 | validate_change (v->insn, v->location, v->new_reg, 0); |
b4ad7b23 RS |
5004 | else if (v->replaceable) |
5005 | { | |
5006 | reg_map[REGNO (v->dest_reg)] = v->new_reg; | |
5007 | ||
5008 | #if 0 | |
5009 | /* I can no longer duplicate the original problem. Perhaps | |
5010 | this is unnecessary now? */ | |
5011 | ||
5012 | /* Replaceable; it isn't strictly necessary to delete the old | |
5013 | insn and emit a new one, because v->dest_reg is now dead. | |
5014 | ||
5015 | However, especially when unrolling loops, the special | |
5016 | handling for (set REG0 REG1) in the second cse pass may | |
5017 | make v->dest_reg live again. To avoid this problem, emit | |
5018 | an insn to set the original giv reg from the reduced giv. | |
5019 | We can not delete the original insn, since it may be part | |
5020 | of a LIBCALL, and the code in flow that eliminates dead | |
5021 | libcalls will fail if it is deleted. */ | |
5022 | emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg), | |
5023 | v->insn); | |
5024 | #endif | |
5025 | } | |
5026 | else | |
5027 | { | |
5028 | /* Not replaceable; emit an insn to set the original giv reg from | |
5029 | the reduced giv, same as above. */ | |
5030 | emit_insn_after (gen_move_insn (v->dest_reg, v->new_reg), | |
5031 | v->insn); | |
5032 | } | |
5033 | ||
5034 | /* When a loop is reversed, givs which depend on the reversed | |
5035 | biv, and which are live outside the loop, must be set to their | |
5036 | correct final value. This insn is only needed if the giv is | |
5037 | not replaceable. The correct final value is the same as the | |
5038 | value that the giv starts the reversed loop with. */ | |
5039 | if (bl->reversed && ! v->replaceable) | |
5040 | emit_iv_add_mult (bl->initial_value, v->mult_val, | |
5041 | v->add_val, v->dest_reg, end_insert_before); | |
5042 | else if (v->final_value) | |
5043 | { | |
5044 | rtx insert_before; | |
5045 | ||
5046 | /* If the loop has multiple exits, emit the insn before the | |
5047 | loop to ensure that it will always be executed no matter | |
5048 | how the loop exits. Otherwise, emit the insn after the loop, | |
5049 | since this is slightly more efficient. */ | |
353127c2 | 5050 | if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]]) |
b4ad7b23 RS |
5051 | insert_before = loop_start; |
5052 | else | |
5053 | insert_before = end_insert_before; | |
5054 | emit_insn_before (gen_move_insn (v->dest_reg, v->final_value), | |
5055 | insert_before); | |
5056 | ||
5057 | #if 0 | |
5058 | /* If the insn to set the final value of the giv was emitted | |
5059 | before the loop, then we must delete the insn inside the loop | |
5060 | that sets it. If this is a LIBCALL, then we must delete | |
5061 | every insn in the libcall. Note, however, that | |
5062 | final_giv_value will only succeed when there are multiple | |
5063 | exits if the giv is dead at each exit, hence it does not | |
5064 | matter that the original insn remains because it is dead | |
5065 | anyways. */ | |
5066 | /* Delete the insn inside the loop that sets the giv since | |
5067 | the giv is now set before (or after) the loop. */ | |
5068 | delete_insn (v->insn); | |
5069 | #endif | |
5070 | } | |
5071 | ||
5072 | if (loop_dump_stream) | |
5073 | { | |
5074 | fprintf (loop_dump_stream, "giv at %d reduced to ", | |
5075 | INSN_UID (v->insn)); | |
5076 | print_rtl (loop_dump_stream, v->new_reg); | |
5077 | fprintf (loop_dump_stream, "\n"); | |
5078 | } | |
5079 | } | |
5080 | ||
5081 | /* All the givs based on the biv bl have been reduced if they | |
5082 | merit it. */ | |
5083 | ||
5084 | /* For each giv not marked as maybe dead that has been combined with a | |
5085 | second giv, clear any "maybe dead" mark on that second giv. | |
5086 | v->new_reg will either be or refer to the register of the giv it | |
5087 | combined with. | |
5088 | ||
5089 | Doing this clearing avoids problems in biv elimination where a | |
5090 | giv's new_reg is a complex value that can't be put in the insn but | |
5091 | the giv combined with (with a reg as new_reg) is marked maybe_dead. | |
5092 | Since the register will be used in either case, we'd prefer it be | |
5093 | used from the simpler giv. */ | |
5094 | ||
5095 | for (v = bl->giv; v; v = v->next_iv) | |
5096 | if (! v->maybe_dead && v->same) | |
5097 | v->same->maybe_dead = 0; | |
5098 | ||
5099 | /* Try to eliminate the biv, if it is a candidate. | |
5100 | This won't work if ! all_reduced, | |
5101 | since the givs we planned to use might not have been reduced. | |
5102 | ||
d45cf215 | 5103 | We have to be careful that we didn't initially think we could eliminate |
b4ad7b23 RS |
5104 | this biv because of a giv that we now think may be dead and shouldn't |
5105 | be used as a biv replacement. | |
5106 | ||
5107 | Also, there is the possibility that we may have a giv that looks | |
5108 | like it can be used to eliminate a biv, but the resulting insn | |
5109 | isn't valid. This can happen, for example, on the 88k, where a | |
5110 | JUMP_INSN can compare a register only with zero. Attempts to | |
c5b7917e | 5111 | replace it with a compare with a constant will fail. |
b4ad7b23 RS |
5112 | |
5113 | Note that in cases where this call fails, we may have replaced some | |
5114 | of the occurrences of the biv with a giv, but no harm was done in | |
5115 | doing so in the rare cases where it can occur. */ | |
5116 | ||
5117 | if (all_reduced == 1 && bl->eliminable | |
5118 | && maybe_eliminate_biv (bl, loop_start, end, 1, | |
5119 | threshold, insn_count)) | |
5120 | ||
5121 | { | |
5122 | /* ?? If we created a new test to bypass the loop entirely, | |
5123 | or otherwise drop straight in, based on this test, then | |
5124 | we might want to rewrite it also. This way some later | |
5125 | pass has more hope of removing the initialization of this | |
0f41302f | 5126 | biv entirely. */ |
b4ad7b23 RS |
5127 | |
5128 | /* If final_value != 0, then the biv may be used after loop end | |
5129 | and we must emit an insn to set it just in case. | |
5130 | ||
5131 | Reversed bivs already have an insn after the loop setting their | |
5132 | value, so we don't need another one. We can't calculate the | |
0f41302f | 5133 | proper final value for such a biv here anyways. */ |
b4ad7b23 RS |
5134 | if (final_value != 0 && ! bl->reversed) |
5135 | { | |
5136 | rtx insert_before; | |
5137 | ||
5138 | /* If the loop has multiple exits, emit the insn before the | |
5139 | loop to ensure that it will always be executed no matter | |
5140 | how the loop exits. Otherwise, emit the insn after the | |
5141 | loop, since this is slightly more efficient. */ | |
353127c2 | 5142 | if (loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]]) |
b4ad7b23 RS |
5143 | insert_before = loop_start; |
5144 | else | |
5145 | insert_before = end_insert_before; | |
5146 | ||
5147 | emit_insn_before (gen_move_insn (bl->biv->dest_reg, final_value), | |
5148 | insert_before); | |
5149 | } | |
5150 | ||
5151 | #if 0 | |
5152 | /* Delete all of the instructions inside the loop which set | |
5153 | the biv, as they are all dead. It is safe to delete them, | |
5154 | because an insn setting a biv will never be part of a libcall. */ | |
5155 | /* However, deleting them will invalidate the regno_last_uid info, | |
5156 | so keeping them around is more convenient. Final_biv_value | |
5157 | will only succeed when there are multiple exits if the biv | |
5158 | is dead at each exit, hence it does not matter that the original | |
5159 | insn remains, because it is dead anyways. */ | |
5160 | for (v = bl->biv; v; v = v->next_iv) | |
5161 | delete_insn (v->insn); | |
5162 | #endif | |
5163 | ||
5164 | if (loop_dump_stream) | |
5165 | fprintf (loop_dump_stream, "Reg %d: biv eliminated\n", | |
5166 | bl->regno); | |
5167 | } | |
5168 | } | |
5169 | ||
5170 | /* Go through all the instructions in the loop, making all the | |
5171 | register substitutions scheduled in REG_MAP. */ | |
5172 | ||
5173 | for (p = loop_start; p != end; p = NEXT_INSN (p)) | |
5174 | if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN | |
5175 | || GET_CODE (p) == CALL_INSN) | |
5176 | { | |
97ec0ad8 R |
5177 | replace_regs (PATTERN (p), reg_map, reg_map_size, 0); |
5178 | replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0); | |
da0c128e | 5179 | INSN_CODE (p) = -1; |
b4ad7b23 RS |
5180 | } |
5181 | ||
73049ebc MT |
5182 | if (loop_info->n_iterations > 0) |
5183 | { | |
5184 | /* When we completely unroll a loop we will likely not need the increment | |
5185 | of the loop BIV and we will not need the conditional branch at the | |
5186 | end of the loop. */ | |
5187 | unrolled_insn_copies = insn_count - 2; | |
5188 | ||
5189 | #ifdef HAVE_cc0 | |
5190 | /* When we completely unroll a loop on a HAVE_cc0 machine we will not | |
5191 | need the comparison before the conditional branch at the end of the | |
5192 | loop. */ | |
80b8e8de | 5193 | unrolled_insn_copies -= 1; |
73049ebc MT |
5194 | #endif |
5195 | ||
5196 | /* We'll need one copy for each loop iteration. */ | |
5197 | unrolled_insn_copies *= loop_info->n_iterations; | |
5198 | ||
5199 | /* A little slop to account for the ability to remove initialization | |
5200 | code, better CSE, and other secondary benefits of completely | |
5201 | unrolling some loops. */ | |
5202 | unrolled_insn_copies -= 1; | |
5203 | ||
5204 | /* Clamp the value. */ | |
5205 | if (unrolled_insn_copies < 0) | |
5206 | unrolled_insn_copies = 0; | |
5207 | } | |
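  /* E.g. with insn_count == 10 and 4 iterations this gives
     (10 - 2) * 4 - 1 == 31 copies (before the HAVE_cc0 adjustment),
     which exceeds insn_count, so the size test below would not unroll
     the loop on its own.  */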
5208 | ||
b4ad7b23 RS |
5209 | /* Unroll loops from within strength reduction so that we can use the |
5210 | induction variable information that strength_reduce has already | |
73049ebc MT |
5211 | collected. Always unroll loops that would be as small or smaller |
5212 | unrolled than when rolled. */ | |
5213 | if (unroll_p | |
5214 | || (loop_info->n_iterations > 0 | |
5215 | && unrolled_insn_copies <= insn_count)) | |
302670f3 MH |
5216 | unroll_loop (loop_end, insn_count, loop_start, end_insert_before, |
5217 | loop_info, 1); | |
b4ad7b23 | 5218 | |
8c660648 | 5219 | #ifdef HAVE_decrement_and_branch_on_count |
cac8ce95 DE |
5220 | /* Instrument the loop with BCT insn. */ |
5221 | if (HAVE_decrement_and_branch_on_count && bct_p | |
5222 | && flag_branch_on_count_reg) | |
302670f3 | 5223 | insert_bct (loop_start, loop_end, loop_info); |
cac8ce95 | 5224 | #endif /* HAVE_decrement_and_branch_on_count */ |
8c660648 | 5225 | |
b4ad7b23 RS |
5226 | if (loop_dump_stream) |
5227 | fprintf (loop_dump_stream, "\n"); | |
3ec2b590 R |
5228 | VARRAY_FREE (reg_iv_type); |
5229 | VARRAY_FREE (reg_iv_info); | |
b4ad7b23 RS |
5230 | } |
5231 | \f | |
5232 | /* Return 1 if X is a valid source for an initial value (or as value being | |
5233 | compared against in an initial test). | |
5234 | ||
5235 | X must be either a register or constant and must not be clobbered between | |
5236 | the current insn and the start of the loop. | |
5237 | ||
5238 | INSN is the insn containing X. */ | |
5239 | ||
5240 | static int | |
5241 | valid_initial_value_p (x, insn, call_seen, loop_start) | |
5242 | rtx x; | |
5243 | rtx insn; | |
5244 | int call_seen; | |
5245 | rtx loop_start; | |
5246 | { | |
5247 | if (CONSTANT_P (x)) | |
5248 | return 1; | |
5249 | ||
d45cf215 | 5250 | /* Only consider pseudos we know about initialized in insns whose luids |
b4ad7b23 RS |
5251 | we know. */ |
5252 | if (GET_CODE (x) != REG | |
5253 | || REGNO (x) >= max_reg_before_loop) | |
5254 | return 0; | |
5255 | ||
5256 | /* Don't use call-clobbered registers across a call which clobbers it. On | |
5257 | some machines, don't use any hard registers at all. */ | |
5258 | if (REGNO (x) < FIRST_PSEUDO_REGISTER | |
e9a25f70 JL |
5259 | && (SMALL_REGISTER_CLASSES |
5260 | || (call_used_regs[REGNO (x)] && call_seen))) | |
b4ad7b23 RS |
5261 | return 0; |
5262 | ||
5263 | /* Don't use registers that have been clobbered before the start of the | |
5264 | loop. */ | |
5265 | if (reg_set_between_p (x, insn, loop_start)) | |
5266 | return 0; | |
5267 | ||
5268 | return 1; | |
5269 | } | |
5270 | \f | |
5271 | /* Scan X for memory refs and check each memory address | |
5272 | as a possible giv. INSN is the insn whose pattern X comes from. | |
5273 | NOT_EVERY_ITERATION is 1 if the insn might not be executed during | |
c5c76735 JL |
5274 | every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed |
5275 | more than once in each loop iteration. */ | |
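/* E.g. a reference like (mem (plus (reg i) (const_int 4))), where I is
   a biv, is recorded below as a DEST_ADDR giv for that address.  */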
b4ad7b23 RS |
5276 | |
5277 | static void | |
c5c76735 JL |
5278 | find_mem_givs (x, insn, not_every_iteration, maybe_multiple, loop_start, |
5279 | loop_end) | |
b4ad7b23 RS |
5280 | rtx x; |
5281 | rtx insn; | |
c5c76735 | 5282 | int not_every_iteration, maybe_multiple; |
b4ad7b23 RS |
5283 | rtx loop_start, loop_end; |
5284 | { | |
5285 | register int i, j; | |
5286 | register enum rtx_code code; | |
6f7d635c | 5287 | register const char *fmt; |
b4ad7b23 RS |
5288 | |
5289 | if (x == 0) | |
5290 | return; | |
5291 | ||
5292 | code = GET_CODE (x); | |
5293 | switch (code) | |
5294 | { | |
5295 | case REG: | |
5296 | case CONST_INT: | |
5297 | case CONST: | |
5298 | case CONST_DOUBLE: | |
5299 | case SYMBOL_REF: | |
5300 | case LABEL_REF: | |
5301 | case PC: | |
5302 | case CC0: | |
5303 | case ADDR_VEC: | |
5304 | case ADDR_DIFF_VEC: | |
5305 | case USE: | |
5306 | case CLOBBER: | |
5307 | return; | |
5308 | ||
5309 | case MEM: | |
5310 | { | |
5311 | rtx src_reg; | |
5312 | rtx add_val; | |
5313 | rtx mult_val; | |
5314 | int benefit; | |
5315 | ||
45f97e2e RH |
5316 | /* This code used to disable creating GIVs with mult_val == 1 and |
5317 | add_val == 0. However, this leads to lost optimizations when | |
5318 | it comes time to combine a set of related DEST_ADDR GIVs, since | |
5319 | this one would not be seen. */ | |
b4ad7b23 | 5320 | |
45f97e2e RH |
5321 | if (general_induction_var (XEXP (x, 0), &src_reg, &add_val, |
5322 | &mult_val, 1, &benefit)) | |
b4ad7b23 RS |
5323 | { |
5324 | /* Found one; record it. */ | |
5325 | struct induction *v | |
5326 | = (struct induction *) oballoc (sizeof (struct induction)); | |
5327 | ||
5328 | record_giv (v, insn, src_reg, addr_placeholder, mult_val, | |
5329 | add_val, benefit, DEST_ADDR, not_every_iteration, | |
c5c76735 | 5330 | maybe_multiple, &XEXP (x, 0), loop_start, loop_end); |
b4ad7b23 RS |
5331 | |
5332 | v->mem_mode = GET_MODE (x); | |
5333 | } | |
b4ad7b23 | 5334 | } |
e9a25f70 JL |
5335 | return; |
5336 | ||
5337 | default: | |
5338 | break; | |
b4ad7b23 RS |
5339 | } |
5340 | ||
5341 | /* Recursively scan the subexpressions for other mem refs. */ | |
5342 | ||
5343 | fmt = GET_RTX_FORMAT (code); | |
5344 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
5345 | if (fmt[i] == 'e') | |
c5c76735 JL |
5346 | find_mem_givs (XEXP (x, i), insn, not_every_iteration, maybe_multiple, |
5347 | loop_start, loop_end); | |
b4ad7b23 RS |
5348 | else if (fmt[i] == 'E') |
5349 | for (j = 0; j < XVECLEN (x, i); j++) | |
5350 | find_mem_givs (XVECEXP (x, i, j), insn, not_every_iteration, | |
c5c76735 | 5351 | maybe_multiple, loop_start, loop_end); |
b4ad7b23 RS |
5352 | } |
5353 | \f | |
5354 | /* Fill in the data about one biv update. | |
5355 | V is the `struct induction' in which we record the biv. (It is | |
5356 | allocated by the caller, with alloca.) | |
5357 | INSN is the insn that sets it. | |
5358 | DEST_REG is the biv's reg. | |
5359 | ||
5360 | MULT_VAL is const1_rtx if the biv is being incremented here, in which case | |
5361 | INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is | |
7dcd3836 RK |
5362 | being set to INC_VAL. |
5363 | ||
5364 | NOT_EVERY_ITERATION is nonzero if this biv update is not known to be | |
5365 | executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update | |
5366 | can be executed more than once per iteration. If MAYBE_MULTIPLE | |
5367 | and NOT_EVERY_ITERATION are both zero, we know that the biv update is | |
5368 | executed exactly once per iteration. */ | |
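/* E.g. the insn I = I + 4 is recorded with MULT_VAL == const1_rtx and
   INC_VAL == (const_int 4), while I = <loop-invariant> is recorded with
   MULT_VAL == const0_rtx and INC_VAL == that invariant.  */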
b4ad7b23 RS |
5369 | |
5370 | static void | |
3ec2b590 | 5371 | record_biv (v, insn, dest_reg, inc_val, mult_val, location, |
7dcd3836 | 5372 | not_every_iteration, maybe_multiple) |
b4ad7b23 RS |
5373 | struct induction *v; |
5374 | rtx insn; | |
5375 | rtx dest_reg; | |
5376 | rtx inc_val; | |
5377 | rtx mult_val; | |
3ec2b590 | 5378 | rtx *location; |
b4ad7b23 | 5379 | int not_every_iteration; |
7dcd3836 | 5380 | int maybe_multiple; |
b4ad7b23 RS |
5381 | { |
5382 | struct iv_class *bl; | |
5383 | ||
5384 | v->insn = insn; | |
5385 | v->src_reg = dest_reg; | |
5386 | v->dest_reg = dest_reg; | |
5387 | v->mult_val = mult_val; | |
5388 | v->add_val = inc_val; | |
3ec2b590 | 5389 | v->location = location; |
b4ad7b23 RS |
5390 | v->mode = GET_MODE (dest_reg); |
5391 | v->always_computable = ! not_every_iteration; | |
8516af93 | 5392 | v->always_executed = ! not_every_iteration; |
7dcd3836 | 5393 | v->maybe_multiple = maybe_multiple; |
b4ad7b23 RS |
5394 | |
5395 | /* Add this to the reg's iv_class, creating a class | |
5396 | if this is the first incrementation of the reg. */ | |
5397 | ||
5398 | bl = reg_biv_class[REGNO (dest_reg)]; | |
5399 | if (bl == 0) | |
5400 | { | |
5401 | /* Create and initialize new iv_class. */ | |
5402 | ||
5403 | bl = (struct iv_class *) oballoc (sizeof (struct iv_class)); | |
5404 | ||
5405 | bl->regno = REGNO (dest_reg); | |
5406 | bl->biv = 0; | |
5407 | bl->giv = 0; | |
5408 | bl->biv_count = 0; | |
5409 | bl->giv_count = 0; | |
5410 | ||
5411 | /* Set initial value to the reg itself. */ | |
5412 | bl->initial_value = dest_reg; | |
c5b7917e | 5413 | /* We haven't seen the initializing insn yet */ |
b4ad7b23 RS |
5414 | bl->init_insn = 0; |
5415 | bl->init_set = 0; | |
5416 | bl->initial_test = 0; | |
5417 | bl->incremented = 0; | |
5418 | bl->eliminable = 0; | |
5419 | bl->nonneg = 0; | |
5420 | bl->reversed = 0; | |
b5d27be7 | 5421 | bl->total_benefit = 0; |
b4ad7b23 RS |
5422 | |
5423 | /* Add this class to loop_iv_list. */ | |
5424 | bl->next = loop_iv_list; | |
5425 | loop_iv_list = bl; | |
5426 | ||
5427 | /* Put it in the array of biv register classes. */ | |
5428 | reg_biv_class[REGNO (dest_reg)] = bl; | |
5429 | } | |
5430 | ||
5431 | /* Update IV_CLASS entry for this biv. */ | |
5432 | v->next_iv = bl->biv; | |
5433 | bl->biv = v; | |
5434 | bl->biv_count++; | |
5435 | if (mult_val == const1_rtx) | |
5436 | bl->incremented = 1; | |
5437 | ||
5438 | if (loop_dump_stream) | |
5439 | { | |
5440 | fprintf (loop_dump_stream, | |
5441 | "Insn %d: possible biv, reg %d,", | |
5442 | INSN_UID (insn), REGNO (dest_reg)); | |
5443 | if (GET_CODE (inc_val) == CONST_INT) | |
9ba7a303 JC |
5444 | { |
5445 | fprintf (loop_dump_stream, " const ="); | |
5446 | fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (inc_val)); | |
5447 | fputc ('\n', loop_dump_stream); | |
5448 | } | |
b4ad7b23 RS |
5449 | else |
5450 | { | |
5451 | fprintf (loop_dump_stream, " const = "); | |
5452 | print_rtl (loop_dump_stream, inc_val); | |
5453 | fprintf (loop_dump_stream, "\n"); | |
5454 | } | |
5455 | } | |
5456 | } | |
5457 | \f | |
5458 | /* Fill in the data about one giv. | |
5459 | V is the `struct induction' in which we record the giv. (It is | |
5460 | allocated by the caller, with alloca.) | |
5461 | INSN is the insn that sets it. | |
5462 | BENEFIT estimates the savings from deleting this insn. | |
5463 | TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed | |
5464 | into a register or is used as a memory address. | |
5465 | ||
5466 | SRC_REG is the biv reg which the giv is computed from. | |
5467 | DEST_REG is the giv's reg (if the giv is stored in a reg). | |
5468 | MULT_VAL and ADD_VAL are the coefficients used to compute the giv. | |
5469 | LOCATION points to the place where this giv's value appears in INSN. */ | |
5470 | ||
5471 | static void | |
5472 | record_giv (v, insn, src_reg, dest_reg, mult_val, add_val, benefit, | |
c5c76735 JL |
5473 | type, not_every_iteration, maybe_multiple, location, loop_start, |
5474 | loop_end) | |
b4ad7b23 RS |
5475 | struct induction *v; |
5476 | rtx insn; | |
5477 | rtx src_reg; | |
5478 | rtx dest_reg; | |
5479 | rtx mult_val, add_val; | |
5480 | int benefit; | |
5481 | enum g_types type; | |
c5c76735 | 5482 | int not_every_iteration, maybe_multiple; |
b4ad7b23 RS |
5483 | rtx *location; |
5484 | rtx loop_start, loop_end; | |
5485 | { | |
5486 | struct induction *b; | |
5487 | struct iv_class *bl; | |
5488 | rtx set = single_set (insn); | |
b4ad7b23 RS |
5489 | |
5490 | v->insn = insn; | |
5491 | v->src_reg = src_reg; | |
5492 | v->giv_type = type; | |
5493 | v->dest_reg = dest_reg; | |
5494 | v->mult_val = mult_val; | |
5495 | v->add_val = add_val; | |
5496 | v->benefit = benefit; | |
5497 | v->location = location; | |
5498 | v->cant_derive = 0; | |
5499 | v->combined_with = 0; | |
c5c76735 | 5500 | v->maybe_multiple = maybe_multiple; |
b4ad7b23 RS |
5501 | v->maybe_dead = 0; |
5502 | v->derive_adjustment = 0; | |
5503 | v->same = 0; | |
5504 | v->ignore = 0; | |
5505 | v->new_reg = 0; | |
5506 | v->final_value = 0; | |
f415f7be | 5507 | v->same_insn = 0; |
8516af93 | 5508 | v->auto_inc_opt = 0; |
9ae8ffe7 JL |
5509 | v->unrolled = 0; |
5510 | v->shared = 0; | |
4d87f7a7 | 5511 | v->derived_from = 0; |
3ec2b590 | 5512 | v->last_use = 0; |
b4ad7b23 RS |
5513 | |
5514 | /* The v->always_computable field is used in update_giv_derive, to | |
5515 | determine whether a giv can be used to derive another giv. For a | |
5516 | DEST_REG giv, INSN computes a new value for the giv, so its value | |
5517 | isn't computable if INSN isn't executed every iteration. | |
5518 | However, for a DEST_ADDR giv, INSN merely uses the value of the giv; | |
5519 | it does not compute a new value. Hence the value is always computable | |
d45cf215 | 5520 | regardless of whether INSN is executed each iteration. */ |
b4ad7b23 RS |
5521 | |
5522 | if (type == DEST_ADDR) | |
5523 | v->always_computable = 1; | |
5524 | else | |
5525 | v->always_computable = ! not_every_iteration; | |
5526 | ||
8516af93 JW |
5527 | v->always_executed = ! not_every_iteration; |
5528 | ||
b4ad7b23 RS |
5529 | if (type == DEST_ADDR) |
5530 | { | |
5531 | v->mode = GET_MODE (*location); | |
5532 | v->lifetime = 1; | |
b4ad7b23 RS |
5533 | } |
5534 | else /* type == DEST_REG */ | |
5535 | { | |
5536 | v->mode = GET_MODE (SET_DEST (set)); | |
5537 | ||
b1f21e0a MM |
5538 | v->lifetime = (uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] |
5539 | - uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]); | |
b4ad7b23 | 5540 | |
b4ad7b23 RS |
5541 | /* If the lifetime is zero, it means that this register is |
5542 | really a dead store. So mark this as a giv that can be | |
0f41302f | 5543 | ignored. This will not prevent the biv from being eliminated. */ |
b4ad7b23 RS |
5544 | if (v->lifetime == 0) |
5545 | v->ignore = 1; | |
5546 | ||
3ec2b590 R |
5547 | REG_IV_TYPE (REGNO (dest_reg)) = GENERAL_INDUCT; |
5548 | REG_IV_INFO (REGNO (dest_reg)) = v; | |
b4ad7b23 RS |
5549 | } |
5550 | ||
5551 | /* Add the giv to the class of givs computed from one biv. */ | |
5552 | ||
5553 | bl = reg_biv_class[REGNO (src_reg)]; | |
5554 | if (bl) | |
5555 | { | |
5556 | v->next_iv = bl->giv; | |
5557 | bl->giv = v; | |
5558 | /* Don't count DEST_ADDR. This is supposed to count the number of | |
5559 | insns that calculate givs. */ | |
5560 | if (type == DEST_REG) | |
5561 | bl->giv_count++; | |
5562 | bl->total_benefit += benefit; | |
5563 | } | |
5564 | else | |
5565 | /* Fatal error, biv missing for this giv? */ | |
5566 | abort (); | |
5567 | ||
5568 | if (type == DEST_ADDR) | |
5569 | v->replaceable = 1; | |
5570 | else | |
5571 | { | |
5572 | /* The giv can be replaced outright by the reduced register only if all | |
5573 | of the following conditions are true: | |
5574 | - the insn that sets the giv is always executed on any iteration | |
5575 | on which the giv is used at all | |
5576 | (there are two ways to deduce this: | |
5577 | either the insn is executed on every iteration, | |
5578 | or all uses follow that insn in the same basic block), | |
5579 | - the giv is not used outside the loop | |
5580 | - no assignments to the biv occur during the giv's lifetime. */ | |
5581 | ||
b1f21e0a | 5582 | if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn) |
b4ad7b23 | 5583 | /* Previous line always fails if INSN was moved by loop opt. */ |
b1f21e0a | 5584 | && uid_luid[REGNO_LAST_UID (REGNO (dest_reg))] < INSN_LUID (loop_end) |
b4ad7b23 RS |
5585 | && (! not_every_iteration |
5586 | || last_use_this_basic_block (dest_reg, insn))) | |
5587 | { | |
5588 | /* Now check that there are no assignments to the biv within the | |
5589 | giv's lifetime. This requires two separate checks. */ | |
5590 | ||
5591 | /* Check each biv update, and fail if any are between the first | |
5592 | and last use of the giv. | |
5593 | ||
5594 | If this loop contains an inner loop that was unrolled, then | |
5595 | the insn modifying the biv may have been emitted by the loop | |
5596 | unrolling code, and hence does not have a valid luid. Just | |
5597 | mark the giv as not replaceable in this case. The biv is not very | |
5598 | useful anyway, because it is used in two different loops. | |
5599 | It is very unlikely that we would be able to optimize the giv | |
5600 | using this biv. */ | |
5601 | ||
5602 | v->replaceable = 1; | |
5603 | for (b = bl->biv; b; b = b->next_iv) | |
5604 | { | |
5605 | if (INSN_UID (b->insn) >= max_uid_for_loop | |
5606 | || ((uid_luid[INSN_UID (b->insn)] | |
b1f21e0a | 5607 | >= uid_luid[REGNO_FIRST_UID (REGNO (dest_reg))]) |
b4ad7b23 | 5608 | && (uid_luid[INSN_UID (b->insn)] |
b1f21e0a | 5609 | <= uid_luid[REGNO_LAST_UID (REGNO (dest_reg))]))) |
b4ad7b23 RS |
5610 | { |
5611 | v->replaceable = 0; | |
5612 | v->not_replaceable = 1; | |
5613 | break; | |
5614 | } | |
5615 | } | |
5616 | ||
5031afa7 JW |
5617 | /* If there are any backwards branches that go from after the |
5618 | biv update to before it, then this giv is not replaceable. */ | |
b4ad7b23 | 5619 | if (v->replaceable) |
5031afa7 JW |
5620 | for (b = bl->biv; b; b = b->next_iv) |
5621 | if (back_branch_in_range_p (b->insn, loop_start, loop_end)) | |
5622 | { | |
5623 | v->replaceable = 0; | |
5624 | v->not_replaceable = 1; | |
5625 | break; | |
5626 | } | |
b4ad7b23 RS |
5627 | } |
5628 | else | |
5629 | { | |
5630 | /* May still be replaceable, we don't have enough info here to | |
5631 | decide. */ | |
5632 | v->replaceable = 0; | |
5633 | v->not_replaceable = 0; | |
5634 | } | |
5635 | } | |
5636 | ||
45f97e2e RH |
5637 | /* Record whether the add_val contains a const_int, for later use by |
5638 | combine_givs. */ | |
5639 | { | |
5640 | rtx tem = add_val; | |
5641 | ||
5642 | v->no_const_addval = 1; | |
5643 | if (tem == const0_rtx) | |
5644 | ; | |
5645 | else if (GET_CODE (tem) == CONST_INT) | |
5646 | v->no_const_addval = 0; | |
5647 | else if (GET_CODE (tem) == PLUS) | |
5648 | { | |
5649 | while (1) | |
5650 | { | |
5651 | if (GET_CODE (XEXP (tem, 0)) == PLUS) | |
5652 | tem = XEXP (tem, 0); | |
5653 | else if (GET_CODE (XEXP (tem, 1)) == PLUS) | |
5654 | tem = XEXP (tem, 1); | |
5655 | else | |
5656 | break; | |
5657 | } | |
5658 | if (GET_CODE (XEXP (tem, 1)) == CONST_INT) | |
5659 | v->no_const_addval = 0; | |
5660 | } | |
5661 | } | |
5662 | ||
b4ad7b23 RS |
5663 | if (loop_dump_stream) |
5664 | { | |
5665 | if (type == DEST_REG) | |
5666 | fprintf (loop_dump_stream, "Insn %d: giv reg %d", | |
5667 | INSN_UID (insn), REGNO (dest_reg)); | |
5668 | else | |
5669 | fprintf (loop_dump_stream, "Insn %d: dest address", | |
5670 | INSN_UID (insn)); | |
5671 | ||
5672 | fprintf (loop_dump_stream, " src reg %d benefit %d", | |
5673 | REGNO (src_reg), v->benefit); | |
4b259e3f R |
5674 | fprintf (loop_dump_stream, " lifetime %d", |
5675 | v->lifetime); | |
b4ad7b23 RS |
5676 | |
5677 | if (v->replaceable) | |
5678 | fprintf (loop_dump_stream, " replaceable"); | |
5679 | ||
45f97e2e RH |
5680 | if (v->no_const_addval) |
5681 | fprintf (loop_dump_stream, " ncav"); | |
5682 | ||
b4ad7b23 | 5683 | if (GET_CODE (mult_val) == CONST_INT) |
9ba7a303 JC |
5684 | { |
5685 | fprintf (loop_dump_stream, " mult "); | |
5686 | fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (mult_val)); | |
5687 | } | |
b4ad7b23 RS |
5688 | else |
5689 | { | |
5690 | fprintf (loop_dump_stream, " mult "); | |
5691 | print_rtl (loop_dump_stream, mult_val); | |
5692 | } | |
5693 | ||
5694 | if (GET_CODE (add_val) == CONST_INT) | |
9ba7a303 JC |
5695 | { |
5696 | fprintf (loop_dump_stream, " add "); | |
5697 | fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (add_val)); | |
5698 | } | |
b4ad7b23 RS |
5699 | else |
5700 | { | |
5701 | fprintf (loop_dump_stream, " add "); | |
5702 | print_rtl (loop_dump_stream, add_val); | |
5703 | } | |
5704 | } | |
5705 | ||
5706 | if (loop_dump_stream) | |
5707 | fprintf (loop_dump_stream, "\n"); | |
5708 | ||
5709 | } | |
5710 | ||
5711 | ||
5712 | /* All this does is determine whether a giv can be made replaceable because | |
5713 | its final value can be calculated. This code can not be part of record_giv | |
5714 | above, because final_giv_value requires that the number of loop iterations | |
5715 | be known, and that can not be accurately calculated until after all givs | |
5716 | have been identified. */ | |
5717 | ||
5718 | static void | |
302670f3 | 5719 | check_final_value (v, loop_start, loop_end, n_iterations) |
b4ad7b23 RS |
5720 | struct induction *v; |
5721 | rtx loop_start, loop_end; | |
302670f3 | 5722 | unsigned HOST_WIDE_INT n_iterations; |
b4ad7b23 RS |
5723 | { |
5724 | struct iv_class *bl; | |
5725 | rtx final_value = 0; | |
b4ad7b23 RS |
5726 | |
5727 | bl = reg_biv_class[REGNO (v->src_reg)]; | |
5728 | ||
5729 | /* DEST_ADDR givs will never reach here, because they are always marked | |
5730 | replaceable above in record_giv. */ | |
5731 | ||
5732 | /* The giv can be replaced outright by the reduced register only if all | |
5733 | of the following conditions are true: | |
5734 | - the insn that sets the giv is always executed on any iteration | |
5735 | on which the giv is used at all | |
5736 | (there are two ways to deduce this: | |
5737 | either the insn is executed on every iteration, | |
5738 | or all uses follow that insn in the same basic block), | |
5739 | - its final value can be calculated (this condition is different | |
5740 | than the one above in record_giv) | |
5741 | - no assignments to the biv occur during the giv's lifetime. */ | |
5742 | ||
5743 | #if 0 | |
5744 | /* This is only called now when replaceable is known to be false. */ | |
5745 | /* Clear replaceable, so that it won't confuse final_giv_value. */ | |
5746 | v->replaceable = 0; | |
5747 | #endif | |
5748 | ||
302670f3 | 5749 | if ((final_value = final_giv_value (v, loop_start, loop_end, n_iterations)) |
b4ad7b23 RS |
5750 | && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn))) |
5751 | { | |
5752 | int biv_increment_seen = 0; | |
5753 | rtx p = v->insn; | |
5754 | rtx last_giv_use; | |
5755 | ||
5756 | v->replaceable = 1; | |
5757 | ||
5758 | /* When trying to determine whether or not a biv increment occurs | |
5759 | during the lifetime of the giv, we can ignore uses of the variable | |
5760 | outside the loop because final_value is true. Hence we can not | |
5761 | use regno_last_uid and regno_first_uid as above in record_giv. */ | |
5762 | ||
5763 | /* Search the loop to determine whether any assignments to the | |
5764 | biv occur during the giv's lifetime. Start with the insn | |
5765 | that sets the giv, and search around the loop until we come | |
5766 | back to that insn again. | |
5767 | ||
5768 | Also fail if there is a jump within the giv's lifetime that jumps | |
5769 | to somewhere outside the lifetime but still within the loop. This | |
5770 | catches spaghetti code where the execution order is not linear, and | |
5771 | hence the above test fails. Here we assume that the giv lifetime | |
5772 | does not extend from one iteration of the loop to the next, so as | |
5773 | to make the test easier. Since the lifetime isn't known yet, | |
5774 | this requires two loops. See also record_giv above. */ | |
5775 | ||
5776 | last_giv_use = v->insn; | |
5777 | ||
5778 | while (1) | |
5779 | { | |
5780 | p = NEXT_INSN (p); | |
5781 | if (p == loop_end) | |
5782 | p = NEXT_INSN (loop_start); | |
5783 | if (p == v->insn) | |
5784 | break; | |
5785 | ||
5786 | if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN | |
5787 | || GET_CODE (p) == CALL_INSN) | |
5788 | { | |
5789 | if (biv_increment_seen) | |
5790 | { | |
5791 | if (reg_mentioned_p (v->dest_reg, PATTERN (p))) | |
5792 | { | |
5793 | v->replaceable = 0; | |
5794 | v->not_replaceable = 1; | |
5795 | break; | |
5796 | } | |
5797 | } | |
c5da853f | 5798 | else if (reg_set_p (v->src_reg, PATTERN (p))) |
b4ad7b23 RS |
5799 | biv_increment_seen = 1; |
5800 | else if (reg_mentioned_p (v->dest_reg, PATTERN (p))) | |
5801 | last_giv_use = p; | |
5802 | } | |
5803 | } | |
5804 | ||
5805 | /* Now that the lifetime of the giv is known, check for branches | |
5806 | from within the lifetime to outside the lifetime if it is still | |
5807 | replaceable. */ | |
5808 | ||
5809 | if (v->replaceable) | |
5810 | { | |
5811 | p = v->insn; | |
5812 | while (1) | |
5813 | { | |
5814 | p = NEXT_INSN (p); | |
5815 | if (p == loop_end) | |
5816 | p = NEXT_INSN (loop_start); | |
5817 | if (p == last_giv_use) | |
5818 | break; | |
5819 | ||
5820 | if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) | |
5821 | && LABEL_NAME (JUMP_LABEL (p)) | |
1cb1fe66 R |
5822 | && ((loop_insn_first_p (JUMP_LABEL (p), v->insn) |
5823 | && loop_insn_first_p (loop_start, JUMP_LABEL (p))) | |
5824 | || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p)) | |
5825 | && loop_insn_first_p (JUMP_LABEL (p), loop_end)))) | |
b4ad7b23 RS |
5826 | { |
5827 | v->replaceable = 0; | |
5828 | v->not_replaceable = 1; | |
5829 | ||
5830 | if (loop_dump_stream) | |
5831 | fprintf (loop_dump_stream, | |
5832 | "Found branch outside giv lifetime.\n"); | |
5833 | ||
5834 | break; | |
5835 | } | |
5836 | } | |
5837 | } | |
5838 | ||
5839 | /* If it is replaceable, then save the final value. */ | |
5840 | if (v->replaceable) | |
5841 | v->final_value = final_value; | |
5842 | } | |
5843 | ||
5844 | if (loop_dump_stream && v->replaceable) | |
5845 | fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n", | |
5846 | INSN_UID (v->insn), REGNO (v->dest_reg)); | |
5847 | } | |
5848 | \f | |
5849 | /* Update the status of whether a giv can derive other givs. | |
5850 | ||
5851 | We need to do something special if there is or may be an update to the biv | |
5852 | between the time the giv is defined and the time it is used to derive | |
5853 | another giv. | |
5854 | ||
5855 | In addition, a giv that is only conditionally set is not allowed to | |
5856 | derive another giv once a label has been passed. | |
5857 | ||
5858 | The cases we look at are when a label or an update to a biv is passed. */ | |
5859 | ||
5860 | static void | |
5861 | update_giv_derive (p) | |
5862 | rtx p; | |
5863 | { | |
5864 | struct iv_class *bl; | |
5865 | struct induction *biv, *giv; | |
5866 | rtx tem; | |
5867 | int dummy; | |
5868 | ||
5869 | /* Search all IV classes, then all bivs, and finally all givs. | |
5870 | ||
7dcd3836 | 5871 | There are three cases we are concerned with. First we have the situation |
b4ad7b23 RS |
5872 | of a giv that is only updated conditionally. In that case, it may not |
5873 | derive any givs after a label is passed. | |
5874 | ||
5875 | The second case is when a biv update occurs, or may occur, after the | |
5876 | definition of a giv. For certain biv updates (see below) that are | |
5877 | known to occur between the giv definition and use, we can adjust the | |
5878 | giv definition. For others, or when the biv update is conditional, | |
5879 | we must prevent the giv from deriving any other givs. There are two | |
5880 | sub-cases within this case. | |
5881 | ||
5882 | If this is a label, we are concerned with any biv update that is done | |
5883 | conditionally, since it may be done after the giv is defined followed by | |
5884 | a branch here (actually, we need to pass both a jump and a label, but | |
5885 | this extra tracking doesn't seem worth it). | |
5886 | ||
7dcd3836 RK |
5887 | If this is a jump, we are concerned about any biv update that may be |
5888 | executed multiple times. We are actually only concerned about | |
5889 | backward jumps, but it is probably not worth performing the test | |
5890 | on the jump again here. | |
5891 | ||
5892 | If this is a biv update, we must adjust the giv status to show that a | |
b4ad7b23 RS |
5893 | subsequent biv update was performed. If this adjustment cannot be done, |
5894 | the giv cannot derive further givs. */ | |
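/* For instance, suppose the giv G = B*3 + 2 has been computed and we
   then pass the biv update B = B + 4.  Expressed in terms of the
   updated biv, the value G holds is B*3 + 2 - 12, so we accumulate
   biv->add_val * giv->mult_val = 4*3 = 12 into derive_adjustment as
   the compensation for any giv later derived from G.  */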
5895 | ||
5896 | for (bl = loop_iv_list; bl; bl = bl->next) | |
5897 | for (biv = bl->biv; biv; biv = biv->next_iv) | |
7dcd3836 RK |
5898 | if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN |
5899 | || biv->insn == p) | |
b4ad7b23 RS |
5900 | { |
5901 | for (giv = bl->giv; giv; giv = giv->next_iv) | |
5902 | { | |
5903 | /* If cant_derive is already true, there is no point in | |
5904 | checking all of these conditions again. */ | |
5905 | if (giv->cant_derive) | |
5906 | continue; | |
5907 | ||
5908 | /* If this giv is conditionally set and we have passed a label, | |
5909 | it cannot derive anything. */ | |
5910 | if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable) | |
5911 | giv->cant_derive = 1; | |
5912 | ||
5913 | /* Skip givs that have mult_val == 0, since | |
5914 | they are really invariants. Also skip those that are | |
5915 | replaceable, since we know their lifetime doesn't contain | |
5916 | any biv update. */ | |
5917 | else if (giv->mult_val == const0_rtx || giv->replaceable) | |
5918 | continue; | |
5919 | ||
5920 | /* The only way we can allow this giv to derive another | |
5921 | is if this is a biv increment and we can form the product | |
5922 | of biv->add_val and giv->mult_val. In this case, we will | |
5923 | be able to compute a compensation. */ | |
5924 | else if (biv->insn == p) | |
5925 | { | |
c160c628 RK |
5926 | tem = 0; |
5927 | ||
5928 | if (biv->mult_val == const1_rtx) | |
38a448ca RH |
5929 | tem = simplify_giv_expr (gen_rtx_MULT (giv->mode, |
5930 | biv->add_val, | |
5931 | giv->mult_val), | |
c160c628 RK |
5932 | &dummy); |
5933 | ||
5934 | if (tem && giv->derive_adjustment) | |
c5c76735 JL |
5935 | tem = simplify_giv_expr |
5936 | (gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment), | |
5937 | &dummy); | |
5938 | ||
c160c628 | 5939 | if (tem) |
b4ad7b23 RS |
5940 | giv->derive_adjustment = tem; |
5941 | else | |
5942 | giv->cant_derive = 1; | |
5943 | } | |
7dcd3836 RK |
5944 | else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable) |
5945 | || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple)) | |
b4ad7b23 RS |
5946 | giv->cant_derive = 1; |
5947 | } | |
5948 | } | |
5949 | } | |
5950 | \f | |
5951 | /* Check whether an insn is an increment legitimate for a basic induction var. | |
7056f7e8 RS |
5952 | X is the source of insn P, or a part of it. |
5953 | MODE is the mode in which X should be interpreted. | |
5954 | ||
b4ad7b23 RS |
5955 | DEST_REG is the putative biv, also the destination of the insn. |
5956 | We accept patterns of these forms: | |
09d7f5a5 | 5957 | REG = REG + INVARIANT (includes REG = REG - CONSTANT) |
b4ad7b23 | 5958 | REG = INVARIANT + REG |
b4ad7b23 RS |
5959 | |
5960 | If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX, | |
3ec2b590 R |
5961 | store the additive term into *INC_VAL, and store the place where |
5962 | we found the additive term into *LOCATION. | |
b4ad7b23 RS |
5963 | |
5964 | If X is an assignment of an invariant into DEST_REG, we set | |
5965 | *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL. | |
5966 | ||
09d7f5a5 RK |
5967 | We also want to detect a BIV when it corresponds to a variable |
5968 | whose mode was promoted via PROMOTED_MODE. In that case, an increment | |
5969 | of the variable may be a PLUS that adds a SUBREG of that variable to | |
5970 | an invariant and then sign- or zero-extends the result of the PLUS | |
5971 | into the variable. | |
5972 | ||
5973 | Most GIVs in such cases will be in the promoted mode, since that is | |
5974 | probably the natural computation mode (and almost certainly the mode | |
5975 | used for addresses) on the machine. So we view the pseudo-reg containing | |
5976 | the variable as the BIV, as if it were simply incremented. | |
5977 | ||
5978 | Note that treating the entire pseudo as a BIV will result in making | |
5979 | simple increments to any GIVs based on it. However, if the variable | |
5980 | overflows in its declared mode but not its promoted mode, the result will | |
5981 | be incorrect. This is acceptable if the variable is signed, since | |
5982 | overflows in such cases are undefined, but not if it is unsigned, since | |
5983 | those overflows are defined. So we only check for SIGN_EXTEND and | |
5984 | not ZERO_EXTEND. | |
5985 | ||
5986 | If we cannot find a biv, we return 0. */ | |
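/* For instance, for an insn P of the form

       (set (reg 100) (plus (reg 100) (const_int 4)))

   with X = (plus (reg 100) (const_int 4)) and DEST_REG = (reg 100),
   we return 1 with *MULT_VAL = const1_rtx, *INC_VAL = (const_int 4),
   and *LOCATION pointing at the (const_int 4) operand.  An invariant
   assignment such as (set (reg 100) (const_int 0)), seen while inside
   the innermost loop, instead yields *MULT_VAL = const0_rtx and
   *INC_VAL = (const_int 0).  */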
b4ad7b23 RS |
5987 | |
5988 | static int | |
3ec2b590 | 5989 | basic_induction_var (x, mode, dest_reg, p, inc_val, mult_val, location) |
b4ad7b23 | 5990 | register rtx x; |
7056f7e8 | 5991 | enum machine_mode mode; |
09d7f5a5 | 5992 | rtx p; |
b4ad7b23 RS |
5993 | rtx dest_reg; |
5994 | rtx *inc_val; | |
5995 | rtx *mult_val; | |
3ec2b590 | 5996 | rtx **location; |
b4ad7b23 RS |
5997 | { |
5998 | register enum rtx_code code; | |
3ec2b590 | 5999 | rtx *argp, arg; |
09d7f5a5 | 6000 | rtx insn, set = 0; |
b4ad7b23 RS |
6001 | |
6002 | code = GET_CODE (x); | |
6003 | switch (code) | |
6004 | { | |
6005 | case PLUS: | |
45f97e2e | 6006 | if (rtx_equal_p (XEXP (x, 0), dest_reg) |
09d7f5a5 RK |
6007 | || (GET_CODE (XEXP (x, 0)) == SUBREG |
6008 | && SUBREG_PROMOTED_VAR_P (XEXP (x, 0)) | |
6009 | && SUBREG_REG (XEXP (x, 0)) == dest_reg)) | |
3ec2b590 R |
6010 | { |
6011 | argp = &XEXP (x, 1); | |
6012 | } | |
45f97e2e | 6013 | else if (rtx_equal_p (XEXP (x, 1), dest_reg) |
09d7f5a5 | 6014 | || (GET_CODE (XEXP (x, 1)) == SUBREG |
b81fd0f4 RS |
6015 | && SUBREG_PROMOTED_VAR_P (XEXP (x, 1)) |
6016 | && SUBREG_REG (XEXP (x, 1)) == dest_reg)) | |
3ec2b590 R |
6017 | { |
6018 | argp = &XEXP (x, 0); | |
6019 | } | |
b4ad7b23 RS |
6020 | else |
6021 | return 0; | |
6022 | ||
3ec2b590 | 6023 | arg = *argp; |
b4ad7b23 RS |
6024 | if (invariant_p (arg) != 1) |
6025 | return 0; | |
6026 | ||
7056f7e8 | 6027 | *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0); |
b4ad7b23 | 6028 | *mult_val = const1_rtx; |
3ec2b590 | 6029 | *location = argp; |
b4ad7b23 RS |
6030 | return 1; |
6031 | ||
09d7f5a5 RK |
6032 | case SUBREG: |
6033 | /* If this is a SUBREG for a promoted variable, check the inner | |
6034 | value. */ | |
6035 | if (SUBREG_PROMOTED_VAR_P (x)) | |
7056f7e8 | 6036 | return basic_induction_var (SUBREG_REG (x), GET_MODE (SUBREG_REG (x)), |
3ec2b590 | 6037 | dest_reg, p, inc_val, mult_val, location); |
fe159061 | 6038 | return 0; |
b4ad7b23 | 6039 | |
09d7f5a5 | 6040 | case REG: |
45f97e2e | 6041 | /* If this register is assigned in a previous insn, look at its |
09d7f5a5 RK |
6042 | source, but don't go outside the loop or past a label. */ |
6043 | ||
45f97e2e RH |
6044 | insn = p; |
6045 | while (1) | |
6046 | { | |
6047 | do { | |
6048 | insn = PREV_INSN (insn); | |
6049 | } while (insn && GET_CODE (insn) == NOTE | |
6050 | && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG); | |
09d7f5a5 | 6051 | |
45f97e2e RH |
6052 | if (!insn) |
6053 | break; | |
6054 | set = single_set (insn); | |
6055 | if (set == 0) | |
6056 | break; | |
09d7f5a5 | 6057 | |
45f97e2e RH |
6058 | if ((SET_DEST (set) == x |
6059 | || (GET_CODE (SET_DEST (set)) == SUBREG | |
6060 | && (GET_MODE_SIZE (GET_MODE (SET_DEST (set))) | |
6061 | <= UNITS_PER_WORD) | |
6062 | && SUBREG_REG (SET_DEST (set)) == x)) | |
6063 | && basic_induction_var (SET_SRC (set), | |
6064 | (GET_MODE (SET_SRC (set)) == VOIDmode | |
6065 | ? GET_MODE (x) | |
6066 | : GET_MODE (SET_SRC (set))), | |
6067 | dest_reg, insn, | |
3ec2b590 | 6068 | inc_val, mult_val, location)) |
45f97e2e RH |
6069 | return 1; |
6070 | } | |
0f41302f | 6071 | /* ... fall through ... */ |
b4ad7b23 RS |
6072 | |
6073 | /* Can accept constant setting of biv only when inside the innermost loop. | |
6074 | Otherwise, a biv of an inner loop may be incorrectly recognized | |
6075 | as a biv of the outer loop, | |
6076 | causing code to be moved INTO the inner loop. */ | |
6077 | case MEM: | |
b4ad7b23 RS |
6078 | if (invariant_p (x) != 1) |
6079 | return 0; | |
6080 | case CONST_INT: | |
6081 | case SYMBOL_REF: | |
6082 | case CONST: | |
829002bb BM |
6083 | /* convert_modes aborts if we try to convert to or from CCmode, so just |
6084 | exclude that case. It is very unlikely that a condition code value | |
6085 | would be a useful iterator anyway. */ | |
3c748bb6 | 6086 | if (this_loop_info.loops_enclosed == 1 |
829002bb BM |
6087 | && GET_MODE_CLASS (mode) != MODE_CC |
6088 | && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC) | |
b4ad7b23 | 6089 | { |
7056f7e8 RS |
6090 | /* Possible bug here? Perhaps we don't know the mode of X. */ |
6091 | *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0); | |
b4ad7b23 RS |
6092 | *mult_val = const0_rtx; |
6093 | return 1; | |
6094 | } | |
6095 | else | |
6096 | return 0; | |
6097 | ||
09d7f5a5 | 6098 | case SIGN_EXTEND: |
7056f7e8 | 6099 | return basic_induction_var (XEXP (x, 0), GET_MODE (XEXP (x, 0)), |
3ec2b590 | 6100 | dest_reg, p, inc_val, mult_val, location); |
45f97e2e | 6101 | |
09d7f5a5 RK |
6102 | case ASHIFTRT: |
6103 | /* Similar, since this can be a sign extension. */ | |
6104 | for (insn = PREV_INSN (p); | |
6105 | (insn && GET_CODE (insn) == NOTE | |
6106 | && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG); | |
6107 | insn = PREV_INSN (insn)) | |
6108 | ; | |
6109 | ||
6110 | if (insn) | |
6111 | set = single_set (insn); | |
6112 | ||
6113 | if (set && SET_DEST (set) == XEXP (x, 0) | |
6114 | && GET_CODE (XEXP (x, 1)) == CONST_INT | |
6115 | && INTVAL (XEXP (x, 1)) >= 0 | |
6116 | && GET_CODE (SET_SRC (set)) == ASHIFT | |
6117 | && XEXP (x, 1) == XEXP (SET_SRC (set), 1)) | |
7056f7e8 RS |
6118 | return basic_induction_var (XEXP (SET_SRC (set), 0), |
6119 | GET_MODE (XEXP (x, 0)), | |
3ec2b590 R |
6120 | dest_reg, insn, inc_val, mult_val, |
6121 | location); | |
09d7f5a5 RK |
6122 | return 0; |
6123 | ||
b4ad7b23 RS |
6124 | default: |
6125 | return 0; | |
6126 | } | |
6127 | } | |
6128 | \f | |
6129 | /* A general induction variable (giv) is any quantity that is a linear | |
6130 | function of a basic induction variable, | |
6131 | i.e. giv = biv * mult_val + add_val. | |
6132 | The coefficients can be any loop invariant quantity. | |
6133 | A giv need not be computed directly from the biv; | |
6134 | it can be computed by way of other givs. */ | |
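/* For example, in a loop over an array A of 4-byte elements,

       for (i = 0; i < n; i++)
         sum += a[i];

   the counter I is typically a biv, and the address A + I*4 used to
   fetch a[i] is a giv of that biv, with mult_val == 4 and add_val ==
   the loop-invariant address of A.  */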
6135 | ||
6136 | /* Determine whether X computes a giv. | |
6137 | If it does, return nonzero and store the benefit from | |
6138 | eliminating the computation of X in *PBENEFIT; | |
6139 | set *SRC_REG to the register of the biv that it is computed from; | |
6140 | set *ADD_VAL and *MULT_VAL to the coefficients, | |
6141 | such that the value of X is biv * mult + add. */ | |
6142 | ||
6143 | static int | |
45f97e2e | 6144 | general_induction_var (x, src_reg, add_val, mult_val, is_addr, pbenefit) |
b4ad7b23 RS |
6145 | rtx x; |
6146 | rtx *src_reg; | |
6147 | rtx *add_val; | |
6148 | rtx *mult_val; | |
45f97e2e RH |
6149 | int is_addr; |
6150 | int *pbenefit; | |
b4ad7b23 RS |
6151 | { |
6152 | rtx orig_x = x; | |
b4ad7b23 RS |
6153 | char *storage; |
6154 | ||
6155 | /* If this is an invariant, forget it, it isn't a giv. */ | |
6156 | if (invariant_p (x) == 1) | |
6157 | return 0; | |
6158 | ||
6159 | /* See if the expression could be a giv and get its form. | |
6160 | Mark our place on the obstack in case we don't find a giv. */ | |
6161 | storage = (char *) oballoc (0); | |
45f97e2e RH |
6162 | *pbenefit = 0; |
6163 | x = simplify_giv_expr (x, pbenefit); | |
b4ad7b23 RS |
6164 | if (x == 0) |
6165 | { | |
6166 | obfree (storage); | |
6167 | return 0; | |
6168 | } | |
6169 | ||
6170 | switch (GET_CODE (x)) | |
6171 | { | |
6172 | case USE: | |
6173 | case CONST_INT: | |
6174 | /* Since this is now an invariant and wasn't before, it must be a giv | |
6175 | with MULT_VAL == 0. It doesn't matter which BIV we associate this | |
6176 | with. */ | |
6177 | *src_reg = loop_iv_list->biv->dest_reg; | |
6178 | *mult_val = const0_rtx; | |
6179 | *add_val = x; | |
6180 | break; | |
6181 | ||
6182 | case REG: | |
6183 | /* This is equivalent to a BIV. */ | |
6184 | *src_reg = x; | |
6185 | *mult_val = const1_rtx; | |
6186 | *add_val = const0_rtx; | |
6187 | break; | |
6188 | ||
6189 | case PLUS: | |
6190 | /* Either (plus (biv) (invar)) or | |
6191 | (plus (mult (biv) (invar_1)) (invar_2)). */ | |
6192 | if (GET_CODE (XEXP (x, 0)) == MULT) | |
6193 | { | |
6194 | *src_reg = XEXP (XEXP (x, 0), 0); | |
6195 | *mult_val = XEXP (XEXP (x, 0), 1); | |
6196 | } | |
6197 | else | |
6198 | { | |
6199 | *src_reg = XEXP (x, 0); | |
6200 | *mult_val = const1_rtx; | |
6201 | } | |
6202 | *add_val = XEXP (x, 1); | |
6203 | break; | |
6204 | ||
6205 | case MULT: | |
6206 | /* ADD_VAL is zero. */ | |
6207 | *src_reg = XEXP (x, 0); | |
6208 | *mult_val = XEXP (x, 1); | |
6209 | *add_val = const0_rtx; | |
6210 | break; | |
6211 | ||
6212 | default: | |
6213 | abort (); | |
6214 | } | |
6215 | ||
6216 | /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be | |
6217 | one unless they are CONST_INTs). */ | |
6218 | if (GET_CODE (*add_val) == USE) | |
6219 | *add_val = XEXP (*add_val, 0); | |
6220 | if (GET_CODE (*mult_val) == USE) | |
6221 | *mult_val = XEXP (*mult_val, 0); | |
6222 | ||
45f97e2e RH |
6223 | if (is_addr) |
6224 | { | |
6225 | #ifdef ADDRESS_COST | |
6226 | *pbenefit += ADDRESS_COST (orig_x) - reg_address_cost; | |
6227 | #else | |
6228 | *pbenefit += rtx_cost (orig_x, MEM) - reg_address_cost; | |
6229 | #endif | |
6230 | } | |
6231 | else | |
6232 | *pbenefit += rtx_cost (orig_x, SET); | |
b4ad7b23 | 6233 | |
45f97e2e RH |
6234 | /* Always return true if this is a giv so it will be detected as such, |
6235 | even if the benefit is zero or negative. This allows elimination | |
6236 | of bivs that might otherwise not be eliminated. */ | |
6237 | return 1; | |
b4ad7b23 RS |
6238 | } |
6239 | \f | |
6240 | /* Given an expression, X, try to form it as a linear function of a biv. | |
6241 | We will canonicalize it to be of the form | |
6242 | (plus (mult (BIV) (invar_1)) | |
6243 | (invar_2)) | |
c5b7917e | 6244 | with possible degeneracies. |
b4ad7b23 RS |
6245 | |
6246 | The invariant expressions must each be of a form that can be used as a | |
6247 | machine operand. We surround them with a USE rtx (a hack, but localized | |
6248 | and certainly unambiguous!) if not a CONST_INT for simplicity in this | |
6249 | routine; it is the caller's responsibility to strip them. | |
6250 | ||
6251 | If no such canonicalization is possible (i.e., two bivs are used, or we | |
6252 | encounter an expression that is neither invariant nor a biv nor a giv), | |
6253 | this routine returns 0. | |
6254 | ||
6255 | For a non-zero return, the result will have a code of CONST_INT, USE, | |
6256 | REG (for a BIV), PLUS, or MULT. No other codes will occur. | |
6257 | ||
6258 | *BENEFIT will be incremented by the benefit of any sub-giv encountered. */ | |
6259 | ||
45f97e2e RH |
6260 | static rtx sge_plus PROTO ((enum machine_mode, rtx, rtx)); |
6261 | static rtx sge_plus_constant PROTO ((rtx, rtx)); | |
6262 | ||
b4ad7b23 RS |
6263 | static rtx |
6264 | simplify_giv_expr (x, benefit) | |
6265 | rtx x; | |
6266 | int *benefit; | |
6267 | { | |
6268 | enum machine_mode mode = GET_MODE (x); | |
6269 | rtx arg0, arg1; | |
6270 | rtx tem; | |
6271 | ||
6272 | /* If this is not an integer mode, or if we cannot do arithmetic in this | |
6273 | mode, this can't be a giv. */ | |
6274 | if (mode != VOIDmode | |
6275 | && (GET_MODE_CLASS (mode) != MODE_INT | |
5fd8383e | 6276 | || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)) |
45f97e2e | 6277 | return NULL_RTX; |
b4ad7b23 RS |
6278 | |
6279 | switch (GET_CODE (x)) | |
6280 | { | |
6281 | case PLUS: | |
6282 | arg0 = simplify_giv_expr (XEXP (x, 0), benefit); | |
6283 | arg1 = simplify_giv_expr (XEXP (x, 1), benefit); | |
6284 | if (arg0 == 0 || arg1 == 0) | |
45f97e2e | 6285 | return NULL_RTX; |
b4ad7b23 RS |
6286 | |
6287 | /* Put constant last, CONST_INT last if both constant. */ | |
6288 | if ((GET_CODE (arg0) == USE | |
6289 | || GET_CODE (arg0) == CONST_INT) | |
45f97e2e RH |
6290 | && ! ((GET_CODE (arg0) == USE |
6291 | && GET_CODE (arg1) == USE) | |
6292 | || GET_CODE (arg1) == CONST_INT)) | |
b4ad7b23 RS |
6293 | tem = arg0, arg0 = arg1, arg1 = tem; |
6294 | ||
6295 | /* Handle addition of zero, then addition of an invariant. */ | |
6296 | if (arg1 == const0_rtx) | |
6297 | return arg0; | |
6298 | else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE) | |
6299 | switch (GET_CODE (arg0)) | |
6300 | { | |
6301 | case CONST_INT: | |
6302 | case USE: | |
45f97e2e RH |
6303 | /* Adding two invariants must result in an invariant, so enclose |
6304 | addition operation inside a USE and return it. */ | |
b4ad7b23 RS |
6305 | if (GET_CODE (arg0) == USE) |
6306 | arg0 = XEXP (arg0, 0); | |
da0af5a5 JL |
6307 | if (GET_CODE (arg1) == USE) |
6308 | arg1 = XEXP (arg1, 0); | |
6309 | ||
45f97e2e RH |
6310 | if (GET_CODE (arg0) == CONST_INT) |
6311 | tem = arg0, arg0 = arg1, arg1 = tem; | |
6312 | if (GET_CODE (arg1) == CONST_INT) | |
6313 | tem = sge_plus_constant (arg0, arg1); | |
da0af5a5 | 6314 | else |
45f97e2e | 6315 | tem = sge_plus (mode, arg0, arg1); |
b4ad7b23 | 6316 | |
45f97e2e RH |
6317 | if (GET_CODE (tem) != CONST_INT) |
6318 | tem = gen_rtx_USE (mode, tem); | |
b4ad7b23 RS |
6319 | return tem; |
6320 | ||
6321 | case REG: | |
6322 | case MULT: | |
6323 | /* biv + invar or mult + invar. Return sum. */ | |
38a448ca | 6324 | return gen_rtx_PLUS (mode, arg0, arg1); |
b4ad7b23 RS |
6325 | |
6326 | case PLUS: | |
6327 | /* (a + invar_1) + invar_2. Associate. */ | |
c5c76735 JL |
6328 | return |
6329 | simplify_giv_expr (gen_rtx_PLUS (mode, | |
6330 | XEXP (arg0, 0), | |
6331 | gen_rtx_PLUS (mode, | |
6332 | XEXP (arg0, 1), | |
6333 | arg1)), | |
6334 | benefit); | |
b4ad7b23 RS |
6335 | |
6336 | default: | |
6337 | abort (); | |
6338 | } | |
6339 | ||
6340 | /* Each argument must be either REG, PLUS, or MULT. Convert REG to | |
6341 | MULT to reduce cases. */ | |
6342 | if (GET_CODE (arg0) == REG) | |
38a448ca | 6343 | arg0 = gen_rtx_MULT (mode, arg0, const1_rtx); |
b4ad7b23 | 6344 | if (GET_CODE (arg1) == REG) |
38a448ca | 6345 | arg1 = gen_rtx_MULT (mode, arg1, const1_rtx); |
b4ad7b23 RS |
6346 | |
6347 | /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT. | |
6348 | Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT. | |
6349 | Recurse to associate the second PLUS. */ | |
6350 | if (GET_CODE (arg1) == MULT) | |
6351 | tem = arg0, arg0 = arg1, arg1 = tem; | |
6352 | ||
6353 | if (GET_CODE (arg1) == PLUS) | |
c5c76735 JL |
6354 | return |
6355 | simplify_giv_expr (gen_rtx_PLUS (mode, | |
6356 | gen_rtx_PLUS (mode, arg0, | |
6357 | XEXP (arg1, 0)), | |
6358 | XEXP (arg1, 1)), | |
6359 | benefit); | |
b4ad7b23 RS |
6360 | |
6361 | /* Now must have MULT + MULT. Distribute if same biv, else not giv. */ | |
6362 | if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT) | |
45f97e2e | 6363 | return NULL_RTX; |
b4ad7b23 | 6364 | |
45f97e2e RH |
6365 | if (!rtx_equal_p (arg0, arg1)) |
6366 | return NULL_RTX; | |
b4ad7b23 | 6367 | |
38a448ca RH |
6368 | return simplify_giv_expr (gen_rtx_MULT (mode, |
6369 | XEXP (arg0, 0), | |
6370 | gen_rtx_PLUS (mode, | |
6371 | XEXP (arg0, 1), | |
6372 | XEXP (arg1, 1))), | |
b4ad7b23 RS |
6373 | benefit); |
6374 | ||
6375 | case MINUS: | |
0f41302f | 6376 | /* Handle "a - b" as "a + b * (-1)". */ |
38a448ca RH |
6377 | return simplify_giv_expr (gen_rtx_PLUS (mode, |
6378 | XEXP (x, 0), | |
c5c76735 JL |
6379 | gen_rtx_MULT (mode, |
6380 | XEXP (x, 1), | |
38a448ca | 6381 | constm1_rtx)), |
b4ad7b23 RS |
6382 | benefit); |
6383 | ||
6384 | case MULT: | |
6385 | arg0 = simplify_giv_expr (XEXP (x, 0), benefit); | |
6386 | arg1 = simplify_giv_expr (XEXP (x, 1), benefit); | |
6387 | if (arg0 == 0 || arg1 == 0) | |
45f97e2e | 6388 | return NULL_RTX; |
b4ad7b23 RS |
6389 | |
6390 | /* Put constant last, CONST_INT last if both constant. */ | |
6391 | if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT) | |
6392 | && GET_CODE (arg1) != CONST_INT) | |
6393 | tem = arg0, arg0 = arg1, arg1 = tem; | |
6394 | ||
6395 | /* If second argument is not now constant, not giv. */ | |
6396 | if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT) | |
45f97e2e | 6397 | return NULL_RTX; |
b4ad7b23 RS |
6398 | |
6399 | /* Handle multiply by 0 or 1. */ | |
6400 | if (arg1 == const0_rtx) | |
6401 | return const0_rtx; | |
6402 | ||
6403 | else if (arg1 == const1_rtx) | |
6404 | return arg0; | |
6405 | ||
6406 | switch (GET_CODE (arg0)) | |
6407 | { | |
6408 | case REG: | |
6409 | /* biv * invar. Done. */ | |
38a448ca | 6410 | return gen_rtx_MULT (mode, arg0, arg1); |
b4ad7b23 RS |
6411 | |
6412 | case CONST_INT: | |
6413 | /* Product of two constants. */ | |
5fd8383e | 6414 | return GEN_INT (INTVAL (arg0) * INTVAL (arg1)); |
b4ad7b23 RS |
6415 | |
6416 | case USE: | |
45f97e2e RH |
6417 | /* invar * invar. It is a giv, but very few of these will |
6418 | actually pay off, so limit to simple registers. */ | |
6419 | if (GET_CODE (arg1) != CONST_INT) | |
6420 | return NULL_RTX; | |
6421 | ||
6422 | arg0 = XEXP (arg0, 0); | |
6423 | if (GET_CODE (arg0) == REG) | |
6424 | tem = gen_rtx_MULT (mode, arg0, arg1); | |
6425 | else if (GET_CODE (arg0) == MULT | |
6426 | && GET_CODE (XEXP (arg0, 0)) == REG | |
6427 | && GET_CODE (XEXP (arg0, 1)) == CONST_INT) | |
6428 | { | |
6429 | tem = gen_rtx_MULT (mode, XEXP (arg0, 0), | |
6430 | GEN_INT (INTVAL (XEXP (arg0, 1)) | |
6431 | * INTVAL (arg1))); | |
6432 | } | |
6433 | else | |
6434 | return NULL_RTX; | |
6435 | return gen_rtx_USE (mode, tem); | |
b4ad7b23 RS |
6436 | |
6437 | case MULT: | |
6438 | /* (a * invar_1) * invar_2. Associate. */ | |
c5c76735 JL |
6439 | return simplify_giv_expr (gen_rtx_MULT (mode, |
6440 | XEXP (arg0, 0), | |
38a448ca RH |
6441 | gen_rtx_MULT (mode, |
6442 | XEXP (arg0, 1), | |
6443 | arg1)), | |
b4ad7b23 RS |
6444 | benefit); |
6445 | ||
6446 | case PLUS: | |
6447 | /* (a + invar_1) * invar_2. Distribute. */ | |
38a448ca RH |
6448 | return simplify_giv_expr (gen_rtx_PLUS (mode, |
6449 | gen_rtx_MULT (mode, | |
6450 | XEXP (arg0, 0), | |
6451 | arg1), | |
6452 | gen_rtx_MULT (mode, | |
6453 | XEXP (arg0, 1), | |
6454 | arg1)), | |
b4ad7b23 RS |
6455 | benefit); |
6456 | ||
6457 | default: | |
6458 | abort (); | |
6459 | } | |
6460 | ||
6461 | case ASHIFT: | |
b4ad7b23 RS |
6462 | /* Shift by constant is multiply by power of two. */ |
6463 | if (GET_CODE (XEXP (x, 1)) != CONST_INT) | |
6464 | return 0; | |
6465 | ||
c5c76735 JL |
6466 | return |
6467 | simplify_giv_expr (gen_rtx_MULT (mode, | |
6468 | XEXP (x, 0), | |
6469 | GEN_INT ((HOST_WIDE_INT) 1 | |
6470 | << INTVAL (XEXP (x, 1)))), | |
6471 | benefit); | |
b4ad7b23 RS |
6472 | |
6473 | case NEG: | |
6474 | /* "-a" is "a * (-1)" */ | |
38a448ca | 6475 | return simplify_giv_expr (gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx), |
b4ad7b23 RS |
6476 | benefit); |
6477 | ||
6478 | case NOT: | |
6479 | /* "~a" is "-a - 1". Silly, but easy. */ | |
38a448ca RH |
6480 | return simplify_giv_expr (gen_rtx_MINUS (mode, |
6481 | gen_rtx_NEG (mode, XEXP (x, 0)), | |
6482 | const1_rtx), | |
b4ad7b23 RS |
6483 | benefit); |
6484 | ||
6485 | case USE: | |
6486 | /* Already in proper form for invariant. */ | |
6487 | return x; | |
6488 | ||
6489 | case REG: | |
6490 | /* If this is a new register, we can't deal with it. */ | |
6491 | if (REGNO (x) >= max_reg_before_loop) | |
6492 | return 0; | |
6493 | ||
6494 | /* Check for biv or giv. */ | |
3ec2b590 | 6495 | switch (REG_IV_TYPE (REGNO (x))) |
b4ad7b23 RS |
6496 | { |
6497 | case BASIC_INDUCT: | |
6498 | return x; | |
6499 | case GENERAL_INDUCT: | |
6500 | { | |
3ec2b590 | 6501 | struct induction *v = REG_IV_INFO (REGNO (x)); |
b4ad7b23 RS |
6502 | |
6503 | /* Form expression from giv and add benefit. Ensure this giv | |
6504 | can derive another and subtract any needed adjustment if so. */ | |
6505 | *benefit += v->benefit; | |
6506 | if (v->cant_derive) | |
6507 | return 0; | |
6508 | ||
c5c76735 JL |
6509 | tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, |
6510 | v->src_reg, v->mult_val), | |
6511 | v->add_val); | |
6512 | ||
b4ad7b23 | 6513 | if (v->derive_adjustment) |
38a448ca | 6514 | tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment); |
b4ad7b23 RS |
6515 | return simplify_giv_expr (tem, benefit); |
6516 | } | |
e9a25f70 JL |
6517 | |
6518 | default: | |
45f97e2e RH |
6519 | /* If it isn't an induction variable, and it is invariant, we |
6520 | may be able to simplify things further by looking through | |
6521 | the bits we just moved outside the loop. */ | |
6522 | if (invariant_p (x) == 1) | |
6523 | { | |
6524 | struct movable *m; | |
6525 | ||
6526 | for (m = the_movables; m ; m = m->next) | |
6527 | if (rtx_equal_p (x, m->set_dest)) | |
6528 | { | |
6529 | /* Ok, we found a match. Substitute and simplify. */ | |
6530 | ||
6531 | /* If we match another movable, we must use that, as | |
6532 | this one is going away. */ | |
6533 | if (m->match) | |
6534 | return simplify_giv_expr (m->match->set_dest, benefit); | |
6535 | ||
6536 | /* If consec is non-zero, this is a member of a group of | |
6537 | instructions that were moved together. We handle this | |
6538 | case only to the point of seeking to the last insn and | |
6539 | looking for a REG_EQUAL. Fail if we don't find one. */ | |
6540 | if (m->consec != 0) | |
6541 | { | |
6542 | int i = m->consec; | |
6543 | tem = m->insn; | |
6544 | do { tem = NEXT_INSN (tem); } while (--i > 0); | |
6545 | ||
6546 | tem = find_reg_note (tem, REG_EQUAL, NULL_RTX); | |
6547 | if (tem) | |
6548 | tem = XEXP (tem, 0); | |
6549 | } | |
6550 | else | |
6551 | { | |
6552 | tem = single_set (m->insn); | |
6553 | if (tem) | |
6554 | tem = SET_SRC (tem); | |
6555 | } | |
6556 | ||
6557 | if (tem) | |
6558 | { | |
6559 | /* What we are most interested in is pointer | |
6560 | arithmetic on invariants -- only take | |
6561 | patterns we may be able to do something with. */ | |
6562 | if (GET_CODE (tem) == PLUS | |
6563 | || GET_CODE (tem) == MULT | |
6564 | || GET_CODE (tem) == ASHIFT | |
6565 | || GET_CODE (tem) == CONST_INT | |
6566 | || GET_CODE (tem) == SYMBOL_REF) | |
6567 | { | |
6568 | tem = simplify_giv_expr (tem, benefit); | |
6569 | if (tem) | |
6570 | return tem; | |
6571 | } | |
6572 | else if (GET_CODE (tem) == CONST | |
6573 | && GET_CODE (XEXP (tem, 0)) == PLUS | |
6574 | && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF | |
6575 | && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT) | |
6576 | { | |
6577 | tem = simplify_giv_expr (XEXP (tem, 0), benefit); | |
6578 | if (tem) | |
6579 | return tem; | |
6580 | } | |
6581 | } | |
6582 | break; | |
6583 | } | |
6584 | } | |
e9a25f70 | 6585 | break; |
b4ad7b23 RS |
6586 | } |
6587 | ||
6588 | /* Fall through to general case. */ | |
6589 | default: | |
6590 | /* If invariant, return as USE (unless CONST_INT). | |
6591 | Otherwise, not giv. */ | |
6592 | if (GET_CODE (x) == USE) | |
6593 | x = XEXP (x, 0); | |
6594 | ||
6595 | if (invariant_p (x) == 1) | |
6596 | { | |
6597 | if (GET_CODE (x) == CONST_INT) | |
6598 | return x; | |
45f97e2e RH |
6599 | if (GET_CODE (x) == CONST |
6600 | && GET_CODE (XEXP (x, 0)) == PLUS | |
6601 | && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF | |
6602 | && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT) | |
6603 | x = XEXP (x, 0); | |
6604 | return gen_rtx_USE (mode, x); | |
b4ad7b23 RS |
6605 | } |
6606 | else | |
6607 | return 0; | |
6608 | } | |
6609 | } | |
45f97e2e RH |
6610 | |
6611 | /* This routine folds invariants such that there is only ever one | |
6612 | CONST_INT in the summation. It is only used by simplify_giv_expr. */ | |
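/* E.g., folding (const_int 3) into (plus (reg) (const_int 5)) yields
   (plus (reg) (const_int 8)) rather than a nested PLUS carrying two
   CONST_INTs.  */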
6613 | ||
6614 | static rtx | |
6615 | sge_plus_constant (x, c) | |
6616 | rtx x, c; | |
6617 | { | |
6618 | if (GET_CODE (x) == CONST_INT) | |
6619 | return GEN_INT (INTVAL (x) + INTVAL (c)); | |
6620 | else if (GET_CODE (x) != PLUS) | |
6621 | return gen_rtx_PLUS (GET_MODE (x), x, c); | |
6622 | else if (GET_CODE (XEXP (x, 1)) == CONST_INT) | |
6623 | { | |
6624 | return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0), | |
6625 | GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c))); | |
6626 | } | |
6627 | else if (GET_CODE (XEXP (x, 0)) == PLUS | |
6628 | || GET_CODE (XEXP (x, 1)) != PLUS) | |
6629 | { | |
6630 | return gen_rtx_PLUS (GET_MODE (x), | |
6631 | sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1)); | |
6632 | } | |
6633 | else | |
6634 | { | |
6635 | return gen_rtx_PLUS (GET_MODE (x), | |
6636 | sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0)); | |
6637 | } | |
6638 | } | |
6639 | ||
6640 | static rtx | |
6641 | sge_plus (mode, x, y) | |
6642 | enum machine_mode mode; | |
6643 | rtx x, y; | |
6644 | { | |
6645 | while (GET_CODE (y) == PLUS) | |
6646 | { | |
6647 | rtx a = XEXP (y, 0); | |
6648 | if (GET_CODE (a) == CONST_INT) | |
6649 | x = sge_plus_constant (x, a); | |
6650 | else | |
6651 | x = gen_rtx_PLUS (mode, x, a); | |
6652 | y = XEXP (y, 1); | |
6653 | } | |
6654 | if (GET_CODE (y) == CONST_INT) | |
6655 | x = sge_plus_constant (x, y); | |
6656 | else | |
6657 | x = gen_rtx_PLUS (mode, x, y); | |
6658 | return x; | |
6659 | } | |
b4ad7b23 RS |
6660 | \f |
6661 | /* Help detect a giv that is calculated by several consecutive insns; | |
6662 | for example, | |
6663 | giv = biv * M | |
6664 | giv = giv + A | |
6665 | The caller has already identified the first insn P as having a giv as dest; | |
6666 | we check that all other insns that set the same register follow | |
6667 | immediately after P, that they alter nothing else, | |
6668 | and that the result of the last is still a giv. | |
6669 | ||
6670 | The value is 0 if the reg set in P is not really a giv. | |
6671 | Otherwise, the value is the amount gained by eliminating | |
6672 | all the consecutive insns that compute the value. | |
6673 | ||
6674 | FIRST_BENEFIT is the amount gained by eliminating the first insn, P. | |
6675 | SRC_REG is the reg of the biv; DEST_REG is the reg of the giv. | |
6676 | ||
6677 | The coefficients of the ultimate giv value are stored in | |
6678 | *MULT_VAL and *ADD_VAL. */ | |
6679 | ||
6680 | static int | |
6681 | consec_sets_giv (first_benefit, p, src_reg, dest_reg, | |
a07516d3 | 6682 | add_val, mult_val, last_consec_insn) |
b4ad7b23 RS |
6683 | int first_benefit; |
6684 | rtx p; | |
6685 | rtx src_reg; | |
6686 | rtx dest_reg; | |
6687 | rtx *add_val; | |
6688 | rtx *mult_val; | |
a07516d3 | 6689 | rtx *last_consec_insn; |
b4ad7b23 RS |
6690 | { |
6691 | int count; | |
6692 | enum rtx_code code; | |
6693 | int benefit; | |
6694 | rtx temp; | |
6695 | rtx set; | |
6696 | ||
6697 | /* Indicate that this is a giv so that we can update the value produced in | |
6698 | each insn of the multi-insn sequence. | |
6699 | ||
6700 | This induction structure will be used only by the call to | |
6701 | general_induction_var below, so we can allocate it on our stack. | |
6702 | If this is a giv, our caller will replace the induct var entry with | |
6703 | a new induction structure. */ | |
6704 | struct induction *v | |
6705 | = (struct induction *) alloca (sizeof (struct induction)); | |
6706 | v->src_reg = src_reg; | |
6707 | v->mult_val = *mult_val; | |
6708 | v->add_val = *add_val; | |
6709 | v->benefit = first_benefit; | |
6710 | v->cant_derive = 0; | |
6711 | v->derive_adjustment = 0; | |
6712 | ||
3ec2b590 R |
6713 | REG_IV_TYPE (REGNO (dest_reg)) = GENERAL_INDUCT; |
6714 | REG_IV_INFO (REGNO (dest_reg)) = v; | |
b4ad7b23 | 6715 | |
8deb8e2c | 6716 | count = VARRAY_INT (n_times_set, REGNO (dest_reg)) - 1; |
b4ad7b23 RS |
6717 | |
6718 | while (count > 0) | |
6719 | { | |
6720 | p = NEXT_INSN (p); | |
6721 | code = GET_CODE (p); | |
6722 | ||
6723 | /* If libcall, skip to end of call sequence. */ | |
5fd8383e | 6724 | if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX))) |
b4ad7b23 RS |
6725 | p = XEXP (temp, 0); |
6726 | ||
6727 | if (code == INSN | |
6728 | && (set = single_set (p)) | |
6729 | && GET_CODE (SET_DEST (set)) == REG | |
6730 | && SET_DEST (set) == dest_reg | |
45f97e2e RH |
6731 | && (general_induction_var (SET_SRC (set), &src_reg, |
6732 | add_val, mult_val, 0, &benefit) | |
b4ad7b23 | 6733 | /* Giv created by equivalent expression. */ |
5fd8383e | 6734 | || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)) |
45f97e2e RH |
6735 | && general_induction_var (XEXP (temp, 0), &src_reg, |
6736 | add_val, mult_val, 0, &benefit))) | |
b4ad7b23 RS |
6737 | && src_reg == v->src_reg) |
6738 | { | |
5fd8383e | 6739 | if (find_reg_note (p, REG_RETVAL, NULL_RTX)) |
b4ad7b23 RS |
6740 | benefit += libcall_benefit (p); |
6741 | ||
6742 | count--; | |
6743 | v->mult_val = *mult_val; | |
6744 | v->add_val = *add_val; | |
6745 | v->benefit = benefit; | |
6746 | } | |
6747 | else if (code != NOTE) | |
6748 | { | |
6749 | /* Allow insns that set something other than this giv to a | |
6750 | constant. Such insns are needed on machines which cannot | |
6751 | include long constants and should not disqualify a giv. */ | |
6752 | if (code == INSN | |
6753 | && (set = single_set (p)) | |
6754 | && SET_DEST (set) != dest_reg | |
6755 | && CONSTANT_P (SET_SRC (set))) | |
6756 | continue; | |
6757 | ||
3ec2b590 | 6758 | REG_IV_TYPE (REGNO (dest_reg)) = UNKNOWN_INDUCT; |
b4ad7b23 RS |
6759 | return 0; |
6760 | } | |
6761 | } | |
6762 | ||
a07516d3 | 6763 | *last_consec_insn = p; |
b4ad7b23 RS |
6764 | return v->benefit; |
6765 | } | |
6766 | \f | |
6767 | /* Return an rtx, if any, that expresses giv G2 as a function of the register | |
6768 | represented by G1. If no such expression can be found, or it is clear that | |
6769 | it cannot possibly be a valid address, 0 is returned. | |
6770 | ||
6771 | To perform the computation, we note that | |
45f97e2e RH |
6772 | G1 = x * v + a and |
6773 | G2 = y * v + b | |
b4ad7b23 RS |
6774 | where `v' is the biv. |
6775 | ||
45f97e2e RH |
6776 | So G2 = (y/x) * G1 + (b - a*y/x). | |
6777 | ||
6778 | Note that MULT = y/x. | |
6779 | ||
6780 | Update: A and B are now allowed to be additive expressions such that | |
6781 | B contains all variables in A. That is, computing B-A will not require | |
6782 | subtracting variables. */ | |
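/* A quick numeric check of the above: with G1 = 2*v + 1 and
   G2 = 6*v + 7, MULT = y/x = 3 and the additive term is
   b - a*y/x = 7 - 1*3 = 4, so indeed G2 = 3*G1 + 4.  */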
6783 | ||
6784 | static rtx | |
6785 | express_from_1 (a, b, mult) | |
6786 | rtx a, b, mult; | |
6787 | { | |
6788 | /* If MULT is zero, then A*MULT is zero, and our expression is B. */ | |
6789 | ||
6790 | if (mult == const0_rtx) | |
6791 | return b; | |
6792 | ||
6793 | /* If MULT is not 1, we cannot handle A with non-constants, since we | |
6794 | would then be required to subtract multiples of the registers in A. | |
6795 | This is theoretically possible, and may even apply to some Fortran | |
6796 | constructs, but it is a lot of work and we do not attempt it here. */ | |
6797 | ||
6798 | if (mult != const1_rtx && GET_CODE (a) != CONST_INT) | |
6799 | return NULL_RTX; | |
6800 | ||
6801 | /* In general these structures are sorted top to bottom (down the PLUS | |
6802 | chain), but not left to right across the PLUS. If B is a higher | |
6803 | order giv than A, we can strip one level and recurse. If A is higher | |
6804 | order, we'll eventually bail out, but won't know that until the end. | |
6805 | If they are the same, we'll strip one level around this loop. */ | |
6806 | ||
6807 | while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS) | |
6808 | { | |
6809 | rtx ra, rb, oa, ob, tmp; | |
6810 | ||
6811 | ra = XEXP (a, 0), oa = XEXP (a, 1); | |
6812 | if (GET_CODE (ra) == PLUS) | |
6813 | tmp = ra, ra = oa, oa = tmp; | |
6814 | ||
6815 | rb = XEXP (b, 0), ob = XEXP (b, 1); | |
6816 | if (GET_CODE (rb) == PLUS) | |
6817 | tmp = rb, rb = ob, ob = tmp; | |
6818 | ||
6819 | if (rtx_equal_p (ra, rb)) | |
6820 | /* We matched: remove one reg completely. */ | |
6821 | a = oa, b = ob; | |
6822 | else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob)) | |
6823 | /* An alternate match. */ | |
6824 | a = oa, b = rb; | |
6825 | else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb)) | |
6826 | /* An alternate match. */ | |
6827 | a = ra, b = ob; | |
6828 | else | |
6829 | { | |
6830 | /* Indicates an extra register in B. Strip one level from B and | |
6831 | recurse, hoping B was the higher order expression. */ | |
6832 | ob = express_from_1 (a, ob, mult); | |
6833 | if (ob == NULL_RTX) | |
6834 | return NULL_RTX; | |
6835 | return gen_rtx_PLUS (GET_MODE (b), rb, ob); | |
6836 | } | |
6837 | } | |
6838 | ||
6839 | /* Here we are at the last level of A, go through the cases hoping to | |
6840 | get rid of everything but a constant. */ | |
6841 | ||
6842 | if (GET_CODE (a) == PLUS) | |
6843 | { | |
efe3eb65 | 6844 | rtx ra, oa; |
45f97e2e RH |
6845 | |
6846 | ra = XEXP (a, 0), oa = XEXP (a, 1); | |
6847 | if (rtx_equal_p (oa, b)) | |
6848 | oa = ra; | |
6849 | else if (!rtx_equal_p (ra, b)) | |
6850 | return NULL_RTX; | |
6851 | ||
6852 | if (GET_CODE (oa) != CONST_INT) | |
6853 | return NULL_RTX; | |
6854 | ||
6855 | return GEN_INT (-INTVAL (oa) * INTVAL (mult)); | |
6856 | } | |
6857 | else if (GET_CODE (a) == CONST_INT) | |
6858 | { | |
6859 | return plus_constant (b, -INTVAL (a) * INTVAL (mult)); | |
6860 | } | |
6861 | else if (GET_CODE (b) == PLUS) | |
6862 | { | |
6863 | if (rtx_equal_p (a, XEXP (b, 0))) | |
6864 | return XEXP (b, 1); | |
6865 | else if (rtx_equal_p (a, XEXP (b, 1))) | |
6866 | return XEXP (b, 0); | |
6867 | else | |
6868 | return NULL_RTX; | |
6869 | } | |
6870 | else if (rtx_equal_p (a, b)) | |
6871 | return const0_rtx; | |
6872 | ||
6873 | return NULL_RTX; | |
6874 | } | |
b4ad7b23 | 6875 | |
4d87f7a7 | 6876 | rtx |
b4ad7b23 RS |
6877 | express_from (g1, g2) |
6878 | struct induction *g1, *g2; | |
6879 | { | |
6880 | rtx mult, add; | |
6881 | ||
6882 | /* The value that G1 will be multiplied by must be a constant integer. Also, | |
6883 | the only chance we have of getting a valid address is if y/x (see above | |
6884 | for notation) is also an integer. */ | |
45f97e2e RH |
6885 | if (GET_CODE (g1->mult_val) == CONST_INT |
6886 | && GET_CODE (g2->mult_val) == CONST_INT) | |
6887 | { | |
6888 | if (g1->mult_val == const0_rtx | |
6889 | || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0) | |
6890 | return NULL_RTX; | |
6891 | mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val)); | |
6892 | } | |
6893 | else if (rtx_equal_p (g1->mult_val, g2->mult_val)) | |
6894 | mult = const1_rtx; | |
6895 | else | |
6896 | { | |
6897 | /* ??? Find out if one is a multiple of the other? */ | |
6898 | return NULL_RTX; | |
6899 | } | |
b4ad7b23 | 6900 | |
45f97e2e | 6901 | add = express_from_1 (g1->add_val, g2->add_val, mult); |
e0485b85 RH |
6902 | if (add == NULL_RTX) |
6903 | { | |
6904 | /* Failed. If we've got a multiplication factor between G1 and G2, | |
6905 | scale G1's addend and try again. */ | |
6906 | if (INTVAL (mult) > 1) | |
6907 | { | |
6908 | rtx g1_add_val = g1->add_val; | |
6909 | if (GET_CODE (g1_add_val) == MULT | |
6910 | && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT) | |
6911 | { | |
6912 | HOST_WIDE_INT m; | |
6913 | m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1)); | |
6914 | g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), | |
6915 | XEXP (g1_add_val, 0), GEN_INT (m)); | |
6916 | } | |
6917 | else | |
6918 | { | |
6919 | g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val, | |
6920 | mult); | |
6921 | } | |
6922 | ||
6923 | add = express_from_1 (g1_add_val, g2->add_val, const1_rtx); | |
6924 | } | |
6925 | } | |
45f97e2e RH |
6926 | if (add == NULL_RTX) |
6927 | return NULL_RTX; | |
b4ad7b23 RS |
6928 | |
6929 | /* Form simplified final result. */ | |
6930 | if (mult == const0_rtx) | |
6931 | return add; | |
6932 | else if (mult == const1_rtx) | |
6933 | mult = g1->dest_reg; | |
6934 | else | |
38a448ca | 6935 | mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult); |
b4ad7b23 RS |
6936 | |
6937 | if (add == const0_rtx) | |
6938 | return mult; | |
6939 | else | |
86219cc7 BS |
6940 | { |
6941 | if (GET_CODE (add) == PLUS | |
6942 | && CONSTANT_P (XEXP (add, 1))) | |
6943 | { | |
6944 | rtx tem = XEXP (add, 1); | |
6945 | mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0)); | |
6946 | add = tem; | |
6947 | } | |
6948 | ||
6949 | return gen_rtx_PLUS (g2->mode, mult, add); | |
6950 | } | |
6951 | ||
b4ad7b23 | 6952 | } |
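/* An illustrative standalone sketch (not part of this file; compile it
   separately) of the arithmetic express_from implements: for two givs
   g1 = b*m1 + a1 and g2 = b*m2 + a2 over the same biv b, with m1 dividing
   m2, g2 equals g1*mult + add where mult = m2/m1 and add = a2 - a1*mult.
   All numbers below are made up for illustration.  */

#include <assert.h>

int main (void)
{
  long m1 = 4, a1 = 8, m2 = 12, a2 = 100;
  long mult = m2 / m1;                  /* exact by assumption */
  long add = a2 - a1 * mult;            /* here 100 - 8*3 = 76 */

  for (long b = 0; b < 16; b++)
    assert (b * m2 + a2 == (b * m1 + a1) * mult + add);
  return 0;
}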
b4ad7b23 | 6953 | \f |
da5a44b3 BS |
6954 | /* Return an rtx, if any, that expresses giv G2 as a function of the register |
6955 | represented by G1. This indicates that G2 should be combined with G1 and | |
6956 | that G2 can use (either directly or via an address expression) a register | |
6957 | used to represent G1. */ | |
b4ad7b23 | 6958 | |
45f97e2e | 6959 | static rtx |
b4ad7b23 RS |
6960 | combine_givs_p (g1, g2) |
6961 | struct induction *g1, *g2; | |
6962 | { | |
45f97e2e | 6963 | rtx tem = express_from (g1, g2); |
b4ad7b23 | 6964 | |
45f97e2e RH |
6965 | /* If these givs are identical, they can be combined. We use the results |
6966 | of express_from because the addends are not in a canonical form, so | |
6967 | rtx_equal_p is a weaker test. */ | |
3ec2b590 R |
6968 | /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the |
6969 | combination to be the other way round. */ | |
6970 | if (tem == g1->dest_reg | |
6971 | && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR)) | |
b4ad7b23 | 6972 | { |
45f97e2e | 6973 | return g1->dest_reg; |
b4ad7b23 RS |
6974 | } |
6975 | ||
b4ad7b23 RS |
6976 | /* If G2 can be expressed as a function of G1 and that function is valid |
6977 | as an address and no more expensive than using a register for G2, | |
6978 | the expression of G2 in terms of G1 can be used. */ | |
45f97e2e RH |
6979 | if (tem != NULL_RTX |
6980 | && g2->giv_type == DEST_ADDR | |
b4ad7b23 | 6981 | && memory_address_p (g2->mem_mode, tem) |
45f97e2e RH |
6982 | /* ??? Loses, especially with -fforce-addr, where *g2->location | |
6983 | will always be a register, and so anything more complicated | |
6984 | gets discarded. */ | |
6985 | #if 0 | |
6986 | #ifdef ADDRESS_COST | |
6987 | && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location) | |
6988 | #else | |
6989 | && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM) | |
6990 | #endif | |
6991 | #endif | |
6992 | ) | |
b4ad7b23 | 6993 | { |
45f97e2e | 6994 | return tem; |
b4ad7b23 | 6995 | } |
b4ad7b23 | 6996 | |
45f97e2e | 6997 | return NULL_RTX; |
b4ad7b23 RS |
6998 | } |
6999 | \f | |
45f97e2e RH |
7000 | struct combine_givs_stats |
7001 | { | |
7002 | int giv_number; | |
7003 | int total_benefit; | |
7004 | }; | |
7005 | ||
7006 | static int | |
7007 | cmp_combine_givs_stats (x, y) | |
7008 | struct combine_givs_stats *x, *y; | |
7009 | { | |
7010 | int d; | |
7011 | d = y->total_benefit - x->total_benefit; | |
7012 | /* Stabilize the sort. */ | |
7013 | if (!d) | |
7014 | d = x->giv_number - y->giv_number; | |
7015 | return d; | |
7016 | } | |
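/* An illustrative standalone sketch of the stabilized-comparator idiom
   used by cmp_combine_givs_stats above: qsort is not a stable sort, so
   ties on the primary key (total_benefit, descending) are broken by the
   original index (giv_number, ascending) to keep the order deterministic.
   The data is invented; note the standard const void * signature.  */

#include <stdio.h>
#include <stdlib.h>

struct stats { int giv_number; int total_benefit; };

static int
cmp (const void *xp, const void *yp)
{
  const struct stats *x = xp, *y = yp;
  int d = y->total_benefit - x->total_benefit;  /* higher benefit first */
  return d ? d : x->giv_number - y->giv_number; /* stable tie-break */
}

int main (void)
{
  struct stats s[] = { {0, 5}, {1, 9}, {2, 5}, {3, 9} };
  qsort (s, 4, sizeof *s, cmp);
  for (int i = 0; i < 4; i++)           /* prints givs 1, 3, 0, 2 */
    printf ("giv %d benefit %d\n", s[i].giv_number, s[i].total_benefit);
  return 0;
}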
7017 | ||
b4ad7b23 RS |
7018 | /* Check all pairs of givs for iv_class BL and see if any can be combined with |
7019 | any other. If so, point SAME to the giv combined with and set NEW_REG to | |
7020 | be an expression (in terms of the other giv's DEST_REG) equivalent to the | |
7021 | giv. Also, update BENEFIT and related fields for cost/benefit analysis. */ | |
7022 | ||
7023 | static void | |
7024 | combine_givs (bl) | |
7025 | struct iv_class *bl; | |
7026 | { | |
ba12c883 RH |
7027 | /* Additional benefit to add for being combined multiple times. */ |
7028 | const int extra_benefit = 3; | |
7029 | ||
29a82058 | 7030 | struct induction *g1, *g2, **giv_array; |
45f97e2e RH |
7031 | int i, j, k, giv_count; |
7032 | struct combine_givs_stats *stats; | |
7033 | rtx *can_combine; | |
b4ad7b23 | 7034 | |
7027f90a JW |
7035 | /* Count givs, because bl->giv_count is incorrect here. */ |
7036 | giv_count = 0; | |
b4ad7b23 | 7037 | for (g1 = bl->giv; g1; g1 = g1->next_iv) |
45f97e2e RH |
7038 | if (!g1->ignore) |
7039 | giv_count++; | |
7027f90a JW |
7040 | |
7041 | giv_array | |
7042 | = (struct induction **) alloca (giv_count * sizeof (struct induction *)); | |
7043 | i = 0; | |
7044 | for (g1 = bl->giv; g1; g1 = g1->next_iv) | |
45f97e2e RH |
7045 | if (!g1->ignore) |
7046 | giv_array[i++] = g1; | |
7027f90a | 7047 | |
45f97e2e | 7048 | stats = (struct combine_givs_stats *) alloca (giv_count * sizeof (*stats)); |
efe3eb65 | 7049 | bzero ((char *) stats, giv_count * sizeof (*stats)); |
7027f90a | 7050 | |
45f97e2e | 7051 | can_combine = (rtx *) alloca (giv_count * giv_count * sizeof(rtx)); |
efe3eb65 | 7052 | bzero ((char *) can_combine, giv_count * giv_count * sizeof(rtx)); |
7027f90a JW |
7053 | |
7054 | for (i = 0; i < giv_count; i++) | |
7055 | { | |
45f97e2e | 7056 | int this_benefit; |
ba12c883 | 7057 | rtx single_use; |
45f97e2e | 7058 | |
7027f90a | 7059 | g1 = giv_array[i]; |
ba12c883 RH |
7060 | stats[i].giv_number = i; |
7061 | ||
7062 | /* If a DEST_REG GIV is used only once, do not allow it to combine | |
7063 | with anything, for in doing so we will gain nothing that cannot | |
7064 | be had by simply letting the GIV with which we would have combined | |
7065 | be reduced on its own. The lossage shows up in particular with | |
7066 | DEST_ADDR targets on hosts with reg+reg addressing, though it can | |
7067 | be seen elsewhere as well. */ | |
7068 | if (g1->giv_type == DEST_REG | |
7069 | && (single_use = VARRAY_RTX (reg_single_usage, REGNO (g1->dest_reg))) | |
7070 | && single_use != const0_rtx) | |
7071 | continue; | |
45f97e2e RH |
7072 | |
7073 | this_benefit = g1->benefit; | |
7074 | /* Add an additional weight for zero addends. */ | |
7075 | if (g1->no_const_addval) | |
7076 | this_benefit += 1; | |
ba12c883 | 7077 | |
45f97e2e RH |
7078 | for (j = 0; j < giv_count; j++) |
7079 | { | |
7080 | rtx this_combine; | |
7081 | ||
7082 | g2 = giv_array[j]; | |
7083 | if (g1 != g2 | |
7084 | && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX) | |
7085 | { | |
7086 | can_combine[i*giv_count + j] = this_combine; | |
ba12c883 | 7087 | this_benefit += g2->benefit + extra_benefit; |
45f97e2e RH |
7088 | } |
7089 | } | |
45f97e2e RH |
7090 | stats[i].total_benefit = this_benefit; |
7091 | } | |
7092 | ||
7093 | /* Iterate, combining until we can't. */ | |
7094 | restart: | |
7095 | qsort (stats, giv_count, sizeof(*stats), cmp_combine_givs_stats); | |
7096 | ||
7097 | if (loop_dump_stream) | |
7098 | { | |
7099 | fprintf (loop_dump_stream, "Sorted combine statistics:\n"); | |
7100 | for (k = 0; k < giv_count; k++) | |
7101 | { | |
7102 | g1 = giv_array[stats[k].giv_number]; | |
7103 | if (!g1->combined_with && !g1->same) | |
7104 | fprintf (loop_dump_stream, " {%d, %d}", | |
7105 | INSN_UID (giv_array[stats[k].giv_number]->insn), | |
7106 | stats[k].total_benefit); | |
7107 | } | |
7108 | putc ('\n', loop_dump_stream); | |
7109 | } | |
7110 | ||
7111 | for (k = 0; k < giv_count; k++) | |
7112 | { | |
7113 | int g1_add_benefit = 0; | |
7114 | ||
7115 | i = stats[k].giv_number; | |
7116 | g1 = giv_array[i]; | |
7117 | ||
7118 | /* If it has already been combined, skip. */ | |
7119 | if (g1->combined_with || g1->same) | |
7120 | continue; | |
7121 | ||
7122 | for (j = 0; j < giv_count; j++) | |
7123 | { | |
7124 | g2 = giv_array[j]; | |
7125 | if (g1 != g2 && can_combine[i*giv_count + j] | |
7126 | /* If it has already been combined, skip. */ | |
7127 | && ! g2->same && ! g2->combined_with) | |
7128 | { | |
7129 | int l; | |
7130 | ||
7131 | g2->new_reg = can_combine[i*giv_count + j]; | |
7132 | g2->same = g1; | |
3ec2b590 | 7133 | g1->combined_with++; |
45f97e2e RH |
7134 | g1->lifetime += g2->lifetime; |
7135 | ||
ba12c883 | 7136 | g1_add_benefit += g2->benefit; |
45f97e2e RH |
7137 | |
7138 | /* ??? The new final_[bg]iv_value code does a much better job | |
7139 | of finding replaceable giv's, and hence this code may no | |
7140 | longer be necessary. */ | |
7141 | if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg)) | |
7142 | g1_add_benefit -= copy_cost; | |
7027f90a | 7143 | |
45f97e2e RH |
7144 | /* To help optimize the next set of combinations, remove |
7145 | this giv from the benefits of other potential mates. */ | |
7146 | for (l = 0; l < giv_count; ++l) | |
7147 | { | |
7148 | int m = stats[l].giv_number; | |
7149 | if (can_combine[m*giv_count + j]) | |
ba12c883 | 7150 | stats[l].total_benefit -= g2->benefit + extra_benefit; |
45f97e2e RH |
7151 | } |
7152 | ||
7153 | if (loop_dump_stream) | |
7154 | fprintf (loop_dump_stream, | |
7155 | "giv at %d combined with giv at %d\n", | |
7156 | INSN_UID (g2->insn), INSN_UID (g1->insn)); | |
7157 | } | |
7158 | } | |
7159 | ||
7160 | /* To help optimize the next set of combinations, remove | |
7161 | this giv from the benefits of other potential mates. */ | |
7162 | if (g1->combined_with) | |
7163 | { | |
7164 | for (j = 0; j < giv_count; ++j) | |
7165 | { | |
7166 | int m = stats[j].giv_number; | |
0466bdc4 | 7167 | if (can_combine[m*giv_count + i]) |
ba12c883 | 7168 | stats[j].total_benefit -= g1->benefit + extra_benefit; |
45f97e2e RH |
7169 | } |
7170 | ||
7171 | g1->benefit += g1_add_benefit; | |
7172 | ||
7173 | /* We've finished with this giv, and everything it touched. | |
7174 | Restart the combination so that proper weights for the | |
7175 | rest of the givs are properly taken into account. */ | |
7176 | /* ??? Ideally we would compact the arrays at this point, so | |
7177 | as to not cover old ground. But sanely compacting | |
7178 | can_combine is tricky. */ | |
7179 | goto restart; | |
7180 | } | |
7027f90a | 7181 | } |
b4ad7b23 RS |
7182 | } |
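/* An illustrative standalone sketch of the greedy shape of combine_givs:
   rank the candidates by benefit, combine everything compatible with the
   best remaining one, then restart so the rest are re-ranked.  The
   compatibility matrix and benefits are invented; the real pass also
   discounts benefits of taken mates and updates lifetimes.  */

#include <stdio.h>

#define N 4

int main (void)
{
  int benefit[N] = { 3, 7, 5, 2 };
  int can[N][N] = {                /* can[i][j]: j expressible via i */
    {0,1,0,1}, {1,0,1,0}, {0,1,0,0}, {1,0,0,0} };
  int done[N] = { 0 };

 restart:
  {
    int best = -1;
    for (int i = 0; i < N; i++)    /* best not-yet-processed candidate */
      if (!done[i] && (best < 0 || benefit[i] > benefit[best]))
        best = i;
    if (best < 0)
      return 0;
    done[best] = 1;
    for (int j = 0; j < N; j++)
      if (!done[j] && can[best][j])
        {
          done[j] = 1;             /* j rewritten in terms of best */
          printf ("giv %d combined with giv %d\n", j, best);
        }
    goto restart;                  /* re-rank the remaining givs */
  }
}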
7183 | \f | |
3ec2b590 R |
7184 | struct recombine_givs_stats |
7185 | { | |
7186 | int giv_number; | |
7187 | int start_luid, end_luid; | |
7188 | }; | |
7189 | ||
7190 | /* Used below as the comparison function for qsort. We want an ascending luid | |
7191 | when scanning the array starting at the end, thus the arguments are | |
7192 | used in reverse. */ | |
7193 | static int | |
7194 | cmp_recombine_givs_stats (x, y) | |
7195 | struct recombine_givs_stats *x, *y; | |
7196 | { | |
7197 | int d; | |
7198 | d = y->start_luid - x->start_luid; | |
7199 | /* Stabilize the sort. */ | |
7200 | if (!d) | |
7201 | d = y->giv_number - x->giv_number; | |
7202 | return d; | |
7203 | } | |
7204 | ||
7205 | /* Scan X, which is part of INSN, for the end of life of a giv. Also | |
7206 | look for the start of life of a giv whose start has not been seen | |
7207 | yet, to unlock the search for the end of its life. | |
7208 | Only consider givs that belong to BIV. | |
7209 | Return the total number of lifetime ends that have been found. */ | |
7210 | static int | |
7211 | find_life_end (x, stats, insn, biv) | |
7212 | rtx x, insn, biv; | |
7213 | struct recombine_givs_stats *stats; | |
7214 | { | |
7215 | enum rtx_code code; | |
6f7d635c | 7216 | const char *fmt; |
3ec2b590 R |
7217 | int i, j; |
7218 | int retval; | |
7219 | ||
7220 | code = GET_CODE (x); | |
7221 | switch (code) | |
7222 | { | |
7223 | case SET: | |
7224 | { | |
7225 | rtx reg = SET_DEST (x); | |
7226 | if (GET_CODE (reg) == REG) | |
7227 | { | |
7228 | int regno = REGNO (reg); | |
7229 | struct induction *v = REG_IV_INFO (regno); | |
7230 | ||
7231 | if (REG_IV_TYPE (regno) == GENERAL_INDUCT | |
7232 | && ! v->ignore | |
7233 | && v->src_reg == biv | |
7234 | && stats[v->ix].end_luid <= 0) | |
7235 | { | |
7236 | /* If we see a 0 here for end_luid, it means that we have | |
7237 | scanned the entire loop without finding any use at all. | |
7238 | We must not predicate this code on a start_luid match | |
7239 | since that would make the test fail for givs that have | |
7240 | been hoisted out of inner loops. */ | |
7241 | if (stats[v->ix].end_luid == 0) | |
7242 | { | |
7243 | stats[v->ix].end_luid = stats[v->ix].start_luid; | |
7244 | return 1 + find_life_end (SET_SRC (x), stats, insn, biv); | |
7245 | } | |
7246 | else if (stats[v->ix].start_luid == INSN_LUID (insn)) | |
7247 | stats[v->ix].end_luid = 0; | |
7248 | } | |
7249 | return find_life_end (SET_SRC (x), stats, insn, biv); | |
7250 | } | |
7251 | break; | |
7252 | } | |
7253 | case REG: | |
7254 | { | |
7255 | int regno = REGNO (x); | |
7256 | struct induction *v = REG_IV_INFO (regno); | |
7257 | ||
7258 | if (REG_IV_TYPE (regno) == GENERAL_INDUCT | |
7259 | && ! v->ignore | |
7260 | && v->src_reg == biv | |
7261 | && stats[v->ix].end_luid == 0) | |
7262 | { | |
7263 | while (INSN_UID (insn) >= max_uid_for_loop) | |
7264 | insn = NEXT_INSN (insn); | |
7265 | stats[v->ix].end_luid = INSN_LUID (insn); | |
7266 | return 1; | |
7267 | } | |
7268 | return 0; | |
7269 | } | |
7270 | case LABEL_REF: | |
7271 | case CONST_DOUBLE: | |
7272 | case CONST_INT: | |
7273 | case CONST: | |
7274 | return 0; | |
7275 | default: | |
7276 | break; | |
7277 | } | |
7278 | fmt = GET_RTX_FORMAT (code); | |
7279 | retval = 0; | |
7280 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
7281 | { | |
7282 | if (fmt[i] == 'e') | |
7283 | retval += find_life_end (XEXP (x, i), stats, insn, biv); | |
7284 | ||
7285 | else if (fmt[i] == 'E') | |
7286 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
7287 | retval += find_life_end (XVECEXP (x, i, j), stats, insn, biv); | |
7288 | } | |
7289 | return retval; | |
7290 | } | |
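/* An illustrative standalone sketch of the recursive walk find_life_end
   performs over RTL: handle a few codes specially, and for everything
   else visit the operands (RTL uses per-code format strings, 'e' for a
   subexpression and 'E' for a vector of them), accumulating a count from
   the leaves.  The tiny expr type is a stand-in, not real RTL.  */

#include <stdio.h>

struct expr { int is_reg; int regno; int nkids; struct expr *kid[3]; };

static int
count_reg_uses (struct expr *x, int regno)
{
  if (x == 0)
    return 0;
  if (x->is_reg)                          /* leaf: a register reference */
    return x->regno == regno;
  int n = 0;
  for (int i = x->nkids - 1; i >= 0; i--) /* walk operands, last first */
    n += count_reg_uses (x->kid[i], regno);
  return n;
}

int main (void)
{
  struct expr r5 = { 1, 5, 0, { 0 } };
  struct expr r7 = { 1, 7, 0, { 0 } };
  struct expr plus = { 0, 0, 2, { &r5, &r7 } };
  struct expr set = { 0, 0, 2, { &r5, &plus } };
  printf ("%d uses of reg 5\n", count_reg_uses (&set, 5)); /* prints 2 */
  return 0;
}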
7291 | ||
7292 | /* For each giv that has been combined with another, see if | |
7293 | we can combine it with the most recently used one instead. | |
7294 | This tends to shorten giv lifetimes, and helps the next step: | |
7295 | try to derive givs from other givs. */ | |
7296 | static void | |
53dc05e4 | 7297 | recombine_givs (bl, loop_start, loop_end, unroll_p) |
3ec2b590 R |
7298 | struct iv_class *bl; |
7299 | rtx loop_start, loop_end; | |
53dc05e4 | 7300 | int unroll_p; |
3ec2b590 R |
7301 | { |
7302 | struct induction *v, **giv_array, *last_giv; | |
7303 | struct recombine_givs_stats *stats; | |
7304 | int giv_count; | |
7305 | int i, rescan; | |
7306 | int ends_need_computing; | |
7307 | ||
7308 | for (giv_count = 0, v = bl->giv; v; v = v->next_iv) | |
7309 | { | |
7310 | if (! v->ignore) | |
7311 | giv_count++; | |
7312 | } | |
7313 | giv_array | |
7314 | = (struct induction **) alloca (giv_count * sizeof (struct induction *)); | |
7315 | stats = (struct recombine_givs_stats *) alloca (giv_count * sizeof *stats); | |
7316 | ||
7317 | /* Initialize stats for each giv. The ix field of each giv is set up | |
7318 | below, in the most-recently-used recombination loop. */ | |
7319 | for (i = 0, v = bl->giv; v; v = v->next_iv) | |
7320 | { | |
7321 | rtx p; | |
7322 | ||
7323 | if (v->ignore) | |
7324 | continue; | |
7325 | giv_array[i] = v; | |
7326 | stats[i].giv_number = i; | |
7327 | /* If this giv has been hoisted out of an inner loop, use the luid of | |
7328 | the previous insn. */ | |
7329 | for (p = v->insn; INSN_UID (p) >= max_uid_for_loop; ) | |
7330 | p = PREV_INSN (p); | |
7331 | stats[i].start_luid = INSN_LUID (p); | |
3ec2b590 R |
7332 | i++; |
7333 | } | |
7334 | ||
7335 | qsort (stats, giv_count, sizeof(*stats), cmp_recombine_givs_stats); | |
7336 | ||
0aa487d5 R |
7337 | /* Set up the ix field of each giv to name the corresponding | |
7338 | index into stats, and | |
7339 | do the actual most-recently-used recombination. */ | |
3ec2b590 R |
7340 | for (last_giv = 0, i = giv_count - 1; i >= 0; i--) |
7341 | { | |
7342 | v = giv_array[stats[i].giv_number]; | |
0aa487d5 | 7343 | v->ix = i; |
3ec2b590 R |
7344 | if (v->same) |
7345 | { | |
7346 | struct induction *old_same = v->same; | |
7347 | rtx new_combine; | |
7348 | ||
7349 | /* combine_givs_p actually says if we can make this transformation. | |
7350 | The other tests are here only to avoid keeping a giv alive | |
7351 | that could otherwise be eliminated. */ | |
7352 | if (last_giv | |
7353 | && ((old_same->maybe_dead && ! old_same->combined_with) | |
7354 | || ! last_giv->maybe_dead | |
7355 | || last_giv->combined_with) | |
7356 | && (new_combine = combine_givs_p (last_giv, v))) | |
7357 | { | |
7358 | old_same->combined_with--; | |
7359 | v->new_reg = new_combine; | |
7360 | v->same = last_giv; | |
7361 | last_giv->combined_with++; | |
7362 | /* No need to update lifetimes / benefits here since we have | |
7363 | already decided what to reduce. */ | |
516e5fa6 RH |
7364 | |
7365 | if (loop_dump_stream) | |
7366 | { | |
7367 | fprintf (loop_dump_stream, | |
7368 | "giv at %d recombined with giv at %d as ", | |
7369 | INSN_UID (v->insn), INSN_UID (last_giv->insn)); | |
7370 | print_rtl (loop_dump_stream, v->new_reg); | |
7371 | putc ('\n', loop_dump_stream); | |
7372 | } | |
3ec2b590 R |
7373 | continue; |
7374 | } | |
7375 | v = v->same; | |
7376 | } | |
7377 | else if (v->giv_type != DEST_REG) | |
7378 | continue; | |
7379 | if (! last_giv | |
7380 | || (last_giv->maybe_dead && ! last_giv->combined_with) | |
7381 | || ! v->maybe_dead | |
7382 | || v->combined_with) | |
7383 | last_giv = v; | |
7384 | } | |
7385 | ||
7386 | ends_need_computing = 0; | |
7387 | /* For each DEST_REG giv, compute lifetime starts, and try to compute | |
7388 | lifetime ends from regscan info. */ | |
0aa487d5 | 7389 | for (i = giv_count - 1; i >= 0; i--) |
3ec2b590 | 7390 | { |
0aa487d5 | 7391 | v = giv_array[stats[i].giv_number]; |
3ec2b590 R |
7392 | if (v->ignore) |
7393 | continue; | |
7394 | if (v->giv_type == DEST_ADDR) | |
7395 | { | |
7396 | /* Loop unrolling of an inner loop can even create new DEST_REG | |
7397 | givs. */ | |
7398 | rtx p; | |
7399 | for (p = v->insn; INSN_UID (p) >= max_uid_for_loop; ) | |
7400 | p = PREV_INSN (p); | |
7401 | stats[i].start_luid = stats[i].end_luid = INSN_LUID (p); | |
7402 | if (p != v->insn) | |
7403 | stats[i].end_luid++; | |
7404 | } | |
7405 | else /* v->giv_type == DEST_REG */ | |
7406 | { | |
7407 | if (v->last_use) | |
7408 | { | |
7409 | stats[i].start_luid = INSN_LUID (v->insn); | |
7410 | stats[i].end_luid = INSN_LUID (v->last_use); | |
7411 | } | |
7412 | else if (INSN_UID (v->insn) >= max_uid_for_loop) | |
7413 | { | |
7414 | rtx p; | |
7415 | /* This insn has been created by loop optimization on an inner | |
7416 | loop. We don't have a proper start_luid that will match | |
7417 | when we see the first set. But we do know that there will | |
7418 | be no use before the set, so we can set end_luid to 0 so that | |
7419 | we'll start looking for the last use right away. */ | |
7420 | for (p = PREV_INSN (v->insn); INSN_UID (p) >= max_uid_for_loop; ) | |
7421 | p = PREV_INSN (p); | |
7422 | stats[i].start_luid = INSN_LUID (p); | |
7423 | stats[i].end_luid = 0; | |
7424 | ends_need_computing++; | |
7425 | } | |
7426 | else | |
7427 | { | |
7428 | int regno = REGNO (v->dest_reg); | |
7429 | int count = VARRAY_INT (n_times_set, regno) - 1; | |
7430 | rtx p = v->insn; | |
7431 | ||
7432 | /* Find the first insn that sets the giv, so that we can verify | |
7433 | if this giv's lifetime wraps around the loop. We also need | |
7434 | the luid of the first setting insn in order to detect the | |
7435 | last use properly. */ | |
7436 | while (count) | |
7437 | { | |
7438 | p = prev_nonnote_insn (p); | |
7439 | if (reg_set_p (v->dest_reg, p)) | |
7440 | count--; | |
7441 | } | |
7442 | ||
7443 | stats[i].start_luid = INSN_LUID (p); | |
7444 | if (stats[i].start_luid > uid_luid[REGNO_FIRST_UID (regno)]) | |
7445 | { | |
7446 | stats[i].end_luid = -1; | |
7447 | ends_need_computing++; | |
7448 | } | |
7449 | else | |
7450 | { | |
7451 | stats[i].end_luid = uid_luid[REGNO_LAST_UID (regno)]; | |
7452 | if (stats[i].end_luid > INSN_LUID (loop_end)) | |
7453 | { | |
7454 | stats[i].end_luid = -1; | |
7455 | ends_need_computing++; | |
7456 | } | |
7457 | } | |
7458 | } | |
7459 | } | |
3ec2b590 R |
7460 | } |
7461 | ||
7462 | /* If the regscan information was inconclusive for one or more DEST_REG | |
7463 | givs, scan all the insns in the loop to find the lifetime ends. */ | |
7464 | if (ends_need_computing) | |
7465 | { | |
7466 | rtx biv = bl->biv->src_reg; | |
7467 | rtx p = loop_end; | |
7468 | ||
7469 | do | |
7470 | { | |
7471 | if (p == loop_start) | |
7472 | p = loop_end; | |
7473 | p = PREV_INSN (p); | |
7474 | if (GET_RTX_CLASS (GET_CODE (p)) != 'i') | |
7475 | continue; | |
7476 | ends_need_computing -= find_life_end (PATTERN (p), stats, p, biv); | |
7477 | } | |
7478 | while (ends_need_computing); | |
7479 | } | |
7480 | ||
7481 | /* Set start_luid back to the last insn that sets the giv. This allows | |
7482 | more combinations. */ | |
0aa487d5 | 7483 | for (i = giv_count - 1; i >= 0; i--) |
3ec2b590 | 7484 | { |
0aa487d5 | 7485 | v = giv_array[stats[i].giv_number]; |
3ec2b590 R |
7486 | if (v->ignore) |
7487 | continue; | |
7488 | if (INSN_UID (v->insn) < max_uid_for_loop) | |
7489 | stats[i].start_luid = INSN_LUID (v->insn); | |
3ec2b590 R |
7490 | } |
7491 | ||
7492 | /* Now adjust lifetime ends by taking combined givs into account. */ | |
0aa487d5 | 7493 | for (i = giv_count - 1; i >= 0; i--) |
3ec2b590 R |
7494 | { |
7495 | unsigned luid; | |
7496 | int j; | |
7497 | ||
0aa487d5 | 7498 | v = giv_array[stats[i].giv_number]; |
3ec2b590 R |
7499 | if (v->ignore) |
7500 | continue; | |
7501 | if (v->same && ! v->same->ignore) | |
7502 | { | |
7503 | j = v->same->ix; | |
7504 | luid = stats[i].start_luid; | |
7505 | /* Use unsigned arithmetic to model loop wrap-around. */ | |
7506 | if (luid - stats[j].start_luid | |
7507 | > (unsigned) stats[j].end_luid - stats[j].start_luid) | |
7508 | stats[j].end_luid = luid; | |
7509 | } | |
3ec2b590 R |
7510 | } |
7511 | ||
7512 | qsort (stats, giv_count, sizeof(*stats), cmp_recombine_givs_stats); | |
7513 | ||
7514 | /* Try to derive DEST_REG givs from previous DEST_REG givs with the | |
7515 | same mult_val and non-overlapping lifetime. This reduces register | |
7516 | pressure. | |
7517 | Once we find a DEST_REG giv that is suitable to derive others from, | |
7518 | we set last_giv to this giv, and try to derive as many other DEST_REG | |
7519 | givs from it without joining overlapping lifetimes. If we then | |
7520 | encounter a DEST_REG giv that we can't derive, we set rescan to the | |
7521 | index for this giv (unless rescan is already set). | |
7522 | When we are finished with the current LAST_GIV (i.e. the inner loop | |
7523 | terminates), we start again with rescan, which then becomes the new | |
7524 | LAST_GIV. */ | |
7525 | for (i = giv_count - 1; i >= 0; i = rescan) | |
7526 | { | |
7527 | int life_start, life_end; | |
7528 | ||
7529 | for (last_giv = 0, rescan = -1; i >= 0; i--) | |
7530 | { | |
7531 | rtx sum; | |
7532 | ||
7533 | v = giv_array[stats[i].giv_number]; | |
4d87f7a7 | 7534 | if (v->giv_type != DEST_REG || v->derived_from || v->same) |
3ec2b590 R |
7535 | continue; |
7536 | if (! last_giv) | |
7537 | { | |
7221f080 R |
7538 | /* Don't use a giv that's likely to be dead to derive |
7539 | others - that would be likely to keep that giv alive. */ | |
7540 | if (! v->maybe_dead || v->combined_with) | |
7541 | { | |
7542 | last_giv = v; | |
7543 | life_start = stats[i].start_luid; | |
7544 | life_end = stats[i].end_luid; | |
7545 | } | |
3ec2b590 R |
7546 | continue; |
7547 | } | |
7548 | /* Use unsigned arithmetic to model loop wrap around. */ | |
7549 | if (((unsigned) stats[i].start_luid - life_start | |
7550 | >= (unsigned) life_end - life_start) | |
7551 | && ((unsigned) stats[i].end_luid - life_start | |
7221f080 R |
7552 | > (unsigned) life_end - life_start) |
7553 | /* Check that the giv insn we're about to use for deriving | |
7554 | precedes all uses of that giv. Note that initializing the | |
7555 | derived giv would defeat the purpose of reducing register | |
7556 | pressure. | |
7557 | ??? We could arrange to move the insn. */ | |
7558 | && ((unsigned) stats[i].end_luid - INSN_LUID (loop_start) | |
7559 | > (unsigned) stats[i].start_luid - INSN_LUID (loop_start)) | |
3ec2b590 R |
7560 | && rtx_equal_p (last_giv->mult_val, v->mult_val) |
7561 | /* ??? Could handle libcalls, but would need more logic. */ | |
7562 | && ! find_reg_note (v->insn, REG_RETVAL, NULL_RTX) | |
7563 | /* We would really like to know if for any giv that v | |
7564 | is combined with, v->insn or any intervening biv increment | |
7565 | dominates that combined giv. However, we | |
7566 | don't have this detailed control flow information. | |
7567 | N.B. since last_giv will be reduced, it is valid | |
7568 | anywhere in the loop, so we don't need to check the | |
7221f080 R |
7569 | validity of last_giv. |
7570 | We rely here on the fact that v->always_executed implies that | |
7571 | there is no jump to someplace else in the loop before the | |
7572 | giv insn, and hence any insn that is executed before the | |
7573 | giv insn in the loop will have a lower luid. */ | |
3ec2b590 R |
7574 | && (v->always_executed || ! v->combined_with) |
7575 | && (sum = express_from (last_giv, v)) | |
53dc05e4 R |
7576 | /* Make sure we don't make the add more expensive. ADD_COST |
7577 | doesn't take different costs of registers and constants into | |
7578 | account, so compare the cost of the actual SET_SRCs. */ | |
7579 | && (rtx_cost (sum, SET) | |
7580 | <= rtx_cost (SET_SRC (single_set (v->insn)), SET)) | |
7581 | /* ??? unroll can't understand anything but reg + const_int | |
7582 | sums. It would be cleaner to fix unroll. */ | |
7583 | && ((GET_CODE (sum) == PLUS | |
7584 | && GET_CODE (XEXP (sum, 0)) == REG | |
7585 | && GET_CODE (XEXP (sum, 1)) == CONST_INT) | |
7586 | || ! unroll_p) | |
3ec2b590 | 7587 | && validate_change (v->insn, &PATTERN (v->insn), |
743f9f5d | 7588 | gen_rtx_SET (VOIDmode, v->dest_reg, sum), 0)) |
3ec2b590 | 7589 | { |
4d87f7a7 | 7590 | v->derived_from = last_giv; |
3ec2b590 | 7591 | life_end = stats[i].end_luid; |
516e5fa6 RH |
7592 | |
7593 | if (loop_dump_stream) | |
7594 | { | |
7595 | fprintf (loop_dump_stream, | |
7596 | "giv at %d derived from %d as ", | |
7597 | INSN_UID (v->insn), INSN_UID (last_giv->insn)); | |
743f9f5d | 7598 | print_rtl (loop_dump_stream, sum); |
516e5fa6 RH |
7599 | putc ('\n', loop_dump_stream); |
7600 | } | |
3ec2b590 R |
7601 | } |
7602 | else if (rescan < 0) | |
7603 | rescan = i; | |
7604 | } | |
7605 | } | |
7606 | } | |
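/* An illustrative standalone sketch of the unsigned wrap-around trick
   recombine_givs uses for lifetimes that cross the loop's backedge: a
   luid p lies in the cyclic interval [start, end] exactly when
   (unsigned) (p - start) <= (unsigned) (end - start), even when end is
   numerically smaller than start.  */

#include <assert.h>

static int
in_lifetime (int p, int start, int end)
{
  return (unsigned) (p - start) <= (unsigned) (end - start);
}

int main (void)
{
  /* Ordinary lifetime spanning luids 10..20.  */
  assert (in_lifetime (15, 10, 20) && !in_lifetime (25, 10, 20));
  /* Lifetime starting at luid 90 and wrapping around to luid 5.  */
  assert (in_lifetime (95, 90, 5) && in_lifetime (3, 90, 5));
  assert (!in_lifetime (50, 90, 5));
  return 0;
}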
7607 | \f | |
b4ad7b23 RS |
7608 | /* Emit code before INSERT_BEFORE to set REG = B * M + A. */ | |
7609 | ||
7610 | void | |
7611 | emit_iv_add_mult (b, m, a, reg, insert_before) | |
7612 | rtx b; /* initial value of basic induction variable */ | |
7613 | rtx m; /* multiplicative constant */ | |
7614 | rtx a; /* additive constant */ | |
7615 | rtx reg; /* destination register */ | |
7616 | rtx insert_before; | |
7617 | { | |
7618 | rtx seq; | |
7619 | rtx result; | |
7620 | ||
7621 | /* Prevent unexpected sharing of these rtx. */ | |
7622 | a = copy_rtx (a); | |
7623 | b = copy_rtx (b); | |
7624 | ||
0f41302f | 7625 | /* Increase the lifetime of any invariants moved further in code. */ |
b4ad7b23 RS |
7626 | update_reg_last_use (a, insert_before); |
7627 | update_reg_last_use (b, insert_before); | |
7628 | update_reg_last_use (m, insert_before); | |
7629 | ||
7630 | start_sequence (); | |
7631 | result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 0); | |
7632 | if (reg != result) | |
7633 | emit_move_insn (reg, result); | |
7634 | seq = gen_sequence (); | |
7635 | end_sequence (); | |
7636 | ||
7637 | emit_insn_before (seq, insert_before); | |
9ae8ffe7 | 7638 | |
00116a7b RH |
7639 | /* It is entirely possible that the expansion created lots of new |
7640 | registers. Iterate over the sequence we just created and | |
7641 | record them all. */ | |
7642 | ||
7643 | if (GET_CODE (seq) == SEQUENCE) | |
7644 | { | |
7645 | int i; | |
7646 | for (i = 0; i < XVECLEN (seq, 0); ++i) | |
7647 | { | |
7648 | rtx set = single_set (XVECEXP (seq, 0, i)); | |
7649 | if (set && GET_CODE (SET_DEST (set)) == REG) | |
7650 | record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0); | |
7651 | } | |
7652 | } | |
7653 | else if (GET_CODE (seq) == SET | |
7654 | && GET_CODE (SET_DEST (seq)) == REG) | |
7655 | record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0); | |
b4ad7b23 RS |
7656 | } |
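/* An illustrative standalone sketch of what emit_iv_add_mult arranges:
   the reduced giv's register is seeded with b*m + a once before the
   loop, after which a single addition per iteration keeps it in sync
   with the biv.  Plain C stand-in with made-up values.  */

#include <assert.h>

int main (void)
{
  int b = 3, m = 5, a = 7;                 /* biv start, mult, addend */
  int giv = b * m + a;                     /* emitted before the loop */
  for (int i = b; i < 13; i++, giv += m)   /* giv += m replaces i*m + a */
    assert (giv == i * m + a);
  return 0;
}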
7657 | \f | |
7658 | /* Test whether A * B can be computed without | |
7659 | an actual multiply insn. Value is 1 if so. */ | |
7660 | ||
7661 | static int | |
7662 | product_cheap_p (a, b) | |
7663 | rtx a; | |
7664 | rtx b; | |
7665 | { | |
7666 | int i; | |
7667 | rtx tmp; | |
7668 | struct obstack *old_rtl_obstack = rtl_obstack; | |
7669 | char *storage = (char *) obstack_alloc (&temp_obstack, 0); | |
7670 | int win = 1; | |
7671 | ||
0f41302f | 7672 | /* If only one is constant, make it B. */ |
b4ad7b23 RS |
7673 | if (GET_CODE (a) == CONST_INT) |
7674 | tmp = a, a = b, b = tmp; | |
7675 | ||
7676 | /* If first constant, both constant, so don't need multiply. */ | |
7677 | if (GET_CODE (a) == CONST_INT) | |
7678 | return 1; | |
7679 | ||
7680 | /* If second not constant, neither is constant, so would need multiply. */ | |
7681 | if (GET_CODE (b) != CONST_INT) | |
7682 | return 0; | |
7683 | ||
7684 | /* One operand is constant, so might not need multiply insn. Generate the | |
7685 | code for the multiply and see if a call or multiply, or long sequence | |
7686 | of insns is generated. */ | |
7687 | ||
7688 | rtl_obstack = &temp_obstack; | |
7689 | start_sequence (); | |
5fd8383e | 7690 | expand_mult (GET_MODE (a), a, b, NULL_RTX, 0); |
b4ad7b23 RS |
7691 | tmp = gen_sequence (); |
7692 | end_sequence (); | |
7693 | ||
7694 | if (GET_CODE (tmp) == SEQUENCE) | |
7695 | { | |
7696 | if (XVEC (tmp, 0) == 0) | |
7697 | win = 1; | |
7698 | else if (XVECLEN (tmp, 0) > 3) | |
7699 | win = 0; | |
7700 | else | |
7701 | for (i = 0; i < XVECLEN (tmp, 0); i++) | |
7702 | { | |
7703 | rtx insn = XVECEXP (tmp, 0, i); | |
7704 | ||
7705 | if (GET_CODE (insn) != INSN | |
7706 | || (GET_CODE (PATTERN (insn)) == SET | |
7707 | && GET_CODE (SET_SRC (PATTERN (insn))) == MULT) | |
7708 | || (GET_CODE (PATTERN (insn)) == PARALLEL | |
7709 | && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET | |
7710 | && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT)) | |
7711 | { | |
7712 | win = 0; | |
7713 | break; | |
7714 | } | |
7715 | } | |
7716 | } | |
7717 | else if (GET_CODE (tmp) == SET | |
7718 | && GET_CODE (SET_SRC (tmp)) == MULT) | |
7719 | win = 0; | |
7720 | else if (GET_CODE (tmp) == PARALLEL | |
7721 | && GET_CODE (XVECEXP (tmp, 0, 0)) == SET | |
7722 | && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT) | |
7723 | win = 0; | |
7724 | ||
7725 | /* Free any storage we obtained in generating this multiply and restore rtl | |
7726 | allocation to its normal obstack. */ | |
7727 | obstack_free (&temp_obstack, storage); | |
7728 | rtl_obstack = old_rtl_obstack; | |
7729 | ||
7730 | return win; | |
7731 | } | |
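/* An illustrative standalone sketch of why a constant multiply can be
   "cheap": expand_mult may synthesize it from shifts and adds, e.g.
   x*10 = (x << 3) + (x << 1), two shifts and one add, so no multiply
   insn (or libcall) is needed.  Nonnegative x only, to keep the shifts
   well defined in portable C.  */

#include <assert.h>

int main (void)
{
  for (int x = 0; x <= 1000; x++)
    assert (x * 10 == (x << 3) + (x << 1));
  return 0;
}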
7732 | \f | |
7733 | /* Check to see if loop can be terminated by a "decrement and branch until | |
7734 | zero" instruction. If so, add a REG_NONNEG note to the branch insn. | |
7735 | Also try reversing an increment loop to a decrement loop | |
7736 | to see if the optimization can be performed. | |
7737 | Value is nonzero if optimization was performed. */ | |
7738 | ||
7739 | /* This is useful even if the architecture doesn't have such an insn, | |
7740 | because it might change a loop which increments from 0 to n to a loop | |
7741 | which decrements from n to 0. A loop that decrements to zero is usually | |
7742 | faster than one that increments from zero. */ | |
7743 | ||
7744 | /* ??? This could be rewritten to use some of the loop unrolling procedures, | |
7745 | such as approx_final_value, biv_total_increment, loop_iterations, and | |
7746 | final_[bg]iv_value. */ | |
7747 | ||
7748 | static int | |
5629b16c | 7749 | check_dbra_loop (loop_end, insn_count, loop_start, loop_info) |
b4ad7b23 RS |
7750 | rtx loop_end; |
7751 | int insn_count; | |
7752 | rtx loop_start; | |
5629b16c | 7753 | struct loop_info *loop_info; |
b4ad7b23 RS |
7754 | { |
7755 | struct iv_class *bl; | |
7756 | rtx reg; | |
7757 | rtx jump_label; | |
7758 | rtx final_value; | |
7759 | rtx start_value; | |
b4ad7b23 RS |
7760 | rtx new_add_val; |
7761 | rtx comparison; | |
7762 | rtx before_comparison; | |
7763 | rtx p; | |
0628fde6 JW |
7764 | rtx jump; |
7765 | rtx first_compare; | |
7766 | int compare_and_branch; | |
b4ad7b23 RS |
7767 | |
7768 | /* If last insn is a conditional branch, and the insn before tests a | |
7769 | register value, try to optimize it. Otherwise, we can't do anything. */ | |
7770 | ||
0628fde6 JW |
7771 | jump = PREV_INSN (loop_end); |
7772 | comparison = get_condition_for_loop (jump); | |
b4ad7b23 RS |
7773 | if (comparison == 0) |
7774 | return 0; | |
7775 | ||
0628fde6 JW |
7776 | /* Try to compute whether the compare/branch at the loop end is one or |
7777 | two instructions. */ | |
7778 | get_condition (jump, &first_compare); | |
7779 | if (first_compare == jump) | |
7780 | compare_and_branch = 1; | |
7781 | else if (first_compare == prev_nonnote_insn (jump)) | |
7782 | compare_and_branch = 2; | |
7783 | else | |
7784 | return 0; | |
7785 | ||
b4ad7b23 RS |
7786 | /* Check all of the bivs to see if the compare uses one of them. |
7787 | Skip biv's set more than once because we can't guarantee that | |
7788 | it will be zero on the last iteration. Also skip if the biv is | |
7789 | used between its update and the test insn. */ | |
7790 | ||
7791 | for (bl = loop_iv_list; bl; bl = bl->next) | |
7792 | { | |
7793 | if (bl->biv_count == 1 | |
7794 | && bl->biv->dest_reg == XEXP (comparison, 0) | |
7795 | && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn, | |
0628fde6 | 7796 | first_compare)) |
b4ad7b23 RS |
7797 | break; |
7798 | } | |
7799 | ||
7800 | if (! bl) | |
7801 | return 0; | |
7802 | ||
7803 | /* Look for the case where the basic induction variable is always | |
7804 | nonnegative, and equals zero on the last iteration. | |
7805 | In this case, add a reg_note REG_NONNEG, which allows the | |
7806 | m68k DBRA instruction to be used. */ | |
7807 | ||
7808 | if (((GET_CODE (comparison) == GT | |
7809 | && GET_CODE (XEXP (comparison, 1)) == CONST_INT | |
7810 | && INTVAL (XEXP (comparison, 1)) == -1) | |
7811 | || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx)) | |
7812 | && GET_CODE (bl->biv->add_val) == CONST_INT | |
7813 | && INTVAL (bl->biv->add_val) < 0) | |
7814 | { | |
7815 | /* Initial value must be greater than 0, | |
7816 | and init_val % -dec_value == 0 to ensure that it equals zero on | |
7817 | the last iteration */ | |
7818 | ||
7819 | if (GET_CODE (bl->initial_value) == CONST_INT | |
7820 | && INTVAL (bl->initial_value) > 0 | |
db3cf6fb MS |
7821 | && (INTVAL (bl->initial_value) |
7822 | % (-INTVAL (bl->biv->add_val))) == 0) | |
b4ad7b23 RS |
7823 | { |
7824 | /* register always nonnegative, add REG_NOTE to branch */ | |
7825 | REG_NOTES (PREV_INSN (loop_end)) | |
38a448ca RH |
7826 | = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX, |
7827 | REG_NOTES (PREV_INSN (loop_end))); | |
b4ad7b23 RS |
7828 | bl->nonneg = 1; |
7829 | ||
7830 | return 1; | |
7831 | } | |
7832 | ||
7833 | /* If the decrement is 1 and the value was tested as >= 0 before | |
7834 | the loop, then we can safely optimize. */ | |
7835 | for (p = loop_start; p; p = PREV_INSN (p)) | |
7836 | { | |
7837 | if (GET_CODE (p) == CODE_LABEL) | |
7838 | break; | |
7839 | if (GET_CODE (p) != JUMP_INSN) | |
7840 | continue; | |
7841 | ||
7842 | before_comparison = get_condition_for_loop (p); | |
7843 | if (before_comparison | |
7844 | && XEXP (before_comparison, 0) == bl->biv->dest_reg | |
7845 | && GET_CODE (before_comparison) == LT | |
7846 | && XEXP (before_comparison, 1) == const0_rtx | |
7847 | && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start) | |
7848 | && INTVAL (bl->biv->add_val) == -1) | |
7849 | { | |
7850 | REG_NOTES (PREV_INSN (loop_end)) | |
38a448ca RH |
7851 | = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX, |
7852 | REG_NOTES (PREV_INSN (loop_end))); | |
b4ad7b23 RS |
7853 | bl->nonneg = 1; |
7854 | ||
7855 | return 1; | |
7856 | } | |
7857 | } | |
7858 | } | |
ef178af3 ZW |
7859 | else if (GET_CODE (bl->biv->add_val) == CONST_INT |
7860 | && INTVAL (bl->biv->add_val) > 0) | |
b4ad7b23 RS |
7861 | { |
7862 | /* Try to change inc to dec, so we can apply the above optimization. */ | |
7863 | /* Can do this if: | |
7864 | all registers modified are induction variables or invariant, | |
7865 | all memory references have non-overlapping addresses | |
7866 | (obviously true if only one write) | |
7867 | allow 2 insns for the compare/jump at the end of the loop. */ | |
45cc060e JW |
7868 | /* Also, we must avoid any instructions which use both the reversed |
7869 | biv and another biv. Such instructions will fail if the loop is | |
7870 | reversed. We meet this condition by requiring that either | |
7871 | no_use_except_counting is true, or else that there is only | |
7872 | one biv. */ | |
b4ad7b23 RS |
7873 | int num_nonfixed_reads = 0; |
7874 | /* 1 if the iteration var is used only to count iterations. */ | |
7875 | int no_use_except_counting = 0; | |
b418c26e JW |
7876 | /* 1 if the loop has no memory store, or it has a single memory store |
7877 | which is reversible. */ | |
7878 | int reversible_mem_store = 1; | |
b4ad7b23 | 7879 | |
b4ad7b23 | 7880 | if (bl->giv_count == 0 |
353127c2 | 7881 | && ! loop_number_exit_count[uid_loop_num[INSN_UID (loop_start)]]) |
b4ad7b23 RS |
7882 | { |
7883 | rtx bivreg = regno_reg_rtx[bl->regno]; | |
7884 | ||
7885 | /* If there are no givs for this biv, and the only exit is the | |
38e01259 | 7886 | fall through at the end of the loop, then |
b4ad7b23 RS |
7887 | see if perhaps there are no uses except to count. */ |
7888 | no_use_except_counting = 1; | |
7889 | for (p = loop_start; p != loop_end; p = NEXT_INSN (p)) | |
7890 | if (GET_RTX_CLASS (GET_CODE (p)) == 'i') | |
7891 | { | |
7892 | rtx set = single_set (p); | |
7893 | ||
7894 | if (set && GET_CODE (SET_DEST (set)) == REG | |
7895 | && REGNO (SET_DEST (set)) == bl->regno) | |
7896 | /* An insn that sets the biv is okay. */ | |
7897 | ; | |
7898 | else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end)) | |
7899 | || p == prev_nonnote_insn (loop_end)) | |
7900 | /* Don't bother about the end test. */ | |
7901 | ; | |
7902 | else if (reg_mentioned_p (bivreg, PATTERN (p))) | |
b4ad7b23 RS |
7903 | { |
7904 | no_use_except_counting = 0; | |
7905 | break; | |
7906 | } | |
7907 | } | |
7908 | } | |
7909 | ||
c48ba252 R |
7910 | if (no_use_except_counting) |
7911 | ; /* no need to worry about MEMs. */ | |
7912 | else if (num_mem_sets <= 1) | |
7913 | { | |
7914 | for (p = loop_start; p != loop_end; p = NEXT_INSN (p)) | |
7915 | if (GET_RTX_CLASS (GET_CODE (p)) == 'i') | |
7916 | num_nonfixed_reads += count_nonfixed_reads (PATTERN (p)); | |
7917 | ||
7918 | /* If the loop has a single store, and the destination address is | |
7919 | invariant, then we can't reverse the loop, because this address | |
7920 | might then have the wrong value at loop exit. | |
7921 | This would work if the source was invariant also, however, in that | |
7922 | case, the insn should have been moved out of the loop. */ | |
7923 | ||
7924 | if (num_mem_sets == 1) | |
2d4fde68 R |
7925 | { |
7926 | struct induction *v; | |
7927 | ||
7928 | reversible_mem_store | |
7929 | = (! unknown_address_altered | |
ef1d4aec | 7930 | && ! invariant_p (XEXP (XEXP (loop_store_mems, 0), 0))); |
2d4fde68 R |
7931 | |
7932 | /* If the store depends on a register that is set after the | |
7933 | store, it depends on the initial value, and is thus not | |
7934 | reversible. */ | |
7935 | for (v = bl->giv; reversible_mem_store && v; v = v->next_iv) | |
7936 | { | |
7937 | if (v->giv_type == DEST_REG | |
7938 | && reg_mentioned_p (v->dest_reg, | |
7939 | XEXP (loop_store_mems, 0)) | |
1cb1fe66 | 7940 | && loop_insn_first_p (first_loop_store_insn, v->insn)) |
2d4fde68 R |
7941 | reversible_mem_store = 0; |
7942 | } | |
7943 | } | |
c48ba252 R |
7944 | } |
7945 | else | |
7946 | return 0; | |
b418c26e | 7947 | |
b4ad7b23 RS |
7948 | /* This code only acts for innermost loops. Also it simplifies |
7949 | the memory address check by only reversing loops with | |
7950 | zero or one memory access. | |
7951 | Two memory accesses could involve parts of the same array, | |
c48ba252 R |
7952 | and that can't be reversed. |
7953 | If the biv is used only for counting, then we don't need to worry | |
7954 | about all these things. */ | |
7955 | ||
7956 | if ((num_nonfixed_reads <= 1 | |
3c748bb6 MH |
7957 | && ! loop_info->has_call |
7958 | && ! loop_info->has_volatile | |
c48ba252 R |
7959 | && reversible_mem_store |
7960 | && (bl->giv_count + bl->biv_count + num_mem_sets | |
7961 | + num_movables + compare_and_branch == insn_count) | |
7962 | && (bl == loop_iv_list && bl->next == 0)) | |
7963 | || no_use_except_counting) | |
b4ad7b23 | 7964 | { |
b4ad7b23 RS |
7965 | rtx tem; |
7966 | ||
7967 | /* Loop can be reversed. */ | |
7968 | if (loop_dump_stream) | |
7969 | fprintf (loop_dump_stream, "Can reverse loop\n"); | |
7970 | ||
7971 | /* Now check other conditions: | |
e9a25f70 | 7972 | |
956d6950 JL |
7973 | The increment must be a constant, as must the initial value, |
7974 | and the comparison code must be LT. | |
b4ad7b23 RS |
7975 | |
7976 | This test can probably be improved since +/- 1 in the constant | |
7977 | can be obtained by changing LT to LE and vice versa; this is | |
7978 | confusing. */ | |
7979 | ||
e9a25f70 | 7980 | if (comparison |
c48ba252 R |
7981 | /* for constants, LE gets turned into LT */ |
7982 | && (GET_CODE (comparison) == LT | |
7983 | || (GET_CODE (comparison) == LE | |
7984 | && no_use_except_counting))) | |
b4ad7b23 | 7985 | { |
3c748bb6 | 7986 | HOST_WIDE_INT add_val, add_adjust, comparison_val; |
c48ba252 R |
7987 | rtx initial_value, comparison_value; |
7988 | int nonneg = 0; | |
7989 | enum rtx_code cmp_code; | |
7990 | int comparison_const_width; | |
7991 | unsigned HOST_WIDE_INT comparison_sign_mask; | |
e9a25f70 JL |
7992 | |
7993 | add_val = INTVAL (bl->biv->add_val); | |
c48ba252 | 7994 | comparison_value = XEXP (comparison, 1); |
2c74fb2b AS |
7995 | if (GET_MODE (comparison_value) == VOIDmode) |
7996 | comparison_const_width | |
7997 | = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0))); | |
7998 | else | |
7999 | comparison_const_width | |
8000 | = GET_MODE_BITSIZE (GET_MODE (comparison_value)); | |
c48ba252 R |
8001 | if (comparison_const_width > HOST_BITS_PER_WIDE_INT) |
8002 | comparison_const_width = HOST_BITS_PER_WIDE_INT; | |
8003 | comparison_sign_mask | |
8004 | = (unsigned HOST_WIDE_INT)1 << (comparison_const_width - 1); | |
8005 | ||
3aa94dc8 JL |
8006 | /* If the comparison value is not a loop invariant, then we |
8007 | can not reverse this loop. | |
8008 | ||
8009 | ??? If the insns which initialize the comparison value as | |
8010 | a whole compute an invariant result, then we could move | |
8011 | them out of the loop and proceed with loop reversal. */ | |
9231189b | 8012 | if (!invariant_p (comparison_value)) |
3aa94dc8 JL |
8013 | return 0; |
8014 | ||
c48ba252 R |
8015 | if (GET_CODE (comparison_value) == CONST_INT) |
8016 | comparison_val = INTVAL (comparison_value); | |
e9a25f70 JL |
8017 | initial_value = bl->initial_value; |
8018 | ||
a8decb2c JL |
8019 | /* Normalize the initial value if it is an integer and |
8020 | has no other use except as a counter. This will allow | |
8021 | a few more loops to be reversed. */ | |
8022 | if (no_use_except_counting | |
c48ba252 | 8023 | && GET_CODE (comparison_value) == CONST_INT |
a8decb2c | 8024 | && GET_CODE (initial_value) == CONST_INT) |
e9a25f70 JL |
8025 | { |
8026 | comparison_val = comparison_val - INTVAL (bl->initial_value); | |
c48ba252 R |
8027 | /* The code below requires comparison_val to be a multiple |
8028 | of add_val in order to do the loop reversal, so | |
8029 | round up comparison_val to a multiple of add_val. | |
8030 | Since comparison_value is constant, we know that the | |
8031 | current comparison code is LT. */ | |
8032 | comparison_val = comparison_val + add_val - 1; | |
8033 | comparison_val | |
8034 | -= (unsigned HOST_WIDE_INT) comparison_val % add_val; | |
8035 | /* We postpone overflow checks for COMPARISON_VAL here; | |
8036 | even if there is an overflow, we might still be able to | |
8037 | reverse the loop, if converting the loop exit test to | |
8038 | NE is possible. */ | |
8039 | initial_value = const0_rtx; | |
e9a25f70 JL |
8040 | } |
8041 | ||
c48ba252 R |
8042 | /* First check if we can do a vanilla loop reversal. */ |
8043 | if (initial_value == const0_rtx | |
3c748bb6 MH |
8044 | /* If we have a decrement_and_branch_on_count, |
8045 | prefer the NE test, since this will allow that | |
8046 | instruction to be generated. Note that we must | |
8047 | use a vanilla loop reversal if the biv is used to | |
8048 | calculate a giv or has a non-counting use. */ | |
8049 | #if ! defined (HAVE_decrement_and_branch_until_zero) \ | |
8050 | && defined (HAVE_decrement_and_branch_on_count) | |
35704c46 | 8051 | && (! (add_val == 1 && loop_info->vtop |
c5cbf81e JL |
8052 | && (bl->biv_count == 0 |
8053 | || no_use_except_counting))) | |
c48ba252 R |
8054 | #endif |
8055 | && GET_CODE (comparison_value) == CONST_INT | |
8056 | /* Now do postponed overflow checks on COMPARISON_VAL. */ | |
8057 | && ! (((comparison_val - add_val) ^ INTVAL (comparison_value)) | |
8058 | & comparison_sign_mask)) | |
8059 | { | |
8060 | /* Register will always be nonnegative, with value | |
8061 | 0 on last iteration */ | |
8062 | add_adjust = add_val; | |
8063 | nonneg = 1; | |
8064 | cmp_code = GE; | |
8065 | } | |
35704c46 | 8066 | else if (add_val == 1 && loop_info->vtop |
c5cbf81e JL |
8067 | && (bl->biv_count == 0 |
8068 | || no_use_except_counting)) | |
c48ba252 R |
8069 | { |
8070 | add_adjust = 0; | |
8071 | cmp_code = NE; | |
8072 | } | |
8073 | else | |
8074 | return 0; | |
8075 | ||
8076 | if (GET_CODE (comparison) == LE) | |
8077 | add_adjust -= add_val; | |
8078 | ||
e9a25f70 JL |
8079 | /* If the initial value is not zero, or if the comparison |
8080 | value is not an exact multiple of the increment, then we | |
8081 | can not reverse this loop. */ | |
c48ba252 R |
8082 | if (initial_value == const0_rtx |
8083 | && GET_CODE (comparison_value) == CONST_INT) | |
8084 | { | |
8085 | if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0) | |
8086 | return 0; | |
8087 | } | |
8088 | else | |
8089 | { | |
8090 | if (! no_use_except_counting || add_val != 1) | |
8091 | return 0; | |
8092 | } | |
e9a25f70 | 8093 | |
8ed69d09 R |
8094 | final_value = comparison_value; |
8095 | ||
e9a25f70 JL |
8096 | /* Reset these in case we normalized the initial value |
8097 | and comparison value above. */ | |
8ed69d09 R |
8098 | if (GET_CODE (comparison_value) == CONST_INT |
8099 | && GET_CODE (initial_value) == CONST_INT) | |
8100 | { | |
8101 | comparison_value = GEN_INT (comparison_val); | |
8102 | final_value | |
8103 | = GEN_INT (comparison_val + INTVAL (bl->initial_value)); | |
8104 | } | |
e9a25f70 | 8105 | bl->initial_value = initial_value; |
b4ad7b23 RS |
8106 | |
8107 | /* Save some info needed to produce the new insns. */ | |
8108 | reg = bl->biv->dest_reg; | |
8109 | jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 1); | |
3c2f289c RK |
8110 | if (jump_label == pc_rtx) |
8111 | jump_label = XEXP (SET_SRC (PATTERN (PREV_INSN (loop_end))), 2); | |
5fd8383e | 8112 | new_add_val = GEN_INT (- INTVAL (bl->biv->add_val)); |
b4ad7b23 | 8113 | |
c48ba252 R |
8114 | /* Set start_value; if this is not a CONST_INT, we need |
8115 | to generate a SUB. | |
8116 | Initialize biv to start_value before loop start. | |
b4ad7b23 RS |
8117 | The old initializing insn will be deleted as a |
8118 | dead store by flow.c. */ | |
c48ba252 R |
8119 | if (initial_value == const0_rtx |
8120 | && GET_CODE (comparison_value) == CONST_INT) | |
8121 | { | |
8122 | start_value = GEN_INT (comparison_val - add_adjust); | |
8123 | emit_insn_before (gen_move_insn (reg, start_value), | |
8124 | loop_start); | |
8125 | } | |
8126 | else if (GET_CODE (initial_value) == CONST_INT) | |
8127 | { | |
8128 | rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust); | |
8129 | enum machine_mode mode = GET_MODE (reg); | |
8130 | enum insn_code icode | |
8131 | = add_optab->handlers[(int) mode].insn_code; | |
8132 | if (! (*insn_operand_predicate[icode][0]) (reg, mode) | |
8133 | || ! ((*insn_operand_predicate[icode][1]) | |
8134 | (comparison_value, mode)) | |
8135 | || ! (*insn_operand_predicate[icode][2]) (offset, mode)) | |
8136 | return 0; | |
8137 | start_value | |
8138 | = gen_rtx_PLUS (mode, comparison_value, offset); | |
8139 | emit_insn_before ((GEN_FCN (icode) | |
8140 | (reg, comparison_value, offset)), | |
8141 | loop_start); | |
8142 | if (GET_CODE (comparison) == LE) | |
8143 | final_value = gen_rtx_PLUS (mode, comparison_value, | |
8144 | GEN_INT (add_val)); | |
8145 | } | |
8146 | else if (! add_adjust) | |
8147 | { | |
8148 | enum machine_mode mode = GET_MODE (reg); | |
8149 | enum insn_code icode | |
8150 | = sub_optab->handlers[(int) mode].insn_code; | |
8151 | if (! (*insn_operand_predicate[icode][0]) (reg, mode) | |
8152 | || ! ((*insn_operand_predicate[icode][1]) | |
8153 | (comparison_value, mode)) | |
8154 | || ! ((*insn_operand_predicate[icode][2]) | |
8155 | (initial_value, mode))) | |
8156 | return 0; | |
8157 | start_value | |
8158 | = gen_rtx_MINUS (mode, comparison_value, initial_value); | |
8159 | emit_insn_before ((GEN_FCN (icode) | |
8160 | (reg, comparison_value, initial_value)), | |
8161 | loop_start); | |
8162 | } | |
8163 | else | |
8164 | /* We could handle the other cases too, but it'll be | |
8165 | better to have a testcase first. */ | |
8166 | return 0; | |
b4ad7b23 | 8167 | |
225a7e3d JL |
8168 | /* We may not have a single insn which can increment a reg, so |
8169 | create a sequence to hold all the insns from expand_inc. */ | |
8170 | start_sequence (); | |
8171 | expand_inc (reg, new_add_val); | |
8172 | tem = gen_sequence (); | |
8173 | end_sequence (); | |
8174 | ||
8175 | p = emit_insn_before (tem, bl->biv->insn); | |
b4ad7b23 RS |
8176 | delete_insn (bl->biv->insn); |
8177 | ||
8178 | /* Update biv info to reflect its new status. */ | |
8179 | bl->biv->insn = p; | |
8180 | bl->initial_value = start_value; | |
8181 | bl->biv->add_val = new_add_val; | |
8182 | ||
5629b16c | 8183 | /* Update loop info. */ |
eb6a3bc0 MH |
8184 | loop_info->initial_value = reg; |
8185 | loop_info->initial_equiv_value = reg; | |
5629b16c MH |
8186 | loop_info->final_value = const0_rtx; |
8187 | loop_info->final_equiv_value = const0_rtx; | |
8188 | loop_info->comparison_value = const0_rtx; | |
8189 | loop_info->comparison_code = cmp_code; | |
8190 | loop_info->increment = new_add_val; | |
8191 | ||
b4ad7b23 RS |
8192 | /* Inc LABEL_NUSES so that delete_insn will |
8193 | not delete the label. */ | |
8194 | LABEL_NUSES (XEXP (jump_label, 0)) ++; | |
8195 | ||
8196 | /* Emit an insn after the end of the loop to set the biv's | |
8197 | proper exit value if it is used anywhere outside the loop. */ | |
0628fde6 | 8198 | if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare)) |
b4ad7b23 | 8199 | || ! bl->init_insn |
b1f21e0a | 8200 | || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn)) |
b4ad7b23 RS |
8201 | emit_insn_after (gen_move_insn (reg, final_value), |
8202 | loop_end); | |
8203 | ||
8204 | /* Delete compare/branch at end of loop. */ | |
8205 | delete_insn (PREV_INSN (loop_end)); | |
0628fde6 JW |
8206 | if (compare_and_branch == 2) |
8207 | delete_insn (first_compare); | |
b4ad7b23 RS |
8208 | |
8209 | /* Add new compare/branch insn at end of loop. */ | |
8210 | start_sequence (); | |
362cc3d4 MH |
8211 | emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX, |
8212 | GET_MODE (reg), 0, 0, | |
8213 | XEXP (jump_label, 0)); | |
b4ad7b23 RS |
8214 | tem = gen_sequence (); |
8215 | end_sequence (); | |
8216 | emit_jump_insn_before (tem, loop_end); | |
8217 | ||
a7060368 MH |
8218 | for (tem = PREV_INSN (loop_end); |
8219 | tem && GET_CODE (tem) != JUMP_INSN; | |
8220 | tem = PREV_INSN (tem)) | |
8221 | ; | |
8222 | ||
8223 | if (tem) | |
8224 | JUMP_LABEL (tem) = XEXP (jump_label, 0); | |
8225 | ||
c48ba252 | 8226 | if (nonneg) |
b4ad7b23 | 8227 | { |
c48ba252 R |
8228 | if (tem) |
8229 | { | |
c48ba252 R |
8230 | /* Increment of LABEL_NUSES done above. */ |
8231 | /* Register is now always nonnegative, | |
8232 | so add REG_NONNEG note to the branch. */ | |
8233 | REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX, | |
8234 | REG_NOTES (tem)); | |
8235 | } | |
8236 | bl->nonneg = 1; | |
b4ad7b23 RS |
8237 | } |
8238 | ||
22b452e7 BS |
8239 | /* No insn may reference both the reversed and another biv or it |
8240 | will fail (see comment near the top of the loop reversal | |
8241 | code). | |
8242 | Earlier on, we have verified that the biv has no use except | |
8243 | counting, or it is the only biv in this function. | |
8244 | However, the code that computes no_use_except_counting does | |
8245 | not verify reg notes. It's possible to have an insn that | |
8246 | references another biv, and has a REG_EQUAL note with an | |
8247 | expression based on the reversed biv. To avoid this case, | |
8248 | remove all REG_EQUAL notes based on the reversed biv | |
8249 | here. */ | |
8250 | for (p = loop_start; p != loop_end; p = NEXT_INSN (p)) | |
8251 | if (GET_RTX_CLASS (GET_CODE (p)) == 'i') | |
8252 | { | |
8253 | rtx *pnote; | |
8254 | rtx set = single_set (p); | |
8255 | /* If this is a set of a GIV based on the reversed biv, any | |
8256 | REG_EQUAL notes should still be correct. */ | |
8257 | if (! set | |
8258 | || GET_CODE (SET_DEST (set)) != REG | |
6a651371 | 8259 | || (size_t) REGNO (SET_DEST (set)) >= reg_iv_type->num_elements |
22b452e7 BS |
8260 | || REG_IV_TYPE (REGNO (SET_DEST (set))) != GENERAL_INDUCT |
8261 | || REG_IV_INFO (REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg) | |
8262 | for (pnote = ®_NOTES (p); *pnote;) | |
8263 | { | |
8264 | if (REG_NOTE_KIND (*pnote) == REG_EQUAL | |
8265 | && reg_mentioned_p (regno_reg_rtx[bl->regno], | |
8266 | XEXP (*pnote, 0))) | |
8267 | *pnote = XEXP (*pnote, 1); | |
8268 | else | |
8269 | pnote = &XEXP (*pnote, 1); | |
8270 | } | |
8271 | } | |
8272 | ||
b4ad7b23 RS |
8273 | /* Mark that this biv has been reversed. Each giv which depends |
8274 | on this biv, and which is also live past the end of the loop | |
8275 | will have to be fixed up. */ | |
8276 | ||
8277 | bl->reversed = 1; | |
8278 | ||
8279 | if (loop_dump_stream) | |
b50cb11f MH |
8280 | { |
8281 | fprintf (loop_dump_stream, "Reversed loop"); | |
8282 | if (bl->nonneg) | |
8283 | fprintf (loop_dump_stream, " and added reg_nonneg\n"); | |
8284 | else | |
8285 | fprintf (loop_dump_stream, "\n"); | |
8286 | } | |
b4ad7b23 RS |
8287 | |
8288 | return 1; | |
8289 | } | |
8290 | } | |
8291 | } | |
8292 | ||
8293 | return 0; | |
8294 | } | |
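/* An illustrative standalone sketch of the reversal check_dbra_loop
   performs: a counting loop "for (i = 0; i < n; i += add)" executes the
   same number of times as a countdown from n rounded up to a multiple
   of add (minus one step) back to zero -- and the countdown form can
   use a decrement-and-branch-until-zero instruction.  */

#include <assert.h>

int main (void)
{
  int n = 17, add = 4, up = 0, down = 0;

  for (int i = 0; i < n; i += add)
    up++;

  int rounded = ((n + add - 1) / add) * add;  /* comparison_val rounding */
  for (int i = rounded - add; i >= 0; i -= add)
    down++;

  assert (up == down && up == 5);
  return 0;
}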
8295 | \f | |
8296 | /* Verify whether the biv BL appears to be eliminable, | |
8297 | based on the insns in the loop that refer to it. | |
8298 | LOOP_START is the first insn of the loop, and END is the end insn. | |
8299 | ||
8300 | If ELIMINATE_P is non-zero, actually do the elimination. | |
8301 | ||
8302 | THRESHOLD and INSN_COUNT are from loop_optimize and are used to | |
8303 | determine whether invariant insns should be placed inside or at the | |
8304 | start of the loop. */ | |
8305 | ||
8306 | static int | |
8307 | maybe_eliminate_biv (bl, loop_start, end, eliminate_p, threshold, insn_count) | |
8308 | struct iv_class *bl; | |
8309 | rtx loop_start; | |
8310 | rtx end; | |
8311 | int eliminate_p; | |
8312 | int threshold, insn_count; | |
8313 | { | |
8314 | rtx reg = bl->biv->dest_reg; | |
bd5a664e | 8315 | rtx p; |
b4ad7b23 RS |
8316 | |
8317 | /* Scan all insns in the loop, stopping if we find one that uses the | |
8318 | biv in a way that we cannot eliminate. */ | |
8319 | ||
8320 | for (p = loop_start; p != end; p = NEXT_INSN (p)) | |
8321 | { | |
8322 | enum rtx_code code = GET_CODE (p); | |
8323 | rtx where = threshold >= insn_count ? loop_start : p; | |
8324 | ||
fdb1833a R |
8325 | /* If this is a libcall that sets a giv, skip ahead to its end. */ |
8326 | if (GET_RTX_CLASS (code) == 'i') | |
8327 | { | |
8328 | rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX); | |
8329 | ||
8330 | if (note) | |
8331 | { | |
8332 | rtx last = XEXP (note, 0); | |
8333 | rtx set = single_set (last); | |
8334 | ||
8335 | if (set && GET_CODE (SET_DEST (set)) == REG) | |
8336 | { | |
8337 | int regno = REGNO (SET_DEST (set)); | |
8338 | ||
ab519383 GS |
8339 | if (regno < max_reg_before_loop |
8340 | && REG_IV_TYPE (regno) == GENERAL_INDUCT | |
fdb1833a R |
8341 | && REG_IV_INFO (regno)->src_reg == bl->biv->src_reg) |
8342 | p = last; | |
8343 | } | |
8344 | } | |
8345 | } | |
b4ad7b23 RS |
8346 | if ((code == INSN || code == JUMP_INSN || code == CALL_INSN) |
8347 | && reg_mentioned_p (reg, PATTERN (p)) | |
8348 | && ! maybe_eliminate_biv_1 (PATTERN (p), p, bl, eliminate_p, where)) | |
8349 | { | |
8350 | if (loop_dump_stream) | |
8351 | fprintf (loop_dump_stream, | |
8352 | "Cannot eliminate biv %d: biv used in insn %d.\n", | |
8353 | bl->regno, INSN_UID (p)); | |
8354 | break; | |
8355 | } | |
8356 | } | |
8357 | ||
8358 | if (p == end) | |
8359 | { | |
8360 | if (loop_dump_stream) | |
8361 | fprintf (loop_dump_stream, "biv %d %s eliminated.\n", | |
8362 | bl->regno, eliminate_p ? "was" : "can be"); | |
8363 | return 1; | |
8364 | } | |
8365 | ||
8366 | return 0; | |
8367 | } | |
8368 | \f | |
a6207a2b | 8369 | /* INSN and REFERENCE are instructions in the same insn chain. |
f38cbf0f | 8370 | Return non-zero if INSN is first. */ |
a6207a2b | 8371 | |
c99f8c2a | 8372 | int |
a6207a2b R |
8373 | loop_insn_first_p (insn, reference) |
8374 | rtx insn, reference; | |
8375 | { | |
f38cbf0f R |
8376 | rtx p, q; |
8377 | ||
8378 | for (p = insn, q = reference; ;) | |
8379 | { | |
8380 | /* Make the "not first" test first, so that INSN == REFERENCE yields | |
8381 | "not first". */ | |
8382 | if (q == insn || ! p) | |
8383 | return 0; | |
8384 | if (p == reference || ! q) | |
8385 | return 1; | |
8386 | ||
7c2772f1 R |
8387 | /* Either of P or Q might be a NOTE. Notes have the same LUID as the |
8388 | previous insn, hence the <= comparison below does not work if | |
8389 | P is a note. */ | |
f38cbf0f | 8390 | if (INSN_UID (p) < max_uid_for_loop |
7c2772f1 R |
8391 | && INSN_UID (q) < max_uid_for_loop |
8392 | && GET_CODE (p) != NOTE) | |
8393 | return INSN_LUID (p) <= INSN_LUID (q); | |
f38cbf0f | 8394 | |
7c2772f1 R |
8395 | if (INSN_UID (p) >= max_uid_for_loop |
8396 | || GET_CODE (p) == NOTE) | |
f38cbf0f R |
8397 | p = NEXT_INSN (p); |
8398 | if (INSN_UID (q) >= max_uid_for_loop) | |
8399 | q = NEXT_INSN (q); | |
8400 | } | |
a6207a2b R |
8401 | } |
8402 | ||
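/* A small usage sketch (with hypothetical insns A and B): when A
   precedes B in the chain, loop_insn_first_p (A, B) is 1 and
   loop_insn_first_p (B, A) is 0; loop_insn_first_p (A, A) is also 0,
   since the "not first" test is made before the "first" test.  Insns
   emitted after the luids were computed have no valid luid, so they
   are handled by stepping forward with NEXT_INSN instead.  */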
8403 | /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if | |
8404 | the offset that we have to take into account due to auto-increment / | |
8405 | giv derivation is zero. */ | |
8406 | static int | |
8407 | biv_elimination_giv_has_0_offset (biv, giv, insn) | |
8408 | struct induction *biv, *giv; | |
8409 | rtx insn; | |
8410 | { | |
8411 | /* If the giv V had the auto-inc address optimization applied | |
8412 | to it, and INSN occurs between the giv insn and the biv | |
8413 | insn, then we'd have to adjust the value used here. | |
8414 | This is rare, so we don't bother to make this possible. */ | |
8415 | if (giv->auto_inc_opt | |
8416 | && ((loop_insn_first_p (giv->insn, insn) | |
8417 | && loop_insn_first_p (insn, biv->insn)) | |
8418 | || (loop_insn_first_p (biv->insn, insn) | |
8419 | && loop_insn_first_p (insn, giv->insn)))) | |
8420 | return 0; | |
8421 | ||
8422 | /* If the giv V was derived from another giv, and INSN does | |
8423 | not occur between the giv insn and the biv insn, then we'd | |
8424 | have to adjust the value used here. This is rare, so we don't | |
8425 | bother to make this possible. */ | |
8426 | if (giv->derived_from | |
8427 | && ! (giv->always_executed | |
8428 | && loop_insn_first_p (giv->insn, insn) | |
8429 | && loop_insn_first_p (insn, biv->insn))) | |
8430 | return 0; | |
8431 | if (giv->same | |
8432 | && giv->same->derived_from | |
8433 | && ! (giv->same->always_executed | |
8434 | && loop_insn_first_p (giv->same->insn, insn) | |
8435 | && loop_insn_first_p (insn, biv->insn))) | |
8436 | return 0; | |
8437 | ||
8438 | return 1; | |
8439 | } | |
8440 | ||
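/* For instance (illustrative only): when the auto-inc optimization
   has merged the biv increment into the giv, the giv's value on one
   side of the biv update differs from its nominal value by the biv
   increment; an INSN falling between the giv update and the biv
   update would see that offset, which is exactly the case rejected
   above.  */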
b4ad7b23 RS |
8441 | /* If BL appears in X (part of the pattern of INSN), see if we can |
8442 | eliminate its use. If so, return 1. If not, return 0. | |
8443 | ||
8444 | If BIV does not appear in X, return 1. | |
8445 | ||
8446 | If ELIMINATE_P is non-zero, actually do the elimination. WHERE indicates | |
8447 | where extra insns should be added. Depending on how many items have been | |
8448 | moved out of the loop, it will either be before INSN or at the start of | |
8449 | the loop. */ | |
8450 | ||
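/* Concretely (a sketch with made-up values): if the reduced giv g
   satisfies g = 4*b + 16 and INSN tests (b < 100), the test can be
   rewritten as (g < 416), following the same derivation used below
   for the CONST_INT case: new_arg = arg * mult_val + add_val.  */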
8451 | static int | |
8452 | maybe_eliminate_biv_1 (x, insn, bl, eliminate_p, where) | |
8453 | rtx x, insn; | |
8454 | struct iv_class *bl; | |
8455 | int eliminate_p; | |
8456 | rtx where; | |
8457 | { | |
8458 | enum rtx_code code = GET_CODE (x); | |
8459 | rtx reg = bl->biv->dest_reg; | |
8460 | enum machine_mode mode = GET_MODE (reg); | |
8461 | struct induction *v; | |
51723711 KG |
8462 | rtx arg, tem; |
8463 | #ifdef HAVE_cc0 | |
8464 | rtx new; | |
8465 | #endif | |
b4ad7b23 | 8466 | int arg_operand; |
6f7d635c | 8467 | const char *fmt; |
b4ad7b23 RS |
8468 | int i, j; |
8469 | ||
8470 | switch (code) | |
8471 | { | |
8472 | case REG: | |
8473 | /* If we haven't already been able to do something with this BIV, | |
8474 | we can't eliminate it. */ | |
8475 | if (x == reg) | |
8476 | return 0; | |
8477 | return 1; | |
8478 | ||
8479 | case SET: | |
8480 | /* If this sets the BIV, it is not a problem. */ | |
8481 | if (SET_DEST (x) == reg) | |
8482 | return 1; | |
8483 | ||
8484 | /* If this is an insn that defines a giv, it is also ok because | |
8485 | it will go away when the giv is reduced. */ | |
8486 | for (v = bl->giv; v; v = v->next_iv) | |
8487 | if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg) | |
8488 | return 1; | |
8489 | ||
8490 | #ifdef HAVE_cc0 | |
8491 | if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg) | |
8492 | { | |
8493 | /* Can replace with any giv that was reduced and | |
8494 | that has (MULT_VAL != 0) and (ADD_VAL == 0). | |
fbdc6da8 RK |
8495 | Require a constant for MULT_VAL, so we know it's nonzero. |
8496 | ??? We disable this optimization to avoid potential | |
8497 | overflows. */ | |
b4ad7b23 RS |
8498 | |
8499 | for (v = bl->giv; v; v = v->next_iv) | |
8500 | if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx | |
8501 | && v->add_val == const0_rtx | |
453331a3 | 8502 | && ! v->ignore && ! v->maybe_dead && v->always_computable |
fbdc6da8 RK |
8503 | && v->mode == mode |
8504 | && 0) | |
b4ad7b23 | 8505 | { |
a6207a2b | 8506 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
8507 | continue; |
8508 | ||
b4ad7b23 RS |
8509 | if (! eliminate_p) |
8510 | return 1; | |
8511 | ||
8512 | /* If the giv has the opposite direction of change, | |
8513 | then reverse the comparison. */ | |
8514 | if (INTVAL (v->mult_val) < 0) | |
38a448ca RH |
8515 | new = gen_rtx_COMPARE (GET_MODE (v->new_reg), |
8516 | const0_rtx, v->new_reg); | |
b4ad7b23 RS |
8517 | else |
8518 | new = v->new_reg; | |
8519 | ||
8520 | /* We can probably test that giv's reduced reg. */ | |
8521 | if (validate_change (insn, &SET_SRC (x), new, 0)) | |
8522 | return 1; | |
8523 | } | |
8524 | ||
8525 | /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0); | |
8526 | replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL). | |
fbdc6da8 RK |
8527 | Require a constant for MULT_VAL, so we know it's nonzero. |
8528 | ??? Do this only if ADD_VAL is a pointer to avoid a potential | |
8529 | overflow problem. */ | |
b4ad7b23 RS |
8530 | |
8531 | for (v = bl->giv; v; v = v->next_iv) | |
8532 | if (CONSTANT_P (v->mult_val) && v->mult_val != const0_rtx | |
453331a3 | 8533 | && ! v->ignore && ! v->maybe_dead && v->always_computable |
fbdc6da8 RK |
8534 | && v->mode == mode |
8535 | && (GET_CODE (v->add_val) == SYMBOL_REF | |
8536 | || GET_CODE (v->add_val) == LABEL_REF | |
8537 | || GET_CODE (v->add_val) == CONST | |
8538 | || (GET_CODE (v->add_val) == REG | |
8539 | && REGNO_POINTER_FLAG (REGNO (v->add_val))))) | |
b4ad7b23 | 8540 | { |
a6207a2b | 8541 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
8542 | continue; |
8543 | ||
b4ad7b23 RS |
8544 | if (! eliminate_p) |
8545 | return 1; | |
8546 | ||
8547 | /* If the giv has the opposite direction of change, | |
8548 | then reverse the comparison. */ | |
8549 | if (INTVAL (v->mult_val) < 0) | |
38a448ca RH |
8550 | new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val), |
8551 | v->new_reg); | |
b4ad7b23 | 8552 | else |
38a448ca RH |
8553 | new = gen_rtx_COMPARE (VOIDmode, v->new_reg, |
8554 | copy_rtx (v->add_val)); | |
b4ad7b23 RS |
8555 | |
8556 | /* Replace biv with the giv's reduced register. */ | |
8557 | update_reg_last_use (v->add_val, insn); | |
8558 | if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0)) | |
8559 | return 1; | |
8560 | ||
8561 | /* Insn doesn't support that constant or invariant. Copy it | |
8562 | into a register (it will be a loop invariant.) */ | |
8563 | tem = gen_reg_rtx (GET_MODE (v->new_reg)); | |
8564 | ||
8565 | emit_insn_before (gen_move_insn (tem, copy_rtx (v->add_val)), | |
8566 | where); | |
8567 | ||
2ae3dcac RK |
8568 | /* Substitute the new register for its invariant value in |
8569 | the compare expression. */ | |
8570 | XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem; | |
8571 | if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0)) | |
b4ad7b23 RS |
8572 | return 1; |
8573 | } | |
8574 | } | |
8575 | #endif | |
8576 | break; | |
8577 | ||
8578 | case COMPARE: | |
8579 | case EQ: case NE: | |
8580 | case GT: case GE: case GTU: case GEU: | |
8581 | case LT: case LE: case LTU: case LEU: | |
8582 | /* See if either argument is the biv. */ | |
8583 | if (XEXP (x, 0) == reg) | |
8584 | arg = XEXP (x, 1), arg_operand = 1; | |
8585 | else if (XEXP (x, 1) == reg) | |
8586 | arg = XEXP (x, 0), arg_operand = 0; | |
8587 | else | |
8588 | break; | |
8589 | ||
8590 | if (CONSTANT_P (arg)) | |
8591 | { | |
8592 | /* First try to replace with any giv that has constant positive | |
8593 | mult_val and constant add_val. We might be able to support | |
8594 | negative mult_val, but it seems complex to do it in general. */ | |
8595 | ||
8596 | for (v = bl->giv; v; v = v->next_iv) | |
8597 | if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0 | |
fbdc6da8 RK |
8598 | && (GET_CODE (v->add_val) == SYMBOL_REF |
8599 | || GET_CODE (v->add_val) == LABEL_REF | |
8600 | || GET_CODE (v->add_val) == CONST | |
8601 | || (GET_CODE (v->add_val) == REG | |
8602 | && REGNO_POINTER_FLAG (REGNO (v->add_val)))) | |
453331a3 | 8603 | && ! v->ignore && ! v->maybe_dead && v->always_computable |
b4ad7b23 RS |
8604 | && v->mode == mode) |
8605 | { | |
a6207a2b | 8606 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
8607 | continue; |
8608 | ||
b4ad7b23 RS |
8609 | if (! eliminate_p) |
8610 | return 1; | |
8611 | ||
8612 | /* Replace biv with the giv's reduced reg. */ | |
8613 | XEXP (x, 1-arg_operand) = v->new_reg; | |
8614 | ||
8615 | /* If all constants are actually constant integers and | |
8616 | the derived constant can be directly placed in the COMPARE, | |
8617 | do so. */ | |
8618 | if (GET_CODE (arg) == CONST_INT | |
8619 | && GET_CODE (v->mult_val) == CONST_INT | |
8620 | && GET_CODE (v->add_val) == CONST_INT | |
8621 | && validate_change (insn, &XEXP (x, arg_operand), | |
5fd8383e RK |
8622 | GEN_INT (INTVAL (arg) |
8623 | * INTVAL (v->mult_val) | |
8624 | + INTVAL (v->add_val)), 0)) | |
b4ad7b23 RS |
8625 | return 1; |
8626 | ||
8627 | /* Otherwise, load it into a register. */ | |
8628 | tem = gen_reg_rtx (mode); | |
8629 | emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where); | |
8630 | if (validate_change (insn, &XEXP (x, arg_operand), tem, 0)) | |
8631 | return 1; | |
8632 | ||
8633 | /* If that failed, put back the change we made above. */ | |
8634 | XEXP (x, 1-arg_operand) = reg; | |
8635 | } | |
8636 | ||
8637 | /* Look for giv with positive constant mult_val and nonconst add_val. | |
fbdc6da8 RK |
8638 | Insert insns to calculate new compare value. |
8639 | ??? Turn this off due to possible overflow. */ | |
b4ad7b23 RS |
8640 | |
8641 | for (v = bl->giv; v; v = v->next_iv) | |
d45cf215 | 8642 | if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0 |
453331a3 | 8643 | && ! v->ignore && ! v->maybe_dead && v->always_computable |
fbdc6da8 RK |
8644 | && v->mode == mode |
8645 | && 0) | |
b4ad7b23 RS |
8646 | { |
8647 | rtx tem; | |
8648 | ||
a6207a2b | 8649 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
8650 | continue; |
8651 | ||
b4ad7b23 RS |
8652 | if (! eliminate_p) |
8653 | return 1; | |
8654 | ||
8655 | tem = gen_reg_rtx (mode); | |
8656 | ||
8657 | /* Replace biv with giv's reduced register. */ | |
8658 | validate_change (insn, &XEXP (x, 1 - arg_operand), | |
8659 | v->new_reg, 1); | |
8660 | ||
8661 | /* Compute value to compare against. */ | |
8662 | emit_iv_add_mult (arg, v->mult_val, v->add_val, tem, where); | |
8663 | /* Use it in this insn. */ | |
8664 | validate_change (insn, &XEXP (x, arg_operand), tem, 1); | |
8665 | if (apply_change_group ()) | |
8666 | return 1; | |
8667 | } | |
8668 | } | |
8669 | else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM) | |
8670 | { | |
8671 | if (invariant_p (arg) == 1) | |
8672 | { | |
8673 | /* Look for giv with constant positive mult_val and nonconst | |
fbdc6da8 RK |
8674 | add_val. Insert insns to compute new compare value. |
8675 | ??? Turn this off due to possible overflow. */ | |
b4ad7b23 RS |
8676 | |
8677 | for (v = bl->giv; v; v = v->next_iv) | |
8678 | if (CONSTANT_P (v->mult_val) && INTVAL (v->mult_val) > 0 | |
453331a3 | 8679 | && ! v->ignore && ! v->maybe_dead && v->always_computable |
fbdc6da8 RK |
8680 | && v->mode == mode |
8681 | && 0) | |
b4ad7b23 RS |
8682 | { |
8683 | rtx tem; | |
8684 | ||
a6207a2b | 8685 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
8686 | continue; |
8687 | ||
b4ad7b23 RS |
8688 | if (! eliminate_p) |
8689 | return 1; | |
8690 | ||
8691 | tem = gen_reg_rtx (mode); | |
8692 | ||
8693 | /* Replace biv with giv's reduced register. */ | |
8694 | validate_change (insn, &XEXP (x, 1 - arg_operand), | |
8695 | v->new_reg, 1); | |
8696 | ||
8697 | /* Compute value to compare against. */ | |
8698 | emit_iv_add_mult (arg, v->mult_val, v->add_val, | |
8699 | tem, where); | |
8700 | validate_change (insn, &XEXP (x, arg_operand), tem, 1); | |
8701 | if (apply_change_group ()) | |
8702 | return 1; | |
8703 | } | |
8704 | } | |
8705 | ||
8706 | /* This code has problems.  Basically, when checking whether we can | |
8707 | eliminate BL, we can't know whether a particular giv | |
8708 | of ARG will be reduced.  If it isn't going to be reduced, | |
8709 | we can't eliminate BL.  We can try forcing it to be reduced, | |
8710 | but that can generate poor code. | |
8711 | | |
8712 | The problem is that the benefit of reducing TV, below, should | |
8713 | be increased if BL can actually be eliminated, but this means | |
8714 | we might have to do a topological sort of the bivs, to choose | |
8715 | the order in which to process them.  It doesn't seem worthwhile | |
8716 | to do this sort of thing now. */ | |
8717 | ||
8718 | #if 0 | |
8719 | /* Otherwise the reg compared with had better be a biv. */ | |
8720 | if (GET_CODE (arg) != REG | |
3ec2b590 | 8721 | || REG_IV_TYPE (REGNO (arg)) != BASIC_INDUCT) |
b4ad7b23 RS |
8722 | return 0; |
8723 | ||
8724 | /* Look for a pair of givs, one for each biv, | |
8725 | with identical coefficients. */ | |
8726 | for (v = bl->giv; v; v = v->next_iv) | |
8727 | { | |
8728 | struct induction *tv; | |
8729 | ||
8730 | if (v->ignore || v->maybe_dead || v->mode != mode) | |
8731 | continue; | |
8732 | ||
8733 | for (tv = reg_biv_class[REGNO (arg)]->giv; tv; tv = tv->next_iv) | |
8734 | if (! tv->ignore && ! tv->maybe_dead | |
8735 | && rtx_equal_p (tv->mult_val, v->mult_val) | |
8736 | && rtx_equal_p (tv->add_val, v->add_val) | |
8737 | && tv->mode == mode) | |
8738 | { | |
a6207a2b | 8739 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
8740 | continue; |
8741 | ||
b4ad7b23 RS |
8742 | if (! eliminate_p) |
8743 | return 1; | |
8744 | ||
8745 | /* Replace biv with its giv's reduced reg. */ | |
8746 | XEXP (x, 1-arg_operand) = v->new_reg; | |
8747 | /* Replace other operand with the other giv's | |
8748 | reduced reg. */ | |
8749 | XEXP (x, arg_operand) = tv->new_reg; | |
8750 | return 1; | |
8751 | } | |
8752 | } | |
8753 | #endif | |
8754 | } | |
8755 | ||
8756 | /* If we get here, the biv can't be eliminated. */ | |
8757 | return 0; | |
8758 | ||
8759 | case MEM: | |
8760 | /* If this address is a DEST_ADDR giv, it doesn't matter if the | |
8761 | biv is used in it, since it will be replaced. */ | |
8762 | for (v = bl->giv; v; v = v->next_iv) | |
8763 | if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0)) | |
8764 | return 1; | |
8765 | break; | |
e9a25f70 JL |
8766 | |
8767 | default: | |
8768 | break; | |
b4ad7b23 RS |
8769 | } |
8770 | ||
8771 | /* See if any subexpression fails elimination. */ | |
8772 | fmt = GET_RTX_FORMAT (code); | |
8773 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
8774 | { | |
8775 | switch (fmt[i]) | |
8776 | { | |
8777 | case 'e': | |
8778 | if (! maybe_eliminate_biv_1 (XEXP (x, i), insn, bl, | |
8779 | eliminate_p, where)) | |
8780 | return 0; | |
8781 | break; | |
8782 | ||
8783 | case 'E': | |
8784 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
8785 | if (! maybe_eliminate_biv_1 (XVECEXP (x, i, j), insn, bl, | |
8786 | eliminate_p, where)) | |
8787 | return 0; | |
8788 | break; | |
8789 | } | |
8790 | } | |
8791 | ||
8792 | return 1; | |
8793 | } | |
8794 | \f | |
8795 | /* Return nonzero if the last use of REG | |
8796 | is in an insn following INSN in the same basic block. */ | |
8797 | ||
8798 | static int | |
8799 | last_use_this_basic_block (reg, insn) | |
8800 | rtx reg; | |
8801 | rtx insn; | |
8802 | { | |
8803 | rtx n; | |
8804 | for (n = insn; | |
8805 | n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN; | |
8806 | n = NEXT_INSN (n)) | |
8807 | { | |
b1f21e0a | 8808 | if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n)) |
b4ad7b23 RS |
8809 | return 1; |
8810 | } | |
8811 | return 0; | |
8812 | } | |
8813 | \f | |
8814 | /* Called via `note_stores' to record the initial value of a biv. Here we | |
8815 | just record the location of the set and process it later. */ | |
8816 | ||
8817 | static void | |
8818 | record_initial (dest, set) | |
8819 | rtx dest; | |
8820 | rtx set; | |
8821 | { | |
8822 | struct iv_class *bl; | |
8823 | ||
8824 | if (GET_CODE (dest) != REG | |
8825 | || REGNO (dest) >= max_reg_before_loop | |
3ec2b590 | 8826 | || REG_IV_TYPE (REGNO (dest)) != BASIC_INDUCT) |
b4ad7b23 RS |
8827 | return; |
8828 | ||
8829 | bl = reg_biv_class[REGNO (dest)]; | |
8830 | ||
8831 | /* If this is the first set found, record it. */ | |
8832 | if (bl->init_insn == 0) | |
8833 | { | |
8834 | bl->init_insn = note_insn; | |
8835 | bl->init_set = set; | |
8836 | } | |
8837 | } | |
8838 | \f | |
8839 | /* If any of the registers in X are "old" and currently have a last use earlier | |
8840 | than INSN, update them to have a last use of INSN. Their actual last use | |
8841 | will be the previous insn but it will not have a valid uid_luid so we can't | |
8842 | use it. */ | |
8843 | ||
8844 | static void | |
8845 | update_reg_last_use (x, insn) | |
8846 | rtx x; | |
8847 | rtx insn; | |
8848 | { | |
8849 | /* Check for the case where INSN does not have a valid luid. In this case, | |
8850 | there is no need to modify the regno_last_uid, as this can only happen | |
8851 | when code is inserted after the loop_end to set a pseudo's final value, | |
8852 | and hence this insn will never be the last use of x. */ | |
8853 | if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop | |
8854 | && INSN_UID (insn) < max_uid_for_loop | |
b1f21e0a MM |
8855 | && uid_luid[REGNO_LAST_UID (REGNO (x))] < uid_luid[INSN_UID (insn)]) |
8856 | REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn); | |
b4ad7b23 RS |
8857 | else |
8858 | { | |
8859 | register int i, j; | |
6f7d635c | 8860 | register const char *fmt = GET_RTX_FORMAT (GET_CODE (x)); |
b4ad7b23 RS |
8861 | for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) |
8862 | { | |
8863 | if (fmt[i] == 'e') | |
8864 | update_reg_last_use (XEXP (x, i), insn); | |
8865 | else if (fmt[i] == 'E') | |
8866 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
8867 | update_reg_last_use (XVECEXP (x, i, j), insn); | |
8868 | } | |
8869 | } | |
8870 | } | |
8871 | \f | |
8872 | /* Given a jump insn JUMP, return the condition that will cause it to branch | |
8873 | to its JUMP_LABEL. If the condition cannot be understood, or is an | |
8874 | inequality floating-point comparison which needs to be reversed, 0 will | |
8875 | be returned. | |
8876 | ||
8877 | If EARLIEST is non-zero, it is a pointer to a place where the earliest | |
8878 | insn used in locating the condition was found. If a replacement test | |
8879 | of the condition is desired, it should be placed in front of that | |
8880 | insn and we will be sure that the inputs are still valid. | |
8881 | ||
8882 | The condition will be returned in a canonical form to simplify testing by | |
8883 | callers. Specifically: | |
8884 | ||
8885 | (1) The code will always be a comparison operation (EQ, NE, GT, etc.). | |
8886 | (2) Both operands will be machine operands; (cc0) will have been replaced. | |
8887 | (3) If an operand is a constant, it will be the second operand. | |
8888 | (4) (LE x const) will be replaced with (LT x <const+1>) and similarly | |
8889 | for GE, GEU, and LEU. */ | |
8890 | ||
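/* For example (hypothetical operands): a branch testing
   (le (reg r1) (const_int 3)) is returned as
   (lt (reg r1) (const_int 4)), and (geu (reg r1) (const_int 1))
   as (gtu (reg r1) (const_int 0)); a constant first operand is
   swapped into second position, per rules (3) and (4) above.  */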
8891 | rtx | |
8892 | get_condition (jump, earliest) | |
8893 | rtx jump; | |
8894 | rtx *earliest; | |
8895 | { | |
8896 | enum rtx_code code; | |
8897 | rtx prev = jump; | |
8898 | rtx set; | |
8899 | rtx tem; | |
8900 | rtx op0, op1; | |
8901 | int reverse_code = 0; | |
8902 | int did_reverse_condition = 0; | |
f283421d | 8903 | enum machine_mode mode; |
b4ad7b23 RS |
8904 | |
8905 | /* If this is not a standard conditional jump, we can't parse it. */ | |
8906 | if (GET_CODE (jump) != JUMP_INSN | |
8907 | || ! condjump_p (jump) || simplejump_p (jump)) | |
8908 | return 0; | |
8909 | ||
8910 | code = GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 0)); | |
f283421d | 8911 | mode = GET_MODE (XEXP (SET_SRC (PATTERN (jump)), 0)); |
b4ad7b23 RS |
8912 | op0 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 0); |
8913 | op1 = XEXP (XEXP (SET_SRC (PATTERN (jump)), 0), 1); | |
8914 | ||
8915 | if (earliest) | |
8916 | *earliest = jump; | |
8917 | ||
8918 | /* If this branches to JUMP_LABEL when the condition is false, reverse | |
8919 | the condition. */ | |
b5d27be7 RS |
8920 | if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF |
8921 | && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump)) | |
b4ad7b23 RS |
8922 | code = reverse_condition (code), did_reverse_condition ^= 1; |
8923 | ||
8924 | /* If we are comparing a register with zero, see if the register is set | |
8925 | in the previous insn to a COMPARE or a comparison operation. Perform | |
8926 | the same tests as a function of STORE_FLAG_VALUE as find_comparison_args | |
8927 | in cse.c */ | |
8928 | ||
a18b5d98 | 8929 | while (GET_RTX_CLASS (code) == '<' && op1 == CONST0_RTX (GET_MODE (op0))) |
b4ad7b23 RS |
8930 | { |
8931 | /* Set non-zero when we find something of interest. */ | |
8932 | rtx x = 0; | |
8933 | ||
8934 | #ifdef HAVE_cc0 | |
8935 | /* If comparison with cc0, import actual comparison from compare | |
8936 | insn. */ | |
8937 | if (op0 == cc0_rtx) | |
8938 | { | |
8939 | if ((prev = prev_nonnote_insn (prev)) == 0 | |
8940 | || GET_CODE (prev) != INSN | |
8941 | || (set = single_set (prev)) == 0 | |
8942 | || SET_DEST (set) != cc0_rtx) | |
8943 | return 0; | |
8944 | ||
8945 | op0 = SET_SRC (set); | |
8946 | op1 = CONST0_RTX (GET_MODE (op0)); | |
8947 | if (earliest) | |
8948 | *earliest = prev; | |
8949 | } | |
8950 | #endif | |
8951 | ||
8952 | /* If this is a COMPARE, pick up the two things being compared. */ | |
8953 | if (GET_CODE (op0) == COMPARE) | |
8954 | { | |
8955 | op1 = XEXP (op0, 1); | |
8956 | op0 = XEXP (op0, 0); | |
8957 | continue; | |
8958 | } | |
8959 | else if (GET_CODE (op0) != REG) | |
8960 | break; | |
8961 | ||
8962 | /* Go back to the previous insn. Stop if it is not an INSN. We also | |
8963 | stop if it isn't a single set or if it has a REG_INC note because | |
8964 | we don't want to bother dealing with it. */ | |
8965 | ||
8966 | if ((prev = prev_nonnote_insn (prev)) == 0 | |
8967 | || GET_CODE (prev) != INSN | |
8968 | || FIND_REG_INC_NOTE (prev, 0) | |
8969 | || (set = single_set (prev)) == 0) | |
8970 | break; | |
8971 | ||
8972 | /* If this is setting OP0, get what it sets it to if it looks | |
8973 | relevant. */ | |
a95c317b | 8974 | if (rtx_equal_p (SET_DEST (set), op0)) |
b4ad7b23 RS |
8975 | { |
8976 | enum machine_mode inner_mode = GET_MODE (SET_SRC (set)); | |
8977 | ||
f283421d RH |
8978 | /* ??? We may not combine comparisons done in a CCmode with |
8979 | comparisons not done in a CCmode. This is to aid targets | |
8980 | like Alpha that have an IEEE compliant EQ instruction, and | |
8981 | a non-IEEE compliant BEQ instruction. The use of CCmode is | |
8982 | actually artificial, simply to prevent the combination, but | |
12f289ac JW |
8983 | should not affect other platforms. |
8984 | ||
8985 | However, we must allow VOIDmode comparisons to match either | |
8986 | CCmode or non-CCmode comparison, because some ports have | |
8987 | modeless comparisons inside branch patterns. | |
8988 | ||
8989 | ??? This mode check should perhaps look more like the mode check | |
8990 | in simplify_comparison in combine. */ | |
f283421d | 8991 | |
b4ad7b23 | 8992 | if ((GET_CODE (SET_SRC (set)) == COMPARE |
b565a316 RK |
8993 | || (((code == NE |
8994 | || (code == LT | |
8995 | && GET_MODE_CLASS (inner_mode) == MODE_INT | |
5fd8383e RK |
8996 | && (GET_MODE_BITSIZE (inner_mode) |
8997 | <= HOST_BITS_PER_WIDE_INT) | |
b565a316 | 8998 | && (STORE_FLAG_VALUE |
5fd8383e RK |
8999 | & ((HOST_WIDE_INT) 1 |
9000 | << (GET_MODE_BITSIZE (inner_mode) - 1)))) | |
b565a316 RK |
9001 | #ifdef FLOAT_STORE_FLAG_VALUE |
9002 | || (code == LT | |
9003 | && GET_MODE_CLASS (inner_mode) == MODE_FLOAT | |
9004 | && FLOAT_STORE_FLAG_VALUE < 0) | |
9005 | #endif | |
9006 | )) | |
f283421d | 9007 | && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')) |
12f289ac JW |
9008 | && (((GET_MODE_CLASS (mode) == MODE_CC) |
9009 | == (GET_MODE_CLASS (inner_mode) == MODE_CC)) | |
9010 | || mode == VOIDmode || inner_mode == VOIDmode)) | |
b4ad7b23 | 9011 | x = SET_SRC (set); |
b565a316 RK |
9012 | else if (((code == EQ |
9013 | || (code == GE | |
5fd8383e RK |
9014 | && (GET_MODE_BITSIZE (inner_mode) |
9015 | <= HOST_BITS_PER_WIDE_INT) | |
b565a316 RK |
9016 | && GET_MODE_CLASS (inner_mode) == MODE_INT |
9017 | && (STORE_FLAG_VALUE | |
5fd8383e RK |
9018 | & ((HOST_WIDE_INT) 1 |
9019 | << (GET_MODE_BITSIZE (inner_mode) - 1)))) | |
b565a316 RK |
9020 | #ifdef FLOAT_STORE_FLAG_VALUE |
9021 | || (code == GE | |
9022 | && GET_MODE_CLASS (inner_mode) == MODE_FLOAT | |
9023 | && FLOAT_STORE_FLAG_VALUE < 0) | |
fb8ca0a4 | 9024 | #endif |
b565a316 | 9025 | )) |
f283421d | 9026 | && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<' |
12f289ac JW |
9027 | && (((GET_MODE_CLASS (mode) == MODE_CC) |
9028 | == (GET_MODE_CLASS (inner_mode) == MODE_CC)) | |
9029 | || mode == VOIDmode || inner_mode == VOIDmode)) | |
9030 | ||
b4ad7b23 RS |
9031 | { |
9032 | /* We might have reversed a LT to get a GE here. But this wasn't | |
9033 | actually the comparison of data, so we don't flag that we | |
9034 | have had to reverse the condition. */ | |
9035 | did_reverse_condition ^= 1; | |
9036 | reverse_code = 1; | |
9037 | x = SET_SRC (set); | |
9038 | } | |
71ef37f6 RK |
9039 | else |
9040 | break; | |
b4ad7b23 RS |
9041 | } |
9042 | ||
9043 | else if (reg_set_p (op0, prev)) | |
9044 | /* If this sets OP0, but not directly, we have to give up. */ | |
9045 | break; | |
9046 | ||
9047 | if (x) | |
9048 | { | |
9049 | if (GET_RTX_CLASS (GET_CODE (x)) == '<') | |
9050 | code = GET_CODE (x); | |
9051 | if (reverse_code) | |
9052 | { | |
9053 | code = reverse_condition (code); | |
9054 | did_reverse_condition ^= 1; | |
9055 | reverse_code = 0; | |
9056 | } | |
9057 | ||
9058 | op0 = XEXP (x, 0), op1 = XEXP (x, 1); | |
9059 | if (earliest) | |
9060 | *earliest = prev; | |
9061 | } | |
9062 | } | |
9063 | ||
9064 | /* If constant is first, put it last. */ | |
9065 | if (CONSTANT_P (op0)) | |
9066 | code = swap_condition (code), tem = op0, op0 = op1, op1 = tem; | |
9067 | ||
9068 | /* If OP0 is the result of a comparison, we weren't able to find what | |
9069 | was really being compared, so fail. */ | |
9070 | if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC) | |
9071 | return 0; | |
9072 | ||
d8cfa4ee RK |
9073 | /* Canonicalize any ordered comparison with integers involving equality |
9074 | if we can do computations in the relevant mode and we do not | |
9075 | overflow. */ | |
9076 | ||
9077 | if (GET_CODE (op1) == CONST_INT | |
9078 | && GET_MODE (op0) != VOIDmode | |
9079 | && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT) | |
b4ad7b23 | 9080 | { |
5fd8383e RK |
9081 | HOST_WIDE_INT const_val = INTVAL (op1); |
9082 | unsigned HOST_WIDE_INT uconst_val = const_val; | |
d8cfa4ee RK |
9083 | unsigned HOST_WIDE_INT max_val |
9084 | = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0)); | |
b4ad7b23 RS |
9085 | |
9086 | switch (code) | |
d8cfa4ee RK |
9087 | { |
9088 | case LE: | |
e51712db | 9089 | if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1) |
d8cfa4ee RK |
9090 | code = LT, op1 = GEN_INT (const_val + 1); |
9091 | break; | |
b4ad7b23 | 9092 | |
460f50dc R |
9093 | /* When cross-compiling, const_val might be sign-extended from | |
9094 | BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */ | |
d8cfa4ee | 9095 | case GE: |
e51712db | 9096 | if ((HOST_WIDE_INT) (const_val & max_val) |
d8cfa4ee RK |
9097 | != (((HOST_WIDE_INT) 1 |
9098 | << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1)))) | |
9099 | code = GT, op1 = GEN_INT (const_val - 1); | |
9100 | break; | |
b4ad7b23 | 9101 | |
d8cfa4ee | 9102 | case LEU: |
460f50dc | 9103 | if (uconst_val < max_val) |
d8cfa4ee RK |
9104 | code = LTU, op1 = GEN_INT (uconst_val + 1); |
9105 | break; | |
b4ad7b23 | 9106 | |
d8cfa4ee RK |
9107 | case GEU: |
9108 | if (uconst_val != 0) | |
9109 | code = GTU, op1 = GEN_INT (uconst_val - 1); | |
9110 | break; | |
e9a25f70 JL |
9111 | |
9112 | default: | |
9113 | break; | |
d8cfa4ee | 9114 | } |
b4ad7b23 RS |
9115 | } |
9116 | ||
9117 | /* If this was floating-point and we reversed anything other than an | |
9118 | EQ or NE, return zero. */ | |
9119 | if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT | |
9120 | && did_reverse_condition && code != NE && code != EQ | |
1fc3d466 | 9121 | && ! flag_fast_math |
b4ad7b23 RS |
9122 | && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT) |
9123 | return 0; | |
9124 | ||
9125 | #ifdef HAVE_cc0 | |
9126 | /* Never return CC0; return zero instead. */ | |
9127 | if (op0 == cc0_rtx) | |
9128 | return 0; | |
9129 | #endif | |
9130 | ||
38a448ca | 9131 | return gen_rtx_fmt_ee (code, VOIDmode, op0, op1); |
b4ad7b23 RS |
9132 | } |
9133 | ||
9134 | /* Similar to above routine, except that we also put an invariant last | |
9135 | unless both operands are invariants. */ | |
9136 | ||
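/* E.g. (illustrative): given a test (gt (reg invariant) (reg biv)),
   this returns (lt (reg biv) (reg invariant)), so callers may assume
   that when exactly one operand is loop-invariant it is the second.  */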
9137 | rtx | |
9138 | get_condition_for_loop (x) | |
9139 | rtx x; | |
9140 | { | |
5fd8383e | 9141 | rtx comparison = get_condition (x, NULL_PTR); |
b4ad7b23 RS |
9142 | |
9143 | if (comparison == 0 | |
9144 | || ! invariant_p (XEXP (comparison, 0)) | |
9145 | || invariant_p (XEXP (comparison, 1))) | |
9146 | return comparison; | |
9147 | ||
38a448ca RH |
9148 | return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode, |
9149 | XEXP (comparison, 1), XEXP (comparison, 0)); | |
b4ad7b23 | 9150 | } |
8c660648 | 9151 | |
51723711 | 9152 | #ifdef HAVE_decrement_and_branch_on_count |
cac8ce95 DE |
9153 | /* Instrument the loop for insertion of a BCT instruction.  We distinguish | |
9154 | between loops with compile-time bounds and those with run-time bounds. | |
9155 | Information from loop_iterations() is used to compute compile-time bounds. | |
9156 | Run-time bounds should use loop preconditioning, but this is | |
9157 | currently ignored. */ | |
9158 | ||
45f97e2e | 9159 | static void |
302670f3 | 9160 | insert_bct (loop_start, loop_end, loop_info) |
cac8ce95 | 9161 | rtx loop_start, loop_end; |
302670f3 | 9162 | struct loop_info *loop_info; |
8c660648 | 9163 | { |
8c660648 | 9164 | int i; |
cac8ce95 | 9165 | unsigned HOST_WIDE_INT n_iterations; |
8c660648 | 9166 | |
cac8ce95 | 9167 | int increment_direction, compare_direction; |
8c660648 | 9168 | |
cac8ce95 DE |
9169 | /* If the loop condition is <= or >=, the number of iteration |
9170 | is 1 more than the range of the bounds of the loop. */ | |
9171 | int add_iteration = 0; | |
8c660648 | 9172 | |
cac8ce95 | 9173 | enum machine_mode loop_var_mode = word_mode; |
8c660648 | 9174 | |
cac8ce95 | 9175 | int loop_num = uid_loop_num [INSN_UID (loop_start)]; |
8c660648 | 9176 | |
cac8ce95 | 9177 | /* It's impossible to instrument a completely unrolled loop. */
3c748bb6 | 9178 | if (loop_info->unroll_number == loop_info->n_iterations) |
8c660648 | 9179 | return; |
8c660648 | 9180 | |
cac8ce95 DE |
9181 | /* Make sure that the count register is not in use. */ |
9182 | if (loop_used_count_register [loop_num]) | |
8c660648 | 9183 | { |
cac8ce95 | 9184 | if (loop_dump_stream) |
8c660648 | 9185 | fprintf (loop_dump_stream, |
cac8ce95 DE |
9186 | "insert_bct %d: BCT instrumentation failed: count register already in use\n", |
9187 | loop_num); | |
8c660648 JL |
9188 | return; |
9189 | } | |
9190 | ||
cac8ce95 DE |
9191 | /* Make sure that the function has no indirect jumps. */ |
9192 | if (indirect_jump_in_function) | |
9193 | { | |
8c660648 JL |
9194 | if (loop_dump_stream) |
9195 | fprintf (loop_dump_stream, | |
cac8ce95 DE |
9196 | "insert_bct %d: BCT instrumentation failed: indirect jump in function\n", |
9197 | loop_num); | |
8c660648 JL |
9198 | return; |
9199 | } | |
9200 | ||
cac8ce95 DE |
9201 | /* Make sure that the last loop insn is a conditional jump. */ |
9202 | if (GET_CODE (PREV_INSN (loop_end)) != JUMP_INSN | |
9203 | || ! condjump_p (PREV_INSN (loop_end)) | |
9204 | || simplejump_p (PREV_INSN (loop_end))) | |
9205 | { | |
8c660648 JL |
9206 | if (loop_dump_stream) |
9207 | fprintf (loop_dump_stream, | |
cac8ce95 DE |
9208 | "insert_bct %d: BCT instrumentation failed: invalid jump at loop end\n", |
9209 | loop_num); | |
8c660648 JL |
9210 | return; |
9211 | } | |
8c660648 | 9212 | |
cac8ce95 DE |
9213 | /* Make sure that the loop does not contain a function call |
9214 | (the count register might be altered by the called function). */ | |
3c748bb6 | 9215 | if (loop_info->has_call) |
8c660648 | 9216 | { |
cac8ce95 DE |
9217 | if (loop_dump_stream) |
9218 | fprintf (loop_dump_stream, | |
9219 | "insert_bct %d: BCT instrumentation failed: function call in loop\n", | |
9220 | loop_num); | |
9221 | return; | |
9222 | } | |
8c660648 | 9223 | |
cac8ce95 DE |
9224 | /* Make sure that the loop does not jump via a table. |
9225 | (the count register might be used to perform the branch on table). */ | |
3c748bb6 | 9226 | if (loop_info->has_tablejump) |
cac8ce95 | 9227 | { |
8c368ee2 DE |
9228 | if (loop_dump_stream) |
9229 | fprintf (loop_dump_stream, | |
9230 | "insert_bct %d: BCT instrumentation failed: computed branch in the loop\n", | |
9231 | loop_num); | |
9232 | return; | |
cac8ce95 | 9233 | } |
8c660648 | 9234 | |
cac8ce95 | 9235 | /* Account for loop unrolling in instrumented iteration count. */ |
302670f3 MH |
9236 | if (loop_info->unroll_number > 1) |
9237 | n_iterations = loop_info->n_iterations / loop_info->unroll_number; | |
cac8ce95 | 9238 | else |
302670f3 | 9239 | n_iterations = loop_info->n_iterations; |
8c660648 | 9240 | |
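/* (Made-up numbers for illustration: a loop that iterates 100 times
   and was unrolled 4 times executes its copied body 25 times, so the
   count register must be initialized to 25, not 100.)  */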
cac8ce95 DE |
9241 | if (n_iterations != 0 && n_iterations < 3) |
9242 | { | |
9243 | /* Allow an enclosing outer loop to benefit if possible. */ | |
9244 | if (loop_dump_stream) | |
9245 | fprintf (loop_dump_stream, | |
9246 | "insert_bct %d: Too few iterations to benefit from BCT optimization\n", | |
9247 | loop_num); | |
9248 | return; | |
9249 | } | |
8c660648 | 9250 | |
cac8ce95 | 9251 | /* Try to instrument the loop. */ |
8c660648 | 9252 | |
cac8ce95 DE |
9253 | /* Handle the simpler case, where the bounds are known at compile time. */ |
9254 | if (n_iterations > 0) | |
9255 | { | |
9256 | /* Mark all enclosing loops that they cannot use count register. */ | |
8c368ee2 | 9257 | for (i = loop_num; i != -1; i = loop_outer_loop[i]) |
cac8ce95 | 9258 | loop_used_count_register[i] = 1; |
8c660648 | 9259 | instrument_loop_bct (loop_start, loop_end, GEN_INT (n_iterations)); |
8c660648 JL |
9260 | return; |
9261 | } | |
9262 | ||
cac8ce95 DE |
9263 | /* Handle the more complex case, that the bounds are NOT known |
9264 | at compile time. In this case we generate run_time calculation | |
9265 | of the number of iterations. */ | |
9266 | ||
302670f3 | 9267 | if (loop_info->iteration_var == 0) |
400d6322 DE |
9268 | { |
9269 | if (loop_dump_stream) | |
9270 | fprintf (loop_dump_stream, | |
9271 | "insert_bct %d: BCT Runtime Instrumentation failed: no loop iteration variable found\n", | |
9272 | loop_num); | |
9273 | return; | |
9274 | } | |
9275 | ||
302670f3 MH |
9276 | if (GET_MODE_CLASS (GET_MODE (loop_info->iteration_var)) != MODE_INT |
9277 | || GET_MODE_SIZE (GET_MODE (loop_info->iteration_var)) != UNITS_PER_WORD) | |
cac8ce95 DE |
9278 | { |
9279 | if (loop_dump_stream) | |
9280 | fprintf (loop_dump_stream, | |
400d6322 | 9281 | "insert_bct %d: BCT Runtime Instrumentation failed: loop variable not integer\n", |
cac8ce95 DE |
9282 | loop_num); |
9283 | return; | |
9284 | } | |
8c660648 JL |
9285 | |
9286 | /* With runtime bounds, if the compare is of the form '!=' we give up. */ | |
302670f3 | 9287 | if (loop_info->comparison_code == NE) |
cac8ce95 DE |
9288 | { |
9289 | if (loop_dump_stream) | |
9290 | fprintf (loop_dump_stream, | |
400d6322 | 9291 | "insert_bct %d: BCT Runtime Instrumentation failed: runtime bounds with != comparison\n", |
cac8ce95 DE |
9292 | loop_num); |
9293 | return; | |
9294 | } | |
9295 | /* Use common loop preconditioning code instead. */ | |
9296 | #if 0 | |
9297 | else | |
9298 | { | |
9299 | /* We rely on the existence of run-time guard to ensure that the | |
9300 | loop executes at least once. */ | |
9301 | rtx sequence; | |
9302 | rtx iterations_num_reg; | |
8c660648 | 9303 | |
cac8ce95 DE |
9304 | unsigned HOST_WIDE_INT increment_value_abs |
9305 | = INTVAL (increment) * increment_direction; | |
8c660648 | 9306 | |
cac8ce95 DE |
9307 | /* make sure that the increment is a power of two, otherwise (an |
9308 | expensive) divide is needed. */ | |
9309 | if (exact_log2 (increment_value_abs) == -1) | |
9310 | { | |
9311 | if (loop_dump_stream) | |
9312 | fprintf (loop_dump_stream, | |
9313 | "insert_bct: not instrumenting BCT because the increment is not power of 2\n"); | |
9314 | return; | |
9315 | } | |
8c660648 | 9316 | |
cac8ce95 DE |
9317 | /* Compute the number of iterations. */ | |
9318 | start_sequence (); | |
8c660648 | 9319 | { |
cac8ce95 | 9320 | rtx temp_reg; |
8c660648 | 9321 | |
cac8ce95 DE |
9322 | /* Again, the number of iterations is calculated by: |
9323 | ; | |
9324 | ; compare-val - initial-val + (increment -1) + additional-iteration | |
9325 | ; num_iterations = ----------------------------------------------------------------- | |
9326 | ; increment | |
8c660648 | 9327 | */ |
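/* A worked example with made-up numbers: for initial-val 0,
   compare-val 100, increment 4 and a `<=' comparison
   (add_iteration 1), this yields
   (100 - 0 + (4 - 1) + 1) / 4 = 26 iterations, and the divide
   is done as a shift because 4 is a power of two.  */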
cac8ce95 DE |
9328 | /* ??? Do we have to call copy_rtx here before passing rtx to |
9329 | expand_binop? */ | |
9330 | if (compare_direction > 0) | |
9331 | { | |
9332 | /* <, <= :the loop variable is increasing */ | |
9333 | temp_reg = expand_binop (loop_var_mode, sub_optab, | |
9334 | comparison_value, initial_value, | |
9335 | NULL_RTX, 0, OPTAB_LIB_WIDEN); | |
9336 | } | |
9337 | else | |
9338 | { | |
9339 | temp_reg = expand_binop (loop_var_mode, sub_optab, | |
9340 | initial_value, comparison_value, | |
9341 | NULL_RTX, 0, OPTAB_LIB_WIDEN); | |
9342 | } | |
8c660648 | 9343 | |
cac8ce95 DE |
9344 | if (increment_value_abs - 1 + add_iteration != 0) |
9345 | temp_reg = expand_binop (loop_var_mode, add_optab, temp_reg, | |
9346 | GEN_INT (increment_value_abs - 1 | |
9347 | + add_iteration), | |
9348 | NULL_RTX, 0, OPTAB_LIB_WIDEN); | |
8c660648 | 9349 | |
cac8ce95 | 9350 | if (increment_value_abs != 1) |
66b9b71f MT |
9351 | iterations_num_reg = expand_binop (loop_var_mode, asr_optab, |
9352 | temp_reg, | |
9353 | GEN_INT (exact_log2 (increment_value_abs)), | |
9354 | NULL_RTX, 0, OPTAB_LIB_WIDEN); | |
cac8ce95 DE |
9355 | else |
9356 | iterations_num_reg = temp_reg; | |
9357 | } | |
9358 | sequence = gen_sequence (); | |
9359 | end_sequence (); | |
9360 | emit_insn_before (sequence, loop_start); | |
9361 | instrument_loop_bct (loop_start, loop_end, iterations_num_reg); | |
8c660648 | 9362 | } |
cac8ce95 DE |
9363 | |
9364 | return; | |
9365 | #endif /* Complex case */ | |
8c660648 JL |
9366 | } |
9367 | ||
cac8ce95 DE |
9368 | /* Instrument loop by inserting a bct in it as follows: |
9369 | 1. A new counter register is created. | |
9370 | 2. In the head of the loop the new variable is initialized to the value | |
9371 | passed in the loop_num_iterations parameter. | |
8c660648 | 9372 | 3. At the end of the loop, a comparison of the register with 0 is generated.
cac8ce95 DE |
9373 | The created comparison follows the pattern defined for the | |
9374 | decrement_and_branch_on_count insn, so this insn will be generated. | |
9375 | 4. The branch on the old variable is deleted.  The compare must remain | |
9376 | because it might be used elsewhere.  If the loop variable or condition | |
9377 | register is used elsewhere, it will be eliminated by flow. */ | |
8c660648 JL |
9378 | |
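/* Schematically (an illustrative sketch; the precise RTL comes from
   the target's decrement_and_branch_on_count pattern):

	(set (reg ctr) (const_int N))	;; emitted in the loop header
     start_label:
	... loop body ...
	;; decrement ctr and branch to start_label while it is non-zero

   replacing the original compare-and-branch on the loop variable.  */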
9379 | static void | |
9380 | instrument_loop_bct (loop_start, loop_end, loop_num_iterations) | |
9381 | rtx loop_start, loop_end; | |
9382 | rtx loop_num_iterations; | |
9383 | { | |
cac8ce95 | 9384 | rtx counter_reg; |
8c660648 | 9385 | rtx start_label; |
8c660648 | 9386 | rtx sequence; |
8c660648 | 9387 | |
8c660648 JL |
9388 | if (HAVE_decrement_and_branch_on_count) |
9389 | { | |
9390 | if (loop_dump_stream) | |
cac8ce95 DE |
9391 | { |
9392 | fputs ("instrument_bct: Inserting BCT (", loop_dump_stream); | |
9393 | if (GET_CODE (loop_num_iterations) == CONST_INT) | |
9394 | fprintf (loop_dump_stream, HOST_WIDE_INT_PRINT_DEC, | |
9395 | INTVAL (loop_num_iterations)); | |
9396 | else | |
9397 | fputs ("runtime", loop_dump_stream); | |
9398 | fputs (" iterations)", loop_dump_stream); | |
9399 | } | |
8c660648 | 9400 | |
5accd822 DE |
9401 | /* Discard original jump to continue loop. Original compare result |
9402 | may still be live, so it cannot be discarded explicitly. */ | |
8c660648 JL |
9403 | delete_insn (PREV_INSN (loop_end)); |
9404 | ||
cac8ce95 | 9405 | /* Insert the label which will delimit the start of the loop. */ |
8c660648 JL |
9406 | start_label = gen_label_rtx (); |
9407 | emit_label_after (start_label, loop_start); | |
9408 | ||
cac8ce95 | 9409 | /* Insert initialization of the count register into the loop header. */ |
8c660648 | 9410 | start_sequence (); |
cac8ce95 DE |
9411 | counter_reg = gen_reg_rtx (word_mode); |
9412 | emit_insn (gen_move_insn (counter_reg, loop_num_iterations)); | |
8c660648 JL |
9413 | sequence = gen_sequence (); |
9414 | end_sequence (); | |
5accd822 | 9415 | emit_insn_before (sequence, loop_start); |
8c660648 | 9416 | |
cac8ce95 | 9417 | /* Insert new comparison on the count register instead of the |
8c660648 JL |
9418 | old one, generating the needed BCT pattern (that will be |
9419 | later recognized by assembly generation phase). */ | |
cac8ce95 | 9420 | emit_jump_insn_before (gen_decrement_and_branch_on_count (counter_reg, |
5accd822 | 9421 | start_label), |
8c660648 JL |
9422 | loop_end); |
9423 | LABEL_NUSES (start_label)++; | |
9424 | } | |
9425 | ||
8c660648 | 9426 | } |
51723711 KG |
9427 | #endif /* HAVE_decrement_and_branch_on_count */ |
9428 | ||
2a1777af | 9429 | /* Scan the function and determine whether it has indirect (computed) jumps. |
8c660648 | 9430 | |
2a1777af JL |
9431 | This is taken mostly from flow.c; similar code exists elsewhere |
9432 | in the compiler. It may be useful to put this into rtlanal.c. */ | |
8c660648 JL |
9433 | static int |
9434 | indirect_jump_in_function_p (start) | |
9435 | rtx start; | |
9436 | { | |
9437 | rtx insn; | |
8c660648 | 9438 | |
2a1777af JL |
9439 | for (insn = start; insn; insn = NEXT_INSN (insn)) |
9440 | if (computed_jump_p (insn)) | |
9441 | return 1; | |
7019d00e L |
9442 | |
9443 | return 0; | |
8c660648 | 9444 | } |
41a972a9 MM |
9445 | |
9446 | /* Add MEM to the LOOP_MEMS array, if appropriate. See the | |
9447 | documentation for LOOP_MEMS for the definition of `appropriate'. | |
9448 | This function is called from prescan_loop via for_each_rtx. */ | |
9449 | ||
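/* For instance (hypothetical rtx): a reference such as
   (mem:SI (reg 42)) is recorded so that load_mems can later consider
   replacing it with a pseudo for the duration of the loop, whereas a
   (mem:BLK ...) is recorded but marked non-optimizable, and a second
   sighting of the same address in a different mode clears the
   `optimize' flag for that entry.  */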
9450 | static int | |
9451 | insert_loop_mem (mem, data) | |
9452 | rtx *mem; | |
e51712db | 9453 | void *data ATTRIBUTE_UNUSED; |
41a972a9 MM |
9454 | { |
9455 | int i; | |
9456 | rtx m = *mem; | |
9457 | ||
9458 | if (m == NULL_RTX) | |
9459 | return 0; | |
9460 | ||
9461 | switch (GET_CODE (m)) | |
9462 | { | |
9463 | case MEM: | |
9464 | break; | |
9465 | ||
9466 | case CONST_DOUBLE: | |
9467 | /* We're not interested in the MEM associated with a | |
9468 | CONST_DOUBLE, so there's no need to traverse into this. */ | |
9469 | return -1; | |
9470 | ||
9471 | default: | |
9472 | /* This is not a MEM. */ | |
9473 | return 0; | |
9474 | } | |
9475 | ||
9476 | /* See if we've already seen this MEM. */ | |
9477 | for (i = 0; i < loop_mems_idx; ++i) | |
9478 | if (rtx_equal_p (m, loop_mems[i].mem)) | |
9479 | { | |
9480 | if (GET_MODE (m) != GET_MODE (loop_mems[i].mem)) | |
9481 | /* The modes of the two memory accesses are different. If | |
9482 | this happens, something tricky is going on, and we just | |
9483 | don't optimize accesses to this MEM. */ | |
9484 | loop_mems[i].optimize = 0; | |
9485 | ||
9486 | return 0; | |
9487 | } | |
9488 | ||
9489 | /* Resize the array, if necessary. */ | |
9490 | if (loop_mems_idx == loop_mems_allocated) | |
9491 | { | |
9492 | if (loop_mems_allocated != 0) | |
9493 | loop_mems_allocated *= 2; | |
9494 | else | |
9495 | loop_mems_allocated = 32; | |
9496 | ||
9497 | loop_mems = (loop_mem_info*) | |
9498 | xrealloc (loop_mems, | |
9499 | loop_mems_allocated * sizeof (loop_mem_info)); | |
9500 | } | |
9501 | ||
9502 | /* Actually insert the MEM. */ | |
9503 | loop_mems[loop_mems_idx].mem = m; | |
9504 | /* We can't hoist this MEM out of the loop if it's a BLKmode MEM | |
9505 | because we can't put it in a register. We still store it in the | |
9506 | table, though, so that if we see the same address later, but in a | |
9507 | non-BLK mode, we'll not think we can optimize it at that point. */ | |
9508 | loop_mems[loop_mems_idx].optimize = (GET_MODE (m) != BLKmode); | |
9509 | loop_mems[loop_mems_idx].reg = NULL_RTX; | |
9510 | ++loop_mems_idx; | |
8deb8e2c MM |
9511 | |
9512 | return 0; | |
41a972a9 MM |
9513 | } |
9514 | ||
4b259e3f | 9515 | /* Like load_mems, but also ensures that SET_IN_LOOP, |
41a972a9 MM |
9516 | MAY_NOT_OPTIMIZE, REG_SINGLE_USAGE, and INSN_COUNT have the correct |
9517 | values after load_mems. */ | |
9518 | ||
9519 | static void | |
9520 | load_mems_and_recount_loop_regs_set (scan_start, end, loop_top, start, | |
d6b44532 | 9521 | insn_count) |
41a972a9 MM |
9522 | rtx scan_start; |
9523 | rtx end; | |
9524 | rtx loop_top; | |
9525 | rtx start; | |
41a972a9 MM |
9526 | int *insn_count; |
9527 | { | |
9528 | int nregs = max_reg_num (); | |
9529 | ||
9530 | load_mems (scan_start, end, loop_top, start); | |
9531 | ||
4b259e3f | 9532 | /* Recalculate set_in_loop and friends since load_mems may have |
41a972a9 MM |
9533 | created new registers. */ |
9534 | if (max_reg_num () > nregs) | |
9535 | { | |
9536 | int i; | |
9537 | int old_nregs; | |
9538 | ||
9539 | old_nregs = nregs; | |
9540 | nregs = max_reg_num (); | |
9541 | ||
4b259e3f | 9542 | if ((unsigned) nregs > set_in_loop->num_elements) |
8deb8e2c MM |
9543 | { |
9544 | /* Grow all the arrays. */ | |
4b259e3f | 9545 | VARRAY_GROW (set_in_loop, nregs); |
8deb8e2c | 9546 | VARRAY_GROW (n_times_set, nregs); |
8deb8e2c | 9547 | VARRAY_GROW (may_not_optimize, nregs); |
d6b44532 | 9548 | VARRAY_GROW (reg_single_usage, nregs); |
8deb8e2c MM |
9549 | } |
9550 | /* Clear the arrays */ | |
4b259e3f | 9551 | bzero ((char *) &set_in_loop->data, nregs * sizeof (int)); |
8deb8e2c | 9552 | bzero ((char *) &may_not_optimize->data, nregs * sizeof (char)); |
d6b44532 | 9553 | bzero ((char *) ®_single_usage->data, nregs * sizeof (rtx)); |
41a972a9 MM |
9554 | |
9555 | count_loop_regs_set (loop_top ? loop_top : start, end, | |
9556 | may_not_optimize, reg_single_usage, | |
9557 | insn_count, nregs); | |
9558 | ||
9559 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | |
8deb8e2c MM |
9560 | { |
9561 | VARRAY_CHAR (may_not_optimize, i) = 1; | |
4b259e3f | 9562 | VARRAY_INT (set_in_loop, i) = 1; |
8deb8e2c | 9563 | } |
41a972a9 | 9564 | |
dd0208b9 DM |
9565 | #ifdef AVOID_CCMODE_COPIES |
9566 | /* Don't try to move insns which set CC registers if we should not | |
9567 | create CCmode register copies. */ | |
78b87d18 | 9568 | for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--) |
dd0208b9 | 9569 | if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC) |
8deb8e2c | 9570 | VARRAY_CHAR (may_not_optimize, i) = 1; |
dd0208b9 DM |
9571 | #endif |
9572 | ||
4b259e3f R |
9573 | /* Set n_times_set for the new registers. */ |
9574 | bcopy ((char *) (&set_in_loop->data.i[0] + old_nregs), | |
9575 | (char *) (&n_times_set->data.i[0] + old_nregs), | |
41a972a9 MM |
9576 | (nregs - old_nregs) * sizeof (int)); |
9577 | } | |
9578 | } | |
9579 | ||
9580 | /* Move MEMs into registers for the duration of the loop. SCAN_START | |
9581 | is the first instruction in the loop (as it is executed). The | |
9582 | other parameters are as for next_insn_in_loop. */ | |
9583 | ||
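/* In outline, the transformation is (a source-level sketch, assuming
   a loop-invariant address `p'):

	while (...)			reg = *p;
	  ... *p ...		==>	while (...)
					  ... reg ...
					*p = reg;	(only if *p is written)

   The trailing store is emitted after a fresh label placed past
   NOTE_LOOP_END, and uses of the old post-loop exit label are
   redirected to that label, so exits pass through the store.  */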
9584 | static void | |
9585 | load_mems (scan_start, end, loop_top, start) | |
9586 | rtx scan_start; | |
9587 | rtx end; | |
9588 | rtx loop_top; | |
9589 | rtx start; | |
9590 | { | |
9591 | int maybe_never = 0; | |
9592 | int i; | |
9593 | rtx p; | |
9594 | rtx label = NULL_RTX; | |
6a651371 | 9595 | rtx end_label = NULL_RTX; |
41a972a9 MM |
9596 | |
9597 | if (loop_mems_idx > 0) | |
9598 | { | |
9599 | /* Nonzero if the next instruction may never be executed. */ | |
9600 | int next_maybe_never = 0; | |
9601 | ||
9602 | /* Check to see if it's possible that some instructions in the | |
9603 | loop are never executed. */ | |
9604 | for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top); | |
9605 | p != NULL_RTX && !maybe_never; | |
9606 | p = next_insn_in_loop (p, scan_start, end, loop_top)) | |
9607 | { | |
9608 | if (GET_CODE (p) == CODE_LABEL) | |
9609 | maybe_never = 1; | |
9610 | else if (GET_CODE (p) == JUMP_INSN | |
9611 | /* If we enter the loop in the middle, and scan | |
9612 | around to the beginning, don't set maybe_never | |
9613 | for that. This must be an unconditional jump, | |
9614 | otherwise the code at the top of the loop might | |
9615 | never be executed.  Unconditional jumps are | |
9616 | followed by a barrier and then the loop end. */ | |
9617 | && ! (GET_CODE (p) == JUMP_INSN | |
9618 | && JUMP_LABEL (p) == loop_top | |
9619 | && NEXT_INSN (NEXT_INSN (p)) == end | |
9620 | && simplejump_p (p))) | |
9621 | { | |
9622 | if (!condjump_p (p)) | |
9623 | /* Something complicated. */ | |
9624 | maybe_never = 1; | |
9625 | else | |
9626 | /* If there are any more instructions in the loop, they | |
9627 | might not be reached. */ | |
9628 | next_maybe_never = 1; | |
9629 | } | |
9630 | else if (next_maybe_never) | |
9631 | maybe_never = 1; | |
9632 | } | |
9633 | ||
9634 | /* Actually move the MEMs. */ | |
9635 | for (i = 0; i < loop_mems_idx; ++i) | |
9636 | { | |
41a972a9 MM |
9637 | int written = 0; |
9638 | rtx reg; | |
9639 | rtx mem = loop_mems[i].mem; | |
5026a502 | 9640 | rtx mem_list_entry; |
41a972a9 MM |
9641 | |
9642 | if (MEM_VOLATILE_P (mem) | |
9643 | || invariant_p (XEXP (mem, 0)) != 1) | |
9644 | /* There's no telling whether or not MEM is modified. */ | |
9645 | loop_mems[i].optimize = 0; | |
9646 | ||
9647 | /* Go through the MEMs written to in the loop to see if this | |
9648 | one is aliased by one of them. */ | |
5026a502 JL |
9649 | mem_list_entry = loop_store_mems; |
9650 | while (mem_list_entry) | |
41a972a9 | 9651 | { |
5026a502 | 9652 | if (rtx_equal_p (mem, XEXP (mem_list_entry, 0))) |
41a972a9 | 9653 | written = 1; |
5026a502 | 9654 | else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode, |
41a972a9 MM |
9655 | mem, rtx_varies_p)) |
9656 | { | |
9657 | /* MEM is indeed aliased by this store. */ | |
9658 | loop_mems[i].optimize = 0; | |
9659 | break; | |
9660 | } | |
5026a502 | 9661 | mem_list_entry = XEXP (mem_list_entry, 1); |
41a972a9 MM |
9662 | } |
9663 | ||
9664 | /* If this MEM is written to, we must be sure that there | |
9665 | are no reads from another MEM that aliases this one. */ | |
9666 | if (loop_mems[i].optimize && written) | |
9667 | { | |
9668 | int j; | |
9669 | ||
9670 | for (j = 0; j < loop_mems_idx; ++j) | |
9671 | { | |
9672 | if (j == i) | |
9673 | continue; | |
9674 | else if (true_dependence (mem, | |
9675 | VOIDmode, | |
9676 | loop_mems[j].mem, | |
9677 | rtx_varies_p)) | |
9678 | { | |
9679 | /* It's not safe to hoist loop_mems[i] out of | |
9680 | the loop because writes to it might not be | |
9681 | seen by reads from loop_mems[j]. */ | |
9682 | loop_mems[i].optimize = 0; | |
9683 | break; | |
9684 | } | |
9685 | } | |
9686 | } | |
9687 | ||
9688 | if (maybe_never && may_trap_p (mem)) | |
9689 | /* We can't access the MEM outside the loop; it might | |
9690 | cause a trap that wouldn't have happened otherwise. */ | |
9691 | loop_mems[i].optimize = 0; | |
9692 | ||
9693 | if (!loop_mems[i].optimize) | |
9694 | /* We thought we were going to lift this MEM out of the | |
9695 | loop, but later discovered that we could not. */ | |
9696 | continue; | |
9697 | ||
9698 | /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in | |
9699 | order to keep scan_loop from moving stores to this MEM | |
9700 | out of the loop just because this REG is neither a | |
9701 | user-variable nor used in the loop test. */ | |
9702 | reg = gen_reg_rtx (GET_MODE (mem)); | |
9703 | REG_USERVAR_P (reg) = 1; | |
9704 | loop_mems[i].reg = reg; | |
9705 | ||
9706 | /* Now, replace all references to the MEM with the | |
9707 | corresponding pseudos. */ | |
9708 | for (p = next_insn_in_loop (scan_start, scan_start, end, loop_top); | |
9709 | p != NULL_RTX; | |
9710 | p = next_insn_in_loop (p, scan_start, end, loop_top)) | |
9711 | { | |
59d4e481 KGA |
9712 | rtx_and_int ri; |
9713 | ri.r = p; | |
9714 | ri.i = i; | |
41a972a9 MM |
9715 | for_each_rtx (&p, replace_loop_mem, &ri); |
9716 | } | |
9717 | ||
9718 | if (!apply_change_group ()) | |
9719 | /* We couldn't replace all occurrences of the MEM. */ | |
9720 | loop_mems[i].optimize = 0; | |
9721 | else | |
9722 | { | |
9723 | rtx set; | |
9724 | ||
9725 | /* Load the memory immediately before START, which is | |
9726 | the NOTE_LOOP_BEG. */ | |
826947c5 | 9727 | set = gen_move_insn (reg, mem); |
41a972a9 MM |
9728 | emit_insn_before (set, start); |
9729 | ||
9730 | if (written) | |
9731 | { | |
9732 | if (label == NULL_RTX) | |
9733 | { | |
9734 | /* We must compute the former | |
9735 | right-after-the-end label before we insert | |
9736 | the new one. */ | |
9737 | end_label = next_label (end); | |
9738 | label = gen_label_rtx (); | |
9739 | emit_label_after (label, end); | |
9740 | } | |
9741 | ||
9742 | /* Store the memory immediately after END, which is | |
9743 | the NOTE_LOOP_END. */ | |
826947c5 | 9744 | set = gen_move_insn (copy_rtx (mem), reg); |
41a972a9 MM |
9745 | emit_insn_after (set, label); |
9746 | } | |
9747 | ||
9748 | if (loop_dump_stream) | |
9749 | { | |
9750 | fprintf (loop_dump_stream, "Hoisted regno %d %s from ", | |
9751 | REGNO (reg), (written ? "r/w" : "r/o")); | |
9752 | print_rtl (loop_dump_stream, mem); | |
9753 | fputc ('\n', loop_dump_stream); | |
9754 | } | |
9755 | } | |
9756 | } | |
9757 | } | |
9758 | ||
9759 | if (label != NULL_RTX) | |
9760 | { | |
9761 | /* Now, we need to replace all references to the previous exit | |
9762 | label with the new one. */ | |
59d4e481 KGA |
9763 | rtx_pair rr; |
9764 | rr.r1 = end_label; | |
9765 | rr.r2 = label; | |
41a972a9 MM |
9766 | |
9767 | for (p = start; p != end; p = NEXT_INSN (p)) | |
7940acc4 JW |
9768 | { |
9769 | for_each_rtx (&p, replace_label, &rr); | |
9770 | ||
9771 | /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL | |
9772 | field. This is not handled by for_each_rtx because it doesn't | |
9773 | handle unprinted ('0') fields. We need to update JUMP_LABEL | |
9774 | because the immediately following unroll pass will use it. | |
9775 | replace_label would not work anyways, because that only handles | |
9776 | LABEL_REFs. */ | |
9777 | if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label) | |
9778 | JUMP_LABEL (p) = label; | |
9779 | } | |
41a972a9 MM |
9780 | } |
9781 | } | |
9782 | ||
9783 | /* Replace MEM with its associated pseudo register. This function is | |
9784 | called from load_mems via for_each_rtx. DATA is actually an | |
9785 | rtx_and_int * describing the instruction currently being scanned | |
9786 | and the MEM we are currently replacing. */ | |
9787 | ||
9788 | static int | |
9789 | replace_loop_mem (mem, data) | |
9790 | rtx *mem; | |
9791 | void *data; | |
9792 | { | |
9793 | rtx_and_int *ri; | |
9794 | rtx insn; | |
9795 | int i; | |
9796 | rtx m = *mem; | |
9797 | ||
9798 | if (m == NULL_RTX) | |
9799 | return 0; | |
9800 | ||
9801 | switch (GET_CODE (m)) | |
9802 | { | |
9803 | case MEM: | |
9804 | break; | |
9805 | ||
9806 | case CONST_DOUBLE: | |
9807 | /* We're not interested in the MEM associated with a | |
9808 | CONST_DOUBLE, so there's no need to traverse into one. */ | |
9809 | return -1; | |
9810 | ||
9811 | default: | |
9812 | /* This is not a MEM. */ | |
9813 | return 0; | |
9814 | } | |
9815 | ||
9816 | ri = (rtx_and_int*) data; | |
9817 | i = ri->i; | |
9818 | ||
9819 | if (!rtx_equal_p (loop_mems[i].mem, m)) | |
9820 | /* This is not the MEM we are currently replacing. */ | |
9821 | return 0; | |
9822 | ||
9823 | insn = ri->r; | |
9824 | ||
9825 | /* Actually replace the MEM. */ | |
9826 | validate_change (insn, mem, loop_mems[i].reg, 1); | |
9827 | ||
9828 | return 0; | |
9829 | } | |
9830 | ||
9831 | /* Replace occurrences of the old exit label for the loop with the new | |
9832 | one. DATA is an rtx_pair containing the old and new labels, | |
9833 | respectively. */ | |
9834 | ||
9835 | static int | |
9836 | replace_label (x, data) | |
9837 | rtx *x; | |
9838 | void *data; | |
9839 | { | |
9840 | rtx l = *x; | |
9841 | rtx old_label = ((rtx_pair*) data)->r1; | |
9842 | rtx new_label = ((rtx_pair*) data)->r2; | |
9843 | ||
9844 | if (l == NULL_RTX) | |
9845 | return 0; | |
9846 | ||
9847 | if (GET_CODE (l) != LABEL_REF) | |
9848 | return 0; | |
9849 | ||
9850 | if (XEXP (l, 0) != old_label) | |
9851 | return 0; | |
9852 | ||
9853 | XEXP (l, 0) = new_label; | |
9854 | ++LABEL_NUSES (new_label); | |
9855 | --LABEL_NUSES (old_label); | |
9856 | ||
9857 | return 0; | |
9858 | } | |
9859 |