]>
Commit | Line | Data |
---|---|---|
c8465d70 | 1 | /* Perform various loop optimizations, including strength reduction. |
d050d723 | 2 | Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, |
021d7b26 | 3 | 1998, 1999, 2000, 2001 Free Software Foundation, Inc. |
b4ad7b23 | 4 | |
1322177d | 5 | This file is part of GCC. |
b4ad7b23 | 6 | |
1322177d LB |
7 | GCC is free software; you can redistribute it and/or modify it under |
8 | the terms of the GNU General Public License as published by the Free | |
9 | Software Foundation; either version 2, or (at your option) any later | |
10 | version. | |
b4ad7b23 | 11 | |
1322177d LB |
12 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 | for more details. | |
b4ad7b23 RS |
16 | |
17 | You should have received a copy of the GNU General Public License | |
1322177d LB |
18 | along with GCC; see the file COPYING. If not, write to the Free |
19 | Software Foundation, 59 Temple Place - Suite 330, Boston, MA | |
20 | 02111-1307, USA. */ | |
b4ad7b23 | 21 | |
b4ad7b23 RS |
22 | /* This is the loop optimization pass of the compiler. |
23 | It finds invariant computations within loops and moves them | |
e6fcb60d | 24 | to the beginning of the loop. Then it identifies basic and |
b4ad7b23 RS |
25 | general induction variables. Strength reduction is applied to the general |
26 | induction variables, and induction variable elimination is applied to | |
27 | the basic induction variables. | |
28 | ||
29 | It also finds cases where | |
30 | a register is set within the loop by zero-extending a narrower value | |
31 | and changes these to zero the entire register once before the loop | |
32 | and merely copy the low part within the loop. | |
33 | ||
34 | Most of the complexity is in heuristics to decide when it is worth | |
35 | while to do these things. */ | |
36 | ||
37 | #include "config.h" | |
670ee920 | 38 | #include "system.h" |
b4ad7b23 | 39 | #include "rtl.h" |
6baf1cc8 | 40 | #include "tm_p.h" |
b4ad7b23 | 41 | #include "obstack.h" |
49ad7cfa | 42 | #include "function.h" |
b4ad7b23 | 43 | #include "expr.h" |
efc9bd41 | 44 | #include "hard-reg-set.h" |
c29f60c0 | 45 | #include "basic-block.h" |
b4ad7b23 | 46 | #include "insn-config.h" |
b4ad7b23 | 47 | #include "regs.h" |
b4ad7b23 RS |
48 | #include "recog.h" |
49 | #include "flags.h" | |
50 | #include "real.h" | |
b4ad7b23 | 51 | #include "loop.h" |
eab5c70a | 52 | #include "cselib.h" |
6adb4e3a | 53 | #include "except.h" |
2e107e9e | 54 | #include "toplev.h" |
aa18f20e | 55 | #include "predict.h" |
b4ad7b23 | 56 | |
b8056b46 MH |
57 | #define LOOP_REG_LIFETIME(LOOP, REGNO) \ |
58 | ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO))) | |
59 | ||
60 | #define LOOP_REG_GLOBAL_P(LOOP, REGNO) \ | |
61 | ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \ | |
62 | || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start))) | |
63 | ||
8529a489 | 64 | |
b4ad7b23 | 65 | /* Vector mapping INSN_UIDs to luids. |
d45cf215 | 66 | The luids are like uids but increase monotonically always. |
b4ad7b23 RS |
67 | We use them to see whether a jump comes from outside a given loop. */ |
68 | ||
69 | int *uid_luid; | |
70 | ||
71 | /* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop | |
72 | number the insn is contained in. */ | |
73 | ||
a2be868f | 74 | struct loop **uid_loop; |
b4ad7b23 RS |
75 | |
76 | /* 1 + largest uid of any insn. */ | |
77 | ||
78 | int max_uid_for_loop; | |
79 | ||
80 | /* 1 + luid of last insn. */ | |
81 | ||
82 | static int max_luid; | |
83 | ||
84 | /* Number of loops detected in current function. Used as index to the | |
85 | next few tables. */ | |
86 | ||
87 | static int max_loop_num; | |
88 | ||
b4ad7b23 RS |
89 | /* Bound on pseudo register number before loop optimization. |
90 | A pseudo has valid regscan info if its number is < max_reg_before_loop. */ | |
770ae6cc | 91 | unsigned int max_reg_before_loop; |
b4ad7b23 | 92 | |
0a326ec9 BS |
93 | /* The value to pass to the next call of reg_scan_update. */ |
94 | static int loop_max_reg; | |
95 | ||
b4ad7b23 RS |
96 | #define obstack_chunk_alloc xmalloc |
97 | #define obstack_chunk_free free | |
b4ad7b23 RS |
98 | \f |
99 | /* During the analysis of a loop, a chain of `struct movable's | |
100 | is made to record all the movable insns found. | |
101 | Then the entire chain can be scanned to decide which to move. */ | |
102 | ||
103 | struct movable | |
104 | { | |
105 | rtx insn; /* A movable insn */ | |
0f41302f MS |
106 | rtx set_src; /* The expression this reg is set from. */ |
107 | rtx set_dest; /* The destination of this SET. */ | |
b4ad7b23 | 108 | rtx dependencies; /* When INSN is libcall, this is an EXPR_LIST |
0f41302f | 109 | of any registers used within the LIBCALL. */ |
e6fcb60d | 110 | int consec; /* Number of consecutive following insns |
b4ad7b23 | 111 | that must be moved with this one. */ |
770ae6cc | 112 | unsigned int regno; /* The register it sets */ |
b4ad7b23 RS |
113 | short lifetime; /* lifetime of that register; |
114 | may be adjusted when matching movables | |
115 | that load the same value are found. */ | |
116 | short savings; /* Number of insns we can move for this reg, | |
117 | including other movables that force this | |
118 | or match this one. */ | |
119 | unsigned int cond : 1; /* 1 if only conditionally movable */ | |
120 | unsigned int force : 1; /* 1 means MUST move this insn */ | |
121 | unsigned int global : 1; /* 1 means reg is live outside this loop */ | |
122 | /* If PARTIAL is 1, GLOBAL means something different: | |
123 | that the reg is live outside the range from where it is set | |
124 | to the following label. */ | |
125 | unsigned int done : 1; /* 1 inhibits further processing of this */ | |
e6fcb60d | 126 | |
b4ad7b23 RS |
127 | unsigned int partial : 1; /* 1 means this reg is used for zero-extending. |
128 | In particular, moving it does not make it | |
129 | invariant. */ | |
130 | unsigned int move_insn : 1; /* 1 means that we call emit_move_insn to | |
131 | load SRC, rather than copying INSN. */ | |
1a61c29f JW |
132 | unsigned int move_insn_first:1;/* Same as above, if this is necessary for the |
133 | first insn of a consecutive sets group. */ | |
0f41302f | 134 | unsigned int is_equiv : 1; /* 1 means a REG_EQUIV is present on INSN. */ |
b4ad7b23 RS |
135 | enum machine_mode savemode; /* Nonzero means it is a mode for a low part |
136 | that we should avoid changing when clearing | |
137 | the rest of the reg. */ | |
138 | struct movable *match; /* First entry for same value */ | |
139 | struct movable *forces; /* An insn that must be moved if this is */ | |
140 | struct movable *next; | |
141 | }; | |
142 | ||
45f97e2e | 143 | |
b4ad7b23 RS |
144 | FILE *loop_dump_stream; |
145 | ||
146 | /* Forward declarations. */ | |
147 | ||
3fe41456 KG |
148 | static void find_and_verify_loops PARAMS ((rtx, struct loops *)); |
149 | static void mark_loop_jump PARAMS ((rtx, struct loop *)); | |
150 | static void prescan_loop PARAMS ((struct loop *)); | |
151 | static int reg_in_basic_block_p PARAMS ((rtx, rtx)); | |
0534b804 MH |
152 | static int consec_sets_invariant_p PARAMS ((const struct loop *, |
153 | rtx, int, rtx)); | |
3fe41456 | 154 | static int labels_in_range_p PARAMS ((rtx, int)); |
f1d4ac80 | 155 | static void count_one_set PARAMS ((struct loop_regs *, rtx, rtx, rtx *)); |
3fe41456 KG |
156 | static void note_addr_stored PARAMS ((rtx, rtx, void *)); |
157 | static void note_set_pseudo_multiple_uses PARAMS ((rtx, rtx, void *)); | |
158 | static int loop_reg_used_before_p PARAMS ((const struct loop *, rtx, rtx)); | |
1bf14ad7 | 159 | static void scan_loop PARAMS ((struct loop*, int)); |
e9a25f70 | 160 | #if 0 |
3fe41456 | 161 | static void replace_call_address PARAMS ((rtx, rtx, rtx)); |
e9a25f70 | 162 | #endif |
3fe41456 KG |
163 | static rtx skip_consec_insns PARAMS ((rtx, int)); |
164 | static int libcall_benefit PARAMS ((rtx)); | |
6ec92010 MH |
165 | static void ignore_some_movables PARAMS ((struct loop_movables *)); |
166 | static void force_movables PARAMS ((struct loop_movables *)); | |
167 | static void combine_movables PARAMS ((struct loop_movables *, | |
168 | struct loop_regs *)); | |
28680540 | 169 | static int num_unmoved_movables PARAMS ((const struct loop *)); |
6ec92010 MH |
170 | static int regs_match_p PARAMS ((rtx, rtx, struct loop_movables *)); |
171 | static int rtx_equal_for_loop_p PARAMS ((rtx, rtx, struct loop_movables *, | |
1ecd860b | 172 | struct loop_regs *)); |
3fe41456 | 173 | static void add_label_notes PARAMS ((rtx, rtx)); |
6ec92010 | 174 | static void move_movables PARAMS ((struct loop *loop, struct loop_movables *, |
ed5bb68d | 175 | int, int)); |
6ec92010 MH |
176 | static void loop_movables_add PARAMS((struct loop_movables *, |
177 | struct movable *)); | |
178 | static void loop_movables_free PARAMS((struct loop_movables *)); | |
0534b804 | 179 | static int count_nonfixed_reads PARAMS ((const struct loop *, rtx)); |
6ec73c7c MH |
180 | static void loop_bivs_find PARAMS((struct loop *)); |
181 | static void loop_bivs_init_find PARAMS((struct loop *)); | |
182 | static void loop_bivs_check PARAMS((struct loop *)); | |
183 | static void loop_givs_find PARAMS((struct loop *)); | |
184 | static void loop_givs_check PARAMS((struct loop *)); | |
e304a8e6 MH |
185 | static int loop_biv_eliminable_p PARAMS((struct loop *, struct iv_class *, |
186 | int, int)); | |
187 | static int loop_giv_reduce_benefit PARAMS((struct loop *, struct iv_class *, | |
188 | struct induction *, rtx)); | |
189 | static void loop_givs_dead_check PARAMS((struct loop *, struct iv_class *)); | |
190 | static void loop_givs_reduce PARAMS((struct loop *, struct iv_class *)); | |
191 | static void loop_givs_rescan PARAMS((struct loop *, struct iv_class *, | |
96a45535 | 192 | rtx *)); |
b2735d9a | 193 | static void loop_ivs_free PARAMS((struct loop *)); |
28680540 | 194 | static void strength_reduce PARAMS ((struct loop *, int)); |
f1d4ac80 | 195 | static void find_single_use_in_loop PARAMS ((struct loop_regs *, rtx, rtx)); |
3fe41456 | 196 | static int valid_initial_value_p PARAMS ((rtx, rtx, int, rtx)); |
0534b804 | 197 | static void find_mem_givs PARAMS ((const struct loop *, rtx, rtx, int, int)); |
ed5bb68d MH |
198 | static void record_biv PARAMS ((struct loop *, struct induction *, |
199 | rtx, rtx, rtx, rtx, rtx *, | |
98d1cd45 | 200 | int, int)); |
0534b804 MH |
201 | static void check_final_value PARAMS ((const struct loop *, |
202 | struct induction *)); | |
099f0f3f MH |
203 | static void loop_ivs_dump PARAMS((const struct loop *, FILE *, int)); |
204 | static void loop_iv_class_dump PARAMS((const struct iv_class *, FILE *, int)); | |
c804f3f8 MH |
205 | static void loop_biv_dump PARAMS((const struct induction *, FILE *, int)); |
206 | static void loop_giv_dump PARAMS((const struct induction *, FILE *, int)); | |
e6fcb60d | 207 | static void record_giv PARAMS ((const struct loop *, struct induction *, |
e8cb4873 RH |
208 | rtx, rtx, rtx, rtx, rtx, rtx, int, |
209 | enum g_types, int, int, rtx *)); | |
0534b804 | 210 | static void update_giv_derive PARAMS ((const struct loop *, rtx)); |
e8cb4873 RH |
211 | static void check_ext_dependant_givs PARAMS ((struct iv_class *, |
212 | struct loop_info *)); | |
e6fcb60d | 213 | static int basic_induction_var PARAMS ((const struct loop *, rtx, |
0534b804 | 214 | enum machine_mode, rtx, rtx, |
98d1cd45 | 215 | rtx *, rtx *, rtx **)); |
e8cb4873 | 216 | static rtx simplify_giv_expr PARAMS ((const struct loop *, rtx, rtx *, int *)); |
0534b804 | 217 | static int general_induction_var PARAMS ((const struct loop *loop, rtx, rtx *, |
e8cb4873 RH |
218 | rtx *, rtx *, rtx *, int, int *, |
219 | enum machine_mode)); | |
0534b804 | 220 | static int consec_sets_giv PARAMS ((const struct loop *, int, rtx, |
e8cb4873 | 221 | rtx, rtx, rtx *, rtx *, rtx *, rtx *)); |
3fe41456 KG |
222 | static int check_dbra_loop PARAMS ((struct loop *, int)); |
223 | static rtx express_from_1 PARAMS ((rtx, rtx, rtx)); | |
224 | static rtx combine_givs_p PARAMS ((struct induction *, struct induction *)); | |
b4f75276 | 225 | static int cmp_combine_givs_stats PARAMS ((const PTR, const PTR)); |
1ecd860b | 226 | static void combine_givs PARAMS ((struct loop_regs *, struct iv_class *)); |
3fe41456 | 227 | static int product_cheap_p PARAMS ((rtx, rtx)); |
0534b804 MH |
228 | static int maybe_eliminate_biv PARAMS ((const struct loop *, struct iv_class *, |
229 | int, int, int)); | |
e6fcb60d | 230 | static int maybe_eliminate_biv_1 PARAMS ((const struct loop *, rtx, rtx, |
96a45535 MH |
231 | struct iv_class *, int, |
232 | basic_block, rtx)); | |
3fe41456 KG |
233 | static int last_use_this_basic_block PARAMS ((rtx, rtx)); |
234 | static void record_initial PARAMS ((rtx, rtx, void *)); | |
235 | static void update_reg_last_use PARAMS ((rtx, rtx)); | |
236 | static rtx next_insn_in_loop PARAMS ((const struct loop *, rtx)); | |
28680540 MM |
237 | static void loop_regs_scan PARAMS ((const struct loop *, int)); |
238 | static int count_insns_in_loop PARAMS ((const struct loop *)); | |
3fe41456 KG |
239 | static void load_mems PARAMS ((const struct loop *)); |
240 | static int insert_loop_mem PARAMS ((rtx *, void *)); | |
241 | static int replace_loop_mem PARAMS ((rtx *, void *)); | |
afa1738b | 242 | static void replace_loop_mems PARAMS ((rtx, rtx, rtx)); |
3fe41456 | 243 | static int replace_loop_reg PARAMS ((rtx *, void *)); |
afa1738b | 244 | static void replace_loop_regs PARAMS ((rtx insn, rtx, rtx)); |
3fe41456 | 245 | static void note_reg_stored PARAMS ((rtx, rtx, void *)); |
770ae6cc | 246 | static void try_copy_prop PARAMS ((const struct loop *, rtx, unsigned int)); |
d2335c24 MH |
247 | static void try_swap_copy_prop PARAMS ((const struct loop *, rtx, |
248 | unsigned int)); | |
3fe41456 | 249 | static int replace_label PARAMS ((rtx *, void *)); |
82ee5e63 JH |
250 | static rtx check_insn_for_givs PARAMS((struct loop *, rtx, int, int)); |
251 | static rtx check_insn_for_bivs PARAMS((struct loop *, rtx, int, int)); | |
96a45535 MH |
252 | static rtx gen_add_mult PARAMS ((rtx, rtx, rtx, rtx)); |
253 | static void loop_regs_update PARAMS ((const struct loop *, rtx)); | |
630c79be | 254 | static int iv_add_mult_cost PARAMS ((rtx, rtx, rtx, rtx)); |
41a972a9 | 255 | |
96a45535 MH |
256 | static rtx loop_insn_emit_after PARAMS((const struct loop *, basic_block, |
257 | rtx, rtx)); | |
86e21212 MH |
258 | static rtx loop_call_insn_emit_before PARAMS((const struct loop *, |
259 | basic_block, rtx, rtx)); | |
260 | static rtx loop_call_insn_hoist PARAMS((const struct loop *, rtx)); | |
96a45535 | 261 | static rtx loop_insn_sink_or_swim PARAMS((const struct loop *, rtx)); |
804a718a | 262 | |
6057c0e6 | 263 | static void loop_dump_aux PARAMS ((const struct loop *, FILE *, int)); |
e6217dcd | 264 | static void loop_delete_insns PARAMS ((rtx, rtx)); |
099f0f3f MH |
265 | void debug_ivs PARAMS ((const struct loop *)); |
266 | void debug_iv_class PARAMS ((const struct iv_class *)); | |
c804f3f8 MH |
267 | void debug_biv PARAMS ((const struct induction *)); |
268 | void debug_giv PARAMS ((const struct induction *)); | |
6057c0e6 | 269 | void debug_loop PARAMS ((const struct loop *)); |
685efa54 | 270 | void debug_loops PARAMS ((const struct loops *)); |
6057c0e6 | 271 | |
fd5d5b07 KH |
272 | typedef struct rtx_pair |
273 | { | |
41a972a9 MM |
274 | rtx r1; |
275 | rtx r2; | |
276 | } rtx_pair; | |
277 | ||
afa1738b MH |
278 | typedef struct loop_replace_args |
279 | { | |
280 | rtx match; | |
281 | rtx replacement; | |
282 | rtx insn; | |
283 | } loop_replace_args; | |
284 | ||
41a972a9 MM |
285 | /* Nonzero iff INSN is between START and END, inclusive. */ |
286 | #define INSN_IN_RANGE_P(INSN, START, END) \ | |
287 | (INSN_UID (INSN) < max_uid_for_loop \ | |
288 | && INSN_LUID (INSN) >= INSN_LUID (START) \ | |
289 | && INSN_LUID (INSN) <= INSN_LUID (END)) | |
8c660648 | 290 | |
2a1777af | 291 | /* Indirect_jump_in_function is computed once per function. */ |
5527bf14 | 292 | static int indirect_jump_in_function; |
3fe41456 | 293 | static int indirect_jump_in_function_p PARAMS ((rtx)); |
2a1777af | 294 | |
3fe41456 | 295 | static int compute_luids PARAMS ((rtx, rtx, int)); |
a6207a2b | 296 | |
3fe41456 | 297 | static int biv_elimination_giv_has_0_offset PARAMS ((struct induction *, |
fd5d5b07 KH |
298 | struct induction *, |
299 | rtx)); | |
b4ad7b23 | 300 | \f |
b4ad7b23 RS |
301 | /* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to |
302 | copy the value of the strength reduced giv to its original register. */ | |
45f97e2e RH |
303 | static int copy_cost; |
304 | ||
305 | /* Cost of using a register, to normalize the benefits of a giv. */ | |
306 | static int reg_address_cost; | |
307 | ||
b4ad7b23 RS |
308 | void |
309 | init_loop () | |
310 | { | |
38a448ca | 311 | rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1); |
b4ad7b23 | 312 | |
01329426 | 313 | reg_address_cost = address_cost (reg, SImode); |
45f97e2e | 314 | |
f1c1dfc3 | 315 | copy_cost = COSTS_N_INSNS (1); |
b4ad7b23 RS |
316 | } |
317 | \f | |
3ec2b590 R |
318 | /* Compute the mapping from uids to luids. |
319 | LUIDs are numbers assigned to insns, like uids, | |
320 | except that luids increase monotonically through the code. | |
321 | Start at insn START and stop just before END. Assign LUIDs | |
322 | starting with PREV_LUID + 1. Return the last assigned LUID + 1. */ | |
323 | static int | |
324 | compute_luids (start, end, prev_luid) | |
325 | rtx start, end; | |
326 | int prev_luid; | |
327 | { | |
328 | int i; | |
329 | rtx insn; | |
330 | ||
331 | for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn)) | |
332 | { | |
333 | if (INSN_UID (insn) >= max_uid_for_loop) | |
334 | continue; | |
335 | /* Don't assign luids to line-number NOTEs, so that the distance in | |
336 | luids between two insns is not affected by -g. */ | |
337 | if (GET_CODE (insn) != NOTE | |
338 | || NOTE_LINE_NUMBER (insn) <= 0) | |
339 | uid_luid[INSN_UID (insn)] = ++i; | |
340 | else | |
341 | /* Give a line number note the same luid as preceding insn. */ | |
342 | uid_luid[INSN_UID (insn)] = i; | |
343 | } | |
344 | return i + 1; | |
345 | } | |
346 | \f | |
b4ad7b23 RS |
347 | /* Entry point of this file. Perform loop optimization |
348 | on the current function. F is the first insn of the function | |
349 | and DUMPFILE is a stream for output of a trace of actions taken | |
350 | (or 0 if none should be output). */ | |
351 | ||
352 | void | |
1bf14ad7 | 353 | loop_optimize (f, dumpfile, flags) |
b4ad7b23 RS |
354 | /* f is the first instruction of a chain of insns for one function */ |
355 | rtx f; | |
356 | FILE *dumpfile; | |
1bf14ad7 | 357 | int flags; |
b4ad7b23 RS |
358 | { |
359 | register rtx insn; | |
360 | register int i; | |
a2be868f MH |
361 | struct loops loops_data; |
362 | struct loops *loops = &loops_data; | |
4cda35d4 | 363 | struct loop_info *loops_info; |
b4ad7b23 RS |
364 | |
365 | loop_dump_stream = dumpfile; | |
366 | ||
367 | init_recog_no_volatile (); | |
b4ad7b23 RS |
368 | |
369 | max_reg_before_loop = max_reg_num (); | |
0a326ec9 | 370 | loop_max_reg = max_reg_before_loop; |
b4ad7b23 | 371 | |
b4ad7b23 RS |
372 | regs_may_share = 0; |
373 | ||
0f41302f | 374 | /* Count the number of loops. */ |
b4ad7b23 RS |
375 | |
376 | max_loop_num = 0; | |
377 | for (insn = f; insn; insn = NEXT_INSN (insn)) | |
378 | { | |
379 | if (GET_CODE (insn) == NOTE | |
380 | && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG) | |
381 | max_loop_num++; | |
382 | } | |
383 | ||
384 | /* Don't waste time if no loops. */ | |
385 | if (max_loop_num == 0) | |
386 | return; | |
387 | ||
a2be868f MH |
388 | loops->num = max_loop_num; |
389 | ||
b4ad7b23 RS |
390 | /* Get size to use for tables indexed by uids. |
391 | Leave some space for labels allocated by find_and_verify_loops. */ | |
1c01e9df | 392 | max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32; |
b4ad7b23 | 393 | |
67289ea6 | 394 | uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int)); |
e6fcb60d | 395 | uid_loop = (struct loop **) xcalloc (max_uid_for_loop, |
a2be868f | 396 | sizeof (struct loop *)); |
8c660648 | 397 | |
4cda35d4 MH |
398 | /* Allocate storage for array of loops. */ |
399 | loops->array = (struct loop *) | |
400 | xcalloc (loops->num, sizeof (struct loop)); | |
401 | ||
b4ad7b23 RS |
402 | /* Find and process each loop. |
403 | First, find them, and record them in order of their beginnings. */ | |
a2be868f | 404 | find_and_verify_loops (f, loops); |
b4ad7b23 | 405 | |
4cda35d4 MH |
406 | /* Allocate and initialize auxiliary loop information. */ |
407 | loops_info = xcalloc (loops->num, sizeof (struct loop_info)); | |
408 | for (i = 0; i < loops->num; i++) | |
52b38064 | 409 | loops->array[i].aux = loops_info + i; |
4cda35d4 | 410 | |
b4ad7b23 RS |
411 | /* Now find all register lifetimes. This must be done after |
412 | find_and_verify_loops, because it might reorder the insns in the | |
413 | function. */ | |
0a326ec9 | 414 | reg_scan (f, max_reg_before_loop, 1); |
b4ad7b23 | 415 | |
7506f491 DE |
416 | /* This must occur after reg_scan so that registers created by gcse |
417 | will have entries in the register tables. | |
418 | ||
419 | We could have added a call to reg_scan after gcse_main in toplev.c, | |
420 | but moving this call to init_alias_analysis is more efficient. */ | |
421 | init_alias_analysis (); | |
422 | ||
e318cec0 R |
423 | /* See if we went too far. Note that get_max_uid already returns |
424 | one more that the maximum uid of all insn. */ | |
1c01e9df TW |
425 | if (get_max_uid () > max_uid_for_loop) |
426 | abort (); | |
f5963e61 | 427 | /* Now reset it to the actual size we need. See above. */ |
e318cec0 | 428 | max_uid_for_loop = get_max_uid (); |
1c01e9df | 429 | |
a2be868f MH |
430 | /* find_and_verify_loops has already called compute_luids, but it |
431 | might have rearranged code afterwards, so we need to recompute | |
432 | the luids now. */ | |
3ec2b590 | 433 | max_luid = compute_luids (f, NULL_RTX, 0); |
b4ad7b23 RS |
434 | |
435 | /* Don't leave gaps in uid_luid for insns that have been | |
436 | deleted. It is possible that the first or last insn | |
437 | using some register has been deleted by cross-jumping. | |
438 | Make sure that uid_luid for that former insn's uid | |
439 | points to the general area where that insn used to be. */ | |
440 | for (i = 0; i < max_uid_for_loop; i++) | |
441 | { | |
442 | uid_luid[0] = uid_luid[i]; | |
443 | if (uid_luid[0] != 0) | |
444 | break; | |
445 | } | |
446 | for (i = 0; i < max_uid_for_loop; i++) | |
447 | if (uid_luid[i] == 0) | |
448 | uid_luid[i] = uid_luid[i - 1]; | |
449 | ||
2a1777af JL |
450 | /* Determine if the function has indirect jump. On some systems |
451 | this prevents low overhead loop instructions from being used. */ | |
8c660648 | 452 | indirect_jump_in_function = indirect_jump_in_function_p (f); |
8c660648 | 453 | |
78458962 MH |
454 | /* Now scan the loops, last ones first, since this means inner ones are done |
455 | before outer ones. */ | |
456 | for (i = max_loop_num - 1; i >= 0; i--) | |
457 | { | |
458 | struct loop *loop = &loops->array[i]; | |
459 | ||
a2be868f | 460 | if (! loop->invalid && loop->end) |
1bf14ad7 | 461 | scan_loop (loop, flags); |
a2be868f | 462 | } |
07e857c2 | 463 | |
116eebd6 MM |
464 | /* If there were lexical blocks inside the loop, they have been |
465 | replicated. We will now have more than one NOTE_INSN_BLOCK_BEG | |
466 | and NOTE_INSN_BLOCK_END for each such block. We must duplicate | |
467 | the BLOCKs as well. */ | |
428248f7 | 468 | if (write_symbols != NO_DEBUG) |
116eebd6 | 469 | reorder_blocks (); |
45f97e2e RH |
470 | |
471 | end_alias_analysis (); | |
67289ea6 MM |
472 | |
473 | /* Clean up. */ | |
67289ea6 | 474 | free (uid_luid); |
a2be868f | 475 | free (uid_loop); |
4cda35d4 MH |
476 | free (loops_info); |
477 | free (loops->array); | |
b4ad7b23 RS |
478 | } |
479 | \f | |
41a972a9 MM |
480 | /* Returns the next insn, in execution order, after INSN. START and |
481 | END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop, | |
0534b804 | 482 | respectively. LOOP->TOP, if non-NULL, is the top of the loop in the |
41a972a9 MM |
483 | insn-stream; it is used with loops that are entered near the |
484 | bottom. */ | |
485 | ||
486 | static rtx | |
a2be868f MH |
487 | next_insn_in_loop (loop, insn) |
488 | const struct loop *loop; | |
41a972a9 | 489 | rtx insn; |
41a972a9 MM |
490 | { |
491 | insn = NEXT_INSN (insn); | |
492 | ||
a2be868f | 493 | if (insn == loop->end) |
41a972a9 | 494 | { |
a2be868f | 495 | if (loop->top) |
41a972a9 | 496 | /* Go to the top of the loop, and continue there. */ |
a2be868f | 497 | insn = loop->top; |
41a972a9 MM |
498 | else |
499 | /* We're done. */ | |
500 | insn = NULL_RTX; | |
501 | } | |
502 | ||
a2be868f | 503 | if (insn == loop->scan_start) |
41a972a9 MM |
504 | /* We're done. */ |
505 | insn = NULL_RTX; | |
506 | ||
507 | return insn; | |
508 | } | |
509 | ||
a2be868f | 510 | /* Optimize one loop described by LOOP. */ |
b4ad7b23 RS |
511 | |
512 | /* ??? Could also move memory writes out of loops if the destination address | |
513 | is invariant, the source is invariant, the memory write is not volatile, | |
514 | and if we can prove that no read inside the loop can read this address | |
515 | before the write occurs. If there is a read of this address after the | |
516 | write, then we can also mark the memory read as invariant. */ | |
517 | ||
518 | static void | |
1bf14ad7 | 519 | scan_loop (loop, flags) |
a2be868f | 520 | struct loop *loop; |
1bf14ad7 | 521 | int flags; |
b4ad7b23 | 522 | { |
1ecd860b MH |
523 | struct loop_info *loop_info = LOOP_INFO (loop); |
524 | struct loop_regs *regs = LOOP_REGS (loop); | |
b4ad7b23 | 525 | register int i; |
a2be868f MH |
526 | rtx loop_start = loop->start; |
527 | rtx loop_end = loop->end; | |
41a972a9 | 528 | rtx p; |
b4ad7b23 RS |
529 | /* 1 if we are scanning insns that could be executed zero times. */ |
530 | int maybe_never = 0; | |
531 | /* 1 if we are scanning insns that might never be executed | |
532 | due to a subroutine call which might exit before they are reached. */ | |
533 | int call_passed = 0; | |
b4ad7b23 RS |
534 | /* Jump insn that enters the loop, or 0 if control drops in. */ |
535 | rtx loop_entry_jump = 0; | |
b4ad7b23 RS |
536 | /* Number of insns in the loop. */ |
537 | int insn_count; | |
b4ad7b23 | 538 | int tem; |
0a326ec9 | 539 | rtx temp, update_start, update_end; |
b4ad7b23 RS |
540 | /* The SET from an insn, if it is the only SET in the insn. */ |
541 | rtx set, set1; | |
542 | /* Chain describing insns movable in current loop. */ | |
6ec92010 | 543 | struct loop_movables *movables = LOOP_MOVABLES (loop); |
b4ad7b23 RS |
544 | /* Ratio of extra register life span we can justify |
545 | for saving an instruction. More if loop doesn't call subroutines | |
546 | since in that case saving an insn makes more difference | |
547 | and more registers are available. */ | |
548 | int threshold; | |
5ea7a4ae JW |
549 | /* Nonzero if we are scanning instructions in a sub-loop. */ |
550 | int loop_depth = 0; | |
a2be868f | 551 | |
afa1738b MH |
552 | loop->top = 0; |
553 | ||
02055ad6 MH |
554 | movables->head = 0; |
555 | movables->last = 0; | |
02055ad6 | 556 | |
b4ad7b23 RS |
557 | /* Determine whether this loop starts with a jump down to a test at |
558 | the end. This will occur for a small number of loops with a test | |
559 | that is too complex to duplicate in front of the loop. | |
560 | ||
561 | We search for the first insn or label in the loop, skipping NOTEs. | |
562 | However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG | |
563 | (because we might have a loop executed only once that contains a | |
564 | loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END | |
565 | (in case we have a degenerate loop). | |
566 | ||
567 | Note that if we mistakenly think that a loop is entered at the top | |
568 | when, in fact, it is entered at the exit test, the only effect will be | |
569 | slightly poorer optimization. Making the opposite error can generate | |
e6fcb60d | 570 | incorrect code. Since very few loops now start with a jump to the |
b4ad7b23 RS |
571 | exit test, the code here to detect that case is very conservative. */ |
572 | ||
573 | for (p = NEXT_INSN (loop_start); | |
a2be868f | 574 | p != loop_end |
2c3c49de | 575 | && GET_CODE (p) != CODE_LABEL && ! INSN_P (p) |
b4ad7b23 RS |
576 | && (GET_CODE (p) != NOTE |
577 | || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG | |
578 | && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END)); | |
579 | p = NEXT_INSN (p)) | |
580 | ; | |
581 | ||
a2be868f | 582 | loop->scan_start = p; |
b4ad7b23 | 583 | |
96a45535 MH |
584 | /* If loop end is the end of the current function, then emit a |
585 | NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy | |
586 | note insn. This is the position we use when sinking insns out of | |
587 | the loop. */ | |
588 | if (NEXT_INSN (loop->end) != 0) | |
589 | loop->sink = NEXT_INSN (loop->end); | |
590 | else | |
591 | loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end); | |
592 | ||
b4ad7b23 | 593 | /* Set up variables describing this loop. */ |
a2be868f | 594 | prescan_loop (loop); |
3c748bb6 | 595 | threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs); |
b4ad7b23 RS |
596 | |
597 | /* If loop has a jump before the first label, | |
598 | the true entry is the target of that jump. | |
599 | Start scan from there. | |
a2be868f | 600 | But record in LOOP->TOP the place where the end-test jumps |
b4ad7b23 RS |
601 | back to so we can scan that after the end of the loop. */ |
602 | if (GET_CODE (p) == JUMP_INSN) | |
603 | { | |
604 | loop_entry_jump = p; | |
605 | ||
606 | /* Loop entry must be unconditional jump (and not a RETURN) */ | |
7f1c097d | 607 | if (any_uncondjump_p (p) |
b4ad7b23 RS |
608 | && JUMP_LABEL (p) != 0 |
609 | /* Check to see whether the jump actually | |
610 | jumps out of the loop (meaning it's no loop). | |
611 | This case can happen for things like | |
612 | do {..} while (0). If this label was generated previously | |
613 | by loop, we can't tell anything about it and have to reject | |
614 | the loop. */ | |
a2be868f | 615 | && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end)) |
b4ad7b23 | 616 | { |
a2be868f MH |
617 | loop->top = next_label (loop->scan_start); |
618 | loop->scan_start = JUMP_LABEL (p); | |
b4ad7b23 RS |
619 | } |
620 | } | |
621 | ||
a2be868f | 622 | /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid |
b4ad7b23 | 623 | as required by loop_reg_used_before_p. So skip such loops. (This |
e6fcb60d | 624 | test may never be true, but it's best to play it safe.) |
b4ad7b23 RS |
625 | |
626 | Also, skip loops where we do not start scanning at a label. This | |
627 | test also rejects loops starting with a JUMP_INSN that failed the | |
628 | test above. */ | |
629 | ||
a2be868f MH |
630 | if (INSN_UID (loop->scan_start) >= max_uid_for_loop |
631 | || GET_CODE (loop->scan_start) != CODE_LABEL) | |
b4ad7b23 RS |
632 | { |
633 | if (loop_dump_stream) | |
634 | fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n", | |
a2be868f | 635 | INSN_UID (loop_start), INSN_UID (loop_end)); |
b4ad7b23 RS |
636 | return; |
637 | } | |
638 | ||
1d7ae250 MH |
639 | /* Allocate extra space for REGs that might be created by load_mems. |
640 | We allocate a little extra slop as well, in the hopes that we | |
641 | won't have to reallocate the regs array. */ | |
28680540 MM |
642 | loop_regs_scan (loop, loop_info->mems_idx + 16); |
643 | insn_count = count_insns_in_loop (loop); | |
b4ad7b23 RS |
644 | |
645 | if (loop_dump_stream) | |
646 | { | |
647 | fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n", | |
a2be868f MH |
648 | INSN_UID (loop_start), INSN_UID (loop_end), insn_count); |
649 | if (loop->cont) | |
b4ad7b23 | 650 | fprintf (loop_dump_stream, "Continue at insn %d.\n", |
a2be868f | 651 | INSN_UID (loop->cont)); |
b4ad7b23 RS |
652 | } |
653 | ||
654 | /* Scan through the loop finding insns that are safe to move. | |
f1d4ac80 | 655 | Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that |
b4ad7b23 RS |
656 | this reg will be considered invariant for subsequent insns. |
657 | We consider whether subsequent insns use the reg | |
658 | in deciding whether it is worth actually moving. | |
659 | ||
660 | MAYBE_NEVER is nonzero if we have passed a conditional jump insn | |
661 | and therefore it is possible that the insns we are scanning | |
662 | would never be executed. At such times, we must make sure | |
663 | that it is safe to execute the insn once instead of zero times. | |
664 | When MAYBE_NEVER is 0, all insns will be executed at least once | |
665 | so that is not a problem. */ | |
666 | ||
e6fcb60d | 667 | for (p = next_insn_in_loop (loop, loop->scan_start); |
41a972a9 | 668 | p != NULL_RTX; |
a2be868f | 669 | p = next_insn_in_loop (loop, p)) |
b4ad7b23 | 670 | { |
b4ad7b23 RS |
671 | if (GET_CODE (p) == INSN |
672 | && (set = single_set (p)) | |
673 | && GET_CODE (SET_DEST (set)) == REG | |
f1d4ac80 | 674 | && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize) |
b4ad7b23 RS |
675 | { |
676 | int tem1 = 0; | |
677 | int tem2 = 0; | |
678 | int move_insn = 0; | |
679 | rtx src = SET_SRC (set); | |
680 | rtx dependencies = 0; | |
681 | ||
682 | /* Figure out what to use as a source of this insn. If a REG_EQUIV | |
683 | note is given or if a REG_EQUAL note with a constant operand is | |
684 | specified, use it as the source and mark that we should move | |
685 | this insn by calling emit_move_insn rather that duplicating the | |
686 | insn. | |
687 | ||
688 | Otherwise, only use the REG_EQUAL contents if a REG_RETVAL note | |
689 | is present. */ | |
5fd8383e | 690 | temp = find_reg_note (p, REG_EQUIV, NULL_RTX); |
b4ad7b23 RS |
691 | if (temp) |
692 | src = XEXP (temp, 0), move_insn = 1; | |
e6fcb60d | 693 | else |
b4ad7b23 | 694 | { |
5fd8383e | 695 | temp = find_reg_note (p, REG_EQUAL, NULL_RTX); |
b4ad7b23 RS |
696 | if (temp && CONSTANT_P (XEXP (temp, 0))) |
697 | src = XEXP (temp, 0), move_insn = 1; | |
5fd8383e | 698 | if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX)) |
b4ad7b23 RS |
699 | { |
700 | src = XEXP (temp, 0); | |
701 | /* A libcall block can use regs that don't appear in | |
702 | the equivalent expression. To move the libcall, | |
703 | we must move those regs too. */ | |
704 | dependencies = libcall_other_reg (p, src); | |
705 | } | |
706 | } | |
707 | ||
03598dea JH |
708 | /* For parallels, add any possible uses to the depencies, as we can't move |
709 | the insn without resolving them first. */ | |
710 | if (GET_CODE (PATTERN (p)) == PARALLEL) | |
711 | { | |
712 | for (i = 0; i < XVECLEN (PATTERN (p), 0); i++) | |
713 | { | |
714 | rtx x = XVECEXP (PATTERN (p), 0, i); | |
715 | if (GET_CODE (x) == USE) | |
716 | dependencies = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0), dependencies); | |
717 | } | |
718 | } | |
719 | ||
b4ad7b23 RS |
720 | /* Don't try to optimize a register that was made |
721 | by loop-optimization for an inner loop. | |
722 | We don't know its life-span, so we can't compute the benefit. */ | |
723 | if (REGNO (SET_DEST (set)) >= max_reg_before_loop) | |
724 | ; | |
77854601 | 725 | else if (/* The register is used in basic blocks other |
95ca22f4 MM |
726 | than the one where it is set (meaning that |
727 | something after this point in the loop might | |
728 | depend on its value before the set). */ | |
77854601 MH |
729 | ! reg_in_basic_block_p (p, SET_DEST (set)) |
730 | /* And the set is not guaranteed to be executed one | |
731 | the loop starts, or the value before the set is | |
e6fcb60d | 732 | needed before the set occurs... |
77854601 MH |
733 | |
734 | ??? Note we have quadratic behaviour here, mitigated | |
735 | by the fact that the previous test will often fail for | |
736 | large loops. Rather than re-scanning the entire loop | |
737 | each time for register usage, we should build tables | |
738 | of the register usage and use them here instead. */ | |
739 | && (maybe_never | |
a2be868f | 740 | || loop_reg_used_before_p (loop, set, p))) |
e6fcb60d | 741 | /* It is unsafe to move the set. |
e1f7435e JL |
742 | |
743 | This code used to consider it OK to move a set of a variable | |
744 | which was not created by the user and not used in an exit test. | |
745 | That behavior is incorrect and was removed. */ | |
b4ad7b23 | 746 | ; |
0534b804 | 747 | else if ((tem = loop_invariant_p (loop, src)) |
b4ad7b23 | 748 | && (dependencies == 0 |
0534b804 | 749 | || (tem2 = loop_invariant_p (loop, dependencies)) != 0) |
f1d4ac80 | 750 | && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1 |
b4ad7b23 | 751 | || (tem1 |
e6fcb60d | 752 | = consec_sets_invariant_p |
0534b804 | 753 | (loop, SET_DEST (set), |
f1d4ac80 | 754 | regs->array[REGNO (SET_DEST (set))].set_in_loop, |
8deb8e2c | 755 | p))) |
b4ad7b23 RS |
756 | /* If the insn can cause a trap (such as divide by zero), |
757 | can't move it unless it's guaranteed to be executed | |
758 | once loop is entered. Even a function call might | |
759 | prevent the trap insn from being reached | |
760 | (since it might exit!) */ | |
761 | && ! ((maybe_never || call_passed) | |
762 | && may_trap_p (src))) | |
763 | { | |
764 | register struct movable *m; | |
765 | register int regno = REGNO (SET_DEST (set)); | |
766 | ||
767 | /* A potential lossage is where we have a case where two insns | |
768 | can be combined as long as they are both in the loop, but | |
769 | we move one of them outside the loop. For large loops, | |
770 | this can lose. The most common case of this is the address | |
e6fcb60d | 771 | of a function being called. |
b4ad7b23 RS |
772 | |
773 | Therefore, if this register is marked as being used exactly | |
774 | once if we are in a loop with calls (a "large loop"), see if | |
775 | we can replace the usage of this register with the source | |
e6fcb60d | 776 | of this SET. If we can, delete this insn. |
b4ad7b23 RS |
777 | |
778 | Don't do this if P has a REG_RETVAL note or if we have | |
779 | SMALL_REGISTER_CLASSES and SET_SRC is a hard register. */ | |
780 | ||
3c748bb6 | 781 | if (loop_info->has_call |
f1d4ac80 MH |
782 | && regs->array[regno].single_usage != 0 |
783 | && regs->array[regno].single_usage != const0_rtx | |
b1f21e0a MM |
784 | && REGNO_FIRST_UID (regno) == INSN_UID (p) |
785 | && (REGNO_LAST_UID (regno) | |
f1d4ac80 MH |
786 | == INSN_UID (regs->array[regno].single_usage)) |
787 | && regs->array[regno].set_in_loop == 1 | |
89cc6be3 | 788 | && GET_CODE (SET_SRC (set)) != ASM_OPERANDS |
b4ad7b23 | 789 | && ! side_effects_p (SET_SRC (set)) |
5fd8383e | 790 | && ! find_reg_note (p, REG_RETVAL, NULL_RTX) |
e9a25f70 JL |
791 | && (! SMALL_REGISTER_CLASSES |
792 | || (! (GET_CODE (SET_SRC (set)) == REG | |
793 | && REGNO (SET_SRC (set)) < FIRST_PSEUDO_REGISTER))) | |
b4ad7b23 RS |
794 | /* This test is not redundant; SET_SRC (set) might be |
795 | a call-clobbered register and the life of REGNO | |
796 | might span a call. */ | |
797 | && ! modified_between_p (SET_SRC (set), p, | |
f1d4ac80 MH |
798 | regs->array[regno].single_usage) |
799 | && no_labels_between_p (p, regs->array[regno].single_usage) | |
b4ad7b23 | 800 | && validate_replace_rtx (SET_DEST (set), SET_SRC (set), |
f1d4ac80 | 801 | regs->array[regno].single_usage)) |
b4ad7b23 | 802 | { |
5eeedd4d JW |
803 | /* Replace any usage in a REG_EQUAL note. Must copy the |
804 | new source, so that we don't get rtx sharing between the | |
805 | SET_SOURCE and REG_NOTES of insn p. */ | |
f1d4ac80 MH |
806 | REG_NOTES (regs->array[regno].single_usage) |
807 | = replace_rtx (REG_NOTES (regs->array[regno].single_usage), | |
5eeedd4d | 808 | SET_DEST (set), copy_rtx (SET_SRC (set))); |
e6fcb60d | 809 | |
b4ad7b23 RS |
810 | PUT_CODE (p, NOTE); |
811 | NOTE_LINE_NUMBER (p) = NOTE_INSN_DELETED; | |
812 | NOTE_SOURCE_FILE (p) = 0; | |
f1d4ac80 | 813 | regs->array[regno].set_in_loop = 0; |
b4ad7b23 RS |
814 | continue; |
815 | } | |
816 | ||
6ec92010 | 817 | m = (struct movable *) xmalloc (sizeof (struct movable)); |
b4ad7b23 RS |
818 | m->next = 0; |
819 | m->insn = p; | |
820 | m->set_src = src; | |
821 | m->dependencies = dependencies; | |
822 | m->set_dest = SET_DEST (set); | |
823 | m->force = 0; | |
f1d4ac80 | 824 | m->consec = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1; |
b4ad7b23 RS |
825 | m->done = 0; |
826 | m->forces = 0; | |
827 | m->partial = 0; | |
828 | m->move_insn = move_insn; | |
1a61c29f | 829 | m->move_insn_first = 0; |
5fd8383e | 830 | m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0); |
b4ad7b23 RS |
831 | m->savemode = VOIDmode; |
832 | m->regno = regno; | |
28353b16 JR |
833 | /* Set M->cond if either loop_invariant_p |
834 | or consec_sets_invariant_p returned 2 | |
835 | (only conditionally invariant). */ | |
b4ad7b23 | 836 | m->cond = ((tem | tem1 | tem2) > 1); |
b8056b46 | 837 | m->global = LOOP_REG_GLOBAL_P (loop, regno); |
b4ad7b23 | 838 | m->match = 0; |
b8056b46 | 839 | m->lifetime = LOOP_REG_LIFETIME (loop, regno); |
f1d4ac80 | 840 | m->savings = regs->array[regno].n_times_set; |
5fd8383e | 841 | if (find_reg_note (p, REG_RETVAL, NULL_RTX)) |
b4ad7b23 | 842 | m->savings += libcall_benefit (p); |
f1d4ac80 | 843 | regs->array[regno].set_in_loop = move_insn ? -2 : -1; |
b4ad7b23 | 844 | /* Add M to the end of the chain MOVABLES. */ |
6ec92010 | 845 | loop_movables_add (movables, m); |
b4ad7b23 RS |
846 | |
847 | if (m->consec > 0) | |
848 | { | |
1a61c29f JW |
849 | /* It is possible for the first instruction to have a |
850 | REG_EQUAL note but a non-invariant SET_SRC, so we must | |
851 | remember the status of the first instruction in case | |
852 | the last instruction doesn't have a REG_EQUAL note. */ | |
853 | m->move_insn_first = m->move_insn; | |
854 | ||
b4ad7b23 | 855 | /* Skip this insn, not checking REG_LIBCALL notes. */ |
202a34fd | 856 | p = next_nonnote_insn (p); |
b4ad7b23 RS |
857 | /* Skip the consecutive insns, if there are any. */ |
858 | p = skip_consec_insns (p, m->consec); | |
859 | /* Back up to the last insn of the consecutive group. */ | |
860 | p = prev_nonnote_insn (p); | |
861 | ||
862 | /* We must now reset m->move_insn, m->is_equiv, and possibly | |
863 | m->set_src to correspond to the effects of all the | |
864 | insns. */ | |
5fd8383e | 865 | temp = find_reg_note (p, REG_EQUIV, NULL_RTX); |
b4ad7b23 RS |
866 | if (temp) |
867 | m->set_src = XEXP (temp, 0), m->move_insn = 1; | |
868 | else | |
869 | { | |
5fd8383e | 870 | temp = find_reg_note (p, REG_EQUAL, NULL_RTX); |
b4ad7b23 RS |
871 | if (temp && CONSTANT_P (XEXP (temp, 0))) |
872 | m->set_src = XEXP (temp, 0), m->move_insn = 1; | |
873 | else | |
874 | m->move_insn = 0; | |
875 | ||
876 | } | |
5fd8383e | 877 | m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0); |
b4ad7b23 RS |
878 | } |
879 | } | |
880 | /* If this register is always set within a STRICT_LOW_PART | |
881 | or set to zero, then its high bytes are constant. | |
882 | So clear them outside the loop and within the loop | |
883 | just load the low bytes. | |
884 | We must check that the machine has an instruction to do so. | |
885 | Also, if the value loaded into the register | |
886 | depends on the same register, this cannot be done. */ | |
887 | else if (SET_SRC (set) == const0_rtx | |
888 | && GET_CODE (NEXT_INSN (p)) == INSN | |
889 | && (set1 = single_set (NEXT_INSN (p))) | |
890 | && GET_CODE (set1) == SET | |
891 | && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART) | |
892 | && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG) | |
893 | && (SUBREG_REG (XEXP (SET_DEST (set1), 0)) | |
894 | == SET_DEST (set)) | |
895 | && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1))) | |
896 | { | |
897 | register int regno = REGNO (SET_DEST (set)); | |
f1d4ac80 | 898 | if (regs->array[regno].set_in_loop == 2) |
b4ad7b23 RS |
899 | { |
900 | register struct movable *m; | |
021d7b26 | 901 | m = (struct movable *) xmalloc (sizeof (struct movable)); |
b4ad7b23 RS |
902 | m->next = 0; |
903 | m->insn = p; | |
904 | m->set_dest = SET_DEST (set); | |
905 | m->dependencies = 0; | |
906 | m->force = 0; | |
907 | m->consec = 0; | |
908 | m->done = 0; | |
909 | m->forces = 0; | |
910 | m->move_insn = 0; | |
8cf619da | 911 | m->move_insn_first = 0; |
b4ad7b23 RS |
912 | m->partial = 1; |
913 | /* If the insn may not be executed on some cycles, | |
914 | we can't clear the whole reg; clear just high part. | |
915 | Not even if the reg is used only within this loop. | |
916 | Consider this: | |
917 | while (1) | |
918 | while (s != t) { | |
919 | if (foo ()) x = *s; | |
920 | use (x); | |
921 | } | |
922 | Clearing x before the inner loop could clobber a value | |
923 | being saved from the last time around the outer loop. | |
924 | However, if the reg is not used outside this loop | |
925 | and all uses of the register are in the same | |
926 | basic block as the store, there is no problem. | |
927 | ||
928 | If this insn was made by loop, we don't know its | |
929 | INSN_LUID and hence must make a conservative | |
0f41302f | 930 | assumption. */ |
b4ad7b23 | 931 | m->global = (INSN_UID (p) >= max_uid_for_loop |
b8056b46 | 932 | || LOOP_REG_GLOBAL_P (loop, regno) |
b4ad7b23 | 933 | || (labels_in_range_p |
8529a489 | 934 | (p, REGNO_FIRST_LUID (regno)))); |
b4ad7b23 RS |
935 | if (maybe_never && m->global) |
936 | m->savemode = GET_MODE (SET_SRC (set1)); | |
937 | else | |
938 | m->savemode = VOIDmode; | |
939 | m->regno = regno; | |
940 | m->cond = 0; | |
941 | m->match = 0; | |
b8056b46 | 942 | m->lifetime = LOOP_REG_LIFETIME (loop, regno); |
b4ad7b23 | 943 | m->savings = 1; |
f1d4ac80 | 944 | regs->array[regno].set_in_loop = -1; |
b4ad7b23 | 945 | /* Add M to the end of the chain MOVABLES. */ |
6ec92010 | 946 | loop_movables_add (movables, m); |
b4ad7b23 RS |
947 | } |
948 | } | |
949 | } | |
950 | /* Past a call insn, we get to insns which might not be executed | |
951 | because the call might exit. This matters for insns that trap. | |
e76d9acb | 952 | Constant and pure call insns always return, so they don't count. */ |
24a28584 | 953 | else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p)) |
b4ad7b23 RS |
954 | call_passed = 1; |
955 | /* Past a label or a jump, we get to insns for which we | |
956 | can't count on whether or how many times they will be | |
957 | executed during each iteration. Therefore, we can | |
958 | only move out sets of trivial variables | |
959 | (those not used after the loop). */ | |
8516af93 | 960 | /* Similar code appears twice in strength_reduce. */ |
b4ad7b23 RS |
961 | else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN) |
962 | /* If we enter the loop in the middle, and scan around to the | |
963 | beginning, don't set maybe_never for that. This must be an | |
964 | unconditional jump, otherwise the code at the top of the | |
965 | loop might never be executed. Unconditional jumps are | |
966 | followed a by barrier then loop end. */ | |
a2be868f MH |
967 | && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top |
968 | && NEXT_INSN (NEXT_INSN (p)) == loop_end | |
7f1c097d | 969 | && any_uncondjump_p (p))) |
b4ad7b23 | 970 | maybe_never = 1; |
5ea7a4ae JW |
971 | else if (GET_CODE (p) == NOTE) |
972 | { | |
973 | /* At the virtual top of a converted loop, insns are again known to | |
974 | be executed: logically, the loop begins here even though the exit | |
975 | code has been duplicated. */ | |
976 | if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0) | |
977 | maybe_never = call_passed = 0; | |
978 | else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG) | |
979 | loop_depth++; | |
980 | else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END) | |
981 | loop_depth--; | |
982 | } | |
b4ad7b23 RS |
983 | } |
984 | ||
985 | /* If one movable subsumes another, ignore that other. */ | |
986 | ||
987 | ignore_some_movables (movables); | |
988 | ||
989 | /* For each movable insn, see if the reg that it loads | |
990 | leads when it dies right into another conditionally movable insn. | |
991 | If so, record that the second insn "forces" the first one, | |
992 | since the second can be moved only if the first is. */ | |
993 | ||
994 | force_movables (movables); | |
995 | ||
996 | /* See if there are multiple movable insns that load the same value. | |
997 | If there are, make all but the first point at the first one | |
998 | through the `match' field, and add the priorities of them | |
999 | all together as the priority of the first. */ | |
1000 | ||
1ecd860b | 1001 | combine_movables (movables, regs); |
e6fcb60d | 1002 | |
b4ad7b23 | 1003 | /* Now consider each movable insn to decide whether it is worth moving. |
f1d4ac80 | 1004 | Store 0 in regs->array[I].set_in_loop for each reg I that is moved. |
b4ad7b23 | 1005 | |
9dd07f87 R |
1006 | Generally this increases code size, so do not move moveables when |
1007 | optimizing for code size. */ | |
1008 | ||
1009 | if (! optimize_size) | |
ed5bb68d | 1010 | move_movables (loop, movables, threshold, insn_count); |
b4ad7b23 RS |
1011 | |
1012 | /* Now candidates that still are negative are those not moved. | |
f1d4ac80 | 1013 | Change regs->array[I].set_in_loop to indicate that those are not actually |
1ecd860b | 1014 | invariant. */ |
f1d4ac80 MH |
1015 | for (i = 0; i < regs->num; i++) |
1016 | if (regs->array[i].set_in_loop < 0) | |
1017 | regs->array[i].set_in_loop = regs->array[i].n_times_set; | |
b4ad7b23 | 1018 | |
3ec2b590 | 1019 | /* Now that we've moved some things out of the loop, we might be able to |
d6b44532 | 1020 | hoist even more memory references. */ |
1d7ae250 MH |
1021 | load_mems (loop); |
1022 | ||
1023 | /* Recalculate regs->array if load_mems has created new registers. */ | |
1024 | if (max_reg_num () > regs->num) | |
28680540 | 1025 | loop_regs_scan (loop, 0); |
4b259e3f | 1026 | |
0a326ec9 | 1027 | for (update_start = loop_start; |
0534b804 MH |
1028 | PREV_INSN (update_start) |
1029 | && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL; | |
0a326ec9 BS |
1030 | update_start = PREV_INSN (update_start)) |
1031 | ; | |
a2be868f | 1032 | update_end = NEXT_INSN (loop_end); |
0a326ec9 BS |
1033 | |
1034 | reg_scan_update (update_start, update_end, loop_max_reg); | |
1035 | loop_max_reg = max_reg_num (); | |
1036 | ||
b4ad7b23 | 1037 | if (flag_strength_reduce) |
45f97e2e | 1038 | { |
bb45bd5a JJ |
1039 | if (update_end && GET_CODE (update_end) == CODE_LABEL) |
1040 | /* Ensure our label doesn't go away. */ | |
1041 | LABEL_NUSES (update_end)++; | |
1042 | ||
28680540 | 1043 | strength_reduce (loop, flags); |
0a326ec9 BS |
1044 | |
1045 | reg_scan_update (update_start, update_end, loop_max_reg); | |
1046 | loop_max_reg = max_reg_num (); | |
bb45bd5a JJ |
1047 | |
1048 | if (update_end && GET_CODE (update_end) == CODE_LABEL | |
1049 | && --LABEL_NUSES (update_end) == 0) | |
1050 | delete_insn (update_end); | |
45f97e2e | 1051 | } |
8deb8e2c | 1052 | |
6ec92010 MH |
1053 | |
1054 | /* The movable information is required for strength reduction. */ | |
1055 | loop_movables_free (movables); | |
1056 | ||
f1d4ac80 MH |
1057 | free (regs->array); |
1058 | regs->array = 0; | |
1059 | regs->num = 0; | |
b4ad7b23 RS |
1060 | } |
1061 | \f | |
1062 | /* Add elements to *OUTPUT to record all the pseudo-regs | |
1063 | mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */ | |
1064 | ||
1065 | void | |
1066 | record_excess_regs (in_this, not_in_this, output) | |
1067 | rtx in_this, not_in_this; | |
1068 | rtx *output; | |
1069 | { | |
1070 | enum rtx_code code; | |
6f7d635c | 1071 | const char *fmt; |
b4ad7b23 RS |
1072 | int i; |
1073 | ||
1074 | code = GET_CODE (in_this); | |
1075 | ||
1076 | switch (code) | |
1077 | { | |
1078 | case PC: | |
1079 | case CC0: | |
1080 | case CONST_INT: | |
1081 | case CONST_DOUBLE: | |
1082 | case CONST: | |
1083 | case SYMBOL_REF: | |
1084 | case LABEL_REF: | |
1085 | return; | |
1086 | ||
1087 | case REG: | |
1088 | if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER | |
1089 | && ! reg_mentioned_p (in_this, not_in_this)) | |
38a448ca | 1090 | *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output); |
b4ad7b23 | 1091 | return; |
e6fcb60d | 1092 | |
e9a25f70 JL |
1093 | default: |
1094 | break; | |
b4ad7b23 RS |
1095 | } |
1096 | ||
1097 | fmt = GET_RTX_FORMAT (code); | |
1098 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
1099 | { | |
1100 | int j; | |
1101 | ||
1102 | switch (fmt[i]) | |
1103 | { | |
1104 | case 'E': | |
1105 | for (j = 0; j < XVECLEN (in_this, i); j++) | |
1106 | record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output); | |
1107 | break; | |
1108 | ||
1109 | case 'e': | |
1110 | record_excess_regs (XEXP (in_this, i), not_in_this, output); | |
1111 | break; | |
1112 | } | |
1113 | } | |
1114 | } | |
1115 | \f | |
1116 | /* Check what regs are referred to in the libcall block ending with INSN, | |
1117 | aside from those mentioned in the equivalent value. | |
1118 | If there are none, return 0. | |
1119 | If there are one or more, return an EXPR_LIST containing all of them. */ | |
1120 | ||
89d3d442 | 1121 | rtx |
b4ad7b23 RS |
1122 | libcall_other_reg (insn, equiv) |
1123 | rtx insn, equiv; | |
1124 | { | |
5fd8383e | 1125 | rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX); |
b4ad7b23 RS |
1126 | rtx p = XEXP (note, 0); |
1127 | rtx output = 0; | |
1128 | ||
1129 | /* First, find all the regs used in the libcall block | |
1130 | that are not mentioned as inputs to the result. */ | |
1131 | ||
1132 | while (p != insn) | |
1133 | { | |
1134 | if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN | |
1135 | || GET_CODE (p) == CALL_INSN) | |
1136 | record_excess_regs (PATTERN (p), equiv, &output); | |
1137 | p = NEXT_INSN (p); | |
1138 | } | |
1139 | ||
1140 | return output; | |
1141 | } | |
1142 | \f | |
1143 | /* Return 1 if all uses of REG | |
1144 | are between INSN and the end of the basic block. */ | |
1145 | ||
e6fcb60d | 1146 | static int |
b4ad7b23 RS |
1147 | reg_in_basic_block_p (insn, reg) |
1148 | rtx insn, reg; | |
1149 | { | |
1150 | int regno = REGNO (reg); | |
1151 | rtx p; | |
1152 | ||
b1f21e0a | 1153 | if (REGNO_FIRST_UID (regno) != INSN_UID (insn)) |
b4ad7b23 RS |
1154 | return 0; |
1155 | ||
1156 | /* Search this basic block for the already recorded last use of the reg. */ | |
1157 | for (p = insn; p; p = NEXT_INSN (p)) | |
1158 | { | |
1159 | switch (GET_CODE (p)) | |
1160 | { | |
1161 | case NOTE: | |
1162 | break; | |
1163 | ||
1164 | case INSN: | |
1165 | case CALL_INSN: | |
1166 | /* Ordinary insn: if this is the last use, we win. */ | |
b1f21e0a | 1167 | if (REGNO_LAST_UID (regno) == INSN_UID (p)) |
b4ad7b23 RS |
1168 | return 1; |
1169 | break; | |
1170 | ||
1171 | case JUMP_INSN: | |
1172 | /* Jump insn: if this is the last use, we win. */ | |
b1f21e0a | 1173 | if (REGNO_LAST_UID (regno) == INSN_UID (p)) |
b4ad7b23 RS |
1174 | return 1; |
1175 | /* Otherwise, it's the end of the basic block, so we lose. */ | |
1176 | return 0; | |
1177 | ||
1178 | case CODE_LABEL: | |
1179 | case BARRIER: | |
1180 | /* It's the end of the basic block, so we lose. */ | |
1181 | return 0; | |
e6fcb60d | 1182 | |
e9a25f70 JL |
1183 | default: |
1184 | break; | |
b4ad7b23 RS |
1185 | } |
1186 | } | |
1187 | ||
035a6890 R |
1188 | /* The "last use" that was recorded can't be found after the first |
1189 | use. This can happen when the last use was deleted while | |
1190 | processing an inner loop, this inner loop was then completely | |
1191 | unrolled, and the outer loop is always exited after the inner loop, | |
1192 | so that everything after the first use becomes a single basic block. */ | |
1193 | return 1; | |
b4ad7b23 RS |
1194 | } |
1195 | \f | |
1196 | /* Compute the benefit of eliminating the insns in the block whose | |
1197 | last insn is LAST. This may be a group of insns used to compute a | |
1198 | value directly or can contain a library call. */ | |
1199 | ||
1200 | static int | |
1201 | libcall_benefit (last) | |
1202 | rtx last; | |
1203 | { | |
1204 | rtx insn; | |
1205 | int benefit = 0; | |
1206 | ||
5fd8383e | 1207 | for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0); |
b4ad7b23 RS |
1208 | insn != last; insn = NEXT_INSN (insn)) |
1209 | { | |
1210 | if (GET_CODE (insn) == CALL_INSN) | |
1211 | benefit += 10; /* Assume at least this many insns in a library | |
0f41302f | 1212 | routine. */ |
b4ad7b23 RS |
1213 | else if (GET_CODE (insn) == INSN |
1214 | && GET_CODE (PATTERN (insn)) != USE | |
1215 | && GET_CODE (PATTERN (insn)) != CLOBBER) | |
1216 | benefit++; | |
1217 | } | |
1218 | ||
1219 | return benefit; | |
1220 | } | |
1221 | \f | |
1222 | /* Skip COUNT insns from INSN, counting library calls as 1 insn. */ | |
1223 | ||
1224 | static rtx | |
1225 | skip_consec_insns (insn, count) | |
1226 | rtx insn; | |
1227 | int count; | |
1228 | { | |
1229 | for (; count > 0; count--) | |
1230 | { | |
1231 | rtx temp; | |
1232 | ||
1233 | /* If first insn of libcall sequence, skip to end. */ | |
e6fcb60d | 1234 | /* Do this at start of loop, since INSN is guaranteed to |
b4ad7b23 RS |
1235 | be an insn here. */ |
1236 | if (GET_CODE (insn) != NOTE | |
5fd8383e | 1237 | && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX))) |
b4ad7b23 RS |
1238 | insn = XEXP (temp, 0); |
1239 | ||
e6fcb60d KH |
1240 | do |
1241 | insn = NEXT_INSN (insn); | |
b4ad7b23 RS |
1242 | while (GET_CODE (insn) == NOTE); |
1243 | } | |
1244 | ||
1245 | return insn; | |
1246 | } | |
1247 | ||
1248 | /* Ignore any movable whose insn falls within a libcall | |
1249 | which is part of another movable. | |
1250 | We make use of the fact that the movable for the libcall value | |
1251 | was made later and so appears later on the chain. */ | |
1252 | ||
1253 | static void | |
1254 | ignore_some_movables (movables) | |
6ec92010 | 1255 | struct loop_movables *movables; |
b4ad7b23 RS |
1256 | { |
1257 | register struct movable *m, *m1; | |
1258 | ||
02055ad6 | 1259 | for (m = movables->head; m; m = m->next) |
b4ad7b23 RS |
1260 | { |
1261 | /* Is this a movable for the value of a libcall? */ | |
5fd8383e | 1262 | rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX); |
b4ad7b23 RS |
1263 | if (note) |
1264 | { | |
1265 | rtx insn; | |
1266 | /* Check for earlier movables inside that range, | |
1267 | and mark them invalid. We cannot use LUIDs here because | |
1268 | insns created by loop.c for prior loops don't have LUIDs. | |
1269 | Rather than reject all such insns from movables, we just | |
1270 | explicitly check each insn in the libcall (since invariant | |
1271 | libcalls aren't that common). */ | |
1272 | for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn)) | |
02055ad6 | 1273 | for (m1 = movables->head; m1 != m; m1 = m1->next) |
b4ad7b23 RS |
1274 | if (m1->insn == insn) |
1275 | m1->done = 1; | |
1276 | } | |
1277 | } | |
e6fcb60d | 1278 | } |
b4ad7b23 RS |
1279 | |
1280 | /* For each movable insn, see if the reg that it loads | |
1281 | leads when it dies right into another conditionally movable insn. | |
1282 | If so, record that the second insn "forces" the first one, | |
1283 | since the second can be moved only if the first is. */ | |
1284 | ||
1285 | static void | |
1286 | force_movables (movables) | |
6ec92010 | 1287 | struct loop_movables *movables; |
b4ad7b23 RS |
1288 | { |
1289 | register struct movable *m, *m1; | |
02055ad6 | 1290 | for (m1 = movables->head; m1; m1 = m1->next) |
b4ad7b23 RS |
1291 | /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */ |
1292 | if (!m1->partial && !m1->done) | |
1293 | { | |
1294 | int regno = m1->regno; | |
1295 | for (m = m1->next; m; m = m->next) | |
1296 | /* ??? Could this be a bug? What if CSE caused the | |
1297 | register of M1 to be used after this insn? | |
1298 | Since CSE does not update regno_last_uid, | |
1299 | this insn M->insn might not be where it dies. | |
1300 | But very likely this doesn't matter; what matters is | |
1301 | that M's reg is computed from M1's reg. */ | |
b1f21e0a | 1302 | if (INSN_UID (m->insn) == REGNO_LAST_UID (regno) |
b4ad7b23 RS |
1303 | && !m->done) |
1304 | break; | |
1305 | if (m != 0 && m->set_src == m1->set_dest | |
1306 | /* If m->consec, m->set_src isn't valid. */ | |
1307 | && m->consec == 0) | |
1308 | m = 0; | |
1309 | ||
1310 | /* Increase the priority of the moving the first insn | |
1311 | since it permits the second to be moved as well. */ | |
1312 | if (m != 0) | |
1313 | { | |
1314 | m->forces = m1; | |
1315 | m1->lifetime += m->lifetime; | |
3875b31d | 1316 | m1->savings += m->savings; |
b4ad7b23 RS |
1317 | } |
1318 | } | |
1319 | } | |
1320 | \f | |
1321 | /* Find invariant expressions that are equal and can be combined into | |
1322 | one register. */ | |
1323 | ||
1324 | static void | |
1ecd860b | 1325 | combine_movables (movables, regs) |
6ec92010 | 1326 | struct loop_movables *movables; |
1ecd860b | 1327 | struct loop_regs *regs; |
b4ad7b23 RS |
1328 | { |
1329 | register struct movable *m; | |
ed5bb68d | 1330 | char *matched_regs = (char *) xmalloc (regs->num); |
b4ad7b23 RS |
1331 | enum machine_mode mode; |
1332 | ||
1333 | /* Regs that are set more than once are not allowed to match | |
1334 | or be matched. I'm no longer sure why not. */ | |
1335 | /* Perhaps testing m->consec_sets would be more appropriate here? */ | |
1336 | ||
02055ad6 | 1337 | for (m = movables->head; m; m = m->next) |
f1d4ac80 | 1338 | if (m->match == 0 && regs->array[m->regno].n_times_set == 1 |
e6fcb60d | 1339 | && !m->partial) |
b4ad7b23 RS |
1340 | { |
1341 | register struct movable *m1; | |
1342 | int regno = m->regno; | |
b4ad7b23 | 1343 | |
961192e1 | 1344 | memset (matched_regs, 0, regs->num); |
b4ad7b23 RS |
1345 | matched_regs[regno] = 1; |
1346 | ||
88016fb7 DE |
1347 | /* We want later insns to match the first one. Don't make the first |
1348 | one match any later ones. So start this loop at m->next. */ | |
1349 | for (m1 = m->next; m1; m1 = m1->next) | |
f1d4ac80 MH |
1350 | if (m != m1 && m1->match == 0 |
1351 | && regs->array[m1->regno].n_times_set == 1 | |
b4ad7b23 RS |
1352 | /* A reg used outside the loop mustn't be eliminated. */ |
1353 | && !m1->global | |
1354 | /* A reg used for zero-extending mustn't be eliminated. */ | |
1355 | && !m1->partial | |
1356 | && (matched_regs[m1->regno] | |
1357 | || | |
1358 | ( | |
1359 | /* Can combine regs with different modes loaded from the | |
1360 | same constant only if the modes are the same or | |
1361 | if both are integer modes with M wider or the same | |
1362 | width as M1. The check for integer is redundant, but | |
1363 | safe, since the only case of differing destination | |
1364 | modes with equal sources is when both sources are | |
1365 | VOIDmode, i.e., CONST_INT. */ | |
1366 | (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest) | |
1367 | || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT | |
1368 | && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT | |
1369 | && (GET_MODE_BITSIZE (GET_MODE (m->set_dest)) | |
1370 | >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest))))) | |
1371 | /* See if the source of M1 says it matches M. */ | |
1372 | && ((GET_CODE (m1->set_src) == REG | |
1373 | && matched_regs[REGNO (m1->set_src)]) | |
1374 | || rtx_equal_for_loop_p (m->set_src, m1->set_src, | |
1ecd860b | 1375 | movables, regs)))) |
b4ad7b23 RS |
1376 | && ((m->dependencies == m1->dependencies) |
1377 | || rtx_equal_p (m->dependencies, m1->dependencies))) | |
1378 | { | |
1379 | m->lifetime += m1->lifetime; | |
1380 | m->savings += m1->savings; | |
1381 | m1->done = 1; | |
1382 | m1->match = m; | |
1383 | matched_regs[m1->regno] = 1; | |
1384 | } | |
1385 | } | |
1386 | ||
1387 | /* Now combine the regs used for zero-extension. | |
1388 | This can be done for those not marked `global' | |
1389 | provided their lives don't overlap. */ | |
1390 | ||
1391 | for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode; | |
1392 | mode = GET_MODE_WIDER_MODE (mode)) | |
1393 | { | |
1394 | register struct movable *m0 = 0; | |
1395 | ||
1396 | /* Combine all the registers for extension from mode MODE. | |
1397 | Don't combine any that are used outside this loop. */ | |
02055ad6 | 1398 | for (m = movables->head; m; m = m->next) |
b4ad7b23 RS |
1399 | if (m->partial && ! m->global |
1400 | && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn))))) | |
1401 | { | |
1402 | register struct movable *m1; | |
8529a489 MH |
1403 | int first = REGNO_FIRST_LUID (m->regno); |
1404 | int last = REGNO_LAST_LUID (m->regno); | |
b4ad7b23 RS |
1405 | |
1406 | if (m0 == 0) | |
1407 | { | |
1408 | /* First one: don't check for overlap, just record it. */ | |
1409 | m0 = m; | |
e6fcb60d | 1410 | continue; |
b4ad7b23 RS |
1411 | } |
1412 | ||
1413 | /* Make sure they extend to the same mode. | |
1414 | (Almost always true.) */ | |
1415 | if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest)) | |
e6fcb60d | 1416 | continue; |
b4ad7b23 RS |
1417 | |
1418 | /* We already have one: check for overlap with those | |
1419 | already combined together. */ | |
02055ad6 | 1420 | for (m1 = movables->head; m1 != m; m1 = m1->next) |
b4ad7b23 | 1421 | if (m1 == m0 || (m1->partial && m1->match == m0)) |
8529a489 MH |
1422 | if (! (REGNO_FIRST_LUID (m1->regno) > last |
1423 | || REGNO_LAST_LUID (m1->regno) < first)) | |
b4ad7b23 RS |
1424 | goto overlap; |
1425 | ||
1426 | /* No overlap: we can combine this with the others. */ | |
1427 | m0->lifetime += m->lifetime; | |
1428 | m0->savings += m->savings; | |
1429 | m->done = 1; | |
1430 | m->match = m0; | |
1431 | ||
e6fcb60d KH |
1432 | overlap: |
1433 | ; | |
b4ad7b23 RS |
1434 | } |
1435 | } | |
4da896b2 MM |
1436 | |
1437 | /* Clean up. */ | |
1438 | free (matched_regs); | |
b4ad7b23 | 1439 | } |
28680540 MM |
1440 | |
1441 | /* Returns the number of movable instructions in LOOP that were not | |
1442 | moved outside the loop. */ | |
1443 | ||
1444 | static int | |
1445 | num_unmoved_movables (loop) | |
1446 | const struct loop *loop; | |
1447 | { | |
1448 | int num = 0; | |
1449 | struct movable *m; | |
1450 | ||
1451 | for (m = LOOP_MOVABLES (loop)->head; m; m = m->next) | |
1452 | if (!m->done) | |
1453 | ++num; | |
1454 | ||
1455 | return num; | |
1456 | } | |
1457 | ||
b4ad7b23 RS |
1458 | \f |
1459 | /* Return 1 if regs X and Y will become the same if moved. */ | |
1460 | ||
1461 | static int | |
1462 | regs_match_p (x, y, movables) | |
1463 | rtx x, y; | |
6ec92010 | 1464 | struct loop_movables *movables; |
b4ad7b23 | 1465 | { |
770ae6cc RK |
1466 | unsigned int xn = REGNO (x); |
1467 | unsigned int yn = REGNO (y); | |
b4ad7b23 RS |
1468 | struct movable *mx, *my; |
1469 | ||
02055ad6 | 1470 | for (mx = movables->head; mx; mx = mx->next) |
b4ad7b23 RS |
1471 | if (mx->regno == xn) |
1472 | break; | |
1473 | ||
02055ad6 | 1474 | for (my = movables->head; my; my = my->next) |
b4ad7b23 RS |
1475 | if (my->regno == yn) |
1476 | break; | |
1477 | ||
1478 | return (mx && my | |
1479 | && ((mx->match == my->match && mx->match != 0) | |
1480 | || mx->match == my | |
1481 | || mx == my->match)); | |
1482 | } | |
1483 | ||
1484 | /* Return 1 if X and Y are identical-looking rtx's. | |
1485 | This is the Lisp function EQUAL for rtx arguments. | |
1486 | ||
1487 | If two registers are matching movables or a movable register and an | |
1488 | equivalent constant, consider them equal. */ | |
1489 | ||
1490 | static int | |
1ecd860b | 1491 | rtx_equal_for_loop_p (x, y, movables, regs) |
b4ad7b23 | 1492 | rtx x, y; |
6ec92010 | 1493 | struct loop_movables *movables; |
ed5bb68d | 1494 | struct loop_regs *regs; |
b4ad7b23 RS |
1495 | { |
1496 | register int i; | |
1497 | register int j; | |
1498 | register struct movable *m; | |
1499 | register enum rtx_code code; | |
6f7d635c | 1500 | register const char *fmt; |
b4ad7b23 RS |
1501 | |
1502 | if (x == y) | |
1503 | return 1; | |
1504 | if (x == 0 || y == 0) | |
1505 | return 0; | |
1506 | ||
1507 | code = GET_CODE (x); | |
1508 | ||
1509 | /* If we have a register and a constant, they may sometimes be | |
1510 | equal. */ | |
f1d4ac80 | 1511 | if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2 |
b4ad7b23 | 1512 | && CONSTANT_P (y)) |
b1a0c816 | 1513 | { |
02055ad6 | 1514 | for (m = movables->head; m; m = m->next) |
b1a0c816 JL |
1515 | if (m->move_insn && m->regno == REGNO (x) |
1516 | && rtx_equal_p (m->set_src, y)) | |
1517 | return 1; | |
1518 | } | |
f1d4ac80 | 1519 | else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2 |
b4ad7b23 | 1520 | && CONSTANT_P (x)) |
b1a0c816 | 1521 | { |
02055ad6 | 1522 | for (m = movables->head; m; m = m->next) |
b1a0c816 JL |
1523 | if (m->move_insn && m->regno == REGNO (y) |
1524 | && rtx_equal_p (m->set_src, x)) | |
1525 | return 1; | |
1526 | } | |
b4ad7b23 RS |
1527 | |
1528 | /* Otherwise, rtx's of different codes cannot be equal. */ | |
1529 | if (code != GET_CODE (y)) | |
1530 | return 0; | |
1531 | ||
1532 | /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. | |
1533 | (REG:SI x) and (REG:HI x) are NOT equivalent. */ | |
1534 | ||
1535 | if (GET_MODE (x) != GET_MODE (y)) | |
1536 | return 0; | |
1537 | ||
1538 | /* These three types of rtx's can be compared nonrecursively. */ | |
1539 | if (code == REG) | |
1540 | return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables)); | |
1541 | ||
1542 | if (code == LABEL_REF) | |
1543 | return XEXP (x, 0) == XEXP (y, 0); | |
1544 | if (code == SYMBOL_REF) | |
1545 | return XSTR (x, 0) == XSTR (y, 0); | |
1546 | ||
1547 | /* Compare the elements. If any pair of corresponding elements | |
1548 | fail to match, return 0 for the whole things. */ | |
1549 | ||
1550 | fmt = GET_RTX_FORMAT (code); | |
1551 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
1552 | { | |
1553 | switch (fmt[i]) | |
1554 | { | |
5fd8383e RK |
1555 | case 'w': |
1556 | if (XWINT (x, i) != XWINT (y, i)) | |
1557 | return 0; | |
1558 | break; | |
1559 | ||
b4ad7b23 RS |
1560 | case 'i': |
1561 | if (XINT (x, i) != XINT (y, i)) | |
1562 | return 0; | |
1563 | break; | |
1564 | ||
1565 | case 'E': | |
1566 | /* Two vectors must have the same length. */ | |
1567 | if (XVECLEN (x, i) != XVECLEN (y, i)) | |
1568 | return 0; | |
1569 | ||
1570 | /* And the corresponding elements must match. */ | |
1571 | for (j = 0; j < XVECLEN (x, i); j++) | |
1ecd860b MH |
1572 | if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), |
1573 | movables, regs) == 0) | |
b4ad7b23 RS |
1574 | return 0; |
1575 | break; | |
1576 | ||
1577 | case 'e': | |
ed5bb68d MH |
1578 | if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs) |
1579 | == 0) | |
b4ad7b23 RS |
1580 | return 0; |
1581 | break; | |
1582 | ||
1583 | case 's': | |
1584 | if (strcmp (XSTR (x, i), XSTR (y, i))) | |
1585 | return 0; | |
1586 | break; | |
1587 | ||
1588 | case 'u': | |
1589 | /* These are just backpointers, so they don't matter. */ | |
1590 | break; | |
1591 | ||
1592 | case '0': | |
1593 | break; | |
1594 | ||
1595 | /* It is believed that rtx's at this level will never | |
1596 | contain anything but integers and other rtx's, | |
1597 | except for within LABEL_REFs and SYMBOL_REFs. */ | |
1598 | default: | |
1599 | abort (); | |
1600 | } | |
1601 | } | |
1602 | return 1; | |
1603 | } | |
1604 | \f | |
c160c628 | 1605 | /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all |
5b1ef594 | 1606 | insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL |
dc297297 | 1607 | references is incremented once for each added note. */ |
c160c628 RK |
1608 | |
1609 | static void | |
1610 | add_label_notes (x, insns) | |
1611 | rtx x; | |
1612 | rtx insns; | |
1613 | { | |
1614 | enum rtx_code code = GET_CODE (x); | |
7dcd3836 | 1615 | int i, j; |
6f7d635c | 1616 | const char *fmt; |
c160c628 RK |
1617 | rtx insn; |
1618 | ||
82d00367 | 1619 | if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x)) |
c160c628 | 1620 | { |
6b3603c2 JL |
1621 | /* This code used to ignore labels that referred to dispatch tables to |
1622 | avoid flow generating (slighly) worse code. | |
1623 | ||
1624 | We no longer ignore such label references (see LABEL_REF handling in | |
1625 | mark_jump_label for additional information). */ | |
1626 | for (insn = insns; insn; insn = NEXT_INSN (insn)) | |
1627 | if (reg_mentioned_p (XEXP (x, 0), insn)) | |
5b1ef594 JDA |
1628 | { |
1629 | REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_LABEL, XEXP (x, 0), | |
1630 | REG_NOTES (insn)); | |
1631 | if (LABEL_P (XEXP (x, 0))) | |
1632 | LABEL_NUSES (XEXP (x, 0))++; | |
1633 | } | |
c160c628 RK |
1634 | } |
1635 | ||
1636 | fmt = GET_RTX_FORMAT (code); | |
1637 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
7dcd3836 RK |
1638 | { |
1639 | if (fmt[i] == 'e') | |
1640 | add_label_notes (XEXP (x, i), insns); | |
1641 | else if (fmt[i] == 'E') | |
1642 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
1643 | add_label_notes (XVECEXP (x, i, j), insns); | |
1644 | } | |
c160c628 RK |
1645 | } |
1646 | \f | |
b4ad7b23 RS |
1647 | /* Scan MOVABLES, and move the insns that deserve to be moved. |
1648 | If two matching movables are combined, replace one reg with the | |
1649 | other throughout. */ | |
1650 | ||
1651 | static void | |
ed5bb68d | 1652 | move_movables (loop, movables, threshold, insn_count) |
0534b804 | 1653 | struct loop *loop; |
6ec92010 | 1654 | struct loop_movables *movables; |
b4ad7b23 RS |
1655 | int threshold; |
1656 | int insn_count; | |
b4ad7b23 | 1657 | { |
1ecd860b | 1658 | struct loop_regs *regs = LOOP_REGS (loop); |
ed5bb68d | 1659 | int nregs = regs->num; |
b4ad7b23 RS |
1660 | rtx new_start = 0; |
1661 | register struct movable *m; | |
1662 | register rtx p; | |
0534b804 MH |
1663 | rtx loop_start = loop->start; |
1664 | rtx loop_end = loop->end; | |
b4ad7b23 RS |
1665 | /* Map of pseudo-register replacements to handle combining |
1666 | when we move several insns that load the same value | |
1667 | into different pseudo-registers. */ | |
4da896b2 MM |
1668 | rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx)); |
1669 | char *already_moved = (char *) xcalloc (nregs, sizeof (char)); | |
b4ad7b23 | 1670 | |
02055ad6 | 1671 | for (m = movables->head; m; m = m->next) |
b4ad7b23 RS |
1672 | { |
1673 | /* Describe this movable insn. */ | |
1674 | ||
1675 | if (loop_dump_stream) | |
1676 | { | |
1677 | fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ", | |
1678 | INSN_UID (m->insn), m->regno, m->lifetime); | |
1679 | if (m->consec > 0) | |
1680 | fprintf (loop_dump_stream, "consec %d, ", m->consec); | |
1681 | if (m->cond) | |
1682 | fprintf (loop_dump_stream, "cond "); | |
1683 | if (m->force) | |
1684 | fprintf (loop_dump_stream, "force "); | |
1685 | if (m->global) | |
1686 | fprintf (loop_dump_stream, "global "); | |
1687 | if (m->done) | |
1688 | fprintf (loop_dump_stream, "done "); | |
1689 | if (m->move_insn) | |
1690 | fprintf (loop_dump_stream, "move-insn "); | |
1691 | if (m->match) | |
1692 | fprintf (loop_dump_stream, "matches %d ", | |
1693 | INSN_UID (m->match->insn)); | |
1694 | if (m->forces) | |
1695 | fprintf (loop_dump_stream, "forces %d ", | |
1696 | INSN_UID (m->forces->insn)); | |
1697 | } | |
1698 | ||
b4ad7b23 RS |
1699 | /* Ignore the insn if it's already done (it matched something else). |
1700 | Otherwise, see if it is now safe to move. */ | |
1701 | ||
1702 | if (!m->done | |
1703 | && (! m->cond | |
0534b804 | 1704 | || (1 == loop_invariant_p (loop, m->set_src) |
b4ad7b23 | 1705 | && (m->dependencies == 0 |
0534b804 | 1706 | || 1 == loop_invariant_p (loop, m->dependencies)) |
b4ad7b23 | 1707 | && (m->consec == 0 |
0534b804 | 1708 | || 1 == consec_sets_invariant_p (loop, m->set_dest, |
b4ad7b23 RS |
1709 | m->consec + 1, |
1710 | m->insn)))) | |
1711 | && (! m->forces || m->forces->done)) | |
1712 | { | |
1713 | register int regno; | |
1714 | register rtx p; | |
1715 | int savings = m->savings; | |
1716 | ||
1717 | /* We have an insn that is safe to move. | |
1718 | Compute its desirability. */ | |
1719 | ||
1720 | p = m->insn; | |
1721 | regno = m->regno; | |
1722 | ||
1723 | if (loop_dump_stream) | |
1724 | fprintf (loop_dump_stream, "savings %d ", savings); | |
1725 | ||
f1d4ac80 | 1726 | if (regs->array[regno].moved_once && loop_dump_stream) |
877ca132 | 1727 | fprintf (loop_dump_stream, "halved since already moved "); |
b4ad7b23 RS |
1728 | |
1729 | /* An insn MUST be moved if we already moved something else | |
1730 | which is safe only if this one is moved too: that is, | |
1731 | if already_moved[REGNO] is nonzero. */ | |
1732 | ||
1733 | /* An insn is desirable to move if the new lifetime of the | |
1734 | register is no more than THRESHOLD times the old lifetime. | |
1735 | If it's not desirable, it means the loop is so big | |
1736 | that moving won't speed things up much, | |
1737 | and it is liable to make register usage worse. */ | |
1738 | ||
1739 | /* It is also desirable to move if it can be moved at no | |
1740 | extra cost because something else was already moved. */ | |
1741 | ||
1742 | if (already_moved[regno] | |
e5eb27e5 | 1743 | || flag_move_all_movables |
877ca132 | 1744 | || (threshold * savings * m->lifetime) >= |
f1d4ac80 | 1745 | (regs->array[regno].moved_once ? insn_count * 2 : insn_count) |
b4ad7b23 | 1746 | || (m->forces && m->forces->done |
f1d4ac80 | 1747 | && regs->array[m->forces->regno].n_times_set == 1)) |
b4ad7b23 RS |
1748 | { |
1749 | int count; | |
1750 | register struct movable *m1; | |
6a651371 | 1751 | rtx first = NULL_RTX; |
b4ad7b23 RS |
1752 | |
1753 | /* Now move the insns that set the reg. */ | |
1754 | ||
1755 | if (m->partial && m->match) | |
1756 | { | |
1757 | rtx newpat, i1; | |
1758 | rtx r1, r2; | |
1759 | /* Find the end of this chain of matching regs. | |
1760 | Thus, we load each reg in the chain from that one reg. | |
1761 | And that reg is loaded with 0 directly, | |
1762 | since it has ->match == 0. */ | |
1763 | for (m1 = m; m1->match; m1 = m1->match); | |
1764 | newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)), | |
1765 | SET_DEST (PATTERN (m1->insn))); | |
804a718a | 1766 | i1 = loop_insn_hoist (loop, newpat); |
b4ad7b23 RS |
1767 | |
1768 | /* Mark the moved, invariant reg as being allowed to | |
1769 | share a hard reg with the other matching invariant. */ | |
1770 | REG_NOTES (i1) = REG_NOTES (m->insn); | |
1771 | r1 = SET_DEST (PATTERN (m->insn)); | |
1772 | r2 = SET_DEST (PATTERN (m1->insn)); | |
38a448ca RH |
1773 | regs_may_share |
1774 | = gen_rtx_EXPR_LIST (VOIDmode, r1, | |
1775 | gen_rtx_EXPR_LIST (VOIDmode, r2, | |
1776 | regs_may_share)); | |
b4ad7b23 RS |
1777 | delete_insn (m->insn); |
1778 | ||
1779 | if (new_start == 0) | |
1780 | new_start = i1; | |
1781 | ||
1782 | if (loop_dump_stream) | |
1783 | fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1)); | |
1784 | } | |
1785 | /* If we are to re-generate the item being moved with a | |
1786 | new move insn, first delete what we have and then emit | |
1787 | the move insn before the loop. */ | |
1788 | else if (m->move_insn) | |
1789 | { | |
804a718a | 1790 | rtx i1, temp, seq; |
b4ad7b23 RS |
1791 | |
1792 | for (count = m->consec; count >= 0; count--) | |
1793 | { | |
1794 | /* If this is the first insn of a library call sequence, | |
1795 | skip to the end. */ | |
1796 | if (GET_CODE (p) != NOTE | |
5fd8383e | 1797 | && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX))) |
b4ad7b23 RS |
1798 | p = XEXP (temp, 0); |
1799 | ||
1800 | /* If this is the last insn of a libcall sequence, then | |
1801 | delete every insn in the sequence except the last. | |
1802 | The last insn is handled in the normal manner. */ | |
1803 | if (GET_CODE (p) != NOTE | |
5fd8383e | 1804 | && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX))) |
b4ad7b23 RS |
1805 | { |
1806 | temp = XEXP (temp, 0); | |
1807 | while (temp != p) | |
1808 | temp = delete_insn (temp); | |
1809 | } | |
1810 | ||
9655bf95 | 1811 | temp = p; |
b4ad7b23 | 1812 | p = delete_insn (p); |
9655bf95 DM |
1813 | |
1814 | /* simplify_giv_expr expects that it can walk the insns | |
1815 | at m->insn forwards and see this old sequence we are | |
1816 | tossing here. delete_insn does preserve the next | |
1817 | pointers, but when we skip over a NOTE we must fix | |
1818 | it up. Otherwise that code walks into the non-deleted | |
1819 | insn stream. */ | |
dd202606 | 1820 | while (p && GET_CODE (p) == NOTE) |
9655bf95 | 1821 | p = NEXT_INSN (temp) = NEXT_INSN (p); |
b4ad7b23 RS |
1822 | } |
1823 | ||
1824 | start_sequence (); | |
1825 | emit_move_insn (m->set_dest, m->set_src); | |
c160c628 | 1826 | temp = get_insns (); |
804a718a | 1827 | seq = gen_sequence (); |
b4ad7b23 RS |
1828 | end_sequence (); |
1829 | ||
c160c628 RK |
1830 | add_label_notes (m->set_src, temp); |
1831 | ||
804a718a | 1832 | i1 = loop_insn_hoist (loop, seq); |
5fd8383e | 1833 | if (! find_reg_note (i1, REG_EQUAL, NULL_RTX)) |
b4ad7b23 | 1834 | REG_NOTES (i1) |
38a448ca RH |
1835 | = gen_rtx_EXPR_LIST (m->is_equiv ? REG_EQUIV : REG_EQUAL, |
1836 | m->set_src, REG_NOTES (i1)); | |
b4ad7b23 RS |
1837 | |
1838 | if (loop_dump_stream) | |
1839 | fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1)); | |
1840 | ||
1841 | /* The more regs we move, the less we like moving them. */ | |
1842 | threshold -= 3; | |
1843 | } | |
1844 | else | |
1845 | { | |
1846 | for (count = m->consec; count >= 0; count--) | |
1847 | { | |
1848 | rtx i1, temp; | |
1849 | ||
0f41302f | 1850 | /* If first insn of libcall sequence, skip to end. */ |
e6fcb60d | 1851 | /* Do this at start of loop, since p is guaranteed to |
b4ad7b23 RS |
1852 | be an insn here. */ |
1853 | if (GET_CODE (p) != NOTE | |
5fd8383e | 1854 | && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX))) |
b4ad7b23 RS |
1855 | p = XEXP (temp, 0); |
1856 | ||
1857 | /* If last insn of libcall sequence, move all | |
1858 | insns except the last before the loop. The last | |
1859 | insn is handled in the normal manner. */ | |
1860 | if (GET_CODE (p) != NOTE | |
5fd8383e | 1861 | && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX))) |
b4ad7b23 RS |
1862 | { |
1863 | rtx fn_address = 0; | |
1864 | rtx fn_reg = 0; | |
1865 | rtx fn_address_insn = 0; | |
1866 | ||
1867 | first = 0; | |
1868 | for (temp = XEXP (temp, 0); temp != p; | |
1869 | temp = NEXT_INSN (temp)) | |
1870 | { | |
1871 | rtx body; | |
1872 | rtx n; | |
1873 | rtx next; | |
1874 | ||
1875 | if (GET_CODE (temp) == NOTE) | |
1876 | continue; | |
1877 | ||
1878 | body = PATTERN (temp); | |
1879 | ||
1880 | /* Find the next insn after TEMP, | |
1881 | not counting USE or NOTE insns. */ | |
1882 | for (next = NEXT_INSN (temp); next != p; | |
1883 | next = NEXT_INSN (next)) | |
1884 | if (! (GET_CODE (next) == INSN | |
1885 | && GET_CODE (PATTERN (next)) == USE) | |
1886 | && GET_CODE (next) != NOTE) | |
1887 | break; | |
e6fcb60d | 1888 | |
b4ad7b23 RS |
1889 | /* If that is the call, this may be the insn |
1890 | that loads the function address. | |
1891 | ||
1892 | Extract the function address from the insn | |
1893 | that loads it into a register. | |
1894 | If this insn was cse'd, we get incorrect code. | |
1895 | ||
1896 | So emit a new move insn that copies the | |
1897 | function address into the register that the | |
1898 | call insn will use. flow.c will delete any | |
1899 | redundant stores that we have created. */ | |
1900 | if (GET_CODE (next) == CALL_INSN | |
1901 | && GET_CODE (body) == SET | |
1902 | && GET_CODE (SET_DEST (body)) == REG | |
5fd8383e RK |
1903 | && (n = find_reg_note (temp, REG_EQUAL, |
1904 | NULL_RTX))) | |
b4ad7b23 RS |
1905 | { |
1906 | fn_reg = SET_SRC (body); | |
1907 | if (GET_CODE (fn_reg) != REG) | |
1908 | fn_reg = SET_DEST (body); | |
1909 | fn_address = XEXP (n, 0); | |
1910 | fn_address_insn = temp; | |
1911 | } | |
1912 | /* We have the call insn. | |
1913 | If it uses the register we suspect it might, | |
1914 | load it with the correct address directly. */ | |
1915 | if (GET_CODE (temp) == CALL_INSN | |
1916 | && fn_address != 0 | |
d9f8a199 | 1917 | && reg_referenced_p (fn_reg, body)) |
86e21212 MH |
1918 | loop_insn_emit_after (loop, 0, fn_address_insn, |
1919 | gen_move_insn | |
1920 | (fn_reg, fn_address)); | |
b4ad7b23 RS |
1921 | |
1922 | if (GET_CODE (temp) == CALL_INSN) | |
f97d29ce | 1923 | { |
86e21212 | 1924 | i1 = loop_call_insn_hoist (loop, body); |
f97d29ce JW |
1925 | /* Because the USAGE information potentially |
1926 | contains objects other than hard registers | |
1927 | we need to copy it. */ | |
8c4f5c09 | 1928 | if (CALL_INSN_FUNCTION_USAGE (temp)) |
db3cf6fb MS |
1929 | CALL_INSN_FUNCTION_USAGE (i1) |
1930 | = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp)); | |
f97d29ce | 1931 | } |
b4ad7b23 | 1932 | else |
804a718a | 1933 | i1 = loop_insn_hoist (loop, body); |
b4ad7b23 RS |
1934 | if (first == 0) |
1935 | first = i1; | |
1936 | if (temp == fn_address_insn) | |
1937 | fn_address_insn = i1; | |
1938 | REG_NOTES (i1) = REG_NOTES (temp); | |
1939 | delete_insn (temp); | |
1940 | } | |
18985c91 R |
1941 | if (new_start == 0) |
1942 | new_start = first; | |
b4ad7b23 RS |
1943 | } |
1944 | if (m->savemode != VOIDmode) | |
1945 | { | |
1946 | /* P sets REG to zero; but we should clear only | |
1947 | the bits that are not covered by the mode | |
1948 | m->savemode. */ | |
1949 | rtx reg = m->set_dest; | |
1950 | rtx sequence; | |
1951 | rtx tem; | |
e6fcb60d | 1952 | |
b4ad7b23 | 1953 | start_sequence (); |
ef89d648 ZW |
1954 | tem = expand_simple_binop |
1955 | (GET_MODE (reg), AND, reg, | |
5fd8383e RK |
1956 | GEN_INT ((((HOST_WIDE_INT) 1 |
1957 | << GET_MODE_BITSIZE (m->savemode))) | |
b4ad7b23 RS |
1958 | - 1), |
1959 | reg, 1, OPTAB_LIB_WIDEN); | |
1960 | if (tem == 0) | |
1961 | abort (); | |
1962 | if (tem != reg) | |
1963 | emit_move_insn (reg, tem); | |
1964 | sequence = gen_sequence (); | |
1965 | end_sequence (); | |
804a718a | 1966 | i1 = loop_insn_hoist (loop, sequence); |
b4ad7b23 RS |
1967 | } |
1968 | else if (GET_CODE (p) == CALL_INSN) | |
f97d29ce | 1969 | { |
86e21212 | 1970 | i1 = loop_call_insn_hoist (loop, PATTERN (p)); |
f97d29ce JW |
1971 | /* Because the USAGE information potentially |
1972 | contains objects other than hard registers | |
1973 | we need to copy it. */ | |
8c4f5c09 | 1974 | if (CALL_INSN_FUNCTION_USAGE (p)) |
db3cf6fb MS |
1975 | CALL_INSN_FUNCTION_USAGE (i1) |
1976 | = copy_rtx (CALL_INSN_FUNCTION_USAGE (p)); | |
f97d29ce | 1977 | } |
1a61c29f JW |
1978 | else if (count == m->consec && m->move_insn_first) |
1979 | { | |
804a718a | 1980 | rtx seq; |
1a61c29f JW |
1981 | /* The SET_SRC might not be invariant, so we must |
1982 | use the REG_EQUAL note. */ | |
1983 | start_sequence (); | |
1984 | emit_move_insn (m->set_dest, m->set_src); | |
1985 | temp = get_insns (); | |
804a718a | 1986 | seq = gen_sequence (); |
1a61c29f JW |
1987 | end_sequence (); |
1988 | ||
1989 | add_label_notes (m->set_src, temp); | |
1990 | ||
804a718a | 1991 | i1 = loop_insn_hoist (loop, seq); |
1a61c29f JW |
1992 | if (! find_reg_note (i1, REG_EQUAL, NULL_RTX)) |
1993 | REG_NOTES (i1) | |
1994 | = gen_rtx_EXPR_LIST ((m->is_equiv ? REG_EQUIV | |
1995 | : REG_EQUAL), | |
1996 | m->set_src, REG_NOTES (i1)); | |
1997 | } | |
b4ad7b23 | 1998 | else |
804a718a | 1999 | i1 = loop_insn_hoist (loop, PATTERN (p)); |
b4ad7b23 | 2000 | |
1a61c29f JW |
2001 | if (REG_NOTES (i1) == 0) |
2002 | { | |
2003 | REG_NOTES (i1) = REG_NOTES (p); | |
b4ad7b23 | 2004 | |
1a61c29f JW |
2005 | /* If there is a REG_EQUAL note present whose value |
2006 | is not loop invariant, then delete it, since it | |
2007 | may cause problems with later optimization passes. | |
2008 | It is possible for cse to create such notes | |
2009 | like this as a result of record_jump_cond. */ | |
e6fcb60d | 2010 | |
1a61c29f | 2011 | if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX)) |
0534b804 | 2012 | && ! loop_invariant_p (loop, XEXP (temp, 0))) |
1a61c29f JW |
2013 | remove_note (i1, temp); |
2014 | } | |
e6726b1f | 2015 | |
b4ad7b23 RS |
2016 | if (new_start == 0) |
2017 | new_start = i1; | |
2018 | ||
2019 | if (loop_dump_stream) | |
2020 | fprintf (loop_dump_stream, " moved to %d", | |
2021 | INSN_UID (i1)); | |
2022 | ||
b4ad7b23 RS |
2023 | /* If library call, now fix the REG_NOTES that contain |
2024 | insn pointers, namely REG_LIBCALL on FIRST | |
2025 | and REG_RETVAL on I1. */ | |
51723711 | 2026 | if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX))) |
b4ad7b23 RS |
2027 | { |
2028 | XEXP (temp, 0) = first; | |
5fd8383e | 2029 | temp = find_reg_note (first, REG_LIBCALL, NULL_RTX); |
b4ad7b23 RS |
2030 | XEXP (temp, 0) = i1; |
2031 | } | |
2032 | ||
9655bf95 | 2033 | temp = p; |
b4ad7b23 | 2034 | delete_insn (p); |
9655bf95 DM |
2035 | p = NEXT_INSN (p); |
2036 | ||
2037 | /* simplify_giv_expr expects that it can walk the insns | |
2038 | at m->insn forwards and see this old sequence we are | |
2039 | tossing here. delete_insn does preserve the next | |
2040 | pointers, but when we skip over a NOTE we must fix | |
2041 | it up. Otherwise that code walks into the non-deleted | |
2042 | insn stream. */ | |
2043 | while (p && GET_CODE (p) == NOTE) | |
2044 | p = NEXT_INSN (temp) = NEXT_INSN (p); | |
b4ad7b23 RS |
2045 | } |
2046 | ||
2047 | /* The more regs we move, the less we like moving them. */ | |
2048 | threshold -= 3; | |
2049 | } | |
2050 | ||
2051 | /* Any other movable that loads the same register | |
2052 | MUST be moved. */ | |
2053 | already_moved[regno] = 1; | |
2054 | ||
2055 | /* This reg has been moved out of one loop. */ | |
f1d4ac80 | 2056 | regs->array[regno].moved_once = 1; |
b4ad7b23 RS |
2057 | |
2058 | /* The reg set here is now invariant. */ | |
2059 | if (! m->partial) | |
f1d4ac80 | 2060 | regs->array[regno].set_in_loop = 0; |
b4ad7b23 RS |
2061 | |
2062 | m->done = 1; | |
2063 | ||
2064 | /* Change the length-of-life info for the register | |
2065 | to say it lives at least the full length of this loop. | |
2066 | This will help guide optimizations in outer loops. */ | |
2067 | ||
8529a489 | 2068 | if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start)) |
b4ad7b23 RS |
2069 | /* This is the old insn before all the moved insns. |
2070 | We can't use the moved insn because it is out of range | |
2071 | in uid_luid. Only the old insns have luids. */ | |
b1f21e0a | 2072 | REGNO_FIRST_UID (regno) = INSN_UID (loop_start); |
8529a489 | 2073 | if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end)) |
0534b804 | 2074 | REGNO_LAST_UID (regno) = INSN_UID (loop_end); |
b4ad7b23 RS |
2075 | |
2076 | /* Combine with this moved insn any other matching movables. */ | |
2077 | ||
2078 | if (! m->partial) | |
02055ad6 | 2079 | for (m1 = movables->head; m1; m1 = m1->next) |
b4ad7b23 RS |
2080 | if (m1->match == m) |
2081 | { | |
2082 | rtx temp; | |
2083 | ||
2084 | /* Schedule the reg loaded by M1 | |
2085 | for replacement so that shares the reg of M. | |
2086 | If the modes differ (only possible in restricted | |
51f0646f JL |
2087 | circumstances, make a SUBREG. |
2088 | ||
2089 | Note this assumes that the target dependent files | |
2090 | treat REG and SUBREG equally, including within | |
2091 | GO_IF_LEGITIMATE_ADDRESS and in all the | |
2092 | predicates since we never verify that replacing the | |
2093 | original register with a SUBREG results in a | |
2094 | recognizable insn. */ | |
b4ad7b23 RS |
2095 | if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)) |
2096 | reg_map[m1->regno] = m->set_dest; | |
2097 | else | |
2098 | reg_map[m1->regno] | |
2099 | = gen_lowpart_common (GET_MODE (m1->set_dest), | |
2100 | m->set_dest); | |
e6fcb60d | 2101 | |
b4ad7b23 RS |
2102 | /* Get rid of the matching insn |
2103 | and prevent further processing of it. */ | |
2104 | m1->done = 1; | |
2105 | ||
2106 | /* if library call, delete all insn except last, which | |
2107 | is deleted below */ | |
51723711 KG |
2108 | if ((temp = find_reg_note (m1->insn, REG_RETVAL, |
2109 | NULL_RTX))) | |
b4ad7b23 RS |
2110 | { |
2111 | for (temp = XEXP (temp, 0); temp != m1->insn; | |
2112 | temp = NEXT_INSN (temp)) | |
2113 | delete_insn (temp); | |
2114 | } | |
2115 | delete_insn (m1->insn); | |
2116 | ||
2117 | /* Any other movable that loads the same register | |
2118 | MUST be moved. */ | |
2119 | already_moved[m1->regno] = 1; | |
2120 | ||
2121 | /* The reg merged here is now invariant, | |
2122 | if the reg it matches is invariant. */ | |
2123 | if (! m->partial) | |
f1d4ac80 | 2124 | regs->array[m1->regno].set_in_loop = 0; |
b4ad7b23 RS |
2125 | } |
2126 | } | |
2127 | else if (loop_dump_stream) | |
2128 | fprintf (loop_dump_stream, "not desirable"); | |
2129 | } | |
2130 | else if (loop_dump_stream && !m->match) | |
2131 | fprintf (loop_dump_stream, "not safe"); | |
2132 | ||
2133 | if (loop_dump_stream) | |
2134 | fprintf (loop_dump_stream, "\n"); | |
2135 | } | |
2136 | ||
2137 | if (new_start == 0) | |
2138 | new_start = loop_start; | |
2139 | ||
2140 | /* Go through all the instructions in the loop, making | |
2141 | all the register substitutions scheduled in REG_MAP. */ | |
0534b804 | 2142 | for (p = new_start; p != loop_end; p = NEXT_INSN (p)) |
b4ad7b23 RS |
2143 | if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN |
2144 | || GET_CODE (p) == CALL_INSN) | |
2145 | { | |
2146 | replace_regs (PATTERN (p), reg_map, nregs, 0); | |
2147 | replace_regs (REG_NOTES (p), reg_map, nregs, 0); | |
da0c128e | 2148 | INSN_CODE (p) = -1; |
b4ad7b23 | 2149 | } |
4da896b2 MM |
2150 | |
2151 | /* Clean up. */ | |
2152 | free (reg_map); | |
2153 | free (already_moved); | |
b4ad7b23 | 2154 | } |
6ec92010 MH |
2155 | |
2156 | ||
2157 | static void | |
2158 | loop_movables_add (movables, m) | |
2159 | struct loop_movables *movables; | |
2160 | struct movable *m; | |
2161 | { | |
2162 | if (movables->head == 0) | |
2163 | movables->head = m; | |
2164 | else | |
2165 | movables->last->next = m; | |
2166 | movables->last = m; | |
2167 | } | |
2168 | ||
2169 | ||
2170 | static void | |
2171 | loop_movables_free (movables) | |
2172 | struct loop_movables *movables; | |
2173 | { | |
2174 | struct movable *m; | |
2175 | struct movable *m_next; | |
2176 | ||
2177 | for (m = movables->head; m; m = m_next) | |
2178 | { | |
2179 | m_next = m->next; | |
2180 | free (m); | |
2181 | } | |
2182 | } | |
b4ad7b23 RS |
2183 | \f |
2184 | #if 0 | |
2185 | /* Scan X and replace the address of any MEM in it with ADDR. | |
2186 | REG is the address that MEM should have before the replacement. */ | |
2187 | ||
2188 | static void | |
2189 | replace_call_address (x, reg, addr) | |
2190 | rtx x, reg, addr; | |
2191 | { | |
2192 | register enum rtx_code code; | |
2193 | register int i; | |
6f7d635c | 2194 | register const char *fmt; |
b4ad7b23 RS |
2195 | |
2196 | if (x == 0) | |
2197 | return; | |
2198 | code = GET_CODE (x); | |
2199 | switch (code) | |
2200 | { | |
2201 | case PC: | |
2202 | case CC0: | |
2203 | case CONST_INT: | |
2204 | case CONST_DOUBLE: | |
2205 | case CONST: | |
2206 | case SYMBOL_REF: | |
2207 | case LABEL_REF: | |
2208 | case REG: | |
2209 | return; | |
2210 | ||
2211 | case SET: | |
2212 | /* Short cut for very common case. */ | |
2213 | replace_call_address (XEXP (x, 1), reg, addr); | |
2214 | return; | |
2215 | ||
2216 | case CALL: | |
2217 | /* Short cut for very common case. */ | |
2218 | replace_call_address (XEXP (x, 0), reg, addr); | |
2219 | return; | |
2220 | ||
2221 | case MEM: | |
2222 | /* If this MEM uses a reg other than the one we expected, | |
2223 | something is wrong. */ | |
2224 | if (XEXP (x, 0) != reg) | |
2225 | abort (); | |
2226 | XEXP (x, 0) = addr; | |
2227 | return; | |
e6fcb60d | 2228 | |
e9a25f70 JL |
2229 | default: |
2230 | break; | |
b4ad7b23 RS |
2231 | } |
2232 | ||
2233 | fmt = GET_RTX_FORMAT (code); | |
2234 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
2235 | { | |
2236 | if (fmt[i] == 'e') | |
2237 | replace_call_address (XEXP (x, i), reg, addr); | |
d4757e6a | 2238 | else if (fmt[i] == 'E') |
b4ad7b23 RS |
2239 | { |
2240 | register int j; | |
2241 | for (j = 0; j < XVECLEN (x, i); j++) | |
2242 | replace_call_address (XVECEXP (x, i, j), reg, addr); | |
2243 | } | |
2244 | } | |
2245 | } | |
2246 | #endif | |
2247 | \f | |
2248 | /* Return the number of memory refs to addresses that vary | |
2249 | in the rtx X. */ | |
2250 | ||
2251 | static int | |
0534b804 MH |
2252 | count_nonfixed_reads (loop, x) |
2253 | const struct loop *loop; | |
b4ad7b23 RS |
2254 | rtx x; |
2255 | { | |
2256 | register enum rtx_code code; | |
2257 | register int i; | |
6f7d635c | 2258 | register const char *fmt; |
b4ad7b23 RS |
2259 | int value; |
2260 | ||
2261 | if (x == 0) | |
2262 | return 0; | |
2263 | ||
2264 | code = GET_CODE (x); | |
2265 | switch (code) | |
2266 | { | |
2267 | case PC: | |
2268 | case CC0: | |
2269 | case CONST_INT: | |
2270 | case CONST_DOUBLE: | |
2271 | case CONST: | |
2272 | case SYMBOL_REF: | |
2273 | case LABEL_REF: | |
2274 | case REG: | |
2275 | return 0; | |
2276 | ||
2277 | case MEM: | |
0534b804 MH |
2278 | return ((loop_invariant_p (loop, XEXP (x, 0)) != 1) |
2279 | + count_nonfixed_reads (loop, XEXP (x, 0))); | |
e6fcb60d | 2280 | |
e9a25f70 JL |
2281 | default: |
2282 | break; | |
b4ad7b23 RS |
2283 | } |
2284 | ||
2285 | value = 0; | |
2286 | fmt = GET_RTX_FORMAT (code); | |
2287 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
2288 | { | |
2289 | if (fmt[i] == 'e') | |
0534b804 MH |
2290 | value += count_nonfixed_reads (loop, XEXP (x, i)); |
2291 | if (fmt[i] == 'E') | |
b4ad7b23 RS |
2292 | { |
2293 | register int j; | |
2294 | for (j = 0; j < XVECLEN (x, i); j++) | |
0534b804 | 2295 | value += count_nonfixed_reads (loop, XVECEXP (x, i, j)); |
b4ad7b23 RS |
2296 | } |
2297 | } | |
2298 | return value; | |
2299 | } | |
b4ad7b23 | 2300 | \f |
3c748bb6 | 2301 | /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed', |
576d0b54 | 2302 | `has_call', `has_nonconst_call', `has_volatile', `has_tablejump', |
afa1738b MH |
2303 | `unknown_address_altered', `unknown_constant_address_altered', and |
2304 | `num_mem_sets' in LOOP. Also, fill in the array `mems' and the | |
2305 | list `store_mems' in LOOP. */ | |
b4ad7b23 RS |
2306 | |
2307 | static void | |
a2be868f MH |
2308 | prescan_loop (loop) |
2309 | struct loop *loop; | |
b4ad7b23 RS |
2310 | { |
2311 | register int level = 1; | |
41a972a9 | 2312 | rtx insn; |
52b38064 | 2313 | struct loop_info *loop_info = LOOP_INFO (loop); |
a2be868f MH |
2314 | rtx start = loop->start; |
2315 | rtx end = loop->end; | |
41a972a9 MM |
2316 | /* The label after END. Jumping here is just like falling off the |
2317 | end of the loop. We use next_nonnote_insn instead of next_label | |
2318 | as a hedge against the (pathological) case where some actual insn | |
2319 | might end up between the two. */ | |
2320 | rtx exit_target = next_nonnote_insn (end); | |
3c748bb6 | 2321 | |
3c748bb6 | 2322 | loop_info->has_indirect_jump = indirect_jump_in_function; |
e304a8e6 | 2323 | loop_info->pre_header_has_call = 0; |
3c748bb6 | 2324 | loop_info->has_call = 0; |
576d0b54 | 2325 | loop_info->has_nonconst_call = 0; |
3c748bb6 MH |
2326 | loop_info->has_volatile = 0; |
2327 | loop_info->has_tablejump = 0; | |
3c748bb6 | 2328 | loop_info->has_multiple_exit_targets = 0; |
a2be868f | 2329 | loop->level = 1; |
b4ad7b23 | 2330 | |
afa1738b MH |
2331 | loop_info->unknown_address_altered = 0; |
2332 | loop_info->unknown_constant_address_altered = 0; | |
2333 | loop_info->store_mems = NULL_RTX; | |
2334 | loop_info->first_loop_store_insn = NULL_RTX; | |
2335 | loop_info->mems_idx = 0; | |
2336 | loop_info->num_mem_sets = 0; | |
b4ad7b23 | 2337 | |
e304a8e6 MH |
2338 | |
2339 | for (insn = start; insn && GET_CODE (insn) != CODE_LABEL; | |
2340 | insn = PREV_INSN (insn)) | |
2341 | { | |
2342 | if (GET_CODE (insn) == CALL_INSN) | |
2343 | { | |
2344 | loop_info->pre_header_has_call = 1; | |
2345 | break; | |
2346 | } | |
2347 | } | |
2348 | ||
b4ad7b23 RS |
2349 | for (insn = NEXT_INSN (start); insn != NEXT_INSN (end); |
2350 | insn = NEXT_INSN (insn)) | |
2351 | { | |
2352 | if (GET_CODE (insn) == NOTE) | |
2353 | { | |
2354 | if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG) | |
2355 | { | |
2356 | ++level; | |
2357 | /* Count number of loops contained in this one. */ | |
a2be868f | 2358 | loop->level++; |
b4ad7b23 RS |
2359 | } |
2360 | else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END) | |
2361 | { | |
2362 | --level; | |
b4ad7b23 RS |
2363 | } |
2364 | } | |
2365 | else if (GET_CODE (insn) == CALL_INSN) | |
2366 | { | |
24a28584 | 2367 | if (! CONST_OR_PURE_CALL_P (insn)) |
576d0b54 MH |
2368 | { |
2369 | loop_info->unknown_address_altered = 1; | |
2370 | loop_info->has_nonconst_call = 1; | |
2371 | } | |
3c748bb6 | 2372 | loop_info->has_call = 1; |
b4ad7b23 | 2373 | } |
41a972a9 | 2374 | else if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN) |
b4ad7b23 | 2375 | { |
41a972a9 MM |
2376 | rtx label1 = NULL_RTX; |
2377 | rtx label2 = NULL_RTX; | |
2378 | ||
2379 | if (volatile_refs_p (PATTERN (insn))) | |
3c748bb6 | 2380 | loop_info->has_volatile = 1; |
8c368ee2 DE |
2381 | |
2382 | if (GET_CODE (insn) == JUMP_INSN | |
2383 | && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC | |
2384 | || GET_CODE (PATTERN (insn)) == ADDR_VEC)) | |
3c748bb6 | 2385 | loop_info->has_tablejump = 1; |
e6fcb60d | 2386 | |
afa1738b MH |
2387 | note_stores (PATTERN (insn), note_addr_stored, loop_info); |
2388 | if (! loop_info->first_loop_store_insn && loop_info->store_mems) | |
2389 | loop_info->first_loop_store_insn = insn; | |
41a972a9 | 2390 | |
3c748bb6 | 2391 | if (! loop_info->has_multiple_exit_targets |
41a972a9 MM |
2392 | && GET_CODE (insn) == JUMP_INSN |
2393 | && GET_CODE (PATTERN (insn)) == SET | |
2394 | && SET_DEST (PATTERN (insn)) == pc_rtx) | |
552bc76f | 2395 | { |
41a972a9 MM |
2396 | if (GET_CODE (SET_SRC (PATTERN (insn))) == IF_THEN_ELSE) |
2397 | { | |
2398 | label1 = XEXP (SET_SRC (PATTERN (insn)), 1); | |
2399 | label2 = XEXP (SET_SRC (PATTERN (insn)), 2); | |
2400 | } | |
2401 | else | |
2402 | { | |
2403 | label1 = SET_SRC (PATTERN (insn)); | |
2404 | } | |
2405 | ||
e6fcb60d KH |
2406 | do |
2407 | { | |
2408 | if (label1 && label1 != pc_rtx) | |
2409 | { | |
2410 | if (GET_CODE (label1) != LABEL_REF) | |
2411 | { | |
2412 | /* Something tricky. */ | |
2413 | loop_info->has_multiple_exit_targets = 1; | |
2414 | break; | |
2415 | } | |
2416 | else if (XEXP (label1, 0) != exit_target | |
2417 | && LABEL_OUTSIDE_LOOP_P (label1)) | |
2418 | { | |
2419 | /* A jump outside the current loop. */ | |
2420 | loop_info->has_multiple_exit_targets = 1; | |
2421 | break; | |
2422 | } | |
2423 | } | |
552bc76f | 2424 | |
e6fcb60d KH |
2425 | label1 = label2; |
2426 | label2 = NULL_RTX; | |
2427 | } | |
2428 | while (label1); | |
552bc76f | 2429 | } |
b4ad7b23 | 2430 | } |
41a972a9 | 2431 | else if (GET_CODE (insn) == RETURN) |
3c748bb6 | 2432 | loop_info->has_multiple_exit_targets = 1; |
b4ad7b23 | 2433 | } |
41a972a9 MM |
2434 | |
2435 | /* Now, rescan the loop, setting up the LOOP_MEMS array. */ | |
20bd7bfa | 2436 | if (/* An exception thrown by a called function might land us |
41a972a9 | 2437 | anywhere. */ |
576d0b54 | 2438 | ! loop_info->has_nonconst_call |
41a972a9 MM |
2439 | /* We don't want loads for MEMs moved to a location before the |
2440 | one at which their stack memory becomes allocated. (Note | |
2441 | that this is not a problem for malloc, etc., since those | |
2442 | require actual function calls. */ | |
a2be868f | 2443 | && ! current_function_calls_alloca |
41a972a9 MM |
2444 | /* There are ways to leave the loop other than falling off the |
2445 | end. */ | |
a2be868f | 2446 | && ! loop_info->has_multiple_exit_targets) |
41a972a9 MM |
2447 | for (insn = NEXT_INSN (start); insn != NEXT_INSN (end); |
2448 | insn = NEXT_INSN (insn)) | |
afa1738b | 2449 | for_each_rtx (&insn, insert_loop_mem, loop_info); |
20bd7bfa JW |
2450 | |
2451 | /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so | |
2452 | that loop_invariant_p and load_mems can use true_dependence | |
2453 | to determine what is really clobbered. */ | |
afa1738b | 2454 | if (loop_info->unknown_address_altered) |
20bd7bfa JW |
2455 | { |
2456 | rtx mem = gen_rtx_MEM (BLKmode, const0_rtx); | |
2457 | ||
fd5d5b07 | 2458 | loop_info->store_mems |
afa1738b | 2459 | = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems); |
20bd7bfa | 2460 | } |
afa1738b | 2461 | if (loop_info->unknown_constant_address_altered) |
20bd7bfa JW |
2462 | { |
2463 | rtx mem = gen_rtx_MEM (BLKmode, const0_rtx); | |
2464 | ||
2465 | RTX_UNCHANGING_P (mem) = 1; | |
fd5d5b07 | 2466 | loop_info->store_mems |
afa1738b | 2467 | = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems); |
20bd7bfa | 2468 | } |
b4ad7b23 RS |
2469 | } |
2470 | \f | |
2471 | /* Scan the function looking for loops. Record the start and end of each loop. | |
2472 | Also mark as invalid loops any loops that contain a setjmp or are branched | |
2473 | to from outside the loop. */ | |
2474 | ||
2475 | static void | |
a2be868f | 2476 | find_and_verify_loops (f, loops) |
b4ad7b23 | 2477 | rtx f; |
a2be868f | 2478 | struct loops *loops; |
b4ad7b23 | 2479 | { |
a2be868f MH |
2480 | rtx insn; |
2481 | rtx label; | |
2482 | int num_loops; | |
2483 | struct loop *current_loop; | |
2484 | struct loop *next_loop; | |
2485 | struct loop *loop; | |
2486 | ||
2487 | num_loops = loops->num; | |
b4ad7b23 | 2488 | |
3ec2b590 R |
2489 | compute_luids (f, NULL_RTX, 0); |
2490 | ||
b4ad7b23 RS |
2491 | /* If there are jumps to undefined labels, |
2492 | treat them as jumps out of any/all loops. | |
2493 | This also avoids writing past end of tables when there are no loops. */ | |
a2be868f | 2494 | uid_loop[0] = NULL; |
b4ad7b23 RS |
2495 | |
2496 | /* Find boundaries of loops, mark which loops are contained within | |
2497 | loops, and invalidate loops that have setjmp. */ | |
2498 | ||
a2be868f MH |
2499 | num_loops = 0; |
2500 | current_loop = NULL; | |
b4ad7b23 RS |
2501 | for (insn = f; insn; insn = NEXT_INSN (insn)) |
2502 | { | |
2503 | if (GET_CODE (insn) == NOTE) | |
2504 | switch (NOTE_LINE_NUMBER (insn)) | |
2505 | { | |
2506 | case NOTE_INSN_LOOP_BEG: | |
a2be868f MH |
2507 | next_loop = loops->array + num_loops; |
2508 | next_loop->num = num_loops; | |
2509 | num_loops++; | |
2510 | next_loop->start = insn; | |
2511 | next_loop->outer = current_loop; | |
b4ad7b23 RS |
2512 | current_loop = next_loop; |
2513 | break; | |
2514 | ||
3ec2b590 | 2515 | case NOTE_INSN_LOOP_CONT: |
a2be868f | 2516 | current_loop->cont = insn; |
3ec2b590 | 2517 | break; |
e375c819 MH |
2518 | |
2519 | case NOTE_INSN_LOOP_VTOP: | |
2520 | current_loop->vtop = insn; | |
2521 | break; | |
2522 | ||
b4ad7b23 | 2523 | case NOTE_INSN_LOOP_END: |
a2be868f | 2524 | if (! current_loop) |
b4ad7b23 RS |
2525 | abort (); |
2526 | ||
a2be868f | 2527 | current_loop->end = insn; |
a2be868f | 2528 | current_loop = current_loop->outer; |
b4ad7b23 RS |
2529 | break; |
2530 | ||
e9a25f70 JL |
2531 | default: |
2532 | break; | |
b4ad7b23 RS |
2533 | } |
2534 | ||
19652adf | 2535 | if (GET_CODE (insn) == CALL_INSN |
570a98eb JH |
2536 | && find_reg_note (insn, REG_SETJMP, NULL)) |
2537 | { | |
2538 | /* In this case, we must invalidate our current loop and any | |
2539 | enclosing loop. */ | |
2540 | for (loop = current_loop; loop; loop = loop->outer) | |
2541 | { | |
2542 | loop->invalid = 1; | |
2543 | if (loop_dump_stream) | |
2544 | fprintf (loop_dump_stream, | |
2545 | "\nLoop at %d ignored due to setjmp.\n", | |
2546 | INSN_UID (loop->start)); | |
2547 | } | |
2548 | } | |
2549 | ||
b4ad7b23 RS |
2550 | /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the |
2551 | enclosing loop, but this doesn't matter. */ | |
a2be868f | 2552 | uid_loop[INSN_UID (insn)] = current_loop; |
b4ad7b23 RS |
2553 | } |
2554 | ||
034dabc9 JW |
2555 | /* Any loop containing a label used in an initializer must be invalidated, |
2556 | because it can be jumped into from anywhere. */ | |
2557 | ||
2558 | for (label = forced_labels; label; label = XEXP (label, 1)) | |
2559 | { | |
a2be868f MH |
2560 | for (loop = uid_loop[INSN_UID (XEXP (label, 0))]; |
2561 | loop; loop = loop->outer) | |
2562 | loop->invalid = 1; | |
034dabc9 JW |
2563 | } |
2564 | ||
6adb4e3a MS |
2565 | /* Any loop containing a label used for an exception handler must be |
2566 | invalidated, because it can be jumped into from anywhere. */ | |
2567 | ||
2568 | for (label = exception_handler_labels; label; label = XEXP (label, 1)) | |
2569 | { | |
a2be868f MH |
2570 | for (loop = uid_loop[INSN_UID (XEXP (label, 0))]; |
2571 | loop; loop = loop->outer) | |
2572 | loop->invalid = 1; | |
6adb4e3a MS |
2573 | } |
2574 | ||
034dabc9 JW |
2575 | /* Now scan all insn's in the function. If any JUMP_INSN branches into a |
2576 | loop that it is not contained within, that loop is marked invalid. | |
2577 | If any INSN or CALL_INSN uses a label's address, then the loop containing | |
2578 | that label is marked invalid, because it could be jumped into from | |
2579 | anywhere. | |
b4ad7b23 RS |
2580 | |
2581 | Also look for blocks of code ending in an unconditional branch that | |
e6fcb60d | 2582 | exits the loop. If such a block is surrounded by a conditional |
b4ad7b23 RS |
2583 | branch around the block, move the block elsewhere (see below) and |
2584 | invert the jump to point to the code block. This may eliminate a | |
2585 | label in our loop and will simplify processing by both us and a | |
2586 | possible second cse pass. */ | |
2587 | ||
2588 | for (insn = f; insn; insn = NEXT_INSN (insn)) | |
2c3c49de | 2589 | if (INSN_P (insn)) |
b4ad7b23 | 2590 | { |
a2be868f | 2591 | struct loop *this_loop = uid_loop[INSN_UID (insn)]; |
b4ad7b23 | 2592 | |
034dabc9 JW |
2593 | if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN) |
2594 | { | |
2595 | rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX); | |
2596 | if (note) | |
2597 | { | |
a2be868f MH |
2598 | for (loop = uid_loop[INSN_UID (XEXP (note, 0))]; |
2599 | loop; loop = loop->outer) | |
2600 | loop->invalid = 1; | |
034dabc9 JW |
2601 | } |
2602 | } | |
2603 | ||
2604 | if (GET_CODE (insn) != JUMP_INSN) | |
2605 | continue; | |
2606 | ||
a2be868f | 2607 | mark_loop_jump (PATTERN (insn), this_loop); |
b4ad7b23 RS |
2608 | |
2609 | /* See if this is an unconditional branch outside the loop. */ | |
a2be868f | 2610 | if (this_loop |
b4ad7b23 | 2611 | && (GET_CODE (PATTERN (insn)) == RETURN |
7f1c097d JH |
2612 | || (any_uncondjump_p (insn) |
2613 | && onlyjump_p (insn) | |
a2be868f MH |
2614 | && (uid_loop[INSN_UID (JUMP_LABEL (insn))] |
2615 | != this_loop))) | |
1c01e9df | 2616 | && get_max_uid () < max_uid_for_loop) |
b4ad7b23 RS |
2617 | { |
2618 | rtx p; | |
2619 | rtx our_next = next_real_insn (insn); | |
3b10cf4b | 2620 | rtx last_insn_to_move = NEXT_INSN (insn); |
a2be868f MH |
2621 | struct loop *dest_loop; |
2622 | struct loop *outer_loop = NULL; | |
b4ad7b23 RS |
2623 | |
2624 | /* Go backwards until we reach the start of the loop, a label, | |
2625 | or a JUMP_INSN. */ | |
2626 | for (p = PREV_INSN (insn); | |
2627 | GET_CODE (p) != CODE_LABEL | |
2628 | && ! (GET_CODE (p) == NOTE | |
2629 | && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG) | |
2630 | && GET_CODE (p) != JUMP_INSN; | |
2631 | p = PREV_INSN (p)) | |
2632 | ; | |
2633 | ||
edf711a4 RK |
2634 | /* Check for the case where we have a jump to an inner nested |
2635 | loop, and do not perform the optimization in that case. */ | |
2636 | ||
fdccb6df | 2637 | if (JUMP_LABEL (insn)) |
edf711a4 | 2638 | { |
a2be868f MH |
2639 | dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))]; |
2640 | if (dest_loop) | |
fdccb6df | 2641 | { |
a2be868f MH |
2642 | for (outer_loop = dest_loop; outer_loop; |
2643 | outer_loop = outer_loop->outer) | |
2644 | if (outer_loop == this_loop) | |
fdccb6df RK |
2645 | break; |
2646 | } | |
edf711a4 | 2647 | } |
edf711a4 | 2648 | |
89724a5a RK |
2649 | /* Make sure that the target of P is within the current loop. */ |
2650 | ||
9a8e74f0 | 2651 | if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) |
a2be868f MH |
2652 | && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop) |
2653 | outer_loop = this_loop; | |
89724a5a | 2654 | |
b4ad7b23 RS |
2655 | /* If we stopped on a JUMP_INSN to the next insn after INSN, |
2656 | we have a block of code to try to move. | |
2657 | ||
2658 | We look backward and then forward from the target of INSN | |
2659 | to find a BARRIER at the same loop depth as the target. | |
2660 | If we find such a BARRIER, we make a new label for the start | |
2661 | of the block, invert the jump in P and point it to that label, | |
2662 | and move the block of code to the spot we found. */ | |
2663 | ||
a2be868f | 2664 | if (! outer_loop |
edf711a4 | 2665 | && GET_CODE (p) == JUMP_INSN |
c6096c5e RS |
2666 | && JUMP_LABEL (p) != 0 |
2667 | /* Just ignore jumps to labels that were never emitted. | |
2668 | These always indicate compilation errors. */ | |
2669 | && INSN_UID (JUMP_LABEL (p)) != 0 | |
7f1c097d | 2670 | && any_condjump_p (p) && onlyjump_p (p) |
3b10cf4b MM |
2671 | && next_real_insn (JUMP_LABEL (p)) == our_next |
2672 | /* If it's not safe to move the sequence, then we | |
2673 | mustn't try. */ | |
e6fcb60d | 2674 | && insns_safe_to_move_p (p, NEXT_INSN (insn), |
3b10cf4b | 2675 | &last_insn_to_move)) |
b4ad7b23 RS |
2676 | { |
2677 | rtx target | |
2678 | = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn (); | |
a2be868f | 2679 | struct loop *target_loop = uid_loop[INSN_UID (target)]; |
17bec8ee | 2680 | rtx loc, loc2; |
c4f81e4a JH |
2681 | rtx tmp; |
2682 | ||
2683 | /* Search for possible garbage past the conditional jumps | |
b0fd92a3 | 2684 | and look for the last barrier. */ |
c4f81e4a JH |
2685 | for (tmp = last_insn_to_move; |
2686 | tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp)) | |
2687 | if (GET_CODE (tmp) == BARRIER) | |
2688 | last_insn_to_move = tmp; | |
b4ad7b23 RS |
2689 | |
2690 | for (loc = target; loc; loc = PREV_INSN (loc)) | |
2691 | if (GET_CODE (loc) == BARRIER | |
17bec8ee BS |
2692 | /* Don't move things inside a tablejump. */ |
2693 | && ((loc2 = next_nonnote_insn (loc)) == 0 | |
2694 | || GET_CODE (loc2) != CODE_LABEL | |
2695 | || (loc2 = next_nonnote_insn (loc2)) == 0 | |
2696 | || GET_CODE (loc2) != JUMP_INSN | |
2697 | || (GET_CODE (PATTERN (loc2)) != ADDR_VEC | |
2698 | && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC)) | |
a2be868f | 2699 | && uid_loop[INSN_UID (loc)] == target_loop) |
b4ad7b23 RS |
2700 | break; |
2701 | ||
2702 | if (loc == 0) | |
2703 | for (loc = target; loc; loc = NEXT_INSN (loc)) | |
2704 | if (GET_CODE (loc) == BARRIER | |
17bec8ee BS |
2705 | /* Don't move things inside a tablejump. */ |
2706 | && ((loc2 = next_nonnote_insn (loc)) == 0 | |
2707 | || GET_CODE (loc2) != CODE_LABEL | |
2708 | || (loc2 = next_nonnote_insn (loc2)) == 0 | |
2709 | || GET_CODE (loc2) != JUMP_INSN | |
2710 | || (GET_CODE (PATTERN (loc2)) != ADDR_VEC | |
2711 | && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC)) | |
a2be868f | 2712 | && uid_loop[INSN_UID (loc)] == target_loop) |
b4ad7b23 RS |
2713 | break; |
2714 | ||
2715 | if (loc) | |
2716 | { | |
2717 | rtx cond_label = JUMP_LABEL (p); | |
2718 | rtx new_label = get_label_after (p); | |
2719 | ||
2720 | /* Ensure our label doesn't go away. */ | |
2721 | LABEL_NUSES (cond_label)++; | |
2722 | ||
a2be868f | 2723 | /* Verify that uid_loop is large enough and that |
0f41302f | 2724 | we can invert P. */ |
9ba11d5a RH |
2725 | if (invert_jump (p, new_label, 1)) |
2726 | { | |
2727 | rtx q, r; | |
2728 | ||
2729 | /* If no suitable BARRIER was found, create a suitable | |
2730 | one before TARGET. Since TARGET is a fall through | |
2731 | path, we'll need to insert an jump around our block | |
2732 | and a add a BARRIER before TARGET. | |
2733 | ||
2734 | This creates an extra unconditional jump outside | |
2735 | the loop. However, the benefits of removing rarely | |
2736 | executed instructions from inside the loop usually | |
2737 | outweighs the cost of the extra unconditional jump | |
2738 | outside the loop. */ | |
2739 | if (loc == 0) | |
2740 | { | |
2741 | rtx temp; | |
2742 | ||
2743 | temp = gen_jump (JUMP_LABEL (insn)); | |
2744 | temp = emit_jump_insn_before (temp, target); | |
2745 | JUMP_LABEL (temp) = JUMP_LABEL (insn); | |
2746 | LABEL_NUSES (JUMP_LABEL (insn))++; | |
2747 | loc = emit_barrier_before (target); | |
2748 | } | |
2749 | ||
2750 | /* Include the BARRIER after INSN and copy the | |
2751 | block after LOC. */ | |
e6fcb60d | 2752 | new_label = squeeze_notes (new_label, |
9ba11d5a RH |
2753 | last_insn_to_move); |
2754 | reorder_insns (new_label, last_insn_to_move, loc); | |
2755 | ||
2756 | /* All those insns are now in TARGET_LOOP. */ | |
e6fcb60d | 2757 | for (q = new_label; |
9ba11d5a RH |
2758 | q != NEXT_INSN (last_insn_to_move); |
2759 | q = NEXT_INSN (q)) | |
2760 | uid_loop[INSN_UID (q)] = target_loop; | |
2761 | ||
2762 | /* The label jumped to by INSN is no longer a loop | |
2763 | exit. Unless INSN does not have a label (e.g., | |
2764 | it is a RETURN insn), search loop->exit_labels | |
2765 | to find its label_ref, and remove it. Also turn | |
2766 | off LABEL_OUTSIDE_LOOP_P bit. */ | |
2767 | if (JUMP_LABEL (insn)) | |
2768 | { | |
fd5d5b07 KH |
2769 | for (q = 0, r = this_loop->exit_labels; |
2770 | r; | |
2771 | q = r, r = LABEL_NEXTREF (r)) | |
9ba11d5a RH |
2772 | if (XEXP (r, 0) == JUMP_LABEL (insn)) |
2773 | { | |
2774 | LABEL_OUTSIDE_LOOP_P (r) = 0; | |
2775 | if (q) | |
2776 | LABEL_NEXTREF (q) = LABEL_NEXTREF (r); | |
2777 | else | |
2778 | this_loop->exit_labels = LABEL_NEXTREF (r); | |
2779 | break; | |
2780 | } | |
2781 | ||
2782 | for (loop = this_loop; loop && loop != target_loop; | |
2783 | loop = loop->outer) | |
2784 | loop->exit_count--; | |
2785 | ||
2786 | /* If we didn't find it, then something is | |
2787 | wrong. */ | |
2788 | if (! r) | |
2789 | abort (); | |
2790 | } | |
2791 | ||
2792 | /* P is now a jump outside the loop, so it must be put | |
2793 | in loop->exit_labels, and marked as such. | |
2794 | The easiest way to do this is to just call | |
2795 | mark_loop_jump again for P. */ | |
2796 | mark_loop_jump (PATTERN (p), this_loop); | |
2797 | ||
2798 | /* If INSN now jumps to the insn after it, | |
2799 | delete INSN. */ | |
2800 | if (JUMP_LABEL (insn) != 0 | |
2801 | && (next_real_insn (JUMP_LABEL (insn)) | |
2802 | == next_real_insn (insn))) | |
2803 | delete_insn (insn); | |
2804 | } | |
b4ad7b23 RS |
2805 | |
2806 | /* Continue the loop after where the conditional | |
2807 | branch used to jump, since the only branch insn | |
2808 | in the block (if it still remains) is an inter-loop | |
2809 | branch and hence needs no processing. */ | |
2810 | insn = NEXT_INSN (cond_label); | |
2811 | ||
2812 | if (--LABEL_NUSES (cond_label) == 0) | |
2813 | delete_insn (cond_label); | |
3ad0cfaf RK |
2814 | |
2815 | /* This loop will be continued with NEXT_INSN (insn). */ | |
2816 | insn = PREV_INSN (insn); | |
b4ad7b23 RS |
2817 | } |
2818 | } | |
2819 | } | |
2820 | } | |
2821 | } | |
2822 | ||
2823 | /* If any label in X jumps to a loop different from LOOP_NUM and any of the | |
2824 | loops it is contained in, mark the target loop invalid. | |
2825 | ||
2826 | For speed, we assume that X is part of a pattern of a JUMP_INSN. */ | |
2827 | ||
2828 | static void | |
a2be868f | 2829 | mark_loop_jump (x, loop) |
b4ad7b23 | 2830 | rtx x; |
a2be868f | 2831 | struct loop *loop; |
b4ad7b23 | 2832 | { |
a2be868f MH |
2833 | struct loop *dest_loop; |
2834 | struct loop *outer_loop; | |
b4ad7b23 RS |
2835 | int i; |
2836 | ||
2837 | switch (GET_CODE (x)) | |
2838 | { | |
2839 | case PC: | |
2840 | case USE: | |
2841 | case CLOBBER: | |
2842 | case REG: | |
2843 | case MEM: | |
2844 | case CONST_INT: | |
2845 | case CONST_DOUBLE: | |
2846 | case RETURN: | |
2847 | return; | |
2848 | ||
2849 | case CONST: | |
2850 | /* There could be a label reference in here. */ | |
a2be868f | 2851 | mark_loop_jump (XEXP (x, 0), loop); |
b4ad7b23 RS |
2852 | return; |
2853 | ||
2854 | case PLUS: | |
2855 | case MINUS: | |
2856 | case MULT: | |
a2be868f MH |
2857 | mark_loop_jump (XEXP (x, 0), loop); |
2858 | mark_loop_jump (XEXP (x, 1), loop); | |
b4ad7b23 RS |
2859 | return; |
2860 | ||
c4ae2725 JL |
2861 | case LO_SUM: |
2862 | /* This may refer to a LABEL_REF or SYMBOL_REF. */ | |
a2be868f | 2863 | mark_loop_jump (XEXP (x, 1), loop); |
c4ae2725 JL |
2864 | return; |
2865 | ||
b4ad7b23 RS |
2866 | case SIGN_EXTEND: |
2867 | case ZERO_EXTEND: | |
a2be868f | 2868 | mark_loop_jump (XEXP (x, 0), loop); |
b4ad7b23 RS |
2869 | return; |
2870 | ||
2871 | case LABEL_REF: | |
a2be868f | 2872 | dest_loop = uid_loop[INSN_UID (XEXP (x, 0))]; |
b4ad7b23 RS |
2873 | |
2874 | /* Link together all labels that branch outside the loop. This | |
2875 | is used by final_[bg]iv_value and the loop unrolling code. Also | |
2876 | mark this LABEL_REF so we know that this branch should predict | |
2877 | false. */ | |
2878 | ||
edf711a4 RK |
2879 | /* A check to make sure the label is not in an inner nested loop, |
2880 | since this does not count as a loop exit. */ | |
a2be868f | 2881 | if (dest_loop) |
edf711a4 | 2882 | { |
a2be868f MH |
2883 | for (outer_loop = dest_loop; outer_loop; |
2884 | outer_loop = outer_loop->outer) | |
2885 | if (outer_loop == loop) | |
edf711a4 RK |
2886 | break; |
2887 | } | |
2888 | else | |
a2be868f | 2889 | outer_loop = NULL; |
edf711a4 | 2890 | |
a2be868f | 2891 | if (loop && ! outer_loop) |
b4ad7b23 RS |
2892 | { |
2893 | LABEL_OUTSIDE_LOOP_P (x) = 1; | |
a2be868f MH |
2894 | LABEL_NEXTREF (x) = loop->exit_labels; |
2895 | loop->exit_labels = x; | |
353127c2 | 2896 | |
a2be868f MH |
2897 | for (outer_loop = loop; |
2898 | outer_loop && outer_loop != dest_loop; | |
2899 | outer_loop = outer_loop->outer) | |
2900 | outer_loop->exit_count++; | |
b4ad7b23 RS |
2901 | } |
2902 | ||
2903 | /* If this is inside a loop, but not in the current loop or one enclosed | |
2904 | by it, it invalidates at least one loop. */ | |
2905 | ||
a2be868f | 2906 | if (! dest_loop) |
b4ad7b23 RS |
2907 | return; |
2908 | ||
2909 | /* We must invalidate every nested loop containing the target of this | |
2910 | label, except those that also contain the jump insn. */ | |
2911 | ||
a2be868f | 2912 | for (; dest_loop; dest_loop = dest_loop->outer) |
b4ad7b23 RS |
2913 | { |
2914 | /* Stop when we reach a loop that also contains the jump insn. */ | |
a2be868f | 2915 | for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer) |
b4ad7b23 RS |
2916 | if (dest_loop == outer_loop) |
2917 | return; | |
2918 | ||
2919 | /* If we get here, we know we need to invalidate a loop. */ | |
a2be868f | 2920 | if (loop_dump_stream && ! dest_loop->invalid) |
b4ad7b23 RS |
2921 | fprintf (loop_dump_stream, |
2922 | "\nLoop at %d ignored due to multiple entry points.\n", | |
a2be868f | 2923 | INSN_UID (dest_loop->start)); |
e6fcb60d | 2924 | |
a2be868f | 2925 | dest_loop->invalid = 1; |
b4ad7b23 RS |
2926 | } |
2927 | return; | |
2928 | ||
2929 | case SET: | |
2930 | /* If this is not setting pc, ignore. */ | |
2931 | if (SET_DEST (x) == pc_rtx) | |
a2be868f | 2932 | mark_loop_jump (SET_SRC (x), loop); |
b4ad7b23 RS |
2933 | return; |
2934 | ||
2935 | case IF_THEN_ELSE: | |
a2be868f MH |
2936 | mark_loop_jump (XEXP (x, 1), loop); |
2937 | mark_loop_jump (XEXP (x, 2), loop); | |
b4ad7b23 RS |
2938 | return; |
2939 | ||
2940 | case PARALLEL: | |
2941 | case ADDR_VEC: | |
2942 | for (i = 0; i < XVECLEN (x, 0); i++) | |
a2be868f | 2943 | mark_loop_jump (XVECEXP (x, 0, i), loop); |
b4ad7b23 RS |
2944 | return; |
2945 | ||
2946 | case ADDR_DIFF_VEC: | |
2947 | for (i = 0; i < XVECLEN (x, 1); i++) | |
a2be868f | 2948 | mark_loop_jump (XVECEXP (x, 1, i), loop); |
b4ad7b23 RS |
2949 | return; |
2950 | ||
2951 | default: | |
c4ae2725 JL |
2952 | /* Strictly speaking this is not a jump into the loop, only a possible |
2953 | jump out of the loop. However, we have no way to link the destination | |
2954 | of this jump onto the list of exit labels. To be safe we mark this | |
2955 | loop and any containing loops as invalid. */ | |
a2be868f | 2956 | if (loop) |
353127c2 | 2957 | { |
a2be868f | 2958 | for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer) |
c4ae2725 | 2959 | { |
a2be868f | 2960 | if (loop_dump_stream && ! outer_loop->invalid) |
c4ae2725 JL |
2961 | fprintf (loop_dump_stream, |
2962 | "\nLoop at %d ignored due to unknown exit jump.\n", | |
a2be868f MH |
2963 | INSN_UID (outer_loop->start)); |
2964 | outer_loop->invalid = 1; | |
c4ae2725 | 2965 | } |
353127c2 | 2966 | } |
b6ccc3fb | 2967 | return; |
b4ad7b23 RS |
2968 | } |
2969 | } | |
2970 | \f | |
2971 | /* Return nonzero if there is a label in the range from | |
2972 | insn INSN to and including the insn whose luid is END | |
2973 | INSN must have an assigned luid (i.e., it must not have | |
2974 | been previously created by loop.c). */ | |
2975 | ||
2976 | static int | |
2977 | labels_in_range_p (insn, end) | |
2978 | rtx insn; | |
2979 | int end; | |
2980 | { | |
2981 | while (insn && INSN_LUID (insn) <= end) | |
2982 | { | |
2983 | if (GET_CODE (insn) == CODE_LABEL) | |
2984 | return 1; | |
2985 | insn = NEXT_INSN (insn); | |
2986 | } | |
2987 | ||
2988 | return 0; | |
2989 | } | |
2990 | ||
2991 | /* Record that a memory reference X is being set. */ | |
2992 | ||
2993 | static void | |
84832317 | 2994 | note_addr_stored (x, y, data) |
b4ad7b23 | 2995 | rtx x; |
693e265f | 2996 | rtx y ATTRIBUTE_UNUSED; |
84832317 | 2997 | void *data ATTRIBUTE_UNUSED; |
b4ad7b23 | 2998 | { |
afa1738b MH |
2999 | struct loop_info *loop_info = data; |
3000 | ||
b4ad7b23 RS |
3001 | if (x == 0 || GET_CODE (x) != MEM) |
3002 | return; | |
3003 | ||
3004 | /* Count number of memory writes. | |
3005 | This affects heuristics in strength_reduce. */ | |
afa1738b | 3006 | loop_info->num_mem_sets++; |
fd5d5b07 | 3007 | |
ca800983 | 3008 | /* BLKmode MEM means all memory is clobbered. */ |
afa1738b | 3009 | if (GET_MODE (x) == BLKmode) |
14a774a9 RK |
3010 | { |
3011 | if (RTX_UNCHANGING_P (x)) | |
afa1738b | 3012 | loop_info->unknown_constant_address_altered = 1; |
14a774a9 | 3013 | else |
afa1738b | 3014 | loop_info->unknown_address_altered = 1; |
fd5d5b07 | 3015 | |
14a774a9 RK |
3016 | return; |
3017 | } | |
fd5d5b07 KH |
3018 | |
3019 | loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x, | |
afa1738b | 3020 | loop_info->store_mems); |
b4ad7b23 | 3021 | } |
59487769 JL |
3022 | |
3023 | /* X is a value modified by an INSN that references a biv inside a loop | |
3024 | exit test (ie, X is somehow related to the value of the biv). If X | |
3025 | is a pseudo that is used more than once, then the biv is (effectively) | |
635a2a90 | 3026 | used more than once. DATA is a pointer to a loop_regs structure. */ |
59487769 JL |
3027 | |
3028 | static void | |
84832317 | 3029 | note_set_pseudo_multiple_uses (x, y, data) |
59487769 JL |
3030 | rtx x; |
3031 | rtx y ATTRIBUTE_UNUSED; | |
84832317 | 3032 | void *data; |
59487769 | 3033 | { |
1ecd860b MH |
3034 | struct loop_regs *regs = (struct loop_regs *) data; |
3035 | ||
59487769 JL |
3036 | if (x == 0) |
3037 | return; | |
3038 | ||
3039 | while (GET_CODE (x) == STRICT_LOW_PART | |
3040 | || GET_CODE (x) == SIGN_EXTRACT | |
3041 | || GET_CODE (x) == ZERO_EXTRACT | |
3042 | || GET_CODE (x) == SUBREG) | |
3043 | x = XEXP (x, 0); | |
3044 | ||
3045 | if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER) | |
3046 | return; | |
3047 | ||
3048 | /* If we do not have usage information, or if we know the register | |
3049 | is used more than once, note that fact for check_dbra_loop. */ | |
3050 | if (REGNO (x) >= max_reg_before_loop | |
f1d4ac80 MH |
3051 | || ! regs->array[REGNO (x)].single_usage |
3052 | || regs->array[REGNO (x)].single_usage == const0_rtx) | |
635a2a90 | 3053 | regs->multiple_uses = 1; |
59487769 | 3054 | } |
b4ad7b23 RS |
3055 | \f |
3056 | /* Return nonzero if the rtx X is invariant over the current loop. | |
3057 | ||
3058 | The value is 2 if we refer to something only conditionally invariant. | |
3059 | ||
20bd7bfa | 3060 | A memory ref is invariant if it is not volatile and does not conflict |
afa1738b | 3061 | with anything stored in `loop_info->store_mems'. */ |
b4ad7b23 RS |
3062 | |
3063 | int | |
0534b804 MH |
3064 | loop_invariant_p (loop, x) |
3065 | const struct loop *loop; | |
b4ad7b23 RS |
3066 | register rtx x; |
3067 | { | |
afa1738b | 3068 | struct loop_info *loop_info = LOOP_INFO (loop); |
1ecd860b | 3069 | struct loop_regs *regs = LOOP_REGS (loop); |
b4ad7b23 RS |
3070 | register int i; |
3071 | register enum rtx_code code; | |
6f7d635c | 3072 | register const char *fmt; |
b4ad7b23 | 3073 | int conditional = 0; |
5026a502 | 3074 | rtx mem_list_entry; |
b4ad7b23 RS |
3075 | |
3076 | if (x == 0) | |
3077 | return 1; | |
3078 | code = GET_CODE (x); | |
3079 | switch (code) | |
3080 | { | |
3081 | case CONST_INT: | |
3082 | case CONST_DOUBLE: | |
3083 | case SYMBOL_REF: | |
3084 | case CONST: | |
3085 | return 1; | |
3086 | ||
3087 | case LABEL_REF: | |
3088 | /* A LABEL_REF is normally invariant, however, if we are unrolling | |
3089 | loops, and this label is inside the loop, then it isn't invariant. | |
3090 | This is because each unrolled copy of the loop body will have | |
3091 | a copy of this label. If this was invariant, then an insn loading | |
3092 | the address of this label into a register might get moved outside | |
3093 | the loop, and then each loop body would end up using the same label. | |
3094 | ||
3095 | We don't know the loop bounds here though, so just fail for all | |
3096 | labels. */ | |
81797aba | 3097 | if (flag_unroll_loops) |
b4ad7b23 RS |
3098 | return 0; |
3099 | else | |
3100 | return 1; | |
3101 | ||
3102 | case PC: | |
3103 | case CC0: | |
3104 | case UNSPEC_VOLATILE: | |
3105 | return 0; | |
3106 | ||
3107 | case REG: | |
3108 | /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid | |
3109 | since the reg might be set by initialization within the loop. */ | |
1f027d54 RK |
3110 | |
3111 | if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx | |
3112 | || x == arg_pointer_rtx) | |
3113 | && ! current_function_has_nonlocal_goto) | |
b4ad7b23 | 3114 | return 1; |
1f027d54 | 3115 | |
0534b804 | 3116 | if (LOOP_INFO (loop)->has_call |
b4ad7b23 RS |
3117 | && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)]) |
3118 | return 0; | |
1f027d54 | 3119 | |
f1d4ac80 | 3120 | if (regs->array[REGNO (x)].set_in_loop < 0) |
b4ad7b23 | 3121 | return 2; |
1f027d54 | 3122 | |
f1d4ac80 | 3123 | return regs->array[REGNO (x)].set_in_loop == 0; |
b4ad7b23 RS |
3124 | |
3125 | case MEM: | |
d5e3f151 JW |
3126 | /* Volatile memory references must be rejected. Do this before |
3127 | checking for read-only items, so that volatile read-only items | |
3128 | will be rejected also. */ | |
3129 | if (MEM_VOLATILE_P (x)) | |
3130 | return 0; | |
3131 | ||
b4ad7b23 | 3132 | /* See if there is any dependence between a store and this load. */ |
afa1738b | 3133 | mem_list_entry = loop_info->store_mems; |
5026a502 JL |
3134 | while (mem_list_entry) |
3135 | { | |
3136 | if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode, | |
3137 | x, rtx_varies_p)) | |
3138 | return 0; | |
14a774a9 | 3139 | |
5026a502 JL |
3140 | mem_list_entry = XEXP (mem_list_entry, 1); |
3141 | } | |
b4ad7b23 RS |
3142 | |
3143 | /* It's not invalidated by a store in memory | |
3144 | but we must still verify the address is invariant. */ | |
3145 | break; | |
3146 | ||
3147 | case ASM_OPERANDS: | |
3148 | /* Don't mess with insns declared volatile. */ | |
3149 | if (MEM_VOLATILE_P (x)) | |
3150 | return 0; | |
e9a25f70 | 3151 | break; |
e6fcb60d | 3152 | |
e9a25f70 JL |
3153 | default: |
3154 | break; | |
b4ad7b23 RS |
3155 | } |
3156 | ||
3157 | fmt = GET_RTX_FORMAT (code); | |
3158 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
3159 | { | |
3160 | if (fmt[i] == 'e') | |
3161 | { | |
0534b804 | 3162 | int tem = loop_invariant_p (loop, XEXP (x, i)); |
b4ad7b23 RS |
3163 | if (tem == 0) |
3164 | return 0; | |
3165 | if (tem == 2) | |
3166 | conditional = 1; | |
3167 | } | |
3168 | else if (fmt[i] == 'E') | |
3169 | { | |
3170 | register int j; | |
3171 | for (j = 0; j < XVECLEN (x, i); j++) | |
3172 | { | |
0534b804 | 3173 | int tem = loop_invariant_p (loop, XVECEXP (x, i, j)); |
b4ad7b23 RS |
3174 | if (tem == 0) |
3175 | return 0; | |
3176 | if (tem == 2) | |
3177 | conditional = 1; | |
3178 | } | |
3179 | ||
3180 | } | |
3181 | } | |
3182 | ||
3183 | return 1 + conditional; | |
3184 | } | |
b4ad7b23 RS |
3185 | \f |
3186 | /* Return nonzero if all the insns in the loop that set REG | |
3187 | are INSN and the immediately following insns, | |
3188 | and if each of those insns sets REG in an invariant way | |
3189 | (not counting uses of REG in them). | |
3190 | ||
3191 | The value is 2 if some of these insns are only conditionally invariant. | |
3192 | ||
3193 | We assume that INSN itself is the first set of REG | |
3194 | and that its source is invariant. */ | |
3195 | ||
3196 | static int | |
0534b804 MH |
3197 | consec_sets_invariant_p (loop, reg, n_sets, insn) |
3198 | const struct loop *loop; | |
b4ad7b23 RS |
3199 | int n_sets; |
3200 | rtx reg, insn; | |
3201 | { | |
1ecd860b | 3202 | struct loop_regs *regs = LOOP_REGS (loop); |
770ae6cc RK |
3203 | rtx p = insn; |
3204 | unsigned int regno = REGNO (reg); | |
b4ad7b23 RS |
3205 | rtx temp; |
3206 | /* Number of sets we have to insist on finding after INSN. */ | |
3207 | int count = n_sets - 1; | |
f1d4ac80 | 3208 | int old = regs->array[regno].set_in_loop; |
b4ad7b23 RS |
3209 | int value = 0; |
3210 | int this; | |
3211 | ||
3212 | /* If N_SETS hit the limit, we can't rely on its value. */ | |
3213 | if (n_sets == 127) | |
3214 | return 0; | |
3215 | ||
f1d4ac80 | 3216 | regs->array[regno].set_in_loop = 0; |
b4ad7b23 RS |
3217 | |
3218 | while (count > 0) | |
3219 | { | |
3220 | register enum rtx_code code; | |
3221 | rtx set; | |
3222 | ||
3223 | p = NEXT_INSN (p); | |
3224 | code = GET_CODE (p); | |
3225 | ||
38e01259 | 3226 | /* If library call, skip to end of it. */ |
5fd8383e | 3227 | if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX))) |
b4ad7b23 RS |
3228 | p = XEXP (temp, 0); |
3229 | ||
3230 | this = 0; | |
3231 | if (code == INSN | |
3232 | && (set = single_set (p)) | |
3233 | && GET_CODE (SET_DEST (set)) == REG | |
3234 | && REGNO (SET_DEST (set)) == regno) | |
3235 | { | |
0534b804 | 3236 | this = loop_invariant_p (loop, SET_SRC (set)); |
b4ad7b23 RS |
3237 | if (this != 0) |
3238 | value |= this; | |
51723711 | 3239 | else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))) |
b4ad7b23 | 3240 | { |
83d90aac JW |
3241 | /* If this is a libcall, then any invariant REG_EQUAL note is OK. |
3242 | If this is an ordinary insn, then only CONSTANT_P REG_EQUAL | |
3243 | notes are OK. */ | |
3244 | this = (CONSTANT_P (XEXP (temp, 0)) | |
3245 | || (find_reg_note (p, REG_RETVAL, NULL_RTX) | |
0534b804 | 3246 | && loop_invariant_p (loop, XEXP (temp, 0)))); |
b4ad7b23 RS |
3247 | if (this != 0) |
3248 | value |= this; | |
3249 | } | |
3250 | } | |
3251 | if (this != 0) | |
3252 | count--; | |
3253 | else if (code != NOTE) | |
3254 | { | |
f1d4ac80 | 3255 | regs->array[regno].set_in_loop = old; |
b4ad7b23 RS |
3256 | return 0; |
3257 | } | |
3258 | } | |
3259 | ||
f1d4ac80 | 3260 | regs->array[regno].set_in_loop = old; |
0534b804 | 3261 | /* If loop_invariant_p ever returned 2, we return 2. */ |
b4ad7b23 RS |
3262 | return 1 + (value & 2); |
3263 | } | |
3264 | ||
3265 | #if 0 | |
3266 | /* I don't think this condition is sufficient to allow INSN | |
3267 | to be moved, so we no longer test it. */ | |
3268 | ||
3269 | /* Return 1 if all insns in the basic block of INSN and following INSN | |
3270 | that set REG are invariant according to TABLE. */ | |
3271 | ||
3272 | static int | |
3273 | all_sets_invariant_p (reg, insn, table) | |
3274 | rtx reg, insn; | |
3275 | short *table; | |
3276 | { | |
3277 | register rtx p = insn; | |
3278 | register int regno = REGNO (reg); | |
3279 | ||
3280 | while (1) | |
3281 | { | |
3282 | register enum rtx_code code; | |
3283 | p = NEXT_INSN (p); | |
3284 | code = GET_CODE (p); | |
3285 | if (code == CODE_LABEL || code == JUMP_INSN) | |
3286 | return 1; | |
3287 | if (code == INSN && GET_CODE (PATTERN (p)) == SET | |
3288 | && GET_CODE (SET_DEST (PATTERN (p))) == REG | |
3289 | && REGNO (SET_DEST (PATTERN (p))) == regno) | |
3290 | { | |
0534b804 | 3291 | if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table)) |
b4ad7b23 RS |
3292 | return 0; |
3293 | } | |
3294 | } | |
3295 | } | |
3296 | #endif /* 0 */ | |
3297 | \f | |
3298 | /* Look at all uses (not sets) of registers in X. For each, if it is | |
3299 | the single use, set USAGE[REGNO] to INSN; if there was a previous use in | |
3300 | a different insn, set USAGE[REGNO] to const0_rtx. */ | |
3301 | ||
3302 | static void | |
f1d4ac80 MH |
3303 | find_single_use_in_loop (regs, insn, x) |
3304 | struct loop_regs *regs; | |
b4ad7b23 RS |
3305 | rtx insn; |
3306 | rtx x; | |
b4ad7b23 RS |
3307 | { |
3308 | enum rtx_code code = GET_CODE (x); | |
6f7d635c | 3309 | const char *fmt = GET_RTX_FORMAT (code); |
b4ad7b23 RS |
3310 | int i, j; |
3311 | ||
3312 | if (code == REG) | |
f1d4ac80 MH |
3313 | regs->array[REGNO (x)].single_usage |
3314 | = (regs->array[REGNO (x)].single_usage != 0 | |
3315 | && regs->array[REGNO (x)].single_usage != insn) | |
b4ad7b23 RS |
3316 | ? const0_rtx : insn; |
3317 | ||
3318 | else if (code == SET) | |
3319 | { | |
3320 | /* Don't count SET_DEST if it is a REG; otherwise count things | |
3321 | in SET_DEST because if a register is partially modified, it won't | |
e6fcb60d | 3322 | show up as a potential movable so we don't care how USAGE is set |
b4ad7b23 RS |
3323 | for it. */ |
3324 | if (GET_CODE (SET_DEST (x)) != REG) | |
f1d4ac80 MH |
3325 | find_single_use_in_loop (regs, insn, SET_DEST (x)); |
3326 | find_single_use_in_loop (regs, insn, SET_SRC (x)); | |
b4ad7b23 RS |
3327 | } |
3328 | else | |
3329 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
3330 | { | |
3331 | if (fmt[i] == 'e' && XEXP (x, i) != 0) | |
f1d4ac80 | 3332 | find_single_use_in_loop (regs, insn, XEXP (x, i)); |
b4ad7b23 RS |
3333 | else if (fmt[i] == 'E') |
3334 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
f1d4ac80 | 3335 | find_single_use_in_loop (regs, insn, XVECEXP (x, i, j)); |
b4ad7b23 RS |
3336 | } |
3337 | } | |
3338 | \f | |
a4c3ddd8 | 3339 | /* Count and record any set in X which is contained in INSN. Update |
f1d4ac80 MH |
3340 | REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set |
3341 | in X. */ | |
a4c3ddd8 BS |
3342 | |
3343 | static void | |
f1d4ac80 | 3344 | count_one_set (regs, insn, x, last_set) |
1ecd860b | 3345 | struct loop_regs *regs; |
a4c3ddd8 | 3346 | rtx insn, x; |
a4c3ddd8 BS |
3347 | rtx *last_set; |
3348 | { | |
3349 | if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG) | |
3350 | /* Don't move a reg that has an explicit clobber. | |
3351 | It's not worth the pain to try to do it correctly. */ | |
f1d4ac80 | 3352 | regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1; |
a4c3ddd8 BS |
3353 | |
3354 | if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER) | |
3355 | { | |
3356 | rtx dest = SET_DEST (x); | |
3357 | while (GET_CODE (dest) == SUBREG | |
3358 | || GET_CODE (dest) == ZERO_EXTRACT | |
3359 | || GET_CODE (dest) == SIGN_EXTRACT | |
3360 | || GET_CODE (dest) == STRICT_LOW_PART) | |
3361 | dest = XEXP (dest, 0); | |
3362 | if (GET_CODE (dest) == REG) | |
3363 | { | |
3364 | register int regno = REGNO (dest); | |
3365 | /* If this is the first setting of this reg | |
3366 | in current basic block, and it was set before, | |
3367 | it must be set in two basic blocks, so it cannot | |
3368 | be moved out of the loop. */ | |
f1d4ac80 MH |
3369 | if (regs->array[regno].set_in_loop > 0 |
3370 | && last_set == 0) | |
3371 | regs->array[regno].may_not_optimize = 1; | |
a4c3ddd8 BS |
3372 | /* If this is not first setting in current basic block, |
3373 | see if reg was used in between previous one and this. | |
3374 | If so, neither one can be moved. */ | |
3375 | if (last_set[regno] != 0 | |
3376 | && reg_used_between_p (dest, last_set[regno], insn)) | |
f1d4ac80 MH |
3377 | regs->array[regno].may_not_optimize = 1; |
3378 | if (regs->array[regno].set_in_loop < 127) | |
3379 | ++regs->array[regno].set_in_loop; | |
a4c3ddd8 BS |
3380 | last_set[regno] = insn; |
3381 | } | |
3382 | } | |
3383 | } | |
b4ad7b23 | 3384 | \f |
0534b804 MH |
3385 | /* Given a loop that is bounded by LOOP->START and LOOP->END and that |
3386 | is entered at LOOP->SCAN_START, return 1 if the register set in SET | |
3387 | contained in insn INSN is used by any insn that precedes INSN in | |
3388 | cyclic order starting from the loop entry point. | |
b4ad7b23 RS |
3389 | |
3390 | We don't want to use INSN_LUID here because if we restrict INSN to those | |
3391 | that have a valid INSN_LUID, it means we cannot move an invariant out | |
3392 | from an inner loop past two loops. */ | |
3393 | ||
3394 | static int | |
a2be868f MH |
3395 | loop_reg_used_before_p (loop, set, insn) |
3396 | const struct loop *loop; | |
3397 | rtx set, insn; | |
b4ad7b23 RS |
3398 | { |
3399 | rtx reg = SET_DEST (set); | |
3400 | rtx p; | |
3401 | ||
3402 | /* Scan forward checking for register usage. If we hit INSN, we | |
a2be868f MH |
3403 | are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */ |
3404 | for (p = loop->scan_start; p != insn; p = NEXT_INSN (p)) | |
b4ad7b23 | 3405 | { |
2c3c49de | 3406 | if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p))) |
b4ad7b23 RS |
3407 | return 1; |
3408 | ||
a2be868f MH |
3409 | if (p == loop->end) |
3410 | p = loop->start; | |
b4ad7b23 RS |
3411 | } |
3412 | ||
3413 | return 0; | |
3414 | } | |
3415 | \f | |
3416 | /* A "basic induction variable" or biv is a pseudo reg that is set | |
3417 | (within this loop) only by incrementing or decrementing it. */ | |
3418 | /* A "general induction variable" or giv is a pseudo reg whose | |
3419 | value is a linear function of a biv. */ | |
3420 | ||
3421 | /* Bivs are recognized by `basic_induction_var'; | |
45f97e2e | 3422 | Givs by `general_induction_var'. */ |
b4ad7b23 | 3423 | |
b4ad7b23 RS |
3424 | /* Communication with routines called via `note_stores'. */ |
3425 | ||
3426 | static rtx note_insn; | |
3427 | ||
3428 | /* Dummy register to have non-zero DEST_REG for DEST_ADDR type givs. */ | |
3429 | ||
3430 | static rtx addr_placeholder; | |
3431 | ||
3432 | /* ??? Unfinished optimizations, and possible future optimizations, | |
3433 | for the strength reduction code. */ | |
3434 | ||
b4ad7b23 | 3435 | /* ??? The interaction of biv elimination, and recognition of 'constant' |
0f41302f | 3436 | bivs, may cause problems. */ |
b4ad7b23 RS |
3437 | |
3438 | /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause | |
3439 | performance problems. | |
3440 | ||
3441 | Perhaps don't eliminate things that can be combined with an addressing | |
3442 | mode. Find all givs that have the same biv, mult_val, and add_val; | |
3443 | then for each giv, check to see if its only use dies in a following | |
3444 | memory address. If so, generate a new memory address and check to see | |
3445 | if it is valid. If it is valid, then store the modified memory address, | |
3446 | otherwise, mark the giv as not done so that it will get its own iv. */ | |
3447 | ||
3448 | /* ??? Could try to optimize branches when it is known that a biv is always | |
3449 | positive. */ | |
3450 | ||
3451 | /* ??? When replace a biv in a compare insn, we should replace with closest | |
3452 | giv so that an optimized branch can still be recognized by the combiner, | |
3453 | e.g. the VAX acb insn. */ | |
3454 | ||
3455 | /* ??? Many of the checks involving uid_luid could be simplified if regscan | |
3456 | was rerun in loop_optimize whenever a register was added or moved. | |
3457 | Also, some of the optimizations could be a little less conservative. */ | |
3458 | \f | |
5e787f07 JH |
3459 | /* Scan the loop body and call FNCALL for each insn. In the addition to the |
3460 | LOOP and INSN parameters pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the | |
3461 | callback. | |
e6fcb60d | 3462 | |
5e787f07 JH |
3463 | NOT_EVERY_ITERATION if current insn is not executed at least once for every |
3464 | loop iteration except for the last one. | |
3465 | ||
3466 | MAYBE_MULTIPLE is 1 if current insn may be executed more than once for every | |
3467 | loop iteration. | |
3468 | */ | |
3469 | void | |
3470 | for_each_insn_in_loop (loop, fncall) | |
a2be868f | 3471 | struct loop *loop; |
5e787f07 | 3472 | loop_insn_callback fncall; |
b4ad7b23 | 3473 | { |
b4ad7b23 RS |
3474 | /* This is 1 if current insn is not executed at least once for every loop |
3475 | iteration. */ | |
3476 | int not_every_iteration = 0; | |
7dcd3836 | 3477 | int maybe_multiple = 0; |
ae188a87 | 3478 | int past_loop_latch = 0; |
5ea7a4ae | 3479 | int loop_depth = 0; |
5e787f07 | 3480 | rtx p; |
b4ad7b23 | 3481 | |
a2be868f | 3482 | /* If loop_scan_start points to the loop exit test, we have to be wary of |
5353610b | 3483 | subversive use of gotos inside expression statements. */ |
5e787f07 JH |
3484 | if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start)) |
3485 | maybe_multiple = back_branch_in_range_p (loop, loop->scan_start); | |
b4ad7b23 RS |
3486 | |
3487 | /* Scan through loop to find all possible bivs. */ | |
3488 | ||
5e787f07 | 3489 | for (p = next_insn_in_loop (loop, loop->scan_start); |
41a972a9 | 3490 | p != NULL_RTX; |
a2be868f | 3491 | p = next_insn_in_loop (loop, p)) |
b4ad7b23 | 3492 | { |
c35971c8 | 3493 | p = fncall (loop, p, not_every_iteration, maybe_multiple); |
b4ad7b23 | 3494 | |
7dcd3836 | 3495 | /* Past CODE_LABEL, we get to insns that may be executed multiple |
5e787f07 JH |
3496 | times. The only way we can be sure that they can't is if every |
3497 | jump insn between here and the end of the loop either | |
3498 | returns, exits the loop, is a jump to a location that is still | |
3499 | behind the label, or is a jump to the loop start. */ | |
7dcd3836 RK |
3500 | |
3501 | if (GET_CODE (p) == CODE_LABEL) | |
3502 | { | |
3503 | rtx insn = p; | |
3504 | ||
3505 | maybe_multiple = 0; | |
3506 | ||
3507 | while (1) | |
3508 | { | |
3509 | insn = NEXT_INSN (insn); | |
5e787f07 | 3510 | if (insn == loop->scan_start) |
7dcd3836 | 3511 | break; |
5e787f07 | 3512 | if (insn == loop->end) |
7dcd3836 | 3513 | { |
5e787f07 JH |
3514 | if (loop->top != 0) |
3515 | insn = loop->top; | |
7dcd3836 RK |
3516 | else |
3517 | break; | |
5e787f07 | 3518 | if (insn == loop->scan_start) |
7dcd3836 RK |
3519 | break; |
3520 | } | |
3521 | ||
3522 | if (GET_CODE (insn) == JUMP_INSN | |
3523 | && GET_CODE (PATTERN (insn)) != RETURN | |
7f1c097d | 3524 | && (!any_condjump_p (insn) |
7dcd3836 | 3525 | || (JUMP_LABEL (insn) != 0 |
5e787f07 JH |
3526 | && JUMP_LABEL (insn) != loop->scan_start |
3527 | && !loop_insn_first_p (p, JUMP_LABEL (insn))))) | |
8516af93 JW |
3528 | { |
3529 | maybe_multiple = 1; | |
3530 | break; | |
3531 | } | |
7dcd3836 RK |
3532 | } |
3533 | } | |
3534 | ||
8516af93 | 3535 | /* Past a jump, we get to insns for which we can't count |
5e787f07 | 3536 | on whether they will be executed during each iteration. */ |
8516af93 | 3537 | /* This code appears twice in strength_reduce. There is also similar |
5e787f07 | 3538 | code in scan_loop. */ |
8516af93 | 3539 | if (GET_CODE (p) == JUMP_INSN |
5e787f07 JH |
3540 | /* If we enter the loop in the middle, and scan around to the |
3541 | beginning, don't set not_every_iteration for that. | |
3542 | This can be any kind of jump, since we want to know if insns | |
3543 | will be executed if the loop is executed. */ | |
3544 | && !(JUMP_LABEL (p) == loop->top | |
7f1c097d JH |
3545 | && ((NEXT_INSN (NEXT_INSN (p)) == loop->end |
3546 | && any_uncondjump_p (p)) | |
3547 | || (NEXT_INSN (p) == loop->end && any_condjump_p (p))))) | |
8516af93 JW |
3548 | { |
3549 | rtx label = 0; | |
3550 | ||
3551 | /* If this is a jump outside the loop, then it also doesn't | |
3552 | matter. Check to see if the target of this branch is on the | |
a2be868f | 3553 | loop->exits_labels list. */ |
5e787f07 | 3554 | |
0534b804 | 3555 | for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label)) |
8516af93 JW |
3556 | if (XEXP (label, 0) == JUMP_LABEL (p)) |
3557 | break; | |
3558 | ||
5e787f07 | 3559 | if (!label) |
8516af93 JW |
3560 | not_every_iteration = 1; |
3561 | } | |
b4ad7b23 | 3562 | |
5ea7a4ae JW |
3563 | else if (GET_CODE (p) == NOTE) |
3564 | { | |
3565 | /* At the virtual top of a converted loop, insns are again known to | |
3566 | be executed each iteration: logically, the loop begins here | |
5f3db57e JL |
3567 | even though the exit code has been duplicated. |
3568 | ||
3569 | Insns are also again known to be executed each iteration at | |
3570 | the LOOP_CONT note. */ | |
3571 | if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP | |
3572 | || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT) | |
3573 | && loop_depth == 0) | |
5ea7a4ae JW |
3574 | not_every_iteration = 0; |
3575 | else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG) | |
3576 | loop_depth++; | |
3577 | else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END) | |
3578 | loop_depth--; | |
3579 | } | |
b4ad7b23 | 3580 | |
ae188a87 | 3581 | /* Note if we pass a loop latch. If we do, then we can not clear |
5e787f07 JH |
3582 | NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in |
3583 | a loop since a jump before the last CODE_LABEL may have started | |
3584 | a new loop iteration. | |
3585 | ||
3586 | Note that LOOP_TOP is only set for rotated loops and we need | |
3587 | this check for all loops, so compare against the CODE_LABEL | |
3588 | which immediately follows LOOP_START. */ | |
3589 | if (GET_CODE (p) == JUMP_INSN | |
3590 | && JUMP_LABEL (p) == NEXT_INSN (loop->start)) | |
ae188a87 JL |
3591 | past_loop_latch = 1; |
3592 | ||
b4ad7b23 | 3593 | /* Unlike in the code motion pass where MAYBE_NEVER indicates that |
5e787f07 JH |
3594 | an insn may never be executed, NOT_EVERY_ITERATION indicates whether |
3595 | or not an insn is known to be executed each iteration of the | |
3596 | loop, whether or not any iterations are known to occur. | |
b4ad7b23 | 3597 | |
5e787f07 JH |
3598 | Therefore, if we have just passed a label and have no more labels |
3599 | between here and the test insn of the loop, and we have not passed | |
3600 | a jump to the top of the loop, then we know these insns will be | |
3601 | executed each iteration. */ | |
b4ad7b23 | 3602 | |
5e787f07 JH |
3603 | if (not_every_iteration |
3604 | && !past_loop_latch | |
ae188a87 | 3605 | && GET_CODE (p) == CODE_LABEL |
5e787f07 JH |
3606 | && no_labels_between_p (p, loop->end) |
3607 | && loop_insn_first_p (p, loop->cont)) | |
b4ad7b23 RS |
3608 | not_every_iteration = 0; |
3609 | } | |
5e787f07 JH |
3610 | } |
3611 | \f | |
5e787f07 | 3612 | static void |
6ec73c7c | 3613 | loop_bivs_find (loop) |
5e787f07 | 3614 | struct loop *loop; |
5e787f07 | 3615 | { |
1ecd860b | 3616 | struct loop_regs *regs = LOOP_REGS (loop); |
ed5bb68d | 3617 | struct loop_ivs *ivs = LOOP_IVS (loop); |
14be28e5 | 3618 | /* Temporary list pointers for traversing ivs->list. */ |
5e787f07 | 3619 | struct iv_class *bl, **backbl; |
5e787f07 | 3620 | |
14be28e5 | 3621 | ivs->list = 0; |
5e787f07 | 3622 | |
5e787f07 | 3623 | for_each_insn_in_loop (loop, check_insn_for_bivs); |
6ec73c7c | 3624 | |
14be28e5 | 3625 | /* Scan ivs->list to remove all regs that proved not to be bivs. |
1ecd860b | 3626 | Make a sanity check against regs->n_times_set. */ |
14be28e5 | 3627 | for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next) |
b4ad7b23 | 3628 | { |
ed5bb68d | 3629 | if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT |
b4ad7b23 RS |
3630 | /* Above happens if register modified by subreg, etc. */ |
3631 | /* Make sure it is not recognized as a basic induction var: */ | |
f1d4ac80 | 3632 | || regs->array[bl->regno].n_times_set != bl->biv_count |
b4ad7b23 RS |
3633 | /* If never incremented, it is invariant that we decided not to |
3634 | move. So leave it alone. */ | |
3635 | || ! bl->incremented) | |
3636 | { | |
3637 | if (loop_dump_stream) | |
c804f3f8 | 3638 | fprintf (loop_dump_stream, "Biv %d: discarded, %s\n", |
b4ad7b23 | 3639 | bl->regno, |
ed5bb68d | 3640 | (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT |
b4ad7b23 RS |
3641 | ? "not induction variable" |
3642 | : (! bl->incremented ? "never incremented" | |
3643 | : "count error"))); | |
e6fcb60d | 3644 | |
ed5bb68d | 3645 | REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT; |
b4ad7b23 RS |
3646 | *backbl = bl->next; |
3647 | } | |
3648 | else | |
3649 | { | |
3650 | backbl = &bl->next; | |
3651 | ||
3652 | if (loop_dump_stream) | |
c804f3f8 | 3653 | fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno); |
b4ad7b23 RS |
3654 | } |
3655 | } | |
6ec73c7c | 3656 | } |
b4ad7b23 | 3657 | |
b4ad7b23 | 3658 | |
6ec73c7c MH |
3659 | /* Determine how BIVS are initialised by looking through pre-header |
3660 | extended basic block. */ | |
3661 | static void | |
3662 | loop_bivs_init_find (loop) | |
3663 | struct loop *loop; | |
3664 | { | |
6ec73c7c | 3665 | struct loop_ivs *ivs = LOOP_IVS (loop); |
14be28e5 | 3666 | /* Temporary list pointers for traversing ivs->list. */ |
6ec73c7c | 3667 | struct iv_class *bl; |
e304a8e6 MH |
3668 | int call_seen; |
3669 | rtx p; | |
b4ad7b23 RS |
3670 | |
3671 | /* Find initial value for each biv by searching backwards from loop_start, | |
3672 | halting at first label. Also record any test condition. */ | |
3673 | ||
3674 | call_seen = 0; | |
e304a8e6 | 3675 | for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p)) |
b4ad7b23 | 3676 | { |
e304a8e6 MH |
3677 | rtx test; |
3678 | ||
b4ad7b23 RS |
3679 | note_insn = p; |
3680 | ||
3681 | if (GET_CODE (p) == CALL_INSN) | |
3682 | call_seen = 1; | |
3683 | ||
ce7de04c | 3684 | if (INSN_P (p)) |
ed5bb68d | 3685 | note_stores (PATTERN (p), record_initial, ivs); |
b4ad7b23 RS |
3686 | |
3687 | /* Record any test of a biv that branches around the loop if no store | |
3688 | between it and the start of loop. We only care about tests with | |
3689 | constants and registers and only certain of those. */ | |
3690 | if (GET_CODE (p) == JUMP_INSN | |
3691 | && JUMP_LABEL (p) != 0 | |
e304a8e6 | 3692 | && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end) |
0534b804 | 3693 | && (test = get_condition_for_loop (loop, p)) != 0 |
b4ad7b23 RS |
3694 | && GET_CODE (XEXP (test, 0)) == REG |
3695 | && REGNO (XEXP (test, 0)) < max_reg_before_loop | |
8b634749 | 3696 | && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0 |
e304a8e6 | 3697 | && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start) |
b4ad7b23 RS |
3698 | && bl->init_insn == 0) |
3699 | { | |
3700 | /* If an NE test, we have an initial value! */ | |
3701 | if (GET_CODE (test) == NE) | |
3702 | { | |
3703 | bl->init_insn = p; | |
38a448ca RH |
3704 | bl->init_set = gen_rtx_SET (VOIDmode, |
3705 | XEXP (test, 0), XEXP (test, 1)); | |
b4ad7b23 RS |
3706 | } |
3707 | else | |
3708 | bl->initial_test = test; | |
3709 | } | |
3710 | } | |
6ec73c7c MH |
3711 | } |
3712 | ||
3713 | ||
3714 | /* Look at the each biv and see if we can say anything better about its | |
3715 | initial value from any initializing insns set up above. (This is done | |
3716 | in two passes to avoid missing SETs in a PARALLEL.) */ | |
3717 | static void | |
3718 | loop_bivs_check (loop) | |
3719 | struct loop *loop; | |
3720 | { | |
3721 | struct loop_ivs *ivs = LOOP_IVS (loop); | |
14be28e5 | 3722 | /* Temporary list pointers for traversing ivs->list. */ |
6ec73c7c MH |
3723 | struct iv_class *bl; |
3724 | struct iv_class **backbl; | |
b4ad7b23 | 3725 | |
14be28e5 | 3726 | for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next) |
b4ad7b23 RS |
3727 | { |
3728 | rtx src; | |
956d6950 | 3729 | rtx note; |
b4ad7b23 RS |
3730 | |
3731 | if (! bl->init_insn) | |
3732 | continue; | |
3733 | ||
956d6950 JL |
3734 | /* IF INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value |
3735 | is a constant, use the value of that. */ | |
3736 | if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL | |
3737 | && CONSTANT_P (XEXP (note, 0))) | |
3738 | || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL | |
3739 | && CONSTANT_P (XEXP (note, 0)))) | |
3740 | src = XEXP (note, 0); | |
3741 | else | |
3742 | src = SET_SRC (bl->init_set); | |
b4ad7b23 RS |
3743 | |
3744 | if (loop_dump_stream) | |
3745 | fprintf (loop_dump_stream, | |
c804f3f8 | 3746 | "Biv %d: initialized at insn %d: initial value ", |
b4ad7b23 RS |
3747 | bl->regno, INSN_UID (bl->init_insn)); |
3748 | ||
43a674af JW |
3749 | if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno]) |
3750 | || GET_MODE (src) == VOIDmode) | |
e304a8e6 MH |
3751 | && valid_initial_value_p (src, bl->init_insn, |
3752 | LOOP_INFO (loop)->pre_header_has_call, | |
3753 | loop->start)) | |
b4ad7b23 RS |
3754 | { |
3755 | bl->initial_value = src; | |
3756 | ||
3757 | if (loop_dump_stream) | |
3758 | { | |
c804f3f8 MH |
3759 | print_simple_rtl (loop_dump_stream, src); |
3760 | fputc ('\n', loop_dump_stream); | |
b4ad7b23 RS |
3761 | } |
3762 | } | |
b4f75276 | 3763 | /* If we can't make it a giv, |
6ec73c7c | 3764 | let biv keep initial value of "itself". */ |
b4f75276 BS |
3765 | else if (loop_dump_stream) |
3766 | fprintf (loop_dump_stream, "is complex\n"); | |
3ec2b590 | 3767 | } |
6ec73c7c | 3768 | } |
3ec2b590 | 3769 | |
b4ad7b23 | 3770 | |
6ec73c7c MH |
3771 | /* Search the loop for general induction variables. */ |
3772 | ||
3773 | static void | |
3774 | loop_givs_find (loop) | |
3775 | struct loop* loop; | |
3776 | { | |
5e787f07 | 3777 | for_each_insn_in_loop (loop, check_insn_for_givs); |
6ec73c7c | 3778 | } |
b4ad7b23 | 3779 | |
b4ad7b23 | 3780 | |
6ec73c7c MH |
3781 | /* For each giv for which we still don't know whether or not it is |
3782 | replaceable, check to see if it is replaceable because its final value | |
3783 | can be calculated. */ | |
b4ad7b23 | 3784 | |
6ec73c7c MH |
3785 | static void |
3786 | loop_givs_check (loop) | |
3787 | struct loop *loop; | |
3788 | { | |
3789 | struct loop_ivs *ivs = LOOP_IVS (loop); | |
3790 | struct iv_class *bl; | |
b4ad7b23 | 3791 | |
14be28e5 | 3792 | for (bl = ivs->list; bl; bl = bl->next) |
b4ad7b23 RS |
3793 | { |
3794 | struct induction *v; | |
3795 | ||
3796 | for (v = bl->giv; v; v = v->next_iv) | |
3797 | if (! v->replaceable && ! v->not_replaceable) | |
0534b804 | 3798 | check_final_value (loop, v); |
b4ad7b23 | 3799 | } |
6ec73c7c MH |
3800 | } |
3801 | ||
3802 | ||
e304a8e6 MH |
3803 | /* Return non-zero if it is possible to eliminate the biv BL provided |
3804 | all givs are reduced. This is possible if either the reg is not | |
3805 | used outside the loop, or we can compute what its final value will | |
3806 | be. */ | |
3807 | ||
3808 | static int | |
6ec73c7c MH |
3809 | loop_biv_eliminable_p (loop, bl, threshold, insn_count) |
3810 | struct loop *loop; | |
3811 | struct iv_class *bl; | |
3812 | int threshold; | |
3813 | int insn_count; | |
3814 | { | |
e304a8e6 MH |
3815 | /* For architectures with a decrement_and_branch_until_zero insn, |
3816 | don't do this if we put a REG_NONNEG note on the endtest for this | |
3817 | biv. */ | |
3818 | ||
3819 | #ifdef HAVE_decrement_and_branch_until_zero | |
3820 | if (bl->nonneg) | |
3821 | { | |
3822 | if (loop_dump_stream) | |
3823 | fprintf (loop_dump_stream, | |
3824 | "Cannot eliminate nonneg biv %d.\n", bl->regno); | |
3825 | return 0; | |
3826 | } | |
3827 | #endif | |
3828 | ||
3829 | /* Check that biv is used outside loop or if it has a final value. | |
3830 | Compare against bl->init_insn rather than loop->start. We aren't | |
3831 | concerned with any uses of the biv between init_insn and | |
3832 | loop->start since these won't be affected by the value of the biv | |
3833 | elsewhere in the function, so long as init_insn doesn't use the | |
3834 | biv itself. */ | |
6ec73c7c MH |
3835 | |
3836 | if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end) | |
3837 | && bl->init_insn | |
3838 | && INSN_UID (bl->init_insn) < max_uid_for_loop | |
3839 | && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn) | |
6ec73c7c | 3840 | && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set))) |
e304a8e6 | 3841 | || (bl->final_value = final_biv_value (loop, bl))) |
6ec73c7c | 3842 | return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count); |
e304a8e6 MH |
3843 | |
3844 | if (loop_dump_stream) | |
3845 | { | |
3846 | fprintf (loop_dump_stream, | |
3847 | "Cannot eliminate biv %d.\n", | |
3848 | bl->regno); | |
3849 | fprintf (loop_dump_stream, | |
3850 | "First use: insn %d, last use: insn %d.\n", | |
3851 | REGNO_FIRST_UID (bl->regno), | |
3852 | REGNO_LAST_UID (bl->regno)); | |
3853 | } | |
3854 | return 0; | |
3855 | } | |
3856 | ||
3857 | ||
3858 | /* Reduce each giv of BL that we have decided to reduce. */ | |
3859 | ||
3860 | static void | |
3861 | loop_givs_reduce (loop, bl) | |
3862 | struct loop *loop; | |
3863 | struct iv_class *bl; | |
3864 | { | |
3865 | struct induction *v; | |
3866 | ||
3867 | for (v = bl->giv; v; v = v->next_iv) | |
3868 | { | |
3869 | struct induction *tv; | |
3870 | if (! v->ignore && v->same == 0) | |
3871 | { | |
3872 | int auto_inc_opt = 0; | |
3873 | ||
3874 | /* If the code for derived givs immediately below has already | |
3875 | allocated a new_reg, we must keep it. */ | |
3876 | if (! v->new_reg) | |
3877 | v->new_reg = gen_reg_rtx (v->mode); | |
3878 | ||
3879 | #ifdef AUTO_INC_DEC | |
3880 | /* If the target has auto-increment addressing modes, and | |
3881 | this is an address giv, then try to put the increment | |
3882 | immediately after its use, so that flow can create an | |
3883 | auto-increment addressing mode. */ | |
3884 | if (v->giv_type == DEST_ADDR && bl->biv_count == 1 | |
3885 | && bl->biv->always_executed && ! bl->biv->maybe_multiple | |
3886 | /* We don't handle reversed biv's because bl->biv->insn | |
3887 | does not have a valid INSN_LUID. */ | |
3888 | && ! bl->reversed | |
3889 | && v->always_executed && ! v->maybe_multiple | |
3890 | && INSN_UID (v->insn) < max_uid_for_loop) | |
3891 | { | |
3892 | /* If other giv's have been combined with this one, then | |
3893 | this will work only if all uses of the other giv's occur | |
3894 | before this giv's insn. This is difficult to check. | |
3895 | ||
3896 | We simplify this by looking for the common case where | |
3897 | there is one DEST_REG giv, and this giv's insn is the | |
3898 | last use of the dest_reg of that DEST_REG giv. If the | |
3899 | increment occurs after the address giv, then we can | |
3900 | perform the optimization. (Otherwise, the increment | |
3901 | would have to go before other_giv, and we would not be | |
3902 | able to combine it with the address giv to get an | |
3903 | auto-inc address.) */ | |
3904 | if (v->combined_with) | |
3905 | { | |
3906 | struct induction *other_giv = 0; | |
3907 | ||
3908 | for (tv = bl->giv; tv; tv = tv->next_iv) | |
3909 | if (tv->same == v) | |
3910 | { | |
3911 | if (other_giv) | |
3912 | break; | |
3913 | else | |
3914 | other_giv = tv; | |
3915 | } | |
3916 | if (! tv && other_giv | |
3917 | && REGNO (other_giv->dest_reg) < max_reg_before_loop | |
3918 | && (REGNO_LAST_UID (REGNO (other_giv->dest_reg)) | |
3919 | == INSN_UID (v->insn)) | |
3920 | && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn)) | |
3921 | auto_inc_opt = 1; | |
3922 | } | |
3923 | /* Check for case where increment is before the address | |
3924 | giv. Do this test in "loop order". */ | |
3925 | else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn) | |
3926 | && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start) | |
3927 | || (INSN_LUID (bl->biv->insn) | |
3928 | > INSN_LUID (loop->scan_start)))) | |
3929 | || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start) | |
3930 | && (INSN_LUID (loop->scan_start) | |
3931 | < INSN_LUID (bl->biv->insn)))) | |
3932 | auto_inc_opt = -1; | |
3933 | else | |
3934 | auto_inc_opt = 1; | |
3935 | ||
3936 | #ifdef HAVE_cc0 | |
3937 | { | |
3938 | rtx prev; | |
3939 | ||
3940 | /* We can't put an insn immediately after one setting | |
3941 | cc0, or immediately before one using cc0. */ | |
3942 | if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn))) | |
3943 | || (auto_inc_opt == -1 | |
3944 | && (prev = prev_nonnote_insn (v->insn)) != 0 | |
3945 | && INSN_P (prev) | |
3946 | && sets_cc0_p (PATTERN (prev)))) | |
3947 | auto_inc_opt = 0; | |
3948 | } | |
3949 | #endif | |
3950 | ||
3951 | if (auto_inc_opt) | |
3952 | v->auto_inc_opt = 1; | |
3953 | } | |
3954 | #endif | |
3955 | ||
3956 | /* For each place where the biv is incremented, add an insn | |
3957 | to increment the new, reduced reg for the giv. */ | |
3958 | for (tv = bl->biv; tv; tv = tv->next_iv) | |
3959 | { | |
3960 | rtx insert_before; | |
3961 | ||
3962 | if (! auto_inc_opt) | |
3963 | insert_before = tv->insn; | |
3964 | else if (auto_inc_opt == 1) | |
3965 | insert_before = NEXT_INSN (v->insn); | |
3966 | else | |
3967 | insert_before = v->insn; | |
3968 | ||
3969 | if (tv->mult_val == const1_rtx) | |
96a45535 MH |
3970 | loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val, |
3971 | v->new_reg, v->new_reg, | |
3972 | 0, insert_before); | |
e304a8e6 MH |
3973 | else /* tv->mult_val == const0_rtx */ |
3974 | /* A multiply is acceptable here | |
3975 | since this is presumed to be seldom executed. */ | |
96a45535 MH |
3976 | loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val, |
3977 | v->add_val, v->new_reg, | |
3978 | 0, insert_before); | |
e304a8e6 MH |
3979 | } |
3980 | ||
3981 | /* Add code at loop start to initialize giv's reduced reg. */ | |
3982 | ||
96a45535 MH |
3983 | loop_iv_add_mult_hoist (loop, |
3984 | extend_value_for_giv (v, bl->initial_value), | |
3985 | v->mult_val, v->add_val, v->new_reg); | |
e304a8e6 MH |
3986 | } |
3987 | } | |
3988 | } | |
3989 | ||
3990 | ||
3991 | /* Check for givs whose first use is their definition and whose | |
3992 | last use is the definition of another giv. If so, it is likely | |
3993 | dead and should not be used to derive another giv nor to | |
3994 | eliminate a biv. */ | |
3995 | ||
3996 | static void | |
3997 | loop_givs_dead_check (loop, bl) | |
3998 | struct loop *loop ATTRIBUTE_UNUSED; | |
3999 | struct iv_class *bl; | |
4000 | { | |
4001 | struct induction *v; | |
4002 | ||
4003 | for (v = bl->giv; v; v = v->next_iv) | |
4004 | { | |
4005 | if (v->ignore | |
4006 | || (v->same && v->same->ignore)) | |
4007 | continue; | |
4008 | ||
4009 | if (v->giv_type == DEST_REG | |
4010 | && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn)) | |
4011 | { | |
4012 | struct induction *v1; | |
4013 | ||
4014 | for (v1 = bl->giv; v1; v1 = v1->next_iv) | |
4015 | if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn)) | |
4016 | v->maybe_dead = 1; | |
4017 | } | |
4018 | } | |
4019 | } | |
4020 | ||
4021 | ||
4022 | static void | |
96a45535 | 4023 | loop_givs_rescan (loop, bl, reg_map) |
e304a8e6 MH |
4024 | struct loop *loop; |
4025 | struct iv_class *bl; | |
4026 | rtx *reg_map; | |
e304a8e6 MH |
4027 | { |
4028 | struct induction *v; | |
4029 | ||
4030 | for (v = bl->giv; v; v = v->next_iv) | |
4031 | { | |
4032 | if (v->same && v->same->ignore) | |
4033 | v->ignore = 1; | |
4034 | ||
4035 | if (v->ignore) | |
4036 | continue; | |
4037 | ||
4038 | /* Update expression if this was combined, in case other giv was | |
4039 | replaced. */ | |
4040 | if (v->same) | |
4041 | v->new_reg = replace_rtx (v->new_reg, | |
4042 | v->same->dest_reg, v->same->new_reg); | |
4043 | ||
4044 | /* See if this register is known to be a pointer to something. If | |
4045 | so, see if we can find the alignment. First see if there is a | |
4046 | destination register that is a pointer. If so, this shares the | |
4047 | alignment too. Next see if we can deduce anything from the | |
4048 | computational information. If not, and this is a DEST_ADDR | |
4049 | giv, at least we know that it's a pointer, though we don't know | |
4050 | the alignment. */ | |
4051 | if (GET_CODE (v->new_reg) == REG | |
4052 | && v->giv_type == DEST_REG | |
4053 | && REG_POINTER (v->dest_reg)) | |
4054 | mark_reg_pointer (v->new_reg, | |
4055 | REGNO_POINTER_ALIGN (REGNO (v->dest_reg))); | |
4056 | else if (GET_CODE (v->new_reg) == REG | |
4057 | && REG_POINTER (v->src_reg)) | |
4058 | { | |
4059 | unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg)); | |
4060 | ||
4061 | if (align == 0 | |
4062 | || GET_CODE (v->add_val) != CONST_INT | |
4063 | || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0) | |
4064 | align = 0; | |
4065 | ||
4066 | mark_reg_pointer (v->new_reg, align); | |
4067 | } | |
4068 | else if (GET_CODE (v->new_reg) == REG | |
4069 | && GET_CODE (v->add_val) == REG | |
4070 | && REG_POINTER (v->add_val)) | |
4071 | { | |
4072 | unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val)); | |
4073 | ||
4074 | if (align == 0 || GET_CODE (v->mult_val) != CONST_INT | |
4075 | || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0) | |
4076 | align = 0; | |
4077 | ||
4078 | mark_reg_pointer (v->new_reg, align); | |
4079 | } | |
4080 | else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR) | |
4081 | mark_reg_pointer (v->new_reg, 0); | |
4082 | ||
4083 | if (v->giv_type == DEST_ADDR) | |
4084 | /* Store reduced reg as the address in the memref where we found | |
4085 | this giv. */ | |
4086 | validate_change (v->insn, v->location, v->new_reg, 0); | |
4087 | else if (v->replaceable) | |
4088 | { | |
4089 | reg_map[REGNO (v->dest_reg)] = v->new_reg; | |
4090 | } | |
4091 | else | |
4092 | { | |
4093 | /* Not replaceable; emit an insn to set the original giv reg from | |
4094 | the reduced giv, same as above. */ | |
86e21212 MH |
4095 | loop_insn_emit_after (loop, 0, v->insn, |
4096 | gen_move_insn (v->dest_reg, v->new_reg)); | |
e304a8e6 MH |
4097 | } |
4098 | ||
4099 | /* When a loop is reversed, givs which depend on the reversed | |
4100 | biv, and which are live outside the loop, must be set to their | |
4101 | correct final value. This insn is only needed if the giv is | |
4102 | not replaceable. The correct final value is the same as the | |
4103 | value that the giv starts the reversed loop with. */ | |
4104 | if (bl->reversed && ! v->replaceable) | |
96a45535 MH |
4105 | loop_iv_add_mult_sink (loop, |
4106 | extend_value_for_giv (v, bl->initial_value), | |
4107 | v->mult_val, v->add_val, v->dest_reg); | |
e304a8e6 | 4108 | else if (v->final_value) |
96a45535 MH |
4109 | loop_insn_sink_or_swim (loop, |
4110 | gen_move_insn (v->dest_reg, v->final_value)); | |
e304a8e6 MH |
4111 | |
4112 | if (loop_dump_stream) | |
4113 | { | |
4114 | fprintf (loop_dump_stream, "giv at %d reduced to ", | |
4115 | INSN_UID (v->insn)); | |
c804f3f8 | 4116 | print_simple_rtl (loop_dump_stream, v->new_reg); |
e304a8e6 MH |
4117 | fprintf (loop_dump_stream, "\n"); |
4118 | } | |
4119 | } | |
4120 | } | |
4121 | ||
4122 | ||
4123 | static int | |
4124 | loop_giv_reduce_benefit (loop, bl, v, test_reg) | |
4125 | struct loop *loop ATTRIBUTE_UNUSED; | |
4126 | struct iv_class *bl; | |
4127 | struct induction *v; | |
4128 | rtx test_reg; | |
4129 | { | |
4130 | int add_cost; | |
4131 | int benefit; | |
4132 | ||
4133 | benefit = v->benefit; | |
4134 | PUT_MODE (test_reg, v->mode); | |
4135 | add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val, | |
4136 | test_reg, test_reg); | |
4137 | ||
4138 | /* Reduce benefit if not replaceable, since we will insert a | |
4139 | move-insn to replace the insn that calculates this giv. Don't do | |
4140 | this unless the giv is a user variable, since it will often be | |
4141 | marked non-replaceable because of the duplication of the exit | |
4142 | code outside the loop. In such a case, the copies we insert are | |
4143 | dead and will be deleted. So they don't have a cost. Similar | |
4144 | situations exist. */ | |
4145 | /* ??? The new final_[bg]iv_value code does a much better job of | |
4146 | finding replaceable giv's, and hence this code may no longer be | |
4147 | necessary. */ | |
4148 | if (! v->replaceable && ! bl->eliminable | |
4149 | && REG_USERVAR_P (v->dest_reg)) | |
4150 | benefit -= copy_cost; | |
4151 | ||
4152 | /* Decrease the benefit to count the add-insns that we will insert | |
4153 | to increment the reduced reg for the giv. ??? This can | |
4154 | overestimate the run-time cost of the additional insns, e.g. if | |
4155 | there are multiple basic blocks that increment the biv, but only | |
4156 | one of these blocks is executed during each iteration. There is | |
4157 | no good way to detect cases like this with the current structure | |
4158 | of the loop optimizer. This code is more accurate for | |
4159 | determining code size than run-time benefits. */ | |
4160 | benefit -= add_cost * bl->biv_count; | |
4161 | ||
4162 | /* Decide whether to strength-reduce this giv or to leave the code | |
4163 | unchanged (recompute it from the biv each time it is used). This | |
4164 | decision can be made independently for each giv. */ | |
4165 | ||
4166 | #ifdef AUTO_INC_DEC | |
4167 | /* Attempt to guess whether autoincrement will handle some of the | |
4168 | new add insns; if so, increase BENEFIT (undo the subtraction of | |
4169 | add_cost that was done above). */ | |
4170 | if (v->giv_type == DEST_ADDR | |
4171 | /* Increasing the benefit is risky, since this is only a guess. | |
4172 | Avoid increasing register pressure in cases where there would | |
4173 | be no other benefit from reducing this giv. */ | |
4174 | && benefit > 0 | |
4175 | && GET_CODE (v->mult_val) == CONST_INT) | |
4176 | { | |
616fde53 MH |
4177 | int size = GET_MODE_SIZE (GET_MODE (v->mem)); |
4178 | ||
e304a8e6 | 4179 | if (HAVE_POST_INCREMENT |
616fde53 | 4180 | && INTVAL (v->mult_val) == size) |
e304a8e6 MH |
4181 | benefit += add_cost * bl->biv_count; |
4182 | else if (HAVE_PRE_INCREMENT | |
616fde53 | 4183 | && INTVAL (v->mult_val) == size) |
e304a8e6 MH |
4184 | benefit += add_cost * bl->biv_count; |
4185 | else if (HAVE_POST_DECREMENT | |
616fde53 | 4186 | && -INTVAL (v->mult_val) == size) |
e304a8e6 MH |
4187 | benefit += add_cost * bl->biv_count; |
4188 | else if (HAVE_PRE_DECREMENT | |
616fde53 | 4189 | && -INTVAL (v->mult_val) == size) |
e304a8e6 MH |
4190 | benefit += add_cost * bl->biv_count; |
4191 | } | |
4192 | #endif | |
4193 | ||
4194 | return benefit; | |
6ec73c7c MH |
4195 | } |
4196 | ||
4197 | ||
b2735d9a MH |
4198 | /* Free IV structures for LOOP. */ |
4199 | ||
4200 | static void | |
4201 | loop_ivs_free (loop) | |
4202 | struct loop *loop; | |
4203 | { | |
4204 | struct loop_ivs *ivs = LOOP_IVS (loop); | |
4205 | struct iv_class *iv = ivs->list; | |
4206 | ||
4207 | free (ivs->regs); | |
4208 | ||
4209 | while (iv) | |
4210 | { | |
4211 | struct iv_class *next = iv->next; | |
4212 | struct induction *induction; | |
4213 | struct induction *next_induction; | |
4214 | ||
4215 | for (induction = iv->biv; induction; induction = next_induction) | |
4216 | { | |
4217 | next_induction = induction->next_iv; | |
4218 | free (induction); | |
4219 | } | |
4220 | for (induction = iv->giv; induction; induction = next_induction) | |
4221 | { | |
4222 | next_induction = induction->next_iv; | |
4223 | free (induction); | |
4224 | } | |
4225 | ||
4226 | free (iv); | |
4227 | iv = next; | |
4228 | } | |
4229 | } | |
4230 | ||
4231 | ||
6ec73c7c MH |
4232 | /* Perform strength reduction and induction variable elimination. |
4233 | ||
4234 | Pseudo registers created during this function will be beyond the | |
f1d4ac80 MH |
4235 | last valid index in several tables including |
4236 | REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a | |
4237 | problem here, because the added registers cannot be givs outside of | |
4238 | their loop, and hence will never be reconsidered. But scan_loop | |
4239 | must check regnos to make sure they are in bounds. */ | |
6ec73c7c MH |
4240 | |
4241 | static void | |
28680540 | 4242 | strength_reduce (loop, flags) |
6ec73c7c | 4243 | struct loop *loop; |
6ec73c7c MH |
4244 | int flags; |
4245 | { | |
4246 | struct loop_info *loop_info = LOOP_INFO (loop); | |
4247 | struct loop_regs *regs = LOOP_REGS (loop); | |
4248 | struct loop_ivs *ivs = LOOP_IVS (loop); | |
4249 | rtx p; | |
14be28e5 | 4250 | /* Temporary list pointer for traversing ivs->list. */ |
e304a8e6 | 4251 | struct iv_class *bl; |
6ec73c7c MH |
4252 | /* Ratio of extra register life span we can justify |
4253 | for saving an instruction. More if loop doesn't call subroutines | |
4254 | since in that case saving an insn makes more difference | |
4255 | and more registers are available. */ | |
4256 | /* ??? could set this to last value of threshold in move_movables */ | |
4257 | int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs); | |
4258 | /* Map of pseudo-register replacements. */ | |
4259 | rtx *reg_map = NULL; | |
4260 | int reg_map_size; | |
6ec73c7c | 4261 | int unrolled_insn_copies = 0; |
6ec73c7c | 4262 | rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1); |
28680540 | 4263 | int insn_count = count_insns_in_loop (loop); |
6ec73c7c MH |
4264 | |
4265 | addr_placeholder = gen_reg_rtx (Pmode); | |
4266 | ||
14be28e5 MH |
4267 | ivs->n_regs = max_reg_before_loop; |
4268 | ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv)); | |
6ec73c7c MH |
4269 | |
4270 | /* Find all BIVs in loop. */ | |
4271 | loop_bivs_find (loop); | |
4272 | ||
4273 | /* Exit if there are no bivs. */ | |
14be28e5 | 4274 | if (! ivs->list) |
6ec73c7c MH |
4275 | { |
4276 | /* Can still unroll the loop anyways, but indicate that there is no | |
4277 | strength reduction info available. */ | |
4278 | if (flags & LOOP_UNROLL) | |
96a45535 | 4279 | unroll_loop (loop, insn_count, 0); |
6ec73c7c | 4280 | |
b2735d9a MH |
4281 | loop_ivs_free (loop); |
4282 | return; | |
6ec73c7c MH |
4283 | } |
4284 | ||
4285 | /* Determine how BIVS are initialised by looking through pre-header | |
4286 | extended basic block. */ | |
4287 | loop_bivs_init_find (loop); | |
4288 | ||
4289 | /* Look at the each biv and see if we can say anything better about its | |
4290 | initial value from any initializing insns set up above. */ | |
4291 | loop_bivs_check (loop); | |
4292 | ||
4293 | /* Search the loop for general induction variables. */ | |
4294 | loop_givs_find (loop); | |
4295 | ||
4296 | /* Try to calculate and save the number of loop iterations. This is | |
4297 | set to zero if the actual number can not be calculated. This must | |
4298 | be called after all giv's have been identified, since otherwise it may | |
4299 | fail if the iteration variable is a giv. */ | |
4300 | loop_iterations (loop); | |
4301 | ||
4302 | /* Now for each giv for which we still don't know whether or not it is | |
4303 | replaceable, check to see if it is replaceable because its final value | |
4304 | can be calculated. This must be done after loop_iterations is called, | |
4305 | so that final_giv_value will work correctly. */ | |
4306 | loop_givs_check (loop); | |
b4ad7b23 RS |
4307 | |
4308 | /* Try to prove that the loop counter variable (if any) is always | |
4309 | nonnegative; if so, record that fact with a REG_NONNEG note | |
4310 | so that "decrement and branch until zero" insn can be used. */ | |
a2be868f | 4311 | check_dbra_loop (loop, insn_count); |
b4ad7b23 | 4312 | |
97ec0ad8 R |
4313 | /* Create reg_map to hold substitutions for replaceable giv regs. |
4314 | Some givs might have been made from biv increments, so look at | |
ed5bb68d | 4315 | ivs->reg_iv_type for a suitable size. */ |
14be28e5 | 4316 | reg_map_size = ivs->n_regs; |
4da896b2 | 4317 | reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx)); |
b4ad7b23 RS |
4318 | |
4319 | /* Examine each iv class for feasibility of strength reduction/induction | |
4320 | variable elimination. */ | |
4321 | ||
14be28e5 | 4322 | for (bl = ivs->list; bl; bl = bl->next) |
b4ad7b23 RS |
4323 | { |
4324 | struct induction *v; | |
4325 | int benefit; | |
6ec73c7c | 4326 | |
b4ad7b23 | 4327 | /* Test whether it will be possible to eliminate this biv |
6ec73c7c | 4328 | provided all givs are reduced. */ |
e304a8e6 | 4329 | bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count); |
b4ad7b23 | 4330 | |
97ebd24c JW |
4331 | /* This will be true at the end, if all givs which depend on this |
4332 | biv have been strength reduced. | |
4333 | We can't (currently) eliminate the biv unless this is so. */ | |
4334 | bl->all_reduced = 1; | |
4335 | ||
6ec73c7c | 4336 | /* Check each extension dependent giv in this class to see if its |
e8cb4873 RH |
4337 | root biv is safe from wrapping in the interior mode. */ |
4338 | check_ext_dependant_givs (bl, loop_info); | |
4339 | ||
b4ad7b23 | 4340 | /* Combine all giv's for this iv_class. */ |
1ecd860b | 4341 | combine_givs (regs, bl); |
b4ad7b23 | 4342 | |
b4ad7b23 RS |
4343 | for (v = bl->giv; v; v = v->next_iv) |
4344 | { | |
4345 | struct induction *tv; | |
4346 | ||
4347 | if (v->ignore || v->same) | |
4348 | continue; | |
4349 | ||
e304a8e6 | 4350 | benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg); |
b4ad7b23 RS |
4351 | |
4352 | /* If an insn is not to be strength reduced, then set its ignore | |
e304a8e6 | 4353 | flag, and clear bl->all_reduced. */ |
b4ad7b23 | 4354 | |
e6f6eb29 JW |
4355 | /* A giv that depends on a reversed biv must be reduced if it is |
4356 | used after the loop exit, otherwise, it would have the wrong | |
4357 | value after the loop exit. To make it simple, just reduce all | |
4358 | of such giv's whether or not we know they are used after the loop | |
4359 | exit. */ | |
4360 | ||
6ec73c7c MH |
4361 | if (! flag_reduce_all_givs |
4362 | && v->lifetime * threshold * benefit < insn_count | |
4363 | && ! bl->reversed) | |
b4ad7b23 RS |
4364 | { |
4365 | if (loop_dump_stream) | |
4366 | fprintf (loop_dump_stream, | |
4367 | "giv of insn %d not worth while, %d vs %d.\n", | |
4368 | INSN_UID (v->insn), | |
4369 | v->lifetime * threshold * benefit, insn_count); | |
4370 | v->ignore = 1; | |
e304a8e6 | 4371 | bl->all_reduced = 0; |
b4ad7b23 RS |
4372 | } |
4373 | else | |
4374 | { | |
4375 | /* Check that we can increment the reduced giv without a | |
4376 | multiply insn. If not, reject it. */ | |
4377 | ||
4378 | for (tv = bl->biv; tv; tv = tv->next_iv) | |
4379 | if (tv->mult_val == const1_rtx | |
4380 | && ! product_cheap_p (tv->add_val, v->mult_val)) | |
4381 | { | |
4382 | if (loop_dump_stream) | |
4383 | fprintf (loop_dump_stream, | |
4384 | "giv of insn %d: would need a multiply.\n", | |
4385 | INSN_UID (v->insn)); | |
4386 | v->ignore = 1; | |
e304a8e6 | 4387 | bl->all_reduced = 0; |
b4ad7b23 RS |
4388 | break; |
4389 | } | |
4390 | } | |
4391 | } | |
4392 | ||
8c354a41 R |
4393 | /* Check for givs whose first use is their definition and whose |
4394 | last use is the definition of another giv. If so, it is likely | |
4395 | dead and should not be used to derive another giv nor to | |
4396 | eliminate a biv. */ | |
e304a8e6 | 4397 | loop_givs_dead_check (loop, bl); |
8c354a41 | 4398 | |
b4ad7b23 | 4399 | /* Reduce each giv that we decided to reduce. */ |
e304a8e6 | 4400 | loop_givs_reduce (loop, bl); |
b4ad7b23 RS |
4401 | |
4402 | /* Rescan all givs. If a giv is the same as a giv not reduced, mark it | |
4403 | as not reduced. | |
e6fcb60d | 4404 | |
b4ad7b23 RS |
4405 | For each giv register that can be reduced now: if replaceable, |
4406 | substitute reduced reg wherever the old giv occurs; | |
8c354a41 | 4407 | else add new move insn "giv_reg = reduced_reg". */ |
96a45535 | 4408 | loop_givs_rescan (loop, bl, reg_map); |
b4ad7b23 RS |
4409 | |
4410 | /* All the givs based on the biv bl have been reduced if they | |
4411 | merit it. */ | |
4412 | ||
4413 | /* For each giv not marked as maybe dead that has been combined with a | |
4414 | second giv, clear any "maybe dead" mark on that second giv. | |
4415 | v->new_reg will either be or refer to the register of the giv it | |
4416 | combined with. | |
4417 | ||
e304a8e6 MH |
4418 | Doing this clearing avoids problems in biv elimination where |
4419 | a giv's new_reg is a complex value that can't be put in the | |
4420 | insn but the giv combined with (with a reg as new_reg) is | |
4421 | marked maybe_dead. Since the register will be used in either | |
4422 | case, we'd prefer it be used from the simpler giv. */ | |
b4ad7b23 RS |
4423 | |
4424 | for (v = bl->giv; v; v = v->next_iv) | |
4425 | if (! v->maybe_dead && v->same) | |
4426 | v->same->maybe_dead = 0; | |
4427 | ||
4428 | /* Try to eliminate the biv, if it is a candidate. | |
e304a8e6 | 4429 | This won't work if ! bl->all_reduced, |
b4ad7b23 RS |
4430 | since the givs we planned to use might not have been reduced. |
4431 | ||
e304a8e6 MH |
4432 | We have to be careful that we didn't initially think we could |
4433 | eliminate this biv because of a giv that we now think may be | |
4434 | dead and shouldn't be used as a biv replacement. | |
b4ad7b23 RS |
4435 | |
4436 | Also, there is the possibility that we may have a giv that looks | |
4437 | like it can be used to eliminate a biv, but the resulting insn | |
e6fcb60d | 4438 | isn't valid. This can happen, for example, on the 88k, where a |
b4ad7b23 | 4439 | JUMP_INSN can compare a register only with zero. Attempts to |
c5b7917e | 4440 | replace it with a compare with a constant will fail. |
b4ad7b23 RS |
4441 | |
4442 | Note that in cases where this call fails, we may have replaced some | |
4443 | of the occurrences of the biv with a giv, but no harm was done in | |
4444 | doing so in the rare cases where it can occur. */ | |
4445 | ||
e304a8e6 | 4446 | if (bl->all_reduced == 1 && bl->eliminable |
0534b804 | 4447 | && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count)) |
b4ad7b23 RS |
4448 | { |
4449 | /* ?? If we created a new test to bypass the loop entirely, | |
4450 | or otherwise drop straight in, based on this test, then | |
4451 | we might want to rewrite it also. This way some later | |
4452 | pass has more hope of removing the initialization of this | |
0f41302f | 4453 | biv entirely. */ |
b4ad7b23 RS |
4454 | |
4455 | /* If final_value != 0, then the biv may be used after loop end | |
4456 | and we must emit an insn to set it just in case. | |
4457 | ||
4458 | Reversed bivs already have an insn after the loop setting their | |
4459 | value, so we don't need another one. We can't calculate the | |
0f41302f | 4460 | proper final value for such a biv here anyways. */ |
e304a8e6 | 4461 | if (bl->final_value && ! bl->reversed) |
96a45535 MH |
4462 | loop_insn_sink_or_swim (loop, gen_move_insn |
4463 | (bl->biv->dest_reg, bl->final_value)); | |
b4ad7b23 | 4464 | |
b4ad7b23 RS |
4465 | if (loop_dump_stream) |
4466 | fprintf (loop_dump_stream, "Reg %d: biv eliminated\n", | |
4467 | bl->regno); | |
4468 | } | |
4469 | } | |
4470 | ||
4471 | /* Go through all the instructions in the loop, making all the | |
4472 | register substitutions scheduled in REG_MAP. */ | |
4473 | ||
e304a8e6 | 4474 | for (p = loop->start; p != loop->end; p = NEXT_INSN (p)) |
b4ad7b23 | 4475 | if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN |
e6fcb60d | 4476 | || GET_CODE (p) == CALL_INSN) |
b4ad7b23 | 4477 | { |
97ec0ad8 R |
4478 | replace_regs (PATTERN (p), reg_map, reg_map_size, 0); |
4479 | replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0); | |
da0c128e | 4480 | INSN_CODE (p) = -1; |
b4ad7b23 RS |
4481 | } |
4482 | ||
73049ebc MT |
4483 | if (loop_info->n_iterations > 0) |
4484 | { | |
4485 | /* When we completely unroll a loop we will likely not need the increment | |
4486 | of the loop BIV and we will not need the conditional branch at the | |
4487 | end of the loop. */ | |
4488 | unrolled_insn_copies = insn_count - 2; | |
4489 | ||
4490 | #ifdef HAVE_cc0 | |
4491 | /* When we completely unroll a loop on a HAVE_cc0 machine we will not | |
4492 | need the comparison before the conditional branch at the end of the | |
4493 | loop. */ | |
80b8e8de | 4494 | unrolled_insn_copies -= 1; |
73049ebc MT |
4495 | #endif |
4496 | ||
4497 | /* We'll need one copy for each loop iteration. */ | |
4498 | unrolled_insn_copies *= loop_info->n_iterations; | |
4499 | ||
4500 | /* A little slop to account for the ability to remove initialization | |
4501 | code, better CSE, and other secondary benefits of completely | |
4502 | unrolling some loops. */ | |
4503 | unrolled_insn_copies -= 1; | |
4504 | ||
4505 | /* Clamp the value. */ | |
4506 | if (unrolled_insn_copies < 0) | |
4507 | unrolled_insn_copies = 0; | |
4508 | } | |
e6fcb60d | 4509 | |
b4ad7b23 RS |
4510 | /* Unroll loops from within strength reduction so that we can use the |
4511 | induction variable information that strength_reduce has already | |
73049ebc MT |
4512 | collected. Always unroll loops that would be as small or smaller |
4513 | unrolled than when rolled. */ | |
1bf14ad7 | 4514 | if ((flags & LOOP_UNROLL) |
73049ebc MT |
4515 | || (loop_info->n_iterations > 0 |
4516 | && unrolled_insn_copies <= insn_count)) | |
96a45535 | 4517 | unroll_loop (loop, insn_count, 1); |
b4ad7b23 | 4518 | |
5527bf14 RH |
4519 | #ifdef HAVE_doloop_end |
4520 | if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg) | |
4521 | doloop_optimize (loop); | |
4522 | #endif /* HAVE_doloop_end */ | |
8c660648 | 4523 | |
aa18f20e JH |
4524 | /* In case number of iterations is known, drop branch prediction note |
4525 | in the branch. Do that only in second loop pass, as loop unrolling | |
4526 | may change the number of iterations performed. */ | |
4527 | if ((flags & LOOP_BCT) | |
4528 | && loop_info->n_iterations / loop_info->unroll_number > 1) | |
4529 | { | |
923cbdc3 | 4530 | int n = loop_info->n_iterations / loop_info->unroll_number; |
aa18f20e JH |
4531 | predict_insn (PREV_INSN (loop->end), |
4532 | PRED_LOOP_ITERATIONS, | |
4533 | REG_BR_PROB_BASE - REG_BR_PROB_BASE / n); | |
4534 | } | |
4535 | ||
b4ad7b23 RS |
4536 | if (loop_dump_stream) |
4537 | fprintf (loop_dump_stream, "\n"); | |
69ba6af3 | 4538 | |
b2735d9a | 4539 | loop_ivs_free (loop); |
4da896b2 MM |
4540 | if (reg_map) |
4541 | free (reg_map); | |
b4ad7b23 RS |
4542 | } |
4543 | \f | |
5e787f07 | 4544 | /*Record all basic induction variables calculated in the insn. */ |
82ee5e63 | 4545 | static rtx |
5e787f07 JH |
4546 | check_insn_for_bivs (loop, p, not_every_iteration, maybe_multiple) |
4547 | struct loop *loop; | |
4548 | rtx p; | |
4549 | int not_every_iteration; | |
4550 | int maybe_multiple; | |
4551 | { | |
ed5bb68d | 4552 | struct loop_ivs *ivs = LOOP_IVS (loop); |
5e787f07 JH |
4553 | rtx set; |
4554 | rtx dest_reg; | |
4555 | rtx inc_val; | |
4556 | rtx mult_val; | |
4557 | rtx *location; | |
4558 | ||
4559 | if (GET_CODE (p) == INSN | |
4560 | && (set = single_set (p)) | |
4561 | && GET_CODE (SET_DEST (set)) == REG) | |
4562 | { | |
4563 | dest_reg = SET_DEST (set); | |
4564 | if (REGNO (dest_reg) < max_reg_before_loop | |
4565 | && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER | |
ed5bb68d | 4566 | && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT) |
5e787f07 | 4567 | { |
5e787f07 JH |
4568 | if (basic_induction_var (loop, SET_SRC (set), |
4569 | GET_MODE (SET_SRC (set)), | |
4570 | dest_reg, p, &inc_val, &mult_val, | |
98d1cd45 | 4571 | &location)) |
5e787f07 JH |
4572 | { |
4573 | /* It is a possible basic induction variable. | |
4574 | Create and initialize an induction structure for it. */ | |
4575 | ||
4576 | struct induction *v | |
1f8f4a0b | 4577 | = (struct induction *) xmalloc (sizeof (struct induction)); |
5e787f07 | 4578 | |
ed5bb68d | 4579 | record_biv (loop, v, p, dest_reg, inc_val, mult_val, location, |
98d1cd45 | 4580 | not_every_iteration, maybe_multiple); |
ed5bb68d | 4581 | REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT; |
5e787f07 | 4582 | } |
86fee241 | 4583 | else if (REGNO (dest_reg) < ivs->n_regs) |
ed5bb68d | 4584 | REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT; |
5e787f07 JH |
4585 | } |
4586 | } | |
82ee5e63 | 4587 | return p; |
5e787f07 JH |
4588 | } |
4589 | \f | |
e6fcb60d | 4590 | /* Record all givs calculated in the insn. |
5e787f07 JH |
4591 | A register is a giv if: it is only set once, it is a function of a |
4592 | biv and a constant (or invariant), and it is not a biv. */ | |
82ee5e63 | 4593 | static rtx |
5e787f07 JH |
4594 | check_insn_for_givs (loop, p, not_every_iteration, maybe_multiple) |
4595 | struct loop *loop; | |
4596 | rtx p; | |
4597 | int not_every_iteration; | |
4598 | int maybe_multiple; | |
4599 | { | |
1ecd860b | 4600 | struct loop_regs *regs = LOOP_REGS (loop); |
ed5bb68d | 4601 | |
5e787f07 JH |
4602 | rtx set; |
4603 | /* Look for a general induction variable in a register. */ | |
4604 | if (GET_CODE (p) == INSN | |
4605 | && (set = single_set (p)) | |
4606 | && GET_CODE (SET_DEST (set)) == REG | |
f1d4ac80 | 4607 | && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize) |
5e787f07 JH |
4608 | { |
4609 | rtx src_reg; | |
4610 | rtx dest_reg; | |
4611 | rtx add_val; | |
4612 | rtx mult_val; | |
e8cb4873 | 4613 | rtx ext_val; |
5e787f07 JH |
4614 | int benefit; |
4615 | rtx regnote = 0; | |
4616 | rtx last_consec_insn; | |
4617 | ||
4618 | dest_reg = SET_DEST (set); | |
4619 | if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER) | |
82ee5e63 | 4620 | return p; |
5e787f07 JH |
4621 | |
4622 | if (/* SET_SRC is a giv. */ | |
4623 | (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val, | |
e8cb4873 | 4624 | &mult_val, &ext_val, 0, &benefit, VOIDmode) |
5e787f07 JH |
4625 | /* Equivalent expression is a giv. */ |
4626 | || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX)) | |
4627 | && general_induction_var (loop, XEXP (regnote, 0), &src_reg, | |
e8cb4873 | 4628 | &add_val, &mult_val, &ext_val, 0, |
01329426 | 4629 | &benefit, VOIDmode))) |
5e787f07 JH |
4630 | /* Don't try to handle any regs made by loop optimization. |
4631 | We have nothing on them in regno_first_uid, etc. */ | |
4632 | && REGNO (dest_reg) < max_reg_before_loop | |
4633 | /* Don't recognize a BASIC_INDUCT_VAR here. */ | |
4634 | && dest_reg != src_reg | |
4635 | /* This must be the only place where the register is set. */ | |
f1d4ac80 | 4636 | && (regs->array[REGNO (dest_reg)].n_times_set == 1 |
5e787f07 JH |
4637 | /* or all sets must be consecutive and make a giv. */ |
4638 | || (benefit = consec_sets_giv (loop, benefit, p, | |
4639 | src_reg, dest_reg, | |
e8cb4873 | 4640 | &add_val, &mult_val, &ext_val, |
5e787f07 JH |
4641 | &last_consec_insn)))) |
4642 | { | |
4643 | struct induction *v | |
1f8f4a0b | 4644 | = (struct induction *) xmalloc (sizeof (struct induction)); |
5e787f07 JH |
4645 | |
4646 | /* If this is a library call, increase benefit. */ | |
4647 | if (find_reg_note (p, REG_RETVAL, NULL_RTX)) | |
4648 | benefit += libcall_benefit (p); | |
4649 | ||
4650 | /* Skip the consecutive insns, if there are any. */ | |
f1d4ac80 | 4651 | if (regs->array[REGNO (dest_reg)].n_times_set != 1) |
5e787f07 JH |
4652 | p = last_consec_insn; |
4653 | ||
4654 | record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val, | |
e8cb4873 | 4655 | ext_val, benefit, DEST_REG, not_every_iteration, |
6496a589 | 4656 | maybe_multiple, (rtx*)0); |
5e787f07 JH |
4657 | |
4658 | } | |
4659 | } | |
4660 | ||
4661 | #ifndef DONT_REDUCE_ADDR | |
4662 | /* Look for givs which are memory addresses. */ | |
4663 | /* This resulted in worse code on a VAX 8600. I wonder if it | |
4664 | still does. */ | |
4665 | if (GET_CODE (p) == INSN) | |
4666 | find_mem_givs (loop, PATTERN (p), p, not_every_iteration, | |
4667 | maybe_multiple); | |
4668 | #endif | |
4669 | ||
4670 | /* Update the status of whether giv can derive other givs. This can | |
4671 | change when we pass a label or an insn that updates a biv. */ | |
4672 | if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN | |
e6fcb60d | 4673 | || GET_CODE (p) == CODE_LABEL) |
5e787f07 | 4674 | update_giv_derive (loop, p); |
82ee5e63 | 4675 | return p; |
5e787f07 JH |
4676 | } |
4677 | \f | |
b4ad7b23 RS |
4678 | /* Return 1 if X is a valid source for an initial value (or as value being |
4679 | compared against in an initial test). | |
4680 | ||
4681 | X must be either a register or constant and must not be clobbered between | |
4682 | the current insn and the start of the loop. | |
4683 | ||
4684 | INSN is the insn containing X. */ | |
4685 | ||
4686 | static int | |
4687 | valid_initial_value_p (x, insn, call_seen, loop_start) | |
4688 | rtx x; | |
4689 | rtx insn; | |
4690 | int call_seen; | |
4691 | rtx loop_start; | |
4692 | { | |
4693 | if (CONSTANT_P (x)) | |
4694 | return 1; | |
4695 | ||
d45cf215 | 4696 | /* Only consider pseudos we know about initialized in insns whose luids |
b4ad7b23 RS |
4697 | we know. */ |
4698 | if (GET_CODE (x) != REG | |
4699 | || REGNO (x) >= max_reg_before_loop) | |
4700 | return 0; | |
4701 | ||
4702 | /* Don't use call-clobbered registers across a call which clobbers it. On | |
4703 | some machines, don't use any hard registers at all. */ | |
4704 | if (REGNO (x) < FIRST_PSEUDO_REGISTER | |
e9a25f70 JL |
4705 | && (SMALL_REGISTER_CLASSES |
4706 | || (call_used_regs[REGNO (x)] && call_seen))) | |
b4ad7b23 RS |
4707 | return 0; |
4708 | ||
4709 | /* Don't use registers that have been clobbered before the start of the | |
4710 | loop. */ | |
4711 | if (reg_set_between_p (x, insn, loop_start)) | |
4712 | return 0; | |
4713 | ||
4714 | return 1; | |
4715 | } | |
4716 | \f | |
4717 | /* Scan X for memory refs and check each memory address | |
4718 | as a possible giv. INSN is the insn whose pattern X comes from. | |
4719 | NOT_EVERY_ITERATION is 1 if the insn might not be executed during | |
c5c76735 JL |
4720 | every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed |
4721 | more thanonce in each loop iteration. */ | |
b4ad7b23 RS |
4722 | |
4723 | static void | |
0534b804 MH |
4724 | find_mem_givs (loop, x, insn, not_every_iteration, maybe_multiple) |
4725 | const struct loop *loop; | |
b4ad7b23 RS |
4726 | rtx x; |
4727 | rtx insn; | |
c5c76735 | 4728 | int not_every_iteration, maybe_multiple; |
b4ad7b23 RS |
4729 | { |
4730 | register int i, j; | |
4731 | register enum rtx_code code; | |
6f7d635c | 4732 | register const char *fmt; |
b4ad7b23 RS |
4733 | |
4734 | if (x == 0) | |
4735 | return; | |
4736 | ||
4737 | code = GET_CODE (x); | |
4738 | switch (code) | |
4739 | { | |
4740 | case REG: | |
4741 | case CONST_INT: | |
4742 | case CONST: | |
4743 | case CONST_DOUBLE: | |
4744 | case SYMBOL_REF: | |
4745 | case LABEL_REF: | |
4746 | case PC: | |
4747 | case CC0: | |
4748 | case ADDR_VEC: | |
4749 | case ADDR_DIFF_VEC: | |
4750 | case USE: | |
4751 | case CLOBBER: | |
4752 | return; | |
4753 | ||
4754 | case MEM: | |
4755 | { | |
4756 | rtx src_reg; | |
4757 | rtx add_val; | |
4758 | rtx mult_val; | |
e8cb4873 | 4759 | rtx ext_val; |
b4ad7b23 RS |
4760 | int benefit; |
4761 | ||
45f97e2e | 4762 | /* This code used to disable creating GIVs with mult_val == 1 and |
e6fcb60d | 4763 | add_val == 0. However, this leads to lost optimizations when |
45f97e2e RH |
4764 | it comes time to combine a set of related DEST_ADDR GIVs, since |
4765 | this one would not be seen. */ | |
b4ad7b23 | 4766 | |
0534b804 | 4767 | if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val, |
e8cb4873 RH |
4768 | &mult_val, &ext_val, 1, &benefit, |
4769 | GET_MODE (x))) | |
b4ad7b23 RS |
4770 | { |
4771 | /* Found one; record it. */ | |
4772 | struct induction *v | |
1f8f4a0b | 4773 | = (struct induction *) xmalloc (sizeof (struct induction)); |
b4ad7b23 | 4774 | |
0534b804 | 4775 | record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val, |
e8cb4873 RH |
4776 | add_val, ext_val, benefit, DEST_ADDR, |
4777 | not_every_iteration, maybe_multiple, &XEXP (x, 0)); | |
b4ad7b23 | 4778 | |
099f0f3f | 4779 | v->mem = x; |
b4ad7b23 | 4780 | } |
b4ad7b23 | 4781 | } |
e9a25f70 JL |
4782 | return; |
4783 | ||
4784 | default: | |
4785 | break; | |
b4ad7b23 RS |
4786 | } |
4787 | ||
4788 | /* Recursively scan the subexpressions for other mem refs. */ | |
4789 | ||
4790 | fmt = GET_RTX_FORMAT (code); | |
4791 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
4792 | if (fmt[i] == 'e') | |
0534b804 MH |
4793 | find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration, |
4794 | maybe_multiple); | |
b4ad7b23 RS |
4795 | else if (fmt[i] == 'E') |
4796 | for (j = 0; j < XVECLEN (x, i); j++) | |
0534b804 MH |
4797 | find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration, |
4798 | maybe_multiple); | |
b4ad7b23 RS |
4799 | } |
4800 | \f | |
4801 | /* Fill in the data about one biv update. | |
4802 | V is the `struct induction' in which we record the biv. (It is | |
4803 | allocated by the caller, with alloca.) | |
4804 | INSN is the insn that sets it. | |
4805 | DEST_REG is the biv's reg. | |
4806 | ||
4807 | MULT_VAL is const1_rtx if the biv is being incremented here, in which case | |
4808 | INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is | |
7dcd3836 RK |
4809 | being set to INC_VAL. |
4810 | ||
4811 | NOT_EVERY_ITERATION is nonzero if this biv update is not know to be | |
4812 | executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update | |
4813 | can be executed more than once per iteration. If MAYBE_MULTIPLE | |
4814 | and NOT_EVERY_ITERATION are both zero, we know that the biv update is | |
4815 | executed exactly once per iteration. */ | |
b4ad7b23 RS |
4816 | |
4817 | static void | |
ed5bb68d | 4818 | record_biv (loop, v, insn, dest_reg, inc_val, mult_val, location, |
98d1cd45 | 4819 | not_every_iteration, maybe_multiple) |
ed5bb68d | 4820 | struct loop *loop; |
b4ad7b23 RS |
4821 | struct induction *v; |
4822 | rtx insn; | |
4823 | rtx dest_reg; | |
4824 | rtx inc_val; | |
4825 | rtx mult_val; | |
3ec2b590 | 4826 | rtx *location; |
b4ad7b23 | 4827 | int not_every_iteration; |
7dcd3836 | 4828 | int maybe_multiple; |
b4ad7b23 | 4829 | { |
ed5bb68d | 4830 | struct loop_ivs *ivs = LOOP_IVS (loop); |
b4ad7b23 RS |
4831 | struct iv_class *bl; |
4832 | ||
4833 | v->insn = insn; | |
4834 | v->src_reg = dest_reg; | |
4835 | v->dest_reg = dest_reg; | |
4836 | v->mult_val = mult_val; | |
4837 | v->add_val = inc_val; | |
e8cb4873 | 4838 | v->ext_dependant = NULL_RTX; |
3ec2b590 | 4839 | v->location = location; |
b4ad7b23 RS |
4840 | v->mode = GET_MODE (dest_reg); |
4841 | v->always_computable = ! not_every_iteration; | |
8516af93 | 4842 | v->always_executed = ! not_every_iteration; |
7dcd3836 | 4843 | v->maybe_multiple = maybe_multiple; |
b4ad7b23 RS |
4844 | |
4845 | /* Add this to the reg's iv_class, creating a class | |
4846 | if this is the first incrementation of the reg. */ | |
4847 | ||
8b634749 | 4848 | bl = REG_IV_CLASS (ivs, REGNO (dest_reg)); |
b4ad7b23 RS |
4849 | if (bl == 0) |
4850 | { | |
4851 | /* Create and initialize new iv_class. */ | |
4852 | ||
1f8f4a0b | 4853 | bl = (struct iv_class *) xmalloc (sizeof (struct iv_class)); |
b4ad7b23 RS |
4854 | |
4855 | bl->regno = REGNO (dest_reg); | |
4856 | bl->biv = 0; | |
4857 | bl->giv = 0; | |
4858 | bl->biv_count = 0; | |
4859 | bl->giv_count = 0; | |
4860 | ||
4861 | /* Set initial value to the reg itself. */ | |
4862 | bl->initial_value = dest_reg; | |
e304a8e6 | 4863 | bl->final_value = 0; |
c5b7917e | 4864 | /* We haven't seen the initializing insn yet */ |
b4ad7b23 RS |
4865 | bl->init_insn = 0; |
4866 | bl->init_set = 0; | |
4867 | bl->initial_test = 0; | |
4868 | bl->incremented = 0; | |
4869 | bl->eliminable = 0; | |
4870 | bl->nonneg = 0; | |
4871 | bl->reversed = 0; | |
b5d27be7 | 4872 | bl->total_benefit = 0; |
b4ad7b23 | 4873 | |
14be28e5 MH |
4874 | /* Add this class to ivs->list. */ |
4875 | bl->next = ivs->list; | |
4876 | ivs->list = bl; | |
b4ad7b23 RS |
4877 | |
4878 | /* Put it in the array of biv register classes. */ | |
8b634749 | 4879 | REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl; |
b4ad7b23 RS |
4880 | } |
4881 | ||
4882 | /* Update IV_CLASS entry for this biv. */ | |
4883 | v->next_iv = bl->biv; | |
4884 | bl->biv = v; | |
4885 | bl->biv_count++; | |
4886 | if (mult_val == const1_rtx) | |
4887 | bl->incremented = 1; | |
4888 | ||
4889 | if (loop_dump_stream) | |
c804f3f8 | 4890 | loop_biv_dump (v, loop_dump_stream, 0); |
b4ad7b23 RS |
4891 | } |
4892 | \f | |
4893 | /* Fill in the data about one giv. | |
4894 | V is the `struct induction' in which we record the giv. (It is | |
4895 | allocated by the caller, with alloca.) | |
4896 | INSN is the insn that sets it. | |
4897 | BENEFIT estimates the savings from deleting this insn. | |
4898 | TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed | |
4899 | into a register or is used as a memory address. | |
4900 | ||
4901 | SRC_REG is the biv reg which the giv is computed from. | |
4902 | DEST_REG is the giv's reg (if the giv is stored in a reg). | |
4903 | MULT_VAL and ADD_VAL are the coefficients used to compute the giv. | |
4904 | LOCATION points to the place where this giv's value appears in INSN. */ | |
4905 | ||
4906 | static void | |
e8cb4873 RH |
4907 | record_giv (loop, v, insn, src_reg, dest_reg, mult_val, add_val, ext_val, |
4908 | benefit, type, not_every_iteration, maybe_multiple, location) | |
0534b804 | 4909 | const struct loop *loop; |
b4ad7b23 RS |
4910 | struct induction *v; |
4911 | rtx insn; | |
4912 | rtx src_reg; | |
4913 | rtx dest_reg; | |
e8cb4873 | 4914 | rtx mult_val, add_val, ext_val; |
b4ad7b23 RS |
4915 | int benefit; |
4916 | enum g_types type; | |
c5c76735 | 4917 | int not_every_iteration, maybe_multiple; |
b4ad7b23 | 4918 | rtx *location; |
b4ad7b23 | 4919 | { |
ed5bb68d | 4920 | struct loop_ivs *ivs = LOOP_IVS (loop); |
b4ad7b23 RS |
4921 | struct induction *b; |
4922 | struct iv_class *bl; | |
4923 | rtx set = single_set (insn); | |
ce7de04c JH |
4924 | rtx temp; |
4925 | ||
4926 | /* Attempt to prove constantness of the values. */ | |
4927 | temp = simplify_rtx (add_val); | |
4928 | if (temp) | |
4929 | add_val = temp; | |
b4ad7b23 RS |
4930 | |
4931 | v->insn = insn; | |
4932 | v->src_reg = src_reg; | |
4933 | v->giv_type = type; | |
4934 | v->dest_reg = dest_reg; | |
4935 | v->mult_val = mult_val; | |
4936 | v->add_val = add_val; | |
e8cb4873 | 4937 | v->ext_dependant = ext_val; |
b4ad7b23 RS |
4938 | v->benefit = benefit; |
4939 | v->location = location; | |
4940 | v->cant_derive = 0; | |
4941 | v->combined_with = 0; | |
c5c76735 | 4942 | v->maybe_multiple = maybe_multiple; |
b4ad7b23 RS |
4943 | v->maybe_dead = 0; |
4944 | v->derive_adjustment = 0; | |
4945 | v->same = 0; | |
4946 | v->ignore = 0; | |
4947 | v->new_reg = 0; | |
4948 | v->final_value = 0; | |
f415f7be | 4949 | v->same_insn = 0; |
8516af93 | 4950 | v->auto_inc_opt = 0; |
9ae8ffe7 JL |
4951 | v->unrolled = 0; |
4952 | v->shared = 0; | |
b4ad7b23 RS |
4953 | |
4954 | /* The v->always_computable field is used in update_giv_derive, to | |
4955 | determine whether a giv can be used to derive another giv. For a | |
4956 | DEST_REG giv, INSN computes a new value for the giv, so its value | |
4957 | isn't computable if INSN insn't executed every iteration. | |
4958 | However, for a DEST_ADDR giv, INSN merely uses the value of the giv; | |
4959 | it does not compute a new value. Hence the value is always computable | |
d45cf215 | 4960 | regardless of whether INSN is executed each iteration. */ |
b4ad7b23 RS |
4961 | |
4962 | if (type == DEST_ADDR) | |
4963 | v->always_computable = 1; | |
4964 | else | |
4965 | v->always_computable = ! not_every_iteration; | |
4966 | ||
8516af93 JW |
4967 | v->always_executed = ! not_every_iteration; |
4968 | ||
b4ad7b23 RS |
4969 | if (type == DEST_ADDR) |
4970 | { | |
4971 | v->mode = GET_MODE (*location); | |
4972 | v->lifetime = 1; | |
b4ad7b23 RS |
4973 | } |
4974 | else /* type == DEST_REG */ | |
4975 | { | |
4976 | v->mode = GET_MODE (SET_DEST (set)); | |
4977 | ||
b8056b46 | 4978 | v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg)); |
b4ad7b23 | 4979 | |
b4ad7b23 RS |
4980 | /* If the lifetime is zero, it means that this register is |
4981 | really a dead store. So mark this as a giv that can be | |
0f41302f | 4982 | ignored. This will not prevent the biv from being eliminated. */ |
b4ad7b23 RS |
4983 | if (v->lifetime == 0) |
4984 | v->ignore = 1; | |
4985 | ||
ed5bb68d MH |
4986 | REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT; |
4987 | REG_IV_INFO (ivs, REGNO (dest_reg)) = v; | |
b4ad7b23 RS |
4988 | } |
4989 | ||
4990 | /* Add the giv to the class of givs computed from one biv. */ | |
4991 | ||
8b634749 | 4992 | bl = REG_IV_CLASS (ivs, REGNO (src_reg)); |
b4ad7b23 RS |
4993 | if (bl) |
4994 | { | |
4995 | v->next_iv = bl->giv; | |
4996 | bl->giv = v; | |
4997 | /* Don't count DEST_ADDR. This is supposed to count the number of | |
4998 | insns that calculate givs. */ | |
4999 | if (type == DEST_REG) | |
5000 | bl->giv_count++; | |
5001 | bl->total_benefit += benefit; | |
5002 | } | |
5003 | else | |
5004 | /* Fatal error, biv missing for this giv? */ | |
5005 | abort (); | |
5006 | ||
5007 | if (type == DEST_ADDR) | |
5008 | v->replaceable = 1; | |
5009 | else | |
5010 | { | |
5011 | /* The giv can be replaced outright by the reduced register only if all | |
5012 | of the following conditions are true: | |
5013 | - the insn that sets the giv is always executed on any iteration | |
5014 | on which the giv is used at all | |
5015 | (there are two ways to deduce this: | |
5016 | either the insn is executed on every iteration, | |
5017 | or all uses follow that insn in the same basic block), | |
5018 | - the giv is not used outside the loop | |
5019 | - no assignments to the biv occur during the giv's lifetime. */ | |
5020 | ||
b1f21e0a | 5021 | if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn) |
b4ad7b23 | 5022 | /* Previous line always fails if INSN was moved by loop opt. */ |
8529a489 | 5023 | && REGNO_LAST_LUID (REGNO (dest_reg)) |
0534b804 | 5024 | < INSN_LUID (loop->end) |
b4ad7b23 RS |
5025 | && (! not_every_iteration |
5026 | || last_use_this_basic_block (dest_reg, insn))) | |
e6fcb60d | 5027 | { |
b4ad7b23 RS |
5028 | /* Now check that there are no assignments to the biv within the |
5029 | giv's lifetime. This requires two separate checks. */ | |
5030 | ||
5031 | /* Check each biv update, and fail if any are between the first | |
5032 | and last use of the giv. | |
e6fcb60d | 5033 | |
b4ad7b23 RS |
5034 | If this loop contains an inner loop that was unrolled, then |
5035 | the insn modifying the biv may have been emitted by the loop | |
5036 | unrolling code, and hence does not have a valid luid. Just | |
5037 | mark the biv as not replaceable in this case. It is not very | |
5038 | useful as a biv, because it is used in two different loops. | |
5039 | It is very unlikely that we would be able to optimize the giv | |
5040 | using this biv anyways. */ | |
5041 | ||
5042 | v->replaceable = 1; | |
5043 | for (b = bl->biv; b; b = b->next_iv) | |
5044 | { | |
5045 | if (INSN_UID (b->insn) >= max_uid_for_loop | |
8529a489 MH |
5046 | || ((INSN_LUID (b->insn) |
5047 | >= REGNO_FIRST_LUID (REGNO (dest_reg))) | |
5048 | && (INSN_LUID (b->insn) | |
5049 | <= REGNO_LAST_LUID (REGNO (dest_reg))))) | |
b4ad7b23 RS |
5050 | { |
5051 | v->replaceable = 0; | |
5052 | v->not_replaceable = 1; | |
5053 | break; | |
e6fcb60d | 5054 | } |
b4ad7b23 RS |
5055 | } |
5056 | ||
5031afa7 JW |
5057 | /* If there are any backwards branches that go from after the |
5058 | biv update to before it, then this giv is not replaceable. */ | |
b4ad7b23 | 5059 | if (v->replaceable) |
5031afa7 | 5060 | for (b = bl->biv; b; b = b->next_iv) |
0534b804 | 5061 | if (back_branch_in_range_p (loop, b->insn)) |
5031afa7 JW |
5062 | { |
5063 | v->replaceable = 0; | |
5064 | v->not_replaceable = 1; | |
5065 | break; | |
5066 | } | |
b4ad7b23 RS |
5067 | } |
5068 | else | |
5069 | { | |
5070 | /* May still be replaceable, we don't have enough info here to | |
5071 | decide. */ | |
5072 | v->replaceable = 0; | |
5073 | v->not_replaceable = 0; | |
5074 | } | |
5075 | } | |
5076 | ||
45f97e2e RH |
5077 | /* Record whether the add_val contains a const_int, for later use by |
5078 | combine_givs. */ | |
5079 | { | |
5080 | rtx tem = add_val; | |
5081 | ||
5082 | v->no_const_addval = 1; | |
5083 | if (tem == const0_rtx) | |
5084 | ; | |
ce7de04c | 5085 | else if (CONSTANT_P (add_val)) |
45f97e2e | 5086 | v->no_const_addval = 0; |
ce7de04c | 5087 | if (GET_CODE (tem) == PLUS) |
45f97e2e | 5088 | { |
ce7de04c | 5089 | while (1) |
45f97e2e RH |
5090 | { |
5091 | if (GET_CODE (XEXP (tem, 0)) == PLUS) | |
5092 | tem = XEXP (tem, 0); | |
5093 | else if (GET_CODE (XEXP (tem, 1)) == PLUS) | |
5094 | tem = XEXP (tem, 1); | |
5095 | else | |
5096 | break; | |
5097 | } | |
ce7de04c JH |
5098 | if (CONSTANT_P (XEXP (tem, 1))) |
5099 | v->no_const_addval = 0; | |
45f97e2e RH |
5100 | } |
5101 | } | |
5102 | ||
b4ad7b23 | 5103 | if (loop_dump_stream) |
c804f3f8 | 5104 | loop_giv_dump (v, loop_dump_stream, 0); |
b4ad7b23 RS |
5105 | } |
5106 | ||
b4ad7b23 RS |
5107 | /* All this does is determine whether a giv can be made replaceable because |
5108 | its final value can be calculated. This code can not be part of record_giv | |
5109 | above, because final_giv_value requires that the number of loop iterations | |
5110 | be known, and that can not be accurately calculated until after all givs | |
5111 | have been identified. */ | |
5112 | ||
5113 | static void | |
0534b804 MH |
5114 | check_final_value (loop, v) |
5115 | const struct loop *loop; | |
b4ad7b23 | 5116 | struct induction *v; |
b4ad7b23 | 5117 | { |
ed5bb68d | 5118 | struct loop_ivs *ivs = LOOP_IVS (loop); |
b4ad7b23 RS |
5119 | struct iv_class *bl; |
5120 | rtx final_value = 0; | |
b4ad7b23 | 5121 | |
8b634749 | 5122 | bl = REG_IV_CLASS (ivs, REGNO (v->src_reg)); |
b4ad7b23 RS |
5123 | |
5124 | /* DEST_ADDR givs will never reach here, because they are always marked | |
5125 | replaceable above in record_giv. */ | |
5126 | ||
5127 | /* The giv can be replaced outright by the reduced register only if all | |
5128 | of the following conditions are true: | |
5129 | - the insn that sets the giv is always executed on any iteration | |
5130 | on which the giv is used at all | |
5131 | (there are two ways to deduce this: | |
5132 | either the insn is executed on every iteration, | |
5133 | or all uses follow that insn in the same basic block), | |
5134 | - its final value can be calculated (this condition is different | |
5135 | than the one above in record_giv) | |
70dd0f7f | 5136 | - it's not used before the it's set |
b4ad7b23 RS |
5137 | - no assignments to the biv occur during the giv's lifetime. */ |
5138 | ||
5139 | #if 0 | |
5140 | /* This is only called now when replaceable is known to be false. */ | |
5141 | /* Clear replaceable, so that it won't confuse final_giv_value. */ | |
5142 | v->replaceable = 0; | |
5143 | #endif | |
5144 | ||
0534b804 | 5145 | if ((final_value = final_giv_value (loop, v)) |
b4ad7b23 RS |
5146 | && (v->always_computable || last_use_this_basic_block (v->dest_reg, v->insn))) |
5147 | { | |
70dd0f7f | 5148 | int biv_increment_seen = 0, before_giv_insn = 0; |
b4ad7b23 RS |
5149 | rtx p = v->insn; |
5150 | rtx last_giv_use; | |
5151 | ||
5152 | v->replaceable = 1; | |
5153 | ||
5154 | /* When trying to determine whether or not a biv increment occurs | |
5155 | during the lifetime of the giv, we can ignore uses of the variable | |
5156 | outside the loop because final_value is true. Hence we can not | |
5157 | use regno_last_uid and regno_first_uid as above in record_giv. */ | |
5158 | ||
5159 | /* Search the loop to determine whether any assignments to the | |
5160 | biv occur during the giv's lifetime. Start with the insn | |
5161 | that sets the giv, and search around the loop until we come | |
5162 | back to that insn again. | |
5163 | ||
5164 | Also fail if there is a jump within the giv's lifetime that jumps | |
5165 | to somewhere outside the lifetime but still within the loop. This | |
5166 | catches spaghetti code where the execution order is not linear, and | |
5167 | hence the above test fails. Here we assume that the giv lifetime | |
5168 | does not extend from one iteration of the loop to the next, so as | |
5169 | to make the test easier. Since the lifetime isn't known yet, | |
5170 | this requires two loops. See also record_giv above. */ | |
5171 | ||
5172 | last_giv_use = v->insn; | |
5173 | ||
5174 | while (1) | |
5175 | { | |
5176 | p = NEXT_INSN (p); | |
0534b804 | 5177 | if (p == loop->end) |
70dd0f7f FS |
5178 | { |
5179 | before_giv_insn = 1; | |
5180 | p = NEXT_INSN (loop->start); | |
5181 | } | |
b4ad7b23 RS |
5182 | if (p == v->insn) |
5183 | break; | |
5184 | ||
5185 | if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN | |
5186 | || GET_CODE (p) == CALL_INSN) | |
5187 | { | |
8a09bb27 JW |
5188 | /* It is possible for the BIV increment to use the GIV if we |
5189 | have a cycle. Thus we must be sure to check each insn for | |
5190 | both BIV and GIV uses, and we must check for BIV uses | |
5191 | first. */ | |
5192 | ||
5193 | if (! biv_increment_seen | |
5194 | && reg_set_p (v->src_reg, PATTERN (p))) | |
5195 | biv_increment_seen = 1; | |
fd5d5b07 | 5196 | |
8a09bb27 | 5197 | if (reg_mentioned_p (v->dest_reg, PATTERN (p))) |
b4ad7b23 | 5198 | { |
70dd0f7f | 5199 | if (biv_increment_seen || before_giv_insn) |
b4ad7b23 RS |
5200 | { |
5201 | v->replaceable = 0; | |
5202 | v->not_replaceable = 1; | |
5203 | break; | |
5204 | } | |
8a09bb27 | 5205 | last_giv_use = p; |
b4ad7b23 | 5206 | } |
b4ad7b23 RS |
5207 | } |
5208 | } | |
e6fcb60d | 5209 | |
b4ad7b23 RS |
5210 | /* Now that the lifetime of the giv is known, check for branches |
5211 | from within the lifetime to outside the lifetime if it is still | |
5212 | replaceable. */ | |
5213 | ||
5214 | if (v->replaceable) | |
5215 | { | |
5216 | p = v->insn; | |
5217 | while (1) | |
5218 | { | |
5219 | p = NEXT_INSN (p); | |
0534b804 MH |
5220 | if (p == loop->end) |
5221 | p = NEXT_INSN (loop->start); | |
b4ad7b23 RS |
5222 | if (p == last_giv_use) |
5223 | break; | |
5224 | ||
5225 | if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) | |
5226 | && LABEL_NAME (JUMP_LABEL (p)) | |
1cb1fe66 | 5227 | && ((loop_insn_first_p (JUMP_LABEL (p), v->insn) |
0534b804 | 5228 | && loop_insn_first_p (loop->start, JUMP_LABEL (p))) |
1cb1fe66 | 5229 | || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p)) |
0534b804 | 5230 | && loop_insn_first_p (JUMP_LABEL (p), loop->end)))) |
b4ad7b23 RS |
5231 | { |
5232 | v->replaceable = 0; | |
5233 | v->not_replaceable = 1; | |
5234 | ||
5235 | if (loop_dump_stream) | |
5236 | fprintf (loop_dump_stream, | |
5237 | "Found branch outside giv lifetime.\n"); | |
5238 | ||
5239 | break; | |
5240 | } | |
5241 | } | |
5242 | } | |
5243 | ||
5244 | /* If it is replaceable, then save the final value. */ | |
5245 | if (v->replaceable) | |
5246 | v->final_value = final_value; | |
5247 | } | |
5248 | ||
5249 | if (loop_dump_stream && v->replaceable) | |
5250 | fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n", | |
5251 | INSN_UID (v->insn), REGNO (v->dest_reg)); | |
5252 | } | |
5253 | \f | |
5254 | /* Update the status of whether a giv can derive other givs. | |
5255 | ||
5256 | We need to do something special if there is or may be an update to the biv | |
5257 | between the time the giv is defined and the time it is used to derive | |
5258 | another giv. | |
5259 | ||
5260 | In addition, a giv that is only conditionally set is not allowed to | |
5261 | derive another giv once a label has been passed. | |
5262 | ||
5263 | The cases we look at are when a label or an update to a biv is passed. */ | |
5264 | ||
5265 | static void | |
0534b804 | 5266 | update_giv_derive (loop, p) |
e6fcb60d | 5267 | const struct loop *loop; |
b4ad7b23 RS |
5268 | rtx p; |
5269 | { | |
ed5bb68d | 5270 | struct loop_ivs *ivs = LOOP_IVS (loop); |
b4ad7b23 RS |
5271 | struct iv_class *bl; |
5272 | struct induction *biv, *giv; | |
5273 | rtx tem; | |
5274 | int dummy; | |
5275 | ||
5276 | /* Search all IV classes, then all bivs, and finally all givs. | |
5277 | ||
7dcd3836 | 5278 | There are three cases we are concerned with. First we have the situation |
b4ad7b23 RS |
5279 | of a giv that is only updated conditionally. In that case, it may not |
5280 | derive any givs after a label is passed. | |
5281 | ||
5282 | The second case is when a biv update occurs, or may occur, after the | |
5283 | definition of a giv. For certain biv updates (see below) that are | |
5284 | known to occur between the giv definition and use, we can adjust the | |
5285 | giv definition. For others, or when the biv update is conditional, | |
5286 | we must prevent the giv from deriving any other givs. There are two | |
5287 | sub-cases within this case. | |
5288 | ||
5289 | If this is a label, we are concerned with any biv update that is done | |
5290 | conditionally, since it may be done after the giv is defined followed by | |
5291 | a branch here (actually, we need to pass both a jump and a label, but | |
5292 | this extra tracking doesn't seem worth it). | |
5293 | ||
7dcd3836 RK |
5294 | If this is a jump, we are concerned about any biv update that may be |
5295 | executed multiple times. We are actually only concerned about | |
5296 | backward jumps, but it is probably not worth performing the test | |
5297 | on the jump again here. | |
5298 | ||
5299 | If this is a biv update, we must adjust the giv status to show that a | |
b4ad7b23 RS |
5300 | subsequent biv update was performed. If this adjustment cannot be done, |
5301 | the giv cannot derive further givs. */ | |
5302 | ||
14be28e5 | 5303 | for (bl = ivs->list; bl; bl = bl->next) |
b4ad7b23 | 5304 | for (biv = bl->biv; biv; biv = biv->next_iv) |
7dcd3836 RK |
5305 | if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN |
5306 | || biv->insn == p) | |
b4ad7b23 RS |
5307 | { |
5308 | for (giv = bl->giv; giv; giv = giv->next_iv) | |
5309 | { | |
5310 | /* If cant_derive is already true, there is no point in | |
5311 | checking all of these conditions again. */ | |
5312 | if (giv->cant_derive) | |
5313 | continue; | |
5314 | ||
5315 | /* If this giv is conditionally set and we have passed a label, | |
5316 | it cannot derive anything. */ | |
5317 | if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable) | |
5318 | giv->cant_derive = 1; | |
5319 | ||
5320 | /* Skip givs that have mult_val == 0, since | |
5321 | they are really invariants. Also skip those that are | |
5322 | replaceable, since we know their lifetime doesn't contain | |
5323 | any biv update. */ | |
5324 | else if (giv->mult_val == const0_rtx || giv->replaceable) | |
5325 | continue; | |
5326 | ||
5327 | /* The only way we can allow this giv to derive another | |
5328 | is if this is a biv increment and we can form the product | |
5329 | of biv->add_val and giv->mult_val. In this case, we will | |
5330 | be able to compute a compensation. */ | |
5331 | else if (biv->insn == p) | |
5332 | { | |
e8cb4873 | 5333 | rtx ext_val_dummy; |
c160c628 | 5334 | |
e8cb4873 | 5335 | tem = 0; |
c160c628 | 5336 | if (biv->mult_val == const1_rtx) |
0534b804 MH |
5337 | tem = simplify_giv_expr (loop, |
5338 | gen_rtx_MULT (giv->mode, | |
38a448ca RH |
5339 | biv->add_val, |
5340 | giv->mult_val), | |
e8cb4873 | 5341 | &ext_val_dummy, &dummy); |
c160c628 RK |
5342 | |
5343 | if (tem && giv->derive_adjustment) | |
c5c76735 | 5344 | tem = simplify_giv_expr |
0534b804 MH |
5345 | (loop, |
5346 | gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment), | |
e8cb4873 | 5347 | &ext_val_dummy, &dummy); |
c5c76735 | 5348 | |
c160c628 | 5349 | if (tem) |
b4ad7b23 RS |
5350 | giv->derive_adjustment = tem; |
5351 | else | |
5352 | giv->cant_derive = 1; | |
5353 | } | |
7dcd3836 RK |
5354 | else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable) |
5355 | || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple)) | |
b4ad7b23 RS |
5356 | giv->cant_derive = 1; |
5357 | } | |
5358 | } | |
5359 | } | |
5360 | \f | |
5361 | /* Check whether an insn is an increment legitimate for a basic induction var. | |
7056f7e8 RS |
5362 | X is the source of insn P, or a part of it. |
5363 | MODE is the mode in which X should be interpreted. | |
5364 | ||
b4ad7b23 RS |
5365 | DEST_REG is the putative biv, also the destination of the insn. |
5366 | We accept patterns of these forms: | |
09d7f5a5 | 5367 | REG = REG + INVARIANT (includes REG = REG - CONSTANT) |
b4ad7b23 | 5368 | REG = INVARIANT + REG |
b4ad7b23 RS |
5369 | |
5370 | If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX, | |
3ec2b590 R |
5371 | store the additive term into *INC_VAL, and store the place where |
5372 | we found the additive term into *LOCATION. | |
b4ad7b23 RS |
5373 | |
5374 | If X is an assignment of an invariant into DEST_REG, we set | |
5375 | *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL. | |
5376 | ||
09d7f5a5 RK |
5377 | We also want to detect a BIV when it corresponds to a variable |
5378 | whose mode was promoted via PROMOTED_MODE. In that case, an increment | |
5379 | of the variable may be a PLUS that adds a SUBREG of that variable to | |
5380 | an invariant and then sign- or zero-extends the result of the PLUS | |
5381 | into the variable. | |
5382 | ||
5383 | Most GIVs in such cases will be in the promoted mode, since that is the | |
5384 | probably the natural computation mode (and almost certainly the mode | |
5385 | used for addresses) on the machine. So we view the pseudo-reg containing | |
5386 | the variable as the BIV, as if it were simply incremented. | |
5387 | ||
5388 | Note that treating the entire pseudo as a BIV will result in making | |
5389 | simple increments to any GIVs based on it. However, if the variable | |
5390 | overflows in its declared mode but not its promoted mode, the result will | |
e6fcb60d | 5391 | be incorrect. This is acceptable if the variable is signed, since |
09d7f5a5 RK |
5392 | overflows in such cases are undefined, but not if it is unsigned, since |
5393 | those overflows are defined. So we only check for SIGN_EXTEND and | |
5394 | not ZERO_EXTEND. | |
5395 | ||
5396 | If we cannot find a biv, we return 0. */ | |
b4ad7b23 RS |
5397 | |
5398 | static int | |
98d1cd45 | 5399 | basic_induction_var (loop, x, mode, dest_reg, p, inc_val, mult_val, location) |
0534b804 | 5400 | const struct loop *loop; |
b4ad7b23 | 5401 | register rtx x; |
7056f7e8 | 5402 | enum machine_mode mode; |
b4ad7b23 | 5403 | rtx dest_reg; |
a2be868f | 5404 | rtx p; |
b4ad7b23 RS |
5405 | rtx *inc_val; |
5406 | rtx *mult_val; | |
3ec2b590 | 5407 | rtx **location; |
b4ad7b23 RS |
5408 | { |
5409 | register enum rtx_code code; | |
3ec2b590 | 5410 | rtx *argp, arg; |
09d7f5a5 | 5411 | rtx insn, set = 0; |
b4ad7b23 RS |
5412 | |
5413 | code = GET_CODE (x); | |
69ba6af3 | 5414 | *location = NULL; |
b4ad7b23 RS |
5415 | switch (code) |
5416 | { | |
5417 | case PLUS: | |
45f97e2e | 5418 | if (rtx_equal_p (XEXP (x, 0), dest_reg) |
09d7f5a5 RK |
5419 | || (GET_CODE (XEXP (x, 0)) == SUBREG |
5420 | && SUBREG_PROMOTED_VAR_P (XEXP (x, 0)) | |
5421 | && SUBREG_REG (XEXP (x, 0)) == dest_reg)) | |
3ec2b590 R |
5422 | { |
5423 | argp = &XEXP (x, 1); | |
5424 | } | |
45f97e2e | 5425 | else if (rtx_equal_p (XEXP (x, 1), dest_reg) |
09d7f5a5 | 5426 | || (GET_CODE (XEXP (x, 1)) == SUBREG |
b81fd0f4 RS |
5427 | && SUBREG_PROMOTED_VAR_P (XEXP (x, 1)) |
5428 | && SUBREG_REG (XEXP (x, 1)) == dest_reg)) | |
3ec2b590 R |
5429 | { |
5430 | argp = &XEXP (x, 0); | |
5431 | } | |
b4ad7b23 | 5432 | else |
e6fcb60d | 5433 | return 0; |
b4ad7b23 | 5434 | |
3ec2b590 | 5435 | arg = *argp; |
0534b804 | 5436 | if (loop_invariant_p (loop, arg) != 1) |
b4ad7b23 RS |
5437 | return 0; |
5438 | ||
7056f7e8 | 5439 | *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0); |
b4ad7b23 | 5440 | *mult_val = const1_rtx; |
3ec2b590 | 5441 | *location = argp; |
b4ad7b23 RS |
5442 | return 1; |
5443 | ||
09d7f5a5 RK |
5444 | case SUBREG: |
5445 | /* If this is a SUBREG for a promoted variable, check the inner | |
5446 | value. */ | |
5447 | if (SUBREG_PROMOTED_VAR_P (x)) | |
0534b804 MH |
5448 | return basic_induction_var (loop, SUBREG_REG (x), |
5449 | GET_MODE (SUBREG_REG (x)), | |
98d1cd45 | 5450 | dest_reg, p, inc_val, mult_val, location); |
fe159061 | 5451 | return 0; |
b4ad7b23 | 5452 | |
09d7f5a5 | 5453 | case REG: |
45f97e2e | 5454 | /* If this register is assigned in a previous insn, look at its |
09d7f5a5 RK |
5455 | source, but don't go outside the loop or past a label. */ |
5456 | ||
af198097 R |
5457 | /* If this sets a register to itself, we would repeat any previous |
5458 | biv increment if we applied this strategy blindly. */ | |
5459 | if (rtx_equal_p (dest_reg, x)) | |
5460 | return 0; | |
5461 | ||
45f97e2e RH |
5462 | insn = p; |
5463 | while (1) | |
5464 | { | |
7dbe6ae9 | 5465 | rtx dest; |
e6fcb60d KH |
5466 | do |
5467 | { | |
5468 | insn = PREV_INSN (insn); | |
5469 | } | |
5470 | while (insn && GET_CODE (insn) == NOTE | |
5471 | && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG); | |
09d7f5a5 | 5472 | |
e6fcb60d | 5473 | if (!insn) |
45f97e2e RH |
5474 | break; |
5475 | set = single_set (insn); | |
5476 | if (set == 0) | |
5477 | break; | |
7dbe6ae9 BS |
5478 | dest = SET_DEST (set); |
5479 | if (dest == x | |
5480 | || (GET_CODE (dest) == SUBREG | |
5481 | && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD) | |
5482 | && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT) | |
5483 | && SUBREG_REG (dest) == x)) | |
5484 | return basic_induction_var (loop, SET_SRC (set), | |
5485 | (GET_MODE (SET_SRC (set)) == VOIDmode | |
5486 | ? GET_MODE (x) | |
5487 | : GET_MODE (SET_SRC (set))), | |
5488 | dest_reg, insn, | |
5489 | inc_val, mult_val, location); | |
5490 | ||
5491 | while (GET_CODE (dest) == SIGN_EXTRACT | |
5492 | || GET_CODE (dest) == ZERO_EXTRACT | |
5493 | || GET_CODE (dest) == SUBREG | |
5494 | || GET_CODE (dest) == STRICT_LOW_PART) | |
5495 | dest = XEXP (dest, 0); | |
5496 | if (dest == x) | |
5497 | break; | |
45f97e2e | 5498 | } |
fd5d5b07 | 5499 | /* Fall through. */ |
b4ad7b23 RS |
5500 | |
5501 | /* Can accept constant setting of biv only when inside inner most loop. | |
5502 | Otherwise, a biv of an inner loop may be incorrectly recognized | |
5503 | as a biv of the outer loop, | |
5504 | causing code to be moved INTO the inner loop. */ | |
5505 | case MEM: | |
0534b804 | 5506 | if (loop_invariant_p (loop, x) != 1) |
b4ad7b23 RS |
5507 | return 0; |
5508 | case CONST_INT: | |
5509 | case SYMBOL_REF: | |
5510 | case CONST: | |
829002bb BM |
5511 | /* convert_modes aborts if we try to convert to or from CCmode, so just |
5512 | exclude that case. It is very unlikely that a condition code value | |
5513 | would be a useful iterator anyways. */ | |
0534b804 | 5514 | if (loop->level == 1 |
829002bb BM |
5515 | && GET_MODE_CLASS (mode) != MODE_CC |
5516 | && GET_MODE_CLASS (GET_MODE (dest_reg)) != MODE_CC) | |
fd5d5b07 | 5517 | { |
7056f7e8 RS |
5518 | /* Possible bug here? Perhaps we don't know the mode of X. */ |
5519 | *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0); | |
e6fcb60d KH |
5520 | *mult_val = const0_rtx; |
5521 | return 1; | |
5522 | } | |
b4ad7b23 | 5523 | else |
e6fcb60d | 5524 | return 0; |
b4ad7b23 | 5525 | |
09d7f5a5 | 5526 | case SIGN_EXTEND: |
0534b804 | 5527 | return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)), |
98d1cd45 | 5528 | dest_reg, p, inc_val, mult_val, location); |
45f97e2e | 5529 | |
09d7f5a5 RK |
5530 | case ASHIFTRT: |
5531 | /* Similar, since this can be a sign extension. */ | |
5532 | for (insn = PREV_INSN (p); | |
5533 | (insn && GET_CODE (insn) == NOTE | |
5534 | && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG); | |
5535 | insn = PREV_INSN (insn)) | |
5536 | ; | |
5537 | ||
5538 | if (insn) | |
5539 | set = single_set (insn); | |
5540 | ||
af198097 R |
5541 | if (! rtx_equal_p (dest_reg, XEXP (x, 0)) |
5542 | && set && SET_DEST (set) == XEXP (x, 0) | |
09d7f5a5 RK |
5543 | && GET_CODE (XEXP (x, 1)) == CONST_INT |
5544 | && INTVAL (XEXP (x, 1)) >= 0 | |
5545 | && GET_CODE (SET_SRC (set)) == ASHIFT | |
98d1cd45 R |
5546 | && XEXP (x, 1) == XEXP (SET_SRC (set), 1)) |
5547 | return basic_induction_var (loop, XEXP (SET_SRC (set), 0), | |
5548 | GET_MODE (XEXP (x, 0)), | |
5549 | dest_reg, insn, inc_val, mult_val, | |
5550 | location); | |
09d7f5a5 RK |
5551 | return 0; |
5552 | ||
b4ad7b23 RS |
5553 | default: |
5554 | return 0; | |
5555 | } | |
5556 | } | |
5557 | \f | |
5558 | /* A general induction variable (giv) is any quantity that is a linear | |
5559 | function of a basic induction variable, | |
5560 | i.e. giv = biv * mult_val + add_val. | |
5561 | The coefficients can be any loop invariant quantity. | |
5562 | A giv need not be computed directly from the biv; | |
5563 | it can be computed by way of other givs. */ | |
5564 | ||
5565 | /* Determine whether X computes a giv. | |
5566 | If it does, return a nonzero value | |
5567 | which is the benefit from eliminating the computation of X; | |
5568 | set *SRC_REG to the register of the biv that it is computed from; | |
5569 | set *ADD_VAL and *MULT_VAL to the coefficients, | |
5570 | such that the value of X is biv * mult + add; */ | |
5571 | ||
5572 | static int | |
e8cb4873 RH |
5573 | general_induction_var (loop, x, src_reg, add_val, mult_val, ext_val, |
5574 | is_addr, pbenefit, addr_mode) | |
0534b804 | 5575 | const struct loop *loop; |
b4ad7b23 RS |
5576 | rtx x; |
5577 | rtx *src_reg; | |
5578 | rtx *add_val; | |
5579 | rtx *mult_val; | |
e8cb4873 | 5580 | rtx *ext_val; |
45f97e2e RH |
5581 | int is_addr; |
5582 | int *pbenefit; | |
01329426 | 5583 | enum machine_mode addr_mode; |
b4ad7b23 | 5584 | { |
ed5bb68d | 5585 | struct loop_ivs *ivs = LOOP_IVS (loop); |
b4ad7b23 | 5586 | rtx orig_x = x; |
b4ad7b23 RS |
5587 | |
5588 | /* If this is an invariant, forget it, it isn't a giv. */ | |
0534b804 | 5589 | if (loop_invariant_p (loop, x) == 1) |
b4ad7b23 RS |
5590 | return 0; |
5591 | ||
45f97e2e | 5592 | *pbenefit = 0; |
e8cb4873 RH |
5593 | *ext_val = NULL_RTX; |
5594 | x = simplify_giv_expr (loop, x, ext_val, pbenefit); | |
b4ad7b23 | 5595 | if (x == 0) |
1f8f4a0b | 5596 | return 0; |
b4ad7b23 RS |
5597 | |
5598 | switch (GET_CODE (x)) | |
5599 | { | |
5600 | case USE: | |
5601 | case CONST_INT: | |
5602 | /* Since this is now an invariant and wasn't before, it must be a giv | |
5603 | with MULT_VAL == 0. It doesn't matter which BIV we associate this | |
5604 | with. */ | |
14be28e5 | 5605 | *src_reg = ivs->list->biv->dest_reg; |
b4ad7b23 RS |
5606 | *mult_val = const0_rtx; |
5607 | *add_val = x; | |
5608 | break; | |
5609 | ||
5610 | case REG: | |
5611 | /* This is equivalent to a BIV. */ | |
5612 | *src_reg = x; | |
5613 | *mult_val = const1_rtx; | |
5614 | *add_val = const0_rtx; | |
5615 | break; | |
5616 | ||
5617 | case PLUS: | |
5618 | /* Either (plus (biv) (invar)) or | |
5619 | (plus (mult (biv) (invar_1)) (invar_2)). */ | |
5620 | if (GET_CODE (XEXP (x, 0)) == MULT) | |
5621 | { | |
5622 | *src_reg = XEXP (XEXP (x, 0), 0); | |
5623 | *mult_val = XEXP (XEXP (x, 0), 1); | |
5624 | } | |
5625 | else | |
5626 | { | |
5627 | *src_reg = XEXP (x, 0); | |
5628 | *mult_val = const1_rtx; | |
5629 | } | |
5630 | *add_val = XEXP (x, 1); | |
5631 | break; | |
5632 | ||
5633 | case MULT: | |
5634 | /* ADD_VAL is zero. */ | |
5635 | *src_reg = XEXP (x, 0); | |
5636 | *mult_val = XEXP (x, 1); | |
5637 | *add_val = const0_rtx; | |
5638 | break; | |
5639 | ||
5640 | default: | |
5641 | abort (); | |
5642 | } | |
5643 | ||
5644 | /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be | |
5645 | unless they are CONST_INT). */ | |
5646 | if (GET_CODE (*add_val) == USE) | |
5647 | *add_val = XEXP (*add_val, 0); | |
5648 | if (GET_CODE (*mult_val) == USE) | |
5649 | *mult_val = XEXP (*mult_val, 0); | |
5650 | ||
45f97e2e | 5651 | if (is_addr) |
01329426 | 5652 | *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost; |
45f97e2e RH |
5653 | else |
5654 | *pbenefit += rtx_cost (orig_x, SET); | |
b4ad7b23 | 5655 | |
45f97e2e | 5656 | /* Always return true if this is a giv so it will be detected as such, |
e6fcb60d KH |
5657 | even if the benefit is zero or negative. This allows elimination |
5658 | of bivs that might otherwise not be eliminated. */ | |
5659 | return 1; | |
b4ad7b23 RS |
5660 | } |
5661 | \f | |
5662 | /* Given an expression, X, try to form it as a linear function of a biv. | |
5663 | We will canonicalize it to be of the form | |
5664 | (plus (mult (BIV) (invar_1)) | |
5665 | (invar_2)) | |
c5b7917e | 5666 | with possible degeneracies. |
b4ad7b23 RS |
5667 | |
5668 | The invariant expressions must each be of a form that can be used as a | |
5669 | machine operand. We surround then with a USE rtx (a hack, but localized | |
5670 | and certainly unambiguous!) if not a CONST_INT for simplicity in this | |
5671 | routine; it is the caller's responsibility to strip them. | |
5672 | ||
5673 | If no such canonicalization is possible (i.e., two biv's are used or an | |
5674 | expression that is neither invariant nor a biv or giv), this routine | |
5675 | returns 0. | |
5676 | ||
5677 | For a non-zero return, the result will have a code of CONST_INT, USE, | |
e6fcb60d | 5678 | REG (for a BIV), PLUS, or MULT. No other codes will occur. |
b4ad7b23 RS |
5679 | |
5680 | *BENEFIT will be incremented by the benefit of any sub-giv encountered. */ | |
5681 | ||
f428f252 KG |
5682 | static rtx sge_plus PARAMS ((enum machine_mode, rtx, rtx)); |
5683 | static rtx sge_plus_constant PARAMS ((rtx, rtx)); | |
45f97e2e | 5684 | |
b4ad7b23 | 5685 | static rtx |
e8cb4873 | 5686 | simplify_giv_expr (loop, x, ext_val, benefit) |
0534b804 | 5687 | const struct loop *loop; |
b4ad7b23 | 5688 | rtx x; |
e8cb4873 | 5689 | rtx *ext_val; |
b4ad7b23 RS |
5690 | int *benefit; |
5691 | { | |
ed5bb68d | 5692 | struct loop_ivs *ivs = LOOP_IVS (loop); |
1ecd860b | 5693 | struct loop_regs *regs = LOOP_REGS (loop); |
b4ad7b23 RS |
5694 | enum machine_mode mode = GET_MODE (x); |
5695 | rtx arg0, arg1; | |
5696 | rtx tem; | |
5697 | ||
5698 | /* If this is not an integer mode, or if we cannot do arithmetic in this | |
5699 | mode, this can't be a giv. */ | |
5700 | if (mode != VOIDmode | |
5701 | && (GET_MODE_CLASS (mode) != MODE_INT | |
5fd8383e | 5702 | || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)) |
45f97e2e | 5703 | return NULL_RTX; |
b4ad7b23 RS |
5704 | |
5705 | switch (GET_CODE (x)) | |
5706 | { | |
5707 | case PLUS: | |
e8cb4873 RH |
5708 | arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit); |
5709 | arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit); | |
b4ad7b23 | 5710 | if (arg0 == 0 || arg1 == 0) |
45f97e2e | 5711 | return NULL_RTX; |
b4ad7b23 RS |
5712 | |
5713 | /* Put constant last, CONST_INT last if both constant. */ | |
5714 | if ((GET_CODE (arg0) == USE | |
5715 | || GET_CODE (arg0) == CONST_INT) | |
45f97e2e RH |
5716 | && ! ((GET_CODE (arg0) == USE |
5717 | && GET_CODE (arg1) == USE) | |
5718 | || GET_CODE (arg1) == CONST_INT)) | |
b4ad7b23 RS |
5719 | tem = arg0, arg0 = arg1, arg1 = tem; |
5720 | ||
5721 | /* Handle addition of zero, then addition of an invariant. */ | |
5722 | if (arg1 == const0_rtx) | |
5723 | return arg0; | |
5724 | else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE) | |
5725 | switch (GET_CODE (arg0)) | |
5726 | { | |
5727 | case CONST_INT: | |
5728 | case USE: | |
45f97e2e RH |
5729 | /* Adding two invariants must result in an invariant, so enclose |
5730 | addition operation inside a USE and return it. */ | |
b4ad7b23 RS |
5731 | if (GET_CODE (arg0) == USE) |
5732 | arg0 = XEXP (arg0, 0); | |
da0af5a5 JL |
5733 | if (GET_CODE (arg1) == USE) |
5734 | arg1 = XEXP (arg1, 0); | |
5735 | ||
45f97e2e RH |
5736 | if (GET_CODE (arg0) == CONST_INT) |
5737 | tem = arg0, arg0 = arg1, arg1 = tem; | |
5738 | if (GET_CODE (arg1) == CONST_INT) | |
5739 | tem = sge_plus_constant (arg0, arg1); | |
da0af5a5 | 5740 | else |
45f97e2e | 5741 | tem = sge_plus (mode, arg0, arg1); |
b4ad7b23 | 5742 | |
45f97e2e RH |
5743 | if (GET_CODE (tem) != CONST_INT) |
5744 | tem = gen_rtx_USE (mode, tem); | |
b4ad7b23 RS |
5745 | return tem; |
5746 | ||
5747 | case REG: | |
5748 | case MULT: | |
5749 | /* biv + invar or mult + invar. Return sum. */ | |
38a448ca | 5750 | return gen_rtx_PLUS (mode, arg0, arg1); |
b4ad7b23 RS |
5751 | |
5752 | case PLUS: | |
5753 | /* (a + invar_1) + invar_2. Associate. */ | |
c5c76735 | 5754 | return |
0534b804 MH |
5755 | simplify_giv_expr (loop, |
5756 | gen_rtx_PLUS (mode, | |
c5c76735 JL |
5757 | XEXP (arg0, 0), |
5758 | gen_rtx_PLUS (mode, | |
5759 | XEXP (arg0, 1), | |
5760 | arg1)), | |
e8cb4873 | 5761 | ext_val, benefit); |
b4ad7b23 RS |
5762 | |
5763 | default: | |
5764 | abort (); | |
5765 | } | |
5766 | ||
5767 | /* Each argument must be either REG, PLUS, or MULT. Convert REG to | |
5768 | MULT to reduce cases. */ | |
5769 | if (GET_CODE (arg0) == REG) | |
38a448ca | 5770 | arg0 = gen_rtx_MULT (mode, arg0, const1_rtx); |
b4ad7b23 | 5771 | if (GET_CODE (arg1) == REG) |
38a448ca | 5772 | arg1 = gen_rtx_MULT (mode, arg1, const1_rtx); |
b4ad7b23 RS |
5773 | |
5774 | /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT. | |
5775 | Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT. | |
5776 | Recurse to associate the second PLUS. */ | |
5777 | if (GET_CODE (arg1) == MULT) | |
5778 | tem = arg0, arg0 = arg1, arg1 = tem; | |
5779 | ||
5780 | if (GET_CODE (arg1) == PLUS) | |
c5c76735 | 5781 | return |
0534b804 MH |
5782 | simplify_giv_expr (loop, |
5783 | gen_rtx_PLUS (mode, | |
c5c76735 JL |
5784 | gen_rtx_PLUS (mode, arg0, |
5785 | XEXP (arg1, 0)), | |
5786 | XEXP (arg1, 1)), | |
e8cb4873 | 5787 | ext_val, benefit); |
b4ad7b23 RS |
5788 | |
5789 | /* Now must have MULT + MULT. Distribute if same biv, else not giv. */ | |
5790 | if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT) | |
45f97e2e | 5791 | return NULL_RTX; |
b4ad7b23 | 5792 | |
45f97e2e RH |
5793 | if (!rtx_equal_p (arg0, arg1)) |
5794 | return NULL_RTX; | |
b4ad7b23 | 5795 | |
0534b804 MH |
5796 | return simplify_giv_expr (loop, |
5797 | gen_rtx_MULT (mode, | |
38a448ca RH |
5798 | XEXP (arg0, 0), |
5799 | gen_rtx_PLUS (mode, | |
5800 | XEXP (arg0, 1), | |
5801 | XEXP (arg1, 1))), | |
e8cb4873 | 5802 | ext_val, benefit); |
b4ad7b23 RS |
5803 | |
5804 | case MINUS: | |
0f41302f | 5805 | /* Handle "a - b" as "a + b * (-1)". */ |
0534b804 MH |
5806 | return simplify_giv_expr (loop, |
5807 | gen_rtx_PLUS (mode, | |
38a448ca | 5808 | XEXP (x, 0), |
c5c76735 JL |
5809 | gen_rtx_MULT (mode, |
5810 | XEXP (x, 1), | |
38a448ca | 5811 | constm1_rtx)), |
e8cb4873 | 5812 | ext_val, benefit); |
b4ad7b23 RS |
5813 | |
5814 | case MULT: | |
e8cb4873 RH |
5815 | arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit); |
5816 | arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit); | |
b4ad7b23 | 5817 | if (arg0 == 0 || arg1 == 0) |
45f97e2e | 5818 | return NULL_RTX; |
b4ad7b23 RS |
5819 | |
5820 | /* Put constant last, CONST_INT last if both constant. */ | |
5821 | if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT) | |
5822 | && GET_CODE (arg1) != CONST_INT) | |
5823 | tem = arg0, arg0 = arg1, arg1 = tem; | |
5824 | ||
5825 | /* If second argument is not now constant, not giv. */ | |
5826 | if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT) | |
45f97e2e | 5827 | return NULL_RTX; |
b4ad7b23 RS |
5828 | |
5829 | /* Handle multiply by 0 or 1. */ | |
5830 | if (arg1 == const0_rtx) | |
5831 | return const0_rtx; | |
5832 | ||
5833 | else if (arg1 == const1_rtx) | |
5834 | return arg0; | |
5835 | ||
5836 | switch (GET_CODE (arg0)) | |
5837 | { | |
5838 | case REG: | |
5839 | /* biv * invar. Done. */ | |
38a448ca | 5840 | return gen_rtx_MULT (mode, arg0, arg1); |
b4ad7b23 RS |
5841 | |
5842 | case CONST_INT: | |
5843 | /* Product of two constants. */ | |
5fd8383e | 5844 | return GEN_INT (INTVAL (arg0) * INTVAL (arg1)); |
b4ad7b23 RS |
5845 | |
5846 | case USE: | |
29aef5ca | 5847 | /* invar * invar is a giv, but attempt to simplify it somehow. */ |
45f97e2e RH |
5848 | if (GET_CODE (arg1) != CONST_INT) |
5849 | return NULL_RTX; | |
5850 | ||
5851 | arg0 = XEXP (arg0, 0); | |
29aef5ca | 5852 | if (GET_CODE (arg0) == MULT) |
45f97e2e | 5853 | { |
29aef5ca JH |
5854 | /* (invar_0 * invar_1) * invar_2. Associate. */ |
5855 | return simplify_giv_expr (loop, | |
5856 | gen_rtx_MULT (mode, | |
5857 | XEXP (arg0, 0), | |
5858 | gen_rtx_MULT (mode, | |
5859 | XEXP (arg0, | |
5860 | 1), | |
5861 | arg1)), | |
e8cb4873 | 5862 | ext_val, benefit); |
45f97e2e | 5863 | } |
29aef5ca JH |
5864 | /* Porpagate the MULT expressions to the intermost nodes. */ |
5865 | else if (GET_CODE (arg0) == PLUS) | |
5866 | { | |
5867 | /* (invar_0 + invar_1) * invar_2. Distribute. */ | |
5868 | return simplify_giv_expr (loop, | |
5869 | gen_rtx_PLUS (mode, | |
5870 | gen_rtx_MULT (mode, | |
5871 | XEXP (arg0, | |
5872 | 0), | |
5873 | arg1), | |
5874 | gen_rtx_MULT (mode, | |
5875 | XEXP (arg0, | |
5876 | 1), | |
5877 | arg1)), | |
e8cb4873 | 5878 | ext_val, benefit); |
29aef5ca JH |
5879 | } |
5880 | return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1)); | |
b4ad7b23 RS |
5881 | |
5882 | case MULT: | |
5883 | /* (a * invar_1) * invar_2. Associate. */ | |
0534b804 MH |
5884 | return simplify_giv_expr (loop, |
5885 | gen_rtx_MULT (mode, | |
c5c76735 | 5886 | XEXP (arg0, 0), |
38a448ca RH |
5887 | gen_rtx_MULT (mode, |
5888 | XEXP (arg0, 1), | |
5889 | arg1)), | |
e8cb4873 | 5890 | ext_val, benefit); |
b4ad7b23 RS |
5891 | |
5892 | case PLUS: | |
5893 | /* (a + invar_1) * invar_2. Distribute. */ | |
0534b804 MH |
5894 | return simplify_giv_expr (loop, |
5895 | gen_rtx_PLUS (mode, | |
38a448ca RH |
5896 | gen_rtx_MULT (mode, |
5897 | XEXP (arg0, 0), | |
5898 | arg1), | |
5899 | gen_rtx_MULT (mode, | |
5900 | XEXP (arg0, 1), | |
5901 | arg1)), | |
e8cb4873 | 5902 | ext_val, benefit); |
b4ad7b23 RS |
5903 | |
5904 | default: | |
5905 | abort (); | |
5906 | } | |
5907 | ||
5908 | case ASHIFT: | |
b4ad7b23 RS |
5909 | /* Shift by constant is multiply by power of two. */ |
5910 | if (GET_CODE (XEXP (x, 1)) != CONST_INT) | |
5911 | return 0; | |
5912 | ||
c5c76735 | 5913 | return |
0534b804 MH |
5914 | simplify_giv_expr (loop, |
5915 | gen_rtx_MULT (mode, | |
c5c76735 JL |
5916 | XEXP (x, 0), |
5917 | GEN_INT ((HOST_WIDE_INT) 1 | |
5918 | << INTVAL (XEXP (x, 1)))), | |
e8cb4873 | 5919 | ext_val, benefit); |
b4ad7b23 RS |
5920 | |
5921 | case NEG: | |
5922 | /* "-a" is "a * (-1)" */ | |
0534b804 MH |
5923 | return simplify_giv_expr (loop, |
5924 | gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx), | |
e8cb4873 | 5925 | ext_val, benefit); |
b4ad7b23 RS |
5926 | |
5927 | case NOT: | |
5928 | /* "~a" is "-a - 1". Silly, but easy. */ | |
0534b804 MH |
5929 | return simplify_giv_expr (loop, |
5930 | gen_rtx_MINUS (mode, | |
38a448ca RH |
5931 | gen_rtx_NEG (mode, XEXP (x, 0)), |
5932 | const1_rtx), | |
e8cb4873 | 5933 | ext_val, benefit); |
b4ad7b23 RS |
5934 | |
5935 | case USE: | |
5936 | /* Already in proper form for invariant. */ | |
5937 | return x; | |
5938 | ||
e8cb4873 RH |
5939 | case SIGN_EXTEND: |
5940 | case ZERO_EXTEND: | |
5941 | case TRUNCATE: | |
5942 | /* Conditionally recognize extensions of simple IVs. After we've | |
fd5d5b07 | 5943 | computed loop traversal counts and verified the range of the |
e8cb4873 RH |
5944 | source IV, we'll reevaluate this as a GIV. */ |
5945 | if (*ext_val == NULL_RTX) | |
5946 | { | |
5947 | arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit); | |
5948 | if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG) | |
5949 | { | |
5950 | *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0); | |
5951 | return arg0; | |
5952 | } | |
5953 | } | |
5954 | goto do_default; | |
5955 | ||
fd5d5b07 | 5956 | case REG: |
b4ad7b23 RS |
5957 | /* If this is a new register, we can't deal with it. */ |
5958 | if (REGNO (x) >= max_reg_before_loop) | |
5959 | return 0; | |
5960 | ||
5961 | /* Check for biv or giv. */ | |
ed5bb68d | 5962 | switch (REG_IV_TYPE (ivs, REGNO (x))) |
b4ad7b23 RS |
5963 | { |
5964 | case BASIC_INDUCT: | |
5965 | return x; | |
5966 | case GENERAL_INDUCT: | |
5967 | { | |
ed5bb68d | 5968 | struct induction *v = REG_IV_INFO (ivs, REGNO (x)); |
b4ad7b23 RS |
5969 | |
5970 | /* Form expression from giv and add benefit. Ensure this giv | |
5971 | can derive another and subtract any needed adjustment if so. */ | |
630c79be BS |
5972 | |
5973 | /* Increasing the benefit here is risky. The only case in which it | |
5974 | is arguably correct is if this is the only use of V. In other | |
5975 | cases, this will artificially inflate the benefit of the current | |
5976 | giv, and lead to suboptimal code. Thus, it is disabled, since | |
5977 | potentially not reducing an only marginally beneficial giv is | |
5978 | less harmful than reducing many givs that are not really | |
5979 | beneficial. */ | |
5980 | { | |
f1d4ac80 | 5981 | rtx single_use = regs->array[REGNO (x)].single_usage; |
630c79be BS |
5982 | if (single_use && single_use != const0_rtx) |
5983 | *benefit += v->benefit; | |
5984 | } | |
5985 | ||
b4ad7b23 RS |
5986 | if (v->cant_derive) |
5987 | return 0; | |
5988 | ||
c5c76735 JL |
5989 | tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, |
5990 | v->src_reg, v->mult_val), | |
5991 | v->add_val); | |
5992 | ||
b4ad7b23 | 5993 | if (v->derive_adjustment) |
38a448ca | 5994 | tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment); |
e8cb4873 RH |
5995 | arg0 = simplify_giv_expr (loop, tem, ext_val, benefit); |
5996 | if (*ext_val) | |
5997 | { | |
5998 | if (!v->ext_dependant) | |
5999 | return arg0; | |
6000 | } | |
6001 | else | |
6002 | { | |
6003 | *ext_val = v->ext_dependant; | |
6004 | return arg0; | |
6005 | } | |
6006 | return 0; | |
b4ad7b23 | 6007 | } |
e9a25f70 JL |
6008 | |
6009 | default: | |
e8cb4873 | 6010 | do_default: |
45f97e2e RH |
6011 | /* If it isn't an induction variable, and it is invariant, we |
6012 | may be able to simplify things further by looking through | |
6013 | the bits we just moved outside the loop. */ | |
0534b804 | 6014 | if (loop_invariant_p (loop, x) == 1) |
45f97e2e RH |
6015 | { |
6016 | struct movable *m; | |
6ec92010 | 6017 | struct loop_movables *movables = LOOP_MOVABLES (loop); |
45f97e2e | 6018 | |
6ec92010 | 6019 | for (m = movables->head; m; m = m->next) |
45f97e2e RH |
6020 | if (rtx_equal_p (x, m->set_dest)) |
6021 | { | |
6022 | /* Ok, we found a match. Substitute and simplify. */ | |
6023 | ||
e6fcb60d | 6024 | /* If we match another movable, we must use that, as |
45f97e2e RH |
6025 | this one is going away. */ |
6026 | if (m->match) | |
e6fcb60d | 6027 | return simplify_giv_expr (loop, m->match->set_dest, |
e8cb4873 | 6028 | ext_val, benefit); |
45f97e2e RH |
6029 | |
6030 | /* If consec is non-zero, this is a member of a group of | |
6031 | instructions that were moved together. We handle this | |
6032 | case only to the point of seeking to the last insn and | |
6033 | looking for a REG_EQUAL. Fail if we don't find one. */ | |
6034 | if (m->consec != 0) | |
6035 | { | |
6036 | int i = m->consec; | |
6037 | tem = m->insn; | |
fd5d5b07 KH |
6038 | do |
6039 | { | |
6040 | tem = NEXT_INSN (tem); | |
6041 | } | |
6042 | while (--i > 0); | |
45f97e2e RH |
6043 | |
6044 | tem = find_reg_note (tem, REG_EQUAL, NULL_RTX); | |
6045 | if (tem) | |
6046 | tem = XEXP (tem, 0); | |
6047 | } | |
6048 | else | |
6049 | { | |
e6fcb60d KH |
6050 | tem = single_set (m->insn); |
6051 | if (tem) | |
45f97e2e RH |
6052 | tem = SET_SRC (tem); |
6053 | } | |
6054 | ||
6055 | if (tem) | |
6056 | { | |
6057 | /* What we are most interested in is pointer | |
6058 | arithmetic on invariants -- only take | |
6059 | patterns we may be able to do something with. */ | |
6060 | if (GET_CODE (tem) == PLUS | |
6061 | || GET_CODE (tem) == MULT | |
6062 | || GET_CODE (tem) == ASHIFT | |
6063 | || GET_CODE (tem) == CONST_INT | |
6064 | || GET_CODE (tem) == SYMBOL_REF) | |
6065 | { | |
e8cb4873 RH |
6066 | tem = simplify_giv_expr (loop, tem, ext_val, |
6067 | benefit); | |
45f97e2e RH |
6068 | if (tem) |
6069 | return tem; | |
6070 | } | |
6071 | else if (GET_CODE (tem) == CONST | |
fd5d5b07 KH |
6072 | && GET_CODE (XEXP (tem, 0)) == PLUS |
6073 | && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF | |
6074 | && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT) | |
45f97e2e | 6075 | { |
0534b804 | 6076 | tem = simplify_giv_expr (loop, XEXP (tem, 0), |
e8cb4873 | 6077 | ext_val, benefit); |
45f97e2e RH |
6078 | if (tem) |
6079 | return tem; | |
6080 | } | |
6081 | } | |
6082 | break; | |
6083 | } | |
6084 | } | |
e9a25f70 | 6085 | break; |
b4ad7b23 RS |
6086 | } |
6087 | ||
6088 | /* Fall through to general case. */ | |
6089 | default: | |
6090 | /* If invariant, return as USE (unless CONST_INT). | |
6091 | Otherwise, not giv. */ | |
6092 | if (GET_CODE (x) == USE) | |
6093 | x = XEXP (x, 0); | |
6094 | ||
0534b804 | 6095 | if (loop_invariant_p (loop, x) == 1) |
b4ad7b23 RS |
6096 | { |
6097 | if (GET_CODE (x) == CONST_INT) | |
6098 | return x; | |
45f97e2e RH |
6099 | if (GET_CODE (x) == CONST |
6100 | && GET_CODE (XEXP (x, 0)) == PLUS | |
6101 | && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF | |
6102 | && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT) | |
6103 | x = XEXP (x, 0); | |
6104 | return gen_rtx_USE (mode, x); | |
b4ad7b23 RS |
6105 | } |
6106 | else | |
6107 | return 0; | |
6108 | } | |
6109 | } | |
45f97e2e RH |
6110 | |
6111 | /* This routine folds invariants such that there is only ever one | |
6112 | CONST_INT in the summation. It is only used by simplify_giv_expr. */ | |
6113 | ||
6114 | static rtx | |
6115 | sge_plus_constant (x, c) | |
6116 | rtx x, c; | |
6117 | { | |
6118 | if (GET_CODE (x) == CONST_INT) | |
6119 | return GEN_INT (INTVAL (x) + INTVAL (c)); | |
6120 | else if (GET_CODE (x) != PLUS) | |
6121 | return gen_rtx_PLUS (GET_MODE (x), x, c); | |
6122 | else if (GET_CODE (XEXP (x, 1)) == CONST_INT) | |
6123 | { | |
6124 | return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0), | |
6125 | GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c))); | |
6126 | } | |
6127 | else if (GET_CODE (XEXP (x, 0)) == PLUS | |
6128 | || GET_CODE (XEXP (x, 1)) != PLUS) | |
6129 | { | |
6130 | return gen_rtx_PLUS (GET_MODE (x), | |
6131 | sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1)); | |
6132 | } | |
6133 | else | |
6134 | { | |
6135 | return gen_rtx_PLUS (GET_MODE (x), | |
6136 | sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0)); | |
6137 | } | |
6138 | } | |
6139 | ||
6140 | static rtx | |
6141 | sge_plus (mode, x, y) | |
6142 | enum machine_mode mode; | |
6143 | rtx x, y; | |
6144 | { | |
6145 | while (GET_CODE (y) == PLUS) | |
6146 | { | |
6147 | rtx a = XEXP (y, 0); | |
6148 | if (GET_CODE (a) == CONST_INT) | |
6149 | x = sge_plus_constant (x, a); | |
6150 | else | |
6151 | x = gen_rtx_PLUS (mode, x, a); | |
6152 | y = XEXP (y, 1); | |
6153 | } | |
6154 | if (GET_CODE (y) == CONST_INT) | |
6155 | x = sge_plus_constant (x, y); | |
6156 | else | |
6157 | x = gen_rtx_PLUS (mode, x, y); | |
6158 | return x; | |
6159 | } | |
b4ad7b23 RS |
6160 | \f |
6161 | /* Help detect a giv that is calculated by several consecutive insns; | |
6162 | for example, | |
6163 | giv = biv * M | |
6164 | giv = giv + A | |
6165 | The caller has already identified the first insn P as having a giv as dest; | |
6166 | we check that all other insns that set the same register follow | |
6167 | immediately after P, that they alter nothing else, | |
6168 | and that the result of the last is still a giv. | |
6169 | ||
6170 | The value is 0 if the reg set in P is not really a giv. | |
6171 | Otherwise, the value is the amount gained by eliminating | |
6172 | all the consecutive insns that compute the value. | |
6173 | ||
6174 | FIRST_BENEFIT is the amount gained by eliminating the first insn, P. | |
6175 | SRC_REG is the reg of the biv; DEST_REG is the reg of the giv. | |
6176 | ||
6177 | The coefficients of the ultimate giv value are stored in | |
6178 | *MULT_VAL and *ADD_VAL. */ | |
6179 | ||
6180 | static int | |
0534b804 | 6181 | consec_sets_giv (loop, first_benefit, p, src_reg, dest_reg, |
e8cb4873 | 6182 | add_val, mult_val, ext_val, last_consec_insn) |
0534b804 | 6183 | const struct loop *loop; |
b4ad7b23 RS |
6184 | int first_benefit; |
6185 | rtx p; | |
6186 | rtx src_reg; | |
6187 | rtx dest_reg; | |
6188 | rtx *add_val; | |
6189 | rtx *mult_val; | |
e8cb4873 | 6190 | rtx *ext_val; |
a07516d3 | 6191 | rtx *last_consec_insn; |
b4ad7b23 | 6192 | { |
ed5bb68d | 6193 | struct loop_ivs *ivs = LOOP_IVS (loop); |
1ecd860b | 6194 | struct loop_regs *regs = LOOP_REGS (loop); |
b4ad7b23 RS |
6195 | int count; |
6196 | enum rtx_code code; | |
6197 | int benefit; | |
6198 | rtx temp; | |
6199 | rtx set; | |
6200 | ||
6201 | /* Indicate that this is a giv so that we can update the value produced in | |
e6fcb60d | 6202 | each insn of the multi-insn sequence. |
b4ad7b23 RS |
6203 | |
6204 | This induction structure will be used only by the call to | |
6205 | general_induction_var below, so we can allocate it on our stack. | |
6206 | If this is a giv, our caller will replace the induct var entry with | |
6207 | a new induction structure. */ | |
847dde95 BS |
6208 | struct induction *v; |
6209 | ||
6210 | if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT) | |
6211 | return 0; | |
6212 | ||
6213 | v = (struct induction *) alloca (sizeof (struct induction)); | |
b4ad7b23 RS |
6214 | v->src_reg = src_reg; |
6215 | v->mult_val = *mult_val; | |
6216 | v->add_val = *add_val; | |
6217 | v->benefit = first_benefit; | |
6218 | v->cant_derive = 0; | |
6219 | v->derive_adjustment = 0; | |
e8cb4873 | 6220 | v->ext_dependant = NULL_RTX; |
b4ad7b23 | 6221 | |
ed5bb68d MH |
6222 | REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT; |
6223 | REG_IV_INFO (ivs, REGNO (dest_reg)) = v; | |
b4ad7b23 | 6224 | |
f1d4ac80 | 6225 | count = regs->array[REGNO (dest_reg)].n_times_set - 1; |
b4ad7b23 RS |
6226 | |
6227 | while (count > 0) | |
6228 | { | |
6229 | p = NEXT_INSN (p); | |
6230 | code = GET_CODE (p); | |
6231 | ||
6232 | /* If libcall, skip to end of call sequence. */ | |
5fd8383e | 6233 | if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX))) |
b4ad7b23 RS |
6234 | p = XEXP (temp, 0); |
6235 | ||
6236 | if (code == INSN | |
6237 | && (set = single_set (p)) | |
6238 | && GET_CODE (SET_DEST (set)) == REG | |
6239 | && SET_DEST (set) == dest_reg | |
0534b804 | 6240 | && (general_induction_var (loop, SET_SRC (set), &src_reg, |
e8cb4873 RH |
6241 | add_val, mult_val, ext_val, 0, |
6242 | &benefit, VOIDmode) | |
b4ad7b23 | 6243 | /* Giv created by equivalent expression. */ |
5fd8383e | 6244 | || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)) |
0534b804 | 6245 | && general_induction_var (loop, XEXP (temp, 0), &src_reg, |
e8cb4873 RH |
6246 | add_val, mult_val, ext_val, 0, |
6247 | &benefit, VOIDmode))) | |
b4ad7b23 RS |
6248 | && src_reg == v->src_reg) |
6249 | { | |
5fd8383e | 6250 | if (find_reg_note (p, REG_RETVAL, NULL_RTX)) |
b4ad7b23 RS |
6251 | benefit += libcall_benefit (p); |
6252 | ||
6253 | count--; | |
6254 | v->mult_val = *mult_val; | |
6255 | v->add_val = *add_val; | |
630c79be | 6256 | v->benefit += benefit; |
b4ad7b23 RS |
6257 | } |
6258 | else if (code != NOTE) | |
6259 | { | |
6260 | /* Allow insns that set something other than this giv to a | |
6261 | constant. Such insns are needed on machines which cannot | |
6262 | include long constants and should not disqualify a giv. */ | |
6263 | if (code == INSN | |
6264 | && (set = single_set (p)) | |
6265 | && SET_DEST (set) != dest_reg | |
6266 | && CONSTANT_P (SET_SRC (set))) | |
6267 | continue; | |
6268 | ||
ed5bb68d | 6269 | REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT; |
b4ad7b23 RS |
6270 | return 0; |
6271 | } | |
6272 | } | |
6273 | ||
847dde95 | 6274 | REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT; |
a07516d3 | 6275 | *last_consec_insn = p; |
b4ad7b23 RS |
6276 | return v->benefit; |
6277 | } | |
6278 | \f | |
6279 | /* Return an rtx, if any, that expresses giv G2 as a function of the register | |
6280 | represented by G1. If no such expression can be found, or it is clear that | |
e6fcb60d | 6281 | it cannot possibly be a valid address, 0 is returned. |
b4ad7b23 RS |
6282 | |
6283 | To perform the computation, we note that | |
45f97e2e RH |
6284 | G1 = x * v + a and |
6285 | G2 = y * v + b | |
b4ad7b23 RS |
6286 | where `v' is the biv. |
6287 | ||
45f97e2e RH |
6288 | So G2 = (y/b) * G1 + (b - a*y/x). |
6289 | ||
6290 | Note that MULT = y/x. | |
6291 | ||
6292 | Update: A and B are now allowed to be additive expressions such that | |
6293 | B contains all variables in A. That is, computing B-A will not require | |
6294 | subtracting variables. */ | |
6295 | ||
6296 | static rtx | |
6297 | express_from_1 (a, b, mult) | |
6298 | rtx a, b, mult; | |
6299 | { | |
6300 | /* If MULT is zero, then A*MULT is zero, and our expression is B. */ | |
6301 | ||
6302 | if (mult == const0_rtx) | |
6303 | return b; | |
6304 | ||
6305 | /* If MULT is not 1, we cannot handle A with non-constants, since we | |
6306 | would then be required to subtract multiples of the registers in A. | |
6307 | This is theoretically possible, and may even apply to some Fortran | |
6308 | constructs, but it is a lot of work and we do not attempt it here. */ | |
6309 | ||
6310 | if (mult != const1_rtx && GET_CODE (a) != CONST_INT) | |
6311 | return NULL_RTX; | |
6312 | ||
6313 | /* In general these structures are sorted top to bottom (down the PLUS | |
6314 | chain), but not left to right across the PLUS. If B is a higher | |
6315 | order giv than A, we can strip one level and recurse. If A is higher | |
6316 | order, we'll eventually bail out, but won't know that until the end. | |
6317 | If they are the same, we'll strip one level around this loop. */ | |
6318 | ||
6319 | while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS) | |
6320 | { | |
6321 | rtx ra, rb, oa, ob, tmp; | |
6322 | ||
6323 | ra = XEXP (a, 0), oa = XEXP (a, 1); | |
6324 | if (GET_CODE (ra) == PLUS) | |
e6fcb60d | 6325 | tmp = ra, ra = oa, oa = tmp; |
45f97e2e RH |
6326 | |
6327 | rb = XEXP (b, 0), ob = XEXP (b, 1); | |
6328 | if (GET_CODE (rb) == PLUS) | |
e6fcb60d | 6329 | tmp = rb, rb = ob, ob = tmp; |
45f97e2e RH |
6330 | |
6331 | if (rtx_equal_p (ra, rb)) | |
6332 | /* We matched: remove one reg completely. */ | |
6333 | a = oa, b = ob; | |
6334 | else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob)) | |
6335 | /* An alternate match. */ | |
6336 | a = oa, b = rb; | |
6337 | else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb)) | |
6338 | /* An alternate match. */ | |
6339 | a = ra, b = ob; | |
6340 | else | |
6341 | { | |
fd5d5b07 | 6342 | /* Indicates an extra register in B. Strip one level from B and |
45f97e2e RH |
6343 | recurse, hoping B was the higher order expression. */ |
6344 | ob = express_from_1 (a, ob, mult); | |
6345 | if (ob == NULL_RTX) | |
6346 | return NULL_RTX; | |
6347 | return gen_rtx_PLUS (GET_MODE (b), rb, ob); | |
6348 | } | |
6349 | } | |
6350 | ||
6351 | /* Here we are at the last level of A, go through the cases hoping to | |
6352 | get rid of everything but a constant. */ | |
6353 | ||
6354 | if (GET_CODE (a) == PLUS) | |
6355 | { | |
efe3eb65 | 6356 | rtx ra, oa; |
45f97e2e RH |
6357 | |
6358 | ra = XEXP (a, 0), oa = XEXP (a, 1); | |
6359 | if (rtx_equal_p (oa, b)) | |
6360 | oa = ra; | |
6361 | else if (!rtx_equal_p (ra, b)) | |
6362 | return NULL_RTX; | |
6363 | ||
6364 | if (GET_CODE (oa) != CONST_INT) | |
6365 | return NULL_RTX; | |
6366 | ||
6367 | return GEN_INT (-INTVAL (oa) * INTVAL (mult)); | |
6368 | } | |
6369 | else if (GET_CODE (a) == CONST_INT) | |
6370 | { | |
6371 | return plus_constant (b, -INTVAL (a) * INTVAL (mult)); | |
6372 | } | |
ce7de04c JH |
6373 | else if (CONSTANT_P (a)) |
6374 | { | |
0ca90ba6 | 6375 | return simplify_gen_binary (MINUS, GET_MODE (b) != VOIDmode ? GET_MODE (b) : GET_MODE (a), const0_rtx, a); |
ce7de04c | 6376 | } |
45f97e2e RH |
6377 | else if (GET_CODE (b) == PLUS) |
6378 | { | |
6379 | if (rtx_equal_p (a, XEXP (b, 0))) | |
6380 | return XEXP (b, 1); | |
6381 | else if (rtx_equal_p (a, XEXP (b, 1))) | |
6382 | return XEXP (b, 0); | |
6383 | else | |
6384 | return NULL_RTX; | |
6385 | } | |
6386 | else if (rtx_equal_p (a, b)) | |
6387 | return const0_rtx; | |
6388 | ||
6389 | return NULL_RTX; | |
6390 | } | |
b4ad7b23 | 6391 | |
4d87f7a7 | 6392 | rtx |
b4ad7b23 RS |
6393 | express_from (g1, g2) |
6394 | struct induction *g1, *g2; | |
6395 | { | |
6396 | rtx mult, add; | |
6397 | ||
6398 | /* The value that G1 will be multiplied by must be a constant integer. Also, | |
6399 | the only chance we have of getting a valid address is if b*c/a (see above | |
6400 | for notation) is also an integer. */ | |
45f97e2e RH |
6401 | if (GET_CODE (g1->mult_val) == CONST_INT |
6402 | && GET_CODE (g2->mult_val) == CONST_INT) | |
6403 | { | |
6404 | if (g1->mult_val == const0_rtx | |
e6fcb60d KH |
6405 | || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0) |
6406 | return NULL_RTX; | |
45f97e2e RH |
6407 | mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val)); |
6408 | } | |
6409 | else if (rtx_equal_p (g1->mult_val, g2->mult_val)) | |
6410 | mult = const1_rtx; | |
6411 | else | |
6412 | { | |
6413 | /* ??? Find out if the one is a multiple of the other? */ | |
6414 | return NULL_RTX; | |
6415 | } | |
b4ad7b23 | 6416 | |
45f97e2e | 6417 | add = express_from_1 (g1->add_val, g2->add_val, mult); |
e0485b85 RH |
6418 | if (add == NULL_RTX) |
6419 | { | |
6420 | /* Failed. If we've got a multiplication factor between G1 and G2, | |
6421 | scale G1's addend and try again. */ | |
6422 | if (INTVAL (mult) > 1) | |
6423 | { | |
6424 | rtx g1_add_val = g1->add_val; | |
6425 | if (GET_CODE (g1_add_val) == MULT | |
6426 | && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT) | |
6427 | { | |
6428 | HOST_WIDE_INT m; | |
6429 | m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1)); | |
6430 | g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), | |
6431 | XEXP (g1_add_val, 0), GEN_INT (m)); | |
6432 | } | |
6433 | else | |
6434 | { | |
6435 | g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val, | |
6436 | mult); | |
6437 | } | |
6438 | ||
6439 | add = express_from_1 (g1_add_val, g2->add_val, const1_rtx); | |
6440 | } | |
6441 | } | |
45f97e2e RH |
6442 | if (add == NULL_RTX) |
6443 | return NULL_RTX; | |
b4ad7b23 RS |
6444 | |
6445 | /* Form simplified final result. */ | |
6446 | if (mult == const0_rtx) | |
6447 | return add; | |
6448 | else if (mult == const1_rtx) | |
6449 | mult = g1->dest_reg; | |
6450 | else | |
38a448ca | 6451 | mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult); |
b4ad7b23 RS |
6452 | |
6453 | if (add == const0_rtx) | |
6454 | return mult; | |
6455 | else | |
86219cc7 BS |
6456 | { |
6457 | if (GET_CODE (add) == PLUS | |
6458 | && CONSTANT_P (XEXP (add, 1))) | |
6459 | { | |
6460 | rtx tem = XEXP (add, 1); | |
6461 | mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0)); | |
6462 | add = tem; | |
6463 | } | |
e6fcb60d | 6464 | |
86219cc7 BS |
6465 | return gen_rtx_PLUS (g2->mode, mult, add); |
6466 | } | |
b4ad7b23 | 6467 | } |
b4ad7b23 | 6468 | \f |
da5a44b3 BS |
6469 | /* Return an rtx, if any, that expresses giv G2 as a function of the register |
6470 | represented by G1. This indicates that G2 should be combined with G1 and | |
6471 | that G2 can use (either directly or via an address expression) a register | |
6472 | used to represent G1. */ | |
b4ad7b23 | 6473 | |
45f97e2e | 6474 | static rtx |
b4ad7b23 RS |
6475 | combine_givs_p (g1, g2) |
6476 | struct induction *g1, *g2; | |
6477 | { | |
e8cb4873 RH |
6478 | rtx comb, ret; |
6479 | ||
6480 | /* With the introduction of ext dependant givs, we must care for modes. | |
6481 | G2 must not use a wider mode than G1. */ | |
6482 | if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode)) | |
6483 | return NULL_RTX; | |
6484 | ||
6485 | ret = comb = express_from (g1, g2); | |
6486 | if (comb == NULL_RTX) | |
6487 | return NULL_RTX; | |
6488 | if (g1->mode != g2->mode) | |
6489 | ret = gen_lowpart (g2->mode, comb); | |
b4ad7b23 | 6490 | |
45f97e2e RH |
6491 | /* If these givs are identical, they can be combined. We use the results |
6492 | of express_from because the addends are not in a canonical form, so | |
6493 | rtx_equal_p is a weaker test. */ | |
3ec2b590 R |
6494 | /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the |
6495 | combination to be the other way round. */ | |
e8cb4873 | 6496 | if (comb == g1->dest_reg |
3ec2b590 | 6497 | && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR)) |
b4ad7b23 | 6498 | { |
e8cb4873 | 6499 | return ret; |
b4ad7b23 RS |
6500 | } |
6501 | ||
b4ad7b23 RS |
6502 | /* If G2 can be expressed as a function of G1 and that function is valid |
6503 | as an address and no more expensive than using a register for G2, | |
6504 | the expression of G2 in terms of G1 can be used. */ | |
e8cb4873 | 6505 | if (ret != NULL_RTX |
45f97e2e | 6506 | && g2->giv_type == DEST_ADDR |
099f0f3f | 6507 | && memory_address_p (GET_MODE (g2->mem), ret) |
45f97e2e RH |
6508 | /* ??? Looses, especially with -fforce-addr, where *g2->location |
6509 | will always be a register, and so anything more complicated | |
6510 | gets discarded. */ | |
6511 | #if 0 | |
6512 | #ifdef ADDRESS_COST | |
6513 | && ADDRESS_COST (tem) <= ADDRESS_COST (*g2->location) | |
6514 | #else | |
6515 | && rtx_cost (tem, MEM) <= rtx_cost (*g2->location, MEM) | |
6516 | #endif | |
6517 | #endif | |
6518 | ) | |
b4ad7b23 | 6519 | { |
e8cb4873 | 6520 | return ret; |
b4ad7b23 | 6521 | } |
b4ad7b23 | 6522 | |
45f97e2e | 6523 | return NULL_RTX; |
b4ad7b23 RS |
6524 | } |
6525 | \f | |
e8cb4873 RH |
6526 | /* Check each extension dependant giv in this class to see if its |
6527 | root biv is safe from wrapping in the interior mode, which would | |
6528 | make the giv illegal. */ | |
6529 | ||
6530 | static void | |
6531 | check_ext_dependant_givs (bl, loop_info) | |
6532 | struct iv_class *bl; | |
6533 | struct loop_info *loop_info; | |
6534 | { | |
6535 | int ze_ok = 0, se_ok = 0, info_ok = 0; | |
6536 | enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg); | |
6537 | HOST_WIDE_INT start_val; | |
616fde53 MH |
6538 | unsigned HOST_WIDE_INT u_end_val = 0; |
6539 | unsigned HOST_WIDE_INT u_start_val = 0; | |
e8cb4873 RH |
6540 | rtx incr = pc_rtx; |
6541 | struct induction *v; | |
6542 | ||
6543 | /* Make sure the iteration data is available. We must have | |
6544 | constants in order to be certain of no overflow. */ | |
6545 | /* ??? An unknown iteration count with an increment of +-1 | |
6546 | combined with friendly exit tests of against an invariant | |
6547 | value is also ameanable to optimization. Not implemented. */ | |
6548 | if (loop_info->n_iterations > 0 | |
6549 | && bl->initial_value | |
6550 | && GET_CODE (bl->initial_value) == CONST_INT | |
6551 | && (incr = biv_total_increment (bl)) | |
6552 | && GET_CODE (incr) == CONST_INT | |
6553 | /* Make sure the host can represent the arithmetic. */ | |
6554 | && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode)) | |
6555 | { | |
6556 | unsigned HOST_WIDE_INT abs_incr, total_incr; | |
6557 | HOST_WIDE_INT s_end_val; | |
6558 | int neg_incr; | |
6559 | ||
6560 | info_ok = 1; | |
6561 | start_val = INTVAL (bl->initial_value); | |
6562 | u_start_val = start_val; | |
fd5d5b07 | 6563 | |
e8cb4873 RH |
6564 | neg_incr = 0, abs_incr = INTVAL (incr); |
6565 | if (INTVAL (incr) < 0) | |
6566 | neg_incr = 1, abs_incr = -abs_incr; | |
6567 | total_incr = abs_incr * loop_info->n_iterations; | |
6568 | ||
6569 | /* Check for host arithmatic overflow. */ | |
6570 | if (total_incr / loop_info->n_iterations == abs_incr) | |
6571 | { | |
6572 | unsigned HOST_WIDE_INT u_max; | |
6573 | HOST_WIDE_INT s_max; | |
6574 | ||
6575 | u_end_val = start_val + (neg_incr ? -total_incr : total_incr); | |
6576 | s_end_val = u_end_val; | |
6577 | u_max = GET_MODE_MASK (biv_mode); | |
6578 | s_max = u_max >> 1; | |
fd5d5b07 | 6579 | |
e8cb4873 RH |
6580 | /* Check zero extension of biv ok. */ |
6581 | if (start_val >= 0 | |
6582 | /* Check for host arithmatic overflow. */ | |
6583 | && (neg_incr | |
6584 | ? u_end_val < u_start_val | |
6585 | : u_end_val > u_start_val) | |
6586 | /* Check for target arithmetic overflow. */ | |
6587 | && (neg_incr | |
6588 | ? 1 /* taken care of with host overflow */ | |
6589 | : u_end_val <= u_max)) | |
6590 | { | |
6591 | ze_ok = 1; | |
6592 | } | |
fd5d5b07 | 6593 | |
e8cb4873 RH |
6594 | /* Check sign extension of biv ok. */ |
6595 | /* ??? While it is true that overflow with signed and pointer | |
6596 | arithmetic is undefined, I fear too many programmers don't | |
6597 | keep this fact in mind -- myself included on occasion. | |
6598 | So leave alone with the signed overflow optimizations. */ | |
6599 | if (start_val >= -s_max - 1 | |
6600 | /* Check for host arithmatic overflow. */ | |
6601 | && (neg_incr | |
6602 | ? s_end_val < start_val | |
6603 | : s_end_val > start_val) | |
6604 | /* Check for target arithmetic overflow. */ | |
6605 | && (neg_incr | |
6606 | ? s_end_val >= -s_max - 1 | |
6607 | : s_end_val <= s_max)) | |
6608 | { | |
6609 | se_ok = 1; | |
6610 | } | |
6611 | } | |
6612 | } | |
6613 | ||
6614 | /* Invalidate givs that fail the tests. */ | |
6615 | for (v = bl->giv; v; v = v->next_iv) | |
6616 | if (v->ext_dependant) | |
6617 | { | |
6618 | enum rtx_code code = GET_CODE (v->ext_dependant); | |
6619 | int ok = 0; | |
6620 | ||
6621 | switch (code) | |
6622 | { | |
6623 | case SIGN_EXTEND: | |
6624 | ok = se_ok; | |
6625 | break; | |
6626 | case ZERO_EXTEND: | |
6627 | ok = ze_ok; | |
6628 | break; | |
6629 | ||
6630 | case TRUNCATE: | |
6631 | /* We don't know whether this value is being used as either | |
6632 | signed or unsigned, so to safely truncate we must satisfy | |
fd5d5b07 | 6633 | both. The initial check here verifies the BIV itself; |
e8cb4873 RH |
6634 | once that is successful we may check its range wrt the |
6635 | derived GIV. */ | |
6636 | if (se_ok && ze_ok) | |
6637 | { | |
6638 | enum machine_mode outer_mode = GET_MODE (v->ext_dependant); | |
6639 | unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1; | |
6640 | ||
6641 | /* We know from the above that both endpoints are nonnegative, | |
6642 | and that there is no wrapping. Verify that both endpoints | |
6643 | are within the (signed) range of the outer mode. */ | |
6644 | if (u_start_val <= max && u_end_val <= max) | |
6645 | ok = 1; | |
6646 | } | |
6647 | break; | |
6648 | ||
6649 | default: | |
6650 | abort (); | |
6651 | } | |
6652 | ||
6653 | if (ok) | |
6654 | { | |
6655 | if (loop_dump_stream) | |
6656 | { | |
fd5d5b07 KH |
6657 | fprintf (loop_dump_stream, |
6658 | "Verified ext dependant giv at %d of reg %d\n", | |
6659 | INSN_UID (v->insn), bl->regno); | |
e8cb4873 RH |
6660 | } |
6661 | } | |
6662 | else | |
6663 | { | |
6664 | if (loop_dump_stream) | |
6665 | { | |
6666 | const char *why; | |
6667 | ||
6668 | if (info_ok) | |
6669 | why = "biv iteration values overflowed"; | |
6670 | else | |
6671 | { | |
6672 | if (incr == pc_rtx) | |
6673 | incr = biv_total_increment (bl); | |
6674 | if (incr == const1_rtx) | |
6675 | why = "biv iteration info incomplete; incr by 1"; | |
6676 | else | |
6677 | why = "biv iteration info incomplete"; | |
6678 | } | |
6679 | ||
fd5d5b07 KH |
6680 | fprintf (loop_dump_stream, |
6681 | "Failed ext dependant giv at %d, %s\n", | |
6682 | INSN_UID (v->insn), why); | |
e8cb4873 RH |
6683 | } |
6684 | v->ignore = 1; | |
97ebd24c | 6685 | bl->all_reduced = 0; |
e8cb4873 RH |
6686 | } |
6687 | } | |
6688 | } | |
6689 | ||
6690 | /* Generate a version of VALUE in a mode appropriate for initializing V. */ | |
6691 | ||
6692 | rtx | |
6693 | extend_value_for_giv (v, value) | |
6694 | struct induction *v; | |
6695 | rtx value; | |
6696 | { | |
6697 | rtx ext_dep = v->ext_dependant; | |
6698 | ||
6699 | if (! ext_dep) | |
6700 | return value; | |
6701 | ||
6702 | /* Recall that check_ext_dependant_givs verified that the known bounds | |
6703 | of a biv did not overflow or wrap with respect to the extension for | |
6704 | the giv. Therefore, constants need no additional adjustment. */ | |
6705 | if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode) | |
6706 | return value; | |
6707 | ||
6708 | /* Otherwise, we must adjust the value to compensate for the | |
6709 | differing modes of the biv and the giv. */ | |
6710 | return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value); | |
6711 | } | |
6712 | \f | |
45f97e2e RH |
6713 | struct combine_givs_stats |
6714 | { | |
6715 | int giv_number; | |
6716 | int total_benefit; | |
6717 | }; | |
6718 | ||
6719 | static int | |
f428f252 KG |
6720 | cmp_combine_givs_stats (xp, yp) |
6721 | const PTR xp; | |
6722 | const PTR yp; | |
45f97e2e | 6723 | { |
f428f252 KG |
6724 | const struct combine_givs_stats * const x = |
6725 | (const struct combine_givs_stats *) xp; | |
6726 | const struct combine_givs_stats * const y = | |
6727 | (const struct combine_givs_stats *) yp; | |
45f97e2e RH |
6728 | int d; |
6729 | d = y->total_benefit - x->total_benefit; | |
6730 | /* Stabilize the sort. */ | |
6731 | if (!d) | |
6732 | d = x->giv_number - y->giv_number; | |
6733 | return d; | |
6734 | } | |
6735 | ||
b4ad7b23 RS |
6736 | /* Check all pairs of givs for iv_class BL and see if any can be combined with |
6737 | any other. If so, point SAME to the giv combined with and set NEW_REG to | |
6738 | be an expression (in terms of the other giv's DEST_REG) equivalent to the | |
6739 | giv. Also, update BENEFIT and related fields for cost/benefit analysis. */ | |
6740 | ||
6741 | static void | |
1ecd860b MH |
6742 | combine_givs (regs, bl) |
6743 | struct loop_regs *regs; | |
b4ad7b23 RS |
6744 | struct iv_class *bl; |
6745 | { | |
ba12c883 RH |
6746 | /* Additional benefit to add for being combined multiple times. */ |
6747 | const int extra_benefit = 3; | |
6748 | ||
29a82058 | 6749 | struct induction *g1, *g2, **giv_array; |
45f97e2e RH |
6750 | int i, j, k, giv_count; |
6751 | struct combine_givs_stats *stats; | |
6752 | rtx *can_combine; | |
b4ad7b23 | 6753 | |
7027f90a JW |
6754 | /* Count givs, because bl->giv_count is incorrect here. */ |
6755 | giv_count = 0; | |
b4ad7b23 | 6756 | for (g1 = bl->giv; g1; g1 = g1->next_iv) |
45f97e2e RH |
6757 | if (!g1->ignore) |
6758 | giv_count++; | |
7027f90a JW |
6759 | |
6760 | giv_array | |
6761 | = (struct induction **) alloca (giv_count * sizeof (struct induction *)); | |
6762 | i = 0; | |
6763 | for (g1 = bl->giv; g1; g1 = g1->next_iv) | |
45f97e2e RH |
6764 | if (!g1->ignore) |
6765 | giv_array[i++] = g1; | |
7027f90a | 6766 | |
67289ea6 | 6767 | stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats)); |
e6fcb60d | 6768 | can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx)); |
7027f90a JW |
6769 | |
6770 | for (i = 0; i < giv_count; i++) | |
6771 | { | |
45f97e2e | 6772 | int this_benefit; |
ba12c883 | 6773 | rtx single_use; |
45f97e2e | 6774 | |
7027f90a | 6775 | g1 = giv_array[i]; |
ba12c883 RH |
6776 | stats[i].giv_number = i; |
6777 | ||
6778 | /* If a DEST_REG GIV is used only once, do not allow it to combine | |
6779 | with anything, for in doing so we will gain nothing that cannot | |
6780 | be had by simply letting the GIV with which we would have combined | |
e6fcb60d | 6781 | to be reduced on its own. The losage shows up in particular with |
ba12c883 RH |
6782 | DEST_ADDR targets on hosts with reg+reg addressing, though it can |
6783 | be seen elsewhere as well. */ | |
6784 | if (g1->giv_type == DEST_REG | |
f1d4ac80 | 6785 | && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage) |
ba12c883 RH |
6786 | && single_use != const0_rtx) |
6787 | continue; | |
45f97e2e RH |
6788 | |
6789 | this_benefit = g1->benefit; | |
6790 | /* Add an additional weight for zero addends. */ | |
6791 | if (g1->no_const_addval) | |
6792 | this_benefit += 1; | |
ba12c883 | 6793 | |
45f97e2e RH |
6794 | for (j = 0; j < giv_count; j++) |
6795 | { | |
6796 | rtx this_combine; | |
6797 | ||
6798 | g2 = giv_array[j]; | |
6799 | if (g1 != g2 | |
6800 | && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX) | |
6801 | { | |
e6fcb60d | 6802 | can_combine[i * giv_count + j] = this_combine; |
ba12c883 | 6803 | this_benefit += g2->benefit + extra_benefit; |
45f97e2e RH |
6804 | } |
6805 | } | |
45f97e2e RH |
6806 | stats[i].total_benefit = this_benefit; |
6807 | } | |
6808 | ||
6809 | /* Iterate, combining until we can't. */ | |
6810 | restart: | |
e6fcb60d | 6811 | qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats); |
45f97e2e RH |
6812 | |
6813 | if (loop_dump_stream) | |
6814 | { | |
6815 | fprintf (loop_dump_stream, "Sorted combine statistics:\n"); | |
6816 | for (k = 0; k < giv_count; k++) | |
6817 | { | |
6818 | g1 = giv_array[stats[k].giv_number]; | |
6819 | if (!g1->combined_with && !g1->same) | |
e6fcb60d | 6820 | fprintf (loop_dump_stream, " {%d, %d}", |
45f97e2e RH |
6821 | INSN_UID (giv_array[stats[k].giv_number]->insn), |
6822 | stats[k].total_benefit); | |
6823 | } | |
6824 | putc ('\n', loop_dump_stream); | |
6825 | } | |
6826 | ||
6827 | for (k = 0; k < giv_count; k++) | |
6828 | { | |
6829 | int g1_add_benefit = 0; | |
6830 | ||
6831 | i = stats[k].giv_number; | |
6832 | g1 = giv_array[i]; | |
6833 | ||
6834 | /* If it has already been combined, skip. */ | |
6835 | if (g1->combined_with || g1->same) | |
6836 | continue; | |
6837 | ||
6838 | for (j = 0; j < giv_count; j++) | |
6839 | { | |
6840 | g2 = giv_array[j]; | |
e6fcb60d | 6841 | if (g1 != g2 && can_combine[i * giv_count + j] |
45f97e2e RH |
6842 | /* If it has already been combined, skip. */ |
6843 | && ! g2->same && ! g2->combined_with) | |
6844 | { | |
6845 | int l; | |
6846 | ||
e6fcb60d | 6847 | g2->new_reg = can_combine[i * giv_count + j]; |
45f97e2e | 6848 | g2->same = g1; |
3ec2b590 | 6849 | g1->combined_with++; |
45f97e2e RH |
6850 | g1->lifetime += g2->lifetime; |
6851 | ||
ba12c883 | 6852 | g1_add_benefit += g2->benefit; |
45f97e2e RH |
6853 | |
6854 | /* ??? The new final_[bg]iv_value code does a much better job | |
6855 | of finding replaceable giv's, and hence this code may no | |
6856 | longer be necessary. */ | |
6857 | if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg)) | |
6858 | g1_add_benefit -= copy_cost; | |
e6fcb60d | 6859 | |
45f97e2e RH |
6860 | /* To help optimize the next set of combinations, remove |
6861 | this giv from the benefits of other potential mates. */ | |
6862 | for (l = 0; l < giv_count; ++l) | |
6863 | { | |
6864 | int m = stats[l].giv_number; | |
e6fcb60d | 6865 | if (can_combine[m * giv_count + j]) |
ba12c883 | 6866 | stats[l].total_benefit -= g2->benefit + extra_benefit; |
45f97e2e RH |
6867 | } |
6868 | ||
6869 | if (loop_dump_stream) | |
6870 | fprintf (loop_dump_stream, | |
630c79be BS |
6871 | "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n", |
6872 | INSN_UID (g2->insn), INSN_UID (g1->insn), | |
6873 | g1->benefit, g1_add_benefit, g1->lifetime); | |
45f97e2e RH |
6874 | } |
6875 | } | |
6876 | ||
6877 | /* To help optimize the next set of combinations, remove | |
6878 | this giv from the benefits of other potential mates. */ | |
6879 | if (g1->combined_with) | |
6880 | { | |
6881 | for (j = 0; j < giv_count; ++j) | |
6882 | { | |
6883 | int m = stats[j].giv_number; | |
e6fcb60d | 6884 | if (can_combine[m * giv_count + i]) |
ba12c883 | 6885 | stats[j].total_benefit -= g1->benefit + extra_benefit; |
45f97e2e RH |
6886 | } |
6887 | ||
6888 | g1->benefit += g1_add_benefit; | |
6889 | ||
6890 | /* We've finished with this giv, and everything it touched. | |
e6fcb60d | 6891 | Restart the combination so that proper weights for the |
45f97e2e RH |
6892 | rest of the givs are properly taken into account. */ |
6893 | /* ??? Ideally we would compact the arrays at this point, so | |
6894 | as to not cover old ground. But sanely compacting | |
6895 | can_combine is tricky. */ | |
6896 | goto restart; | |
6897 | } | |
7027f90a | 6898 | } |
67289ea6 MM |
6899 | |
6900 | /* Clean up. */ | |
6901 | free (stats); | |
6902 | free (can_combine); | |
b4ad7b23 RS |
6903 | } |
6904 | \f | |
96a45535 | 6905 | /* Generate sequence for REG = B * M + A. */ |
b4ad7b23 | 6906 | |
96a45535 MH |
6907 | static rtx |
6908 | gen_add_mult (b, m, a, reg) | |
b4ad7b23 RS |
6909 | rtx b; /* initial value of basic induction variable */ |
6910 | rtx m; /* multiplicative constant */ | |
6911 | rtx a; /* additive constant */ | |
6912 | rtx reg; /* destination register */ | |
b4ad7b23 RS |
6913 | { |
6914 | rtx seq; | |
6915 | rtx result; | |
6916 | ||
b4ad7b23 | 6917 | start_sequence (); |
96a45535 | 6918 | /* Use unsigned arithmetic. */ |
91ce572a | 6919 | result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1); |
b4ad7b23 RS |
6920 | if (reg != result) |
6921 | emit_move_insn (reg, result); | |
6922 | seq = gen_sequence (); | |
6923 | end_sequence (); | |
6924 | ||
96a45535 MH |
6925 | return seq; |
6926 | } | |
6927 | ||
6928 | ||
6929 | /* Update registers created in insn sequence SEQ. */ | |
9ae8ffe7 | 6930 | |
96a45535 MH |
6931 | static void |
6932 | loop_regs_update (loop, seq) | |
6933 | const struct loop *loop ATTRIBUTE_UNUSED; | |
6934 | rtx seq; | |
6935 | { | |
6936 | /* Update register info for alias analysis. */ | |
00116a7b RH |
6937 | |
6938 | if (GET_CODE (seq) == SEQUENCE) | |
6939 | { | |
6940 | int i; | |
6941 | for (i = 0; i < XVECLEN (seq, 0); ++i) | |
6942 | { | |
9e525635 | 6943 | rtx set = single_set (XVECEXP (seq, 0, i)); |
00116a7b RH |
6944 | if (set && GET_CODE (SET_DEST (set)) == REG) |
6945 | record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0); | |
6946 | } | |
6947 | } | |
30a1181a MH |
6948 | else |
6949 | { | |
6950 | rtx set = single_set (seq); | |
6951 | if (set && GET_CODE (SET_DEST (set)) == REG) | |
6952 | record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0); | |
6953 | } | |
b4ad7b23 | 6954 | } |
630c79be | 6955 | |
96a45535 MH |
6956 | |
6957 | /* EMIT code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. */ | |
6958 | ||
6959 | void | |
6960 | loop_iv_add_mult_emit_before (loop, b, m, a, reg, before_bb, before_insn) | |
6961 | const struct loop *loop; | |
6962 | rtx b; /* initial value of basic induction variable */ | |
6963 | rtx m; /* multiplicative constant */ | |
6964 | rtx a; /* additive constant */ | |
6965 | rtx reg; /* destination register */ | |
6966 | basic_block before_bb; | |
6967 | rtx before_insn; | |
6968 | { | |
6969 | rtx seq; | |
6970 | ||
6971 | if (! before_insn) | |
6972 | { | |
6973 | loop_iv_add_mult_hoist (loop, b, m, a, reg); | |
6974 | return; | |
6975 | } | |
6976 | ||
6977 | /* Use copy_rtx to prevent unexpected sharing of these rtx. */ | |
6978 | seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg); | |
6979 | ||
6980 | /* Increase the lifetime of any invariants moved further in code. */ | |
6981 | update_reg_last_use (a, before_insn); | |
6982 | update_reg_last_use (b, before_insn); | |
6983 | update_reg_last_use (m, before_insn); | |
6984 | ||
6985 | loop_insn_emit_before (loop, before_bb, before_insn, seq); | |
6986 | ||
6987 | /* It is possible that the expansion created lots of new registers. | |
6988 | Iterate over the sequence we just created and record them all. */ | |
6989 | loop_regs_update (loop, seq); | |
6990 | } | |
6991 | ||
6992 | ||
6993 | /* Emit insns in loop pre-header to set REG = B * M + A. */ | |
6994 | ||
6995 | void | |
6996 | loop_iv_add_mult_sink (loop, b, m, a, reg) | |
6997 | const struct loop *loop; | |
6998 | rtx b; /* initial value of basic induction variable */ | |
6999 | rtx m; /* multiplicative constant */ | |
7000 | rtx a; /* additive constant */ | |
7001 | rtx reg; /* destination register */ | |
7002 | { | |
7003 | rtx seq; | |
7004 | ||
7005 | /* Use copy_rtx to prevent unexpected sharing of these rtx. */ | |
7006 | seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg); | |
7007 | ||
7008 | /* Increase the lifetime of any invariants moved further in code. | |
7009 | ???? Is this really necessary? */ | |
7010 | update_reg_last_use (a, loop->sink); | |
7011 | update_reg_last_use (b, loop->sink); | |
7012 | update_reg_last_use (m, loop->sink); | |
7013 | ||
7014 | loop_insn_sink (loop, seq); | |
7015 | ||
7016 | /* It is possible that the expansion created lots of new registers. | |
7017 | Iterate over the sequence we just created and record them all. */ | |
7018 | loop_regs_update (loop, seq); | |
7019 | } | |
7020 | ||
7021 | ||
7022 | /* Emit insns after loop to set REG = B * M + A. */ | |
7023 | ||
7024 | void | |
7025 | loop_iv_add_mult_hoist (loop, b, m, a, reg) | |
7026 | const struct loop *loop; | |
7027 | rtx b; /* initial value of basic induction variable */ | |
7028 | rtx m; /* multiplicative constant */ | |
7029 | rtx a; /* additive constant */ | |
7030 | rtx reg; /* destination register */ | |
7031 | { | |
7032 | rtx seq; | |
7033 | ||
7034 | /* Use copy_rtx to prevent unexpected sharing of these rtx. */ | |
7035 | seq = gen_add_mult (copy_rtx (b), m, copy_rtx (a), reg); | |
7036 | ||
7037 | loop_insn_hoist (loop, seq); | |
7038 | ||
7039 | /* It is possible that the expansion created lots of new registers. | |
7040 | Iterate over the sequence we just created and record them all. */ | |
7041 | loop_regs_update (loop, seq); | |
7042 | } | |
7043 | ||
7044 | ||
7045 | ||
7046 | /* Similar to gen_add_mult, but compute cost rather than generating | |
7047 | sequence. */ | |
7048 | ||
630c79be BS |
7049 | static int |
7050 | iv_add_mult_cost (b, m, a, reg) | |
7051 | rtx b; /* initial value of basic induction variable */ | |
7052 | rtx m; /* multiplicative constant */ | |
7053 | rtx a; /* additive constant */ | |
7054 | rtx reg; /* destination register */ | |
7055 | { | |
7056 | int cost = 0; | |
7057 | rtx last, result; | |
7058 | ||
7059 | start_sequence (); | |
96a45535 | 7060 | result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1); |
630c79be BS |
7061 | if (reg != result) |
7062 | emit_move_insn (reg, result); | |
7063 | last = get_last_insn (); | |
7064 | while (last) | |
7065 | { | |
7066 | rtx t = single_set (last); | |
7067 | if (t) | |
7068 | cost += rtx_cost (SET_SRC (t), SET); | |
7069 | last = PREV_INSN (last); | |
7070 | } | |
7071 | end_sequence (); | |
7072 | return cost; | |
7073 | } | |
b4ad7b23 RS |
7074 | \f |
7075 | /* Test whether A * B can be computed without | |
7076 | an actual multiply insn. Value is 1 if so. */ | |
7077 | ||
7078 | static int | |
7079 | product_cheap_p (a, b) | |
7080 | rtx a; | |
7081 | rtx b; | |
7082 | { | |
7083 | int i; | |
7084 | rtx tmp; | |
b4ad7b23 RS |
7085 | int win = 1; |
7086 | ||
0f41302f | 7087 | /* If only one is constant, make it B. */ |
b4ad7b23 RS |
7088 | if (GET_CODE (a) == CONST_INT) |
7089 | tmp = a, a = b, b = tmp; | |
7090 | ||
7091 | /* If first constant, both constant, so don't need multiply. */ | |
7092 | if (GET_CODE (a) == CONST_INT) | |
7093 | return 1; | |
7094 | ||
7095 | /* If second not constant, neither is constant, so would need multiply. */ | |
7096 | if (GET_CODE (b) != CONST_INT) | |
7097 | return 0; | |
7098 | ||
7099 | /* One operand is constant, so might not need multiply insn. Generate the | |
7100 | code for the multiply and see if a call or multiply, or long sequence | |
7101 | of insns is generated. */ | |
7102 | ||
b4ad7b23 | 7103 | start_sequence (); |
91ce572a | 7104 | expand_mult (GET_MODE (a), a, b, NULL_RTX, 1); |
b4ad7b23 RS |
7105 | tmp = gen_sequence (); |
7106 | end_sequence (); | |
7107 | ||
7108 | if (GET_CODE (tmp) == SEQUENCE) | |
7109 | { | |
7110 | if (XVEC (tmp, 0) == 0) | |
7111 | win = 1; | |
7112 | else if (XVECLEN (tmp, 0) > 3) | |
7113 | win = 0; | |
7114 | else | |
7115 | for (i = 0; i < XVECLEN (tmp, 0); i++) | |
7116 | { | |
7117 | rtx insn = XVECEXP (tmp, 0, i); | |
7118 | ||
7119 | if (GET_CODE (insn) != INSN | |
7120 | || (GET_CODE (PATTERN (insn)) == SET | |
7121 | && GET_CODE (SET_SRC (PATTERN (insn))) == MULT) | |
7122 | || (GET_CODE (PATTERN (insn)) == PARALLEL | |
7123 | && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET | |
7124 | && GET_CODE (SET_SRC (XVECEXP (PATTERN (insn), 0, 0))) == MULT)) | |
7125 | { | |
7126 | win = 0; | |
7127 | break; | |
7128 | } | |
7129 | } | |
7130 | } | |
7131 | else if (GET_CODE (tmp) == SET | |
7132 | && GET_CODE (SET_SRC (tmp)) == MULT) | |
7133 | win = 0; | |
7134 | else if (GET_CODE (tmp) == PARALLEL | |
7135 | && GET_CODE (XVECEXP (tmp, 0, 0)) == SET | |
7136 | && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT) | |
7137 | win = 0; | |
7138 | ||
b4ad7b23 RS |
7139 | return win; |
7140 | } | |
7141 | \f | |
7142 | /* Check to see if loop can be terminated by a "decrement and branch until | |
7143 | zero" instruction. If so, add a REG_NONNEG note to the branch insn if so. | |
7144 | Also try reversing an increment loop to a decrement loop | |
7145 | to see if the optimization can be performed. | |
7146 | Value is nonzero if optimization was performed. */ | |
7147 | ||
7148 | /* This is useful even if the architecture doesn't have such an insn, | |
7149 | because it might change a loops which increments from 0 to n to a loop | |
7150 | which decrements from n to 0. A loop that decrements to zero is usually | |
7151 | faster than one that increments from zero. */ | |
7152 | ||
7153 | /* ??? This could be rewritten to use some of the loop unrolling procedures, | |
7154 | such as approx_final_value, biv_total_increment, loop_iterations, and | |
7155 | final_[bg]iv_value. */ | |
7156 | ||
7157 | static int | |
a2be868f MH |
7158 | check_dbra_loop (loop, insn_count) |
7159 | struct loop *loop; | |
b4ad7b23 | 7160 | int insn_count; |
b4ad7b23 | 7161 | { |
1ecd860b MH |
7162 | struct loop_info *loop_info = LOOP_INFO (loop); |
7163 | struct loop_regs *regs = LOOP_REGS (loop); | |
ed5bb68d | 7164 | struct loop_ivs *ivs = LOOP_IVS (loop); |
b4ad7b23 RS |
7165 | struct iv_class *bl; |
7166 | rtx reg; | |
7167 | rtx jump_label; | |
7168 | rtx final_value; | |
7169 | rtx start_value; | |
b4ad7b23 RS |
7170 | rtx new_add_val; |
7171 | rtx comparison; | |
7172 | rtx before_comparison; | |
7173 | rtx p; | |
0628fde6 JW |
7174 | rtx jump; |
7175 | rtx first_compare; | |
7176 | int compare_and_branch; | |
a2be868f MH |
7177 | rtx loop_start = loop->start; |
7178 | rtx loop_end = loop->end; | |
b4ad7b23 RS |
7179 | |
7180 | /* If last insn is a conditional branch, and the insn before tests a | |
7181 | register value, try to optimize it. Otherwise, we can't do anything. */ | |
7182 | ||
0628fde6 | 7183 | jump = PREV_INSN (loop_end); |
0534b804 | 7184 | comparison = get_condition_for_loop (loop, jump); |
b4ad7b23 RS |
7185 | if (comparison == 0) |
7186 | return 0; | |
7f1c097d JH |
7187 | if (!onlyjump_p (jump)) |
7188 | return 0; | |
b4ad7b23 | 7189 | |
0628fde6 JW |
7190 | /* Try to compute whether the compare/branch at the loop end is one or |
7191 | two instructions. */ | |
7192 | get_condition (jump, &first_compare); | |
7193 | if (first_compare == jump) | |
7194 | compare_and_branch = 1; | |
7195 | else if (first_compare == prev_nonnote_insn (jump)) | |
7196 | compare_and_branch = 2; | |
7197 | else | |
7198 | return 0; | |
7199 | ||
947851b2 CC |
7200 | { |
7201 | /* If more than one condition is present to control the loop, then | |
5d8fcdcb | 7202 | do not proceed, as this function does not know how to rewrite |
7905cfef JL |
7203 | loop tests with more than one condition. |
7204 | ||
7205 | Look backwards from the first insn in the last comparison | |
7206 | sequence and see if we've got another comparison sequence. */ | |
947851b2 CC |
7207 | |
7208 | rtx jump1; | |
7905cfef | 7209 | if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont) |
c12c6a56 | 7210 | if (GET_CODE (jump1) == JUMP_INSN) |
fd5d5b07 | 7211 | return 0; |
947851b2 CC |
7212 | } |
7213 | ||
b4ad7b23 RS |
7214 | /* Check all of the bivs to see if the compare uses one of them. |
7215 | Skip biv's set more than once because we can't guarantee that | |
7216 | it will be zero on the last iteration. Also skip if the biv is | |
7217 | used between its update and the test insn. */ | |
7218 | ||
14be28e5 | 7219 | for (bl = ivs->list; bl; bl = bl->next) |
b4ad7b23 RS |
7220 | { |
7221 | if (bl->biv_count == 1 | |
6979065c | 7222 | && ! bl->biv->maybe_multiple |
b4ad7b23 RS |
7223 | && bl->biv->dest_reg == XEXP (comparison, 0) |
7224 | && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn, | |
0628fde6 | 7225 | first_compare)) |
b4ad7b23 RS |
7226 | break; |
7227 | } | |
7228 | ||
7229 | if (! bl) | |
7230 | return 0; | |
7231 | ||
7232 | /* Look for the case where the basic induction variable is always | |
7233 | nonnegative, and equals zero on the last iteration. | |
7234 | In this case, add a reg_note REG_NONNEG, which allows the | |
7235 | m68k DBRA instruction to be used. */ | |
7236 | ||
7237 | if (((GET_CODE (comparison) == GT | |
7238 | && GET_CODE (XEXP (comparison, 1)) == CONST_INT | |
7239 | && INTVAL (XEXP (comparison, 1)) == -1) | |
7240 | || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx)) | |
7241 | && GET_CODE (bl->biv->add_val) == CONST_INT | |
7242 | && INTVAL (bl->biv->add_val) < 0) | |
7243 | { | |
7244 | /* Initial value must be greater than 0, | |
7245 | init_val % -dec_value == 0 to ensure that it equals zero on | |
7246 | the last iteration */ | |
7247 | ||
7248 | if (GET_CODE (bl->initial_value) == CONST_INT | |
7249 | && INTVAL (bl->initial_value) > 0 | |
db3cf6fb MS |
7250 | && (INTVAL (bl->initial_value) |
7251 | % (-INTVAL (bl->biv->add_val))) == 0) | |
b4ad7b23 RS |
7252 | { |
7253 | /* register always nonnegative, add REG_NOTE to branch */ | |
65b98a02 JW |
7254 | if (! find_reg_note (jump, REG_NONNEG, NULL_RTX)) |
7255 | REG_NOTES (jump) | |
7256 | = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg, | |
7257 | REG_NOTES (jump)); | |
b4ad7b23 RS |
7258 | bl->nonneg = 1; |
7259 | ||
7260 | return 1; | |
7261 | } | |
7262 | ||
7263 | /* If the decrement is 1 and the value was tested as >= 0 before | |
7264 | the loop, then we can safely optimize. */ | |
7265 | for (p = loop_start; p; p = PREV_INSN (p)) | |
7266 | { | |
7267 | if (GET_CODE (p) == CODE_LABEL) | |
7268 | break; | |
7269 | if (GET_CODE (p) != JUMP_INSN) | |
7270 | continue; | |
7271 | ||
0534b804 | 7272 | before_comparison = get_condition_for_loop (loop, p); |
b4ad7b23 RS |
7273 | if (before_comparison |
7274 | && XEXP (before_comparison, 0) == bl->biv->dest_reg | |
7275 | && GET_CODE (before_comparison) == LT | |
7276 | && XEXP (before_comparison, 1) == const0_rtx | |
7277 | && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start) | |
7278 | && INTVAL (bl->biv->add_val) == -1) | |
7279 | { | |
65b98a02 JW |
7280 | if (! find_reg_note (jump, REG_NONNEG, NULL_RTX)) |
7281 | REG_NOTES (jump) | |
7282 | = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg, | |
7283 | REG_NOTES (jump)); | |
b4ad7b23 RS |
7284 | bl->nonneg = 1; |
7285 | ||
7286 | return 1; | |
7287 | } | |
7288 | } | |
7289 | } | |
ef178af3 ZW |
7290 | else if (GET_CODE (bl->biv->add_val) == CONST_INT |
7291 | && INTVAL (bl->biv->add_val) > 0) | |
b4ad7b23 RS |
7292 | { |
7293 | /* Try to change inc to dec, so can apply above optimization. */ | |
7294 | /* Can do this if: | |
7295 | all registers modified are induction variables or invariant, | |
7296 | all memory references have non-overlapping addresses | |
7297 | (obviously true if only one write) | |
7298 | allow 2 insns for the compare/jump at the end of the loop. */ | |
45cc060e JW |
7299 | /* Also, we must avoid any instructions which use both the reversed |
7300 | biv and another biv. Such instructions will fail if the loop is | |
7301 | reversed. We meet this condition by requiring that either | |
7302 | no_use_except_counting is true, or else that there is only | |
7303 | one biv. */ | |
b4ad7b23 RS |
7304 | int num_nonfixed_reads = 0; |
7305 | /* 1 if the iteration var is used only to count iterations. */ | |
7306 | int no_use_except_counting = 0; | |
b418c26e JW |
7307 | /* 1 if the loop has no memory store, or it has a single memory store |
7308 | which is reversible. */ | |
7309 | int reversible_mem_store = 1; | |
b4ad7b23 | 7310 | |
0534b804 | 7311 | if (bl->giv_count == 0 && ! loop->exit_count) |
b4ad7b23 RS |
7312 | { |
7313 | rtx bivreg = regno_reg_rtx[bl->regno]; | |
c7b30677 | 7314 | struct iv_class *blt; |
b4ad7b23 RS |
7315 | |
7316 | /* If there are no givs for this biv, and the only exit is the | |
38e01259 | 7317 | fall through at the end of the loop, then |
b4ad7b23 RS |
7318 | see if perhaps there are no uses except to count. */ |
7319 | no_use_except_counting = 1; | |
7320 | for (p = loop_start; p != loop_end; p = NEXT_INSN (p)) | |
2c3c49de | 7321 | if (INSN_P (p)) |
b4ad7b23 RS |
7322 | { |
7323 | rtx set = single_set (p); | |
7324 | ||
7325 | if (set && GET_CODE (SET_DEST (set)) == REG | |
7326 | && REGNO (SET_DEST (set)) == bl->regno) | |
7327 | /* An insn that sets the biv is okay. */ | |
7328 | ; | |
59487769 JL |
7329 | else if ((p == prev_nonnote_insn (prev_nonnote_insn (loop_end)) |
7330 | || p == prev_nonnote_insn (loop_end)) | |
7331 | && reg_mentioned_p (bivreg, PATTERN (p))) | |
7332 | { | |
7333 | /* If either of these insns uses the biv and sets a pseudo | |
7334 | that has more than one usage, then the biv has uses | |
7335 | other than counting since it's used to derive a value | |
7336 | that is used more than one time. */ | |
84832317 | 7337 | note_stores (PATTERN (p), note_set_pseudo_multiple_uses, |
1ecd860b MH |
7338 | regs); |
7339 | if (regs->multiple_uses) | |
59487769 JL |
7340 | { |
7341 | no_use_except_counting = 0; | |
7342 | break; | |
7343 | } | |
7344 | } | |
b4ad7b23 | 7345 | else if (reg_mentioned_p (bivreg, PATTERN (p))) |
b4ad7b23 RS |
7346 | { |
7347 | no_use_except_counting = 0; | |
7348 | break; | |
7349 | } | |
7350 | } | |
c7b30677 FS |
7351 | |
7352 | /* A biv has uses besides counting if it is used to set another biv. */ | |
7353 | for (blt = ivs->list; blt; blt = blt->next) | |
7354 | if (blt->init_set && reg_mentioned_p (bivreg, SET_SRC (blt->init_set))) | |
7355 | { | |
7356 | no_use_except_counting = 0; | |
7357 | break; | |
7358 | } | |
b4ad7b23 RS |
7359 | } |
7360 | ||
c48ba252 | 7361 | if (no_use_except_counting) |
e6fcb60d KH |
7362 | /* No need to worry about MEMs. */ |
7363 | ; | |
afa1738b | 7364 | else if (loop_info->num_mem_sets <= 1) |
c48ba252 R |
7365 | { |
7366 | for (p = loop_start; p != loop_end; p = NEXT_INSN (p)) | |
2c3c49de | 7367 | if (INSN_P (p)) |
0534b804 | 7368 | num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p)); |
c48ba252 R |
7369 | |
7370 | /* If the loop has a single store, and the destination address is | |
7371 | invariant, then we can't reverse the loop, because this address | |
7372 | might then have the wrong value at loop exit. | |
7373 | This would work if the source was invariant also, however, in that | |
7374 | case, the insn should have been moved out of the loop. */ | |
7375 | ||
afa1738b | 7376 | if (loop_info->num_mem_sets == 1) |
2d4fde68 R |
7377 | { |
7378 | struct induction *v; | |
7379 | ||
28680540 MM |
7380 | /* If we could prove that each of the memory locations |
7381 | written to was different, then we could reverse the | |
7382 | store -- but we don't presently have any way of | |
7383 | knowing that. */ | |
7384 | reversible_mem_store = 0; | |
2d4fde68 R |
7385 | |
7386 | /* If the store depends on a register that is set after the | |
7387 | store, it depends on the initial value, and is thus not | |
7388 | reversible. */ | |
7389 | for (v = bl->giv; reversible_mem_store && v; v = v->next_iv) | |
7390 | { | |
7391 | if (v->giv_type == DEST_REG | |
7392 | && reg_mentioned_p (v->dest_reg, | |
afa1738b | 7393 | PATTERN (loop_info->first_loop_store_insn)) |
fd5d5b07 | 7394 | && loop_insn_first_p (loop_info->first_loop_store_insn, |
afa1738b | 7395 | v->insn)) |
2d4fde68 R |
7396 | reversible_mem_store = 0; |
7397 | } | |
7398 | } | |
c48ba252 R |
7399 | } |
7400 | else | |
7401 | return 0; | |
b418c26e | 7402 | |
b4ad7b23 RS |
7403 | /* This code only acts for innermost loops. Also it simplifies |
7404 | the memory address check by only reversing loops with | |
7405 | zero or one memory access. | |
7406 | Two memory accesses could involve parts of the same array, | |
c48ba252 R |
7407 | and that can't be reversed. |
7408 | If the biv is used only for counting, than we don't need to worry | |
7409 | about all these things. */ | |
7410 | ||
7411 | if ((num_nonfixed_reads <= 1 | |
576d0b54 | 7412 | && ! loop_info->has_nonconst_call |
3c748bb6 | 7413 | && ! loop_info->has_volatile |
c48ba252 | 7414 | && reversible_mem_store |
afa1738b | 7415 | && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets |
28680540 | 7416 | + num_unmoved_movables (loop) + compare_and_branch == insn_count) |
14be28e5 | 7417 | && (bl == ivs->list && bl->next == 0)) |
c48ba252 | 7418 | || no_use_except_counting) |
b4ad7b23 | 7419 | { |
b4ad7b23 RS |
7420 | rtx tem; |
7421 | ||
7422 | /* Loop can be reversed. */ | |
7423 | if (loop_dump_stream) | |
7424 | fprintf (loop_dump_stream, "Can reverse loop\n"); | |
7425 | ||
7426 | /* Now check other conditions: | |
e9a25f70 | 7427 | |
956d6950 | 7428 | The increment must be a constant, as must the initial value, |
e6fcb60d | 7429 | and the comparison code must be LT. |
b4ad7b23 RS |
7430 | |
7431 | This test can probably be improved since +/- 1 in the constant | |
7432 | can be obtained by changing LT to LE and vice versa; this is | |
7433 | confusing. */ | |
7434 | ||
e9a25f70 | 7435 | if (comparison |
c48ba252 R |
7436 | /* for constants, LE gets turned into LT */ |
7437 | && (GET_CODE (comparison) == LT | |
7438 | || (GET_CODE (comparison) == LE | |
7439 | && no_use_except_counting))) | |
b4ad7b23 | 7440 | { |
f428f252 | 7441 | HOST_WIDE_INT add_val, add_adjust, comparison_val = 0; |
c48ba252 R |
7442 | rtx initial_value, comparison_value; |
7443 | int nonneg = 0; | |
7444 | enum rtx_code cmp_code; | |
7445 | int comparison_const_width; | |
7446 | unsigned HOST_WIDE_INT comparison_sign_mask; | |
e9a25f70 JL |
7447 | |
7448 | add_val = INTVAL (bl->biv->add_val); | |
c48ba252 | 7449 | comparison_value = XEXP (comparison, 1); |
2c74fb2b AS |
7450 | if (GET_MODE (comparison_value) == VOIDmode) |
7451 | comparison_const_width | |
7452 | = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0))); | |
7453 | else | |
7454 | comparison_const_width | |
7455 | = GET_MODE_BITSIZE (GET_MODE (comparison_value)); | |
c48ba252 R |
7456 | if (comparison_const_width > HOST_BITS_PER_WIDE_INT) |
7457 | comparison_const_width = HOST_BITS_PER_WIDE_INT; | |
7458 | comparison_sign_mask | |
e6fcb60d | 7459 | = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1); |
c48ba252 | 7460 | |
3aa94dc8 JL |
7461 | /* If the comparison value is not a loop invariant, then we |
7462 | can not reverse this loop. | |
7463 | ||
7464 | ??? If the insns which initialize the comparison value as | |
7465 | a whole compute an invariant result, then we could move | |
7466 | them out of the loop and proceed with loop reversal. */ | |
0534b804 | 7467 | if (! loop_invariant_p (loop, comparison_value)) |
3aa94dc8 JL |
7468 | return 0; |
7469 | ||
c48ba252 R |
7470 | if (GET_CODE (comparison_value) == CONST_INT) |
7471 | comparison_val = INTVAL (comparison_value); | |
e9a25f70 | 7472 | initial_value = bl->initial_value; |
e6fcb60d KH |
7473 | |
7474 | /* Normalize the initial value if it is an integer and | |
a8decb2c JL |
7475 | has no other use except as a counter. This will allow |
7476 | a few more loops to be reversed. */ | |
7477 | if (no_use_except_counting | |
c48ba252 | 7478 | && GET_CODE (comparison_value) == CONST_INT |
a8decb2c | 7479 | && GET_CODE (initial_value) == CONST_INT) |
e9a25f70 JL |
7480 | { |
7481 | comparison_val = comparison_val - INTVAL (bl->initial_value); | |
c48ba252 R |
7482 | /* The code below requires comparison_val to be a multiple |
7483 | of add_val in order to do the loop reversal, so | |
7484 | round up comparison_val to a multiple of add_val. | |
7485 | Since comparison_value is constant, we know that the | |
7486 | current comparison code is LT. */ | |
7487 | comparison_val = comparison_val + add_val - 1; | |
7488 | comparison_val | |
7489 | -= (unsigned HOST_WIDE_INT) comparison_val % add_val; | |
7490 | /* We postpone overflow checks for COMPARISON_VAL here; | |
7491 | even if there is an overflow, we might still be able to | |
7492 | reverse the loop, if converting the loop exit test to | |
7493 | NE is possible. */ | |
7494 | initial_value = const0_rtx; | |
e9a25f70 JL |
7495 | } |
7496 | ||
c48ba252 R |
7497 | /* First check if we can do a vanilla loop reversal. */ |
7498 | if (initial_value == const0_rtx | |
3c748bb6 MH |
7499 | /* If we have a decrement_and_branch_on_count, |
7500 | prefer the NE test, since this will allow that | |
7501 | instruction to be generated. Note that we must | |
7502 | use a vanilla loop reversal if the biv is used to | |
7503 | calculate a giv or has a non-counting use. */ | |
7504 | #if ! defined (HAVE_decrement_and_branch_until_zero) \ | |
7505 | && defined (HAVE_decrement_and_branch_on_count) | |
a2be868f | 7506 | && (! (add_val == 1 && loop->vtop |
c5cbf81e JL |
7507 | && (bl->biv_count == 0 |
7508 | || no_use_except_counting))) | |
c48ba252 R |
7509 | #endif |
7510 | && GET_CODE (comparison_value) == CONST_INT | |
7511 | /* Now do postponed overflow checks on COMPARISON_VAL. */ | |
7512 | && ! (((comparison_val - add_val) ^ INTVAL (comparison_value)) | |
7513 | & comparison_sign_mask)) | |
7514 | { | |
7515 | /* Register will always be nonnegative, with value | |
7516 | 0 on last iteration */ | |
7517 | add_adjust = add_val; | |
7518 | nonneg = 1; | |
7519 | cmp_code = GE; | |
7520 | } | |
a2be868f | 7521 | else if (add_val == 1 && loop->vtop |
c5cbf81e JL |
7522 | && (bl->biv_count == 0 |
7523 | || no_use_except_counting)) | |
c48ba252 R |
7524 | { |
7525 | add_adjust = 0; | |
7526 | cmp_code = NE; | |
7527 | } | |
7528 | else | |
7529 | return 0; | |
7530 | ||
7531 | if (GET_CODE (comparison) == LE) | |
7532 | add_adjust -= add_val; | |
7533 | ||
e9a25f70 JL |
7534 | /* If the initial value is not zero, or if the comparison |
7535 | value is not an exact multiple of the increment, then we | |
7536 | can not reverse this loop. */ | |
c48ba252 R |
7537 | if (initial_value == const0_rtx |
7538 | && GET_CODE (comparison_value) == CONST_INT) | |
7539 | { | |
7540 | if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0) | |
7541 | return 0; | |
7542 | } | |
7543 | else | |
7544 | { | |
7545 | if (! no_use_except_counting || add_val != 1) | |
7546 | return 0; | |
7547 | } | |
e9a25f70 | 7548 | |
8ed69d09 R |
7549 | final_value = comparison_value; |
7550 | ||
e9a25f70 JL |
7551 | /* Reset these in case we normalized the initial value |
7552 | and comparison value above. */ | |
8ed69d09 R |
7553 | if (GET_CODE (comparison_value) == CONST_INT |
7554 | && GET_CODE (initial_value) == CONST_INT) | |
7555 | { | |
7556 | comparison_value = GEN_INT (comparison_val); | |
7557 | final_value | |
7558 | = GEN_INT (comparison_val + INTVAL (bl->initial_value)); | |
7559 | } | |
e9a25f70 | 7560 | bl->initial_value = initial_value; |
b4ad7b23 RS |
7561 | |
7562 | /* Save some info needed to produce the new insns. */ | |
7563 | reg = bl->biv->dest_reg; | |
353df065 | 7564 | jump_label = condjump_label (PREV_INSN (loop_end)); |
fd5d5b07 | 7565 | new_add_val = GEN_INT (-INTVAL (bl->biv->add_val)); |
b4ad7b23 | 7566 | |
c48ba252 R |
7567 | /* Set start_value; if this is not a CONST_INT, we need |
7568 | to generate a SUB. | |
7569 | Initialize biv to start_value before loop start. | |
b4ad7b23 RS |
7570 | The old initializing insn will be deleted as a |
7571 | dead store by flow.c. */ | |
c48ba252 R |
7572 | if (initial_value == const0_rtx |
7573 | && GET_CODE (comparison_value) == CONST_INT) | |
7574 | { | |
7575 | start_value = GEN_INT (comparison_val - add_adjust); | |
804a718a | 7576 | loop_insn_hoist (loop, gen_move_insn (reg, start_value)); |
c48ba252 R |
7577 | } |
7578 | else if (GET_CODE (initial_value) == CONST_INT) | |
7579 | { | |
c48ba252 | 7580 | enum machine_mode mode = GET_MODE (reg); |
ef89d648 ZW |
7581 | rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust); |
7582 | rtx add_insn = gen_add3_insn (reg, comparison_value, offset); | |
7583 | ||
7584 | if (add_insn == 0) | |
c48ba252 | 7585 | return 0; |
ef89d648 | 7586 | |
c48ba252 R |
7587 | start_value |
7588 | = gen_rtx_PLUS (mode, comparison_value, offset); | |
ef89d648 | 7589 | loop_insn_hoist (loop, add_insn); |
c48ba252 R |
7590 | if (GET_CODE (comparison) == LE) |
7591 | final_value = gen_rtx_PLUS (mode, comparison_value, | |
7592 | GEN_INT (add_val)); | |
7593 | } | |
7594 | else if (! add_adjust) | |
7595 | { | |
7596 | enum machine_mode mode = GET_MODE (reg); | |
ef89d648 ZW |
7597 | rtx sub_insn = gen_sub3_insn (reg, comparison_value, |
7598 | initial_value); | |
7599 | ||
7600 | if (sub_insn == 0) | |
c48ba252 R |
7601 | return 0; |
7602 | start_value | |
7603 | = gen_rtx_MINUS (mode, comparison_value, initial_value); | |
ef89d648 | 7604 | loop_insn_hoist (loop, sub_insn); |
c48ba252 R |
7605 | } |
7606 | else | |
7607 | /* We could handle the other cases too, but it'll be | |
7608 | better to have a testcase first. */ | |
7609 | return 0; | |
b4ad7b23 | 7610 | |
225a7e3d JL |
7611 | /* We may not have a single insn which can increment a reg, so |
7612 | create a sequence to hold all the insns from expand_inc. */ | |
7613 | start_sequence (); | |
7614 | expand_inc (reg, new_add_val); | |
e6fcb60d KH |
7615 | tem = gen_sequence (); |
7616 | end_sequence (); | |
225a7e3d | 7617 | |
86e21212 | 7618 | p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem); |
b4ad7b23 | 7619 | delete_insn (bl->biv->insn); |
e6fcb60d | 7620 | |
b4ad7b23 RS |
7621 | /* Update biv info to reflect its new status. */ |
7622 | bl->biv->insn = p; | |
7623 | bl->initial_value = start_value; | |
7624 | bl->biv->add_val = new_add_val; | |
7625 | ||
5629b16c | 7626 | /* Update loop info. */ |
eb6a3bc0 MH |
7627 | loop_info->initial_value = reg; |
7628 | loop_info->initial_equiv_value = reg; | |
5629b16c MH |
7629 | loop_info->final_value = const0_rtx; |
7630 | loop_info->final_equiv_value = const0_rtx; | |
7631 | loop_info->comparison_value = const0_rtx; | |
7632 | loop_info->comparison_code = cmp_code; | |
7633 | loop_info->increment = new_add_val; | |
7634 | ||
b4ad7b23 RS |
7635 | /* Inc LABEL_NUSES so that delete_insn will |
7636 | not delete the label. */ | |
fd5d5b07 | 7637 | LABEL_NUSES (XEXP (jump_label, 0))++; |
b4ad7b23 RS |
7638 | |
7639 | /* Emit an insn after the end of the loop to set the biv's | |
7640 | proper exit value if it is used anywhere outside the loop. */ | |
0628fde6 | 7641 | if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare)) |
b4ad7b23 | 7642 | || ! bl->init_insn |
b1f21e0a | 7643 | || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn)) |
96a45535 | 7644 | loop_insn_sink (loop, gen_move_insn (reg, final_value)); |
b4ad7b23 RS |
7645 | |
7646 | /* Delete compare/branch at end of loop. */ | |
7647 | delete_insn (PREV_INSN (loop_end)); | |
0628fde6 JW |
7648 | if (compare_and_branch == 2) |
7649 | delete_insn (first_compare); | |
b4ad7b23 RS |
7650 | |
7651 | /* Add new compare/branch insn at end of loop. */ | |
7652 | start_sequence (); | |
362cc3d4 | 7653 | emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX, |
e6fcb60d | 7654 | GET_MODE (reg), 0, 0, |
362cc3d4 | 7655 | XEXP (jump_label, 0)); |
b4ad7b23 RS |
7656 | tem = gen_sequence (); |
7657 | end_sequence (); | |
7658 | emit_jump_insn_before (tem, loop_end); | |
7659 | ||
a7060368 MH |
7660 | for (tem = PREV_INSN (loop_end); |
7661 | tem && GET_CODE (tem) != JUMP_INSN; | |
7662 | tem = PREV_INSN (tem)) | |
7663 | ; | |
7664 | ||
7665 | if (tem) | |
7666 | JUMP_LABEL (tem) = XEXP (jump_label, 0); | |
7667 | ||
c48ba252 | 7668 | if (nonneg) |
b4ad7b23 | 7669 | { |
c48ba252 R |
7670 | if (tem) |
7671 | { | |
c48ba252 R |
7672 | /* Increment of LABEL_NUSES done above. */ |
7673 | /* Register is now always nonnegative, | |
7674 | so add REG_NONNEG note to the branch. */ | |
65b98a02 | 7675 | REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg, |
c48ba252 R |
7676 | REG_NOTES (tem)); |
7677 | } | |
7678 | bl->nonneg = 1; | |
b4ad7b23 RS |
7679 | } |
7680 | ||
22b452e7 BS |
7681 | /* No insn may reference both the reversed and another biv or it |
7682 | will fail (see comment near the top of the loop reversal | |
7683 | code). | |
7684 | Earlier on, we have verified that the biv has no use except | |
7685 | counting, or it is the only biv in this function. | |
7686 | However, the code that computes no_use_except_counting does | |
7687 | not verify reg notes. It's possible to have an insn that | |
7688 | references another biv, and has a REG_EQUAL note with an | |
7689 | expression based on the reversed biv. To avoid this case, | |
7690 | remove all REG_EQUAL notes based on the reversed biv | |
7691 | here. */ | |
7692 | for (p = loop_start; p != loop_end; p = NEXT_INSN (p)) | |
2c3c49de | 7693 | if (INSN_P (p)) |
22b452e7 BS |
7694 | { |
7695 | rtx *pnote; | |
7696 | rtx set = single_set (p); | |
7697 | /* If this is a set of a GIV based on the reversed biv, any | |
7698 | REG_EQUAL notes should still be correct. */ | |
7699 | if (! set | |
7700 | || GET_CODE (SET_DEST (set)) != REG | |
14be28e5 | 7701 | || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs |
ed5bb68d MH |
7702 | || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT |
7703 | || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg) | |
22b452e7 BS |
7704 | for (pnote = ®_NOTES (p); *pnote;) |
7705 | { | |
7706 | if (REG_NOTE_KIND (*pnote) == REG_EQUAL | |
7707 | && reg_mentioned_p (regno_reg_rtx[bl->regno], | |
7708 | XEXP (*pnote, 0))) | |
7709 | *pnote = XEXP (*pnote, 1); | |
7710 | else | |
7711 | pnote = &XEXP (*pnote, 1); | |
7712 | } | |
7713 | } | |
7714 | ||
b4ad7b23 RS |
7715 | /* Mark that this biv has been reversed. Each giv which depends |
7716 | on this biv, and which is also live past the end of the loop | |
7717 | will have to be fixed up. */ | |
7718 | ||
7719 | bl->reversed = 1; | |
7720 | ||
7721 | if (loop_dump_stream) | |
b50cb11f MH |
7722 | { |
7723 | fprintf (loop_dump_stream, "Reversed loop"); | |
7724 | if (bl->nonneg) | |
7725 | fprintf (loop_dump_stream, " and added reg_nonneg\n"); | |
7726 | else | |
7727 | fprintf (loop_dump_stream, "\n"); | |
7728 | } | |
b4ad7b23 RS |
7729 | |
7730 | return 1; | |
7731 | } | |
7732 | } | |
7733 | } | |
7734 | ||
7735 | return 0; | |
7736 | } | |
7737 | \f | |
7738 | /* Verify whether the biv BL appears to be eliminable, | |
7739 | based on the insns in the loop that refer to it. | |
b4ad7b23 RS |
7740 | |
7741 | If ELIMINATE_P is non-zero, actually do the elimination. | |
7742 | ||
7743 | THRESHOLD and INSN_COUNT are from loop_optimize and are used to | |
7744 | determine whether invariant insns should be placed inside or at the | |
7745 | start of the loop. */ | |
7746 | ||
7747 | static int | |
0534b804 MH |
7748 | maybe_eliminate_biv (loop, bl, eliminate_p, threshold, insn_count) |
7749 | const struct loop *loop; | |
b4ad7b23 | 7750 | struct iv_class *bl; |
b4ad7b23 RS |
7751 | int eliminate_p; |
7752 | int threshold, insn_count; | |
7753 | { | |
ed5bb68d | 7754 | struct loop_ivs *ivs = LOOP_IVS (loop); |
b4ad7b23 | 7755 | rtx reg = bl->biv->dest_reg; |
bd5a664e | 7756 | rtx p; |
b4ad7b23 RS |
7757 | |
7758 | /* Scan all insns in the loop, stopping if we find one that uses the | |
7759 | biv in a way that we cannot eliminate. */ | |
7760 | ||
96a45535 | 7761 | for (p = loop->start; p != loop->end; p = NEXT_INSN (p)) |
b4ad7b23 RS |
7762 | { |
7763 | enum rtx_code code = GET_CODE (p); | |
96a45535 MH |
7764 | basic_block where_bb = 0; |
7765 | rtx where_insn = threshold >= insn_count ? 0 : p; | |
b4ad7b23 | 7766 | |
fdb1833a R |
7767 | /* If this is a libcall that sets a giv, skip ahead to its end. */ |
7768 | if (GET_RTX_CLASS (code) == 'i') | |
7769 | { | |
7770 | rtx note = find_reg_note (p, REG_LIBCALL, NULL_RTX); | |
7771 | ||
7772 | if (note) | |
7773 | { | |
7774 | rtx last = XEXP (note, 0); | |
7775 | rtx set = single_set (last); | |
7776 | ||
7777 | if (set && GET_CODE (SET_DEST (set)) == REG) | |
7778 | { | |
770ae6cc | 7779 | unsigned int regno = REGNO (SET_DEST (set)); |
fdb1833a | 7780 | |
86fee241 | 7781 | if (regno < ivs->n_regs |
ed5bb68d MH |
7782 | && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT |
7783 | && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg) | |
fdb1833a R |
7784 | p = last; |
7785 | } | |
7786 | } | |
7787 | } | |
b4ad7b23 RS |
7788 | if ((code == INSN || code == JUMP_INSN || code == CALL_INSN) |
7789 | && reg_mentioned_p (reg, PATTERN (p)) | |
0534b804 | 7790 | && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl, |
96a45535 | 7791 | eliminate_p, where_bb, where_insn)) |
b4ad7b23 RS |
7792 | { |
7793 | if (loop_dump_stream) | |
7794 | fprintf (loop_dump_stream, | |
7795 | "Cannot eliminate biv %d: biv used in insn %d.\n", | |
7796 | bl->regno, INSN_UID (p)); | |
7797 | break; | |
7798 | } | |
7799 | } | |
7800 | ||
96a45535 | 7801 | if (p == loop->end) |
b4ad7b23 RS |
7802 | { |
7803 | if (loop_dump_stream) | |
7804 | fprintf (loop_dump_stream, "biv %d %s eliminated.\n", | |
7805 | bl->regno, eliminate_p ? "was" : "can be"); | |
7806 | return 1; | |
7807 | } | |
7808 | ||
7809 | return 0; | |
7810 | } | |
7811 | \f | |
a6207a2b | 7812 | /* INSN and REFERENCE are instructions in the same insn chain. |
f38cbf0f | 7813 | Return non-zero if INSN is first. */ |
a6207a2b | 7814 | |
c99f8c2a | 7815 | int |
a6207a2b R |
7816 | loop_insn_first_p (insn, reference) |
7817 | rtx insn, reference; | |
7818 | { | |
f38cbf0f R |
7819 | rtx p, q; |
7820 | ||
e6fcb60d | 7821 | for (p = insn, q = reference;;) |
f38cbf0f R |
7822 | { |
7823 | /* Start with test for not first so that INSN == REFERENCE yields not | |
7824 | first. */ | |
7825 | if (q == insn || ! p) | |
7826 | return 0; | |
7827 | if (p == reference || ! q) | |
7828 | return 1; | |
7829 | ||
7c2772f1 R |
7830 | /* Either of P or Q might be a NOTE. Notes have the same LUID as the |
7831 | previous insn, hence the <= comparison below does not work if | |
7832 | P is a note. */ | |
f38cbf0f | 7833 | if (INSN_UID (p) < max_uid_for_loop |
7c2772f1 R |
7834 | && INSN_UID (q) < max_uid_for_loop |
7835 | && GET_CODE (p) != NOTE) | |
7836 | return INSN_LUID (p) <= INSN_LUID (q); | |
f38cbf0f | 7837 | |
7c2772f1 R |
7838 | if (INSN_UID (p) >= max_uid_for_loop |
7839 | || GET_CODE (p) == NOTE) | |
f38cbf0f R |
7840 | p = NEXT_INSN (p); |
7841 | if (INSN_UID (q) >= max_uid_for_loop) | |
7842 | q = NEXT_INSN (q); | |
7843 | } | |
a6207a2b R |
7844 | } |
7845 | ||
7846 | /* We are trying to eliminate BIV in INSN using GIV. Return non-zero if | |
7847 | the offset that we have to take into account due to auto-increment / | |
7848 | div derivation is zero. */ | |
7849 | static int | |
7850 | biv_elimination_giv_has_0_offset (biv, giv, insn) | |
7851 | struct induction *biv, *giv; | |
7852 | rtx insn; | |
7853 | { | |
7854 | /* If the giv V had the auto-inc address optimization applied | |
7855 | to it, and INSN occurs between the giv insn and the biv | |
7856 | insn, then we'd have to adjust the value used here. | |
7857 | This is rare, so we don't bother to make this possible. */ | |
7858 | if (giv->auto_inc_opt | |
7859 | && ((loop_insn_first_p (giv->insn, insn) | |
7860 | && loop_insn_first_p (insn, biv->insn)) | |
7861 | || (loop_insn_first_p (biv->insn, insn) | |
7862 | && loop_insn_first_p (insn, giv->insn)))) | |
7863 | return 0; | |
7864 | ||
a6207a2b R |
7865 | return 1; |
7866 | } | |
7867 | ||
b4ad7b23 RS |
7868 | /* If BL appears in X (part of the pattern of INSN), see if we can |
7869 | eliminate its use. If so, return 1. If not, return 0. | |
7870 | ||
7871 | If BIV does not appear in X, return 1. | |
7872 | ||
96a45535 MH |
7873 | If ELIMINATE_P is non-zero, actually do the elimination. |
7874 | WHERE_INSN/WHERE_BB indicate where extra insns should be added. | |
7875 | Depending on how many items have been moved out of the loop, it | |
7876 | will either be before INSN (when WHERE_INSN is non-zero) or at the | |
7877 | start of the loop (when WHERE_INSN is zero). */ | |
b4ad7b23 RS |
7878 | |
7879 | static int | |
96a45535 | 7880 | maybe_eliminate_biv_1 (loop, x, insn, bl, eliminate_p, where_bb, where_insn) |
0534b804 | 7881 | const struct loop *loop; |
b4ad7b23 RS |
7882 | rtx x, insn; |
7883 | struct iv_class *bl; | |
7884 | int eliminate_p; | |
96a45535 MH |
7885 | basic_block where_bb; |
7886 | rtx where_insn; | |
b4ad7b23 RS |
7887 | { |
7888 | enum rtx_code code = GET_CODE (x); | |
7889 | rtx reg = bl->biv->dest_reg; | |
7890 | enum machine_mode mode = GET_MODE (reg); | |
7891 | struct induction *v; | |
51723711 KG |
7892 | rtx arg, tem; |
7893 | #ifdef HAVE_cc0 | |
7894 | rtx new; | |
7895 | #endif | |
b4ad7b23 | 7896 | int arg_operand; |
6f7d635c | 7897 | const char *fmt; |
b4ad7b23 RS |
7898 | int i, j; |
7899 | ||
7900 | switch (code) | |
7901 | { | |
7902 | case REG: | |
7903 | /* If we haven't already been able to do something with this BIV, | |
7904 | we can't eliminate it. */ | |
7905 | if (x == reg) | |
7906 | return 0; | |
7907 | return 1; | |
7908 | ||
7909 | case SET: | |
7910 | /* If this sets the BIV, it is not a problem. */ | |
7911 | if (SET_DEST (x) == reg) | |
7912 | return 1; | |
7913 | ||
7914 | /* If this is an insn that defines a giv, it is also ok because | |
7915 | it will go away when the giv is reduced. */ | |
7916 | for (v = bl->giv; v; v = v->next_iv) | |
7917 | if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg) | |
7918 | return 1; | |
7919 | ||
7920 | #ifdef HAVE_cc0 | |
7921 | if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg) | |
7922 | { | |
7923 | /* Can replace with any giv that was reduced and | |
7924 | that has (MULT_VAL != 0) and (ADD_VAL == 0). | |
fbdc6da8 RK |
7925 | Require a constant for MULT_VAL, so we know it's nonzero. |
7926 | ??? We disable this optimization to avoid potential | |
7927 | overflows. */ | |
b4ad7b23 RS |
7928 | |
7929 | for (v = bl->giv; v; v = v->next_iv) | |
3508c681 | 7930 | if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx |
b4ad7b23 | 7931 | && v->add_val == const0_rtx |
453331a3 | 7932 | && ! v->ignore && ! v->maybe_dead && v->always_computable |
fbdc6da8 RK |
7933 | && v->mode == mode |
7934 | && 0) | |
b4ad7b23 | 7935 | { |
a6207a2b | 7936 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
7937 | continue; |
7938 | ||
b4ad7b23 RS |
7939 | if (! eliminate_p) |
7940 | return 1; | |
7941 | ||
7942 | /* If the giv has the opposite direction of change, | |
7943 | then reverse the comparison. */ | |
7944 | if (INTVAL (v->mult_val) < 0) | |
38a448ca RH |
7945 | new = gen_rtx_COMPARE (GET_MODE (v->new_reg), |
7946 | const0_rtx, v->new_reg); | |
b4ad7b23 RS |
7947 | else |
7948 | new = v->new_reg; | |
7949 | ||
7950 | /* We can probably test that giv's reduced reg. */ | |
7951 | if (validate_change (insn, &SET_SRC (x), new, 0)) | |
7952 | return 1; | |
7953 | } | |
7954 | ||
7955 | /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0); | |
7956 | replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL). | |
fbdc6da8 RK |
7957 | Require a constant for MULT_VAL, so we know it's nonzero. |
7958 | ??? Do this only if ADD_VAL is a pointer to avoid a potential | |
7959 | overflow problem. */ | |
b4ad7b23 RS |
7960 | |
7961 | for (v = bl->giv; v; v = v->next_iv) | |
e6fcb60d KH |
7962 | if (GET_CODE (v->mult_val) == CONST_INT |
7963 | && v->mult_val != const0_rtx | |
453331a3 | 7964 | && ! v->ignore && ! v->maybe_dead && v->always_computable |
fbdc6da8 RK |
7965 | && v->mode == mode |
7966 | && (GET_CODE (v->add_val) == SYMBOL_REF | |
7967 | || GET_CODE (v->add_val) == LABEL_REF | |
7968 | || GET_CODE (v->add_val) == CONST | |
7969 | || (GET_CODE (v->add_val) == REG | |
3502dc9c | 7970 | && REG_POINTER (v->add_val)))) |
b4ad7b23 | 7971 | { |
a6207a2b | 7972 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
7973 | continue; |
7974 | ||
b4ad7b23 RS |
7975 | if (! eliminate_p) |
7976 | return 1; | |
7977 | ||
7978 | /* If the giv has the opposite direction of change, | |
7979 | then reverse the comparison. */ | |
7980 | if (INTVAL (v->mult_val) < 0) | |
38a448ca RH |
7981 | new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val), |
7982 | v->new_reg); | |
b4ad7b23 | 7983 | else |
38a448ca RH |
7984 | new = gen_rtx_COMPARE (VOIDmode, v->new_reg, |
7985 | copy_rtx (v->add_val)); | |
b4ad7b23 RS |
7986 | |
7987 | /* Replace biv with the giv's reduced register. */ | |
7988 | update_reg_last_use (v->add_val, insn); | |
7989 | if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0)) | |
7990 | return 1; | |
7991 | ||
7992 | /* Insn doesn't support that constant or invariant. Copy it | |
7993 | into a register (it will be a loop invariant.) */ | |
7994 | tem = gen_reg_rtx (GET_MODE (v->new_reg)); | |
7995 | ||
86e21212 MH |
7996 | loop_insn_emit_before (loop, 0, where_insn, |
7997 | gen_move_insn (tem, | |
7998 | copy_rtx (v->add_val))); | |
b4ad7b23 | 7999 | |
2ae3dcac | 8000 | /* Substitute the new register for its invariant value in |
e6fcb60d | 8001 | the compare expression. */ |
2ae3dcac RK |
8002 | XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem; |
8003 | if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0)) | |
b4ad7b23 RS |
8004 | return 1; |
8005 | } | |
8006 | } | |
8007 | #endif | |
8008 | break; | |
8009 | ||
8010 | case COMPARE: | |
8011 | case EQ: case NE: | |
8012 | case GT: case GE: case GTU: case GEU: | |
8013 | case LT: case LE: case LTU: case LEU: | |
8014 | /* See if either argument is the biv. */ | |
8015 | if (XEXP (x, 0) == reg) | |
8016 | arg = XEXP (x, 1), arg_operand = 1; | |
8017 | else if (XEXP (x, 1) == reg) | |
8018 | arg = XEXP (x, 0), arg_operand = 0; | |
8019 | else | |
8020 | break; | |
8021 | ||
8022 | if (CONSTANT_P (arg)) | |
8023 | { | |
8024 | /* First try to replace with any giv that has constant positive | |
8025 | mult_val and constant add_val. We might be able to support | |
8026 | negative mult_val, but it seems complex to do it in general. */ | |
8027 | ||
8028 | for (v = bl->giv; v; v = v->next_iv) | |
e6fcb60d KH |
8029 | if (GET_CODE (v->mult_val) == CONST_INT |
8030 | && INTVAL (v->mult_val) > 0 | |
fbdc6da8 RK |
8031 | && (GET_CODE (v->add_val) == SYMBOL_REF |
8032 | || GET_CODE (v->add_val) == LABEL_REF | |
8033 | || GET_CODE (v->add_val) == CONST | |
8034 | || (GET_CODE (v->add_val) == REG | |
3502dc9c | 8035 | && REG_POINTER (v->add_val))) |
453331a3 | 8036 | && ! v->ignore && ! v->maybe_dead && v->always_computable |
b4ad7b23 RS |
8037 | && v->mode == mode) |
8038 | { | |
a6207a2b | 8039 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
8040 | continue; |
8041 | ||
b4ad7b23 RS |
8042 | if (! eliminate_p) |
8043 | return 1; | |
8044 | ||
8045 | /* Replace biv with the giv's reduced reg. */ | |
e6fcb60d | 8046 | validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1); |
b4ad7b23 RS |
8047 | |
8048 | /* If all constants are actually constant integers and | |
8049 | the derived constant can be directly placed in the COMPARE, | |
8050 | do so. */ | |
8051 | if (GET_CODE (arg) == CONST_INT | |
8052 | && GET_CODE (v->mult_val) == CONST_INT | |
3508c681 JH |
8053 | && GET_CODE (v->add_val) == CONST_INT) |
8054 | { | |
8055 | validate_change (insn, &XEXP (x, arg_operand), | |
8056 | GEN_INT (INTVAL (arg) | |
e6fcb60d KH |
8057 | * INTVAL (v->mult_val) |
8058 | + INTVAL (v->add_val)), 1); | |
3508c681 JH |
8059 | } |
8060 | else | |
8061 | { | |
8062 | /* Otherwise, load it into a register. */ | |
8063 | tem = gen_reg_rtx (mode); | |
96a45535 MH |
8064 | loop_iv_add_mult_emit_before (loop, arg, |
8065 | v->mult_val, v->add_val, | |
8066 | tem, where_bb, where_insn); | |
3508c681 JH |
8067 | validate_change (insn, &XEXP (x, arg_operand), tem, 1); |
8068 | } | |
8069 | if (apply_change_group ()) | |
b4ad7b23 | 8070 | return 1; |
b4ad7b23 | 8071 | } |
e6fcb60d | 8072 | |
b4ad7b23 | 8073 | /* Look for giv with positive constant mult_val and nonconst add_val. |
e6fcb60d | 8074 | Insert insns to calculate new compare value. |
fbdc6da8 | 8075 | ??? Turn this off due to possible overflow. */ |
b4ad7b23 RS |
8076 | |
8077 | for (v = bl->giv; v; v = v->next_iv) | |
e6fcb60d KH |
8078 | if (GET_CODE (v->mult_val) == CONST_INT |
8079 | && INTVAL (v->mult_val) > 0 | |
453331a3 | 8080 | && ! v->ignore && ! v->maybe_dead && v->always_computable |
fbdc6da8 RK |
8081 | && v->mode == mode |
8082 | && 0) | |
b4ad7b23 RS |
8083 | { |
8084 | rtx tem; | |
8085 | ||
a6207a2b | 8086 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
8087 | continue; |
8088 | ||
b4ad7b23 RS |
8089 | if (! eliminate_p) |
8090 | return 1; | |
8091 | ||
8092 | tem = gen_reg_rtx (mode); | |
8093 | ||
8094 | /* Replace biv with giv's reduced register. */ | |
8095 | validate_change (insn, &XEXP (x, 1 - arg_operand), | |
8096 | v->new_reg, 1); | |
8097 | ||
8098 | /* Compute value to compare against. */ | |
96a45535 MH |
8099 | loop_iv_add_mult_emit_before (loop, arg, |
8100 | v->mult_val, v->add_val, | |
8101 | tem, where_bb, where_insn); | |
b4ad7b23 RS |
8102 | /* Use it in this insn. */ |
8103 | validate_change (insn, &XEXP (x, arg_operand), tem, 1); | |
8104 | if (apply_change_group ()) | |
8105 | return 1; | |
8106 | } | |
8107 | } | |
8108 | else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM) | |
8109 | { | |
0534b804 | 8110 | if (loop_invariant_p (loop, arg) == 1) |
b4ad7b23 RS |
8111 | { |
8112 | /* Look for giv with constant positive mult_val and nonconst | |
e6fcb60d | 8113 | add_val. Insert insns to compute new compare value. |
fbdc6da8 | 8114 | ??? Turn this off due to possible overflow. */ |
b4ad7b23 RS |
8115 | |
8116 | for (v = bl->giv; v; v = v->next_iv) | |
3508c681 | 8117 | if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0 |
453331a3 | 8118 | && ! v->ignore && ! v->maybe_dead && v->always_computable |
fbdc6da8 RK |
8119 | && v->mode == mode |
8120 | && 0) | |
b4ad7b23 RS |
8121 | { |
8122 | rtx tem; | |
8123 | ||
a6207a2b | 8124 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
8125 | continue; |
8126 | ||
b4ad7b23 RS |
8127 | if (! eliminate_p) |
8128 | return 1; | |
8129 | ||
8130 | tem = gen_reg_rtx (mode); | |
8131 | ||
8132 | /* Replace biv with giv's reduced register. */ | |
8133 | validate_change (insn, &XEXP (x, 1 - arg_operand), | |
8134 | v->new_reg, 1); | |
8135 | ||
8136 | /* Compute value to compare against. */ | |
96a45535 MH |
8137 | loop_iv_add_mult_emit_before (loop, arg, |
8138 | v->mult_val, v->add_val, | |
8139 | tem, where_bb, where_insn); | |
b4ad7b23 RS |
8140 | validate_change (insn, &XEXP (x, arg_operand), tem, 1); |
8141 | if (apply_change_group ()) | |
8142 | return 1; | |
8143 | } | |
8144 | } | |
8145 | ||
8146 | /* This code has problems. Basically, you can't know when | |
8147 | seeing if we will eliminate BL, whether a particular giv | |
8148 | of ARG will be reduced. If it isn't going to be reduced, | |
8149 | we can't eliminate BL. We can try forcing it to be reduced, | |
8150 | but that can generate poor code. | |
8151 | ||
8152 | The problem is that the benefit of reducing TV, below should | |
8153 | be increased if BL can actually be eliminated, but this means | |
8154 | we might have to do a topological sort of the order in which | |
8155 | we try to process biv. It doesn't seem worthwhile to do | |
8156 | this sort of thing now. */ | |
8157 | ||
8158 | #if 0 | |
8159 | /* Otherwise the reg compared with had better be a biv. */ | |
8160 | if (GET_CODE (arg) != REG | |
ed5bb68d | 8161 | || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT) |
b4ad7b23 RS |
8162 | return 0; |
8163 | ||
8164 | /* Look for a pair of givs, one for each biv, | |
8165 | with identical coefficients. */ | |
8166 | for (v = bl->giv; v; v = v->next_iv) | |
8167 | { | |
8168 | struct induction *tv; | |
8169 | ||
8170 | if (v->ignore || v->maybe_dead || v->mode != mode) | |
8171 | continue; | |
8172 | ||
8b634749 MH |
8173 | for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv; |
8174 | tv = tv->next_iv) | |
b4ad7b23 RS |
8175 | if (! tv->ignore && ! tv->maybe_dead |
8176 | && rtx_equal_p (tv->mult_val, v->mult_val) | |
8177 | && rtx_equal_p (tv->add_val, v->add_val) | |
8178 | && tv->mode == mode) | |
8179 | { | |
a6207a2b | 8180 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
8181 | continue; |
8182 | ||
b4ad7b23 RS |
8183 | if (! eliminate_p) |
8184 | return 1; | |
8185 | ||
8186 | /* Replace biv with its giv's reduced reg. */ | |
e6fcb60d | 8187 | XEXP (x, 1 - arg_operand) = v->new_reg; |
b4ad7b23 RS |
8188 | /* Replace other operand with the other giv's |
8189 | reduced reg. */ | |
8190 | XEXP (x, arg_operand) = tv->new_reg; | |
8191 | return 1; | |
8192 | } | |
8193 | } | |
8194 | #endif | |
8195 | } | |
8196 | ||
8197 | /* If we get here, the biv can't be eliminated. */ | |
8198 | return 0; | |
8199 | ||
8200 | case MEM: | |
8201 | /* If this address is a DEST_ADDR giv, it doesn't matter if the | |
8202 | biv is used in it, since it will be replaced. */ | |
8203 | for (v = bl->giv; v; v = v->next_iv) | |
8204 | if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0)) | |
8205 | return 1; | |
8206 | break; | |
e9a25f70 JL |
8207 | |
8208 | default: | |
8209 | break; | |
b4ad7b23 RS |
8210 | } |
8211 | ||
8212 | /* See if any subexpression fails elimination. */ | |
8213 | fmt = GET_RTX_FORMAT (code); | |
8214 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
8215 | { | |
8216 | switch (fmt[i]) | |
8217 | { | |
8218 | case 'e': | |
e6fcb60d | 8219 | if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl, |
96a45535 | 8220 | eliminate_p, where_bb, where_insn)) |
b4ad7b23 RS |
8221 | return 0; |
8222 | break; | |
8223 | ||
8224 | case 'E': | |
8225 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
0534b804 | 8226 | if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl, |
96a45535 | 8227 | eliminate_p, where_bb, where_insn)) |
b4ad7b23 RS |
8228 | return 0; |
8229 | break; | |
8230 | } | |
8231 | } | |
8232 | ||
8233 | return 1; | |
e6fcb60d | 8234 | } |
b4ad7b23 RS |
8235 | \f |
8236 | /* Return nonzero if the last use of REG | |
8237 | is in an insn following INSN in the same basic block. */ | |
8238 | ||
8239 | static int | |
8240 | last_use_this_basic_block (reg, insn) | |
8241 | rtx reg; | |
8242 | rtx insn; | |
8243 | { | |
8244 | rtx n; | |
8245 | for (n = insn; | |
8246 | n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN; | |
8247 | n = NEXT_INSN (n)) | |
8248 | { | |
b1f21e0a | 8249 | if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n)) |
b4ad7b23 RS |
8250 | return 1; |
8251 | } | |
8252 | return 0; | |
8253 | } | |
8254 | \f | |
8255 | /* Called via `note_stores' to record the initial value of a biv. Here we | |
8256 | just record the location of the set and process it later. */ | |
8257 | ||
8258 | static void | |
84832317 | 8259 | record_initial (dest, set, data) |
b4ad7b23 RS |
8260 | rtx dest; |
8261 | rtx set; | |
84832317 | 8262 | void *data ATTRIBUTE_UNUSED; |
b4ad7b23 | 8263 | { |
ed5bb68d | 8264 | struct loop_ivs *ivs = (struct loop_ivs *) data; |
b4ad7b23 RS |
8265 | struct iv_class *bl; |
8266 | ||
8267 | if (GET_CODE (dest) != REG | |
86fee241 | 8268 | || REGNO (dest) >= ivs->n_regs |
ed5bb68d | 8269 | || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT) |
b4ad7b23 RS |
8270 | return; |
8271 | ||
8b634749 | 8272 | bl = REG_IV_CLASS (ivs, REGNO (dest)); |
b4ad7b23 RS |
8273 | |
8274 | /* If this is the first set found, record it. */ | |
8275 | if (bl->init_insn == 0) | |
8276 | { | |
8277 | bl->init_insn = note_insn; | |
8278 | bl->init_set = set; | |
8279 | } | |
8280 | } | |
8281 | \f | |
8282 | /* If any of the registers in X are "old" and currently have a last use earlier | |
8283 | than INSN, update them to have a last use of INSN. Their actual last use | |
8284 | will be the previous insn but it will not have a valid uid_luid so we can't | |
96a45535 | 8285 | use it. X must be a source expression only. */ |
b4ad7b23 RS |
8286 | |
8287 | static void | |
8288 | update_reg_last_use (x, insn) | |
8289 | rtx x; | |
8290 | rtx insn; | |
8291 | { | |
8292 | /* Check for the case where INSN does not have a valid luid. In this case, | |
8293 | there is no need to modify the regno_last_uid, as this can only happen | |
8294 | when code is inserted after the loop_end to set a pseudo's final value, | |
96a45535 MH |
8295 | and hence this insn will never be the last use of x. |
8296 | ???? This comment is not correct. See for example loop_givs_reduce. | |
8297 | This may insert an insn before another new insn. */ | |
b4ad7b23 RS |
8298 | if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop |
8299 | && INSN_UID (insn) < max_uid_for_loop | |
8529a489 | 8300 | && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn)) |
96a45535 MH |
8301 | { |
8302 | REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn); | |
8303 | } | |
b4ad7b23 RS |
8304 | else |
8305 | { | |
8306 | register int i, j; | |
6f7d635c | 8307 | register const char *fmt = GET_RTX_FORMAT (GET_CODE (x)); |
b4ad7b23 RS |
8308 | for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) |
8309 | { | |
8310 | if (fmt[i] == 'e') | |
8311 | update_reg_last_use (XEXP (x, i), insn); | |
8312 | else if (fmt[i] == 'E') | |
8313 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
8314 | update_reg_last_use (XVECEXP (x, i, j), insn); | |
8315 | } | |
8316 | } | |
8317 | } | |
8318 | \f | |
a49a6a68 JW |
8319 | /* Given an insn INSN and condition COND, return the condition in a |
8320 | canonical form to simplify testing by callers. Specifically: | |
b4ad7b23 RS |
8321 | |
8322 | (1) The code will always be a comparison operation (EQ, NE, GT, etc.). | |
8323 | (2) Both operands will be machine operands; (cc0) will have been replaced. | |
8324 | (3) If an operand is a constant, it will be the second operand. | |
8325 | (4) (LE x const) will be replaced with (LT x <const+1>) and similarly | |
a49a6a68 JW |
8326 | for GE, GEU, and LEU. |
8327 | ||
8328 | If the condition cannot be understood, or is an inequality floating-point | |
8329 | comparison which needs to be reversed, 0 will be returned. | |
8330 | ||
8331 | If REVERSE is non-zero, then reverse the condition prior to canonizing it. | |
8332 | ||
8333 | If EARLIEST is non-zero, it is a pointer to a place where the earliest | |
8334 | insn used in locating the condition was found. If a replacement test | |
8335 | of the condition is desired, it should be placed in front of that | |
10f13594 RH |
8336 | insn and we will be sure that the inputs are still valid. |
8337 | ||
8338 | If WANT_REG is non-zero, we wish the condition to be relative to that | |
8339 | register, if possible. Therefore, do not canonicalize the condition | |
8340 | further. */ | |
b4ad7b23 RS |
8341 | |
8342 | rtx | |
10f13594 | 8343 | canonicalize_condition (insn, cond, reverse, earliest, want_reg) |
a49a6a68 JW |
8344 | rtx insn; |
8345 | rtx cond; | |
8346 | int reverse; | |
b4ad7b23 | 8347 | rtx *earliest; |
10f13594 | 8348 | rtx want_reg; |
b4ad7b23 RS |
8349 | { |
8350 | enum rtx_code code; | |
a49a6a68 | 8351 | rtx prev = insn; |
b4ad7b23 RS |
8352 | rtx set; |
8353 | rtx tem; | |
8354 | rtx op0, op1; | |
8355 | int reverse_code = 0; | |
f283421d | 8356 | enum machine_mode mode; |
b4ad7b23 | 8357 | |
a49a6a68 JW |
8358 | code = GET_CODE (cond); |
8359 | mode = GET_MODE (cond); | |
8360 | op0 = XEXP (cond, 0); | |
8361 | op1 = XEXP (cond, 1); | |
b4ad7b23 | 8362 | |
a49a6a68 | 8363 | if (reverse) |
c9212f33 JH |
8364 | code = reversed_comparison_code (cond, insn); |
8365 | if (code == UNKNOWN) | |
8366 | return 0; | |
b4ad7b23 RS |
8367 | |
8368 | if (earliest) | |
a49a6a68 | 8369 | *earliest = insn; |
b4ad7b23 RS |
8370 | |
8371 | /* If we are comparing a register with zero, see if the register is set | |
8372 | in the previous insn to a COMPARE or a comparison operation. Perform | |
8373 | the same tests as a function of STORE_FLAG_VALUE as find_comparison_args | |
8374 | in cse.c */ | |
8375 | ||
10f13594 | 8376 | while (GET_RTX_CLASS (code) == '<' |
fd5d5b07 | 8377 | && op1 == CONST0_RTX (GET_MODE (op0)) |
10f13594 | 8378 | && op0 != want_reg) |
b4ad7b23 RS |
8379 | { |
8380 | /* Set non-zero when we find something of interest. */ | |
8381 | rtx x = 0; | |
8382 | ||
8383 | #ifdef HAVE_cc0 | |
8384 | /* If comparison with cc0, import actual comparison from compare | |
8385 | insn. */ | |
8386 | if (op0 == cc0_rtx) | |
8387 | { | |
8388 | if ((prev = prev_nonnote_insn (prev)) == 0 | |
8389 | || GET_CODE (prev) != INSN | |
8390 | || (set = single_set (prev)) == 0 | |
8391 | || SET_DEST (set) != cc0_rtx) | |
8392 | return 0; | |
8393 | ||
8394 | op0 = SET_SRC (set); | |
8395 | op1 = CONST0_RTX (GET_MODE (op0)); | |
8396 | if (earliest) | |
8397 | *earliest = prev; | |
8398 | } | |
8399 | #endif | |
8400 | ||
8401 | /* If this is a COMPARE, pick up the two things being compared. */ | |
8402 | if (GET_CODE (op0) == COMPARE) | |
8403 | { | |
8404 | op1 = XEXP (op0, 1); | |
8405 | op0 = XEXP (op0, 0); | |
8406 | continue; | |
8407 | } | |
8408 | else if (GET_CODE (op0) != REG) | |
8409 | break; | |
8410 | ||
8411 | /* Go back to the previous insn. Stop if it is not an INSN. We also | |
8412 | stop if it isn't a single set or if it has a REG_INC note because | |
8413 | we don't want to bother dealing with it. */ | |
8414 | ||
8415 | if ((prev = prev_nonnote_insn (prev)) == 0 | |
8416 | || GET_CODE (prev) != INSN | |
c9212f33 JH |
8417 | || FIND_REG_INC_NOTE (prev, 0)) |
8418 | break; | |
8419 | ||
8420 | set = set_of (op0, prev); | |
8421 | ||
8422 | if (set | |
8423 | && (GET_CODE (set) != SET | |
8424 | || !rtx_equal_p (SET_DEST (set), op0))) | |
b4ad7b23 RS |
8425 | break; |
8426 | ||
8427 | /* If this is setting OP0, get what it sets it to if it looks | |
8428 | relevant. */ | |
c9212f33 | 8429 | if (set) |
b4ad7b23 | 8430 | { |
6d90e7c0 | 8431 | enum machine_mode inner_mode = GET_MODE (SET_DEST (set)); |
b4ad7b23 | 8432 | |
f283421d RH |
8433 | /* ??? We may not combine comparisons done in a CCmode with |
8434 | comparisons not done in a CCmode. This is to aid targets | |
8435 | like Alpha that have an IEEE compliant EQ instruction, and | |
8436 | a non-IEEE compliant BEQ instruction. The use of CCmode is | |
8437 | actually artificial, simply to prevent the combination, but | |
12f289ac JW |
8438 | should not affect other platforms. |
8439 | ||
8440 | However, we must allow VOIDmode comparisons to match either | |
8441 | CCmode or non-CCmode comparison, because some ports have | |
8442 | modeless comparisons inside branch patterns. | |
8443 | ||
8444 | ??? This mode check should perhaps look more like the mode check | |
8445 | in simplify_comparison in combine. */ | |
f283421d | 8446 | |
b4ad7b23 | 8447 | if ((GET_CODE (SET_SRC (set)) == COMPARE |
b565a316 RK |
8448 | || (((code == NE |
8449 | || (code == LT | |
8450 | && GET_MODE_CLASS (inner_mode) == MODE_INT | |
5fd8383e RK |
8451 | && (GET_MODE_BITSIZE (inner_mode) |
8452 | <= HOST_BITS_PER_WIDE_INT) | |
b565a316 | 8453 | && (STORE_FLAG_VALUE |
5fd8383e RK |
8454 | & ((HOST_WIDE_INT) 1 |
8455 | << (GET_MODE_BITSIZE (inner_mode) - 1)))) | |
b565a316 RK |
8456 | #ifdef FLOAT_STORE_FLAG_VALUE |
8457 | || (code == LT | |
8458 | && GET_MODE_CLASS (inner_mode) == MODE_FLOAT | |
12530dbe RH |
8459 | && (REAL_VALUE_NEGATIVE |
8460 | (FLOAT_STORE_FLAG_VALUE (inner_mode)))) | |
b565a316 RK |
8461 | #endif |
8462 | )) | |
f283421d | 8463 | && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')) |
12f289ac JW |
8464 | && (((GET_MODE_CLASS (mode) == MODE_CC) |
8465 | == (GET_MODE_CLASS (inner_mode) == MODE_CC)) | |
8466 | || mode == VOIDmode || inner_mode == VOIDmode)) | |
b4ad7b23 | 8467 | x = SET_SRC (set); |
b565a316 RK |
8468 | else if (((code == EQ |
8469 | || (code == GE | |
5fd8383e RK |
8470 | && (GET_MODE_BITSIZE (inner_mode) |
8471 | <= HOST_BITS_PER_WIDE_INT) | |
b565a316 RK |
8472 | && GET_MODE_CLASS (inner_mode) == MODE_INT |
8473 | && (STORE_FLAG_VALUE | |
5fd8383e RK |
8474 | & ((HOST_WIDE_INT) 1 |
8475 | << (GET_MODE_BITSIZE (inner_mode) - 1)))) | |
b565a316 RK |
8476 | #ifdef FLOAT_STORE_FLAG_VALUE |
8477 | || (code == GE | |
8478 | && GET_MODE_CLASS (inner_mode) == MODE_FLOAT | |
12530dbe RH |
8479 | && (REAL_VALUE_NEGATIVE |
8480 | (FLOAT_STORE_FLAG_VALUE (inner_mode)))) | |
fb8ca0a4 | 8481 | #endif |
b565a316 | 8482 | )) |
f283421d | 8483 | && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<' |
e6fcb60d | 8484 | && (((GET_MODE_CLASS (mode) == MODE_CC) |
12f289ac JW |
8485 | == (GET_MODE_CLASS (inner_mode) == MODE_CC)) |
8486 | || mode == VOIDmode || inner_mode == VOIDmode)) | |
8487 | ||
b4ad7b23 | 8488 | { |
b4ad7b23 RS |
8489 | reverse_code = 1; |
8490 | x = SET_SRC (set); | |
8491 | } | |
71ef37f6 RK |
8492 | else |
8493 | break; | |
b4ad7b23 RS |
8494 | } |
8495 | ||
8496 | else if (reg_set_p (op0, prev)) | |
8497 | /* If this sets OP0, but not directly, we have to give up. */ | |
8498 | break; | |
8499 | ||
8500 | if (x) | |
8501 | { | |
8502 | if (GET_RTX_CLASS (GET_CODE (x)) == '<') | |
8503 | code = GET_CODE (x); | |
8504 | if (reverse_code) | |
8505 | { | |
c9212f33 | 8506 | code = reversed_comparison_code (x, prev); |
1eb8759b RH |
8507 | if (code == UNKNOWN) |
8508 | return 0; | |
b4ad7b23 RS |
8509 | reverse_code = 0; |
8510 | } | |
8511 | ||
8512 | op0 = XEXP (x, 0), op1 = XEXP (x, 1); | |
8513 | if (earliest) | |
8514 | *earliest = prev; | |
8515 | } | |
8516 | } | |
8517 | ||
8518 | /* If constant is first, put it last. */ | |
8519 | if (CONSTANT_P (op0)) | |
8520 | code = swap_condition (code), tem = op0, op0 = op1, op1 = tem; | |
8521 | ||
8522 | /* If OP0 is the result of a comparison, we weren't able to find what | |
8523 | was really being compared, so fail. */ | |
8524 | if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC) | |
8525 | return 0; | |
8526 | ||
d8cfa4ee RK |
8527 | /* Canonicalize any ordered comparison with integers involving equality |
8528 | if we can do computations in the relevant mode and we do not | |
8529 | overflow. */ | |
8530 | ||
8531 | if (GET_CODE (op1) == CONST_INT | |
8532 | && GET_MODE (op0) != VOIDmode | |
8533 | && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT) | |
b4ad7b23 | 8534 | { |
5fd8383e RK |
8535 | HOST_WIDE_INT const_val = INTVAL (op1); |
8536 | unsigned HOST_WIDE_INT uconst_val = const_val; | |
d8cfa4ee RK |
8537 | unsigned HOST_WIDE_INT max_val |
8538 | = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0)); | |
b4ad7b23 RS |
8539 | |
8540 | switch (code) | |
d8cfa4ee RK |
8541 | { |
8542 | case LE: | |
e51712db | 8543 | if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1) |
e6fcb60d | 8544 | code = LT, op1 = GEN_INT (const_val + 1); |
d8cfa4ee | 8545 | break; |
b4ad7b23 | 8546 | |
460f50dc R |
8547 | /* When cross-compiling, const_val might be sign-extended from |
8548 | BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */ | |
d8cfa4ee | 8549 | case GE: |
e51712db | 8550 | if ((HOST_WIDE_INT) (const_val & max_val) |
d8cfa4ee RK |
8551 | != (((HOST_WIDE_INT) 1 |
8552 | << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1)))) | |
8553 | code = GT, op1 = GEN_INT (const_val - 1); | |
8554 | break; | |
b4ad7b23 | 8555 | |
d8cfa4ee | 8556 | case LEU: |
460f50dc | 8557 | if (uconst_val < max_val) |
d8cfa4ee RK |
8558 | code = LTU, op1 = GEN_INT (uconst_val + 1); |
8559 | break; | |
b4ad7b23 | 8560 | |
d8cfa4ee RK |
8561 | case GEU: |
8562 | if (uconst_val != 0) | |
8563 | code = GTU, op1 = GEN_INT (uconst_val - 1); | |
8564 | break; | |
e9a25f70 JL |
8565 | |
8566 | default: | |
8567 | break; | |
d8cfa4ee | 8568 | } |
b4ad7b23 RS |
8569 | } |
8570 | ||
b4ad7b23 RS |
8571 | #ifdef HAVE_cc0 |
8572 | /* Never return CC0; return zero instead. */ | |
8573 | if (op0 == cc0_rtx) | |
8574 | return 0; | |
8575 | #endif | |
8576 | ||
38a448ca | 8577 | return gen_rtx_fmt_ee (code, VOIDmode, op0, op1); |
b4ad7b23 RS |
8578 | } |
8579 | ||
a49a6a68 JW |
8580 | /* Given a jump insn JUMP, return the condition that will cause it to branch |
8581 | to its JUMP_LABEL. If the condition cannot be understood, or is an | |
8582 | inequality floating-point comparison which needs to be reversed, 0 will | |
8583 | be returned. | |
8584 | ||
8585 | If EARLIEST is non-zero, it is a pointer to a place where the earliest | |
8586 | insn used in locating the condition was found. If a replacement test | |
8587 | of the condition is desired, it should be placed in front of that | |
8588 | insn and we will be sure that the inputs are still valid. */ | |
8589 | ||
8590 | rtx | |
8591 | get_condition (jump, earliest) | |
8592 | rtx jump; | |
8593 | rtx *earliest; | |
8594 | { | |
8595 | rtx cond; | |
8596 | int reverse; | |
7f1c097d | 8597 | rtx set; |
a49a6a68 JW |
8598 | |
8599 | /* If this is not a standard conditional jump, we can't parse it. */ | |
8600 | if (GET_CODE (jump) != JUMP_INSN | |
7f1c097d | 8601 | || ! any_condjump_p (jump)) |
a49a6a68 | 8602 | return 0; |
7f1c097d | 8603 | set = pc_set (jump); |
a49a6a68 | 8604 | |
7f1c097d | 8605 | cond = XEXP (SET_SRC (set), 0); |
a49a6a68 JW |
8606 | |
8607 | /* If this branches to JUMP_LABEL when the condition is false, reverse | |
8608 | the condition. */ | |
8609 | reverse | |
7f1c097d JH |
8610 | = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF |
8611 | && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump); | |
a49a6a68 | 8612 | |
10f13594 | 8613 | return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX); |
a49a6a68 JW |
8614 | } |
8615 | ||
b4ad7b23 RS |
8616 | /* Similar to above routine, except that we also put an invariant last |
8617 | unless both operands are invariants. */ | |
8618 | ||
8619 | rtx | |
0534b804 MH |
8620 | get_condition_for_loop (loop, x) |
8621 | const struct loop *loop; | |
b4ad7b23 RS |
8622 | rtx x; |
8623 | { | |
6496a589 | 8624 | rtx comparison = get_condition (x, (rtx*)0); |
b4ad7b23 RS |
8625 | |
8626 | if (comparison == 0 | |
0534b804 MH |
8627 | || ! loop_invariant_p (loop, XEXP (comparison, 0)) |
8628 | || loop_invariant_p (loop, XEXP (comparison, 1))) | |
b4ad7b23 RS |
8629 | return comparison; |
8630 | ||
38a448ca RH |
8631 | return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode, |
8632 | XEXP (comparison, 1), XEXP (comparison, 0)); | |
b4ad7b23 | 8633 | } |
8c660648 | 8634 | |
2a1777af | 8635 | /* Scan the function and determine whether it has indirect (computed) jumps. |
8c660648 | 8636 | |
2a1777af JL |
8637 | This is taken mostly from flow.c; similar code exists elsewhere |
8638 | in the compiler. It may be useful to put this into rtlanal.c. */ | |
8c660648 JL |
8639 | static int |
8640 | indirect_jump_in_function_p (start) | |
8641 | rtx start; | |
8642 | { | |
8643 | rtx insn; | |
8c660648 | 8644 | |
2a1777af JL |
8645 | for (insn = start; insn; insn = NEXT_INSN (insn)) |
8646 | if (computed_jump_p (insn)) | |
8647 | return 1; | |
7019d00e L |
8648 | |
8649 | return 0; | |
8c660648 | 8650 | } |
41a972a9 MM |
8651 | |
8652 | /* Add MEM to the LOOP_MEMS array, if appropriate. See the | |
8653 | documentation for LOOP_MEMS for the definition of `appropriate'. | |
8654 | This function is called from prescan_loop via for_each_rtx. */ | |
8655 | ||
8656 | static int | |
8657 | insert_loop_mem (mem, data) | |
8658 | rtx *mem; | |
e51712db | 8659 | void *data ATTRIBUTE_UNUSED; |
41a972a9 | 8660 | { |
afa1738b | 8661 | struct loop_info *loop_info = data; |
41a972a9 MM |
8662 | int i; |
8663 | rtx m = *mem; | |
8664 | ||
8665 | if (m == NULL_RTX) | |
8666 | return 0; | |
8667 | ||
8668 | switch (GET_CODE (m)) | |
8669 | { | |
8670 | case MEM: | |
8671 | break; | |
8672 | ||
27114460 RH |
8673 | case CLOBBER: |
8674 | /* We're not interested in MEMs that are only clobbered. */ | |
8675 | return -1; | |
8676 | ||
41a972a9 MM |
8677 | case CONST_DOUBLE: |
8678 | /* We're not interested in the MEM associated with a | |
8679 | CONST_DOUBLE, so there's no need to traverse into this. */ | |
8680 | return -1; | |
8681 | ||
4ce580a2 RE |
8682 | case EXPR_LIST: |
8683 | /* We're not interested in any MEMs that only appear in notes. */ | |
8684 | return -1; | |
8685 | ||
41a972a9 MM |
8686 | default: |
8687 | /* This is not a MEM. */ | |
8688 | return 0; | |
8689 | } | |
8690 | ||
8691 | /* See if we've already seen this MEM. */ | |
afa1738b MH |
8692 | for (i = 0; i < loop_info->mems_idx; ++i) |
8693 | if (rtx_equal_p (m, loop_info->mems[i].mem)) | |
41a972a9 | 8694 | { |
afa1738b | 8695 | if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem)) |
41a972a9 MM |
8696 | /* The modes of the two memory accesses are different. If |
8697 | this happens, something tricky is going on, and we just | |
8698 | don't optimize accesses to this MEM. */ | |
afa1738b | 8699 | loop_info->mems[i].optimize = 0; |
41a972a9 MM |
8700 | |
8701 | return 0; | |
8702 | } | |
8703 | ||
8704 | /* Resize the array, if necessary. */ | |
afa1738b | 8705 | if (loop_info->mems_idx == loop_info->mems_allocated) |
41a972a9 | 8706 | { |
afa1738b MH |
8707 | if (loop_info->mems_allocated != 0) |
8708 | loop_info->mems_allocated *= 2; | |
41a972a9 | 8709 | else |
afa1738b | 8710 | loop_info->mems_allocated = 32; |
41a972a9 | 8711 | |
fd5d5b07 | 8712 | loop_info->mems = (loop_mem_info *) |
afa1738b MH |
8713 | xrealloc (loop_info->mems, |
8714 | loop_info->mems_allocated * sizeof (loop_mem_info)); | |
41a972a9 MM |
8715 | } |
8716 | ||
8717 | /* Actually insert the MEM. */ | |
afa1738b | 8718 | loop_info->mems[loop_info->mems_idx].mem = m; |
41a972a9 MM |
8719 | /* We can't hoist this MEM out of the loop if it's a BLKmode MEM |
8720 | because we can't put it in a register. We still store it in the | |
8721 | table, though, so that if we see the same address later, but in a | |
8722 | non-BLK mode, we'll not think we can optimize it at that point. */ | |
afa1738b MH |
8723 | loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode); |
8724 | loop_info->mems[loop_info->mems_idx].reg = NULL_RTX; | |
8725 | ++loop_info->mems_idx; | |
8deb8e2c MM |
8726 | |
8727 | return 0; | |
41a972a9 MM |
8728 | } |
8729 | ||
1d7ae250 MH |
8730 | |
8731 | /* Allocate REGS->ARRAY or reallocate it if it is too small. | |
8732 | ||
8733 | Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each | |
8734 | register that is modified by an insn between FROM and TO. If the | |
8735 | value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or | |
8736 | more, stop incrementing it, to avoid overflow. | |
8737 | ||
8738 | Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which | |
8739 | register I is used, if it is only used once. Otherwise, it is set | |
8740 | to 0 (for no uses) or const0_rtx for more than one use. This | |
8741 | parameter may be zero, in which case this processing is not done. | |
8742 | ||
8743 | Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not | |
28680540 | 8744 | optimize register I. */ |
41a972a9 MM |
8745 | |
8746 | static void | |
28680540 | 8747 | loop_regs_scan (loop, extra_size) |
a2be868f | 8748 | const struct loop *loop; |
1d7ae250 | 8749 | int extra_size; |
41a972a9 | 8750 | { |
1ecd860b | 8751 | struct loop_regs *regs = LOOP_REGS (loop); |
1d7ae250 MH |
8752 | int old_nregs; |
8753 | /* last_set[n] is nonzero iff reg n has been set in the current | |
8754 | basic block. In that case, it is the insn that last set reg n. */ | |
8755 | rtx *last_set; | |
8756 | rtx insn; | |
1d7ae250 | 8757 | int i; |
41a972a9 | 8758 | |
1d7ae250 MH |
8759 | old_nregs = regs->num; |
8760 | regs->num = max_reg_num (); | |
e6fcb60d | 8761 | |
1d7ae250 MH |
8762 | /* Grow the regs array if not allocated or too small. */ |
8763 | if (regs->num >= regs->size) | |
41a972a9 | 8764 | { |
1d7ae250 MH |
8765 | regs->size = regs->num + extra_size; |
8766 | ||
8767 | regs->array = (struct loop_reg *) | |
8768 | xrealloc (regs->array, regs->size * sizeof (*regs->array)); | |
8769 | ||
8770 | /* Zero the new elements. */ | |
8771 | memset (regs->array + old_nregs, 0, | |
8772 | (regs->size - old_nregs) * sizeof (*regs->array)); | |
8773 | } | |
41a972a9 | 8774 | |
1d7ae250 MH |
8775 | /* Clear previously scanned fields but do not clear n_times_set. */ |
8776 | for (i = 0; i < old_nregs; i++) | |
8777 | { | |
8778 | regs->array[i].set_in_loop = 0; | |
8779 | regs->array[i].may_not_optimize = 0; | |
8780 | regs->array[i].single_usage = NULL_RTX; | |
8781 | } | |
8782 | ||
8783 | last_set = (rtx *) xcalloc (regs->num, sizeof (rtx)); | |
f1d4ac80 | 8784 | |
1d7ae250 MH |
8785 | /* Scan the loop, recording register usage. */ |
8786 | for (insn = loop->top ? loop->top : loop->start; insn != loop->end; | |
8787 | insn = NEXT_INSN (insn)) | |
8788 | { | |
8789 | if (INSN_P (insn)) | |
f1d4ac80 | 8790 | { |
1d7ae250 MH |
8791 | /* Record registers that have exactly one use. */ |
8792 | find_single_use_in_loop (regs, insn, PATTERN (insn)); | |
587f56c2 | 8793 | |
1d7ae250 MH |
8794 | /* Include uses in REG_EQUAL notes. */ |
8795 | if (REG_NOTES (insn)) | |
8796 | find_single_use_in_loop (regs, insn, REG_NOTES (insn)); | |
41a972a9 | 8797 | |
1d7ae250 MH |
8798 | if (GET_CODE (PATTERN (insn)) == SET |
8799 | || GET_CODE (PATTERN (insn)) == CLOBBER) | |
8800 | count_one_set (regs, insn, PATTERN (insn), last_set); | |
8801 | else if (GET_CODE (PATTERN (insn)) == PARALLEL) | |
8802 | { | |
8803 | register int i; | |
8804 | for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--) | |
8805 | count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i), | |
8806 | last_set); | |
8807 | } | |
8deb8e2c | 8808 | } |
41a972a9 | 8809 | |
1d7ae250 MH |
8810 | if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN) |
8811 | memset (last_set, 0, regs->num * sizeof (rtx)); | |
8812 | } | |
41a972a9 | 8813 | |
1d7ae250 MH |
8814 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
8815 | { | |
8816 | regs->array[i].may_not_optimize = 1; | |
8817 | regs->array[i].set_in_loop = 1; | |
8818 | } | |
e6fcb60d | 8819 | |
dd0208b9 | 8820 | #ifdef AVOID_CCMODE_COPIES |
1d7ae250 MH |
8821 | /* Don't try to move insns which set CC registers if we should not |
8822 | create CCmode register copies. */ | |
8823 | for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--) | |
8824 | if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC) | |
8825 | regs->array[i].may_not_optimize = 1; | |
dd0208b9 | 8826 | #endif |
1d7ae250 MH |
8827 | |
8828 | /* Set regs->array[I].n_times_set for the new registers. */ | |
8829 | for (i = old_nregs; i < regs->num; i++) | |
8830 | regs->array[i].n_times_set = regs->array[i].set_in_loop; | |
dd0208b9 | 8831 | |
1d7ae250 | 8832 | free (last_set); |
41a972a9 MM |
8833 | } |
8834 | ||
28680540 MM |
8835 | /* Returns the number of real INSNs in the LOOP. */ |
8836 | ||
8837 | static int | |
8838 | count_insns_in_loop (loop) | |
8839 | const struct loop *loop; | |
8840 | { | |
8841 | int count = 0; | |
8842 | rtx insn; | |
8843 | ||
8844 | for (insn = loop->top ? loop->top : loop->start; insn != loop->end; | |
8845 | insn = NEXT_INSN (insn)) | |
8846 | if (INSN_P (insn)) | |
8847 | ++count; | |
8848 | ||
8849 | return count; | |
8850 | } | |
1d7ae250 | 8851 | |
a2be868f | 8852 | /* Move MEMs into registers for the duration of the loop. */ |
41a972a9 MM |
8853 | |
8854 | static void | |
a2be868f MH |
8855 | load_mems (loop) |
8856 | const struct loop *loop; | |
41a972a9 | 8857 | { |
afa1738b | 8858 | struct loop_info *loop_info = LOOP_INFO (loop); |
1ecd860b | 8859 | struct loop_regs *regs = LOOP_REGS (loop); |
41a972a9 MM |
8860 | int maybe_never = 0; |
8861 | int i; | |
1757e774 | 8862 | rtx p, prev_ebb_head; |
41a972a9 | 8863 | rtx label = NULL_RTX; |
48c4d691 | 8864 | rtx end_label; |
328f4006 BS |
8865 | /* Nonzero if the next instruction may never be executed. */ |
8866 | int next_maybe_never = 0; | |
616fde53 | 8867 | unsigned int last_max_reg = max_reg_num (); |
41a972a9 | 8868 | |
afa1738b | 8869 | if (loop_info->mems_idx == 0) |
328f4006 | 8870 | return; |
41a972a9 | 8871 | |
48c4d691 JJ |
8872 | /* We cannot use next_label here because it skips over normal insns. */ |
8873 | end_label = next_nonnote_insn (loop->end); | |
8874 | if (end_label && GET_CODE (end_label) != CODE_LABEL) | |
8875 | end_label = NULL_RTX; | |
eab5c70a | 8876 | |
48c4d691 JJ |
8877 | /* Check to see if it's possible that some instructions in the loop are |
8878 | never executed. Also check if there is a goto out of the loop other | |
8879 | than right after the end of the loop. */ | |
e6fcb60d | 8880 | for (p = next_insn_in_loop (loop, loop->scan_start); |
17e2b3cb | 8881 | p != NULL_RTX; |
a2be868f | 8882 | p = next_insn_in_loop (loop, p)) |
328f4006 BS |
8883 | { |
8884 | if (GET_CODE (p) == CODE_LABEL) | |
8885 | maybe_never = 1; | |
8886 | else if (GET_CODE (p) == JUMP_INSN | |
8887 | /* If we enter the loop in the middle, and scan | |
8888 | around to the beginning, don't set maybe_never | |
8889 | for that. This must be an unconditional jump, | |
8890 | otherwise the code at the top of the loop might | |
8891 | never be executed. Unconditional jumps are | |
8892 | followed a by barrier then loop end. */ | |
e6fcb60d | 8893 | && ! (GET_CODE (p) == JUMP_INSN |
a2be868f MH |
8894 | && JUMP_LABEL (p) == loop->top |
8895 | && NEXT_INSN (NEXT_INSN (p)) == loop->end | |
7f1c097d | 8896 | && any_uncondjump_p (p))) |
41a972a9 | 8897 | { |
48c4d691 JJ |
8898 | /* If this is a jump outside of the loop but not right |
8899 | after the end of the loop, we would have to emit new fixup | |
8900 | sequences for each such label. */ | |
13c502cd MM |
8901 | if (/* If we can't tell where control might go when this |
8902 | JUMP_INSN is executed, we must be conservative. */ | |
8903 | !JUMP_LABEL (p) | |
8904 | || (JUMP_LABEL (p) != end_label | |
8905 | && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop | |
8906 | || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start) | |
8907 | || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end)))) | |
48c4d691 JJ |
8908 | return; |
8909 | ||
7f1c097d | 8910 | if (!any_condjump_p (p)) |
328f4006 | 8911 | /* Something complicated. */ |
41a972a9 | 8912 | maybe_never = 1; |
328f4006 BS |
8913 | else |
8914 | /* If there are any more instructions in the loop, they | |
8915 | might not be reached. */ | |
e6fcb60d KH |
8916 | next_maybe_never = 1; |
8917 | } | |
328f4006 BS |
8918 | else if (next_maybe_never) |
8919 | maybe_never = 1; | |
8920 | } | |
8921 | ||
48c4d691 JJ |
8922 | /* Find start of the extended basic block that enters the loop. */ |
8923 | for (p = loop->start; | |
8924 | PREV_INSN (p) && GET_CODE (p) != CODE_LABEL; | |
8925 | p = PREV_INSN (p)) | |
8926 | ; | |
1757e774 | 8927 | prev_ebb_head = p; |
48c4d691 JJ |
8928 | |
8929 | cselib_init (); | |
8930 | ||
8931 | /* Build table of mems that get set to constant values before the | |
8932 | loop. */ | |
8933 | for (; p != loop->start; p = NEXT_INSN (p)) | |
8934 | cselib_process_insn (p); | |
8935 | ||
328f4006 | 8936 | /* Actually move the MEMs. */ |
afa1738b | 8937 | for (i = 0; i < loop_info->mems_idx; ++i) |
328f4006 | 8938 | { |
d2335c24 MH |
8939 | regset_head load_copies; |
8940 | regset_head store_copies; | |
328f4006 BS |
8941 | int written = 0; |
8942 | rtx reg; | |
afa1738b | 8943 | rtx mem = loop_info->mems[i].mem; |
328f4006 | 8944 | rtx mem_list_entry; |
41a972a9 | 8945 | |
e6fcb60d | 8946 | if (MEM_VOLATILE_P (mem) |
0534b804 | 8947 | || loop_invariant_p (loop, XEXP (mem, 0)) != 1) |
328f4006 | 8948 | /* There's no telling whether or not MEM is modified. */ |
afa1738b | 8949 | loop_info->mems[i].optimize = 0; |
328f4006 BS |
8950 | |
8951 | /* Go through the MEMs written to in the loop to see if this | |
8952 | one is aliased by one of them. */ | |
afa1738b | 8953 | mem_list_entry = loop_info->store_mems; |
328f4006 | 8954 | while (mem_list_entry) |
41a972a9 | 8955 | { |
328f4006 BS |
8956 | if (rtx_equal_p (mem, XEXP (mem_list_entry, 0))) |
8957 | written = 1; | |
8958 | else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode, | |
8959 | mem, rtx_varies_p)) | |
41a972a9 | 8960 | { |
328f4006 | 8961 | /* MEM is indeed aliased by this store. */ |
afa1738b | 8962 | loop_info->mems[i].optimize = 0; |
328f4006 | 8963 | break; |
41a972a9 | 8964 | } |
328f4006 BS |
8965 | mem_list_entry = XEXP (mem_list_entry, 1); |
8966 | } | |
f0b60c1c SM |
8967 | |
8968 | if (flag_float_store && written | |
8969 | && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT) | |
afa1738b | 8970 | loop_info->mems[i].optimize = 0; |
e6fcb60d | 8971 | |
328f4006 | 8972 | /* If this MEM is written to, we must be sure that there |
e6fcb60d | 8973 | are no reads from another MEM that aliases this one. */ |
afa1738b | 8974 | if (loop_info->mems[i].optimize && written) |
328f4006 BS |
8975 | { |
8976 | int j; | |
41a972a9 | 8977 | |
afa1738b | 8978 | for (j = 0; j < loop_info->mems_idx; ++j) |
328f4006 BS |
8979 | { |
8980 | if (j == i) | |
8981 | continue; | |
8982 | else if (true_dependence (mem, | |
8983 | VOIDmode, | |
afa1738b | 8984 | loop_info->mems[j].mem, |
328f4006 | 8985 | rtx_varies_p)) |
41a972a9 | 8986 | { |
afa1738b | 8987 | /* It's not safe to hoist loop_info->mems[i] out of |
328f4006 | 8988 | the loop because writes to it might not be |
afa1738b MH |
8989 | seen by reads from loop_info->mems[j]. */ |
8990 | loop_info->mems[i].optimize = 0; | |
328f4006 | 8991 | break; |
41a972a9 MM |
8992 | } |
8993 | } | |
328f4006 | 8994 | } |
41a972a9 | 8995 | |
328f4006 BS |
8996 | if (maybe_never && may_trap_p (mem)) |
8997 | /* We can't access the MEM outside the loop; it might | |
8998 | cause a trap that wouldn't have happened otherwise. */ | |
afa1738b | 8999 | loop_info->mems[i].optimize = 0; |
e6fcb60d | 9000 | |
afa1738b | 9001 | if (!loop_info->mems[i].optimize) |
328f4006 BS |
9002 | /* We thought we were going to lift this MEM out of the |
9003 | loop, but later discovered that we could not. */ | |
9004 | continue; | |
41a972a9 | 9005 | |
d2335c24 MH |
9006 | INIT_REG_SET (&load_copies); |
9007 | INIT_REG_SET (&store_copies); | |
c29f60c0 | 9008 | |
328f4006 BS |
9009 | /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in |
9010 | order to keep scan_loop from moving stores to this MEM | |
9011 | out of the loop just because this REG is neither a | |
9012 | user-variable nor used in the loop test. */ | |
9013 | reg = gen_reg_rtx (GET_MODE (mem)); | |
9014 | REG_USERVAR_P (reg) = 1; | |
afa1738b | 9015 | loop_info->mems[i].reg = reg; |
328f4006 BS |
9016 | |
9017 | /* Now, replace all references to the MEM with the | |
1757e774 | 9018 | corresponding pseudos. */ |
c29f60c0 | 9019 | maybe_never = 0; |
a2be868f | 9020 | for (p = next_insn_in_loop (loop, loop->scan_start); |
328f4006 | 9021 | p != NULL_RTX; |
a2be868f | 9022 | p = next_insn_in_loop (loop, p)) |
328f4006 | 9023 | { |
2c3c49de | 9024 | if (INSN_P (p)) |
c29f60c0 | 9025 | { |
d2335c24 MH |
9026 | rtx set; |
9027 | ||
9028 | set = single_set (p); | |
9029 | ||
c29f60c0 BS |
9030 | /* See if this copies the mem into a register that isn't |
9031 | modified afterwards. We'll try to do copy propagation | |
9032 | a little further on. */ | |
c29f60c0 BS |
9033 | if (set |
9034 | /* @@@ This test is _way_ too conservative. */ | |
9035 | && ! maybe_never | |
9036 | && GET_CODE (SET_DEST (set)) == REG | |
9037 | && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER | |
9038 | && REGNO (SET_DEST (set)) < last_max_reg | |
f1d4ac80 | 9039 | && regs->array[REGNO (SET_DEST (set))].n_times_set == 1 |
d2335c24 MH |
9040 | && rtx_equal_p (SET_SRC (set), mem)) |
9041 | SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set))); | |
9042 | ||
fd5d5b07 | 9043 | /* See if this copies the mem from a register that isn't |
d2335c24 MH |
9044 | modified afterwards. We'll try to remove the |
9045 | redundant copy later on by doing a little register | |
9046 | renaming and copy propagation. This will help | |
9047 | to untangle things for the BIV detection code. */ | |
fd5d5b07 KH |
9048 | if (set |
9049 | && ! maybe_never | |
9050 | && GET_CODE (SET_SRC (set)) == REG | |
9051 | && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER | |
9052 | && REGNO (SET_SRC (set)) < last_max_reg | |
f1d4ac80 | 9053 | && regs->array[REGNO (SET_SRC (set))].n_times_set == 1 |
fd5d5b07 KH |
9054 | && rtx_equal_p (SET_DEST (set), mem)) |
9055 | SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set))); | |
9056 | ||
9057 | /* Replace the memory reference with the shadow register. */ | |
afa1738b MH |
9058 | replace_loop_mems (p, loop_info->mems[i].mem, |
9059 | loop_info->mems[i].reg); | |
c29f60c0 BS |
9060 | } |
9061 | ||
9062 | if (GET_CODE (p) == CODE_LABEL | |
9063 | || GET_CODE (p) == JUMP_INSN) | |
9064 | maybe_never = 1; | |
328f4006 | 9065 | } |
41a972a9 | 9066 | |
328f4006 BS |
9067 | if (! apply_change_group ()) |
9068 | /* We couldn't replace all occurrences of the MEM. */ | |
afa1738b | 9069 | loop_info->mems[i].optimize = 0; |
328f4006 BS |
9070 | else |
9071 | { | |
0534b804 | 9072 | /* Load the memory immediately before LOOP->START, which is |
328f4006 | 9073 | the NOTE_LOOP_BEG. */ |
eab5c70a BS |
9074 | cselib_val *e = cselib_lookup (mem, VOIDmode, 0); |
9075 | rtx set; | |
9076 | rtx best = mem; | |
9077 | int j; | |
9078 | struct elt_loc_list *const_equiv = 0; | |
9079 | ||
9080 | if (e) | |
9081 | { | |
9082 | struct elt_loc_list *equiv; | |
9083 | struct elt_loc_list *best_equiv = 0; | |
9084 | for (equiv = e->locs; equiv; equiv = equiv->next) | |
9085 | { | |
9086 | if (CONSTANT_P (equiv->loc)) | |
9087 | const_equiv = equiv; | |
28b6b9b2 | 9088 | else if (GET_CODE (equiv->loc) == REG |
1757e774 | 9089 | /* Extending hard register lifetimes causes crash |
28b6b9b2 JH |
9090 | on SRC targets. Doing so on non-SRC is |
9091 | probably also not good idea, since we most | |
9092 | probably have pseudoregister equivalence as | |
9093 | well. */ | |
9094 | && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER) | |
eab5c70a BS |
9095 | best_equiv = equiv; |
9096 | } | |
9097 | /* Use the constant equivalence if that is cheap enough. */ | |
9098 | if (! best_equiv) | |
9099 | best_equiv = const_equiv; | |
9100 | else if (const_equiv | |
9101 | && (rtx_cost (const_equiv->loc, SET) | |
9102 | <= rtx_cost (best_equiv->loc, SET))) | |
9103 | { | |
9104 | best_equiv = const_equiv; | |
9105 | const_equiv = 0; | |
9106 | } | |
9107 | ||
9108 | /* If best_equiv is nonzero, we know that MEM is set to a | |
9109 | constant or register before the loop. We will use this | |
9110 | knowledge to initialize the shadow register with that | |
9111 | constant or reg rather than by loading from MEM. */ | |
9112 | if (best_equiv) | |
9113 | best = copy_rtx (best_equiv->loc); | |
9114 | } | |
1757e774 | 9115 | |
eab5c70a | 9116 | set = gen_move_insn (reg, best); |
804a718a | 9117 | set = loop_insn_hoist (loop, set); |
1757e774 BS |
9118 | if (REG_P (best)) |
9119 | { | |
9120 | for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p)) | |
9121 | if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p)) | |
9122 | { | |
9123 | REGNO_LAST_UID (REGNO (best)) = INSN_UID (set); | |
9124 | break; | |
9125 | } | |
9126 | } | |
9127 | ||
eab5c70a BS |
9128 | if (const_equiv) |
9129 | REG_NOTES (set) = gen_rtx_EXPR_LIST (REG_EQUAL, | |
9130 | copy_rtx (const_equiv->loc), | |
9131 | REG_NOTES (set)); | |
41a972a9 | 9132 | |
328f4006 BS |
9133 | if (written) |
9134 | { | |
9135 | if (label == NULL_RTX) | |
41a972a9 | 9136 | { |
328f4006 | 9137 | label = gen_label_rtx (); |
a2be868f | 9138 | emit_label_after (label, loop->end); |
41a972a9 MM |
9139 | } |
9140 | ||
328f4006 BS |
9141 | /* Store the memory immediately after END, which is |
9142 | the NOTE_LOOP_END. */ | |
e6fcb60d | 9143 | set = gen_move_insn (copy_rtx (mem), reg); |
86e21212 | 9144 | loop_insn_emit_after (loop, 0, label, set); |
328f4006 BS |
9145 | } |
9146 | ||
9147 | if (loop_dump_stream) | |
9148 | { | |
9149 | fprintf (loop_dump_stream, "Hoisted regno %d %s from ", | |
9150 | REGNO (reg), (written ? "r/w" : "r/o")); | |
9151 | print_rtl (loop_dump_stream, mem); | |
9152 | fputc ('\n', loop_dump_stream); | |
41a972a9 | 9153 | } |
c29f60c0 BS |
9154 | |
9155 | /* Attempt a bit of copy propagation. This helps untangle the | |
9156 | data flow, and enables {basic,general}_induction_var to find | |
9157 | more bivs/givs. */ | |
9158 | EXECUTE_IF_SET_IN_REG_SET | |
d2335c24 | 9159 | (&load_copies, FIRST_PSEUDO_REGISTER, j, |
c29f60c0 | 9160 | { |
d2335c24 | 9161 | try_copy_prop (loop, reg, j); |
c29f60c0 | 9162 | }); |
d2335c24 MH |
9163 | CLEAR_REG_SET (&load_copies); |
9164 | ||
9165 | EXECUTE_IF_SET_IN_REG_SET | |
9166 | (&store_copies, FIRST_PSEUDO_REGISTER, j, | |
9167 | { | |
9168 | try_swap_copy_prop (loop, reg, j); | |
9169 | }); | |
9170 | CLEAR_REG_SET (&store_copies); | |
41a972a9 MM |
9171 | } |
9172 | } | |
9173 | ||
48c4d691 | 9174 | if (label != NULL_RTX && end_label != NULL_RTX) |
41a972a9 MM |
9175 | { |
9176 | /* Now, we need to replace all references to the previous exit | |
9177 | label with the new one. */ | |
e6fcb60d | 9178 | rtx_pair rr; |
59d4e481 KGA |
9179 | rr.r1 = end_label; |
9180 | rr.r2 = label; | |
41a972a9 | 9181 | |
a2be868f | 9182 | for (p = loop->start; p != loop->end; p = NEXT_INSN (p)) |
7940acc4 JW |
9183 | { |
9184 | for_each_rtx (&p, replace_label, &rr); | |
9185 | ||
9186 | /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL | |
9187 | field. This is not handled by for_each_rtx because it doesn't | |
9188 | handle unprinted ('0') fields. We need to update JUMP_LABEL | |
9189 | because the immediately following unroll pass will use it. | |
9190 | replace_label would not work anyways, because that only handles | |
9191 | LABEL_REFs. */ | |
9192 | if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == end_label) | |
9193 | JUMP_LABEL (p) = label; | |
9194 | } | |
41a972a9 | 9195 | } |
eab5c70a BS |
9196 | |
9197 | cselib_finish (); | |
41a972a9 MM |
9198 | } |
9199 | ||
8571e492 BS |
9200 | /* For communication between note_reg_stored and its caller. */ |
9201 | struct note_reg_stored_arg | |
9202 | { | |
9203 | int set_seen; | |
9204 | rtx reg; | |
9205 | }; | |
9206 | ||
9207 | /* Called via note_stores, record in SET_SEEN whether X, which is written, | |
9208 | is equal to ARG. */ | |
9209 | static void | |
9210 | note_reg_stored (x, setter, arg) | |
272df862 | 9211 | rtx x, setter ATTRIBUTE_UNUSED; |
8571e492 BS |
9212 | void *arg; |
9213 | { | |
e6fcb60d | 9214 | struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg; |
8571e492 BS |
9215 | if (t->reg == x) |
9216 | t->set_seen = 1; | |
9217 | } | |
9218 | ||
c29f60c0 BS |
9219 | /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT. |
9220 | There must be exactly one insn that sets this pseudo; it will be | |
9221 | deleted if all replacements succeed and we can prove that the register | |
0534b804 | 9222 | is not used after the loop. */ |
770ae6cc | 9223 | |
c29f60c0 | 9224 | static void |
a2be868f MH |
9225 | try_copy_prop (loop, replacement, regno) |
9226 | const struct loop *loop; | |
9227 | rtx replacement; | |
770ae6cc | 9228 | unsigned int regno; |
c29f60c0 | 9229 | { |
8571e492 BS |
9230 | /* This is the reg that we are copying from. */ |
9231 | rtx reg_rtx = regno_reg_rtx[regno]; | |
c29f60c0 BS |
9232 | rtx init_insn = 0; |
9233 | rtx insn; | |
8571e492 BS |
9234 | /* These help keep track of whether we replaced all uses of the reg. */ |
9235 | int replaced_last = 0; | |
9236 | int store_is_first = 0; | |
9237 | ||
a2be868f | 9238 | for (insn = next_insn_in_loop (loop, loop->scan_start); |
c29f60c0 | 9239 | insn != NULL_RTX; |
a2be868f | 9240 | insn = next_insn_in_loop (loop, insn)) |
c29f60c0 BS |
9241 | { |
9242 | rtx set; | |
d42971c4 | 9243 | |
8571e492 BS |
9244 | /* Only substitute within one extended basic block from the initializing |
9245 | insn. */ | |
9246 | if (GET_CODE (insn) == CODE_LABEL && init_insn) | |
9247 | break; | |
d42971c4 | 9248 | |
2c3c49de | 9249 | if (! INSN_P (insn)) |
c29f60c0 | 9250 | continue; |
8571e492 BS |
9251 | |
9252 | /* Is this the initializing insn? */ | |
c29f60c0 BS |
9253 | set = single_set (insn); |
9254 | if (set | |
9255 | && GET_CODE (SET_DEST (set)) == REG | |
9256 | && REGNO (SET_DEST (set)) == regno) | |
9257 | { | |
9258 | if (init_insn) | |
9259 | abort (); | |
8571e492 | 9260 | |
c29f60c0 | 9261 | init_insn = insn; |
8571e492 BS |
9262 | if (REGNO_FIRST_UID (regno) == INSN_UID (insn)) |
9263 | store_is_first = 1; | |
9264 | } | |
9265 | ||
9266 | /* Only substitute after seeing the initializing insn. */ | |
9267 | if (init_insn && insn != init_insn) | |
e6fcb60d | 9268 | { |
8571e492 | 9269 | struct note_reg_stored_arg arg; |
8571e492 | 9270 | |
afa1738b | 9271 | replace_loop_regs (insn, reg_rtx, replacement); |
8571e492 BS |
9272 | if (REGNO_LAST_UID (regno) == INSN_UID (insn)) |
9273 | replaced_last = 1; | |
9274 | ||
9275 | /* Stop replacing when REPLACEMENT is modified. */ | |
9276 | arg.reg = replacement; | |
9277 | arg.set_seen = 0; | |
9278 | note_stores (PATTERN (insn), note_reg_stored, &arg); | |
9279 | if (arg.set_seen) | |
f1330226 JH |
9280 | { |
9281 | rtx note = find_reg_note (insn, REG_EQUAL, NULL); | |
9282 | ||
9283 | /* It is possible that we've turned previously valid REG_EQUAL to | |
9284 | invalid, as we change the REGNO to REPLACEMENT and unlike REGNO, | |
9285 | REPLACEMENT is modified, we get different meaning. */ | |
9286 | if (note && reg_mentioned_p (replacement, XEXP (note, 0))) | |
9287 | remove_note (insn, note); | |
9288 | break; | |
9289 | } | |
c29f60c0 | 9290 | } |
c29f60c0 BS |
9291 | } |
9292 | if (! init_insn) | |
9293 | abort (); | |
9294 | if (apply_change_group ()) | |
9295 | { | |
8571e492 BS |
9296 | if (loop_dump_stream) |
9297 | fprintf (loop_dump_stream, " Replaced reg %d", regno); | |
9298 | if (store_is_first && replaced_last) | |
c29f60c0 | 9299 | { |
e8c8470b MM |
9300 | rtx first; |
9301 | rtx retval_note; | |
9302 | ||
9303 | /* Assume we're just deleting INIT_INSN. */ | |
9304 | first = init_insn; | |
9305 | /* Look for REG_RETVAL note. If we're deleting the end of | |
9306 | the libcall sequence, the whole sequence can go. */ | |
9307 | retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX); | |
9308 | /* If we found a REG_RETVAL note, find the first instruction | |
9309 | in the sequence. */ | |
9310 | if (retval_note) | |
9311 | first = XEXP (retval_note, 0); | |
9312 | ||
9313 | /* Delete the instructions. */ | |
9314 | loop_delete_insns (first, init_insn); | |
c29f60c0 BS |
9315 | } |
9316 | if (loop_dump_stream) | |
8571e492 | 9317 | fprintf (loop_dump_stream, ".\n"); |
c29f60c0 BS |
9318 | } |
9319 | } | |
9320 | ||
e8c8470b MM |
9321 | /* Replace all the instructions from FIRST up to and including LAST |
9322 | with NOTE_INSN_DELETED notes. */ | |
9323 | ||
9324 | static void | |
9325 | loop_delete_insns (first, last) | |
9326 | rtx first; | |
9327 | rtx last; | |
9328 | { | |
9329 | while (1) | |
9330 | { | |
9331 | PUT_CODE (first, NOTE); | |
9332 | NOTE_LINE_NUMBER (first) = NOTE_INSN_DELETED; | |
9333 | if (loop_dump_stream) | |
9334 | fprintf (loop_dump_stream, ", deleting init_insn (%d)", | |
9335 | INSN_UID (first)); | |
9336 | ||
9337 | /* If this was the LAST instructions we're supposed to delete, | |
9338 | we're done. */ | |
9339 | if (first == last) | |
9340 | break; | |
9341 | ||
9342 | first = NEXT_INSN (first); | |
9343 | } | |
9344 | } | |
9345 | ||
d2335c24 MH |
9346 | /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within |
9347 | loop LOOP if the order of the sets of these registers can be | |
9348 | swapped. There must be exactly one insn within the loop that sets | |
9349 | this pseudo followed immediately by a move insn that sets | |
9350 | REPLACEMENT with REGNO. */ | |
9351 | static void | |
9352 | try_swap_copy_prop (loop, replacement, regno) | |
9353 | const struct loop *loop; | |
9354 | rtx replacement; | |
9355 | unsigned int regno; | |
9356 | { | |
9357 | rtx insn; | |
616fde53 | 9358 | rtx set = NULL_RTX; |
d2335c24 MH |
9359 | unsigned int new_regno; |
9360 | ||
9361 | new_regno = REGNO (replacement); | |
9362 | ||
9363 | for (insn = next_insn_in_loop (loop, loop->scan_start); | |
9364 | insn != NULL_RTX; | |
9365 | insn = next_insn_in_loop (loop, insn)) | |
9366 | { | |
9367 | /* Search for the insn that copies REGNO to NEW_REGNO? */ | |
616fde53 | 9368 | if (INSN_P (insn) |
d2335c24 MH |
9369 | && (set = single_set (insn)) |
9370 | && GET_CODE (SET_DEST (set)) == REG | |
9371 | && REGNO (SET_DEST (set)) == new_regno | |
9372 | && GET_CODE (SET_SRC (set)) == REG | |
9373 | && REGNO (SET_SRC (set)) == regno) | |
9374 | break; | |
9375 | } | |
9376 | ||
0ef52662 | 9377 | if (insn != NULL_RTX) |
d2335c24 MH |
9378 | { |
9379 | rtx prev_insn; | |
9380 | rtx prev_set; | |
fd5d5b07 | 9381 | |
d2335c24 MH |
9382 | /* Some DEF-USE info would come in handy here to make this |
9383 | function more general. For now, just check the previous insn | |
9384 | which is the most likely candidate for setting REGNO. */ | |
fd5d5b07 | 9385 | |
d2335c24 | 9386 | prev_insn = PREV_INSN (insn); |
fd5d5b07 | 9387 | |
616fde53 | 9388 | if (INSN_P (insn) |
d2335c24 MH |
9389 | && (prev_set = single_set (prev_insn)) |
9390 | && GET_CODE (SET_DEST (prev_set)) == REG | |
9391 | && REGNO (SET_DEST (prev_set)) == regno) | |
9392 | { | |
9393 | /* We have: | |
9394 | (set (reg regno) (expr)) | |
9395 | (set (reg new_regno) (reg regno)) | |
fd5d5b07 | 9396 | |
d2335c24 MH |
9397 | so try converting this to: |
9398 | (set (reg new_regno) (expr)) | |
9399 | (set (reg regno) (reg new_regno)) | |
9400 | ||
9401 | The former construct is often generated when a global | |
9402 | variable used for an induction variable is shadowed by a | |
9403 | register (NEW_REGNO). The latter construct improves the | |
9404 | chances of GIV replacement and BIV elimination. */ | |
9405 | ||
9406 | validate_change (prev_insn, &SET_DEST (prev_set), | |
9407 | replacement, 1); | |
9408 | validate_change (insn, &SET_DEST (set), | |
9409 | SET_SRC (set), 1); | |
9410 | validate_change (insn, &SET_SRC (set), | |
9411 | replacement, 1); | |
9412 | ||
9413 | if (apply_change_group ()) | |
9414 | { | |
9415 | if (loop_dump_stream) | |
fd5d5b07 KH |
9416 | fprintf (loop_dump_stream, |
9417 | " Swapped set of reg %d at %d with reg %d at %d.\n", | |
9418 | regno, INSN_UID (insn), | |
d2335c24 MH |
9419 | new_regno, INSN_UID (prev_insn)); |
9420 | ||
9421 | /* Update first use of REGNO. */ | |
9422 | if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn)) | |
9423 | REGNO_FIRST_UID (regno) = INSN_UID (insn); | |
9424 | ||
9425 | /* Now perform copy propagation to hopefully | |
9426 | remove all uses of REGNO within the loop. */ | |
9427 | try_copy_prop (loop, replacement, regno); | |
9428 | } | |
9429 | } | |
9430 | } | |
9431 | } | |
9432 | ||
41a972a9 | 9433 | /* Replace MEM with its associated pseudo register. This function is |
afa1738b MH |
9434 | called from load_mems via for_each_rtx. DATA is actually a pointer |
9435 | to a structure describing the instruction currently being scanned | |
41a972a9 MM |
9436 | and the MEM we are currently replacing. */ |
9437 | ||
9438 | static int | |
9439 | replace_loop_mem (mem, data) | |
9440 | rtx *mem; | |
9441 | void *data; | |
9442 | { | |
fd5d5b07 | 9443 | loop_replace_args *args = (loop_replace_args *) data; |
41a972a9 MM |
9444 | rtx m = *mem; |
9445 | ||
9446 | if (m == NULL_RTX) | |
9447 | return 0; | |
9448 | ||
9449 | switch (GET_CODE (m)) | |
9450 | { | |
9451 | case MEM: | |
9452 | break; | |
9453 | ||
9454 | case CONST_DOUBLE: | |
9455 | /* We're not interested in the MEM associated with a | |
9456 | CONST_DOUBLE, so there's no need to traverse into one. */ | |
9457 | return -1; | |
9458 | ||
9459 | default: | |
9460 | /* This is not a MEM. */ | |
9461 | return 0; | |
9462 | } | |
9463 | ||
afa1738b | 9464 | if (!rtx_equal_p (args->match, m)) |
41a972a9 MM |
9465 | /* This is not the MEM we are currently replacing. */ |
9466 | return 0; | |
9467 | ||
41a972a9 | 9468 | /* Actually replace the MEM. */ |
afa1738b | 9469 | validate_change (args->insn, mem, args->replacement, 1); |
41a972a9 MM |
9470 | |
9471 | return 0; | |
9472 | } | |
9473 | ||
afa1738b MH |
9474 | static void |
9475 | replace_loop_mems (insn, mem, reg) | |
fd5d5b07 KH |
9476 | rtx insn; |
9477 | rtx mem; | |
9478 | rtx reg; | |
9479 | { | |
afa1738b MH |
9480 | loop_replace_args args; |
9481 | ||
9482 | args.insn = insn; | |
9483 | args.match = mem; | |
9484 | args.replacement = reg; | |
9485 | ||
9486 | for_each_rtx (&insn, replace_loop_mem, &args); | |
9487 | } | |
9488 | ||
c29f60c0 | 9489 | /* Replace one register with another. Called through for_each_rtx; PX points |
fd5d5b07 | 9490 | to the rtx being scanned. DATA is actually a pointer to |
afa1738b | 9491 | a structure of arguments. */ |
c29f60c0 BS |
9492 | |
9493 | static int | |
9494 | replace_loop_reg (px, data) | |
9495 | rtx *px; | |
9496 | void *data; | |
9497 | { | |
9498 | rtx x = *px; | |
fd5d5b07 | 9499 | loop_replace_args *args = (loop_replace_args *) data; |
c29f60c0 BS |
9500 | |
9501 | if (x == NULL_RTX) | |
9502 | return 0; | |
9503 | ||
afa1738b MH |
9504 | if (x == args->match) |
9505 | validate_change (args->insn, px, args->replacement, 1); | |
c29f60c0 BS |
9506 | |
9507 | return 0; | |
9508 | } | |
9509 | ||
afa1738b MH |
9510 | static void |
9511 | replace_loop_regs (insn, reg, replacement) | |
9512 | rtx insn; | |
9513 | rtx reg; | |
9514 | rtx replacement; | |
9515 | { | |
9516 | loop_replace_args args; | |
9517 | ||
9518 | args.insn = insn; | |
9519 | args.match = reg; | |
9520 | args.replacement = replacement; | |
9521 | ||
9522 | for_each_rtx (&insn, replace_loop_reg, &args); | |
9523 | } | |
9524 | ||
41a972a9 MM |
9525 | /* Replace occurrences of the old exit label for the loop with the new |
9526 | one. DATA is an rtx_pair containing the old and new labels, | |
9527 | respectively. */ | |
9528 | ||
9529 | static int | |
9530 | replace_label (x, data) | |
9531 | rtx *x; | |
9532 | void *data; | |
9533 | { | |
9534 | rtx l = *x; | |
e6fcb60d KH |
9535 | rtx old_label = ((rtx_pair *) data)->r1; |
9536 | rtx new_label = ((rtx_pair *) data)->r2; | |
41a972a9 MM |
9537 | |
9538 | if (l == NULL_RTX) | |
9539 | return 0; | |
9540 | ||
9541 | if (GET_CODE (l) != LABEL_REF) | |
9542 | return 0; | |
9543 | ||
9544 | if (XEXP (l, 0) != old_label) | |
9545 | return 0; | |
e6fcb60d | 9546 | |
41a972a9 MM |
9547 | XEXP (l, 0) = new_label; |
9548 | ++LABEL_NUSES (new_label); | |
9549 | --LABEL_NUSES (old_label); | |
9550 | ||
9551 | return 0; | |
9552 | } | |
6057c0e6 | 9553 | \f |
96a45535 MH |
9554 | /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB |
9555 | (ignored in the interim). */ | |
9556 | ||
9557 | static rtx | |
9558 | loop_insn_emit_after (loop, where_bb, where_insn, pattern) | |
9559 | const struct loop *loop ATTRIBUTE_UNUSED; | |
9560 | basic_block where_bb ATTRIBUTE_UNUSED; | |
9561 | rtx where_insn; | |
9562 | rtx pattern; | |
9563 | { | |
9564 | return emit_insn_after (pattern, where_insn); | |
9565 | } | |
9566 | ||
9567 | ||
804a718a MH |
9568 | /* If WHERE_INSN is non-zero emit insn for PATTERN before WHERE_INSN |
9569 | in basic block WHERE_BB (ignored in the interim) within the loop | |
9570 | otherwise hoist PATTERN into the loop pre-header. */ | |
9571 | ||
86e21212 | 9572 | rtx |
804a718a MH |
9573 | loop_insn_emit_before (loop, where_bb, where_insn, pattern) |
9574 | const struct loop *loop; | |
9575 | basic_block where_bb ATTRIBUTE_UNUSED; | |
9576 | rtx where_insn; | |
9577 | rtx pattern; | |
9578 | { | |
9579 | if (! where_insn) | |
9580 | return loop_insn_hoist (loop, pattern); | |
9581 | return emit_insn_before (pattern, where_insn); | |
9582 | } | |
9583 | ||
9584 | ||
86e21212 MH |
9585 | /* Emit call insn for PATTERN before WHERE_INSN in basic block |
9586 | WHERE_BB (ignored in the interim) within the loop. */ | |
9587 | ||
9588 | static rtx | |
9589 | loop_call_insn_emit_before (loop, where_bb, where_insn, pattern) | |
9590 | const struct loop *loop ATTRIBUTE_UNUSED; | |
9591 | basic_block where_bb ATTRIBUTE_UNUSED; | |
9592 | rtx where_insn; | |
9593 | rtx pattern; | |
9594 | { | |
9595 | return emit_call_insn_before (pattern, where_insn); | |
9596 | } | |
9597 | ||
9598 | ||
804a718a MH |
9599 | /* Hoist insn for PATTERN into the loop pre-header. */ |
9600 | ||
9601 | rtx | |
9602 | loop_insn_hoist (loop, pattern) | |
9603 | const struct loop *loop; | |
9604 | rtx pattern; | |
9605 | { | |
9606 | return loop_insn_emit_before (loop, 0, loop->start, pattern); | |
9607 | } | |
96a45535 MH |
9608 | |
9609 | ||
86e21212 MH |
9610 | /* Hoist call insn for PATTERN into the loop pre-header. */ |
9611 | ||
9612 | static rtx | |
9613 | loop_call_insn_hoist (loop, pattern) | |
9614 | const struct loop *loop; | |
9615 | rtx pattern; | |
9616 | { | |
9617 | return loop_call_insn_emit_before (loop, 0, loop->start, pattern); | |
9618 | } | |
9619 | ||
9620 | ||
96a45535 MH |
9621 | /* Sink insn for PATTERN after the loop end. */ |
9622 | ||
9623 | rtx | |
9624 | loop_insn_sink (loop, pattern) | |
9625 | const struct loop *loop; | |
9626 | rtx pattern; | |
9627 | { | |
9628 | return loop_insn_emit_before (loop, 0, loop->sink, pattern); | |
9629 | } | |
9630 | ||
9631 | ||
9632 | /* If the loop has multiple exits, emit insn for PATTERN before the | |
9633 | loop to ensure that it will always be executed no matter how the | |
9634 | loop exits. Otherwise, emit the insn for PATTERN after the loop, | |
9635 | since this is slightly more efficient. */ | |
9636 | ||
9637 | static rtx | |
9638 | loop_insn_sink_or_swim (loop, pattern) | |
9639 | const struct loop *loop; | |
9640 | rtx pattern; | |
9641 | { | |
9642 | if (loop->exit_count) | |
9643 | return loop_insn_hoist (loop, pattern); | |
9644 | else | |
9645 | return loop_insn_sink (loop, pattern); | |
9646 | } | |
804a718a | 9647 | \f |
099f0f3f MH |
9648 | static void |
9649 | loop_ivs_dump (loop, file, verbose) | |
9650 | const struct loop *loop; | |
9651 | FILE *file; | |
9652 | int verbose; | |
9653 | { | |
9654 | struct iv_class *bl; | |
9655 | int iv_num = 0; | |
9656 | ||
9657 | if (! loop || ! file) | |
9658 | return; | |
9659 | ||
9660 | for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next) | |
9661 | iv_num++; | |
9662 | ||
9663 | fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num); | |
9664 | ||
9665 | for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next) | |
9666 | { | |
9667 | loop_iv_class_dump (bl, file, verbose); | |
9668 | fputc ('\n', file); | |
9669 | } | |
9670 | } | |
9671 | ||
9672 | ||
9673 | static void | |
9674 | loop_iv_class_dump (bl, file, verbose) | |
9675 | const struct iv_class *bl; | |
9676 | FILE *file; | |
9677 | int verbose ATTRIBUTE_UNUSED; | |
9678 | { | |
9679 | struct induction *v; | |
9680 | rtx incr; | |
9681 | int i; | |
9682 | ||
9683 | if (! bl || ! file) | |
9684 | return; | |
9685 | ||
9686 | fprintf (file, "IV class for reg %d, benefit %d\n", | |
9687 | bl->regno, bl->total_benefit); | |
9688 | ||
9689 | fprintf (file, " Init insn %d", INSN_UID (bl->init_insn)); | |
9690 | if (bl->initial_value) | |
9691 | { | |
9692 | fprintf (file, ", init val: "); | |
9693 | print_simple_rtl (file, bl->initial_value); | |
9694 | } | |
9695 | if (bl->initial_test) | |
9696 | { | |
9697 | fprintf (file, ", init test: "); | |
9698 | print_simple_rtl (file, bl->initial_test); | |
9699 | } | |
9700 | fputc ('\n', file); | |
9701 | ||
9702 | if (bl->final_value) | |
9703 | { | |
9704 | fprintf (file, " Final val: "); | |
9705 | print_simple_rtl (file, bl->final_value); | |
9706 | fputc ('\n', file); | |
9707 | } | |
9708 | ||
9709 | if ((incr = biv_total_increment (bl))) | |
9710 | { | |
9711 | fprintf (file, " Total increment: "); | |
9712 | print_simple_rtl (file, incr); | |
9713 | fputc ('\n', file); | |
9714 | } | |
9715 | ||
9716 | /* List the increments. */ | |
9717 | for (i = 0, v = bl->biv; v; v = v->next_iv, i++) | |
9718 | { | |
9719 | fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn)); | |
9720 | print_simple_rtl (file, v->add_val); | |
9721 | fputc ('\n', file); | |
9722 | } | |
9723 | ||
9724 | /* List the givs. */ | |
9725 | for (i = 0, v = bl->giv; v; v = v->next_iv, i++) | |
9726 | { | |
9727 | fprintf (file, " Giv%d: insn %d, benefit %d, ", | |
9728 | i, INSN_UID (v->insn), v->benefit); | |
9729 | if (v->giv_type == DEST_ADDR) | |
9730 | print_simple_rtl (file, v->mem); | |
9731 | else | |
9732 | print_simple_rtl (file, single_set (v->insn)); | |
9733 | fputc ('\n', file); | |
9734 | } | |
9735 | } | |
9736 | ||
9737 | ||
c804f3f8 MH |
9738 | static void |
9739 | loop_biv_dump (v, file, verbose) | |
9740 | const struct induction *v; | |
9741 | FILE *file; | |
9742 | int verbose; | |
9743 | { | |
9744 | if (! v || ! file) | |
9745 | return; | |
9746 | ||
9747 | fprintf (file, | |
9748 | "Biv %d: insn %d", | |
9749 | REGNO (v->dest_reg), INSN_UID (v->insn)); | |
9750 | fprintf (file, " const "); | |
9751 | print_simple_rtl (file, v->add_val); | |
9752 | ||
9753 | if (verbose && v->final_value) | |
9754 | { | |
9755 | fputc ('\n', file); | |
9756 | fprintf (file, " final "); | |
9757 | print_simple_rtl (file, v->final_value); | |
9758 | } | |
9759 | ||
9760 | fputc ('\n', file); | |
9761 | } | |
9762 | ||
9763 | ||
9764 | static void | |
9765 | loop_giv_dump (v, file, verbose) | |
9766 | const struct induction *v; | |
9767 | FILE *file; | |
9768 | int verbose; | |
9769 | { | |
9770 | if (! v || ! file) | |
9771 | return; | |
9772 | ||
9773 | if (v->giv_type == DEST_REG) | |
9774 | fprintf (file, "Giv %d: insn %d", | |
9775 | REGNO (v->dest_reg), INSN_UID (v->insn)); | |
9776 | else | |
9777 | fprintf (file, "Dest address: insn %d", | |
9778 | INSN_UID (v->insn)); | |
9779 | ||
9780 | fprintf (file, " src reg %d benefit %d", | |
9781 | REGNO (v->src_reg), v->benefit); | |
9782 | fprintf (file, " lifetime %d", | |
9783 | v->lifetime); | |
9784 | ||
9785 | if (v->replaceable) | |
9786 | fprintf (file, " replaceable"); | |
9787 | ||
9788 | if (v->no_const_addval) | |
9789 | fprintf (file, " ncav"); | |
9790 | ||
9791 | if (v->ext_dependant) | |
9792 | { | |
9793 | switch (GET_CODE (v->ext_dependant)) | |
9794 | { | |
9795 | case SIGN_EXTEND: | |
9796 | fprintf (file, " ext se"); | |
9797 | break; | |
9798 | case ZERO_EXTEND: | |
9799 | fprintf (file, " ext ze"); | |
9800 | break; | |
9801 | case TRUNCATE: | |
9802 | fprintf (file, " ext tr"); | |
9803 | break; | |
9804 | default: | |
9805 | abort (); | |
9806 | } | |
9807 | } | |
9808 | ||
9809 | fputc ('\n', file); | |
9810 | fprintf (file, " mult "); | |
9811 | print_simple_rtl (file, v->mult_val); | |
9812 | ||
9813 | fputc ('\n', file); | |
9814 | fprintf (file, " add "); | |
9815 | print_simple_rtl (file, v->add_val); | |
9816 | ||
9817 | if (verbose && v->final_value) | |
9818 | { | |
9819 | fputc ('\n', file); | |
9820 | fprintf (file, " final "); | |
9821 | print_simple_rtl (file, v->final_value); | |
9822 | } | |
9823 | ||
9824 | fputc ('\n', file); | |
9825 | } | |
9826 | ||
9827 | ||
099f0f3f MH |
9828 | void |
9829 | debug_ivs (loop) | |
9830 | const struct loop *loop; | |
9831 | { | |
9832 | loop_ivs_dump (loop, stderr, 1); | |
9833 | } | |
9834 | ||
9835 | ||
9836 | void | |
9837 | debug_iv_class (bl) | |
9838 | const struct iv_class *bl; | |
9839 | { | |
9840 | loop_iv_class_dump (bl, stderr, 1); | |
9841 | } | |
9842 | ||
9843 | ||
c804f3f8 MH |
9844 | void |
9845 | debug_biv (v) | |
9846 | const struct induction *v; | |
9847 | { | |
9848 | loop_biv_dump (v, stderr, 1); | |
9849 | } | |
9850 | ||
9851 | ||
9852 | void | |
9853 | debug_giv (v) | |
9854 | const struct induction *v; | |
9855 | { | |
9856 | loop_giv_dump (v, stderr, 1); | |
9857 | } | |
9858 | ||
9859 | ||
6057c0e6 MH |
9860 | #define LOOP_BLOCK_NUM_1(INSN) \ |
9861 | ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : - 1) : -1) | |
9862 | ||
9863 | /* The notes do not have an assigned block, so look at the next insn. */ | |
9864 | #define LOOP_BLOCK_NUM(INSN) \ | |
9865 | ((INSN) ? (GET_CODE (INSN) == NOTE \ | |
9866 | ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \ | |
9867 | : LOOP_BLOCK_NUM_1 (INSN)) \ | |
9868 | : -1) | |
9869 | ||
9870 | #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1) | |
9871 | ||
fd5d5b07 KH |
9872 | static void |
9873 | loop_dump_aux (loop, file, verbose) | |
6057c0e6 MH |
9874 | const struct loop *loop; |
9875 | FILE *file; | |
1dcdb0df | 9876 | int verbose ATTRIBUTE_UNUSED; |
6057c0e6 MH |
9877 | { |
9878 | rtx label; | |
9879 | ||
9880 | if (! loop || ! file) | |
9881 | return; | |
9882 | ||
9883 | /* Print diagnostics to compare our concept of a loop with | |
9884 | what the loop notes say. */ | |
9885 | if (! PREV_INSN (loop->first->head) | |
9886 | || GET_CODE (PREV_INSN (loop->first->head)) != NOTE | |
9887 | || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head)) | |
9888 | != NOTE_INSN_LOOP_BEG) | |
fd5d5b07 | 9889 | fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n", |
6057c0e6 MH |
9890 | INSN_UID (PREV_INSN (loop->first->head))); |
9891 | if (! NEXT_INSN (loop->last->end) | |
9892 | || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE | |
9893 | || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end)) | |
9894 | != NOTE_INSN_LOOP_END) | |
9895 | fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n", | |
9896 | INSN_UID (NEXT_INSN (loop->last->end))); | |
9897 | ||
9898 | if (loop->start) | |
9899 | { | |
9900 | fprintf (file, | |
9901 | ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n", | |
9902 | LOOP_BLOCK_NUM (loop->start), | |
9903 | LOOP_INSN_UID (loop->start), | |
9904 | LOOP_BLOCK_NUM (loop->cont), | |
9905 | LOOP_INSN_UID (loop->cont), | |
9906 | LOOP_BLOCK_NUM (loop->cont), | |
9907 | LOOP_INSN_UID (loop->cont), | |
9908 | LOOP_BLOCK_NUM (loop->vtop), | |
9909 | LOOP_INSN_UID (loop->vtop), | |
9910 | LOOP_BLOCK_NUM (loop->end), | |
9911 | LOOP_INSN_UID (loop->end)); | |
9912 | fprintf (file, ";; top %d (%d), scan start %d (%d)\n", | |
9913 | LOOP_BLOCK_NUM (loop->top), | |
fd5d5b07 | 9914 | LOOP_INSN_UID (loop->top), |
6057c0e6 MH |
9915 | LOOP_BLOCK_NUM (loop->scan_start), |
9916 | LOOP_INSN_UID (loop->scan_start)); | |
9917 | fprintf (file, ";; exit_count %d", loop->exit_count); | |
9918 | if (loop->exit_count) | |
9919 | { | |
9920 | fputs (", labels:", file); | |
9921 | for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label)) | |
9922 | { | |
9923 | fprintf (file, " %d ", | |
9924 | LOOP_INSN_UID (XEXP (label, 0))); | |
9925 | } | |
9926 | } | |
9927 | fputs ("\n", file); | |
fd5d5b07 | 9928 | |
6057c0e6 MH |
9929 | /* This can happen when a marked loop appears as two nested loops, |
9930 | say from while (a || b) {}. The inner loop won't match | |
9931 | the loop markers but the outer one will. */ | |
9932 | if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index) | |
9933 | fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n"); | |
9934 | } | |
9935 | } | |
6057c0e6 MH |
9936 | |
9937 | /* Call this function from the debugger to dump LOOP. */ | |
9938 | ||
9939 | void | |
9940 | debug_loop (loop) | |
9941 | const struct loop *loop; | |
9942 | { | |
9943 | flow_loop_dump (loop, stderr, loop_dump_aux, 1); | |
9944 | } | |
685efa54 MH |
9945 | |
9946 | /* Call this function from the debugger to dump LOOPS. */ | |
9947 | ||
9948 | void | |
9949 | debug_loops (loops) | |
9950 | const struct loops *loops; | |
9951 | { | |
9952 | flow_loops_dump (loops, stderr, loop_dump_aux, 1); | |
9953 | } |