/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
   1998, 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the
   general induction variables, and induction variable elimination is
   applied to the basic induction variables.

   It also finds cases where a register is set within the loop by
   zero-extending a narrower value, and changes these to zero the
   entire register once before the loop and merely copy the low part
   within the loop.

   Most of the complexity is in heuristics to decide when it is
   worthwhile to do these things.  */
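
/* As an added illustration (not part of the original comments): given

	for (i = 0; i < n; i++)
	  a[i] = x * y;		with a[i] at address base + 4*i,

   invariant code motion hoists the computation of x * y in front of
   the loop, and strength reduction turns the multiplication 4*i hidden
   in the address into a running addition on a pointer:

	t = x * y;
	p = base;
	for (i = 0; i < n; i++, p += 4)
	  *p = t;  */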

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "function.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "cselib.h"
#include "except.h"
#include "toplev.h"
#include "predict.h"
#include "insn-flags.h"
#include "optabs.h"
#include "cfgloop.h"

/* Not really meaningful values, but at least something.  */
#ifndef SIMULTANEOUS_PREFETCHES
#define SIMULTANEOUS_PREFETCHES 3
#endif
#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK 32
#endif
#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#define CODE_FOR_prefetch 0
#define gen_prefetch(a,b,c) (abort(), NULL_RTX)
#endif

/* Give up the prefetch optimizations once we exceed a given threshold.
   It is unlikely that we would be able to optimize something in a loop
   with so many detected prefetches.  */
#define MAX_PREFETCHES 100
/* The number of prefetch blocks that are beneficial to fetch at once before
   a loop with a known (and low) iteration count.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
/* For very tiny loops it is not worthwhile to prefetch even before the loop,
   since it is likely that the data are already in the cache.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2

/* Parameterize some prefetch heuristics so they can be turned on and off
   easily for performance testing on new architectures.  These can be
   defined in target-dependent files.  */

/* Prefetch is worthwhile only when loads/stores are dense.  */
#ifndef PREFETCH_ONLY_DENSE_MEM
#define PREFETCH_ONLY_DENSE_MEM 1
#endif

/* Define what we mean by "dense" loads and stores; this value divided
   by 256 is the minimum fraction of memory references that are worth
   prefetching.  */
#ifndef PREFETCH_DENSE_MEM
#define PREFETCH_DENSE_MEM 220
#endif
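
/* Added worked example: with the default of 220, at least 220/256,
   i.e. roughly 86%, of the memory references in the loop must be dense
   loads/stores for prefetching to be considered worthwhile.  */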

/* Do not prefetch for a loop whose iteration count is known to be low.  */
#ifndef PREFETCH_NO_LOW_LOOPCNT
#define PREFETCH_NO_LOW_LOOPCNT 1
#endif

/* Define what we mean by a "low" iteration count.  */
#ifndef PREFETCH_LOW_LOOPCNT
#define PREFETCH_LOW_LOOPCNT 32
#endif

/* Do not prefetch for a loop that contains a function call; such a loop is
   probably not an internal loop.  */
#ifndef PREFETCH_NO_CALL
#define PREFETCH_NO_CALL 1
#endif

/* Do not prefetch accesses with an extreme stride.  */
#ifndef PREFETCH_NO_EXTREME_STRIDE
#define PREFETCH_NO_EXTREME_STRIDE 1
#endif

/* Define what we mean by an "extreme" stride.  */
#ifndef PREFETCH_EXTREME_STRIDE
#define PREFETCH_EXTREME_STRIDE 4096
#endif

/* Define a limit to how far apart indices can be and still be merged
   into a single prefetch.  */
#ifndef PREFETCH_EXTREME_DIFFERENCE
#define PREFETCH_EXTREME_DIFFERENCE 4096
#endif

/* Issue prefetch instructions before the loop to fetch data to be used
   in the first few loop iterations.  */
#ifndef PREFETCH_BEFORE_LOOP
#define PREFETCH_BEFORE_LOOP 1
#endif

/* Do not handle reversed order prefetches (negative stride).  */
#ifndef PREFETCH_NO_REVERSE_ORDER
#define PREFETCH_NO_REVERSE_ORDER 1
#endif

/* Prefetch even if the GIV is in conditional code.  */
#ifndef PREFETCH_CONDITIONAL
#define PREFETCH_CONDITIONAL 1
#endif

#define LOOP_REG_LIFETIME(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
  ((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
    || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))

#define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
  ((REGNO) < FIRST_PSEUDO_REGISTER \
   ? (int) HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)

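/* Added note: LOOP_REGNO_NREGS asks the target how many consecutive
   hard registers a value of SET_DEST's mode occupies when REGNO is a
   hard register (e.g. a DImode value typically spans two registers on
   a 32-bit target), while a pseudo always counts as one.  */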

/* Vector mapping INSN_UIDs to luids.
   The luids are like uids, but increase monotonically through the code.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* Number of loops detected in the current function.  Used as an index
   into the next few tables.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;
\f
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;			/* A movable insn */
  rtx set_src;			/* The expression this reg is set from.  */
  rtx set_dest;			/* The destination of this SET.  */
  rtx dependencies;		/* When INSN is a libcall, this is an EXPR_LIST
				   of any registers used within the LIBCALL.  */
  int consec;			/* Number of consecutive following insns
				   that must be moved with this one.  */
  unsigned int regno;		/* The register it sets */
  short lifetime;		/* lifetime of that register;
				   may be adjusted when matching movables
				   that load the same value are found.  */
  short savings;		/* Number of insns we can move for this reg,
				   including other movables that force this
				   or match this one.  */
  ENUM_BITFIELD(machine_mode) savemode : 8;  /* Nonzero means it is a mode for
				   a low part that we should avoid changing when
				   clearing the rest of the reg.  */
  unsigned int cond : 1;	/* 1 if only conditionally movable */
  unsigned int force : 1;	/* 1 means MUST move this insn */
  unsigned int global : 1;	/* 1 means reg is live outside this loop */
		/* If PARTIAL is 1, GLOBAL means something different:
		   that the reg is live outside the range from where it is set
		   to the following label.  */
  unsigned int done : 1;	/* 1 inhibits further processing of this */

  unsigned int partial : 1;	/* 1 means this reg is used for zero-extending.
				   In particular, moving it does not make it
				   invariant.  */
  unsigned int move_insn : 1;	/* 1 means that we call emit_move_insn to
				   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
				    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;	/* 1 means a REG_EQUIV is present on INSN.  */
  unsigned int insert_temp : 1;	/* 1 means we copy to a new pseudo and replace
				   the original insn with a copy from that
				   pseudo, rather than deleting it.  */
  struct movable *match;	/* First entry for same value */
  struct movable *forces;	/* An insn that must be moved if this is */
  struct movable *next;
};

FILE *loop_dump_stream;

/* Forward declarations.  */

static void invalidate_loops_containing_label (rtx);
static void find_and_verify_loops (rtx, struct loops *);
static void mark_loop_jump (rtx, struct loop *);
static void prescan_loop (struct loop *);
static int reg_in_basic_block_p (rtx, rtx);
static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx);
static int labels_in_range_p (rtx, int);
static void count_one_set (struct loop_regs *, rtx, rtx, rtx *);
static void note_addr_stored (rtx, rtx, void *);
static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
static void scan_loop (struct loop*, int);
#if 0
static void replace_call_address (rtx, rtx, rtx);
#endif
static rtx skip_consec_insns (rtx, int);
static int libcall_benefit (rtx);
static void ignore_some_movables (struct loop_movables *);
static void force_movables (struct loop_movables *);
static void combine_movables (struct loop_movables *, struct loop_regs *);
static int num_unmoved_movables (const struct loop *);
static int regs_match_p (rtx, rtx, struct loop_movables *);
static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *,
				 struct loop_regs *);
static void add_label_notes (rtx, rtx);
static void move_movables (struct loop *loop, struct loop_movables *, int,
			   int);
static void loop_movables_add (struct loop_movables *, struct movable *);
static void loop_movables_free (struct loop_movables *);
static int count_nonfixed_reads (const struct loop *, rtx);
static void loop_bivs_find (struct loop *);
static void loop_bivs_init_find (struct loop *);
static void loop_bivs_check (struct loop *);
static void loop_givs_find (struct loop *);
static void loop_givs_check (struct loop *);
static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int);
static int loop_giv_reduce_benefit (struct loop *, struct iv_class *,
				    struct induction *, rtx);
static void loop_givs_dead_check (struct loop *, struct iv_class *);
static void loop_givs_reduce (struct loop *, struct iv_class *);
static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *);
static void loop_ivs_free (struct loop *);
static void strength_reduce (struct loop *, int);
static void find_single_use_in_loop (struct loop_regs *, rtx, rtx);
static int valid_initial_value_p (rtx, rtx, int, rtx);
static void find_mem_givs (const struct loop *, rtx, rtx, int, int);
static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx,
			rtx, rtx *, int, int);
static void check_final_value (const struct loop *, struct induction *);
static void loop_ivs_dump (const struct loop *, FILE *, int);
static void loop_iv_class_dump (const struct iv_class *, FILE *, int);
static void loop_biv_dump (const struct induction *, FILE *, int);
static void loop_giv_dump (const struct induction *, FILE *, int);
static void record_giv (const struct loop *, struct induction *, rtx, rtx,
			rtx, rtx, rtx, rtx, int, enum g_types, int, int,
			rtx *);
static void update_giv_derive (const struct loop *, rtx);
static void check_ext_dependent_givs (const struct loop *, struct iv_class *);
static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
				rtx, rtx, rtx *, rtx *, rtx **);
static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *,
				  rtx *, rtx *, int, int *, enum machine_mode);
static int consec_sets_giv (const struct loop *, int, rtx, rtx, rtx, rtx *,
			    rtx *, rtx *, rtx *);
static int check_dbra_loop (struct loop *, int);
static rtx express_from_1 (rtx, rtx, rtx);
static rtx combine_givs_p (struct induction *, struct induction *);
static int cmp_combine_givs_stats (const void *, const void *);
static void combine_givs (struct loop_regs *, struct iv_class *);
static int product_cheap_p (rtx, rtx);
static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int,
				int, int);
static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx,
				  struct iv_class *, int, basic_block, rtx);
static int last_use_this_basic_block (rtx, rtx);
static void record_initial (rtx, rtx, void *);
static void update_reg_last_use (rtx, rtx);
static rtx next_insn_in_loop (const struct loop *, rtx);
static void loop_regs_scan (const struct loop *, int);
static int count_insns_in_loop (const struct loop *);
static int find_mem_in_note_1 (rtx *, void *);
static rtx find_mem_in_note (rtx);
static void load_mems (const struct loop *);
static int insert_loop_mem (rtx *, void *);
static int replace_loop_mem (rtx *, void *);
static void replace_loop_mems (rtx, rtx, rtx, int);
static int replace_loop_reg (rtx *, void *);
static void replace_loop_regs (rtx insn, rtx, rtx);
static void note_reg_stored (rtx, rtx, void *);
static void try_copy_prop (const struct loop *, rtx, unsigned int);
static void try_swap_copy_prop (const struct loop *, rtx, unsigned int);
static rtx check_insn_for_givs (struct loop *, rtx, int, int);
static rtx check_insn_for_bivs (struct loop *, rtx, int, int);
static rtx gen_add_mult (rtx, rtx, rtx, rtx);
static void loop_regs_update (const struct loop *, rtx);
static int iv_add_mult_cost (rtx, rtx, rtx, rtx);

static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx);
static rtx loop_call_insn_emit_before (const struct loop *, basic_block,
				       rtx, rtx);
static rtx loop_call_insn_hoist (const struct loop *, rtx);
static rtx loop_insn_sink_or_swim (const struct loop *, rtx);

static void loop_dump_aux (const struct loop *, FILE *, int);
static void loop_delete_insns (rtx, rtx);
static HOST_WIDE_INT remove_constant_addition (rtx *);
static rtx gen_load_of_final_value (rtx, rtx);
void debug_ivs (const struct loop *);
void debug_iv_class (const struct iv_class *);
void debug_biv (const struct induction *);
void debug_giv (const struct induction *);
void debug_loop (const struct loop *);
void debug_loops (const struct loops *);

typedef struct loop_replace_args
{
  rtx match;
  rtx replacement;
  rtx insn;
} loop_replace_args;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END)	\
  (INSN_UID (INSN) < max_uid_for_loop		\
   && INSN_LUID (INSN) >= INSN_LUID (START)	\
   && INSN_LUID (INSN) <= INSN_LUID (END))
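
/* Added note: the INSN_UID bound in INSN_IN_RANGE_P guards against
   insns created by the loop pass itself; their uids are not below
   max_uid_for_loop, so no luid has been recorded for them.  */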

/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p (rtx);

static int compute_luids (rtx, rtx, int);

static int biv_elimination_giv_has_0_offset (struct induction *,
					     struct induction *, rtx);
\f
/* Benefit penalty, if a giv is not replaceable, i.e. if we must emit an
   insn to copy the value of the strength-reduced giv to its original
   register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;

void
init_loop (void)
{
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
}
\f
/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
static int
compute_luids (rtx start, rtx end, int prev_luid)
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
	continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
	 luids between two insns is not affected by -g.  */
      if (GET_CODE (insn) != NOTE
	  || NOTE_LINE_NUMBER (insn) <= 0)
	uid_luid[INSN_UID (insn)] = ++i;
      else
	/* Give a line number note the same luid as preceding insn.  */
	uid_luid[INSN_UID (insn)] = i;
    }
  return i + 1;
}
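
/* Added illustration: for an insn stream

	insn  (uid 10)	-> luid PREV_LUID + 1
	note  (line 42)	-> luid PREV_LUID + 1	(shares the insn's luid)
	insn  (uid 4)	-> luid PREV_LUID + 2

   the luids increase monotonically regardless of uid order, and line
   number notes add no luid distance, so compiling with -g does not
   perturb distances measured in luids.  */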
\f
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (rtx f, FILE *dumpfile, int flags)
{
  rtx insn;
  int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
	max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = xcalloc (max_uid_for_loop, sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < (int) loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop, 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
	break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];
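
  /* Added illustration: if the insns with uids 7 and 8 were deleted,
     the two loops above leave uid_luid[7] and uid_luid[8] equal to
     uid_luid[6], so a lookup for a stale uid still lands in the
     general area where the insn used to be.  */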

  /* Determine if the function has an indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
	scan_loop (loop, flags);
    }

  end_alias_analysis ();

  /* Clean up.  */
  free (uid_luid);
  free (uid_loop);
  free (loops_info);
  free (loops->array);
}
\f
/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (const struct loop *loop, rtx insn)
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
	/* Go to the top of the loop, and continue there.  */
	insn = loop->top;
      else
	/* We're done.  */
	insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}
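
/* Added illustration: for a loop entered near the bottom, where
   LOOP->SCAN_START is the target of the entry jump, the function above
   yields the insns in execution order: from SCAN_START down to END,
   then wrapping around to TOP, and stopping once SCAN_START is reached
   again.  */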

/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */

static void
scan_loop (struct loop *loop, int flags)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct loop_movables *movables = LOOP_MOVABLES (loop);
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;
  int in_libcall;

  loop->top = 0;

  movables->head = 0;
  movables->last = 0;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
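
  /* Added illustration: such a loop typically has the shape

	NOTE_INSN_LOOP_BEG
	  jump to TEST
     TOP:  body of the loop
     TEST: conditional jump back to TOP
	NOTE_INSN_LOOP_END

     so execution enters at the exit test rather than at TOP.  */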

  for (p = NEXT_INSN (loop_start);
       p != loop_end
	 && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
	 && (GET_CODE (p) != NOTE
	     || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
		 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  loop->scan_start = p;

  /* If loop end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
     note insn.  This is the position we use when sinking insns out of
     the loop.  */
  if (NEXT_INSN (loop->end) != 0)
    loop->sink = NEXT_INSN (loop->end);
  else
    loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);

  /* Set up variables describing this loop.  */
  prescan_loop (loop);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP->TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN
      /* Loop entry must be unconditional jump (and not a RETURN)  */
      && any_uncondjump_p (p)
      && JUMP_LABEL (p) != 0
      /* Check to see whether the jump actually
	 jumps out of the loop (meaning it's no loop).
	 This case can happen for things like
	 do {..} while (0).  If this label was generated previously
	 by loop, we can't tell anything about it and have to reject
	 the loop.  */
      && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
    {
      loop->top = next_label (loop->scan_start);
      loop->scan_start = JUMP_LABEL (p);
    }

  /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (loop->scan_start) >= max_uid_for_loop
      || GET_CODE (loop->scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
	fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
		 INSN_UID (loop_start), INSN_UID (loop_end));
      return;
    }

  /* Allocate extra space for REGs that might be created by load_mems.
     We allocate a little extra slop as well, in the hopes that we
     won't have to reallocate the regs array.  */
  loop_regs_scan (loop, loop_info->mems_idx + 16);
  insn_count = count_insns_in_loop (loop);

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
	       INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
      if (loop->cont)
	fprintf (loop_dump_stream, "Continue at insn %d.\n",
		 INSN_UID (loop->cont));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */
  for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
	in_libcall--;
      if (GET_CODE (p) == INSN)
	{
	  temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
	  if (temp)
	    in_libcall++;
	  if (! in_libcall
	      && (set = single_set (p))
	      && GET_CODE (SET_DEST (set)) == REG
#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
	      && SET_DEST (set) != pic_offset_table_rtx
#endif
	      && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
	    {
	      int tem1 = 0;
	      int tem2 = 0;
	      int move_insn = 0;
	      int insert_temp = 0;
	      rtx src = SET_SRC (set);
	      rtx dependencies = 0;

	      /* Figure out what to use as a source of this insn.  If a
		 REG_EQUIV note is given or if a REG_EQUAL note with a
		 constant operand is specified, use it as the source and
		 mark that we should move this insn by calling
		 emit_move_insn rather than duplicating the insn.

		 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
		 note is present.  */
	      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
	      if (temp)
		src = XEXP (temp, 0), move_insn = 1;
	      else
		{
		  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
		  if (temp && CONSTANT_P (XEXP (temp, 0)))
		    src = XEXP (temp, 0), move_insn = 1;
		  if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
		    {
		      src = XEXP (temp, 0);
		      /* A libcall block can use regs that don't appear in
			 the equivalent expression.  To move the libcall,
			 we must move those regs too.  */
		      dependencies = libcall_other_reg (p, src);
		    }
		}

	      /* For parallels, add any possible uses to the dependencies, as
		 we can't move the insn without resolving them first.  */
	      if (GET_CODE (PATTERN (p)) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
		    {
		      rtx x = XVECEXP (PATTERN (p), 0, i);
		      if (GET_CODE (x) == USE)
			dependencies
			  = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
					       dependencies);
		    }
		}

	      if (/* The register is used in basic blocks other
		     than the one where it is set (meaning that
		     something after this point in the loop might
		     depend on its value before the set).  */
		  ! reg_in_basic_block_p (p, SET_DEST (set))
		  /* And the set is not guaranteed to be executed once
		     the loop starts, or the value before the set is
		     needed before the set occurs...

		     ??? Note we have quadratic behavior here, mitigated
		     by the fact that the previous test will often fail for
		     large loops.  Rather than re-scanning the entire loop
		     each time for register usage, we should build tables
		     of the register usage and use them here instead.  */
		  && (maybe_never
		      || loop_reg_used_before_p (loop, set, p)))
		/* It is unsafe to move the set.  However, it may be OK to
		   move the source into a new pseudo, and substitute a
		   reg-to-reg copy for the original insn.

		   This code used to consider it OK to move a set of a variable
		   which was not created by the user and not used in an exit
		   test.
		   That behavior is incorrect and was removed.  */
		insert_temp = 1;

	      /* Don't try to optimize a MODE_CC set with a constant
		 source.  It probably will be combined with a conditional
		 jump.  */
	      if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
		  && CONSTANT_P (src))
		;
	      /* Don't try to optimize a register that was made
		 by loop-optimization for an inner loop.
		 We don't know its life-span, so we can't compute
		 the benefit.  */
	      else if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
		;
	      /* Don't move the source and add a reg-to-reg copy:
		 - with -Os (this certainly increases size),
		 - if the mode doesn't support copy operations (obviously),
		 - if the source is already a reg (the motion will gain nothing),
		 - if the source is a legitimate constant (likewise).  */
	      else if (insert_temp
		       && (optimize_size
			   || ! can_copy_p (GET_MODE (SET_SRC (set)))
			   || GET_CODE (SET_SRC (set)) == REG
			   || (CONSTANT_P (SET_SRC (set))
			       && LEGITIMATE_CONSTANT_P (SET_SRC (set)))))
		;
	      else if ((tem = loop_invariant_p (loop, src))
		       && (dependencies == 0
			   || (tem2
			       = loop_invariant_p (loop, dependencies)) != 0)
		       && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
			   || (tem1
			       = consec_sets_invariant_p
			       (loop, SET_DEST (set),
				regs->array[REGNO (SET_DEST (set))].set_in_loop,
				p)))
		       /* If the insn can cause a trap (such as divide by zero),
			  can't move it unless it's guaranteed to be executed
			  once loop is entered.  Even a function call might
			  prevent the trap insn from being reached
			  (since it might exit!)  */
		       && ! ((maybe_never || call_passed)
			     && may_trap_p (src)))
		{
		  struct movable *m;
		  int regno = REGNO (SET_DEST (set));

		  /* A potential lossage is where we have a case where two insns
		     can be combined as long as they are both in the loop, but
		     we move one of them outside the loop.  For large loops,
		     this can lose.  The most common case of this is the address
		     of a function being called.

		     Therefore, if this register is marked as being used
		     exactly once if we are in a loop with calls
		     (a "large loop"), see if we can replace the usage of
		     this register with the source of this SET.  If we can,
		     delete this insn.

		     Don't do this if P has a REG_RETVAL note or if we have
		     SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */

		  if (loop_info->has_call
		      && regs->array[regno].single_usage != 0
		      && regs->array[regno].single_usage != const0_rtx
		      && REGNO_FIRST_UID (regno) == INSN_UID (p)
		      && (REGNO_LAST_UID (regno)
			  == INSN_UID (regs->array[regno].single_usage))
		      && regs->array[regno].set_in_loop == 1
		      && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
		      && ! side_effects_p (SET_SRC (set))
		      && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
		      && (! SMALL_REGISTER_CLASSES
			  || (! (GET_CODE (SET_SRC (set)) == REG
				 && (REGNO (SET_SRC (set))
				     < FIRST_PSEUDO_REGISTER))))
		      /* This test is not redundant; SET_SRC (set) might be
			 a call-clobbered register and the life of REGNO
			 might span a call.  */
		      && ! modified_between_p (SET_SRC (set), p,
					       regs->array[regno].single_usage)
		      && no_labels_between_p (p,
					      regs->array[regno].single_usage)
		      && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
					       regs->array[regno].single_usage))
		    {
		      /* Replace any usage in a REG_EQUAL note.  Must copy
			 the new source, so that we don't get rtx sharing
			 between the SET_SOURCE and REG_NOTES of insn p.  */
		      REG_NOTES (regs->array[regno].single_usage)
			= (replace_rtx
			   (REG_NOTES (regs->array[regno].single_usage),
			    SET_DEST (set), copy_rtx (SET_SRC (set))));

		      delete_insn (p);
		      for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = 0;
		      continue;
		    }

		  m = xmalloc (sizeof (struct movable));
		  m->next = 0;
		  m->insn = p;
		  m->set_src = src;
		  m->dependencies = dependencies;
		  m->set_dest = SET_DEST (set);
		  m->force = 0;
		  m->consec
		    = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
		  m->done = 0;
		  m->forces = 0;
		  m->partial = 0;
		  m->move_insn = move_insn;
		  m->move_insn_first = 0;
		  m->insert_temp = insert_temp;
		  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		  m->savemode = VOIDmode;
		  m->regno = regno;
		  /* Set M->cond if either loop_invariant_p
		     or consec_sets_invariant_p returned 2
		     (only conditionally invariant).  */
		  m->cond = ((tem | tem1 | tem2) > 1);
		  m->global = LOOP_REG_GLOBAL_P (loop, regno);
		  m->match = 0;
		  m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		  m->savings = regs->array[regno].n_times_set;
		  if (find_reg_note (p, REG_RETVAL, NULL_RTX))
		    m->savings += libcall_benefit (p);
		  for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
		    regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
		  /* Add M to the end of the chain MOVABLES.  */
		  loop_movables_add (movables, m);

		  if (m->consec > 0)
		    {
		      /* It is possible for the first instruction to have a
			 REG_EQUAL note but a non-invariant SET_SRC, so we must
			 remember the status of the first instruction in case
			 the last instruction doesn't have a REG_EQUAL note.  */
		      m->move_insn_first = m->move_insn;

		      /* Skip this insn, not checking REG_LIBCALL notes.  */
		      p = next_nonnote_insn (p);
		      /* Skip the consecutive insns, if there are any.  */
		      p = skip_consec_insns (p, m->consec);
		      /* Back up to the last insn of the consecutive group.  */
		      p = prev_nonnote_insn (p);

		      /* We must now reset m->move_insn, m->is_equiv, and
			 possibly m->set_src to correspond to the effects of
			 all the insns.  */
		      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
		      if (temp)
			m->set_src = XEXP (temp, 0), m->move_insn = 1;
		      else
			{
			  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
			  if (temp && CONSTANT_P (XEXP (temp, 0)))
			    m->set_src = XEXP (temp, 0), m->move_insn = 1;
			  else
			    m->move_insn = 0;

			}
		      m->is_equiv
			= (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
		    }
		}
	      /* If this register is always set within a STRICT_LOW_PART
		 or set to zero, then its high bytes are constant.
		 So clear them outside the loop and within the loop
		 just load the low bytes.
		 We must check that the machine has an instruction to do so.
		 Also, if the value loaded into the register
		 depends on the same register, this cannot be done.  */
	      else if (SET_SRC (set) == const0_rtx
		       && GET_CODE (NEXT_INSN (p)) == INSN
		       && (set1 = single_set (NEXT_INSN (p)))
		       && GET_CODE (set1) == SET
		       && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
		       && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
		       && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
			   == SET_DEST (set))
		       && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
		{
		  int regno = REGNO (SET_DEST (set));
		  if (regs->array[regno].set_in_loop == 2)
		    {
		      struct movable *m;
		      m = xmalloc (sizeof (struct movable));
		      m->next = 0;
		      m->insn = p;
		      m->set_dest = SET_DEST (set);
		      m->dependencies = 0;
		      m->force = 0;
		      m->consec = 0;
		      m->done = 0;
		      m->forces = 0;
		      m->move_insn = 0;
		      m->move_insn_first = 0;
		      m->insert_temp = insert_temp;
		      m->partial = 1;
		      /* If the insn may not be executed on some cycles,
			 we can't clear the whole reg; clear just high part.
			 Not even if the reg is used only within this loop.
			 Consider this:
			 while (1)
			   while (s != t) {
			     if (foo ()) x = *s;
			     use (x);
			   }
			 Clearing x before the inner loop could clobber a value
			 being saved from the last time around the outer loop.
			 However, if the reg is not used outside this loop
			 and all uses of the register are in the same
			 basic block as the store, there is no problem.

			 If this insn was made by loop, we don't know its
			 INSN_LUID and hence must make a conservative
			 assumption.  */
		      m->global = (INSN_UID (p) >= max_uid_for_loop
				   || LOOP_REG_GLOBAL_P (loop, regno)
				   || (labels_in_range_p
				       (p, REGNO_FIRST_LUID (regno))));
		      if (maybe_never && m->global)
			m->savemode = GET_MODE (SET_SRC (set1));
		      else
			m->savemode = VOIDmode;
		      m->regno = regno;
		      m->cond = 0;
		      m->match = 0;
		      m->lifetime = LOOP_REG_LIFETIME (loop, regno);
		      m->savings = 1;
		      for (i = 0;
			   i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
			   i++)
			regs->array[regno+i].set_in_loop = -1;
		      /* Add M to the end of the chain MOVABLES.  */
		      loop_movables_add (movables, m);
		    }
		}
	    }
	}
      /* Past a call insn, we get to insns which might not be executed
	 because the call might exit.  This matters for insns that trap.
	 Constant and pure call insns always return, so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
	call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
	 can't count on whether or how many times they will be
	 executed during each iteration.  Therefore, we can
	 only move out sets of trivial variables
	 (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
	       /* If we enter the loop in the middle, and scan around to the
		  beginning, don't set maybe_never for that.  This must be an
		  unconditional jump, otherwise the code at the top of the
		  loop might never be executed.  Unconditional jumps are
		  followed by a barrier then the loop_end.  */
	       && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
		     && NEXT_INSN (NEXT_INSN (p)) == loop_end
		     && any_uncondjump_p (p)))
	maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
	{
	  /* At the virtual top of a converted loop, insns are again known to
	     be executed: logically, the loop begins here even though the exit
	     code has been duplicated.  */
	  if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
	    maybe_never = call_passed = 0;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
	    loop_depth++;
	  else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
	    loop_depth--;
	}
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads
     leads when it dies right into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */

  combine_movables (movables, regs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in regs->array[I].set_in_loop for each reg I that is moved.

     For machines with few registers this increases code size, so do not
     move movables when optimizing for code size on such machines.
     (The 18 below is the value for i386.)  */

  if (!optimize_size
      || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
    {
      move_movables (loop, movables, threshold, insn_count);

      /* Recalculate regs->array if move_movables has created new
	 registers.  */
      if (max_reg_num () > regs->num)
	{
	  loop_regs_scan (loop, 0);
	  for (update_start = loop_start;
	       PREV_INSN (update_start)
		 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
	       update_start = PREV_INSN (update_start))
	    ;
	  update_end = NEXT_INSN (loop_end);

	  reg_scan_update (update_start, update_end, loop_max_reg);
	  loop_max_reg = max_reg_num ();
	}
    }

  /* Now candidates that still are negative are those not moved.
     Change regs->array[I].set_in_loop to indicate that those are not actually
     invariant.  */
  for (i = 0; i < regs->num; i++)
    if (regs->array[i].set_in_loop < 0)
      regs->array[i].set_in_loop = regs->array[i].n_times_set;

  /* Now that we've moved some things out of the loop, we might be able to
     hoist even more memory references.  */
  load_mems (loop);

  /* Recalculate regs->array if load_mems has created new registers.  */
  if (max_reg_num () > regs->num)
    loop_regs_scan (loop, 0);

  for (update_start = loop_start;
       PREV_INSN (update_start)
	 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
       update_start = PREV_INSN (update_start))
    ;
  update_end = NEXT_INSN (loop_end);

  reg_scan_update (update_start, update_end, loop_max_reg);
  loop_max_reg = max_reg_num ();

  if (flag_strength_reduce)
    {
      if (update_end && GET_CODE (update_end) == CODE_LABEL)
	/* Ensure our label doesn't go away.  */
	LABEL_NUSES (update_end)++;

      strength_reduce (loop, flags);

      reg_scan_update (update_start, update_end, loop_max_reg);
      loop_max_reg = max_reg_num ();

      if (update_end && GET_CODE (update_end) == CODE_LABEL
	  && --LABEL_NUSES (update_end) == 0)
	delete_related_insns (update_end);
    }

  /* The movable information is required for strength reduction.  */
  loop_movables_free (movables);

  free (regs->array);
  regs->array = 0;
  regs->num = 0;
}
1181 | } |
1182 | \f | |
1183 | /* Add elements to *OUTPUT to record all the pseudo-regs | |
1184 | mentioned in IN_THIS but not mentioned in NOT_IN_THIS. */ | |
1185 | ||
1186 | void | |
0c20a65f | 1187 | record_excess_regs (rtx in_this, rtx not_in_this, rtx *output) |
b4ad7b23 RS |
1188 | { |
1189 | enum rtx_code code; | |
6f7d635c | 1190 | const char *fmt; |
b4ad7b23 RS |
1191 | int i; |
1192 | ||
1193 | code = GET_CODE (in_this); | |
1194 | ||
1195 | switch (code) | |
1196 | { | |
1197 | case PC: | |
1198 | case CC0: | |
1199 | case CONST_INT: | |
1200 | case CONST_DOUBLE: | |
1201 | case CONST: | |
1202 | case SYMBOL_REF: | |
1203 | case LABEL_REF: | |
1204 | return; | |
1205 | ||
1206 | case REG: | |
1207 | if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER | |
1208 | && ! reg_mentioned_p (in_this, not_in_this)) | |
38a448ca | 1209 | *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output); |
b4ad7b23 | 1210 | return; |
e6fcb60d | 1211 | |
e9a25f70 JL |
1212 | default: |
1213 | break; | |
b4ad7b23 RS |
1214 | } |
1215 | ||
1216 | fmt = GET_RTX_FORMAT (code); | |
1217 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
1218 | { | |
1219 | int j; | |
1220 | ||
1221 | switch (fmt[i]) | |
1222 | { | |
1223 | case 'E': | |
1224 | for (j = 0; j < XVECLEN (in_this, i); j++) | |
1225 | record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output); | |
1226 | break; | |
1227 | ||
1228 | case 'e': | |
1229 | record_excess_regs (XEXP (in_this, i), not_in_this, output); | |
1230 | break; | |
1231 | } | |
1232 | } | |
1233 | } | |
1234 | \f | |
1235 | /* Check what regs are referred to in the libcall block ending with INSN, | |
1236 | aside from those mentioned in the equivalent value. | |
1237 | If there are none, return 0. | |
1238 | If there are one or more, return an EXPR_LIST containing all of them. */ | |
1239 | ||
89d3d442 | 1240 | rtx |
0c20a65f | 1241 | libcall_other_reg (rtx insn, rtx equiv) |
b4ad7b23 | 1242 | { |
5fd8383e | 1243 | rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX); |
b4ad7b23 RS |
1244 | rtx p = XEXP (note, 0); |
1245 | rtx output = 0; | |
1246 | ||
1247 | /* First, find all the regs used in the libcall block | |
1248 | that are not mentioned as inputs to the result. */ | |
1249 | ||
1250 | while (p != insn) | |
1251 | { | |
1252 | if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN | |
1253 | || GET_CODE (p) == CALL_INSN) | |
1254 | record_excess_regs (PATTERN (p), equiv, &output); | |
1255 | p = NEXT_INSN (p); | |
1256 | } | |
1257 | ||
1258 | return output; | |
1259 | } | |
1260 | \f | |
1261 | /* Return 1 if all uses of REG | |
1262 | are between INSN and the end of the basic block. */ | |
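| /* In other words: REG's recorded first use must be INSN itself; we | |
| then walk forward and succeed if REG's recorded last use is found | |
| no later than the first JUMP_INSN, and before any CODE_LABEL or | |
| BARRIER ends the block. */ | |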
1263 | ||
e6fcb60d | 1264 | static int |
0c20a65f | 1265 | reg_in_basic_block_p (rtx insn, rtx reg) |
b4ad7b23 RS |
1266 | { |
1267 | int regno = REGNO (reg); | |
1268 | rtx p; | |
1269 | ||
b1f21e0a | 1270 | if (REGNO_FIRST_UID (regno) != INSN_UID (insn)) |
b4ad7b23 RS |
1271 | return 0; |
1272 | ||
1273 | /* Search this basic block for the already recorded last use of the reg. */ | |
1274 | for (p = insn; p; p = NEXT_INSN (p)) | |
1275 | { | |
1276 | switch (GET_CODE (p)) | |
1277 | { | |
1278 | case NOTE: | |
1279 | break; | |
1280 | ||
1281 | case INSN: | |
1282 | case CALL_INSN: | |
1283 | /* Ordinary insn: if this is the last use, we win. */ | |
b1f21e0a | 1284 | if (REGNO_LAST_UID (regno) == INSN_UID (p)) |
b4ad7b23 RS |
1285 | return 1; |
1286 | break; | |
1287 | ||
1288 | case JUMP_INSN: | |
1289 | /* Jump insn: if this is the last use, we win. */ | |
b1f21e0a | 1290 | if (REGNO_LAST_UID (regno) == INSN_UID (p)) |
b4ad7b23 RS |
1291 | return 1; |
1292 | /* Otherwise, it's the end of the basic block, so we lose. */ | |
1293 | return 0; | |
1294 | ||
1295 | case CODE_LABEL: | |
1296 | case BARRIER: | |
1297 | /* It's the end of the basic block, so we lose. */ | |
1298 | return 0; | |
e6fcb60d | 1299 | |
e9a25f70 JL |
1300 | default: |
1301 | break; | |
b4ad7b23 RS |
1302 | } |
1303 | } | |
1304 | ||
035a6890 R |
1305 | /* The "last use" that was recorded can't be found after the first |
1306 | use. This can happen when the last use was deleted while | |
1307 | processing an inner loop, this inner loop was then completely | |
1308 | unrolled, and the outer loop is always exited after the inner loop, | |
1309 | so that everything after the first use becomes a single basic block. */ | |
1310 | return 1; | |
b4ad7b23 RS |
1311 | } |
1312 | \f | |
1313 | /* Compute the benefit of eliminating the insns in the block whose | |
1314 | last insn is LAST. This may be a group of insns used to compute a | |
1315 | value directly or can contain a library call. */ | |
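| /* For example: a block made up of one CALL_INSN and three ordinary | |
| insns (none of them USE or CLOBBER patterns) before LAST scores | |
| 10 + 3 = 13, since each call is assumed to hide at least ten insns | |
| of library code. */ | |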
1316 | ||
1317 | static int | |
0c20a65f | 1318 | libcall_benefit (rtx last) |
b4ad7b23 RS |
1319 | { |
1320 | rtx insn; | |
1321 | int benefit = 0; | |
1322 | ||
5fd8383e | 1323 | for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0); |
b4ad7b23 RS |
1324 | insn != last; insn = NEXT_INSN (insn)) |
1325 | { | |
1326 | if (GET_CODE (insn) == CALL_INSN) | |
1327 | benefit += 10; /* Assume at least this many insns in a library | |
0f41302f | 1328 | routine. */ |
b4ad7b23 RS |
1329 | else if (GET_CODE (insn) == INSN |
1330 | && GET_CODE (PATTERN (insn)) != USE | |
1331 | && GET_CODE (PATTERN (insn)) != CLOBBER) | |
1332 | benefit++; | |
1333 | } | |
1334 | ||
1335 | return benefit; | |
1336 | } | |
1337 | \f | |
1338 | /* Skip COUNT insns from INSN, counting library calls as 1 insn. */ | |
1339 | ||
1340 | static rtx | |
0c20a65f | 1341 | skip_consec_insns (rtx insn, int count) |
b4ad7b23 RS |
1342 | { |
1343 | for (; count > 0; count--) | |
1344 | { | |
1345 | rtx temp; | |
1346 | ||
1347 | /* If first insn of libcall sequence, skip to end. */ | |
e6fcb60d | 1348 | /* Do this at start of loop, since INSN is guaranteed to |
b4ad7b23 RS |
1349 | be an insn here. */ |
1350 | if (GET_CODE (insn) != NOTE | |
5fd8383e | 1351 | && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX))) |
b4ad7b23 RS |
1352 | insn = XEXP (temp, 0); |
1353 | ||
e6fcb60d KH |
1354 | do |
1355 | insn = NEXT_INSN (insn); | |
b4ad7b23 RS |
1356 | while (GET_CODE (insn) == NOTE); |
1357 | } | |
1358 | ||
1359 | return insn; | |
1360 | } | |
1361 | ||
1362 | /* Ignore any movable whose insn falls within a libcall | |
1363 | which is part of another movable. | |
1364 | We make use of the fact that the movable for the libcall value | |
1365 | was made later and so appears later on the chain. */ | |
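| /* E.g. (with made-up movables): if movable A's insn lies inside the | |
| REG_LIBCALL .. REG_RETVAL range of a later movable B, then A is | |
| marked done, so only the libcall's final value is considered for | |
| motion. */ | |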
1366 | ||
1367 | static void | |
0c20a65f | 1368 | ignore_some_movables (struct loop_movables *movables) |
b4ad7b23 | 1369 | { |
b3694847 | 1370 | struct movable *m, *m1; |
b4ad7b23 | 1371 | |
02055ad6 | 1372 | for (m = movables->head; m; m = m->next) |
b4ad7b23 RS |
1373 | { |
1374 | /* Is this a movable for the value of a libcall? */ | |
5fd8383e | 1375 | rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX); |
b4ad7b23 RS |
1376 | if (note) |
1377 | { | |
1378 | rtx insn; | |
1379 | /* Check for earlier movables inside that range, | |
1380 | and mark them invalid. We cannot use LUIDs here because | |
1381 | insns created by loop.c for prior loops don't have LUIDs. | |
1382 | Rather than reject all such insns from movables, we just | |
1383 | explicitly check each insn in the libcall (since invariant | |
1384 | libcalls aren't that common). */ | |
1385 | for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn)) | |
02055ad6 | 1386 | for (m1 = movables->head; m1 != m; m1 = m1->next) |
b4ad7b23 RS |
1387 | if (m1->insn == insn) |
1388 | m1->done = 1; | |
1389 | } | |
1390 | } | |
e6fcb60d | 1391 | } |
b4ad7b23 RS |
1392 | |
1393 | /* For each movable insn, see if the reg that it loads feeds, at | |
1394 | the point where it dies, directly into another conditionally movable insn. | |
1395 | If so, record that the second insn "forces" the first one, | |
1396 | since the second can be moved only if the first is. */ | |
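| /* Concretely (with made-up insns): given `x = CONST' whose reg X | |
| dies in the conditionally movable `y = x + x', the Y movable is | |
| recorded as forcing the X movable, and X's lifetime and savings | |
| are increased by Y's to raise its priority. */ | |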
1397 | ||
1398 | static void | |
0c20a65f | 1399 | force_movables (struct loop_movables *movables) |
b4ad7b23 | 1400 | { |
b3694847 SS |
1401 | struct movable *m, *m1; |
1402 | ||
02055ad6 | 1403 | for (m1 = movables->head; m1; m1 = m1->next) |
b4ad7b23 RS |
1404 | /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */ |
1405 | if (!m1->partial && !m1->done) | |
1406 | { | |
1407 | int regno = m1->regno; | |
1408 | for (m = m1->next; m; m = m->next) | |
1409 | /* ??? Could this be a bug? What if CSE caused the | |
1410 | register of M1 to be used after this insn? | |
1411 | Since CSE does not update regno_last_uid, | |
1412 | this insn M->insn might not be where it dies. | |
1413 | But very likely this doesn't matter; what matters is | |
1414 | that M's reg is computed from M1's reg. */ | |
b1f21e0a | 1415 | if (INSN_UID (m->insn) == REGNO_LAST_UID (regno) |
b4ad7b23 RS |
1416 | && !m->done) |
1417 | break; | |
1418 | if (m != 0 && m->set_src == m1->set_dest | |
1419 | /* If m->consec, m->set_src isn't valid. */ | |
1420 | && m->consec == 0) | |
1421 | m = 0; | |
1422 | ||
1423 | /* Increase the priority of moving the first insn | |
1424 | since it permits the second to be moved as well. */ | |
1425 | if (m != 0) | |
1426 | { | |
1427 | m->forces = m1; | |
1428 | m1->lifetime += m->lifetime; | |
3875b31d | 1429 | m1->savings += m->savings; |
b4ad7b23 RS |
1430 | } |
1431 | } | |
1432 | } | |
1433 | \f | |
1434 | /* Find invariant expressions that are equal and can be combined into | |
1435 | one register. */ | |
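| /* E.g. (with made-up regs): two pseudos loaded from the same | |
| constant, `r1 = 42' and `r2 = 42', each set exactly once, are | |
| matched here so that move_movables later rewrites all uses of r2 | |
| as r1. */ | |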
1436 | ||
1437 | static void | |
0c20a65f | 1438 | combine_movables (struct loop_movables *movables, struct loop_regs *regs) |
b4ad7b23 | 1439 | { |
b3694847 | 1440 | struct movable *m; |
703ad42b | 1441 | char *matched_regs = xmalloc (regs->num); |
b4ad7b23 RS |
1442 | enum machine_mode mode; |
1443 | ||
1444 | /* Regs that are set more than once are not allowed to match | |
1445 | or be matched. I'm no longer sure why not. */ | |
02a566dc DJ |
1446 | /* Only pseudo registers are allowed to match or be matched, |
1447 | since move_movables does not validate the change. */ | |
b4ad7b23 RS |
1448 | /* Perhaps testing m->consec_sets would be more appropriate here? */ |
1449 | ||
02055ad6 | 1450 | for (m = movables->head; m; m = m->next) |
f1d4ac80 | 1451 | if (m->match == 0 && regs->array[m->regno].n_times_set == 1 |
02a566dc | 1452 | && m->regno >= FIRST_PSEUDO_REGISTER |
8f7ee471 | 1453 | && !m->insert_temp |
e6fcb60d | 1454 | && !m->partial) |
b4ad7b23 | 1455 | { |
b3694847 | 1456 | struct movable *m1; |
b4ad7b23 | 1457 | int regno = m->regno; |
b4ad7b23 | 1458 | |
961192e1 | 1459 | memset (matched_regs, 0, regs->num); |
b4ad7b23 RS |
1460 | matched_regs[regno] = 1; |
1461 | ||
88016fb7 DE |
1462 | /* We want later insns to match the first one. Don't make the first |
1463 | one match any later ones. So start this loop at m->next. */ | |
1464 | for (m1 = m->next; m1; m1 = m1->next) | |
02a566dc | 1465 | if (m != m1 && m1->match == 0 |
8f7ee471 | 1466 | && !m1->insert_temp |
f1d4ac80 | 1467 | && regs->array[m1->regno].n_times_set == 1 |
02a566dc | 1468 | && m1->regno >= FIRST_PSEUDO_REGISTER |
b4ad7b23 RS |
1469 | /* A reg used outside the loop mustn't be eliminated. */ |
1470 | && !m1->global | |
1471 | /* A reg used for zero-extending mustn't be eliminated. */ | |
1472 | && !m1->partial | |
1473 | && (matched_regs[m1->regno] | |
1474 | || | |
1475 | ( | |
1476 | /* Can combine regs with different modes loaded from the | |
1477 | same constant only if the modes are the same or | |
1478 | if both are integer modes with M wider or the same | |
1479 | width as M1. The check for integer is redundant, but | |
1480 | safe, since the only case of differing destination | |
1481 | modes with equal sources is when both sources are | |
1482 | VOIDmode, i.e., CONST_INT. */ | |
1483 | (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest) | |
1484 | || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT | |
1485 | && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT | |
1486 | && (GET_MODE_BITSIZE (GET_MODE (m->set_dest)) | |
1487 | >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest))))) | |
1488 | /* See if the source of M1 says it matches M. */ | |
1489 | && ((GET_CODE (m1->set_src) == REG | |
1490 | && matched_regs[REGNO (m1->set_src)]) | |
1491 | || rtx_equal_for_loop_p (m->set_src, m1->set_src, | |
1ecd860b | 1492 | movables, regs)))) |
b4ad7b23 RS |
1493 | && ((m->dependencies == m1->dependencies) |
1494 | || rtx_equal_p (m->dependencies, m1->dependencies))) | |
1495 | { | |
1496 | m->lifetime += m1->lifetime; | |
1497 | m->savings += m1->savings; | |
1498 | m1->done = 1; | |
1499 | m1->match = m; | |
1500 | matched_regs[m1->regno] = 1; | |
1501 | } | |
1502 | } | |
1503 | ||
1504 | /* Now combine the regs used for zero-extension. | |
1505 | This can be done for those not marked `global' | |
1506 | provided their lives don't overlap. */ | |
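| /* Sketch (with made-up regs): two `partial' movables extending | |
| from the same narrow mode may share one reg when the LUID ranges | |
| of their regs' lives, [first1, last1] and [first2, last2], do not | |
| overlap. */ | |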
1507 | ||
1508 | for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode; | |
1509 | mode = GET_MODE_WIDER_MODE (mode)) | |
1510 | { | |
b3694847 | 1511 | struct movable *m0 = 0; |
b4ad7b23 RS |
1512 | |
1513 | /* Combine all the registers for extension from mode MODE. | |
1514 | Don't combine any that are used outside this loop. */ | |
02055ad6 | 1515 | for (m = movables->head; m; m = m->next) |
b4ad7b23 RS |
1516 | if (m->partial && ! m->global |
1517 | && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn))))) | |
1518 | { | |
b3694847 SS |
1519 | struct movable *m1; |
1520 | ||
8529a489 MH |
1521 | int first = REGNO_FIRST_LUID (m->regno); |
1522 | int last = REGNO_LAST_LUID (m->regno); | |
b4ad7b23 RS |
1523 | |
1524 | if (m0 == 0) | |
1525 | { | |
1526 | /* First one: don't check for overlap, just record it. */ | |
1527 | m0 = m; | |
e6fcb60d | 1528 | continue; |
b4ad7b23 RS |
1529 | } |
1530 | ||
1531 | /* Make sure they extend to the same mode. | |
1532 | (Almost always true.) */ | |
1533 | if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest)) | |
e6fcb60d | 1534 | continue; |
b4ad7b23 RS |
1535 | |
1536 | /* We already have one: check for overlap with those | |
1537 | already combined together. */ | |
02055ad6 | 1538 | for (m1 = movables->head; m1 != m; m1 = m1->next) |
b4ad7b23 | 1539 | if (m1 == m0 || (m1->partial && m1->match == m0)) |
8529a489 MH |
1540 | if (! (REGNO_FIRST_LUID (m1->regno) > last |
1541 | || REGNO_LAST_LUID (m1->regno) < first)) | |
b4ad7b23 RS |
1542 | goto overlap; |
1543 | ||
1544 | /* No overlap: we can combine this with the others. */ | |
1545 | m0->lifetime += m->lifetime; | |
1546 | m0->savings += m->savings; | |
1547 | m->done = 1; | |
1548 | m->match = m0; | |
1549 | ||
e6fcb60d KH |
1550 | overlap: |
1551 | ; | |
b4ad7b23 RS |
1552 | } |
1553 | } | |
4da896b2 MM |
1554 | |
1555 | /* Clean up. */ | |
1556 | free (matched_regs); | |
b4ad7b23 | 1557 | } |
28680540 MM |
1558 | |
1559 | /* Returns the number of movable instructions in LOOP that were not | |
1560 | moved outside the loop. */ | |
1561 | ||
1562 | static int | |
0c20a65f | 1563 | num_unmoved_movables (const struct loop *loop) |
28680540 MM |
1564 | { |
1565 | int num = 0; | |
1566 | struct movable *m; | |
1567 | ||
1568 | for (m = LOOP_MOVABLES (loop)->head; m; m = m->next) | |
1569 | if (!m->done) | |
1570 | ++num; | |
1571 | ||
1572 | return num; | |
1573 | } | |
1574 | ||
b4ad7b23 RS |
1575 | \f |
1576 | /* Return 1 if regs X and Y will become the same if moved. */ | |
1577 | ||
1578 | static int | |
0c20a65f | 1579 | regs_match_p (rtx x, rtx y, struct loop_movables *movables) |
b4ad7b23 | 1580 | { |
770ae6cc RK |
1581 | unsigned int xn = REGNO (x); |
1582 | unsigned int yn = REGNO (y); | |
b4ad7b23 RS |
1583 | struct movable *mx, *my; |
1584 | ||
02055ad6 | 1585 | for (mx = movables->head; mx; mx = mx->next) |
b4ad7b23 RS |
1586 | if (mx->regno == xn) |
1587 | break; | |
1588 | ||
02055ad6 | 1589 | for (my = movables->head; my; my = my->next) |
b4ad7b23 RS |
1590 | if (my->regno == yn) |
1591 | break; | |
1592 | ||
1593 | return (mx && my | |
1594 | && ((mx->match == my->match && mx->match != 0) | |
1595 | || mx->match == my | |
1596 | || mx == my->match)); | |
1597 | } | |
1598 | ||
1599 | /* Return 1 if X and Y are identical-looking rtx's. | |
1600 | This is the Lisp function EQUAL for rtx arguments. | |
1601 | ||
1602 | If two registers are matching movables or a movable register and an | |
1603 | equivalent constant, consider them equal. */ | |
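| /* Thus (with made-up rtl): if pseudo R1 is a movable known to load | |
| (const_int 42), comparing (plus R1 R2) with (plus (const_int 42) R2) | |
| returns 1 here, although plain rtx_equal_p would return 0. */ | |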
1604 | ||
1605 | static int | |
0c20a65f AJ |
1606 | rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables, |
1607 | struct loop_regs *regs) | |
b4ad7b23 | 1608 | { |
b3694847 SS |
1609 | int i; |
1610 | int j; | |
1611 | struct movable *m; | |
1612 | enum rtx_code code; | |
1613 | const char *fmt; | |
b4ad7b23 RS |
1614 | |
1615 | if (x == y) | |
1616 | return 1; | |
1617 | if (x == 0 || y == 0) | |
1618 | return 0; | |
1619 | ||
1620 | code = GET_CODE (x); | |
1621 | ||
1622 | /* If we have a register and a constant, they may sometimes be | |
1623 | equal. */ | |
f1d4ac80 | 1624 | if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2 |
b4ad7b23 | 1625 | && CONSTANT_P (y)) |
b1a0c816 | 1626 | { |
02055ad6 | 1627 | for (m = movables->head; m; m = m->next) |
b1a0c816 JL |
1628 | if (m->move_insn && m->regno == REGNO (x) |
1629 | && rtx_equal_p (m->set_src, y)) | |
1630 | return 1; | |
1631 | } | |
f1d4ac80 | 1632 | else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2 |
b4ad7b23 | 1633 | && CONSTANT_P (x)) |
b1a0c816 | 1634 | { |
02055ad6 | 1635 | for (m = movables->head; m; m = m->next) |
b1a0c816 JL |
1636 | if (m->move_insn && m->regno == REGNO (y) |
1637 | && rtx_equal_p (m->set_src, x)) | |
1638 | return 1; | |
1639 | } | |
b4ad7b23 RS |
1640 | |
1641 | /* Otherwise, rtx's of different codes cannot be equal. */ | |
1642 | if (code != GET_CODE (y)) | |
1643 | return 0; | |
1644 | ||
1645 | /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. | |
1646 | (REG:SI x) and (REG:HI x) are NOT equivalent. */ | |
1647 | ||
1648 | if (GET_MODE (x) != GET_MODE (y)) | |
1649 | return 0; | |
1650 | ||
1651 | /* These three types of rtx's can be compared nonrecursively. */ | |
1652 | if (code == REG) | |
1653 | return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables)); | |
1654 | ||
1655 | if (code == LABEL_REF) | |
1656 | return XEXP (x, 0) == XEXP (y, 0); | |
1657 | if (code == SYMBOL_REF) | |
1658 | return XSTR (x, 0) == XSTR (y, 0); | |
1659 | ||
1660 | /* Compare the elements. If any pair of corresponding elements | |
1661 | fail to match, return 0 for the whole thing. | |
1662 | ||
1663 | fmt = GET_RTX_FORMAT (code); | |
1664 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
1665 | { | |
1666 | switch (fmt[i]) | |
1667 | { | |
5fd8383e RK |
1668 | case 'w': |
1669 | if (XWINT (x, i) != XWINT (y, i)) | |
1670 | return 0; | |
1671 | break; | |
1672 | ||
b4ad7b23 RS |
1673 | case 'i': |
1674 | if (XINT (x, i) != XINT (y, i)) | |
1675 | return 0; | |
1676 | break; | |
1677 | ||
1678 | case 'E': | |
1679 | /* Two vectors must have the same length. */ | |
1680 | if (XVECLEN (x, i) != XVECLEN (y, i)) | |
1681 | return 0; | |
1682 | ||
1683 | /* And the corresponding elements must match. */ | |
1684 | for (j = 0; j < XVECLEN (x, i); j++) | |
1ecd860b MH |
1685 | if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j), |
1686 | movables, regs) == 0) | |
b4ad7b23 RS |
1687 | return 0; |
1688 | break; | |
1689 | ||
1690 | case 'e': | |
ed5bb68d MH |
1691 | if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs) |
1692 | == 0) | |
b4ad7b23 RS |
1693 | return 0; |
1694 | break; | |
1695 | ||
1696 | case 's': | |
1697 | if (strcmp (XSTR (x, i), XSTR (y, i))) | |
1698 | return 0; | |
1699 | break; | |
1700 | ||
1701 | case 'u': | |
1702 | /* These are just backpointers, so they don't matter. */ | |
1703 | break; | |
1704 | ||
1705 | case '0': | |
1706 | break; | |
1707 | ||
1708 | /* It is believed that rtx's at this level will never | |
1709 | contain anything but integers and other rtx's, | |
1710 | except for within LABEL_REFs and SYMBOL_REFs. */ | |
1711 | default: | |
1712 | abort (); | |
1713 | } | |
1714 | } | |
1715 | return 1; | |
1716 | } | |
1717 | \f | |
c160c628 | 1718 | /* If X contains any LABEL_REF's, add REG_LABEL notes for them to all |
5b1ef594 | 1719 | insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL |
dc297297 | 1720 | references is incremented once for each added note. */ |
c160c628 RK |
1721 | |
1722 | static void | |
0c20a65f | 1723 | add_label_notes (rtx x, rtx insns) |
c160c628 RK |
1724 | { |
1725 | enum rtx_code code = GET_CODE (x); | |
7dcd3836 | 1726 | int i, j; |
6f7d635c | 1727 | const char *fmt; |
c160c628 RK |
1728 | rtx insn; |
1729 | ||
82d00367 | 1730 | if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x)) |
c160c628 | 1731 | { |
6b3603c2 | 1732 | /* This code used to ignore labels that referred to dispatch tables to |
e0bb17a8 | 1733 | avoid flow generating (slightly) worse code. |
6b3603c2 JL |
1734 | |
1735 | We no longer ignore such label references (see LABEL_REF handling in | |
1736 | mark_jump_label for additional information). */ | |
1737 | for (insn = insns; insn; insn = NEXT_INSN (insn)) | |
1738 | if (reg_mentioned_p (XEXP (x, 0), insn)) | |
5b1ef594 | 1739 | { |
6b8c9327 | 1740 | REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0), |
5b1ef594 JDA |
1741 | REG_NOTES (insn)); |
1742 | if (LABEL_P (XEXP (x, 0))) | |
1743 | LABEL_NUSES (XEXP (x, 0))++; | |
1744 | } | |
c160c628 RK |
1745 | } |
1746 | ||
1747 | fmt = GET_RTX_FORMAT (code); | |
1748 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
7dcd3836 RK |
1749 | { |
1750 | if (fmt[i] == 'e') | |
1751 | add_label_notes (XEXP (x, i), insns); | |
1752 | else if (fmt[i] == 'E') | |
1753 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
1754 | add_label_notes (XVECEXP (x, i, j), insns); | |
1755 | } | |
c160c628 RK |
1756 | } |
1757 | \f | |
b4ad7b23 RS |
1758 | /* Scan MOVABLES, and move the insns that deserve to be moved. |
1759 | If two matching movables are combined, replace one reg with the | |
1760 | other throughout. */ | |
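| /* E.g. (with made-up regs): if `r9 = x' was matched to `r5 = x' by | |
| combine_movables, only r5's insn is hoisted; REG_MAP then rewrites | |
| every use of r9 inside the loop as r5 (via a SUBREG if the two | |
| modes differ). */ | |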
1761 | ||
1762 | static void | |
0c20a65f AJ |
1763 | move_movables (struct loop *loop, struct loop_movables *movables, |
1764 | int threshold, int insn_count) | |
b4ad7b23 | 1765 | { |
1ecd860b | 1766 | struct loop_regs *regs = LOOP_REGS (loop); |
ed5bb68d | 1767 | int nregs = regs->num; |
b4ad7b23 | 1768 | rtx new_start = 0; |
b3694847 SS |
1769 | struct movable *m; |
1770 | rtx p; | |
0534b804 MH |
1771 | rtx loop_start = loop->start; |
1772 | rtx loop_end = loop->end; | |
b4ad7b23 RS |
1773 | /* Map of pseudo-register replacements to handle combining |
1774 | when we move several insns that load the same value | |
1775 | into different pseudo-registers. */ | |
703ad42b KG |
1776 | rtx *reg_map = xcalloc (nregs, sizeof (rtx)); |
1777 | char *already_moved = xcalloc (nregs, sizeof (char)); | |
b4ad7b23 | 1778 | |
02055ad6 | 1779 | for (m = movables->head; m; m = m->next) |
b4ad7b23 RS |
1780 | { |
1781 | /* Describe this movable insn. */ | |
1782 | ||
1783 | if (loop_dump_stream) | |
1784 | { | |
1785 | fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ", | |
1786 | INSN_UID (m->insn), m->regno, m->lifetime); | |
1787 | if (m->consec > 0) | |
1788 | fprintf (loop_dump_stream, "consec %d, ", m->consec); | |
1789 | if (m->cond) | |
1790 | fprintf (loop_dump_stream, "cond "); | |
1791 | if (m->force) | |
1792 | fprintf (loop_dump_stream, "force "); | |
1793 | if (m->global) | |
1794 | fprintf (loop_dump_stream, "global "); | |
1795 | if (m->done) | |
1796 | fprintf (loop_dump_stream, "done "); | |
1797 | if (m->move_insn) | |
1798 | fprintf (loop_dump_stream, "move-insn "); | |
1799 | if (m->match) | |
1800 | fprintf (loop_dump_stream, "matches %d ", | |
1801 | INSN_UID (m->match->insn)); | |
1802 | if (m->forces) | |
1803 | fprintf (loop_dump_stream, "forces %d ", | |
1804 | INSN_UID (m->forces->insn)); | |
1805 | } | |
1806 | ||
b4ad7b23 RS |
1807 | /* Ignore the insn if it's already done (it matched something else). |
1808 | Otherwise, see if it is now safe to move. */ | |
1809 | ||
1810 | if (!m->done | |
1811 | && (! m->cond | |
0534b804 | 1812 | || (1 == loop_invariant_p (loop, m->set_src) |
b4ad7b23 | 1813 | && (m->dependencies == 0 |
0534b804 | 1814 | || 1 == loop_invariant_p (loop, m->dependencies)) |
b4ad7b23 | 1815 | && (m->consec == 0 |
0534b804 | 1816 | || 1 == consec_sets_invariant_p (loop, m->set_dest, |
b4ad7b23 RS |
1817 | m->consec + 1, |
1818 | m->insn)))) | |
1819 | && (! m->forces || m->forces->done)) | |
1820 | { | |
b3694847 SS |
1821 | int regno; |
1822 | rtx p; | |
b4ad7b23 RS |
1823 | int savings = m->savings; |
1824 | ||
1825 | /* We have an insn that is safe to move. | |
1826 | Compute its desirability. */ | |
1827 | ||
1828 | p = m->insn; | |
1829 | regno = m->regno; | |
1830 | ||
1831 | if (loop_dump_stream) | |
1832 | fprintf (loop_dump_stream, "savings %d ", savings); | |
1833 | ||
f1d4ac80 | 1834 | if (regs->array[regno].moved_once && loop_dump_stream) |
877ca132 | 1835 | fprintf (loop_dump_stream, "halved since already moved "); |
b4ad7b23 RS |
1836 | |
1837 | /* An insn MUST be moved if we already moved something else | |
1838 | which is safe only if this one is moved too: that is, | |
1839 | if already_moved[REGNO] is nonzero. */ | |
1840 | ||
1841 | /* An insn is desirable to move if the new lifetime of the | |
1842 | register is no more than THRESHOLD times the old lifetime. | |
1843 | If it's not desirable, it means the loop is so big | |
1844 | that moving won't speed things up much, | |
1845 | and it is liable to make register usage worse. */ | |
1846 | ||
1847 | /* It is also desirable to move if it can be moved at no | |
1848 | extra cost because something else was already moved. */ | |
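| /* Worked example (made-up numbers): with THRESHOLD 32, savings 2 | |
| and lifetime 4 the product is 32 * 2 * 4 = 256, so the insn is | |
| moved when the loop has at most 256 insns, and at most 128 if the | |
| reg was already moved out of an inner loop. */ | |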
1849 | ||
1850 | if (already_moved[regno] | |
e5eb27e5 | 1851 | || flag_move_all_movables |
877ca132 | 1852 | || (threshold * savings * m->lifetime) >= |
f1d4ac80 | 1853 | (regs->array[regno].moved_once ? insn_count * 2 : insn_count) |
b4ad7b23 | 1854 | || (m->forces && m->forces->done |
f1d4ac80 | 1855 | && regs->array[m->forces->regno].n_times_set == 1)) |
b4ad7b23 RS |
1856 | { |
1857 | int count; | |
b3694847 | 1858 | struct movable *m1; |
6a651371 | 1859 | rtx first = NULL_RTX; |
8f7ee471 DJ |
1860 | rtx newreg = NULL_RTX; |
1861 | ||
1862 | if (m->insert_temp) | |
1863 | newreg = gen_reg_rtx (GET_MODE (m->set_dest)); | |
b4ad7b23 RS |
1864 | |
1865 | /* Now move the insns that set the reg. */ | |
1866 | ||
1867 | if (m->partial && m->match) | |
1868 | { | |
1869 | rtx newpat, i1; | |
1870 | rtx r1, r2; | |
1871 | /* Find the end of this chain of matching regs. | |
1872 | Thus, we load each reg in the chain from that one reg. | |
1873 | And that reg is loaded with 0 directly, | |
1874 | since it has ->match == 0. */ | |
1875 | for (m1 = m; m1->match; m1 = m1->match); | |
1876 | newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)), | |
1877 | SET_DEST (PATTERN (m1->insn))); | |
804a718a | 1878 | i1 = loop_insn_hoist (loop, newpat); |
b4ad7b23 RS |
1879 | |
1880 | /* Mark the moved, invariant reg as being allowed to | |
1881 | share a hard reg with the other matching invariant. */ | |
1882 | REG_NOTES (i1) = REG_NOTES (m->insn); | |
1883 | r1 = SET_DEST (PATTERN (m->insn)); | |
1884 | r2 = SET_DEST (PATTERN (m1->insn)); | |
38a448ca RH |
1885 | regs_may_share |
1886 | = gen_rtx_EXPR_LIST (VOIDmode, r1, | |
1887 | gen_rtx_EXPR_LIST (VOIDmode, r2, | |
1888 | regs_may_share)); | |
49ce134f | 1889 | delete_insn (m->insn); |
b4ad7b23 RS |
1890 | |
1891 | if (new_start == 0) | |
1892 | new_start = i1; | |
1893 | ||
1894 | if (loop_dump_stream) | |
1895 | fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1)); | |
1896 | } | |
1897 | /* If we are to re-generate the item being moved with a | |
1898 | new move insn, first delete what we have and then emit | |
1899 | the move insn before the loop. */ | |
1900 | else if (m->move_insn) | |
1901 | { | |
804a718a | 1902 | rtx i1, temp, seq; |
b4ad7b23 RS |
1903 | |
1904 | for (count = m->consec; count >= 0; count--) | |
1905 | { | |
1906 | /* If this is the first insn of a library call sequence, | |
ed6cc1f5 | 1907 | something is very wrong. */ |
b4ad7b23 | 1908 | if (GET_CODE (p) != NOTE |
5fd8383e | 1909 | && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX))) |
ed6cc1f5 | 1910 | abort (); |
b4ad7b23 RS |
1911 | |
1912 | /* If this is the last insn of a libcall sequence, then | |
1913 | delete every insn in the sequence except the last. | |
1914 | The last insn is handled in the normal manner. */ | |
1915 | if (GET_CODE (p) != NOTE | |
5fd8383e | 1916 | && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX))) |
b4ad7b23 RS |
1917 | { |
1918 | temp = XEXP (temp, 0); | |
1919 | while (temp != p) | |
49ce134f | 1920 | temp = delete_insn (temp); |
b4ad7b23 RS |
1921 | } |
1922 | ||
9655bf95 | 1923 | temp = p; |
49ce134f | 1924 | p = delete_insn (p); |
9655bf95 DM |
1925 | |
1926 | /* simplify_giv_expr expects that it can walk the insns | |
1927 | at m->insn forwards and see this old sequence we are | |
1928 | tossing here. delete_insn does preserve the next | |
1929 | pointers, but when we skip over a NOTE we must fix | |
1930 | it up. Otherwise that code walks into the non-deleted | |
1931 | insn stream. */ | |
dd202606 | 1932 | while (p && GET_CODE (p) == NOTE) |
9655bf95 | 1933 | p = NEXT_INSN (temp) = NEXT_INSN (p); |
8f7ee471 DJ |
1934 | |
1935 | if (m->insert_temp) | |
1936 | { | |
1937 | /* Replace the original insn with a move from | |
3dc575ff | 1938 | our newly created temp. */ |
8f7ee471 | 1939 | start_sequence (); |
0c20a65f | 1940 | emit_move_insn (m->set_dest, newreg); |
8f7ee471 DJ |
1941 | seq = get_insns (); |
1942 | end_sequence (); | |
1943 | emit_insn_before (seq, p); | |
1944 | } | |
b4ad7b23 RS |
1945 | } |
1946 | ||
1947 | start_sequence (); | |
0c20a65f | 1948 | emit_move_insn (m->insert_temp ? newreg : m->set_dest, |
8f7ee471 | 1949 | m->set_src); |
2f937369 | 1950 | seq = get_insns (); |
b4ad7b23 RS |
1951 | end_sequence (); |
1952 | ||
2f937369 | 1953 | add_label_notes (m->set_src, seq); |
c160c628 | 1954 | |
804a718a | 1955 | i1 = loop_insn_hoist (loop, seq); |
5fd8383e | 1956 | if (! find_reg_note (i1, REG_EQUAL, NULL_RTX)) |
3d238248 JJ |
1957 | set_unique_reg_note (i1, |
1958 | m->is_equiv ? REG_EQUIV : REG_EQUAL, | |
1959 | m->set_src); | |
b4ad7b23 RS |
1960 | |
1961 | if (loop_dump_stream) | |
1962 | fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1)); | |
1963 | ||
1964 | /* The more regs we move, the less we like moving them. */ | |
1965 | threshold -= 3; | |
1966 | } | |
1967 | else | |
1968 | { | |
1969 | for (count = m->consec; count >= 0; count--) | |
1970 | { | |
1971 | rtx i1, temp; | |
1972 | ||
0f41302f | 1973 | /* If first insn of libcall sequence, skip to end. */ |
e6fcb60d | 1974 | /* Do this at start of loop, since p is guaranteed to |
b4ad7b23 RS |
1975 | be an insn here. */ |
1976 | if (GET_CODE (p) != NOTE | |
5fd8383e | 1977 | && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX))) |
b4ad7b23 RS |
1978 | p = XEXP (temp, 0); |
1979 | ||
1980 | /* If last insn of libcall sequence, move all | |
1981 | insns except the last before the loop. The last | |
1982 | insn is handled in the normal manner. */ | |
1983 | if (GET_CODE (p) != NOTE | |
5fd8383e | 1984 | && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX))) |
b4ad7b23 RS |
1985 | { |
1986 | rtx fn_address = 0; | |
1987 | rtx fn_reg = 0; | |
1988 | rtx fn_address_insn = 0; | |
1989 | ||
1990 | first = 0; | |
1991 | for (temp = XEXP (temp, 0); temp != p; | |
1992 | temp = NEXT_INSN (temp)) | |
1993 | { | |
1994 | rtx body; | |
1995 | rtx n; | |
1996 | rtx next; | |
1997 | ||
1998 | if (GET_CODE (temp) == NOTE) | |
1999 | continue; | |
2000 | ||
2001 | body = PATTERN (temp); | |
2002 | ||
2003 | /* Find the next insn after TEMP, | |
2004 | not counting USE or NOTE insns. */ | |
2005 | for (next = NEXT_INSN (temp); next != p; | |
2006 | next = NEXT_INSN (next)) | |
2007 | if (! (GET_CODE (next) == INSN | |
2008 | && GET_CODE (PATTERN (next)) == USE) | |
2009 | && GET_CODE (next) != NOTE) | |
2010 | break; | |
e6fcb60d | 2011 | |
b4ad7b23 RS |
2012 | /* If that is the call, this may be the insn |
2013 | that loads the function address. | |
2014 | ||
2015 | Extract the function address from the insn | |
2016 | that loads it into a register. | |
2017 | If this insn was cse'd, we get incorrect code. | |
2018 | ||
2019 | So emit a new move insn that copies the | |
2020 | function address into the register that the | |
2021 | call insn will use. flow.c will delete any | |
2022 | redundant stores that we have created. */ | |
2023 | if (GET_CODE (next) == CALL_INSN | |
2024 | && GET_CODE (body) == SET | |
2025 | && GET_CODE (SET_DEST (body)) == REG | |
5fd8383e RK |
2026 | && (n = find_reg_note (temp, REG_EQUAL, |
2027 | NULL_RTX))) | |
b4ad7b23 RS |
2028 | { |
2029 | fn_reg = SET_SRC (body); | |
2030 | if (GET_CODE (fn_reg) != REG) | |
2031 | fn_reg = SET_DEST (body); | |
2032 | fn_address = XEXP (n, 0); | |
2033 | fn_address_insn = temp; | |
2034 | } | |
2035 | /* We have the call insn. | |
2036 | If it uses the register we suspect it might, | |
2037 | load it with the correct address directly. */ | |
2038 | if (GET_CODE (temp) == CALL_INSN | |
2039 | && fn_address != 0 | |
d9f8a199 | 2040 | && reg_referenced_p (fn_reg, body)) |
86e21212 MH |
2041 | loop_insn_emit_after (loop, 0, fn_address_insn, |
2042 | gen_move_insn | |
2043 | (fn_reg, fn_address)); | |
b4ad7b23 RS |
2044 | |
2045 | if (GET_CODE (temp) == CALL_INSN) | |
f97d29ce | 2046 | { |
86e21212 | 2047 | i1 = loop_call_insn_hoist (loop, body); |
f97d29ce JW |
2048 | /* Because the USAGE information potentially |
2049 | contains objects other than hard registers | |
2050 | we need to copy it. */ | |
8c4f5c09 | 2051 | if (CALL_INSN_FUNCTION_USAGE (temp)) |
db3cf6fb MS |
2052 | CALL_INSN_FUNCTION_USAGE (i1) |
2053 | = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp)); | |
f97d29ce | 2054 | } |
b4ad7b23 | 2055 | else |
804a718a | 2056 | i1 = loop_insn_hoist (loop, body); |
b4ad7b23 RS |
2057 | if (first == 0) |
2058 | first = i1; | |
2059 | if (temp == fn_address_insn) | |
2060 | fn_address_insn = i1; | |
2061 | REG_NOTES (i1) = REG_NOTES (temp); | |
49ce134f JH |
2062 | REG_NOTES (temp) = NULL; |
2063 | delete_insn (temp); | |
b4ad7b23 | 2064 | } |
18985c91 R |
2065 | if (new_start == 0) |
2066 | new_start = first; | |
b4ad7b23 RS |
2067 | } |
2068 | if (m->savemode != VOIDmode) | |
2069 | { | |
2070 | /* P sets REG to zero; but we should clear only | |
2071 | the bits that are not covered by the mode | |
2072 | m->savemode. */ | |
2073 | rtx reg = m->set_dest; | |
2074 | rtx sequence; | |
2075 | rtx tem; | |
e6fcb60d | 2076 | |
b4ad7b23 | 2077 | start_sequence (); |
ef89d648 ZW |
2078 | tem = expand_simple_binop |
2079 | (GET_MODE (reg), AND, reg, | |
5fd8383e RK |
2080 | GEN_INT ((((HOST_WIDE_INT) 1 |
2081 | << GET_MODE_BITSIZE (m->savemode))) | |
b4ad7b23 RS |
2082 | - 1), |
2083 | reg, 1, OPTAB_LIB_WIDEN); | |
2084 | if (tem == 0) | |
2085 | abort (); | |
2086 | if (tem != reg) | |
2087 | emit_move_insn (reg, tem); | |
2f937369 | 2088 | sequence = get_insns (); |
b4ad7b23 | 2089 | end_sequence (); |
804a718a | 2090 | i1 = loop_insn_hoist (loop, sequence); |
b4ad7b23 RS |
2091 | } |
2092 | else if (GET_CODE (p) == CALL_INSN) | |
f97d29ce | 2093 | { |
86e21212 | 2094 | i1 = loop_call_insn_hoist (loop, PATTERN (p)); |
f97d29ce JW |
2095 | /* Because the USAGE information potentially |
2096 | contains objects other than hard registers | |
2097 | we need to copy it. */ | |
8c4f5c09 | 2098 | if (CALL_INSN_FUNCTION_USAGE (p)) |
db3cf6fb MS |
2099 | CALL_INSN_FUNCTION_USAGE (i1) |
2100 | = copy_rtx (CALL_INSN_FUNCTION_USAGE (p)); | |
f97d29ce | 2101 | } |
1a61c29f JW |
2102 | else if (count == m->consec && m->move_insn_first) |
2103 | { | |
804a718a | 2104 | rtx seq; |
1a61c29f JW |
2105 | /* The SET_SRC might not be invariant, so we must |
2106 | use the REG_EQUAL note. */ | |
2107 | start_sequence (); | |
2108 | emit_move_insn (m->set_dest, m->set_src); | |
2f937369 | 2109 | seq = get_insns (); |
1a61c29f JW |
2110 | end_sequence (); |
2111 | ||
2f937369 | 2112 | add_label_notes (m->set_src, seq); |
1a61c29f | 2113 | |
804a718a | 2114 | i1 = loop_insn_hoist (loop, seq); |
1a61c29f | 2115 | if (! find_reg_note (i1, REG_EQUAL, NULL_RTX)) |
3d238248 JJ |
2116 | set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV |
2117 | : REG_EQUAL, m->set_src); | |
1a61c29f | 2118 | } |
8f7ee471 DJ |
2119 | else if (m->insert_temp) |
2120 | { | |
703ad42b KG |
2121 | rtx *reg_map2 = xcalloc (REGNO (newreg), |
2122 | sizeof(rtx)); | |
8f7ee471 DJ |
2123 | reg_map2 [m->regno] = newreg; |
2124 | ||
2125 | i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p))); | |
2126 | replace_regs (i1, reg_map2, REGNO (newreg), 1); | |
2127 | free (reg_map2); | |
0c20a65f | 2128 | } |
b4ad7b23 | 2129 | else |
804a718a | 2130 | i1 = loop_insn_hoist (loop, PATTERN (p)); |
b4ad7b23 | 2131 | |
1a61c29f JW |
2132 | if (REG_NOTES (i1) == 0) |
2133 | { | |
2134 | REG_NOTES (i1) = REG_NOTES (p); | |
49ce134f | 2135 | REG_NOTES (p) = NULL; |
b4ad7b23 | 2136 | |
1a61c29f JW |
2137 | /* If there is a REG_EQUAL note present whose value |
2138 | is not loop invariant, then delete it, since it | |
2139 | may cause problems with later optimization passes. | |
2140 | It is possible for cse to create such notes | |
2141 | like this as a result of record_jump_cond. */ | |
e6fcb60d | 2142 | |
1a61c29f | 2143 | if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX)) |
0534b804 | 2144 | && ! loop_invariant_p (loop, XEXP (temp, 0))) |
1a61c29f JW |
2145 | remove_note (i1, temp); |
2146 | } | |
e6726b1f | 2147 | |
b4ad7b23 RS |
2148 | if (new_start == 0) |
2149 | new_start = i1; | |
2150 | ||
2151 | if (loop_dump_stream) | |
2152 | fprintf (loop_dump_stream, " moved to %d", | |
2153 | INSN_UID (i1)); | |
2154 | ||
b4ad7b23 RS |
2155 | /* If library call, now fix the REG_NOTES that contain |
2156 | insn pointers, namely REG_LIBCALL on FIRST | |
2157 | and REG_RETVAL on I1. */ | |
51723711 | 2158 | if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX))) |
b4ad7b23 RS |
2159 | { |
2160 | XEXP (temp, 0) = first; | |
5fd8383e | 2161 | temp = find_reg_note (first, REG_LIBCALL, NULL_RTX); |
b4ad7b23 RS |
2162 | XEXP (temp, 0) = i1; |
2163 | } | |
2164 | ||
9655bf95 | 2165 | temp = p; |
49ce134f | 2166 | delete_insn (p); |
9655bf95 DM |
2167 | p = NEXT_INSN (p); |
2168 | ||
2169 | /* simplify_giv_expr expects that it can walk the insns | |
2170 | at m->insn forwards and see this old sequence we are | |
2171 | tossing here. delete_insn does preserve the next | |
2172 | pointers, but when we skip over a NOTE we must fix | |
2173 | it up. Otherwise that code walks into the non-deleted | |
2174 | insn stream. */ | |
2175 | while (p && GET_CODE (p) == NOTE) | |
2176 | p = NEXT_INSN (temp) = NEXT_INSN (p); | |
8f7ee471 DJ |
2177 | |
2178 | if (m->insert_temp) | |
2179 | { | |
2180 | rtx seq; | |
2181 | /* Replace the original insn with a move from | |
3dc575ff | 2182 | our newly created temp. */ |
8f7ee471 | 2183 | start_sequence (); |
0c20a65f | 2184 | emit_move_insn (m->set_dest, newreg); |
8f7ee471 DJ |
2185 | seq = get_insns (); |
2186 | end_sequence (); | |
2187 | emit_insn_before (seq, p); | |
2188 | } | |
b4ad7b23 RS |
2189 | } |
2190 | ||
2191 | /* The more regs we move, the less we like moving them. */ | |
2192 | threshold -= 3; | |
2193 | } | |
2194 | ||
8f7ee471 | 2195 | m->done = 1; |
b4ad7b23 | 2196 | |
8f7ee471 | 2197 | if (!m->insert_temp) |
d5e0243a | 2198 | { |
8f7ee471 DJ |
2199 | /* Any other movable that loads the same register |
2200 | MUST be moved. */ | |
2201 | already_moved[regno] = 1; | |
b4ad7b23 | 2202 | |
8f7ee471 DJ |
2203 | /* This reg has been moved out of one loop. */ |
2204 | regs->array[regno].moved_once = 1; | |
b4ad7b23 | 2205 | |
8f7ee471 DJ |
2206 | /* The reg set here is now invariant. */ |
2207 | if (! m->partial) | |
2208 | { | |
2209 | int i; | |
2210 | for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++) | |
2211 | regs->array[regno+i].set_in_loop = 0; | |
2212 | } | |
2213 | ||
2214 | /* Change the length-of-life info for the register | |
2215 | to say it lives at least the full length of this loop. | |
2216 | This will help guide optimizations in outer loops. */ | |
2217 | ||
2218 | if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start)) | |
2219 | /* This is the old insn before all the moved insns. | |
2220 | We can't use the moved insn because it is out of range | |
2221 | in uid_luid. Only the old insns have luids. */ | |
2222 | REGNO_FIRST_UID (regno) = INSN_UID (loop_start); | |
2223 | if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end)) | |
2224 | REGNO_LAST_UID (regno) = INSN_UID (loop_end); | |
2225 | } | |
b4ad7b23 RS |
2226 | |
2227 | /* Combine with this moved insn any other matching movables. */ | |
2228 | ||
2229 | if (! m->partial) | |
02055ad6 | 2230 | for (m1 = movables->head; m1; m1 = m1->next) |
b4ad7b23 RS |
2231 | if (m1->match == m) |
2232 | { | |
2233 | rtx temp; | |
2234 | ||
2235 | /* Schedule the reg loaded by M1 | |
2236 | for replacement so that it shares the reg of M. | |
2237 | If the modes differ (only possible in restricted | |
51f0646f JL |
2238 | circumstances), make a SUBREG. | |
2239 | ||
2240 | Note this assumes that the target dependent files | |
2241 | treat REG and SUBREG equally, including within | |
2242 | GO_IF_LEGITIMATE_ADDRESS and in all the | |
2243 | predicates since we never verify that replacing the | |
2244 | original register with a SUBREG results in a | |
2245 | recognizable insn. */ | |
b4ad7b23 RS |
2246 | if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)) |
2247 | reg_map[m1->regno] = m->set_dest; | |
2248 | else | |
2249 | reg_map[m1->regno] | |
2250 | = gen_lowpart_common (GET_MODE (m1->set_dest), | |
2251 | m->set_dest); | |
e6fcb60d | 2252 | |
b4ad7b23 RS |
2253 | /* Get rid of the matching insn |
2254 | and prevent further processing of it. */ | |
2255 | m1->done = 1; | |
2256 | ||
49ce134f | 2257 | /* if library call, delete all insns. */ |
51723711 KG |
2258 | if ((temp = find_reg_note (m1->insn, REG_RETVAL, |
2259 | NULL_RTX))) | |
49ce134f JH |
2260 | delete_insn_chain (XEXP (temp, 0), m1->insn); |
2261 | else | |
2262 | delete_insn (m1->insn); | |
b4ad7b23 RS |
2263 | |
2264 | /* Any other movable that loads the same register | |
2265 | MUST be moved. */ | |
2266 | already_moved[m1->regno] = 1; | |
2267 | ||
2268 | /* The reg merged here is now invariant, | |
2269 | if the reg it matches is invariant. */ | |
2270 | if (! m->partial) | |
d5e0243a DE |
2271 | { |
2272 | int i; | |
2273 | for (i = 0; | |
44a5565d | 2274 | i < LOOP_REGNO_NREGS (regno, m1->set_dest); |
d5e0243a DE |
2275 | i++) |
2276 | regs->array[m1->regno+i].set_in_loop = 0; | |
2277 | } | |
b4ad7b23 RS |
2278 | } |
2279 | } | |
2280 | else if (loop_dump_stream) | |
2281 | fprintf (loop_dump_stream, "not desirable"); | |
2282 | } | |
2283 | else if (loop_dump_stream && !m->match) | |
2284 | fprintf (loop_dump_stream, "not safe"); | |
2285 | ||
2286 | if (loop_dump_stream) | |
2287 | fprintf (loop_dump_stream, "\n"); | |
2288 | } | |
2289 | ||
2290 | if (new_start == 0) | |
2291 | new_start = loop_start; | |
2292 | ||
2293 | /* Go through all the instructions in the loop, making | |
2294 | all the register substitutions scheduled in REG_MAP. */ | |
0534b804 | 2295 | for (p = new_start; p != loop_end; p = NEXT_INSN (p)) |
b4ad7b23 RS |
2296 | if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN |
2297 | || GET_CODE (p) == CALL_INSN) | |
2298 | { | |
2299 | replace_regs (PATTERN (p), reg_map, nregs, 0); | |
2300 | replace_regs (REG_NOTES (p), reg_map, nregs, 0); | |
da0c128e | 2301 | INSN_CODE (p) = -1; |
b4ad7b23 | 2302 | } |
4da896b2 MM |
2303 | |
2304 | /* Clean up. */ | |
2305 | free (reg_map); | |
2306 | free (already_moved); | |
b4ad7b23 | 2307 | } |
6ec92010 MH |
2308 | |
2309 | ||
2310 | static void | |
0c20a65f | 2311 | loop_movables_add (struct loop_movables *movables, struct movable *m) |
6ec92010 MH |
2312 | { |
2313 | if (movables->head == 0) | |
2314 | movables->head = m; | |
2315 | else | |
2316 | movables->last->next = m; | |
2317 | movables->last = m; | |
2318 | } | |
2319 | ||
2320 | ||
2321 | static void | |
0c20a65f | 2322 | loop_movables_free (struct loop_movables *movables) |
6ec92010 MH |
2323 | { |
2324 | struct movable *m; | |
2325 | struct movable *m_next; | |
2326 | ||
2327 | for (m = movables->head; m; m = m_next) | |
2328 | { | |
2329 | m_next = m->next; | |
2330 | free (m); | |
2331 | } | |
6b8c9327 | 2332 | } |
b4ad7b23 RS |
2333 | \f |
2334 | #if 0 | |
2335 | /* Scan X and replace the address of any MEM in it with ADDR. | |
2336 | REG is the address that MEM should have before the replacement. */ | |
2337 | ||
2338 | static void | |
0c20a65f | 2339 | replace_call_address (rtx x, rtx reg, rtx addr) |
b4ad7b23 | 2340 | { |
b3694847 SS |
2341 | enum rtx_code code; |
2342 | int i; | |
2343 | const char *fmt; | |
b4ad7b23 RS |
2344 | |
2345 | if (x == 0) | |
2346 | return; | |
2347 | code = GET_CODE (x); | |
2348 | switch (code) | |
2349 | { | |
2350 | case PC: | |
2351 | case CC0: | |
2352 | case CONST_INT: | |
2353 | case CONST_DOUBLE: | |
2354 | case CONST: | |
2355 | case SYMBOL_REF: | |
2356 | case LABEL_REF: | |
2357 | case REG: | |
2358 | return; | |
2359 | ||
2360 | case SET: | |
2361 | /* Short cut for very common case. */ | |
2362 | replace_call_address (XEXP (x, 1), reg, addr); | |
2363 | return; | |
2364 | ||
2365 | case CALL: | |
2366 | /* Short cut for very common case. */ | |
2367 | replace_call_address (XEXP (x, 0), reg, addr); | |
2368 | return; | |
2369 | ||
2370 | case MEM: | |
2371 | /* If this MEM uses a reg other than the one we expected, | |
2372 | something is wrong. */ | |
2373 | if (XEXP (x, 0) != reg) | |
2374 | abort (); | |
2375 | XEXP (x, 0) = addr; | |
2376 | return; | |
e6fcb60d | 2377 | |
e9a25f70 JL |
2378 | default: |
2379 | break; | |
b4ad7b23 RS |
2380 | } |
2381 | ||
2382 | fmt = GET_RTX_FORMAT (code); | |
2383 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
2384 | { | |
2385 | if (fmt[i] == 'e') | |
2386 | replace_call_address (XEXP (x, i), reg, addr); | |
d4757e6a | 2387 | else if (fmt[i] == 'E') |
b4ad7b23 | 2388 | { |
b3694847 | 2389 | int j; |
b4ad7b23 RS |
2390 | for (j = 0; j < XVECLEN (x, i); j++) |
2391 | replace_call_address (XVECEXP (x, i, j), reg, addr); | |
2392 | } | |
2393 | } | |
2394 | } | |
2395 | #endif | |
2396 | \f | |
2397 | /* Return the number of memory refs to addresses that vary | |
2398 | in the rtx X. */ | |
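| /* E.g. (with a made-up pseudo): with reg R varying in the loop, | |
| (mem (plus R (const_int 4))) counts as one nonfixed read, while a | |
| (mem (symbol_ref ...)) whose address is loop-invariant counts as | |
| zero. */ | |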
2399 | ||
2400 | static int | |
0c20a65f | 2401 | count_nonfixed_reads (const struct loop *loop, rtx x) |
b4ad7b23 | 2402 | { |
b3694847 SS |
2403 | enum rtx_code code; |
2404 | int i; | |
2405 | const char *fmt; | |
b4ad7b23 RS |
2406 | int value; |
2407 | ||
2408 | if (x == 0) | |
2409 | return 0; | |
2410 | ||
2411 | code = GET_CODE (x); | |
2412 | switch (code) | |
2413 | { | |
2414 | case PC: | |
2415 | case CC0: | |
2416 | case CONST_INT: | |
2417 | case CONST_DOUBLE: | |
2418 | case CONST: | |
2419 | case SYMBOL_REF: | |
2420 | case LABEL_REF: | |
2421 | case REG: | |
2422 | return 0; | |
2423 | ||
2424 | case MEM: | |
0534b804 MH |
2425 | return ((loop_invariant_p (loop, XEXP (x, 0)) != 1) |
2426 | + count_nonfixed_reads (loop, XEXP (x, 0))); | |
e6fcb60d | 2427 | |
e9a25f70 JL |
2428 | default: |
2429 | break; | |
b4ad7b23 RS |
2430 | } |
2431 | ||
2432 | value = 0; | |
2433 | fmt = GET_RTX_FORMAT (code); | |
2434 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
2435 | { | |
2436 | if (fmt[i] == 'e') | |
0534b804 MH |
2437 | value += count_nonfixed_reads (loop, XEXP (x, i)); |
2438 | if (fmt[i] == 'E') | |
b4ad7b23 | 2439 | { |
b3694847 | 2440 | int j; |
b4ad7b23 | 2441 | for (j = 0; j < XVECLEN (x, i); j++) |
0534b804 | 2442 | value += count_nonfixed_reads (loop, XVECEXP (x, i, j)); |
b4ad7b23 RS |
2443 | } |
2444 | } | |
2445 | return value; | |
2446 | } | |
b4ad7b23 | 2447 | \f |
3c748bb6 | 2448 | /* Scan a loop setting the elements `cont', `vtop', `loops_enclosed', |
576d0b54 | 2449 | `has_call', `has_nonconst_call', `has_volatile', `has_tablejump', |
afa1738b MH |
2450 | `unknown_address_altered', `unknown_constant_address_altered', and |
2451 | `num_mem_sets' in LOOP. Also, fill in the array `mems' and the | |
2452 | list `store_mems' in LOOP. */ | |
b4ad7b23 RS |
2453 | |
2454 | static void | |
0c20a65f | 2455 | prescan_loop (struct loop *loop) |
b4ad7b23 | 2456 | { |
b3694847 | 2457 | int level = 1; |
41a972a9 | 2458 | rtx insn; |
52b38064 | 2459 | struct loop_info *loop_info = LOOP_INFO (loop); |
a2be868f MH |
2460 | rtx start = loop->start; |
2461 | rtx end = loop->end; | |
41a972a9 MM |
2462 | /* The label after END. Jumping here is just like falling off the |
2463 | end of the loop. We use next_nonnote_insn instead of next_label | |
2464 | as a hedge against the (pathological) case where some actual insn | |
2465 | might end up between the two. */ | |
2466 | rtx exit_target = next_nonnote_insn (end); | |
3c748bb6 | 2467 | |
3c748bb6 | 2468 | loop_info->has_indirect_jump = indirect_jump_in_function; |
e304a8e6 | 2469 | loop_info->pre_header_has_call = 0; |
3c748bb6 | 2470 | loop_info->has_call = 0; |
576d0b54 | 2471 | loop_info->has_nonconst_call = 0; |
62e6ca55 | 2472 | loop_info->has_prefetch = 0; |
3c748bb6 MH |
2473 | loop_info->has_volatile = 0; |
2474 | loop_info->has_tablejump = 0; | |
3c748bb6 | 2475 | loop_info->has_multiple_exit_targets = 0; |
a2be868f | 2476 | loop->level = 1; |
b4ad7b23 | 2477 | |
afa1738b MH |
2478 | loop_info->unknown_address_altered = 0; |
2479 | loop_info->unknown_constant_address_altered = 0; | |
2480 | loop_info->store_mems = NULL_RTX; | |
2481 | loop_info->first_loop_store_insn = NULL_RTX; | |
2482 | loop_info->mems_idx = 0; | |
2483 | loop_info->num_mem_sets = 0; | |
3eae4643 | 2484 | /* If loop opts are run twice, this was set on the 1st pass for the 2nd. */ | |
dad482e6 | 2485 | loop_info->preconditioned = NOTE_PRECONDITIONED (end); |
e304a8e6 | 2486 | |
6b8c9327 | 2487 | for (insn = start; insn && GET_CODE (insn) != CODE_LABEL; |
e304a8e6 MH |
2488 | insn = PREV_INSN (insn)) |
2489 | { | |
2490 | if (GET_CODE (insn) == CALL_INSN) | |
2491 | { | |
2492 | loop_info->pre_header_has_call = 1; | |
2493 | break; | |
2494 | } | |
2495 | } | |
2496 | ||
b4ad7b23 RS |
2497 | for (insn = NEXT_INSN (start); insn != NEXT_INSN (end); |
2498 | insn = NEXT_INSN (insn)) | |
2499 | { | |
b7fe373b | 2500 | switch (GET_CODE (insn)) |
b4ad7b23 | 2501 | { |
b7fe373b | 2502 | case NOTE: |
b4ad7b23 RS |
2503 | if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG) |
2504 | { | |
2505 | ++level; | |
2506 | /* Count number of loops contained in this one. */ | |
a2be868f | 2507 | loop->level++; |
b4ad7b23 RS |
2508 | } |
2509 | else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END) | |
b7fe373b RH |
2510 | --level; |
2511 | break; | |
2512 | ||
2513 | case CALL_INSN: | |
24a28584 | 2514 | if (! CONST_OR_PURE_CALL_P (insn)) |
576d0b54 MH |
2515 | { |
2516 | loop_info->unknown_address_altered = 1; | |
2517 | loop_info->has_nonconst_call = 1; | |
2518 | } | |
a6a063b8 AM |
2519 | else if (pure_call_p (insn)) |
2520 | loop_info->has_nonconst_call = 1; | |
3c748bb6 | 2521 | loop_info->has_call = 1; |
b7fe373b RH |
2522 | if (can_throw_internal (insn)) |
2523 | loop_info->has_multiple_exit_targets = 1; | |
ee960939 OH |
2524 | |
2525 | /* Calls initializing constant objects have CLOBBER of MEM /u in the | |
2526 | attached FUNCTION_USAGE expression list, not accounted for by the | |
2527 | code above. We should note these to avoid missing dependencies in | |
2528 | later references. */ | |
2529 | { | |
2530 | rtx fusage_entry; | |
0c20a65f AJ |
2531 | |
2532 | for (fusage_entry = CALL_INSN_FUNCTION_USAGE (insn); | |
ee960939 OH |
2533 | fusage_entry; fusage_entry = XEXP (fusage_entry, 1)) |
2534 | { | |
2535 | rtx fusage = XEXP (fusage_entry, 0); | |
2536 | ||
2537 | if (GET_CODE (fusage) == CLOBBER | |
2538 | && GET_CODE (XEXP (fusage, 0)) == MEM | |
2539 | && RTX_UNCHANGING_P (XEXP (fusage, 0))) | |
2540 | { | |
2541 | note_stores (fusage, note_addr_stored, loop_info); | |
2542 | if (! loop_info->first_loop_store_insn | |
2543 | && loop_info->store_mems) | |
2544 | loop_info->first_loop_store_insn = insn; | |
2545 | } | |
2546 | } | |
2547 | } | |
b7fe373b RH |
2548 | break; |
2549 | ||
2550 | case JUMP_INSN: | |
2551 | if (! loop_info->has_multiple_exit_targets) | |
2552 | { | |
2553 | rtx set = pc_set (insn); | |
2554 | ||
2555 | if (set) | |
2556 | { | |
7d104885 | 2557 | rtx src = SET_SRC (set); |
b7fe373b RH |
2558 | rtx label1, label2; |
2559 | ||
7d104885 | 2560 | if (GET_CODE (src) == IF_THEN_ELSE) |
b7fe373b | 2561 | { |
7d104885 GS |
2562 | label1 = XEXP (src, 1); |
2563 | label2 = XEXP (src, 2); | |
b7fe373b RH |
2564 | } |
2565 | else | |
2566 | { | |
7d104885 | 2567 | label1 = src; |
b7fe373b RH |
2568 | label2 = NULL_RTX; |
2569 | } | |
2570 | ||
2571 | do | |
2572 | { | |
2573 | if (label1 && label1 != pc_rtx) | |
2574 | { | |
2575 | if (GET_CODE (label1) != LABEL_REF) | |
2576 | { | |
2577 | /* Something tricky. */ | |
2578 | loop_info->has_multiple_exit_targets = 1; | |
2579 | break; | |
2580 | } | |
2581 | else if (XEXP (label1, 0) != exit_target | |
2582 | && LABEL_OUTSIDE_LOOP_P (label1)) | |
2583 | { | |
2584 | /* A jump outside the current loop. */ | |
2585 | loop_info->has_multiple_exit_targets = 1; | |
2586 | break; | |
2587 | } | |
2588 | } | |
2589 | ||
2590 | label1 = label2; | |
2591 | label2 = NULL_RTX; | |
2592 | } | |
2593 | while (label1); | |
2594 | } | |
2595 | else | |
2596 | { | |
2597 | /* A return, or something tricky. */ | |
2598 | loop_info->has_multiple_exit_targets = 1; | |
2599 | } | |
2600 | } | |
2601 | /* FALLTHRU */ | |
41a972a9 | 2602 | |
b7fe373b | 2603 | case INSN: |
41a972a9 | 2604 | if (volatile_refs_p (PATTERN (insn))) |
3c748bb6 | 2605 | loop_info->has_volatile = 1; |
8c368ee2 DE |
2606 | |
2607 | if (GET_CODE (insn) == JUMP_INSN | |
2608 | && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC | |
2609 | || GET_CODE (PATTERN (insn)) == ADDR_VEC)) | |
3c748bb6 | 2610 | loop_info->has_tablejump = 1; |
e6fcb60d | 2611 | |
afa1738b MH |
2612 | note_stores (PATTERN (insn), note_addr_stored, loop_info); |
2613 | if (! loop_info->first_loop_store_insn && loop_info->store_mems) | |
2614 | loop_info->first_loop_store_insn = insn; | |
41a972a9 | 2615 | |
b7fe373b RH |
2616 | if (flag_non_call_exceptions && can_throw_internal (insn)) |
2617 | loop_info->has_multiple_exit_targets = 1; | |
2618 | break; | |
552bc76f | 2619 | |
b7fe373b RH |
2620 | default: |
2621 | break; | |
b4ad7b23 RS |
2622 | } |
2623 | } | |
41a972a9 MM |
2624 | |
2625 | /* Now, rescan the loop, setting up the LOOP_MEMS array. */ | |
20bd7bfa | 2626 | if (/* An exception thrown by a called function might land us |
41a972a9 | 2627 | anywhere. */ |
576d0b54 | 2628 | ! loop_info->has_nonconst_call |
41a972a9 MM |
2629 | /* We don't want loads for MEMs moved to a location before the |
2630 | one at which their stack memory becomes allocated. (Note | |
2631 | that this is not a problem for malloc, etc., since those | |
2632 | require actual function calls.) */ | |
a2be868f | 2633 | && ! current_function_calls_alloca |
41a972a9 MM |
2634 | /* There are ways to leave the loop other than falling off the |
2635 | end. */ | |
a2be868f | 2636 | && ! loop_info->has_multiple_exit_targets) |
41a972a9 MM |
2637 | for (insn = NEXT_INSN (start); insn != NEXT_INSN (end); |
2638 | insn = NEXT_INSN (insn)) | |
afa1738b | 2639 | for_each_rtx (&insn, insert_loop_mem, loop_info); |
20bd7bfa JW |
2640 | |
2641 | /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so | |
2642 | that loop_invariant_p and load_mems can use true_dependence | |
2643 | to determine what is really clobbered. */ | |
afa1738b | 2644 | if (loop_info->unknown_address_altered) |
20bd7bfa JW |
2645 | { |
2646 | rtx mem = gen_rtx_MEM (BLKmode, const0_rtx); | |
2647 | ||
fd5d5b07 | 2648 | loop_info->store_mems |
afa1738b | 2649 | = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems); |
20bd7bfa | 2650 | } |
afa1738b | 2651 | if (loop_info->unknown_constant_address_altered) |
20bd7bfa JW |
2652 | { |
2653 | rtx mem = gen_rtx_MEM (BLKmode, const0_rtx); | |
2654 | ||
2655 | RTX_UNCHANGING_P (mem) = 1; | |
fd5d5b07 | 2656 | loop_info->store_mems |
afa1738b | 2657 | = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems); |
20bd7bfa | 2658 | } |
b4ad7b23 RS |
2659 | } |
2660 | \f | |
6a58eee9 RH |
2661 | /* Invalidate all loops containing LABEL. */ |
2662 | ||
2663 | static void | |
0c20a65f | 2664 | invalidate_loops_containing_label (rtx label) |
6a58eee9 RH |
2665 | { |
2666 | struct loop *loop; | |
2667 | for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer) | |
2668 | loop->invalid = 1; | |
2669 | } | |
2670 | ||
b4ad7b23 RS |
2671 | /* Scan the function looking for loops. Record the start and end of each loop. |
2672 | Also mark as invalid loops any loops that contain a setjmp or are branched | |
2673 | to from outside the loop. */ | |
2674 | ||
2675 | static void | |
0c20a65f | 2676 | find_and_verify_loops (rtx f, struct loops *loops) |
b4ad7b23 | 2677 | { |
a2be868f MH |
2678 | rtx insn; |
2679 | rtx label; | |
2680 | int num_loops; | |
2681 | struct loop *current_loop; | |
2682 | struct loop *next_loop; | |
2683 | struct loop *loop; | |
2684 | ||
2685 | num_loops = loops->num; | |
b4ad7b23 | 2686 | |
3ec2b590 R |
2687 | compute_luids (f, NULL_RTX, 0); |
2688 | ||
b4ad7b23 RS |
2689 | /* If there are jumps to undefined labels, |
2690 | treat them as jumps out of any/all loops. | |
2691 | This also avoids writing past end of tables when there are no loops. */ | |
a2be868f | 2692 | uid_loop[0] = NULL; |
b4ad7b23 RS |
2693 | |
2694 | /* Find boundaries of loops, mark which loops are contained within | |
2695 | loops, and invalidate loops that have setjmp. */ | |
2696 | ||
a2be868f MH |
2697 | num_loops = 0; |
2698 | current_loop = NULL; | |
b4ad7b23 RS |
2699 | for (insn = f; insn; insn = NEXT_INSN (insn)) |
2700 | { | |
2701 | if (GET_CODE (insn) == NOTE) | |
2702 | switch (NOTE_LINE_NUMBER (insn)) | |
2703 | { | |
2704 | case NOTE_INSN_LOOP_BEG: | |
a2be868f MH |
2705 | next_loop = loops->array + num_loops; |
2706 | next_loop->num = num_loops; | |
2707 | num_loops++; | |
2708 | next_loop->start = insn; | |
2709 | next_loop->outer = current_loop; | |
b4ad7b23 RS |
2710 | current_loop = next_loop; |
2711 | break; | |
2712 | ||
3ec2b590 | 2713 | case NOTE_INSN_LOOP_CONT: |
a2be868f | 2714 | current_loop->cont = insn; |
3ec2b590 | 2715 | break; |
e375c819 MH |
2716 | |
2717 | case NOTE_INSN_LOOP_VTOP: | |
2718 | current_loop->vtop = insn; | |
2719 | break; | |
2720 | ||
b4ad7b23 | 2721 | case NOTE_INSN_LOOP_END: |
a2be868f | 2722 | if (! current_loop) |
b4ad7b23 RS |
2723 | abort (); |
2724 | ||
a2be868f | 2725 | current_loop->end = insn; |
a2be868f | 2726 | current_loop = current_loop->outer; |
b4ad7b23 RS |
2727 | break; |
2728 | ||
e9a25f70 JL |
2729 | default: |
2730 | break; | |
b4ad7b23 RS |
2731 | } |
2732 | ||
19652adf | 2733 | if (GET_CODE (insn) == CALL_INSN |
570a98eb JH |
2734 | && find_reg_note (insn, REG_SETJMP, NULL)) |
2735 | { | |
2736 | /* In this case, we must invalidate our current loop and any | |
2737 | enclosing loop. */ | |
2738 | for (loop = current_loop; loop; loop = loop->outer) | |
2739 | { | |
2740 | loop->invalid = 1; | |
2741 | if (loop_dump_stream) | |
2742 | fprintf (loop_dump_stream, | |
2743 | "\nLoop at %d ignored due to setjmp.\n", | |
2744 | INSN_UID (loop->start)); | |
2745 | } | |
2746 | } | |
2747 | ||
b4ad7b23 RS |
2748 | /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the |
2749 | enclosing loop, but this doesn't matter. */ | |
a2be868f | 2750 | uid_loop[INSN_UID (insn)] = current_loop; |
b4ad7b23 RS |
2751 | } |
2752 | ||
034dabc9 JW |
2753 | /* Any loop containing a label used in an initializer must be invalidated, |
2754 | because it can be jumped into from anywhere. */ | |
034dabc9 | 2755 | for (label = forced_labels; label; label = XEXP (label, 1)) |
6a58eee9 | 2756 | invalidate_loops_containing_label (XEXP (label, 0)); |
034dabc9 | 2757 | |
6adb4e3a MS |
2758 | /* Any loop containing a label used for an exception handler must be |
2759 | invalidated, because it can be jumped into from anywhere. */ | |
6a58eee9 | 2760 | for_each_eh_label (invalidate_loops_containing_label); |
6adb4e3a | 2761 | |
034dabc9 JW |
2762 | /* Now scan all insn's in the function. If any JUMP_INSN branches into a |
2763 | loop that it is not contained within, that loop is marked invalid. | |
2764 | If any INSN or CALL_INSN uses a label's address, then the loop containing | |
2765 | that label is marked invalid, because it could be jumped into from | |
2766 | anywhere. | |
b4ad7b23 RS |
2767 | |
2768 | Also look for blocks of code ending in an unconditional branch that | |
e6fcb60d | 2769 | exits the loop. If such a block is surrounded by a conditional |
b4ad7b23 RS |
2770 | branch around the block, move the block elsewhere (see below) and |
2771 | invert the jump to point to the code block. This may eliminate a | |
2772 | label in our loop and will simplify processing by both us and a | |
2773 | possible second cse pass. */ | |
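/* A sketch of that transformation (illustrative, with made-up labels):

	p:	if (cond) goto cond_label	; skips the exit block
	insn:	goto exit_label			; leaves the loop
	cond_label: ...rest of loop...

   becomes

	p:	if (! cond) goto new_label	; inverted jump
	cond_label: ...rest of loop...

   with the block from NEW_LABEL through its BARRIER moved outside the
   loop, where it still reads "goto exit_label".  */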
2774 | ||
2775 | for (insn = f; insn; insn = NEXT_INSN (insn)) | |
2c3c49de | 2776 | if (INSN_P (insn)) |
b4ad7b23 | 2777 | { |
a2be868f | 2778 | struct loop *this_loop = uid_loop[INSN_UID (insn)]; |
b4ad7b23 | 2779 | |
034dabc9 JW |
2780 | if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN) |
2781 | { | |
2782 | rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX); | |
2783 | if (note) | |
6a58eee9 | 2784 | invalidate_loops_containing_label (XEXP (note, 0)); |
034dabc9 JW |
2785 | } |
2786 | ||
2787 | if (GET_CODE (insn) != JUMP_INSN) | |
2788 | continue; | |
2789 | ||
a2be868f | 2790 | mark_loop_jump (PATTERN (insn), this_loop); |
b4ad7b23 RS |
2791 | |
2792 | /* See if this is an unconditional branch outside the loop. */ | |
a2be868f | 2793 | if (this_loop |
b4ad7b23 | 2794 | && (GET_CODE (PATTERN (insn)) == RETURN |
7f1c097d JH |
2795 | || (any_uncondjump_p (insn) |
2796 | && onlyjump_p (insn) | |
a2be868f MH |
2797 | && (uid_loop[INSN_UID (JUMP_LABEL (insn))] |
2798 | != this_loop))) | |
1c01e9df | 2799 | && get_max_uid () < max_uid_for_loop) |
b4ad7b23 RS |
2800 | { |
2801 | rtx p; | |
2802 | rtx our_next = next_real_insn (insn); | |
3b10cf4b | 2803 | rtx last_insn_to_move = NEXT_INSN (insn); |
a2be868f MH |
2804 | struct loop *dest_loop; |
2805 | struct loop *outer_loop = NULL; | |
b4ad7b23 RS |
2806 | |
2807 | /* Go backwards until we reach the start of the loop, a label, | |
2808 | or a JUMP_INSN. */ | |
2809 | for (p = PREV_INSN (insn); | |
2810 | GET_CODE (p) != CODE_LABEL | |
2811 | && ! (GET_CODE (p) == NOTE | |
2812 | && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG) | |
2813 | && GET_CODE (p) != JUMP_INSN; | |
2814 | p = PREV_INSN (p)) | |
2815 | ; | |
2816 | ||
edf711a4 RK |
2817 | /* Check for the case where we have a jump to an inner nested |
2818 | loop, and do not perform the optimization in that case. */ | |
2819 | ||
fdccb6df | 2820 | if (JUMP_LABEL (insn)) |
edf711a4 | 2821 | { |
a2be868f MH |
2822 | dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))]; |
2823 | if (dest_loop) | |
fdccb6df | 2824 | { |
a2be868f MH |
2825 | for (outer_loop = dest_loop; outer_loop; |
2826 | outer_loop = outer_loop->outer) | |
2827 | if (outer_loop == this_loop) | |
fdccb6df RK |
2828 | break; |
2829 | } | |
edf711a4 | 2830 | } |
edf711a4 | 2831 | |
89724a5a RK |
2832 | /* Make sure that the target of P is within the current loop. */ |
2833 | ||
9a8e74f0 | 2834 | if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) |
a2be868f MH |
2835 | && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop) |
2836 | outer_loop = this_loop; | |
89724a5a | 2837 | |
b4ad7b23 RS |
2838 | /* If we stopped on a JUMP_INSN to the next insn after INSN, |
2839 | we have a block of code to try to move. | |
2840 | ||
2841 | We look backward and then forward from the target of INSN | |
2842 | to find a BARRIER at the same loop depth as the target. | |
2843 | If we find such a BARRIER, we make a new label for the start | |
2844 | of the block, invert the jump in P and point it to that label, | |
2845 | and move the block of code to the spot we found. */ | |
2846 | ||
a2be868f | 2847 | if (! outer_loop |
edf711a4 | 2848 | && GET_CODE (p) == JUMP_INSN |
c6096c5e RS |
2849 | && JUMP_LABEL (p) != 0 |
2850 | /* Just ignore jumps to labels that were never emitted. | |
2851 | These always indicate compilation errors. */ | |
2852 | && INSN_UID (JUMP_LABEL (p)) != 0 | |
7f1c097d | 2853 | && any_condjump_p (p) && onlyjump_p (p) |
3b10cf4b MM |
2854 | && next_real_insn (JUMP_LABEL (p)) == our_next |
2855 | /* If it's not safe to move the sequence, then we | |
2856 | mustn't try. */ | |
e6fcb60d | 2857 | && insns_safe_to_move_p (p, NEXT_INSN (insn), |
3b10cf4b | 2858 | &last_insn_to_move)) |
b4ad7b23 RS |
2859 | { |
2860 | rtx target | |
2861 | = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn (); | |
a2be868f | 2862 | struct loop *target_loop = uid_loop[INSN_UID (target)]; |
17bec8ee | 2863 | rtx loc, loc2; |
c4f81e4a JH |
2864 | rtx tmp; |
2865 | ||
2866 | /* Search for possible garbage past the conditional jumps | |
b0fd92a3 | 2867 | and look for the last barrier. */ |
c4f81e4a JH |
2868 | for (tmp = last_insn_to_move; |
2869 | tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp)) | |
2870 | if (GET_CODE (tmp) == BARRIER) | |
2871 | last_insn_to_move = tmp; | |
b4ad7b23 RS |
2872 | |
2873 | for (loc = target; loc; loc = PREV_INSN (loc)) | |
2874 | if (GET_CODE (loc) == BARRIER | |
17bec8ee BS |
2875 | /* Don't move things inside a tablejump. */ |
2876 | && ((loc2 = next_nonnote_insn (loc)) == 0 | |
2877 | || GET_CODE (loc2) != CODE_LABEL | |
2878 | || (loc2 = next_nonnote_insn (loc2)) == 0 | |
2879 | || GET_CODE (loc2) != JUMP_INSN | |
2880 | || (GET_CODE (PATTERN (loc2)) != ADDR_VEC | |
2881 | && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC)) | |
a2be868f | 2882 | && uid_loop[INSN_UID (loc)] == target_loop) |
b4ad7b23 RS |
2883 | break; |
2884 | ||
2885 | if (loc == 0) | |
2886 | for (loc = target; loc; loc = NEXT_INSN (loc)) | |
2887 | if (GET_CODE (loc) == BARRIER | |
17bec8ee BS |
2888 | /* Don't move things inside a tablejump. */ |
2889 | && ((loc2 = next_nonnote_insn (loc)) == 0 | |
2890 | || GET_CODE (loc2) != CODE_LABEL | |
2891 | || (loc2 = next_nonnote_insn (loc2)) == 0 | |
2892 | || GET_CODE (loc2) != JUMP_INSN | |
2893 | || (GET_CODE (PATTERN (loc2)) != ADDR_VEC | |
2894 | && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC)) | |
a2be868f | 2895 | && uid_loop[INSN_UID (loc)] == target_loop) |
b4ad7b23 RS |
2896 | break; |
2897 | ||
2898 | if (loc) | |
2899 | { | |
2900 | rtx cond_label = JUMP_LABEL (p); | |
2901 | rtx new_label = get_label_after (p); | |
2902 | ||
2903 | /* Ensure our label doesn't go away. */ | |
2904 | LABEL_NUSES (cond_label)++; | |
2905 | ||
a2be868f | 2906 | /* Verify that uid_loop is large enough and that |
0f41302f | 2907 | we can invert P. */ |
9ba11d5a RH |
2908 | if (invert_jump (p, new_label, 1)) |
2909 | { | |
2910 | rtx q, r; | |
2911 | ||
2912 | /* If no suitable BARRIER was found, create a suitable | |
2913 | one before TARGET. Since TARGET is a fall through | |
09da1532 | 2914 | path, we'll need to insert a jump around our block |
4fe9b91c | 2915 | and add a BARRIER before TARGET. |
9ba11d5a RH |
2916 | |
2917 | This creates an extra unconditional jump outside | |
2918 | the loop. However, the benefits of removing rarely | |
2919 | executed instructions from inside the loop usually | |
2920 | outweighs the cost of the extra unconditional jump | |
2921 | outside the loop. */ | |
2922 | if (loc == 0) | |
2923 | { | |
2924 | rtx temp; | |
2925 | ||
2926 | temp = gen_jump (JUMP_LABEL (insn)); | |
2927 | temp = emit_jump_insn_before (temp, target); | |
2928 | JUMP_LABEL (temp) = JUMP_LABEL (insn); | |
2929 | LABEL_NUSES (JUMP_LABEL (insn))++; | |
2930 | loc = emit_barrier_before (target); | |
2931 | } | |
2932 | ||
2933 | /* Include the BARRIER after INSN and copy the | |
2934 | block after LOC. */ | |
2b7d71b2 JJ |
2935 | if (squeeze_notes (&new_label, &last_insn_to_move)) |
2936 | abort (); | |
9ba11d5a RH |
2937 | reorder_insns (new_label, last_insn_to_move, loc); |
2938 | ||
2939 | /* All those insns are now in TARGET_LOOP. */ | |
e6fcb60d | 2940 | for (q = new_label; |
9ba11d5a RH |
2941 | q != NEXT_INSN (last_insn_to_move); |
2942 | q = NEXT_INSN (q)) | |
2943 | uid_loop[INSN_UID (q)] = target_loop; | |
2944 | ||
2945 | /* The label jumped to by INSN is no longer a loop | |
2946 | exit. Unless INSN does not have a label (e.g., | |
2947 | it is a RETURN insn), search loop->exit_labels | |
2948 | to find its label_ref, and remove it. Also turn | |
2949 | off LABEL_OUTSIDE_LOOP_P bit. */ | |
2950 | if (JUMP_LABEL (insn)) | |
2951 | { | |
fd5d5b07 KH |
2952 | for (q = 0, r = this_loop->exit_labels; |
2953 | r; | |
2954 | q = r, r = LABEL_NEXTREF (r)) | |
9ba11d5a RH |
2955 | if (XEXP (r, 0) == JUMP_LABEL (insn)) |
2956 | { | |
2957 | LABEL_OUTSIDE_LOOP_P (r) = 0; | |
2958 | if (q) | |
2959 | LABEL_NEXTREF (q) = LABEL_NEXTREF (r); | |
2960 | else | |
2961 | this_loop->exit_labels = LABEL_NEXTREF (r); | |
2962 | break; | |
2963 | } | |
2964 | ||
2965 | for (loop = this_loop; loop && loop != target_loop; | |
2966 | loop = loop->outer) | |
2967 | loop->exit_count--; | |
2968 | ||
2969 | /* If we didn't find it, then something is | |
2970 | wrong. */ | |
2971 | if (! r) | |
2972 | abort (); | |
2973 | } | |
2974 | ||
2975 | /* P is now a jump outside the loop, so it must be put | |
2976 | in loop->exit_labels, and marked as such. | |
2977 | The easiest way to do this is to just call | |
2978 | mark_loop_jump again for P. */ | |
2979 | mark_loop_jump (PATTERN (p), this_loop); | |
2980 | ||
2981 | /* If INSN now jumps to the insn after it, | |
2982 | delete INSN. */ | |
2983 | if (JUMP_LABEL (insn) != 0 | |
2984 | && (next_real_insn (JUMP_LABEL (insn)) | |
2985 | == next_real_insn (insn))) | |
53c17031 | 2986 | delete_related_insns (insn); |
9ba11d5a | 2987 | } |
b4ad7b23 RS |
2988 | |
2989 | /* Continue the loop after where the conditional | |
2990 | branch used to jump, since the only branch insn | |
2991 | in the block (if it still remains) is an inter-loop | |
2992 | branch and hence needs no processing. */ | |
2993 | insn = NEXT_INSN (cond_label); | |
2994 | ||
2995 | if (--LABEL_NUSES (cond_label) == 0) | |
53c17031 | 2996 | delete_related_insns (cond_label); |
3ad0cfaf RK |
2997 | |
2998 | /* This loop will be continued with NEXT_INSN (insn). */ | |
2999 | insn = PREV_INSN (insn); | |
b4ad7b23 RS |
3000 | } |
3001 | } | |
3002 | } | |
3003 | } | |
3004 | } | |
3005 | ||
3006 | /* If any label in X jumps to a loop different from LOOP and any of the |
3007 | loops it is contained in, mark the target loop invalid. | |
3008 | ||
3009 | For speed, we assume that X is part of a pattern of a JUMP_INSN. */ | |
3010 | ||
3011 | static void | |
0c20a65f | 3012 | mark_loop_jump (rtx x, struct loop *loop) |
b4ad7b23 | 3013 | { |
a2be868f MH |
3014 | struct loop *dest_loop; |
3015 | struct loop *outer_loop; | |
b4ad7b23 RS |
3016 | int i; |
3017 | ||
3018 | switch (GET_CODE (x)) | |
3019 | { | |
3020 | case PC: | |
3021 | case USE: | |
3022 | case CLOBBER: | |
3023 | case REG: | |
3024 | case MEM: | |
3025 | case CONST_INT: | |
3026 | case CONST_DOUBLE: | |
3027 | case RETURN: | |
3028 | return; | |
3029 | ||
3030 | case CONST: | |
3031 | /* There could be a label reference in here. */ | |
a2be868f | 3032 | mark_loop_jump (XEXP (x, 0), loop); |
b4ad7b23 RS |
3033 | return; |
3034 | ||
3035 | case PLUS: | |
3036 | case MINUS: | |
3037 | case MULT: | |
a2be868f MH |
3038 | mark_loop_jump (XEXP (x, 0), loop); |
3039 | mark_loop_jump (XEXP (x, 1), loop); | |
b4ad7b23 RS |
3040 | return; |
3041 | ||
c4ae2725 JL |
3042 | case LO_SUM: |
3043 | /* This may refer to a LABEL_REF or SYMBOL_REF. */ | |
a2be868f | 3044 | mark_loop_jump (XEXP (x, 1), loop); |
c4ae2725 JL |
3045 | return; |
3046 | ||
b4ad7b23 RS |
3047 | case SIGN_EXTEND: |
3048 | case ZERO_EXTEND: | |
a2be868f | 3049 | mark_loop_jump (XEXP (x, 0), loop); |
b4ad7b23 RS |
3050 | return; |
3051 | ||
3052 | case LABEL_REF: | |
a2be868f | 3053 | dest_loop = uid_loop[INSN_UID (XEXP (x, 0))]; |
b4ad7b23 RS |
3054 | |
3055 | /* Link together all labels that branch outside the loop. This | |
3056 | is used by final_[bg]iv_value and the loop unrolling code. Also | |
3057 | mark this LABEL_REF so we know that this branch should predict | |
3058 | false. */ | |
3059 | ||
edf711a4 RK |
3060 | /* A check to make sure the label is not in an inner nested loop, |
3061 | since this does not count as a loop exit. */ | |
a2be868f | 3062 | if (dest_loop) |
edf711a4 | 3063 | { |
a2be868f MH |
3064 | for (outer_loop = dest_loop; outer_loop; |
3065 | outer_loop = outer_loop->outer) | |
3066 | if (outer_loop == loop) | |
edf711a4 RK |
3067 | break; |
3068 | } | |
3069 | else | |
a2be868f | 3070 | outer_loop = NULL; |
edf711a4 | 3071 | |
a2be868f | 3072 | if (loop && ! outer_loop) |
b4ad7b23 RS |
3073 | { |
3074 | LABEL_OUTSIDE_LOOP_P (x) = 1; | |
a2be868f MH |
3075 | LABEL_NEXTREF (x) = loop->exit_labels; |
3076 | loop->exit_labels = x; | |
353127c2 | 3077 | |
a2be868f MH |
3078 | for (outer_loop = loop; |
3079 | outer_loop && outer_loop != dest_loop; | |
3080 | outer_loop = outer_loop->outer) | |
3081 | outer_loop->exit_count++; | |
b4ad7b23 RS |
3082 | } |
3083 | ||
3084 | /* If this is inside a loop, but not in the current loop or one enclosed | |
3085 | by it, it invalidates at least one loop. */ | |
3086 | ||
a2be868f | 3087 | if (! dest_loop) |
b4ad7b23 RS |
3088 | return; |
3089 | ||
3090 | /* We must invalidate every nested loop containing the target of this | |
3091 | label, except those that also contain the jump insn. */ | |
3092 | ||
a2be868f | 3093 | for (; dest_loop; dest_loop = dest_loop->outer) |
b4ad7b23 RS |
3094 | { |
3095 | /* Stop when we reach a loop that also contains the jump insn. */ | |
a2be868f | 3096 | for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer) |
b4ad7b23 RS |
3097 | if (dest_loop == outer_loop) |
3098 | return; | |
3099 | ||
3100 | /* If we get here, we know we need to invalidate a loop. */ | |
a2be868f | 3101 | if (loop_dump_stream && ! dest_loop->invalid) |
b4ad7b23 RS |
3102 | fprintf (loop_dump_stream, |
3103 | "\nLoop at %d ignored due to multiple entry points.\n", | |
a2be868f | 3104 | INSN_UID (dest_loop->start)); |
e6fcb60d | 3105 | |
a2be868f | 3106 | dest_loop->invalid = 1; |
b4ad7b23 RS |
3107 | } |
3108 | return; | |
3109 | ||
3110 | case SET: | |
3111 | /* If this is not setting pc, ignore. */ | |
3112 | if (SET_DEST (x) == pc_rtx) | |
a2be868f | 3113 | mark_loop_jump (SET_SRC (x), loop); |
b4ad7b23 RS |
3114 | return; |
3115 | ||
3116 | case IF_THEN_ELSE: | |
a2be868f MH |
3117 | mark_loop_jump (XEXP (x, 1), loop); |
3118 | mark_loop_jump (XEXP (x, 2), loop); | |
b4ad7b23 RS |
3119 | return; |
3120 | ||
3121 | case PARALLEL: | |
3122 | case ADDR_VEC: | |
3123 | for (i = 0; i < XVECLEN (x, 0); i++) | |
a2be868f | 3124 | mark_loop_jump (XVECEXP (x, 0, i), loop); |
b4ad7b23 RS |
3125 | return; |
3126 | ||
3127 | case ADDR_DIFF_VEC: | |
3128 | for (i = 0; i < XVECLEN (x, 1); i++) | |
a2be868f | 3129 | mark_loop_jump (XVECEXP (x, 1, i), loop); |
b4ad7b23 RS |
3130 | return; |
3131 | ||
3132 | default: | |
c4ae2725 JL |
3133 | /* Strictly speaking this is not a jump into the loop, only a possible |
3134 | jump out of the loop. However, we have no way to link the destination | |
3135 | of this jump onto the list of exit labels. To be safe we mark this | |
3136 | loop and any containing loops as invalid. */ | |
a2be868f | 3137 | if (loop) |
353127c2 | 3138 | { |
a2be868f | 3139 | for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer) |
c4ae2725 | 3140 | { |
a2be868f | 3141 | if (loop_dump_stream && ! outer_loop->invalid) |
c4ae2725 JL |
3142 | fprintf (loop_dump_stream, |
3143 | "\nLoop at %d ignored due to unknown exit jump.\n", | |
a2be868f MH |
3144 | INSN_UID (outer_loop->start)); |
3145 | outer_loop->invalid = 1; | |
c4ae2725 | 3146 | } |
353127c2 | 3147 | } |
b6ccc3fb | 3148 | return; |
b4ad7b23 RS |
3149 | } |
3150 | } | |
3151 | \f | |
3152 | /* Return nonzero if there is a label in the range from | |
3153 | insn INSN to and including the insn whose luid is END. |
3154 | INSN must have an assigned luid (i.e., it must not have | |
3155 | been previously created by loop.c). */ | |
3156 | ||
3157 | static int | |
0c20a65f | 3158 | labels_in_range_p (rtx insn, int end) |
b4ad7b23 RS |
3159 | { |
3160 | while (insn && INSN_LUID (insn) <= end) | |
3161 | { | |
3162 | if (GET_CODE (insn) == CODE_LABEL) | |
3163 | return 1; | |
3164 | insn = NEXT_INSN (insn); | |
3165 | } | |
3166 | ||
3167 | return 0; | |
3168 | } | |
3169 | ||
3170 | /* Record that a memory reference X is being set. */ | |
3171 | ||
3172 | static void | |
0c20a65f AJ |
3173 | note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED, |
3174 | void *data ATTRIBUTE_UNUSED) | |
b4ad7b23 | 3175 | { |
afa1738b MH |
3176 | struct loop_info *loop_info = data; |
3177 | ||
b4ad7b23 RS |
3178 | if (x == 0 || GET_CODE (x) != MEM) |
3179 | return; | |
3180 | ||
3181 | /* Count number of memory writes. | |
3182 | This affects heuristics in strength_reduce. */ | |
afa1738b | 3183 | loop_info->num_mem_sets++; |
fd5d5b07 | 3184 | |
ca800983 | 3185 | /* BLKmode MEM means all memory is clobbered. */ |
afa1738b | 3186 | if (GET_MODE (x) == BLKmode) |
14a774a9 RK |
3187 | { |
3188 | if (RTX_UNCHANGING_P (x)) | |
afa1738b | 3189 | loop_info->unknown_constant_address_altered = 1; |
14a774a9 | 3190 | else |
afa1738b | 3191 | loop_info->unknown_address_altered = 1; |
fd5d5b07 | 3192 | |
14a774a9 RK |
3193 | return; |
3194 | } | |
fd5d5b07 KH |
3195 | |
3196 | loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x, | |
afa1738b | 3197 | loop_info->store_mems); |
b4ad7b23 | 3198 | } |
59487769 JL |
3199 | |
3200 | /* X is a value modified by an INSN that references a biv inside a loop | |
3201 | exit test (i.e., X is somehow related to the value of the biv). If X |
3202 | is a pseudo that is used more than once, then the biv is (effectively) | |
635a2a90 | 3203 | used more than once. DATA is a pointer to a loop_regs structure. */ |
59487769 JL |
3204 | |
3205 | static void | |
0c20a65f | 3206 | note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data) |
59487769 | 3207 | { |
1ecd860b MH |
3208 | struct loop_regs *regs = (struct loop_regs *) data; |
3209 | ||
59487769 JL |
3210 | if (x == 0) |
3211 | return; | |
3212 | ||
3213 | while (GET_CODE (x) == STRICT_LOW_PART | |
3214 | || GET_CODE (x) == SIGN_EXTRACT | |
3215 | || GET_CODE (x) == ZERO_EXTRACT | |
3216 | || GET_CODE (x) == SUBREG) | |
3217 | x = XEXP (x, 0); | |
3218 | ||
3219 | if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER) | |
3220 | return; | |
3221 | ||
3222 | /* If we do not have usage information, or if we know the register | |
3223 | is used more than once, note that fact for check_dbra_loop. */ | |
3224 | if (REGNO (x) >= max_reg_before_loop | |
f1d4ac80 MH |
3225 | || ! regs->array[REGNO (x)].single_usage |
3226 | || regs->array[REGNO (x)].single_usage == const0_rtx) | |
635a2a90 | 3227 | regs->multiple_uses = 1; |
59487769 | 3228 | } |
b4ad7b23 RS |
3229 | \f |
3230 | /* Return nonzero if the rtx X is invariant over the current loop. | |
3231 | ||
3232 | The value is 2 if we refer to something only conditionally invariant. | |
3233 | ||
20bd7bfa | 3234 | A memory ref is invariant if it is not volatile and does not conflict |
afa1738b | 3235 | with anything stored in `loop_info->store_mems'. */ |
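/* For example: constants and SYMBOL_REFs always yield 1; a register
   whose set_in_loop count is negative yields 2 (conditionally
   invariant); PC, CC0, UNSPEC_VOLATILE and volatile MEMs yield 0.  */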
b4ad7b23 RS |
3236 | |
3237 | int | |
0c20a65f | 3238 | loop_invariant_p (const struct loop *loop, rtx x) |
b4ad7b23 | 3239 | { |
afa1738b | 3240 | struct loop_info *loop_info = LOOP_INFO (loop); |
1ecd860b | 3241 | struct loop_regs *regs = LOOP_REGS (loop); |
b3694847 SS |
3242 | int i; |
3243 | enum rtx_code code; | |
3244 | const char *fmt; | |
b4ad7b23 | 3245 | int conditional = 0; |
5026a502 | 3246 | rtx mem_list_entry; |
b4ad7b23 RS |
3247 | |
3248 | if (x == 0) | |
3249 | return 1; | |
3250 | code = GET_CODE (x); | |
3251 | switch (code) | |
3252 | { | |
3253 | case CONST_INT: | |
3254 | case CONST_DOUBLE: | |
3255 | case SYMBOL_REF: | |
3256 | case CONST: | |
3257 | return 1; | |
3258 | ||
3259 | case LABEL_REF: | |
3260 | /* A LABEL_REF is normally invariant; however, if we are unrolling |
3261 | loops, and this label is inside the loop, then it isn't invariant. | |
3262 | This is because each unrolled copy of the loop body will have | |
3263 | a copy of this label. If this was invariant, then an insn loading | |
3264 | the address of this label into a register might get moved outside | |
3265 | the loop, and then each loop body would end up using the same label. | |
3266 | ||
3267 | We don't know the loop bounds here though, so just fail for all | |
3268 | labels. */ | |
b17d5d7c | 3269 | if (flag_old_unroll_loops) |
b4ad7b23 RS |
3270 | return 0; |
3271 | else | |
3272 | return 1; | |
3273 | ||
3274 | case PC: | |
3275 | case CC0: | |
3276 | case UNSPEC_VOLATILE: | |
3277 | return 0; | |
3278 | ||
3279 | case REG: | |
3280 | /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid | |
3281 | since the reg might be set by initialization within the loop. */ | |
1f027d54 RK |
3282 | |
3283 | if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx | |
71cef493 | 3284 | || x == arg_pointer_rtx || x == pic_offset_table_rtx) |
1f027d54 | 3285 | && ! current_function_has_nonlocal_goto) |
b4ad7b23 | 3286 | return 1; |
1f027d54 | 3287 | |
0534b804 | 3288 | if (LOOP_INFO (loop)->has_call |
b4ad7b23 RS |
3289 | && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)]) |
3290 | return 0; | |
1f027d54 | 3291 | |
2aff9508 DJ |
3292 | /* Out-of-range regs can occur when we are called from unrolling. |
3293 | These have always been created by the unroller and are set in | |
32dd366d | 3294 | the loop, hence are never invariant. */ |
2aff9508 | 3295 | |
fc555370 | 3296 | if (REGNO (x) >= (unsigned) regs->num) |
2aff9508 DJ |
3297 | return 0; |
3298 | ||
f1d4ac80 | 3299 | if (regs->array[REGNO (x)].set_in_loop < 0) |
b4ad7b23 | 3300 | return 2; |
1f027d54 | 3301 | |
f1d4ac80 | 3302 | return regs->array[REGNO (x)].set_in_loop == 0; |
b4ad7b23 RS |
3303 | |
3304 | case MEM: | |
d5e3f151 JW |
3305 | /* Volatile memory references must be rejected. Do this before |
3306 | checking for read-only items, so that volatile read-only items | |
3307 | will be rejected also. */ | |
3308 | if (MEM_VOLATILE_P (x)) | |
3309 | return 0; | |
3310 | ||
b4ad7b23 | 3311 | /* See if there is any dependence between a store and this load. */ |
afa1738b | 3312 | mem_list_entry = loop_info->store_mems; |
5026a502 JL |
3313 | while (mem_list_entry) |
3314 | { | |
3315 | if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode, | |
3316 | x, rtx_varies_p)) | |
3317 | return 0; | |
14a774a9 | 3318 | |
5026a502 JL |
3319 | mem_list_entry = XEXP (mem_list_entry, 1); |
3320 | } | |
b4ad7b23 RS |
3321 | |
3322 | /* It's not invalidated by a store in memory | |
3323 | but we must still verify the address is invariant. */ | |
3324 | break; | |
3325 | ||
3326 | case ASM_OPERANDS: | |
3327 | /* Don't mess with insns declared volatile. */ | |
3328 | if (MEM_VOLATILE_P (x)) | |
3329 | return 0; | |
e9a25f70 | 3330 | break; |
e6fcb60d | 3331 | |
e9a25f70 JL |
3332 | default: |
3333 | break; | |
b4ad7b23 RS |
3334 | } |
3335 | ||
3336 | fmt = GET_RTX_FORMAT (code); | |
3337 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
3338 | { | |
3339 | if (fmt[i] == 'e') | |
3340 | { | |
0534b804 | 3341 | int tem = loop_invariant_p (loop, XEXP (x, i)); |
b4ad7b23 RS |
3342 | if (tem == 0) |
3343 | return 0; | |
3344 | if (tem == 2) | |
3345 | conditional = 1; | |
3346 | } | |
3347 | else if (fmt[i] == 'E') | |
3348 | { | |
b3694847 | 3349 | int j; |
b4ad7b23 RS |
3350 | for (j = 0; j < XVECLEN (x, i); j++) |
3351 | { | |
0534b804 | 3352 | int tem = loop_invariant_p (loop, XVECEXP (x, i, j)); |
b4ad7b23 RS |
3353 | if (tem == 0) |
3354 | return 0; | |
3355 | if (tem == 2) | |
3356 | conditional = 1; | |
3357 | } | |
3358 | ||
3359 | } | |
3360 | } | |
3361 | ||
3362 | return 1 + conditional; | |
3363 | } | |
b4ad7b23 RS |
3364 | \f |
3365 | /* Return nonzero if all the insns in the loop that set REG | |
3366 | are INSN and the immediately following insns, | |
3367 | and if each of those insns sets REG in an invariant way | |
3368 | (not counting uses of REG in them). | |
3369 | ||
3370 | The value is 2 if some of these insns are only conditionally invariant. | |
3371 | ||
3372 | We assume that INSN itself is the first set of REG | |
3373 | and that its source is invariant. */ | |
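/* For example, with N_SETS == 2 this accepts REG when the only other set
   follows INSN immediately (ignoring notes and skipping libcall bodies)
   and its source is also invariant; a constant or invariant REG_EQUAL
   note counts as well.  */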
3374 | ||
3375 | static int | |
0c20a65f AJ |
3376 | consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets, |
3377 | rtx insn) | |
b4ad7b23 | 3378 | { |
1ecd860b | 3379 | struct loop_regs *regs = LOOP_REGS (loop); |
770ae6cc RK |
3380 | rtx p = insn; |
3381 | unsigned int regno = REGNO (reg); | |
b4ad7b23 RS |
3382 | rtx temp; |
3383 | /* Number of sets we have to insist on finding after INSN. */ | |
3384 | int count = n_sets - 1; | |
f1d4ac80 | 3385 | int old = regs->array[regno].set_in_loop; |
b4ad7b23 RS |
3386 | int value = 0; |
3387 | int this; | |
3388 | ||
3389 | /* If N_SETS hit the limit, we can't rely on its value. */ | |
3390 | if (n_sets == 127) | |
3391 | return 0; | |
3392 | ||
f1d4ac80 | 3393 | regs->array[regno].set_in_loop = 0; |
b4ad7b23 RS |
3394 | |
3395 | while (count > 0) | |
3396 | { | |
b3694847 | 3397 | enum rtx_code code; |
b4ad7b23 RS |
3398 | rtx set; |
3399 | ||
3400 | p = NEXT_INSN (p); | |
3401 | code = GET_CODE (p); | |
3402 | ||
38e01259 | 3403 | /* If library call, skip to end of it. */ |
5fd8383e | 3404 | if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX))) |
b4ad7b23 RS |
3405 | p = XEXP (temp, 0); |
3406 | ||
3407 | this = 0; | |
3408 | if (code == INSN | |
3409 | && (set = single_set (p)) | |
3410 | && GET_CODE (SET_DEST (set)) == REG | |
3411 | && REGNO (SET_DEST (set)) == regno) | |
3412 | { | |
0534b804 | 3413 | this = loop_invariant_p (loop, SET_SRC (set)); |
b4ad7b23 RS |
3414 | if (this != 0) |
3415 | value |= this; | |
51723711 | 3416 | else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))) |
b4ad7b23 | 3417 | { |
83d90aac JW |
3418 | /* If this is a libcall, then any invariant REG_EQUAL note is OK. |
3419 | If this is an ordinary insn, then only CONSTANT_P REG_EQUAL | |
3420 | notes are OK. */ | |
3421 | this = (CONSTANT_P (XEXP (temp, 0)) | |
3422 | || (find_reg_note (p, REG_RETVAL, NULL_RTX) | |
0534b804 | 3423 | && loop_invariant_p (loop, XEXP (temp, 0)))); |
b4ad7b23 RS |
3424 | if (this != 0) |
3425 | value |= this; | |
3426 | } | |
3427 | } | |
3428 | if (this != 0) | |
3429 | count--; | |
3430 | else if (code != NOTE) | |
3431 | { | |
f1d4ac80 | 3432 | regs->array[regno].set_in_loop = old; |
b4ad7b23 RS |
3433 | return 0; |
3434 | } | |
3435 | } | |
3436 | ||
f1d4ac80 | 3437 | regs->array[regno].set_in_loop = old; |
0534b804 | 3438 | /* If loop_invariant_p ever returned 2, we return 2. */ |
b4ad7b23 RS |
3439 | return 1 + (value & 2); |
3440 | } | |
3441 | ||
3442 | #if 0 | |
3443 | /* I don't think this condition is sufficient to allow INSN | |
3444 | to be moved, so we no longer test it. */ | |
3445 | ||
3446 | /* Return 1 if all insns in the basic block of INSN and following INSN | |
3447 | that set REG are invariant according to TABLE. */ | |
3448 | ||
3449 | static int | |
0c20a65f | 3450 | all_sets_invariant_p (rtx reg, rtx insn, short *table) |
b4ad7b23 | 3451 | { |
b3694847 SS |
3452 | rtx p = insn; |
3453 | int regno = REGNO (reg); | |
b4ad7b23 RS |
3454 | |
3455 | while (1) | |
3456 | { | |
b3694847 | 3457 | enum rtx_code code; |
b4ad7b23 RS |
3458 | p = NEXT_INSN (p); |
3459 | code = GET_CODE (p); | |
3460 | if (code == CODE_LABEL || code == JUMP_INSN) | |
3461 | return 1; | |
3462 | if (code == INSN && GET_CODE (PATTERN (p)) == SET | |
3463 | && GET_CODE (SET_DEST (PATTERN (p))) == REG | |
3464 | && REGNO (SET_DEST (PATTERN (p))) == regno) | |
3465 | { | |
0534b804 | 3466 | if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table)) |
b4ad7b23 RS |
3467 | return 0; |
3468 | } | |
3469 | } | |
3470 | } | |
3471 | #endif /* 0 */ | |
3472 | \f | |
3473 | /* Look at all uses (not sets) of registers in X. For each, if it is | |
3474 | the single use, set REGS->array[REGNO].single_usage to INSN; if there |
3475 | was a previous use in a different insn, set it to const0_rtx. */ |
3476 | ||
3477 | static void | |
0c20a65f | 3478 | find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x) |
b4ad7b23 RS |
3479 | { |
3480 | enum rtx_code code = GET_CODE (x); | |
6f7d635c | 3481 | const char *fmt = GET_RTX_FORMAT (code); |
b4ad7b23 RS |
3482 | int i, j; |
3483 | ||
3484 | if (code == REG) | |
f1d4ac80 MH |
3485 | regs->array[REGNO (x)].single_usage |
3486 | = (regs->array[REGNO (x)].single_usage != 0 | |
3487 | && regs->array[REGNO (x)].single_usage != insn) | |
b4ad7b23 RS |
3488 | ? const0_rtx : insn; |
3489 | ||
3490 | else if (code == SET) | |
3491 | { | |
3492 | /* Don't count SET_DEST if it is a REG; otherwise count things | |
3493 | in SET_DEST because if a register is partially modified, it won't | |
e6fcb60d | 3494 | show up as a potential movable so we don't care how USAGE is set |
b4ad7b23 RS |
3495 | for it. */ |
3496 | if (GET_CODE (SET_DEST (x)) != REG) | |
f1d4ac80 MH |
3497 | find_single_use_in_loop (regs, insn, SET_DEST (x)); |
3498 | find_single_use_in_loop (regs, insn, SET_SRC (x)); | |
b4ad7b23 RS |
3499 | } |
3500 | else | |
3501 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
3502 | { | |
3503 | if (fmt[i] == 'e' && XEXP (x, i) != 0) | |
f1d4ac80 | 3504 | find_single_use_in_loop (regs, insn, XEXP (x, i)); |
b4ad7b23 RS |
3505 | else if (fmt[i] == 'E') |
3506 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
f1d4ac80 | 3507 | find_single_use_in_loop (regs, insn, XVECEXP (x, i, j)); |
b4ad7b23 RS |
3508 | } |
3509 | } | |
3510 | \f | |
a4c3ddd8 | 3511 | /* Count and record any set in X which is contained in INSN. Update |
f1d4ac80 MH |
3512 | REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set |
3513 | in X. */ | |
a4c3ddd8 BS |
3514 | |
3515 | static void | |
0c20a65f | 3516 | count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set) |
a4c3ddd8 BS |
3517 | { |
3518 | if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG) | |
3519 | /* Don't move a reg that has an explicit clobber. | |
3520 | It's not worth the pain to try to do it correctly. */ | |
f1d4ac80 | 3521 | regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1; |
a4c3ddd8 BS |
3522 | |
3523 | if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER) | |
3524 | { | |
3525 | rtx dest = SET_DEST (x); | |
3526 | while (GET_CODE (dest) == SUBREG | |
3527 | || GET_CODE (dest) == ZERO_EXTRACT | |
3528 | || GET_CODE (dest) == SIGN_EXTRACT | |
3529 | || GET_CODE (dest) == STRICT_LOW_PART) | |
3530 | dest = XEXP (dest, 0); | |
3531 | if (GET_CODE (dest) == REG) | |
3532 | { | |
d5e0243a | 3533 | int i; |
b3694847 | 3534 | int regno = REGNO (dest); |
44a5565d | 3535 | for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++) |
d5e0243a DE |
3536 | { |
3537 | /* If this is the first setting of this reg | |
3538 | in current basic block, and it was set before, | |
3539 | it must be set in two basic blocks, so it cannot | |
3540 | be moved out of the loop. */ | |
3541 | if (regs->array[regno].set_in_loop > 0 | |
51a7f205 | 3542 | && last_set[regno] == 0) |
d5e0243a DE |
3543 | regs->array[regno+i].may_not_optimize = 1; |
3544 | /* If this is not first setting in current basic block, | |
3545 | see if reg was used in between previous one and this. | |
3546 | If so, neither one can be moved. */ | |
3547 | if (last_set[regno] != 0 | |
3548 | && reg_used_between_p (dest, last_set[regno], insn)) | |
3549 | regs->array[regno+i].may_not_optimize = 1; | |
3550 | if (regs->array[regno+i].set_in_loop < 127) | |
3551 | ++regs->array[regno+i].set_in_loop; | |
3552 | last_set[regno+i] = insn; | |
3553 | } | |
a4c3ddd8 BS |
3554 | } |
3555 | } | |
3556 | } | |
b4ad7b23 | 3557 | \f |
0534b804 MH |
3558 | /* Given a loop that is bounded by LOOP->START and LOOP->END and that |
3559 | is entered at LOOP->SCAN_START, return 1 if the register set in SET | |
3560 | contained in insn INSN is used by any insn that precedes INSN in | |
3561 | cyclic order starting from the loop entry point. | |
b4ad7b23 RS |
3562 | |
3563 | We don't want to use INSN_LUID here because if we restrict INSN to those | |
3564 | that have a valid INSN_LUID, it means we cannot move an invariant out | |
3565 | from an inner loop past two loops. */ | |
3566 | ||
3567 | static int | |
0c20a65f | 3568 | loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn) |
b4ad7b23 RS |
3569 | { |
3570 | rtx reg = SET_DEST (set); | |
3571 | rtx p; | |
3572 | ||
3573 | /* Scan forward checking for register usage. If we hit INSN, we | |
a2be868f MH |
3574 | are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */ |
3575 | for (p = loop->scan_start; p != insn; p = NEXT_INSN (p)) | |
b4ad7b23 | 3576 | { |
2c3c49de | 3577 | if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p))) |
b4ad7b23 RS |
3578 | return 1; |
3579 | ||
a2be868f MH |
3580 | if (p == loop->end) |
3581 | p = loop->start; | |
b4ad7b23 RS |
3582 | } |
3583 | ||
3584 | return 0; | |
3585 | } | |
3586 | \f | |
0dd0e980 JH |
3587 | |
3588 | /* Information we collect about arrays that we might want to prefetch. */ | |
3589 | struct prefetch_info | |
3590 | { | |
3591 | struct iv_class *class; /* Class this prefetch is based on. */ | |
3592 | struct induction *giv; /* GIV this prefetch is based on. */ | |
3593 | rtx base_address; /* Start prefetching from this address plus | |
3594 | index. */ | |
3595 | HOST_WIDE_INT index; | |
3596 | HOST_WIDE_INT stride; /* Prefetch stride in bytes in each | |
3597 | iteration. */ | |
3dc60fc7 | 3598 | unsigned int bytes_accessed; /* Sum of sizes of all accesses to this |
0dd0e980 JH |
3599 | prefetch area in one iteration. */ |
3600 | unsigned int total_bytes; /* Total bytes loop will access in this block. | |
3601 | This is set only for loops with known | |
3602 | iteration counts and is 0xffffffff | |
3603 | otherwise. */ | |
62e6ca55 JJ |
3604 | int prefetch_in_loop; /* Number of prefetch insns in loop. */ |
3605 | int prefetch_before_loop; /* Number of prefetch insns before loop. */ | |
0dd0e980 | 3606 | unsigned int write : 1; /* 1 for read/write prefetches. */ |
0dd0e980 JH |
3607 | }; |
3608 | ||
3609 | /* Data used by check_store function. */ | |
3610 | struct check_store_data | |
3611 | { | |
3612 | rtx mem_address; | |
3613 | int mem_write; | |
3614 | }; | |
3615 | ||
0c20a65f AJ |
3616 | static void check_store (rtx, rtx, void *); |
3617 | static void emit_prefetch_instructions (struct loop *); | |
3618 | static int rtx_equal_for_prefetch_p (rtx, rtx); | |
0dd0e980 JH |
3619 | |
3620 | /* Set mem_write when mem_address is found. Used as callback to | |
3621 | note_stores. */ | |
3622 | static void | |
0c20a65f | 3623 | check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data) |
0dd0e980 | 3624 | { |
505ddab6 | 3625 | struct check_store_data *d = (struct check_store_data *) data; |
0dd0e980 JH |
3626 | |
3627 | if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0))) | |
3628 | d->mem_write = 1; | |
3629 | } | |
3630 | \f | |
3631 | /* Like rtx_equal_p, but attempts to swap commutative operands. This is | |
3632 | important to get some addresses combined. More sophisticated |
3d042e77 | 3633 | transformations can be added later when necessary. |
0dd0e980 JH |
3634 | |
3635 | ??? The same trick of swapping operands is done in several other places. |
3636 | It would be nice to develop a common way to handle this. */ |
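/* For instance, (plus (reg 100) (reg 101)) compares equal here to
   (plus (reg 101) (reg 100)), which plain rtx_equal_p would reject.  */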
3637 | ||
3638 | static int | |
0c20a65f | 3639 | rtx_equal_for_prefetch_p (rtx x, rtx y) |
0dd0e980 JH |
3640 | { |
3641 | int i; | |
3642 | int j; | |
3643 | enum rtx_code code = GET_CODE (x); | |
3644 | const char *fmt; | |
3645 | ||
3646 | if (x == y) | |
3647 | return 1; | |
3648 | if (code != GET_CODE (y)) | |
3649 | return 0; | |
3650 | ||
3651 | code = GET_CODE (x); | |
3652 | ||
3653 | if (GET_RTX_CLASS (code) == 'c') | |
3654 | { | |
3655 | return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0)) | |
3656 | && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1))) | |
3657 | || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1)) | |
3658 | && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0)))); | |
3659 | } | |
3660 | /* Compare the elements. If any pair of corresponding elements fails to | |
3661 | match, return 0 for the whole thing. */ | |
3662 | ||
3663 | fmt = GET_RTX_FORMAT (code); | |
3664 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
3665 | { | |
3666 | switch (fmt[i]) | |
3667 | { | |
3668 | case 'w': | |
3669 | if (XWINT (x, i) != XWINT (y, i)) | |
3670 | return 0; | |
3671 | break; | |
3672 | ||
3673 | case 'i': | |
3674 | if (XINT (x, i) != XINT (y, i)) | |
3675 | return 0; | |
3676 | break; | |
3677 | ||
3678 | case 'E': | |
3679 | /* Two vectors must have the same length. */ | |
3680 | if (XVECLEN (x, i) != XVECLEN (y, i)) | |
3681 | return 0; | |
3682 | ||
3683 | /* And the corresponding elements must match. */ | |
3684 | for (j = 0; j < XVECLEN (x, i); j++) | |
3685 | if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j), | |
3686 | XVECEXP (y, i, j)) == 0) | |
3687 | return 0; | |
3688 | break; | |
3689 | ||
3690 | case 'e': | |
3691 | if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0) | |
3692 | return 0; | |
3693 | break; | |
3694 | ||
3695 | case 's': | |
3696 | if (strcmp (XSTR (x, i), XSTR (y, i))) | |
3697 | return 0; | |
3698 | break; | |
3699 | ||
3700 | case 'u': | |
3701 | /* These are just backpointers, so they don't matter. */ | |
3702 | break; | |
3703 | ||
3704 | case '0': | |
3705 | break; | |
3706 | ||
3707 | /* It is believed that rtx's at this level will never | |
3708 | contain anything but integers and other rtx's, | |
3709 | except for within LABEL_REFs and SYMBOL_REFs. */ | |
3710 | default: | |
3711 | abort (); | |
3712 | } | |
3713 | } | |
3714 | return 1; | |
3715 | } | |
3716 | \f | |
3717 | /* Remove constant addition value from the expression X (when present) | |
3718 | and return it. */ | |
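/* For example: given (plus (reg 100) (const_int 8)), *X becomes
   (reg 100) and 8 is returned; given (const (plus (symbol_ref "a")
   (const_int 4))), *X becomes (symbol_ref "a") and 4 is returned.  */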
62ab1caf | 3719 | |
0dd0e980 | 3720 | static HOST_WIDE_INT |
0c20a65f | 3721 | remove_constant_addition (rtx *x) |
0dd0e980 JH |
3722 | { |
3723 | HOST_WIDE_INT addval = 0; | |
62ab1caf | 3724 | rtx exp = *x; |
0dd0e980 | 3725 | |
a2cd028f | 3726 | /* Avoid clobbering a shared CONST expression. */ |
0dd0e980 | 3727 | if (GET_CODE (exp) == CONST) |
a2cd028f JJ |
3728 | { |
3729 | if (GET_CODE (XEXP (exp, 0)) == PLUS | |
3730 | && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF | |
3731 | && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT) | |
3732 | { | |
3733 | *x = XEXP (XEXP (exp, 0), 0); | |
3734 | return INTVAL (XEXP (XEXP (exp, 0), 1)); | |
3735 | } | |
3736 | return 0; | |
3737 | } | |
3738 | ||
0dd0e980 JH |
3739 | if (GET_CODE (exp) == CONST_INT) |
3740 | { | |
3741 | addval = INTVAL (exp); | |
3742 | *x = const0_rtx; | |
3743 | } | |
62ab1caf | 3744 | |
0dd0e980 JH |
3745 | /* For a PLUS expression, recurse on both operands. */ |
3746 | else if (GET_CODE (exp) == PLUS) | |
3747 | { | |
3748 | addval += remove_constant_addition (&XEXP (exp, 0)); | |
3749 | addval += remove_constant_addition (&XEXP (exp, 1)); | |
62ab1caf RK |
3750 | |
3751 | /* In case our parameter was constant, remove extra zero from the | |
3752 | expression. */ | |
0dd0e980 | 3753 | if (XEXP (exp, 0) == const0_rtx) |
e11e816e | 3754 | *x = XEXP (exp, 1); |
0dd0e980 | 3755 | else if (XEXP (exp, 1) == const0_rtx) |
e11e816e | 3756 | *x = XEXP (exp, 0); |
0dd0e980 | 3757 | } |
62ab1caf | 3758 | |
0dd0e980 JH |
3759 | return addval; |
3760 | } | |
3761 | ||
3762 | /* Attempt to identify accesses to arrays that are most likely to cause cache | |
3763 | misses, and emit prefetch instructions a few prefetch blocks forward. | |
3764 | ||
3765 | To detect the arrays we use the GIV information that was collected by the | |
3766 | strength reduction pass. | |
3767 | ||
3768 | The prefetch instructions are generated after the GIV information is done | |
3769 | and before the strength reduction process. The new GIVs are injected into | |
3770 | the strength reduction tables, so the prefetch addresses are optimized as | |
3771 | well. | |
3772 | ||
3773 | GIVs are split into base address, stride, and constant addition values. | |
3774 | GIVs with the same address, stride and close addition values are combined | |
3775 | into a single prefetch. Also writes to GIVs are detected, so that prefetch | |
3776 | for write instructions can be used for the block we write to, on machines | |
3777 | that support write prefetches. | |
3778 | ||
3779 | Several heuristics are used to determine when to prefetch. They are | |
3a79cccb | 3780 | controlled by defined symbols that can be overridden for each target. */ |
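/* An illustrative example (the array and field names are made up): two
   DEST_ADDR givs for a[i].x and a[i].y, where each element of `a' is 16
   bytes, share the base address of `a' and the stride 16 and differ only
   by a small constant addition, so they are merged below into a single
   prefetch stream.  */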
62ab1caf | 3781 | |
0dd0e980 | 3782 | static void |
0c20a65f | 3783 | emit_prefetch_instructions (struct loop *loop) |
0dd0e980 JH |
3784 | { |
3785 | int num_prefetches = 0; | |
3786 | int num_real_prefetches = 0; | |
3787 | int num_real_write_prefetches = 0; | |
62e6ca55 JJ |
3788 | int num_prefetches_before = 0; |
3789 | int num_write_prefetches_before = 0; | |
3790 | int ahead = 0; | |
0dd0e980 JH |
3791 | int i; |
3792 | struct iv_class *bl; | |
3793 | struct induction *iv; | |
3794 | struct prefetch_info info[MAX_PREFETCHES]; | |
3795 | struct loop_ivs *ivs = LOOP_IVS (loop); | |
3796 | ||
3797 | if (!HAVE_prefetch) | |
3798 | return; | |
3799 | ||
3800 | /* Consider only loops without calls. When a call is made, the loop is |
3801 | probably already slow enough that prefetching would not pay off. */ |
3802 | if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call) | |
3803 | { | |
3804 | if (loop_dump_stream) | |
79a497cd | 3805 | fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n"); |
62ab1caf | 3806 | |
0dd0e980 JH |
3807 | return; |
3808 | } | |
3809 | ||
79a497cd | 3810 | /* Don't prefetch in loops known to have few iterations. */ |
0dd0e980 JH |
3811 | if (PREFETCH_NO_LOW_LOOPCNT |
3812 | && LOOP_INFO (loop)->n_iterations | |
3813 | && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT) | |
3814 | { | |
3815 | if (loop_dump_stream) | |
3816 | fprintf (loop_dump_stream, | |
79a497cd | 3817 | "Prefetch: ignoring loop: not enough iterations.\n"); |
0dd0e980 JH |
3818 | return; |
3819 | } | |
3820 | ||
3821 | /* Search all induction variables and pick those interesting for the prefetch | |
3822 | machinery. */ | |
3823 | for (bl = ivs->list; bl; bl = bl->next) | |
3824 | { | |
3825 | struct induction *biv = bl->biv, *biv1; | |
3826 | int basestride = 0; | |
3827 | ||
3828 | biv1 = biv; | |
62ab1caf | 3829 | |
0dd0e980 JH |
3830 | /* Expect all BIVs to be executed in each iteration. This makes our |
3831 | analysis more conservative. */ | |
3832 | while (biv1) | |
3833 | { | |
3834 | /* Discard non-constant additions that we can't handle well yet, and | |
3835 | BIVs that are executed multiple times; such BIVs ought to be | |
3836 | handled in the nested loop. We accept not_every_iteration BIVs, | |
3837 | since these only result in larger strides and make our | |
79a497cd | 3838 | heuristics more conservative. */ |
0dd0e980 JH |
3839 | if (GET_CODE (biv->add_val) != CONST_INT) |
3840 | { | |
3841 | if (loop_dump_stream) | |
3842 | { | |
62ab1caf | 3843 | fprintf (loop_dump_stream, |
79a497cd | 3844 | "Prefetch: ignoring biv %d: non-constant addition at insn %d:", |
0dd0e980 JH |
3845 | REGNO (biv->src_reg), INSN_UID (biv->insn)); |
3846 | print_rtl (loop_dump_stream, biv->add_val); | |
3847 | fprintf (loop_dump_stream, "\n"); | |
3848 | } | |
3849 | break; | |
3850 | } | |
62ab1caf | 3851 | |
0dd0e980 JH |
3852 | if (biv->maybe_multiple) |
3853 | { | |
3854 | if (loop_dump_stream) | |
3855 | { | |
62ab1caf | 3856 | fprintf (loop_dump_stream, |
79a497cd | 3857 | "Prefetch: ignoring biv %d: maybe_multiple at insn %i:", |
0dd0e980 JH |
3858 | REGNO (biv->src_reg), INSN_UID (biv->insn)); |
3859 | print_rtl (loop_dump_stream, biv->add_val); | |
3860 | fprintf (loop_dump_stream, "\n"); | |
3861 | } | |
3862 | break; | |
3863 | } | |
62ab1caf | 3864 | |
0dd0e980 JH |
3865 | basestride += INTVAL (biv1->add_val); |
3866 | biv1 = biv1->next_iv; | |
3867 | } | |
62ab1caf | 3868 | |
0dd0e980 JH |
3869 | if (biv1 || !basestride) |
3870 | continue; | |
62ab1caf | 3871 | |
0dd0e980 JH |
3872 | for (iv = bl->giv; iv; iv = iv->next_iv) |
3873 | { | |
3874 | rtx address; | |
3875 | rtx temp; | |
3876 | HOST_WIDE_INT index = 0; | |
3877 | int add = 1; | |
79a497cd JJ |
3878 | HOST_WIDE_INT stride = 0; |
3879 | int stride_sign = 1; | |
0dd0e980 | 3880 | struct check_store_data d; |
79a497cd | 3881 | const char *ignore_reason = NULL; |
0dd0e980 JH |
3882 | int size = GET_MODE_SIZE (GET_MODE (iv)); |
3883 | ||
79a497cd JJ |
3884 | /* See whether an induction variable is interesting to us and if |
3885 | not, report the reason. */ | |
3886 | if (iv->giv_type != DEST_ADDR) | |
3887 | ignore_reason = "giv is not a destination address"; | |
3888 | ||
3889 | /* We are interested only in constant stride memory references | |
3890 | in order to be able to compute density easily. */ | |
3891 | else if (GET_CODE (iv->mult_val) != CONST_INT) | |
3892 | ignore_reason = "stride is not constant"; | |
3893 | ||
3894 | else | |
3895 | { | |
3896 | stride = INTVAL (iv->mult_val) * basestride; | |
3897 | if (stride < 0) | |
62e6ca55 | 3898 | { |
79a497cd JJ |
3899 | stride = -stride; |
3900 | stride_sign = -1; | |
62e6ca55 | 3901 | } |
79a497cd JJ |
3902 | |
3903 | /* On some targets, reversed order prefetches are not | |
62e6ca55 | 3904 | worthwhile. */ |
79a497cd JJ |
3905 | if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0) |
3906 | ignore_reason = "reversed order stride"; | |
3907 | ||
3908 | /* Prefetch of accesses with an extreme stride might not be | |
62e6ca55 | 3909 | worthwhile, either. */ |
79a497cd JJ |
3910 | else if (PREFETCH_NO_EXTREME_STRIDE |
3911 | && stride > PREFETCH_EXTREME_STRIDE) | |
3912 | ignore_reason = "extreme stride"; | |
3913 | ||
62ab1caf | 3914 | /* Ignore GIVs with varying add values; we can't predict the |
62e6ca55 | 3915 | value for the next iteration. */ |
79a497cd JJ |
3916 | else if (!loop_invariant_p (loop, iv->add_val)) |
3917 | ignore_reason = "giv has varying add value"; | |
3918 | ||
62ab1caf | 3919 | /* Ignore GIVs in the nested loops; they ought to have been |
62e6ca55 | 3920 | handled already. */ |
79a497cd JJ |
3921 | else if (iv->maybe_multiple) |
3922 | ignore_reason = "giv is in nested loop"; | |
3923 | } | |
3924 | ||
3925 | if (ignore_reason != NULL) | |
0dd0e980 JH |
3926 | { |
3927 | if (loop_dump_stream) | |
79a497cd JJ |
3928 | fprintf (loop_dump_stream, |
3929 | "Prefetch: ignoring giv at %d: %s.\n", | |
3930 | INSN_UID (iv->insn), ignore_reason); | |
0dd0e980 JH |
3931 | continue; |
3932 | } | |
3933 | ||
3934 | /* Determine the pointer to the basic array we are examining. It is | |
3935 | the sum of the BIV's initial value and the GIV's add_val. */ | |
0dd0e980 JH |
3936 | address = copy_rtx (iv->add_val); |
3937 | temp = copy_rtx (bl->initial_value); | |
3938 | ||
3939 | address = simplify_gen_binary (PLUS, Pmode, temp, address); | |
3940 | index = remove_constant_addition (&address); | |
3941 | ||
0dd0e980 JH |
3942 | d.mem_write = 0; |
3943 | d.mem_address = *iv->location; | |
62ab1caf | 3944 | |
0dd0e980 JH |
3945 | /* When the GIV is not always executed, we might be better off by |
3946 | not dirtying the cache pages. */ | |
79a497cd | 3947 | if (PREFETCH_CONDITIONAL || iv->always_executed) |
0dd0e980 | 3948 | note_stores (PATTERN (iv->insn), check_store, &d); |
62e6ca55 JJ |
3949 | else |
3950 | { | |
3951 | if (loop_dump_stream) | |
3952 | fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n", | |
3953 | INSN_UID (iv->insn), "in conditional code."); | |
3954 | continue; | |
3955 | } | |
0dd0e980 JH |
3956 | |
3957 | /* Attempt to find another prefetch to the same array and see if we | |
3958 | can merge this one. */ | |
3959 | for (i = 0; i < num_prefetches; i++) | |
3960 | if (rtx_equal_for_prefetch_p (address, info[i].base_address) | |
3961 | && stride == info[i].stride) | |
3962 | { | |
3963 | /* If both access the same array (the same location, |
3964 | just with a small difference in constant indexes), merge |
3965 | the prefetches. Just do the later one; the earlier one |
3966 | will get prefetched from the previous iteration. |
79a497cd | 3967 | The artificial threshold should not be too small, |
3968 | but also not bigger than the small portion of memory usually |
3969 | traversed by a single loop. */ |
79a497cd JJ |
3970 | if (index >= info[i].index |
3971 | && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE) | |
0dd0e980 JH |
3972 | { |
3973 | info[i].write |= d.mem_write; | |
79a497cd | 3974 | info[i].bytes_accessed += size; |
0dd0e980 JH |
3975 | info[i].index = index; |
3976 | info[i].giv = iv; | |
3977 | info[i].class = bl; | |
3978 | info[num_prefetches].base_address = address; | |
3979 | add = 0; | |
3980 | break; | |
3981 | } | |
62ab1caf | 3982 | |
79a497cd JJ |
3983 | if (index < info[i].index |
3984 | && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE) | |
0dd0e980 JH |
3985 | { |
3986 | info[i].write |= d.mem_write; | |
79a497cd | 3987 | info[i].bytes_accessed += size; |
0dd0e980 JH |
3988 | add = 0; |
3989 | break; | |
3990 | } | |
3991 | } | |
62ab1caf | 3992 | |
0dd0e980 JH |
3993 | /* Merging failed. */ |
3994 | if (add) | |
3995 | { | |
3996 | info[num_prefetches].giv = iv; | |
3997 | info[num_prefetches].class = bl; | |
3998 | info[num_prefetches].index = index; | |
3999 | info[num_prefetches].stride = stride; | |
4000 | info[num_prefetches].base_address = address; | |
4001 | info[num_prefetches].write = d.mem_write; | |
79a497cd | 4002 | info[num_prefetches].bytes_accessed = size; |
0dd0e980 JH |
4003 | num_prefetches++; |
4004 | if (num_prefetches >= MAX_PREFETCHES) | |
4005 | { | |
4006 | if (loop_dump_stream) | |
62ab1caf RK |
4007 | fprintf (loop_dump_stream, |
4008 | "Maximal number of prefetches exceeded.\n"); | |
0dd0e980 JH |
4009 | return; |
4010 | } | |
4011 | } | |
4012 | } | |
4013 | } | |
62ab1caf | 4014 | |
0dd0e980 JH |
4015 | for (i = 0; i < num_prefetches; i++) |
4016 | { | |
79a497cd JJ |
4017 | int density; |
4018 | ||
4019 | /* Attempt to calculate the total number of bytes fetched by all | |
4020 | iterations of the loop. Avoid overflow. */ | |
0dd0e980 | 4021 | if (LOOP_INFO (loop)->n_iterations |
62e6ca55 | 4022 | && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride) |
62ab1caf | 4023 | >= LOOP_INFO (loop)->n_iterations)) |
0dd0e980 JH |
4024 | info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations; |
4025 | else | |
4026 | info[i].total_bytes = 0xffffffff; | |
4027 | ||
79a497cd JJ |
4028 | density = info[i].bytes_accessed * 100 / info[i].stride; |
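/* Illustrative arithmetic: bytes_accessed == 4 with stride == 16 gives
   density == 25 (percent). The test below compares density * 256 with
   PREFETCH_DENSE_MEM * 100, so with an assumed PREFETCH_DENSE_MEM of
   220 roughly 86% of each stride must actually be touched.  */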
4029 | ||
4030 | /* Prefetch might be worthwhile only when the loads/stores are dense. */ | |
4031 | if (PREFETCH_ONLY_DENSE_MEM) | |
4032 | if (density * 256 > PREFETCH_DENSE_MEM * 100 | |
4033 | && (info[i].total_bytes / PREFETCH_BLOCK | |
62e6ca55 | 4034 | >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN)) |
79a497cd JJ |
4035 | { |
4036 | info[i].prefetch_before_loop = 1; | |
4037 | info[i].prefetch_in_loop | |
4038 | = (info[i].total_bytes / PREFETCH_BLOCK | |
62e6ca55 | 4039 | > PREFETCH_BLOCKS_BEFORE_LOOP_MAX); |
79a497cd | 4040 | } |
62e6ca55 | 4041 | else |
79a497cd JJ |
4042 | { |
4043 | info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0; | |
4044 | if (loop_dump_stream) | |
4045 | fprintf (loop_dump_stream, | |
4046 | "Prefetch: ignoring giv at %d: %d%% density is too low.\n", | |
4047 | INSN_UID (info[i].giv->insn), density); | |
4048 | } | |
0dd0e980 | 4049 | else |
79a497cd | 4050 | info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1; |
0dd0e980 | 4051 | |
62e6ca55 JJ |
4052 | /* Find how many prefetch instructions we'll use within the loop. */ |
4053 | if (info[i].prefetch_in_loop != 0) | |
0dd0e980 | 4054 | { |
62e6ca55 | 4055 | info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1) |
0dd0e980 | 4056 | / PREFETCH_BLOCK); |
62e6ca55 | 4057 | num_real_prefetches += info[i].prefetch_in_loop; |
0dd0e980 | 4058 | if (info[i].write) |
62e6ca55 | 4059 | num_real_write_prefetches += info[i].prefetch_in_loop; |
0dd0e980 JH |
4060 | } |
4061 | } | |
62ab1caf | 4062 | |
62e6ca55 JJ |
4063 | /* Determine how many iterations ahead to prefetch within the loop, based |
4064 | on how many prefetches we currently expect to do within the loop. */ | |
4065 | if (num_real_prefetches != 0) | |
4066 | { | |
4067 | if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0) | |
4068 | { | |
4069 | if (loop_dump_stream) | |
4070 | fprintf (loop_dump_stream, | |
4071 | "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n", | |
4072 | SIMULTANEOUS_PREFETCHES, num_real_prefetches); | |
4073 | num_real_prefetches = 0, num_real_write_prefetches = 0; | |
4074 | } | |
4075 | } | |
4076 | /* We'll also use AHEAD to determine how many prefetch instructions to | |
4077 | emit before a loop, so don't leave it zero. */ | |
4078 | if (ahead == 0) | |
4079 | ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX; | |
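/* Illustrative arithmetic: with SIMULTANEOUS_PREFETCHES == 3, two real
   prefetches in the loop give AHEAD == 1, while four give AHEAD == 0,
   so in-loop prefetching is abandoned above and this fallback sizes the
   before-loop prefetches instead.  */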
4080 | ||
4081 | for (i = 0; i < num_prefetches; i++) | |
0dd0e980 | 4082 | { |
62e6ca55 JJ |
4083 | /* Update if we've decided not to prefetch anything within the loop. */ |
4084 | if (num_real_prefetches == 0) | |
4085 | info[i].prefetch_in_loop = 0; | |
4086 | ||
4087 | /* Find how many prefetch instructions we'll use before the loop. */ | |
4088 | if (info[i].prefetch_before_loop != 0) | |
4089 | { | |
4090 | int n = info[i].total_bytes / PREFETCH_BLOCK; | |
4091 | if (n > ahead) | |
4092 | n = ahead; | |
4093 | info[i].prefetch_before_loop = n; | |
4094 | num_prefetches_before += n; | |
4095 | if (info[i].write) | |
4096 | num_write_prefetches_before += n; | |
4097 | } | |
4098 | ||
4099 | if (loop_dump_stream) | |
0dd0e980 | 4100 | { |
79a497cd JJ |
4101 | if (info[i].prefetch_in_loop == 0 |
4102 | && info[i].prefetch_before_loop == 0) | |
4103 | continue; | |
4104 | fprintf (loop_dump_stream, "Prefetch insn: %d", | |
0dd0e980 | 4105 | INSN_UID (info[i].giv->insn)); |
62ab1caf | 4106 | fprintf (loop_dump_stream, |
62e6ca55 JJ |
4107 | "; in loop: %d; before: %d; %s\n", |
4108 | info[i].prefetch_in_loop, | |
4109 | info[i].prefetch_before_loop, | |
79a497cd JJ |
4110 | info[i].write ? "read/write" : "read only"); |
4111 | fprintf (loop_dump_stream, | |
4112 | " density: %d%%; bytes_accessed: %u; total_bytes: %u\n", | |
4113 | (int) (info[i].bytes_accessed * 100 / info[i].stride), | |
4114 | info[i].bytes_accessed, info[i].total_bytes); | |
90ff44cf KG |
4115 | fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC |
4116 | "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ", | |
4117 | info[i].index, info[i].stride); | |
79a497cd JJ |
4118 | print_rtl (loop_dump_stream, info[i].base_address); |
4119 | fprintf (loop_dump_stream, "\n"); | |
0dd0e980 | 4120 | } |
0dd0e980 JH |
4121 | } |
4122 | ||
62e6ca55 | 4123 | if (num_real_prefetches + num_prefetches_before > 0) |
79a497cd | 4124 | { |
62e6ca55 JJ |
4125 | /* Record that this loop uses prefetch instructions. */ |
4126 | LOOP_INFO (loop)->has_prefetch = 1; | |
4127 | ||
79a497cd | 4128 | if (loop_dump_stream) |
62e6ca55 JJ |
4129 | { |
4130 | fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n", | |
4131 | num_real_prefetches, num_real_write_prefetches); | |
4132 | fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n", | |
4133 | num_prefetches_before, num_write_prefetches_before); | |
4134 | } | |
79a497cd | 4135 | } |
62ab1caf | 4136 | |
0dd0e980 JH |
4137 | for (i = 0; i < num_prefetches; i++) |
4138 | { | |
62e6ca55 | 4139 | int y; |
62ab1caf | 4140 | |
62e6ca55 JJ |
4141 | for (y = 0; y < info[i].prefetch_in_loop; y++) |
4142 | { | |
4143 | rtx loc = copy_rtx (*info[i].giv->location); | |
4144 | rtx insn; | |
4145 | int bytes_ahead = PREFETCH_BLOCK * (ahead + y); | |
4146 | rtx before_insn = info[i].giv->insn; | |
4147 | rtx prev_insn = PREV_INSN (info[i].giv->insn); | |
ba4f7968 | 4148 | rtx seq; |
62e6ca55 JJ |
4149 | |
4150 | /* We can save some effort by offsetting the address on | |
4151 | architectures with offsettable memory references. */ | |
4152 | if (offsettable_address_p (0, VOIDmode, loc)) | |
4153 | loc = plus_constant (loc, bytes_ahead); | |
4154 | else | |
0dd0e980 | 4155 | { |
62e6ca55 JJ |
4156 | rtx reg = gen_reg_rtx (Pmode); |
4157 | loop_iv_add_mult_emit_before (loop, loc, const1_rtx, | |
0c20a65f AJ |
4158 | GEN_INT (bytes_ahead), reg, |
4159 | 0, before_insn); | |
62e6ca55 JJ |
4160 | loc = reg; |
4161 | } | |
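	  /* E.g. (hypothetical rtl): if *info[i].giv->location is
	     (plus:SI (reg 100) (const_int 8)) and bytes_ahead is 64,
	     the offsettable case above rewrites the copy as
	     (plus:SI (reg 100) (const_int 72)); otherwise the sum is
	     computed into a fresh pseudo by the else branch.  */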
0dd0e980 | 4162 | |
ba4f7968 | 4163 | start_sequence (); |
62e6ca55 JJ |
4164 | /* Make sure the address operand is valid for prefetch. */ |
4165 | if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate) | |
4166 | (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode)) | |
4167 | loc = force_reg (Pmode, loc); | |
ba4f7968 JH |
4168 | emit_insn (gen_prefetch (loc, GEN_INT (info[i].write), |
4169 | GEN_INT (3))); | |
2f937369 | 4170 | seq = get_insns (); |
ba4f7968 JH |
4171 | end_sequence (); |
4172 | emit_insn_before (seq, before_insn); | |
62e6ca55 JJ |
4173 | |
4174 | /* Check all insns emitted and record the new GIV | |
4175 | information. */ | |
4176 | insn = NEXT_INSN (prev_insn); | |
4177 | while (insn != before_insn) | |
4178 | { | |
4179 | insn = check_insn_for_givs (loop, insn, | |
4180 | info[i].giv->always_executed, | |
4181 | info[i].giv->maybe_multiple); | |
4182 | insn = NEXT_INSN (insn); | |
0dd0e980 JH |
4183 | } |
4184 | } | |
62ab1caf | 4185 | |
62e6ca55 | 4186 | if (PREFETCH_BEFORE_LOOP) |
0dd0e980 | 4187 | { |
62e6ca55 JJ |
4188 | /* Emit insns before the loop to fetch the first cache lines or, |
4189 | if we're not prefetching within the loop, everything we expect | |
4190 | to need. */ | |
4191 | for (y = 0; y < info[i].prefetch_before_loop; y++) | |
0dd0e980 JH |
4192 | { |
4193 | rtx reg = gen_reg_rtx (Pmode); | |
4194 | rtx loop_start = loop->start; | |
62e6ca55 | 4195 | rtx init_val = info[i].class->initial_value; |
0dd0e980 JH |
4196 | rtx add_val = simplify_gen_binary (PLUS, Pmode, |
4197 | info[i].giv->add_val, | |
4198 | GEN_INT (y * PREFETCH_BLOCK)); | |
62ab1caf | 4199 | |
62e6ca55 JJ |
4200 | /* Functions called by LOOP_IV_ADD_EMIT_BEFORE expect a |
4201 | non-constant INIT_VAL to have the same mode as REG, which | |
4202 | in this case we know to be Pmode. */ | |
4203 | if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val)) | |
b0c42aed JH |
4204 | { |
4205 | rtx seq; | |
4206 | ||
4207 | start_sequence (); | |
4208 | init_val = convert_to_mode (Pmode, init_val, 0); | |
4209 | seq = get_insns (); | |
4210 | end_sequence (); | |
4211 | loop_insn_emit_before (loop, 0, loop_start, seq); | |
4212 | } | |
62e6ca55 | 4213 | loop_iv_add_mult_emit_before (loop, init_val, |
0dd0e980 | 4214 | info[i].giv->mult_val, |
62e6ca55 | 4215 | add_val, reg, 0, loop_start); |
0dd0e980 | 4216 | emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write), |
62ab1caf RK |
4217 | GEN_INT (3)), |
4218 | loop_start); | |
0dd0e980 JH |
4219 | } |
4220 | } | |
4221 | } | |
62ab1caf | 4222 | |
0dd0e980 JH |
4223 | return; |
4224 | } | |
4225 | \f | |
b4ad7b23 RS |
4226 | /* A "basic induction variable" or biv is a pseudo reg that is set |
4227 | (within this loop) only by incrementing or decrementing it. */ | |
4228 | /* A "general induction variable" or giv is a pseudo reg whose | |
4229 | value is a linear function of a biv. */ | |
4230 | ||
4231 | /* Bivs are recognized by `basic_induction_var'; | |
45f97e2e | 4232 | Givs by `general_induction_var'. */ |
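/* For illustration (a hypothetical source loop, not taken from this
   file):

       for (i = 0; i < n; i++)
	 a[i] = a[i] + c;

   Here `i' is a biv: it is set only by `i = i + 1'.  The address
   computation `&a[i]', i.e. a + 4*i for 4-byte elements, is a giv:
   a linear function (mult_val 4, add_val a) of the biv `i'.  */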
b4ad7b23 | 4233 | |
b4ad7b23 RS |
4234 | /* Communication with routines called via `note_stores'. */ |
4235 | ||
4236 | static rtx note_insn; | |
4237 | ||
cc2902df | 4238 | /* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */ |
b4ad7b23 RS |
4239 | |
4240 | static rtx addr_placeholder; | |
4241 | ||
4242 | /* ??? Unfinished optimizations, and possible future optimizations, | |
4243 | for the strength reduction code. */ | |
4244 | ||
b4ad7b23 | 4245 | /* ??? The interaction of biv elimination and recognition of 'constant' |
0f41302f | 4246 | bivs may cause problems. */ |
b4ad7b23 RS |
4247 | |
4248 | /* ??? Add heuristics so that DEST_ADDR strength reduction does not cause | |
4249 | performance problems. | |
4250 | ||
4251 | Perhaps don't eliminate things that can be combined with an addressing | |
4252 | mode. Find all givs that have the same biv, mult_val, and add_val; | |
4253 | then for each giv, check to see if its only use dies in a following | |
4254 | memory address. If so, generate a new memory address and check to see | |
4255 | if it is valid. If it is valid, then store the modified memory address, | |
4256 | otherwise, mark the giv as not done so that it will get its own iv. */ | |
4257 | ||
4258 | /* ??? Could try to optimize branches when it is known that a biv is always | |
4259 | positive. */ | |
4260 | ||
4261 | /* ??? When replacing a biv in a compare insn, we should replace it with the closest | |
4262 | giv so that an optimized branch can still be recognized by the combiner, | |
4263 | e.g. the VAX acb insn. */ | |
4264 | ||
4265 | /* ??? Many of the checks involving uid_luid could be simplified if regscan | |
4266 | was rerun in loop_optimize whenever a register was added or moved. | |
4267 | Also, some of the optimizations could be a little less conservative. */ | |
4268 | \f | |
5e787f07 JH |
4269 | /* Scan the loop body and call FNCALL for each insn. In addition to the |
4270 | LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to the |
4271 | callback. | |
e6fcb60d | 4272 | |
048c8616 EB |
4273 | NOT_EVERY_ITERATION is 1 if the current insn is not known to be executed at |
4274 | least once for every loop iteration except for the last one. | |
5e787f07 JH |
4275 | |
4276 | MAYBE_MULTIPLE is 1 if the current insn may be executed more than once for every |
4277 | loop iteration. | |
4278 | */ | |
4279 | void | |
0c20a65f | 4280 | for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall) |
b4ad7b23 | 4281 | { |
b4ad7b23 | 4282 | int not_every_iteration = 0; |
7dcd3836 | 4283 | int maybe_multiple = 0; |
ae188a87 | 4284 | int past_loop_latch = 0; |
5ea7a4ae | 4285 | int loop_depth = 0; |
5e787f07 | 4286 | rtx p; |
b4ad7b23 | 4287 | |
a2be868f | 4288 | /* If loop_scan_start points to the loop exit test, we have to be wary of |
5353610b | 4289 | subversive use of gotos inside expression statements. */ |
5e787f07 JH |
4290 | if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start)) |
4291 | maybe_multiple = back_branch_in_range_p (loop, loop->scan_start); | |
b4ad7b23 | 4292 | |
4b7e68e7 | 4293 | /* Scan through the loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */ |
5e787f07 | 4294 | for (p = next_insn_in_loop (loop, loop->scan_start); |
41a972a9 | 4295 | p != NULL_RTX; |
a2be868f | 4296 | p = next_insn_in_loop (loop, p)) |
b4ad7b23 | 4297 | { |
c35971c8 | 4298 | p = fncall (loop, p, not_every_iteration, maybe_multiple); |
b4ad7b23 | 4299 | |
7dcd3836 | 4300 | /* Past CODE_LABEL, we get to insns that may be executed multiple |
5e787f07 JH |
4301 | times. The only way we can be sure that they can't is if every |
4302 | jump insn between here and the end of the loop either | |
4303 | returns, exits the loop, is a jump to a location that is still | |
4304 | behind the label, or is a jump to the loop start. */ | |
7dcd3836 RK |
4305 | |
4306 | if (GET_CODE (p) == CODE_LABEL) | |
4307 | { | |
4308 | rtx insn = p; | |
4309 | ||
4310 | maybe_multiple = 0; | |
4311 | ||
4312 | while (1) | |
4313 | { | |
4314 | insn = NEXT_INSN (insn); | |
5e787f07 | 4315 | if (insn == loop->scan_start) |
7dcd3836 | 4316 | break; |
5e787f07 | 4317 | if (insn == loop->end) |
7dcd3836 | 4318 | { |
5e787f07 JH |
4319 | if (loop->top != 0) |
4320 | insn = loop->top; | |
7dcd3836 RK |
4321 | else |
4322 | break; | |
5e787f07 | 4323 | if (insn == loop->scan_start) |
7dcd3836 RK |
4324 | break; |
4325 | } | |
4326 | ||
4327 | if (GET_CODE (insn) == JUMP_INSN | |
4328 | && GET_CODE (PATTERN (insn)) != RETURN | |
7f1c097d | 4329 | && (!any_condjump_p (insn) |
7dcd3836 | 4330 | || (JUMP_LABEL (insn) != 0 |
5e787f07 JH |
4331 | && JUMP_LABEL (insn) != loop->scan_start |
4332 | && !loop_insn_first_p (p, JUMP_LABEL (insn))))) | |
8516af93 JW |
4333 | { |
4334 | maybe_multiple = 1; | |
4335 | break; | |
4336 | } | |
7dcd3836 RK |
4337 | } |
4338 | } | |
4339 | ||
8516af93 | 4340 | /* Past a jump, we get to insns for which we can't count |
5e787f07 | 4341 | on whether they will be executed during each iteration. */ |
8516af93 | 4342 | /* This code appears twice in strength_reduce. There is also similar |
5e787f07 | 4343 | code in scan_loop. */ |
8516af93 | 4344 | if (GET_CODE (p) == JUMP_INSN |
5e787f07 JH |
4345 | /* If we enter the loop in the middle, and scan around to the |
4346 | beginning, don't set not_every_iteration for that. | |
4347 | This can be any kind of jump, since we want to know if insns | |
4348 | will be executed if the loop is executed. */ | |
4349 | && !(JUMP_LABEL (p) == loop->top | |
048c8616 EB |
4350 | && ((NEXT_INSN (NEXT_INSN (p)) == loop->end |
4351 | && any_uncondjump_p (p)) | |
4352 | || (NEXT_INSN (p) == loop->end && any_condjump_p (p))))) | |
8516af93 JW |
4353 | { |
4354 | rtx label = 0; | |
4355 | ||
4356 | /* If this is a jump outside the loop, then it also doesn't | |
4357 | matter. Check to see if the target of this branch is on the | |
a2be868f | 4358 | loop->exits_labels list. */ |
5e787f07 | 4359 | |
0534b804 | 4360 | for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label)) |
8516af93 JW |
4361 | if (XEXP (label, 0) == JUMP_LABEL (p)) |
4362 | break; | |
4363 | ||
5e787f07 | 4364 | if (!label) |
8516af93 JW |
4365 | not_every_iteration = 1; |
4366 | } | |
b4ad7b23 | 4367 | |
5ea7a4ae JW |
4368 | else if (GET_CODE (p) == NOTE) |
4369 | { | |
4370 | /* At the virtual top of a converted loop, insns are again known to | |
4371 | be executed each iteration: logically, the loop begins here | |
5f3db57e JL |
4372 | even though the exit code has been duplicated. |
4373 | ||
4374 | Insns are also again known to be executed each iteration at | |
4375 | the LOOP_CONT note. */ | |
4376 | if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP | |
4377 | || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT) | |
4378 | && loop_depth == 0) | |
5ea7a4ae JW |
4379 | not_every_iteration = 0; |
4380 | else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG) | |
4381 | loop_depth++; | |
4382 | else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END) | |
4383 | loop_depth--; | |
4384 | } | |
b4ad7b23 | 4385 | |
ae188a87 | 4386 | /* Note if we pass a loop latch. If we do, then we cannot clear |
5e787f07 JH |
4387 | NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in |
4388 | a loop since a jump before the last CODE_LABEL may have started | |
4389 | a new loop iteration. | |
4390 | ||
4391 | Note that LOOP_TOP is only set for rotated loops and we need | |
4392 | this check for all loops, so compare against the CODE_LABEL | |
4393 | which immediately follows LOOP_START. */ | |
4394 | if (GET_CODE (p) == JUMP_INSN | |
4395 | && JUMP_LABEL (p) == NEXT_INSN (loop->start)) | |
ae188a87 JL |
4396 | past_loop_latch = 1; |
4397 | ||
b4ad7b23 | 4398 | /* Unlike in the code motion pass where MAYBE_NEVER indicates that |
5e787f07 JH |
4399 | an insn may never be executed, NOT_EVERY_ITERATION indicates whether |
4400 | or not an insn is known to be executed each iteration of the | |
4401 | loop, whether or not any iterations are known to occur. | |
b4ad7b23 | 4402 | |
5e787f07 JH |
4403 | Therefore, if we have just passed a label and have no more labels |
4404 | between here and the test insn of the loop, and we have not passed | |
4405 | a jump to the top of the loop, then we know these insns will be | |
4406 | executed each iteration. */ | |
b4ad7b23 | 4407 | |
5e787f07 JH |
4408 | if (not_every_iteration |
4409 | && !past_loop_latch | |
ae188a87 | 4410 | && GET_CODE (p) == CODE_LABEL |
5e787f07 JH |
4411 | && no_labels_between_p (p, loop->end) |
4412 | && loop_insn_first_p (p, loop->cont)) | |
b4ad7b23 RS |
4413 | not_every_iteration = 0; |
4414 | } | |
5e787f07 JH |
4415 | } |
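/* A minimal sketch of a callback usable with for_each_insn_in_loop
   (hypothetical; the real callbacks in this file are
   check_insn_for_bivs and check_insn_for_givs).  A callback must
   return the insn at which scanning should resume, normally its INSN
   argument:

       static rtx
       note_insn_callback (struct loop *loop, rtx insn,
			   int not_every_iteration, int maybe_multiple)
       {
	 if (INSN_P (insn) && loop_dump_stream)
	   fprintf (loop_dump_stream, "insn %d: nei=%d mm=%d\n",
		    INSN_UID (insn), not_every_iteration, maybe_multiple);
	 return insn;
       }

       for_each_insn_in_loop (loop, note_insn_callback);  */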
4416 | \f | |
5e787f07 | 4417 | static void |
0c20a65f | 4418 | loop_bivs_find (struct loop *loop) |
5e787f07 | 4419 | { |
1ecd860b | 4420 | struct loop_regs *regs = LOOP_REGS (loop); |
ed5bb68d | 4421 | struct loop_ivs *ivs = LOOP_IVS (loop); |
14be28e5 | 4422 | /* Temporary list pointers for traversing ivs->list. */ |
5e787f07 | 4423 | struct iv_class *bl, **backbl; |
5e787f07 | 4424 | |
14be28e5 | 4425 | ivs->list = 0; |
5e787f07 | 4426 | |
5e787f07 | 4427 | for_each_insn_in_loop (loop, check_insn_for_bivs); |
6b8c9327 | 4428 | |
14be28e5 | 4429 | /* Scan ivs->list to remove all regs that proved not to be bivs. |
1ecd860b | 4430 | Make a sanity check against regs->n_times_set. */ |
14be28e5 | 4431 | for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next) |
b4ad7b23 | 4432 | { |
ed5bb68d | 4433 | if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT |
b4ad7b23 RS |
4434 | /* The above happens if the register was modified by a subreg, etc. */ | |
4435 | /* Make sure it is not recognized as a basic induction var: */ | |
f1d4ac80 | 4436 | || regs->array[bl->regno].n_times_set != bl->biv_count |
b4ad7b23 RS |
4437 | /* If never incremented, it is invariant that we decided not to |
4438 | move. So leave it alone. */ | |
4439 | || ! bl->incremented) | |
4440 | { | |
4441 | if (loop_dump_stream) | |
c804f3f8 | 4442 | fprintf (loop_dump_stream, "Biv %d: discarded, %s\n", |
b4ad7b23 | 4443 | bl->regno, |
ed5bb68d | 4444 | (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT |
b4ad7b23 RS |
4445 | ? "not induction variable" |
4446 | : (! bl->incremented ? "never incremented" | |
4447 | : "count error"))); | |
e6fcb60d | 4448 | |
ed5bb68d | 4449 | REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT; |
b4ad7b23 RS |
4450 | *backbl = bl->next; |
4451 | } | |
4452 | else | |
4453 | { | |
4454 | backbl = &bl->next; | |
4455 | ||
4456 | if (loop_dump_stream) | |
c804f3f8 | 4457 | fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno); |
b4ad7b23 RS |
4458 | } |
4459 | } | |
6ec73c7c | 4460 | } |
b4ad7b23 | 4461 | |
b4ad7b23 | 4462 | |
4912a07c | 4463 | /* Determine how BIVS are initialized by looking through the pre-header |
6ec73c7c MH |
4464 | extended basic block. */ |
4465 | static void | |
0c20a65f | 4466 | loop_bivs_init_find (struct loop *loop) |
6ec73c7c | 4467 | { |
6ec73c7c | 4468 | struct loop_ivs *ivs = LOOP_IVS (loop); |
14be28e5 | 4469 | /* Temporary list pointers for traversing ivs->list. */ |
6ec73c7c | 4470 | struct iv_class *bl; |
e304a8e6 MH |
4471 | int call_seen; |
4472 | rtx p; | |
b4ad7b23 RS |
4473 | |
4474 | /* Find initial value for each biv by searching backwards from loop_start, | |
4475 | halting at first label. Also record any test condition. */ | |
4476 | ||
4477 | call_seen = 0; | |
e304a8e6 | 4478 | for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p)) |
b4ad7b23 | 4479 | { |
e304a8e6 MH |
4480 | rtx test; |
4481 | ||
b4ad7b23 RS |
4482 | note_insn = p; |
4483 | ||
4484 | if (GET_CODE (p) == CALL_INSN) | |
4485 | call_seen = 1; | |
4486 | ||
ce7de04c | 4487 | if (INSN_P (p)) |
ed5bb68d | 4488 | note_stores (PATTERN (p), record_initial, ivs); |
b4ad7b23 RS |
4489 | |
4490 | /* Record any test of a biv that branches around the loop if no store | |
4491 | between it and the start of loop. We only care about tests with | |
4492 | constants and registers and only certain of those. */ | |
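      /* Example (hypothetical pre-header):

	   if (i != 10) goto past_loop;

	 The jump is taken around the loop unless i == 10, so the NE
	 test recorded below yields an init_set equivalent to
	 (set (reg i) (const_int 10)).  */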
4493 | if (GET_CODE (p) == JUMP_INSN | |
4494 | && JUMP_LABEL (p) != 0 | |
e304a8e6 | 4495 | && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end) |
0534b804 | 4496 | && (test = get_condition_for_loop (loop, p)) != 0 |
b4ad7b23 RS |
4497 | && GET_CODE (XEXP (test, 0)) == REG |
4498 | && REGNO (XEXP (test, 0)) < max_reg_before_loop | |
8b634749 | 4499 | && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0 |
e304a8e6 | 4500 | && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start) |
b4ad7b23 RS |
4501 | && bl->init_insn == 0) |
4502 | { | |
4503 | /* If an NE test, we have an initial value! */ | |
4504 | if (GET_CODE (test) == NE) | |
4505 | { | |
4506 | bl->init_insn = p; | |
38a448ca RH |
4507 | bl->init_set = gen_rtx_SET (VOIDmode, |
4508 | XEXP (test, 0), XEXP (test, 1)); | |
b4ad7b23 RS |
4509 | } |
4510 | else | |
4511 | bl->initial_test = test; | |
4512 | } | |
4513 | } | |
6ec73c7c MH |
4514 | } |
4515 | ||
4516 | ||
4517 | /* Look at each biv and see if we can say anything better about its | |
4518 | initial value from any initializing insns set up above. (This is done | |
4519 | in two passes to avoid missing SETs in a PARALLEL.) */ | |
4520 | static void | |
0c20a65f | 4521 | loop_bivs_check (struct loop *loop) |
6ec73c7c MH |
4522 | { |
4523 | struct loop_ivs *ivs = LOOP_IVS (loop); | |
14be28e5 | 4524 | /* Temporary list pointers for traversing ivs->list. */ |
6ec73c7c MH |
4525 | struct iv_class *bl; |
4526 | struct iv_class **backbl; | |
b4ad7b23 | 4527 | |
14be28e5 | 4528 | for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next) |
b4ad7b23 RS |
4529 | { |
4530 | rtx src; | |
956d6950 | 4531 | rtx note; |
b4ad7b23 RS |
4532 | |
4533 | if (! bl->init_insn) | |
4534 | continue; | |
4535 | ||
956d6950 JL |
4536 | /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value |
4537 | is a constant, use the value of that. */ | |
4538 | if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL | |
4539 | && CONSTANT_P (XEXP (note, 0))) | |
4540 | || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL | |
4541 | && CONSTANT_P (XEXP (note, 0)))) | |
4542 | src = XEXP (note, 0); | |
4543 | else | |
4544 | src = SET_SRC (bl->init_set); | |
b4ad7b23 RS |
4545 | |
4546 | if (loop_dump_stream) | |
4547 | fprintf (loop_dump_stream, | |
c804f3f8 | 4548 | "Biv %d: initialized at insn %d: initial value ", |
b4ad7b23 RS |
4549 | bl->regno, INSN_UID (bl->init_insn)); |
4550 | ||
43a674af JW |
4551 | if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno]) |
4552 | || GET_MODE (src) == VOIDmode) | |
6b8c9327 AJ |
4553 | && valid_initial_value_p (src, bl->init_insn, |
4554 | LOOP_INFO (loop)->pre_header_has_call, | |
e304a8e6 | 4555 | loop->start)) |
b4ad7b23 RS |
4556 | { |
4557 | bl->initial_value = src; | |
4558 | ||
4559 | if (loop_dump_stream) | |
4560 | { | |
c804f3f8 MH |
4561 | print_simple_rtl (loop_dump_stream, src); |
4562 | fputc ('\n', loop_dump_stream); | |
b4ad7b23 RS |
4563 | } |
4564 | } | |
b4f75276 | 4565 | /* If we can't make it a giv, |
6ec73c7c | 4566 | let biv keep initial value of "itself". */ |
b4f75276 BS |
4567 | else if (loop_dump_stream) |
4568 | fprintf (loop_dump_stream, "is complex\n"); | |
3ec2b590 | 4569 | } |
6ec73c7c | 4570 | } |
3ec2b590 | 4571 | |
b4ad7b23 | 4572 | |
6ec73c7c MH |
4573 | /* Search the loop for general induction variables. */ |
4574 | ||
4575 | static void | |
0c20a65f | 4576 | loop_givs_find (struct loop* loop) |
6ec73c7c | 4577 | { |
5e787f07 | 4578 | for_each_insn_in_loop (loop, check_insn_for_givs); |
6ec73c7c | 4579 | } |
b4ad7b23 | 4580 | |
b4ad7b23 | 4581 | |
6ec73c7c MH |
4582 | /* For each giv for which we still don't know whether or not it is |
4583 | replaceable, check to see if it is replaceable because its final value | |
6d2f8887 | 4584 | can be calculated. */ |
b4ad7b23 | 4585 | |
6ec73c7c | 4586 | static void |
0c20a65f | 4587 | loop_givs_check (struct loop *loop) |
6ec73c7c MH |
4588 | { |
4589 | struct loop_ivs *ivs = LOOP_IVS (loop); | |
4590 | struct iv_class *bl; | |
b4ad7b23 | 4591 | |
14be28e5 | 4592 | for (bl = ivs->list; bl; bl = bl->next) |
b4ad7b23 RS |
4593 | { |
4594 | struct induction *v; | |
4595 | ||
4596 | for (v = bl->giv; v; v = v->next_iv) | |
4597 | if (! v->replaceable && ! v->not_replaceable) | |
0534b804 | 4598 | check_final_value (loop, v); |
b4ad7b23 | 4599 | } |
6ec73c7c MH |
4600 | } |
4601 | ||
4602 | ||
cc2902df | 4603 | /* Return nonzero if it is possible to eliminate the biv BL provided |
e304a8e6 MH |
4604 | all givs are reduced. This is possible if either the reg is not |
4605 | used outside the loop, or we can compute what its final value will | |
4606 | be. */ | |
4607 | ||
4608 | static int | |
0c20a65f AJ |
4609 | loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl, |
4610 | int threshold, int insn_count) | |
6ec73c7c | 4611 | { |
e304a8e6 MH |
4612 | /* For architectures with a decrement_and_branch_until_zero insn, |
4613 | don't do this if we put a REG_NONNEG note on the endtest for this | |
4614 | biv. */ | |
4615 | ||
4616 | #ifdef HAVE_decrement_and_branch_until_zero | |
4617 | if (bl->nonneg) | |
4618 | { | |
4619 | if (loop_dump_stream) | |
4620 | fprintf (loop_dump_stream, | |
4621 | "Cannot eliminate nonneg biv %d.\n", bl->regno); | |
4622 | return 0; | |
4623 | } | |
4624 | #endif | |
4625 | ||
4626 | /* Check that biv is used outside loop or if it has a final value. | |
4627 | Compare against bl->init_insn rather than loop->start. We aren't | |
4628 | concerned with any uses of the biv between init_insn and | |
4629 | loop->start since these won't be affected by the value of the biv | |
4630 | elsewhere in the function, so long as init_insn doesn't use the | |
4631 | biv itself. */ | |
6b8c9327 | 4632 | |
6ec73c7c MH |
4633 | if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end) |
4634 | && bl->init_insn | |
4635 | && INSN_UID (bl->init_insn) < max_uid_for_loop | |
4636 | && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn) | |
6ec73c7c | 4637 | && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set))) |
e304a8e6 | 4638 | || (bl->final_value = final_biv_value (loop, bl))) |
6ec73c7c | 4639 | return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count); |
6b8c9327 | 4640 | |
e304a8e6 MH |
4641 | if (loop_dump_stream) |
4642 | { | |
4643 | fprintf (loop_dump_stream, | |
4644 | "Cannot eliminate biv %d.\n", | |
4645 | bl->regno); | |
4646 | fprintf (loop_dump_stream, | |
4647 | "First use: insn %d, last use: insn %d.\n", | |
4648 | REGNO_FIRST_UID (bl->regno), | |
4649 | REGNO_LAST_UID (bl->regno)); | |
4650 | } | |
4651 | return 0; | |
4652 | } | |
4653 | ||
4654 | ||
4655 | /* Reduce each giv of BL that we have decided to reduce. */ | |
4656 | ||
4657 | static void | |
0c20a65f | 4658 | loop_givs_reduce (struct loop *loop, struct iv_class *bl) |
e304a8e6 MH |
4659 | { |
4660 | struct induction *v; | |
4661 | ||
4662 | for (v = bl->giv; v; v = v->next_iv) | |
4663 | { | |
4664 | struct induction *tv; | |
4665 | if (! v->ignore && v->same == 0) | |
4666 | { | |
4667 | int auto_inc_opt = 0; | |
6b8c9327 | 4668 | |
e304a8e6 MH |
4669 | /* If the code for derived givs immediately below has already |
4670 | allocated a new_reg, we must keep it. */ | |
4671 | if (! v->new_reg) | |
4672 | v->new_reg = gen_reg_rtx (v->mode); | |
6b8c9327 | 4673 | |
e304a8e6 MH |
4674 | #ifdef AUTO_INC_DEC |
4675 | /* If the target has auto-increment addressing modes, and | |
4676 | this is an address giv, then try to put the increment | |
4677 | immediately after its use, so that flow can create an | |
4678 | auto-increment addressing mode. */ | |
4679 | if (v->giv_type == DEST_ADDR && bl->biv_count == 1 | |
4680 | && bl->biv->always_executed && ! bl->biv->maybe_multiple | |
4681 | /* We don't handle reversed biv's because bl->biv->insn | |
4682 | does not have a valid INSN_LUID. */ | |
4683 | && ! bl->reversed | |
4684 | && v->always_executed && ! v->maybe_multiple | |
4685 | && INSN_UID (v->insn) < max_uid_for_loop) | |
4686 | { | |
4687 | /* If other giv's have been combined with this one, then | |
4688 | this will work only if all uses of the other giv's occur | |
4689 | before this giv's insn. This is difficult to check. | |
6b8c9327 | 4690 | |
e304a8e6 MH |
4691 | We simplify this by looking for the common case where |
4692 | there is one DEST_REG giv, and this giv's insn is the | |
4693 | last use of the dest_reg of that DEST_REG giv. If the | |
4694 | increment occurs after the address giv, then we can | |
4695 | perform the optimization. (Otherwise, the increment | |
4696 | would have to go before other_giv, and we would not be | |
4697 | able to combine it with the address giv to get an | |
4698 | auto-inc address.) */ | |
4699 | if (v->combined_with) | |
4700 | { | |
4701 | struct induction *other_giv = 0; | |
6b8c9327 | 4702 | |
e304a8e6 MH |
4703 | for (tv = bl->giv; tv; tv = tv->next_iv) |
4704 | if (tv->same == v) | |
4705 | { | |
4706 | if (other_giv) | |
4707 | break; | |
4708 | else | |
4709 | other_giv = tv; | |
4710 | } | |
4711 | if (! tv && other_giv | |
4712 | && REGNO (other_giv->dest_reg) < max_reg_before_loop | |
4713 | && (REGNO_LAST_UID (REGNO (other_giv->dest_reg)) | |
4714 | == INSN_UID (v->insn)) | |
4715 | && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn)) | |
4716 | auto_inc_opt = 1; | |
4717 | } | |
4718 | /* Check for case where increment is before the address | |
4719 | giv. Do this test in "loop order". */ | |
4720 | else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn) | |
4721 | && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start) | |
4722 | || (INSN_LUID (bl->biv->insn) | |
4723 | > INSN_LUID (loop->scan_start)))) | |
4724 | || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start) | |
4725 | && (INSN_LUID (loop->scan_start) | |
4726 | < INSN_LUID (bl->biv->insn)))) | |
4727 | auto_inc_opt = -1; | |
4728 | else | |
4729 | auto_inc_opt = 1; | |
6b8c9327 | 4730 | |
e304a8e6 MH |
4731 | #ifdef HAVE_cc0 |
4732 | { | |
4733 | rtx prev; | |
6b8c9327 | 4734 | |
e304a8e6 MH |
4735 | /* We can't put an insn immediately after one setting |
4736 | cc0, or immediately before one using cc0. */ | |
4737 | if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn))) | |
4738 | || (auto_inc_opt == -1 | |
4739 | && (prev = prev_nonnote_insn (v->insn)) != 0 | |
4740 | && INSN_P (prev) | |
4741 | && sets_cc0_p (PATTERN (prev)))) | |
4742 | auto_inc_opt = 0; | |
4743 | } | |
4744 | #endif | |
6b8c9327 | 4745 | |
e304a8e6 MH |
4746 | if (auto_inc_opt) |
4747 | v->auto_inc_opt = 1; | |
4748 | } | |
4749 | #endif | |
6b8c9327 | 4750 | |
e304a8e6 MH |
4751 | /* For each place where the biv is incremented, add an insn |
4752 | to increment the new, reduced reg for the giv. */ | |
4753 | for (tv = bl->biv; tv; tv = tv->next_iv) | |
4754 | { | |
4755 | rtx insert_before; | |
6b8c9327 | 4756 | |
c7d325c8 GN |
4757 | /* Skip if location is the same as a previous one. */ |
4758 | if (tv->same) | |
4759 | continue; | |
e304a8e6 | 4760 | if (! auto_inc_opt) |
2567406a | 4761 | insert_before = NEXT_INSN (tv->insn); |
e304a8e6 MH |
4762 | else if (auto_inc_opt == 1) |
4763 | insert_before = NEXT_INSN (v->insn); | |
4764 | else | |
4765 | insert_before = v->insn; | |
6b8c9327 | 4766 | |
e304a8e6 | 4767 | if (tv->mult_val == const1_rtx) |
96a45535 | 4768 | loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val, |
6b8c9327 | 4769 | v->new_reg, v->new_reg, |
96a45535 | 4770 | 0, insert_before); |
e304a8e6 MH |
4771 | else /* tv->mult_val == const0_rtx */ |
4772 | /* A multiply is acceptable here | |
4773 | since this is presumed to be seldom executed. */ | |
96a45535 | 4774 | loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val, |
6b8c9327 | 4775 | v->add_val, v->new_reg, |
96a45535 | 4776 | 0, insert_before); |
e304a8e6 | 4777 | } |
6b8c9327 | 4778 | |
e304a8e6 | 4779 | /* Add code at loop start to initialize giv's reduced reg. */ |
6b8c9327 | 4780 | |
96a45535 MH |
4781 | loop_iv_add_mult_hoist (loop, |
4782 | extend_value_for_giv (v, bl->initial_value), | |
4783 | v->mult_val, v->add_val, v->new_reg); | |
e304a8e6 MH |
4784 | } |
4785 | } | |
4786 | } | |
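/* An illustration of the reduction performed above (hypothetical giv
   r101 = r100 * 4 + rBASE, where the biv r100 is stepped by 1):

     before:  r101 is recomputed from r100 inside the loop;
     after:   a new reg r200 is initialized to r100's initial value
	      * 4 + rBASE before the loop, and r200 = r200 + 4 is
	      emitted next to each increment of r100.  */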
4787 | ||
4788 | ||
4789 | /* Check for givs whose first use is their definition and whose | |
4790 | last use is the definition of another giv. If so, it is likely | |
4791 | dead and should not be used to derive another giv nor to | |
4792 | eliminate a biv. */ | |
4793 | ||
4794 | static void | |
0c20a65f | 4795 | loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl) |
e304a8e6 MH |
4796 | { |
4797 | struct induction *v; | |
4798 | ||
4799 | for (v = bl->giv; v; v = v->next_iv) | |
4800 | { | |
4801 | if (v->ignore | |
4802 | || (v->same && v->same->ignore)) | |
4803 | continue; | |
6b8c9327 | 4804 | |
e304a8e6 MH |
4805 | if (v->giv_type == DEST_REG |
4806 | && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn)) | |
4807 | { | |
4808 | struct induction *v1; | |
6b8c9327 | 4809 | |
e304a8e6 MH |
4810 | for (v1 = bl->giv; v1; v1 = v1->next_iv) |
4811 | if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn)) | |
4812 | v->maybe_dead = 1; | |
4813 | } | |
4814 | } | |
4815 | } | |
4816 | ||
4817 | ||
4818 | static void | |
0c20a65f | 4819 | loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map) |
e304a8e6 MH |
4820 | { |
4821 | struct induction *v; | |
4822 | ||
4823 | for (v = bl->giv; v; v = v->next_iv) | |
4824 | { | |
4825 | if (v->same && v->same->ignore) | |
4826 | v->ignore = 1; | |
6b8c9327 | 4827 | |
e304a8e6 MH |
4828 | if (v->ignore) |
4829 | continue; | |
6b8c9327 | 4830 | |
e304a8e6 MH |
4831 | /* Update expression if this was combined, in case other giv was |
4832 | replaced. */ | |
4833 | if (v->same) | |
4834 | v->new_reg = replace_rtx (v->new_reg, | |
4835 | v->same->dest_reg, v->same->new_reg); | |
6b8c9327 | 4836 | |
e304a8e6 MH |
4837 | /* See if this register is known to be a pointer to something. If |
4838 | so, see if we can find the alignment. First see if there is a | |
4839 | destination register that is a pointer. If so, this shares the | |
4840 | alignment too. Next see if we can deduce anything from the | |
4841 | computational information. If not, and this is a DEST_ADDR | |
4842 | giv, at least we know that it's a pointer, though we don't know | |
4843 | the alignment. */ | |
4844 | if (GET_CODE (v->new_reg) == REG | |
4845 | && v->giv_type == DEST_REG | |
4846 | && REG_POINTER (v->dest_reg)) | |
4847 | mark_reg_pointer (v->new_reg, | |
4848 | REGNO_POINTER_ALIGN (REGNO (v->dest_reg))); | |
4849 | else if (GET_CODE (v->new_reg) == REG | |
4850 | && REG_POINTER (v->src_reg)) | |
4851 | { | |
4852 | unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg)); | |
6b8c9327 | 4853 | |
e304a8e6 MH |
4854 | if (align == 0 |
4855 | || GET_CODE (v->add_val) != CONST_INT | |
4856 | || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0) | |
4857 | align = 0; | |
6b8c9327 | 4858 | |
e304a8e6 MH |
4859 | mark_reg_pointer (v->new_reg, align); |
4860 | } | |
4861 | else if (GET_CODE (v->new_reg) == REG | |
4862 | && GET_CODE (v->add_val) == REG | |
4863 | && REG_POINTER (v->add_val)) | |
4864 | { | |
4865 | unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val)); | |
6b8c9327 | 4866 | |
e304a8e6 MH |
4867 | if (align == 0 || GET_CODE (v->mult_val) != CONST_INT |
4868 | || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0) | |
4869 | align = 0; | |
6b8c9327 | 4870 | |
e304a8e6 MH |
4871 | mark_reg_pointer (v->new_reg, align); |
4872 | } | |
4873 | else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR) | |
4874 | mark_reg_pointer (v->new_reg, 0); | |
6b8c9327 | 4875 | |
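      /* Alignment arithmetic example (hypothetical): if v->src_reg
	 points to 32-bit-aligned data (align == 32, so align /
	 BITS_PER_UNIT == 4 on an 8-bit-byte target) and v->add_val is
	 (const_int 8), then 8 % 4 == 0 and the reduced reg inherits
	 the 32-bit alignment; with add_val 6, 6 % 4 != 0 and align is
	 dropped to 0.  */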
e304a8e6 MH |
4876 | if (v->giv_type == DEST_ADDR) |
4877 | /* Store reduced reg as the address in the memref where we found | |
4878 | this giv. */ | |
4879 | validate_change (v->insn, v->location, v->new_reg, 0); | |
4880 | else if (v->replaceable) | |
4881 | { | |
4882 | reg_map[REGNO (v->dest_reg)] = v->new_reg; | |
4883 | } | |
4884 | else | |
4885 | { | |
f1a73cfe | 4886 | rtx original_insn = v->insn; |
544823b6 | 4887 | rtx note; |
f1a73cfe | 4888 | |
e304a8e6 MH |
4889 | /* Not replaceable; emit an insn to set the original giv reg from |
4890 | the reduced giv, same as above. */ | |
f1a73cfe EB |
4891 | v->insn = loop_insn_emit_after (loop, 0, original_insn, |
4892 | gen_move_insn (v->dest_reg, | |
4893 | v->new_reg)); | |
4894 | ||
0c20a65f AJ |
4895 | /* The original insn may have a REG_EQUAL note. This note is |
4896 | now incorrect and may result in invalid substitutions later. | |
4897 | The original insn is dead, but may be part of a libcall | |
4898 | sequence, which doesn't seem worth the bother of handling. */ | |
4899 | note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX); | |
4900 | if (note) | |
4901 | remove_note (original_insn, note); | |
e304a8e6 | 4902 | } |
6b8c9327 | 4903 | |
e304a8e6 MH |
4904 | /* When a loop is reversed, givs which depend on the reversed |
4905 | biv, and which are live outside the loop, must be set to their | |
4906 | correct final value. This insn is only needed if the giv is | |
4907 | not replaceable. The correct final value is the same as the | |
4908 | value that the giv starts the reversed loop with. */ | |
4909 | if (bl->reversed && ! v->replaceable) | |
6b8c9327 | 4910 | loop_iv_add_mult_sink (loop, |
96a45535 MH |
4911 | extend_value_for_giv (v, bl->initial_value), |
4912 | v->mult_val, v->add_val, v->dest_reg); | |
e304a8e6 | 4913 | else if (v->final_value) |
6b8c9327 | 4914 | loop_insn_sink_or_swim (loop, |
74411039 JH |
4915 | gen_load_of_final_value (v->dest_reg, |
4916 | v->final_value)); | |
6b8c9327 | 4917 | |
e304a8e6 MH |
4918 | if (loop_dump_stream) |
4919 | { | |
4920 | fprintf (loop_dump_stream, "giv at %d reduced to ", | |
4921 | INSN_UID (v->insn)); | |
c804f3f8 | 4922 | print_simple_rtl (loop_dump_stream, v->new_reg); |
e304a8e6 MH |
4923 | fprintf (loop_dump_stream, "\n"); |
4924 | } | |
4925 | } | |
4926 | } | |
4927 | ||
4928 | ||
4929 | static int | |
0c20a65f AJ |
4930 | loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED, |
4931 | struct iv_class *bl, struct induction *v, | |
4932 | rtx test_reg) | |
e304a8e6 MH |
4933 | { |
4934 | int add_cost; | |
4935 | int benefit; | |
4936 | ||
4937 | benefit = v->benefit; | |
4938 | PUT_MODE (test_reg, v->mode); | |
4939 | add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val, | |
4940 | test_reg, test_reg); | |
6b8c9327 | 4941 | |
e304a8e6 MH |
4942 | /* Reduce benefit if not replaceable, since we will insert a |
4943 | move-insn to replace the insn that calculates this giv. Don't do | |
4944 | this unless the giv is a user variable, since it will often be | |
4945 | marked non-replaceable because of the duplication of the exit | |
4946 | code outside the loop. In such a case, the copies we insert are | |
4947 | dead and will be deleted. So they don't have a cost. Similar | |
4948 | situations exist. */ | |
4949 | /* ??? The new final_[bg]iv_value code does a much better job of | |
4950 | finding replaceable giv's, and hence this code may no longer be | |
4951 | necessary. */ | |
4952 | if (! v->replaceable && ! bl->eliminable | |
4953 | && REG_USERVAR_P (v->dest_reg)) | |
4954 | benefit -= copy_cost; | |
6b8c9327 | 4955 | |
e304a8e6 MH |
4956 | /* Decrease the benefit to count the add-insns that we will insert |
4957 | to increment the reduced reg for the giv. ??? This can | |
4958 | overestimate the run-time cost of the additional insns, e.g. if | |
4959 | there are multiple basic blocks that increment the biv, but only | |
4960 | one of these blocks is executed during each iteration. There is | |
4961 | no good way to detect cases like this with the current structure | |
4962 | of the loop optimizer. This code is more accurate for | |
4963 | determining code size than run-time benefits. */ | |
4964 | benefit -= add_cost * bl->biv_count; | |
4965 | ||
4966 | /* Decide whether to strength-reduce this giv or to leave the code | |
4967 | unchanged (recompute it from the biv each time it is used). This | |
4968 | decision can be made independently for each giv. */ | |
4969 | ||
4970 | #ifdef AUTO_INC_DEC | |
4971 | /* Attempt to guess whether autoincrement will handle some of the | |
4972 | new add insns; if so, increase BENEFIT (undo the subtraction of | |
4973 | add_cost that was done above). */ | |
4974 | if (v->giv_type == DEST_ADDR | |
4975 | /* Increasing the benefit is risky, since this is only a guess. | |
4976 | Avoid increasing register pressure in cases where there would | |
4977 | be no other benefit from reducing this giv. */ | |
4978 | && benefit > 0 | |
4979 | && GET_CODE (v->mult_val) == CONST_INT) | |
4980 | { | |
616fde53 MH |
4981 | int size = GET_MODE_SIZE (GET_MODE (v->mem)); |
4982 | ||
e304a8e6 | 4983 | if (HAVE_POST_INCREMENT |
616fde53 | 4984 | && INTVAL (v->mult_val) == size) |
e304a8e6 MH |
4985 | benefit += add_cost * bl->biv_count; |
4986 | else if (HAVE_PRE_INCREMENT | |
616fde53 | 4987 | && INTVAL (v->mult_val) == size) |
e304a8e6 MH |
4988 | benefit += add_cost * bl->biv_count; |
4989 | else if (HAVE_POST_DECREMENT | |
616fde53 | 4990 | && -INTVAL (v->mult_val) == size) |
e304a8e6 MH |
4991 | benefit += add_cost * bl->biv_count; |
4992 | else if (HAVE_PRE_DECREMENT | |
616fde53 | 4993 | && -INTVAL (v->mult_val) == size) |
e304a8e6 MH |
4994 | benefit += add_cost * bl->biv_count; |
4995 | } | |
4996 | #endif | |
4997 | ||
4998 | return benefit; | |
6ec73c7c MH |
4999 | } |
5000 | ||
5001 | ||
b2735d9a MH |
5002 | /* Free IV structures for LOOP. */ |
5003 | ||
5004 | static void | |
0c20a65f | 5005 | loop_ivs_free (struct loop *loop) |
b2735d9a MH |
5006 | { |
5007 | struct loop_ivs *ivs = LOOP_IVS (loop); | |
5008 | struct iv_class *iv = ivs->list; | |
6b8c9327 | 5009 | |
b2735d9a MH |
5010 | free (ivs->regs); |
5011 | ||
5012 | while (iv) | |
5013 | { | |
5014 | struct iv_class *next = iv->next; | |
5015 | struct induction *induction; | |
5016 | struct induction *next_induction; | |
6b8c9327 | 5017 | |
b2735d9a MH |
5018 | for (induction = iv->biv; induction; induction = next_induction) |
5019 | { | |
5020 | next_induction = induction->next_iv; | |
5021 | free (induction); | |
5022 | } | |
5023 | for (induction = iv->giv; induction; induction = next_induction) | |
5024 | { | |
5025 | next_induction = induction->next_iv; | |
5026 | free (induction); | |
5027 | } | |
6b8c9327 | 5028 | |
b2735d9a MH |
5029 | free (iv); |
5030 | iv = next; | |
5031 | } | |
5032 | } | |
5033 | ||
5034 | ||
6ec73c7c MH |
5035 | /* Perform strength reduction and induction variable elimination. |
5036 | ||
5037 | Pseudo registers created during this function will be beyond the | |
f1d4ac80 MH |
5038 | last valid index in several tables including |
5039 | REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a | |
5040 | problem here, because the added registers cannot be givs outside of | |
5041 | their loop, and hence will never be reconsidered. But scan_loop | |
5042 | must check regnos to make sure they are in bounds. */ | |
6ec73c7c MH |
5043 | |
5044 | static void | |
0c20a65f | 5045 | strength_reduce (struct loop *loop, int flags) |
6ec73c7c MH |
5046 | { |
5047 | struct loop_info *loop_info = LOOP_INFO (loop); | |
5048 | struct loop_regs *regs = LOOP_REGS (loop); | |
5049 | struct loop_ivs *ivs = LOOP_IVS (loop); | |
5050 | rtx p; | |
14be28e5 | 5051 | /* Temporary list pointer for traversing ivs->list. */ |
e304a8e6 | 5052 | struct iv_class *bl; |
6ec73c7c MH |
5053 | /* Ratio of extra register life span we can justify |
5054 | for saving an instruction. More if loop doesn't call subroutines | |
5055 | since in that case saving an insn makes more difference | |
5056 | and more registers are available. */ | |
5057 | /* ??? could set this to last value of threshold in move_movables */ | |
5058 | int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs); | |
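  /* E.g. (illustrative): on a hypothetical target with 29 non-fixed
     registers, threshold is 2 * (3 + 29) = 64 for a call-free loop
     and 1 * (3 + 29) = 32 for a loop containing calls.  */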
5059 | /* Map of pseudo-register replacements. */ | |
5060 | rtx *reg_map = NULL; | |
5061 | int reg_map_size; | |
6ec73c7c | 5062 | int unrolled_insn_copies = 0; |
6ec73c7c | 5063 | rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1); |
28680540 | 5064 | int insn_count = count_insns_in_loop (loop); |
6ec73c7c MH |
5065 | |
5066 | addr_placeholder = gen_reg_rtx (Pmode); | |
5067 | ||
14be28e5 | 5068 | ivs->n_regs = max_reg_before_loop; |
703ad42b | 5069 | ivs->regs = xcalloc (ivs->n_regs, sizeof (struct iv)); |
6ec73c7c MH |
5070 | |
5071 | /* Find all BIVs in loop. */ | |
5072 | loop_bivs_find (loop); | |
5073 | ||
5074 | /* Exit if there are no bivs. */ | |
14be28e5 | 5075 | if (! ivs->list) |
6ec73c7c MH |
5076 | { |
5077 | /* We can still unroll the loop anyway, but indicate that there is no | |
5078 | strength reduction info available. */ | |
5079 | if (flags & LOOP_UNROLL) | |
96a45535 | 5080 | unroll_loop (loop, insn_count, 0); |
6ec73c7c | 5081 | |
b2735d9a MH |
5082 | loop_ivs_free (loop); |
5083 | return; | |
6ec73c7c MH |
5084 | } |
5085 | ||
4912a07c | 5086 | /* Determine how BIVS are initialized by looking through the pre-header |
6ec73c7c MH |
5087 | extended basic block. */ |
5088 | loop_bivs_init_find (loop); | |
5089 | ||
5090 | /* Look at each biv and see if we can say anything better about its | |
5091 | initial value from any initializing insns set up above. */ | |
5092 | loop_bivs_check (loop); | |
5093 | ||
5094 | /* Search the loop for general induction variables. */ | |
5095 | loop_givs_find (loop); | |
5096 | ||
5097 | /* Try to calculate and save the number of loop iterations. This is | |
5098 | set to zero if the actual number cannot be calculated. This must | |
5099 | be called after all giv's have been identified, since otherwise it may | |
5100 | fail if the iteration variable is a giv. */ | |
5101 | loop_iterations (loop); | |
5102 | ||
0dd0e980 JH |
5103 | #ifdef HAVE_prefetch |
5104 | if (flags & LOOP_PREFETCH) | |
5105 | emit_prefetch_instructions (loop); | |
5106 | #endif | |
5107 | ||
6ec73c7c MH |
5108 | /* Now for each giv for which we still don't know whether or not it is |
5109 | replaceable, check to see if it is replaceable because its final value | |
5110 | can be calculated. This must be done after loop_iterations is called, | |
5111 | so that final_giv_value will work correctly. */ | |
5112 | loop_givs_check (loop); | |
b4ad7b23 RS |
5113 | |
5114 | /* Try to prove that the loop counter variable (if any) is always | |
5115 | nonnegative; if so, record that fact with a REG_NONNEG note | |
5116 | so that "decrement and branch until zero" insn can be used. */ | |
a2be868f | 5117 | check_dbra_loop (loop, insn_count); |
b4ad7b23 | 5118 | |
97ec0ad8 R |
5119 | /* Create reg_map to hold substitutions for replaceable giv regs. |
5120 | Some givs might have been made from biv increments, so look at | |
ed5bb68d | 5121 | ivs->reg_iv_type for a suitable size. */ |
14be28e5 | 5122 | reg_map_size = ivs->n_regs; |
703ad42b | 5123 | reg_map = xcalloc (reg_map_size, sizeof (rtx)); |
b4ad7b23 RS |
5124 | |
5125 | /* Examine each iv class for feasibility of strength reduction/induction | |
5126 | variable elimination. */ | |
5127 | ||
14be28e5 | 5128 | for (bl = ivs->list; bl; bl = bl->next) |
b4ad7b23 RS |
5129 | { |
5130 | struct induction *v; | |
5131 | int benefit; | |
6b8c9327 | 5132 | |
b4ad7b23 | 5133 | /* Test whether it will be possible to eliminate this biv |
6ec73c7c | 5134 | provided all givs are reduced. */ |
e304a8e6 | 5135 | bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count); |
b4ad7b23 | 5136 | |
97ebd24c JW |
5137 | /* This will be true at the end, if all givs which depend on this |
5138 | biv have been strength reduced. | |
5139 | We can't (currently) eliminate the biv unless this is so. */ | |
5140 | bl->all_reduced = 1; | |
5141 | ||
6ec73c7c | 5142 | /* Check each extension dependent giv in this class to see if its |
e8cb4873 | 5143 | root biv is safe from wrapping in the interior mode. */ |
03988cac | 5144 | check_ext_dependent_givs (loop, bl); |
e8cb4873 | 5145 | |
b4ad7b23 | 5146 | /* Combine all giv's for this iv_class. */ |
1ecd860b | 5147 | combine_givs (regs, bl); |
b4ad7b23 | 5148 | |
b4ad7b23 RS |
5149 | for (v = bl->giv; v; v = v->next_iv) |
5150 | { | |
5151 | struct induction *tv; | |
5152 | ||
5153 | if (v->ignore || v->same) | |
5154 | continue; | |
5155 | ||
e304a8e6 | 5156 | benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg); |
b4ad7b23 RS |
5157 | |
5158 | /* If an insn is not to be strength reduced, then set its ignore | |
e304a8e6 | 5159 | flag, and clear bl->all_reduced. */ |
b4ad7b23 | 5160 | |
e6f6eb29 JW |
5161 | /* A giv that depends on a reversed biv must be reduced if it is |
5162 | used after the loop exit, otherwise, it would have the wrong | |
5163 | value after the loop exit. To make it simple, just reduce all | |
5164 | of such giv's whether or not we know they are used after the loop | |
5165 | exit. */ | |
5166 | ||
6ec73c7c MH |
5167 | if (! flag_reduce_all_givs |
5168 | && v->lifetime * threshold * benefit < insn_count | |
5169 | && ! bl->reversed) | |
b4ad7b23 RS |
5170 | { |
5171 | if (loop_dump_stream) | |
5172 | fprintf (loop_dump_stream, | |
5173 | "giv of insn %d not worth while, %d vs %d.\n", | |
5174 | INSN_UID (v->insn), | |
5175 | v->lifetime * threshold * benefit, insn_count); | |
5176 | v->ignore = 1; | |
e304a8e6 | 5177 | bl->all_reduced = 0; |
b4ad7b23 RS |
5178 | } |
5179 | else | |
5180 | { | |
5181 | /* Check that we can increment the reduced giv without a | |
5182 | multiply insn. If not, reject it. */ | |
5183 | ||
5184 | for (tv = bl->biv; tv; tv = tv->next_iv) | |
5185 | if (tv->mult_val == const1_rtx | |
5186 | && ! product_cheap_p (tv->add_val, v->mult_val)) | |
5187 | { | |
5188 | if (loop_dump_stream) | |
5189 | fprintf (loop_dump_stream, | |
5190 | "giv of insn %d: would need a multiply.\n", | |
5191 | INSN_UID (v->insn)); | |
5192 | v->ignore = 1; | |
e304a8e6 | 5193 | bl->all_reduced = 0; |
b4ad7b23 RS |
5194 | break; |
5195 | } | |
5196 | } | |
5197 | } | |
5198 | ||
8c354a41 R |
5199 | /* Check for givs whose first use is their definition and whose |
5200 | last use is the definition of another giv. If so, it is likely | |
5201 | dead and should not be used to derive another giv nor to | |
5202 | eliminate a biv. */ | |
e304a8e6 | 5203 | loop_givs_dead_check (loop, bl); |
8c354a41 | 5204 | |
b4ad7b23 | 5205 | /* Reduce each giv that we decided to reduce. */ |
e304a8e6 | 5206 | loop_givs_reduce (loop, bl); |
b4ad7b23 RS |
5207 | |
5208 | /* Rescan all givs. If a giv is the same as a giv not reduced, mark it | |
5209 | as not reduced. | |
e6fcb60d | 5210 | |
b4ad7b23 RS |
5211 | For each giv register that can be reduced now: if replaceable, |
5212 | substitute reduced reg wherever the old giv occurs; | |
8c354a41 | 5213 | else add new move insn "giv_reg = reduced_reg". */ |
96a45535 | 5214 | loop_givs_rescan (loop, bl, reg_map); |
b4ad7b23 RS |
5215 | |
5216 | /* All the givs based on the biv bl have been reduced if they | |
5217 | merit it. */ | |
5218 | ||
5219 | /* For each giv not marked as maybe dead that has been combined with a | |
5220 | second giv, clear any "maybe dead" mark on that second giv. | |
5221 | v->new_reg will either be or refer to the register of the giv it | |
5222 | combined with. | |
5223 | ||
e304a8e6 MH |
5224 | Doing this clearing avoids problems in biv elimination where |
5225 | a giv's new_reg is a complex value that can't be put in the | |
5226 | insn but the giv combined with (with a reg as new_reg) is | |
5227 | marked maybe_dead. Since the register will be used in either | |
5228 | case, we'd prefer it be used from the simpler giv. */ | |
b4ad7b23 RS |
5229 | |
5230 | for (v = bl->giv; v; v = v->next_iv) | |
5231 | if (! v->maybe_dead && v->same) | |
5232 | v->same->maybe_dead = 0; | |
5233 | ||
5234 | /* Try to eliminate the biv, if it is a candidate. | |
e304a8e6 | 5235 | This won't work if ! bl->all_reduced, |
b4ad7b23 RS |
5236 | since the givs we planned to use might not have been reduced. |
5237 | ||
e304a8e6 MH |
5238 | We have to be careful that we didn't initially think we could |
5239 | eliminate this biv because of a giv that we now think may be | |
5240 | dead and shouldn't be used as a biv replacement. | |
b4ad7b23 RS |
5241 | |
5242 | Also, there is the possibility that we may have a giv that looks | |
5243 | like it can be used to eliminate a biv, but the resulting insn | |
e6fcb60d | 5244 | isn't valid. This can happen, for example, on the 88k, where a |
b4ad7b23 | 5245 | JUMP_INSN can compare a register only with zero. Attempts to |
c5b7917e | 5246 | replace it with a compare with a constant will fail. |
b4ad7b23 RS |
5247 | |
5248 | Note that in cases where this call fails, we may have replaced some | |
5249 | of the occurrences of the biv with a giv, but no harm was done in | |
5250 | doing so in the rare cases where it can occur. */ | |
5251 | ||
e304a8e6 | 5252 | if (bl->all_reduced == 1 && bl->eliminable |
0534b804 | 5253 | && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count)) |
b4ad7b23 RS |
5254 | { |
5255 | /* ?? If we created a new test to bypass the loop entirely, | |
5256 | or otherwise drop straight in, based on this test, then | |
5257 | we might want to rewrite it also. This way some later | |
5258 | pass has more hope of removing the initialization of this | |
0f41302f | 5259 | biv entirely. */ |
b4ad7b23 RS |
5260 | |
5261 | /* If final_value != 0, then the biv may be used after loop end | |
5262 | and we must emit an insn to set it just in case. | |
5263 | ||
5264 | Reversed bivs already have an insn after the loop setting their | |
5265 | value, so we don't need another one. We can't calculate the | |
0f41302f | 5266 | proper final value for such a biv here anyway. */ |
e304a8e6 | 5267 | if (bl->final_value && ! bl->reversed) |
74411039 JH |
5268 | loop_insn_sink_or_swim (loop, |
5269 | gen_load_of_final_value (bl->biv->dest_reg, | |
5270 | bl->final_value)); | |
b4ad7b23 | 5271 | |
b4ad7b23 RS |
5272 | if (loop_dump_stream) |
5273 | fprintf (loop_dump_stream, "Reg %d: biv eliminated\n", | |
5274 | bl->regno); | |
5275 | } | |
5c7f5a5f RH |
5276 | /* See above note wrt final_value. But since we couldn't eliminate |
5277 | the biv, we must set the value after the loop instead of before. */ | |
5278 | else if (bl->final_value && ! bl->reversed) | |
74411039 JH |
5279 | loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg, |
5280 | bl->final_value)); | |
b4ad7b23 RS |
5281 | } |
5282 | ||
5283 | /* Go through all the instructions in the loop, making all the | |
5284 | register substitutions scheduled in REG_MAP. */ | |
5285 | ||
e304a8e6 | 5286 | for (p = loop->start; p != loop->end; p = NEXT_INSN (p)) |
b4ad7b23 | 5287 | if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN |
e6fcb60d | 5288 | || GET_CODE (p) == CALL_INSN) |
b4ad7b23 | 5289 | { |
97ec0ad8 R |
5290 | replace_regs (PATTERN (p), reg_map, reg_map_size, 0); |
5291 | replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0); | |
da0c128e | 5292 | INSN_CODE (p) = -1; |
b4ad7b23 RS |
5293 | } |
5294 | ||
73049ebc MT |
5295 | if (loop_info->n_iterations > 0) |
5296 | { | |
5297 | /* When we completely unroll a loop we will likely not need the increment | |
5298 | of the loop BIV and we will not need the conditional branch at the | |
5299 | end of the loop. */ | |
5300 | unrolled_insn_copies = insn_count - 2; | |
5301 | ||
5302 | #ifdef HAVE_cc0 | |
5303 | /* When we completely unroll a loop on a HAVE_cc0 machine we will not | |
5304 | need the comparison before the conditional branch at the end of the | |
5305 | loop. */ | |
80b8e8de | 5306 | unrolled_insn_copies -= 1; |
73049ebc MT |
5307 | #endif |
5308 | ||
5309 | /* We'll need one copy for each loop iteration. */ | |
5310 | unrolled_insn_copies *= loop_info->n_iterations; | |
5311 | ||
5312 | /* A little slop to account for the ability to remove initialization | |
5313 | code, better CSE, and other secondary benefits of completely | |
5314 | unrolling some loops. */ | |
5315 | unrolled_insn_copies -= 1; | |
5316 | ||
5317 | /* Clamp the value. */ | |
5318 | if (unrolled_insn_copies < 0) | |
5319 | unrolled_insn_copies = 0; | |
5320 | } | |
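      /* Worked example (hypothetical numbers, no HAVE_cc0): for a
	 loop of insn_count 10 iterating 4 times, copies
	 = (10 - 2) * 4 - 1 = 31 > 10, so it is not auto-unrolled
	 below; at 1 iteration, copies = 8 * 1 - 1 = 7 <= 10 and it
	 is.  */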
e6fcb60d | 5321 | |
b4ad7b23 RS |
5322 | /* Unroll loops from within strength reduction so that we can use the |
5323 | induction variable information that strength_reduce has already | |
73049ebc MT |
5324 | collected. Always unroll loops that would be as small or smaller |
5325 | unrolled than when rolled. */ | |
1bf14ad7 | 5326 | if ((flags & LOOP_UNROLL) |
5e1afb11 | 5327 | || ((flags & LOOP_AUTO_UNROLL) |
6584b4aa | 5328 | && loop_info->n_iterations > 0 |
73049ebc | 5329 | && unrolled_insn_copies <= insn_count)) |
96a45535 | 5330 | unroll_loop (loop, insn_count, 1); |
b4ad7b23 | 5331 | |
5527bf14 RH |
5332 | #ifdef HAVE_doloop_end |
5333 | if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg) | |
5334 | doloop_optimize (loop); | |
5335 | #endif /* HAVE_doloop_end */ | |
8c660648 | 5336 | |
aa18f20e JH |
5337 | /* If the number of iterations is known, attach a branch prediction note |
5338 | to the branch. Do this only in the second loop pass, as loop unrolling | |
5339 | may change the number of iterations performed. */ | |
26b738be | 5340 | if (flags & LOOP_BCT) |
aa18f20e | 5341 | { |
26b738be RH |
5342 | unsigned HOST_WIDE_INT n |
5343 | = loop_info->n_iterations / loop_info->unroll_number; | |
5344 | if (n > 1) | |
7df98878 | 5345 | predict_insn (prev_nonnote_insn (loop->end), PRED_LOOP_ITERATIONS, |
26b738be | 5346 | REG_BR_PROB_BASE - REG_BR_PROB_BASE / n); |
aa18f20e JH |
5347 | } |
5348 | ||
b4ad7b23 RS |
5349 | if (loop_dump_stream) |
5350 | fprintf (loop_dump_stream, "\n"); | |
69ba6af3 | 5351 | |
b2735d9a | 5352 | loop_ivs_free (loop); |
4da896b2 MM |
5353 | if (reg_map) |
5354 | free (reg_map); | |
b4ad7b23 RS |
5355 | } |
5356 | \f | |
5e787f07 | 5357 | /* Record all basic induction variables calculated in the insn. */
82ee5e63 | 5358 | static rtx |
0c20a65f AJ |
5359 | check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration, |
5360 | int maybe_multiple) | |
5e787f07 | 5361 | { |
ed5bb68d | 5362 | struct loop_ivs *ivs = LOOP_IVS (loop); |
5e787f07 JH |
5363 | rtx set; |
5364 | rtx dest_reg; | |
5365 | rtx inc_val; | |
5366 | rtx mult_val; | |
5367 | rtx *location; | |
5368 | ||
5369 | if (GET_CODE (p) == INSN | |
5370 | && (set = single_set (p)) | |
5371 | && GET_CODE (SET_DEST (set)) == REG) | |
5372 | { | |
5373 | dest_reg = SET_DEST (set); | |
5374 | if (REGNO (dest_reg) < max_reg_before_loop | |
5375 | && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER | |
ed5bb68d | 5376 | && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT) |
5e787f07 | 5377 | { |
5e787f07 JH |
5378 | if (basic_induction_var (loop, SET_SRC (set), |
5379 | GET_MODE (SET_SRC (set)), | |
5380 | dest_reg, p, &inc_val, &mult_val, | |
98d1cd45 | 5381 | &location)) |
5e787f07 JH |
5382 | { |
5383 | /* It is a possible basic induction variable. | |
5384 | Create and initialize an induction structure for it. */ | |
5385 | ||
703ad42b | 5386 | struct induction *v = xmalloc (sizeof (struct induction)); |
5e787f07 | 5387 | |
ed5bb68d | 5388 | record_biv (loop, v, p, dest_reg, inc_val, mult_val, location, |
98d1cd45 | 5389 | not_every_iteration, maybe_multiple); |
ed5bb68d | 5390 | REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT; |
5e787f07 | 5391 | } |
86fee241 | 5392 | else if (REGNO (dest_reg) < ivs->n_regs) |
ed5bb68d | 5393 | REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT; |
5e787f07 JH |
5394 | } |
5395 | } | |
82ee5e63 | 5396 | return p; |
5e787f07 JH |
5397 | } |
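/* A minimal illustration (editor's addition; example_biv is
   hypothetical, not part of loop.c) of the kind of insn accepted
   above.  The increment of `i' is a single_set whose SET_SRC has the
   REG = REG + INVARIANT shape, so `i' is recorded as a basic
   induction variable with mult_val == const1_rtx.  */
#if 0 /* illustration only */
static void
example_biv (int *a, int n)
{
  int i;

  /* In RTL the increment is roughly
     (set (reg i) (plus:SI (reg i) (const_int 1))).  */
  for (i = 0; i < n; i++)
    a[i] = 0;
}
#endif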
5398 | \f | |
e6fcb60d | 5399 | /* Record all givs calculated in the insn. |
5e787f07 JH |
5400 | A register is a giv if: it is only set once, it is a function of a |
5401 | biv and a constant (or invariant), and it is not a biv. */ | |
82ee5e63 | 5402 | static rtx |
0c20a65f AJ |
5403 | check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration, |
5404 | int maybe_multiple) | |
5e787f07 | 5405 | { |
1ecd860b | 5406 | struct loop_regs *regs = LOOP_REGS (loop); |
ed5bb68d | 5407 | |
5e787f07 JH |
5408 | rtx set; |
5409 | /* Look for a general induction variable in a register. */ | |
5410 | if (GET_CODE (p) == INSN | |
5411 | && (set = single_set (p)) | |
5412 | && GET_CODE (SET_DEST (set)) == REG | |
f1d4ac80 | 5413 | && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize) |
5e787f07 JH |
5414 | { |
5415 | rtx src_reg; | |
5416 | rtx dest_reg; | |
5417 | rtx add_val; | |
5418 | rtx mult_val; | |
e8cb4873 | 5419 | rtx ext_val; |
5e787f07 JH |
5420 | int benefit; |
5421 | rtx regnote = 0; | |
5422 | rtx last_consec_insn; | |
5423 | ||
5424 | dest_reg = SET_DEST (set); | |
5425 | if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER) | |
82ee5e63 | 5426 | return p; |
5e787f07 JH |
5427 | |
5428 | if (/* SET_SRC is a giv. */ | |
5429 | (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val, | |
e8cb4873 | 5430 | &mult_val, &ext_val, 0, &benefit, VOIDmode) |
5e787f07 JH |
5431 | /* Equivalent expression is a giv. */ |
5432 | || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX)) | |
5433 | && general_induction_var (loop, XEXP (regnote, 0), &src_reg, | |
e8cb4873 | 5434 | &add_val, &mult_val, &ext_val, 0, |
01329426 | 5435 | &benefit, VOIDmode))) |
5e787f07 JH |
5436 | /* Don't try to handle any regs made by loop optimization. |
5437 | We have nothing on them in regno_first_uid, etc. */ | |
5438 | && REGNO (dest_reg) < max_reg_before_loop | |
5439 | /* Don't recognize a BASIC_INDUCT_VAR here. */ | |
5440 | && dest_reg != src_reg | |
5441 | /* This must be the only place where the register is set. */ | |
f1d4ac80 | 5442 | && (regs->array[REGNO (dest_reg)].n_times_set == 1 |
5e787f07 JH |
5443 | /* or all sets must be consecutive and make a giv. */ |
5444 | || (benefit = consec_sets_giv (loop, benefit, p, | |
5445 | src_reg, dest_reg, | |
e8cb4873 | 5446 | &add_val, &mult_val, &ext_val, |
5e787f07 JH |
5447 | &last_consec_insn)))) |
5448 | { | |
703ad42b | 5449 | struct induction *v = xmalloc (sizeof (struct induction)); |
5e787f07 JH |
5450 | |
5451 | /* If this is a library call, increase benefit. */ | |
5452 | if (find_reg_note (p, REG_RETVAL, NULL_RTX)) | |
5453 | benefit += libcall_benefit (p); | |
5454 | ||
5455 | /* Skip the consecutive insns, if there are any. */ | |
f1d4ac80 | 5456 | if (regs->array[REGNO (dest_reg)].n_times_set != 1) |
5e787f07 JH |
5457 | p = last_consec_insn; |
5458 | ||
5459 | record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val, | |
e8cb4873 | 5460 | ext_val, benefit, DEST_REG, not_every_iteration, |
505ddab6 | 5461 | maybe_multiple, (rtx*) 0); |
5e787f07 JH |
5462 | |
5463 | } | |
5464 | } | |
5465 | ||
5e787f07 | 5466 | /* Look for givs which are memory addresses. */ |
5e787f07 JH |
5467 | if (GET_CODE (p) == INSN) |
5468 | find_mem_givs (loop, PATTERN (p), p, not_every_iteration, | |
5469 | maybe_multiple); | |
5e787f07 JH |
5470 | |
5471 | /* Update the status of whether giv can derive other givs. This can | |
5472 | change when we pass a label or an insn that updates a biv. */ | |
5473 | if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN | |
e6fcb60d | 5474 | || GET_CODE (p) == CODE_LABEL) |
5e787f07 | 5475 | update_giv_derive (loop, p); |
82ee5e63 | 5476 | return p; |
5e787f07 JH |
5477 | } |
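/* Editor's illustration (example_giv is hypothetical, not part of
   loop.c): a register set once per iteration to a linear function of
   a biv.  Conceptually, general_induction_var decomposes the source
   of the set below as biv * mult_val + add_val with mult_val == 4 and
   add_val == 10, so `j' is recorded as a DEST_REG giv.  */
#if 0 /* illustration only */
static int
example_giv (int n)
{
  int i, j, sum = 0;

  for (i = 0; i < n; i++)
    {
      j = 4 * i + 10;	/* j is a giv of the biv i */
      sum += j;
    }
  return sum;
}
#endif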
5478 | \f | |
b4ad7b23 RS |
5479 | /* Return 1 if X is a valid source for an initial value (or as value being |
5480 | compared against in an initial test). | |
5481 | ||
5482 | X must be either a register or constant and must not be clobbered between | |
5483 | the current insn and the start of the loop. | |
5484 | ||
5485 | INSN is the insn containing X. */ | |
5486 | ||
5487 | static int | |
0c20a65f | 5488 | valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start) |
b4ad7b23 RS |
5489 | { |
5490 | if (CONSTANT_P (x)) | |
5491 | return 1; | |
5492 | ||
d45cf215 | 5493 | /* Only consider pseudos we know about initialized in insns whose luids |
b4ad7b23 RS |
5494 | we know. */ |
5495 | if (GET_CODE (x) != REG | |
5496 | || REGNO (x) >= max_reg_before_loop) | |
5497 | return 0; | |
5498 | ||
5499 | /* Don't use call-clobbered registers across a call which clobbers it. On | |
5500 | some machines, don't use any hard registers at all. */ | |
5501 | if (REGNO (x) < FIRST_PSEUDO_REGISTER | |
e9a25f70 JL |
5502 | && (SMALL_REGISTER_CLASSES |
5503 | || (call_used_regs[REGNO (x)] && call_seen))) | |
b4ad7b23 RS |
5504 | return 0; |
5505 | ||
5506 | /* Don't use registers that have been clobbered before the start of the | |
5507 | loop. */ | |
5508 | if (reg_set_between_p (x, insn, loop_start)) | |
5509 | return 0; | |
5510 | ||
5511 | return 1; | |
5512 | } | |
5513 | \f | |
5514 | /* Scan X for memory refs and check each memory address | |
5515 | as a possible giv. INSN is the insn whose pattern X comes from. | |
5516 | NOT_EVERY_ITERATION is 1 if the insn might not be executed during | |
c5c76735 | 5517 | every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed |
3d042e77 | 5518 | more than once in each loop iteration. */ |
b4ad7b23 RS |
5519 | |
5520 | static void | |
0c20a65f AJ |
5521 | find_mem_givs (const struct loop *loop, rtx x, rtx insn, |
5522 | int not_every_iteration, int maybe_multiple) | |
b4ad7b23 | 5523 | { |
b3694847 SS |
5524 | int i, j; |
5525 | enum rtx_code code; | |
5526 | const char *fmt; | |
b4ad7b23 RS |
5527 | |
5528 | if (x == 0) | |
5529 | return; | |
5530 | ||
5531 | code = GET_CODE (x); | |
5532 | switch (code) | |
5533 | { | |
5534 | case REG: | |
5535 | case CONST_INT: | |
5536 | case CONST: | |
5537 | case CONST_DOUBLE: | |
5538 | case SYMBOL_REF: | |
5539 | case LABEL_REF: | |
5540 | case PC: | |
5541 | case CC0: | |
5542 | case ADDR_VEC: | |
5543 | case ADDR_DIFF_VEC: | |
5544 | case USE: | |
5545 | case CLOBBER: | |
5546 | return; | |
5547 | ||
5548 | case MEM: | |
5549 | { | |
5550 | rtx src_reg; | |
5551 | rtx add_val; | |
5552 | rtx mult_val; | |
e8cb4873 | 5553 | rtx ext_val; |
b4ad7b23 RS |
5554 | int benefit; |
5555 | ||
45f97e2e | 5556 | /* This code used to disable creating GIVs with mult_val == 1 and |
e6fcb60d | 5557 | add_val == 0. However, this leads to lost optimizations when |
45f97e2e | 5558 | it comes time to combine a set of related DEST_ADDR GIVs, since |
6d2f8887 | 5559 | this one would not be seen. */ |
b4ad7b23 | 5560 | |
0534b804 | 5561 | if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val, |
e8cb4873 RH |
5562 | &mult_val, &ext_val, 1, &benefit, |
5563 | GET_MODE (x))) | |
b4ad7b23 RS |
5564 | { |
5565 | /* Found one; record it. */ | |
703ad42b | 5566 | struct induction *v = xmalloc (sizeof (struct induction)); |
b4ad7b23 | 5567 | |
0534b804 | 5568 | record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val, |
e8cb4873 RH |
5569 | add_val, ext_val, benefit, DEST_ADDR, |
5570 | not_every_iteration, maybe_multiple, &XEXP (x, 0)); | |
b4ad7b23 | 5571 | |
099f0f3f | 5572 | v->mem = x; |
b4ad7b23 | 5573 | } |
b4ad7b23 | 5574 | } |
e9a25f70 JL |
5575 | return; |
5576 | ||
5577 | default: | |
5578 | break; | |
b4ad7b23 RS |
5579 | } |
5580 | ||
5581 | /* Recursively scan the subexpressions for other mem refs. */ | |
5582 | ||
5583 | fmt = GET_RTX_FORMAT (code); | |
5584 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
5585 | if (fmt[i] == 'e') | |
0534b804 MH |
5586 | find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration, |
5587 | maybe_multiple); | |
b4ad7b23 RS |
5588 | else if (fmt[i] == 'E') |
5589 | for (j = 0; j < XVECLEN (x, i); j++) | |
0534b804 MH |
5590 | find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration, |
5591 | maybe_multiple); | |
b4ad7b23 RS |
5592 | } |
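/* Editor's illustration (example_mem_giv is hypothetical, not part of
   loop.c) of the DEST_ADDR case.  On a target with 4-byte ints the
   store below addresses MEM at a + i*4, a linear function of the biv
   i, so the address is recorded as a giv with *LOCATION pointing at
   XEXP (mem, 0), letting it be strength-reduced in place later.  */
#if 0 /* illustration only */
static void
example_mem_giv (int *a, int n)
{
  int i;

  for (i = 0; i < n; i++)
    a[i] = i;		/* address a + i*4 is a DEST_ADDR giv */
}
#endif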
5593 | \f | |
5594 | /* Fill in the data about one biv update. | |
5595 | V is the `struct induction' in which we record the biv. (It is | |
5596 | allocated by the caller, with alloca.) | |
5597 | INSN is the insn that sets it. | |
5598 | DEST_REG is the biv's reg. | |
5599 | ||
5600 | MULT_VAL is const1_rtx if the biv is being incremented here, in which case | |
5601 | INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is | |
7dcd3836 RK |
5602 | being set to INC_VAL. |
5603 | ||
5604 | NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5605 | executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update | |
5606 | can be executed more than once per iteration. If MAYBE_MULTIPLE | |
5607 | and NOT_EVERY_ITERATION are both zero, we know that the biv update is | |
5608 | executed exactly once per iteration. */ | |
b4ad7b23 RS |
5609 | |
5610 | static void | |
0c20a65f AJ |
5611 | record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg, |
5612 | rtx inc_val, rtx mult_val, rtx *location, | |
5613 | int not_every_iteration, int maybe_multiple) | |
b4ad7b23 | 5614 | { |
ed5bb68d | 5615 | struct loop_ivs *ivs = LOOP_IVS (loop); |
b4ad7b23 RS |
5616 | struct iv_class *bl; |
5617 | ||
5618 | v->insn = insn; | |
5619 | v->src_reg = dest_reg; | |
5620 | v->dest_reg = dest_reg; | |
5621 | v->mult_val = mult_val; | |
5622 | v->add_val = inc_val; | |
affd4f33 | 5623 | v->ext_dependent = NULL_RTX; |
3ec2b590 | 5624 | v->location = location; |
b4ad7b23 RS |
5625 | v->mode = GET_MODE (dest_reg); |
5626 | v->always_computable = ! not_every_iteration; | |
8516af93 | 5627 | v->always_executed = ! not_every_iteration; |
7dcd3836 | 5628 | v->maybe_multiple = maybe_multiple; |
c7d325c8 | 5629 | v->same = 0; |
b4ad7b23 RS |
5630 | |
5631 | /* Add this to the reg's iv_class, creating a class | |
5632 | if this is the first incrementation of the reg. */ | |
5633 | ||
8b634749 | 5634 | bl = REG_IV_CLASS (ivs, REGNO (dest_reg)); |
b4ad7b23 RS |
5635 | if (bl == 0) |
5636 | { | |
5637 | /* Create and initialize new iv_class. */ | |
5638 | ||
703ad42b | 5639 | bl = xmalloc (sizeof (struct iv_class)); |
b4ad7b23 RS |
5640 | |
5641 | bl->regno = REGNO (dest_reg); | |
5642 | bl->biv = 0; | |
5643 | bl->giv = 0; | |
5644 | bl->biv_count = 0; | |
5645 | bl->giv_count = 0; | |
5646 | ||
5647 | /* Set initial value to the reg itself. */ | |
5648 | bl->initial_value = dest_reg; | |
e304a8e6 | 5649 | bl->final_value = 0; |
c5b7917e | 5650 | /* We haven't seen the initializing insn yet. */
b4ad7b23 RS |
5651 | bl->init_insn = 0; |
5652 | bl->init_set = 0; | |
5653 | bl->initial_test = 0; | |
5654 | bl->incremented = 0; | |
5655 | bl->eliminable = 0; | |
5656 | bl->nonneg = 0; | |
5657 | bl->reversed = 0; | |
b5d27be7 | 5658 | bl->total_benefit = 0; |
b4ad7b23 | 5659 | |
14be28e5 MH |
5660 | /* Add this class to ivs->list. */ |
5661 | bl->next = ivs->list; | |
5662 | ivs->list = bl; | |
b4ad7b23 RS |
5663 | |
5664 | /* Put it in the array of biv register classes. */ | |
8b634749 | 5665 | REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl; |
b4ad7b23 | 5666 | } |
c7d325c8 GN |
5667 | else |
5668 | { | |
5669 | /* Check if location is the same as a previous one. */ | |
5670 | struct induction *induction; | |
5671 | for (induction = bl->biv; induction; induction = induction->next_iv) | |
5672 | if (location == induction->location) | |
5673 | { | |
5674 | v->same = induction; | |
5675 | break; | |
5676 | } | |
5677 | } | |
b4ad7b23 RS |
5678 | |
5679 | /* Update IV_CLASS entry for this biv. */ | |
5680 | v->next_iv = bl->biv; | |
5681 | bl->biv = v; | |
5682 | bl->biv_count++; | |
5683 | if (mult_val == const1_rtx) | |
5684 | bl->incremented = 1; | |
5685 | ||
5686 | if (loop_dump_stream) | |
c804f3f8 | 5687 | loop_biv_dump (v, loop_dump_stream, 0); |
b4ad7b23 RS |
5688 | } |
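/* Editor's note (not part of loop.c): one iv_class collects every
   update of the same register, so a hypothetical body with two
   increments of the biv `i', say

       i += 1;   ...   i += 2;

   yields a single iv_class for `i' whose bl->biv list chains both
   inductions (bl->biv_count == 2), each with mult_val == const1_rtx
   since both are increments.  */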
5689 | \f | |
5690 | /* Fill in the data about one giv. | |
5691 | V is the `struct induction' in which we record the giv. (It is | |
5692 | allocated by the caller, with alloca.) | |
5693 | INSN is the insn that sets it. | |
5694 | BENEFIT estimates the savings from deleting this insn. | |
5695 | TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed | |
5696 | into a register or is used as a memory address. | |
5697 | ||
5698 | SRC_REG is the biv reg which the giv is computed from. | |
5699 | DEST_REG is the giv's reg (if the giv is stored in a reg). | |
5700 | MULT_VAL and ADD_VAL are the coefficients used to compute the giv. | |
5701 | LOCATION points to the place where this giv's value appears in INSN. */ | |
5702 | ||
5703 | static void | |
0c20a65f AJ |
5704 | record_giv (const struct loop *loop, struct induction *v, rtx insn, |
5705 | rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val, | |
5706 | rtx ext_val, int benefit, enum g_types type, | |
5707 | int not_every_iteration, int maybe_multiple, rtx *location) | |
b4ad7b23 | 5708 | { |
ed5bb68d | 5709 | struct loop_ivs *ivs = LOOP_IVS (loop); |
b4ad7b23 RS |
5710 | struct induction *b; |
5711 | struct iv_class *bl; | |
5712 | rtx set = single_set (insn); | |
ce7de04c JH |
5713 | rtx temp; |
5714 | ||
3d042e77 | 5715 | /* Attempt to prove constancy of the values. Don't let simplify_rtx
9b3bd424 | 5716 | undo the MULT canonicalization that we performed earlier. */ |
ce7de04c | 5717 | temp = simplify_rtx (add_val); |
9b3bd424 RH |
5718 | if (temp |
5719 | && ! (GET_CODE (add_val) == MULT | |
5720 | && GET_CODE (temp) == ASHIFT)) | |
ce7de04c | 5721 | add_val = temp; |
b4ad7b23 RS |
5722 | |
5723 | v->insn = insn; | |
5724 | v->src_reg = src_reg; | |
5725 | v->giv_type = type; | |
5726 | v->dest_reg = dest_reg; | |
5727 | v->mult_val = mult_val; | |
5728 | v->add_val = add_val; | |
affd4f33 | 5729 | v->ext_dependent = ext_val; |
b4ad7b23 RS |
5730 | v->benefit = benefit; |
5731 | v->location = location; | |
5732 | v->cant_derive = 0; | |
5733 | v->combined_with = 0; | |
c5c76735 | 5734 | v->maybe_multiple = maybe_multiple; |
b4ad7b23 RS |
5735 | v->maybe_dead = 0; |
5736 | v->derive_adjustment = 0; | |
5737 | v->same = 0; | |
5738 | v->ignore = 0; | |
5739 | v->new_reg = 0; | |
5740 | v->final_value = 0; | |
f415f7be | 5741 | v->same_insn = 0; |
8516af93 | 5742 | v->auto_inc_opt = 0; |
9ae8ffe7 JL |
5743 | v->unrolled = 0; |
5744 | v->shared = 0; | |
b4ad7b23 RS |
5745 | |
5746 | /* The v->always_computable field is used in update_giv_derive, to | |
5747 | determine whether a giv can be used to derive another giv. For a | |
5748 | DEST_REG giv, INSN computes a new value for the giv, so its value | |
5749 | isn't computable if INSN isn't executed every iteration.
5750 | However, for a DEST_ADDR giv, INSN merely uses the value of the giv; | |
5751 | it does not compute a new value. Hence the value is always computable | |
d45cf215 | 5752 | regardless of whether INSN is executed each iteration. */ |
b4ad7b23 RS |
5753 | |
5754 | if (type == DEST_ADDR) | |
5755 | v->always_computable = 1; | |
5756 | else | |
5757 | v->always_computable = ! not_every_iteration; | |
5758 | ||
8516af93 JW |
5759 | v->always_executed = ! not_every_iteration; |
5760 | ||
b4ad7b23 RS |
5761 | if (type == DEST_ADDR) |
5762 | { | |
5763 | v->mode = GET_MODE (*location); | |
5764 | v->lifetime = 1; | |
b4ad7b23 RS |
5765 | } |
5766 | else /* type == DEST_REG */ | |
5767 | { | |
5768 | v->mode = GET_MODE (SET_DEST (set)); | |
5769 | ||
b8056b46 | 5770 | v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg)); |
b4ad7b23 | 5771 | |
b4ad7b23 RS |
5772 | /* If the lifetime is zero, it means that this register is |
5773 | really a dead store. So mark this as a giv that can be | |
0f41302f | 5774 | ignored. This will not prevent the biv from being eliminated. */ |
b4ad7b23 RS |
5775 | if (v->lifetime == 0) |
5776 | v->ignore = 1; | |
5777 | ||
ed5bb68d MH |
5778 | REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT; |
5779 | REG_IV_INFO (ivs, REGNO (dest_reg)) = v; | |
b4ad7b23 RS |
5780 | } |
5781 | ||
5782 | /* Add the giv to the class of givs computed from one biv. */ | |
5783 | ||
8b634749 | 5784 | bl = REG_IV_CLASS (ivs, REGNO (src_reg)); |
b4ad7b23 RS |
5785 | if (bl) |
5786 | { | |
5787 | v->next_iv = bl->giv; | |
5788 | bl->giv = v; | |
5789 | /* Don't count DEST_ADDR. This is supposed to count the number of | |
5790 | insns that calculate givs. */ | |
5791 | if (type == DEST_REG) | |
5792 | bl->giv_count++; | |
5793 | bl->total_benefit += benefit; | |
5794 | } | |
5795 | else | |
5796 | /* Fatal error, biv missing for this giv? */ | |
5797 | abort (); | |
5798 | ||
5799 | if (type == DEST_ADDR) | |
3bdcef4d AJ |
5800 | { |
5801 | v->replaceable = 1; | |
5802 | v->not_replaceable = 0; | |
5803 | } | |
b4ad7b23 RS |
5804 | else |
5805 | { | |
5806 | /* The giv can be replaced outright by the reduced register only if all | |
5807 | of the following conditions are true: | |
6b8c9327 | 5808 | - the insn that sets the giv is always executed on any iteration |
b4ad7b23 RS |
5809 | on which the giv is used at all |
5810 | (there are two ways to deduce this: | |
5811 | either the insn is executed on every iteration, | |
5812 | or all uses follow that insn in the same basic block), | |
6b8c9327 | 5813 | - the giv is not used outside the loop |
b4ad7b23 RS |
5814 | - no assignments to the biv occur during the giv's lifetime. */ |
5815 | ||
b1f21e0a | 5816 | if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn) |
b4ad7b23 | 5817 | /* Previous line always fails if INSN was moved by loop opt. */ |
8529a489 | 5818 | && REGNO_LAST_LUID (REGNO (dest_reg)) |
0534b804 | 5819 | < INSN_LUID (loop->end) |
b4ad7b23 RS |
5820 | && (! not_every_iteration |
5821 | || last_use_this_basic_block (dest_reg, insn))) | |
e6fcb60d | 5822 | { |
b4ad7b23 RS |
5823 | /* Now check that there are no assignments to the biv within the |
5824 | giv's lifetime. This requires two separate checks. */ | |
5825 | ||
5826 | /* Check each biv update, and fail if any are between the first | |
5827 | and last use of the giv. | |
e6fcb60d | 5828 | |
b4ad7b23 RS |
5829 | If this loop contains an inner loop that was unrolled, then |
5830 | the insn modifying the biv may have been emitted by the loop | |
5831 | unrolling code, and hence does not have a valid luid. Just | |
5832 | mark the biv as not replaceable in this case. It is not very | |
5833 | useful as a biv, because it is used in two different loops. | |
5834 | It is very unlikely that we would be able to optimize the giv | |
5835 | using this biv anyways. */ | |
5836 | ||
5837 | v->replaceable = 1; | |
3bdcef4d | 5838 | v->not_replaceable = 0; |
b4ad7b23 RS |
5839 | for (b = bl->biv; b; b = b->next_iv) |
5840 | { | |
5841 | if (INSN_UID (b->insn) >= max_uid_for_loop | |
8529a489 MH |
5842 | || ((INSN_LUID (b->insn) |
5843 | >= REGNO_FIRST_LUID (REGNO (dest_reg))) | |
5844 | && (INSN_LUID (b->insn) | |
5845 | <= REGNO_LAST_LUID (REGNO (dest_reg))))) | |
b4ad7b23 RS |
5846 | { |
5847 | v->replaceable = 0; | |
5848 | v->not_replaceable = 1; | |
5849 | break; | |
e6fcb60d | 5850 | } |
b4ad7b23 RS |
5851 | } |
5852 | ||
5031afa7 JW |
5853 | /* If there are any backwards branches that go from after the |
5854 | biv update to before it, then this giv is not replaceable. */ | |
b4ad7b23 | 5855 | if (v->replaceable) |
5031afa7 | 5856 | for (b = bl->biv; b; b = b->next_iv) |
0534b804 | 5857 | if (back_branch_in_range_p (loop, b->insn)) |
5031afa7 JW |
5858 | { |
5859 | v->replaceable = 0; | |
5860 | v->not_replaceable = 1; | |
5861 | break; | |
5862 | } | |
b4ad7b23 RS |
5863 | } |
5864 | else | |
5865 | { | |
5866 | /* May still be replaceable, we don't have enough info here to | |
5867 | decide. */ | |
5868 | v->replaceable = 0; | |
5869 | v->not_replaceable = 0; | |
5870 | } | |
5871 | } | |
5872 | ||
45f97e2e RH |
5873 | /* Record whether the add_val contains a const_int, for later use by |
5874 | combine_givs. */ | |
5875 | { | |
5876 | rtx tem = add_val; | |
5877 | ||
5878 | v->no_const_addval = 1; | |
5879 | if (tem == const0_rtx) | |
5880 | ; | |
ce7de04c | 5881 | else if (CONSTANT_P (add_val)) |
45f97e2e | 5882 | v->no_const_addval = 0; |
ce7de04c | 5883 | if (GET_CODE (tem) == PLUS) |
45f97e2e | 5884 | { |
ce7de04c | 5885 | while (1) |
45f97e2e RH |
5886 | { |
5887 | if (GET_CODE (XEXP (tem, 0)) == PLUS) | |
5888 | tem = XEXP (tem, 0); | |
5889 | else if (GET_CODE (XEXP (tem, 1)) == PLUS) | |
5890 | tem = XEXP (tem, 1); | |
5891 | else | |
5892 | break; | |
5893 | } | |
ce7de04c JH |
5894 | if (CONSTANT_P (XEXP (tem, 1))) |
5895 | v->no_const_addval = 0; | |
45f97e2e RH |
5896 | } |
5897 | } | |
5898 | ||
b4ad7b23 | 5899 | if (loop_dump_stream) |
c804f3f8 | 5900 | loop_giv_dump (v, loop_dump_stream, 0); |
b4ad7b23 RS |
5901 | } |
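/* Editor's illustration (not part of loop.c) of the replaceability
   test above, with hypothetical loops.  Here the giv `j' is set and
   used with no biv update in between, so it can be replaced outright:

       for (i = 0; i < n; i++)
         { j = i * 4; use (j); }

   but bumping the biv inside the giv's lifetime defeats the test:

       for (i = 0; i < n; i++)
         { j = i * 4; i++; use (j); }   -- replaceable = 0  */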
5902 | ||
b4ad7b23 RS |
5903 | /* All this does is determine whether a giv can be made replaceable because |
5904 | its final value can be calculated. This code can not be part of record_giv | |
5905 | above, because final_giv_value requires that the number of loop iterations | |
5906 | be known, and that can not be accurately calculated until after all givs | |
5907 | have been identified. */ | |
5908 | ||
5909 | static void | |
0c20a65f | 5910 | check_final_value (const struct loop *loop, struct induction *v) |
b4ad7b23 | 5911 | { |
b4ad7b23 | 5912 | rtx final_value = 0; |
b4ad7b23 | 5913 | |
b4ad7b23 RS |
5914 | /* DEST_ADDR givs will never reach here, because they are always marked |
5915 | replaceable above in record_giv. */ | |
5916 | ||
5917 | /* The giv can be replaced outright by the reduced register only if all | |
5918 | of the following conditions are true: | |
5919 | - the insn that sets the giv is always executed on any iteration | |
5920 | on which the giv is used at all | |
5921 | (there are two ways to deduce this: | |
5922 | either the insn is executed on every iteration, | |
5923 | or all uses follow that insn in the same basic block), | |
5924 | - its final value can be calculated (this condition is different | |
5925 | than the one above in record_giv) | |
70dd0f7f | 5926 | - it's not used before it's set
b4ad7b23 RS |
5927 | - no assignments to the biv occur during the giv's lifetime. */ |
5928 | ||
5929 | #if 0 | |
5930 | /* This is only called now when replaceable is known to be false. */ | |
5931 | /* Clear replaceable, so that it won't confuse final_giv_value. */ | |
5932 | v->replaceable = 0; | |
5933 | #endif | |
5934 | ||
0534b804 | 5935 | if ((final_value = final_giv_value (loop, v)) |
045d7161 EB |
5936 | && (v->always_executed |
5937 | || last_use_this_basic_block (v->dest_reg, v->insn))) | |
b4ad7b23 | 5938 | { |
70dd0f7f | 5939 | int biv_increment_seen = 0, before_giv_insn = 0; |
b4ad7b23 RS |
5940 | rtx p = v->insn; |
5941 | rtx last_giv_use; | |
5942 | ||
5943 | v->replaceable = 1; | |
3bdcef4d | 5944 | v->not_replaceable = 0; |
b4ad7b23 RS |
5945 | |
5946 | /* When trying to determine whether or not a biv increment occurs | |
5947 | during the lifetime of the giv, we can ignore uses of the variable | |
5948 | outside the loop because final_value is true. Hence we can not | |
5949 | use regno_last_uid and regno_first_uid as above in record_giv. */ | |
5950 | ||
5951 | /* Search the loop to determine whether any assignments to the | |
5952 | biv occur during the giv's lifetime. Start with the insn | |
5953 | that sets the giv, and search around the loop until we come | |
5954 | back to that insn again. | |
5955 | ||
5956 | Also fail if there is a jump within the giv's lifetime that jumps | |
5957 | to somewhere outside the lifetime but still within the loop. This | |
5958 | catches spaghetti code where the execution order is not linear, and | |
5959 | hence the above test fails. Here we assume that the giv lifetime | |
5960 | does not extend from one iteration of the loop to the next, so as | |
5961 | to make the test easier. Since the lifetime isn't known yet, | |
5962 | this requires two loops. See also record_giv above. */ | |
5963 | ||
5964 | last_giv_use = v->insn; | |
5965 | ||
5966 | while (1) | |
5967 | { | |
5968 | p = NEXT_INSN (p); | |
0534b804 | 5969 | if (p == loop->end) |
70dd0f7f FS |
5970 | { |
5971 | before_giv_insn = 1; | |
5972 | p = NEXT_INSN (loop->start); | |
5973 | } | |
b4ad7b23 RS |
5974 | if (p == v->insn) |
5975 | break; | |
5976 | ||
5977 | if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN | |
5978 | || GET_CODE (p) == CALL_INSN) | |
5979 | { | |
8a09bb27 JW |
5980 | /* It is possible for the BIV increment to use the GIV if we |
5981 | have a cycle. Thus we must be sure to check each insn for | |
5982 | both BIV and GIV uses, and we must check for BIV uses | |
5983 | first. */ | |
5984 | ||
5985 | if (! biv_increment_seen | |
5986 | && reg_set_p (v->src_reg, PATTERN (p))) | |
5987 | biv_increment_seen = 1; | |
fd5d5b07 | 5988 | |
8a09bb27 | 5989 | if (reg_mentioned_p (v->dest_reg, PATTERN (p))) |
b4ad7b23 | 5990 | { |
70dd0f7f | 5991 | if (biv_increment_seen || before_giv_insn) |
b4ad7b23 RS |
5992 | { |
5993 | v->replaceable = 0; | |
5994 | v->not_replaceable = 1; | |
5995 | break; | |
5996 | } | |
8a09bb27 | 5997 | last_giv_use = p; |
b4ad7b23 | 5998 | } |
b4ad7b23 RS |
5999 | } |
6000 | } | |
e6fcb60d | 6001 | |
b4ad7b23 RS |
6002 | /* Now that the lifetime of the giv is known, check for branches |
6003 | from within the lifetime to outside the lifetime if it is still | |
6004 | replaceable. */ | |
6005 | ||
6006 | if (v->replaceable) | |
6007 | { | |
6008 | p = v->insn; | |
6009 | while (1) | |
6010 | { | |
6011 | p = NEXT_INSN (p); | |
0534b804 MH |
6012 | if (p == loop->end) |
6013 | p = NEXT_INSN (loop->start); | |
b4ad7b23 RS |
6014 | if (p == last_giv_use) |
6015 | break; | |
6016 | ||
6017 | if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) | |
6018 | && LABEL_NAME (JUMP_LABEL (p)) | |
1cb1fe66 | 6019 | && ((loop_insn_first_p (JUMP_LABEL (p), v->insn) |
0534b804 | 6020 | && loop_insn_first_p (loop->start, JUMP_LABEL (p))) |
1cb1fe66 | 6021 | || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p)) |
0534b804 | 6022 | && loop_insn_first_p (JUMP_LABEL (p), loop->end)))) |
b4ad7b23 RS |
6023 | { |
6024 | v->replaceable = 0; | |
6025 | v->not_replaceable = 1; | |
6026 | ||
6027 | if (loop_dump_stream) | |
6028 | fprintf (loop_dump_stream, | |
6029 | "Found branch outside giv lifetime.\n"); | |
6030 | ||
6031 | break; | |
6032 | } | |
6033 | } | |
6034 | } | |
6035 | ||
6036 | /* If it is replaceable, then save the final value. */ | |
6037 | if (v->replaceable) | |
6038 | v->final_value = final_value; | |
6039 | } | |
6040 | ||
6041 | if (loop_dump_stream && v->replaceable) | |
6042 | fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n", | |
6043 | INSN_UID (v->insn), REGNO (v->dest_reg)); | |
6044 | } | |
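/* Editor's illustration (example_final_value is hypothetical, not
   part of loop.c): a giv whose final value is calculable.  If the
   loop is known to run exactly 10 iterations, the last assignment
   computes j = 4 * 9 = 36, a compile-time constant, so
   final_giv_value can supply it and the giv may become replaceable
   even though `j' is live after the loop.  */
#if 0 /* illustration only */
static int
example_final_value (int *a)
{
  int i, j = 0;

  for (i = 0; i < 10; i++)
    {
      j = 4 * i;
      a[i] = j;
    }
  return j;		/* final value is the constant 36 */
}
#endif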
6045 | \f | |
6046 | /* Update the status of whether a giv can derive other givs. | |
6047 | ||
6048 | We need to do something special if there is or may be an update to the biv | |
6049 | between the time the giv is defined and the time it is used to derive | |
6050 | another giv. | |
6051 | ||
6052 | In addition, a giv that is only conditionally set is not allowed to | |
6053 | derive another giv once a label has been passed. | |
6054 | ||
6055 | The cases we look at are when a label or an update to a biv is passed. */ | |
6056 | ||
6057 | static void | |
0c20a65f | 6058 | update_giv_derive (const struct loop *loop, rtx p) |
b4ad7b23 | 6059 | { |
ed5bb68d | 6060 | struct loop_ivs *ivs = LOOP_IVS (loop); |
b4ad7b23 RS |
6061 | struct iv_class *bl; |
6062 | struct induction *biv, *giv; | |
6063 | rtx tem; | |
6064 | int dummy; | |
6065 | ||
6066 | /* Search all IV classes, then all bivs, and finally all givs. | |
6067 | ||
7dcd3836 | 6068 | There are three cases we are concerned with. First we have the situation |
b4ad7b23 RS |
6069 | of a giv that is only updated conditionally. In that case, it may not |
6070 | derive any givs after a label is passed. | |
6071 | ||
6072 | The second case is when a biv update occurs, or may occur, after the | |
6073 | definition of a giv. For certain biv updates (see below) that are | |
6074 | known to occur between the giv definition and use, we can adjust the | |
6075 | giv definition. For others, or when the biv update is conditional, | |
6076 | we must prevent the giv from deriving any other givs. There are two | |
6077 | sub-cases within this case. | |
6078 | ||
6079 | If this is a label, we are concerned with any biv update that is done | |
6080 | conditionally, since it may be done after the giv is defined followed by | |
6081 | a branch here (actually, we need to pass both a jump and a label, but | |
6082 | this extra tracking doesn't seem worth it). | |
6083 | ||
7dcd3836 RK |
6084 | If this is a jump, we are concerned about any biv update that may be |
6085 | executed multiple times. We are actually only concerned about | |
6086 | backward jumps, but it is probably not worth performing the test | |
6087 | on the jump again here. | |
6088 | ||
6089 | If this is a biv update, we must adjust the giv status to show that a | |
b4ad7b23 RS |
6090 | subsequent biv update was performed. If this adjustment cannot be done, |
6091 | the giv cannot derive further givs. */ | |
6092 | ||
14be28e5 | 6093 | for (bl = ivs->list; bl; bl = bl->next) |
b4ad7b23 | 6094 | for (biv = bl->biv; biv; biv = biv->next_iv) |
7dcd3836 RK |
6095 | if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN |
6096 | || biv->insn == p) | |
b4ad7b23 RS |
6097 | { |
6098 | for (giv = bl->giv; giv; giv = giv->next_iv) | |
6099 | { | |
6100 | /* If cant_derive is already true, there is no point in | |
6101 | checking all of these conditions again. */ | |
6102 | if (giv->cant_derive) | |
6103 | continue; | |
6104 | ||
6105 | /* If this giv is conditionally set and we have passed a label, | |
6106 | it cannot derive anything. */ | |
6107 | if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable) | |
6108 | giv->cant_derive = 1; | |
6109 | ||
6110 | /* Skip givs that have mult_val == 0, since | |
6111 | they are really invariants. Also skip those that are | |
6112 | replaceable, since we know their lifetime doesn't contain | |
6113 | any biv update. */ | |
6114 | else if (giv->mult_val == const0_rtx || giv->replaceable) | |
6115 | continue; | |
6116 | ||
6117 | /* The only way we can allow this giv to derive another | |
6118 | is if this is a biv increment and we can form the product | |
6119 | of biv->add_val and giv->mult_val. In this case, we will | |
6120 | be able to compute a compensation. */ | |
6121 | else if (biv->insn == p) | |
6122 | { | |
e8cb4873 | 6123 | rtx ext_val_dummy; |
c160c628 | 6124 | |
e8cb4873 | 6125 | tem = 0; |
c160c628 | 6126 | if (biv->mult_val == const1_rtx) |
0534b804 MH |
6127 | tem = simplify_giv_expr (loop, |
6128 | gen_rtx_MULT (giv->mode, | |
38a448ca RH |
6129 | biv->add_val, |
6130 | giv->mult_val), | |
e8cb4873 | 6131 | &ext_val_dummy, &dummy); |
c160c628 RK |
6132 | |
6133 | if (tem && giv->derive_adjustment) | |
c5c76735 | 6134 | tem = simplify_giv_expr |
0534b804 MH |
6135 | (loop, |
6136 | gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment), | |
e8cb4873 | 6137 | &ext_val_dummy, &dummy); |
c5c76735 | 6138 | |
c160c628 | 6139 | if (tem) |
b4ad7b23 RS |
6140 | giv->derive_adjustment = tem; |
6141 | else | |
6142 | giv->cant_derive = 1; | |
6143 | } | |
7dcd3836 RK |
6144 | else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable) |
6145 | || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple)) | |
b4ad7b23 RS |
6146 | giv->cant_derive = 1; |
6147 | } | |
6148 | } | |
6149 | } | |
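/* Editor's worked example (not part of loop.c) of the compensation
   formed above: for a giv g = b * m + c defined before a biv update
   b = b + a in the same iteration, recomputing g from the updated b
   would overshoot by a * m.  That product of biv->add_val and
   giv->mult_val is what accumulates in giv->derive_adjustment, and
   simplify_giv_expr later subtracts it when expanding the giv.  */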
6150 | \f | |
6151 | /* Check whether an insn is an increment legitimate for a basic induction var. | |
7056f7e8 RS |
6152 | X is the source of insn P, or a part of it. |
6153 | MODE is the mode in which X should be interpreted. | |
6154 | ||
b4ad7b23 RS |
6155 | DEST_REG is the putative biv, also the destination of the insn. |
6156 | We accept patterns of these forms: | |
09d7f5a5 | 6157 | REG = REG + INVARIANT (includes REG = REG - CONSTANT) |
b4ad7b23 | 6158 | REG = INVARIANT + REG |
b4ad7b23 RS |
6159 | |
6160 | If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX, | |
3ec2b590 R |
6161 | store the additive term into *INC_VAL, and store the place where |
6162 | we found the additive term into *LOCATION. | |
b4ad7b23 RS |
6163 | |
6164 | If X is an assignment of an invariant into DEST_REG, we set | |
6165 | *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL. | |
6166 | ||
09d7f5a5 RK |
6167 | We also want to detect a BIV when it corresponds to a variable |
6168 | whose mode was promoted via PROMOTED_MODE. In that case, an increment | |
6169 | of the variable may be a PLUS that adds a SUBREG of that variable to | |
6170 | an invariant and then sign- or zero-extends the result of the PLUS | |
6171 | into the variable. | |
6172 | ||
6173 | Most GIVs in such cases will be in the promoted mode, since that is
6174 | probably the natural computation mode (and almost certainly the mode
6175 | used for addresses) on the machine. So we view the pseudo-reg containing | |
6176 | the variable as the BIV, as if it were simply incremented. | |
6177 | ||
6178 | Note that treating the entire pseudo as a BIV will result in making | |
6179 | simple increments to any GIVs based on it. However, if the variable | |
6180 | overflows in its declared mode but not its promoted mode, the result will | |
e6fcb60d | 6181 | be incorrect. This is acceptable if the variable is signed, since |
09d7f5a5 RK |
6182 | overflows in such cases are undefined, but not if it is unsigned, since |
6183 | those overflows are defined. So we only check for SIGN_EXTEND and | |
6184 | not ZERO_EXTEND. | |
6185 | ||
6186 | If we cannot find a biv, we return 0. */ | |
b4ad7b23 RS |
6187 | |
6188 | static int | |
0c20a65f AJ |
6189 | basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode, |
6190 | rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val, | |
6191 | rtx **location) | |
b4ad7b23 | 6192 | { |
b3694847 | 6193 | enum rtx_code code; |
3ec2b590 | 6194 | rtx *argp, arg; |
16f6812f | 6195 | rtx insn, set = 0, last, inc; |
b4ad7b23 RS |
6196 | |
6197 | code = GET_CODE (x); | |
69ba6af3 | 6198 | *location = NULL; |
b4ad7b23 RS |
6199 | switch (code) |
6200 | { | |
6201 | case PLUS: | |
45f97e2e | 6202 | if (rtx_equal_p (XEXP (x, 0), dest_reg) |
09d7f5a5 RK |
6203 | || (GET_CODE (XEXP (x, 0)) == SUBREG |
6204 | && SUBREG_PROMOTED_VAR_P (XEXP (x, 0)) | |
6205 | && SUBREG_REG (XEXP (x, 0)) == dest_reg)) | |
3ec2b590 R |
6206 | { |
6207 | argp = &XEXP (x, 1); | |
6208 | } | |
45f97e2e | 6209 | else if (rtx_equal_p (XEXP (x, 1), dest_reg) |
09d7f5a5 | 6210 | || (GET_CODE (XEXP (x, 1)) == SUBREG |
b81fd0f4 RS |
6211 | && SUBREG_PROMOTED_VAR_P (XEXP (x, 1)) |
6212 | && SUBREG_REG (XEXP (x, 1)) == dest_reg)) | |
3ec2b590 R |
6213 | { |
6214 | argp = &XEXP (x, 0); | |
6215 | } | |
b4ad7b23 | 6216 | else |
e6fcb60d | 6217 | return 0; |
b4ad7b23 | 6218 | |
3ec2b590 | 6219 | arg = *argp; |
0534b804 | 6220 | if (loop_invariant_p (loop, arg) != 1) |
b4ad7b23 RS |
6221 | return 0; |
6222 | ||
16f6812f JJ |
6223 | /* convert_modes can emit new instructions, e.g. when arg is a loop |
6224 | invariant MEM and dest_reg has a different mode. | |
6225 | These instructions would be emitted after the end of the function | |
6226 | and then *inc_val would be an uninitialized pseudo.
6227 | Detect this and bail in this case.
6228 | Other ways to solve this would be to introduce a convert_modes
6229 | variant which is allowed to fail but not allowed to emit new
6230 | instructions; to emit these instructions before the loop start and
6231 | let them be garbage collected if *inc_val is never used; or to save
6232 | the *inc_val initialization sequence generated here and, when
6233 | *inc_val is actually going to be used, emit it at some suitable place. */
6234 | last = get_last_insn (); | |
6235 | inc = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0); | |
6236 | if (get_last_insn () != last) | |
6237 | { | |
6238 | delete_insns_since (last); | |
6239 | return 0; | |
6240 | } | |
6241 | ||
6242 | *inc_val = inc; | |
b4ad7b23 | 6243 | *mult_val = const1_rtx; |
3ec2b590 | 6244 | *location = argp; |
b4ad7b23 RS |
6245 | return 1; |
6246 | ||
09d7f5a5 | 6247 | case SUBREG: |
b76b08ef RK |
6248 | /* If what's inside the SUBREG is a BIV, then so is the SUBREG. This will
6249 | handle addition of promoted variables. | |
6250 | ??? The comment at the start of this function is wrong: promoted | |
6251 | variable increments don't look like it says they do. */ | |
6252 | return basic_induction_var (loop, SUBREG_REG (x), | |
6253 | GET_MODE (SUBREG_REG (x)), | |
6254 | dest_reg, p, inc_val, mult_val, location); | |
b4ad7b23 | 6255 | |
09d7f5a5 | 6256 | case REG: |
45f97e2e | 6257 | /* If this register is assigned in a previous insn, look at its |
09d7f5a5 RK |
6258 | source, but don't go outside the loop or past a label. */ |
6259 | ||
af198097 R |
6260 | /* If this sets a register to itself, we would repeat any previous |
6261 | biv increment if we applied this strategy blindly. */ | |
6262 | if (rtx_equal_p (dest_reg, x)) | |
6263 | return 0; | |
6264 | ||
45f97e2e RH |
6265 | insn = p; |
6266 | while (1) | |
6267 | { | |
7dbe6ae9 | 6268 | rtx dest; |
e6fcb60d KH |
6269 | do |
6270 | { | |
6271 | insn = PREV_INSN (insn); | |
6272 | } | |
6273 | while (insn && GET_CODE (insn) == NOTE | |
6274 | && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG); | |
09d7f5a5 | 6275 | |
e6fcb60d | 6276 | if (!insn) |
45f97e2e RH |
6277 | break; |
6278 | set = single_set (insn); | |
6279 | if (set == 0) | |
6280 | break; | |
7dbe6ae9 BS |
6281 | dest = SET_DEST (set); |
6282 | if (dest == x | |
6283 | || (GET_CODE (dest) == SUBREG | |
6284 | && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD) | |
6285 | && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT) | |
6286 | && SUBREG_REG (dest) == x)) | |
6287 | return basic_induction_var (loop, SET_SRC (set), | |
6288 | (GET_MODE (SET_SRC (set)) == VOIDmode | |
6289 | ? GET_MODE (x) | |
6290 | : GET_MODE (SET_SRC (set))), | |
6291 | dest_reg, insn, | |
6292 | inc_val, mult_val, location); | |
6293 | ||
6294 | while (GET_CODE (dest) == SIGN_EXTRACT | |
6295 | || GET_CODE (dest) == ZERO_EXTRACT | |
6296 | || GET_CODE (dest) == SUBREG | |
6297 | || GET_CODE (dest) == STRICT_LOW_PART) | |
6298 | dest = XEXP (dest, 0); | |
6299 | if (dest == x) | |
6300 | break; | |
45f97e2e | 6301 | } |
fd5d5b07 | 6302 | /* Fall through. */ |
b4ad7b23 RS |
6303 | |
6304 | /* Can accept constant setting of biv only when inside innermost loop.
6b8c9327 | 6305 | Otherwise, a biv of an inner loop may be incorrectly recognized |
b4ad7b23 RS |
6306 | as a biv of the outer loop, |
6307 | causing code to be moved INTO the inner loop. */ | |
6308 | case MEM: | |
0534b804 | 6309 | if (loop_invariant_p (loop, x) != 1) |
b4ad7b23 RS |
6310 | return 0; |
6311 | case CONST_INT: | |
6312 | case SYMBOL_REF: | |
6313 | case CONST: | |
829002bb BM |
6314 | /* convert_modes aborts if we try to convert to or from CCmode, so just |
6315 | exclude that case. It is very unlikely that a condition code value | |
4061c1a3 JJ |
6316 | would be a useful iterator anyways. convert_modes aborts if we try to |
6317 | convert a float mode to non-float or vice versa too. */ | |
0534b804 | 6318 | if (loop->level == 1 |
4061c1a3 JJ |
6319 | && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg)) |
6320 | && GET_MODE_CLASS (mode) != MODE_CC) | |
fd5d5b07 | 6321 | { |
7056f7e8 | 6322 | /* Possible bug here? Perhaps we don't know the mode of X. */ |
16f6812f JJ |
6323 | last = get_last_insn (); |
6324 | inc = convert_modes (GET_MODE (dest_reg), mode, x, 0); | |
6325 | if (get_last_insn () != last) | |
6326 | { | |
6327 | delete_insns_since (last); | |
6328 | return 0; | |
6329 | } | |
6330 | ||
6331 | *inc_val = inc; | |
e6fcb60d KH |
6332 | *mult_val = const0_rtx; |
6333 | return 1; | |
6334 | } | |
b4ad7b23 | 6335 | else |
e6fcb60d | 6336 | return 0; |
b4ad7b23 | 6337 | |
09d7f5a5 | 6338 | case SIGN_EXTEND: |
4fa26a60 RS |
6339 | /* Ignore this BIV if signed arithmetic overflow is defined. */ |
6340 | if (flag_wrapv) | |
6341 | return 0; | |
0534b804 | 6342 | return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)), |
98d1cd45 | 6343 | dest_reg, p, inc_val, mult_val, location); |
45f97e2e | 6344 | |
09d7f5a5 RK |
6345 | case ASHIFTRT: |
6346 | /* Similar, since this can be a sign extension. */ | |
6347 | for (insn = PREV_INSN (p); | |
6348 | (insn && GET_CODE (insn) == NOTE | |
6349 | && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG); | |
6350 | insn = PREV_INSN (insn)) | |
6351 | ; | |
6352 | ||
6353 | if (insn) | |
6354 | set = single_set (insn); | |
6355 | ||
af198097 R |
6356 | if (! rtx_equal_p (dest_reg, XEXP (x, 0)) |
6357 | && set && SET_DEST (set) == XEXP (x, 0) | |
09d7f5a5 RK |
6358 | && GET_CODE (XEXP (x, 1)) == CONST_INT |
6359 | && INTVAL (XEXP (x, 1)) >= 0 | |
6360 | && GET_CODE (SET_SRC (set)) == ASHIFT | |
98d1cd45 R |
6361 | && XEXP (x, 1) == XEXP (SET_SRC (set), 1)) |
6362 | return basic_induction_var (loop, XEXP (SET_SRC (set), 0), | |
6363 | GET_MODE (XEXP (x, 0)), | |
6364 | dest_reg, insn, inc_val, mult_val, | |
6365 | location); | |
09d7f5a5 RK |
6366 | return 0; |
6367 | ||
b4ad7b23 RS |
6368 | default: |
6369 | return 0; | |
6370 | } | |
6371 | } | |
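/* Editor's illustration (not part of loop.c): the two insn shapes the
   PLUS case above accepts for a biv `i'.  Both are hypothetical RTL.

       (set (reg i) (plus:SI (reg i) (const_int 8)))   i = i + 8
       (set (reg i) (plus:SI (reg n) (reg i)))         i = n + i,
                                                       n loop-invariant

   In each, *MULT_VAL becomes const1_rtx and *INC_VAL the invariant
   addend.  */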
6372 | \f | |
6373 | /* A general induction variable (giv) is any quantity that is a linear | |
6374 | function of a basic induction variable, | |
6375 | i.e. giv = biv * mult_val + add_val. | |
6376 | The coefficients can be any loop invariant quantity. | |
6377 | A giv need not be computed directly from the biv; | |
6378 | it can be computed by way of other givs. */ | |
6379 | ||
6380 | /* Determine whether X computes a giv. | |
6381 | If it does, return a nonzero value | |
6382 | which is the benefit from eliminating the computation of X; | |
6383 | set *SRC_REG to the register of the biv that it is computed from; | |
6384 | set *ADD_VAL and *MULT_VAL to the coefficients, | |
6385 | such that the value of X is biv * mult + add; */ | |
6386 | ||
6387 | static int | |
0c20a65f AJ |
6388 | general_induction_var (const struct loop *loop, rtx x, rtx *src_reg, |
6389 | rtx *add_val, rtx *mult_val, rtx *ext_val, | |
6390 | int is_addr, int *pbenefit, | |
6391 | enum machine_mode addr_mode) | |
b4ad7b23 | 6392 | { |
ed5bb68d | 6393 | struct loop_ivs *ivs = LOOP_IVS (loop); |
b4ad7b23 | 6394 | rtx orig_x = x; |
b4ad7b23 RS |
6395 | |
6396 | /* If this is an invariant, forget it, it isn't a giv. */ | |
0534b804 | 6397 | if (loop_invariant_p (loop, x) == 1) |
b4ad7b23 RS |
6398 | return 0; |
6399 | ||
45f97e2e | 6400 | *pbenefit = 0; |
e8cb4873 RH |
6401 | *ext_val = NULL_RTX; |
6402 | x = simplify_giv_expr (loop, x, ext_val, pbenefit); | |
b4ad7b23 | 6403 | if (x == 0) |
1f8f4a0b | 6404 | return 0; |
b4ad7b23 RS |
6405 | |
6406 | switch (GET_CODE (x)) | |
6407 | { | |
6408 | case USE: | |
6409 | case CONST_INT: | |
6410 | /* Since this is now an invariant and wasn't before, it must be a giv | |
6411 | with MULT_VAL == 0. It doesn't matter which BIV we associate this | |
6412 | with. */ | |
14be28e5 | 6413 | *src_reg = ivs->list->biv->dest_reg; |
b4ad7b23 RS |
6414 | *mult_val = const0_rtx; |
6415 | *add_val = x; | |
6416 | break; | |
6417 | ||
6418 | case REG: | |
6419 | /* This is equivalent to a BIV. */ | |
6420 | *src_reg = x; | |
6421 | *mult_val = const1_rtx; | |
6422 | *add_val = const0_rtx; | |
6423 | break; | |
6424 | ||
6425 | case PLUS: | |
6426 | /* Either (plus (biv) (invar)) or | |
6427 | (plus (mult (biv) (invar_1)) (invar_2)). */ | |
6428 | if (GET_CODE (XEXP (x, 0)) == MULT) | |
6429 | { | |
6430 | *src_reg = XEXP (XEXP (x, 0), 0); | |
6431 | *mult_val = XEXP (XEXP (x, 0), 1); | |
6432 | } | |
6433 | else | |
6434 | { | |
6435 | *src_reg = XEXP (x, 0); | |
6436 | *mult_val = const1_rtx; | |
6437 | } | |
6438 | *add_val = XEXP (x, 1); | |
6439 | break; | |
6440 | ||
6441 | case MULT: | |
6442 | /* ADD_VAL is zero. */ | |
6443 | *src_reg = XEXP (x, 0); | |
6444 | *mult_val = XEXP (x, 1); | |
6445 | *add_val = const0_rtx; | |
6446 | break; | |
6447 | ||
6448 | default: | |
6449 | abort (); | |
6450 | } | |
6451 | ||
6452 | /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
6453 | one unless they are CONST_INT). */
6454 | if (GET_CODE (*add_val) == USE) | |
6455 | *add_val = XEXP (*add_val, 0); | |
6456 | if (GET_CODE (*mult_val) == USE) | |
6457 | *mult_val = XEXP (*mult_val, 0); | |
6458 | ||
45f97e2e | 6459 | if (is_addr) |
01329426 | 6460 | *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost; |
45f97e2e RH |
6461 | else |
6462 | *pbenefit += rtx_cost (orig_x, SET); | |
b4ad7b23 | 6463 | |
45f97e2e | 6464 | /* Always return true if this is a giv so it will be detected as such, |
e6fcb60d KH |
6465 | even if the benefit is zero or negative. This allows elimination |
6466 | of bivs that might otherwise not be eliminated. */ | |
6467 | return 1; | |
b4ad7b23 RS |
6468 | } |
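/* Editor's illustration (not part of loop.c): decompositions this
   function produces for a hypothetical biv `i'.

       X              *SRC_REG   *MULT_VAL   *ADD_VAL
       i              i          1           0          (REG case)
       i * 4          i          4           0          (MULT case)
       i * 4 + 10     i          4           10         (PLUS case)  */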
6469 | \f | |
6470 | /* Given an expression, X, try to form it as a linear function of a biv. | |
6471 | We will canonicalize it to be of the form | |
6b8c9327 | 6472 | (plus (mult (BIV) (invar_1)) |
b4ad7b23 | 6473 | (invar_2)) |
c5b7917e | 6474 | with possible degeneracies. |
b4ad7b23 RS |
6475 | |
6476 | The invariant expressions must each be of a form that can be used as a | |
6477 | machine operand. We surround them with a USE rtx (a hack, but localized
6478 | and certainly unambiguous!) if not a CONST_INT for simplicity in this | |
6479 | routine; it is the caller's responsibility to strip them. | |
6480 | ||
6481 | If no such canonicalization is possible (i.e., two biv's are used or an | |
6482 | expression that is neither invariant nor a biv or giv), this routine | |
6483 | returns 0. | |
6484 | ||
cc2902df | 6485 | For a nonzero return, the result will have a code of CONST_INT, USE, |
e6fcb60d | 6486 | REG (for a BIV), PLUS, or MULT. No other codes will occur. |
b4ad7b23 RS |
6487 | |
6488 | *BENEFIT will be incremented by the benefit of any sub-giv encountered. */ | |
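/* Editor's worked example (not part of loop.c): for a biv `i', the
   expression (i - 2) * 3 canonicalizes step by step as
   (i + (-2)) * 3, then i * 3 + (-2) * 3, yielding
   (plus (mult (reg i) (const_int 3)) (const_int -6)),
   the canonical form described above.  */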
6489 | ||
0c20a65f AJ |
6490 | static rtx sge_plus (enum machine_mode, rtx, rtx); |
6491 | static rtx sge_plus_constant (rtx, rtx); | |
45f97e2e | 6492 | |
b4ad7b23 | 6493 | static rtx |
0c20a65f | 6494 | simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit) |
b4ad7b23 | 6495 | { |
ed5bb68d | 6496 | struct loop_ivs *ivs = LOOP_IVS (loop); |
1ecd860b | 6497 | struct loop_regs *regs = LOOP_REGS (loop); |
b4ad7b23 RS |
6498 | enum machine_mode mode = GET_MODE (x); |
6499 | rtx arg0, arg1; | |
6500 | rtx tem; | |
6501 | ||
6502 | /* If this is not an integer mode, or if we cannot do arithmetic in this | |
6503 | mode, this can't be a giv. */ | |
6504 | if (mode != VOIDmode | |
6505 | && (GET_MODE_CLASS (mode) != MODE_INT | |
5fd8383e | 6506 | || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)) |
45f97e2e | 6507 | return NULL_RTX; |
b4ad7b23 RS |
6508 | |
6509 | switch (GET_CODE (x)) | |
6510 | { | |
6511 | case PLUS: | |
e8cb4873 RH |
6512 | arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit); |
6513 | arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit); | |
b4ad7b23 | 6514 | if (arg0 == 0 || arg1 == 0) |
45f97e2e | 6515 | return NULL_RTX; |
b4ad7b23 RS |
6516 | |
6517 | /* Put constant last, CONST_INT last if both constant. */ | |
6518 | if ((GET_CODE (arg0) == USE | |
6519 | || GET_CODE (arg0) == CONST_INT) | |
45f97e2e RH |
6520 | && ! ((GET_CODE (arg0) == USE |
6521 | && GET_CODE (arg1) == USE) | |
6522 | || GET_CODE (arg1) == CONST_INT)) | |
b4ad7b23 RS |
6523 | tem = arg0, arg0 = arg1, arg1 = tem; |
6524 | ||
6525 | /* Handle addition of zero, then addition of an invariant. */ | |
6526 | if (arg1 == const0_rtx) | |
6527 | return arg0; | |
6528 | else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE) | |
6529 | switch (GET_CODE (arg0)) | |
6530 | { | |
6531 | case CONST_INT: | |
6532 | case USE: | |
45f97e2e | 6533 | /* Adding two invariants must result in an invariant, so enclose |
6b8c9327 | 6534 | the addition operation inside a USE and return it. */
b4ad7b23 RS |
6535 | if (GET_CODE (arg0) == USE) |
6536 | arg0 = XEXP (arg0, 0); | |
da0af5a5 JL |
6537 | if (GET_CODE (arg1) == USE) |
6538 | arg1 = XEXP (arg1, 0); | |
6539 | ||
45f97e2e RH |
6540 | if (GET_CODE (arg0) == CONST_INT) |
6541 | tem = arg0, arg0 = arg1, arg1 = tem; | |
6542 | if (GET_CODE (arg1) == CONST_INT) | |
6543 | tem = sge_plus_constant (arg0, arg1); | |
da0af5a5 | 6544 | else |
45f97e2e | 6545 | tem = sge_plus (mode, arg0, arg1); |
b4ad7b23 | 6546 | |
45f97e2e RH |
6547 | if (GET_CODE (tem) != CONST_INT) |
6548 | tem = gen_rtx_USE (mode, tem); | |
b4ad7b23 RS |
6549 | return tem; |
6550 | ||
6551 | case REG: | |
6552 | case MULT: | |
6553 | /* biv + invar or mult + invar. Return sum. */ | |
38a448ca | 6554 | return gen_rtx_PLUS (mode, arg0, arg1); |
b4ad7b23 RS |
6555 | |
6556 | case PLUS: | |
6557 | /* (a + invar_1) + invar_2. Associate. */ | |
c5c76735 | 6558 | return |
0534b804 MH |
6559 | simplify_giv_expr (loop, |
6560 | gen_rtx_PLUS (mode, | |
c5c76735 JL |
6561 | XEXP (arg0, 0), |
6562 | gen_rtx_PLUS (mode, | |
6563 | XEXP (arg0, 1), | |
6564 | arg1)), | |
e8cb4873 | 6565 | ext_val, benefit); |
b4ad7b23 RS |
6566 | |
6567 | default: | |
6568 | abort (); | |
6569 | } | |
6570 | ||
6571 | /* Each argument must be either REG, PLUS, or MULT. Convert REG to | |
6572 | MULT to reduce cases. */ | |
6573 | if (GET_CODE (arg0) == REG) | |
38a448ca | 6574 | arg0 = gen_rtx_MULT (mode, arg0, const1_rtx); |
b4ad7b23 | 6575 | if (GET_CODE (arg1) == REG) |
38a448ca | 6576 | arg1 = gen_rtx_MULT (mode, arg1, const1_rtx); |
b4ad7b23 RS |
6577 | |
6578 | /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT. | |
6579 | Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT. | |
6580 | Recurse to associate the second PLUS. */ | |
6581 | if (GET_CODE (arg1) == MULT) | |
6582 | tem = arg0, arg0 = arg1, arg1 = tem; | |
6583 | ||
6584 | if (GET_CODE (arg1) == PLUS) | |
505ddab6 KH |
6585 | return |
6586 | simplify_giv_expr (loop, | |
6587 | gen_rtx_PLUS (mode, | |
6588 | gen_rtx_PLUS (mode, arg0, | |
6589 | XEXP (arg1, 0)), | |
6590 | XEXP (arg1, 1)), | |
6591 | ext_val, benefit); | |
b4ad7b23 RS |
6592 | |
6593 | /* Now must have MULT + MULT. Distribute if same biv, else not giv. */ | |
6594 | if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT) | |
45f97e2e | 6595 | return NULL_RTX; |
b4ad7b23 | 6596 | |
45f97e2e RH |
6597 | if (!rtx_equal_p (arg0, arg1)) |
6598 | return NULL_RTX; | |
b4ad7b23 | 6599 | |
0534b804 MH |
6600 | return simplify_giv_expr (loop, |
6601 | gen_rtx_MULT (mode, | |
38a448ca RH |
6602 | XEXP (arg0, 0), |
6603 | gen_rtx_PLUS (mode, | |
6604 | XEXP (arg0, 1), | |
6605 | XEXP (arg1, 1))), | |
e8cb4873 | 6606 | ext_val, benefit); |
b4ad7b23 RS |
6607 | |
6608 | case MINUS: | |
0f41302f | 6609 | /* Handle "a - b" as "a + b * (-1)". */ |
0534b804 MH |
6610 | return simplify_giv_expr (loop, |
6611 | gen_rtx_PLUS (mode, | |
38a448ca | 6612 | XEXP (x, 0), |
c5c76735 JL |
6613 | gen_rtx_MULT (mode, |
6614 | XEXP (x, 1), | |
38a448ca | 6615 | constm1_rtx)), |
e8cb4873 | 6616 | ext_val, benefit); |
b4ad7b23 RS |
6617 | |
6618 | case MULT: | |
e8cb4873 RH |
6619 | arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit); |
6620 | arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit); | |
b4ad7b23 | 6621 | if (arg0 == 0 || arg1 == 0) |
45f97e2e | 6622 | return NULL_RTX; |
b4ad7b23 RS |
6623 | |
6624 | /* Put constant last, CONST_INT last if both constant. */ | |
6625 | if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT) | |
6626 | && GET_CODE (arg1) != CONST_INT) | |
6627 | tem = arg0, arg0 = arg1, arg1 = tem; | |
6628 | ||
6629 | /* If second argument is not now constant, not giv. */ | |
6630 | if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT) | |
45f97e2e | 6631 | return NULL_RTX; |
b4ad7b23 RS |
6632 | |
6633 | /* Handle multiply by 0 or 1. */ | |
6634 | if (arg1 == const0_rtx) | |
6635 | return const0_rtx; | |
6636 | ||
6637 | else if (arg1 == const1_rtx) | |
6638 | return arg0; | |
6639 | ||
6640 | switch (GET_CODE (arg0)) | |
6641 | { | |
6642 | case REG: | |
6643 | /* biv * invar. Done. */ | |
38a448ca | 6644 | return gen_rtx_MULT (mode, arg0, arg1); |
b4ad7b23 RS |
6645 | |
6646 | case CONST_INT: | |
6647 | /* Product of two constants. */ | |
5fd8383e | 6648 | return GEN_INT (INTVAL (arg0) * INTVAL (arg1)); |
b4ad7b23 RS |
6649 | |
6650 | case USE: | |
29aef5ca | 6651 | /* invar * invar is a giv, but attempt to simplify it somehow. */ |
45f97e2e RH |
6652 | if (GET_CODE (arg1) != CONST_INT) |
6653 | return NULL_RTX; | |
6654 | ||
6655 | arg0 = XEXP (arg0, 0); | |
29aef5ca | 6656 | if (GET_CODE (arg0) == MULT) |
45f97e2e | 6657 | { |
29aef5ca JH |
6658 | /* (invar_0 * invar_1) * invar_2. Associate. */ |
6659 | return simplify_giv_expr (loop, | |
6660 | gen_rtx_MULT (mode, | |
6661 | XEXP (arg0, 0), | |
6662 | gen_rtx_MULT (mode, | |
6663 | XEXP (arg0, | |
6664 | 1), | |
6665 | arg1)), | |
e8cb4873 | 6666 | ext_val, benefit); |
45f97e2e | 6667 | } |
3d042e77 | 6668 | /* Propagate the MULT expressions to the innermost nodes. */
29aef5ca JH |
6669 | else if (GET_CODE (arg0) == PLUS) |
6670 | { | |
6671 | /* (invar_0 + invar_1) * invar_2. Distribute. */ | |
6672 | return simplify_giv_expr (loop, | |
6673 | gen_rtx_PLUS (mode, | |
6674 | gen_rtx_MULT (mode, | |
6675 | XEXP (arg0, | |
6676 | 0), | |
6677 | arg1), | |
6678 | gen_rtx_MULT (mode, | |
6679 | XEXP (arg0, | |
6680 | 1), | |
6681 | arg1)), | |
e8cb4873 | 6682 | ext_val, benefit); |
29aef5ca JH |
6683 | } |
6684 | return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1)); | |
b4ad7b23 RS |
6685 | |
6686 | case MULT: | |
6687 | /* (a * invar_1) * invar_2. Associate. */ | |
0534b804 MH |
6688 | return simplify_giv_expr (loop, |
6689 | gen_rtx_MULT (mode, | |
c5c76735 | 6690 | XEXP (arg0, 0), |
38a448ca RH |
6691 | gen_rtx_MULT (mode, |
6692 | XEXP (arg0, 1), | |
6693 | arg1)), | |
e8cb4873 | 6694 | ext_val, benefit); |
b4ad7b23 RS |
6695 | |
6696 | case PLUS: | |
6697 | /* (a + invar_1) * invar_2. Distribute. */ | |
0534b804 MH |
6698 | return simplify_giv_expr (loop, |
6699 | gen_rtx_PLUS (mode, | |
38a448ca RH |
6700 | gen_rtx_MULT (mode, |
6701 | XEXP (arg0, 0), | |
6702 | arg1), | |
6703 | gen_rtx_MULT (mode, | |
6704 | XEXP (arg0, 1), | |
6705 | arg1)), | |
e8cb4873 | 6706 | ext_val, benefit); |
b4ad7b23 RS |
6707 | |
6708 | default: | |
6709 | abort (); | |
6710 | } | |
6711 | ||
6712 | case ASHIFT: | |
b4ad7b23 RS |
6713 | /* Shift by constant is multiply by power of two. */ |
6714 | if (GET_CODE (XEXP (x, 1)) != CONST_INT) | |
6715 | return 0; | |
6716 | ||
c5c76735 | 6717 | return |
0534b804 MH |
6718 | simplify_giv_expr (loop, |
6719 | gen_rtx_MULT (mode, | |
c5c76735 JL |
6720 | XEXP (x, 0), |
6721 | GEN_INT ((HOST_WIDE_INT) 1 | |
6722 | << INTVAL (XEXP (x, 1)))), | |
e8cb4873 | 6723 | ext_val, benefit); |
b4ad7b23 RS |
6724 | |
6725 | case NEG: | |
6726 | /* "-a" is "a * (-1)" */ | |
0534b804 MH |
6727 | return simplify_giv_expr (loop, |
6728 | gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx), | |
e8cb4873 | 6729 | ext_val, benefit); |
b4ad7b23 RS |
6730 | |
6731 | case NOT: | |
6732 | /* "~a" is "-a - 1". Silly, but easy. */ | |
0534b804 MH |
6733 | return simplify_giv_expr (loop, |
6734 | gen_rtx_MINUS (mode, | |
38a448ca RH |
6735 | gen_rtx_NEG (mode, XEXP (x, 0)), |
6736 | const1_rtx), | |
e8cb4873 | 6737 | ext_val, benefit); |
b4ad7b23 RS |
6738 | |
6739 | case USE: | |
6740 | /* Already in proper form for invariant. */ | |
6741 | return x; | |
6742 | ||
e8cb4873 RH |
6743 | case SIGN_EXTEND: |
6744 | case ZERO_EXTEND: | |
6745 | case TRUNCATE: | |
6746 | /* Conditionally recognize extensions of simple IVs. After we've | |
fd5d5b07 | 6747 | computed loop traversal counts and verified the range of the |
e8cb4873 RH |
6748 | source IV, we'll reevaluate this as a GIV. */ |
6749 | if (*ext_val == NULL_RTX) | |
6750 | { | |
6751 | arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit); | |
6752 | if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG) | |
6753 | { | |
6754 | *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0); | |
6755 | return arg0; | |
6756 | } | |
6757 | } | |
6758 | goto do_default; | |
6759 | ||
fd5d5b07 | 6760 | case REG: |
b4ad7b23 RS |
6761 | /* If this is a new register, we can't deal with it. */ |
6762 | if (REGNO (x) >= max_reg_before_loop) | |
6763 | return 0; | |
6764 | ||
6765 | /* Check for biv or giv. */ | |
ed5bb68d | 6766 | switch (REG_IV_TYPE (ivs, REGNO (x))) |
b4ad7b23 RS |
6767 | { |
6768 | case BASIC_INDUCT: | |
6769 | return x; | |
6770 | case GENERAL_INDUCT: | |
6771 | { | |
ed5bb68d | 6772 | struct induction *v = REG_IV_INFO (ivs, REGNO (x)); |
b4ad7b23 RS |
6773 | |
6774 | /* Form expression from giv and add benefit. Ensure this giv | |
6775 | can derive another and subtract any needed adjustment if so. */ | |
630c79be BS |
6776 | |
6777 | /* Increasing the benefit here is risky. The only case in which it | |
6778 | is arguably correct is if this is the only use of V. In other | |
6779 | cases, this will artificially inflate the benefit of the current | |
6780 | giv, and lead to suboptimal code. Thus, it is disabled, since | |
6781 | potentially not reducing an only marginally beneficial giv is | |
6782 | less harmful than reducing many givs that are not really | |
6783 | beneficial. */ | |
6784 | { | |
f1d4ac80 | 6785 | rtx single_use = regs->array[REGNO (x)].single_usage; |
630c79be BS |
6786 | if (single_use && single_use != const0_rtx) |
6787 | *benefit += v->benefit; | |
6788 | } | |
6789 | ||
b4ad7b23 RS |
6790 | if (v->cant_derive) |
6791 | return 0; | |
6792 | ||
c5c76735 JL |
6793 | tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode, |
6794 | v->src_reg, v->mult_val), | |
6795 | v->add_val); | |
6796 | ||
b4ad7b23 | 6797 | if (v->derive_adjustment) |
38a448ca | 6798 | tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment); |
e8cb4873 RH |
6799 | arg0 = simplify_giv_expr (loop, tem, ext_val, benefit); |
6800 | if (*ext_val) | |
6801 | { | |
affd4f33 | 6802 | if (!v->ext_dependent) |
e8cb4873 RH |
6803 | return arg0; |
6804 | } | |
6805 | else | |
6806 | { | |
affd4f33 | 6807 | *ext_val = v->ext_dependent; |
e8cb4873 RH |
6808 | return arg0; |
6809 | } | |
6810 | return 0; | |
b4ad7b23 | 6811 | } |
e9a25f70 JL |
6812 | |
6813 | default: | |
e8cb4873 | 6814 | do_default: |
45f97e2e RH |
6815 | /* If it isn't an induction variable, and it is invariant, we |
6816 | may be able to simplify things further by looking through | |
6817 | the bits we just moved outside the loop. */ | |
0534b804 | 6818 | if (loop_invariant_p (loop, x) == 1) |
45f97e2e RH |
6819 | { |
6820 | struct movable *m; | |
6ec92010 | 6821 | struct loop_movables *movables = LOOP_MOVABLES (loop); |
45f97e2e | 6822 | |
6ec92010 | 6823 | for (m = movables->head; m; m = m->next) |
45f97e2e RH |
6824 | if (rtx_equal_p (x, m->set_dest)) |
6825 | { | |
6826 | /* Ok, we found a match. Substitute and simplify. */ | |
6827 | ||
e6fcb60d | 6828 | /* If we match another movable, we must use that, as |
45f97e2e RH |
6829 | this one is going away. */ |
6830 | if (m->match) | |
e6fcb60d | 6831 | return simplify_giv_expr (loop, m->match->set_dest, |
e8cb4873 | 6832 | ext_val, benefit); |
45f97e2e | 6833 | |
cc2902df | 6834 | /* If consec is nonzero, this is a member of a group of |
45f97e2e RH |
6835 | instructions that were moved together. We handle this |
6836 | case only to the point of seeking to the last insn and | |
6837 | looking for a REG_EQUAL. Fail if we don't find one. */ | |
6838 | if (m->consec != 0) | |
6839 | { | |
6840 | int i = m->consec; | |
6841 | tem = m->insn; | |
fd5d5b07 KH |
6842 | do |
6843 | { | |
6844 | tem = NEXT_INSN (tem); | |
6845 | } | |
6846 | while (--i > 0); | |
45f97e2e RH |
6847 | |
6848 | tem = find_reg_note (tem, REG_EQUAL, NULL_RTX); | |
6849 | if (tem) | |
6850 | tem = XEXP (tem, 0); | |
6851 | } | |
6852 | else | |
6853 | { | |
e6fcb60d KH |
6854 | tem = single_set (m->insn); |
6855 | if (tem) | |
45f97e2e RH |
6856 | tem = SET_SRC (tem); |
6857 | } | |
6858 | ||
6859 | if (tem) | |
6860 | { | |
6861 | /* What we are most interested in is pointer | |
6862 | arithmetic on invariants -- only take | |
6863 | patterns we may be able to do something with. */ | |
6864 | if (GET_CODE (tem) == PLUS | |
6865 | || GET_CODE (tem) == MULT | |
6866 | || GET_CODE (tem) == ASHIFT | |
6867 | || GET_CODE (tem) == CONST_INT | |
6868 | || GET_CODE (tem) == SYMBOL_REF) | |
6869 | { | |
e8cb4873 RH |
6870 | tem = simplify_giv_expr (loop, tem, ext_val, |
6871 | benefit); | |
45f97e2e RH |
6872 | if (tem) |
6873 | return tem; | |
6874 | } | |
6875 | else if (GET_CODE (tem) == CONST | |
fd5d5b07 KH |
6876 | && GET_CODE (XEXP (tem, 0)) == PLUS |
6877 | && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF | |
6878 | && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT) | |
45f97e2e | 6879 | { |
0534b804 | 6880 | tem = simplify_giv_expr (loop, XEXP (tem, 0), |
e8cb4873 | 6881 | ext_val, benefit); |
45f97e2e RH |
6882 | if (tem) |
6883 | return tem; | |
6884 | } | |
6885 | } | |
6886 | break; | |
6887 | } | |
6888 | } | |
e9a25f70 | 6889 | break; |
b4ad7b23 RS |
6890 | } |
6891 | ||
6892 | /* Fall through to general case. */ | |
6893 | default: | |
6894 | /* If invariant, return as USE (unless CONST_INT). | |
6895 | Otherwise, not giv. */ | |
6896 | if (GET_CODE (x) == USE) | |
6897 | x = XEXP (x, 0); | |
6898 | ||
0534b804 | 6899 | if (loop_invariant_p (loop, x) == 1) |
b4ad7b23 RS |
6900 | { |
6901 | if (GET_CODE (x) == CONST_INT) | |
6902 | return x; | |
45f97e2e RH |
6903 | if (GET_CODE (x) == CONST |
6904 | && GET_CODE (XEXP (x, 0)) == PLUS | |
6905 | && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF | |
6906 | && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT) | |
6907 | x = XEXP (x, 0); | |
6908 | return gen_rtx_USE (mode, x); | |
b4ad7b23 RS |
6909 | } |
6910 | else | |
6911 | return 0; | |
6912 | } | |
6913 | } | |
45f97e2e RH |
6914 | |
6915 | /* This routine folds invariants such that there is only ever one | |
6916 | CONST_INT in the summation. It is only used by simplify_giv_expr. */ | |
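/* An illustrative fold: applied to x = (plus r1 (const_int 4)) and
   c = (const_int 3), this returns (plus r1 (const_int 7)), so at
   most one CONST_INT ever survives in the summation.  */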
6917 | ||
6918 | static rtx | |
0c20a65f | 6919 | sge_plus_constant (rtx x, rtx c) |
45f97e2e RH |
6920 | { |
6921 | if (GET_CODE (x) == CONST_INT) | |
6922 | return GEN_INT (INTVAL (x) + INTVAL (c)); | |
6923 | else if (GET_CODE (x) != PLUS) | |
6924 | return gen_rtx_PLUS (GET_MODE (x), x, c); | |
6925 | else if (GET_CODE (XEXP (x, 1)) == CONST_INT) | |
6926 | { | |
6927 | return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0), | |
6928 | GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c))); | |
6929 | } | |
6930 | else if (GET_CODE (XEXP (x, 0)) == PLUS | |
6931 | || GET_CODE (XEXP (x, 1)) != PLUS) | |
6932 | { | |
6933 | return gen_rtx_PLUS (GET_MODE (x), | |
6934 | sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1)); | |
6935 | } | |
6936 | else | |
6937 | { | |
6938 | return gen_rtx_PLUS (GET_MODE (x), | |
6939 | sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0)); | |
6940 | } | |
6941 | } | |
6942 | ||
6943 | static rtx | |
0c20a65f | 6944 | sge_plus (enum machine_mode mode, rtx x, rtx y) |
45f97e2e RH |
6945 | { |
6946 | while (GET_CODE (y) == PLUS) | |
6947 | { | |
6948 | rtx a = XEXP (y, 0); | |
6949 | if (GET_CODE (a) == CONST_INT) | |
6950 | x = sge_plus_constant (x, a); | |
6951 | else | |
6952 | x = gen_rtx_PLUS (mode, x, a); | |
6953 | y = XEXP (y, 1); | |
6954 | } | |
6955 | if (GET_CODE (y) == CONST_INT) | |
6956 | x = sge_plus_constant (x, y); | |
6957 | else | |
6958 | x = gen_rtx_PLUS (mode, x, y); | |
6959 | return x; | |
6960 | } | |
b4ad7b23 RS |
6961 | \f |
6962 | /* Help detect a giv that is calculated by several consecutive insns; | |
6963 | for example, | |
6964 | giv = biv * M | |
6965 | giv = giv + A | |
6966 | The caller has already identified the first insn P as having a giv as dest; | |
6967 | we check that all other insns that set the same register follow | |
6968 | immediately after P, that they alter nothing else, | |
6969 | and that the result of the last is still a giv. | |
6970 | ||
6971 | The value is 0 if the reg set in P is not really a giv. | |
6972 | Otherwise, the value is the amount gained by eliminating | |
6973 | all the consecutive insns that compute the value. | |
6974 | ||
6975 | FIRST_BENEFIT is the amount gained by eliminating the first insn, P. | |
6976 | SRC_REG is the reg of the biv; DEST_REG is the reg of the giv. | |
6977 | ||
6978 | The coefficients of the ultimate giv value are stored in | |
6979 | *MULT_VAL and *ADD_VAL. */ | |
6980 | ||
6981 | static int | |
0c20a65f AJ |
6982 | consec_sets_giv (const struct loop *loop, int first_benefit, rtx p, |
6983 | rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val, | |
6984 | rtx *ext_val, rtx *last_consec_insn) | |
b4ad7b23 | 6985 | { |
ed5bb68d | 6986 | struct loop_ivs *ivs = LOOP_IVS (loop); |
1ecd860b | 6987 | struct loop_regs *regs = LOOP_REGS (loop); |
b4ad7b23 RS |
6988 | int count; |
6989 | enum rtx_code code; | |
6990 | int benefit; | |
6991 | rtx temp; | |
6992 | rtx set; | |
6993 | ||
6994 | /* Indicate that this is a giv so that we can update the value produced in | |
e6fcb60d | 6995 | each insn of the multi-insn sequence. |
b4ad7b23 RS |
6996 | |
6997 | This induction structure will be used only by the call to | |
6998 | general_induction_var below, so we can allocate it on our stack. | |
6999 | If this is a giv, our caller will replace the induct var entry with | |
7000 | a new induction structure. */ | |
847dde95 BS |
7001 | struct induction *v; |
7002 | ||
7003 | if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT) | |
7004 | return 0; | |
7005 | ||
703ad42b | 7006 | v = alloca (sizeof (struct induction)); |
b4ad7b23 RS |
7007 | v->src_reg = src_reg; |
7008 | v->mult_val = *mult_val; | |
7009 | v->add_val = *add_val; | |
7010 | v->benefit = first_benefit; | |
7011 | v->cant_derive = 0; | |
7012 | v->derive_adjustment = 0; | |
affd4f33 | 7013 | v->ext_dependent = NULL_RTX; |
b4ad7b23 | 7014 | |
ed5bb68d MH |
7015 | REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT; |
7016 | REG_IV_INFO (ivs, REGNO (dest_reg)) = v; | |
b4ad7b23 | 7017 | |
f1d4ac80 | 7018 | count = regs->array[REGNO (dest_reg)].n_times_set - 1; |
b4ad7b23 RS |
7019 | |
7020 | while (count > 0) | |
7021 | { | |
7022 | p = NEXT_INSN (p); | |
7023 | code = GET_CODE (p); | |
7024 | ||
7025 | /* If libcall, skip to end of call sequence. */ | |
5fd8383e | 7026 | if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX))) |
b4ad7b23 RS |
7027 | p = XEXP (temp, 0); |
7028 | ||
7029 | if (code == INSN | |
7030 | && (set = single_set (p)) | |
7031 | && GET_CODE (SET_DEST (set)) == REG | |
7032 | && SET_DEST (set) == dest_reg | |
0534b804 | 7033 | && (general_induction_var (loop, SET_SRC (set), &src_reg, |
e8cb4873 RH |
7034 | add_val, mult_val, ext_val, 0, |
7035 | &benefit, VOIDmode) | |
b4ad7b23 | 7036 | /* Giv created by equivalent expression. */ |
5fd8383e | 7037 | || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)) |
0534b804 | 7038 | && general_induction_var (loop, XEXP (temp, 0), &src_reg, |
e8cb4873 RH |
7039 | add_val, mult_val, ext_val, 0, |
7040 | &benefit, VOIDmode))) | |
b4ad7b23 RS |
7041 | && src_reg == v->src_reg) |
7042 | { | |
5fd8383e | 7043 | if (find_reg_note (p, REG_RETVAL, NULL_RTX)) |
b4ad7b23 RS |
7044 | benefit += libcall_benefit (p); |
7045 | ||
7046 | count--; | |
7047 | v->mult_val = *mult_val; | |
7048 | v->add_val = *add_val; | |
630c79be | 7049 | v->benefit += benefit; |
b4ad7b23 RS |
7050 | } |
7051 | else if (code != NOTE) | |
7052 | { | |
7053 | /* Allow insns that set something other than this giv to a | |
7054 | constant. Such insns are needed on machines which cannot | |
7055 | include long constants and should not disqualify a giv. */ | |
7056 | if (code == INSN | |
7057 | && (set = single_set (p)) | |
7058 | && SET_DEST (set) != dest_reg | |
7059 | && CONSTANT_P (SET_SRC (set))) | |
7060 | continue; | |
7061 | ||
ed5bb68d | 7062 | REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT; |
b4ad7b23 RS |
7063 | return 0; |
7064 | } | |
7065 | } | |
7066 | ||
847dde95 | 7067 | REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT; |
a07516d3 | 7068 | *last_consec_insn = p; |
b4ad7b23 RS |
7069 | return v->benefit; |
7070 | } | |
7071 | \f | |
7072 | /* Return an rtx, if any, that expresses giv G2 as a function of the register | |
7073 | represented by G1. If no such expression can be found, or it is clear that | |
e6fcb60d | 7074 | it cannot possibly be a valid address, 0 is returned. |
b4ad7b23 RS |
7075 | |
7076 | To perform the computation, we note that | |
6b8c9327 | 7077 | G1 = x * v + a and |
45f97e2e | 7078 | G2 = y * v + b |
b4ad7b23 RS |
7079 | where `v' is the biv. |
7080 | ||
45f97e2e RH |
7081 | So G2 = (y/x) * G1 + (b - a*y/x). |
7082 | ||
7083 | Note that MULT = y/x. | |
7084 | ||
7085 | Update: A and B are now allowed to be additive expressions such that | |
7086 | B contains all variables in A. That is, computing B-A will not require | |
7087 | subtracting variables. */ | |
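/* An illustrative check of the algebra: with G1 = 2*v + 1 and
   G2 = 4*v + 5, MULT = 4/2 = 2 and the addend is 5 - 1*2 = 3;
   indeed 2*G1 + 3 = 4*v + 5 = G2.  */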
7088 | ||
7089 | static rtx | |
0c20a65f | 7090 | express_from_1 (rtx a, rtx b, rtx mult) |
45f97e2e RH |
7091 | { |
7092 | /* If MULT is zero, then A*MULT is zero, and our expression is B. */ | |
7093 | ||
7094 | if (mult == const0_rtx) | |
7095 | return b; | |
7096 | ||
7097 | /* If MULT is not 1, we cannot handle A with non-constants, since we | |
7098 | would then be required to subtract multiples of the registers in A. | |
7099 | This is theoretically possible, and may even apply to some Fortran | |
7100 | constructs, but it is a lot of work and we do not attempt it here. */ | |
7101 | ||
7102 | if (mult != const1_rtx && GET_CODE (a) != CONST_INT) | |
7103 | return NULL_RTX; | |
7104 | ||
7105 | /* In general these structures are sorted top to bottom (down the PLUS | |
7106 | chain), but not left to right across the PLUS. If B is a higher | |
7107 | order giv than A, we can strip one level and recurse. If A is higher | |
7108 | order, we'll eventually bail out, but won't know that until the end. | |
7109 | If they are the same, we'll strip one level around this loop. */ | |
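/* For instance, with A = (plus r1 (const_int 4)) and
   B = (plus r1 (plus r2 (const_int 8))), the shared register r1 is
   stripped on the first iteration, leaving (const_int 4) to be
   matched against (plus r2 (const_int 8)).  */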
7110 | ||
7111 | while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS) | |
7112 | { | |
7113 | rtx ra, rb, oa, ob, tmp; | |
7114 | ||
7115 | ra = XEXP (a, 0), oa = XEXP (a, 1); | |
7116 | if (GET_CODE (ra) == PLUS) | |
e6fcb60d | 7117 | tmp = ra, ra = oa, oa = tmp; |
45f97e2e RH |
7118 | |
7119 | rb = XEXP (b, 0), ob = XEXP (b, 1); | |
7120 | if (GET_CODE (rb) == PLUS) | |
e6fcb60d | 7121 | tmp = rb, rb = ob, ob = tmp; |
45f97e2e RH |
7122 | |
7123 | if (rtx_equal_p (ra, rb)) | |
7124 | /* We matched: remove one reg completely. */ | |
7125 | a = oa, b = ob; | |
7126 | else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob)) | |
7127 | /* An alternate match. */ | |
7128 | a = oa, b = rb; | |
7129 | else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb)) | |
7130 | /* An alternate match. */ | |
7131 | a = ra, b = ob; | |
7132 | else | |
7133 | { | |
fd5d5b07 | 7134 | /* Indicates an extra register in B. Strip one level from B and |
45f97e2e RH |
7135 | recurse, hoping B was the higher order expression. */ |
7136 | ob = express_from_1 (a, ob, mult); | |
7137 | if (ob == NULL_RTX) | |
7138 | return NULL_RTX; | |
7139 | return gen_rtx_PLUS (GET_MODE (b), rb, ob); | |
7140 | } | |
7141 | } | |
7142 | ||
7143 | /* Here we are at the last level of A, go through the cases hoping to | |
7144 | get rid of everything but a constant. */ | |
7145 | ||
7146 | if (GET_CODE (a) == PLUS) | |
7147 | { | |
efe3eb65 | 7148 | rtx ra, oa; |
45f97e2e RH |
7149 | |
7150 | ra = XEXP (a, 0), oa = XEXP (a, 1); | |
7151 | if (rtx_equal_p (oa, b)) | |
7152 | oa = ra; | |
7153 | else if (!rtx_equal_p (ra, b)) | |
7154 | return NULL_RTX; | |
7155 | ||
7156 | if (GET_CODE (oa) != CONST_INT) | |
7157 | return NULL_RTX; | |
7158 | ||
7159 | return GEN_INT (-INTVAL (oa) * INTVAL (mult)); | |
7160 | } | |
7161 | else if (GET_CODE (a) == CONST_INT) | |
7162 | { | |
7163 | return plus_constant (b, -INTVAL (a) * INTVAL (mult)); | |
7164 | } | |
ce7de04c JH |
7165 | else if (CONSTANT_P (a)) |
7166 | { | |
9b3bd424 RH |
7167 | enum machine_mode mode_a = GET_MODE (a); |
7168 | enum machine_mode mode_b = GET_MODE (b); | |
7169 | enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b; | |
7170 | return simplify_gen_binary (MINUS, mode, b, a); | |
ce7de04c | 7171 | } |
45f97e2e RH |
7172 | else if (GET_CODE (b) == PLUS) |
7173 | { | |
7174 | if (rtx_equal_p (a, XEXP (b, 0))) | |
7175 | return XEXP (b, 1); | |
7176 | else if (rtx_equal_p (a, XEXP (b, 1))) | |
7177 | return XEXP (b, 0); | |
7178 | else | |
7179 | return NULL_RTX; | |
7180 | } | |
7181 | else if (rtx_equal_p (a, b)) | |
7182 | return const0_rtx; | |
7183 | ||
7184 | return NULL_RTX; | |
7185 | } | |
b4ad7b23 | 7186 | |
4d87f7a7 | 7187 | rtx |
0c20a65f | 7188 | express_from (struct induction *g1, struct induction *g2) |
b4ad7b23 RS |
7189 | { |
7190 | rtx mult, add; | |
7191 | ||
7192 | /* The value that G1 will be multiplied by must be a constant integer. Also, | |
7193 | the only chance we have of getting a valid address is if b*c/a (see above | |
7194 | for notation) is also an integer. */ | |
45f97e2e RH |
7195 | if (GET_CODE (g1->mult_val) == CONST_INT |
7196 | && GET_CODE (g2->mult_val) == CONST_INT) | |
7197 | { | |
7198 | if (g1->mult_val == const0_rtx | |
e6fcb60d KH |
7199 | || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0) |
7200 | return NULL_RTX; | |
45f97e2e RH |
7201 | mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val)); |
7202 | } | |
7203 | else if (rtx_equal_p (g1->mult_val, g2->mult_val)) | |
7204 | mult = const1_rtx; | |
7205 | else | |
7206 | { | |
7207 | /* ??? Find out if one is a multiple of the other? */ |
7208 | return NULL_RTX; | |
7209 | } | |
b4ad7b23 | 7210 | |
45f97e2e | 7211 | add = express_from_1 (g1->add_val, g2->add_val, mult); |
e0485b85 RH |
7212 | if (add == NULL_RTX) |
7213 | { | |
7214 | /* Failed. If we've got a multiplication factor between G1 and G2, | |
7215 | scale G1's addend and try again. */ | |
7216 | if (INTVAL (mult) > 1) | |
7217 | { | |
7218 | rtx g1_add_val = g1->add_val; | |
7219 | if (GET_CODE (g1_add_val) == MULT | |
7220 | && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT) | |
7221 | { | |
7222 | HOST_WIDE_INT m; | |
7223 | m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1)); | |
7224 | g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), | |
7225 | XEXP (g1_add_val, 0), GEN_INT (m)); | |
7226 | } | |
7227 | else | |
7228 | { | |
7229 | g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val, | |
7230 | mult); | |
7231 | } | |
7232 | ||
7233 | add = express_from_1 (g1_add_val, g2->add_val, const1_rtx); | |
7234 | } | |
7235 | } | |
45f97e2e RH |
7236 | if (add == NULL_RTX) |
7237 | return NULL_RTX; | |
b4ad7b23 RS |
7238 | |
7239 | /* Form simplified final result. */ | |
7240 | if (mult == const0_rtx) | |
7241 | return add; | |
7242 | else if (mult == const1_rtx) | |
7243 | mult = g1->dest_reg; | |
7244 | else | |
38a448ca | 7245 | mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult); |
b4ad7b23 RS |
7246 | |
7247 | if (add == const0_rtx) | |
7248 | return mult; | |
7249 | else | |
86219cc7 BS |
7250 | { |
7251 | if (GET_CODE (add) == PLUS | |
7252 | && CONSTANT_P (XEXP (add, 1))) | |
7253 | { | |
7254 | rtx tem = XEXP (add, 1); | |
7255 | mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0)); | |
7256 | add = tem; | |
7257 | } | |
e6fcb60d | 7258 | |
86219cc7 BS |
7259 | return gen_rtx_PLUS (g2->mode, mult, add); |
7260 | } | |
b4ad7b23 | 7261 | } |
b4ad7b23 | 7262 | \f |
da5a44b3 BS |
7263 | /* Return an rtx, if any, that expresses giv G2 as a function of the register |
7264 | represented by G1. This indicates that G2 should be combined with G1 and | |
7265 | that G2 can use (either directly or via an address expression) a register | |
7266 | used to represent G1. */ | |
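/* A hypothetical illustration: if G1 computes biv * 4 and G2 computes
   biv * 4 + 16, express_from yields (plus <G1's dest_reg> (const_int 16));
   when G2 is a DEST_ADDR giv and that sum is a valid machine address,
   G2 can simply address through G1's register.  */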
b4ad7b23 | 7267 | |
45f97e2e | 7268 | static rtx |
0c20a65f | 7269 | combine_givs_p (struct induction *g1, struct induction *g2) |
b4ad7b23 | 7270 | { |
e8cb4873 RH |
7271 | rtx comb, ret; |
7272 | ||
ff7cc307 | 7273 | /* With the introduction of ext dependent givs, we must care for modes. |
e8cb4873 RH |
7274 | G2 must not use a wider mode than G1. */ |
7275 | if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode)) | |
7276 | return NULL_RTX; | |
7277 | ||
7278 | ret = comb = express_from (g1, g2); | |
7279 | if (comb == NULL_RTX) | |
7280 | return NULL_RTX; | |
7281 | if (g1->mode != g2->mode) | |
7282 | ret = gen_lowpart (g2->mode, comb); | |
b4ad7b23 | 7283 | |
45f97e2e RH |
7284 | /* If these givs are identical, they can be combined. We use the results |
7285 | of express_from because the addends are not in a canonical form, so | |
7286 | rtx_equal_p is a weaker test. */ | |
3ec2b590 R |
7287 | /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the |
7288 | combination to be the other way round. */ | |
e8cb4873 | 7289 | if (comb == g1->dest_reg |
3ec2b590 | 7290 | && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR)) |
b4ad7b23 | 7291 | { |
e8cb4873 | 7292 | return ret; |
b4ad7b23 RS |
7293 | } |
7294 | ||
b4ad7b23 RS |
7295 | /* If G2 can be expressed as a function of G1 and that function is valid |
7296 | as an address and no more expensive than using a register for G2, | |
7297 | the expression of G2 in terms of G1 can be used. */ | |
e8cb4873 | 7298 | if (ret != NULL_RTX |
45f97e2e | 7299 | && g2->giv_type == DEST_ADDR |
dcefdf67 RH |
7300 | && memory_address_p (GET_MODE (g2->mem), ret)) |
7301 | return ret; | |
b4ad7b23 | 7302 | |
45f97e2e | 7303 | return NULL_RTX; |
b4ad7b23 RS |
7304 | } |
7305 | \f | |
ff7cc307 | 7306 | /* Check each extension dependent giv in this class to see if its |
e8cb4873 RH |
7307 | root biv is safe from wrapping in the interior mode, which would |
7308 | make the giv illegal. */ | |
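/* An illustrative case: a QImode biv that starts at 0 and is
   incremented by 1 for 200 iterations ends at 200.  That fits the
   unsigned range (200 <= 255), so a ZERO_EXTEND giv is safe, but it
   exceeds the signed maximum (200 > 127), so a SIGN_EXTEND giv is not.  */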
7309 | ||
7310 | static void | |
03988cac | 7311 | check_ext_dependent_givs (const struct loop *loop, struct iv_class *bl) |
e8cb4873 | 7312 | { |
03988cac | 7313 | struct loop_info *loop_info = LOOP_INFO (loop); |
e8cb4873 RH |
7314 | int ze_ok = 0, se_ok = 0, info_ok = 0; |
7315 | enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg); | |
7316 | HOST_WIDE_INT start_val; | |
616fde53 MH |
7317 | unsigned HOST_WIDE_INT u_end_val = 0; |
7318 | unsigned HOST_WIDE_INT u_start_val = 0; | |
e8cb4873 RH |
7319 | rtx incr = pc_rtx; |
7320 | struct induction *v; | |
7321 | ||
7322 | /* Make sure the iteration data is available. We must have | |
7323 | constants in order to be certain of no overflow. */ | |
e8cb4873 RH |
7324 | if (loop_info->n_iterations > 0 |
7325 | && bl->initial_value | |
7326 | && GET_CODE (bl->initial_value) == CONST_INT | |
7327 | && (incr = biv_total_increment (bl)) | |
7328 | && GET_CODE (incr) == CONST_INT | |
7329 | /* Make sure the host can represent the arithmetic. */ | |
7330 | && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode)) | |
7331 | { | |
7332 | unsigned HOST_WIDE_INT abs_incr, total_incr; | |
7333 | HOST_WIDE_INT s_end_val; | |
7334 | int neg_incr; | |
7335 | ||
7336 | info_ok = 1; | |
7337 | start_val = INTVAL (bl->initial_value); | |
7338 | u_start_val = start_val; | |
fd5d5b07 | 7339 | |
e8cb4873 RH |
7340 | neg_incr = 0, abs_incr = INTVAL (incr); |
7341 | if (INTVAL (incr) < 0) | |
7342 | neg_incr = 1, abs_incr = -abs_incr; | |
7343 | total_incr = abs_incr * loop_info->n_iterations; | |
7344 | ||
3d042e77 | 7345 | /* Check for host arithmetic overflow. */ |
e8cb4873 RH |
7346 | if (total_incr / loop_info->n_iterations == abs_incr) |
7347 | { | |
7348 | unsigned HOST_WIDE_INT u_max; | |
7349 | HOST_WIDE_INT s_max; | |
7350 | ||
7351 | u_end_val = start_val + (neg_incr ? -total_incr : total_incr); | |
7352 | s_end_val = u_end_val; | |
7353 | u_max = GET_MODE_MASK (biv_mode); | |
7354 | s_max = u_max >> 1; | |
fd5d5b07 | 7355 | |
e8cb4873 RH |
7356 | /* Check zero extension of biv ok. */ |
7357 | if (start_val >= 0 | |
3d042e77 | 7358 | /* Check for host arithmetic overflow. */ |
e8cb4873 RH |
7359 | && (neg_incr |
7360 | ? u_end_val < u_start_val | |
7361 | : u_end_val > u_start_val) | |
7362 | /* Check for target arithmetic overflow. */ | |
7363 | && (neg_incr | |
7364 | ? 1 /* taken care of with host overflow */ | |
7365 | : u_end_val <= u_max)) | |
7366 | { | |
7367 | ze_ok = 1; | |
7368 | } | |
fd5d5b07 | 7369 | |
e8cb4873 RH |
7370 | /* Check sign extension of biv ok. */ |
7371 | /* ??? While it is true that overflow with signed and pointer | |
7372 | arithmetic is undefined, I fear too many programmers don't | |
7373 | keep this fact in mind -- myself included on occasion. | |
7374 | So we leave the signed overflow optimizations alone. */ |
7375 | if (start_val >= -s_max - 1 | |
3d042e77 | 7376 | /* Check for host arithmetic overflow. */ |
e8cb4873 RH |
7377 | && (neg_incr |
7378 | ? s_end_val < start_val | |
7379 | : s_end_val > start_val) | |
7380 | /* Check for target arithmetic overflow. */ | |
7381 | && (neg_incr | |
7382 | ? s_end_val >= -s_max - 1 | |
7383 | : s_end_val <= s_max)) | |
7384 | { | |
7385 | se_ok = 1; | |
7386 | } | |
7387 | } | |
7388 | } | |
7389 | ||
03988cac UW |
7390 | /* If we know the BIV is compared at run-time against an |
7391 | invariant value, and the increment is +/- 1, we may also | |
7392 | be able to prove that the BIV cannot overflow. */ | |
7393 | else if (bl->biv->src_reg == loop_info->iteration_var | |
7394 | && loop_info->comparison_value | |
7395 | && loop_invariant_p (loop, loop_info->comparison_value) | |
7396 | && (incr = biv_total_increment (bl)) | |
7397 | && GET_CODE (incr) == CONST_INT) | |
7398 | { | |
7399 | /* If the increment is +1, and the exit test is a <, | |
7400 | the BIV cannot overflow. (For <=, we have the | |
7401 | problematic case that the comparison value might | |
7402 | be the maximum value of the range.) */ | |
7403 | if (INTVAL (incr) == 1) | |
7404 | { | |
7405 | if (loop_info->comparison_code == LT) | |
7406 | se_ok = ze_ok = 1; | |
7407 | else if (loop_info->comparison_code == LTU) | |
7408 | ze_ok = 1; | |
7409 | } | |
7410 | ||
7411 | /* Likewise for increment -1 and exit test >. */ | |
7412 | if (INTVAL (incr) == -1) | |
7413 | { | |
7414 | if (loop_info->comparison_code == GT) | |
7415 | se_ok = ze_ok = 1; | |
7416 | else if (loop_info->comparison_code == GTU) | |
7417 | ze_ok = 1; | |
7418 | } | |
7419 | } | |
7420 | ||
e8cb4873 RH |
7421 | /* Invalidate givs that fail the tests. */ |
7422 | for (v = bl->giv; v; v = v->next_iv) | |
affd4f33 | 7423 | if (v->ext_dependent) |
e8cb4873 | 7424 | { |
affd4f33 | 7425 | enum rtx_code code = GET_CODE (v->ext_dependent); |
e8cb4873 RH |
7426 | int ok = 0; |
7427 | ||
7428 | switch (code) | |
7429 | { | |
7430 | case SIGN_EXTEND: | |
7431 | ok = se_ok; | |
7432 | break; | |
7433 | case ZERO_EXTEND: | |
7434 | ok = ze_ok; | |
7435 | break; | |
7436 | ||
7437 | case TRUNCATE: | |
7438 | /* We don't know whether this value is being used as either | |
7439 | signed or unsigned, so to safely truncate we must satisfy | |
fd5d5b07 | 7440 | both. The initial check here verifies the BIV itself; |
e8cb4873 | 7441 | once that is successful we may check its range wrt the |
03988cac UW |
7442 | derived GIV. This works only if we were able to determine |
7443 | constant start and end values above. */ | |
7444 | if (se_ok && ze_ok && info_ok) | |
e8cb4873 | 7445 | { |
affd4f33 | 7446 | enum machine_mode outer_mode = GET_MODE (v->ext_dependent); |
e8cb4873 RH |
7447 | unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1; |
7448 | ||
7449 | /* We know from the above that both endpoints are nonnegative, | |
7450 | and that there is no wrapping. Verify that both endpoints | |
7451 | are within the (signed) range of the outer mode. */ | |
7452 | if (u_start_val <= max && u_end_val <= max) | |
7453 | ok = 1; | |
7454 | } | |
7455 | break; | |
7456 | ||
7457 | default: | |
7458 | abort (); | |
7459 | } | |
7460 | ||
7461 | if (ok) | |
7462 | { | |
7463 | if (loop_dump_stream) | |
7464 | { | |
fd5d5b07 | 7465 | fprintf (loop_dump_stream, |
ff7cc307 | 7466 | "Verified ext dependent giv at %d of reg %d\n", |
fd5d5b07 | 7467 | INSN_UID (v->insn), bl->regno); |
e8cb4873 RH |
7468 | } |
7469 | } | |
7470 | else | |
7471 | { | |
7472 | if (loop_dump_stream) | |
7473 | { | |
7474 | const char *why; | |
7475 | ||
7476 | if (info_ok) | |
7477 | why = "biv iteration values overflowed"; | |
7478 | else | |
7479 | { | |
7480 | if (incr == pc_rtx) | |
7481 | incr = biv_total_increment (bl); | |
7482 | if (incr == const1_rtx) | |
7483 | why = "biv iteration info incomplete; incr by 1"; | |
7484 | else | |
7485 | why = "biv iteration info incomplete"; | |
7486 | } | |
7487 | ||
fd5d5b07 | 7488 | fprintf (loop_dump_stream, |
ff7cc307 | 7489 | "Failed ext dependent giv at %d, %s\n", |
fd5d5b07 | 7490 | INSN_UID (v->insn), why); |
e8cb4873 RH |
7491 | } |
7492 | v->ignore = 1; | |
97ebd24c | 7493 | bl->all_reduced = 0; |
e8cb4873 RH |
7494 | } |
7495 | } | |
7496 | } | |
7497 | ||
7498 | /* Generate a version of VALUE in a mode appropriate for initializing V. */ | |
7499 | ||
7500 | rtx | |
0c20a65f | 7501 | extend_value_for_giv (struct induction *v, rtx value) |
e8cb4873 | 7502 | { |
affd4f33 | 7503 | rtx ext_dep = v->ext_dependent; |
e8cb4873 RH |
7504 | |
7505 | if (! ext_dep) | |
7506 | return value; | |
7507 | ||
affd4f33 | 7508 | /* Recall that check_ext_dependent_givs verified that the known bounds |
e8cb4873 RH |
7509 | of a biv did not overflow or wrap with respect to the extension for |
7510 | the giv. Therefore, constants need no additional adjustment. */ | |
7511 | if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode) | |
7512 | return value; | |
7513 | ||
7514 | /* Otherwise, we must adjust the value to compensate for the | |
7515 | differing modes of the biv and the giv. */ | |
7516 | return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value); | |
7517 | } | |
7518 | \f | |
45f97e2e RH |
7519 | struct combine_givs_stats |
7520 | { | |
7521 | int giv_number; | |
7522 | int total_benefit; | |
7523 | }; | |
7524 | ||
7525 | static int | |
0c20a65f | 7526 | cmp_combine_givs_stats (const void *xp, const void *yp) |
45f97e2e | 7527 | { |
f428f252 KG |
7528 | const struct combine_givs_stats * const x = |
7529 | (const struct combine_givs_stats *) xp; | |
7530 | const struct combine_givs_stats * const y = | |
7531 | (const struct combine_givs_stats *) yp; | |
45f97e2e RH |
7532 | int d; |
7533 | d = y->total_benefit - x->total_benefit; | |
7534 | /* Stabilize the sort. */ | |
7535 | if (!d) | |
7536 | d = x->giv_number - y->giv_number; | |
7537 | return d; | |
7538 | } | |
7539 | ||
b4ad7b23 RS |
7540 | /* Check all pairs of givs for iv_class BL and see if any can be combined with |
7541 | any other. If so, point SAME to the giv combined with and set NEW_REG to | |
7542 | be an expression (in terms of the other giv's DEST_REG) equivalent to the | |
7543 | giv. Also, update BENEFIT and related fields for cost/benefit analysis. */ | |
7544 | ||
7545 | static void | |
0c20a65f | 7546 | combine_givs (struct loop_regs *regs, struct iv_class *bl) |
b4ad7b23 | 7547 | { |
ba12c883 RH |
7548 | /* Additional benefit to add for being combined multiple times. */ |
7549 | const int extra_benefit = 3; | |
7550 | ||
29a82058 | 7551 | struct induction *g1, *g2, **giv_array; |
45f97e2e RH |
7552 | int i, j, k, giv_count; |
7553 | struct combine_givs_stats *stats; | |
7554 | rtx *can_combine; | |
b4ad7b23 | 7555 | |
7027f90a JW |
7556 | /* Count givs, because bl->giv_count is incorrect here. */ |
7557 | giv_count = 0; | |
b4ad7b23 | 7558 | for (g1 = bl->giv; g1; g1 = g1->next_iv) |
45f97e2e RH |
7559 | if (!g1->ignore) |
7560 | giv_count++; | |
7027f90a | 7561 | |
703ad42b | 7562 | giv_array = alloca (giv_count * sizeof (struct induction *)); |
7027f90a JW |
7563 | i = 0; |
7564 | for (g1 = bl->giv; g1; g1 = g1->next_iv) | |
45f97e2e RH |
7565 | if (!g1->ignore) |
7566 | giv_array[i++] = g1; | |
7027f90a | 7567 | |
703ad42b KG |
7568 | stats = xcalloc (giv_count, sizeof (*stats)); |
7569 | can_combine = xcalloc (giv_count, giv_count * sizeof (rtx)); | |
7027f90a JW |
7570 | |
7571 | for (i = 0; i < giv_count; i++) | |
7572 | { | |
45f97e2e | 7573 | int this_benefit; |
ba12c883 | 7574 | rtx single_use; |
45f97e2e | 7575 | |
7027f90a | 7576 | g1 = giv_array[i]; |
ba12c883 RH |
7577 | stats[i].giv_number = i; |
7578 | ||
7579 | /* If a DEST_REG GIV is used only once, do not allow it to combine | |
7580 | with anything, for in doing so we will gain nothing that cannot | |
7581 | be had by simply allowing the GIV with which we would have combined |
e6fcb60d | 7582 | to be reduced on its own. The lossage shows up in particular with |
ba12c883 RH |
7583 | DEST_ADDR targets on hosts with reg+reg addressing, though it can |
7584 | be seen elsewhere as well. */ | |
7585 | if (g1->giv_type == DEST_REG | |
f1d4ac80 | 7586 | && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage) |
ba12c883 RH |
7587 | && single_use != const0_rtx) |
7588 | continue; | |
45f97e2e RH |
7589 | |
7590 | this_benefit = g1->benefit; | |
7591 | /* Add an additional weight for zero addends. */ | |
7592 | if (g1->no_const_addval) | |
7593 | this_benefit += 1; | |
ba12c883 | 7594 | |
45f97e2e RH |
7595 | for (j = 0; j < giv_count; j++) |
7596 | { | |
7597 | rtx this_combine; | |
7598 | ||
7599 | g2 = giv_array[j]; | |
7600 | if (g1 != g2 | |
7601 | && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX) | |
7602 | { | |
e6fcb60d | 7603 | can_combine[i * giv_count + j] = this_combine; |
ba12c883 | 7604 | this_benefit += g2->benefit + extra_benefit; |
45f97e2e RH |
7605 | } |
7606 | } | |
45f97e2e RH |
7607 | stats[i].total_benefit = this_benefit; |
7608 | } | |
7609 | ||
7610 | /* Iterate, combining until we can't. */ | |
7611 | restart: | |
e6fcb60d | 7612 | qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats); |
45f97e2e RH |
7613 | |
7614 | if (loop_dump_stream) | |
7615 | { | |
7616 | fprintf (loop_dump_stream, "Sorted combine statistics:\n"); | |
7617 | for (k = 0; k < giv_count; k++) | |
7618 | { | |
7619 | g1 = giv_array[stats[k].giv_number]; | |
7620 | if (!g1->combined_with && !g1->same) | |
e6fcb60d | 7621 | fprintf (loop_dump_stream, " {%d, %d}", |
45f97e2e RH |
7622 | INSN_UID (giv_array[stats[k].giv_number]->insn), |
7623 | stats[k].total_benefit); | |
7624 | } | |
7625 | putc ('\n', loop_dump_stream); | |
7626 | } | |
7627 | ||
7628 | for (k = 0; k < giv_count; k++) | |
7629 | { | |
7630 | int g1_add_benefit = 0; | |
7631 | ||
7632 | i = stats[k].giv_number; | |
7633 | g1 = giv_array[i]; | |
7634 | ||
7635 | /* If it has already been combined, skip. */ | |
7636 | if (g1->combined_with || g1->same) | |
7637 | continue; | |
7638 | ||
7639 | for (j = 0; j < giv_count; j++) | |
7640 | { | |
7641 | g2 = giv_array[j]; | |
e6fcb60d | 7642 | if (g1 != g2 && can_combine[i * giv_count + j] |
45f97e2e RH |
7643 | /* If it has already been combined, skip. */ |
7644 | && ! g2->same && ! g2->combined_with) | |
7645 | { | |
7646 | int l; | |
7647 | ||
e6fcb60d | 7648 | g2->new_reg = can_combine[i * giv_count + j]; |
45f97e2e | 7649 | g2->same = g1; |
5bb08c84 JH |
7650 | /* For a DEST_ADDR giv, the destination may now be replaced by a |
7651 | memory expression instead of a register. This changes the costs |
7652 | considerably, so add the compensation. */ |
7653 | if (g2->giv_type == DEST_ADDR) | |
7654 | g2->benefit = (g2->benefit + reg_address_cost | |
7655 | - address_cost (g2->new_reg, | |
7656 | GET_MODE (g2->mem))); | |
3ec2b590 | 7657 | g1->combined_with++; |
45f97e2e RH |
7658 | g1->lifetime += g2->lifetime; |
7659 | ||
ba12c883 | 7660 | g1_add_benefit += g2->benefit; |
45f97e2e RH |
7661 | |
7662 | /* ??? The new final_[bg]iv_value code does a much better job | |
7663 | of finding replaceable giv's, and hence this code may no | |
7664 | longer be necessary. */ | |
7665 | if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg)) | |
7666 | g1_add_benefit -= copy_cost; | |
e6fcb60d | 7667 | |
45f97e2e RH |
7668 | /* To help optimize the next set of combinations, remove |
7669 | this giv from the benefits of other potential mates. */ | |
7670 | for (l = 0; l < giv_count; ++l) | |
7671 | { | |
7672 | int m = stats[l].giv_number; | |
e6fcb60d | 7673 | if (can_combine[m * giv_count + j]) |
ba12c883 | 7674 | stats[l].total_benefit -= g2->benefit + extra_benefit; |
45f97e2e RH |
7675 | } |
7676 | ||
7677 | if (loop_dump_stream) | |
7678 | fprintf (loop_dump_stream, | |
630c79be BS |
7679 | "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n", |
7680 | INSN_UID (g2->insn), INSN_UID (g1->insn), | |
7681 | g1->benefit, g1_add_benefit, g1->lifetime); | |
45f97e2e RH |
7682 | } |
7683 | } | |
7684 | ||
7685 | /* To help optimize the next set of combinations, remove | |
7686 | this giv from the benefits of other potential mates. */ | |
7687 | if (g1->combined_with) | |
7688 | { | |
7689 | for (j = 0; j < giv_count; ++j) | |
7690 | { | |
7691 | int m = stats[j].giv_number; | |
e6fcb60d | 7692 | if (can_combine[m * giv_count + i]) |
ba12c883 | 7693 | stats[j].total_benefit -= g1->benefit + extra_benefit; |
45f97e2e RH |
7694 | } |
7695 | ||
7696 | g1->benefit += g1_add_benefit; | |
7697 | ||
7698 | /* We've finished with this giv, and everything it touched. | |
e6fcb60d | 7699 | Restart the combination so that the weights for the |
45f97e2e RH |
7700 | rest of the givs are properly taken into account. */ |
7701 | /* ??? Ideally we would compact the arrays at this point, so | |
7702 | as to not cover old ground. But sanely compacting | |
7703 | can_combine is tricky. */ | |
7704 | goto restart; | |
7705 | } | |
7027f90a | 7706 | } |
67289ea6 MM |
7707 | |
7708 | /* Clean up. */ | |
7709 | free (stats); | |
7710 | free (can_combine); | |
b4ad7b23 RS |
7711 | } |
7712 | \f | |
0c20a65f AJ |
7713 | /* Generate sequence for REG = B * M + A. B is the initial value of |
7714 | the basic induction variable, M a multiplicative constant, A an | |
7715 | additive constant and REG the destination register. */ | |
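/* A hypothetical use: gen_add_mult (i0, GEN_INT (4), GEN_INT (16), reg)
   emits a sequence computing reg = i0 * 4 + 16, where i0 stands for the
   biv's initial-value rtx; expand_mult_add may use shifts and adds
   rather than an actual multiply.  */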
b4ad7b23 | 7716 | |
96a45535 | 7717 | static rtx |
0c20a65f | 7718 | gen_add_mult (rtx b, rtx m, rtx a, rtx reg) |
b4ad7b23 RS |
7719 | { |
7720 | rtx seq; | |
7721 | rtx result; | |
7722 | ||
b4ad7b23 | 7723 | start_sequence (); |
96a45535 | 7724 | /* Use unsigned arithmetic. */ |
91ce572a | 7725 | result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1); |
b4ad7b23 RS |
7726 | if (reg != result) |
7727 | emit_move_insn (reg, result); | |
2f937369 | 7728 | seq = get_insns (); |
b4ad7b23 RS |
7729 | end_sequence (); |
7730 | ||
96a45535 MH |
7731 | return seq; |
7732 | } | |
7733 | ||
7734 | ||
7735 | /* Update registers created in insn sequence SEQ. */ | |
9ae8ffe7 | 7736 | |
96a45535 | 7737 | static void |
0c20a65f | 7738 | loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq) |
96a45535 | 7739 | { |
2f937369 DM |
7740 | rtx insn; |
7741 | ||
96a45535 | 7742 | /* Update register info for alias analysis. */ |
00116a7b | 7743 | |
2f937369 DM |
7744 | if (seq == NULL_RTX) |
7745 | return; | |
7746 | ||
7747 | if (INSN_P (seq)) | |
00116a7b | 7748 | { |
2f937369 DM |
7749 | insn = seq; |
7750 | while (insn != NULL_RTX) | |
00116a7b | 7751 | { |
2f937369 DM |
7752 | rtx set = single_set (insn); |
7753 | ||
00116a7b RH |
7754 | if (set && GET_CODE (SET_DEST (set)) == REG) |
7755 | record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0); | |
2f937369 DM |
7756 | |
7757 | insn = NEXT_INSN (insn); | |
00116a7b RH |
7758 | } |
7759 | } | |
2f937369 DM |
7760 | else if (GET_CODE (seq) == SET |
7761 | && GET_CODE (SET_DEST (seq)) == REG) | |
7762 | record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0); | |
b4ad7b23 | 7763 | } |
630c79be | 7764 | |
96a45535 | 7765 | |
0c20a65f AJ |
7766 | /* EMIT code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. B |
7767 | is the initial value of the basic induction variable, M a | |
7768 | multiplicative constant, A an additive constant and REG the | |
7769 | destination register. */ | |
96a45535 MH |
7770 | |
7771 | void | |
0c20a65f AJ |
7772 | loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a, |
7773 | rtx reg, basic_block before_bb, rtx before_insn) | |
96a45535 MH |
7774 | { |
7775 | rtx seq; | |
7776 | ||
7777 | if (! before_insn) | |
7778 | { | |
7779 | loop_iv_add_mult_hoist (loop, b, m, a, reg); | |
7780 | return; | |
7781 | } | |
7782 | ||
7783 | /* Use copy_rtx to prevent unexpected sharing of these rtx. */ | |
2e279a9b | 7784 | seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg); |
96a45535 MH |
7785 | |
7786 | /* Increase the lifetime of any invariants moved further in code. */ | |
7787 | update_reg_last_use (a, before_insn); | |
7788 | update_reg_last_use (b, before_insn); | |
7789 | update_reg_last_use (m, before_insn); | |
7790 | ||
96a45535 | 7791 | /* It is possible that the expansion created lots of new registers. |
ac8354b1 JW |
7792 | Iterate over the sequence we just created and record them all. We |
7793 | must do this before inserting the sequence. */ | |
96a45535 | 7794 | loop_regs_update (loop, seq); |
ac8354b1 JW |
7795 | |
7796 | loop_insn_emit_before (loop, before_bb, before_insn, seq); | |
96a45535 MH |
7797 | } |
7798 | ||
7799 | ||
0c20a65f AJ |
7800 | /* Emit insns in loop pre-header to set REG = B * M + A. B is the |
7801 | initial value of the basic induction variable, M a multiplicative | |
7802 | constant, A an additive constant and REG the destination | |
7803 | register. */ | |
96a45535 MH |
7804 | |
7805 | void | |
0c20a65f | 7806 | loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg) |
96a45535 MH |
7807 | { |
7808 | rtx seq; | |
7809 | ||
7810 | /* Use copy_rtx to prevent unexpected sharing of these rtx. */ | |
2e279a9b | 7811 | seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg); |
96a45535 MH |
7812 | |
7813 | /* Increase the lifetime of any invariants moved further in code. | |
7814 | ???? Is this really necessary? */ | |
7815 | update_reg_last_use (a, loop->sink); | |
7816 | update_reg_last_use (b, loop->sink); | |
7817 | update_reg_last_use (m, loop->sink); | |
7818 | ||
96a45535 | 7819 | /* It is possible that the expansion created lots of new registers. |
ac8354b1 JW |
7820 | Iterate over the sequence we just created and record them all. We |
7821 | must do this before inserting the sequence. */ | |
96a45535 | 7822 | loop_regs_update (loop, seq); |
ac8354b1 JW |
7823 | |
7824 | loop_insn_sink (loop, seq); | |
96a45535 MH |
7825 | } |
7826 | ||
7827 | ||
0c20a65f AJ |
7828 | /* Emit insns after loop to set REG = B * M + A. B is the initial |
7829 | value of the basic induction variable, M a multiplicative constant, | |
7830 | A an additive constant and REG the destination register. */ | |
96a45535 MH |
7831 | |
7832 | void | |
0c20a65f | 7833 | loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg) |
96a45535 MH |
7834 | { |
7835 | rtx seq; | |
7836 | ||
7837 | /* Use copy_rtx to prevent unexpected sharing of these rtx. */ | |
2e279a9b | 7838 | seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg); |
96a45535 | 7839 | |
96a45535 | 7840 | /* It is possible that the expansion created lots of new registers. |
ac8354b1 JW |
7841 | Iterate over the sequence we just created and record them all. We |
7842 | must do this before inserting the sequence. */ | |
96a45535 | 7843 | loop_regs_update (loop, seq); |
ac8354b1 JW |
7844 | |
7845 | loop_insn_hoist (loop, seq); | |
96a45535 MH |
7846 | } |
7847 | ||
7848 | ||
7849 | ||
7850 | /* Similar to gen_add_mult, but compute cost rather than generating | |
7851 | sequence. */ | |
7852 | ||
630c79be | 7853 | static int |
0c20a65f | 7854 | iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg) |
630c79be BS |
7855 | { |
7856 | int cost = 0; | |
7857 | rtx last, result; | |
7858 | ||
7859 | start_sequence (); | |
96a45535 | 7860 | result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1); |
630c79be BS |
7861 | if (reg != result) |
7862 | emit_move_insn (reg, result); | |
7863 | last = get_last_insn (); | |
7864 | while (last) | |
7865 | { | |
7866 | rtx t = single_set (last); | |
7867 | if (t) | |
7868 | cost += rtx_cost (SET_SRC (t), SET); | |
7869 | last = PREV_INSN (last); | |
7870 | } | |
7871 | end_sequence (); | |
7872 | return cost; | |
7873 | } | |
b4ad7b23 RS |
7874 | \f |
7875 | /* Test whether A * B can be computed without | |
2f937369 DM |
7876 | an actual multiply insn. Value is 1 if so. |
7877 | ||
7878 | ??? This function stinks because it generates a ton of wasted RTL | |
7879 | ??? and as a result fragments GC memory to no end. There are other | |
7880 | ??? places in the compiler which are invoked a lot and do the same | |
7881 | ??? thing, generate wasted RTL just to see if something is possible. */ | |
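/* For instance, x * 5 can often be synthesized as (x << 2) + x, so a
   short shift/add sequence counts as cheap here, while a sequence
   containing a real MULT (or more than 3 insns) does not.  */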
b4ad7b23 RS |
7882 | |
7883 | static int | |
0c20a65f | 7884 | product_cheap_p (rtx a, rtx b) |
b4ad7b23 | 7885 | { |
b4ad7b23 | 7886 | rtx tmp; |
2f937369 | 7887 | int win, n_insns; |
b4ad7b23 | 7888 | |
0f41302f | 7889 | /* If only one is constant, make it B. */ |
b4ad7b23 RS |
7890 | if (GET_CODE (a) == CONST_INT) |
7891 | tmp = a, a = b, b = tmp; | |
7892 | ||
7893 | /* If first constant, both constant, so don't need multiply. */ | |
7894 | if (GET_CODE (a) == CONST_INT) | |
7895 | return 1; | |
7896 | ||
7897 | /* If second not constant, neither is constant, so would need multiply. */ | |
7898 | if (GET_CODE (b) != CONST_INT) | |
7899 | return 0; | |
7900 | ||
7901 | /* One operand is constant, so might not need multiply insn. Generate the | |
7902 | code for the multiply and see if a call or multiply, or long sequence | |
7903 | of insns is generated. */ | |
7904 | ||
b4ad7b23 | 7905 | start_sequence (); |
91ce572a | 7906 | expand_mult (GET_MODE (a), a, b, NULL_RTX, 1); |
2f937369 | 7907 | tmp = get_insns (); |
b4ad7b23 RS |
7908 | end_sequence (); |
7909 | ||
2f937369 DM |
7910 | win = 1; |
7911 | if (INSN_P (tmp)) | |
b4ad7b23 | 7912 | { |
2f937369 DM |
7913 | n_insns = 0; |
7914 | while (tmp != NULL_RTX) | |
7915 | { | |
7916 | rtx next = NEXT_INSN (tmp); | |
7917 | ||
7918 | if (++n_insns > 3 | |
7919 | || GET_CODE (tmp) != INSN | |
7920 | || (GET_CODE (PATTERN (tmp)) == SET | |
7921 | && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT) | |
7922 | || (GET_CODE (PATTERN (tmp)) == PARALLEL | |
7923 | && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET | |
7924 | && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT)) | |
7925 | { | |
7926 | win = 0; | |
7927 | break; | |
7928 | } | |
7929 | ||
7930 | tmp = next; | |
7931 | } | |
b4ad7b23 RS |
7932 | } |
7933 | else if (GET_CODE (tmp) == SET | |
7934 | && GET_CODE (SET_SRC (tmp)) == MULT) | |
7935 | win = 0; | |
7936 | else if (GET_CODE (tmp) == PARALLEL | |
7937 | && GET_CODE (XVECEXP (tmp, 0, 0)) == SET | |
7938 | && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT) | |
7939 | win = 0; | |
7940 | ||
b4ad7b23 RS |
7941 | return win; |
7942 | } | |
7943 | \f | |
7944 | /* Check to see if loop can be terminated by a "decrement and branch until | |
7945 | zero" instruction. If so, add a REG_NONNEG note to the branch insn if so. | |
7946 | Also try reversing an increment loop to a decrement loop | |
7947 | to see if the optimization can be performed. | |
7948 | Value is nonzero if optimization was performed. */ | |
7949 | ||
7950 | /* This is useful even if the architecture doesn't have such an insn, | |
7951 | because it might change a loop which increments from 0 to n into a loop |
7952 | which decrements from n to 0. A loop that decrements to zero is usually | |
7953 | faster than one that increments from zero. */ | |
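/* As a source-level sketch (illustrative only), a loop like

     for (i = 0; i < n; i++)
       body ();

   is effectively rewritten as

     for (i = n; --i >= 0; )
       body ();

   so the exit test compares the reversed biv against zero.  */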
7954 | ||
7955 | /* ??? This could be rewritten to use some of the loop unrolling procedures, | |
7956 | such as approx_final_value, biv_total_increment, loop_iterations, and | |
7957 | final_[bg]iv_value. */ | |
7958 | ||
7959 | static int | |
0c20a65f | 7960 | check_dbra_loop (struct loop *loop, int insn_count) |
b4ad7b23 | 7961 | { |
1ecd860b MH |
7962 | struct loop_info *loop_info = LOOP_INFO (loop); |
7963 | struct loop_regs *regs = LOOP_REGS (loop); | |
ed5bb68d | 7964 | struct loop_ivs *ivs = LOOP_IVS (loop); |
b4ad7b23 RS |
7965 | struct iv_class *bl; |
7966 | rtx reg; | |
7967 | rtx jump_label; | |
7968 | rtx final_value; | |
7969 | rtx start_value; | |
b4ad7b23 RS |
7970 | rtx new_add_val; |
7971 | rtx comparison; | |
7972 | rtx before_comparison; | |
7973 | rtx p; | |
0628fde6 JW |
7974 | rtx jump; |
7975 | rtx first_compare; | |
7976 | int compare_and_branch; | |
a2be868f MH |
7977 | rtx loop_start = loop->start; |
7978 | rtx loop_end = loop->end; | |
b4ad7b23 RS |
7979 | |
7980 | /* If last insn is a conditional branch, and the insn before tests a | |
7981 | register value, try to optimize it. Otherwise, we can't do anything. */ | |
7982 | ||
0628fde6 | 7983 | jump = PREV_INSN (loop_end); |
0534b804 | 7984 | comparison = get_condition_for_loop (loop, jump); |
b4ad7b23 RS |
7985 | if (comparison == 0) |
7986 | return 0; | |
7f1c097d JH |
7987 | if (!onlyjump_p (jump)) |
7988 | return 0; | |
b4ad7b23 | 7989 | |
0628fde6 JW |
7990 | /* Try to compute whether the compare/branch at the loop end is one or |
7991 | two instructions. */ | |
7992 | get_condition (jump, &first_compare); | |
7993 | if (first_compare == jump) | |
7994 | compare_and_branch = 1; | |
7995 | else if (first_compare == prev_nonnote_insn (jump)) | |
7996 | compare_and_branch = 2; | |
7997 | else | |
7998 | return 0; | |
7999 | ||
947851b2 CC |
8000 | { |
8001 | /* If more than one condition is present to control the loop, then | |
5d8fcdcb | 8002 | do not proceed, as this function does not know how to rewrite |
7905cfef JL |
8003 | loop tests with more than one condition. |
8004 | ||
8005 | Look backwards from the first insn in the last comparison | |
8006 | sequence and see if we've got another comparison sequence. */ | |
947851b2 CC |
8007 | |
8008 | rtx jump1; | |
7905cfef | 8009 | if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont) |
c12c6a56 | 8010 | if (GET_CODE (jump1) == JUMP_INSN) |
fd5d5b07 | 8011 | return 0; |
947851b2 CC |
8012 | } |
8013 | ||
b4ad7b23 RS |
8014 | /* Check all of the bivs to see if the compare uses one of them. |
8015 | Skip biv's set more than once because we can't guarantee that | |
8016 | it will be zero on the last iteration. Also skip if the biv is | |
8017 | used between its update and the test insn. */ | |
8018 | ||
14be28e5 | 8019 | for (bl = ivs->list; bl; bl = bl->next) |
b4ad7b23 RS |
8020 | { |
8021 | if (bl->biv_count == 1 | |
6979065c | 8022 | && ! bl->biv->maybe_multiple |
b4ad7b23 RS |
8023 | && bl->biv->dest_reg == XEXP (comparison, 0) |
8024 | && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn, | |
0628fde6 | 8025 | first_compare)) |
b4ad7b23 RS |
8026 | break; |
8027 | } | |
8028 | ||
8029 | if (! bl) | |
8030 | return 0; | |
8031 | ||
8032 | /* Look for the case where the basic induction variable is always | |
8033 | nonnegative, and equals zero on the last iteration. | |
8034 | In this case, add a reg_note REG_NONNEG, which allows the | |
8035 | m68k DBRA instruction to be used. */ | |
8036 | ||
8037 | if (((GET_CODE (comparison) == GT | |
8038 | && GET_CODE (XEXP (comparison, 1)) == CONST_INT | |
8039 | && INTVAL (XEXP (comparison, 1)) == -1) | |
8040 | || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx)) | |
8041 | && GET_CODE (bl->biv->add_val) == CONST_INT | |
8042 | && INTVAL (bl->biv->add_val) < 0) | |
8043 | { | |
8044 | /* The initial value must be greater than 0, and |
8045 | init_val % -dec_value == 0 must hold to ensure that the biv equals |
8046 | zero on the last iteration. */ |
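/* E.g. an initial value of 12 with add_val -4 steps through
   12, 8, 4, 0 and reaches exactly zero on the last iteration,
   whereas an initial value of 10 would step 10, 6, 2, -2 and
   never equal zero.  */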
8047 | ||
8048 | if (GET_CODE (bl->initial_value) == CONST_INT | |
8049 | && INTVAL (bl->initial_value) > 0 | |
db3cf6fb MS |
8050 | && (INTVAL (bl->initial_value) |
8051 | % (-INTVAL (bl->biv->add_val))) == 0) | |
b4ad7b23 RS |
8052 | { |
8053 | /* register always nonnegative, add REG_NOTE to branch */ | |
65b98a02 JW |
8054 | if (! find_reg_note (jump, REG_NONNEG, NULL_RTX)) |
8055 | REG_NOTES (jump) | |
8056 | = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg, | |
8057 | REG_NOTES (jump)); | |
b4ad7b23 RS |
8058 | bl->nonneg = 1; |
8059 | ||
8060 | return 1; | |
8061 | } | |
8062 | ||
8063 | /* If the decrement is 1 and the value was tested as >= 0 before | |
8064 | the loop, then we can safely optimize. */ | |
8065 | for (p = loop_start; p; p = PREV_INSN (p)) | |
8066 | { | |
8067 | if (GET_CODE (p) == CODE_LABEL) | |
8068 | break; | |
8069 | if (GET_CODE (p) != JUMP_INSN) | |
8070 | continue; | |
8071 | ||
0534b804 | 8072 | before_comparison = get_condition_for_loop (loop, p); |
b4ad7b23 RS |
8073 | if (before_comparison |
8074 | && XEXP (before_comparison, 0) == bl->biv->dest_reg | |
8075 | && GET_CODE (before_comparison) == LT | |
8076 | && XEXP (before_comparison, 1) == const0_rtx | |
8077 | && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start) | |
8078 | && INTVAL (bl->biv->add_val) == -1) | |
8079 | { | |
65b98a02 JW |
8080 | if (! find_reg_note (jump, REG_NONNEG, NULL_RTX)) |
8081 | REG_NOTES (jump) | |
8082 | = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg, | |
8083 | REG_NOTES (jump)); | |
b4ad7b23 RS |
8084 | bl->nonneg = 1; |
8085 | ||
8086 | return 1; | |
8087 | } | |
8088 | } | |
8089 | } | |
ef178af3 ZW |
8090 | else if (GET_CODE (bl->biv->add_val) == CONST_INT |
8091 | && INTVAL (bl->biv->add_val) > 0) | |
b4ad7b23 RS |
8092 | { |
8093 | /* Try to change inc to dec, so can apply above optimization. */ | |
8094 | /* Can do this if: | |
8095 | all registers modified are induction variables or invariant, | |
8096 | all memory references have non-overlapping addresses | |
8097 | (obviously true if only one write) | |
8098 | allow 2 insns for the compare/jump at the end of the loop. */ | |
45cc060e JW |
8099 | /* Also, we must avoid any instructions which use both the reversed |
8100 | biv and another biv. Such instructions will fail if the loop is | |
8101 | reversed. We meet this condition by requiring that either | |
8102 | no_use_except_counting is true, or else that there is only | |
8103 | one biv. */ | |
b4ad7b23 RS |
8104 | int num_nonfixed_reads = 0; |
8105 | /* 1 if the iteration var is used only to count iterations. */ | |
8106 | int no_use_except_counting = 0; | |
b418c26e JW |
8107 | /* 1 if the loop has no memory store, or it has a single memory store |
8108 | which is reversible. */ | |
8109 | int reversible_mem_store = 1; | |
b4ad7b23 | 8110 | |
2b701ab7 RH |
8111 | if (bl->giv_count == 0 |
8112 | && !loop->exit_count | |
8113 | && !loop_info->has_multiple_exit_targets) | |
b4ad7b23 RS |
8114 | { |
8115 | rtx bivreg = regno_reg_rtx[bl->regno]; | |
c7b30677 | 8116 | struct iv_class *blt; |
b4ad7b23 RS |
8117 | |
8118 | /* If there are no givs for this biv, and the only exit is the | |
38e01259 | 8119 | fall through at the end of the loop, then |
b4ad7b23 RS |
8120 | see if perhaps there are no uses except to count. */ |
8121 | no_use_except_counting = 1; | |
8122 | for (p = loop_start; p != loop_end; p = NEXT_INSN (p)) | |
2c3c49de | 8123 | if (INSN_P (p)) |
b4ad7b23 RS |
8124 | { |
8125 | rtx set = single_set (p); | |
8126 | ||
8127 | if (set && GET_CODE (SET_DEST (set)) == REG | |
8128 | && REGNO (SET_DEST (set)) == bl->regno) | |
8129 | /* An insn that sets the biv is okay. */ | |
8130 | ; | |
3ec6c6ee NS |
8131 | else if (!reg_mentioned_p (bivreg, PATTERN (p))) |
8132 | /* An insn that doesn't mention the biv is okay. */ | |
8133 | ; | |
8134 | else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end)) | |
8135 | || p == prev_nonnote_insn (loop_end)) | |
59487769 JL |
8136 | { |
8137 | /* If either of these insns uses the biv and sets a pseudo | |
8138 | that has more than one usage, then the biv has uses | |
8139 | other than counting since it's used to derive a value | |
8140 | that is used more than one time. */ | |
84832317 | 8141 | note_stores (PATTERN (p), note_set_pseudo_multiple_uses, |
1ecd860b MH |
8142 | regs); |
8143 | if (regs->multiple_uses) | |
59487769 JL |
8144 | { |
8145 | no_use_except_counting = 0; | |
8146 | break; | |
8147 | } | |
8148 | } | |
3ec6c6ee | 8149 | else |
b4ad7b23 RS |
8150 | { |
8151 | no_use_except_counting = 0; | |
8152 | break; | |
8153 | } | |
8154 | } | |
c7b30677 | 8155 | |
b7fe373b RH |
8156 | /* A biv has uses besides counting if it is used to set |
8157 | another biv. */ | |
c7b30677 | 8158 | for (blt = ivs->list; blt; blt = blt->next) |
b7fe373b RH |
8159 | if (blt->init_set |
8160 | && reg_mentioned_p (bivreg, SET_SRC (blt->init_set))) | |
c7b30677 FS |
8161 | { |
8162 | no_use_except_counting = 0; | |
8163 | break; | |
8164 | } | |
b4ad7b23 RS |
8165 | } |
8166 | ||
c48ba252 | 8167 | if (no_use_except_counting) |
e6fcb60d KH |
8168 | /* No need to worry about MEMs. */ |
8169 | ; | |
afa1738b | 8170 | else if (loop_info->num_mem_sets <= 1) |
c48ba252 R |
8171 | { |
8172 | for (p = loop_start; p != loop_end; p = NEXT_INSN (p)) | |
2c3c49de | 8173 | if (INSN_P (p)) |
0534b804 | 8174 | num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p)); |
c48ba252 R |
8175 | |
8176 | /* If the loop has a single store, and the destination address is | |
8177 | invariant, then we can't reverse the loop, because this address | |
8178 | might then have the wrong value at loop exit. | |
8179 | This would work if the source were invariant as well; in that | |
8180 | case, however, the insn should have been moved out of the loop. */ | |
8181 | ||
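/* A hypothetical instance of the irreversible case described above:

       for (i = 0; i < n; i++)
         a[5] = i;

   The store address is invariant, so after the loop a[5] must hold
   the value from the final iteration; running the biv backwards
   would leave the first iteration's value there instead.  */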
afa1738b | 8182 | if (loop_info->num_mem_sets == 1) |
2d4fde68 R |
8183 | { |
8184 | struct induction *v; | |
8185 | ||
28680540 MM |
8186 | /* If we could prove that each of the memory locations |
8187 | written to was different, then we could reverse the | |
8188 | store -- but we don't presently have any way of | |
8189 | knowing that. */ | |
8190 | reversible_mem_store = 0; | |
2d4fde68 R |
8191 | |
8192 | /* If the store depends on a register that is set after the | |
8193 | store, it depends on the initial value, and is thus not | |
8194 | reversible. */ | |
8195 | for (v = bl->giv; reversible_mem_store && v; v = v->next_iv) | |
8196 | { | |
8197 | if (v->giv_type == DEST_REG | |
8198 | && reg_mentioned_p (v->dest_reg, | |
afa1738b | 8199 | PATTERN (loop_info->first_loop_store_insn)) |
fd5d5b07 | 8200 | && loop_insn_first_p (loop_info->first_loop_store_insn, |
afa1738b | 8201 | v->insn)) |
2d4fde68 R |
8202 | reversible_mem_store = 0; |
8203 | } | |
8204 | } | |
c48ba252 R |
8205 | } |
8206 | else | |
8207 | return 0; | |
b418c26e | 8208 | |
b4ad7b23 RS |
8209 | /* This code only acts for innermost loops. Also it simplifies |
8210 | the memory address check by reversing only loops with | |
8211 | zero or one memory access. | |
8212 | Two memory accesses could involve parts of the same array, | |
c48ba252 R |
8213 | and that can't be reversed. |
8214 | If the biv is used only for counting, then we don't need to worry | |
8215 | about all these things. */ | |
8216 | ||
8217 | if ((num_nonfixed_reads <= 1 | |
576d0b54 | 8218 | && ! loop_info->has_nonconst_call |
62e6ca55 | 8219 | && ! loop_info->has_prefetch |
3c748bb6 | 8220 | && ! loop_info->has_volatile |
c48ba252 | 8221 | && reversible_mem_store |
afa1738b | 8222 | && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets |
28680540 | 8223 | + num_unmoved_movables (loop) + compare_and_branch == insn_count) |
14be28e5 | 8224 | && (bl == ivs->list && bl->next == 0)) |
62e6ca55 | 8225 | || (no_use_except_counting && ! loop_info->has_prefetch)) |
b4ad7b23 | 8226 | { |
b4ad7b23 RS |
8227 | rtx tem; |
8228 | ||
8229 | /* Loop can be reversed. */ | |
8230 | if (loop_dump_stream) | |
8231 | fprintf (loop_dump_stream, "Can reverse loop\n"); | |
8232 | ||
8233 | /* Now check other conditions: | |
e9a25f70 | 8234 | |
956d6950 | 8235 | The increment must be a constant, as must the initial value, |
e6fcb60d | 8236 | and the comparison code must be LT. |
b4ad7b23 RS |
8237 | |
8238 | This test can probably be improved since +/- 1 in the constant | |
8239 | can be obtained by changing LT to LE and vice versa; this is | |
8240 | confusing. */ | |
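/* E.g. for integer bounds, "i <= 9" and "i < 10" are the same test,
   which is why an LE comparison can be folded into the LT handling
   below by bumping the constant by one (the add_adjust tweak).  */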
8241 | ||
e9a25f70 | 8242 | if (comparison |
c48ba252 R |
8243 | /* for constants, LE gets turned into LT */ |
8244 | && (GET_CODE (comparison) == LT | |
8245 | || (GET_CODE (comparison) == LE | |
8246 | && no_use_except_counting))) | |
b4ad7b23 | 8247 | { |
f428f252 | 8248 | HOST_WIDE_INT add_val, add_adjust, comparison_val = 0; |
c48ba252 R |
8249 | rtx initial_value, comparison_value; |
8250 | int nonneg = 0; | |
8251 | enum rtx_code cmp_code; | |
8252 | int comparison_const_width; | |
8253 | unsigned HOST_WIDE_INT comparison_sign_mask; | |
e9a25f70 JL |
8254 | |
8255 | add_val = INTVAL (bl->biv->add_val); | |
c48ba252 | 8256 | comparison_value = XEXP (comparison, 1); |
2c74fb2b AS |
8257 | if (GET_MODE (comparison_value) == VOIDmode) |
8258 | comparison_const_width | |
8259 | = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0))); | |
8260 | else | |
8261 | comparison_const_width | |
8262 | = GET_MODE_BITSIZE (GET_MODE (comparison_value)); | |
c48ba252 R |
8263 | if (comparison_const_width > HOST_BITS_PER_WIDE_INT) |
8264 | comparison_const_width = HOST_BITS_PER_WIDE_INT; | |
8265 | comparison_sign_mask | |
e6fcb60d | 8266 | = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1); |
c48ba252 | 8267 | |
3aa94dc8 JL |
8268 | /* If the comparison value is not a loop invariant, then we |
8269 | cannot reverse this loop. | |
8270 | ||
8271 | ??? If the insns which initialize the comparison value as | |
8272 | a whole compute an invariant result, then we could move | |
8273 | them out of the loop and proceed with loop reversal. */ | |
0534b804 | 8274 | if (! loop_invariant_p (loop, comparison_value)) |
3aa94dc8 JL |
8275 | return 0; |
8276 | ||
c48ba252 R |
8277 | if (GET_CODE (comparison_value) == CONST_INT) |
8278 | comparison_val = INTVAL (comparison_value); | |
e9a25f70 | 8279 | initial_value = bl->initial_value; |
e6fcb60d KH |
8280 | |
8281 | /* Normalize the initial value if it is an integer and | |
a8decb2c JL |
8282 | has no other use except as a counter. This will allow |
8283 | a few more loops to be reversed. */ | |
8284 | if (no_use_except_counting | |
c48ba252 | 8285 | && GET_CODE (comparison_value) == CONST_INT |
a8decb2c | 8286 | && GET_CODE (initial_value) == CONST_INT) |
e9a25f70 JL |
8287 | { |
8288 | comparison_val = comparison_val - INTVAL (bl->initial_value); | |
c48ba252 R |
8289 | /* The code below requires comparison_val to be a multiple |
8290 | of add_val in order to do the loop reversal, so | |
8291 | round up comparison_val to a multiple of add_val. | |
8292 | Since comparison_value is constant, we know that the | |
8293 | current comparison code is LT. */ | |
8294 | comparison_val = comparison_val + add_val - 1; | |
8295 | comparison_val | |
8296 | -= (unsigned HOST_WIDE_INT) comparison_val % add_val; | |
8297 | /* We postpone overflow checks for COMPARISON_VAL here; | |
8298 | even if there is an overflow, we might still be able to | |
8299 | reverse the loop, if converting the loop exit test to | |
8300 | NE is possible. */ | |
8301 | initial_value = const0_rtx; | |
e9a25f70 JL |
8302 | } |
8303 | ||
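/* A worked example with hypothetical values: initial value 3,
   test "i < 10", add_val 4.  Then comparison_val = 10 - 3 = 7,
   rounded up to the next multiple of add_val, 8, and initial_value
   becomes 0.  The normalized biv runs 0, 4 and exits at 8, just as
   the original ran 3, 7 and exited at 11: two iterations each.  */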
c48ba252 R |
8304 | /* First check if we can do a vanilla loop reversal. */ |
8305 | if (initial_value == const0_rtx | |
3c748bb6 MH |
8306 | /* If we have a decrement_and_branch_on_count, |
8307 | prefer the NE test, since this will allow that | |
8308 | instruction to be generated. Note that we must | |
8309 | use a vanilla loop reversal if the biv is used to | |
8310 | calculate a giv or has a non-counting use. */ | |
8311 | #if ! defined (HAVE_decrement_and_branch_until_zero) \ | |
8312 | && defined (HAVE_decrement_and_branch_on_count) | |
a2be868f | 8313 | && (! (add_val == 1 && loop->vtop |
c5cbf81e JL |
8314 | && (bl->biv_count == 0 |
8315 | || no_use_except_counting))) | |
c48ba252 R |
8316 | #endif |
8317 | && GET_CODE (comparison_value) == CONST_INT | |
8318 | /* Now do postponed overflow checks on COMPARISON_VAL. */ | |
8319 | && ! (((comparison_val - add_val) ^ INTVAL (comparison_value)) | |
8320 | & comparison_sign_mask)) | |
8321 | { | |
8322 | /* Register will always be nonnegative, with value | |
8323 | 0 on last iteration */ | |
8324 | add_adjust = add_val; | |
8325 | nonneg = 1; | |
8326 | cmp_code = GE; | |
8327 | } | |
a2be868f | 8328 | else if (add_val == 1 && loop->vtop |
c5cbf81e JL |
8329 | && (bl->biv_count == 0 |
8330 | || no_use_except_counting)) | |
c48ba252 R |
8331 | { |
8332 | add_adjust = 0; | |
8333 | cmp_code = NE; | |
8334 | } | |
8335 | else | |
8336 | return 0; | |
8337 | ||
8338 | if (GET_CODE (comparison) == LE) | |
8339 | add_adjust -= add_val; | |
8340 | ||
e9a25f70 JL |
8341 | /* If the initial value is not zero, or if the comparison |
8342 | value is not an exact multiple of the increment, then we | |
8343 | cannot reverse this loop. */ | |
c48ba252 R |
8344 | if (initial_value == const0_rtx |
8345 | && GET_CODE (comparison_value) == CONST_INT) | |
8346 | { | |
8347 | if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0) | |
8348 | return 0; | |
8349 | } | |
8350 | else | |
8351 | { | |
8352 | if (! no_use_except_counting || add_val != 1) | |
8353 | return 0; | |
8354 | } | |
e9a25f70 | 8355 | |
8ed69d09 R |
8356 | final_value = comparison_value; |
8357 | ||
e9a25f70 JL |
8358 | /* Reset these in case we normalized the initial value |
8359 | and comparison value above. */ | |
8ed69d09 R |
8360 | if (GET_CODE (comparison_value) == CONST_INT |
8361 | && GET_CODE (initial_value) == CONST_INT) | |
8362 | { | |
8363 | comparison_value = GEN_INT (comparison_val); | |
8364 | final_value | |
8365 | = GEN_INT (comparison_val + INTVAL (bl->initial_value)); | |
8366 | } | |
e9a25f70 | 8367 | bl->initial_value = initial_value; |
b4ad7b23 RS |
8368 | |
8369 | /* Save some info needed to produce the new insns. */ | |
8370 | reg = bl->biv->dest_reg; | |
353df065 | 8371 | jump_label = condjump_label (PREV_INSN (loop_end)); |
fd5d5b07 | 8372 | new_add_val = GEN_INT (-INTVAL (bl->biv->add_val)); |
b4ad7b23 | 8373 | |
c48ba252 R |
8374 | /* Set start_value; if this is not a CONST_INT, we need |
8375 | to generate a SUB. | |
8376 | Initialize biv to start_value before loop start. | |
b4ad7b23 RS |
8377 | The old initializing insn will be deleted as a |
8378 | dead store by flow.c. */ | |
c48ba252 R |
8379 | if (initial_value == const0_rtx |
8380 | && GET_CODE (comparison_value) == CONST_INT) | |
8381 | { | |
8382 | start_value = GEN_INT (comparison_val - add_adjust); | |
804a718a | 8383 | loop_insn_hoist (loop, gen_move_insn (reg, start_value)); |
c48ba252 R |
8384 | } |
8385 | else if (GET_CODE (initial_value) == CONST_INT) | |
8386 | { | |
c48ba252 | 8387 | enum machine_mode mode = GET_MODE (reg); |
ef89d648 ZW |
8388 | rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust); |
8389 | rtx add_insn = gen_add3_insn (reg, comparison_value, offset); | |
8390 | ||
8391 | if (add_insn == 0) | |
c48ba252 | 8392 | return 0; |
ef89d648 | 8393 | |
c48ba252 R |
8394 | start_value |
8395 | = gen_rtx_PLUS (mode, comparison_value, offset); | |
ef89d648 | 8396 | loop_insn_hoist (loop, add_insn); |
c48ba252 R |
8397 | if (GET_CODE (comparison) == LE) |
8398 | final_value = gen_rtx_PLUS (mode, comparison_value, | |
8399 | GEN_INT (add_val)); | |
8400 | } | |
8401 | else if (! add_adjust) | |
8402 | { | |
8403 | enum machine_mode mode = GET_MODE (reg); | |
ef89d648 ZW |
8404 | rtx sub_insn = gen_sub3_insn (reg, comparison_value, |
8405 | initial_value); | |
8406 | ||
8407 | if (sub_insn == 0) | |
c48ba252 R |
8408 | return 0; |
8409 | start_value | |
8410 | = gen_rtx_MINUS (mode, comparison_value, initial_value); | |
ef89d648 | 8411 | loop_insn_hoist (loop, sub_insn); |
c48ba252 R |
8412 | } |
8413 | else | |
8414 | /* We could handle the other cases too, but it'll be | |
8415 | better to have a testcase first. */ | |
8416 | return 0; | |
b4ad7b23 | 8417 | |
225a7e3d JL |
8418 | /* We may not have a single insn which can increment a reg, so |
8419 | create a sequence to hold all the insns from expand_inc. */ | |
8420 | start_sequence (); | |
8421 | expand_inc (reg, new_add_val); | |
2f937369 | 8422 | tem = get_insns (); |
e6fcb60d | 8423 | end_sequence (); |
225a7e3d | 8424 | |
86e21212 | 8425 | p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem); |
49ce134f | 8426 | delete_insn (bl->biv->insn); |
e6fcb60d | 8427 | |
b4ad7b23 RS |
8428 | /* Update biv info to reflect its new status. */ |
8429 | bl->biv->insn = p; | |
8430 | bl->initial_value = start_value; | |
8431 | bl->biv->add_val = new_add_val; | |
8432 | ||
5629b16c | 8433 | /* Update loop info. */ |
eb6a3bc0 MH |
8434 | loop_info->initial_value = reg; |
8435 | loop_info->initial_equiv_value = reg; | |
5629b16c MH |
8436 | loop_info->final_value = const0_rtx; |
8437 | loop_info->final_equiv_value = const0_rtx; | |
8438 | loop_info->comparison_value = const0_rtx; | |
8439 | loop_info->comparison_code = cmp_code; | |
8440 | loop_info->increment = new_add_val; | |
8441 | ||
b4ad7b23 RS |
8442 | /* Inc LABEL_NUSES so that delete_insn will |
8443 | not delete the label. */ | |
fd5d5b07 | 8444 | LABEL_NUSES (XEXP (jump_label, 0))++; |
b4ad7b23 RS |
8445 | |
8446 | /* Emit an insn after the end of the loop to set the biv's | |
8447 | proper exit value if it is used anywhere outside the loop. */ | |
0628fde6 | 8448 | if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare)) |
b4ad7b23 | 8449 | || ! bl->init_insn |
b1f21e0a | 8450 | || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn)) |
74411039 | 8451 | loop_insn_sink (loop, gen_load_of_final_value (reg, final_value)); |
b4ad7b23 RS |
8452 | |
8453 | /* Delete compare/branch at end of loop. */ | |
53c17031 | 8454 | delete_related_insns (PREV_INSN (loop_end)); |
0628fde6 | 8455 | if (compare_and_branch == 2) |
53c17031 | 8456 | delete_related_insns (first_compare); |
b4ad7b23 RS |
8457 | |
8458 | /* Add new compare/branch insn at end of loop. */ | |
8459 | start_sequence (); | |
362cc3d4 | 8460 | emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX, |
a06ef755 | 8461 | GET_MODE (reg), 0, |
362cc3d4 | 8462 | XEXP (jump_label, 0)); |
2f937369 | 8463 | tem = get_insns (); |
b4ad7b23 RS |
8464 | end_sequence (); |
8465 | emit_jump_insn_before (tem, loop_end); | |
8466 | ||
a7060368 MH |
8467 | for (tem = PREV_INSN (loop_end); |
8468 | tem && GET_CODE (tem) != JUMP_INSN; | |
8469 | tem = PREV_INSN (tem)) | |
8470 | ; | |
8471 | ||
8472 | if (tem) | |
8473 | JUMP_LABEL (tem) = XEXP (jump_label, 0); | |
8474 | ||
c48ba252 | 8475 | if (nonneg) |
b4ad7b23 | 8476 | { |
c48ba252 R |
8477 | if (tem) |
8478 | { | |
c48ba252 R |
8479 | /* Increment of LABEL_NUSES done above. */ |
8480 | /* Register is now always nonnegative, | |
8481 | so add REG_NONNEG note to the branch. */ | |
65b98a02 | 8482 | REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg, |
c48ba252 R |
8483 | REG_NOTES (tem)); |
8484 | } | |
8485 | bl->nonneg = 1; | |
b4ad7b23 RS |
8486 | } |
8487 | ||
22b452e7 BS |
8488 | /* No insn may reference both the reversed and another biv or it |
8489 | will fail (see comment near the top of the loop reversal | |
8490 | code). | |
8491 | Earlier on, we have verified that the biv has no use except | |
8492 | counting, or it is the only biv in this function. | |
8493 | However, the code that computes no_use_except_counting does | |
8494 | not verify reg notes. It's possible to have an insn that | |
8495 | references another biv, and has a REG_EQUAL note with an | |
8496 | expression based on the reversed biv. To avoid this case, | |
8497 | remove all REG_EQUAL notes based on the reversed biv | |
8498 | here. */ | |
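/* Hypothetical instance: with reversed biv I and a second biv J, an
   insn setting R from J may carry a note REG_EQUAL (minus N I); once
   I counts down instead of up that note is stale, so it is deleted
   by the scan below.  */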
8499 | for (p = loop_start; p != loop_end; p = NEXT_INSN (p)) | |
2c3c49de | 8500 | if (INSN_P (p)) |
22b452e7 BS |
8501 | { |
8502 | rtx *pnote; | |
8503 | rtx set = single_set (p); | |
8504 | /* If this is a set of a GIV based on the reversed biv, any | |
8505 | REG_EQUAL notes should still be correct. */ | |
8506 | if (! set | |
8507 | || GET_CODE (SET_DEST (set)) != REG | |
14be28e5 | 8508 | || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs |
ed5bb68d MH |
8509 | || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT |
8510 | || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg) | |
22b452e7 BS |
8511 | for (pnote = &REG_NOTES (p); *pnote;) | |
8512 | { | |
8513 | if (REG_NOTE_KIND (*pnote) == REG_EQUAL | |
8514 | && reg_mentioned_p (regno_reg_rtx[bl->regno], | |
8515 | XEXP (*pnote, 0))) | |
8516 | *pnote = XEXP (*pnote, 1); | |
8517 | else | |
8518 | pnote = &XEXP (*pnote, 1); | |
8519 | } | |
8520 | } | |
8521 | ||
b4ad7b23 RS |
8522 | /* Mark that this biv has been reversed. Each giv which depends |
8523 | on this biv, and which is also live past the end of the loop | |
8524 | will have to be fixed up. */ | |
8525 | ||
8526 | bl->reversed = 1; | |
8527 | ||
8528 | if (loop_dump_stream) | |
b50cb11f MH |
8529 | { |
8530 | fprintf (loop_dump_stream, "Reversed loop"); | |
8531 | if (bl->nonneg) | |
8532 | fprintf (loop_dump_stream, " and added reg_nonneg\n"); | |
8533 | else | |
8534 | fprintf (loop_dump_stream, "\n"); | |
8535 | } | |
b4ad7b23 RS |
8536 | |
8537 | return 1; | |
8538 | } | |
8539 | } | |
8540 | } | |
8541 | ||
8542 | return 0; | |
8543 | } | |
8544 | \f | |
8545 | /* Verify whether the biv BL appears to be eliminable, | |
8546 | based on the insns in the loop that refer to it. | |
b4ad7b23 | 8547 | |
cc2902df | 8548 | If ELIMINATE_P is nonzero, actually do the elimination. |
b4ad7b23 RS |
8549 | |
8550 | THRESHOLD and INSN_COUNT are from loop_optimize and are used to | |
8551 | determine whether invariant insns should be placed inside or at the | |
8552 | start of the loop. */ | |
8553 | ||
8554 | static int | |
0c20a65f AJ |
8555 | maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl, |
8556 | int eliminate_p, int threshold, int insn_count) | |
b4ad7b23 | 8557 | { |
ed5bb68d | 8558 | struct loop_ivs *ivs = LOOP_IVS (loop); |
b4ad7b23 | 8559 | rtx reg = bl->biv->dest_reg; |
bd5a664e | 8560 | rtx p; |
b4ad7b23 RS |
8561 | |
8562 | /* Scan all insns in the loop, stopping if we find one that uses the | |
8563 | biv in a way that we cannot eliminate. */ | |
8564 | ||
96a45535 | 8565 | for (p = loop->start; p != loop->end; p = NEXT_INSN (p)) |
b4ad7b23 RS |
8566 | { |
8567 | enum rtx_code code = GET_CODE (p); | |
96a45535 MH |
8568 | basic_block where_bb = 0; |
8569 | rtx where_insn = threshold >= insn_count ? 0 : p; | |
039baf9e | 8570 | rtx note; |
b4ad7b23 | 8571 | |
fdb1833a R |
8572 | /* If this is a libcall that sets a giv, skip ahead to its end. */ |
8573 | if (GET_RTX_CLASS (code) == 'i') | |
8574 | { | |
039baf9e | 8575 | note = find_reg_note (p, REG_LIBCALL, NULL_RTX); |
fdb1833a R |
8576 | |
8577 | if (note) | |
8578 | { | |
8579 | rtx last = XEXP (note, 0); | |
8580 | rtx set = single_set (last); | |
8581 | ||
8582 | if (set && GET_CODE (SET_DEST (set)) == REG) | |
8583 | { | |
770ae6cc | 8584 | unsigned int regno = REGNO (SET_DEST (set)); |
fdb1833a | 8585 | |
86fee241 | 8586 | if (regno < ivs->n_regs |
ed5bb68d MH |
8587 | && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT |
8588 | && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg) | |
fdb1833a R |
8589 | p = last; |
8590 | } | |
8591 | } | |
8592 | } | |
039baf9e EB |
8593 | |
8594 | /* Closely examine the insn if the biv is mentioned. */ | |
b4ad7b23 RS |
8595 | if ((code == INSN || code == JUMP_INSN || code == CALL_INSN) |
8596 | && reg_mentioned_p (reg, PATTERN (p)) | |
0534b804 | 8597 | && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl, |
96a45535 | 8598 | eliminate_p, where_bb, where_insn)) |
b4ad7b23 RS |
8599 | { |
8600 | if (loop_dump_stream) | |
8601 | fprintf (loop_dump_stream, | |
8602 | "Cannot eliminate biv %d: biv used in insn %d.\n", | |
8603 | bl->regno, INSN_UID (p)); | |
8604 | break; | |
8605 | } | |
039baf9e EB |
8606 | |
8607 | /* If we are eliminating, kill REG_EQUAL notes mentioning the biv. */ | |
8608 | if (eliminate_p | |
8609 | && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX | |
8610 | && reg_mentioned_p (reg, XEXP (note, 0))) | |
8611 | remove_note (p, note); | |
b4ad7b23 RS |
8612 | } |
8613 | ||
96a45535 | 8614 | if (p == loop->end) |
b4ad7b23 RS |
8615 | { |
8616 | if (loop_dump_stream) | |
8617 | fprintf (loop_dump_stream, "biv %d %s eliminated.\n", | |
8618 | bl->regno, eliminate_p ? "was" : "can be"); | |
8619 | return 1; | |
8620 | } | |
8621 | ||
8622 | return 0; | |
8623 | } | |
8624 | \f | |
a6207a2b | 8625 | /* INSN and REFERENCE are instructions in the same insn chain. |
cc2902df | 8626 | Return nonzero if INSN is first. */ |
a6207a2b | 8627 | |
c99f8c2a | 8628 | int |
0c20a65f | 8629 | loop_insn_first_p (rtx insn, rtx reference) |
a6207a2b | 8630 | { |
f38cbf0f R |
8631 | rtx p, q; |
8632 | ||
e6fcb60d | 8633 | for (p = insn, q = reference;;) |
f38cbf0f R |
8634 | { |
8635 | /* Test for `not first' first, so that INSN == REFERENCE yields | |
8636 | not first. */ | |
8637 | if (q == insn || ! p) | |
e11e816e | 8638 | return 0; |
f38cbf0f | 8639 | if (p == reference || ! q) |
e11e816e | 8640 | return 1; |
f38cbf0f | 8641 | |
7c2772f1 R |
8642 | /* Either of P or Q might be a NOTE. Notes have the same LUID as the |
8643 | previous insn, hence the <= comparison below does not work if | |
8644 | P is a note. */ | |
f38cbf0f | 8645 | if (INSN_UID (p) < max_uid_for_loop |
7c2772f1 R |
8646 | && INSN_UID (q) < max_uid_for_loop |
8647 | && GET_CODE (p) != NOTE) | |
8648 | return INSN_LUID (p) <= INSN_LUID (q); | |
f38cbf0f | 8649 | |
7c2772f1 R |
8650 | if (INSN_UID (p) >= max_uid_for_loop |
8651 | || GET_CODE (p) == NOTE) | |
f38cbf0f R |
8652 | p = NEXT_INSN (p); |
8653 | if (INSN_UID (q) >= max_uid_for_loop) | |
8654 | q = NEXT_INSN (q); | |
8655 | } | |
a6207a2b R |
8656 | } |
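/* E.g. a NOTE emitted right after insn A shares A's LUID, so with
   P the note and Q == A the plain LUID test would claim P comes
   first even though it follows A; the loop above therefore steps
   P past notes before trusting the LUID comparison.  */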
8657 | ||
cc2902df | 8658 | /* We are trying to eliminate BIV in INSN using GIV. Return nonzero if |
a6207a2b R |
8659 | the offset that we have to take into account due to auto-increment / |
8660 | div derivation is zero. */ | |
8661 | static int | |
0c20a65f AJ |
8662 | biv_elimination_giv_has_0_offset (struct induction *biv, |
8663 | struct induction *giv, rtx insn) | |
a6207a2b R |
8664 | { |
8665 | /* If the giv V had the auto-inc address optimization applied | |
8666 | to it, and INSN occurs between the giv insn and the biv | |
8667 | insn, then we'd have to adjust the value used here. | |
8668 | This is rare, so we don't bother to make this possible. */ | |
8669 | if (giv->auto_inc_opt | |
8670 | && ((loop_insn_first_p (giv->insn, insn) | |
8671 | && loop_insn_first_p (insn, biv->insn)) | |
8672 | || (loop_insn_first_p (biv->insn, insn) | |
8673 | && loop_insn_first_p (insn, giv->insn)))) | |
8674 | return 0; | |
8675 | ||
a6207a2b R |
8676 | return 1; |
8677 | } | |
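/* Sketch of the rare case rejected above (hypothetical ordering):
   giv insn G ... INSN ... biv insn B.  If G was combined with an
   auto-increment, the giv register holds its post-increment value
   at INSN, so using it there would require an offset correction
   that this pass does not compute.  */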
8678 | ||
b4ad7b23 RS |
8679 | /* If BL appears in X (part of the pattern of INSN), see if we can |
8680 | eliminate its use. If so, return 1. If not, return 0. | |
8681 | ||
8682 | If BIV does not appear in X, return 1. | |
8683 | ||
cc2902df | 8684 | If ELIMINATE_P is nonzero, actually do the elimination. |
96a45535 MH |
8685 | WHERE_INSN/WHERE_BB indicate where extra insns should be added. |
8686 | Depending on how many items have been moved out of the loop, it | |
cc2902df | 8687 | will either be before INSN (when WHERE_INSN is nonzero) or at the |
96a45535 | 8688 | start of the loop (when WHERE_INSN is zero). */ |
b4ad7b23 RS |
8689 | |
8690 | static int | |
0c20a65f AJ |
8691 | maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn, |
8692 | struct iv_class *bl, int eliminate_p, | |
8693 | basic_block where_bb, rtx where_insn) | |
b4ad7b23 RS |
8694 | { |
8695 | enum rtx_code code = GET_CODE (x); | |
8696 | rtx reg = bl->biv->dest_reg; | |
8697 | enum machine_mode mode = GET_MODE (reg); | |
8698 | struct induction *v; | |
51723711 KG |
8699 | rtx arg, tem; |
8700 | #ifdef HAVE_cc0 | |
8701 | rtx new; | |
8702 | #endif | |
b4ad7b23 | 8703 | int arg_operand; |
6f7d635c | 8704 | const char *fmt; |
b4ad7b23 RS |
8705 | int i, j; |
8706 | ||
8707 | switch (code) | |
8708 | { | |
8709 | case REG: | |
8710 | /* If we haven't already been able to do something with this BIV, | |
8711 | we can't eliminate it. */ | |
8712 | if (x == reg) | |
8713 | return 0; | |
8714 | return 1; | |
8715 | ||
8716 | case SET: | |
8717 | /* If this sets the BIV, it is not a problem. */ | |
8718 | if (SET_DEST (x) == reg) | |
8719 | return 1; | |
8720 | ||
8721 | /* If this is an insn that defines a giv, it is also ok because | |
8722 | it will go away when the giv is reduced. */ | |
8723 | for (v = bl->giv; v; v = v->next_iv) | |
8724 | if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg) | |
8725 | return 1; | |
8726 | ||
8727 | #ifdef HAVE_cc0 | |
8728 | if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg) | |
8729 | { | |
8730 | /* Can replace with any giv that was reduced and | |
8731 | that has (MULT_VAL != 0) and (ADD_VAL == 0). | |
fbdc6da8 RK |
8732 | Require a constant for MULT_VAL, so we know it's nonzero. |
8733 | ??? We disable this optimization to avoid potential | |
8734 | overflows. */ | |
b4ad7b23 RS |
8735 | |
8736 | for (v = bl->giv; v; v = v->next_iv) | |
3508c681 | 8737 | if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx |
b4ad7b23 | 8738 | && v->add_val == const0_rtx |
453331a3 | 8739 | && ! v->ignore && ! v->maybe_dead && v->always_computable |
fbdc6da8 RK |
8740 | && v->mode == mode |
8741 | && 0) | |
b4ad7b23 | 8742 | { |
a6207a2b | 8743 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
8744 | continue; |
8745 | ||
b4ad7b23 RS |
8746 | if (! eliminate_p) |
8747 | return 1; | |
8748 | ||
8749 | /* If the giv has the opposite direction of change, | |
8750 | then reverse the comparison. */ | |
8751 | if (INTVAL (v->mult_val) < 0) | |
38a448ca RH |
8752 | new = gen_rtx_COMPARE (GET_MODE (v->new_reg), |
8753 | const0_rtx, v->new_reg); | |
b4ad7b23 RS |
8754 | else |
8755 | new = v->new_reg; | |
8756 | ||
8757 | /* We can probably test that giv's reduced reg. */ | |
8758 | if (validate_change (insn, &SET_SRC (x), new, 0)) | |
8759 | return 1; | |
8760 | } | |
8761 | ||
8762 | /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0); | |
8763 | replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL). | |
fbdc6da8 RK |
8764 | Require a constant for MULT_VAL, so we know it's nonzero. |
8765 | ??? Do this only if ADD_VAL is a pointer to avoid a potential | |
8766 | overflow problem. */ | |
b4ad7b23 RS |
8767 | |
8768 | for (v = bl->giv; v; v = v->next_iv) | |
e6fcb60d KH |
8769 | if (GET_CODE (v->mult_val) == CONST_INT |
8770 | && v->mult_val != const0_rtx | |
453331a3 | 8771 | && ! v->ignore && ! v->maybe_dead && v->always_computable |
fbdc6da8 RK |
8772 | && v->mode == mode |
8773 | && (GET_CODE (v->add_val) == SYMBOL_REF | |
8774 | || GET_CODE (v->add_val) == LABEL_REF | |
8775 | || GET_CODE (v->add_val) == CONST | |
8776 | || (GET_CODE (v->add_val) == REG | |
3502dc9c | 8777 | && REG_POINTER (v->add_val)))) |
b4ad7b23 | 8778 | { |
a6207a2b | 8779 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
8780 | continue; |
8781 | ||
b4ad7b23 RS |
8782 | if (! eliminate_p) |
8783 | return 1; | |
8784 | ||
8785 | /* If the giv has the opposite direction of change, | |
8786 | then reverse the comparison. */ | |
8787 | if (INTVAL (v->mult_val) < 0) | |
38a448ca RH |
8788 | new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val), |
8789 | v->new_reg); | |
b4ad7b23 | 8790 | else |
38a448ca RH |
8791 | new = gen_rtx_COMPARE (VOIDmode, v->new_reg, |
8792 | copy_rtx (v->add_val)); | |
b4ad7b23 RS |
8793 | |
8794 | /* Replace biv with the giv's reduced register. */ | |
8795 | update_reg_last_use (v->add_val, insn); | |
8796 | if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0)) | |
8797 | return 1; | |
8798 | ||
8799 | /* Insn doesn't support that constant or invariant. Copy it | |
8800 | into a register (it will be a loop invariant.) */ | |
8801 | tem = gen_reg_rtx (GET_MODE (v->new_reg)); | |
8802 | ||
86e21212 MH |
8803 | loop_insn_emit_before (loop, 0, where_insn, |
8804 | gen_move_insn (tem, | |
8805 | copy_rtx (v->add_val))); | |
b4ad7b23 | 8806 | |
2ae3dcac | 8807 | /* Substitute the new register for its invariant value in |
e6fcb60d | 8808 | the compare expression. */ |
2ae3dcac RK |
8809 | XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem; |
8810 | if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0)) | |
b4ad7b23 RS |
8811 | return 1; |
8812 | } | |
8813 | } | |
8814 | #endif | |
8815 | break; | |
8816 | ||
8817 | case COMPARE: | |
8818 | case EQ: case NE: | |
8819 | case GT: case GE: case GTU: case GEU: | |
8820 | case LT: case LE: case LTU: case LEU: | |
8821 | /* See if either argument is the biv. */ | |
8822 | if (XEXP (x, 0) == reg) | |
8823 | arg = XEXP (x, 1), arg_operand = 1; | |
8824 | else if (XEXP (x, 1) == reg) | |
8825 | arg = XEXP (x, 0), arg_operand = 0; | |
8826 | else | |
8827 | break; | |
8828 | ||
8829 | if (CONSTANT_P (arg)) | |
8830 | { | |
8831 | /* First try to replace with any giv that has constant positive | |
8832 | mult_val and constant add_val. We might be able to support | |
8833 | negative mult_val, but it seems complex to do it in general. */ | |
8834 | ||
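/* A hypothetical instance of the replacement attempted below: for
   biv I with giv G = 4*I + BASE (mult_val 4, add_val BASE), the
   test "I < 100" can become "G < 4*100 + BASE", with the new bound
   computed once before the loop.  */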
8835 | for (v = bl->giv; v; v = v->next_iv) | |
e6fcb60d KH |
8836 | if (GET_CODE (v->mult_val) == CONST_INT |
8837 | && INTVAL (v->mult_val) > 0 | |
fbdc6da8 RK |
8838 | && (GET_CODE (v->add_val) == SYMBOL_REF |
8839 | || GET_CODE (v->add_val) == LABEL_REF | |
8840 | || GET_CODE (v->add_val) == CONST | |
8841 | || (GET_CODE (v->add_val) == REG | |
3502dc9c | 8842 | && REG_POINTER (v->add_val))) |
453331a3 | 8843 | && ! v->ignore && ! v->maybe_dead && v->always_computable |
b4ad7b23 RS |
8844 | && v->mode == mode) |
8845 | { | |
a6207a2b | 8846 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
8847 | continue; |
8848 | ||
ce60bf25 EB |
8849 | /* Don't eliminate if the linear combination that makes up |
8850 | the giv overflows when it is applied to ARG. */ | |
8851 | if (GET_CODE (arg) == CONST_INT) | |
8852 | { | |
8853 | rtx add_val; | |
8854 | ||
8855 | if (GET_CODE (v->add_val) == CONST_INT) | |
8856 | add_val = v->add_val; | |
8857 | else | |
8858 | add_val = const0_rtx; | |
8859 | ||
8860 | if (const_mult_add_overflow_p (arg, v->mult_val, | |
8861 | add_val, mode, 1)) | |
8862 | continue; | |
8863 | } | |
8864 | ||
b4ad7b23 RS |
8865 | if (! eliminate_p) |
8866 | return 1; | |
8867 | ||
8868 | /* Replace biv with the giv's reduced reg. */ | |
e6fcb60d | 8869 | validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1); |
b4ad7b23 RS |
8870 | |
8871 | /* If all constants are actually constant integers and | |
8872 | the derived constant can be directly placed in the COMPARE, | |
8873 | do so. */ | |
8874 | if (GET_CODE (arg) == CONST_INT | |
3508c681 JH |
8875 | && GET_CODE (v->add_val) == CONST_INT) |
8876 | { | |
ce60bf25 EB |
8877 | tem = expand_mult_add (arg, NULL_RTX, v->mult_val, |
8878 | v->add_val, mode, 1); | |
3508c681 JH |
8879 | } |
8880 | else | |
8881 | { | |
8882 | /* Otherwise, load it into a register. */ | |
8883 | tem = gen_reg_rtx (mode); | |
96a45535 MH |
8884 | loop_iv_add_mult_emit_before (loop, arg, |
8885 | v->mult_val, v->add_val, | |
8886 | tem, where_bb, where_insn); | |
3508c681 | 8887 | } |
ce60bf25 EB |
8888 | |
8889 | validate_change (insn, &XEXP (x, arg_operand), tem, 1); | |
8890 | ||
3508c681 | 8891 | if (apply_change_group ()) |
b4ad7b23 | 8892 | return 1; |
b4ad7b23 | 8893 | } |
e6fcb60d | 8894 | |
b4ad7b23 | 8895 | /* Look for giv with positive constant mult_val and nonconst add_val. |
e6fcb60d | 8896 | Insert insns to calculate new compare value. |
fbdc6da8 | 8897 | ??? Turn this off due to possible overflow. */ |
b4ad7b23 RS |
8898 | |
8899 | for (v = bl->giv; v; v = v->next_iv) | |
e6fcb60d KH |
8900 | if (GET_CODE (v->mult_val) == CONST_INT |
8901 | && INTVAL (v->mult_val) > 0 | |
453331a3 | 8902 | && ! v->ignore && ! v->maybe_dead && v->always_computable |
fbdc6da8 RK |
8903 | && v->mode == mode |
8904 | && 0) | |
b4ad7b23 RS |
8905 | { |
8906 | rtx tem; | |
8907 | ||
a6207a2b | 8908 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
8909 | continue; |
8910 | ||
b4ad7b23 RS |
8911 | if (! eliminate_p) |
8912 | return 1; | |
8913 | ||
8914 | tem = gen_reg_rtx (mode); | |
8915 | ||
8916 | /* Replace biv with giv's reduced register. */ | |
8917 | validate_change (insn, &XEXP (x, 1 - arg_operand), | |
8918 | v->new_reg, 1); | |
8919 | ||
8920 | /* Compute value to compare against. */ | |
6b8c9327 | 8921 | loop_iv_add_mult_emit_before (loop, arg, |
96a45535 MH |
8922 | v->mult_val, v->add_val, |
8923 | tem, where_bb, where_insn); | |
b4ad7b23 RS |
8924 | /* Use it in this insn. */ |
8925 | validate_change (insn, &XEXP (x, arg_operand), tem, 1); | |
8926 | if (apply_change_group ()) | |
8927 | return 1; | |
8928 | } | |
8929 | } | |
8930 | else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM) | |
8931 | { | |
0534b804 | 8932 | if (loop_invariant_p (loop, arg) == 1) |
b4ad7b23 RS |
8933 | { |
8934 | /* Look for giv with constant positive mult_val and nonconst | |
e6fcb60d | 8935 | add_val. Insert insns to compute new compare value. |
fbdc6da8 | 8936 | ??? Turn this off due to possible overflow. */ |
b4ad7b23 RS |
8937 | |
8938 | for (v = bl->giv; v; v = v->next_iv) | |
3508c681 | 8939 | if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0 |
453331a3 | 8940 | && ! v->ignore && ! v->maybe_dead && v->always_computable |
fbdc6da8 RK |
8941 | && v->mode == mode |
8942 | && 0) | |
b4ad7b23 RS |
8943 | { |
8944 | rtx tem; | |
8945 | ||
a6207a2b | 8946 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
8947 | continue; |
8948 | ||
b4ad7b23 RS |
8949 | if (! eliminate_p) |
8950 | return 1; | |
8951 | ||
8952 | tem = gen_reg_rtx (mode); | |
8953 | ||
8954 | /* Replace biv with giv's reduced register. */ | |
8955 | validate_change (insn, &XEXP (x, 1 - arg_operand), | |
8956 | v->new_reg, 1); | |
8957 | ||
8958 | /* Compute value to compare against. */ | |
6b8c9327 | 8959 | loop_iv_add_mult_emit_before (loop, arg, |
96a45535 MH |
8960 | v->mult_val, v->add_val, |
8961 | tem, where_bb, where_insn); | |
b4ad7b23 RS |
8962 | validate_change (insn, &XEXP (x, arg_operand), tem, 1); |
8963 | if (apply_change_group ()) | |
8964 | return 1; | |
8965 | } | |
8966 | } | |
8967 | ||
8968 | /* This code has problems. Basically, you can't know when | |
8969 | seeing if we will eliminate BL, whether a particular giv | |
8970 | of ARG will be reduced. If it isn't going to be reduced, | |
8971 | we can't eliminate BL. We can try forcing it to be reduced, | |
8972 | but that can generate poor code. | |
8973 | ||
8974 | The problem is that the benefit of reducing TV, below should | |
8975 | be increased if BL can actually be eliminated, but this means | |
8976 | we might have to do a topological sort of the order in which | |
8977 | we try to process biv. It doesn't seem worthwhile to do | |
8978 | this sort of thing now. */ | |
8979 | ||
8980 | #if 0 | |
8981 | /* Otherwise the reg compared with had better be a biv. */ | |
8982 | if (GET_CODE (arg) != REG | |
ed5bb68d | 8983 | || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT) |
b4ad7b23 RS |
8984 | return 0; |
8985 | ||
8986 | /* Look for a pair of givs, one for each biv, | |
8987 | with identical coefficients. */ | |
8988 | for (v = bl->giv; v; v = v->next_iv) | |
8989 | { | |
8990 | struct induction *tv; | |
8991 | ||
8992 | if (v->ignore || v->maybe_dead || v->mode != mode) | |
8993 | continue; | |
8994 | ||
6b8c9327 | 8995 | for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv; |
8b634749 | 8996 | tv = tv->next_iv) |
b4ad7b23 RS |
8997 | if (! tv->ignore && ! tv->maybe_dead |
8998 | && rtx_equal_p (tv->mult_val, v->mult_val) | |
8999 | && rtx_equal_p (tv->add_val, v->add_val) | |
9000 | && tv->mode == mode) | |
9001 | { | |
a6207a2b | 9002 | if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn)) |
8516af93 JW |
9003 | continue; |
9004 | ||
b4ad7b23 RS |
9005 | if (! eliminate_p) |
9006 | return 1; | |
9007 | ||
9008 | /* Replace biv with its giv's reduced reg. */ | |
e6fcb60d | 9009 | XEXP (x, 1 - arg_operand) = v->new_reg; |
b4ad7b23 RS |
9010 | /* Replace other operand with the other giv's |
9011 | reduced reg. */ | |
9012 | XEXP (x, arg_operand) = tv->new_reg; | |
9013 | return 1; | |
9014 | } | |
9015 | } | |
9016 | #endif | |
9017 | } | |
9018 | ||
9019 | /* If we get here, the biv can't be eliminated. */ | |
9020 | return 0; | |
9021 | ||
9022 | case MEM: | |
9023 | /* If this address is a DEST_ADDR giv, it doesn't matter if the | |
9024 | biv is used in it, since it will be replaced. */ | |
9025 | for (v = bl->giv; v; v = v->next_iv) | |
9026 | if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0)) | |
9027 | return 1; | |
9028 | break; | |
e9a25f70 JL |
9029 | |
9030 | default: | |
9031 | break; | |
b4ad7b23 RS |
9032 | } |
9033 | ||
9034 | /* See if any subexpression fails elimination. */ | |
9035 | fmt = GET_RTX_FORMAT (code); | |
9036 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
9037 | { | |
9038 | switch (fmt[i]) | |
9039 | { | |
9040 | case 'e': | |
e6fcb60d | 9041 | if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl, |
96a45535 | 9042 | eliminate_p, where_bb, where_insn)) |
b4ad7b23 RS |
9043 | return 0; |
9044 | break; | |
9045 | ||
9046 | case 'E': | |
9047 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
0534b804 | 9048 | if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl, |
96a45535 | 9049 | eliminate_p, where_bb, where_insn)) |
b4ad7b23 RS |
9050 | return 0; |
9051 | break; | |
9052 | } | |
9053 | } | |
9054 | ||
9055 | return 1; | |
e6fcb60d | 9056 | } |
b4ad7b23 RS |
9057 | \f |
9058 | /* Return nonzero if the last use of REG | |
9059 | is in an insn following INSN in the same basic block. */ | |
9060 | ||
9061 | static int | |
0c20a65f | 9062 | last_use_this_basic_block (rtx reg, rtx insn) |
b4ad7b23 RS |
9063 | { |
9064 | rtx n; | |
9065 | for (n = insn; | |
9066 | n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN; | |
9067 | n = NEXT_INSN (n)) | |
9068 | { | |
b1f21e0a | 9069 | if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n)) |
b4ad7b23 RS |
9070 | return 1; |
9071 | } | |
9072 | return 0; | |
9073 | } | |
9074 | \f | |
9075 | /* Called via `note_stores' to record the initial value of a biv. Here we | |
9076 | just record the location of the set and process it later. */ | |
9077 | ||
9078 | static void | |
0c20a65f | 9079 | record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED) |
b4ad7b23 | 9080 | { |
ed5bb68d | 9081 | struct loop_ivs *ivs = (struct loop_ivs *) data; |
b4ad7b23 RS |
9082 | struct iv_class *bl; |
9083 | ||
9084 | if (GET_CODE (dest) != REG | |
86fee241 | 9085 | || REGNO (dest) >= ivs->n_regs |
ed5bb68d | 9086 | || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT) |
b4ad7b23 RS |
9087 | return; |
9088 | ||
8b634749 | 9089 | bl = REG_IV_CLASS (ivs, REGNO (dest)); |
b4ad7b23 RS |
9090 | |
9091 | /* If this is the first set found, record it. */ | |
9092 | if (bl->init_insn == 0) | |
9093 | { | |
9094 | bl->init_insn = note_insn; | |
9095 | bl->init_set = set; | |
9096 | } | |
9097 | } | |
9098 | \f | |
9099 | /* If any of the registers in X are "old" and currently have a last use earlier | |
9100 | than INSN, update them to have a last use of INSN. Their actual last use | |
9101 | will be the previous insn but it will not have a valid uid_luid so we can't | |
96a45535 | 9102 | use it. X must be a source expression only. */ |
b4ad7b23 RS |
9103 | |
9104 | static void | |
0c20a65f | 9105 | update_reg_last_use (rtx x, rtx insn) |
b4ad7b23 RS |
9106 | { |
9107 | /* Check for the case where INSN does not have a valid luid. In this case, | |
9108 | there is no need to modify the regno_last_uid, as this can only happen | |
9109 | when code is inserted after the loop_end to set a pseudo's final value, | |
6b8c9327 AJ |
9110 | and hence this insn will never be the last use of x. |
9111 | ???? This comment is not correct. See for example loop_givs_reduce. | |
96a45535 | 9112 | This may insert an insn before another new insn. */ |
b4ad7b23 RS |
9113 | if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop |
9114 | && INSN_UID (insn) < max_uid_for_loop | |
8529a489 | 9115 | && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn)) |
96a45535 MH |
9116 | { |
9117 | REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn); | |
9118 | } | |
b4ad7b23 RS |
9119 | else |
9120 | { | |
b3694847 SS |
9121 | int i, j; |
9122 | const char *fmt = GET_RTX_FORMAT (GET_CODE (x)); | |
b4ad7b23 RS |
9123 | for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) |
9124 | { | |
9125 | if (fmt[i] == 'e') | |
9126 | update_reg_last_use (XEXP (x, i), insn); | |
9127 | else if (fmt[i] == 'E') | |
9128 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) | |
9129 | update_reg_last_use (XVECEXP (x, i, j), insn); | |
9130 | } | |
9131 | } | |
9132 | } | |
9133 | \f | |
a49a6a68 JW |
9134 | /* Given an insn INSN and condition COND, return the condition in a |
9135 | canonical form to simplify testing by callers. Specifically: | |
b4ad7b23 RS |
9136 | |
9137 | (1) The code will always be a comparison operation (EQ, NE, GT, etc.). | |
9138 | (2) Both operands will be machine operands; (cc0) will have been replaced. | |
9139 | (3) If an operand is a constant, it will be the second operand. | |
9140 | (4) (LE x const) will be replaced with (LT x <const+1>) and similarly | |
a49a6a68 JW |
9141 | for GE, GEU, and LEU. |
9142 | ||
9143 | If the condition cannot be understood, or is an inequality floating-point | |
9144 | comparison which needs to be reversed, 0 will be returned. | |
9145 | ||
cc2902df | 9146 | If REVERSE is nonzero, then reverse the condition prior to canonicalizing it. | |
a49a6a68 | 9147 | |
cc2902df | 9148 | If EARLIEST is nonzero, it is a pointer to a place where the earliest |
a49a6a68 JW |
9149 | insn used in locating the condition was found. If a replacement test |
9150 | of the condition is desired, it should be placed in front of that | |
10f13594 RH |
9151 | insn and we will be sure that the inputs are still valid. |
9152 | ||
cc2902df | 9153 | If WANT_REG is nonzero, we wish the condition to be relative to that |
10f13594 RH |
9154 | register, if possible. Therefore, do not canonicalize the condition |
9155 | further. */ | |
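/* For example, (lt (const_int 0) (reg 60)) comes back as
   (gt (reg 60) (const_int 0)): the code is a plain comparison, any
   (cc0) operand has been traced back to the real compare, and the
   constant has been moved to the second position.  (The register
   number is hypothetical.)  */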
b4ad7b23 RS |
9156 | |
9157 | rtx | |
0c20a65f AJ |
9158 | canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest, |
9159 | rtx want_reg) | |
b4ad7b23 RS |
9160 | { |
9161 | enum rtx_code code; | |
a49a6a68 | 9162 | rtx prev = insn; |
b4ad7b23 RS |
9163 | rtx set; |
9164 | rtx tem; | |
9165 | rtx op0, op1; | |
9166 | int reverse_code = 0; | |
f283421d | 9167 | enum machine_mode mode; |
b4ad7b23 | 9168 | |
a49a6a68 JW |
9169 | code = GET_CODE (cond); |
9170 | mode = GET_MODE (cond); | |
9171 | op0 = XEXP (cond, 0); | |
9172 | op1 = XEXP (cond, 1); | |
b4ad7b23 | 9173 | |
a49a6a68 | 9174 | if (reverse) |
c9212f33 JH |
9175 | code = reversed_comparison_code (cond, insn); |
9176 | if (code == UNKNOWN) | |
9177 | return 0; | |
b4ad7b23 RS |
9178 | |
9179 | if (earliest) | |
a49a6a68 | 9180 | *earliest = insn; |
b4ad7b23 RS |
9181 | |
9182 | /* If we are comparing a register with zero, see if the register is set | |
9183 | in the previous insn to a COMPARE or a comparison operation. Perform | |
9184 | the same tests as a function of STORE_FLAG_VALUE as find_comparison_args | |
9185 | in cse.c */ | |
9186 | ||
10f13594 | 9187 | while (GET_RTX_CLASS (code) == '<' |
fd5d5b07 | 9188 | && op1 == CONST0_RTX (GET_MODE (op0)) |
10f13594 | 9189 | && op0 != want_reg) |
b4ad7b23 | 9190 | { |
cc2902df | 9191 | /* Set nonzero when we find something of interest. */ |
b4ad7b23 RS |
9192 | rtx x = 0; |
9193 | ||
9194 | #ifdef HAVE_cc0 | |
9195 | /* If comparison with cc0, import actual comparison from compare | |
9196 | insn. */ | |
9197 | if (op0 == cc0_rtx) | |
9198 | { | |
9199 | if ((prev = prev_nonnote_insn (prev)) == 0 | |
9200 | || GET_CODE (prev) != INSN | |
9201 | || (set = single_set (prev)) == 0 | |
9202 | || SET_DEST (set) != cc0_rtx) | |
9203 | return 0; | |
9204 | ||
9205 | op0 = SET_SRC (set); | |
9206 | op1 = CONST0_RTX (GET_MODE (op0)); | |
9207 | if (earliest) | |
9208 | *earliest = prev; | |
9209 | } | |
9210 | #endif | |
9211 | ||
9212 | /* If this is a COMPARE, pick up the two things being compared. */ | |
9213 | if (GET_CODE (op0) == COMPARE) | |
9214 | { | |
9215 | op1 = XEXP (op0, 1); | |
9216 | op0 = XEXP (op0, 0); | |
9217 | continue; | |
9218 | } | |
9219 | else if (GET_CODE (op0) != REG) | |
9220 | break; | |
9221 | ||
9222 | /* Go back to the previous insn. Stop if it is not an INSN. We also | |
9223 | stop if it isn't a single set or if it has a REG_INC note because | |
9224 | we don't want to bother dealing with it. */ | |
9225 | ||
9226 | if ((prev = prev_nonnote_insn (prev)) == 0 | |
9227 | || GET_CODE (prev) != INSN | |
ff81832f | 9228 | || FIND_REG_INC_NOTE (prev, NULL_RTX)) |
c9212f33 JH |
9229 | break; |
9230 | ||
9231 | set = set_of (op0, prev); | |
9232 | ||
9233 | if (set | |
9234 | && (GET_CODE (set) != SET | |
9235 | || !rtx_equal_p (SET_DEST (set), op0))) | |
b4ad7b23 RS |
9236 | break; |
9237 | ||
9238 | /* If this is setting OP0, get what it sets it to if it looks | |
9239 | relevant. */ | |
c9212f33 | 9240 | if (set) |
b4ad7b23 | 9241 | { |
6d90e7c0 | 9242 | enum machine_mode inner_mode = GET_MODE (SET_DEST (set)); |
efdc7e19 RH |
9243 | #ifdef FLOAT_STORE_FLAG_VALUE |
9244 | REAL_VALUE_TYPE fsfv; | |
9245 | #endif | |
b4ad7b23 | 9246 | |
f283421d RH |
9247 | /* ??? We may not combine comparisons done in a CCmode with |
9248 | comparisons not done in a CCmode. This is to aid targets | |
9249 | like Alpha that have an IEEE compliant EQ instruction, and | |
9250 | a non-IEEE compliant BEQ instruction. The use of CCmode is | |
9251 | actually artificial, simply to prevent the combination, but | |
12f289ac JW |
9252 | should not affect other platforms. |
9253 | ||
9254 | However, we must allow VOIDmode comparisons to match either | |
9255 | CCmode or non-CCmode comparison, because some ports have | |
9256 | modeless comparisons inside branch patterns. | |
9257 | ||
9258 | ??? This mode check should perhaps look more like the mode check | |
9259 | in simplify_comparison in combine. */ | |
f283421d | 9260 | |
b4ad7b23 | 9261 | if ((GET_CODE (SET_SRC (set)) == COMPARE |
b565a316 RK |
9262 | || (((code == NE |
9263 | || (code == LT | |
9264 | && GET_MODE_CLASS (inner_mode) == MODE_INT | |
5fd8383e RK |
9265 | && (GET_MODE_BITSIZE (inner_mode) |
9266 | <= HOST_BITS_PER_WIDE_INT) | |
b565a316 | 9267 | && (STORE_FLAG_VALUE |
5fd8383e RK |
9268 | & ((HOST_WIDE_INT) 1 |
9269 | << (GET_MODE_BITSIZE (inner_mode) - 1)))) | |
b565a316 RK |
9270 | #ifdef FLOAT_STORE_FLAG_VALUE |
9271 | || (code == LT | |
9272 | && GET_MODE_CLASS (inner_mode) == MODE_FLOAT | |
efdc7e19 RH |
9273 | && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode), |
9274 | REAL_VALUE_NEGATIVE (fsfv))) | |
b565a316 RK |
9275 | #endif |
9276 | )) | |
f283421d | 9277 | && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<')) |
12f289ac JW |
9278 | && (((GET_MODE_CLASS (mode) == MODE_CC) |
9279 | == (GET_MODE_CLASS (inner_mode) == MODE_CC)) | |
9280 | || mode == VOIDmode || inner_mode == VOIDmode)) | |
b4ad7b23 | 9281 | x = SET_SRC (set); |
b565a316 RK |
9282 | else if (((code == EQ |
9283 | || (code == GE | |
5fd8383e RK |
9284 | && (GET_MODE_BITSIZE (inner_mode) |
9285 | <= HOST_BITS_PER_WIDE_INT) | |
b565a316 RK |
9286 | && GET_MODE_CLASS (inner_mode) == MODE_INT |
9287 | && (STORE_FLAG_VALUE | |
5fd8383e RK |
9288 | & ((HOST_WIDE_INT) 1 |
9289 | << (GET_MODE_BITSIZE (inner_mode) - 1)))) | |
b565a316 RK |
9290 | #ifdef FLOAT_STORE_FLAG_VALUE |
9291 | || (code == GE | |
9292 | && GET_MODE_CLASS (inner_mode) == MODE_FLOAT | |
efdc7e19 RH |
9293 | && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode), |
9294 | REAL_VALUE_NEGATIVE (fsfv))) | |
fb8ca0a4 | 9295 | #endif |
b565a316 | 9296 | )) |
f283421d | 9297 | && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<' |
e6fcb60d | 9298 | && (((GET_MODE_CLASS (mode) == MODE_CC) |
12f289ac JW |
9299 | == (GET_MODE_CLASS (inner_mode) == MODE_CC)) |
9300 | || mode == VOIDmode || inner_mode == VOIDmode)) | |
9301 | ||
b4ad7b23 | 9302 | { |
b4ad7b23 RS |
9303 | reverse_code = 1; |
9304 | x = SET_SRC (set); | |
9305 | } | |
71ef37f6 RK |
9306 | else |
9307 | break; | |
b4ad7b23 RS |
9308 | } |
9309 | ||
9310 | else if (reg_set_p (op0, prev)) | |
9311 | /* If this sets OP0, but not directly, we have to give up. */ | |
9312 | break; | |
9313 | ||
9314 | if (x) | |
9315 | { | |
9316 | if (GET_RTX_CLASS (GET_CODE (x)) == '<') | |
9317 | code = GET_CODE (x); | |
9318 | if (reverse_code) | |
9319 | { | |
c9212f33 | 9320 | code = reversed_comparison_code (x, prev); |
1eb8759b RH |
9321 | if (code == UNKNOWN) |
9322 | return 0; | |
b4ad7b23 RS |
9323 | reverse_code = 0; |
9324 | } | |
9325 | ||
9326 | op0 = XEXP (x, 0), op1 = XEXP (x, 1); | |
9327 | if (earliest) | |
9328 | *earliest = prev; | |
9329 | } | |
9330 | } | |
9331 | ||
9332 | /* If constant is first, put it last. */ | |
9333 | if (CONSTANT_P (op0)) | |
9334 | code = swap_condition (code), tem = op0, op0 = op1, op1 = tem; | |
9335 | ||
9336 | /* If OP0 is the result of a comparison, we weren't able to find what | |
9337 | was really being compared, so fail. */ | |
9338 | if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC) | |
9339 | return 0; | |
9340 | ||
d8cfa4ee RK |
9341 | /* Canonicalize any ordered comparison with integers involving equality |
9342 | if we can do computations in the relevant mode and we do not | |
9343 | overflow. */ | |
9344 | ||
9345 | if (GET_CODE (op1) == CONST_INT | |
9346 | && GET_MODE (op0) != VOIDmode | |
9347 | && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT) | |
b4ad7b23 | 9348 | { |
5fd8383e RK |
9349 | HOST_WIDE_INT const_val = INTVAL (op1); |
9350 | unsigned HOST_WIDE_INT uconst_val = const_val; | |
d8cfa4ee RK |
9351 | unsigned HOST_WIDE_INT max_val |
9352 | = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0)); | |
b4ad7b23 RS |
9353 | |
9354 | switch (code) | |
d8cfa4ee RK |
9355 | { |
9356 | case LE: | |
e51712db | 9357 | if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1) |
e879eb2f | 9358 | code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0)); |
d8cfa4ee | 9359 | break; |
b4ad7b23 | 9360 | |
460f50dc R |
9361 | /* When cross-compiling, const_val might be sign-extended from |
9362 | BITS_PER_WORD to HOST_BITS_PER_WIDE_INT. */ | |
d8cfa4ee | 9363 | case GE: |
e51712db | 9364 | if ((HOST_WIDE_INT) (const_val & max_val) |
d8cfa4ee RK |
9365 | != (((HOST_WIDE_INT) 1 |
9366 | << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1)))) | |
e879eb2f | 9367 | code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0)); |
d8cfa4ee | 9368 | break; |
b4ad7b23 | 9369 | |
d8cfa4ee | 9370 | case LEU: |
460f50dc | 9371 | if (uconst_val < max_val) |
e879eb2f | 9372 | code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0)); |
d8cfa4ee | 9373 | break; |
b4ad7b23 | 9374 | |
d8cfa4ee RK |
9375 | case GEU: |
9376 | if (uconst_val != 0) | |
e879eb2f | 9377 | code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0)); |
d8cfa4ee | 9378 | break; |
e9a25f70 JL |
9379 | |
9380 | default: | |
9381 | break; | |
d8cfa4ee | 9382 | } |
b4ad7b23 RS |
9383 | } |
9384 | ||
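/* E.g. (le (reg 60) (const_int 9)) becomes (lt (reg 60) (const_int 10))
   and (geu (reg 60) (const_int 1)) becomes (gtu (reg 60) (const_int 0)),
   provided the adjusted constant still fits in the operand's mode.  */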
b4ad7b23 | 9385 | /* Never return CC0; return zero instead. */ |
8beccec8 | 9386 | if (CC0_P (op0)) |
b4ad7b23 | 9387 | return 0; |
b4ad7b23 | 9388 | |
38a448ca | 9389 | return gen_rtx_fmt_ee (code, VOIDmode, op0, op1); |
b4ad7b23 RS |
9390 | } |
9391 | ||
a49a6a68 JW |
9392 | /* Given a jump insn JUMP, return the condition that will cause it to branch |
9393 | to its JUMP_LABEL. If the condition cannot be understood, or is an | |
9394 | inequality floating-point comparison which needs to be reversed, 0 will | |
9395 | be returned. | |
9396 | ||
cc2902df | 9397 | If EARLIEST is nonzero, it is a pointer to a place where the earliest |
a49a6a68 JW |
9398 | insn used in locating the condition was found. If a replacement test |
9399 | of the condition is desired, it should be placed in front of that | |
9400 | insn and we will be sure that the inputs are still valid. */ | |
9401 | ||
9402 | rtx | |
0c20a65f | 9403 | get_condition (rtx jump, rtx *earliest) |
a49a6a68 JW |
9404 | { |
9405 | rtx cond; | |
9406 | int reverse; | |
7f1c097d | 9407 | rtx set; |
a49a6a68 JW |
9408 | |
9409 | /* If this is not a standard conditional jump, we can't parse it. */ | |
9410 | if (GET_CODE (jump) != JUMP_INSN | |
7f1c097d | 9411 | || ! any_condjump_p (jump)) |
a49a6a68 | 9412 | return 0; |
7f1c097d | 9413 | set = pc_set (jump); |
a49a6a68 | 9414 | |
7f1c097d | 9415 | cond = XEXP (SET_SRC (set), 0); |
a49a6a68 JW |
9416 | |
9417 | /* If this branches to JUMP_LABEL when the condition is false, reverse | |
9418 | the condition. */ | |
9419 | reverse | |
7f1c097d JH |
9420 | = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF |
9421 | && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump); | |
a49a6a68 | 9422 | |
10f13594 | 9423 | return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX); |
a49a6a68 JW |
9424 | } |
9425 | ||
b4ad7b23 RS |
9426 | /* Similar to above routine, except that we also put an invariant last |
9427 | unless both operands are invariants. */ | |
9428 | ||
9429 | rtx | |
0c20a65f | 9430 | get_condition_for_loop (const struct loop *loop, rtx x) |
b4ad7b23 | 9431 | { |
505ddab6 | 9432 | rtx comparison = get_condition (x, (rtx*) 0); |
b4ad7b23 RS |
9433 | |
9434 | if (comparison == 0 | |
0534b804 MH |
9435 | || ! loop_invariant_p (loop, XEXP (comparison, 0)) |
9436 | || loop_invariant_p (loop, XEXP (comparison, 1))) | |
b4ad7b23 RS |
9437 | return comparison; |
9438 | ||
38a448ca RH |
9439 | return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode, |
9440 | XEXP (comparison, 1), XEXP (comparison, 0)); | |
b4ad7b23 | 9441 | } |
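/* E.g. if N is loop-invariant and I is not, (gt (reg N) (reg I))
   is returned as (lt (reg I) (reg N)), putting the invariant
   operand second.  (Register names here are hypothetical.)  */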
8c660648 | 9442 | |
2a1777af | 9443 | /* Scan the function and determine whether it has indirect (computed) jumps. |
8c660648 | 9444 | |
2a1777af JL |
9445 | This is taken mostly from flow.c; similar code exists elsewhere |
9446 | in the compiler. It may be useful to put this into rtlanal.c. */ | |
8c660648 | 9447 | static int |
0c20a65f | 9448 | indirect_jump_in_function_p (rtx start) |
8c660648 JL |
9449 | { |
9450 | rtx insn; | |
8c660648 | 9451 | |
2a1777af JL |
9452 | for (insn = start; insn; insn = NEXT_INSN (insn)) |
9453 | if (computed_jump_p (insn)) | |
9454 | return 1; | |
7019d00e L |
9455 | |
9456 | return 0; | |
8c660648 | 9457 | } |
41a972a9 MM |
9458 | |
9459 | /* Add MEM to the LOOP_MEMS array, if appropriate. See the | |
9460 | documentation for LOOP_MEMS for the definition of `appropriate'. | |
9461 | This function is called from prescan_loop via for_each_rtx. */ | |
9462 | ||
9463 | static int | |
0c20a65f | 9464 | insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED) |
41a972a9 | 9465 | { |
afa1738b | 9466 | struct loop_info *loop_info = data; |
41a972a9 MM |
9467 | int i; |
9468 | rtx m = *mem; | |
9469 | ||
9470 | if (m == NULL_RTX) | |
9471 | return 0; | |
9472 | ||
9473 | switch (GET_CODE (m)) | |
9474 | { | |
9475 | case MEM: | |
9476 | break; | |
9477 | ||
27114460 RH |
9478 | case CLOBBER: |
9479 | /* We're not interested in MEMs that are only clobbered. */ | |
9480 | return -1; | |
9481 | ||
41a972a9 MM |
9482 | case CONST_DOUBLE: |
9483 | /* We're not interested in the MEM associated with a | |
9484 | CONST_DOUBLE, so there's no need to traverse into this. */ | |
9485 | return -1; | |
9486 | ||
4ce580a2 RE |
9487 | case EXPR_LIST: |
9488 | /* We're not interested in any MEMs that only appear in notes. */ | |
9489 | return -1; | |
9490 | ||
41a972a9 MM |
9491 | default: |
9492 | /* This is not a MEM. */ | |
9493 | return 0; | |
9494 | } | |
9495 | ||
9496 | /* See if we've already seen this MEM. */ | |
afa1738b MH |
9497 | for (i = 0; i < loop_info->mems_idx; ++i) |
9498 | if (rtx_equal_p (m, loop_info->mems[i].mem)) | |
41a972a9 | 9499 | { |
afa1738b | 9500 | if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem)) |
41a972a9 MM |
9501 | /* The modes of the two memory accesses are different. If |
9502 | this happens, something tricky is going on, and we just | |
9503 | don't optimize accesses to this MEM. */ | |
afa1738b | 9504 | loop_info->mems[i].optimize = 0; |
41a972a9 MM |
9505 | |
9506 | return 0; | |
9507 | } | |
9508 | ||
9509 | /* Resize the array, if necessary. */ | |
afa1738b | 9510 | if (loop_info->mems_idx == loop_info->mems_allocated) |
41a972a9 | 9511 | { |
afa1738b MH |
9512 | if (loop_info->mems_allocated != 0) |
9513 | loop_info->mems_allocated *= 2; | |
41a972a9 | 9514 | else |
afa1738b | 9515 | loop_info->mems_allocated = 32; |
41a972a9 | 9516 | |
703ad42b KG |
9517 | loop_info->mems = xrealloc (loop_info->mems, |
9518 | loop_info->mems_allocated * sizeof (loop_mem_info)); | |
41a972a9 MM |
9519 | } |
9520 | ||
9521 | /* Actually insert the MEM. */ | |
afa1738b | 9522 | loop_info->mems[loop_info->mems_idx].mem = m; |
41a972a9 MM |
9523 | /* We can't hoist this MEM out of the loop if it's a BLKmode MEM |
9524 | because we can't put it in a register. We still store it in the | |
9525 | table, though, so that if we see the same address later, but in a | |
9526 | non-BLK mode, we'll not think we can optimize it at that point. */ | |
afa1738b MH |
9527 | loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode); |
9528 | loop_info->mems[loop_info->mems_idx].reg = NULL_RTX; | |
9529 | ++loop_info->mems_idx; | |
8deb8e2c MM |
9530 | |
9531 | return 0; | |
41a972a9 MM |
9532 | } |
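
/* A minimal sketch (assumed usage, not code from this file) of the
   return-value protocol that for_each_rtx callbacks such as
   insert_loop_mem follow: return 0 to continue into subexpressions,
   -1 to skip the subexpressions of the current rtx, and any other
   nonzero value to stop the traversal.  A hypothetical callback that
   counts MEMs:

       static int
       count_mems (rtx *x, void *data)
       {
         int *n = (int *) data;
         if (*x != NULL_RTX && GET_CODE (*x) == MEM)
           ++*n;
         return 0;                <-- keep walking
       }

   invoked as for_each_rtx (&PATTERN (insn), count_mems, &n).  */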
9533 | ||
1d7ae250 MH |
9534 | |
9535 | /* Allocate REGS->ARRAY or reallocate it if it is too small. | |
9536 | ||
9537 | Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each | |
9538 | register that is modified by an insn between FROM and TO. If the | |
9539 | value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or | |
9540 | more, stop incrementing it, to avoid overflow. | |
9541 | ||
9542 | Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which | |
9543 | register I is used, if it is only used once. Otherwise, it is set | |
9544 | to 0 (for no uses) or const0_rtx for more than one use. This | |
9545 | parameter may be zero, in which case this processing is not done. | |
9546 | ||
9547 | Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not | |
28680540 | 9548 | optimize register I. */ |
41a972a9 MM |
9549 | |
9550 | static void | |
0c20a65f | 9551 | loop_regs_scan (const struct loop *loop, int extra_size) |
41a972a9 | 9552 | { |
1ecd860b | 9553 | struct loop_regs *regs = LOOP_REGS (loop); |
1d7ae250 MH |
9554 | int old_nregs; |
9555 | /* last_set[n] is nonzero iff reg n has been set in the current | |
9556 | basic block. In that case, it is the insn that last set reg n. */ | |
9557 | rtx *last_set; | |
9558 | rtx insn; | |
1d7ae250 | 9559 | int i; |
41a972a9 | 9560 | |
1d7ae250 MH |
9561 | old_nregs = regs->num; |
9562 | regs->num = max_reg_num (); | |
e6fcb60d | 9563 | |
1d7ae250 MH |
9564 | /* Grow the regs array if not allocated or too small. */ |
9565 | if (regs->num >= regs->size) | |
41a972a9 | 9566 | { |
1d7ae250 | 9567 | regs->size = regs->num + extra_size; |
6b8c9327 | 9568 | |
703ad42b | 9569 | regs->array = xrealloc (regs->array, regs->size * sizeof (*regs->array)); |
1d7ae250 MH |
9570 | |
9571 | /* Zero the new elements. */ | |
9572 | memset (regs->array + old_nregs, 0, | |
9573 | (regs->size - old_nregs) * sizeof (*regs->array)); | |
9574 | } | |
41a972a9 | 9575 | |
1d7ae250 MH |
9576 | /* Clear previously scanned fields but do not clear n_times_set. */ |
9577 | for (i = 0; i < old_nregs; i++) | |
9578 | { | |
9579 | regs->array[i].set_in_loop = 0; | |
9580 | regs->array[i].may_not_optimize = 0; | |
9581 | regs->array[i].single_usage = NULL_RTX; | |
9582 | } | |
9583 | ||
703ad42b | 9584 | last_set = xcalloc (regs->num, sizeof (rtx)); |
f1d4ac80 | 9585 | |
1d7ae250 MH |
9586 | /* Scan the loop, recording register usage. */ |
9587 | for (insn = loop->top ? loop->top : loop->start; insn != loop->end; | |
9588 | insn = NEXT_INSN (insn)) | |
9589 | { | |
9590 | if (INSN_P (insn)) | |
f1d4ac80 | 9591 | { |
1d7ae250 MH |
9592 | /* Record registers that have exactly one use. */ |
9593 | find_single_use_in_loop (regs, insn, PATTERN (insn)); | |
587f56c2 | 9594 | |
1d7ae250 MH |
9595 | /* Include uses in REG_EQUAL notes. */ |
9596 | if (REG_NOTES (insn)) | |
9597 | find_single_use_in_loop (regs, insn, REG_NOTES (insn)); | |
41a972a9 | 9598 | |
1d7ae250 MH |
9599 | if (GET_CODE (PATTERN (insn)) == SET |
9600 | || GET_CODE (PATTERN (insn)) == CLOBBER) | |
9601 | count_one_set (regs, insn, PATTERN (insn), last_set); | |
9602 | else if (GET_CODE (PATTERN (insn)) == PARALLEL) | |
9603 | { | |
b3694847 | 9604 | int i; |
1d7ae250 MH |
9605 | for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--) |
9606 | count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i), | |
9607 | last_set); | |
9608 | } | |
8deb8e2c | 9609 | } |
41a972a9 | 9610 | |
1d7ae250 MH |
9611 | if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN) |
9612 | memset (last_set, 0, regs->num * sizeof (rtx)); | |
2d34c587 UW |
9613 | |
9614 | /* Invalidate all registers used for function argument passing. | |
9615 | We check rtx_varies_p for the same reason as below, to allow | |
9616 | optimizing PIC calculations. */ | |
9617 | if (GET_CODE (insn) == CALL_INSN) | |
9618 | { | |
9619 | rtx link; | |
0c20a65f AJ |
9620 | for (link = CALL_INSN_FUNCTION_USAGE (insn); |
9621 | link; | |
2d34c587 UW |
9622 | link = XEXP (link, 1)) |
9623 | { | |
9624 | rtx op, reg; | |
9625 | ||
9626 | if (GET_CODE (op = XEXP (link, 0)) == USE | |
9627 | && GET_CODE (reg = XEXP (op, 0)) == REG | |
9628 | && rtx_varies_p (reg, 1)) | |
9629 | regs->array[REGNO (reg)].may_not_optimize = 1; | |
9630 | } | |
9631 | } | |
1d7ae250 | 9632 | } |
41a972a9 | 9633 | |
bc532bf7 SC |
9634 | /* Invalidate all hard registers clobbered by calls. With one exception: |
9635 | a call-clobbered PIC register is still function-invariant for our | |
9636 | purposes, since we can hoist any PIC calculations out of the loop. | |
9637 | Thus the call to rtx_varies_p. */ | |
9638 | if (LOOP_INFO (loop)->has_call) | |
9639 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | |
9640 | if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i) | |
603b4b92 | 9641 | && rtx_varies_p (regno_reg_rtx[i], 1)) |
e11e816e KH |
9642 | { |
9643 | regs->array[i].may_not_optimize = 1; | |
9644 | regs->array[i].set_in_loop = 1; | |
9645 | } | |
e6fcb60d | 9646 | |
dd0208b9 | 9647 | #ifdef AVOID_CCMODE_COPIES |
1d7ae250 MH |
9648 | /* Don't try to move insns which set CC registers if we should not |
9649 | create CCmode register copies. */ | |
9650 | for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--) | |
9651 | if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC) | |
9652 | regs->array[i].may_not_optimize = 1; | |
dd0208b9 | 9653 | #endif |
6b8c9327 | 9654 | |
1d7ae250 MH |
9655 | /* Set regs->array[I].n_times_set for the new registers. */ |
9656 | for (i = old_nregs; i < regs->num; i++) | |
9657 | regs->array[i].n_times_set = regs->array[i].set_in_loop; | |
dd0208b9 | 9658 | |
1d7ae250 | 9659 | free (last_set); |
41a972a9 MM |
9660 | } |
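
/* For illustration (a sketch of the bookkeeping described above, not
   additional code): count_one_set effectively performs a saturating
   increment so that the narrow set_in_loop counter never overflows,

       if (regs->array[regno].set_in_loop < 127)
         ++regs->array[regno].set_in_loop;

   while single_usage follows a three-state convention:

       NULL_RTX    -- no use of register I seen yet
       an insn     -- exactly one use, namely in that insn
       const0_rtx  -- more than one use  */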
9661 | ||
28680540 MM |
9662 | /* Returns the number of real INSNs in the LOOP. */ |
9663 | ||
9664 | static int | |
0c20a65f | 9665 | count_insns_in_loop (const struct loop *loop) |
28680540 MM |
9666 | { |
9667 | int count = 0; | |
9668 | rtx insn; | |
9669 | ||
9670 | for (insn = loop->top ? loop->top : loop->start; insn != loop->end; | |
9671 | insn = NEXT_INSN (insn)) | |
9672 | if (INSN_P (insn)) | |
9673 | ++count; | |
9674 | ||
9675 | return count; | |
9676 | } | |
1d7ae250 | 9677 | |
a2be868f | 9678 | /* Move MEMs into registers for the duration of the loop. */ |
41a972a9 MM |
9679 | |
9680 | static void | |
0c20a65f | 9681 | load_mems (const struct loop *loop) |
41a972a9 | 9682 | { |
afa1738b | 9683 | struct loop_info *loop_info = LOOP_INFO (loop); |
1ecd860b | 9684 | struct loop_regs *regs = LOOP_REGS (loop); |
41a972a9 MM |
9685 | int maybe_never = 0; |
9686 | int i; | |
1757e774 | 9687 | rtx p, prev_ebb_head; |
41a972a9 | 9688 | rtx label = NULL_RTX; |
48c4d691 | 9689 | rtx end_label; |
328f4006 BS |
9690 | /* Nonzero if the next instruction may never be executed. */ |
9691 | int next_maybe_never = 0; | |
616fde53 | 9692 | unsigned int last_max_reg = max_reg_num (); |
41a972a9 | 9693 | |
afa1738b | 9694 | if (loop_info->mems_idx == 0) |
328f4006 | 9695 | return; |
41a972a9 | 9696 | |
48c4d691 JJ |
9697 | /* We cannot use next_label here because it skips over normal insns. */ |
9698 | end_label = next_nonnote_insn (loop->end); | |
9699 | if (end_label && GET_CODE (end_label) != CODE_LABEL) | |
9700 | end_label = NULL_RTX; | |
eab5c70a | 9701 | |
48c4d691 JJ |
9702 | /* Check to see if it's possible that some instructions in the loop are |
9703 | never executed. Also check if there is a goto out of the loop other | |
9704 | than right after the end of the loop. */ | |
e6fcb60d | 9705 | for (p = next_insn_in_loop (loop, loop->scan_start); |
17e2b3cb | 9706 | p != NULL_RTX; |
a2be868f | 9707 | p = next_insn_in_loop (loop, p)) |
328f4006 BS |
9708 | { |
9709 | if (GET_CODE (p) == CODE_LABEL) | |
9710 | maybe_never = 1; | |
9711 | else if (GET_CODE (p) == JUMP_INSN | |
9712 | /* If we enter the loop in the middle, and scan | |
9713 | around to the beginning, don't set maybe_never | |
9714 | for that. This must be an unconditional jump, | |
9715 | otherwise the code at the top of the loop might | |
9716 | never be executed. Unconditional jumps are | |
9717 | followed by a barrier then the loop end. */ | |
e6fcb60d | 9718 | && ! (GET_CODE (p) == JUMP_INSN |
a2be868f MH |
9719 | && JUMP_LABEL (p) == loop->top |
9720 | && NEXT_INSN (NEXT_INSN (p)) == loop->end | |
7f1c097d | 9721 | && any_uncondjump_p (p))) |
41a972a9 | 9722 | { |
48c4d691 JJ |
9723 | /* If this is a jump outside of the loop but not right |
9724 | after the end of the loop, we would have to emit new fixup | |
9725 | sequences for each such label. */ | |
13c502cd MM |
9726 | if (/* If we can't tell where control might go when this |
9727 | JUMP_INSN is executed, we must be conservative. */ | |
9728 | !JUMP_LABEL (p) | |
9729 | || (JUMP_LABEL (p) != end_label | |
9730 | && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop | |
9731 | || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start) | |
9732 | || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end)))) | |
48c4d691 JJ |
9733 | return; |
9734 | ||
7f1c097d | 9735 | if (!any_condjump_p (p)) |
328f4006 | 9736 | /* Something complicated. */ |
41a972a9 | 9737 | maybe_never = 1; |
328f4006 BS |
9738 | else |
9739 | /* If there are any more instructions in the loop, they | |
9740 | might not be reached. */ | |
e6fcb60d KH |
9741 | next_maybe_never = 1; |
9742 | } | |
328f4006 BS |
9743 | else if (next_maybe_never) |
9744 | maybe_never = 1; | |
9745 | } | |
9746 | ||
48c4d691 JJ |
9747 | /* Find start of the extended basic block that enters the loop. */ |
9748 | for (p = loop->start; | |
9749 | PREV_INSN (p) && GET_CODE (p) != CODE_LABEL; | |
9750 | p = PREV_INSN (p)) | |
9751 | ; | |
1757e774 | 9752 | prev_ebb_head = p; |
48c4d691 JJ |
9753 | |
9754 | cselib_init (); | |
9755 | ||
9756 | /* Build a table of MEMs that get set to constant values before the | |
9757 | loop. */ | |
9758 | for (; p != loop->start; p = NEXT_INSN (p)) | |
9759 | cselib_process_insn (p); | |
9760 | ||
328f4006 | 9761 | /* Actually move the MEMs. */ |
afa1738b | 9762 | for (i = 0; i < loop_info->mems_idx; ++i) |
328f4006 | 9763 | { |
d2335c24 MH |
9764 | regset_head load_copies; |
9765 | regset_head store_copies; | |
328f4006 BS |
9766 | int written = 0; |
9767 | rtx reg; | |
afa1738b | 9768 | rtx mem = loop_info->mems[i].mem; |
328f4006 | 9769 | rtx mem_list_entry; |
41a972a9 | 9770 | |
e6fcb60d | 9771 | if (MEM_VOLATILE_P (mem) |
0534b804 | 9772 | || loop_invariant_p (loop, XEXP (mem, 0)) != 1) |
328f4006 | 9773 | /* There's no telling whether or not MEM is modified. */ |
afa1738b | 9774 | loop_info->mems[i].optimize = 0; |
328f4006 BS |
9775 | |
9776 | /* Go through the MEMs written to in the loop to see if this | |
9777 | one is aliased by one of them. */ | |
afa1738b | 9778 | mem_list_entry = loop_info->store_mems; |
328f4006 | 9779 | while (mem_list_entry) |
41a972a9 | 9780 | { |
328f4006 BS |
9781 | if (rtx_equal_p (mem, XEXP (mem_list_entry, 0))) |
9782 | written = 1; | |
9783 | else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode, | |
9784 | mem, rtx_varies_p)) | |
41a972a9 | 9785 | { |
328f4006 | 9786 | /* MEM is indeed aliased by this store. */ |
afa1738b | 9787 | loop_info->mems[i].optimize = 0; |
328f4006 | 9788 | break; |
41a972a9 | 9789 | } |
328f4006 BS |
9790 | mem_list_entry = XEXP (mem_list_entry, 1); |
9791 | } | |
f0b60c1c SM |
9792 | |
9793 | if (flag_float_store && written | |
9794 | && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT) | |
afa1738b | 9795 | loop_info->mems[i].optimize = 0; |
e6fcb60d | 9796 | |
328f4006 | 9797 | /* If this MEM is written to, we must be sure that there |
e6fcb60d | 9798 | are no reads from another MEM that aliases this one. */ |
afa1738b | 9799 | if (loop_info->mems[i].optimize && written) |
328f4006 BS |
9800 | { |
9801 | int j; | |
41a972a9 | 9802 | |
afa1738b | 9803 | for (j = 0; j < loop_info->mems_idx; ++j) |
328f4006 BS |
9804 | { |
9805 | if (j == i) | |
9806 | continue; | |
9807 | else if (true_dependence (mem, | |
9808 | VOIDmode, | |
afa1738b | 9809 | loop_info->mems[j].mem, |
328f4006 | 9810 | rtx_varies_p)) |
41a972a9 | 9811 | { |
afa1738b | 9812 | /* It's not safe to hoist loop_info->mems[i] out of |
328f4006 | 9813 | the loop because writes to it might not be |
afa1738b MH |
9814 | seen by reads from loop_info->mems[j]. */ |
9815 | loop_info->mems[i].optimize = 0; | |
328f4006 | 9816 | break; |
41a972a9 MM |
9817 | } |
9818 | } | |
328f4006 | 9819 | } |
41a972a9 | 9820 | |
328f4006 BS |
9821 | if (maybe_never && may_trap_p (mem)) |
9822 | /* We can't access the MEM outside the loop; it might | |
9823 | cause a trap that wouldn't have happened otherwise. */ | |
afa1738b | 9824 | loop_info->mems[i].optimize = 0; |
e6fcb60d | 9825 | |
afa1738b | 9826 | if (!loop_info->mems[i].optimize) |
328f4006 BS |
9827 | /* We thought we were going to lift this MEM out of the |
9828 | loop, but later discovered that we could not. */ | |
9829 | continue; | |
41a972a9 | 9830 | |
d2335c24 MH |
9831 | INIT_REG_SET (&load_copies); |
9832 | INIT_REG_SET (&store_copies); | |
c29f60c0 | 9833 | |
328f4006 BS |
9834 | /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in |
9835 | order to keep scan_loop from moving stores to this MEM | |
9836 | out of the loop just because this REG is neither a | |
9837 | user-variable nor used in the loop test. */ | |
9838 | reg = gen_reg_rtx (GET_MODE (mem)); | |
9839 | REG_USERVAR_P (reg) = 1; | |
afa1738b | 9840 | loop_info->mems[i].reg = reg; |
328f4006 BS |
9841 | |
9842 | /* Now, replace all references to the MEM with the | |
1757e774 | 9843 | corresponding pseudos. */ |
c29f60c0 | 9844 | maybe_never = 0; |
a2be868f | 9845 | for (p = next_insn_in_loop (loop, loop->scan_start); |
328f4006 | 9846 | p != NULL_RTX; |
a2be868f | 9847 | p = next_insn_in_loop (loop, p)) |
328f4006 | 9848 | { |
2c3c49de | 9849 | if (INSN_P (p)) |
c29f60c0 | 9850 | { |
d2335c24 MH |
9851 | rtx set; |
9852 | ||
9853 | set = single_set (p); | |
9854 | ||
c29f60c0 BS |
9855 | /* See if this copies the mem into a register that isn't |
9856 | modified afterwards. We'll try to do copy propagation | |
9857 | a little further on. */ | |
c29f60c0 BS |
9858 | if (set |
9859 | /* @@@ This test is _way_ too conservative. */ | |
9860 | && ! maybe_never | |
9861 | && GET_CODE (SET_DEST (set)) == REG | |
9862 | && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER | |
9863 | && REGNO (SET_DEST (set)) < last_max_reg | |
f1d4ac80 | 9864 | && regs->array[REGNO (SET_DEST (set))].n_times_set == 1 |
d2335c24 MH |
9865 | && rtx_equal_p (SET_SRC (set), mem)) |
9866 | SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set))); | |
9867 | ||
fd5d5b07 | 9868 | /* See if this copies the mem from a register that isn't |
d2335c24 MH |
9869 | modified afterwards. We'll try to remove the |
9870 | redundant copy later on by doing a little register | |
9871 | renaming and copy propagation. This will help | |
9872 | to untangle things for the BIV detection code. */ | |
fd5d5b07 KH |
9873 | if (set |
9874 | && ! maybe_never | |
9875 | && GET_CODE (SET_SRC (set)) == REG | |
9876 | && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER | |
9877 | && REGNO (SET_SRC (set)) < last_max_reg | |
f1d4ac80 | 9878 | && regs->array[REGNO (SET_SRC (set))].n_times_set == 1 |
fd5d5b07 KH |
9879 | && rtx_equal_p (SET_DEST (set), mem)) |
9880 | SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set))); | |
9881 | ||
fcae219a R |
9882 | /* If this is a call which uses / clobbers this memory |
9883 | location, we must not change the interface here. */ | |
9884 | if (GET_CODE (p) == CALL_INSN | |
9885 | && reg_mentioned_p (loop_info->mems[i].mem, | |
9886 | CALL_INSN_FUNCTION_USAGE (p))) | |
9887 | { | |
9888 | cancel_changes (0); | |
9889 | loop_info->mems[i].optimize = 0; | |
9890 | break; | |
9891 | } | |
9892 | else | |
9893 | /* Replace the memory reference with the shadow register. */ | |
9894 | replace_loop_mems (p, loop_info->mems[i].mem, | |
e81eb37f | 9895 | loop_info->mems[i].reg, written); |
c29f60c0 BS |
9896 | } |
9897 | ||
9898 | if (GET_CODE (p) == CODE_LABEL | |
9899 | || GET_CODE (p) == JUMP_INSN) | |
9900 | maybe_never = 1; | |
328f4006 | 9901 | } |
41a972a9 | 9902 | |
fcae219a R |
9903 | if (! loop_info->mems[i].optimize) |
9904 | ; /* We found we couldn't do the replacement, so do nothing. */ | |
9905 | else if (! apply_change_group ()) | |
328f4006 | 9906 | /* We couldn't replace all occurrences of the MEM. */ |
afa1738b | 9907 | loop_info->mems[i].optimize = 0; |
328f4006 BS |
9908 | else |
9909 | { | |
0534b804 | 9910 | /* Load the memory immediately before LOOP->START, which is |
328f4006 | 9911 | the NOTE_LOOP_BEG. */ |
eab5c70a BS |
9912 | cselib_val *e = cselib_lookup (mem, VOIDmode, 0); |
9913 | rtx set; | |
9914 | rtx best = mem; | |
9915 | int j; | |
9916 | struct elt_loc_list *const_equiv = 0; | |
9917 | ||
9918 | if (e) | |
9919 | { | |
9920 | struct elt_loc_list *equiv; | |
9921 | struct elt_loc_list *best_equiv = 0; | |
9922 | for (equiv = e->locs; equiv; equiv = equiv->next) | |
9923 | { | |
9924 | if (CONSTANT_P (equiv->loc)) | |
9925 | const_equiv = equiv; | |
28b6b9b2 | 9926 | else if (GET_CODE (equiv->loc) == REG |
1757e774 | 9927 | /* Extending hard register lifetimes causes a crash | |
28b6b9b2 JH |
9928 | on SRC targets. Doing so on non-SRC targets is | |
9929 | probably also not a good idea, since we most | |
9930 | probably have a pseudo-register equivalence as | |
9931 | well. */ | |
9932 | && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER) | |
eab5c70a BS |
9933 | best_equiv = equiv; |
9934 | } | |
9935 | /* Use the constant equivalence if that is cheap enough. */ | |
9936 | if (! best_equiv) | |
9937 | best_equiv = const_equiv; | |
9938 | else if (const_equiv | |
9939 | && (rtx_cost (const_equiv->loc, SET) | |
9940 | <= rtx_cost (best_equiv->loc, SET))) | |
9941 | { | |
9942 | best_equiv = const_equiv; | |
9943 | const_equiv = 0; | |
9944 | } | |
9945 | ||
9946 | /* If best_equiv is nonzero, we know that MEM is set to a | |
9947 | constant or register before the loop. We will use this | |
9948 | knowledge to initialize the shadow register with that | |
9949 | constant or reg rather than by loading from MEM. */ | |
9950 | if (best_equiv) | |
9951 | best = copy_rtx (best_equiv->loc); | |
9952 | } | |
1757e774 | 9953 | |
eab5c70a | 9954 | set = gen_move_insn (reg, best); |
804a718a | 9955 | set = loop_insn_hoist (loop, set); |
1757e774 BS |
9956 | if (REG_P (best)) |
9957 | { | |
9958 | for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p)) | |
9959 | if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p)) | |
9960 | { | |
9961 | REGNO_LAST_UID (REGNO (best)) = INSN_UID (set); | |
9962 | break; | |
9963 | } | |
9964 | } | |
9965 | ||
eab5c70a | 9966 | if (const_equiv) |
3d238248 | 9967 | set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc)); |
41a972a9 | 9968 | |
328f4006 BS |
9969 | if (written) |
9970 | { | |
9971 | if (label == NULL_RTX) | |
41a972a9 | 9972 | { |
328f4006 | 9973 | label = gen_label_rtx (); |
a2be868f | 9974 | emit_label_after (label, loop->end); |
41a972a9 MM |
9975 | } |
9976 | ||
328f4006 BS |
9977 | /* Store the memory immediately after END, which is |
9978 | the NOTE_LOOP_END. */ | |
e6fcb60d | 9979 | set = gen_move_insn (copy_rtx (mem), reg); |
86e21212 | 9980 | loop_insn_emit_after (loop, 0, label, set); |
328f4006 BS |
9981 | } |
9982 | ||
9983 | if (loop_dump_stream) | |
9984 | { | |
9985 | fprintf (loop_dump_stream, "Hoisted regno %d %s from ", | |
9986 | REGNO (reg), (written ? "r/w" : "r/o")); | |
9987 | print_rtl (loop_dump_stream, mem); | |
9988 | fputc ('\n', loop_dump_stream); | |
41a972a9 | 9989 | } |
c29f60c0 BS |
9990 | |
9991 | /* Attempt a bit of copy propagation. This helps untangle the | |
9992 | data flow, and enables {basic,general}_induction_var to find | |
9993 | more bivs/givs. */ | |
9994 | EXECUTE_IF_SET_IN_REG_SET | |
d2335c24 | 9995 | (&load_copies, FIRST_PSEUDO_REGISTER, j, |
c29f60c0 | 9996 | { |
d2335c24 | 9997 | try_copy_prop (loop, reg, j); |
c29f60c0 | 9998 | }); |
d2335c24 MH |
9999 | CLEAR_REG_SET (&load_copies); |
10000 | ||
10001 | EXECUTE_IF_SET_IN_REG_SET | |
10002 | (&store_copies, FIRST_PSEUDO_REGISTER, j, | |
10003 | { | |
10004 | try_swap_copy_prop (loop, reg, j); | |
10005 | }); | |
10006 | CLEAR_REG_SET (&store_copies); | |
41a972a9 MM |
10007 | } |
10008 | } | |
10009 | ||
48c4d691 | 10010 | if (label != NULL_RTX && end_label != NULL_RTX) |
41a972a9 MM |
10011 | { |
10012 | /* Now, we need to replace all references to the previous exit | |
10013 | label with the new one. */ | |
4af16369 | 10014 | replace_label_data rr; |
59d4e481 KGA |
10015 | rr.r1 = end_label; |
10016 | rr.r2 = label; | |
4af16369 | 10017 | rr.update_label_nuses = true; |
41a972a9 | 10018 | |
a2be868f | 10019 | for (p = loop->start; p != loop->end; p = NEXT_INSN (p)) |
7940acc4 JW |
10020 | { |
10021 | for_each_rtx (&p, replace_label, &rr); | |
7940acc4 | 10022 | } |
41a972a9 | 10023 | } |
eab5c70a BS |
10024 | |
10025 | cselib_finish (); | |
41a972a9 MM |
10026 | } |
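
/* A source-level sketch (hypothetical) of the transformation performed
   by load_mems when *P is loop-invariant and judged optimizable:

       before:                        after:
         while (cond)                   reg = *p;      <-- hoisted load
           use (*p);                    while (cond)
                                          use (reg);

   For a MEM that is also written ("r/w" in the dump above), a store
   "*p = reg" is additionally emitted at a new label after the loop
   end, and the exit label references are redirected to it.  */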
10027 | ||
8571e492 BS |
10028 | /* For communication between note_reg_stored and its caller. */ |
10029 | struct note_reg_stored_arg | |
10030 | { | |
10031 | int set_seen; | |
10032 | rtx reg; | |
10033 | }; | |
10034 | ||
10035 | /* Called via note_stores; record in the SET_SEEN field of ARG whether | |
10036 | X, which is written, is equal to the REG field of ARG. */ | |
10037 | static void | |
0c20a65f | 10038 | note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg) |
8571e492 | 10039 | { |
e6fcb60d | 10040 | struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg; |
8571e492 BS |
10041 | if (t->reg == x) |
10042 | t->set_seen = 1; | |
10043 | } | |
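
/* Illustrative usage, mirroring try_copy_prop below: detecting whether
   INSN stores into REPLACEMENT.

       struct note_reg_stored_arg arg;
       arg.reg = replacement;
       arg.set_seen = 0;
       note_stores (PATTERN (insn), note_reg_stored, &arg);
       if (arg.set_seen)
         ... REPLACEMENT was modified; stop substituting ...  */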
10044 | ||
c29f60c0 BS |
10045 | /* Try to replace every occurrence of pseudo REGNO with REPLACEMENT. |
10046 | There must be exactly one insn that sets this pseudo; it will be | |
10047 | deleted if all replacements succeed and we can prove that the register | |
0534b804 | 10048 | is not used after the loop. */ |
770ae6cc | 10049 | |
c29f60c0 | 10050 | static void |
0c20a65f | 10051 | try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno) |
c29f60c0 | 10052 | { |
8571e492 BS |
10053 | /* This is the reg that we are copying from. */ |
10054 | rtx reg_rtx = regno_reg_rtx[regno]; | |
c29f60c0 BS |
10055 | rtx init_insn = 0; |
10056 | rtx insn; | |
8571e492 BS |
10057 | /* These help keep track of whether we replaced all uses of the reg. */ |
10058 | int replaced_last = 0; | |
10059 | int store_is_first = 0; | |
10060 | ||
a2be868f | 10061 | for (insn = next_insn_in_loop (loop, loop->scan_start); |
c29f60c0 | 10062 | insn != NULL_RTX; |
a2be868f | 10063 | insn = next_insn_in_loop (loop, insn)) |
c29f60c0 BS |
10064 | { |
10065 | rtx set; | |
d42971c4 | 10066 | |
8571e492 BS |
10067 | /* Only substitute within one extended basic block from the initializing |
10068 | insn. */ | |
10069 | if (GET_CODE (insn) == CODE_LABEL && init_insn) | |
10070 | break; | |
d42971c4 | 10071 | |
2c3c49de | 10072 | if (! INSN_P (insn)) |
c29f60c0 | 10073 | continue; |
8571e492 BS |
10074 | |
10075 | /* Is this the initializing insn? */ | |
c29f60c0 BS |
10076 | set = single_set (insn); |
10077 | if (set | |
10078 | && GET_CODE (SET_DEST (set)) == REG | |
10079 | && REGNO (SET_DEST (set)) == regno) | |
10080 | { | |
10081 | if (init_insn) | |
10082 | abort (); | |
8571e492 | 10083 | |
c29f60c0 | 10084 | init_insn = insn; |
8571e492 BS |
10085 | if (REGNO_FIRST_UID (regno) == INSN_UID (insn)) |
10086 | store_is_first = 1; | |
10087 | } | |
10088 | ||
10089 | /* Only substitute after seeing the initializing insn. */ | |
10090 | if (init_insn && insn != init_insn) | |
e6fcb60d | 10091 | { |
8571e492 | 10092 | struct note_reg_stored_arg arg; |
8571e492 | 10093 | |
afa1738b | 10094 | replace_loop_regs (insn, reg_rtx, replacement); |
8571e492 BS |
10095 | if (REGNO_LAST_UID (regno) == INSN_UID (insn)) |
10096 | replaced_last = 1; | |
10097 | ||
10098 | /* Stop replacing when REPLACEMENT is modified. */ | |
10099 | arg.reg = replacement; | |
10100 | arg.set_seen = 0; | |
10101 | note_stores (PATTERN (insn), note_reg_stored, &arg); | |
10102 | if (arg.set_seen) | |
f1330226 JH |
10103 | { |
10104 | rtx note = find_reg_note (insn, REG_EQUAL, NULL); | |
10105 | ||
10106 | /* It is possible that we've turned a previously valid REG_EQUAL note | |
10107 | into an invalid one: we changed REGNO to REPLACEMENT, and unlike | |
10108 | REGNO, REPLACEMENT is modified here, so the note's meaning changed. */ | |
10109 | if (note && reg_mentioned_p (replacement, XEXP (note, 0))) | |
10110 | remove_note (insn, note); | |
10111 | break; | |
10112 | } | |
c29f60c0 | 10113 | } |
c29f60c0 BS |
10114 | } |
10115 | if (! init_insn) | |
10116 | abort (); | |
10117 | if (apply_change_group ()) | |
10118 | { | |
8571e492 BS |
10119 | if (loop_dump_stream) |
10120 | fprintf (loop_dump_stream, " Replaced reg %d", regno); | |
10121 | if (store_is_first && replaced_last) | |
c29f60c0 | 10122 | { |
e8c8470b MM |
10123 | rtx first; |
10124 | rtx retval_note; | |
10125 | ||
10126 | /* Assume we're just deleting INIT_INSN. */ | |
10127 | first = init_insn; | |
10128 | /* Look for REG_RETVAL note. If we're deleting the end of | |
10129 | the libcall sequence, the whole sequence can go. */ | |
10130 | retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX); | |
10131 | /* If we found a REG_RETVAL note, find the first instruction | |
10132 | in the sequence. */ | |
10133 | if (retval_note) | |
10134 | first = XEXP (retval_note, 0); | |
10135 | ||
10136 | /* Delete the instructions. */ | |
10137 | loop_delete_insns (first, init_insn); | |
c29f60c0 BS |
10138 | } |
10139 | if (loop_dump_stream) | |
8571e492 | 10140 | fprintf (loop_dump_stream, ".\n"); |
c29f60c0 BS |
10141 | } |
10142 | } | |
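
/* A sketch (not additional code) of the rewrite try_copy_prop aims
   for.  Starting from a pseudo R initialized once from REPLACEMENT:

       (set (reg R) (reg REPLACEMENT))
       ... uses of (reg R) ...

   every use of R within the extended basic block is rewritten to use
   REPLACEMENT directly, and when R's first set and last use both fall
   inside the substituted range, the initializing insn (or the whole
   libcall sequence ending in it) is deleted.  */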
10143 | ||
e8c8470b MM |
10144 | /* Replace all the instructions from FIRST up to and including LAST |
10145 | with NOTE_INSN_DELETED notes. */ | |
10146 | ||
10147 | static void | |
0c20a65f | 10148 | loop_delete_insns (rtx first, rtx last) |
e8c8470b MM |
10149 | { |
10150 | while (1) | |
10151 | { | |
e8c8470b MM |
10152 | if (loop_dump_stream) |
10153 | fprintf (loop_dump_stream, ", deleting init_insn (%d)", | |
10154 | INSN_UID (first)); | |
ca6c03ca | 10155 | delete_insn (first); |
e8c8470b MM |
10156 | |
10157 | /* If this was the LAST instruction we're supposed to delete, | |
10158 | we're done. */ | |
10159 | if (first == last) | |
10160 | break; | |
10161 | ||
10162 | first = NEXT_INSN (first); | |
10163 | } | |
10164 | } | |
10165 | ||
d2335c24 MH |
10166 | /* Try to replace occurrences of pseudo REGNO with REPLACEMENT within |
10167 | loop LOOP if the order of the sets of these registers can be | |
10168 | swapped. There must be exactly one insn within the loop that sets | |
10169 | this pseudo, followed immediately by a move insn that sets | |
10170 | REPLACEMENT from REGNO. */ | |
10171 | static void | |
0c20a65f AJ |
10172 | try_swap_copy_prop (const struct loop *loop, rtx replacement, |
10173 | unsigned int regno) | |
d2335c24 MH |
10174 | { |
10175 | rtx insn; | |
616fde53 | 10176 | rtx set = NULL_RTX; |
d2335c24 MH |
10177 | unsigned int new_regno; |
10178 | ||
10179 | new_regno = REGNO (replacement); | |
10180 | ||
10181 | for (insn = next_insn_in_loop (loop, loop->scan_start); | |
10182 | insn != NULL_RTX; | |
10183 | insn = next_insn_in_loop (loop, insn)) | |
10184 | { | |
10185 | /* Search for the insn that copies REGNO to NEW_REGNO. */ | |
616fde53 | 10186 | if (INSN_P (insn) |
d2335c24 MH |
10187 | && (set = single_set (insn)) |
10188 | && GET_CODE (SET_DEST (set)) == REG | |
10189 | && REGNO (SET_DEST (set)) == new_regno | |
10190 | && GET_CODE (SET_SRC (set)) == REG | |
10191 | && REGNO (SET_SRC (set)) == regno) | |
10192 | break; | |
10193 | } | |
10194 | ||
0ef52662 | 10195 | if (insn != NULL_RTX) |
d2335c24 MH |
10196 | { |
10197 | rtx prev_insn; | |
10198 | rtx prev_set; | |
fd5d5b07 | 10199 | |
d2335c24 MH |
10200 | /* Some DEF-USE info would come in handy here to make this |
10201 | function more general. For now, just check the previous insn | |
10202 | which is the most likely candidate for setting REGNO. */ | |
fd5d5b07 | 10203 | |
d2335c24 | 10204 | prev_insn = PREV_INSN (insn); |
fd5d5b07 | 10205 | |
616fde53 | 10206 | if (INSN_P (prev_insn) | |
d2335c24 MH |
10207 | && (prev_set = single_set (prev_insn)) |
10208 | && GET_CODE (SET_DEST (prev_set)) == REG | |
10209 | && REGNO (SET_DEST (prev_set)) == regno) | |
10210 | { | |
10211 | /* We have: | |
10212 | (set (reg regno) (expr)) | |
10213 | (set (reg new_regno) (reg regno)) | |
fd5d5b07 | 10214 | |
d2335c24 MH |
10215 | so try converting this to: |
10216 | (set (reg new_regno) (expr)) | |
10217 | (set (reg regno) (reg new_regno)) | |
10218 | ||
10219 | The former construct is often generated when a global | |
10220 | variable used for an induction variable is shadowed by a | |
10221 | register (NEW_REGNO). The latter construct improves the | |
10222 | chances of GIV replacement and BIV elimination. */ | |
10223 | ||
10224 | validate_change (prev_insn, &SET_DEST (prev_set), | |
10225 | replacement, 1); | |
10226 | validate_change (insn, &SET_DEST (set), | |
10227 | SET_SRC (set), 1); | |
10228 | validate_change (insn, &SET_SRC (set), | |
10229 | replacement, 1); | |
10230 | ||
10231 | if (apply_change_group ()) | |
10232 | { | |
10233 | if (loop_dump_stream) | |
fd5d5b07 KH |
10234 | fprintf (loop_dump_stream, |
10235 | " Swapped set of reg %d at %d with reg %d at %d.\n", | |
10236 | regno, INSN_UID (insn), | |
d2335c24 MH |
10237 | new_regno, INSN_UID (prev_insn)); |
10238 | ||
10239 | /* Update first use of REGNO. */ | |
10240 | if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn)) | |
10241 | REGNO_FIRST_UID (regno) = INSN_UID (insn); | |
10242 | ||
10243 | /* Now perform copy propagation to hopefully | |
10244 | remove all uses of REGNO within the loop. */ | |
10245 | try_copy_prop (loop, replacement, regno); | |
10246 | } | |
10247 | } | |
10248 | } | |
10249 | } | |
10250 | ||
00dcfe80 AM |
10251 | /* Worker function for find_mem_in_note, called via for_each_rtx. */ |
10252 | ||
e81eb37f | 10253 | static int |
0c20a65f | 10254 | find_mem_in_note_1 (rtx *x, void *data) |
e81eb37f AM |
10255 | { |
10256 | if (*x != NULL_RTX && GET_CODE (*x) == MEM) | |
10257 | { | |
10258 | rtx *res = (rtx *) data; | |
10259 | *res = *x; | |
10260 | return 1; | |
10261 | } | |
10262 | return 0; | |
10263 | } | |
10264 | ||
00dcfe80 AM |
10265 | /* Returns the first MEM found in NOTE by depth-first search. */ |
10266 | ||
e81eb37f | 10267 | static rtx |
0c20a65f | 10268 | find_mem_in_note (rtx note) |
e81eb37f AM |
10269 | { |
10270 | if (note && for_each_rtx (¬e, find_mem_in_note_1, ¬e)) | |
10271 | return note; | |
10272 | return NULL_RTX; | |
10273 | } | |
0c20a65f | 10274 | |
41a972a9 | 10275 | /* Replace MEM with its associated pseudo register. This function is |
afa1738b MH |
10276 | called from load_mems via for_each_rtx. DATA is actually a pointer |
10277 | to a structure describing the instruction currently being scanned | |
41a972a9 MM |
10278 | and the MEM we are currently replacing. */ |
10279 | ||
10280 | static int | |
0c20a65f | 10281 | replace_loop_mem (rtx *mem, void *data) |
41a972a9 | 10282 | { |
fd5d5b07 | 10283 | loop_replace_args *args = (loop_replace_args *) data; |
41a972a9 MM |
10284 | rtx m = *mem; |
10285 | ||
10286 | if (m == NULL_RTX) | |
10287 | return 0; | |
10288 | ||
10289 | switch (GET_CODE (m)) | |
10290 | { | |
10291 | case MEM: | |
10292 | break; | |
10293 | ||
10294 | case CONST_DOUBLE: | |
10295 | /* We're not interested in the MEM associated with a | |
10296 | CONST_DOUBLE, so there's no need to traverse into one. */ | |
10297 | return -1; | |
10298 | ||
10299 | default: | |
10300 | /* This is not a MEM. */ | |
10301 | return 0; | |
10302 | } | |
10303 | ||
afa1738b | 10304 | if (!rtx_equal_p (args->match, m)) |
41a972a9 MM |
10305 | /* This is not the MEM we are currently replacing. */ |
10306 | return 0; | |
10307 | ||
41a972a9 | 10308 | /* Actually replace the MEM. */ |
afa1738b | 10309 | validate_change (args->insn, mem, args->replacement, 1); |
41a972a9 MM |
10310 | |
10311 | return 0; | |
10312 | } | |
10313 | ||
afa1738b | 10314 | static void |
0c20a65f | 10315 | replace_loop_mems (rtx insn, rtx mem, rtx reg, int written) |
fd5d5b07 | 10316 | { |
afa1738b MH |
10317 | loop_replace_args args; |
10318 | ||
10319 | args.insn = insn; | |
10320 | args.match = mem; | |
10321 | args.replacement = reg; | |
10322 | ||
10323 | for_each_rtx (&insn, replace_loop_mem, &args); | |
e81eb37f AM |
10324 | |
10325 | /* If we hoist a mem write out of the loop, then REG_EQUAL | |
10326 | notes referring to the mem are no longer valid. */ | |
10327 | if (written) | |
10328 | { | |
10329 | rtx note, sub; | |
10330 | rtx *link; | |
10331 | ||
10332 | for (link = ®_NOTES (insn); (note = *link); link = &XEXP (note, 1)) | |
10333 | { | |
10334 | if (REG_NOTE_KIND (note) == REG_EQUAL | |
10335 | && (sub = find_mem_in_note (note)) | |
10336 | && true_dependence (mem, VOIDmode, sub, rtx_varies_p)) | |
10337 | { | |
10338 | /* Remove the note. */ | |
10339 | validate_change (NULL_RTX, link, XEXP (note, 1), 1); | |
10340 | break; | |
10341 | } | |
10342 | } | |
10343 | } | |
afa1738b MH |
10344 | } |
10345 | ||
c29f60c0 | 10346 | /* Replace one register with another. Called through for_each_rtx; PX points |
fd5d5b07 | 10347 | to the rtx being scanned. DATA is actually a pointer to |
afa1738b | 10348 | a structure of arguments. */ |
c29f60c0 BS |
10349 | |
10350 | static int | |
0c20a65f | 10351 | replace_loop_reg (rtx *px, void *data) |
c29f60c0 BS |
10352 | { |
10353 | rtx x = *px; | |
fd5d5b07 | 10354 | loop_replace_args *args = (loop_replace_args *) data; |
c29f60c0 BS |
10355 | |
10356 | if (x == NULL_RTX) | |
10357 | return 0; | |
10358 | ||
afa1738b MH |
10359 | if (x == args->match) |
10360 | validate_change (args->insn, px, args->replacement, 1); | |
c29f60c0 BS |
10361 | |
10362 | return 0; | |
10363 | } | |
10364 | ||
afa1738b | 10365 | static void |
0c20a65f | 10366 | replace_loop_regs (rtx insn, rtx reg, rtx replacement) |
afa1738b MH |
10367 | { |
10368 | loop_replace_args args; | |
10369 | ||
10370 | args.insn = insn; | |
10371 | args.match = reg; | |
10372 | args.replacement = replacement; | |
10373 | ||
10374 | for_each_rtx (&insn, replace_loop_reg, &args); | |
10375 | } | |
6057c0e6 | 10376 | \f |
96a45535 MH |
10377 | /* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB |
10378 | (ignored in the interim). */ | |
10379 | ||
10380 | static rtx | |
0c20a65f AJ |
10381 | loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED, |
10382 | basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn, | |
10383 | rtx pattern) | |
96a45535 MH |
10384 | { |
10385 | return emit_insn_after (pattern, where_insn); | |
10386 | } | |
10387 | ||
10388 | ||
cc2902df | 10389 | /* If WHERE_INSN is nonzero emit insn for PATTERN before WHERE_INSN |
804a718a MH |
10390 | in basic block WHERE_BB (ignored in the interim) within the loop |
10391 | otherwise hoist PATTERN into the loop pre-header. */ | |
10392 | ||
86e21212 | 10393 | rtx |
0c20a65f AJ |
10394 | loop_insn_emit_before (const struct loop *loop, |
10395 | basic_block where_bb ATTRIBUTE_UNUSED, | |
10396 | rtx where_insn, rtx pattern) | |
804a718a MH |
10397 | { |
10398 | if (! where_insn) | |
10399 | return loop_insn_hoist (loop, pattern); | |
10400 | return emit_insn_before (pattern, where_insn); | |
10401 | } | |
10402 | ||
10403 | ||
86e21212 MH |
10404 | /* Emit call insn for PATTERN before WHERE_INSN in basic block |
10405 | WHERE_BB (ignored in the interim) within the loop. */ | |
10406 | ||
10407 | static rtx | |
0c20a65f AJ |
10408 | loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED, |
10409 | basic_block where_bb ATTRIBUTE_UNUSED, | |
10410 | rtx where_insn, rtx pattern) | |
86e21212 MH |
10411 | { |
10412 | return emit_call_insn_before (pattern, where_insn); | |
10413 | } | |
10414 | ||
10415 | ||
804a718a MH |
10416 | /* Hoist insn for PATTERN into the loop pre-header. */ |
10417 | ||
10418 | rtx | |
0c20a65f | 10419 | loop_insn_hoist (const struct loop *loop, rtx pattern) |
804a718a MH |
10420 | { |
10421 | return loop_insn_emit_before (loop, 0, loop->start, pattern); | |
10422 | } | |
96a45535 MH |
10423 | |
10424 | ||
86e21212 MH |
10425 | /* Hoist call insn for PATTERN into the loop pre-header. */ |
10426 | ||
10427 | static rtx | |
0c20a65f | 10428 | loop_call_insn_hoist (const struct loop *loop, rtx pattern) |
86e21212 MH |
10429 | { |
10430 | return loop_call_insn_emit_before (loop, 0, loop->start, pattern); | |
10431 | } | |
10432 | ||
10433 | ||
96a45535 MH |
10434 | /* Sink insn for PATTERN after the loop end. */ |
10435 | ||
10436 | rtx | |
0c20a65f | 10437 | loop_insn_sink (const struct loop *loop, rtx pattern) |
96a45535 MH |
10438 | { |
10439 | return loop_insn_emit_before (loop, 0, loop->sink, pattern); | |
10440 | } | |
10441 | ||
e0bb17a8 | 10442 | /* bl->final_value can be either a general_operand or a PLUS of a general_operand | |
3d042e77 | 10443 | and a constant. Emit a sequence of instructions to load it into REG. */ | |
74411039 | 10444 | static rtx |
0c20a65f | 10445 | gen_load_of_final_value (rtx reg, rtx final_value) |
74411039 JH |
10446 | { |
10447 | rtx seq; | |
10448 | start_sequence (); | |
10449 | final_value = force_operand (final_value, reg); | |
10450 | if (final_value != reg) | |
10451 | emit_move_insn (reg, final_value); | |
2f937369 | 10452 | seq = get_insns (); |
74411039 JH |
10453 | end_sequence (); |
10454 | return seq; | |
10455 | } | |
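
/* A hypothetical usage sketch combining the helpers above: emitting a
   biv's final value into its register once the loop is done.

       rtx seq = gen_load_of_final_value (bl->biv->dest_reg,
                                          bl->final_value);
       loop_insn_sink (loop, seq);

   loop_insn_sink places the sequence before loop->sink, i.e. just
   past the loop end.  */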
96a45535 MH |
10456 | |
10457 | /* If the loop has multiple exits, emit insn for PATTERN before the | |
10458 | loop to ensure that it will always be executed no matter how the | |
10459 | loop exits. Otherwise, emit the insn for PATTERN after the loop, | |
10460 | since this is slightly more efficient. */ | |
10461 | ||
10462 | static rtx | |
0c20a65f | 10463 | loop_insn_sink_or_swim (const struct loop *loop, rtx pattern) |
96a45535 MH |
10464 | { |
10465 | if (loop->exit_count) | |
10466 | return loop_insn_hoist (loop, pattern); | |
10467 | else | |
10468 | return loop_insn_sink (loop, pattern); | |
10469 | } | |
804a718a | 10470 | \f |
099f0f3f | 10471 | static void |
0c20a65f | 10472 | loop_ivs_dump (const struct loop *loop, FILE *file, int verbose) |
099f0f3f MH |
10473 | { |
10474 | struct iv_class *bl; | |
10475 | int iv_num = 0; | |
10476 | ||
10477 | if (! loop || ! file) | |
10478 | return; | |
10479 | ||
10480 | for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next) | |
10481 | iv_num++; | |
10482 | ||
10483 | fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num); | |
10484 | ||
10485 | for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next) | |
10486 | { | |
10487 | loop_iv_class_dump (bl, file, verbose); | |
10488 | fputc ('\n', file); | |
10489 | } | |
10490 | } | |
10491 | ||
10492 | ||
10493 | static void | |
0c20a65f AJ |
10494 | loop_iv_class_dump (const struct iv_class *bl, FILE *file, |
10495 | int verbose ATTRIBUTE_UNUSED) | |
099f0f3f MH |
10496 | { |
10497 | struct induction *v; | |
10498 | rtx incr; | |
10499 | int i; | |
10500 | ||
10501 | if (! bl || ! file) | |
10502 | return; | |
10503 | ||
10504 | fprintf (file, "IV class for reg %d, benefit %d\n", | |
10505 | bl->regno, bl->total_benefit); | |
10506 | ||
10507 | fprintf (file, " Init insn %d", INSN_UID (bl->init_insn)); | |
10508 | if (bl->initial_value) | |
10509 | { | |
10510 | fprintf (file, ", init val: "); | |
10511 | print_simple_rtl (file, bl->initial_value); | |
10512 | } | |
10513 | if (bl->initial_test) | |
10514 | { | |
10515 | fprintf (file, ", init test: "); | |
10516 | print_simple_rtl (file, bl->initial_test); | |
10517 | } | |
10518 | fputc ('\n', file); | |
10519 | ||
10520 | if (bl->final_value) | |
10521 | { | |
10522 | fprintf (file, " Final val: "); | |
10523 | print_simple_rtl (file, bl->final_value); | |
10524 | fputc ('\n', file); | |
10525 | } | |
10526 | ||
10527 | if ((incr = biv_total_increment (bl))) | |
10528 | { | |
10529 | fprintf (file, " Total increment: "); | |
10530 | print_simple_rtl (file, incr); | |
10531 | fputc ('\n', file); | |
10532 | } | |
10533 | ||
10534 | /* List the increments. */ | |
10535 | for (i = 0, v = bl->biv; v; v = v->next_iv, i++) | |
10536 | { | |
10537 | fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn)); | |
10538 | print_simple_rtl (file, v->add_val); | |
10539 | fputc ('\n', file); | |
10540 | } | |
10541 | ||
10542 | /* List the givs. */ | |
10543 | for (i = 0, v = bl->giv; v; v = v->next_iv, i++) | |
10544 | { | |
6b8c9327 | 10545 | fprintf (file, " Giv%d: insn %d, benefit %d, ", |
099f0f3f MH |
10546 | i, INSN_UID (v->insn), v->benefit); |
10547 | if (v->giv_type == DEST_ADDR) | |
e11e816e | 10548 | print_simple_rtl (file, v->mem); |
099f0f3f | 10549 | else |
e11e816e | 10550 | print_simple_rtl (file, single_set (v->insn)); |
099f0f3f MH |
10551 | fputc ('\n', file); |
10552 | } | |
10553 | } | |
10554 | ||
10555 | ||
c804f3f8 | 10556 | static void |
0c20a65f | 10557 | loop_biv_dump (const struct induction *v, FILE *file, int verbose) |
c804f3f8 MH |
10558 | { |
10559 | if (! v || ! file) | |
10560 | return; | |
10561 | ||
10562 | fprintf (file, | |
10563 | "Biv %d: insn %d", | |
10564 | REGNO (v->dest_reg), INSN_UID (v->insn)); | |
10565 | fprintf (file, " const "); | |
10566 | print_simple_rtl (file, v->add_val); | |
10567 | ||
10568 | if (verbose && v->final_value) | |
10569 | { | |
6b8c9327 | 10570 | fputc ('\n', file); |
c804f3f8 MH |
10571 | fprintf (file, " final "); |
10572 | print_simple_rtl (file, v->final_value); | |
10573 | } | |
10574 | ||
10575 | fputc ('\n', file); | |
10576 | } | |
10577 | ||
10578 | ||
10579 | static void | |
0c20a65f | 10580 | loop_giv_dump (const struct induction *v, FILE *file, int verbose) |
c804f3f8 MH |
10581 | { |
10582 | if (! v || ! file) | |
10583 | return; | |
10584 | ||
10585 | if (v->giv_type == DEST_REG) | |
10586 | fprintf (file, "Giv %d: insn %d", | |
e11e816e | 10587 | REGNO (v->dest_reg), INSN_UID (v->insn)); |
c804f3f8 MH |
10588 | else |
10589 | fprintf (file, "Dest address: insn %d", | |
10590 | INSN_UID (v->insn)); | |
6b8c9327 | 10591 | |
c804f3f8 MH |
10592 | fprintf (file, " src reg %d benefit %d", |
10593 | REGNO (v->src_reg), v->benefit); | |
10594 | fprintf (file, " lifetime %d", | |
10595 | v->lifetime); | |
6b8c9327 | 10596 | |
c804f3f8 MH |
10597 | if (v->replaceable) |
10598 | fprintf (file, " replaceable"); | |
6b8c9327 | 10599 | |
c804f3f8 MH |
10600 | if (v->no_const_addval) |
10601 | fprintf (file, " ncav"); | |
6b8c9327 | 10602 | |
affd4f33 | 10603 | if (v->ext_dependent) |
c804f3f8 | 10604 | { |
affd4f33 | 10605 | switch (GET_CODE (v->ext_dependent)) |
c804f3f8 MH |
10606 | { |
10607 | case SIGN_EXTEND: | |
10608 | fprintf (file, " ext se"); | |
10609 | break; | |
10610 | case ZERO_EXTEND: | |
10611 | fprintf (file, " ext ze"); | |
10612 | break; | |
10613 | case TRUNCATE: | |
10614 | fprintf (file, " ext tr"); | |
505ddab6 | 10615 | break; |
c804f3f8 MH |
10616 | default: |
10617 | abort (); | |
10618 | } | |
10619 | } | |
10620 | ||
6b8c9327 | 10621 | fputc ('\n', file); |
c804f3f8 MH |
10622 | fprintf (file, " mult "); |
10623 | print_simple_rtl (file, v->mult_val); | |
10624 | ||
6b8c9327 | 10625 | fputc ('\n', file); |
c804f3f8 MH |
10626 | fprintf (file, " add "); |
10627 | print_simple_rtl (file, v->add_val); | |
10628 | ||
10629 | if (verbose && v->final_value) | |
10630 | { | |
6b8c9327 | 10631 | fputc ('\n', file); |
c804f3f8 MH |
10632 | fprintf (file, " final "); |
10633 | print_simple_rtl (file, v->final_value); | |
10634 | } | |
10635 | ||
6b8c9327 | 10636 | fputc ('\n', file); |
c804f3f8 MH |
10637 | } |
10638 | ||
10639 | ||
099f0f3f | 10640 | void |
0c20a65f | 10641 | debug_ivs (const struct loop *loop) |
099f0f3f MH |
10642 | { |
10643 | loop_ivs_dump (loop, stderr, 1); | |
10644 | } | |
10645 | ||
10646 | ||
10647 | void | |
0c20a65f | 10648 | debug_iv_class (const struct iv_class *bl) |
099f0f3f MH |
10649 | { |
10650 | loop_iv_class_dump (bl, stderr, 1); | |
10651 | } | |
10652 | ||
10653 | ||
c804f3f8 | 10654 | void |
0c20a65f | 10655 | debug_biv (const struct induction *v) |
c804f3f8 MH |
10656 | { |
10657 | loop_biv_dump (v, stderr, 1); | |
10658 | } | |
10659 | ||
10660 | ||
10661 | void | |
0c20a65f | 10662 | debug_giv (const struct induction *v) |
c804f3f8 MH |
10663 | { |
10664 | loop_giv_dump (v, stderr, 1); | |
10665 | } | |
10666 | ||
10667 | ||
6057c0e6 MH |
10668 | #define LOOP_BLOCK_NUM_1(INSN) \ |
10669 | ((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : -1) : -1) | |
10670 | ||
10671 | /* The notes do not have an assigned block, so look at the next insn. */ | |
10672 | #define LOOP_BLOCK_NUM(INSN) \ | |
10673 | ((INSN) ? (GET_CODE (INSN) == NOTE \ | |
10674 | ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \ | |
10675 | : LOOP_BLOCK_NUM_1 (INSN)) \ | |
10676 | : -1) | |
10677 | ||
10678 | #define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1) | |
10679 | ||
fd5d5b07 | 10680 | static void |
0c20a65f AJ |
10681 | loop_dump_aux (const struct loop *loop, FILE *file, |
10682 | int verbose ATTRIBUTE_UNUSED) | |
6057c0e6 MH |
10683 | { |
10684 | rtx label; | |
10685 | ||
10686 | if (! loop || ! file) | |
10687 | return; | |
10688 | ||
10689 | /* Print diagnostics to compare our concept of a loop with | |
10690 | what the loop notes say. */ | |
10691 | if (! PREV_INSN (loop->first->head) | |
10692 | || GET_CODE (PREV_INSN (loop->first->head)) != NOTE | |
10693 | || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head)) | |
10694 | != NOTE_INSN_LOOP_BEG) | |
fd5d5b07 | 10695 | fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n", |
6057c0e6 MH |
10696 | INSN_UID (PREV_INSN (loop->first->head))); |
10697 | if (! NEXT_INSN (loop->last->end) | |
10698 | || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE | |
10699 | || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end)) | |
10700 | != NOTE_INSN_LOOP_END) | |
10701 | fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n", | |
10702 | INSN_UID (NEXT_INSN (loop->last->end))); | |
10703 | ||
10704 | if (loop->start) | |
10705 | { | |
10706 | fprintf (file, | |
10707 | ";; start %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n", | |
10708 | LOOP_BLOCK_NUM (loop->start), | |
10709 | LOOP_INSN_UID (loop->start), | |
10710 | LOOP_BLOCK_NUM (loop->cont), | |
10711 | LOOP_INSN_UID (loop->cont), | |
10714 | LOOP_BLOCK_NUM (loop->vtop), | |
10715 | LOOP_INSN_UID (loop->vtop), | |
10716 | LOOP_BLOCK_NUM (loop->end), | |
10717 | LOOP_INSN_UID (loop->end)); | |
10718 | fprintf (file, ";; top %d (%d), scan start %d (%d)\n", | |
10719 | LOOP_BLOCK_NUM (loop->top), | |
fd5d5b07 | 10720 | LOOP_INSN_UID (loop->top), |
6057c0e6 MH |
10721 | LOOP_BLOCK_NUM (loop->scan_start), |
10722 | LOOP_INSN_UID (loop->scan_start)); | |
10723 | fprintf (file, ";; exit_count %d", loop->exit_count); | |
10724 | if (loop->exit_count) | |
10725 | { | |
10726 | fputs (", labels:", file); | |
10727 | for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label)) | |
10728 | { | |
10729 | fprintf (file, " %d ", | |
10730 | LOOP_INSN_UID (XEXP (label, 0))); | |
10731 | } | |
10732 | } | |
10733 | fputs ("\n", file); | |
fd5d5b07 | 10734 | |
6057c0e6 MH |
10735 | /* This can happen when a marked loop appears as two nested loops, |
10736 | say from while (a || b) {}. The inner loop won't match | |
10737 | the loop markers but the outer one will. */ | |
0b17ab2f | 10738 | if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index) |
6057c0e6 MH |
10739 | fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n"); |
10740 | } | |
10741 | } | |
6057c0e6 MH |
10742 | |
10743 | /* Call this function from the debugger to dump LOOP. */ | |
10744 | ||
10745 | void | |
0c20a65f | 10746 | debug_loop (const struct loop *loop) |
6057c0e6 MH |
10747 | { |
10748 | flow_loop_dump (loop, stderr, loop_dump_aux, 1); | |
10749 | } | |
685efa54 MH |
10750 | |
10751 | /* Call this function from the debugger to dump LOOPS. */ | |
10752 | ||
10753 | void | |
0c20a65f | 10754 | debug_loops (const struct loops *loops) |
685efa54 MH |
10755 | { |
10756 | flow_loops_dump (loops, stderr, loop_dump_aux, 1); | |
10757 | } |