Commit | Line | Data |
---|---|---|
402209ff | 1 | /* Control flow optimization code for GNU compiler. |
23a5b65a | 2 | Copyright (C) 1987-2014 Free Software Foundation, Inc. |
402209ff JH |
3 | |
4 | This file is part of GCC. | |
5 | ||
6 | GCC is free software; you can redistribute it and/or modify it under | |
7 | the terms of the GNU General Public License as published by the Free | |
9dcd6f09 | 8 | Software Foundation; either version 3, or (at your option) any later |
402209ff JH |
9 | version. |
10 | ||
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
14 | for more details. | |
15 | ||
16 | You should have received a copy of the GNU General Public License | |
9dcd6f09 NC |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ | |
402209ff | 19 | |
1ea7e6ad | 20 | /* This file contains the control flow optimizer. The main entry point is
402209ff JH |
21 | cleanup_cfg. The following optimizations are performed:
22 | ||
23 | - Unreachable blocks removal | |
d1a6adeb | 24 | - Edge forwarding (an edge to a forwarder block is forwarded to its
eaec9b3d | 25 | successor. Simplification of the branch instruction is performed by
402209ff | 26 | the underlying infrastructure, so the branch can be converted to a
f5143c46 | 27 | simplejump or eliminated).
402209ff JH |
28 | - Cross jumping (tail merging) |
29 | - Conditional jump-around-simplejump simplification | |
30 | - Basic block merging. */ | |
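/* Illustrative sketch, not part of the original file: at the source level,
   the "conditional jump-around-simplejump" pattern removed by
   try_simplify_condjump corresponds roughly to

       if (cond) goto over;              if (!cond) goto target;
       goto target;               =>
     over:                             over:
       ...                               ...

   and a "forwarder block" is a block that does nothing but jump to its
   single successor, so edges entering it can be redirected straight to
   that successor.  */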
31 | ||
32 | #include "config.h" | |
33 | #include "system.h" | |
4977bab6 ZW |
34 | #include "coretypes.h" |
35 | #include "tm.h" | |
402209ff | 36 | #include "rtl.h" |
4d648807 | 37 | #include "tree.h" |
402209ff | 38 | #include "hard-reg-set.h" |
7932a3db | 39 | #include "regs.h" |
402209ff JH |
40 | #include "insn-config.h" |
41 | #include "flags.h" | |
42 | #include "recog.h" | |
718f9c0f | 43 | #include "diagnostic-core.h" |
8ecba28a | 44 | #include "cselib.h" |
5f24e0dc | 45 | #include "params.h" |
9f16e871 | 46 | #include "tm_p.h" |
e4ec2cac | 47 | #include "target.h" |
78bde837 | 48 | #include "function.h" /* For inline functions in emit-rtl.h they need crtl. */ |
78528714 | 49 | #include "emit-rtl.h" |
ef330312 PB |
50 | #include "tree-pass.h" |
51 | #include "cfgloop.h" | |
52 | #include "expr.h" | |
6fb5fa3c | 53 | #include "df.h" |
c1e3e2d9 | 54 | #include "dce.h" |
7d817ebc | 55 | #include "dbgcnt.h" |
96b3c03f | 56 | #include "emit-rtl.h" |
402209ff | 57 | |
2dd2d53e | 58 | #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK) |
c22cacf3 | 59 | |
7cf240d5 JH |
60 | /* Set to true when we are running first pass of try_optimize_cfg loop. */ |
61 | static bool first_pass; | |
c1e3e2d9 | 62 | |
073a8998 | 63 | /* Set to true if crossjumps occurred in the latest run of try_optimize_cfg. */ |
c1e3e2d9 SB |
64 | static bool crossjumps_occured; |
65 | ||
4ec5d4f5 BS |
66 | /* Set to true if we couldn't run an optimization due to stale liveness |
67 | information; we should run df_analyze to enable more opportunities. */ |
68 | static bool block_was_dirty; | |
69 | ||
bf22920b | 70 | static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction); |
d329e058 | 71 | static bool try_crossjump_bb (int, basic_block); |
c2fc5456 | 72 | static bool outgoing_edges_match (int, basic_block, basic_block); |
472c95f5 | 73 | static enum replace_direction old_insns_match_p (int, rtx, rtx); |
d329e058 | 74 | |
d329e058 AJ |
75 | static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block); |
76 | static void merge_blocks_move_successor_nojumps (basic_block, basic_block); | |
d329e058 AJ |
77 | static bool try_optimize_cfg (int); |
78 | static bool try_simplify_condjump (basic_block); | |
79 | static bool try_forward_edges (int, basic_block); | |
6fb5fa3c | 80 | static edge thread_jump (edge, basic_block); |
d329e058 AJ |
81 | static bool mark_effect (rtx, bitmap); |
82 | static void notice_new_block (basic_block); | |
83 | static void update_forwarder_flag (basic_block); | |
84 | static int mentions_nonequal_regs (rtx *, void *); | |
c2fc5456 | 85 | static void merge_memattrs (rtx, rtx); |
635559ab JH |
86 | \f |
87 | /* Set flags for newly created block. */ | |
88 | ||
89 | static void | |
d329e058 | 90 | notice_new_block (basic_block bb) |
635559ab JH |
91 | { |
92 | if (!bb) | |
93 | return; | |
5f0d2358 | 94 | |
635559ab | 95 | if (forwarder_block_p (bb)) |
2dd2d53e | 96 | bb->flags |= BB_FORWARDER_BLOCK; |
635559ab JH |
97 | } |
98 | ||
99 | /* Recompute forwarder flag after block has been modified. */ | |
100 | ||
101 | static void | |
d329e058 | 102 | update_forwarder_flag (basic_block bb) |
635559ab JH |
103 | { |
104 | if (forwarder_block_p (bb)) | |
2dd2d53e | 105 | bb->flags |= BB_FORWARDER_BLOCK; |
635559ab | 106 | else |
2dd2d53e | 107 | bb->flags &= ~BB_FORWARDER_BLOCK; |
635559ab | 108 | } |
402209ff JH |
109 | \f |
110 | /* Simplify a conditional jump around an unconditional jump. | |
111 | Return true if something changed. */ | |
112 | ||
113 | static bool | |
d329e058 | 114 | try_simplify_condjump (basic_block cbranch_block) |
402209ff JH |
115 | { |
116 | basic_block jump_block, jump_dest_block, cbranch_dest_block; | |
117 | edge cbranch_jump_edge, cbranch_fallthru_edge; | |
118 | rtx cbranch_insn; | |
119 | ||
120 | /* Verify that there are exactly two successors. */ | |
628f6a4e | 121 | if (EDGE_COUNT (cbranch_block->succs) != 2) |
402209ff JH |
122 | return false; |
123 | ||
124 | /* Verify that we've got a normal conditional branch at the end | |
125 | of the block. */ | |
a813c111 | 126 | cbranch_insn = BB_END (cbranch_block); |
402209ff JH |
127 | if (!any_condjump_p (cbranch_insn)) |
128 | return false; | |
129 | ||
130 | cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block); | |
131 | cbranch_jump_edge = BRANCH_EDGE (cbranch_block); | |
132 | ||
133 | /* The next block must not have multiple predecessors, must not | |
134 | be the last block in the function, and must contain just the | |
135 | unconditional jump. */ | |
136 | jump_block = cbranch_fallthru_edge->dest; | |
c5cbcccf | 137 | if (!single_pred_p (jump_block) |
fefa31b5 | 138 | || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun) |
635559ab | 139 | || !FORWARDER_BLOCK_P (jump_block)) |
402209ff | 140 | return false; |
c5cbcccf | 141 | jump_dest_block = single_succ (jump_block); |
402209ff | 142 | |
750054a2 CT |
143 | /* If we are partitioning hot/cold basic blocks, we don't want to |
144 | mess up unconditional or indirect jumps that cross between hot | |
c22cacf3 | 145 | and cold sections. |
8e8d5162 CT |
146 | |
147 | Basic block partitioning may result in some jumps that appear to | |
c22cacf3 MS |
148 | be optimizable (or blocks that appear to be mergeable), but which really |
149 | must be left untouched (they are required to make it safely across | |
150 | partition boundaries). See the comments at the top of | |
8e8d5162 | 151 | bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */ |
750054a2 | 152 | |
87c8b4be CT |
153 | if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block) |
154 | || (cbranch_jump_edge->flags & EDGE_CROSSING)) | |
750054a2 CT |
155 | return false; |
156 | ||
402209ff JH |
157 | /* The conditional branch must target the block after the |
158 | unconditional branch. */ | |
159 | cbranch_dest_block = cbranch_jump_edge->dest; | |
160 | ||
fefa31b5 | 161 | if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun) |
2f52c531 | 162 | || !can_fallthru (jump_block, cbranch_dest_block)) |
402209ff JH |
163 | return false; |
164 | ||
ca6c03ca JH |
165 | /* Invert the conditional branch. */ |
166 | if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0)) | |
167 | return false; | |
402209ff | 168 | |
c263766c RH |
169 | if (dump_file) |
170 | fprintf (dump_file, "Simplifying condjump %i around jump %i\n", | |
a813c111 | 171 | INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block))); |
402209ff JH |
172 | |
173 | /* Success. Update the CFG to match. Note that after this point | |
174 | the edge variable names appear backwards; the redirection is done | |
175 | this way to preserve edge profile data. */ | |
176 | cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge, | |
177 | cbranch_dest_block); | |
178 | cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge, | |
179 | jump_dest_block); | |
180 | cbranch_jump_edge->flags |= EDGE_FALLTHRU; | |
181 | cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU; | |
b446e5a2 | 182 | update_br_prob_note (cbranch_block); |
402209ff JH |
183 | |
184 | /* Delete the block with the unconditional jump, and clean up the mess. */ | |
f470c378 ZD |
185 | delete_basic_block (jump_block); |
186 | tidy_fallthru_edge (cbranch_jump_edge); | |
261139ce | 187 | update_forwarder_flag (cbranch_block); |
402209ff JH |
188 | |
189 | return true; | |
190 | } | |
191 | \f | |
8ecba28a JH |
192 | /* Attempt to prove that the operation is a NOOP using CSElib, or mark the
193 | effect on a register. Used by jump threading. */
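/* For example (illustrative, not from the original sources): for
   (set (reg X) (plus (reg Y) (const_int 1))) the bit for X is set in
   NONEQUAL unless cselib already proves the source equal to the
   destination, while (clobber (reg X)) clears X's bits, because the
   clobbered value is dead and does not have to match.  */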
5f0d2358 | 194 | |
8ecba28a | 195 | static bool |
d329e058 | 196 | mark_effect (rtx exp, regset nonequal) |
8ecba28a | 197 | { |
9f16e871 JH |
198 | int regno; |
199 | rtx dest; | |
8ecba28a JH |
200 | switch (GET_CODE (exp)) |
201 | { | |
202 | /* In case we do clobber the register, mark it as equal, as we know the
c22cacf3 | 203 | value is dead so it doesn't have to match. */
f87c27b4 KH |
204 | case CLOBBER: |
205 | if (REG_P (XEXP (exp, 0))) | |
206 | { | |
207 | dest = XEXP (exp, 0); | |
208 | regno = REGNO (dest); | |
f773c2bd AS |
209 | if (HARD_REGISTER_NUM_P (regno)) |
210 | bitmap_clear_range (nonequal, regno, | |
211 | hard_regno_nregs[regno][GET_MODE (dest)]); | |
212 | else | |
213 | bitmap_clear_bit (nonequal, regno); | |
f87c27b4 KH |
214 | } |
215 | return false; | |
5f0d2358 | 216 | |
f87c27b4 KH |
217 | case SET: |
218 | if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp))) | |
8ecba28a | 219 | return false; |
f87c27b4 KH |
220 | dest = SET_DEST (exp); |
221 | if (dest == pc_rtx) | |
8ecba28a | 222 | return false; |
f87c27b4 KH |
223 | if (!REG_P (dest)) |
224 | return true; | |
225 | regno = REGNO (dest); | |
f773c2bd AS |
226 | if (HARD_REGISTER_NUM_P (regno)) |
227 | bitmap_set_range (nonequal, regno, | |
228 | hard_regno_nregs[regno][GET_MODE (dest)]); | |
229 | else | |
230 | bitmap_set_bit (nonequal, regno); | |
f87c27b4 KH |
231 | return false; |
232 | ||
233 | default: | |
234 | return false; | |
8ecba28a JH |
235 | } |
236 | } | |
fe477d8b | 237 | |
dcc24678 | 238 | /* Return nonzero if X is a register set in regset DATA. |
fe477d8b JH |
239 | Called via for_each_rtx. */ |
240 | static int | |
d329e058 | 241 | mentions_nonequal_regs (rtx *x, void *data) |
fe477d8b JH |
242 | { |
243 | regset nonequal = (regset) data; | |
244 | if (REG_P (*x)) | |
245 | { | |
246 | int regno; | |
247 | ||
248 | regno = REGNO (*x); | |
249 | if (REGNO_REG_SET_P (nonequal, regno)) | |
250 | return 1; | |
251 | if (regno < FIRST_PSEUDO_REGISTER) | |
252 | { | |
66fd46b6 | 253 | int n = hard_regno_nregs[regno][GET_MODE (*x)]; |
fe477d8b JH |
254 | while (--n > 0) |
255 | if (REGNO_REG_SET_P (nonequal, regno + n)) | |
256 | return 1; | |
257 | } | |
258 | } | |
259 | return 0; | |
260 | } | |
8ecba28a | 261 | /* Attempt to prove that the basic block B will have no side effects and
95bd1dd7 | 262 | always continues along the same edge if reached via E. Return that edge
8ecba28a JH |
263 | if it exists, NULL otherwise. */
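/* Illustrative example (an assumed typical case, not from the sources):
   if E is the "x != 0" edge out of a block ending in

       if (x == 0) goto A;

   and B ends in

       if (x == 0) goto C;

   then, provided nothing on that path has side effects or changes x, the
   second test is already decided when B is entered via E, so thread_jump
   returns B's fallthru ("not taken") edge.  */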
264 | ||
265 | static edge | |
6fb5fa3c | 266 | thread_jump (edge e, basic_block b) |
8ecba28a JH |
267 | { |
268 | rtx set1, set2, cond1, cond2, insn; | |
269 | enum rtx_code code1, code2, reversed_code2; | |
270 | bool reverse1 = false; | |
3cd8c58a | 271 | unsigned i; |
8ecba28a JH |
272 | regset nonequal; |
273 | bool failed = false; | |
a2041967 | 274 | reg_set_iterator rsi; |
8ecba28a | 275 | |
2dd2d53e | 276 | if (b->flags & BB_NONTHREADABLE_BLOCK) |
1540f9eb JH |
277 | return NULL; |
278 | ||
8ecba28a JH |
279 | /* At the moment, we do handle only conditional jumps, but later we may |
280 | want to extend this code to tablejumps and others. */ | |
628f6a4e | 281 | if (EDGE_COUNT (e->src->succs) != 2) |
8ecba28a | 282 | return NULL; |
628f6a4e | 283 | if (EDGE_COUNT (b->succs) != 2) |
1540f9eb | 284 | { |
2dd2d53e | 285 | b->flags |= BB_NONTHREADABLE_BLOCK; |
1540f9eb JH |
286 | return NULL; |
287 | } | |
8ecba28a JH |
288 | |
289 | /* Second branch must end with onlyjump, as we will eliminate the jump. */ | |
a813c111 | 290 | if (!any_condjump_p (BB_END (e->src))) |
8ecba28a | 291 | return NULL; |
f87c27b4 | 292 | |
a813c111 | 293 | if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b))) |
1540f9eb | 294 | { |
2dd2d53e | 295 | b->flags |= BB_NONTHREADABLE_BLOCK; |
1540f9eb JH |
296 | return NULL; |
297 | } | |
8ecba28a | 298 | |
a813c111 SB |
299 | set1 = pc_set (BB_END (e->src)); |
300 | set2 = pc_set (BB_END (b)); | |
8ecba28a | 301 | if (((e->flags & EDGE_FALLTHRU) != 0) |
68f3f6f1 | 302 | != (XEXP (SET_SRC (set1), 1) == pc_rtx)) |
8ecba28a JH |
303 | reverse1 = true; |
304 | ||
305 | cond1 = XEXP (SET_SRC (set1), 0); | |
306 | cond2 = XEXP (SET_SRC (set2), 0); | |
307 | if (reverse1) | |
a813c111 | 308 | code1 = reversed_comparison_code (cond1, BB_END (e->src)); |
8ecba28a JH |
309 | else |
310 | code1 = GET_CODE (cond1); | |
311 | ||
312 | code2 = GET_CODE (cond2); | |
a813c111 | 313 | reversed_code2 = reversed_comparison_code (cond2, BB_END (b)); |
8ecba28a JH |
314 | |
315 | if (!comparison_dominates_p (code1, code2) | |
316 | && !comparison_dominates_p (code1, reversed_code2)) | |
317 | return NULL; | |
318 | ||
319 | /* Ensure that the comparison operators are equivalent. | |
95bd1dd7 | 320 | ??? This is far too pessimistic. We should allow swapped operands, |
8ecba28a JH |
321 | different CCmodes, or for example comparisons for interval, that |
322 | dominate even when operands are not equivalent. */ | |
323 | if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0)) | |
324 | || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1))) | |
325 | return NULL; | |
326 | ||
327 | /* Short circuit cases where block B contains some side effects, as we can't | |
328 | safely bypass it. */ | |
a813c111 | 329 | for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b)); |
8ecba28a JH |
330 | insn = NEXT_INSN (insn)) |
331 | if (INSN_P (insn) && side_effects_p (PATTERN (insn))) | |
1540f9eb | 332 | { |
2dd2d53e | 333 | b->flags |= BB_NONTHREADABLE_BLOCK; |
1540f9eb JH |
334 | return NULL; |
335 | } | |
8ecba28a | 336 | |
457eeaae | 337 | cselib_init (0); |
8ecba28a JH |
338 | |
339 | /* First process all values computed in the source basic block. */ | |
3cd8c58a NS |
340 | for (insn = NEXT_INSN (BB_HEAD (e->src)); |
341 | insn != NEXT_INSN (BB_END (e->src)); | |
8ecba28a JH |
342 | insn = NEXT_INSN (insn)) |
343 | if (INSN_P (insn)) | |
344 | cselib_process_insn (insn); | |
345 | ||
8bdbfff5 | 346 | nonequal = BITMAP_ALLOC (NULL); |
8ecba28a | 347 | CLEAR_REG_SET (nonequal); |
5f0d2358 | 348 | |
8ecba28a JH |
349 | /* Now assume that we've continued by the edge E to B and continue
350 | processing as if it were the same basic block.
8ecba28a | 351 | Our goal is to prove that the whole block is a NOOP. */
5f0d2358 | 352 | |
3cd8c58a NS |
353 | for (insn = NEXT_INSN (BB_HEAD (b)); |
354 | insn != NEXT_INSN (BB_END (b)) && !failed; | |
8ecba28a | 355 | insn = NEXT_INSN (insn)) |
f87c27b4 KH |
356 | { |
357 | if (INSN_P (insn)) | |
358 | { | |
359 | rtx pat = PATTERN (insn); | |
360 | ||
361 | if (GET_CODE (pat) == PARALLEL) | |
362 | { | |
3cd8c58a | 363 | for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++) |
f87c27b4 KH |
364 | failed |= mark_effect (XVECEXP (pat, 0, i), nonequal); |
365 | } | |
366 | else | |
367 | failed |= mark_effect (pat, nonequal); | |
368 | } | |
5f0d2358 | 369 | |
f87c27b4 KH |
370 | cselib_process_insn (insn); |
371 | } | |
8ecba28a JH |
372 | |
373 | /* Later we should clear nonequal of dead registers. So far we don't | |
374 | have life information in cfg_cleanup. */ | |
375 | if (failed) | |
1540f9eb | 376 | { |
2dd2d53e | 377 | b->flags |= BB_NONTHREADABLE_BLOCK; |
1540f9eb JH |
378 | goto failed_exit; |
379 | } | |
8ecba28a | 380 | |
fe477d8b JH |
381 | /* cond2 must not mention any register that is not equal to the |
382 | former block. */ | |
383 | if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal)) | |
384 | goto failed_exit; | |
385 | ||
a2041967 KH |
386 | EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi) |
387 | goto failed_exit; | |
8ecba28a | 388 | |
8bdbfff5 | 389 | BITMAP_FREE (nonequal); |
8ecba28a JH |
390 | cselib_finish (); |
391 | if ((comparison_dominates_p (code1, code2) != 0) | |
4deaa2f8 | 392 | != (XEXP (SET_SRC (set2), 1) == pc_rtx)) |
8ecba28a JH |
393 | return BRANCH_EDGE (b); |
394 | else | |
395 | return FALLTHRU_EDGE (b); | |
396 | ||
397 | failed_exit: | |
8bdbfff5 | 398 | BITMAP_FREE (nonequal); |
8ecba28a JH |
399 | cselib_finish (); |
400 | return NULL; | |
401 | } | |
402 | \f | |
402209ff | 403 | /* Attempt to forward edges leaving basic block B. |
eaec9b3d | 404 | Return true if successful. */ |
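/* Illustrative sketch (an assumption, not from the sources): for a chain
   B -> F1 -> F2 -> T where F1 and F2 are forwarder blocks, the edge out
   of B is redirected straight to T, and the count and frequency of the
   redirected edge are subtracted from the profiles of F1 and F2.  */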
402209ff JH |
405 | |
406 | static bool | |
d329e058 | 407 | try_forward_edges (int mode, basic_block b) |
402209ff JH |
408 | { |
409 | bool changed = false; | |
628f6a4e BE |
410 | edge_iterator ei; |
411 | edge e, *threaded_edges = NULL; | |
402209ff | 412 | |
750054a2 CT |
413 | /* If we are partitioning hot/cold basic blocks, we don't want to |
414 | mess up unconditional or indirect jumps that cross between hot | |
c22cacf3 MS |
415 | and cold sections. |
416 | ||
8e8d5162 | 417 | Basic block partitioning may result in some jumps that appear to |
fa10beec RW |
418 | be optimizable (or blocks that appear to be mergeable), but which really |
419 | must be left untouched (they are required to make it safely across | |
c22cacf3 | 420 | partition boundaries). See the comments at the top of |
8e8d5162 CT |
421 | bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */ |
422 | ||
339ba33b | 423 | if (JUMP_P (BB_END (b)) && CROSSING_JUMP_P (BB_END (b))) |
750054a2 CT |
424 | return false; |
425 | ||
628f6a4e | 426 | for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); ) |
402209ff JH |
427 | { |
428 | basic_block target, first; | |
8a829274 EB |
429 | location_t goto_locus; |
430 | int counter; | |
8ecba28a | 431 | bool threaded = false; |
bcb3bc6d | 432 | int nthreaded_edges = 0; |
4ec5d4f5 | 433 | bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0; |
402209ff | 434 | |
402209ff JH |
435 | /* Skip complex edges because we don't know how to update them. |
436 | ||
c22cacf3 MS |
437 | Still handle fallthru edges, as we can succeed to forward fallthru |
438 | edge to the same place as the branch edge of conditional branch | |
439 | and turn conditional branch to an unconditional branch. */ | |
402209ff | 440 | if (e->flags & EDGE_COMPLEX) |
628f6a4e BE |
441 | { |
442 | ei_next (&ei); | |
443 | continue; | |
444 | } | |
402209ff JH |
445 | |
446 | target = first = e->dest; | |
24bd1a0b | 447 | counter = NUM_FIXED_BLOCKS; |
7241571e | 448 | goto_locus = e->goto_locus; |
402209ff | 449 | |
9fb32434 | 450 | /* If we are partitioning hot/cold basic_blocks, we don't want to mess |
8e8d5162 CT |
451 | up jumps that cross between hot/cold sections. |
452 | ||
453 | Basic block partitioning may result in some jumps that appear | |
c22cacf3 MS |
454 | to be optimizable (or blocks that appear to be mergeable), but which |
455 | really must be left untouched (they are required to make it safely | |
8e8d5162 CT |
456 | across partition boundaries). See the comments at the top of |
457 | bb-reorder.c:partition_hot_cold_basic_blocks for complete | |
458 | details. */ | |
9fb32434 | 459 | |
fefa31b5 | 460 | if (first != EXIT_BLOCK_PTR_FOR_FN (cfun) |
339ba33b RS |
461 | && JUMP_P (BB_END (first)) |
462 | && CROSSING_JUMP_P (BB_END (first))) | |
3371a64f | 463 | return changed; |
9fb32434 | 464 | |
0cae8d31 | 465 | while (counter < n_basic_blocks_for_fn (cfun)) |
402209ff | 466 | { |
8ecba28a JH |
467 | basic_block new_target = NULL; |
468 | bool new_target_threaded = false; | |
4ec5d4f5 | 469 | may_thread |= (target->flags & BB_MODIFIED) != 0; |
8ecba28a JH |
470 | |
471 | if (FORWARDER_BLOCK_P (target) | |
c22cacf3 | 472 | && !(single_succ_edge (target)->flags & EDGE_CROSSING) |
fefa31b5 | 473 | && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
8ecba28a JH |
474 | { |
475 | /* Bypass trivial infinite loops. */ | |
c5cbcccf ZD |
476 | new_target = single_succ (target); |
477 | if (target == new_target) | |
0cae8d31 | 478 | counter = n_basic_blocks_for_fn (cfun); |
7241571e JJ |
479 | else if (!optimize) |
480 | { | |
481 | /* When not optimizing, ensure that edges or forwarder | |
482 | blocks with different locus are not optimized out. */ | |
8a829274 EB |
483 | location_t new_locus = single_succ_edge (target)->goto_locus; |
484 | location_t locus = goto_locus; | |
7241571e | 485 | |
ffa4602f EB |
486 | if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION |
487 | && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION | |
5368224f | 488 | && new_locus != locus) |
50a36e42 EB |
489 | new_target = NULL; |
490 | else | |
7241571e | 491 | { |
ffa4602f | 492 | if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION) |
50a36e42 | 493 | locus = new_locus; |
7241571e | 494 | |
ffa4602f | 495 | rtx last = BB_END (target); |
11321111 AO |
496 | if (DEBUG_INSN_P (last)) |
497 | last = prev_nondebug_insn (last); | |
ffa4602f EB |
498 | if (last && INSN_P (last)) |
499 | new_locus = INSN_LOCATION (last); | |
500 | else | |
501 | new_locus = UNKNOWN_LOCATION; | |
11321111 | 502 | |
ffa4602f EB |
503 | if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION |
504 | && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION | |
5368224f | 505 | && new_locus != locus) |
50a36e42 EB |
506 | new_target = NULL; |
507 | else | |
508 | { | |
ffa4602f | 509 | if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION) |
50a36e42 EB |
510 | locus = new_locus; |
511 | ||
512 | goto_locus = locus; | |
513 | } | |
7241571e JJ |
514 | } |
515 | } | |
8ecba28a | 516 | } |
5f0d2358 | 517 | |
8ecba28a JH |
518 | /* Allow to thread only over one edge at time to simplify updating |
519 | of probabilities. */ | |
7cf240d5 | 520 | else if ((mode & CLEANUP_THREADING) && may_thread) |
8ecba28a | 521 | { |
6fb5fa3c | 522 | edge t = thread_jump (e, target); |
1c570418 | 523 | if (t) |
8ecba28a | 524 | { |
bcb3bc6d | 525 | if (!threaded_edges) |
0cae8d31 DM |
526 | threaded_edges = XNEWVEC (edge, |
527 | n_basic_blocks_for_fn (cfun)); | |
3b3b1e32 RH |
528 | else |
529 | { | |
530 | int i; | |
531 | ||
532 | /* Detect an infinite loop across blocks not | |
533 | including the start block. */ | |
534 | for (i = 0; i < nthreaded_edges; ++i) | |
535 | if (threaded_edges[i] == t) | |
536 | break; | |
537 | if (i < nthreaded_edges) | |
b90e45ae | 538 | { |
0cae8d31 | 539 | counter = n_basic_blocks_for_fn (cfun); |
b90e45ae JH |
540 | break; |
541 | } | |
3b3b1e32 RH |
542 | } |
543 | ||
544 | /* Detect an infinite loop across the start block. */ | |
545 | if (t->dest == b) | |
546 | break; | |
547 | ||
0cae8d31 DM |
548 | gcc_assert (nthreaded_edges |
549 | < (n_basic_blocks_for_fn (cfun) | |
550 | - NUM_FIXED_BLOCKS)); | |
1c570418 | 551 | threaded_edges[nthreaded_edges++] = t; |
3b3b1e32 RH |
552 | |
553 | new_target = t->dest; | |
554 | new_target_threaded = true; | |
8ecba28a JH |
555 | } |
556 | } | |
5f0d2358 | 557 | |
8ecba28a JH |
558 | if (!new_target) |
559 | break; | |
402209ff | 560 | |
8ecba28a JH |
561 | counter++; |
562 | target = new_target; | |
563 | threaded |= new_target_threaded; | |
f87c27b4 | 564 | } |
402209ff | 565 | |
0cae8d31 | 566 | if (counter >= n_basic_blocks_for_fn (cfun)) |
402209ff | 567 | { |
c263766c RH |
568 | if (dump_file) |
569 | fprintf (dump_file, "Infinite loop in BB %i.\n", | |
0b17ab2f | 570 | target->index); |
402209ff JH |
571 | } |
572 | else if (target == first) | |
573 | ; /* We didn't do anything. */ | |
574 | else | |
575 | { | |
576 | /* Save the values now, as the edge may get removed. */ | |
577 | gcov_type edge_count = e->count; | |
578 | int edge_probability = e->probability; | |
8ecba28a | 579 | int edge_frequency; |
1c570418 | 580 | int n = 0; |
402209ff | 581 | |
7241571e JJ |
582 | e->goto_locus = goto_locus; |
583 | ||
6ee3c8e4 | 584 | /* Don't force if target is exit block. */ |
fefa31b5 | 585 | if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
402209ff | 586 | { |
8ecba28a | 587 | notice_new_block (redirect_edge_and_branch_force (e, target)); |
c263766c RH |
588 | if (dump_file) |
589 | fprintf (dump_file, "Conditionals threaded.\n"); | |
402209ff | 590 | } |
8ecba28a | 591 | else if (!redirect_edge_and_branch (e, target)) |
402209ff | 592 | { |
c263766c RH |
593 | if (dump_file) |
594 | fprintf (dump_file, | |
5f0d2358 | 595 | "Forwarding edge %i->%i to %i failed.\n", |
0b17ab2f | 596 | b->index, e->dest->index, target->index); |
628f6a4e | 597 | ei_next (&ei); |
8ecba28a | 598 | continue; |
402209ff | 599 | } |
5f0d2358 | 600 | |
8ecba28a JH |
601 | /* We successfully forwarded the edge. Now update profile |
602 | data: for each edge we traversed in the chain, remove | |
603 | the original edge's execution count. */ | |
8ddb5a29 | 604 | edge_frequency = apply_probability (b->frequency, edge_probability); |
8ecba28a | 605 | |
8ecba28a JH |
606 | do |
607 | { | |
608 | edge t; | |
5f0d2358 | 609 | |
c5cbcccf | 610 | if (!single_succ_p (first)) |
3b3b1e32 | 611 | { |
341c100f | 612 | gcc_assert (n < nthreaded_edges); |
3b3b1e32 | 613 | t = threaded_edges [n++]; |
341c100f | 614 | gcc_assert (t->src == first); |
15db5571 JH |
615 | update_bb_profile_for_threading (first, edge_frequency, |
616 | edge_count, t); | |
b446e5a2 | 617 | update_br_prob_note (first); |
3b3b1e32 | 618 | } |
8ecba28a | 619 | else |
bcb3bc6d | 620 | { |
15db5571 JH |
621 | first->count -= edge_count; |
622 | if (first->count < 0) | |
623 | first->count = 0; | |
624 | first->frequency -= edge_frequency; | |
625 | if (first->frequency < 0) | |
626 | first->frequency = 0; | |
bcb3bc6d JH |
627 | /* It is possible that as the result of |
628 | threading we've removed edge as it is | |
629 | threaded to the fallthru edge. Avoid | |
630 | getting out of sync. */ | |
631 | if (n < nthreaded_edges | |
632 | && first == threaded_edges [n]->src) | |
633 | n++; | |
c5cbcccf | 634 | t = single_succ_edge (first); |
f87c27b4 | 635 | } |
5f0d2358 | 636 | |
b446e5a2 JH |
637 | t->count -= edge_count; |
638 | if (t->count < 0) | |
639 | t->count = 0; | |
8ecba28a JH |
640 | first = t->dest; |
641 | } | |
642 | while (first != target); | |
643 | ||
644 | changed = true; | |
628f6a4e | 645 | continue; |
402209ff | 646 | } |
628f6a4e | 647 | ei_next (&ei); |
402209ff JH |
648 | } |
649 | ||
04695783 | 650 | free (threaded_edges); |
402209ff JH |
651 | return changed; |
652 | } | |
653 | \f | |
402209ff JH |
654 | |
655 | /* Blocks A and B are to be merged into a single block. A has no incoming | |
656 | fallthru edge, so it can be moved before B without adding or modifying | |
657 | any jumps (aside from the jump from A to B). */ | |
658 | ||
4262e623 | 659 | static void |
d329e058 | 660 | merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b) |
402209ff JH |
661 | { |
662 | rtx barrier; | |
402209ff | 663 | |
750054a2 CT |
664 | /* If we are partitioning hot/cold basic blocks, we don't want to |
665 | mess up unconditional or indirect jumps that cross between hot | |
8e8d5162 | 666 | and cold sections. |
c22cacf3 | 667 | |
8e8d5162 | 668 | Basic block partitioning may result in some jumps that appear to |
c22cacf3 MS |
669 | be optimizable (or blocks that appear to be mergeable), but which really |
670 | must be left untouched (they are required to make it safely across | |
671 | partition boundaries). See the comments at the top of | |
8e8d5162 CT |
672 | bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */ |
673 | ||
87c8b4be | 674 | if (BB_PARTITION (a) != BB_PARTITION (b)) |
750054a2 CT |
675 | return; |
676 | ||
a813c111 | 677 | barrier = next_nonnote_insn (BB_END (a)); |
341c100f | 678 | gcc_assert (BARRIER_P (barrier)); |
53c17031 | 679 | delete_insn (barrier); |
402209ff | 680 | |
402209ff | 681 | /* Scramble the insn chain. */ |
a813c111 SB |
682 | if (BB_END (a) != PREV_INSN (BB_HEAD (b))) |
683 | reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b))); | |
6fb5fa3c | 684 | df_set_bb_dirty (a); |
402209ff | 685 | |
c263766c RH |
686 | if (dump_file) |
687 | fprintf (dump_file, "Moved block %d before %d and merged.\n", | |
0b17ab2f | 688 | a->index, b->index); |
402209ff | 689 | |
bf77398c | 690 | /* Swap the records for the two blocks around. */ |
402209ff | 691 | |
918ed612 ZD |
692 | unlink_block (a); |
693 | link_block (a, b->prev_bb); | |
694 | ||
402209ff | 695 | /* Now blocks A and B are contiguous. Merge them. */ |
bc35512f | 696 | merge_blocks (a, b); |
402209ff JH |
697 | } |
698 | ||
699 | /* Blocks A and B are to be merged into a single block. B has no outgoing | |
700 | fallthru edge, so it can be moved after A without adding or modifying | |
701 | any jumps (aside from the jump from A to B). */ | |
702 | ||
4262e623 | 703 | static void |
d329e058 | 704 | merge_blocks_move_successor_nojumps (basic_block a, basic_block b) |
402209ff | 705 | { |
f62ce55b | 706 | rtx barrier, real_b_end; |
ee735eef | 707 | rtx label, table; |
402209ff | 708 | |
750054a2 CT |
709 | /* If we are partitioning hot/cold basic blocks, we don't want to |
710 | mess up unconditional or indirect jumps that cross between hot | |
c22cacf3 MS |
711 | and cold sections. |
712 | ||
8e8d5162 | 713 | Basic block partitioning may result in some jumps that appear to |
c22cacf3 MS |
714 | be optimizable (or blocks that appear to be mergeable), but which really |
715 | must be left untouched (they are required to make it safely across | |
716 | partition boundaries). See the comments at the top of | |
8e8d5162 CT |
717 | bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */ |
718 | ||
87c8b4be | 719 | if (BB_PARTITION (a) != BB_PARTITION (b)) |
750054a2 CT |
720 | return; |
721 | ||
a813c111 | 722 | real_b_end = BB_END (b); |
402209ff | 723 | |
ee735eef JZ |
724 | /* If there is a jump table following block B temporarily add the jump table |
725 | to block B so that it will also be moved to the correct location. */ | |
a813c111 SB |
726 | if (tablejump_p (BB_END (b), &label, &table) |
727 | && prev_active_insn (label) == BB_END (b)) | |
402209ff | 728 | { |
a813c111 | 729 | BB_END (b) = table; |
402209ff JH |
730 | } |
731 | ||
732 | /* There had better have been a barrier there. Delete it. */ | |
a813c111 | 733 | barrier = NEXT_INSN (BB_END (b)); |
4b4bf941 | 734 | if (barrier && BARRIER_P (barrier)) |
53c17031 | 735 | delete_insn (barrier); |
402209ff | 736 | |
402209ff JH |
737 | |
738 | /* Scramble the insn chain. */ | |
a813c111 | 739 | reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a)); |
402209ff | 740 | |
f62ce55b | 741 | /* Restore the real end of b. */ |
a813c111 | 742 | BB_END (b) = real_b_end; |
f62ce55b | 743 | |
c263766c RH |
744 | if (dump_file) |
745 | fprintf (dump_file, "Moved block %d after %d and merged.\n", | |
0b17ab2f | 746 | b->index, a->index); |
2150ad33 RH |
747 | |
748 | /* Now blocks A and B are contiguous. Merge them. */ | |
bc35512f | 749 | merge_blocks (a, b); |
402209ff JH |
750 | } |
751 | ||
752 | /* Attempt to merge basic blocks that are potentially non-adjacent. | |
ec3ae3da JH |
753 | Return NULL iff the attempt failed, otherwise return basic block |
754 | where cleanup_cfg should continue. Because the merging commonly | |
755 | moves basic block away or introduces another optimization | |
e0bb17a8 | 756 | possibility, return the basic block just before B so cleanup_cfg doesn't
ec3ae3da JH |
757 | need to iterate. |
758 | ||
759 | It may be a good idea to return the basic block before C in case
760 | C has been moved after B and originally appeared earlier in the | |
4d6922ee | 761 | insn sequence, but we have no information available about the |
ec3ae3da JH |
762 | relative ordering of these two. Hopefully it is not too common. */ |
763 | ||
764 | static basic_block | |
bc35512f | 765 | merge_blocks_move (edge e, basic_block b, basic_block c, int mode) |
402209ff | 766 | { |
ec3ae3da | 767 | basic_block next; |
402209ff | 768 | |
750054a2 CT |
769 | /* If we are partitioning hot/cold basic blocks, we don't want to |
770 | mess up unconditional or indirect jumps that cross between hot | |
c22cacf3 MS |
771 | and cold sections. |
772 | ||
8e8d5162 | 773 | Basic block partitioning may result in some jumps that appear to |
c22cacf3 MS |
774 | be optimizable (or blocks that appear to be mergeable), but which really |
775 | must be left untouched (they are required to make it safely across | |
776 | partition boundaries). See the comments at the top of | |
8e8d5162 CT |
777 | bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */ |
778 | ||
87c8b4be | 779 | if (BB_PARTITION (b) != BB_PARTITION (c)) |
750054a2 | 780 | return NULL; |
c22cacf3 | 781 | |
402209ff JH |
782 | /* If B has a fallthru edge to C, no need to move anything. */ |
783 | if (e->flags & EDGE_FALLTHRU) | |
784 | { | |
0b17ab2f | 785 | int b_index = b->index, c_index = c->index; |
7d776ee2 RG |
786 | |
787 | /* Protect the loop latches. */ | |
788 | if (current_loops && c->loop_father->latch == c) | |
789 | return NULL; | |
790 | ||
bc35512f | 791 | merge_blocks (b, c); |
635559ab | 792 | update_forwarder_flag (b); |
402209ff | 793 | |
c263766c RH |
794 | if (dump_file) |
795 | fprintf (dump_file, "Merged %d and %d without moving.\n", | |
f87c27b4 | 796 | b_index, c_index); |
402209ff | 797 | |
fefa31b5 | 798 | return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb; |
402209ff | 799 | } |
5f0d2358 | 800 | |
402209ff JH |
801 | /* Otherwise we will need to move code around. Do that only if expensive |
802 | transformations are allowed. */ | |
803 | else if (mode & CLEANUP_EXPENSIVE) | |
804 | { | |
4262e623 JH |
805 | edge tmp_edge, b_fallthru_edge; |
806 | bool c_has_outgoing_fallthru; | |
807 | bool b_has_incoming_fallthru; | |
402209ff JH |
808 | |
809 | /* Avoid overactive code motion, as the forwarder blocks should be | |
c22cacf3 | 810 | eliminated by edge redirection instead. One exception might have |
402209ff JH |
811 | been if B is a forwarder block and C has no fallthru edge, but |
812 | that should be cleaned up by bb-reorder instead. */ | |
635559ab | 813 | if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c)) |
ec3ae3da | 814 | return NULL; |
402209ff JH |
815 | |
816 | /* We must make sure to not munge nesting of lexical blocks, | |
817 | and loop notes. This is done by squeezing out all the notes | |
818 | and leaving them there to lie. Not ideal, but functional. */ | |
819 | ||
0fd4b31d | 820 | tmp_edge = find_fallthru_edge (c->succs); |
402209ff | 821 | c_has_outgoing_fallthru = (tmp_edge != NULL); |
402209ff | 822 | |
0fd4b31d | 823 | tmp_edge = find_fallthru_edge (b->preds); |
402209ff | 824 | b_has_incoming_fallthru = (tmp_edge != NULL); |
4262e623 | 825 | b_fallthru_edge = tmp_edge; |
ec3ae3da | 826 | next = b->prev_bb; |
912b79e7 JH |
827 | if (next == c) |
828 | next = next->prev_bb; | |
4262e623 JH |
829 | |
830 | /* Otherwise, we're going to try to move C after B. If C does | |
831 | not have an outgoing fallthru, then it can be moved | |
832 | immediately after B without introducing or modifying jumps. */ | |
833 | if (! c_has_outgoing_fallthru) | |
834 | { | |
835 | merge_blocks_move_successor_nojumps (b, c); | |
fefa31b5 | 836 | return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next; |
4262e623 | 837 | } |
402209ff JH |
838 | |
839 | /* If B does not have an incoming fallthru, then it can be moved | |
840 | immediately before C without introducing or modifying jumps. | |
841 | C cannot be the first block, so we do not have to worry about | |
842 | accessing a non-existent block. */ | |
402209ff | 843 | |
4262e623 JH |
844 | if (b_has_incoming_fallthru) |
845 | { | |
473fb060 | 846 | basic_block bb; |
5f0d2358 | 847 | |
fefa31b5 | 848 | if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)) |
ec3ae3da | 849 | return NULL; |
7dddfb65 JH |
850 | bb = force_nonfallthru (b_fallthru_edge); |
851 | if (bb) | |
852 | notice_new_block (bb); | |
4262e623 | 853 | } |
5f0d2358 | 854 | |
4262e623 | 855 | merge_blocks_move_predecessor_nojumps (b, c); |
fefa31b5 | 856 | return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next; |
402209ff | 857 | } |
5f0d2358 | 858 | |
10d6c0d0 | 859 | return NULL; |
402209ff JH |
860 | } |
861 | \f | |
c2fc5456 R |
862 | |
863 | /* Removes the memory attributes of MEM expression | |
864 | if they are not equal. */ | |
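/* For instance (illustrative): if X and Y record different MEM_EXPRs,
   both lose their expression and offset information; differing alias
   sets are reset to 0; and both alignments are lowered to the smaller
   of the two.  */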
865 | ||
866 | void | |
867 | merge_memattrs (rtx x, rtx y) | |
868 | { | |
869 | int i; | |
870 | int j; | |
871 | enum rtx_code code; | |
872 | const char *fmt; | |
873 | ||
874 | if (x == y) | |
875 | return; | |
876 | if (x == 0 || y == 0) | |
877 | return; | |
878 | ||
879 | code = GET_CODE (x); | |
880 | ||
881 | if (code != GET_CODE (y)) | |
882 | return; | |
883 | ||
884 | if (GET_MODE (x) != GET_MODE (y)) | |
885 | return; | |
886 | ||
96b3c03f | 887 | if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y))) |
c2fc5456 R |
888 | { |
889 | if (! MEM_ATTRS (x)) | |
890 | MEM_ATTRS (y) = 0; | |
891 | else if (! MEM_ATTRS (y)) | |
892 | MEM_ATTRS (x) = 0; | |
c22cacf3 | 893 | else |
c2fc5456 | 894 | { |
f5541398 | 895 | HOST_WIDE_INT mem_size; |
c2fc5456 R |
896 | |
897 | if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y)) | |
898 | { | |
899 | set_mem_alias_set (x, 0); | |
900 | set_mem_alias_set (y, 0); | |
901 | } | |
c22cacf3 | 902 | |
c2fc5456 R |
903 | if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y))) |
904 | { | |
905 | set_mem_expr (x, 0); | |
906 | set_mem_expr (y, 0); | |
527210c4 RS |
907 | clear_mem_offset (x); |
908 | clear_mem_offset (y); | |
c2fc5456 | 909 | } |
527210c4 RS |
910 | else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y) |
911 | || (MEM_OFFSET_KNOWN_P (x) | |
912 | && MEM_OFFSET (x) != MEM_OFFSET (y))) | |
c2fc5456 | 913 | { |
527210c4 RS |
914 | clear_mem_offset (x); |
915 | clear_mem_offset (y); | |
c2fc5456 | 916 | } |
c22cacf3 | 917 | |
f5541398 RS |
918 | if (MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y)) |
919 | { | |
920 | mem_size = MAX (MEM_SIZE (x), MEM_SIZE (y)); | |
921 | set_mem_size (x, mem_size); | |
922 | set_mem_size (y, mem_size); | |
923 | } | |
c2fc5456 | 924 | else |
f5541398 RS |
925 | { |
926 | clear_mem_size (x); | |
927 | clear_mem_size (y); | |
928 | } | |
c2fc5456 R |
929 | |
930 | set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y))); | |
931 | set_mem_align (y, MEM_ALIGN (x)); | |
932 | } | |
933 | } | |
84cf4ab6 JJ |
934 | if (code == MEM) |
935 | { | |
936 | if (MEM_READONLY_P (x) != MEM_READONLY_P (y)) | |
937 | { | |
938 | MEM_READONLY_P (x) = 0; | |
939 | MEM_READONLY_P (y) = 0; | |
940 | } | |
941 | if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y)) | |
942 | { | |
943 | MEM_NOTRAP_P (x) = 0; | |
944 | MEM_NOTRAP_P (y) = 0; | |
945 | } | |
946 | if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y)) | |
947 | { | |
948 | MEM_VOLATILE_P (x) = 1; | |
949 | MEM_VOLATILE_P (y) = 1; | |
950 | } | |
951 | } | |
c22cacf3 | 952 | |
c2fc5456 R |
953 | fmt = GET_RTX_FORMAT (code); |
954 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) | |
955 | { | |
956 | switch (fmt[i]) | |
957 | { | |
958 | case 'E': | |
959 | /* Two vectors must have the same length. */ | |
960 | if (XVECLEN (x, i) != XVECLEN (y, i)) | |
961 | return; | |
962 | ||
963 | for (j = 0; j < XVECLEN (x, i); j++) | |
964 | merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j)); | |
965 | ||
966 | break; | |
967 | ||
968 | case 'e': | |
969 | merge_memattrs (XEXP (x, i), XEXP (y, i)); | |
970 | } | |
971 | } | |
972 | return; | |
973 | } | |
974 | ||
975 | ||
472c95f5 TV |
976 | /* Checks if patterns P1 and P2 are equivalent, apart from the possibly |
977 | different single sets S1 and S2. */ | |
c2fc5456 R |
978 | |
979 | static bool | |
472c95f5 TV |
980 | equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2) |
981 | { | |
982 | int i; | |
983 | rtx e1, e2; | |
984 | ||
985 | if (p1 == s1 && p2 == s2) | |
986 | return true; | |
987 | ||
988 | if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL) | |
989 | return false; | |
990 | ||
991 | if (XVECLEN (p1, 0) != XVECLEN (p2, 0)) | |
992 | return false; | |
993 | ||
994 | for (i = 0; i < XVECLEN (p1, 0); i++) | |
995 | { | |
996 | e1 = XVECEXP (p1, 0, i); | |
997 | e2 = XVECEXP (p2, 0, i); | |
998 | if (e1 == s1 && e2 == s2) | |
999 | continue; | |
1000 | if (reload_completed | |
1001 | ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2)) | |
1002 | continue; | |
1003 | ||
1004 | return false; | |
1005 | } | |
1006 | ||
1007 | return true; | |
1008 | } | |
1009 | ||
1010 | /* Examine register notes on I1 and I2 and return: | |
1011 | - dir_forward if I1 can be replaced by I2, or | |
1012 | - dir_backward if I2 can be replaced by I1, or | |
1013 | - dir_both if both are the case. */ | |
1014 | ||
1015 | static enum replace_direction | |
1016 | can_replace_by (rtx i1, rtx i2) | |
1017 | { | |
1018 | rtx s1, s2, d1, d2, src1, src2, note1, note2; | |
1019 | bool c1, c2; | |
1020 | ||
1021 | /* Check for 2 sets. */ | |
1022 | s1 = single_set (i1); | |
1023 | s2 = single_set (i2); | |
1024 | if (s1 == NULL_RTX || s2 == NULL_RTX) | |
1025 | return dir_none; | |
1026 | ||
1027 | /* Check that the 2 sets set the same dest. */ | |
1028 | d1 = SET_DEST (s1); | |
1029 | d2 = SET_DEST (s2); | |
1030 | if (!(reload_completed | |
1031 | ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2))) | |
1032 | return dir_none; | |
1033 | ||
1034 | /* Find identical req_equiv or reg_equal note, which implies that the 2 sets | |
1035 | set dest to the same value. */ | |
1036 | note1 = find_reg_equal_equiv_note (i1); | |
1037 | note2 = find_reg_equal_equiv_note (i2); | |
1038 | if (!note1 || !note2 || !rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0)) | |
1039 | || !CONST_INT_P (XEXP (note1, 0))) | |
1040 | return dir_none; | |
1041 | ||
1042 | if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2)) | |
1043 | return dir_none; | |
1044 | ||
1045 | /* Although the 2 sets set dest to the same value, we cannot replace | |
1046 | (set (dest) (const_int)) | |
1047 | by | |
1048 | (set (dest) (reg)) | |
1049 | because we don't know if the reg is live and has the same value at the | |
1050 | location of replacement. */ | |
1051 | src1 = SET_SRC (s1); | |
1052 | src2 = SET_SRC (s2); | |
1053 | c1 = CONST_INT_P (src1); | |
1054 | c2 = CONST_INT_P (src2); | |
1055 | if (c1 && c2) | |
1056 | return dir_both; | |
1057 | else if (c2) | |
1058 | return dir_forward; | |
1059 | else if (c1) | |
1060 | return dir_backward; | |
1061 | ||
1062 | return dir_none; | |
1063 | } | |
1064 | ||
1065 | /* Merges directions A and B. */ | |
1066 | ||
1067 | static enum replace_direction | |
1068 | merge_dir (enum replace_direction a, enum replace_direction b) | |
1069 | { | |
1070 | /* Implements the following table: | |
1071 | |bo fw bw no | |
1072 | ---+----------- | |
1073 | bo |bo fw bw no | |
1074 | fw |-- fw no no | |
1075 | bw |-- -- bw no | |
1076 | no |-- -- -- no. */ | |
1077 | ||
1078 | if (a == b) | |
1079 | return a; | |
1080 | ||
1081 | if (a == dir_both) | |
1082 | return b; | |
1083 | if (b == dir_both) | |
1084 | return a; | |
1085 | ||
1086 | return dir_none; | |
1087 | } | |
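/* For instance (illustrative): merge_dir (dir_both, dir_forward) yields
   dir_forward, while merge_dir (dir_forward, dir_backward) yields
   dir_none, matching the table above.  */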
1088 | ||
1089 | /* Examine I1 and I2 and return: | |
1090 | - dir_forward if I1 can be replaced by I2, or | |
1091 | - dir_backward if I2 can be replaced by I1, or | |
1092 | - dir_both if both are the case. */ | |
1093 | ||
1094 | static enum replace_direction | |
c2fc5456 R |
1095 | old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2) |
1096 | { | |
1097 | rtx p1, p2; | |
1098 | ||
1099 | /* Verify that I1 and I2 are equivalent. */ | |
1100 | if (GET_CODE (i1) != GET_CODE (i2)) | |
472c95f5 | 1101 | return dir_none; |
c2fc5456 | 1102 | |
ba21aba3 DD |
1103 | /* __builtin_unreachable() may lead to empty blocks (ending with |
1104 | NOTE_INSN_BASIC_BLOCK). They may be crossjumped. */ | |
1105 | if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2)) | |
472c95f5 | 1106 | return dir_both; |
ba21aba3 | 1107 | |
9a08d230 RH |
1108 | /* ??? Do not allow cross-jumping between different stack levels. */ |
1109 | p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL); | |
1110 | p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL); | |
42aa5124 RH |
1111 | if (p1 && p2) |
1112 | { | |
1113 | p1 = XEXP (p1, 0); | |
1114 | p2 = XEXP (p2, 0); | |
1115 | if (!rtx_equal_p (p1, p2)) | |
1116 | return dir_none; | |
1117 | ||
1118 | /* ??? Worse, this adjustment had better be constant lest we | |
1119 | have differing incoming stack levels. */ | |
1120 | if (!frame_pointer_needed | |
1121 | && find_args_size_adjust (i1) == HOST_WIDE_INT_MIN) | |
1122 | return dir_none; | |
1123 | } | |
1124 | else if (p1 || p2) | |
9a08d230 RH |
1125 | return dir_none; |
1126 | ||
7752e522 | 1127 | p1 = PATTERN (i1); |
c2fc5456 R |
1128 | p2 = PATTERN (i2); |
1129 | ||
1130 | if (GET_CODE (p1) != GET_CODE (p2)) | |
472c95f5 | 1131 | return dir_none; |
c2fc5456 R |
1132 | |
1133 | /* If this is a CALL_INSN, compare register usage information. | |
1134 | If we don't check this on stack register machines, the two | |
1135 | CALL_INSNs might be merged leaving reg-stack.c with mismatching | |
1136 | numbers of stack registers in the same basic block. | |
1137 | If we don't check this on machines with delay slots, a delay slot may | |
1138 | be filled that clobbers a parameter expected by the subroutine. | |
1139 | ||
1140 | ??? We take the simple route for now and assume that if they're | |
31ce8a53 | 1141 | equal, they were constructed identically. |
c2fc5456 | 1142 | |
31ce8a53 BS |
1143 | Also check for identical exception regions. */ |
1144 | ||
1145 | if (CALL_P (i1)) | |
1146 | { | |
1147 | /* Ensure the same EH region. */ | |
1148 | rtx n1 = find_reg_note (i1, REG_EH_REGION, 0); | |
1149 | rtx n2 = find_reg_note (i2, REG_EH_REGION, 0); | |
1150 | ||
1151 | if (!n1 && n2) | |
472c95f5 | 1152 | return dir_none; |
31ce8a53 BS |
1153 | |
1154 | if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0))) | |
472c95f5 | 1155 | return dir_none; |
31ce8a53 BS |
1156 | |
1157 | if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1), | |
c22cacf3 | 1158 | CALL_INSN_FUNCTION_USAGE (i2)) |
31ce8a53 | 1159 | || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)) |
472c95f5 | 1160 | return dir_none; |
68a9738a JJ |
1161 | |
1162 | /* For address sanitizer, never crossjump __asan_report_* builtins, | |
1163 | otherwise errors might be reported on incorrect lines. */ | |
de5a5fa1 | 1164 | if (flag_sanitize & SANITIZE_ADDRESS) |
68a9738a JJ |
1165 | { |
1166 | rtx call = get_call_rtx_from (i1); | |
1167 | if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF) | |
1168 | { | |
1169 | rtx symbol = XEXP (XEXP (call, 0), 0); | |
1170 | if (SYMBOL_REF_DECL (symbol) | |
1171 | && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL) | |
1172 | { | |
1173 | if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol)) | |
1174 | == BUILT_IN_NORMAL) | |
1175 | && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)) | |
1176 | >= BUILT_IN_ASAN_REPORT_LOAD1 | |
1177 | && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)) | |
8946c29e | 1178 | <= BUILT_IN_ASAN_STOREN) |
68a9738a JJ |
1179 | return dir_none; |
1180 | } | |
1181 | } | |
1182 | } | |
31ce8a53 | 1183 | } |
c2fc5456 R |
1184 | |
1185 | #ifdef STACK_REGS | |
1186 | /* If cross_jump_death_matters is not 0, the insn's mode | |
1187 | indicates whether or not the insn contains any stack-like | |
1188 | regs. */ | |
1189 | ||
1190 | if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1)) | |
1191 | { | |
1192 | /* If register stack conversion has already been done, then | |
c22cacf3 MS |
1193 | death notes must also be compared before it is certain that |
1194 | the two instruction streams match. */ | |
c2fc5456 R |
1195 | |
1196 | rtx note; | |
1197 | HARD_REG_SET i1_regset, i2_regset; | |
1198 | ||
1199 | CLEAR_HARD_REG_SET (i1_regset); | |
1200 | CLEAR_HARD_REG_SET (i2_regset); | |
1201 | ||
1202 | for (note = REG_NOTES (i1); note; note = XEXP (note, 1)) | |
1203 | if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0))) | |
1204 | SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0))); | |
1205 | ||
1206 | for (note = REG_NOTES (i2); note; note = XEXP (note, 1)) | |
1207 | if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0))) | |
1208 | SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0))); | |
1209 | ||
56b138ae | 1210 | if (!hard_reg_set_equal_p (i1_regset, i2_regset)) |
472c95f5 | 1211 | return dir_none; |
c2fc5456 R |
1212 | } |
1213 | #endif | |
1214 | ||
1215 | if (reload_completed | |
1216 | ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2)) | |
472c95f5 | 1217 | return dir_both; |
c2fc5456 | 1218 | |
472c95f5 | 1219 | return can_replace_by (i1, i2); |
c2fc5456 R |
1220 | } |
1221 | \f | |
31ce8a53 BS |
1222 | /* When comparing insns I1 and I2 in flow_find_cross_jump or |
1223 | flow_find_head_matching_sequence, ensure the notes match. */ | |
1224 | ||
1225 | static void | |
1226 | merge_notes (rtx i1, rtx i2) | |
1227 | { | |
1228 | /* If the merged insns have different REG_EQUAL notes, then | |
1229 | remove them. */ | |
1230 | rtx equiv1 = find_reg_equal_equiv_note (i1); | |
1231 | rtx equiv2 = find_reg_equal_equiv_note (i2); | |
1232 | ||
1233 | if (equiv1 && !equiv2) | |
1234 | remove_note (i1, equiv1); | |
1235 | else if (!equiv1 && equiv2) | |
1236 | remove_note (i2, equiv2); | |
1237 | else if (equiv1 && equiv2 | |
1238 | && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0))) | |
1239 | { | |
1240 | remove_note (i1, equiv1); | |
1241 | remove_note (i2, equiv2); | |
1242 | } | |
1243 | } | |
1244 | ||
823918ae TV |
1245 | /* Walks from I1 in BB1 backward till the next non-debug insn, and returns the |
1246 | resulting insn in I1, and the corresponding bb in BB1. At the head of a | |
1247 | bb, if there is a predecessor bb that reaches this bb via fallthru, and | |
1248 | FOLLOW_FALLTHRU, walks further in the predecessor bb and registers this in | |
1249 | DID_FALLTHRU. Otherwise, stops at the head of the bb. */ | |
1250 | ||
1251 | static void | |
1252 | walk_to_nondebug_insn (rtx *i1, basic_block *bb1, bool follow_fallthru, | |
1253 | bool *did_fallthru) | |
1254 | { | |
1255 | edge fallthru; | |
1256 | ||
1257 | *did_fallthru = false; | |
1258 | ||
1259 | /* Ignore notes. */ | |
1260 | while (!NONDEBUG_INSN_P (*i1)) | |
1261 | { | |
1262 | if (*i1 != BB_HEAD (*bb1)) | |
1263 | { | |
1264 | *i1 = PREV_INSN (*i1); | |
1265 | continue; | |
1266 | } | |
1267 | ||
1268 | if (!follow_fallthru) | |
1269 | return; | |
1270 | ||
1271 | fallthru = find_fallthru_edge ((*bb1)->preds); | |
fefa31b5 | 1272 | if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) |
823918ae TV |
1273 | || !single_succ_p (fallthru->src)) |
1274 | return; | |
1275 | ||
1276 | *bb1 = fallthru->src; | |
1277 | *i1 = BB_END (*bb1); | |
1278 | *did_fallthru = true; | |
1279 | } | |
1280 | } | |
1281 | ||
c2fc5456 | 1282 | /* Look through the insns at the end of BB1 and BB2 and find the longest |
472c95f5 TV |
1283 | sequence that are either equivalent, or allow forward or backward |
1284 | replacement. Store the first insns for that sequence in *F1 and *F2 and | |
1285 | return the sequence length. | |
1286 | ||
1287 | DIR_P indicates the allowed replacement direction on function entry, and | |
1288 | the actual replacement direction on function exit. If NULL, only equivalent | |
1289 | sequences are allowed. | |
c2fc5456 R |
1290 | |
1291 | To simplify callers of this function, if the blocks match exactly, | |
1292 | store the head of the blocks in *F1 and *F2. */ | |
1293 | ||
31ce8a53 | 1294 | int |
472c95f5 TV |
1295 | flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx *f1, rtx *f2, |
1296 | enum replace_direction *dir_p) | |
c2fc5456 R |
1297 | { |
1298 | rtx i1, i2, last1, last2, afterlast1, afterlast2; | |
1299 | int ninsns = 0; | |
472c95f5 | 1300 | enum replace_direction dir, last_dir, afterlast_dir; |
823918ae | 1301 | bool follow_fallthru, did_fallthru; |
472c95f5 TV |
1302 | |
1303 | if (dir_p) | |
1304 | dir = *dir_p; | |
1305 | else | |
1306 | dir = dir_both; | |
1307 | afterlast_dir = dir; | |
1308 | last_dir = afterlast_dir; | |
c2fc5456 R |
1309 | |
1310 | /* Skip simple jumps at the end of the blocks. Complex jumps still | |
1311 | need to be compared for equivalence, which we'll do below. */ | |
1312 | ||
1313 | i1 = BB_END (bb1); | |
1314 | last1 = afterlast1 = last2 = afterlast2 = NULL_RTX; | |
1315 | if (onlyjump_p (i1) | |
1316 | || (returnjump_p (i1) && !side_effects_p (PATTERN (i1)))) | |
1317 | { | |
1318 | last1 = i1; | |
1319 | i1 = PREV_INSN (i1); | |
1320 | } | |
1321 | ||
1322 | i2 = BB_END (bb2); | |
1323 | if (onlyjump_p (i2) | |
1324 | || (returnjump_p (i2) && !side_effects_p (PATTERN (i2)))) | |
1325 | { | |
1326 | last2 = i2; | |
a0cbe71e JJ |
1327 | /* Count everything except for unconditional jump as insn. |
1328 | Don't count any jumps if dir_p is NULL. */ | |
1329 | if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p) | |
c2fc5456 R |
1330 | ninsns++; |
1331 | i2 = PREV_INSN (i2); | |
1332 | } | |
1333 | ||
1334 | while (true) | |
1335 | { | |
823918ae TV |
1336 | /* In the following example, we can replace all jumps to C by jumps to A. |
1337 | ||
1338 | This removes 4 duplicate insns. | |
1339 | [bb A] insn1 [bb C] insn1 | |
1340 | insn2 insn2 | |
1341 | [bb B] insn3 insn3 | |
1342 | insn4 insn4 | |
1343 | jump_insn jump_insn | |
1344 | ||
1345 | We could also replace all jumps to A by jumps to C, but that leaves B | |
1346 | alive, and removes only 2 duplicate insns. In a subsequent crossjump | |
1347 | step, all jumps to B would be replaced with jumps to the middle of C, | |
1348 | achieving the same result with more effort. | |
1349 | So we allow only the first possibility, which means that we don't allow | |
1350 | fallthru in the block that's being replaced. */ | |
1351 | ||
1352 | follow_fallthru = dir_p && dir != dir_forward; | |
1353 | walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru); | |
1354 | if (did_fallthru) | |
1355 | dir = dir_backward; | |
1356 | ||
1357 | follow_fallthru = dir_p && dir != dir_backward; | |
1358 | walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru); | |
1359 | if (did_fallthru) | |
1360 | dir = dir_forward; | |
c2fc5456 R |
1361 | |
1362 | if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2)) | |
1363 | break; | |
1364 | ||
472c95f5 TV |
1365 | dir = merge_dir (dir, old_insns_match_p (0, i1, i2)); |
1366 | if (dir == dir_none || (!dir_p && dir != dir_both)) | |
c2fc5456 R |
1367 | break; |
1368 | ||
1369 | merge_memattrs (i1, i2); | |
1370 | ||
1371 | /* Don't begin a cross-jump with a NOTE insn. */ | |
1372 | if (INSN_P (i1)) | |
1373 | { | |
31ce8a53 | 1374 | merge_notes (i1, i2); |
c2fc5456 R |
1375 | |
1376 | afterlast1 = last1, afterlast2 = last2; | |
1377 | last1 = i1, last2 = i2; | |
472c95f5 TV |
1378 | afterlast_dir = last_dir; |
1379 | last_dir = dir; | |
a0cbe71e | 1380 | if (active_insn_p (i1)) |
2a562b0a | 1381 | ninsns++; |
c2fc5456 R |
1382 | } |
1383 | ||
1384 | i1 = PREV_INSN (i1); | |
1385 | i2 = PREV_INSN (i2); | |
1386 | } | |
1387 | ||
1388 | #ifdef HAVE_cc0 | |
1389 | /* Don't allow the insn after a compare to be shared by | |
1390 | cross-jumping unless the compare is also shared. */ | |
1391 | if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1)) | |
472c95f5 | 1392 | last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--; |
c2fc5456 R |
1393 | #endif |
1394 | ||
1395 | /* Include preceding notes and labels in the cross-jump. One, | |
1396 | this may bring us to the head of the blocks as requested above. | |
1397 | Two, it keeps line number notes as matched as may be. */ | |
1398 | if (ninsns) | |
1399 | { | |
823918ae | 1400 | bb1 = BLOCK_FOR_INSN (last1); |
b5b8b0ac | 1401 | while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1))) |
c2fc5456 R |
1402 | last1 = PREV_INSN (last1); |
1403 | ||
1404 | if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1))) | |
1405 | last1 = PREV_INSN (last1); | |
1406 | ||
823918ae | 1407 | bb2 = BLOCK_FOR_INSN (last2); |
b5b8b0ac | 1408 | while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2))) |
c2fc5456 R |
1409 | last2 = PREV_INSN (last2); |
1410 | ||
1411 | if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2))) | |
1412 | last2 = PREV_INSN (last2); | |
1413 | ||
1414 | *f1 = last1; | |
1415 | *f2 = last2; | |
1416 | } | |
1417 | ||
472c95f5 TV |
1418 | if (dir_p) |
1419 | *dir_p = last_dir; | |
c2fc5456 R |
1420 | return ninsns; |
1421 | } | |
1422 | ||
31ce8a53 BS |
1423 | /* Like flow_find_cross_jump, except start looking for a matching sequence from |
1424 | the head of the two blocks. Do not include jumps at the end. | |
1425 | If STOP_AFTER is nonzero, stop after finding that many matching | |
b59e0455 JJ |
1426 | instructions. If STOP_AFTER is zero, count all INSN_P insns; if it is | |
1427 | nonzero, only count active insns. */ | |
31ce8a53 BS |
1428 | |
1429 | int | |
1430 | flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx *f1, | |
1431 | rtx *f2, int stop_after) | |
1432 | { | |
1433 | rtx i1, i2, last1, last2, beforelast1, beforelast2; | |
1434 | int ninsns = 0; | |
1435 | edge e; | |
1436 | edge_iterator ei; | |
1437 | int nehedges1 = 0, nehedges2 = 0; | |
1438 | ||
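/* Count the EH edges of each block up front; they are compared below | |
   so that insns with different effects on exception handling are never | |
   merged.  */ | |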
1439 | FOR_EACH_EDGE (e, ei, bb1->succs) | |
1440 | if (e->flags & EDGE_EH) | |
1441 | nehedges1++; | |
1442 | FOR_EACH_EDGE (e, ei, bb2->succs) | |
1443 | if (e->flags & EDGE_EH) | |
1444 | nehedges2++; | |
1445 | ||
1446 | i1 = BB_HEAD (bb1); | |
1447 | i2 = BB_HEAD (bb2); | |
1448 | last1 = beforelast1 = last2 = beforelast2 = NULL_RTX; | |
1449 | ||
1450 | while (true) | |
1451 | { | |
4ec5d4f5 | 1452 | /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG. */ |
31ce8a53 | 1453 | while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1)) |
4ec5d4f5 BS |
1454 | { |
1455 | if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG) | |
1456 | break; | |
1457 | i1 = NEXT_INSN (i1); | |
1458 | } | |
31ce8a53 BS |
1459 | |
1460 | while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2)) | |
4ec5d4f5 BS |
1461 | { |
1462 | if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG) | |
1463 | break; | |
1464 | i2 = NEXT_INSN (i2); | |
1465 | } | |
31ce8a53 | 1466 | |
662592e1 BS |
1467 | if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1)) |
1468 | || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2))) | |
1469 | break; | |
1470 | ||
31ce8a53 BS |
1471 | if (NOTE_P (i1) || NOTE_P (i2) |
1472 | || JUMP_P (i1) || JUMP_P (i2)) | |
1473 | break; | |
1474 | ||
1475 | /* A sanity check to make sure we're not merging insns with different | |
1476 | effects on EH. If only one of them ends a basic block, it shouldn't | |
1477 | have an EH edge; if both end a basic block, there should be the same | |
1478 | number of EH edges. */ | |
1479 | if ((i1 == BB_END (bb1) && i2 != BB_END (bb2) | |
1480 | && nehedges1 > 0) | |
1481 | || (i2 == BB_END (bb2) && i1 != BB_END (bb1) | |
1482 | && nehedges2 > 0) | |
1483 | || (i1 == BB_END (bb1) && i2 == BB_END (bb2) | |
1484 | && nehedges1 != nehedges2)) | |
1485 | break; | |
1486 | ||
472c95f5 | 1487 | if (old_insns_match_p (0, i1, i2) != dir_both) |
31ce8a53 BS |
1488 | break; |
1489 | ||
1490 | merge_memattrs (i1, i2); | |
1491 | ||
1492 | /* Don't begin a cross-jump with a NOTE insn. */ | |
1493 | if (INSN_P (i1)) | |
1494 | { | |
1495 | merge_notes (i1, i2); | |
1496 | ||
1497 | beforelast1 = last1, beforelast2 = last2; | |
1498 | last1 = i1, last2 = i2; | |
b59e0455 | 1499 | if (!stop_after || active_insn_p (i1)) |
a0cbe71e | 1500 | ninsns++; |
31ce8a53 BS |
1501 | } |
1502 | ||
1503 | if (i1 == BB_END (bb1) || i2 == BB_END (bb2) | |
1504 | || (stop_after > 0 && ninsns == stop_after)) | |
1505 | break; | |
1506 | ||
1507 | i1 = NEXT_INSN (i1); | |
1508 | i2 = NEXT_INSN (i2); | |
1509 | } | |
1510 | ||
1511 | #ifdef HAVE_cc0 | |
1512 | /* Don't allow a compare to be shared by cross-jumping unless the insn | |
1513 | after the compare is also shared. */ | |
1514 | if (ninsns && reg_mentioned_p (cc0_rtx, last1) && sets_cc0_p (last1)) | |
1515 | last1 = beforelast1, last2 = beforelast2, ninsns--; | |
1516 | #endif | |
1517 | ||
1518 | if (ninsns) | |
1519 | { | |
1520 | *f1 = last1; | |
1521 | *f2 = last2; | |
1522 | } | |
1523 | ||
1524 | return ninsns; | |
1525 | } | |
1526 | ||
c2fc5456 R |
1527 | /* Return true iff outgoing edges of BB1 and BB2 match, together with |
1528 | the branch instruction. This means that if we commonize the control | |
1529 | flow before the end of the basic block, the semantics remain unchanged. | |
402209ff JH |
1530 | |
1531 | We may assume that there exists one edge with a common destination. */ | |
1532 | ||
1533 | static bool | |
c2fc5456 | 1534 | outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) |
402209ff | 1535 | { |
0dd0e980 JH |
1536 | int nehedges1 = 0, nehedges2 = 0; |
1537 | edge fallthru1 = 0, fallthru2 = 0; | |
1538 | edge e1, e2; | |
628f6a4e | 1539 | edge_iterator ei; |
0dd0e980 | 1540 | |
6626665f | 1541 | /* If we performed shrink-wrapping, edges to the exit block can |
484db665 BS |
1542 | only be distinguished for JUMP_INSNs. The two paths may differ in |
1543 | whether they went through the prologue. Sibcalls are fine, we know | |
1544 | that we either didn't need or inserted an epilogue before them. */ | |
1545 | if (crtl->shrink_wrapped | |
fefa31b5 DM |
1546 | && single_succ_p (bb1) |
1547 | && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun) | |
484db665 BS |
1548 | && !JUMP_P (BB_END (bb1)) |
1549 | && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1)))) | |
1550 | return false; | |
1551 | ||
c04cf67b RH |
1552 | /* If BB1 has only one successor, we may be looking at either an |
1553 | unconditional jump, or a fake edge to exit. */ | |
c5cbcccf ZD |
1554 | if (single_succ_p (bb1) |
1555 | && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0 | |
4b4bf941 | 1556 | && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1)))) |
c5cbcccf ZD |
1557 | return (single_succ_p (bb2) |
1558 | && (single_succ_edge (bb2)->flags | |
1559 | & (EDGE_COMPLEX | EDGE_FAKE)) == 0 | |
4b4bf941 | 1560 | && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2)))); |
402209ff JH |
1561 | |
1562 | /* Match conditional jumps - this may get tricky when fallthru and branch | |
1563 | edges are crossed. */ | |
628f6a4e | 1564 | if (EDGE_COUNT (bb1->succs) == 2 |
a813c111 SB |
1565 | && any_condjump_p (BB_END (bb1)) |
1566 | && onlyjump_p (BB_END (bb1))) | |
402209ff | 1567 | { |
c2fc5456 R |
1568 | edge b1, f1, b2, f2; |
1569 | bool reverse, match; | |
1570 | rtx set1, set2, cond1, cond2; | |
1571 | enum rtx_code code1, code2; | |
1572 | ||
628f6a4e | 1573 | if (EDGE_COUNT (bb2->succs) != 2 |
a813c111 SB |
1574 | || !any_condjump_p (BB_END (bb2)) |
1575 | || !onlyjump_p (BB_END (bb2))) | |
0a2ed1f1 | 1576 | return false; |
c2fc5456 R |
1577 | |
1578 | b1 = BRANCH_EDGE (bb1); | |
1579 | b2 = BRANCH_EDGE (bb2); | |
1580 | f1 = FALLTHRU_EDGE (bb1); | |
1581 | f2 = FALLTHRU_EDGE (bb2); | |
1582 | ||
1583 | /* Get around possible forwarders on fallthru edges. Other cases | |
c22cacf3 | 1584 | should be optimized out already. */ |
c2fc5456 R |
1585 | if (FORWARDER_BLOCK_P (f1->dest)) |
1586 | f1 = single_succ_edge (f1->dest); | |
1587 | ||
1588 | if (FORWARDER_BLOCK_P (f2->dest)) | |
1589 | f2 = single_succ_edge (f2->dest); | |
1590 | ||
1591 | /* To simplify use of this function, return false if there are | |
1592 | unneeded forwarder blocks. These will get eliminated later | |
1593 | during cleanup_cfg. */ | |
1594 | if (FORWARDER_BLOCK_P (f1->dest) | |
1595 | || FORWARDER_BLOCK_P (f2->dest) | |
1596 | || FORWARDER_BLOCK_P (b1->dest) | |
1597 | || FORWARDER_BLOCK_P (b2->dest)) | |
1598 | return false; | |
1599 | ||
1600 | if (f1->dest == f2->dest && b1->dest == b2->dest) | |
1601 | reverse = false; | |
1602 | else if (f1->dest == b2->dest && b1->dest == f2->dest) | |
1603 | reverse = true; | |
1604 | else | |
1605 | return false; | |
1606 | ||
1607 | set1 = pc_set (BB_END (bb1)); | |
1608 | set2 = pc_set (BB_END (bb2)); | |
1609 | if ((XEXP (SET_SRC (set1), 1) == pc_rtx) | |
1610 | != (XEXP (SET_SRC (set2), 1) == pc_rtx)) | |
1611 | reverse = !reverse; | |
1612 | ||
1613 | cond1 = XEXP (SET_SRC (set1), 0); | |
1614 | cond2 = XEXP (SET_SRC (set2), 0); | |
1615 | code1 = GET_CODE (cond1); | |
1616 | if (reverse) | |
1617 | code2 = reversed_comparison_code (cond2, BB_END (bb2)); | |
1618 | else | |
1619 | code2 = GET_CODE (cond2); | |
1620 | ||
1621 | if (code2 == UNKNOWN) | |
1622 | return false; | |
1623 | ||
1624 | /* Verify codes and operands match. */ | |
1625 | match = ((code1 == code2 | |
1626 | && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0)) | |
1627 | && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1))) | |
1628 | || (code1 == swap_condition (code2) | |
1629 | && rtx_renumbered_equal_p (XEXP (cond1, 1), | |
1630 | XEXP (cond2, 0)) | |
1631 | && rtx_renumbered_equal_p (XEXP (cond1, 0), | |
1632 | XEXP (cond2, 1)))); | |
1633 | ||
1634 | /* If we return true, we will join the blocks. Which means that | |
1635 | we will only have one branch prediction bit to work with. Thus | |
1636 | we require the existing branches to have probabilities that are | |
1637 | roughly similar. */ | |
1638 | if (match | |
efd8f750 JH |
1639 | && optimize_bb_for_speed_p (bb1) |
1640 | && optimize_bb_for_speed_p (bb2)) | |
c2fc5456 R |
1641 | { |
1642 | int prob2; | |
1643 | ||
1644 | if (b1->dest == b2->dest) | |
1645 | prob2 = b2->probability; | |
1646 | else | |
1647 | /* Do not use f2 probability as f2 may be forwarded. */ | |
1648 | prob2 = REG_BR_PROB_BASE - b2->probability; | |
1649 | ||
1650 | /* Fail if the difference in probabilities is greater than 50%. | |
1651 | This rules out two well-predicted branches with opposite | |
1652 | outcomes. */ | |
1653 | if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2) | |
1654 | { | |
1655 | if (dump_file) | |
1656 | fprintf (dump_file, | |
1657 | "Outcomes of branch in bb %i and %i differ too much (%i %i)\n", | |
1658 | bb1->index, bb2->index, b1->probability, prob2); | |
1659 | ||
1660 | return false; | |
1661 | } | |
1662 | } | |
1663 | ||
1664 | if (dump_file && match) | |
1665 | fprintf (dump_file, "Conditionals in bb %i and %i match.\n", | |
1666 | bb1->index, bb2->index); | |
1667 | ||
1668 | return match; | |
402209ff JH |
1669 | } |
1670 | ||
09da1532 | 1671 | /* Generic case - we are seeing a computed jump, table jump or trapping |
0dd0e980 JH |
1672 | instruction. */ |
1673 | ||
39811184 JZ |
1674 | /* Check whether there are tablejumps in the end of BB1 and BB2. |
1675 | Return true if they are identical. */ | |
1676 | { | |
1677 | rtx label1, label2; | |
1678 | rtx table1, table2; | |
1679 | ||
a813c111 SB |
1680 | if (tablejump_p (BB_END (bb1), &label1, &table1) |
1681 | && tablejump_p (BB_END (bb2), &label2, &table2) | |
39811184 JZ |
1682 | && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2))) |
1683 | { | |
1684 | /* The labels should never be the same rtx. If they really are the same, | |
1685 | the jump tables are the same too. So disable crossjumping of blocks BB1 | |
1686 | and BB2 because when deleting the common insns in the end of BB1 | |
6de9cd9a | 1687 | by delete_basic_block () the jump table would be deleted too. */ |
4af16369 | 1688 | /* If LABEL2 is referenced in BB1->END do not do anything |
39811184 JZ |
1689 | because we would lose information when replacing | |
1690 | LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */ | |
a813c111 | 1691 | if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1))) |
39811184 JZ |
1692 | { |
1693 | /* Set IDENTICAL to true when the tables are identical. */ | |
1694 | bool identical = false; | |
1695 | rtx p1, p2; | |
1696 | ||
1697 | p1 = PATTERN (table1); | |
1698 | p2 = PATTERN (table2); | |
1699 | if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2)) | |
1700 | { | |
1701 | identical = true; | |
1702 | } | |
1703 | else if (GET_CODE (p1) == ADDR_DIFF_VEC | |
1704 | && (XVECLEN (p1, 1) == XVECLEN (p2, 1)) | |
1705 | && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2)) | |
1706 | && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3))) | |
1707 | { | |
1708 | int i; | |
1709 | ||
1710 | identical = true; | |
1711 | for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--) | |
1712 | if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i))) | |
1713 | identical = false; | |
1714 | } | |
1715 | ||
c2fc5456 | 1716 | if (identical) |
39811184 | 1717 | { |
c2fc5456 | 1718 | replace_label_data rr; |
39811184 JZ |
1719 | bool match; |
1720 | ||
c2fc5456 | 1721 | /* Temporarily replace references to LABEL1 with LABEL2 |
39811184 | 1722 | in BB1->END so that we could compare the instructions. */ |
c2fc5456 R |
1723 | rr.r1 = label1; |
1724 | rr.r2 = label2; | |
1725 | rr.update_label_nuses = false; | |
1726 | for_each_rtx (&BB_END (bb1), replace_label, &rr); | |
39811184 | 1727 | |
472c95f5 TV |
1728 | match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)) |
1729 | == dir_both); | |
c263766c RH |
1730 | if (dump_file && match) |
1731 | fprintf (dump_file, | |
39811184 JZ |
1732 | "Tablejumps in bb %i and %i match.\n", |
1733 | bb1->index, bb2->index); | |
1734 | ||
c2fc5456 R |
1735 | /* Set the original label in BB1->END because when deleting |
1736 | a block whose end is a tablejump, the tablejump referenced | |
1737 | from the instruction is deleted too. */ | |
1738 | rr.r1 = label2; | |
1739 | rr.r2 = label1; | |
1740 | for_each_rtx (&BB_END (bb1), replace_label, &rr); | |
1741 | ||
39811184 JZ |
1742 | return match; |
1743 | } | |
1744 | } | |
1745 | return false; | |
1746 | } | |
1747 | } | |
39811184 | 1748 | |
d41d6122 TJ |
1749 | /* Find the last non-debug non-note instruction in each bb, except |
1750 | stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p | |
1751 | handles that case specially. old_insns_match_p does not handle | |
1752 | other types of instruction notes. */ | |
206604dc JJ |
1753 | rtx last1 = BB_END (bb1); |
1754 | rtx last2 = BB_END (bb2); | |
d41d6122 TJ |
1755 | while (!NOTE_INSN_BASIC_BLOCK_P (last1) && |
1756 | (DEBUG_INSN_P (last1) || NOTE_P (last1))) | |
1757 | last1 = PREV_INSN (last1); | |
1758 | while (!NOTE_INSN_BASIC_BLOCK_P (last2) && | |
1759 | (DEBUG_INSN_P (last2) || NOTE_P (last2))) | |
1760 | last2 = PREV_INSN (last2); | |
1761 | gcc_assert (last1 && last2); | |
1762 | ||
0dd0e980 | 1763 | /* First ensure that the instructions match. There may be many outgoing |
39811184 | 1764 | edges so this test is generally cheaper. */ |
206604dc | 1765 | if (old_insns_match_p (mode, last1, last2) != dir_both) |
0dd0e980 JH |
1766 | return false; |
1767 | ||
1768 | /* Search the outgoing edges, ensure that the counts do match, find possible | |
1769 | fallthru and exception handling edges since these need more | |
1770 | validation. */ | |
628f6a4e BE |
1771 | if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs)) |
1772 | return false; | |
1773 | ||
206604dc | 1774 | bool nonfakeedges = false; |
628f6a4e | 1775 | FOR_EACH_EDGE (e1, ei, bb1->succs) |
0dd0e980 | 1776 | { |
628f6a4e | 1777 | e2 = EDGE_SUCC (bb2, ei.index); |
c22cacf3 | 1778 | |
206604dc JJ |
1779 | if ((e1->flags & EDGE_FAKE) == 0) |
1780 | nonfakeedges = true; | |
1781 | ||
0dd0e980 JH |
1782 | if (e1->flags & EDGE_EH) |
1783 | nehedges1++; | |
5f0d2358 | 1784 | |
0dd0e980 JH |
1785 | if (e2->flags & EDGE_EH) |
1786 | nehedges2++; | |
5f0d2358 | 1787 | |
0dd0e980 JH |
1788 | if (e1->flags & EDGE_FALLTHRU) |
1789 | fallthru1 = e1; | |
1790 | if (e2->flags & EDGE_FALLTHRU) | |
1791 | fallthru2 = e2; | |
1792 | } | |
5f0d2358 | 1793 | |
0dd0e980 | 1794 | /* If number of edges of various types does not match, fail. */ |
628f6a4e | 1795 | if (nehedges1 != nehedges2 |
5f0d2358 | 1796 | || (fallthru1 != 0) != (fallthru2 != 0)) |
0dd0e980 JH |
1797 | return false; |
1798 | ||
206604dc JJ |
1799 | /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors |
1800 | and the last real insn doesn't have a REG_ARGS_SIZE note, don't | |
1801 | attempt to optimize, as the two basic blocks might have different | |
1802 | REG_ARGS_SIZE depths. For noreturn calls and unconditional | |
1803 | traps there should be REG_ARGS_SIZE notes; they could be missing | |
1804 | for __builtin_unreachable () uses though. */ | |
1805 | if (!nonfakeedges | |
1806 | && !ACCUMULATE_OUTGOING_ARGS | |
1807 | && (!INSN_P (last1) | |
1808 | || !find_reg_note (last1, REG_ARGS_SIZE, NULL))) | |
1809 | return false; | |
1810 | ||
0dd0e980 JH |
1811 | /* fallthru edges must be forwarded to the same destination. */ |
1812 | if (fallthru1) | |
1813 | { | |
1814 | basic_block d1 = (forwarder_block_p (fallthru1->dest) | |
c5cbcccf | 1815 | ? single_succ (fallthru1->dest): fallthru1->dest); |
0dd0e980 | 1816 | basic_block d2 = (forwarder_block_p (fallthru2->dest) |
c5cbcccf | 1817 | ? single_succ (fallthru2->dest): fallthru2->dest); |
5f0d2358 | 1818 | |
0dd0e980 JH |
1819 | if (d1 != d2) |
1820 | return false; | |
1821 | } | |
5f0d2358 | 1822 | |
5f77fbd4 JJ |
1823 | /* Ensure the same EH region. */ |
1824 | { | |
a813c111 SB |
1825 | rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0); |
1826 | rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0); | |
5f0d2358 | 1827 | |
5f77fbd4 JJ |
1828 | if (!n1 && n2) |
1829 | return false; | |
1830 | ||
1831 | if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0))) | |
1832 | return false; | |
1833 | } | |
5f0d2358 | 1834 | |
38109dab GL |
1835 | /* The same checks as in try_crossjump_to_edge. This is required for the | |
1836 | RTL version of sequence abstraction. */ | |
1837 | FOR_EACH_EDGE (e1, ei, bb2->succs) | |
1838 | { | |
1839 | edge e2; | |
1840 | edge_iterator ei; | |
1841 | basic_block d1 = e1->dest; | |
1842 | ||
1843 | if (FORWARDER_BLOCK_P (d1)) | |
1844 | d1 = EDGE_SUCC (d1, 0)->dest; | |
1845 | ||
1846 | FOR_EACH_EDGE (e2, ei, bb1->succs) | |
1847 | { | |
1848 | basic_block d2 = e2->dest; | |
1849 | if (FORWARDER_BLOCK_P (d2)) | |
1850 | d2 = EDGE_SUCC (d2, 0)->dest; | |
1851 | if (d1 == d2) | |
1852 | break; | |
1853 | } | |
1854 | ||
1855 | if (!e2) | |
1856 | return false; | |
1857 | } | |
1858 | ||
0dd0e980 | 1859 | return true; |
402209ff JH |
1860 | } |
1861 | ||
38109dab GL |
1862 | /* Returns true if basic block BB has a preserve label. */ | |
1863 | ||
1864 | static bool | |
1865 | block_has_preserve_label (basic_block bb) | |
1866 | { | |
1867 | return (bb | |
1868 | && block_label (bb) | |
1869 | && LABEL_PRESERVE_P (block_label (bb))); | |
1870 | } | |
1871 | ||
402209ff JH |
1872 | /* E1 and E2 are edges with the same destination block. Search their |
1873 | predecessors for common code. If found, redirect control flow from | |
bf22920b TV |
1874 | (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward), |
1875 | or the other way around (dir_backward). DIR specifies the allowed | |
1876 | replacement direction. */ | |
402209ff JH |
1877 | |
1878 | static bool | |
bf22920b TV |
1879 | try_crossjump_to_edge (int mode, edge e1, edge e2, |
1880 | enum replace_direction dir) | |
402209ff | 1881 | { |
c2fc5456 | 1882 | int nmatch; |
402209ff | 1883 | basic_block src1 = e1->src, src2 = e2->src; |
39587bb9 | 1884 | basic_block redirect_to, redirect_from, to_remove; |
823918ae | 1885 | basic_block osrc1, osrc2, redirect_edges_to, tmp; |
c2fc5456 | 1886 | rtx newpos1, newpos2; |
402209ff | 1887 | edge s; |
628f6a4e | 1888 | edge_iterator ei; |
c2fc5456 R |
1889 | |
1890 | newpos1 = newpos2 = NULL_RTX; | |
6de9cd9a | 1891 | |
750054a2 | 1892 | /* If we have partitioned hot/cold basic blocks, it is a bad idea |
c22cacf3 | 1893 | to try this optimization. |
8e8d5162 CT |
1894 | |
1895 | Basic block partitioning may result in some jumps that appear to | |
c22cacf3 MS |
1896 | be optimizable (or blocks that appear to be mergeable), but which really |
1897 | must be left untouched (they are required to make it safely across | |
1898 | partition boundaries). See the comments at the top of | |
8e8d5162 | 1899 | bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */ |
750054a2 | 1900 | |
af205f67 | 1901 | if (crtl->has_bb_partition && reload_completed) |
750054a2 CT |
1902 | return false; |
1903 | ||
402209ff JH |
1904 | /* Search backward through forwarder blocks. We don't need to worry |
1905 | about multiple entry or chained forwarders, as they will be optimized | |
1906 | away. We do this to look past the unconditional jump following a | |
1907 | conditional jump that is required due to the current CFG shape. */ | |
c5cbcccf | 1908 | if (single_pred_p (src1) |
635559ab | 1909 | && FORWARDER_BLOCK_P (src1)) |
c5cbcccf | 1910 | e1 = single_pred_edge (src1), src1 = e1->src; |
5f0d2358 | 1911 | |
c5cbcccf | 1912 | if (single_pred_p (src2) |
635559ab | 1913 | && FORWARDER_BLOCK_P (src2)) |
c5cbcccf | 1914 | e2 = single_pred_edge (src2), src2 = e2->src; |
402209ff JH |
1915 | |
1916 | /* Nothing to do if we reach ENTRY, or a common source block. */ | |
fefa31b5 DM |
1917 | if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun) || src2 |
1918 | == ENTRY_BLOCK_PTR_FOR_FN (cfun)) | |
402209ff JH |
1919 | return false; |
1920 | if (src1 == src2) | |
1921 | return false; | |
1922 | ||
1923 | /* Seeing more than 1 forwarder blocks would confuse us later... */ | |
635559ab | 1924 | if (FORWARDER_BLOCK_P (e1->dest) |
c5cbcccf | 1925 | && FORWARDER_BLOCK_P (single_succ (e1->dest))) |
402209ff | 1926 | return false; |
5f0d2358 | 1927 | |
635559ab | 1928 | if (FORWARDER_BLOCK_P (e2->dest) |
c5cbcccf | 1929 | && FORWARDER_BLOCK_P (single_succ (e2->dest))) |
402209ff JH |
1930 | return false; |
1931 | ||
1932 | /* Likewise with dead code (possibly newly created by the other optimizations | |
1933 | of cfg_cleanup). */ | |
628f6a4e | 1934 | if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0) |
402209ff JH |
1935 | return false; |
1936 | ||
402209ff | 1937 | /* Look for the common insn sequence, part the first ... */ |
c2fc5456 | 1938 | if (!outgoing_edges_match (mode, src1, src2)) |
402209ff JH |
1939 | return false; |
1940 | ||
1941 | /* ... and part the second. */ | |
472c95f5 | 1942 | nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir); |
12183e0f | 1943 | |
823918ae TV |
1944 | osrc1 = src1; |
1945 | osrc2 = src2; | |
1946 | if (newpos1 != NULL_RTX) | |
1947 | src1 = BLOCK_FOR_INSN (newpos1); | |
1948 | if (newpos2 != NULL_RTX) | |
1949 | src2 = BLOCK_FOR_INSN (newpos2); | |
1950 | ||
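/* If the common sequence was found in the backward direction, swap the | |
   two sides here so that the code below can always redirect E1 towards | |
   the block that keeps the matched tail.  */ | |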
bf22920b TV |
1951 | if (dir == dir_backward) |
1952 | { | |
1953 | #define SWAP(T, X, Y) do { T tmp = (X); (X) = (Y); (Y) = tmp; } while (0) | |
1954 | SWAP (basic_block, osrc1, osrc2); | |
1955 | SWAP (basic_block, src1, src2); | |
1956 | SWAP (edge, e1, e2); | |
1957 | SWAP (rtx, newpos1, newpos2); | |
1958 | #undef SWAP | |
1959 | } | |
1960 | ||
12183e0f PH |
1961 | /* Don't proceed with the crossjump unless we found a sufficient number |
1962 | of matching instructions or the 'from' block was totally matched | |
1963 | (such that its predecessors will hopefully be redirected and the | |
1964 | block removed). */ | |
c2fc5456 R |
1965 | if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS)) |
1966 | && (newpos1 != BB_HEAD (src1))) | |
7d22e898 | 1967 | return false; |
402209ff | 1968 | |
75c40d56 | 1969 | /* Avoid deleting a preserve label when redirecting ABNORMAL edges. */ | |
38109dab GL |
1970 | if (block_has_preserve_label (e1->dest) |
1971 | && (e1->flags & EDGE_ABNORMAL)) | |
1972 | return false; | |
1973 | ||
39811184 JZ |
1974 | /* Here we know that the insns in the end of SRC1 which are common with SRC2 |
1975 | will be deleted. | |
1976 | If we have tablejumps in the end of SRC1 and SRC2 | |
1977 | they have been already compared for equivalence in outgoing_edges_match () | |
1978 | so replace the references to TABLE1 by references to TABLE2. */ | |
1979 | { | |
1980 | rtx label1, label2; | |
1981 | rtx table1, table2; | |
1982 | ||
823918ae TV |
1983 | if (tablejump_p (BB_END (osrc1), &label1, &table1) |
1984 | && tablejump_p (BB_END (osrc2), &label2, &table2) | |
39811184 JZ |
1985 | && label1 != label2) |
1986 | { | |
4af16369 | 1987 | replace_label_data rr; |
39811184 JZ |
1988 | rtx insn; |
1989 | ||
1990 | /* Replace references to LABEL1 with LABEL2. */ | |
1991 | rr.r1 = label1; | |
1992 | rr.r2 = label2; | |
4af16369 | 1993 | rr.update_label_nuses = true; |
39811184 JZ |
1994 | for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) |
1995 | { | |
1996 | /* Do not replace the label in SRC1->END because when deleting | |
1997 | a block whose end is a tablejump, the tablejump referenced | |
1998 | from the instruction is deleted too. */ | |
823918ae | 1999 | if (insn != BB_END (osrc1)) |
39811184 JZ |
2000 | for_each_rtx (&insn, replace_label, &rr); |
2001 | } | |
2002 | } | |
2003 | } | |
10d6c0d0 | 2004 | |
b604fe9b SB |
2005 | /* Avoid splitting if possible. We must always split when SRC2 has |
2006 | EH predecessor edges, or we may end up with basic blocks with both | |
2007 | normal and EH predecessor edges. */ | |
c2fc5456 | 2008 | if (newpos2 == BB_HEAD (src2) |
b604fe9b | 2009 | && !(EDGE_PRED (src2, 0)->flags & EDGE_EH)) |
402209ff JH |
2010 | redirect_to = src2; |
2011 | else | |
2012 | { | |
c2fc5456 | 2013 | if (newpos2 == BB_HEAD (src2)) |
b604fe9b SB |
2014 | { |
2015 | /* Skip possible basic block header. */ | |
c2fc5456 R |
2016 | if (LABEL_P (newpos2)) |
2017 | newpos2 = NEXT_INSN (newpos2); | |
b5b8b0ac AO |
2018 | while (DEBUG_INSN_P (newpos2)) |
2019 | newpos2 = NEXT_INSN (newpos2); | |
c2fc5456 R |
2020 | if (NOTE_P (newpos2)) |
2021 | newpos2 = NEXT_INSN (newpos2); | |
b5b8b0ac AO |
2022 | while (DEBUG_INSN_P (newpos2)) |
2023 | newpos2 = NEXT_INSN (newpos2); | |
b604fe9b SB |
2024 | } |
2025 | ||
c263766c RH |
2026 | if (dump_file) |
2027 | fprintf (dump_file, "Splitting bb %i before %i insns\n", | |
0b17ab2f | 2028 | src2->index, nmatch); |
c2fc5456 | 2029 | redirect_to = split_block (src2, PREV_INSN (newpos2))->dest; |
402209ff JH |
2030 | } |
2031 | ||
c263766c | 2032 | if (dump_file) |
c2fc5456 R |
2033 | fprintf (dump_file, |
2034 | "Cross jumping from bb %i to bb %i; %i common insns\n", | |
2035 | src1->index, src2->index, nmatch); | |
402209ff | 2036 | |
6fc0bb99 | 2037 | /* We may have some registers visible through the block. */ |
6fb5fa3c | 2038 | df_set_bb_dirty (redirect_to); |
402209ff | 2039 | |
823918ae TV |
2040 | if (osrc2 == src2) |
2041 | redirect_edges_to = redirect_to; | |
2042 | else | |
2043 | redirect_edges_to = osrc2; | |
2044 | ||
402209ff | 2045 | /* Recompute the frequencies and counts of outgoing edges. */ |
823918ae | 2046 | FOR_EACH_EDGE (s, ei, redirect_edges_to->succs) |
402209ff JH |
2047 | { |
2048 | edge s2; | |
628f6a4e | 2049 | edge_iterator ei; |
402209ff JH |
2050 | basic_block d = s->dest; |
2051 | ||
635559ab | 2052 | if (FORWARDER_BLOCK_P (d)) |
c5cbcccf | 2053 | d = single_succ (d); |
5f0d2358 | 2054 | |
628f6a4e | 2055 | FOR_EACH_EDGE (s2, ei, src1->succs) |
402209ff JH |
2056 | { |
2057 | basic_block d2 = s2->dest; | |
635559ab | 2058 | if (FORWARDER_BLOCK_P (d2)) |
c5cbcccf | 2059 | d2 = single_succ (d2); |
402209ff JH |
2060 | if (d == d2) |
2061 | break; | |
2062 | } | |
5f0d2358 | 2063 | |
402209ff JH |
2064 | s->count += s2->count; |
2065 | ||
2066 | /* Take care to update possible forwarder blocks. We verified | |
c22cacf3 MS |
2067 | that there is no more than one in the chain, so we can't run |
2068 | into an infinite loop. */ | |
635559ab | 2069 | if (FORWARDER_BLOCK_P (s->dest)) |
402209ff | 2070 | { |
c5cbcccf | 2071 | single_succ_edge (s->dest)->count += s2->count; |
402209ff JH |
2072 | s->dest->count += s2->count; |
2073 | s->dest->frequency += EDGE_FREQUENCY (s); | |
2074 | } | |
5f0d2358 | 2075 | |
635559ab | 2076 | if (FORWARDER_BLOCK_P (s2->dest)) |
402209ff | 2077 | { |
c5cbcccf ZD |
2078 | single_succ_edge (s2->dest)->count -= s2->count; |
2079 | if (single_succ_edge (s2->dest)->count < 0) | |
2080 | single_succ_edge (s2->dest)->count = 0; | |
402209ff JH |
2081 | s2->dest->count -= s2->count; |
2082 | s2->dest->frequency -= EDGE_FREQUENCY (s); | |
b446e5a2 JH |
2083 | if (s2->dest->frequency < 0) |
2084 | s2->dest->frequency = 0; | |
2085 | if (s2->dest->count < 0) | |
2086 | s2->dest->count = 0; | |
402209ff | 2087 | } |
5f0d2358 | 2088 | |
823918ae | 2089 | if (!redirect_edges_to->frequency && !src1->frequency) |
402209ff JH |
2090 | s->probability = (s->probability + s2->probability) / 2; |
2091 | else | |
5f0d2358 | 2092 | s->probability |
823918ae | 2093 | = ((s->probability * redirect_edges_to->frequency + |
5f0d2358 | 2094 | s2->probability * src1->frequency) |
823918ae | 2095 | / (redirect_edges_to->frequency + src1->frequency)); |
402209ff JH |
2096 | } |
2097 | ||
52982a97 EB |
2098 | /* Adjust count and frequency for the block. An earlier jump |
2099 | threading pass may have left the profile in an inconsistent | |
2100 | state (see update_bb_profile_for_threading) so we must be | |
2101 | prepared for overflows. */ | |
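/* REDIRECT_TO and REDIRECT_EDGES_TO differ when the matched tail starts | |
   in a fallthru predecessor of OSRC2; walk the fallthru chain between | |
   them and update every block on the way.  */ | |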
823918ae TV |
2102 | tmp = redirect_to; |
2103 | do | |
2104 | { | |
2105 | tmp->count += src1->count; | |
2106 | tmp->frequency += src1->frequency; | |
2107 | if (tmp->frequency > BB_FREQ_MAX) | |
2108 | tmp->frequency = BB_FREQ_MAX; | |
2109 | if (tmp == redirect_edges_to) | |
2110 | break; | |
2111 | tmp = find_fallthru_edge (tmp->succs)->dest; | |
2112 | } | |
2113 | while (true); | |
2114 | update_br_prob_note (redirect_edges_to); | |
402209ff JH |
2115 | |
2116 | /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */ | |
2117 | ||
c2fc5456 R |
2118 | /* Skip possible basic block header. */ |
2119 | if (LABEL_P (newpos1)) | |
2120 | newpos1 = NEXT_INSN (newpos1); | |
b5b8b0ac AO |
2121 | |
2122 | while (DEBUG_INSN_P (newpos1)) | |
2123 | newpos1 = NEXT_INSN (newpos1); | |
2124 | ||
cd9c1ca8 | 2125 | if (NOTE_INSN_BASIC_BLOCK_P (newpos1)) |
c2fc5456 R |
2126 | newpos1 = NEXT_INSN (newpos1); |
2127 | ||
b5b8b0ac AO |
2128 | while (DEBUG_INSN_P (newpos1)) |
2129 | newpos1 = NEXT_INSN (newpos1); | |
2130 | ||
c2fc5456 | 2131 | redirect_from = split_block (src1, PREV_INSN (newpos1))->src; |
c5cbcccf | 2132 | to_remove = single_succ (redirect_from); |
402209ff | 2133 | |
c5cbcccf | 2134 | redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to); |
f470c378 | 2135 | delete_basic_block (to_remove); |
402209ff | 2136 | |
39587bb9 | 2137 | update_forwarder_flag (redirect_from); |
7cbd12b8 JJ |
2138 | if (redirect_to != src2) |
2139 | update_forwarder_flag (src2); | |
635559ab | 2140 | |
402209ff JH |
2141 | return true; |
2142 | } | |
2143 | ||
2144 | /* Search the predecessors of BB for common insn sequences. When found, | |
2145 | share code between them by redirecting control flow. Return true if | |
2146 | any changes made. */ | |
2147 | ||
2148 | static bool | |
d329e058 | 2149 | try_crossjump_bb (int mode, basic_block bb) |
402209ff | 2150 | { |
628f6a4e | 2151 | edge e, e2, fallthru; |
402209ff | 2152 | bool changed; |
628f6a4e | 2153 | unsigned max, ix, ix2; |
402209ff | 2154 | |
f63d1bf7 | 2155 | /* Nothing to do if there are not at least two incoming edges. */ | |
628f6a4e | 2156 | if (EDGE_COUNT (bb->preds) < 2) |
402209ff JH |
2157 | return false; |
2158 | ||
bbcb0c05 SB |
2159 | /* Don't crossjump if this block ends in a computed jump, |
2160 | unless we are optimizing for size. */ | |
efd8f750 | 2161 | if (optimize_bb_for_size_p (bb) |
fefa31b5 | 2162 | && bb != EXIT_BLOCK_PTR_FOR_FN (cfun) |
bbcb0c05 SB |
2163 | && computed_jump_p (BB_END (bb))) |
2164 | return false; | |
2165 | ||
750054a2 CT |
2166 | /* If we are partitioning hot/cold basic blocks, we don't want to |
2167 | mess up unconditional or indirect jumps that cross between hot | |
c22cacf3 MS |
2168 | and cold sections. |
2169 | ||
8e8d5162 | 2170 | Basic block partitioning may result in some jumps that appear to |
c22cacf3 MS |
2171 | be optimizable (or blocks that appear to be mergeable), but which really |
2172 | must be left untouched (they are required to make it safely across | |
2173 | partition boundaries). See the comments at the top of | |
8e8d5162 CT |
2174 | bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */ |
2175 | ||
c22cacf3 MS |
2176 | if (BB_PARTITION (EDGE_PRED (bb, 0)->src) != |
2177 | BB_PARTITION (EDGE_PRED (bb, 1)->src) | |
87c8b4be | 2178 | || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING)) |
750054a2 CT |
2179 | return false; |
2180 | ||
402209ff JH |
2181 | /* It is always cheapest to redirect a block that ends in a branch to |
2182 | a block that falls through into BB, as that adds no branches to the | |
2183 | program. We'll try that combination first. */ | |
5f24e0dc RH |
2184 | fallthru = NULL; |
2185 | max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES); | |
628f6a4e BE |
2186 | |
2187 | if (EDGE_COUNT (bb->preds) > max) | |
2188 | return false; | |
2189 | ||
0fd4b31d | 2190 | fallthru = find_fallthru_edge (bb->preds); |
402209ff JH |
2191 | |
2192 | changed = false; | |
0248bceb | 2193 | for (ix = 0; ix < EDGE_COUNT (bb->preds);) |
402209ff | 2194 | { |
0248bceb | 2195 | e = EDGE_PRED (bb, ix); |
628f6a4e | 2196 | ix++; |
402209ff | 2197 | |
c1e3e2d9 SB |
2198 | /* As noted above, first try with the fallthru predecessor (or, a |
2199 | fallthru predecessor if we are in cfglayout mode). */ | |
402209ff JH |
2200 | if (fallthru) |
2201 | { | |
2202 | /* Don't combine the fallthru edge into anything else. | |
2203 | If there is a match, we'll do it the other way around. */ | |
2204 | if (e == fallthru) | |
2205 | continue; | |
7cf240d5 JH |
2206 | /* If nothing changed since the last attempt, there is nothing |
2207 | we can do. */ | |
2208 | if (!first_pass | |
4ec5d4f5 BS |
2209 | && !((e->src->flags & BB_MODIFIED) |
2210 | || (fallthru->src->flags & BB_MODIFIED))) | |
7cf240d5 | 2211 | continue; |
402209ff | 2212 | |
bf22920b | 2213 | if (try_crossjump_to_edge (mode, e, fallthru, dir_forward)) |
402209ff JH |
2214 | { |
2215 | changed = true; | |
628f6a4e | 2216 | ix = 0; |
402209ff JH |
2217 | continue; |
2218 | } | |
2219 | } | |
2220 | ||
2221 | /* Non-obvious work limiting check: Recognize that we're going | |
2222 | to call try_crossjump_bb on every basic block. So if we have | |
2223 | two blocks with lots of outgoing edges (a switch) and they | |
2224 | share lots of common destinations, then we would do the | |
2225 | cross-jump check once for each common destination. | |
2226 | ||
2227 | Now, if the blocks actually are cross-jump candidates, then | |
2228 | all of their destinations will be shared. Which means that | |
2229 | we only need check them for cross-jump candidacy once. We | |
2230 | can eliminate redundant checks of crossjump(A,B) by arbitrarily | |
2231 | choosing to do the check from the block for which the edge | |
2232 | in question is the first successor of A. */ | |
628f6a4e | 2233 | if (EDGE_SUCC (e->src, 0) != e) |
402209ff JH |
2234 | continue; |
2235 | ||
0248bceb | 2236 | for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++) |
402209ff | 2237 | { |
0248bceb | 2238 | e2 = EDGE_PRED (bb, ix2); |
402209ff JH |
2239 | |
2240 | if (e2 == e) | |
2241 | continue; | |
2242 | ||
2243 | /* We've already checked the fallthru edge above. */ | |
2244 | if (e2 == fallthru) | |
2245 | continue; | |
2246 | ||
402209ff JH |
2247 | /* The "first successor" check above only prevents multiple |
2248 | checks of crossjump(A,B). In order to prevent redundant | |
2249 | checks of crossjump(B,A), require that A be the block | |
2250 | with the lowest index. */ | |
0b17ab2f | 2251 | if (e->src->index > e2->src->index) |
402209ff JH |
2252 | continue; |
2253 | ||
7cf240d5 JH |
2254 | /* If nothing changed since the last attempt, there is nothing |
2255 | we can do. */ | |
2256 | if (!first_pass | |
4ec5d4f5 BS |
2257 | && !((e->src->flags & BB_MODIFIED) |
2258 | || (e2->src->flags & BB_MODIFIED))) | |
7cf240d5 JH |
2259 | continue; |
2260 | ||
bf22920b TV |
2261 | /* Both e and e2 are not fallthru edges, so we can crossjump in either |
2262 | direction. */ | |
2263 | if (try_crossjump_to_edge (mode, e, e2, dir_both)) | |
402209ff JH |
2264 | { |
2265 | changed = true; | |
628f6a4e | 2266 | ix = 0; |
402209ff JH |
2267 | break; |
2268 | } | |
2269 | } | |
2270 | } | |
2271 | ||
c1e3e2d9 SB |
2272 | if (changed) |
2273 | crossjumps_occured = true; | |
2274 | ||
402209ff JH |
2275 | return changed; |
2276 | } | |
2277 | ||
4ec5d4f5 BS |
2278 | /* Search the successors of BB for common insn sequences. When found, |
2279 | share code between them by moving it across the basic block | |
2280 | boundary. Return true if any changes made. */ | |
2281 | ||
2282 | static bool | |
2283 | try_head_merge_bb (basic_block bb) | |
2284 | { | |
2285 | basic_block final_dest_bb = NULL; | |
2286 | int max_match = INT_MAX; | |
2287 | edge e0; | |
2288 | rtx *headptr, *currptr, *nextptr; | |
2289 | bool changed, moveall; | |
2290 | unsigned ix; | |
2291 | rtx e0_last_head, cond, move_before; | |
2292 | unsigned nedges = EDGE_COUNT (bb->succs); | |
2293 | rtx jump = BB_END (bb); | |
2294 | regset live, live_union; | |
2295 | ||
2296 | /* Nothing to do if there are not at least two outgoing edges. */ | |
2297 | if (nedges < 2) | |
2298 | return false; | |
2299 | ||
2300 | /* Don't crossjump if this block ends in a computed jump, | |
2301 | unless we are optimizing for size. */ | |
2302 | if (optimize_bb_for_size_p (bb) | |
fefa31b5 | 2303 | && bb != EXIT_BLOCK_PTR_FOR_FN (cfun) |
4ec5d4f5 BS |
2304 | && computed_jump_p (BB_END (bb))) |
2305 | return false; | |
2306 | ||
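/* MOVE_BEFORE is the place in front of which insns may be hoisted: the | |
   insn computing the jump condition when one can be found, otherwise the | |
   jump itself (the insn just before it when the jump uses cc0).  */ | |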
2307 | cond = get_condition (jump, &move_before, true, false); | |
2308 | if (cond == NULL_RTX) | |
43052d45 BS |
2309 | { |
2310 | #ifdef HAVE_cc0 | |
2311 | if (reg_mentioned_p (cc0_rtx, jump)) | |
2312 | move_before = prev_nonnote_nondebug_insn (jump); | |
2313 | else | |
2314 | #endif | |
2315 | move_before = jump; | |
2316 | } | |
4ec5d4f5 BS |
2317 | |
2318 | for (ix = 0; ix < nedges; ix++) | |
fefa31b5 | 2319 | if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
4ec5d4f5 BS |
2320 | return false; |
2321 | ||
2322 | for (ix = 0; ix < nedges; ix++) | |
2323 | { | |
2324 | edge e = EDGE_SUCC (bb, ix); | |
2325 | basic_block other_bb = e->dest; | |
2326 | ||
2327 | if (df_get_bb_dirty (other_bb)) | |
2328 | { | |
2329 | block_was_dirty = true; | |
2330 | return false; | |
2331 | } | |
2332 | ||
2333 | if (e->flags & EDGE_ABNORMAL) | |
2334 | return false; | |
2335 | ||
2336 | /* Normally, all destination blocks must only be reachable from this | |
2337 | block, i.e. they must have one incoming edge. | |
2338 | ||
2339 | There is one special case we can handle, that of multiple consecutive | |
2340 | jumps where the first jumps to one of the targets of the second jump. | |
2341 | This happens frequently in switch statements for default labels. | |
2342 | The structure is as follows: | |
2343 | FINAL_DEST_BB | |
2344 | .... | |
2345 | if (cond) jump A; | |
2346 | fall through | |
2347 | BB | |
2348 | jump with targets A, B, C, D... | |
2349 | A | |
2350 | has two incoming edges, from FINAL_DEST_BB and BB | |
2351 | ||
2352 | In this case, we can try to move the insns through BB and into | |
2353 | FINAL_DEST_BB. */ | |
2354 | if (EDGE_COUNT (other_bb->preds) != 1) | |
2355 | { | |
2356 | edge incoming_edge, incoming_bb_other_edge; | |
2357 | edge_iterator ei; | |
2358 | ||
2359 | if (final_dest_bb != NULL | |
2360 | || EDGE_COUNT (other_bb->preds) != 2) | |
2361 | return false; | |
2362 | ||
2363 | /* We must be able to move the insns across the whole block. */ | |
2364 | move_before = BB_HEAD (bb); | |
2365 | while (!NONDEBUG_INSN_P (move_before)) | |
2366 | move_before = NEXT_INSN (move_before); | |
2367 | ||
2368 | if (EDGE_COUNT (bb->preds) != 1) | |
2369 | return false; | |
2370 | incoming_edge = EDGE_PRED (bb, 0); | |
2371 | final_dest_bb = incoming_edge->src; | |
2372 | if (EDGE_COUNT (final_dest_bb->succs) != 2) | |
2373 | return false; | |
2374 | FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs) | |
2375 | if (incoming_bb_other_edge != incoming_edge) | |
2376 | break; | |
2377 | if (incoming_bb_other_edge->dest != other_bb) | |
2378 | return false; | |
2379 | } | |
2380 | } | |
2381 | ||
2382 | e0 = EDGE_SUCC (bb, 0); | |
2383 | e0_last_head = NULL_RTX; | |
2384 | changed = false; | |
2385 | ||
2386 | for (ix = 1; ix < nedges; ix++) | |
2387 | { | |
2388 | edge e = EDGE_SUCC (bb, ix); | |
2389 | rtx e0_last, e_last; | |
2390 | int nmatch; | |
2391 | ||
2392 | nmatch = flow_find_head_matching_sequence (e0->dest, e->dest, | |
2393 | &e0_last, &e_last, 0); | |
2394 | if (nmatch == 0) | |
2395 | return false; | |
2396 | ||
2397 | if (nmatch < max_match) | |
2398 | { | |
2399 | max_match = nmatch; | |
2400 | e0_last_head = e0_last; | |
2401 | } | |
2402 | } | |
2403 | ||
2404 | /* If we matched an entire block, we probably have to avoid moving the | |
2405 | last insn. */ | |
2406 | if (max_match > 0 | |
2407 | && e0_last_head == BB_END (e0->dest) | |
2408 | && (find_reg_note (e0_last_head, REG_EH_REGION, 0) | |
2409 | || control_flow_insn_p (e0_last_head))) | |
2410 | { | |
2411 | max_match--; | |
2412 | if (max_match == 0) | |
2413 | return false; | |
b59e0455 JJ |
2414 | do |
2415 | e0_last_head = prev_real_insn (e0_last_head); | |
2416 | while (DEBUG_INSN_P (e0_last_head)); | |
4ec5d4f5 BS |
2417 | } |
2418 | ||
2419 | if (max_match == 0) | |
2420 | return false; | |
2421 | ||
2422 | /* We must find a union of the live registers at each of the end points. */ | |
2423 | live = BITMAP_ALLOC (NULL); | |
2424 | live_union = BITMAP_ALLOC (NULL); | |
2425 | ||
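/* For each outgoing edge, HEADPTR[IX] is the first insn of the matched | |
   sequence, CURRPTR[IX] the last insn merged so far, and NEXTPTR[IX] the | |
   insn to resume from when only part of the sequence could be moved.  */ | |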
2426 | currptr = XNEWVEC (rtx, nedges); | |
2427 | headptr = XNEWVEC (rtx, nedges); | |
2428 | nextptr = XNEWVEC (rtx, nedges); | |
2429 | ||
2430 | for (ix = 0; ix < nedges; ix++) | |
2431 | { | |
2432 | int j; | |
2433 | basic_block merge_bb = EDGE_SUCC (bb, ix)->dest; | |
2434 | rtx head = BB_HEAD (merge_bb); | |
2435 | ||
b59e0455 JJ |
2436 | while (!NONDEBUG_INSN_P (head)) |
2437 | head = NEXT_INSN (head); | |
4ec5d4f5 BS |
2438 | headptr[ix] = head; |
2439 | currptr[ix] = head; | |
2440 | ||
2441 | /* Compute the end point and live information */ | |
2442 | for (j = 1; j < max_match; j++) | |
b59e0455 JJ |
2443 | do |
2444 | head = NEXT_INSN (head); | |
2445 | while (!NONDEBUG_INSN_P (head)); | |
4ec5d4f5 BS |
2446 | simulate_backwards_to_point (merge_bb, live, head); |
2447 | IOR_REG_SET (live_union, live); | |
2448 | } | |
2449 | ||
2450 | /* If we're moving across two blocks, verify the validity of the | |
2451 | first move, then adjust the target and let the loop below deal | |
2452 | with the final move. */ | |
2453 | if (final_dest_bb != NULL) | |
2454 | { | |
2455 | rtx move_upto; | |
2456 | ||
2457 | moveall = can_move_insns_across (currptr[0], e0_last_head, move_before, | |
2458 | jump, e0->dest, live_union, | |
2459 | NULL, &move_upto); | |
2460 | if (!moveall) | |
2461 | { | |
2462 | if (move_upto == NULL_RTX) | |
2463 | goto out; | |
2464 | ||
2465 | while (e0_last_head != move_upto) | |
2466 | { | |
2467 | df_simulate_one_insn_backwards (e0->dest, e0_last_head, | |
2468 | live_union); | |
2469 | e0_last_head = PREV_INSN (e0_last_head); | |
2470 | } | |
2471 | } | |
2472 | if (e0_last_head == NULL_RTX) | |
2473 | goto out; | |
2474 | ||
2475 | jump = BB_END (final_dest_bb); | |
2476 | cond = get_condition (jump, &move_before, true, false); | |
2477 | if (cond == NULL_RTX) | |
43052d45 BS |
2478 | { |
2479 | #ifdef HAVE_cc0 | |
2480 | if (reg_mentioned_p (cc0_rtx, jump)) | |
2481 | move_before = prev_nonnote_nondebug_insn (jump); | |
2482 | else | |
2483 | #endif | |
2484 | move_before = jump; | |
2485 | } | |
4ec5d4f5 BS |
2486 | } |
2487 | ||
2488 | do | |
2489 | { | |
2490 | rtx move_upto; | |
2491 | moveall = can_move_insns_across (currptr[0], e0_last_head, | |
2492 | move_before, jump, e0->dest, live_union, | |
2493 | NULL, &move_upto); | |
2494 | if (!moveall && move_upto == NULL_RTX) | |
2495 | { | |
2496 | if (jump == move_before) | |
2497 | break; | |
2498 | ||
2499 | /* Try again, using a different insertion point. */ | |
2500 | move_before = jump; | |
2501 | ||
2502 | #ifdef HAVE_cc0 | |
2503 | /* Don't try moving before a cc0 user, as that may invalidate | |
2504 | the cc0. */ | |
2505 | if (reg_mentioned_p (cc0_rtx, jump)) | |
2506 | break; | |
2507 | #endif | |
2508 | ||
2509 | continue; | |
2510 | } | |
2511 | ||
2512 | if (final_dest_bb && !moveall) | |
2513 | /* We haven't checked whether a partial move would be OK for the first | |
2514 | move, so we have to fail this case. */ | |
2515 | break; | |
2516 | ||
2517 | changed = true; | |
2518 | for (;;) | |
2519 | { | |
2520 | if (currptr[0] == move_upto) | |
2521 | break; | |
2522 | for (ix = 0; ix < nedges; ix++) | |
2523 | { | |
2524 | rtx curr = currptr[ix]; | |
2525 | do | |
2526 | curr = NEXT_INSN (curr); | |
2527 | while (!NONDEBUG_INSN_P (curr)); | |
2528 | currptr[ix] = curr; | |
2529 | } | |
2530 | } | |
2531 | ||
2532 | /* If we can't currently move all of the identical insns, remember | |
2533 | each insn after the range that we'll merge. */ | |
2534 | if (!moveall) | |
2535 | for (ix = 0; ix < nedges; ix++) | |
2536 | { | |
2537 | rtx curr = currptr[ix]; | |
2538 | do | |
2539 | curr = NEXT_INSN (curr); | |
2540 | while (!NONDEBUG_INSN_P (curr)); | |
2541 | nextptr[ix] = curr; | |
2542 | } | |
2543 | ||
2544 | reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before)); | |
2545 | df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest); | |
2546 | if (final_dest_bb != NULL) | |
2547 | df_set_bb_dirty (final_dest_bb); | |
2548 | df_set_bb_dirty (bb); | |
2549 | for (ix = 1; ix < nedges; ix++) | |
2550 | { | |
2551 | df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest); | |
2552 | delete_insn_chain (headptr[ix], currptr[ix], false); | |
2553 | } | |
2554 | if (!moveall) | |
2555 | { | |
2556 | if (jump == move_before) | |
2557 | break; | |
2558 | ||
2559 | /* For the unmerged insns, try a different insertion point. */ | |
2560 | move_before = jump; | |
2561 | ||
2562 | #ifdef HAVE_cc0 | |
2563 | /* Don't try moving before a cc0 user, as that may invalidate | |
2564 | the cc0. */ | |
2565 | if (reg_mentioned_p (cc0_rtx, jump)) | |
2566 | break; | |
2567 | #endif | |
2568 | ||
2569 | for (ix = 0; ix < nedges; ix++) | |
2570 | currptr[ix] = headptr[ix] = nextptr[ix]; | |
2571 | } | |
2572 | } | |
2573 | while (!moveall); | |
2574 | ||
2575 | out: | |
2576 | free (currptr); | |
2577 | free (headptr); | |
2578 | free (nextptr); | |
2579 | ||
2580 | crossjumps_occured |= changed; | |
2581 | ||
2582 | return changed; | |
2583 | } | |
2584 | ||
7752e522 JJ |
2585 | /* Return true if BB contains just a bb note, or a bb note followed | |
2586 | only by DEBUG_INSNs. */ | |
2587 | ||
2588 | static bool | |
2589 | trivially_empty_bb_p (basic_block bb) | |
2590 | { | |
2591 | rtx insn = BB_END (bb); | |
2592 | ||
2593 | while (1) | |
2594 | { | |
2595 | if (insn == BB_HEAD (bb)) | |
2596 | return true; | |
2597 | if (!DEBUG_INSN_P (insn)) | |
2598 | return false; | |
2599 | insn = PREV_INSN (insn); | |
2600 | } | |
2601 | } | |
2602 | ||
402209ff JH |
2603 | /* Do simple CFG optimizations - basic block merging, simplifying of jump |
2604 | instructions etc. Return nonzero if changes were made. */ | |
2605 | ||
2606 | static bool | |
d329e058 | 2607 | try_optimize_cfg (int mode) |
402209ff | 2608 | { |
402209ff JH |
2609 | bool changed_overall = false; |
2610 | bool changed; | |
2611 | int iterations = 0; | |
ec3ae3da | 2612 | basic_block bb, b, next; |
402209ff | 2613 | |
6fb5fa3c | 2614 | if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING)) |
38c1593d JH |
2615 | clear_bb_flags (); |
2616 | ||
c1e3e2d9 SB |
2617 | crossjumps_occured = false; |
2618 | ||
11cd3bed | 2619 | FOR_EACH_BB_FN (bb, cfun) |
2dd2d53e SB |
2620 | update_forwarder_flag (bb); |
2621 | ||
245f1bfa | 2622 | if (! targetm.cannot_modify_jumps_p ()) |
402209ff | 2623 | { |
7cf240d5 | 2624 | first_pass = true; |
e4ec2cac AO |
2625 | /* Attempt to merge blocks as made possible by edge removal. If |
2626 | a block has only one successor, and the successor has only | |
2627 | one predecessor, they may be combined. */ | |
2628 | do | |
402209ff | 2629 | { |
4ec5d4f5 | 2630 | block_was_dirty = false; |
e4ec2cac AO |
2631 | changed = false; |
2632 | iterations++; | |
2633 | ||
c263766c RH |
2634 | if (dump_file) |
2635 | fprintf (dump_file, | |
e4ec2cac AO |
2636 | "\n\ntry_optimize_cfg iteration %i\n\n", |
2637 | iterations); | |
402209ff | 2638 | |
fefa31b5 DM |
2639 | for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b |
2640 | != EXIT_BLOCK_PTR_FOR_FN (cfun);) | |
402209ff | 2641 | { |
e0082a72 | 2642 | basic_block c; |
e4ec2cac AO |
2643 | edge s; |
2644 | bool changed_here = false; | |
5f0d2358 | 2645 | |
468059bc DD |
2646 | /* Delete trivially dead basic blocks. This is either |
2647 | blocks with no predecessors, or empty blocks with no | |
1e211590 DD |
2648 | successors. However if the empty block with no |
2649 | successors is the successor of the ENTRY_BLOCK, it is | |
2650 | kept. This ensures that the ENTRY_BLOCK will have a | |
2651 | successor which is a precondition for many RTL | |
2652 | passes. Empty blocks may result from expanding | |
468059bc DD |
2653 | __builtin_unreachable (). */ |
2654 | if (EDGE_COUNT (b->preds) == 0 | |
1e211590 | 2655 | || (EDGE_COUNT (b->succs) == 0 |
7752e522 | 2656 | && trivially_empty_bb_p (b) |
fefa31b5 DM |
2657 | && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest |
2658 | != b)) | |
e4ec2cac | 2659 | { |
f6366fc7 | 2660 | c = b->prev_bb; |
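/* B is about to be deleted. If it ends in a barrier (kept in the | |
   footer in cfglayout mode), give each fallthru predecessor its own | |
   barrier first, so that no fallthru edge is left without one.  */ | |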
f1de5107 | 2661 | if (EDGE_COUNT (b->preds) > 0) |
3b5fda81 JJ |
2662 | { |
2663 | edge e; | |
2664 | edge_iterator ei; | |
2665 | ||
f1de5107 JJ |
2666 | if (current_ir_type () == IR_RTL_CFGLAYOUT) |
2667 | { | |
bcc708fc MM |
2668 | if (BB_FOOTER (b) |
2669 | && BARRIER_P (BB_FOOTER (b))) | |
f1de5107 JJ |
2670 | FOR_EACH_EDGE (e, ei, b->preds) |
2671 | if ((e->flags & EDGE_FALLTHRU) | |
bcc708fc | 2672 | && BB_FOOTER (e->src) == NULL) |
f1de5107 | 2673 | { |
bcc708fc | 2674 | if (BB_FOOTER (b)) |
f1de5107 | 2675 | { |
bcc708fc MM |
2676 | BB_FOOTER (e->src) = BB_FOOTER (b); |
2677 | BB_FOOTER (b) = NULL; | |
f1de5107 JJ |
2678 | } |
2679 | else | |
2680 | { | |
2681 | start_sequence (); | |
bcc708fc | 2682 | BB_FOOTER (e->src) = emit_barrier (); |
f1de5107 JJ |
2683 | end_sequence (); |
2684 | } | |
2685 | } | |
2686 | } | |
2687 | else | |
2688 | { | |
2689 | rtx last = get_last_bb_insn (b); | |
2690 | if (last && BARRIER_P (last)) | |
2691 | FOR_EACH_EDGE (e, ei, b->preds) | |
2692 | if ((e->flags & EDGE_FALLTHRU)) | |
2693 | emit_barrier_after (BB_END (e->src)); | |
2694 | } | |
3b5fda81 | 2695 | } |
f470c378 | 2696 | delete_basic_block (b); |
bef16e87 | 2697 | changed = true; |
6626665f | 2698 | /* Avoid trying to remove the exit block. */ |
fefa31b5 | 2699 | b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c); |
83bd032b | 2700 | continue; |
e4ec2cac | 2701 | } |
402209ff | 2702 | |
6ce2bcb7 | 2703 | /* Remove code labels no longer used. */ |
c5cbcccf ZD |
2704 | if (single_pred_p (b) |
2705 | && (single_pred_edge (b)->flags & EDGE_FALLTHRU) | |
2706 | && !(single_pred_edge (b)->flags & EDGE_COMPLEX) | |
4b4bf941 | 2707 | && LABEL_P (BB_HEAD (b)) |
e4ec2cac AO |
2708 | /* If the previous block ends with a branch to this |
2709 | block, we can't delete the label. Normally this | |
2710 | is a condjump that is yet to be simplified, but | |
2711 | if CASE_DROPS_THRU, this can be a tablejump with | |
2712 | some element going to the same place as the | |
2713 | default (fallthru). */ | |
fefa31b5 | 2714 | && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun) |
c5cbcccf | 2715 | || !JUMP_P (BB_END (single_pred (b))) |
a813c111 | 2716 | || ! label_is_jump_target_p (BB_HEAD (b), |
c5cbcccf | 2717 | BB_END (single_pred (b))))) |
e4ec2cac | 2718 | { |
03fbe718 | 2719 | delete_insn (BB_HEAD (b)); |
c263766c RH |
2720 | if (dump_file) |
2721 | fprintf (dump_file, "Deleted label in block %i.\n", | |
0b17ab2f | 2722 | b->index); |
e4ec2cac | 2723 | } |
402209ff | 2724 | |
e4ec2cac | 2725 | /* If we fall through an empty block, we can remove it. */ |
9be94227 | 2726 | if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL)) |
c5cbcccf ZD |
2727 | && single_pred_p (b) |
2728 | && (single_pred_edge (b)->flags & EDGE_FALLTHRU) | |
4b4bf941 | 2729 | && !LABEL_P (BB_HEAD (b)) |
e4ec2cac AO |
2730 | && FORWARDER_BLOCK_P (b) |
2731 | /* Note that forwarder_block_p true ensures that | |
2732 | there is a successor for this block. */ | |
c5cbcccf | 2733 | && (single_succ_edge (b)->flags & EDGE_FALLTHRU) |
0cae8d31 | 2734 | && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1) |
e4ec2cac | 2735 | { |
c263766c RH |
2736 | if (dump_file) |
2737 | fprintf (dump_file, | |
e4ec2cac | 2738 | "Deleting fallthru block %i.\n", |
0b17ab2f | 2739 | b->index); |
e4ec2cac | 2740 | |
fefa31b5 DM |
2741 | c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)) |
2742 | ? b->next_bb : b->prev_bb); | |
c5cbcccf ZD |
2743 | redirect_edge_succ_nodup (single_pred_edge (b), |
2744 | single_succ (b)); | |
f470c378 | 2745 | delete_basic_block (b); |
e4ec2cac AO |
2746 | changed = true; |
2747 | b = c; | |
1e211590 | 2748 | continue; |
e4ec2cac | 2749 | } |
5f0d2358 | 2750 | |
50a36e42 | 2751 | /* Merge B with its single successor, if any. */ |
c5cbcccf ZD |
2752 | if (single_succ_p (b) |
2753 | && (s = single_succ_edge (b)) | |
ec3ae3da | 2754 | && !(s->flags & EDGE_COMPLEX) |
fefa31b5 | 2755 | && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun) |
c5cbcccf | 2756 | && single_pred_p (c) |
bc35512f JH |
2757 | && b != c) |
2758 | { | |
2759 | /* When not in cfg_layout mode use code aware of reordering | |
2760 | INSN. This code possibly creates new basic blocks so it | |
2761 | does not fit merge_blocks interface and is kept here in | |
2762 | hope that it will become useless once more of compiler | |
2763 | is transformed to use cfg_layout mode. */ | |
c22cacf3 | 2764 | |
bc35512f JH |
2765 | if ((mode & CLEANUP_CFGLAYOUT) |
2766 | && can_merge_blocks_p (b, c)) | |
2767 | { | |
2768 | merge_blocks (b, c); | |
2769 | update_forwarder_flag (b); | |
2770 | changed_here = true; | |
2771 | } | |
2772 | else if (!(mode & CLEANUP_CFGLAYOUT) | |
2773 | /* If the jump insn has side effects, | |
2774 | we can't kill the edge. */ | |
4b4bf941 | 2775 | && (!JUMP_P (BB_END (b)) |
e24e7211 | 2776 | || (reload_completed |
a813c111 | 2777 | ? simplejump_p (BB_END (b)) |
e4efa971 JH |
2778 | : (onlyjump_p (BB_END (b)) |
2779 | && !tablejump_p (BB_END (b), | |
2780 | NULL, NULL)))) | |
bc35512f JH |
2781 | && (next = merge_blocks_move (s, b, c, mode))) |
2782 | { | |
2783 | b = next; | |
2784 | changed_here = true; | |
2785 | } | |
ec3ae3da | 2786 | } |
e4ec2cac AO |
2787 | |
2788 | /* Simplify branch over branch. */ | |
bc35512f JH |
2789 | if ((mode & CLEANUP_EXPENSIVE) |
2790 | && !(mode & CLEANUP_CFGLAYOUT) | |
2791 | && try_simplify_condjump (b)) | |
38c1593d | 2792 | changed_here = true; |
402209ff | 2793 | |
e4ec2cac AO |
2794 | /* If B has a single outgoing edge, but uses a |
2795 | non-trivial jump instruction without side-effects, we | |
2796 | can either delete the jump entirely, or replace it | |
3348b696 | 2797 | with a simple unconditional jump. */ |
c5cbcccf | 2798 | if (single_succ_p (b) |
fefa31b5 | 2799 | && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun) |
a813c111 | 2800 | && onlyjump_p (BB_END (b)) |
339ba33b | 2801 | && !CROSSING_JUMP_P (BB_END (b)) |
c5cbcccf ZD |
2802 | && try_redirect_by_replacing_jump (single_succ_edge (b), |
2803 | single_succ (b), | |
20b4e8ae | 2804 | (mode & CLEANUP_CFGLAYOUT) != 0)) |
e4ec2cac | 2805 | { |
e4ec2cac AO |
2806 | update_forwarder_flag (b); |
2807 | changed_here = true; | |
2808 | } | |
402209ff | 2809 | |
e4ec2cac AO |
2810 | /* Simplify branch to branch. */ |
2811 | if (try_forward_edges (mode, b)) | |
afe8b6ec EB |
2812 | { |
2813 | update_forwarder_flag (b); | |
2814 | changed_here = true; | |
2815 | } | |
402209ff | 2816 | |
e4ec2cac AO |
2817 | /* Look for shared code between blocks. */ |
2818 | if ((mode & CLEANUP_CROSSJUMP) | |
2819 | && try_crossjump_bb (mode, b)) | |
2820 | changed_here = true; | |
402209ff | 2821 | |
4ec5d4f5 BS |
2822 | if ((mode & CLEANUP_CROSSJUMP) |
2823 | /* This can lengthen register lifetimes. Do it only after | |
2824 | reload. */ | |
2825 | && reload_completed | |
2826 | && try_head_merge_bb (b)) | |
2827 | changed_here = true; | |

              /* Don't get confused by the index shift caused by
                 deleting blocks.  */
              if (!changed_here)
                b = b->next_bb;
              else
                changed = true;
            }

          if ((mode & CLEANUP_CROSSJUMP)
              && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
            changed = true;

          if (block_was_dirty)
            {
              /* This should only be set by head-merging.  */
              gcc_assert (mode & CLEANUP_CROSSJUMP);
              df_analyze ();
            }

          if (changed)
            {
              /* Edge forwarding in particular can cause hot blocks previously
                 reached by both hot and cold blocks to become dominated only
                 by cold blocks.  This will cause the verification below to
                 fail, and lead to now cold code in the hot section.  This is
                 not easy to detect and fix during edge forwarding, and in some
                 cases is only visible after newly unreachable blocks are
                 deleted, which will be done in fixup_partitions.  */
              fixup_partitions ();

#ifdef ENABLE_CHECKING
              verify_flow_info ();
#endif
            }

          changed_overall |= changed;
          first_pass = false;
        }
      while (changed);
    }

  FOR_ALL_BB_FN (b, cfun)
    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);

  return changed_overall;
}
\f
/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, prev_bb;

  find_unreachable_blocks ();

  /* When we're in GIMPLE mode and there may be debug insns, we should
     delete blocks in reverse dominator order, so as to get a chance
     to substitute all released DEFs into debug stmts.  If we don't
     have dominators information, walking blocks backward gets us a
     better chance of retaining most debug information than
     otherwise.  */
  if (MAY_HAVE_DEBUG_INSNS && current_ir_type () == IR_GIMPLE
      && dom_info_available_p (CDI_DOMINATORS))
    {
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
           b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
        {
          prev_bb = b->prev_bb;

          if (!(b->flags & BB_REACHABLE))
            {
              /* Speed up the removal of blocks that don't dominate
                 others.  Walking backwards, this should be the common
                 case.  */
              if (!first_dom_son (CDI_DOMINATORS, b))
                delete_basic_block (b);
              else
                {
                  vec<basic_block> h
                    = get_all_dominated_blocks (CDI_DOMINATORS, b);

                  while (h.length ())
                    {
                      b = h.pop ();

                      prev_bb = b->prev_bb;

                      gcc_assert (!(b->flags & BB_REACHABLE));

                      delete_basic_block (b);
                    }

                  h.release ();
                }

              changed = true;
            }
        }
    }
  else
    {
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
           b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
        {
          prev_bb = b->prev_bb;

          if (!(b->flags & BB_REACHABLE))
            {
              delete_basic_block (b);
              changed = true;
            }
        }
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}

/* Delete any jump tables never referenced.  We can't delete them at the
   time of removing the tablejump insn, as they are referenced by the
   preceding insns computing the destination, so we delay deleting them
   and garbage-collect them once life information is computed.  */
void
delete_dead_jumptables (void)
{
  basic_block bb;

  /* A dead jump table does not belong to any basic block.  Scan insns
     between two adjacent basic blocks.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx insn, next;

      for (insn = NEXT_INSN (BB_END (bb));
           insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
           insn = next)
        {
          next = NEXT_INSN (insn);
          if (LABEL_P (insn)
              && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
              && JUMP_TABLE_DATA_P (next))
            {
              rtx label = insn, jump = next;

              if (dump_file)
                fprintf (dump_file, "Dead jumptable %i removed\n",
                         INSN_UID (insn));

              next = NEXT_INSN (next);
              delete_insn (jump);
              delete_insn (label);
            }
        }
    }
}
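/* For illustration: the typical leftover handled above is a tablejump whose
   jump insn was deleted or rewritten by the cleanups; the JUMP_TABLE_DATA
   insn and the code label addressing it then sit between two basic blocks
   with no remaining references until this routine removes them.  */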

\f
/* Tidy the CFG by deleting unreachable code and whatnot.  */

bool
cleanup_cfg (int mode)
{
  bool changed = false;

  /* Set the cfglayout mode flag here.  We could update all the callers
     but that is just inconvenient, especially given that we eventually
     want to have cfglayout mode as the default.  */
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    mode |= CLEANUP_CFGLAYOUT;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
         now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL))
          && !reload_completed)
        delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  compact_blocks ();

  /* To tail-merge blocks ending in the same noreturn function (e.g.
     a call to abort) we have to insert fake edges to exit.  Do this
     here once.  The fake edges do not interfere with any other CFG
     cleanups.  */
  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();
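  /* For illustration: two blocks that each end in a call to abort () have no
     successors at all, so the cross-jump code, which works on pairs of edges
     entering a common block, would never look at them.  The fake edges added
     above give both blocks the exit block as a successor, making their
     identical tails candidates for merging.  */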

  if (!dbg_cnt (cfg_cleanup))
    return changed;

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (!(mode & CLEANUP_NO_INSN_DEL))
        {
          /* Try to remove some trivially dead insns when doing an expensive
             cleanup.  But delete_trivially_dead_insns doesn't work after
             reload (it only handles pseudos) and run_fast_dce is too costly
             to run in every iteration.

             For effective cross jumping, we really want to run a fast DCE to
             clean up any dead conditions, or they get in the way of performing
             useful tail merges.

             Other transformations in cleanup_cfg are not so sensitive to dead
             code, so delete_trivially_dead_insns or even doing nothing at all
             is good enough.  */
          if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
              && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
            break;
          if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occured)
            run_fast_dce ();
        }
      else
        break;
    }
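  /* For illustration of the comment inside the loop above: a typical "dead
     condition" is a comparison whose only consumer, a conditional jump, was
     just removed by crossjumping or edge forwarding.  After reload such an
     instruction sets a hard register, so delete_trivially_dead_insns cannot
     remove it, and it would keep otherwise identical tails from matching on
     the next iteration; that is why run_fast_dce is invoked, but only when
     crossjumps actually occurred.  */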

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  /* Don't call delete_dead_jumptables in cfglayout mode, because
     that function assumes that jump tables are in the insns stream.
     But we also don't _have_ to delete dead jumptables in cfglayout
     mode because we shouldn't even be looking at things that are
     not in a basic block.  Dead jumptables are cleaned up when
     going out of cfglayout mode.  */
  if (!(mode & CLEANUP_CFGLAYOUT))
    delete_dead_jumptables ();

  /* ??? We probably do this way too often.  */
  if (current_loops
      && (changed
          || (mode & CLEANUP_CFG_CHANGED)))
    {
      timevar_push (TV_REPAIR_LOOPS);
      /* The above doesn't preserve dominance info if available.  */
      gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
      calculate_dominance_info (CDI_DOMINATORS);
      fix_loop_structure (NULL);
      free_dominance_info (CDI_DOMINATORS);
      timevar_pop (TV_REPAIR_LOOPS);
    }

  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}
\f
namespace {

const pass_data pass_data_jump =
{
  RTL_PASS, /* type */
  "jump", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_jump : public rtl_opt_pass
{
public:
  pass_jump (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_jump

unsigned int
pass_jump::execute (function *)
{
  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  if (dump_file)
    dump_flow_info (dump_file, dump_flags);
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
               | (flag_thread_jumps ? CLEANUP_THREADING : 0));
  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_jump (gcc::context *ctxt)
{
  return new pass_jump (ctxt);
}
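/* For illustration: the factory function above is what the pass manager
   uses; passes.def instantiates the pass through it (via NEXT_PASS) and the
   manager later invokes the execute method on the current function.  The
   second jump-optimization pass below follows the same pattern.  */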
\f
namespace {

const pass_data pass_data_jump2 =
{
  RTL_PASS, /* type */
  "jump2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_jump2 : public rtl_opt_pass
{
public:
  pass_jump2 (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump2, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
      return 0;
    }

}; // class pass_jump2

} // anon namespace

rtl_opt_pass *
make_pass_jump2 (gcc::context *ctxt)
{
  return new pass_jump2 (ctxt);
}