/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure so the branch can be converted to a
     simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
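
/* For instance, conditional jump-around-simplejump simplification
   (a hypothetical sketch; see try_simplify_condjump below) turns

	if (cond) goto L1;
	goto L2;
      L1:

   into

	if (!cond) goto L2;
      L1:

   i.e. the condition is inverted so that execution falls through to L1,
   and the forwarder block holding the unconditional jump is deleted.  */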

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "cselib.h"
#include "params.h"
#include "tm_p.h"
#include "target.h"
#include "cfglayout.h"
#include "emit-rtl.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"
#include "df.h"
#include "dce.h"
#include "dbgcnt.h"

#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)

/* Set to true when we are running first pass of try_optimize_cfg loop.  */
static bool first_pass;

/* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
static bool crossjumps_occured;

/* Set to true if we couldn't run an optimization due to stale liveness
   information; we should run df_analyze to enable more opportunities.  */
static bool block_was_dirty;

static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static enum replace_direction old_insns_match_p (int, rtx, rtx);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);

/* Set flags for newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
}

/* Recompute forwarder flag after block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
  else
    bb->flags &= ~BB_FORWARDER_BLOCK;
}
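
/* A forwarder block is, roughly, a block that does nothing but transfer
   control to its single successor, e.g. (a hypothetical sketch)

      L1:
	goto L2;

   Blocks carrying BB_FORWARDER_BLOCK can be bypassed by redirecting
   their incoming edges straight to the successor; this is what
   try_forward_edges below does.  */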

/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
             INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
                                                cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
                                                    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}

/* Attempt to prove that the operation is a NOOP using CSElib, or mark the
   effect on a register.  Used by jump threading.  */

static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
         value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
        {
          dest = XEXP (exp, 0);
          regno = REGNO (dest);
          if (HARD_REGISTER_NUM_P (regno))
            bitmap_clear_range (nonequal, regno,
                                hard_regno_nregs[regno][GET_MODE (dest)]);
          else
            bitmap_clear_bit (nonequal, regno);
        }
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
        return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
        return false;
      if (!REG_P (dest))
        return true;
      regno = REGNO (dest);
      if (HARD_REGISTER_NUM_P (regno))
        bitmap_set_range (nonequal, regno,
                          hard_regno_nregs[regno][GET_MODE (dest)]);
      else
        bitmap_set_bit (nonequal, regno);
      return false;

    default:
      return false;
    }
}
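
/* For example (a hypothetical sketch): given (set (reg 60) (reg 61)),
   mark_effect records reg 60 in NONEQUAL, since after following the
   threaded edge its value may no longer match; given (clobber (reg 60))
   it clears reg 60 from NONEQUAL instead, because a clobbered value is
   dead and need not match.  A SET whose source is already known by
   cselib to equal its destination is a NOOP and marks nothing.  */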

/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */
static int
mentions_nonequal_regs (rtx *x, void *data)
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
        return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int n = hard_regno_nregs[regno][GET_MODE (*x)];
          while (--n > 0)
            if (REGNO_REG_SET_P (nonequal, regno + n))
              return 1;
        }
    }
  return 0;
}

/* Attempt to prove that the basic block B will have no side effects and
   always continues in the same edge if reached via E.  Return the edge
   if it exists, NULL otherwise.  */

static edge
thread_jump (edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
        b->flags |= BB_NONTHREADABLE_BLOCK;
        return NULL;
      }

  cselib_init (0);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          rtx pat = PATTERN (insn);

          if (GET_CODE (pat) == PARALLEL)
            {
              for (i = 0; i < (unsigned) XVECLEN (pat, 0); i++)
                failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
            }
          else
            failed |= mark_effect (pat, nonequal);
        }

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}
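
/* As an illustration (a hypothetical sketch): if the block reached via E
   ends in "if (x > 0) goto B" and block B ends in "if (x != 0) goto L",
   then whenever B is entered along that edge the second condition is
   already known to be true, since GT dominates NE.  Provided B itself is
   a NOOP, the edge can be threaded directly to L, bypassing B's
   comparison entirely.  */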

/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      int counter, goto_locus;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;

      /* Skip complex edges because we don't know how to update them.

         Still handle fallthru edges, as we can succeed in forwarding a
         fallthru edge to the same place as the branch edge of a
         conditional branch and turn the conditional branch into an
         unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
        {
          ei_next (&ei);
          continue;
        }

      target = first = e->dest;
      counter = NUM_FIXED_BLOCKS;
      goto_locus = e->goto_locus;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
         up jumps that cross between hot/cold sections.

         Basic block partitioning may result in some jumps that appear
         to be optimizable (or blocks that appear to be mergeable), but which
         really must be left untouched (they are required to make it safely
         across partition boundaries).  See the comments at the top of
         bb-reorder.c:partition_hot_cold_basic_blocks for complete
         details.  */

      if (first != EXIT_BLOCK_PTR
          && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
        return false;

      while (counter < n_basic_blocks)
        {
          basic_block new_target = NULL;
          bool new_target_threaded = false;
          may_thread |= (target->flags & BB_MODIFIED) != 0;

          if (FORWARDER_BLOCK_P (target)
              && !(single_succ_edge (target)->flags & EDGE_CROSSING)
              && single_succ (target) != EXIT_BLOCK_PTR)
            {
              /* Bypass trivial infinite loops.  */
              new_target = single_succ (target);
              if (target == new_target)
                counter = n_basic_blocks;
              else if (!optimize)
                {
                  /* When not optimizing, ensure that edges or forwarder
                     blocks with different locus are not optimized out.  */
                  int new_locus = single_succ_edge (target)->goto_locus;
                  int locus = goto_locus;

                  if (new_locus && locus && !locator_eq (new_locus, locus))
                    new_target = NULL;
                  else
                    {
                      rtx last;

                      if (new_locus)
                        locus = new_locus;

                      last = BB_END (target);
                      if (DEBUG_INSN_P (last))
                        last = prev_nondebug_insn (last);

                      new_locus = last && INSN_P (last)
                                  ? INSN_LOCATOR (last) : 0;

                      if (new_locus && locus && !locator_eq (new_locus, locus))
                        new_target = NULL;
                      else
                        {
                          if (new_locus)
                            locus = new_locus;

                          goto_locus = locus;
                        }
                    }
                }
            }

          /* Allow threading only over one edge at a time to simplify
             updating of probabilities.  */
          else if ((mode & CLEANUP_THREADING) && may_thread)
            {
              edge t = thread_jump (e, target);
              if (t)
                {
                  if (!threaded_edges)
                    threaded_edges = XNEWVEC (edge, n_basic_blocks);
                  else
                    {
                      int i;

                      /* Detect an infinite loop across blocks not
                         including the start block.  */
                      for (i = 0; i < nthreaded_edges; ++i)
                        if (threaded_edges[i] == t)
                          break;
                      if (i < nthreaded_edges)
                        {
                          counter = n_basic_blocks;
                          break;
                        }
                    }

                  /* Detect an infinite loop across the start block.  */
                  if (t->dest == b)
                    break;

                  gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
                  threaded_edges[nthreaded_edges++] = t;

                  new_target = t->dest;
                  new_target_threaded = true;
                }
            }

          if (!new_target)
            break;

          counter++;
          target = new_target;
          threaded |= new_target_threaded;
        }

      if (counter >= n_basic_blocks)
        {
          if (dump_file)
            fprintf (dump_file, "Infinite loop in BB %i.\n",
                     target->index);
        }
      else if (target == first)
        ; /* We didn't do anything.  */
      else
        {
          /* Save the values now, as the edge may get removed.  */
          gcov_type edge_count = e->count;
          int edge_probability = e->probability;
          int edge_frequency;
          int n = 0;

          e->goto_locus = goto_locus;

          /* Don't force if target is exit block.  */
          if (threaded && target != EXIT_BLOCK_PTR)
            {
              notice_new_block (redirect_edge_and_branch_force (e, target));
              if (dump_file)
                fprintf (dump_file, "Conditionals threaded.\n");
            }
          else if (!redirect_edge_and_branch (e, target))
            {
              if (dump_file)
                fprintf (dump_file,
                         "Forwarding edge %i->%i to %i failed.\n",
                         b->index, e->dest->index, target->index);
              ei_next (&ei);
              continue;
            }

          /* We successfully forwarded the edge.  Now update profile
             data: for each edge we traversed in the chain, remove
             the original edge's execution count.  */
          edge_frequency = ((edge_probability * b->frequency
                             + REG_BR_PROB_BASE / 2)
                            / REG_BR_PROB_BASE);

          do
            {
              edge t;

              if (!single_succ_p (first))
                {
                  gcc_assert (n < nthreaded_edges);
                  t = threaded_edges [n++];
                  gcc_assert (t->src == first);
                  update_bb_profile_for_threading (first, edge_frequency,
                                                   edge_count, t);
                  update_br_prob_note (first);
                }
              else
                {
                  first->count -= edge_count;
                  if (first->count < 0)
                    first->count = 0;
                  first->frequency -= edge_frequency;
                  if (first->frequency < 0)
                    first->frequency = 0;
                  /* It is possible that as the result of
                     threading we've removed the edge, as it is
                     threaded to the fallthru edge.  Avoid
                     getting out of sync.  */
                  if (n < nthreaded_edges
                      && first == threaded_edges [n]->src)
                    n++;
                  t = single_succ_edge (first);
                }

              t->count -= edge_count;
              if (t->count < 0)
                t->count = 0;
              first = t->dest;
            }
          while (first != target);

          changed = true;
          continue;
        }
      ei_next (&ei);
    }

  free (threaded_edges);
  return changed;
}

/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  df_set_bb_dirty (a);

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
             a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B temporarily add the jump table
     to block B so that it will also be moved to the correct location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
             b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;

      /* Protect the loop latches.  */
      if (current_loops && c->loop_father->latch == c)
        return NULL;

      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
        fprintf (dump_file, "Merged %d and %d without moving.\n",
                 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
         eliminated by edge redirection instead.  One exception might have
         been if B is a forwarder block and C has no fallthru edge, but
         that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
        return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
         and loop notes.  This is done by squeezing out all the notes
         and leaving them there to lie.  Not ideal, but functional.  */

      tmp_edge = find_fallthru_edge (c->succs);
      c_has_outgoing_fallthru = (tmp_edge != NULL);

      tmp_edge = find_fallthru_edge (b->preds);
      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
        next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
         not have an outgoing fallthru, then it can be moved
         immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
        {
          merge_blocks_move_successor_nojumps (b, c);
          return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
        }

      /* If B does not have an incoming fallthru, then it can be moved
         immediately before C without introducing or modifying jumps.
         C cannot be the first block, so we do not have to worry about
         accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
        {
          basic_block bb;

          if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
            return NULL;
          bb = force_nonfallthru (b_fallthru_edge);
          if (bb)
            notice_new_block (bb);
        }

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
    }

  return NULL;
}

/* Remove the memory attributes of the MEM expressions X and Y
   that are not equal.  */

void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      if (! MEM_ATTRS (x))
        MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
        MEM_ATTRS (x) = 0;
      else
        {
          HOST_WIDE_INT mem_size;

          if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
            {
              set_mem_alias_set (x, 0);
              set_mem_alias_set (y, 0);
            }

          if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
            {
              set_mem_expr (x, 0);
              set_mem_expr (y, 0);
              clear_mem_offset (x);
              clear_mem_offset (y);
            }
          else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
                   || (MEM_OFFSET_KNOWN_P (x)
                       && MEM_OFFSET (x) != MEM_OFFSET (y)))
            {
              clear_mem_offset (x);
              clear_mem_offset (y);
            }

          if (MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y))
            {
              mem_size = MAX (MEM_SIZE (x), MEM_SIZE (y));
              set_mem_size (x, mem_size);
              set_mem_size (y, mem_size);
            }
          else
            {
              clear_mem_size (x);
              clear_mem_size (y);
            }

          set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
          set_mem_align (y, MEM_ALIGN (x));
        }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return;

          for (j = 0; j < XVECLEN (x, i); j++)
            merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

          break;

        case 'e':
          merge_memattrs (XEXP (x, i), XEXP (y, i));
        }
    }
  return;
}
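
/* For instance (a hypothetical sketch): when cross-jumping two matching
   MEMs whose recorded alias sets differ, both alias sets are reset to 0,
   and the alignment of each is lowered to the minimum of the two, so the
   merged insn carries only attributes that are valid on every path
   reaching it.  */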

/* Check whether patterns P1 and P2 are equivalent, apart from the possibly
   different single sets S1 and S2.  */

static bool
equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
{
  int i;
  rtx e1, e2;

  if (p1 == s1 && p2 == s2)
    return true;

  if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
    return false;

  if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
    return false;

  for (i = 0; i < XVECLEN (p1, 0); i++)
    {
      e1 = XVECEXP (p1, 0, i);
      e2 = XVECEXP (p2, 0, i);
      if (e1 == s1 && e2 == s2)
        continue;
      if (reload_completed
          ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
        continue;

      return false;
    }

  return true;
}

/* Examine register notes on I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

static enum replace_direction
can_replace_by (rtx i1, rtx i2)
{
  rtx s1, s2, d1, d2, src1, src2, note1, note2;
  bool c1, c2;

  /* Check for 2 sets.  */
  s1 = single_set (i1);
  s2 = single_set (i2);
  if (s1 == NULL_RTX || s2 == NULL_RTX)
    return dir_none;

  /* Check that the 2 sets set the same dest.  */
  d1 = SET_DEST (s1);
  d2 = SET_DEST (s2);
  if (!(reload_completed
        ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
    return dir_none;

  /* Find identical reg_equiv or reg_equal note, which implies that the 2 sets
     set dest to the same value.  */
  note1 = find_reg_equal_equiv_note (i1);
  note2 = find_reg_equal_equiv_note (i2);
  if (!note1 || !note2 || !rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0))
      || !CONST_INT_P (XEXP (note1, 0)))
    return dir_none;

  if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
    return dir_none;

  /* Although the 2 sets set dest to the same value, we cannot replace
       (set (dest) (const_int))
     by
       (set (dest) (reg))
     because we don't know if the reg is live and has the same value at the
     location of replacement.  */
  src1 = SET_SRC (s1);
  src2 = SET_SRC (s2);
  c1 = CONST_INT_P (src1);
  c2 = CONST_INT_P (src2);
  if (c1 && c2)
    return dir_both;
  else if (c2)
    return dir_forward;
  else if (c1)
    return dir_backward;

  return dir_none;
}
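
/* For example (a hypothetical sketch): with

     i1: (set (reg 0) (reg 1))        with REG_EQUAL (const_int 5)
     i2: (set (reg 0) (const_int 5))

   I1 can be replaced by I2 (dir_forward), because loading the constant
   is always safe, but not vice versa, since reg 1 need not be live or
   hold the value 5 at I2's location.  If both sources were
   (const_int 5), either insn could replace the other (dir_both).  */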

/* Merges directions A and B.  */

static enum replace_direction
merge_dir (enum replace_direction a, enum replace_direction b)
{
  /* Implements the following table:
        |bo fw bw no
     ---+-----------
     bo |bo fw bw no
     fw |-- fw no no
     bw |-- -- bw no
     no |-- -- -- no.  */

  if (a == b)
    return a;

  if (a == dir_both)
    return b;
  if (b == dir_both)
    return a;

  return dir_none;
}

/* Examine I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

static enum replace_direction
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return dir_none;

  /* __builtin_unreachable() may lead to empty blocks (ending with
     NOTE_INSN_BASIC_BLOCK).  They may be crossjumped.  */
  if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
    return dir_both;

  /* ??? Do not allow cross-jumping between different stack levels.  */
  p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
  p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
  if (p1 && p2)
    {
      p1 = XEXP (p1, 0);
      p2 = XEXP (p2, 0);
      if (!rtx_equal_p (p1, p2))
        return dir_none;

      /* ??? Worse, this adjustment had better be constant lest we
         have differing incoming stack levels.  */
      if (!frame_pointer_needed
          && find_args_size_adjust (i1) == HOST_WIDE_INT_MIN)
        return dir_none;
    }
  else if (p1 || p2)
    return dir_none;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return dir_none;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.

     Also check for identical exception regions.  */

  if (CALL_P (i1))
    {
      /* Ensure the same EH region.  */
      rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
      rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);

      if (!n1 && n2)
        return dir_none;

      if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
        return dir_none;

      if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
                        CALL_INSN_FUNCTION_USAGE (i2))
          || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
        return dir_none;
    }

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
         death notes must also be compared before it is certain that
         the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      if (!hard_reg_set_equal_p (i1_regset, i2_regset))
        return dir_none;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return dir_both;

  return can_replace_by (i1, i2);
}

/* When comparing insns I1 and I2 in flow_find_cross_jump or
   flow_find_head_matching_sequence, ensure the notes match.  */

static void
merge_notes (rtx i1, rtx i2)
{
  /* If the merged insns have different REG_EQUAL notes, then
     remove them.  */
  rtx equiv1 = find_reg_equal_equiv_note (i1);
  rtx equiv2 = find_reg_equal_equiv_note (i2);

  if (equiv1 && !equiv2)
    remove_note (i1, equiv1);
  else if (!equiv1 && equiv2)
    remove_note (i2, equiv2);
  else if (equiv1 && equiv2
           && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
    {
      remove_note (i1, equiv1);
      remove_note (i2, equiv2);
    }
}
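
/* For example (a hypothetical sketch): if I1 carries
   REG_EQUAL (const_int 5) and I2 carries REG_EQUAL (const_int 7), the
   insns themselves may still match, but the notes would contradict each
   other at the merged location, so both notes are dropped.  A note
   present on only one insn is likewise dropped.  */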

/* Walk from I1 in BB1 backward to the next non-debug insn, and return the
   resulting insn in I1, and the corresponding bb in BB1.  At the head of a
   bb, if there is a predecessor bb that reaches this bb via fallthru, and
   FOLLOW_FALLTHRU, walk further in the predecessor bb and register this in
   DID_FALLTHRU.  Otherwise, stop at the head of the bb.  */

static void
walk_to_nondebug_insn (rtx *i1, basic_block *bb1, bool follow_fallthru,
                       bool *did_fallthru)
{
  edge fallthru;

  *did_fallthru = false;

  /* Ignore notes.  */
  while (!NONDEBUG_INSN_P (*i1))
    {
      if (*i1 != BB_HEAD (*bb1))
        {
          *i1 = PREV_INSN (*i1);
          continue;
        }

      if (!follow_fallthru)
        return;

      fallthru = find_fallthru_edge ((*bb1)->preds);
      if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FUNCTION (cfun)
          || !single_succ_p (fallthru->src))
        return;

      *bb1 = fallthru->src;
      *i1 = BB_END (*bb1);
      *did_fallthru = true;
    }
}

/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of insns that are either equivalent, or allow forward or backward
   replacement.  Store the first insns for that sequence in *F1 and *F2 and
   return the sequence length.

   DIR_P indicates the allowed replacement direction on function entry, and
   the actual replacement direction on function exit.  If NULL, only equivalent
   sequences are allowed.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */

int
flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx *f1, rtx *f2,
                      enum replace_direction *dir_p)
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;
  rtx p1;
  enum replace_direction dir, last_dir, afterlast_dir;
  bool follow_fallthru, did_fallthru;

  if (dir_p)
    dir = *dir_p;
  else
    dir = dir_both;
  afterlast_dir = dir;
  last_dir = afterlast_dir;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for unconditional jump as insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
        ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* In the following example, we can replace all jumps to C by jumps to A.

         This removes 4 duplicate insns.

         [bb A] insn1        [bb C] insn1
                insn2               insn2
         [bb B] insn3               insn3
                insn4               insn4
                jump_insn           jump_insn

         We could also replace all jumps to A by jumps to C, but that leaves B
         alive, and removes only 2 duplicate insns.  In a subsequent crossjump
         step, all jumps to B would be replaced with jumps to the middle of C,
         achieving the same result with more effort.
         So we allow only the first possibility, which means that we don't
         allow fallthru in the block that's being replaced.  */

      follow_fallthru = dir_p && dir != dir_forward;
      walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
      if (did_fallthru)
        dir = dir_backward;

      follow_fallthru = dir_p && dir != dir_backward;
      walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
      if (did_fallthru)
        dir = dir_forward;

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
        break;

      dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
      if (dir == dir_none || (!dir_p && dir != dir_both))
        break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
        {
          merge_notes (i1, i2);

          afterlast1 = last1, afterlast2 = last2;
          last1 = i1, last2 = i2;
          afterlast_dir = last_dir;
          last_dir = dir;
          p1 = PATTERN (i1);
          if (!(GET_CODE (p1) == USE || GET_CODE (p1) == CLOBBER))
            ninsns++;
        }

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      bb1 = BLOCK_FOR_INSN (last1);
      while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
        last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
        last1 = PREV_INSN (last1);

      bb2 = BLOCK_FOR_INSN (last2);
      while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
        last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
        last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  if (dir_p)
    *dir_p = last_dir;
  return ninsns;
}

/* Like flow_find_cross_jump, except start looking for a matching sequence from
   the head of the two blocks.  Do not include jumps at the end.
   If STOP_AFTER is nonzero, stop after finding that many matching
   instructions.  */

int
flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx *f1,
                                  rtx *f2, int stop_after)
{
  rtx i1, i2, last1, last2, beforelast1, beforelast2;
  int ninsns = 0;
  edge e;
  edge_iterator ei;
  int nehedges1 = 0, nehedges2 = 0;

  FOR_EACH_EDGE (e, ei, bb1->succs)
    if (e->flags & EDGE_EH)
      nehedges1++;
  FOR_EACH_EDGE (e, ei, bb2->succs)
    if (e->flags & EDGE_EH)
      nehedges2++;

  i1 = BB_HEAD (bb1);
  i2 = BB_HEAD (bb2);
  last1 = beforelast1 = last2 = beforelast2 = NULL_RTX;

  while (true)
    {
      /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG.  */
      while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
        {
          if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
            break;
          i1 = NEXT_INSN (i1);
        }

      while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
        {
          if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
            break;
          i2 = NEXT_INSN (i2);
        }

      if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
          || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
        break;

      if (NOTE_P (i1) || NOTE_P (i2)
          || JUMP_P (i1) || JUMP_P (i2))
        break;

      /* A sanity check to make sure we're not merging insns with different
         effects on EH.  If only one of them ends a basic block, it shouldn't
         have an EH edge; if both end a basic block, there should be the same
         number of EH edges.  */
      if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
           && nehedges1 > 0)
          || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
              && nehedges2 > 0)
          || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
              && nehedges1 != nehedges2))
        break;

      if (old_insns_match_p (0, i1, i2) != dir_both)
        break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
        {
          merge_notes (i1, i2);

          beforelast1 = last1, beforelast2 = last2;
          last1 = i1, last2 = i2;
          ninsns++;
        }

      if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
          || (stop_after > 0 && ninsns == stop_after))
        break;

      i1 = NEXT_INSN (i1);
      i2 = NEXT_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow a compare to be shared by cross-jumping unless the insn
     after the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && sets_cc0_p (last1))
    last1 = beforelast1, last2 = beforelast2, ninsns--;
#endif

  if (ninsns)
    {
      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
1481
c2fc5456
R
1482/* Return true iff outgoing edges of BB1 and BB2 match, together with
1483 the branch instruction. This means that if we commonize the control
1484 flow before end of the basic block, the semantic remains unchanged.
402209ff
JH
1485
1486 We may assume that there exists one edge with a common destination. */
1487
1488static bool
c2fc5456 1489outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
402209ff 1490{
0dd0e980
JH
1491 int nehedges1 = 0, nehedges2 = 0;
1492 edge fallthru1 = 0, fallthru2 = 0;
1493 edge e1, e2;
628f6a4e 1494 edge_iterator ei;
0dd0e980 1495
484db665
BS
1496 /* If we performed shrink-wrapping, edges to the EXIT_BLOCK_PTR can
1497 only be distinguished for JUMP_INSNs. The two paths may differ in
1498 whether they went through the prologue. Sibcalls are fine, we know
1499 that we either didn't need or inserted an epilogue before them. */
1500 if (crtl->shrink_wrapped
1501 && single_succ_p (bb1) && single_succ (bb1) == EXIT_BLOCK_PTR
1502 && !JUMP_P (BB_END (bb1))
1503 && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
1504 return false;
1505
c04cf67b
RH
1506 /* If BB1 has only one successor, we may be looking at either an
1507 unconditional jump, or a fake edge to exit. */
c5cbcccf
ZD
1508 if (single_succ_p (bb1)
1509 && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
4b4bf941 1510 && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
c5cbcccf
ZD
1511 return (single_succ_p (bb2)
1512 && (single_succ_edge (bb2)->flags
1513 & (EDGE_COMPLEX | EDGE_FAKE)) == 0
4b4bf941 1514 && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
402209ff
JH
1515
1516 /* Match conditional jumps - this may get tricky when fallthru and branch
1517 edges are crossed. */
628f6a4e 1518 if (EDGE_COUNT (bb1->succs) == 2
a813c111
SB
1519 && any_condjump_p (BB_END (bb1))
1520 && onlyjump_p (BB_END (bb1)))
402209ff 1521 {
c2fc5456
R
1522 edge b1, f1, b2, f2;
1523 bool reverse, match;
1524 rtx set1, set2, cond1, cond2;
1525 enum rtx_code code1, code2;
1526
628f6a4e 1527 if (EDGE_COUNT (bb2->succs) != 2
a813c111
SB
1528 || !any_condjump_p (BB_END (bb2))
1529 || !onlyjump_p (BB_END (bb2)))
0a2ed1f1 1530 return false;
c2fc5456
R
1531
1532 b1 = BRANCH_EDGE (bb1);
1533 b2 = BRANCH_EDGE (bb2);
1534 f1 = FALLTHRU_EDGE (bb1);
1535 f2 = FALLTHRU_EDGE (bb2);
1536
1537 /* Get around possible forwarders on fallthru edges. Other cases
c22cacf3 1538 should be optimized out already. */
c2fc5456
R
1539 if (FORWARDER_BLOCK_P (f1->dest))
1540 f1 = single_succ_edge (f1->dest);
1541
1542 if (FORWARDER_BLOCK_P (f2->dest))
1543 f2 = single_succ_edge (f2->dest);
1544
1545 /* To simplify use of this function, return false if there are
1546 unneeded forwarder blocks. These will get eliminated later
1547 during cleanup_cfg. */
1548 if (FORWARDER_BLOCK_P (f1->dest)
1549 || FORWARDER_BLOCK_P (f2->dest)
1550 || FORWARDER_BLOCK_P (b1->dest)
1551 || FORWARDER_BLOCK_P (b2->dest))
1552 return false;
1553
1554 if (f1->dest == f2->dest && b1->dest == b2->dest)
1555 reverse = false;
1556 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1557 reverse = true;
1558 else
1559 return false;
1560
1561 set1 = pc_set (BB_END (bb1));
1562 set2 = pc_set (BB_END (bb2));
1563 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1564 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1565 reverse = !reverse;
1566
1567 cond1 = XEXP (SET_SRC (set1), 0);
1568 cond2 = XEXP (SET_SRC (set2), 0);
1569 code1 = GET_CODE (cond1);
1570 if (reverse)
1571 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1572 else
1573 code2 = GET_CODE (cond2);
1574
1575 if (code2 == UNKNOWN)
1576 return false;
1577
1578 /* Verify codes and operands match. */
1579 match = ((code1 == code2
1580 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1581 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1582 || (code1 == swap_condition (code2)
1583 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1584 XEXP (cond2, 0))
1585 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1586 XEXP (cond2, 1))));
1587
1588 /* If we return true, we will join the blocks. Which means that
1589 we will only have one branch prediction bit to work with. Thus
1590 we require the existing branches to have probabilities that are
1591 roughly similar. */
1592 if (match
efd8f750
JH
1593 && optimize_bb_for_speed_p (bb1)
1594 && optimize_bb_for_speed_p (bb2))
c2fc5456
R
1595 {
1596 int prob2;
1597
1598 if (b1->dest == b2->dest)
1599 prob2 = b2->probability;
1600 else
1601 /* Do not use f2 probability as f2 may be forwarded. */
1602 prob2 = REG_BR_PROB_BASE - b2->probability;
1603
1604 /* Fail if the difference in probabilities is greater than 50%.
1605 This rules out two well-predicted branches with opposite
1606 outcomes. */
1607 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1608 {
1609 if (dump_file)
1610 fprintf (dump_file,
1611 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1612 bb1->index, bb2->index, b1->probability, prob2);
1613
1614 return false;
1615 }
1616 }
1617
1618 if (dump_file && match)
1619 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1620 bb1->index, bb2->index);
1621
1622 return match;
402209ff
JH
1623 }
1624
09da1532 1625 /* Generic case - we are seeing a computed jump, table jump or trapping
0dd0e980
JH
1626 instruction. */
1627
39811184
JZ
1628 /* Check whether there are tablejumps in the end of BB1 and BB2.
1629 Return true if they are identical. */
1630 {
1631 rtx label1, label2;
1632 rtx table1, table2;
1633
a813c111
SB
1634 if (tablejump_p (BB_END (bb1), &label1, &table1)
1635 && tablejump_p (BB_END (bb2), &label2, &table2)
39811184
JZ
1636 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1637 {
1638 /* The labels should never be the same rtx. If they really are same
1639 the jump tables are same too. So disable crossjumping of blocks BB1
1640 and BB2 because when deleting the common insns in the end of BB1
6de9cd9a 1641 by delete_basic_block () the jump table would be deleted too. */
4af16369 1642 /* If LABEL2 is referenced in BB1->END do not do anything
39811184
JZ
1643 because we would loose information when replacing
1644 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
a813c111 1645 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
39811184
JZ
1646 {
1647 /* Set IDENTICAL to true when the tables are identical. */
1648 bool identical = false;
1649 rtx p1, p2;
1650
1651 p1 = PATTERN (table1);
1652 p2 = PATTERN (table2);
1653 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1654 {
1655 identical = true;
1656 }
1657 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1658 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1659 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1660 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1661 {
1662 int i;
1663
1664 identical = true;
1665 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1666 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1667 identical = false;
1668 }
1669
c2fc5456 1670 if (identical)
39811184 1671 {
c2fc5456 1672 replace_label_data rr;
39811184
JZ
1673 bool match;
1674
c2fc5456 1675 /* Temporarily replace references to LABEL1 with LABEL2
39811184 1676	       in BB1->END so that we can compare the instructions.  */
c2fc5456
R
1677 rr.r1 = label1;
1678 rr.r2 = label2;
1679 rr.update_label_nuses = false;
1680 for_each_rtx (&BB_END (bb1), replace_label, &rr);
39811184 1681
472c95f5
TV
1682 match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
1683 == dir_both);
c263766c
RH
1684 if (dump_file && match)
1685 fprintf (dump_file,
39811184
JZ
1686 "Tablejumps in bb %i and %i match.\n",
1687 bb1->index, bb2->index);
1688
c2fc5456
R
 1689	    /* Restore the original label in BB1->END because when deleting
1690 a block whose end is a tablejump, the tablejump referenced
1691 from the instruction is deleted too. */
1692 rr.r1 = label2;
1693 rr.r2 = label1;
1694 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1695
39811184
JZ
1696 return match;
1697 }
1698 }
1699 return false;
1700 }
1701 }
39811184 1702
0dd0e980 1703 /* First ensure that the instructions match. There may be many outgoing
39811184 1704 edges so this test is generally cheaper. */
472c95f5 1705 if (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)) != dir_both)
0dd0e980
JH
1706 return false;
1707
1708 /* Search the outgoing edges, ensure that the counts do match, find possible
 1709	     fallthru and exception handling edges since these need more
1710 validation. */
628f6a4e
BE
1711 if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1712 return false;
1713
1714 FOR_EACH_EDGE (e1, ei, bb1->succs)
0dd0e980 1715 {
628f6a4e 1716 e2 = EDGE_SUCC (bb2, ei.index);
c22cacf3 1717
0dd0e980
JH
1718 if (e1->flags & EDGE_EH)
1719 nehedges1++;
5f0d2358 1720
0dd0e980
JH
1721 if (e2->flags & EDGE_EH)
1722 nehedges2++;
5f0d2358 1723
0dd0e980
JH
1724 if (e1->flags & EDGE_FALLTHRU)
1725 fallthru1 = e1;
1726 if (e2->flags & EDGE_FALLTHRU)
1727 fallthru2 = e2;
1728 }
5f0d2358 1729
0dd0e980 1730	  /* If the numbers of edges of various types do not match, fail.  */
628f6a4e 1731 if (nehedges1 != nehedges2
5f0d2358 1732 || (fallthru1 != 0) != (fallthru2 != 0))
0dd0e980
JH
1733 return false;
1734
 1735	  /* Fallthru edges must be forwarded to the same destination.  */
1736 if (fallthru1)
1737 {
1738 basic_block d1 = (forwarder_block_p (fallthru1->dest)
c5cbcccf 1739 ? single_succ (fallthru1->dest): fallthru1->dest);
0dd0e980 1740 basic_block d2 = (forwarder_block_p (fallthru2->dest)
c5cbcccf 1741 ? single_succ (fallthru2->dest): fallthru2->dest);
5f0d2358 1742
0dd0e980
JH
1743 if (d1 != d2)
1744 return false;
1745 }
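  /* Editorial illustration (not part of the original source): the
     comparison looks through forwarder blocks, so

	bb1 --fallthru--> F --jump--> D
	bb2 --fallthru--> D

     still matches, because F is a forwarder and d1 == d2 == D.  */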
5f0d2358 1746
5f77fbd4
JJ
1747 /* Ensure the same EH region. */
1748 {
a813c111
SB
1749 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1750 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
5f0d2358 1751
5f77fbd4
JJ
1752 if (!n1 && n2)
1753 return false;
1754
1755 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1756 return false;
1757 }
5f0d2358 1758
38109dab
GL
 1759	  /* The same checks as in try_crossjump_to_edge.  This is required for the
 1760	     RTL version of sequence abstraction.  */
1761 FOR_EACH_EDGE (e1, ei, bb2->succs)
1762 {
1763 edge e2;
1764 edge_iterator ei;
1765 basic_block d1 = e1->dest;
1766
1767 if (FORWARDER_BLOCK_P (d1))
1768 d1 = EDGE_SUCC (d1, 0)->dest;
1769
1770 FOR_EACH_EDGE (e2, ei, bb1->succs)
1771 {
1772 basic_block d2 = e2->dest;
1773 if (FORWARDER_BLOCK_P (d2))
1774 d2 = EDGE_SUCC (d2, 0)->dest;
1775 if (d1 == d2)
1776 break;
1777 }
1778
1779 if (!e2)
1780 return false;
1781 }
1782
0dd0e980 1783 return true;
402209ff
JH
1784}
1785
38109dab
GL
 1786/* Returns true if basic block BB has a preserved label.  */
1787
1788static bool
1789block_has_preserve_label (basic_block bb)
1790{
1791 return (bb
1792 && block_label (bb)
1793 && LABEL_PRESERVE_P (block_label (bb)));
1794}
1795
402209ff
JH
1796/* E1 and E2 are edges with the same destination block. Search their
1797 predecessors for common code. If found, redirect control flow from
bf22920b
TV
1798 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
1799 or the other way around (dir_backward). DIR specifies the allowed
1800 replacement direction. */
402209ff
JH
1801
1802static bool
bf22920b
TV
1803try_crossjump_to_edge (int mode, edge e1, edge e2,
1804 enum replace_direction dir)
402209ff 1805{
c2fc5456 1806 int nmatch;
402209ff 1807 basic_block src1 = e1->src, src2 = e2->src;
39587bb9 1808 basic_block redirect_to, redirect_from, to_remove;
823918ae 1809 basic_block osrc1, osrc2, redirect_edges_to, tmp;
c2fc5456 1810 rtx newpos1, newpos2;
402209ff 1811 edge s;
628f6a4e 1812 edge_iterator ei;
c2fc5456
R
1813
1814 newpos1 = newpos2 = NULL_RTX;
6de9cd9a 1815
750054a2 1816 /* If we have partitioned hot/cold basic blocks, it is a bad idea
c22cacf3 1817 to try this optimization.
8e8d5162
CT
1818
1819 Basic block partitioning may result in some jumps that appear to
c22cacf3
MS
1820 be optimizable (or blocks that appear to be mergeable), but which really
1821 must be left untouched (they are required to make it safely across
1822 partition boundaries). See the comments at the top of
8e8d5162 1823 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
750054a2 1824
ef4375b2 1825 if (flag_reorder_blocks_and_partition && reload_completed)
750054a2
CT
1826 return false;
1827
402209ff
JH
1828 /* Search backward through forwarder blocks. We don't need to worry
1829 about multiple entry or chained forwarders, as they will be optimized
1830 away. We do this to look past the unconditional jump following a
1831 conditional jump that is required due to the current CFG shape. */
c5cbcccf 1832 if (single_pred_p (src1)
635559ab 1833 && FORWARDER_BLOCK_P (src1))
c5cbcccf 1834 e1 = single_pred_edge (src1), src1 = e1->src;
5f0d2358 1835
c5cbcccf 1836 if (single_pred_p (src2)
635559ab 1837 && FORWARDER_BLOCK_P (src2))
c5cbcccf 1838 e2 = single_pred_edge (src2), src2 = e2->src;
402209ff
JH
1839
1840 /* Nothing to do if we reach ENTRY, or a common source block. */
1841 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1842 return false;
1843 if (src1 == src2)
1844 return false;
1845
 1846	  /* Seeing more than one forwarder block would confuse us later...  */
635559ab 1847 if (FORWARDER_BLOCK_P (e1->dest)
c5cbcccf 1848 && FORWARDER_BLOCK_P (single_succ (e1->dest)))
402209ff 1849 return false;
5f0d2358 1850
635559ab 1851 if (FORWARDER_BLOCK_P (e2->dest)
c5cbcccf 1852 && FORWARDER_BLOCK_P (single_succ (e2->dest)))
402209ff
JH
1853 return false;
1854
1855 /* Likewise with dead code (possibly newly created by the other optimizations
1856 of cfg_cleanup). */
628f6a4e 1857 if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
402209ff
JH
1858 return false;
1859
402209ff 1860 /* Look for the common insn sequence, part the first ... */
c2fc5456 1861 if (!outgoing_edges_match (mode, src1, src2))
402209ff
JH
1862 return false;
1863
1864 /* ... and part the second. */
472c95f5 1865 nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);
12183e0f 1866
823918ae
TV
1867 osrc1 = src1;
1868 osrc2 = src2;
1869 if (newpos1 != NULL_RTX)
1870 src1 = BLOCK_FOR_INSN (newpos1);
1871 if (newpos2 != NULL_RTX)
1872 src2 = BLOCK_FOR_INSN (newpos2);
1873
bf22920b
TV
1874 if (dir == dir_backward)
1875 {
1876#define SWAP(T, X, Y) do { T tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
1877 SWAP (basic_block, osrc1, osrc2);
1878 SWAP (basic_block, src1, src2);
1879 SWAP (edge, e1, e2);
1880 SWAP (rtx, newpos1, newpos2);
1881#undef SWAP
1882 }
1883
12183e0f
PH
1884 /* Don't proceed with the crossjump unless we found a sufficient number
1885 of matching instructions or the 'from' block was totally matched
1886 (such that its predecessors will hopefully be redirected and the
1887 block removed). */
c2fc5456
R
1888 if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1889 && (newpos1 != BB_HEAD (src1)))
7d22e898 1890 return false;
402209ff 1891
75c40d56 1892	  /* Avoid deleting a preserved label when redirecting ABNORMAL edges.  */
38109dab
GL
1893 if (block_has_preserve_label (e1->dest)
1894 && (e1->flags & EDGE_ABNORMAL))
1895 return false;
1896
39811184
JZ
1897 /* Here we know that the insns in the end of SRC1 which are common with SRC2
1898 will be deleted.
1899 If we have tablejumps in the end of SRC1 and SRC2
1900 they have been already compared for equivalence in outgoing_edges_match ()
1901 so replace the references to TABLE1 by references to TABLE2. */
1902 {
1903 rtx label1, label2;
1904 rtx table1, table2;
1905
823918ae
TV
1906 if (tablejump_p (BB_END (osrc1), &label1, &table1)
1907 && tablejump_p (BB_END (osrc2), &label2, &table2)
39811184
JZ
1908 && label1 != label2)
1909 {
4af16369 1910 replace_label_data rr;
39811184
JZ
1911 rtx insn;
1912
1913 /* Replace references to LABEL1 with LABEL2. */
1914 rr.r1 = label1;
1915 rr.r2 = label2;
4af16369 1916 rr.update_label_nuses = true;
39811184
JZ
1917 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1918 {
1919 /* Do not replace the label in SRC1->END because when deleting
1920 a block whose end is a tablejump, the tablejump referenced
1921 from the instruction is deleted too. */
823918ae 1922 if (insn != BB_END (osrc1))
39811184
JZ
1923 for_each_rtx (&insn, replace_label, &rr);
1924 }
1925 }
1926 }
10d6c0d0 1927
b604fe9b
SB
1928 /* Avoid splitting if possible. We must always split when SRC2 has
1929 EH predecessor edges, or we may end up with basic blocks with both
1930 normal and EH predecessor edges. */
c2fc5456 1931 if (newpos2 == BB_HEAD (src2)
b604fe9b 1932 && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
402209ff
JH
1933 redirect_to = src2;
1934 else
1935 {
c2fc5456 1936 if (newpos2 == BB_HEAD (src2))
b604fe9b
SB
1937 {
1938 /* Skip possible basic block header. */
c2fc5456
R
1939 if (LABEL_P (newpos2))
1940 newpos2 = NEXT_INSN (newpos2);
b5b8b0ac
AO
1941 while (DEBUG_INSN_P (newpos2))
1942 newpos2 = NEXT_INSN (newpos2);
c2fc5456
R
1943 if (NOTE_P (newpos2))
1944 newpos2 = NEXT_INSN (newpos2);
b5b8b0ac
AO
1945 while (DEBUG_INSN_P (newpos2))
1946 newpos2 = NEXT_INSN (newpos2);
b604fe9b
SB
1947 }
1948
c263766c
RH
1949 if (dump_file)
1950 fprintf (dump_file, "Splitting bb %i before %i insns\n",
0b17ab2f 1951 src2->index, nmatch);
c2fc5456 1952 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
402209ff
JH
1953 }
1954
c263766c 1955 if (dump_file)
c2fc5456
R
1956 fprintf (dump_file,
1957 "Cross jumping from bb %i to bb %i; %i common insns\n",
1958 src1->index, src2->index, nmatch);
402209ff 1959
6fc0bb99 1960 /* We may have some registers visible through the block. */
6fb5fa3c 1961 df_set_bb_dirty (redirect_to);
402209ff 1962
823918ae
TV
1963 if (osrc2 == src2)
1964 redirect_edges_to = redirect_to;
1965 else
1966 redirect_edges_to = osrc2;
1967
402209ff 1968 /* Recompute the frequencies and counts of outgoing edges. */
823918ae 1969 FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
402209ff
JH
1970 {
1971 edge s2;
628f6a4e 1972 edge_iterator ei;
402209ff
JH
1973 basic_block d = s->dest;
1974
635559ab 1975 if (FORWARDER_BLOCK_P (d))
c5cbcccf 1976 d = single_succ (d);
5f0d2358 1977
628f6a4e 1978 FOR_EACH_EDGE (s2, ei, src1->succs)
402209ff
JH
1979 {
1980 basic_block d2 = s2->dest;
635559ab 1981 if (FORWARDER_BLOCK_P (d2))
c5cbcccf 1982 d2 = single_succ (d2);
402209ff
JH
1983 if (d == d2)
1984 break;
1985 }
5f0d2358 1986
402209ff
JH
1987 s->count += s2->count;
1988
1989 /* Take care to update possible forwarder blocks. We verified
c22cacf3
MS
1990 that there is no more than one in the chain, so we can't run
 1991	         into an infinite loop.  */
635559ab 1992 if (FORWARDER_BLOCK_P (s->dest))
402209ff 1993 {
c5cbcccf 1994 single_succ_edge (s->dest)->count += s2->count;
402209ff
JH
1995 s->dest->count += s2->count;
1996 s->dest->frequency += EDGE_FREQUENCY (s);
1997 }
5f0d2358 1998
635559ab 1999 if (FORWARDER_BLOCK_P (s2->dest))
402209ff 2000 {
c5cbcccf
ZD
2001 single_succ_edge (s2->dest)->count -= s2->count;
2002 if (single_succ_edge (s2->dest)->count < 0)
2003 single_succ_edge (s2->dest)->count = 0;
402209ff
JH
2004 s2->dest->count -= s2->count;
2005 s2->dest->frequency -= EDGE_FREQUENCY (s);
b446e5a2
JH
2006 if (s2->dest->frequency < 0)
2007 s2->dest->frequency = 0;
2008 if (s2->dest->count < 0)
2009 s2->dest->count = 0;
402209ff 2010 }
5f0d2358 2011
823918ae 2012 if (!redirect_edges_to->frequency && !src1->frequency)
402209ff
JH
2013 s->probability = (s->probability + s2->probability) / 2;
2014 else
5f0d2358 2015 s->probability
823918ae 2016 = ((s->probability * redirect_edges_to->frequency +
5f0d2358 2017 s2->probability * src1->frequency)
823918ae 2018 / (redirect_edges_to->frequency + src1->frequency));
402209ff
JH
2019 }
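  /* Editorial worked example for the weighted average above: with
     redirect_edges_to->frequency == 300, src1->frequency == 100,
     s->probability == 8000 and s2->probability == 4000, the merged
     edge gets (8000 * 300 + 4000 * 100) / (300 + 100) == 7000, so the
     more frequent block dominates the combined prediction.  */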
2020
52982a97
EB
2021 /* Adjust count and frequency for the block. An earlier jump
2022 threading pass may have left the profile in an inconsistent
2023 state (see update_bb_profile_for_threading) so we must be
2024 prepared for overflows. */
823918ae
TV
2025 tmp = redirect_to;
2026 do
2027 {
2028 tmp->count += src1->count;
2029 tmp->frequency += src1->frequency;
2030 if (tmp->frequency > BB_FREQ_MAX)
2031 tmp->frequency = BB_FREQ_MAX;
2032 if (tmp == redirect_edges_to)
2033 break;
2034 tmp = find_fallthru_edge (tmp->succs)->dest;
2035 }
2036 while (true);
2037 update_br_prob_note (redirect_edges_to);
402209ff
JH
2038
2039 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
2040
c2fc5456
R
2041 /* Skip possible basic block header. */
2042 if (LABEL_P (newpos1))
2043 newpos1 = NEXT_INSN (newpos1);
b5b8b0ac
AO
2044
2045 while (DEBUG_INSN_P (newpos1))
2046 newpos1 = NEXT_INSN (newpos1);
2047
cd9c1ca8 2048 if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
c2fc5456
R
2049 newpos1 = NEXT_INSN (newpos1);
2050
b5b8b0ac
AO
2051 while (DEBUG_INSN_P (newpos1))
2052 newpos1 = NEXT_INSN (newpos1);
2053
c2fc5456 2054 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
c5cbcccf 2055 to_remove = single_succ (redirect_from);
402209ff 2056
c5cbcccf 2057 redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
f470c378 2058 delete_basic_block (to_remove);
402209ff 2059
39587bb9 2060 update_forwarder_flag (redirect_from);
7cbd12b8
JJ
2061 if (redirect_to != src2)
2062 update_forwarder_flag (src2);
635559ab 2063
402209ff
JH
2064 return true;
2065}
2066
2067/* Search the predecessors of BB for common insn sequences. When found,
2068 share code between them by redirecting control flow. Return true if
 2069	   any changes were made.  */
2070
2071static bool
d329e058 2072try_crossjump_bb (int mode, basic_block bb)
402209ff 2073{
628f6a4e 2074 edge e, e2, fallthru;
402209ff 2075 bool changed;
628f6a4e 2076 unsigned max, ix, ix2;
402209ff 2077
f63d1bf7 2078	  /* Nothing to do if there are not at least two incoming edges.  */
628f6a4e 2079 if (EDGE_COUNT (bb->preds) < 2)
402209ff
JH
2080 return false;
2081
bbcb0c05
SB
2082 /* Don't crossjump if this block ends in a computed jump,
2083 unless we are optimizing for size. */
efd8f750 2084 if (optimize_bb_for_size_p (bb)
bbcb0c05
SB
2085 && bb != EXIT_BLOCK_PTR
2086 && computed_jump_p (BB_END (bb)))
2087 return false;
2088
750054a2
CT
2089 /* If we are partitioning hot/cold basic blocks, we don't want to
2090 mess up unconditional or indirect jumps that cross between hot
c22cacf3
MS
2091 and cold sections.
2092
8e8d5162 2093 Basic block partitioning may result in some jumps that appear to
c22cacf3
MS
2094 be optimizable (or blocks that appear to be mergeable), but which really
2095 must be left untouched (they are required to make it safely across
2096 partition boundaries). See the comments at the top of
8e8d5162
CT
2097 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
2098
c22cacf3
MS
2099 if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
2100 BB_PARTITION (EDGE_PRED (bb, 1)->src)
87c8b4be 2101 || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
750054a2
CT
2102 return false;
2103
402209ff
JH
2104 /* It is always cheapest to redirect a block that ends in a branch to
2105 a block that falls through into BB, as that adds no branches to the
2106 program. We'll try that combination first. */
5f24e0dc
RH
2107 fallthru = NULL;
2108 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
628f6a4e
BE
2109
2110 if (EDGE_COUNT (bb->preds) > max)
2111 return false;
2112
0fd4b31d 2113 fallthru = find_fallthru_edge (bb->preds);
402209ff
JH
2114
2115 changed = false;
0248bceb 2116 for (ix = 0; ix < EDGE_COUNT (bb->preds);)
402209ff 2117 {
0248bceb 2118 e = EDGE_PRED (bb, ix);
628f6a4e 2119 ix++;
402209ff 2120
c1e3e2d9
SB
 2121	      /* As noted above, first try with the fallthru predecessor (or, in
 2122	         cfglayout mode, a fallthru predecessor).  */
402209ff
JH
2123 if (fallthru)
2124 {
2125 /* Don't combine the fallthru edge into anything else.
2126 If there is a match, we'll do it the other way around. */
2127 if (e == fallthru)
2128 continue;
7cf240d5
JH
2129 /* If nothing changed since the last attempt, there is nothing
2130 we can do. */
2131 if (!first_pass
4ec5d4f5
BS
2132 && !((e->src->flags & BB_MODIFIED)
2133 || (fallthru->src->flags & BB_MODIFIED)))
7cf240d5 2134 continue;
402209ff 2135
bf22920b 2136 if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
402209ff
JH
2137 {
2138 changed = true;
628f6a4e 2139 ix = 0;
402209ff
JH
2140 continue;
2141 }
2142 }
2143
2144 /* Non-obvious work limiting check: Recognize that we're going
2145 to call try_crossjump_bb on every basic block. So if we have
2146 two blocks with lots of outgoing edges (a switch) and they
2147 share lots of common destinations, then we would do the
2148 cross-jump check once for each common destination.
2149
2150 Now, if the blocks actually are cross-jump candidates, then
 2151	         all of their destinations will be shared, which means that
 2152	         we only need to check them for cross-jump candidacy once.  We
2153 can eliminate redundant checks of crossjump(A,B) by arbitrarily
2154 choosing to do the check from the block for which the edge
2155 in question is the first successor of A. */
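      /* Editorial example: if switch heads A and B share destinations
	 D1 ... Dn, the pair (A, B) is examined only while processing the
	 destination of A's first successor edge, not once for every Di;
	 the index test in the inner loop below likewise prevents
	 re-checking the same pair as (B, A).  */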
628f6a4e 2156 if (EDGE_SUCC (e->src, 0) != e)
402209ff
JH
2157 continue;
2158
0248bceb 2159 for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
402209ff 2160 {
0248bceb 2161 e2 = EDGE_PRED (bb, ix2);
402209ff
JH
2162
2163 if (e2 == e)
2164 continue;
2165
2166 /* We've already checked the fallthru edge above. */
2167 if (e2 == fallthru)
2168 continue;
2169
402209ff
JH
2170 /* The "first successor" check above only prevents multiple
2171 checks of crossjump(A,B). In order to prevent redundant
2172 checks of crossjump(B,A), require that A be the block
2173 with the lowest index. */
0b17ab2f 2174 if (e->src->index > e2->src->index)
402209ff
JH
2175 continue;
2176
7cf240d5
JH
2177 /* If nothing changed since the last attempt, there is nothing
2178 we can do. */
2179 if (!first_pass
4ec5d4f5
BS
2180 && !((e->src->flags & BB_MODIFIED)
2181 || (e2->src->flags & BB_MODIFIED)))
7cf240d5
JH
2182 continue;
2183
bf22920b
TV
2184 /* Both e and e2 are not fallthru edges, so we can crossjump in either
2185 direction. */
2186 if (try_crossjump_to_edge (mode, e, e2, dir_both))
402209ff
JH
2187 {
2188 changed = true;
628f6a4e 2189 ix = 0;
402209ff
JH
2190 break;
2191 }
2192 }
2193 }
2194
c1e3e2d9
SB
2195 if (changed)
2196 crossjumps_occured = true;
2197
402209ff
JH
2198 return changed;
2199}
2200
4ec5d4f5
BS
2201/* Search the successors of BB for common insn sequences. When found,
2202 share code between them by moving it across the basic block
 2203	   boundary.  Return true if any changes were made.  */
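/* Editorial illustration (a sketch, not part of the original source):

     BB: if (cond) goto A;            BB: x = e;
     A:  x = e; ...            =>         if (cond) goto A;
     B:  x = e; ...                   A:  ...
                                      B:  ...

   The insns common to the heads of all of BB's successors are hoisted
   into BB ahead of its jump, when the liveness checks below allow.  */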
2204
2205static bool
2206try_head_merge_bb (basic_block bb)
2207{
2208 basic_block final_dest_bb = NULL;
2209 int max_match = INT_MAX;
2210 edge e0;
2211 rtx *headptr, *currptr, *nextptr;
2212 bool changed, moveall;
2213 unsigned ix;
2214 rtx e0_last_head, cond, move_before;
2215 unsigned nedges = EDGE_COUNT (bb->succs);
2216 rtx jump = BB_END (bb);
2217 regset live, live_union;
2218
 2219	  /* Nothing to do if there are not at least two outgoing edges.  */
2220 if (nedges < 2)
2221 return false;
2222
2223 /* Don't crossjump if this block ends in a computed jump,
2224 unless we are optimizing for size. */
2225 if (optimize_bb_for_size_p (bb)
2226 && bb != EXIT_BLOCK_PTR
2227 && computed_jump_p (BB_END (bb)))
2228 return false;
2229
2230 cond = get_condition (jump, &move_before, true, false);
2231 if (cond == NULL_RTX)
43052d45
BS
2232 {
2233#ifdef HAVE_cc0
2234 if (reg_mentioned_p (cc0_rtx, jump))
2235 move_before = prev_nonnote_nondebug_insn (jump);
2236 else
2237#endif
2238 move_before = jump;
2239 }
4ec5d4f5
BS
2240
2241 for (ix = 0; ix < nedges; ix++)
2242 if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR)
2243 return false;
2244
2245 for (ix = 0; ix < nedges; ix++)
2246 {
2247 edge e = EDGE_SUCC (bb, ix);
2248 basic_block other_bb = e->dest;
2249
2250 if (df_get_bb_dirty (other_bb))
2251 {
2252 block_was_dirty = true;
2253 return false;
2254 }
2255
2256 if (e->flags & EDGE_ABNORMAL)
2257 return false;
2258
2259 /* Normally, all destination blocks must only be reachable from this
2260 block, i.e. they must have one incoming edge.
2261
2262 There is one special case we can handle, that of multiple consecutive
2263 jumps where the first jumps to one of the targets of the second jump.
2264 This happens frequently in switch statements for default labels.
2265 The structure is as follows:
2266 FINAL_DEST_BB
2267 ....
2268 if (cond) jump A;
2269 fall through
2270 BB
2271 jump with targets A, B, C, D...
2272 A
2273 has two incoming edges, from FINAL_DEST_BB and BB
2274
2275 In this case, we can try to move the insns through BB and into
2276 FINAL_DEST_BB. */
2277 if (EDGE_COUNT (other_bb->preds) != 1)
2278 {
2279 edge incoming_edge, incoming_bb_other_edge;
2280 edge_iterator ei;
2281
2282 if (final_dest_bb != NULL
2283 || EDGE_COUNT (other_bb->preds) != 2)
2284 return false;
2285
2286 /* We must be able to move the insns across the whole block. */
2287 move_before = BB_HEAD (bb);
2288 while (!NONDEBUG_INSN_P (move_before))
2289 move_before = NEXT_INSN (move_before);
2290
2291 if (EDGE_COUNT (bb->preds) != 1)
2292 return false;
2293 incoming_edge = EDGE_PRED (bb, 0);
2294 final_dest_bb = incoming_edge->src;
2295 if (EDGE_COUNT (final_dest_bb->succs) != 2)
2296 return false;
2297 FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
2298 if (incoming_bb_other_edge != incoming_edge)
2299 break;
2300 if (incoming_bb_other_edge->dest != other_bb)
2301 return false;
2302 }
2303 }
2304
2305 e0 = EDGE_SUCC (bb, 0);
2306 e0_last_head = NULL_RTX;
2307 changed = false;
2308
2309 for (ix = 1; ix < nedges; ix++)
2310 {
2311 edge e = EDGE_SUCC (bb, ix);
2312 rtx e0_last, e_last;
2313 int nmatch;
2314
2315 nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
2316 &e0_last, &e_last, 0);
2317 if (nmatch == 0)
2318 return false;
2319
2320 if (nmatch < max_match)
2321 {
2322 max_match = nmatch;
2323 e0_last_head = e0_last;
2324 }
2325 }
2326
2327 /* If we matched an entire block, we probably have to avoid moving the
2328 last insn. */
2329 if (max_match > 0
2330 && e0_last_head == BB_END (e0->dest)
2331 && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
2332 || control_flow_insn_p (e0_last_head)))
2333 {
2334 max_match--;
2335 if (max_match == 0)
2336 return false;
2337 do
2338 e0_last_head = prev_real_insn (e0_last_head);
2339 while (DEBUG_INSN_P (e0_last_head));
2340 }
2341
2342 if (max_match == 0)
2343 return false;
2344
 2345	  /* We must find the union of the live registers at each of the end points.  */
2346 live = BITMAP_ALLOC (NULL);
2347 live_union = BITMAP_ALLOC (NULL);
2348
2349 currptr = XNEWVEC (rtx, nedges);
2350 headptr = XNEWVEC (rtx, nedges);
2351 nextptr = XNEWVEC (rtx, nedges);
2352
2353 for (ix = 0; ix < nedges; ix++)
2354 {
2355 int j;
2356 basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
2357 rtx head = BB_HEAD (merge_bb);
2358
2359 while (!NONDEBUG_INSN_P (head))
2360 head = NEXT_INSN (head);
2361 headptr[ix] = head;
2362 currptr[ix] = head;
2363
 2364	      /* Compute the end point and live information.  */
2365 for (j = 1; j < max_match; j++)
2366 do
2367 head = NEXT_INSN (head);
2368 while (!NONDEBUG_INSN_P (head));
2369 simulate_backwards_to_point (merge_bb, live, head);
2370 IOR_REG_SET (live_union, live);
2371 }
2372
2373 /* If we're moving across two blocks, verify the validity of the
2374 first move, then adjust the target and let the loop below deal
2375 with the final move. */
2376 if (final_dest_bb != NULL)
2377 {
2378 rtx move_upto;
2379
2380 moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
2381 jump, e0->dest, live_union,
2382 NULL, &move_upto);
2383 if (!moveall)
2384 {
2385 if (move_upto == NULL_RTX)
2386 goto out;
2387
2388 while (e0_last_head != move_upto)
2389 {
2390 df_simulate_one_insn_backwards (e0->dest, e0_last_head,
2391 live_union);
2392 e0_last_head = PREV_INSN (e0_last_head);
2393 }
2394 }
2395 if (e0_last_head == NULL_RTX)
2396 goto out;
2397
2398 jump = BB_END (final_dest_bb);
2399 cond = get_condition (jump, &move_before, true, false);
2400 if (cond == NULL_RTX)
43052d45
BS
2401 {
2402#ifdef HAVE_cc0
2403 if (reg_mentioned_p (cc0_rtx, jump))
2404 move_before = prev_nonnote_nondebug_insn (jump);
2405 else
2406#endif
2407 move_before = jump;
2408 }
4ec5d4f5
BS
2409 }
2410
2411 do
2412 {
2413 rtx move_upto;
2414 moveall = can_move_insns_across (currptr[0], e0_last_head,
2415 move_before, jump, e0->dest, live_union,
2416 NULL, &move_upto);
2417 if (!moveall && move_upto == NULL_RTX)
2418 {
2419 if (jump == move_before)
2420 break;
2421
2422 /* Try again, using a different insertion point. */
2423 move_before = jump;
2424
2425#ifdef HAVE_cc0
2426 /* Don't try moving before a cc0 user, as that may invalidate
2427 the cc0. */
2428 if (reg_mentioned_p (cc0_rtx, jump))
2429 break;
2430#endif
2431
2432 continue;
2433 }
2434
2435 if (final_dest_bb && !moveall)
2436 /* We haven't checked whether a partial move would be OK for the first
2437 move, so we have to fail this case. */
2438 break;
2439
2440 changed = true;
2441 for (;;)
2442 {
2443 if (currptr[0] == move_upto)
2444 break;
2445 for (ix = 0; ix < nedges; ix++)
2446 {
2447 rtx curr = currptr[ix];
2448 do
2449 curr = NEXT_INSN (curr);
2450 while (!NONDEBUG_INSN_P (curr));
2451 currptr[ix] = curr;
2452 }
2453 }
2454
2455 /* If we can't currently move all of the identical insns, remember
2456 each insn after the range that we'll merge. */
2457 if (!moveall)
2458 for (ix = 0; ix < nedges; ix++)
2459 {
2460 rtx curr = currptr[ix];
2461 do
2462 curr = NEXT_INSN (curr);
2463 while (!NONDEBUG_INSN_P (curr));
2464 nextptr[ix] = curr;
2465 }
2466
2467 reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
2468 df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
2469 if (final_dest_bb != NULL)
2470 df_set_bb_dirty (final_dest_bb);
2471 df_set_bb_dirty (bb);
2472 for (ix = 1; ix < nedges; ix++)
2473 {
2474 df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
2475 delete_insn_chain (headptr[ix], currptr[ix], false);
2476 }
2477 if (!moveall)
2478 {
2479 if (jump == move_before)
2480 break;
2481
2482 /* For the unmerged insns, try a different insertion point. */
2483 move_before = jump;
2484
2485#ifdef HAVE_cc0
2486 /* Don't try moving before a cc0 user, as that may invalidate
2487 the cc0. */
2488 if (reg_mentioned_p (cc0_rtx, jump))
2489 break;
2490#endif
2491
2492 for (ix = 0; ix < nedges; ix++)
2493 currptr[ix] = headptr[ix] = nextptr[ix];
2494 }
2495 }
2496 while (!moveall);
2497
2498 out:
2499 free (currptr);
2500 free (headptr);
2501 free (nextptr);
2502
2503 crossjumps_occured |= changed;
2504
2505 return changed;
2506}
2507
7752e522
JJ
 2508	/* Return true if BB contains just a bb note, or a bb note followed
 2509	   only by DEBUG_INSNs.  */
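/* Editorial note: the walk below starts at BB_END and requires only
   DEBUG_INSNs until it reaches BB_HEAD; BB_HEAD itself (the bb note)
   is not inspected.  */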
2510
2511static bool
2512trivially_empty_bb_p (basic_block bb)
2513{
2514 rtx insn = BB_END (bb);
2515
2516 while (1)
2517 {
2518 if (insn == BB_HEAD (bb))
2519 return true;
2520 if (!DEBUG_INSN_P (insn))
2521 return false;
2522 insn = PREV_INSN (insn);
2523 }
2524}
2525
402209ff
JH
 2526/* Do simple CFG optimizations - basic block merging, simplification of jump
 2527	   instructions, etc.  Return nonzero if changes were made.  */
2528
2529static bool
d329e058 2530try_optimize_cfg (int mode)
402209ff 2531{
402209ff
JH
2532 bool changed_overall = false;
2533 bool changed;
2534 int iterations = 0;
ec3ae3da 2535 basic_block bb, b, next;
402209ff 2536
6fb5fa3c 2537 if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
38c1593d
JH
2538 clear_bb_flags ();
2539
c1e3e2d9
SB
2540 crossjumps_occured = false;
2541
2dd2d53e
SB
2542 FOR_EACH_BB (bb)
2543 update_forwarder_flag (bb);
2544
245f1bfa 2545 if (! targetm.cannot_modify_jumps_p ())
402209ff 2546 {
7cf240d5 2547 first_pass = true;
e4ec2cac
AO
2548 /* Attempt to merge blocks as made possible by edge removal. If
2549 a block has only one successor, and the successor has only
2550 one predecessor, they may be combined. */
2551 do
402209ff 2552 {
4ec5d4f5 2553 block_was_dirty = false;
e4ec2cac
AO
2554 changed = false;
2555 iterations++;
2556
c263766c
RH
2557 if (dump_file)
2558 fprintf (dump_file,
e4ec2cac
AO
2559 "\n\ntry_optimize_cfg iteration %i\n\n",
2560 iterations);
402209ff 2561
e0082a72 2562 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
402209ff 2563 {
e0082a72 2564 basic_block c;
e4ec2cac
AO
2565 edge s;
2566 bool changed_here = false;
5f0d2358 2567
468059bc
DD
 2568	      /* Delete trivially dead basic blocks.  These are either
 2569	         blocks with no predecessors, or empty blocks with no
1e211590
DD
 2570	         successors.  However, if an empty block with no
2571 successors is the successor of the ENTRY_BLOCK, it is
2572 kept. This ensures that the ENTRY_BLOCK will have a
2573 successor which is a precondition for many RTL
2574 passes. Empty blocks may result from expanding
468059bc
DD
2575 __builtin_unreachable (). */
2576 if (EDGE_COUNT (b->preds) == 0
1e211590 2577 || (EDGE_COUNT (b->succs) == 0
7752e522 2578 && trivially_empty_bb_p (b)
1e211590 2579 && single_succ_edge (ENTRY_BLOCK_PTR)->dest != b))
e4ec2cac 2580 {
f6366fc7 2581 c = b->prev_bb;
f1de5107 2582 if (EDGE_COUNT (b->preds) > 0)
3b5fda81
JJ
2583 {
2584 edge e;
2585 edge_iterator ei;
2586
f1de5107
JJ
2587 if (current_ir_type () == IR_RTL_CFGLAYOUT)
2588 {
bcc708fc
MM
2589 if (BB_FOOTER (b)
2590 && BARRIER_P (BB_FOOTER (b)))
f1de5107
JJ
2591 FOR_EACH_EDGE (e, ei, b->preds)
2592 if ((e->flags & EDGE_FALLTHRU)
bcc708fc 2593 && BB_FOOTER (e->src) == NULL)
f1de5107 2594 {
bcc708fc 2595 if (BB_FOOTER (b))
f1de5107 2596 {
bcc708fc
MM
2597 BB_FOOTER (e->src) = BB_FOOTER (b);
2598 BB_FOOTER (b) = NULL;
f1de5107
JJ
2599 }
2600 else
2601 {
2602 start_sequence ();
bcc708fc 2603 BB_FOOTER (e->src) = emit_barrier ();
f1de5107
JJ
2604 end_sequence ();
2605 }
2606 }
2607 }
2608 else
2609 {
2610 rtx last = get_last_bb_insn (b);
2611 if (last && BARRIER_P (last))
2612 FOR_EACH_EDGE (e, ei, b->preds)
2613 if ((e->flags & EDGE_FALLTHRU))
2614 emit_barrier_after (BB_END (e->src));
2615 }
3b5fda81 2616 }
f470c378 2617 delete_basic_block (b);
bef16e87 2618 changed = true;
83bd032b
ZD
2619 /* Avoid trying to remove ENTRY_BLOCK_PTR. */
2620 b = (c == ENTRY_BLOCK_PTR ? c->next_bb : c);
2621 continue;
e4ec2cac 2622 }
402209ff 2623
6ce2bcb7 2624 /* Remove code labels no longer used. */
c5cbcccf
ZD
2625 if (single_pred_p (b)
2626 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2627 && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
4b4bf941 2628 && LABEL_P (BB_HEAD (b))
e4ec2cac
AO
2629 /* If the previous block ends with a branch to this
2630 block, we can't delete the label. Normally this
2631 is a condjump that is yet to be simplified, but
2632 if CASE_DROPS_THRU, this can be a tablejump with
2633 some element going to the same place as the
2634 default (fallthru). */
c5cbcccf
ZD
2635 && (single_pred (b) == ENTRY_BLOCK_PTR
2636 || !JUMP_P (BB_END (single_pred (b)))
a813c111 2637 || ! label_is_jump_target_p (BB_HEAD (b),
c5cbcccf 2638 BB_END (single_pred (b)))))
e4ec2cac 2639 {
03fbe718 2640 delete_insn (BB_HEAD (b));
c263766c
RH
2641 if (dump_file)
2642 fprintf (dump_file, "Deleted label in block %i.\n",
0b17ab2f 2643 b->index);
e4ec2cac 2644 }
402209ff 2645
e4ec2cac 2646 /* If we fall through an empty block, we can remove it. */
9be94227 2647 if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
c5cbcccf
ZD
2648 && single_pred_p (b)
2649 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
4b4bf941 2650 && !LABEL_P (BB_HEAD (b))
e4ec2cac
AO
2651 && FORWARDER_BLOCK_P (b)
2652 /* Note that forwarder_block_p true ensures that
2653 there is a successor for this block. */
c5cbcccf 2654 && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
24bd1a0b 2655 && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
e4ec2cac 2656 {
c263766c
RH
2657 if (dump_file)
2658 fprintf (dump_file,
e4ec2cac 2659 "Deleting fallthru block %i.\n",
0b17ab2f 2660 b->index);
e4ec2cac 2661
f6366fc7 2662 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
c5cbcccf
ZD
2663 redirect_edge_succ_nodup (single_pred_edge (b),
2664 single_succ (b));
f470c378 2665 delete_basic_block (b);
e4ec2cac
AO
2666 changed = true;
2667 b = c;
1e211590 2668 continue;
e4ec2cac 2669 }
5f0d2358 2670
50a36e42 2671 /* Merge B with its single successor, if any. */
c5cbcccf
ZD
2672 if (single_succ_p (b)
2673 && (s = single_succ_edge (b))
ec3ae3da
JH
2674 && !(s->flags & EDGE_COMPLEX)
2675 && (c = s->dest) != EXIT_BLOCK_PTR
c5cbcccf 2676 && single_pred_p (c)
bc35512f
JH
2677 && b != c)
2678 {
2679 /* When not in cfg_layout mode use code aware of reordering
2680 INSN. This code possibly creates new basic blocks so it
 2681	         does not fit the merge_blocks interface and is kept here in
 2682	         the hope that it will become useless once more of the compiler
 2683	         is transformed to use cfg_layout mode.  */
c22cacf3 2684
bc35512f
JH
2685 if ((mode & CLEANUP_CFGLAYOUT)
2686 && can_merge_blocks_p (b, c))
2687 {
2688 merge_blocks (b, c);
2689 update_forwarder_flag (b);
2690 changed_here = true;
2691 }
2692 else if (!(mode & CLEANUP_CFGLAYOUT)
2693 /* If the jump insn has side effects,
2694 we can't kill the edge. */
4b4bf941 2695 && (!JUMP_P (BB_END (b))
e24e7211 2696 || (reload_completed
a813c111 2697 ? simplejump_p (BB_END (b))
e4efa971
JH
2698 : (onlyjump_p (BB_END (b))
2699 && !tablejump_p (BB_END (b),
2700 NULL, NULL))))
bc35512f
JH
2701 && (next = merge_blocks_move (s, b, c, mode)))
2702 {
2703 b = next;
2704 changed_here = true;
2705 }
ec3ae3da 2706 }
e4ec2cac
AO
2707
2708 /* Simplify branch over branch. */
bc35512f
JH
2709 if ((mode & CLEANUP_EXPENSIVE)
2710 && !(mode & CLEANUP_CFGLAYOUT)
2711 && try_simplify_condjump (b))
38c1593d 2712 changed_here = true;
402209ff 2713
e4ec2cac
AO
2714 /* If B has a single outgoing edge, but uses a
2715 non-trivial jump instruction without side-effects, we
2716 can either delete the jump entirely, or replace it
3348b696 2717 with a simple unconditional jump. */
c5cbcccf
ZD
2718 if (single_succ_p (b)
2719 && single_succ (b) != EXIT_BLOCK_PTR
a813c111 2720 && onlyjump_p (BB_END (b))
750054a2 2721 && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
c5cbcccf
ZD
2722 && try_redirect_by_replacing_jump (single_succ_edge (b),
2723 single_succ (b),
20b4e8ae 2724 (mode & CLEANUP_CFGLAYOUT) != 0))
e4ec2cac 2725 {
e4ec2cac
AO
2726 update_forwarder_flag (b);
2727 changed_here = true;
2728 }
402209ff 2729
e4ec2cac
AO
2730 /* Simplify branch to branch. */
2731 if (try_forward_edges (mode, b))
afe8b6ec
EB
2732 {
2733 update_forwarder_flag (b);
2734 changed_here = true;
2735 }
402209ff 2736
e4ec2cac
AO
2737 /* Look for shared code between blocks. */
2738 if ((mode & CLEANUP_CROSSJUMP)
2739 && try_crossjump_bb (mode, b))
2740 changed_here = true;
402209ff 2741
4ec5d4f5
BS
2742 if ((mode & CLEANUP_CROSSJUMP)
2743 /* This can lengthen register lifetimes. Do it only after
2744 reload. */
2745 && reload_completed
2746 && try_head_merge_bb (b))
2747 changed_here = true;
2748
e4ec2cac
AO
2749 /* Don't get confused by the index shift caused by
2750 deleting blocks. */
2751 if (!changed_here)
e0082a72 2752 b = b->next_bb;
e4ec2cac
AO
2753 else
2754 changed = true;
2755 }
402209ff 2756
e4ec2cac
AO
2757 if ((mode & CLEANUP_CROSSJUMP)
2758 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
402209ff 2759 changed = true;
402209ff 2760
4ec5d4f5
BS
2761 if (block_was_dirty)
2762 {
2763 /* This should only be set by head-merging. */
2764 gcc_assert (mode & CLEANUP_CROSSJUMP);
2765 df_analyze ();
2766 }
2767
402209ff 2768#ifdef ENABLE_CHECKING
e4ec2cac
AO
2769 if (changed)
2770 verify_flow_info ();
402209ff
JH
2771#endif
2772
e4ec2cac 2773 changed_overall |= changed;
7cf240d5 2774 first_pass = false;
e4ec2cac
AO
2775 }
2776 while (changed);
402209ff 2777 }
ca6c03ca 2778
2dd2d53e
SB
2779 FOR_ALL_BB (b)
2780 b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
635559ab 2781
402209ff
JH
2782 return changed_overall;
2783}
2784\f
6d2f8887 2785/* Delete all unreachable basic blocks. */
4262e623 2786
969d70ca 2787bool
d329e058 2788delete_unreachable_blocks (void)
402209ff 2789{
402209ff 2790 bool changed = false;
b5b8b0ac 2791 basic_block b, prev_bb;
402209ff
JH
2792
2793 find_unreachable_blocks ();
2794
b5b8b0ac
AO
2795 /* When we're in GIMPLE mode and there may be debug insns, we should
2796 delete blocks in reverse dominator order, so as to get a chance
2797 to substitute all released DEFs into debug stmts. If we don't
2798 have dominators information, walking blocks backward gets us a
2799 better chance of retaining most debug information than
2800 otherwise. */
2801 if (MAY_HAVE_DEBUG_STMTS && current_ir_type () == IR_GIMPLE
2802 && dom_info_available_p (CDI_DOMINATORS))
402209ff 2803 {
b5b8b0ac
AO
2804 for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
2805 {
2806 prev_bb = b->prev_bb;
2807
2808 if (!(b->flags & BB_REACHABLE))
2809 {
2810 /* Speed up the removal of blocks that don't dominate
2811 others. Walking backwards, this should be the common
2812 case. */
2813 if (!first_dom_son (CDI_DOMINATORS, b))
2814 delete_basic_block (b);
2815 else
2816 {
2817 VEC (basic_block, heap) *h
2818 = get_all_dominated_blocks (CDI_DOMINATORS, b);
2819
2820 while (VEC_length (basic_block, h))
2821 {
2822 b = VEC_pop (basic_block, h);
2823
2824 prev_bb = b->prev_bb;
0b17ab2f 2825
b5b8b0ac
AO
2826 gcc_assert (!(b->flags & BB_REACHABLE));
2827
2828 delete_basic_block (b);
2829 }
2830
2831 VEC_free (basic_block, heap, h);
2832 }
2833
2834 changed = true;
2835 }
2836 }
2837 }
2838 else
2839 {
2840 for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
6a58eee9 2841 {
b5b8b0ac
AO
2842 prev_bb = b->prev_bb;
2843
2844 if (!(b->flags & BB_REACHABLE))
2845 {
2846 delete_basic_block (b);
2847 changed = true;
2848 }
6a58eee9 2849 }
402209ff
JH
2850 }
2851
2852 if (changed)
2853 tidy_fallthru_edges ();
2854 return changed;
2855}
6fb5fa3c
DB
2856
2857/* Delete any jump tables never referenced. We can't delete them at the
 2858	   time of removing the tablejump insn, as they are referenced by the
 2859	   preceding insns computing the destination, so we delay deleting and
 2860	   garbage-collect them once life information is computed.  */
2861void
2862delete_dead_jumptables (void)
2863{
2864 basic_block bb;
2865
2866 /* A dead jump table does not belong to any basic block. Scan insns
2867 between two adjacent basic blocks. */
2868 FOR_EACH_BB (bb)
2869 {
2870 rtx insn, next;
2871
2872 for (insn = NEXT_INSN (BB_END (bb));
2873 insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
2874 insn = next)
2875 {
2876 next = NEXT_INSN (insn);
2877 if (LABEL_P (insn)
2878 && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
481683e1 2879 && JUMP_TABLE_DATA_P (next))
6fb5fa3c
DB
2880 {
2881 rtx label = insn, jump = next;
2882
2883 if (dump_file)
2884 fprintf (dump_file, "Dead jumptable %i removed\n",
2885 INSN_UID (insn));
2886
2887 next = NEXT_INSN (next);
2888 delete_insn (jump);
2889 delete_insn (label);
2890 }
2891 }
2892 }
2893}
2894
402209ff
JH
2895\f
2896/* Tidy the CFG by deleting unreachable code and whatnot. */
2897
2898bool
d329e058 2899cleanup_cfg (int mode)
402209ff 2900{
402209ff
JH
2901 bool changed = false;
2902
aeceeb06
SB
2903 /* Set the cfglayout mode flag here. We could update all the callers
2904 but that is just inconvenient, especially given that we eventually
2905 want to have cfglayout mode as the default. */
2906 if (current_ir_type () == IR_RTL_CFGLAYOUT)
2907 mode |= CLEANUP_CFGLAYOUT;
2908
402209ff 2909 timevar_push (TV_CLEANUP_CFG);
3dec4024
JH
2910 if (delete_unreachable_blocks ())
2911 {
2912 changed = true;
 2913	      /* We've possibly created trivially dead code.  Clean it up right
95bd1dd7 2914 now to introduce more opportunities for try_optimize_cfg. */
6fb5fa3c 2915 if (!(mode & (CLEANUP_NO_INSN_DEL))
3dec4024 2916 && !reload_completed)
62e5bf5d 2917 delete_trivially_dead_insns (get_insns (), max_reg_num ());
3dec4024 2918 }
bf77398c
ZD
2919
2920 compact_blocks ();
2921
c1e3e2d9
SB
2922 /* To tail-merge blocks ending in the same noreturn function (e.g.
2923 a call to abort) we have to insert fake edges to exit. Do this
2924 here once. The fake edges do not interfere with any other CFG
2925 cleanups. */
2926 if (mode & CLEANUP_CROSSJUMP)
2927 add_noreturn_fake_exit_edges ();
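  /* Editorial illustration (not part of the original source): two
     blocks that each end in a call to a noreturn function such as
     abort () have no successors and so would never share a destination;
     the fake edges give them EXIT as a common successor, letting
     try_crossjump_bb merge their identical tails.  */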
2928
7d817ebc
DE
2929 if (!dbg_cnt (cfg_cleanup))
2930 return changed;
2931
3dec4024
JH
2932 while (try_optimize_cfg (mode))
2933 {
2934 delete_unreachable_blocks (), changed = true;
c1e3e2d9 2935 if (!(mode & CLEANUP_NO_INSN_DEL))
3dec4024 2936 {
c1e3e2d9
SB
2937 /* Try to remove some trivially dead insns when doing an expensive
2938 cleanup. But delete_trivially_dead_insns doesn't work after
2939 reload (it only handles pseudos) and run_fast_dce is too costly
2940 to run in every iteration.
2941
2942 For effective cross jumping, we really want to run a fast DCE to
2943 clean up any dead conditions, or they get in the way of performing
2944 useful tail merges.
2945
2946 Other transformations in cleanup_cfg are not so sensitive to dead
2947 code, so delete_trivially_dead_insns or even doing nothing at all
2948 is good enough. */
2949 if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
2950 && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
3dec4024 2951 break;
4ec5d4f5 2952 if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occured)
f842d54f 2953 run_fast_dce ();
3dec4024
JH
2954 }
2955 else
2956 break;
3dec4024 2957 }
402209ff 2958
c1e3e2d9
SB
2959 if (mode & CLEANUP_CROSSJUMP)
2960 remove_fake_exit_edges ();
2961
6fb5fa3c
DB
2962 /* Don't call delete_dead_jumptables in cfglayout mode, because
2963 that function assumes that jump tables are in the insns stream.
2964 But we also don't _have_ to delete dead jumptables in cfglayout
2965 mode because we shouldn't even be looking at things that are
2966 not in a basic block. Dead jumptables are cleaned up when
2967 going out of cfglayout mode. */
2968 if (!(mode & CLEANUP_CFGLAYOUT))
2969 delete_dead_jumptables ();
2970
7d776ee2
RG
2971 /* ??? We probably do this way too often. */
2972 if (current_loops
2973 && (changed
2974 || (mode & CLEANUP_CFG_CHANGED)))
2975 {
2976 bitmap changed_bbs;
2977 timevar_push (TV_REPAIR_LOOPS);
2978 /* The above doesn't preserve dominance info if available. */
2979 gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
2980 calculate_dominance_info (CDI_DOMINATORS);
2981 changed_bbs = BITMAP_ALLOC (NULL);
2982 fix_loop_structure (changed_bbs);
2983 BITMAP_FREE (changed_bbs);
2984 free_dominance_info (CDI_DOMINATORS);
2985 timevar_pop (TV_REPAIR_LOOPS);
2986 }
2987
402209ff
JH
2988 timevar_pop (TV_CLEANUP_CFG);
2989
402209ff
JH
2990 return changed;
2991}
ef330312 2992\f
c2924966 2993static unsigned int
ef330312
PB
2994rest_of_handle_jump2 (void)
2995{
ef330312 2996 delete_trivially_dead_insns (get_insns (), max_reg_num ());
ef330312 2997 if (dump_file)
5b4fdb20 2998 dump_flow_info (dump_file, dump_flags);
59994160 2999 cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
c22cacf3 3000 | (flag_thread_jumps ? CLEANUP_THREADING : 0));
c2924966 3001 return 0;
ef330312
PB
3002}
3003
3004
8ddbbcae 3005struct rtl_opt_pass pass_jump2 =
ef330312 3006{
8ddbbcae
JH
3007 {
3008 RTL_PASS,
ef330312 3009 "jump", /* name */
c22cacf3
MS
3010 NULL, /* gate */
3011 rest_of_handle_jump2, /* execute */
ef330312
PB
3012 NULL, /* sub */
3013 NULL, /* next */
3014 0, /* static_pass_number */
3015 TV_JUMP, /* tv_id */
3016 0, /* properties_required */
3017 0, /* properties_provided */
3018 0, /* properties_destroyed */
3019 TODO_ggc_collect, /* todo_flags_start */
22c5fa5f 3020 TODO_verify_rtl_sharing, /* todo_flags_finish */
8ddbbcae 3021 }
ef330312 3022};