/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure, so the branch can be converted to a
     simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"
#include "cselib.h"
#include "params.h"
#include "tm_p.h"
#include "target.h"
#include "cfglayout.h"
#include "emit-rtl.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"

#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)

/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;
static bool try_crossjump_to_edge (int, edge, edge);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int *, struct equiv_info *);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (int, edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
\f
/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
}

/* Recompute the forwarder flag after a block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
  else
    bb->flags &= ~BB_FORWARDER_BLOCK;
}
\f
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */
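
/* Illustrative sketch of the transformation below (added commentary, not
   from the original source):

       if (cond) goto L2;               if (!cond) goto L1;
       goto L1;                  =>     ;; fall through to L2
    L2: ...                          L2: ...

   The "goto L1" lives alone in a forwarder block, which is deleted once
   the conditional branch has been inverted.  */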

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
             INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
                                                cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
                                                    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
\f
/* Attempt to prove that an operation is a NOOP using CSElib, or mark the
   effect on a register.  Used by jump threading.  */

static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
         value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
        {
          dest = XEXP (exp, 0);
          regno = REGNO (dest);
          CLEAR_REGNO_REG_SET (nonequal, regno);
          if (regno < FIRST_PSEUDO_REGISTER)
            {
              int n = hard_regno_nregs[regno][GET_MODE (dest)];
              while (--n > 0)
                CLEAR_REGNO_REG_SET (nonequal, regno + n);
            }
        }
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
        return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
        return false;
      if (!REG_P (dest))
        return true;
      regno = REGNO (dest);
      SET_REGNO_REG_SET (nonequal, regno);
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int n = hard_regno_nregs[regno][GET_MODE (dest)];
          while (--n > 0)
            SET_REGNO_REG_SET (nonequal, regno + n);
        }
      return false;

    default:
      return false;
    }
}

/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */
static int
mentions_nonequal_regs (rtx *x, void *data)
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
        return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int n = hard_regno_nregs[regno][GET_MODE (*x)];
          while (--n > 0)
            if (REGNO_REG_SET_P (nonequal, regno + n))
              return 1;
        }
    }
  return 0;
}
/* Attempt to prove that the basic block B will have no side effects and
   always continues along the same edge if reached via E.  Return that edge
   if it exists, NULL otherwise.  */
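
/* Illustrative sketch (added commentary, not from the original source):
   if E->src ends in "if (a == b)" and its taken edge E enters B, which
   itself ends in "if (a == b)" over operands that CSElib proves unchanged
   within B, then the outcome of B's branch is already determined and the
   caller can redirect E past B.  */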

static edge
thread_jump (int mode, edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
        b->flags |= BB_NONTHREADABLE_BLOCK;
        return NULL;
      }

  cselib_init (false);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          rtx pat = PATTERN (insn);

          if (GET_CODE (pat) == PARALLEL)
            {
              for (i = 0; i < (unsigned) XVECLEN (pat, 0); i++)
                failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
            }
          else
            failed |= mark_effect (pat, nonequal);
        }

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  /* In case liveness information is available, we need to prove equivalence
     only of the live values.  */
  if (mode & CLEANUP_UPDATE_LIFE)
    AND_REG_SET (nonequal, b->il.rtl->global_live_at_end);

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}
\f
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */
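
/* Illustrative sketch (added commentary, not from the original source):
   given a chain

       B  ->  F1  ->  F2  ->  T

   where F1 and F2 are forwarder blocks, the outgoing edge of B is
   redirected straight to T; the bypassed forwarders later die as
   unreachable blocks.  */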

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass | (b->flags & BB_DIRTY);

      /* Skip complex edges because we don't know how to update them.

         Still handle fallthru edges, as we may succeed in forwarding the
         fallthru edge to the same place as the branch edge of a conditional
         branch and turn the conditional branch into an unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
        {
          ei_next (&ei);
          continue;
        }

      target = first = e->dest;
      counter = 0;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
         up jumps that cross between hot/cold sections.

         Basic block partitioning may result in some jumps that appear
         to be optimizable (or blocks that appear to be mergeable), but which
         really must be left untouched (they are required to make it safely
         across partition boundaries).  See the comments at the top of
         bb-reorder.c:partition_hot_cold_basic_blocks for complete
         details.  */

      if (first != EXIT_BLOCK_PTR
          && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
        return false;

      while (counter < n_basic_blocks)
        {
          basic_block new_target = NULL;
          bool new_target_threaded = false;
          may_thread |= target->flags & BB_DIRTY;

          if (FORWARDER_BLOCK_P (target)
              && !(single_succ_edge (target)->flags & EDGE_CROSSING)
              && single_succ (target) != EXIT_BLOCK_PTR)
            {
              /* Bypass trivial infinite loops.  */
              new_target = single_succ (target);
              if (target == new_target)
                counter = n_basic_blocks;
            }

          /* Allow threading only over one edge at a time to simplify updating
             of probabilities.  */
          else if ((mode & CLEANUP_THREADING) && may_thread)
            {
              edge t = thread_jump (mode, e, target);
              if (t)
                {
                  if (!threaded_edges)
                    threaded_edges = xmalloc (sizeof (*threaded_edges)
                                              * n_basic_blocks);
                  else
                    {
                      int i;

                      /* Detect an infinite loop across blocks not
                         including the start block.  */
                      for (i = 0; i < nthreaded_edges; ++i)
                        if (threaded_edges[i] == t)
                          break;
                      if (i < nthreaded_edges)
                        {
                          counter = n_basic_blocks;
                          break;
                        }
                    }

                  /* Detect an infinite loop across the start block.  */
                  if (t->dest == b)
                    break;

                  gcc_assert (nthreaded_edges < n_basic_blocks);
                  threaded_edges[nthreaded_edges++] = t;

                  new_target = t->dest;
                  new_target_threaded = true;
                }
            }

          if (!new_target)
            break;

          /* Avoid killing loop pre-headers, as they are the place the loop
             optimizer wants to hoist code to.

             For fallthru forwarders, the LOOP_BEG note must appear between
             the block's header and the CODE_LABEL of the loop; for non
             forwarders it must appear before the JUMP_INSN.  */
          if ((mode & CLEANUP_PRE_LOOP) && optimize && flag_loop_optimize)
            {
              rtx insn = (EDGE_SUCC (target, 0)->flags & EDGE_FALLTHRU
                          ? BB_HEAD (target)
                          : prev_nonnote_insn (BB_END (target)));

              if (!NOTE_P (insn))
                insn = NEXT_INSN (insn);

              for (; insn && !LABEL_P (insn) && !INSN_P (insn);
                   insn = NEXT_INSN (insn))
                if (NOTE_P (insn)
                    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
                  break;

              if (insn && NOTE_P (insn))
                break;

              /* Do not clean up branches to just past the end of a loop
                 at this time; it can mess up the loop optimizer's
                 recognition of some patterns.  */

              insn = PREV_INSN (BB_HEAD (target));
              if (insn && NOTE_P (insn)
                  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
                break;
            }

          counter++;
          target = new_target;
          threaded |= new_target_threaded;
        }

      if (counter >= n_basic_blocks)
        {
          if (dump_file)
            fprintf (dump_file, "Infinite loop in BB %i.\n",
                     target->index);
        }
      else if (target == first)
        ; /* We didn't do anything.  */
      else
        {
          /* Save the values now, as the edge may get removed.  */
          gcov_type edge_count = e->count;
          int edge_probability = e->probability;
          int edge_frequency;
          int n = 0;

          /* Don't force if target is exit block.  */
          if (threaded && target != EXIT_BLOCK_PTR)
            {
              notice_new_block (redirect_edge_and_branch_force (e, target));
              if (dump_file)
                fprintf (dump_file, "Conditionals threaded.\n");
            }
          else if (!redirect_edge_and_branch (e, target))
            {
              if (dump_file)
                fprintf (dump_file,
                         "Forwarding edge %i->%i to %i failed.\n",
                         b->index, e->dest->index, target->index);
              ei_next (&ei);
              continue;
            }

          /* We successfully forwarded the edge.  Now update profile
             data: for each edge we traversed in the chain, remove
             the original edge's execution count.  */
          edge_frequency = ((edge_probability * b->frequency
                             + REG_BR_PROB_BASE / 2)
                            / REG_BR_PROB_BASE);

          if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
            b->flags |= BB_FORWARDER_BLOCK;

          do
            {
              edge t;

              if (!single_succ_p (first))
                {
                  gcc_assert (n < nthreaded_edges);
                  t = threaded_edges [n++];
                  gcc_assert (t->src == first);
                  update_bb_profile_for_threading (first, edge_frequency,
                                                   edge_count, t);
                  update_br_prob_note (first);
                }
              else
                {
                  first->count -= edge_count;
                  if (first->count < 0)
                    first->count = 0;
                  first->frequency -= edge_frequency;
                  if (first->frequency < 0)
                    first->frequency = 0;
                  /* It is possible that as the result of
                     threading we've removed the edge as it is
                     threaded to the fallthru edge.  Avoid
                     getting out of sync.  */
                  if (n < nthreaded_edges
                      && first == threaded_edges [n]->src)
                    n++;
                  t = single_succ_edge (first);
                }

              t->count -= edge_count;
              if (t->count < 0)
                t->count = 0;
              first = t->dest;
            }
          while (first != target);

          changed = true;
          continue;
        }
      ei_next (&ei);
    }

  if (threaded_edges)
    free (threaded_edges);
  return changed;
}
\f

/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  a->flags |= BB_DIRTY;

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
             a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B, temporarily add the jump
     table to block B so that it will also be moved to the correct
     location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
             b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so that cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
        fprintf (dump_file, "Merged %d and %d without moving.\n",
                 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;
      edge_iterator ei;

      /* Avoid overactive code motion, as the forwarder blocks should be
         eliminated by edge redirection instead.  One exception might have
         been if B is a forwarder block and C has no fallthru edge, but
         that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
        return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
         and loop notes.  This is done by squeezing out all the notes
         and leaving them there to lie.  Not ideal, but functional.  */

      FOR_EACH_EDGE (tmp_edge, ei, c->succs)
        if (tmp_edge->flags & EDGE_FALLTHRU)
          break;

      c_has_outgoing_fallthru = (tmp_edge != NULL);

      FOR_EACH_EDGE (tmp_edge, ei, b->preds)
        if (tmp_edge->flags & EDGE_FALLTHRU)
          break;

      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
        next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
         not have an outgoing fallthru, then it can be moved
         immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
        {
          merge_blocks_move_successor_nojumps (b, c);
          return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
        }

      /* If B does not have an incoming fallthru, then it can be moved
         immediately before C without introducing or modifying jumps.
         C cannot be the first block, so we do not have to worry about
         accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
        {
          basic_block bb;

          if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
            return NULL;
          bb = force_nonfallthru (b_fallthru_edge);
          if (bb)
            notice_new_block (bb);
        }

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
    }

  return NULL;
}
\f
/* Return true iff the condbranches at the end of BB1 and BB2 match.  */
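
/* Illustrative sketch (added commentary, not from the original source):
   the branches may match directly,

       BB1: if (c1) goto X; else goto Y;
       BB2: if (c2) goto X; else goto Y;

   or with inverted sense (BB1's branch target is BB2's fallthru target
   and vice versa), in which case C2 is compared against the reversed
   condition code.  */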
bool
condjump_equiv_p (struct equiv_info *info, bool call_init)
{
  basic_block bb1 = info->x_block;
  basic_block bb2 = info->y_block;
  edge b1 = BRANCH_EDGE (bb1);
  edge b2 = BRANCH_EDGE (bb2);
  edge f1 = FALLTHRU_EDGE (bb1);
  edge f2 = FALLTHRU_EDGE (bb2);
  bool reverse, match;
  rtx set1, set2, cond1, cond2;
  rtx src1, src2;
  enum rtx_code code1, code2;

  /* Get around possible forwarders on fallthru edges.  Other cases
     should be optimized out already.  */
  if (FORWARDER_BLOCK_P (f1->dest))
    f1 = single_succ_edge (f1->dest);

  if (FORWARDER_BLOCK_P (f2->dest))
    f2 = single_succ_edge (f2->dest);

  /* To simplify use of this function, return false if there are
     unneeded forwarder blocks.  These will get eliminated later
     during cleanup_cfg.  */
  if (FORWARDER_BLOCK_P (f1->dest)
      || FORWARDER_BLOCK_P (f2->dest)
      || FORWARDER_BLOCK_P (b1->dest)
      || FORWARDER_BLOCK_P (b2->dest))
    return false;

  if (f1->dest == f2->dest && b1->dest == b2->dest)
    reverse = false;
  else if (f1->dest == b2->dest && b1->dest == f2->dest)
    reverse = true;
  else
    return false;

  set1 = pc_set (BB_END (bb1));
  set2 = pc_set (BB_END (bb2));
  if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    reverse = !reverse;

  src1 = SET_SRC (set1);
  src2 = SET_SRC (set2);
  cond1 = XEXP (src1, 0);
  cond2 = XEXP (src2, 0);
  code1 = GET_CODE (cond1);
  if (reverse)
    code2 = reversed_comparison_code (cond2, BB_END (bb2));
  else
    code2 = GET_CODE (cond2);

  if (code2 == UNKNOWN)
    return false;

  if (call_init && !struct_equiv_init (STRUCT_EQUIV_START | info->mode, info))
    gcc_unreachable ();
  /* Make the sources of the pc sets unreadable so that when we call
     insns_match_p it won't process them.
     The death_notes_match_p from insns_match_p won't see the local registers
     used for the pc set, but that could only cause missed optimizations when
     there are actually condjumps that use stack registers.  */
  SET_SRC (set1) = pc_rtx;
  SET_SRC (set2) = pc_rtx;
  /* Verify codes and operands match.  */
  if (code1 == code2)
    {
      match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
               && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 0), 1, info)
               && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 1), 1, info));
    }
  else if (code1 == swap_condition (code2))
    {
      match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
               && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 0), 1, info)
               && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 1), 1, info));
    }
  else
    match = false;
  SET_SRC (set1) = src1;
  SET_SRC (set2) = src2;
  match &= verify_changes (0);

  /* If we return true, we will join the blocks.  Which means that
     we will only have one branch prediction bit to work with.  Thus
     we require the existing branches to have probabilities that are
     roughly similar.  */
  if (match
      && !optimize_size
      && maybe_hot_bb_p (bb1)
      && maybe_hot_bb_p (bb2))
    {
      int prob2;

      if (b1->dest == b2->dest)
        prob2 = b2->probability;
      else
        /* Do not use f2 probability as f2 may be forwarded.  */
        prob2 = REG_BR_PROB_BASE - b2->probability;

      /* Fail if the difference in probabilities is greater than 50%.
         This rules out two well-predicted branches with opposite
         outcomes.  */
      if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
        {
          if (dump_file)
            fprintf (dump_file,
                     "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
                     bb1->index, bb2->index, b1->probability, prob2);

          match = false;
        }
    }

  if (dump_file && match)
    fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
             bb1->index, bb2->index);

  if (!match)
    cancel_changes (0);
  return match;
}

/* Return true iff outgoing edges of INFO->y_block and INFO->x_block match,
   together with the branch instruction.  This means that if we commonize the
   control flow before the end of the basic block, the semantics remain
   unchanged.  If we need to compare jumps, we set STRUCT_EQUIV_MATCH_JUMPS
   in *MODE, and pass *MODE to struct_equiv_init or assign it to INFO->mode,
   as appropriate.

   We may assume that there exists one edge with a common destination.  */
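
/* In outline (added commentary, not from the original source), the match
   succeeds for: simplejump against simplejump, condjump against condjump
   (delegated to condjump_equiv_p), or identical tablejumps; in the generic
   case the final instructions must match and the outgoing edges must agree
   in count and kind, with fallthru edges forwarded to the same destination
   and both blocks in the same EH region.  */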

static bool
outgoing_edges_match (int *mode, struct equiv_info *info)
{
  basic_block bb1 = info->y_block;
  basic_block bb2 = info->x_block;
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (single_succ_p (bb1)
      && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (single_succ_p (bb2)
            && (single_succ_edge (bb2)->flags
                & (EDGE_COMPLEX | EDGE_FAKE)) == 0
            && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  *mode |= STRUCT_EQUIV_MATCH_JUMPS;
  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      if (EDGE_COUNT (bb2->succs) != 2
          || !any_condjump_p (BB_END (bb2))
          || !onlyjump_p (BB_END (bb2)))
        return false;
      info->mode = *mode;
      return condjump_equiv_p (info, true);
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* Check whether there are tablejumps in the end of BB1 and BB2.
     Return true if they are identical.  */
  {
    rtx label1, label2;
    rtx table1, table2;

    if (tablejump_p (BB_END (bb1), &label1, &table1)
        && tablejump_p (BB_END (bb2), &label2, &table2)
        && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
      {
        /* The labels should never be the same rtx.  If they really are the
           same, the jump tables are the same too.  So disable crossjumping
           of blocks BB1 and BB2 because when deleting the common insns in
           the end of BB1 by delete_basic_block () the jump table would be
           deleted too.  */
        /* If LABEL2 is referenced in BB1->END do not do anything
           because we would lose information when replacing
           LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
        if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
          {
            /* Set IDENTICAL to true when the tables are identical.  */
            bool identical = false;
            rtx p1, p2;

            p1 = PATTERN (table1);
            p2 = PATTERN (table2);
            if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
              {
                identical = true;
              }
            else if (GET_CODE (p1) == ADDR_DIFF_VEC
                     && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
                     && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
                     && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
              {
                int i;

                identical = true;
                for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
                  if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
                    identical = false;
              }

            if (identical
                && struct_equiv_init (STRUCT_EQUIV_START | *mode, info))
              {
                bool match;

                /* Indicate that LABEL1 is to be replaced with LABEL2
                   in BB1->END so that we could compare the instructions.  */
                info->y_label = label1;
                info->x_label = label2;

                match = insns_match_p (BB_END (bb1), BB_END (bb2), info);
                if (dump_file && match)
                  fprintf (dump_file,
                           "Tablejumps in bb %i and %i match.\n",
                           bb1->index, bb2->index);

                return match;
              }
          }
        return false;
      }
  }

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (!struct_equiv_init (STRUCT_EQUIV_START | *mode, info)
      || !insns_match_p (BB_END (bb1), BB_END (bb2), info))
    return false;

  /* Search the outgoing edges, ensure that the counts do match, find possible
     fallthru and exception handling edges since these need more
     validation.  */
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
    return false;

  FOR_EACH_EDGE (e1, ei, bb1->succs)
    {
      e2 = EDGE_SUCC (bb2, ei.index);

      if (e1->flags & EDGE_EH)
        nehedges1++;

      if (e2->flags & EDGE_EH)
        nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
        fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
        fallthru2 = e2;
    }

  /* If the number of edges of various types does not match, fail.  */
  if (nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* Fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
                        ? single_succ (fallthru1->dest) : fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
                        ? single_succ (fallthru2->dest) : fallthru2->dest);

      if (d1 != d2)
        return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (!n1 && n2)
      return false;

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* We don't need to match the rest of the edges as the above checks should
     be enough to ensure that they are equivalent.  */
  return true;
}

/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */
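
/* Illustrative sketch (added commentary, not from the original source):

       A: ...; x = f (); goto J;               A: goto A2;
       B: ...; x = f (); goto J;       =>      B: ...; A2: x = f (); goto J;

   The common tail of A (here "x = f (); goto J;") is deleted and control
   is redirected to the equivalent code in B, splitting B if the match does
   not start at its head.  */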

static bool
try_crossjump_to_edge (int mode, edge e1, edge e2)
{
  int nmatch, i;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  edge s;
  edge_iterator ei;
  struct equiv_info info;
  rtx x_active, y_active;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition && no_new_pseudos)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (single_pred_p (src1)
      && FORWARDER_BLOCK_P (src1))
    e1 = single_pred_edge (src1), src1 = e1->src;

  if (single_pred_p (src2)
      && FORWARDER_BLOCK_P (src2))
    e2 = single_pred_edge (src2), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
    return false;

  /* Likewise with dead code (possibly newly created by the other
     optimizations of cfg_cleanup).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  info.x_block = src2;
  info.y_block = src1;
  if (!outgoing_edges_match (&mode, &info))
    return false;

  /* ... and part the second.  */
  info.input_cost = optimize_size ? COSTS_N_INSNS (1) : -1;
  nmatch = struct_equiv_block_eq (STRUCT_EQUIV_START | mode, &info);

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if (!nmatch)
    return false;
  if ((nmatch - info.cur.input_count < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (info.cur.y_start != BB_HEAD (src1)))
    return false;
  while (info.need_rerun)
    {
      nmatch = struct_equiv_block_eq (STRUCT_EQUIV_RERUN | mode, &info);
      if (!nmatch)
        return false;
      if ((nmatch - info.cur.input_count
           < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
          && (info.cur.y_start != BB_HEAD (src1)))
        return false;
    }
  nmatch = struct_equiv_block_eq (STRUCT_EQUIV_FINAL | mode, &info);
  if ((nmatch - info.cur.input_count < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (info.cur.y_start != BB_HEAD (src1)))
    return false;

  /* Skip possible basic block header.  */
  x_active = info.cur.x_start;
  if (LABEL_P (x_active))
    x_active = NEXT_INSN (x_active);
  if (NOTE_P (x_active))
    x_active = NEXT_INSN (x_active);

  y_active = info.cur.y_start;
  if (LABEL_P (y_active))
    y_active = NEXT_INSN (y_active);
  if (NOTE_P (y_active))
    y_active = NEXT_INSN (y_active);

  /* In order for this code to become active, either we have to be called
     before reload, or struct_equiv_block_eq needs to add register scavenging
     code to allocate input_reg after reload.  */
  if (info.input_reg)
    {
      emit_insn_before (gen_move_insn (info.input_reg, info.x_input),
                        x_active);
      emit_insn_before (gen_move_insn (info.input_reg, info.y_input),
                        y_active);
    }

  for (i = 0; i < info.cur.local_count; i++)
    if (info.local_rvalue[i])
      emit_insn_before (gen_move_insn (info.x_local[i], info.y_local[i]),
                        y_active);

  /* Here we know that the insns in the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps in the end of SRC1 and SRC2
     they have been already compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */
  {
    rtx label1, label2;
    rtx table1, table2;

    if (tablejump_p (BB_END (src1), &label1, &table1)
        && tablejump_p (BB_END (src2), &label2, &table2)
        && label1 != label2)
      {
        replace_label_data rr;
        rtx insn;

        /* Replace references to LABEL1 with LABEL2.  */
        rr.r1 = label1;
        rr.r2 = label2;
        rr.update_label_nuses = true;
        for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
          {
            /* Do not replace the label in SRC1->END because when deleting
               a block whose end is a tablejump, the tablejump referenced
               from the instruction is deleted too.  */
            if (insn != BB_END (src1))
              for_each_rtx (&insn, replace_label, &rr);
          }
      }
  }

  /* Avoid splitting if possible.  We must always split when SRC2 has
     EH predecessor edges, or we may end up with basic blocks with both
     normal and EH predecessor edges.  */
  if (info.cur.x_start == BB_HEAD (src2)
      && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
    redirect_to = src2;
  else
    {
      if (info.cur.x_start == BB_HEAD (src2))
        {
          /* Skip possible basic block header.  */
          if (LABEL_P (info.cur.x_start))
            info.cur.x_start = NEXT_INSN (info.cur.x_start);
          if (NOTE_P (info.cur.x_start))
            info.cur.x_start = NEXT_INSN (info.cur.x_start);
        }

      if (dump_file)
        fprintf (dump_file, "Splitting bb %i before %i insns\n",
                 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (info.cur.x_start))->dest;
      COPY_REG_SET (info.y_block->il.rtl->global_live_at_end,
                    info.x_block->il.rtl->global_live_at_end);
    }

  if (dump_file)
    {
      fprintf (dump_file, "Cross jumping from bb %i to bb %i; %i common insns",
               src1->index, src2->index, nmatch);
      if (info.cur.local_count)
        fprintf (dump_file, ", %i local registers", info.cur.local_count);
      fprintf (dump_file, "\n");
    }

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;
  /* We may have some registers visible through the block.  */
  redirect_to->flags |= BB_DIRTY;

  /* Recompute the frequencies and counts of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
        d = single_succ (d);

      FOR_EACH_EDGE (s2, ei, src1->succs)
        {
          basic_block d2 = s2->dest;
          if (FORWARDER_BLOCK_P (d2))
            d2 = single_succ (d2);
          if (d == d2)
            break;
        }

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
         that there is no more than one in the chain, so we can't run
         into an infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
        {
          single_succ_edge (s->dest)->count += s2->count;
          s->dest->count += s2->count;
          s->dest->frequency += EDGE_FREQUENCY (s);
        }

      if (FORWARDER_BLOCK_P (s2->dest))
        {
          single_succ_edge (s2->dest)->count -= s2->count;
          if (single_succ_edge (s2->dest)->count < 0)
            single_succ_edge (s2->dest)->count = 0;
          s2->dest->count -= s2->count;
          s2->dest->frequency -= EDGE_FREQUENCY (s);
          if (s2->dest->frequency < 0)
            s2->dest->frequency = 0;
          if (s2->dest->count < 0)
            s2->dest->count = 0;
        }

      if (!redirect_to->frequency && !src1->frequency)
        s->probability = (s->probability + s2->probability) / 2;
      else
        s->probability
          = ((s->probability * redirect_to->frequency +
              s2->probability * src1->frequency)
             / (redirect_to->frequency + src1->frequency));
    }

  update_br_prob_note (redirect_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  redirect_from = split_block (src1, PREV_INSN (y_active))->src;
  to_remove = single_succ (redirect_from);

  redirect_edge_and_branch_force (single_succ_edge (redirect_from),
                                  redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);
  if (redirect_to != src2)
    update_forwarder_flag (src2);

  return true;
}

/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */

1448static bool
d329e058 1449try_crossjump_bb (int mode, basic_block bb)
402209ff 1450{
628f6a4e 1451 edge e, e2, fallthru;
402209ff 1452 bool changed;
628f6a4e
BE
1453 unsigned max, ix, ix2;
1454 basic_block ev, ev2;
1455 edge_iterator ei;
402209ff 1456
f63d1bf7 1457 /* Nothing to do if there is not at least two incoming edges. */
628f6a4e 1458 if (EDGE_COUNT (bb->preds) < 2)
402209ff
JH
1459 return false;
1460
bbcb0c05
SB
1461 /* Don't crossjump if this block ends in a computed jump,
1462 unless we are optimizing for size. */
1463 if (!optimize_size
1464 && bb != EXIT_BLOCK_PTR
1465 && computed_jump_p (BB_END (bb)))
1466 return false;
1467
750054a2
CT
1468 /* If we are partitioning hot/cold basic blocks, we don't want to
1469 mess up unconditional or indirect jumps that cross between hot
8e8d5162 1470 and cold sections.
750054a2 1471
8e8d5162
CT
1472 Basic block partitioning may result in some jumps that appear to
1473 be optimizable (or blocks that appear to be mergeable), but which really
1474 must be left untouched (they are required to make it safely across
1475 partition boundaries). See the comments at the top of
1476 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1477
87c8b4be
CT
1478 if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
1479 BB_PARTITION (EDGE_PRED (bb, 1)->src)
1480 || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
750054a2
CT
1481 return false;
1482
402209ff
JH
1483 /* It is always cheapest to redirect a block that ends in a branch to
1484 a block that falls through into BB, as that adds no branches to the
1485 program. We'll try that combination first. */
5f24e0dc
RH
1486 fallthru = NULL;
1487 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
628f6a4e
BE
1488
1489 if (EDGE_COUNT (bb->preds) > max)
1490 return false;
1491
1492 FOR_EACH_EDGE (e, ei, bb->preds)
f5eb5fd0 1493 {
5f24e0dc 1494 if (e->flags & EDGE_FALLTHRU)
628f6a4e 1495 fallthru = e;
f5eb5fd0 1496 }
402209ff
JH
1497
1498 changed = false;
628f6a4e 1499 for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
402209ff 1500 {
628f6a4e
BE
1501 e = EDGE_PRED (ev, ix);
1502 ix++;
402209ff 1503
402209ff
JH
1504 /* As noted above, first try with the fallthru predecessor. */
1505 if (fallthru)
1506 {
1507 /* Don't combine the fallthru edge into anything else.
1508 If there is a match, we'll do it the other way around. */
1509 if (e == fallthru)
1510 continue;
7cf240d5
JH
1511 /* If nothing changed since the last attempt, there is nothing
1512 we can do. */
1513 if (!first_pass
1514 && (!(e->src->flags & BB_DIRTY)
1515 && !(fallthru->src->flags & BB_DIRTY)))
1516 continue;
402209ff
JH
1517
1518 if (try_crossjump_to_edge (mode, e, fallthru))
1519 {
1520 changed = true;
628f6a4e
BE
1521 ix = 0;
1522 ev = bb;
402209ff
JH
1523 continue;
1524 }
1525 }
1526
      /* Non-obvious work limiting check: Recognize that we're going
         to call try_crossjump_bb on every basic block.  So if we have
         two blocks with lots of outgoing edges (a switch) and they
         share lots of common destinations, then we would do the
         cross-jump check once for each common destination.

         Now, if the blocks actually are cross-jump candidates, then
         all of their destinations will be shared, which means that
         we need only check them for cross-jump candidacy once.  We
         can eliminate redundant checks of crossjump(A,B) by arbitrarily
         choosing to do the check from the block for which the edge
         in question is the first successor of A.  */
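      /* Hypothetical example (added for exposition): if switch blocks
         A and B share destinations D1 ... Dn, the pair (A,B) is
         examined only from the one edge out of A that is its first
         successor, instead of once per shared destination.  */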
      if (EDGE_SUCC (e->src, 0) != e)
        continue;

      for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
        {
          e2 = EDGE_PRED (ev2, ix2);
          ix2++;

          if (e2 == e)
            continue;

          /* We've already checked the fallthru edge above.  */
          if (e2 == fallthru)
            continue;

          /* The "first successor" check above only prevents multiple
             checks of crossjump(A,B).  In order to prevent redundant
             checks of crossjump(B,A), require that A be the block
             with the lower index.  */
          if (e->src->index > e2->src->index)
            continue;

          /* If nothing changed since the last attempt, there is nothing
             we can do.  */
          if (!first_pass
              && (!(e->src->flags & BB_DIRTY)
                  && !(e2->src->flags & BB_DIRTY)))
            continue;

          if (try_crossjump_to_edge (mode, e, e2))
            {
              changed = true;
              ev2 = bb;
              ix = 0;
              break;
            }
        }
    }

  return changed;
}
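
/* A condensed, self-contained sketch (added for exposition; not part of
   the original file, and the name is hypothetical) of the pair-pruning
   rules used in the loops above.  Given candidate predecessor edges E
   and E2 of a block, and the block's fallthru edge FALLTHRU, a pair is
   worth a crossjump check at most once:  */

static bool
crossjump_pair_worth_checking (edge e, edge e2, edge fallthru)
{
  /* The fallthru edge is never combined away, and a pair already
     tried via the fallthru path is not retried here.  */
  if (e == e2 || e == fallthru || e2 == fallthru)
    return false;

  /* Do the check only from the block whose first successor is E, so
     crossjump(A,B) is not repeated once per shared destination.  */
  if (EDGE_SUCC (e->src, 0) != e)
    return false;

  /* Break the crossjump(A,B)/crossjump(B,A) symmetry: A must be the
     block with the lower index.  */
  if (e->src->index > e2->src->index)
    return false;

  return true;
}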

/* Do simple CFG optimizations: basic block merging, simplification of
   jump instructions, etc.  Return nonzero if any changes were made.  */
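
/* Summary added for exposition: each pass over the blocks applies, in
   order, (1) deletion of trivially dead (predecessor-less) blocks,
   (2) deletion of unused code labels, (3) removal of empty fallthru
   blocks, (4) merging of a block with its sole successor, (5) branch
   over branch simplification, (6) replacement of a non-trivial jump
   by a simple one, (7) edge forwarding, and (8) crossjumping; the
   outer loop stops when a full pass makes no changes.  */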

static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;

  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  FOR_EACH_BB (bb)
    update_forwarder_flag (bb);

  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
         a block has only one successor, and the successor has only
         one predecessor, they may be combined.  */
      do
        {
          changed = false;
          iterations++;

          if (dump_file)
            fprintf (dump_file,
                     "\n\ntry_optimize_cfg iteration %i\n\n",
                     iterations);

          for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
            {
              basic_block c;
              edge s;
              bool changed_here = false;

              /* Delete trivially dead basic blocks.  */
              while (EDGE_COUNT (b->preds) == 0)
                {
                  c = b->prev_bb;
                  if (dump_file)
                    fprintf (dump_file, "Deleting block %i.\n",
                             b->index);

                  delete_basic_block (b);
                  if (!(mode & CLEANUP_CFGLAYOUT))
                    changed = true;
                  b = c;
                }

              /* Remove code labels no longer used.  */
              if (single_pred_p (b)
                  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
                  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
                  && LABEL_P (BB_HEAD (b))
                  /* If the previous block ends with a branch to this
                     block, we can't delete the label.  Normally this
                     is a condjump that is yet to be simplified, but
                     if CASE_DROPS_THRU, this can be a tablejump with
                     some element going to the same place as the
                     default (fallthru).  */
                  && (single_pred (b) == ENTRY_BLOCK_PTR
                      || !JUMP_P (BB_END (single_pred (b)))
                      || ! label_is_jump_target_p (BB_HEAD (b),
                                                   BB_END (single_pred (b)))))
                {
                  rtx label = BB_HEAD (b);

                  delete_insn_chain (label, label);
                  /* If the label is undeletable, move it after the
                     BASIC_BLOCK note.  */
                  if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
                    {
                      rtx bb_note = NEXT_INSN (BB_HEAD (b));

                      reorder_insns_nobb (label, label, bb_note);
                      BB_HEAD (b) = bb_note;
                    }
                  if (dump_file)
                    fprintf (dump_file, "Deleted label in block %i.\n",
                             b->index);
                }

              /* If we fall through an empty block, we can remove it.  */
              if (!(mode & CLEANUP_CFGLAYOUT)
                  && single_pred_p (b)
                  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
                  && !LABEL_P (BB_HEAD (b))
                  && FORWARDER_BLOCK_P (b)
                  /* Note that forwarder_block_p being true ensures that
                     this block has a successor.  */
                  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
                  && n_basic_blocks > 1)
                {
                  if (dump_file)
                    fprintf (dump_file,
                             "Deleting fallthru block %i.\n",
                             b->index);

                  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
                  redirect_edge_succ_nodup (single_pred_edge (b),
                                            single_succ (b));
                  delete_basic_block (b);
                  changed = true;
                  b = c;
                }

              if (single_succ_p (b)
                  && (s = single_succ_edge (b))
                  && !(s->flags & EDGE_COMPLEX)
                  && (c = s->dest) != EXIT_BLOCK_PTR
                  && single_pred_p (c)
                  && b != c)
                {
                  /* When not in cfg_layout mode, use code aware of
                     reordering INSNs.  That code possibly creates new
                     basic blocks, so it does not fit the merge_blocks
                     interface and is kept here in the hope that it will
                     become useless once more of the compiler is
                     transformed to use cfg_layout mode.  */

                  if ((mode & CLEANUP_CFGLAYOUT)
                      && can_merge_blocks_p (b, c))
                    {
                      merge_blocks (b, c);
                      update_forwarder_flag (b);
                      changed_here = true;
                    }
                  else if (!(mode & CLEANUP_CFGLAYOUT)
                           /* If the jump insn has side effects,
                              we can't kill the edge.  */
                           && (!JUMP_P (BB_END (b))
                               || (reload_completed
                                   ? simplejump_p (BB_END (b))
                                   : (onlyjump_p (BB_END (b))
                                      && !tablejump_p (BB_END (b),
                                                       NULL, NULL))))
                           && (next = merge_blocks_move (s, b, c, mode)))
                    {
                      b = next;
                      changed_here = true;
                    }
                }

              /* Simplify branch over branch.  */
              if ((mode & CLEANUP_EXPENSIVE)
                  && !(mode & CLEANUP_CFGLAYOUT)
                  && try_simplify_condjump (b))
                changed_here = true;

              /* If B has a single outgoing edge, but uses a
                 non-trivial jump instruction without side-effects, we
                 can either delete the jump entirely, or replace it
                 with a simple unconditional jump.  */
              if (single_succ_p (b)
                  && single_succ (b) != EXIT_BLOCK_PTR
                  && onlyjump_p (BB_END (b))
                  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
                  && try_redirect_by_replacing_jump (single_succ_edge (b),
                                                     single_succ (b),
                                                     (mode & CLEANUP_CFGLAYOUT) != 0))
                {
                  update_forwarder_flag (b);
                  changed_here = true;
                }

              /* Simplify branch to branch.  */
              if (try_forward_edges (mode, b))
                changed_here = true;

              /* Look for shared code between blocks.  */
              if ((mode & CLEANUP_CROSSJUMP)
                  && try_crossjump_bb (mode, b))
                changed_here = true;

              /* Don't get confused by the index shift caused by
                 deleting blocks.  */
              if (!changed_here)
                b = b->next_bb;
              else
                changed = true;
            }

          if ((mode & CLEANUP_CROSSJUMP)
              && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
            changed = true;

#ifdef ENABLE_CHECKING
          if (changed)
            verify_flow_info ();
#endif

          changed_overall |= changed;
          first_pass = false;
        }
      while (changed);
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  FOR_ALL_BB (b)
    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);

  return changed_overall;
}
\f
/* Delete all unreachable basic blocks.  Return true if any were
   deleted.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, next_bb;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  */

  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
    {
      next_bb = b->next_bb;

      if (!(b->flags & BB_REACHABLE))
        {
          delete_basic_block (b);
          changed = true;
        }
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}

/* Merge sequential blocks where possible.  Return true if any changes
   were made.  */

bool
merge_seq_blocks (void)
{
  basic_block bb;
  bool changed = false;

  for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
    {
      if (single_succ_p (bb)
          && can_merge_blocks_p (bb, single_succ (bb)))
        {
          /* Merge the blocks and retry.  */
          merge_blocks (bb, single_succ (bb));
          changed = true;
          continue;
        }

      bb = bb->next_bb;
    }

  return changed;
}
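
/* For illustration (added; not in the original sources): the loop above
   collapses a chain such as

     A -> B -> C    (each arrow the sole successor/predecessor edge)

   into a single block holding A's, B's and C's insns in order, retrying
   from the merged block so the whole chain folds in one pass.  */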
\f
/* Tidy the CFG by deleting unreachable code and whatnot.  Return true
   if any changes were made.  */

bool
cleanup_cfg (int mode)
{
  bool changed = false;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
         now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
          && !reload_completed)
        delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  compact_blocks ();

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks ();
      changed = true;
      if (mode & CLEANUP_UPDATE_LIFE)
        {
          /* Cleaning up the CFG introduces more opportunities for dead
             code removal, which in turn may introduce more opportunities
             for cleaning up the CFG.  */
          if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
                                                 PROP_DEATH_NOTES
                                                 | PROP_SCAN_DEAD_CODE
                                                 | PROP_KILL_DEAD_CODE
                                                 | ((mode & CLEANUP_LOG_LINKS)
                                                    ? PROP_LOG_LINKS : 0)))
            break;
        }
      else if (!(mode & CLEANUP_NO_INSN_DEL)
               && (mode & CLEANUP_EXPENSIVE)
               && !reload_completed)
        {
          if (!delete_trivially_dead_insns (get_insns (), max_reg_num ()))
            break;
        }
      else
        break;
      delete_dead_jumptables ();
    }

  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}
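
/* Hypothetical caller sketch (added for exposition; real callers such
   as rest_of_handle_jump2 below pass various mode combinations):

     if (optimize)
       cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);  */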
\f
static void
rest_of_handle_jump (void)
{
  delete_unreachable_blocks ();

  if (cfun->tail_call_emit)
    fixup_tail_calls ();
}

struct tree_opt_pass pass_jump =
{
  "sibling",                            /* name */
  NULL,                                 /* gate */
  rest_of_handle_jump,                  /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_JUMP,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_ggc_collect,                     /* todo_flags_start */
  TODO_dump_func |
  TODO_verify_flow,                     /* todo_flags_finish */
  'i'                                   /* letter */
};

static void
rest_of_handle_jump2 (void)
{
  /* Turn NOTE_INSN_EXPECTED_VALUE into REG_BR_PROB notes.  Do this
     before jump optimization switches branch directions.  */
  if (flag_guess_branch_prob)
    expected_value_to_br_prob ();

  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  reg_scan (get_insns (), max_reg_num ());
  if (dump_file)
    dump_flow_info (dump_file);
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0) | CLEANUP_PRE_LOOP
               | (flag_thread_jumps ? CLEANUP_THREADING : 0));

  create_loop_notes ();

  purge_line_number_notes ();

  if (optimize)
    cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_PRE_LOOP);

  /* Jump optimization, and the removal of NULL pointer checks, may
     have reduced the number of instructions substantially.  CSE and
     future passes allocate arrays whose dimensions involve the
     maximum instruction UID, so if we can reduce the maximum UID
     we'll save big on memory.  */
  renumber_insns (dump_file);
}

struct tree_opt_pass pass_jump2 =
{
  "jump",                               /* name */
  NULL,                                 /* gate */
  rest_of_handle_jump2,                 /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_JUMP,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_ggc_collect,                     /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  'j'                                   /* letter */
};