/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure, so the branch can be converted to a
     simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
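/* Illustrative note (not part of the original sources): a caller
   invokes the entry point with a bitmask of CLEANUP_* flags, e.g.

     cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);

   which enables both the expensive block-motion transformations and
   tail merging in a single invocation.  */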
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
/* cleanup_cfg maintains the following flags for each basic block.  */

enum bb_flags
{
    /* Set if BB is a forwarder block, to avoid too many
       forwarder_block_p calls.  */
    BB_FORWARDER_BLOCK = 1,
    BB_NONTHREADABLE_BLOCK = 2
};

#define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
#define BB_SET_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
#define BB_CLEAR_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))

#define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
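/* Usage sketch (illustrative only): the flags live in BB->aux and are
   only meaningful while cleanup_cfg owns that field, e.g.

     BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
     if (FORWARDER_BLOCK_P (bb))
       ... redirect incoming edges past BB ...
     BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);  */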
static bool try_crossjump_to_edge	PARAMS ((int, edge, edge));
static bool try_crossjump_bb		PARAMS ((int, basic_block));
static bool outgoing_edges_match	PARAMS ((int,
						 basic_block, basic_block));
static int flow_find_cross_jump		PARAMS ((int, basic_block, basic_block,
						 rtx *, rtx *));
static bool insns_match_p		PARAMS ((int, rtx, rtx));

static bool label_is_jump_target_p	PARAMS ((rtx, rtx));
static bool tail_recursion_label_p	PARAMS ((rtx));
static void merge_blocks_move_predecessor_nojumps PARAMS ((basic_block,
							   basic_block));
static void merge_blocks_move_successor_nojumps PARAMS ((basic_block,
							 basic_block));
static bool merge_blocks		PARAMS ((edge, basic_block, basic_block,
						 int));
static bool try_optimize_cfg		PARAMS ((int));
static bool try_simplify_condjump	PARAMS ((basic_block));
static bool try_forward_edges		PARAMS ((int, basic_block));
static edge thread_jump			PARAMS ((int, edge, basic_block));
static bool mark_effect			PARAMS ((rtx, bitmap));
static void notice_new_block		PARAMS ((basic_block));
static void update_forwarder_flag	PARAMS ((basic_block));
static int mentions_nonequal_regs	PARAMS ((rtx *, void *));
/* Set flags for a newly created block.  */

static void
notice_new_block (bb)
     basic_block bb;
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
}

/* Recompute forwarder flag after block has been modified.  */

static void
update_forwarder_flag (bb)
     basic_block bb;
{
  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
  else
    BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
}
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */
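/* Illustrative shape of the transformation (hypothetical labels):

     if (cond) goto L2;            if (!cond) goto L3;
     goto L3;                =>  L2:  ...
   L2:  ...

   The forwarder block holding "goto L3" is deleted and the inverted
   conditional branch falls through into L2.  */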
static bool
try_simplify_condjump (cbranch_block)
     basic_block cbranch_block;
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (!cbranch_block->succ
      || !cbranch_block->succ->succ_next
      || cbranch_block->succ->succ_next->succ_next)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = cbranch_block->end;
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (jump_block->pred->pred_next
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = jump_block->succ->dest;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (!can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (rtl_dump_file)
    fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (jump_block->end));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  flow_delete_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);

  return true;
}
/* Attempt to prove that the operation is a NOOP using CSElib, or mark
   the effect on a register.  Used by jump threading.  */
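/* Illustrative examples (hypothetical pseudo registers): for
   (set (reg 100) (reg 100)) cselib proves source and destination
   equivalent, so the SET is a NOOP and nothing is marked; for
   (set (reg 101) (reg 102)) the destination is recorded in NONEQUAL;
   for (clobber (reg 103)) the register is cleared from NONEQUAL, since
   its value is dead and need not match.  */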
static bool
mark_effect (exp, nonequal)
     rtx exp;
     regset nonequal;
{
  int regno;
  rtx dest;

  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know
	 the value is dead, so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
	{
	  dest = XEXP (exp, 0);
	  regno = REGNO (dest);
	  CLEAR_REGNO_REG_SET (nonequal, regno);
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
	      while (n-- > 0)
		CLEAR_REGNO_REG_SET (nonequal, regno + n);
	    }
	}
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
	return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
	return false;
      if (!REG_P (dest))
	return true;
      regno = REGNO (dest);
      SET_REGNO_REG_SET (nonequal, regno);
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
	  while (n-- > 0)
	    SET_REGNO_REG_SET (nonequal, regno + n);
	}
      return false;

    default:
      return false;
    }
}
/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */

static int
mentions_nonequal_regs (x, data)
     rtx *x;
     void *data;
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
	return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = HARD_REGNO_NREGS (regno, GET_MODE (*x));
	  while (n-- > 0)
	    if (REGNO_REG_SET_P (nonequal, regno + n))
	      return 1;
	}
    }
  return 0;
}
/* Attempt to prove that the basic block B will have no side effects and
   always continues in the same edge if reached via E.  Return the edge
   if it exists, NULL otherwise.  */
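/* Illustrative scenario (hypothetical registers and blocks):

     bb A:  if (x == 0) goto B;
     bb B:  if (x != 0) goto C; else goto D;

   Reached via the taken edge of A, the condition in B is fully
   determined (x == 0 dominates the reversed x != 0), so if B has no
   side effects the edge to D can be returned for threading.  */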
static edge
thread_jump (mode, e, b)
     int mode;
     edge e;
     basic_block b;
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  int i;
  regset nonequal;
  bool failed = false;

  if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
    return NULL;
  if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (e->src->end))
    return NULL;

  if (!any_condjump_p (b->end) || !onlyjump_p (b->end))
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  set1 = pc_set (e->src->end);
  set2 = pc_set (b->end);
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, e->src->end);
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, b->end);

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short-circuit cases where block B contains some side effects, as we
     can't safely bypass it.  */
  for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end);
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
	return NULL;
      }

  cselib_init ();

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (e->src->head); insn != NEXT_INSN (e->src->end);
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_XMALLOC();
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not proved equal
     between the two paths.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  /* In case liveness information is available, we need to prove equivalence
     only of the live values.  */
  if (mode & CLEANUP_UPDATE_LIFE)
    AND_REG_SET (nonequal, b->global_live_at_end);

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);

  BITMAP_XFREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_XFREE (nonequal);
  cselib_finish ();
  return NULL;
}
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */
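/* Illustrative example (hypothetical blocks): if a successor edge of B
   enters a forwarder block F that only does "goto C", the edge is
   redirected from F straight to C; chains of forwarders are followed
   to a fixed point, and the profile data of each bypassed block is
   decreased by the traversing edge's counts.  */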
static bool
try_forward_edges (mode, b)
     int mode;
     basic_block b;
{
  bool changed = false;
  edge e, next, *threaded_edges = NULL;

  for (e = b->succ; e; e = next)
    {
      basic_block target, first;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;

      next = e->succ_next;

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we can succeed to forward the
	 fallthru edge to the same place as the branch edge of a conditional
	 branch and turn the conditional branch into an unconditional
	 branch.  */
      if (e->flags & EDGE_COMPLEX)
	continue;

      target = first = e->dest;
      counter = 0;

      while (counter < n_basic_blocks)
	{
	  basic_block new_target = NULL;
	  bool new_target_threaded = false;

	  if (FORWARDER_BLOCK_P (target)
	      && target->succ->dest != EXIT_BLOCK_PTR)
	    {
	      /* Bypass trivial infinite loops.  */
	      if (target == target->succ->dest)
		counter = n_basic_blocks;
	      new_target = target->succ->dest;
	    }

	  /* Thread over only one edge at a time to simplify updating
	     of probabilities.  */
	  else if (mode & CLEANUP_THREADING)
	    {
	      edge t = thread_jump (mode, e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = xmalloc (sizeof (*threaded_edges)
					      * n_basic_blocks);
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks;
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  if (nthreaded_edges >= n_basic_blocks)
		    abort ();
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  /* Avoid killing of loop pre-headers, as it is the place the loop
	     optimizer wants to hoist code to.

	     For fallthru forwarders, the LOOP_BEG note must appear between
	     the header of the block and the CODE_LABEL of the loop; for
	     non-forwarders it must appear before the JUMP_INSN.  */
	  if (mode & CLEANUP_PRE_LOOP)
	    {
	      rtx insn = (target->succ->flags & EDGE_FALLTHRU
			  ? target->head : prev_nonnote_insn (target->end));

	      if (GET_CODE (insn) != NOTE)
		insn = NEXT_INSN (insn);

	      for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
		   insn = NEXT_INSN (insn))
		if (GET_CODE (insn) == NOTE
		    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
		  break;

	      if (GET_CODE (insn) == NOTE)
		break;

	      /* Do not clean up branches to just past the end of a loop
		 at this time; it can mess up the loop optimizer's
		 recognition of some patterns.  */

	      insn = PREV_INSN (target->head);
	      if (insn && GET_CODE (insn) == NOTE
		  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
		break;
	    }

	  counter++;
	  target = new_target;
	  threaded |= new_target_threaded;
	}

      if (counter >= n_basic_blocks)
	{
	  if (rtl_dump_file)
	    fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  gcov_type edge_count = e->count;
	  int edge_probability = e->probability;
	  int edge_frequency;
	  int n = 0;

	  /* Don't force if target is exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR)
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (rtl_dump_file)
		fprintf (rtl_dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (rtl_dump_file)
		fprintf (rtl_dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  edge_frequency = ((edge_probability * b->frequency
			     + REG_BR_PROB_BASE / 2)
			    / REG_BR_PROB_BASE);

	  if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
	    BB_SET_FLAG (b, BB_FORWARDER_BLOCK);

	  do
	    {
	      edge t;

	      first->count -= edge_count;
	      if (first->count < 0)
		first->count = 0;
	      first->frequency -= edge_frequency;
	      if (first->frequency < 0)
		first->frequency = 0;
	      if (first->succ->succ_next)
		{
		  edge e;
		  int prob;

		  if (n >= nthreaded_edges)
		    abort ();
		  t = threaded_edges[n++];
		  if (t->src != first)
		    abort ();
		  if (first->frequency)
		    prob = edge_frequency * REG_BR_PROB_BASE
			   / first->frequency;
		  else
		    prob = 0;
		  if (prob > t->probability)
		    prob = t->probability;
		  t->probability -= prob;
		  prob = REG_BR_PROB_BASE - prob;
		  if (prob <= 0)
		    {
		      first->succ->probability = REG_BR_PROB_BASE;
		      first->succ->succ_next->probability = 0;
		    }
		  else
		    for (e = first->succ; e; e = e->succ_next)
		      e->probability = ((e->probability * REG_BR_PROB_BASE)
					/ (double) prob);
		  update_br_prob_note (first);
		}
	      else
		{
		  /* It is possible that as the result of
		     threading we've removed the edge, as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges[n]->src)
		    n++;
		  t = first->succ;
		}

	      t->count -= edge_count;
	      if (t->count < 0)
		t->count = 0;
	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	}
    }

  if (threaded_edges)
    free (threaded_edges);

  return changed;
}
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

static bool
label_is_jump_target_p (label, jump_insn)
     rtx label, jump_insn;
{
  rtx tmp = JUMP_LABEL (jump_insn);

  if (label == tmp)
    return true;

  if (tmp != NULL_RTX
      && (tmp = NEXT_INSN (tmp)) != NULL_RTX
      && GET_CODE (tmp) == JUMP_INSN
      && (tmp = PATTERN (tmp),
	  GET_CODE (tmp) == ADDR_VEC
	  || GET_CODE (tmp) == ADDR_DIFF_VEC))
    {
      rtvec vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
	  return true;
    }

  return false;
}
/* Return true if LABEL is used for tail recursion.  */

static bool
tail_recursion_label_p (label)
     rtx label;
{
  rtx x;

  for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
    if (label == XEXP (x, 0))
      return true;

  return false;
}
/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */
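/* Illustrative layout change (hypothetical block order):

     ... | A | x | B | ...   =>   ... | x | A | B | ...

   A is unlinked and relinked just before B, the BARRIER after A is
   deleted, and the now-adjacent pair is merged via merge_blocks_nomove.  */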
static void
merge_blocks_move_predecessor_nojumps (a, b)
     basic_block a, b;
{
  rtx barrier;

  barrier = next_nonnote_insn (a->end);
  if (GET_CODE (barrier) != BARRIER)
    abort ();
  delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  if (squeeze_notes (&a->head, &a->end))
    abort ();

  /* Scramble the insn chain.  */
  if (a->end != PREV_INSN (b->head))
    reorder_insns_nobb (a->head, a->end, PREV_INSN (b->head));
  a->flags |= BB_DIRTY;

  if (rtl_dump_file)
    fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks_nomove (a, b);
}
/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (a, b)
     basic_block a, b;
{
  rtx barrier, real_b_end;

  real_b_end = b->end;
  barrier = NEXT_INSN (b->end);

  /* Recognize a jump table following block B.  */
  if (barrier
      && GET_CODE (barrier) == CODE_LABEL
      && NEXT_INSN (barrier)
      && GET_CODE (NEXT_INSN (barrier)) == JUMP_INSN
      && (GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_VEC
	  || GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_DIFF_VEC))
    {
      /* Temporarily add the table jump insn to b, so that it will also
	 be moved to the correct location.  */
      b->end = NEXT_INSN (barrier);
      barrier = NEXT_INSN (b->end);
    }

  /* There had better have been a barrier there.  Delete it.  */
  if (barrier && GET_CODE (barrier) == BARRIER)
    delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  if (squeeze_notes (&b->head, &b->end))
    abort ();

  /* Scramble the insn chain.  */
  reorder_insns_nobb (b->head, b->end, a->end);

  /* Restore the real end of b.  */
  b->end = real_b_end;

  if (rtl_dump_file)
    fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks_nomove (a, b);
}
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return true iff the attempt succeeded.  */

static bool
merge_blocks (e, b, c, mode)
     edge e;
     basic_block b, c;
     int mode;
{
  /* If C has a tail recursion label, do not merge.  There is no
     edge recorded from the call_placeholder back to this label, as
     that would make optimize_sibling_and_tail_recursive_calls more
     complex for no gain.  */
  if ((mode & CLEANUP_PRE_SIBCALL)
      && GET_CODE (c->head) == CODE_LABEL
      && tail_recursion_label_p (c->head))
    return false;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks_nomove (b, c);
      update_forwarder_flag (b);

      if (rtl_dump_file)
	fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
		 b_index, c_index);

      return true;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return false;

      /* We must make sure to not munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      c_has_outgoing_fallthru = (tmp_edge != NULL);

      for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return true;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
	    return false;
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	}

      merge_blocks_move_predecessor_nojumps (b, c);
      return true;
    }

  return false;
}
/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */

static bool
insns_match_p (mode, i1, i2)
     int mode ATTRIBUTE_UNUSED;
     rtx i1, i2;
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return false;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (GET_CODE (i1) == CALL_INSN
      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
			CALL_INSN_FUNCTION_USAGE (i2))
	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
    return false;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
	 death notes must also be compared before it is certain that
	 the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

      return false;

    done:
      ;
    }
#endif

  if (reload_completed
      ? ! rtx_renumbered_equal_p (p1, p2) : ! rtx_equal_p (p1, p2))
    {
      /* The following code helps take care of G++ cleanups.  */
      rtx equiv1 = find_reg_equal_equiv_note (i1);
      rtx equiv2 = find_reg_equal_equiv_note (i2);

      if (equiv1 && equiv2
	  /* If the equivalences are not to a constant, they may
	     reference pseudos that no longer exist, so we can't
	     use them.  */
	  && (! reload_completed
	      || (CONSTANT_P (XEXP (equiv1, 0))
		  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
	{
	  rtx s1 = single_set (i1);
	  rtx s2 = single_set (i2);
	  if (s1 != 0 && s2 != 0
	      && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
	    {
	      validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
	      validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
	      if (! rtx_renumbered_equal_p (p1, p2))
		cancel_changes (0);
	      else if (apply_change_group ())
		return true;
	    }
	}

      return false;
    }

  return true;
}
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of equivalent insns.  Store the first insns of that sequence
   in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */
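/* Illustrative example (hypothetical RTL): if BB1 and BB2 both end with

     (set (reg 100) (mem X)) ... (jump_insn ...)

   the loop below walks both blocks backwards in lock step, matching
   insns via insns_match_p, and stops at the first mismatch; *F1 and
   *F2 then point at the earliest insns of the common tail.  */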
static int
flow_find_cross_jump (mode, bb1, bb2, f1, f2)
     int mode ATTRIBUTE_UNUSED;
     basic_block bb1, bb2;
     rtx *f1, *f2;
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = bb1->end;
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = bb2->end;
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for unconditional jump as insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
	ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* Ignore notes.  */
      while (!active_insn_p (i1) && i1 != bb1->head)
	i1 = PREV_INSN (i1);

      while (!active_insn_p (i2) && i2 != bb2->head)
	i2 = PREV_INSN (i2);

      if (i1 == bb1->head || i2 == bb2->head)
	break;

      if (!insns_match_p (mode, i1, i2))
	break;

      /* Don't begin a cross-jump with a USE or CLOBBER insn.  */
      if (active_insn_p (i1))
	{
	  /* If the merged insns have different REG_EQUAL notes, then
	     remove them.  */
	  rtx equiv1 = find_reg_equal_equiv_note (i1);
	  rtx equiv2 = find_reg_equal_equiv_note (i2);

	  if (equiv1 && !equiv2)
	    remove_note (i1, equiv1);
	  else if (!equiv1 && equiv2)
	    remove_note (i2, equiv2);
	  else if (equiv1 && equiv2
		   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
	    {
	      remove_note (i1, equiv1);
	      remove_note (i2, equiv2);
	    }

	  afterlast1 = last1, afterlast2 = last2;
	  last1 = i1, last2 = i2;
	  ninsns++;
	}

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != bb1->head && !active_insn_p (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
	last1 = PREV_INSN (last1);

      while (last2 != bb2->head && !active_insn_p (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
	last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
/* Return true iff the outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */
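/* Illustrative example (hypothetical conditions): blocks ending in

     if (x < y) goto A; else goto B;
     if (y > x) goto A; else goto B;

   match via swap_condition, and crossed cases (branch and fallthru
   destinations exchanged) are handled by reversing the second
   comparison code before the comparison codes are checked.  */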
static bool
outgoing_edges_match (mode, bb1, bb2)
     int mode;
     basic_block bb1, bb2;
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (bb1->succ && !bb1->succ->succ_next
      && !(bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)))
    return (bb2->succ && !bb2->succ->succ_next
	    && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0);

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (bb1->succ
      && bb1->succ->succ_next
      && !bb1->succ->succ_next->succ_next
      && any_condjump_p (bb1->end)
      && onlyjump_p (bb1->end))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (!bb2->succ
	  || !bb2->succ->succ_next
	  || bb2->succ->succ_next->succ_next
	  || !any_condjump_p (bb2->end)
	  || !onlyjump_p (bb2->end))
	return false;

      /* Do not crossjump across loop boundaries.  This is a temporary
	 workaround for the common scenario in which crossjumping results
	 in killing the duplicated loop condition, making bb-reorder rotate
	 the loop incorrectly, leaving an extra unconditional jump inside
	 the loop.

	 This check should go away once bb-reorder knows how to duplicate
	 code in this case or rotate the loops to avoid this scenario.  */
      if (bb1->loop_depth != bb2->loop_depth)
	return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
	 should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
	f1 = f1->dest->succ;

      if (FORWARDER_BLOCK_P (f2->dest))
	f2 = f2->dest->succ;

      /* To simplify use of this function, return false if there are
	 unneeded forwarder blocks.  These will get eliminated later
	 during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
	  || FORWARDER_BLOCK_P (f2->dest)
	  || FORWARDER_BLOCK_P (b1->dest)
	  || FORWARDER_BLOCK_P (b2->dest))
	return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
	reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
	reverse = true;
      else
	return false;

      set1 = pc_set (bb1->end);
      set2 = pc_set (bb2->end);
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
	reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
	code2 = reversed_comparison_code (cond2, bb2->end);
      else
	code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
	return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
	       || (code1 == swap_condition (code2)
		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
					      XEXP (cond2, 0))
		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
					      XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
	 we will only have one branch prediction bit to work with.  Thus
	 we require the existing branches to have probabilities that are
	 roughly similar.  */
      if (match
	  && !optimize_size
	  && maybe_hot_bb_p (bb1)
	  && maybe_hot_bb_p (bb2))
	{
	  int prob2;

	  if (b1->dest == b2->dest)
	    prob2 = b2->probability;
	  else
	    /* Do not use f2 probability as f2 may be forwarded.  */
	    prob2 = REG_BR_PROB_BASE - b2->probability;

	  /* Fail if the difference in probabilities is greater than 50%.
	     This rules out two well-predicted branches with opposite
	     outcomes.  */
	  if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
	    {
	      if (rtl_dump_file)
		fprintf (rtl_dump_file,
			 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
			 bb1->index, bb2->index, b1->probability, prob2);

	      return false;
	    }
	}

      if (rtl_dump_file && match)
	fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
		 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* First ensure that the instructions match.  There may be many outgoing
     edges, so this test is generally cheaper.
     ??? Currently the tablejumps will never match, as they do have
     different tables.  */
  if (!insns_match_p (mode, bb1->end, bb2->end))
    return false;

  /* Search the outgoing edges, ensure that the counts do match, and find
     possible fallthru and exception handling edges, since these need
     more careful checking.  */
  for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
       e1 = e1->succ_next, e2 = e2->succ_next)
    {
      if (e1->flags & EDGE_EH)
	nehedges1++;

      if (e2->flags & EDGE_EH)
	nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
	fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
	fallthru2 = e2;
    }

  /* If the number of edges of various types does not match, fail.  */
  if (e1 || e2
      || nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* Fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
			? fallthru1->dest->succ->dest : fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
			? fallthru2->dest->succ->dest : fallthru2->dest);

      if (d1 != d2)
	return false;
    }

  /* In case we do have EH edges, ensure we are in the same region.  */
  if (nehedges1)
    {
      rtx n1 = find_reg_note (bb1->end, REG_EH_REGION, 0);
      rtx n2 = find_reg_note (bb2->end, REG_EH_REGION, 0);

      if (XEXP (n1, 0) != XEXP (n2, 0))
	return false;
    }

  /* We don't need to match the rest of the edges, as the above checks
     should be enough to ensure that they are equivalent.  */
  return true;
}
/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */
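/* Illustrative outcome (assumed typical case): if E1->SRC and E2->SRC
   both end with the same few insns before reaching the common
   destination, E2->SRC is split before the shared tail and E1->SRC is
   redirected to the split point, so the tail is kept only once.  */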
static bool
try_crossjump_to_edge (mode, e1, e2)
     int mode;
     edge e1, e2;
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  rtx newpos1, newpos2;
  edge s;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (src1->pred
      && !src1->pred->pred_next
      && FORWARDER_BLOCK_P (src1))
    e1 = src1->pred, src1 = e1->src;

  if (src2->pred
      && !src2->pred->pred_next
      && FORWARDER_BLOCK_P (src2))
    e2 = src2->pred, src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (e1->dest->succ->dest))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (e2->dest->succ->dest))
    return false;

  /* Likewise with dead code (possibly newly created by the other
     optimizations of cfg_cleanup).  */
  if (!src1->pred || !src2->pred)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
  if (!nmatch)
    return false;

  /* Avoid splitting if possible.  */
  if (newpos2 == src2->head)
    redirect_to = src2;
  else
    {
      if (rtl_dump_file)
	fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
		 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (rtl_dump_file)
    fprintf (rtl_dump_file,
	     "Cross jumping from bb %i to bb %i; %i common insns\n",
	     src1->index, src2->index, nmatch);

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;
  /* We may have some registers visible through the block.  */
  redirect_to->flags |= BB_DIRTY;

  /* Recompute the frequencies and counts of outgoing edges.  */
  for (s = redirect_to->succ; s; s = s->succ_next)
    {
      edge s2;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
	d = d->succ->dest;

      for (s2 = src1->succ; ; s2 = s2->succ_next)
	{
	  basic_block d2 = s2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = d2->succ->dest;
	  if (d == d2)
	    break;
	}

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
	 that there is no more than one in the chain, so we can't run
	 into an infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
	{
	  s->dest->succ->count += s2->count;
	  s->dest->count += s2->count;
	  s->dest->frequency += EDGE_FREQUENCY (s);
	}

      if (FORWARDER_BLOCK_P (s2->dest))
	{
	  s2->dest->succ->count -= s2->count;
	  if (s2->dest->succ->count < 0)
	    s2->dest->succ->count = 0;
	  s2->dest->count -= s2->count;
	  s2->dest->frequency -= EDGE_FREQUENCY (s);
	  if (s2->dest->frequency < 0)
	    s2->dest->frequency = 0;
	  if (s2->dest->count < 0)
	    s2->dest->count = 0;
	}

      if (!redirect_to->frequency && !src1->frequency)
	s->probability = (s->probability + s2->probability) / 2;
      else
	s->probability
	  = ((s->probability * redirect_to->frequency +
	      s2->probability * src1->frequency)
	     / (redirect_to->frequency + src1->frequency));
    }

  update_br_prob_note (redirect_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (GET_CODE (newpos1) == CODE_LABEL)
    newpos1 = NEXT_INSN (newpos1);

  if (GET_CODE (newpos1) == NOTE)
    newpos1 = NEXT_INSN (newpos1);

  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = redirect_from->succ->dest;

  redirect_edge_and_branch_force (redirect_from->succ, redirect_to);
  flow_delete_block (to_remove);

  update_forwarder_flag (redirect_from);

  return true;
}
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */

static bool
try_crossjump_bb (mode, bb)
     int mode;
     basic_block bb;
{
  edge e, e2, nexte2, nexte, fallthru;
  bool changed;
  int n = 0;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (!bb->pred || !bb->pred->pred_next)
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  for (fallthru = bb->pred; fallthru; fallthru = fallthru->pred_next, n++)
    if (fallthru->flags & EDGE_FALLTHRU)
      break;

  changed = false;
  for (e = bb->pred; e; e = nexte)
    {
      nexte = e->pred_next;

      /* As noted above, first try with the fallthru predecessor.  */
      if (fallthru)
	{
	  /* Don't combine the fallthru edge into anything else.
	     If there is a match, we'll do it the other way around.  */
	  if (e == fallthru)
	    continue;

	  if (try_crossjump_to_edge (mode, e, fallthru))
	    {
	      changed = true;
	      nexte = bb->pred;
	      continue;
	    }
	}

      /* Non-obvious work limiting check: Recognize that we're going
	 to call try_crossjump_bb on every basic block.  So if we have
	 two blocks with lots of outgoing edges (a switch) and they
	 share lots of common destinations, then we would do the
	 cross-jump check once for each common destination.

	 Now, if the blocks actually are cross-jump candidates, then
	 all of their destinations will be shared.  Which means that
	 we only need check them for cross-jump candidacy once.  We
	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
	 choosing to do the check from the block for which the edge
	 in question is the first successor of A.  */
      if (e->src->succ != e)
	continue;

      for (e2 = bb->pred; e2; e2 = nexte2)
	{
	  nexte2 = e2->pred_next;

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
	  if (e->src->index > e2->src->index)
	    continue;

	  if (try_crossjump_to_edge (mode, e, e2))
	    {
	      changed = true;
	      nexte = bb->pred;
	      break;
	    }
	}
    }

  return changed;
}
/* Do simple CFG optimizations - basic block merging, simplification of jump
   instructions, etc.  Return nonzero if changes were made.  */

static bool
try_optimize_cfg (mode)
     int mode;
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b;

  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  FOR_EACH_BB (bb)
    update_forwarder_flag (bb);

  if (mode & CLEANUP_UPDATE_LIFE)
    clear_bb_flags ();

  if (! (* targetm.cannot_modify_jumps_p) ())
    {
      /* Attempt to merge blocks as made possible by edge removal.  If
	 a block has only one successor, and the successor has only
	 one predecessor, they may be combined.  */
      do
	{
	  changed = false;
	  iterations++;

	  if (rtl_dump_file)
	    fprintf (rtl_dump_file,
		     "\n\ntry_optimize_cfg iteration %i\n\n",
		     iterations);

	  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
	    {
	      basic_block c;
	      edge s;
	      bool changed_here = false;

	      /* Delete trivially dead basic blocks.  */
	      while (b->pred == NULL)
		{
		  c = b->prev_bb;
		  if (rtl_dump_file)
		    fprintf (rtl_dump_file, "Deleting block %i.\n",
			     b->index);

		  flow_delete_block (b);
		  changed = true;
		  b = c;
		}

	      /* Remove code labels no longer used.  Don't do this
		 before CALL_PLACEHOLDER is removed, as some branches
		 may be hidden within.  */
	      if (b->pred->pred_next == NULL
		  && (b->pred->flags & EDGE_FALLTHRU)
		  && !(b->pred->flags & EDGE_COMPLEX)
		  && GET_CODE (b->head) == CODE_LABEL
		  && (!(mode & CLEANUP_PRE_SIBCALL)
		      || !tail_recursion_label_p (b->head))
		  /* If the previous block ends with a branch to this
		     block, we can't delete the label.  Normally this
		     is a condjump that is yet to be simplified, but
		     if CASE_DROPS_THRU, this can be a tablejump with
		     some element going to the same place as the
		     default (fallthru).  */
		  && (b->pred->src == ENTRY_BLOCK_PTR
		      || GET_CODE (b->pred->src->end) != JUMP_INSN
		      || ! label_is_jump_target_p (b->head,
						   b->pred->src->end)))
		{
		  rtx label = b->head;

		  b->head = NEXT_INSN (b->head);
		  delete_insn_chain (label, label);
		  if (rtl_dump_file)
		    fprintf (rtl_dump_file, "Deleted label in block %i.\n",
			     b->index);
		}

	      /* If we fall through an empty block, we can remove it.  */
	      if (b->pred->pred_next == NULL
		  && (b->pred->flags & EDGE_FALLTHRU)
		  && GET_CODE (b->head) != CODE_LABEL
		  && FORWARDER_BLOCK_P (b)
		  /* Note that forwarder_block_p true ensures that
		     there is a successor for this block.  */
		  && (b->succ->flags & EDGE_FALLTHRU)
		  && n_basic_blocks > 1)
		{
		  if (rtl_dump_file)
		    fprintf (rtl_dump_file,
			     "Deleting fallthru block %i.\n",
			     b->index);

		  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
		  redirect_edge_succ_nodup (b->pred, b->succ->dest);
		  flow_delete_block (b);
		  changed = true;
		  b = c;
		}

	      /* Merge blocks.  Loop because chains of blocks might be
		 combinable.  */
	      while ((s = b->succ) != NULL
		     && s->succ_next == NULL
		     && !(s->flags & EDGE_COMPLEX)
		     && (c = s->dest) != EXIT_BLOCK_PTR
		     && c->pred->pred_next == NULL
		     && b != c
		     /* If the jump insn has side effects,
			we can't kill the edge.  */
		     && (GET_CODE (b->end) != JUMP_INSN
			 || simplejump_p (b->end))
		     && merge_blocks (s, b, c, mode))
		changed_here = true;

	      /* Simplify branch over branch.  */
	      if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b))
		changed_here = true;

	      /* If B has a single outgoing edge, but uses a
		 non-trivial jump instruction without side-effects, we
		 can either delete the jump entirely, or replace it
		 with a simple unconditional jump.  Use
		 redirect_edge_and_branch to do the dirty work.  */
	      if (b->succ
		  && ! b->succ->succ_next
		  && b->succ->dest != EXIT_BLOCK_PTR
		  && onlyjump_p (b->end)
		  && redirect_edge_and_branch (b->succ, b->succ->dest))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Simplify branch to branch.  */
	      if (try_forward_edges (mode, b))
		changed_here = true;

	      /* Look for shared code between blocks.  */
	      if ((mode & CLEANUP_CROSSJUMP)
		  && try_crossjump_bb (mode, b))
		changed_here = true;

	      /* Don't get confused by the index shift caused by
		 deleting blocks.  */
	      if (!changed_here)
		b = b->next_bb;
	      else
		changed = true;
	    }

	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
	    changed = true;

#ifdef ENABLE_CHECKING
	  if (changed)
	    verify_flow_info ();
#endif

	  changed_overall |= changed;
	}
      while (changed);
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_edges ();

  clear_aux_for_blocks ();

  return changed_overall;
}
/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks ()
{
  bool changed = false;
  basic_block b, next_bb;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  */

  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
    {
      next_bb = b->next_bb;

      if (!(b->flags & BB_REACHABLE))
	{
	  flow_delete_block (b);
	  changed = true;
	}
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
/* Tidy the CFG by deleting unreachable code and whatnot.  */

bool
cleanup_cfg (mode)
     int mode;
{
  bool changed = false;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
	 now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL
		    | CLEANUP_UPDATE_LIFE | CLEANUP_PRE_SIBCALL))
	  && !reload_completed)
	delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (mode & CLEANUP_UPDATE_LIFE)
	{
	  /* Cleaning up the CFG introduces more opportunities for dead code
	     removal, which in turn may introduce more opportunities for
	     cleaning up the CFG.  */
	  if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
						 PROP_DEATH_NOTES
						 | PROP_SCAN_DEAD_CODE
						 | PROP_KILL_DEAD_CODE
						 | PROP_LOG_LINKS))
	    break;
	}
      else if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_PRE_SIBCALL))
	       && !reload_completed)
	{
	  if (!delete_trivially_dead_insns (get_insns (), max_reg_num ()))
	    break;
	  delete_dead_jumptables ();
	}
    }

  /* Kill the data we won't maintain.  */
  free_EXPR_LIST_list (&label_value_list);
  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}