/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure, so the branch can be converted to a
     simplejump or eliminated.)
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"
#include "cselib.h"
#include "tm_p.h"
#include "target.h"
/* cleanup_cfg maintains the following flags for each basic block.  */

enum bb_flags
{
    /* Set if BB is a forwarder block, to avoid too many
       forwarder_block_p calls.  */
    BB_FORWARDER_BLOCK = 1,
    BB_NONTHREADABLE_BLOCK = 2
};
#define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
#define BB_SET_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
#define BB_CLEAR_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))

#define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
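
/* Editorial note: a usage sketch of the flag macros above.  The
   per-block `aux' field is borrowed as a flag word for the duration of
   the pass, e.g.

     if (forwarder_block_p (bb))
       BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
     ...
     if (FORWARDER_BLOCK_P (bb))
       ...

   clear_aux_for_blocks () at the end of try_optimize_cfg returns `aux'
   to other passes when the cleanup is done.  */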
static bool try_crossjump_to_edge	PARAMS ((int, edge, edge));
static bool try_crossjump_bb		PARAMS ((int, basic_block));
static bool outgoing_edges_match	PARAMS ((int, basic_block, basic_block));
static int flow_find_cross_jump		PARAMS ((int, basic_block, basic_block,
						 rtx *, rtx *));
static bool insns_match_p		PARAMS ((int, rtx, rtx));

static bool delete_unreachable_blocks	PARAMS ((void));
static bool label_is_jump_target_p	PARAMS ((rtx, rtx));
static bool tail_recursion_label_p	PARAMS ((rtx));
static void merge_blocks_move_predecessor_nojumps
					PARAMS ((basic_block, basic_block));
static void merge_blocks_move_successor_nojumps
					PARAMS ((basic_block, basic_block));
static bool merge_blocks		PARAMS ((edge, basic_block, basic_block,
						 int));
static bool try_optimize_cfg		PARAMS ((int));
static bool try_simplify_condjump	PARAMS ((basic_block));
static bool try_forward_edges		PARAMS ((int, basic_block));
static edge thread_jump			PARAMS ((int, edge, basic_block));
static bool mark_effect			PARAMS ((rtx, bitmap));
static void notice_new_block		PARAMS ((basic_block));
static void update_forwarder_flag	PARAMS ((basic_block));
static int mentions_nonequal_regs	PARAMS ((rtx *, void *));
/* Set flags for a newly created block.  */

static void
notice_new_block (bb)
     basic_block bb;
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
}

/* Recompute forwarder flag after a block has been modified.  */

static void
update_forwarder_flag (bb)
     basic_block bb;
{
  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
  else
    BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
}
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */

static bool
try_simplify_condjump (cbranch_block)
     basic_block cbranch_block;
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (!cbranch_block->succ
      || !cbranch_block->succ->succ_next
      || cbranch_block->succ->succ_next->succ_next)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = cbranch_block->end;
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (jump_block->pred->pred_next
      || jump_block->index == n_basic_blocks - 1
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = jump_block->succ->dest;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (!can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (rtl_dump_file)
    fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (jump_block->end));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  flow_delete_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);

  return true;
}
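
/* Editorial illustration of the transformation above:

   before:                          after:
     if (cond) goto DEST;             if (!cond) goto JUMP_DEST;
     goto JUMP_DEST;                DEST:
   DEST:                              ...

   The conditional branch is inverted to target the unconditional
   jump's destination, the jump block is deleted, and the old branch
   edge becomes the new fallthru.  */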
/* Attempt to prove that the operation is a NOOP using CSElib, or mark the
   effect on the register.  Used by jump threading.  */

static bool
mark_effect (exp, nonequal)
     rtx exp;
     regset nonequal;
{
  int regno;
  rtx dest;

  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
	 value is dead, so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
	{
	  dest = XEXP (exp, 0);
	  regno = REGNO (dest);
	  CLEAR_REGNO_REG_SET (nonequal, regno);
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
	      while (n-- > 0)
		CLEAR_REGNO_REG_SET (nonequal, regno + n);
	    }
	}
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
	return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
	return false;
      if (!REG_P (dest))
	return true;
      regno = REGNO (dest);
      SET_REGNO_REG_SET (nonequal, regno);
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
	  while (n-- > 0)
	    SET_REGNO_REG_SET (nonequal, regno + n);
	}
      return false;

    default:
      return false;
    }
}
/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */

static int
mentions_nonequal_regs (x, data)
     rtx *x;
     void *data;
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
	return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = HARD_REGNO_NREGS (regno, GET_MODE (*x));
	  while (n--)
	    if (REGNO_REG_SET_P (nonequal, regno + n))
	      return 1;
	}
    }
  return 0;
}
/* Attempt to prove that the basic block B will have no side effects and
   always continues in the same edge if reached via E.  Return the edge
   if it exists, NULL otherwise.  */

static edge
thread_jump (mode, e, b)
     int mode;
     edge e;
     basic_block b;
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  int i;
  regset nonequal;
  bool failed = false;

  if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
    return NULL;
  if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (e->src->end))
    return NULL;

  if (!any_condjump_p (b->end) || !onlyjump_p (b->end))
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  set1 = pc_set (e->src->end);
  set2 = pc_set (b->end);
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, e->src->end);
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, b->end);

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example interval comparisons, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we
     can't safely bypass it.  */
  for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end);
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
	return NULL;
      }

  cselib_init ();

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (e->src->head); insn != NEXT_INSN (e->src->end);
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_XMALLOC();
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  /* In case liveness information is available, we need to prove equivalence
     only of the live values.  */
  if (mode & CLEANUP_UPDATE_LIFE)
    AND_REG_SET (nonequal, b->global_live_at_end);

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);

  BITMAP_XFREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_XFREE (nonequal);
  cselib_finish ();
  return NULL;
}
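
/* Editorial example of the dominance test used in thread_jump: if the
   first branch tested (eq x 0) and was taken, then a later branch
   testing (le x 0) on the identical operands must be taken as well,
   since EQ dominates LE.  When the block in between is proven to be a
   NOOP, thread_jump returns the edge the second branch would take, so
   the caller can route E directly to that destination.  */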
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */

static bool
try_forward_edges (mode, b)
     int mode;
     basic_block b;
{
  bool changed = false;
  edge e, next, *threaded_edges = NULL;

  for (e = b->succ; e; e = next)
    {
      basic_block target, first;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;

      next = e->succ_next;

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we can forward the fallthru
	 edge to the same place as the branch edge of a conditional branch
	 and turn the conditional branch into an unconditional branch.  */
      if (e->flags & EDGE_COMPLEX)
	continue;

      target = first = e->dest;
      counter = 0;

      while (counter < n_basic_blocks)
	{
	  basic_block new_target = NULL;
	  bool new_target_threaded = false;

	  if (FORWARDER_BLOCK_P (target)
	      && target->succ->dest != EXIT_BLOCK_PTR)
	    {
	      /* Bypass trivial infinite loops.  */
	      if (target == target->succ->dest)
		counter = n_basic_blocks;
	      new_target = target->succ->dest;
	    }

	  /* Thread over only one edge at a time to simplify updating
	     of probabilities.  */
	  else if (mode & CLEANUP_THREADING)
	    {
	      edge t = thread_jump (mode, e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = xmalloc (sizeof (*threaded_edges)
					      * n_basic_blocks);
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks;
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  if (nthreaded_edges >= n_basic_blocks)
		    abort ();
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  /* Avoid killing loop pre-headers, as it is the place the loop
	     optimizer wants to hoist code to.

	     For fallthru forwarders, the LOOP_BEG note must appear between
	     the header of the block and the CODE_LABEL of the loop; for
	     non-forwarders it must appear before the JUMP_INSN.  */
	  if (mode & CLEANUP_PRE_LOOP)
	    {
	      rtx insn = (target->succ->flags & EDGE_FALLTHRU
			  ? target->head : prev_nonnote_insn (target->end));

	      if (GET_CODE (insn) != NOTE)
		insn = NEXT_INSN (insn);

	      for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
		   insn = NEXT_INSN (insn))
		if (GET_CODE (insn) == NOTE
		    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
		  break;

	      if (GET_CODE (insn) == NOTE)
		break;
	    }

	  counter++;
	  target = new_target;
	  threaded |= new_target_threaded;
	}

      if (counter >= n_basic_blocks)
	{
	  if (rtl_dump_file)
	    fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  gcov_type edge_count = e->count;
	  int edge_probability = e->probability;
	  int edge_frequency;
	  int n = 0;

	  /* Don't force if target is exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR)
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (rtl_dump_file)
		fprintf (rtl_dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (rtl_dump_file)
		fprintf (rtl_dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  edge_frequency = ((edge_probability * b->frequency
			     + REG_BR_PROB_BASE / 2)
			    / REG_BR_PROB_BASE);

	  if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
	    BB_SET_FLAG (b, BB_FORWARDER_BLOCK);

	  do
	    {
	      edge t;

	      first->count -= edge_count;
	      if (first->count < 0)
		first->count = 0;
	      first->frequency -= edge_frequency;
	      if (first->frequency < 0)
		first->frequency = 0;
	      if (first->succ->succ_next)
		{
		  edge e;
		  int prob;

		  if (n >= nthreaded_edges)
		    abort ();
		  t = threaded_edges[n++];
		  if (t->src != first)
		    abort ();
		  if (first->frequency)
		    prob = edge_frequency * REG_BR_PROB_BASE
			   / first->frequency;
		  else
		    prob = 0;
		  if (prob > t->probability)
		    prob = t->probability;
		  t->probability -= prob;
		  prob = REG_BR_PROB_BASE - prob;
		  if (prob <= 0)
		    {
		      first->succ->probability = REG_BR_PROB_BASE;
		      first->succ->succ_next->probability = 0;
		    }
		  else
		    for (e = first->succ; e; e = e->succ_next)
		      e->probability = ((e->probability * REG_BR_PROB_BASE)
					/ prob);
		  update_br_prob_note (first);
		}
	      else
		{
		  /* It is possible that as the result of
		     threading we've removed the edge, as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges[n]->src)
		    n++;
		  t = first->succ;
		}

	      t->count -= edge_count;
	      if (t->count < 0)
		t->count = 0;
	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	}
    }

  if (threaded_edges)
    free (threaded_edges);
  return changed;
}
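
/* Editorial example for try_forward_edges: given a chain
   B -> F1 -> F2 -> DEST where F1 and F2 are forwarder blocks, the
   while loop above walks TARGET through F1, F2 and DEST, redirects
   B's edge straight to DEST, and the do/while loop then subtracts the
   forwarded edge's count and frequency from F1 and F2.  */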
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

static bool
label_is_jump_target_p (label, jump_insn)
     rtx label, jump_insn;
{
  rtx tmp = JUMP_LABEL (jump_insn);

  if (label == tmp)
    return true;

  if (tmp != NULL_RTX
      && (tmp = NEXT_INSN (tmp)) != NULL_RTX
      && GET_CODE (tmp) == JUMP_INSN
      && (tmp = PATTERN (tmp),
	  GET_CODE (tmp) == ADDR_VEC
	  || GET_CODE (tmp) == ADDR_DIFF_VEC))
    {
      rtvec vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
	  return true;
    }

  return false;
}
/* Return true if LABEL is used for tail recursion.  */

static bool
tail_recursion_label_p (label)
     rtx label;
{
  rtx x;

  for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
    if (label == XEXP (x, 0))
      return true;

  return false;
}
/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (a, b)
     basic_block a, b;
{
  rtx barrier;
  int index;

  barrier = next_nonnote_insn (a->end);
  if (GET_CODE (barrier) != BARRIER)
    abort ();
  delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  if (squeeze_notes (&a->head, &a->end))
    abort ();

  /* Scramble the insn chain.  */
  if (a->end != PREV_INSN (b->head))
    reorder_insns_nobb (a->head, a->end, PREV_INSN (b->head));
  a->flags |= BB_DIRTY;

  if (rtl_dump_file)
    fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  Although we are deleting B,
     A is now where B was and we want to compact the BB array from where
     A used to be.  */
  BASIC_BLOCK (a->index) = b;
  BASIC_BLOCK (b->index) = a;
  index = a->index;
  a->index = b->index;
  b->index = index;

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks_nomove (a, b);
}
/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (a, b)
     basic_block a, b;
{
  rtx barrier, real_b_end;

  real_b_end = b->end;
  barrier = NEXT_INSN (b->end);

  /* Recognize a jump table following block B.  */
  if (barrier
      && GET_CODE (barrier) == CODE_LABEL
      && NEXT_INSN (barrier)
      && GET_CODE (NEXT_INSN (barrier)) == JUMP_INSN
      && (GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_VEC
	  || GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_DIFF_VEC))
    {
      /* Temporarily add the table jump insn to b, so that it will also
	 be moved to the correct location.  */
      b->end = NEXT_INSN (barrier);
      barrier = NEXT_INSN (b->end);
    }

  /* There had better have been a barrier there.  Delete it.  */
  if (barrier && GET_CODE (barrier) == BARRIER)
    delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  if (squeeze_notes (&b->head, &b->end))
    abort ();

  /* Scramble the insn chain.  */
  reorder_insns_nobb (b->head, b->end, a->end);

  /* Restore the real end of b.  */
  b->end = real_b_end;

  if (rtl_dump_file)
    fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks_nomove (a, b);
}
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return true iff the attempt succeeded.  */

static bool
merge_blocks (e, b, c, mode)
     edge e;
     basic_block b, c;
     int mode;
{
  /* If C has a tail recursion label, do not merge.  There is no
     edge recorded from the call_placeholder back to this label, as
     that would make optimize_sibling_and_tail_recursive_calls more
     complex for no gain.  */
  if ((mode & CLEANUP_PRE_SIBCALL)
      && GET_CODE (c->head) == CODE_LABEL
      && tail_recursion_label_p (c->head))
    return false;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks_nomove (b, c);
      update_forwarder_flag (b);

      if (rtl_dump_file)
	fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
		 b_index, c_index);

      return true;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return false;

      /* We must make sure to not munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      c_has_outgoing_fallthru = (tmp_edge != NULL);

      for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return true;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
	    return false;
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	}

      merge_blocks_move_predecessor_nojumps (b, c);
      return true;
    }

  return false;
}
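
/* Editorial summary of the merge_blocks strategy above:
   - B falls through to C: merge in place;
   - C has no outgoing fallthru: move C up to just after B;
   - B has no incoming fallthru (after forcing the entry fallthru
     into a jump if needed): move B down to just before C;
   - otherwise give up and return false.  */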
/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */

static bool
insns_match_p (mode, i1, i2)
     int mode ATTRIBUTE_UNUSED;
     rtx i1, i2;
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return false;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (GET_CODE (i1) == CALL_INSN
      && !rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
		       CALL_INSN_FUNCTION_USAGE (i2)))
    return false;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
	 death notes must also be compared before it is certain that
	 the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

      return false;

    done:
      ;
    }
#endif

  if (reload_completed
      ? ! rtx_renumbered_equal_p (p1, p2) : ! rtx_equal_p (p1, p2))
    {
      /* The following code helps take care of G++ cleanups.  */
      rtx equiv1 = find_reg_equal_equiv_note (i1);
      rtx equiv2 = find_reg_equal_equiv_note (i2);

      if (equiv1 && equiv2
	  /* If the equivalences are not to a constant, they may
	     reference pseudos that no longer exist, so we can't
	     use them.  */
	  && (! reload_completed
	      || (CONSTANT_P (XEXP (equiv1, 0))
		  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
	{
	  rtx s1 = single_set (i1);
	  rtx s2 = single_set (i2);
	  if (s1 != 0 && s2 != 0
	      && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
	    {
	      validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
	      validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
	      if (! rtx_renumbered_equal_p (p1, p2))
		cancel_changes (0);
	      else if (apply_change_group ())
		return true;
	    }
	}

      return false;
    }

  return true;
}
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of equivalent insns.  Store the first insns for that sequence
   in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */

static int
flow_find_cross_jump (mode, bb1, bb2, f1, f2)
     int mode ATTRIBUTE_UNUSED;
     basic_block bb1, bb2;
     rtx *f1, *f2;
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = bb1->end;
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = bb2->end;
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except the unconditional jump as an insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
	ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* Ignore notes.  */
      while (!active_insn_p (i1) && i1 != bb1->head)
	i1 = PREV_INSN (i1);

      while (!active_insn_p (i2) && i2 != bb2->head)
	i2 = PREV_INSN (i2);

      if (i1 == bb1->head || i2 == bb2->head)
	break;

      if (!insns_match_p (mode, i1, i2))
	break;

      /* Don't begin a cross-jump with a USE or CLOBBER insn.  */
      if (active_insn_p (i1))
	{
	  /* If the merged insns have different REG_EQUAL notes, then
	     remove them.  */
	  rtx equiv1 = find_reg_equal_equiv_note (i1);
	  rtx equiv2 = find_reg_equal_equiv_note (i2);

	  if (equiv1 && !equiv2)
	    remove_note (i1, equiv1);
	  else if (!equiv1 && equiv2)
	    remove_note (i2, equiv2);
	  else if (equiv1 && equiv2
		   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
	    {
	      remove_note (i1, equiv1);
	      remove_note (i2, equiv2);
	    }

	  afterlast1 = last1, afterlast2 = last2;
	  last1 = i1, last2 = i2;
	  ninsns++;
	}

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != bb1->head && !active_insn_p (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
	last1 = PREV_INSN (last1);

      while (last2 != bb2->head && !active_insn_p (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
	last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
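
/* Editorial example for flow_find_cross_jump: with block tails

     BB1: ...; x = a + b; y = x * 2; goto L;
     BB2: ...; x = a + b; y = x * 2; goto L;

   the backward walk above matches the two insn pairs, returns 2, and
   leaves *F1 and *F2 pointing at the respective `x = a + b' insns.  */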
/* Return true iff outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */

static bool
outgoing_edges_match (mode, bb1, bb2)
     int mode;
     basic_block bb1, bb2;
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (bb1->succ && !bb1->succ->succ_next
      && !(bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)))
    return (bb2->succ && !bb2->succ->succ_next
	    && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0);

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (bb1->succ
      && bb1->succ->succ_next
      && !bb1->succ->succ_next->succ_next
      && any_condjump_p (bb1->end)
      && onlyjump_p (bb1->end))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (!bb2->succ
	  || !bb2->succ->succ_next
	  || bb2->succ->succ_next->succ_next
	  || !any_condjump_p (bb2->end)
	  || !onlyjump_p (bb2->end))
	return false;

      /* Do not crossjump across loop boundaries.  This is a temporary
	 workaround for the common scenario in which crossjumping results
	 in killing the duplicated loop condition, making bb-reorder rotate
	 the loop incorrectly, leaving an extra unconditional jump inside
	 the loop.

	 This check should go away once bb-reorder knows how to duplicate
	 code in this case or rotate the loops to avoid this scenario.  */
      if (bb1->loop_depth != bb2->loop_depth)
	return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
	 should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
	f1 = f1->dest->succ;

      if (FORWARDER_BLOCK_P (f2->dest))
	f2 = f2->dest->succ;

      /* To simplify use of this function, return false if there are
	 unneeded forwarder blocks.  These will get eliminated later
	 during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
	  || FORWARDER_BLOCK_P (f2->dest)
	  || FORWARDER_BLOCK_P (b1->dest)
	  || FORWARDER_BLOCK_P (b2->dest))
	return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
	reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
	reverse = true;
      else
	return false;

      set1 = pc_set (bb1->end);
      set2 = pc_set (bb2->end);
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
	reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
	code2 = reversed_comparison_code (cond2, bb2->end);
      else
	code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
	return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
	       || (code1 == swap_condition (code2)
		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
					      XEXP (cond2, 0))
		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
					      XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
	 we will only have one branch prediction bit to work with.  Thus
	 we require the existing branches to have probabilities that are
	 roughly similar.  */
      if (match
	  && !optimize_size
	  && bb1->frequency > BB_FREQ_MAX / 1000
	  && bb2->frequency > BB_FREQ_MAX / 1000)
	{
	  int prob2;

	  if (b1->dest == b2->dest)
	    prob2 = b2->probability;
	  else
	    /* Do not use f2 probability as f2 may be forwarded.  */
	    prob2 = REG_BR_PROB_BASE - b2->probability;

	  /* Fail if the difference in probabilities is greater than 50%.
	     This rules out two well-predicted branches with opposite
	     outcomes.  */
	  if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
	    {
	      if (rtl_dump_file)
		fprintf (rtl_dump_file,
			 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
			 bb1->index, bb2->index, b1->probability, prob2);

	      return false;
	    }
	}

      if (rtl_dump_file && match)
	fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
		 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.
     ??? Currently the tablejumps will never match, as they do have
     different tables.  */
  if (!insns_match_p (mode, bb1->end, bb2->end))
    return false;

  /* Search the outgoing edges, ensure that the counts do match, find possible
     fallthru and exception handling edges since these need more
     careful checking.  */
  for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
       e1 = e1->succ_next, e2 = e2->succ_next)
    {
      if (e1->flags & EDGE_EH)
	nehedges1++;

      if (e2->flags & EDGE_EH)
	nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
	fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
	fallthru2 = e2;
    }

  /* If the number of edges of various types does not match, fail.  */
  if (e1 || e2
      || nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
			? fallthru1->dest->succ->dest : fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
			? fallthru2->dest->succ->dest : fallthru2->dest);

      if (d1 != d2)
	return false;
    }

  /* In case we do have EH edges, ensure we are in the same region.  */
  if (nehedges1)
    {
      rtx n1 = find_reg_note (bb1->end, REG_EH_REGION, 0);
      rtx n2 = find_reg_note (bb2->end, REG_EH_REGION, 0);

      if (XEXP (n1, 0) != XEXP (n2, 0))
	return false;
    }

  /* We don't need to match the rest of the edges as the above checks should
     be enough to ensure that they are equivalent.  */
  return true;
}
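
/* Editorial example of the probability guard in outgoing_edges_match:
   with REG_BR_PROB_BASE of 10000, a branch taken with probability
   9000 in BB1 but only 1000 in BB2 differs by 8000 > 10000 / 2, so
   the match is rejected even though the conditions are identical.  */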
/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */

static bool
try_crossjump_to_edge (mode, e1, e2)
     int mode;
     edge e1, e2;
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to;
  rtx newpos1, newpos2;
  edge s;
  rtx last;
  rtx label;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (src1->pred
      && !src1->pred->pred_next
      && FORWARDER_BLOCK_P (src1))
    e1 = src1->pred, src1 = e1->src;

  if (src2->pred
      && !src2->pred->pred_next
      && FORWARDER_BLOCK_P (src2))
    e2 = src2->pred, src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (e1->dest->succ->dest))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (e2->dest->succ->dest))
    return false;

  /* Likewise with dead code (possibly newly created by the other
     optimizations of cfg_cleanup).  */
  if (!src1->pred || !src2->pred)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
  if (!nmatch)
    return false;

  /* Avoid splitting if possible.  */
  if (newpos2 == src2->head)
    redirect_to = src2;
  else
    {
      if (rtl_dump_file)
	fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
		 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (rtl_dump_file)
    fprintf (rtl_dump_file,
	     "Cross jumping from bb %i to bb %i; %i common insns\n",
	     src1->index, src2->index, nmatch);

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;
  /* We may have some registers visible through the block.  */
  redirect_to->flags |= BB_DIRTY;

  /* Recompute the frequencies and counts of outgoing edges.  */
  for (s = redirect_to->succ; s; s = s->succ_next)
    {
      edge s2;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
	d = d->succ->dest;

      for (s2 = src1->succ; ; s2 = s2->succ_next)
	{
	  basic_block d2 = s2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = d2->succ->dest;
	  if (d == d2)
	    break;
	}

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
	 that there is no more than one in the chain, so we can't run
	 into an infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
	{
	  s->dest->succ->count += s2->count;
	  s->dest->count += s2->count;
	  s->dest->frequency += EDGE_FREQUENCY (s);
	}

      if (FORWARDER_BLOCK_P (s2->dest))
	{
	  s2->dest->succ->count -= s2->count;
	  if (s2->dest->succ->count < 0)
	    s2->dest->succ->count = 0;
	  s2->dest->count -= s2->count;
	  s2->dest->frequency -= EDGE_FREQUENCY (s);
	  if (s2->dest->frequency < 0)
	    s2->dest->frequency = 0;
	  if (s2->dest->count < 0)
	    s2->dest->count = 0;
	}

      if (!redirect_to->frequency && !src1->frequency)
	s->probability = (s->probability + s2->probability) / 2;
      else
	s->probability
	  = ((s->probability * redirect_to->frequency +
	      s2->probability * src1->frequency)
	     / (redirect_to->frequency + src1->frequency));
    }

  update_br_prob_note (redirect_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (GET_CODE (newpos1) == CODE_LABEL)
    newpos1 = NEXT_INSN (newpos1);

  if (GET_CODE (newpos1) == NOTE)
    newpos1 = NEXT_INSN (newpos1);
  last = src1->end;

  /* Emit the jump insn.  */
  label = block_label (redirect_to);
  emit_jump_insn_after (gen_jump (label), src1->end);
  JUMP_LABEL (src1->end) = label;
  LABEL_NUSES (label)++;

  /* Delete the now unreachable instructions.  */
  delete_insn_chain (newpos1, last);

  /* Make sure there is a barrier after the new jump.  */
  last = next_nonnote_insn (src1->end);
  if (!last || GET_CODE (last) != BARRIER)
    emit_barrier_after (src1->end);

  /* Update the CFG.  */
  remove_edge (src1->succ);
  make_single_succ_edge (src1, redirect_to, 0);

  update_forwarder_flag (src1);

  return true;
}
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */

static bool
try_crossjump_bb (mode, bb)
     int mode;
     basic_block bb;
{
  edge e, e2, nexte2, nexte, fallthru;
  bool changed;
  int n = 0;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (!bb->pred || !bb->pred->pred_next)
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  for (fallthru = bb->pred; fallthru; fallthru = fallthru->pred_next, n++)
    if (fallthru->flags & EDGE_FALLTHRU)
      break;

  changed = false;
  for (e = bb->pred; e; e = nexte)
    {
      nexte = e->pred_next;

      /* As noted above, first try with the fallthru predecessor.  */
      if (fallthru)
	{
	  /* Don't combine the fallthru edge into anything else.
	     If there is a match, we'll do it the other way around.  */
	  if (e == fallthru)
	    continue;

	  if (try_crossjump_to_edge (mode, e, fallthru))
	    {
	      changed = true;
	      nexte = bb->pred;
	      continue;
	    }
	}

      /* Non-obvious work limiting check: Recognize that we're going
	 to call try_crossjump_bb on every basic block.  So if we have
	 two blocks with lots of outgoing edges (a switch) and they
	 share lots of common destinations, then we would do the
	 cross-jump check once for each common destination.

	 Now, if the blocks actually are cross-jump candidates, then
	 all of their destinations will be shared.  Which means that
	 we only need check them for cross-jump candidacy once.  We
	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
	 choosing to do the check from the block for which the edge
	 in question is the first successor of A.  */
      if (e->src->succ != e)
	continue;

      for (e2 = bb->pred; e2; e2 = nexte2)
	{
	  nexte2 = e2->pred_next;

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
	  if (e->src->index > e2->src->index)
	    continue;

	  if (try_crossjump_to_edge (mode, e, e2))
	    {
	      changed = true;
	      nexte = bb->pred;
	      break;
	    }
	}
    }

  return changed;
}
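
/* Editorial example of the work-limiting rule in try_crossjump_bb:
   if predecessors A and B of BB both branch to destinations D1...Dn,
   crossjump (A, B) would otherwise be attempted once per Di.
   Checking only from the edge that is A's first successor, and only
   when A's index is below B's, reduces that to a single attempt.  */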
/* Do simple CFG optimizations - basic block merging, simplification of
   jump instructions, etc.  Return nonzero if changes were made.  */

static bool
try_optimize_cfg (mode)
     int mode;
{
  int i;
  bool changed_overall = false;
  bool changed;
  int iterations = 0;

  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  for (i = 0; i < n_basic_blocks; i++)
    update_forwarder_flag (BASIC_BLOCK (i));

  if (mode & CLEANUP_UPDATE_LIFE)
    clear_bb_flags ();

  if (! (* targetm.cannot_modify_jumps_p) ())
    {
      /* Attempt to merge blocks as made possible by edge removal.  If
	 a block has only one successor, and the successor has only
	 one predecessor, they may be combined.  */
      do
	{
	  changed = false;
	  iterations++;

	  if (rtl_dump_file)
	    fprintf (rtl_dump_file,
		     "\n\ntry_optimize_cfg iteration %i\n\n",
		     iterations);

	  for (i = 0; i < n_basic_blocks;)
	    {
	      basic_block c, b = BASIC_BLOCK (i);
	      edge s;
	      bool changed_here = false;

	      /* Delete trivially dead basic blocks.  */
	      while (b->pred == NULL)
		{
		  c = BASIC_BLOCK (b->index - 1);
		  if (rtl_dump_file)
		    fprintf (rtl_dump_file, "Deleting block %i.\n",
			     b->index);

		  flow_delete_block (b);
		  changed = true;
		  b = c;
		}

	      /* Remove code labels no longer used.  Don't do this
		 before CALL_PLACEHOLDER is removed, as some branches
		 may be hidden within.  */
	      if (b->pred->pred_next == NULL
		  && (b->pred->flags & EDGE_FALLTHRU)
		  && !(b->pred->flags & EDGE_COMPLEX)
		  && GET_CODE (b->head) == CODE_LABEL
		  && (!(mode & CLEANUP_PRE_SIBCALL)
		      || !tail_recursion_label_p (b->head))
		  /* If the previous block ends with a branch to this
		     block, we can't delete the label.  Normally this
		     is a condjump that is yet to be simplified, but
		     if CASE_DROPS_THRU, this can be a tablejump with
		     some element going to the same place as the
		     default (fallthru).  */
		  && (b->pred->src == ENTRY_BLOCK_PTR
		      || GET_CODE (b->pred->src->end) != JUMP_INSN
		      || ! label_is_jump_target_p (b->head,
						   b->pred->src->end)))
		{
		  rtx label = b->head;

		  b->head = NEXT_INSN (b->head);
		  delete_insn_chain (label, label);
		  if (rtl_dump_file)
		    fprintf (rtl_dump_file, "Deleted label in block %i.\n",
			     b->index);
		}

	      /* If we fall through an empty block, we can remove it.  */
	      if (b->pred->pred_next == NULL
		  && (b->pred->flags & EDGE_FALLTHRU)
		  && GET_CODE (b->head) != CODE_LABEL
		  && FORWARDER_BLOCK_P (b)
		  /* Note that forwarder_block_p true ensures that
		     there is a successor for this block.  */
		  && (b->succ->flags & EDGE_FALLTHRU)
		  && n_basic_blocks > 1)
		{
		  if (rtl_dump_file)
		    fprintf (rtl_dump_file,
			     "Deleting fallthru block %i.\n",
			     b->index);

		  c = BASIC_BLOCK (b->index ? b->index - 1 : 1);
		  redirect_edge_succ_nodup (b->pred, b->succ->dest);
		  flow_delete_block (b);
		  changed = true;
		  b = c;
		}

	      /* Merge blocks.  Loop because chains of blocks might be
		 combinable.  */
	      while ((s = b->succ) != NULL
		     && s->succ_next == NULL
		     && !(s->flags & EDGE_COMPLEX)
		     && (c = s->dest) != EXIT_BLOCK_PTR
		     && c->pred->pred_next == NULL
		     /* If the jump insn has side effects,
			we can't kill the edge.  */
		     && (GET_CODE (b->end) != JUMP_INSN
			 || onlyjump_p (b->end))
		     && merge_blocks (s, b, c, mode))
		changed_here = true;

	      /* Simplify branch over branch.  */
	      if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b))
		changed_here = true;

	      /* If B has a single outgoing edge, but uses a
		 non-trivial jump instruction without side-effects, we
		 can either delete the jump entirely, or replace it
		 with a simple unconditional jump.  Use
		 redirect_edge_and_branch to do the dirty work.  */
	      if (b->succ
		  && ! b->succ->succ_next
		  && b->succ->dest != EXIT_BLOCK_PTR
		  && onlyjump_p (b->end)
		  && redirect_edge_and_branch (b->succ, b->succ->dest))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Simplify branch to branch.  */
	      if (try_forward_edges (mode, b))
		changed_here = true;

	      /* Look for shared code between blocks.  */
	      if ((mode & CLEANUP_CROSSJUMP)
		  && try_crossjump_bb (mode, b))
		changed_here = true;

	      /* Don't get confused by the index shift caused by
		 deleting blocks.  */
	      if (!changed_here)
		i = b->index + 1;
	      else
		changed = true;
	    }

	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
	    changed = true;

#ifdef ENABLE_CHECKING
	  if (changed)
	    verify_flow_info ();
#endif

	  changed_overall |= changed;
	}
      while (changed);
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_edges ();

  clear_aux_for_blocks ();

  return changed_overall;
}
/* Delete all unreachable basic blocks.  */

static bool
delete_unreachable_blocks ()
{
  int i, j;
  bool changed = false;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  Do compaction concurrently,
     as otherwise we can wind up with O(N^2) behaviour here when we
     have oodles of dead code.  */

  for (i = j = 0; i < n_basic_blocks; ++i)
    {
      basic_block b = BASIC_BLOCK (i);

      if (!(b->flags & BB_REACHABLE))
	{
	  flow_delete_block_noexpunge (b);
	  expunge_block_nocompact (b);
	  changed = true;
	}
      else
	{
	  BASIC_BLOCK (j) = b;
	  b->index = j++;
	}
    }
  n_basic_blocks = j;
  basic_block_info->num_elements = j;

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
/* Tidy the CFG by deleting unreachable code and whatnot.  */

bool
cleanup_cfg (mode)
     int mode;
{
  bool changed = false;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
	 now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_UPDATE_LIFE | CLEANUP_PRE_SIBCALL))
	  && !reload_completed)
	delete_trivially_dead_insns (get_insns(), max_reg_num ());
    }

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (mode & CLEANUP_UPDATE_LIFE)
	{
	  /* Cleaning up the CFG introduces more opportunities for dead code
	     removal that in turn may introduce more opportunities for
	     cleaning up the CFG.  */
	  if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
						 PROP_DEATH_NOTES
						 | PROP_SCAN_DEAD_CODE
						 | PROP_KILL_DEAD_CODE
						 | PROP_LOG_LINKS))
	    break;
	}
      else if (!(mode & CLEANUP_PRE_SIBCALL) && !reload_completed)
	{
	  if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
	    break;
	}
      else
	break;
      delete_dead_jumptables ();
    }

  /* Kill the data we won't maintain.  */
  free_EXPR_LIST_list (&label_value_list);
  free_EXPR_LIST_list (&tail_recursion_label_list);
  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}