1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
22 /* This file contains optimizer of the control flow. The main entrypoint is
23 cleanup_cfg. Following optimizations are performed:
25 - Unreachable blocks removal
26 - Edge forwarding (edge to the forwarder block is forwarded to its
27 successor. Simplification of the branch instruction is performed by
28 underlying infrastructure so branch can be converted to simplejump or
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
37 #include "hard-reg-set.h"
38 #include "basic-block.h"
41 #include "insn-config.h"
49 /* cleanup_cfg maintains following flags for each basic block. */
51 /* Set if life info needs to be recomputed for given BB. */
53 /* Set if BB is the forwarder block to avoid too many
54 forwarder_block_p calls. */
55 BB_FORWARDER_BLOCK
= 2
58 #define BB_FLAGS(bb) (enum bb_flags)(bb)->aux
59 #define BB_SET_FLAG(bb,flag) \
60 (bb)->aux = (void *) (long) ((enum bb_flags)(bb)->aux | (flag))
61 #define BB_CLEAR_FLAG(bb,flag) \
62 (bb)->aux = (void *) (long) ((enum bb_flags)(bb)->aux & ~(flag))
64 #define FORWARDER_BLOCK_P(bb) (BB_FLAGS(bb) & BB_FORWARDER_BLOCK)
66 static bool try_crossjump_to_edge
PARAMS ((int, edge
, edge
));
67 static bool try_crossjump_bb
PARAMS ((int, basic_block
));
68 static bool outgoing_edges_match
PARAMS ((int,
69 basic_block
, basic_block
));
70 static int flow_find_cross_jump
PARAMS ((int, basic_block
, basic_block
,
72 static bool insns_match_p
PARAMS ((int, rtx
, rtx
));
74 static bool delete_unreachable_blocks
PARAMS ((void));
75 static bool label_is_jump_target_p
PARAMS ((rtx
, rtx
));
76 static bool tail_recursion_label_p
PARAMS ((rtx
));
77 static void merge_blocks_move_predecessor_nojumps
PARAMS ((basic_block
,
79 static void merge_blocks_move_successor_nojumps
PARAMS ((basic_block
,
81 static bool merge_blocks
PARAMS ((edge
,basic_block
,basic_block
,
83 static bool try_optimize_cfg
PARAMS ((int));
84 static bool try_simplify_condjump
PARAMS ((basic_block
));
85 static bool try_forward_edges
PARAMS ((int, basic_block
));
86 static edge thread_jump
PARAMS ((int, edge
, basic_block
));
87 static bool mark_effect
PARAMS ((rtx
, bitmap
));
88 static void notice_new_block
PARAMS ((basic_block
));
89 static void update_forwarder_flag
PARAMS ((basic_block
));
91 /* Set flags for newly created block. */
99 BB_SET_FLAG (bb
, BB_UPDATE_LIFE
);
100 if (forwarder_block_p (bb
))
101 BB_SET_FLAG (bb
, BB_FORWARDER_BLOCK
);
104 /* Recompute forwarder flag after block has been modified. */
107 update_forwarder_flag (bb
)
110 if (forwarder_block_p (bb
))
111 BB_SET_FLAG (bb
, BB_FORWARDER_BLOCK
);
113 BB_CLEAR_FLAG (bb
, BB_FORWARDER_BLOCK
);
116 /* Simplify a conditional jump around an unconditional jump.
117 Return true if something changed. */
120 try_simplify_condjump (cbranch_block
)
121 basic_block cbranch_block
;
123 basic_block jump_block
, jump_dest_block
, cbranch_dest_block
;
124 edge cbranch_jump_edge
, cbranch_fallthru_edge
;
127 /* Verify that there are exactly two successors. */
128 if (!cbranch_block
->succ
129 || !cbranch_block
->succ
->succ_next
130 || cbranch_block
->succ
->succ_next
->succ_next
)
133 /* Verify that we've got a normal conditional branch at the end
135 cbranch_insn
= cbranch_block
->end
;
136 if (!any_condjump_p (cbranch_insn
))
139 cbranch_fallthru_edge
= FALLTHRU_EDGE (cbranch_block
);
140 cbranch_jump_edge
= BRANCH_EDGE (cbranch_block
);
142 /* The next block must not have multiple predecessors, must not
143 be the last block in the function, and must contain just the
144 unconditional jump. */
145 jump_block
= cbranch_fallthru_edge
->dest
;
146 if (jump_block
->pred
->pred_next
147 || jump_block
->index
== n_basic_blocks
- 1
148 || !FORWARDER_BLOCK_P (jump_block
))
150 jump_dest_block
= jump_block
->succ
->dest
;
152 /* The conditional branch must target the block after the
153 unconditional branch. */
154 cbranch_dest_block
= cbranch_jump_edge
->dest
;
156 if (!can_fallthru (jump_block
, cbranch_dest_block
))
159 /* Invert the conditional branch. */
160 if (!invert_jump (cbranch_insn
, block_label (jump_dest_block
), 0))
164 fprintf (rtl_dump_file
, "Simplifying condjump %i around jump %i\n",
165 INSN_UID (cbranch_insn
), INSN_UID (jump_block
->end
));
167 /* Success. Update the CFG to match. Note that after this point
168 the edge variable names appear backwards; the redirection is done
169 this way to preserve edge profile data. */
170 cbranch_jump_edge
= redirect_edge_succ_nodup (cbranch_jump_edge
,
172 cbranch_fallthru_edge
= redirect_edge_succ_nodup (cbranch_fallthru_edge
,
174 cbranch_jump_edge
->flags
|= EDGE_FALLTHRU
;
175 cbranch_fallthru_edge
->flags
&= ~EDGE_FALLTHRU
;
177 /* Delete the block with the unconditional jump, and clean up the mess. */
178 flow_delete_block (jump_block
);
179 tidy_fallthru_edge (cbranch_jump_edge
, cbranch_block
, cbranch_dest_block
);
184 /* Attempt to prove that operation is NOOP using CSElib or mark the effect
185 on register. Used by jump threading. */
187 mark_effect (exp
, nonequal
)
191 switch (GET_CODE (exp
))
193 /* In case we do clobber the register, mark it as equal, as we know the
194 value is dead so it don't have to match. */
196 if (REG_P (XEXP (exp
, 0)))
197 CLEAR_REGNO_REG_SET (nonequal
, REGNO (XEXP (exp
, 0)));
200 if (rtx_equal_for_cselib_p (SET_DEST (exp
), SET_SRC (exp
)))
202 if (GET_CODE (SET_SRC (exp
)) != REG
)
204 SET_REGNO_REG_SET (nonequal
, REGNO (SET_SRC (exp
)));
210 /* Attempt to prove that the basic block B will have no side effects and
211 always continues in the same edge if reached via E. Return the edge
212 if it exists, NULL otherwise. */
215 thread_jump (mode
, e
, b
)
220 rtx set1
, set2
, cond1
, cond2
, insn
;
221 enum rtx_code code1
, code2
, reversed_code2
;
222 bool reverse1
= false;
227 /* At the moment, we do handle only conditional jumps, but later we may
228 want to extend this code to tablejumps and others. */
229 if (!e
->src
->succ
->succ_next
|| e
->src
->succ
->succ_next
->succ_next
)
231 if (!b
->succ
|| !b
->succ
->succ_next
|| b
->succ
->succ_next
->succ_next
)
234 /* Second branch must end with onlyjump, as we will eliminate the jump. */
235 if (!any_condjump_p (e
->src
->end
) || !any_condjump_p (b
->end
)
236 || !onlyjump_p (b
->end
))
239 set1
= pc_set (e
->src
->end
);
240 set2
= pc_set (b
->end
);
241 if (((e
->flags
& EDGE_FALLTHRU
) != 0)
242 != (XEXP (SET_SRC (set1
), 0) == pc_rtx
))
245 cond1
= XEXP (SET_SRC (set1
), 0);
246 cond2
= XEXP (SET_SRC (set2
), 0);
248 code1
= reversed_comparison_code (cond1
, b
->end
);
250 code1
= GET_CODE (cond1
);
252 code2
= GET_CODE (cond2
);
253 reversed_code2
= reversed_comparison_code (cond2
, b
->end
);
255 if (!comparison_dominates_p (code1
, code2
)
256 && !comparison_dominates_p (code1
, reversed_code2
))
259 /* Ensure that the comparison operators are equivalent.
260 ??? This is far too pessimistic. We should allow swapped operands,
261 different CCmodes, or for example comparisons for interval, that
262 dominate even when operands are not equivalent. */
263 if (!rtx_equal_p (XEXP (cond1
, 0), XEXP (cond2
, 0))
264 || !rtx_equal_p (XEXP (cond1
, 1), XEXP (cond2
, 1)))
267 /* Short circuit cases where block B contains some side effects, as we can't
269 for (insn
= NEXT_INSN (b
->head
); insn
!= NEXT_INSN (b
->end
);
270 insn
= NEXT_INSN (insn
))
271 if (INSN_P (insn
) && side_effects_p (PATTERN (insn
)))
276 /* First process all values computed in the source basic block. */
277 for (insn
= NEXT_INSN (e
->src
->head
); insn
!= NEXT_INSN (e
->src
->end
);
278 insn
= NEXT_INSN (insn
))
280 cselib_process_insn (insn
);
282 nonequal
= BITMAP_XMALLOC();
283 CLEAR_REG_SET (nonequal
);
284 /* Now assume that we've continued by the edge E to B and continue
285 processing as if it were same basic block.
287 Our goal is to prove that the whole block is a NOOP. */
288 for (insn
= NEXT_INSN (b
->head
); insn
!= b
->end
&& !failed
;
289 insn
= NEXT_INSN (insn
))
293 rtx pat
= PATTERN (insn
);
295 if (GET_CODE (pat
) == PARALLEL
)
297 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
298 failed
|= mark_effect (XVECEXP (pat
, 0, i
), nonequal
);
301 failed
|= mark_effect (pat
, nonequal
);
303 cselib_process_insn (insn
);
306 /* Later we should clear nonequal of dead registers. So far we don't
307 have life information in cfg_cleanup. */
311 /* In case liveness information is available, we need to prove equivalence
312 only of the live values. */
313 if (mode
& CLEANUP_UPDATE_LIFE
)
314 AND_REG_SET (nonequal
, b
->global_live_at_end
);
316 EXECUTE_IF_SET_IN_REG_SET (nonequal
, 0, i
, goto failed_exit
;);
318 BITMAP_XFREE (nonequal
);
320 if ((comparison_dominates_p (code1
, code2
) != 0)
321 != (XEXP (SET_SRC (set2
), 0) == pc_rtx
))
322 return BRANCH_EDGE (b
);
324 return FALLTHRU_EDGE (b
);
327 BITMAP_XFREE (nonequal
);
332 /* Attempt to forward edges leaving basic block B.
333 Return true if successful. */
336 try_forward_edges (mode
, b
)
340 bool changed
= false;
341 edge e
, next
, threaded_edge
;
343 for (e
= b
->succ
; e
; e
= next
)
345 basic_block target
, first
;
347 bool threaded
= false;
351 /* Skip complex edges because we don't know how to update them.
353 Still handle fallthru edges, as we can succeed to forward fallthru
354 edge to the same place as the branch edge of conditional branch
355 and turn conditional branch to an unconditional branch. */
356 if (e
->flags
& EDGE_COMPLEX
)
359 target
= first
= e
->dest
;
362 while (counter
< n_basic_blocks
)
364 basic_block new_target
= NULL
;
365 bool new_target_threaded
= false;
367 if (FORWARDER_BLOCK_P (target
)
368 && target
->succ
->dest
!= EXIT_BLOCK_PTR
)
370 /* Bypass trivial infinite loops. */
371 if (target
== target
->succ
->dest
)
372 counter
= n_basic_blocks
;
373 new_target
= target
->succ
->dest
;
375 /* Allow to thread only over one edge at time to simplify updating
377 else if ((mode
& CLEANUP_THREADING
) && !threaded
)
379 threaded_edge
= thread_jump (mode
, e
, target
);
382 new_target
= threaded_edge
->dest
;
383 new_target_threaded
= true;
389 /* Avoid killing of loop pre-headers, as it is the place loop
390 optimizer wants to hoist code to.
392 For fallthru forwarders, the LOOP_BEG note must appear between
393 the header of block and CODE_LABEL of the loop, for non forwarders
394 it must appear before the JUMP_INSN. */
395 if (mode
& CLEANUP_PRE_LOOP
)
397 rtx insn
= (target
->succ
->flags
& EDGE_FALLTHRU
398 ? target
->head
: prev_nonnote_insn (target
->end
));
400 if (GET_CODE (insn
) != NOTE
)
401 insn
= NEXT_INSN (insn
);
403 for (;insn
&& GET_CODE (insn
) != CODE_LABEL
&& !INSN_P (insn
);
404 insn
= NEXT_INSN (insn
))
405 if (GET_CODE (insn
) == NOTE
406 && NOTE_LINE_NUMBER (insn
) == NOTE_INSN_LOOP_BEG
)
409 if (GET_CODE (insn
) == NOTE
)
414 threaded
|= new_target_threaded
;
417 if (counter
>= n_basic_blocks
)
420 fprintf (rtl_dump_file
, "Infinite loop in BB %i.\n",
423 else if (target
== first
)
424 ; /* We didn't do anything. */
427 /* Save the values now, as the edge may get removed. */
428 gcov_type edge_count
= e
->count
;
429 int edge_probability
= e
->probability
;
434 notice_new_block (redirect_edge_and_branch_force (e
, target
));
436 fprintf (rtl_dump_file
, "Conditionals threaded.\n");
438 else if (!redirect_edge_and_branch (e
, target
))
441 fprintf (rtl_dump_file
, "Forwarding edge %i->%i to %i failed.\n",
442 b
->index
, e
->dest
->index
, target
->index
);
445 /* We successfully forwarded the edge. Now update profile
446 data: for each edge we traversed in the chain, remove
447 the original edge's execution count. */
448 edge_frequency
= ((edge_probability
* b
->frequency
449 + REG_BR_PROB_BASE
/ 2)
452 if (!FORWARDER_BLOCK_P (b
) && forwarder_block_p (b
))
453 BB_SET_FLAG (b
, BB_FORWARDER_BLOCK
);
454 BB_SET_FLAG (b
, BB_UPDATE_LIFE
);
459 first
->count
-= edge_count
;
460 first
->succ
->count
-= edge_count
;
461 first
->frequency
-= edge_frequency
;
462 if (first
->succ
->succ_next
)
468 while (first
!= target
);
477 /* Return true if LABEL is a target of JUMP_INSN. This applies only
478 to non-complex jumps. That is, direct unconditional, conditional,
479 and tablejumps, but not computed jumps or returns. It also does
480 not apply to the fallthru case of a conditional jump. */
483 label_is_jump_target_p (label
, jump_insn
)
484 rtx label
, jump_insn
;
486 rtx tmp
= JUMP_LABEL (jump_insn
);
492 && (tmp
= NEXT_INSN (tmp
)) != NULL_RTX
493 && GET_CODE (tmp
) == JUMP_INSN
494 && (tmp
= PATTERN (tmp
),
495 GET_CODE (tmp
) == ADDR_VEC
496 || GET_CODE (tmp
) == ADDR_DIFF_VEC
))
498 rtvec vec
= XVEC (tmp
, GET_CODE (tmp
) == ADDR_DIFF_VEC
);
499 int i
, veclen
= GET_NUM_ELEM (vec
);
501 for (i
= 0; i
< veclen
; ++i
)
502 if (XEXP (RTVEC_ELT (vec
, i
), 0) == label
)
509 /* Return true if LABEL is used for tail recursion. */
512 tail_recursion_label_p (label
)
517 for (x
= tail_recursion_label_list
; x
; x
= XEXP (x
, 1))
518 if (label
== XEXP (x
, 0))
524 /* Blocks A and B are to be merged into a single block. A has no incoming
525 fallthru edge, so it can be moved before B without adding or modifying
526 any jumps (aside from the jump from A to B). */
529 merge_blocks_move_predecessor_nojumps (a
, b
)
535 barrier
= next_nonnote_insn (a
->end
);
536 if (GET_CODE (barrier
) != BARRIER
)
538 delete_insn (barrier
);
540 /* Move block and loop notes out of the chain so that we do not
543 ??? A better solution would be to squeeze out all the non-nested notes
544 and adjust the block trees appropriately. Even better would be to have
545 a tighter connection between block trees and rtl so that this is not
547 if (squeeze_notes (&a
->head
, &a
->end
))
550 /* Scramble the insn chain. */
551 if (a
->end
!= PREV_INSN (b
->head
))
552 reorder_insns_nobb (a
->head
, a
->end
, PREV_INSN (b
->head
));
553 BB_SET_FLAG (a
, BB_UPDATE_LIFE
);
557 fprintf (rtl_dump_file
, "Moved block %d before %d and merged.\n",
561 /* Swap the records for the two blocks around. Although we are deleting B,
562 A is now where B was and we want to compact the BB array from where
564 BASIC_BLOCK (a
->index
) = b
;
565 BASIC_BLOCK (b
->index
) = a
;
570 /* Now blocks A and B are contiguous. Merge them. */
571 merge_blocks_nomove (a
, b
);
574 /* Blocks A and B are to be merged into a single block. B has no outgoing
575 fallthru edge, so it can be moved after A without adding or modifying
576 any jumps (aside from the jump from A to B). */
579 merge_blocks_move_successor_nojumps (a
, b
)
582 rtx barrier
, real_b_end
;
585 barrier
= NEXT_INSN (b
->end
);
587 /* Recognize a jump table following block B. */
589 && GET_CODE (barrier
) == CODE_LABEL
590 && NEXT_INSN (barrier
)
591 && GET_CODE (NEXT_INSN (barrier
)) == JUMP_INSN
592 && (GET_CODE (PATTERN (NEXT_INSN (barrier
))) == ADDR_VEC
593 || GET_CODE (PATTERN (NEXT_INSN (barrier
))) == ADDR_DIFF_VEC
))
595 /* Temporarily add the table jump insn to b, so that it will also
596 be moved to the correct location. */
597 b
->end
= NEXT_INSN (barrier
);
598 barrier
= NEXT_INSN (b
->end
);
601 /* There had better have been a barrier there. Delete it. */
602 if (barrier
&& GET_CODE (barrier
) == BARRIER
)
603 delete_insn (barrier
);
605 /* Move block and loop notes out of the chain so that we do not
608 ??? A better solution would be to squeeze out all the non-nested notes
609 and adjust the block trees appropriately. Even better would be to have
610 a tighter connection between block trees and rtl so that this is not
612 if (squeeze_notes (&b
->head
, &b
->end
))
615 /* Scramble the insn chain. */
616 reorder_insns_nobb (b
->head
, b
->end
, a
->end
);
618 /* Restore the real end of b. */
621 /* Now blocks A and B are contiguous. Merge them. */
622 merge_blocks_nomove (a
, b
);
623 BB_SET_FLAG (a
, BB_UPDATE_LIFE
);
627 fprintf (rtl_dump_file
, "Moved block %d after %d and merged.\n",
632 /* Attempt to merge basic blocks that are potentially non-adjacent.
633 Return true iff the attempt succeeded. */
636 merge_blocks (e
, b
, c
, mode
)
641 /* If C has a tail recursion label, do not merge. There is no
642 edge recorded from the call_placeholder back to this label, as
643 that would make optimize_sibling_and_tail_recursive_calls more
644 complex for no gain. */
645 if ((mode
& CLEANUP_PRE_SIBCALL
)
646 && GET_CODE (c
->head
) == CODE_LABEL
647 && tail_recursion_label_p (c
->head
))
650 /* If B has a fallthru edge to C, no need to move anything. */
651 if (e
->flags
& EDGE_FALLTHRU
)
653 /* We need to update liveness in case C already has broken liveness
654 or B ends by conditional jump to next instructions that will be
656 if ((BB_FLAGS (c
) & BB_UPDATE_LIFE
)
657 || GET_CODE (b
->end
) == JUMP_INSN
)
658 BB_SET_FLAG (b
, BB_UPDATE_LIFE
);
659 merge_blocks_nomove (b
, c
);
660 update_forwarder_flag (b
);
664 fprintf (rtl_dump_file
, "Merged %d and %d without moving.\n",
670 /* Otherwise we will need to move code around. Do that only if expensive
671 transformations are allowed. */
672 else if (mode
& CLEANUP_EXPENSIVE
)
674 edge tmp_edge
, b_fallthru_edge
;
675 bool c_has_outgoing_fallthru
;
676 bool b_has_incoming_fallthru
;
678 /* Avoid overactive code motion, as the forwarder blocks should be
679 eliminated by edge redirection instead. One exception might have
680 been if B is a forwarder block and C has no fallthru edge, but
681 that should be cleaned up by bb-reorder instead. */
682 if (FORWARDER_BLOCK_P (b
) || FORWARDER_BLOCK_P (c
))
685 /* We must make sure to not munge nesting of lexical blocks,
686 and loop notes. This is done by squeezing out all the notes
687 and leaving them there to lie. Not ideal, but functional. */
689 for (tmp_edge
= c
->succ
; tmp_edge
; tmp_edge
= tmp_edge
->succ_next
)
690 if (tmp_edge
->flags
& EDGE_FALLTHRU
)
692 c_has_outgoing_fallthru
= (tmp_edge
!= NULL
);
694 for (tmp_edge
= b
->pred
; tmp_edge
; tmp_edge
= tmp_edge
->pred_next
)
695 if (tmp_edge
->flags
& EDGE_FALLTHRU
)
697 b_has_incoming_fallthru
= (tmp_edge
!= NULL
);
698 b_fallthru_edge
= tmp_edge
;
700 /* Otherwise, we're going to try to move C after B. If C does
701 not have an outgoing fallthru, then it can be moved
702 immediately after B without introducing or modifying jumps. */
703 if (! c_has_outgoing_fallthru
)
705 merge_blocks_move_successor_nojumps (b
, c
);
709 /* If B does not have an incoming fallthru, then it can be moved
710 immediately before C without introducing or modifying jumps.
711 C cannot be the first block, so we do not have to worry about
712 accessing a non-existent block. */
714 if (b_has_incoming_fallthru
)
717 if (b_fallthru_edge
->src
== ENTRY_BLOCK_PTR
)
719 bb
= force_nonfallthru (b_fallthru_edge
);
721 notice_new_block (bb
);
723 BB_SET_FLAG (b_fallthru_edge
->src
, BB_UPDATE_LIFE
);
725 merge_blocks_move_predecessor_nojumps (b
, c
);
732 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
735 insns_match_p (mode
, i1
, i2
)
736 int mode ATTRIBUTE_UNUSED
;
741 /* Verify that I1 and I2 are equivalent. */
742 if (GET_CODE (i1
) != GET_CODE (i2
))
748 if (GET_CODE (p1
) != GET_CODE (p2
))
751 /* If this is a CALL_INSN, compare register usage information.
752 If we don't check this on stack register machines, the two
753 CALL_INSNs might be merged leaving reg-stack.c with mismatching
754 numbers of stack registers in the same basic block.
755 If we don't check this on machines with delay slots, a delay slot may
756 be filled that clobbers a parameter expected by the subroutine.
758 ??? We take the simple route for now and assume that if they're
759 equal, they were constructed identically. */
761 if (GET_CODE (i1
) == CALL_INSN
762 && !rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1
),
763 CALL_INSN_FUNCTION_USAGE (i2
)))
767 /* If cross_jump_death_matters is not 0, the insn's mode
768 indicates whether or not the insn contains any stack-like
771 if ((mode
& CLEANUP_POST_REGSTACK
) && stack_regs_mentioned (i1
))
773 /* If register stack conversion has already been done, then
774 death notes must also be compared before it is certain that
775 the two instruction streams match. */
778 HARD_REG_SET i1_regset
, i2_regset
;
780 CLEAR_HARD_REG_SET (i1_regset
);
781 CLEAR_HARD_REG_SET (i2_regset
);
783 for (note
= REG_NOTES (i1
); note
; note
= XEXP (note
, 1))
784 if (REG_NOTE_KIND (note
) == REG_DEAD
&& STACK_REG_P (XEXP (note
, 0)))
785 SET_HARD_REG_BIT (i1_regset
, REGNO (XEXP (note
, 0)));
787 for (note
= REG_NOTES (i2
); note
; note
= XEXP (note
, 1))
788 if (REG_NOTE_KIND (note
) == REG_DEAD
&& STACK_REG_P (XEXP (note
, 0)))
789 SET_HARD_REG_BIT (i2_regset
, REGNO (XEXP (note
, 0)));
791 GO_IF_HARD_REG_EQUAL (i1_regset
, i2_regset
, done
);
801 ? ! rtx_renumbered_equal_p (p1
, p2
) : ! rtx_equal_p (p1
, p2
))
803 /* The following code helps take care of G++ cleanups. */
804 rtx equiv1
= find_reg_equal_equiv_note (i1
);
805 rtx equiv2
= find_reg_equal_equiv_note (i2
);
808 /* If the equivalences are not to a constant, they may
809 reference pseudos that no longer exist, so we can't
811 && (! reload_completed
812 || (CONSTANT_P (XEXP (equiv1
, 0))
813 && rtx_equal_p (XEXP (equiv1
, 0), XEXP (equiv2
, 0)))))
815 rtx s1
= single_set (i1
);
816 rtx s2
= single_set (i2
);
817 if (s1
!= 0 && s2
!= 0
818 && rtx_renumbered_equal_p (SET_DEST (s1
), SET_DEST (s2
)))
820 validate_change (i1
, &SET_SRC (s1
), XEXP (equiv1
, 0), 1);
821 validate_change (i2
, &SET_SRC (s2
), XEXP (equiv2
, 0), 1);
822 if (! rtx_renumbered_equal_p (p1
, p2
))
824 else if (apply_change_group ())
833 /* Look through the insns at the end of BB1 and BB2 and find the longest
834 sequence that are equivalent. Store the first insns for that sequence
835 in *F1 and *F2 and return the sequence length.
837 To simplify callers of this function, if the blocks match exactly,
838 store the head of the blocks in *F1 and *F2. */
841 flow_find_cross_jump (mode
, bb1
, bb2
, f1
, f2
)
842 int mode ATTRIBUTE_UNUSED
;
843 basic_block bb1
, bb2
;
846 rtx i1
, i2
, last1
, last2
, afterlast1
, afterlast2
;
849 /* Skip simple jumps at the end of the blocks. Complex jumps still
850 need to be compared for equivalence, which we'll do below. */
853 last1
= afterlast1
= last2
= afterlast2
= NULL_RTX
;
855 || (returnjump_p (i1
) && !side_effects_p (PATTERN (i1
))))
858 /* Count everything except for unconditional jump as insn. */
859 if (!simplejump_p (i1
) && !returnjump_p (i1
))
865 || (returnjump_p (i2
) && !side_effects_p (PATTERN (i2
))))
874 while (!active_insn_p (i1
) && i1
!= bb1
->head
)
876 while (!active_insn_p (i2
) && i2
!= bb2
->head
)
879 if (i1
== bb1
->head
|| i2
== bb2
->head
)
882 if (!insns_match_p (mode
, i1
, i2
))
885 /* Don't begin a cross-jump with a USE or CLOBBER insn. */
886 if (active_insn_p (i1
))
888 /* If the merged insns have different REG_EQUAL notes, then
890 rtx equiv1
= find_reg_equal_equiv_note (i1
);
891 rtx equiv2
= find_reg_equal_equiv_note (i2
);
893 if (equiv1
&& !equiv2
)
894 remove_note (i1
, equiv1
);
895 else if (!equiv1
&& equiv2
)
896 remove_note (i2
, equiv2
);
897 else if (equiv1
&& equiv2
898 && !rtx_equal_p (XEXP (equiv1
, 0), XEXP (equiv2
, 0)))
900 remove_note (i1
, equiv1
);
901 remove_note (i2
, equiv2
);
904 afterlast1
= last1
, afterlast2
= last2
;
905 last1
= i1
, last2
= i2
;
915 /* Don't allow the insn after a compare to be shared by
916 cross-jumping unless the compare is also shared. */
917 if (reg_mentioned_p (cc0_rtx
, last1
) && ! sets_cc0_p (last1
))
918 last1
= afterlast1
, last2
= afterlast2
, ninsns
--;
922 /* Include preceding notes and labels in the cross-jump. One,
923 this may bring us to the head of the blocks as requested above.
924 Two, it keeps line number notes as matched as may be. */
927 while (last1
!= bb1
->head
&& !active_insn_p (PREV_INSN (last1
)))
928 last1
= PREV_INSN (last1
);
929 if (last1
!= bb1
->head
&& GET_CODE (PREV_INSN (last1
)) == CODE_LABEL
)
930 last1
= PREV_INSN (last1
);
931 while (last2
!= bb2
->head
&& !active_insn_p (PREV_INSN (last2
)))
932 last2
= PREV_INSN (last2
);
933 if (last2
!= bb2
->head
&& GET_CODE (PREV_INSN (last2
)) == CODE_LABEL
)
934 last2
= PREV_INSN (last2
);
943 /* Return true iff outgoing edges of BB1 and BB2 match, together with
944 the branch instruction. This means that if we commonize the control
945 flow before end of the basic block, the semantic remains unchanged.
947 We may assume that there exists one edge with a common destination. */
950 outgoing_edges_match (mode
, bb1
, bb2
)
955 int nehedges1
= 0, nehedges2
= 0;
956 edge fallthru1
= 0, fallthru2
= 0;
959 /* If BB1 has only one successor, we may be looking at either an
960 unconditional jump, or a fake edge to exit. */
961 if (bb1
->succ
&& !bb1
->succ
->succ_next
)
963 if (! bb2
->succ
|| bb2
->succ
->succ_next
)
965 return insns_match_p (mode
, bb1
->end
, bb2
->end
);
968 /* Match conditional jumps - this may get tricky when fallthru and branch
969 edges are crossed. */
971 && bb1
->succ
->succ_next
972 && !bb1
->succ
->succ_next
->succ_next
973 && any_condjump_p (bb1
->end
))
977 rtx set1
, set2
, cond1
, cond2
;
978 enum rtx_code code1
, code2
;
981 || !bb2
->succ
->succ_next
982 || bb1
->succ
->succ_next
->succ_next
983 || !any_condjump_p (bb2
->end
))
986 b1
= BRANCH_EDGE (bb1
);
987 b2
= BRANCH_EDGE (bb2
);
988 f1
= FALLTHRU_EDGE (bb1
);
989 f2
= FALLTHRU_EDGE (bb2
);
991 /* Get around possible forwarders on fallthru edges. Other cases
992 should be optimized out already. */
993 if (FORWARDER_BLOCK_P (f1
->dest
))
995 if (FORWARDER_BLOCK_P (f2
->dest
))
998 /* To simplify use of this function, return false if there are
999 unneeded forwarder blocks. These will get eliminated later
1000 during cleanup_cfg. */
1001 if (FORWARDER_BLOCK_P (f1
->dest
)
1002 || FORWARDER_BLOCK_P (f2
->dest
)
1003 || FORWARDER_BLOCK_P (b1
->dest
)
1004 || FORWARDER_BLOCK_P (b2
->dest
))
1007 if (f1
->dest
== f2
->dest
&& b1
->dest
== b2
->dest
)
1009 else if (f1
->dest
== b2
->dest
&& b1
->dest
== f2
->dest
)
1014 set1
= pc_set (bb1
->end
);
1015 set2
= pc_set (bb2
->end
);
1016 if ((XEXP (SET_SRC (set1
), 1) == pc_rtx
)
1017 != (XEXP (SET_SRC (set2
), 1) == pc_rtx
))
1020 cond1
= XEXP (SET_SRC (set1
), 0);
1021 cond2
= XEXP (SET_SRC (set2
), 0);
1022 code1
= GET_CODE (cond1
);
1024 code2
= reversed_comparison_code (cond2
, bb2
->end
);
1026 code2
= GET_CODE (cond2
);
1027 if (code2
== UNKNOWN
)
1030 /* Verify codes and operands match. */
1031 match
= ((code1
== code2
1032 && rtx_renumbered_equal_p (XEXP (cond1
, 0), XEXP (cond2
, 0))
1033 && rtx_renumbered_equal_p (XEXP (cond1
, 1), XEXP (cond2
, 1)))
1034 || (code1
== swap_condition (code2
)
1035 && rtx_renumbered_equal_p (XEXP (cond1
, 1),
1037 && rtx_renumbered_equal_p (XEXP (cond1
, 0),
1040 /* If we return true, we will join the blocks. Which means that
1041 we will only have one branch prediction bit to work with. Thus
1042 we require the existing branches to have probabilities that are
1044 /* ??? We should use bb->frequency to allow merging in infrequently
1045 executed blocks, but at the moment it is not available when
1046 cleanup_cfg is run. */
1047 if (match
&& !optimize_size
)
1051 note1
= find_reg_note (bb1
->end
, REG_BR_PROB
, 0);
1052 note2
= find_reg_note (bb2
->end
, REG_BR_PROB
, 0);
1056 prob1
= INTVAL (XEXP (note1
, 0));
1057 prob2
= INTVAL (XEXP (note2
, 0));
1059 prob2
= REG_BR_PROB_BASE
- prob2
;
1061 /* Fail if the difference in probabilities is
1063 if (abs (prob1
- prob2
) > REG_BR_PROB_BASE
/ 20)
1066 else if (note1
|| note2
)
1070 if (rtl_dump_file
&& match
)
1071 fprintf (rtl_dump_file
, "Conditionals in bb %i and %i match.\n",
1072 bb1
->index
, bb2
->index
);
1077 /* Generic case - we are seeing a computed jump, table jump or trapping
1080 /* First ensure that the instructions match. There may be many outgoing
1081 edges so this test is generally cheaper.
1082 ??? Currently the tablejumps will never match, as they do have
1083 different tables. */
1084 if (!insns_match_p (mode
, bb1
->end
, bb2
->end
))
1087 /* Search the outgoing edges, ensure that the counts do match, find possible
1088 fallthru and exception handling edges since these needs more
1090 for (e1
= bb1
->succ
, e2
= bb2
->succ
; e1
&& e2
;
1091 e1
= e1
->succ_next
, e2
= e2
->succ_next
)
1093 if (e1
->flags
& EDGE_EH
)
1095 if (e2
->flags
& EDGE_EH
)
1097 if (e1
->flags
& EDGE_FALLTHRU
)
1099 if (e2
->flags
& EDGE_FALLTHRU
)
1102 /* If number of edges of various types does not match, fail. */
1105 if (nehedges1
!= nehedges2
)
1107 if ((fallthru1
!= 0) != (fallthru2
!= 0))
1110 /* fallthru edges must be forwarded to the same destination. */
1113 basic_block d1
= (forwarder_block_p (fallthru1
->dest
)
1114 ? fallthru1
->dest
->succ
->dest
: fallthru1
->dest
);
1115 basic_block d2
= (forwarder_block_p (fallthru2
->dest
)
1116 ? fallthru2
->dest
->succ
->dest
: fallthru2
->dest
);
1120 /* In case we do have EH edges, ensure we are in the same region. */
1123 rtx n1
= find_reg_note (bb1
->end
, REG_EH_REGION
, 0);
1124 rtx n2
= find_reg_note (bb2
->end
, REG_EH_REGION
, 0);
1125 if (XEXP (n1
, 0) != XEXP (n2
, 0))
1128 /* We don't need to match the rest of edges as above checks should be enough
1129 to ensure that they are equivalent. */
1133 /* E1 and E2 are edges with the same destination block. Search their
1134 predecessors for common code. If found, redirect control flow from
1135 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
1138 try_crossjump_to_edge (mode
, e1
, e2
)
1143 basic_block src1
= e1
->src
, src2
= e2
->src
;
1144 basic_block redirect_to
;
1145 rtx newpos1
, newpos2
;
1151 /* Search backward through forwarder blocks. We don't need to worry
1152 about multiple entry or chained forwarders, as they will be optimized
1153 away. We do this to look past the unconditional jump following a
1154 conditional jump that is required due to the current CFG shape. */
1156 && !src1
->pred
->pred_next
1157 && FORWARDER_BLOCK_P (src1
))
1163 && !src2
->pred
->pred_next
1164 && FORWARDER_BLOCK_P (src2
))
1170 /* Nothing to do if we reach ENTRY, or a common source block. */
1171 if (src1
== ENTRY_BLOCK_PTR
|| src2
== ENTRY_BLOCK_PTR
)
1176 /* Seeing more than 1 forwarder blocks would confuse us later... */
1177 if (FORWARDER_BLOCK_P (e1
->dest
)
1178 && FORWARDER_BLOCK_P (e1
->dest
->succ
->dest
))
1180 if (FORWARDER_BLOCK_P (e2
->dest
)
1181 && FORWARDER_BLOCK_P (e2
->dest
->succ
->dest
))
1184 /* Likewise with dead code (possibly newly created by the other optimizations
1186 if (!src1
->pred
|| !src2
->pred
)
1189 /* Look for the common insn sequence, part the first ... */
1190 if (!outgoing_edges_match (mode
, src1
, src2
))
1193 /* ... and part the second. */
1194 nmatch
= flow_find_cross_jump (mode
, src1
, src2
, &newpos1
, &newpos2
);
1198 /* Avoid splitting if possible. */
1199 if (newpos2
== src2
->head
)
1204 fprintf (rtl_dump_file
, "Splitting bb %i before %i insns\n",
1205 src2
->index
, nmatch
);
1206 redirect_to
= split_block (src2
, PREV_INSN (newpos2
))->dest
;
1210 fprintf (rtl_dump_file
,
1211 "Cross jumping from bb %i to bb %i; %i common insns\n",
1212 src1
->index
, src2
->index
, nmatch
);
1214 redirect_to
->count
+= src1
->count
;
1215 redirect_to
->frequency
+= src1
->frequency
;
1217 /* Recompute the frequencies and counts of outgoing edges. */
1218 for (s
= redirect_to
->succ
; s
; s
= s
->succ_next
)
1221 basic_block d
= s
->dest
;
1223 if (FORWARDER_BLOCK_P (d
))
1225 for (s2
= src1
->succ
; ; s2
= s2
->succ_next
)
1227 basic_block d2
= s2
->dest
;
1228 if (FORWARDER_BLOCK_P (d2
))
1229 d2
= d2
->succ
->dest
;
1233 s
->count
+= s2
->count
;
1235 /* Take care to update possible forwarder blocks. We verified
1236 that there is no more than one in the chain, so we can't run
1237 into infinite loop. */
1238 if (FORWARDER_BLOCK_P (s
->dest
))
1240 s
->dest
->succ
->count
+= s2
->count
;
1241 s
->dest
->count
+= s2
->count
;
1242 s
->dest
->frequency
+= EDGE_FREQUENCY (s
);
1244 if (FORWARDER_BLOCK_P (s2
->dest
))
1246 s2
->dest
->succ
->count
-= s2
->count
;
1247 s2
->dest
->count
-= s2
->count
;
1248 s2
->dest
->frequency
-= EDGE_FREQUENCY (s
);
1250 if (!redirect_to
->frequency
&& !src1
->frequency
)
1251 s
->probability
= (s
->probability
+ s2
->probability
) / 2;
1254 ((s
->probability
* redirect_to
->frequency
+
1255 s2
->probability
* src1
->frequency
)
1256 / (redirect_to
->frequency
+ src1
->frequency
));
1259 note
= find_reg_note (redirect_to
->end
, REG_BR_PROB
, 0);
1261 XEXP (note
, 0) = GEN_INT (BRANCH_EDGE (redirect_to
)->probability
);
1263 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1265 /* Skip possible basic block header. */
1266 if (GET_CODE (newpos1
) == CODE_LABEL
)
1267 newpos1
= NEXT_INSN (newpos1
);
1268 if (GET_CODE (newpos1
) == NOTE
)
1269 newpos1
= NEXT_INSN (newpos1
);
1272 /* Emit the jump insn. */
1273 label
= block_label (redirect_to
);
1274 emit_jump_insn_after (gen_jump (label
), src1
->end
);
1275 JUMP_LABEL (src1
->end
) = label
;
1276 LABEL_NUSES (label
)++;
1278 /* Delete the now unreachable instructions. */
1279 delete_insn_chain (newpos1
, last
);
1281 /* Make sure there is a barrier after the new jump. */
1282 last
= next_nonnote_insn (src1
->end
);
1283 if (!last
|| GET_CODE (last
) != BARRIER
)
1284 emit_barrier_after (src1
->end
);
1288 remove_edge (src1
->succ
);
1289 make_single_succ_edge (src1
, redirect_to
, 0);
1291 BB_SET_FLAG (src1
, BB_UPDATE_LIFE
);
1292 update_forwarder_flag (src1
);
1297 /* Search the predecessors of BB for common insn sequences. When found,
1298 share code between them by redirecting control flow. Return true if
1299 any changes made. */
1302 try_crossjump_bb (mode
, bb
)
1306 edge e
, e2
, nexte2
, nexte
, fallthru
;
1309 /* Nothing to do if there is not at least two incoming edges. */
1310 if (!bb
->pred
|| !bb
->pred
->pred_next
)
1313 /* It is always cheapest to redirect a block that ends in a branch to
1314 a block that falls through into BB, as that adds no branches to the
1315 program. We'll try that combination first. */
1316 for (fallthru
= bb
->pred
; fallthru
; fallthru
= fallthru
->pred_next
)
1317 if (fallthru
->flags
& EDGE_FALLTHRU
)
1321 for (e
= bb
->pred
; e
; e
= nexte
)
1323 nexte
= e
->pred_next
;
1325 /* As noted above, first try with the fallthru predecessor. */
1328 /* Don't combine the fallthru edge into anything else.
1329 If there is a match, we'll do it the other way around. */
1333 if (try_crossjump_to_edge (mode
, e
, fallthru
))
1341 /* Non-obvious work limiting check: Recognize that we're going
1342 to call try_crossjump_bb on every basic block. So if we have
1343 two blocks with lots of outgoing edges (a switch) and they
1344 share lots of common destinations, then we would do the
1345 cross-jump check once for each common destination.
1347 Now, if the blocks actually are cross-jump candidates, then
1348 all of their destinations will be shared. Which means that
1349 we only need check them for cross-jump candidacy once. We
1350 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1351 choosing to do the check from the block for which the edge
1352 in question is the first successor of A. */
1353 if (e
->src
->succ
!= e
)
1356 for (e2
= bb
->pred
; e2
; e2
= nexte2
)
1358 nexte2
= e2
->pred_next
;
1363 /* We've already checked the fallthru edge above. */
1367 /* The "first successor" check above only prevents multiple
1368 checks of crossjump(A,B). In order to prevent redundant
1369 checks of crossjump(B,A), require that A be the block
1370 with the lowest index. */
1371 if (e
->src
->index
> e2
->src
->index
)
1374 if (try_crossjump_to_edge (mode
, e
, e2
))
1386 /* Do simple CFG optimizations - basic block merging, simplifying of jump
1387 instructions etc. Return nonzero if changes were made. */
1390 try_optimize_cfg (mode
)
1394 bool changed_overall
= false;
1399 if (mode
& CLEANUP_CROSSJUMP
)
1400 add_noreturn_fake_exit_edges ();
1402 for (i
= 0; i
< n_basic_blocks
; i
++)
1403 update_forwarder_flag (BASIC_BLOCK (i
));
1405 /* Attempt to merge blocks as made possible by edge removal. If a block
1406 has only one successor, and the successor has only one predecessor,
1407 they may be combined. */
1415 fprintf (rtl_dump_file
, "\n\ntry_optimize_cfg iteration %i\n\n",
1418 for (i
= 0; i
< n_basic_blocks
;)
1420 basic_block c
, b
= BASIC_BLOCK (i
);
1422 bool changed_here
= false;
1424 /* Delete trivially dead basic blocks. */
1425 while (b
->pred
== NULL
)
1427 c
= BASIC_BLOCK (b
->index
- 1);
1429 fprintf (rtl_dump_file
, "Deleting block %i.\n", b
->index
);
1430 flow_delete_block (b
);
1435 /* Remove code labels no longer used. Don't do this before
1436 CALL_PLACEHOLDER is removed, as some branches may be hidden
1438 if (b
->pred
->pred_next
== NULL
1439 && (b
->pred
->flags
& EDGE_FALLTHRU
)
1440 && !(b
->pred
->flags
& EDGE_COMPLEX
)
1441 && GET_CODE (b
->head
) == CODE_LABEL
1442 && (!(mode
& CLEANUP_PRE_SIBCALL
)
1443 || !tail_recursion_label_p (b
->head
))
1444 /* If the previous block ends with a branch to this block,
1445 we can't delete the label. Normally this is a condjump
1446 that is yet to be simplified, but if CASE_DROPS_THRU,
1447 this can be a tablejump with some element going to the
1448 same place as the default (fallthru). */
1449 && (b
->pred
->src
== ENTRY_BLOCK_PTR
1450 || GET_CODE (b
->pred
->src
->end
) != JUMP_INSN
1451 || ! label_is_jump_target_p (b
->head
, b
->pred
->src
->end
)))
1453 rtx label
= b
->head
;
1454 b
->head
= NEXT_INSN (b
->head
);
1455 delete_insn_chain (label
, label
);
1457 fprintf (rtl_dump_file
, "Deleted label in block %i.\n",
1461 /* If we fall through an empty block, we can remove it. */
1462 if (b
->pred
->pred_next
== NULL
1463 && (b
->pred
->flags
& EDGE_FALLTHRU
)
1464 && GET_CODE (b
->head
) != CODE_LABEL
1465 && FORWARDER_BLOCK_P (b
)
1466 /* Note that forwarder_block_p true ensures that there
1467 is a successor for this block. */
1468 && (b
->succ
->flags
& EDGE_FALLTHRU
)
1469 && n_basic_blocks
> 1)
1472 fprintf (rtl_dump_file
, "Deleting fallthru block %i.\n",
1474 c
= BASIC_BLOCK (b
->index
? b
->index
- 1 : 1);
1475 redirect_edge_succ_nodup (b
->pred
, b
->succ
->dest
);
1476 flow_delete_block (b
);
1481 /* Merge blocks. Loop because chains of blocks might be
1483 while ((s
= b
->succ
) != NULL
1484 && s
->succ_next
== NULL
1485 && !(s
->flags
& EDGE_COMPLEX
)
1486 && (c
= s
->dest
) != EXIT_BLOCK_PTR
1487 && c
->pred
->pred_next
== NULL
1488 /* If the jump insn has side effects,
1489 we can't kill the edge. */
1490 && (GET_CODE (b
->end
) != JUMP_INSN
1491 || onlyjump_p (b
->end
))
1492 && merge_blocks (s
, b
, c
, mode
))
1493 changed_here
= true;
1495 /* Simplify branch over branch. */
1496 if ((mode
& CLEANUP_EXPENSIVE
) && try_simplify_condjump (b
))
1498 BB_SET_FLAG (b
, BB_UPDATE_LIFE
);
1499 changed_here
= true;
1502 /* If B has a single outgoing edge, but uses a non-trivial jump
1503 instruction without side-effects, we can either delete the
1504 jump entirely, or replace it with a simple unconditional jump.
1505 Use redirect_edge_and_branch to do the dirty work. */
1507 && ! b
->succ
->succ_next
1508 && b
->succ
->dest
!= EXIT_BLOCK_PTR
1509 && onlyjump_p (b
->end
)
1510 && redirect_edge_and_branch (b
->succ
, b
->succ
->dest
))
1512 BB_SET_FLAG (b
, BB_UPDATE_LIFE
);
1513 update_forwarder_flag (b
);
1514 changed_here
= true;
1517 /* Simplify branch to branch. */
1518 if (try_forward_edges (mode
, b
))
1519 changed_here
= true;
1521 /* Look for shared code between blocks. */
1522 if ((mode
& CLEANUP_CROSSJUMP
)
1523 && try_crossjump_bb (mode
, b
))
1524 changed_here
= true;
1526 /* Don't get confused by the index shift caused by deleting
1534 if ((mode
& CLEANUP_CROSSJUMP
)
1535 && try_crossjump_bb (mode
, EXIT_BLOCK_PTR
))
1538 #ifdef ENABLE_CHECKING
1540 verify_flow_info ();
1543 changed_overall
|= changed
;
1547 if (mode
& CLEANUP_CROSSJUMP
)
1548 remove_fake_edges ();
1550 if ((mode
& CLEANUP_UPDATE_LIFE
) && changed_overall
)
1553 blocks
= sbitmap_alloc (n_basic_blocks
);
1554 sbitmap_zero (blocks
);
1555 for (i
= 0; i
< n_basic_blocks
; i
++)
1556 if (BB_FLAGS (BASIC_BLOCK (i
)) & BB_UPDATE_LIFE
)
1559 SET_BIT (blocks
, i
);
1562 update_life_info (blocks
, UPDATE_LIFE_GLOBAL
,
1563 PROP_DEATH_NOTES
| PROP_SCAN_DEAD_CODE
1564 | PROP_KILL_DEAD_CODE
);
1565 sbitmap_free (blocks
);
1567 for (i
= 0; i
< n_basic_blocks
; i
++)
1568 BASIC_BLOCK (i
)->aux
= NULL
;
1570 return changed_overall
;
1573 /* Delete all unreachable basic blocks. */
1576 delete_unreachable_blocks ()
1579 bool changed
= false;
1581 find_unreachable_blocks ();
1583 /* Delete all unreachable basic blocks. Count down so that we
1584 don't interfere with the block renumbering that happens in
1585 flow_delete_block. */
1587 for (i
= n_basic_blocks
- 1; i
>= 0; --i
)
1589 basic_block b
= BASIC_BLOCK (i
);
1591 if (!(b
->flags
& BB_REACHABLE
))
1592 flow_delete_block (b
), changed
= true;
1596 tidy_fallthru_edges ();
1600 /* Tidy the CFG by deleting unreachable code and whatnot. */
1606 bool changed
= false;
1608 timevar_push (TV_CLEANUP_CFG
);
1609 changed
= delete_unreachable_blocks ();
1610 if (try_optimize_cfg (mode
))
1611 delete_unreachable_blocks (), changed
= true;
1613 /* Kill the data we won't maintain. */
1614 free_EXPR_LIST_list (&label_value_list
);
1615 free_EXPR_LIST_list (&tail_recursion_label_list
);
1616 timevar_pop (TV_CLEANUP_CFG
);