[gcc.git] / gcc / tree-cfgcleanup.c
1 /* CFG cleanup for trees.
2 Copyright (C) 2001-2014 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "tm_p.h"
26 #include "basic-block.h"
27 #include "diagnostic-core.h"
28 #include "flags.h"
29 #include "function.h"
30 #include "langhooks.h"
31 #include "tree-ssa-alias.h"
32 #include "internal-fn.h"
33 #include "tree-eh.h"
34 #include "gimple-expr.h"
35 #include "is-a.h"
36 #include "gimple.h"
37 #include "gimplify.h"
38 #include "gimple-iterator.h"
39 #include "gimple-ssa.h"
40 #include "tree-cfg.h"
41 #include "tree-phinodes.h"
42 #include "ssa-iterators.h"
43 #include "stringpool.h"
44 #include "tree-ssanames.h"
45 #include "tree-ssa-loop-manip.h"
46 #include "expr.h"
47 #include "tree-dfa.h"
48 #include "tree-ssa.h"
49 #include "tree-pass.h"
50 #include "except.h"
51 #include "cfgloop.h"
52 #include "hashtab.h"
53 #include "tree-ssa-propagate.h"
54 #include "tree-scalar-evolution.h"
55
56 /* The set of blocks in which at least one of the following changes happened:
57 -- the statement at the end of the block was changed
58 -- the block was newly created
59 -- the set of the predecessors of the block changed
60 -- the set of the successors of the block changed
61 ??? Maybe we could track these changes separately, since they determine
62 what cleanups it makes sense to try on the block. */
63 bitmap cfgcleanup_altered_bbs;
64
65 /* Remove any fallthru edge from EV. Return true if a fallthru edge was found (it is removed, or merely loses its EDGE_FALLTHRU flag if it is also complex). */
66
67 static bool
68 remove_fallthru_edge (vec<edge, va_gc> *ev)
69 {
70 edge_iterator ei;
71 edge e;
72
73 FOR_EACH_EDGE (e, ei, ev)
74 if ((e->flags & EDGE_FALLTHRU) != 0)
75 {
76 if (e->flags & EDGE_COMPLEX)
77 e->flags &= ~EDGE_FALLTHRU;
78 else
79 remove_edge_and_dominated_blocks (e);
80 return true;
81 }
82 return false;
83 }
84
85
86 /* Fold the control statement at the end of BB (given by GSI), remove the
87 outgoing edges that can no longer be taken, and let BB fall through. */
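/* For instance (an illustrative sketch of a GIMPLE dump, not taken from a
   real run): if the condition at the end of BB has become

       if (0 != 0) goto <bb 5>; else goto <bb 6>;

   the edge to <bb 5> is removed together with the blocks it alone
   dominated, and BB is left falling through to <bb 6>.  */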
88
89 static bool
90 cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
91 {
92 edge taken_edge;
93 bool retval = false;
94 gimple stmt = gsi_stmt (gsi);
95 tree val;
96
97 if (!single_succ_p (bb))
98 {
99 edge e;
100 edge_iterator ei;
101 bool warned;
102 location_t loc;
103
104 fold_defer_overflow_warnings ();
105 loc = gimple_location (stmt);
106 switch (gimple_code (stmt))
107 {
108 case GIMPLE_COND:
109 val = fold_binary_loc (loc, gimple_cond_code (stmt),
110 boolean_type_node,
111 gimple_cond_lhs (stmt),
112 gimple_cond_rhs (stmt));
113 break;
114
115 case GIMPLE_SWITCH:
116 val = gimple_switch_index (stmt);
117 break;
118
119 default:
120 val = NULL_TREE;
121 }
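      /* find_taken_edge returns the one outgoing edge known to be taken
         when VAL is a constant; when the outcome is unknown it returns
         NULL and we leave BB untouched.  */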
122 taken_edge = find_taken_edge (bb, val);
123 if (!taken_edge)
124 {
125 fold_undefer_and_ignore_overflow_warnings ();
126 return false;
127 }
128
129 /* Remove all the edges except the one that is always executed. */
130 warned = false;
131 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
132 {
133 if (e != taken_edge)
134 {
135 if (!warned)
136 {
137 fold_undefer_overflow_warnings
138 (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
139 warned = true;
140 }
141
142 taken_edge->probability += e->probability;
143 taken_edge->count += e->count;
144 remove_edge_and_dominated_blocks (e);
145 retval = true;
146 }
147 else
148 ei_next (&ei);
149 }
150 if (!warned)
151 fold_undefer_and_ignore_overflow_warnings ();
152 if (taken_edge->probability > REG_BR_PROB_BASE)
153 taken_edge->probability = REG_BR_PROB_BASE;
154 }
155 else
156 taken_edge = single_succ_edge (bb);
157
158 bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
159 gsi_remove (&gsi, true);
160 taken_edge->flags = EDGE_FALLTHRU;
161
162 return retval;
163 }
164
165 /* Try to remove superfluous control structures in basic block BB. Returns
166 true if anything changes. */
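/* Besides purging now-dead EH edges, three cases are handled below: a
   GIMPLE_COND or GIMPLE_SWITCH whose outcome can be determined
   (cleanup_control_expr_graph), a computed goto whose destination is a
   compile-time constant label, and a call that has become noreturn but
   still has a fallthru edge.  */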
167
168 static bool
169 cleanup_control_flow_bb (basic_block bb)
170 {
171 gimple_stmt_iterator gsi;
172 bool retval = false;
173 gimple stmt;
174
175 /* If the last statement of the block could throw and now cannot,
176 we need to prune the CFG. */
177 retval |= gimple_purge_dead_eh_edges (bb);
178
179 gsi = gsi_last_bb (bb);
180 if (gsi_end_p (gsi))
181 return retval;
182
183 stmt = gsi_stmt (gsi);
184
185 if (gimple_code (stmt) == GIMPLE_COND
186 || gimple_code (stmt) == GIMPLE_SWITCH)
187 retval |= cleanup_control_expr_graph (bb, gsi);
188 else if (gimple_code (stmt) == GIMPLE_GOTO
189 && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
190 && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
191 == LABEL_DECL))
192 {
193 /* If we had a computed goto which has a compile-time determinable
194 destination, then we can eliminate the goto. */
195 edge e;
196 tree label;
197 edge_iterator ei;
198 basic_block target_block;
199
200 /* First look at all the outgoing edges. Delete any outgoing
201 edges which do not go to the right block. For the one
202 edge which goes to the right block, fix up its flags. */
203 label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
204 target_block = label_to_block (label);
205 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
206 {
207 if (e->dest != target_block)
208 remove_edge_and_dominated_blocks (e);
209 else
210 {
211 /* Turn off the EDGE_ABNORMAL flag. */
212 e->flags &= ~EDGE_ABNORMAL;
213
214 /* And set EDGE_FALLTHRU. */
215 e->flags |= EDGE_FALLTHRU;
216 ei_next (&ei);
217 }
218 }
219
220 bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
221 bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);
222
223 /* Remove the GOTO_EXPR as it is not needed. The CFG has all the
224 relevant information we need. */
225 gsi_remove (&gsi, true);
226 retval = true;
227 }
228
229 /* Check for indirect calls that have been turned into
230 noreturn calls. */
231 else if (is_gimple_call (stmt)
232 && gimple_call_noreturn_p (stmt)
233 && remove_fallthru_edge (bb->succs))
234 retval = true;
235
236 return retval;
237 }
238
239 /* Return true if basic block BB does nothing except pass control
240 flow to another block and we can safely insert a label at
241 the start of the successor block.
242
243 As a precondition, we require that BB not be equal to
244 the entry block. */
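/* Roughly: BB qualifies when it contains nothing but labels (and possibly
   debug stmts), has PHI nodes exactly when PHI_WANTED, and has a single
   normal successor, so that it merely forwards control to that successor;
   some additional restrictions around loop headers, latches and preheaders
   apply.  This is a summary of the checks below.  */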
245
246 static bool
247 tree_forwarder_block_p (basic_block bb, bool phi_wanted)
248 {
249 gimple_stmt_iterator gsi;
250 location_t locus;
251
252 /* BB must have a single outgoing edge. */
253 if (single_succ_p (bb) != 1
254 /* If PHI_WANTED is false, BB must not have any PHI nodes.
255 Otherwise, BB must have PHI nodes. */
256 || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
257 /* BB may not be a predecessor of the exit block. */
258 || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
259 /* Nor should this be an infinite loop. */
260 || single_succ (bb) == bb
261 /* BB may not have an abnormal outgoing edge. */
262 || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
263 return false;
264
265 gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));
266
267 locus = single_succ_edge (bb)->goto_locus;
268
269 /* There should not be an edge coming from entry, or an EH edge. */
270 {
271 edge_iterator ei;
272 edge e;
273
274 FOR_EACH_EDGE (e, ei, bb->preds)
275 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
276 return false;
277 /* If goto_locus of any of the edges differs, prevent removing
278 the forwarder block for -O0. */
279 else if (optimize == 0 && e->goto_locus != locus)
280 return false;
281 }
282
283 /* Now walk through the statements backward. We can ignore labels;
284 anything else means this is not a forwarder block. */
285 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
286 {
287 gimple stmt = gsi_stmt (gsi);
288
289 switch (gimple_code (stmt))
290 {
291 case GIMPLE_LABEL:
292 if (DECL_NONLOCAL (gimple_label_label (stmt)))
293 return false;
294 if (optimize == 0 && gimple_location (stmt) != locus)
295 return false;
296 break;
297
298 /* ??? For now, hope there's a corresponding debug
299 assignment at the destination. */
300 case GIMPLE_DEBUG:
301 break;
302
303 default:
304 return false;
305 }
306 }
307
308 if (current_loops)
309 {
310 basic_block dest;
311 /* Protect loop headers. */
312 if (bb->loop_father->header == bb)
313 return false;
314
315 dest = EDGE_SUCC (bb, 0)->dest;
316 /* Protect loop preheaders and latches if requested. */
317 if (dest->loop_father->header == dest)
318 {
319 if (bb->loop_father == dest->loop_father)
320 {
321 if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
322 return false;
323 /* If BB doesn't have a single predecessor, removing it would
324 make this loop have multiple latches. Don't do that if it
325 would in turn require disambiguating them. */
326 return (single_pred_p (bb)
327 || loops_state_satisfies_p
328 (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
329 }
330 else if (bb->loop_father == loop_outer (dest->loop_father))
331 return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
332 /* Always preserve other edges into loop headers that are
333 not simple latches or preheaders. */
334 return false;
335 }
336 }
337
338 return true;
339 }
340
341 /* If all the PHI nodes in DEST have alternatives for E1 and E2 and
342 those alternatives are equal in each of the PHI nodes, then return
343 true, else return false. */
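/* For example, given # x_1 = PHI <a_2(3), a_2(4), b_5(7)>, the alternatives
   for the edges from blocks 3 and 4 are equal, while those for blocks 3 and
   7 are not (an illustrative sketch).  */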
344
345 static bool
346 phi_alternatives_equal (basic_block dest, edge e1, edge e2)
347 {
348 int n1 = e1->dest_idx;
349 int n2 = e2->dest_idx;
350 gimple_stmt_iterator gsi;
351
352 for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
353 {
354 gimple phi = gsi_stmt (gsi);
355 tree val1 = gimple_phi_arg_def (phi, n1);
356 tree val2 = gimple_phi_arg_def (phi, n2);
357
358 gcc_assert (val1 != NULL_TREE);
359 gcc_assert (val2 != NULL_TREE);
360
361 if (!operand_equal_for_phi_arg_p (val1, val2))
362 return false;
363 }
364
365 return true;
366 }
367
368 /* Removes forwarder block BB. Returns false if this failed. */
369
370 static bool
371 remove_forwarder_block (basic_block bb)
372 {
373 edge succ = single_succ_edge (bb), e, s;
374 basic_block dest = succ->dest;
375 gimple label;
376 edge_iterator ei;
377 gimple_stmt_iterator gsi, gsi_to;
378 bool can_move_debug_stmts;
379
380 /* We check for infinite loops already in tree_forwarder_block_p.
381 However, it may happen that an infinite loop is created
382 afterwards due to the removal of other forwarders. */
383 if (dest == bb)
384 return false;
385
386 /* If the destination block consists of a nonlocal label or is an
387 EH landing pad, do not merge it. */
388 label = first_stmt (dest);
389 if (label
390 && gimple_code (label) == GIMPLE_LABEL
391 && (DECL_NONLOCAL (gimple_label_label (label))
392 || EH_LANDING_PAD_NR (gimple_label_label (label)) != 0))
393 return false;
394
395 /* If there is an abnormal edge to basic block BB, but not into
396 DEST, problems might occur during removal of the PHI node at
397 out-of-SSA time due to overlapping live ranges of registers.
398
399 If there is an abnormal edge in DEST, the problems would occur
400 anyway since cleanup_dead_labels would then merge the labels for
401 two different EH regions, and the rest of the exception handling
402 code does not like that.
403
404 So if there is an abnormal edge to BB, proceed only if there is
405 no abnormal edge to DEST and there are no phi nodes in DEST. */
406 if (bb_has_abnormal_pred (bb)
407 && (bb_has_abnormal_pred (dest)
408 || !gimple_seq_empty_p (phi_nodes (dest))))
409 return false;
410
411 /* If there are phi nodes in DEST, and some of the blocks that are
412 predecessors of BB are also predecessors of DEST, check that the
413 phi node arguments match. */
414 if (!gimple_seq_empty_p (phi_nodes (dest)))
415 {
416 FOR_EACH_EDGE (e, ei, bb->preds)
417 {
418 s = find_edge (e->src, dest);
419 if (!s)
420 continue;
421
422 if (!phi_alternatives_equal (dest, succ, s))
423 return false;
424 }
425 }
426
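  /* Debug stmts from BB may only follow into DEST when BB is DEST's sole
     predecessor; otherwise their variable bindings could be wrong on
     DEST's other incoming paths.  */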
427 can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);
428
429 basic_block pred = NULL;
430 if (single_pred_p (bb))
431 pred = single_pred (bb);
432
433 /* Redirect the edges. */
434 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
435 {
436 bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);
437
438 if (e->flags & EDGE_ABNORMAL)
439 {
440 /* If there is an abnormal edge, redirect it anyway, and
441 move the labels to the new block to make it legal. */
442 s = redirect_edge_succ_nodup (e, dest);
443 }
444 else
445 s = redirect_edge_and_branch (e, dest);
446
447 if (s == e)
448 {
449 /* Create arguments for the phi nodes, since the edge was not
450 here before. */
451 for (gsi = gsi_start_phis (dest);
452 !gsi_end_p (gsi);
453 gsi_next (&gsi))
454 {
455 gimple phi = gsi_stmt (gsi);
456 source_location l = gimple_phi_arg_location_from_edge (phi, succ);
457 tree def = gimple_phi_arg_def (phi, succ->dest_idx);
458 add_phi_arg (phi, unshare_expr (def), s, l);
459 }
460 }
461 }
462
463 /* Move nonlocal labels and computed goto targets as well as user
464 defined labels and labels with an EH landing pad number to the
465 new block, so that the redirection of the abnormal edges works,
466 jump targets end up in a sane place and debug information for
467 labels is retained. */
468 gsi_to = gsi_start_bb (dest);
469 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
470 {
471 tree decl;
472 label = gsi_stmt (gsi);
473 if (is_gimple_debug (label))
474 break;
475 decl = gimple_label_label (label);
476 if (EH_LANDING_PAD_NR (decl) != 0
477 || DECL_NONLOCAL (decl)
478 || FORCED_LABEL (decl)
479 || !DECL_ARTIFICIAL (decl))
480 {
481 gsi_remove (&gsi, false);
482 gsi_insert_before (&gsi_to, label, GSI_SAME_STMT);
483 }
484 else
485 gsi_next (&gsi);
486 }
487
488 /* Move debug statements if the destination has a single predecessor. */
489 if (can_move_debug_stmts)
490 {
491 gsi_to = gsi_after_labels (dest);
492 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
493 {
494 gimple debug = gsi_stmt (gsi);
495 if (!is_gimple_debug (debug))
496 break;
497 gsi_remove (&gsi, false);
498 gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT);
499 }
500 }
501
502 bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);
503
504 /* Update the dominators. */
505 if (dom_info_available_p (CDI_DOMINATORS))
506 {
507 basic_block dom, dombb, domdest;
508
509 dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
510 domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
511 if (domdest == bb)
512 {
513 /* Shortcut to avoid calling (relatively expensive)
514 nearest_common_dominator unless necessary. */
515 dom = dombb;
516 }
517 else
518 dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);
519
520 set_immediate_dominator (CDI_DOMINATORS, dest, dom);
521 }
522
523 /* Adjust latch information of BB's parent loop, as otherwise
524 the cfg hook has a hard time not killing the loop. */
525 if (current_loops && bb->loop_father->latch == bb)
526 bb->loop_father->latch = pred;
527
528 /* And kill the forwarder block. */
529 delete_basic_block (bb);
530
531 return true;
532 }
533
534 /* STMT is a call that has been discovered to be noreturn. Fix up the
535 CFG and remove its LHS. Return true if something changed. */
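/* An illustrative sketch (hypothetical GIMPLE, not from a real dump): given

     tmp_5 = foo ();
     bar (tmp_5);

   where foo () is later discovered to be noreturn, the block is split after
   the call, the fallthru edge (and everything it alone reached) is removed,
   and the now-dead LHS tmp_5 is dropped.  */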
536
537 bool
538 fixup_noreturn_call (gimple stmt)
539 {
540 basic_block bb = gimple_bb (stmt);
541 bool changed = false;
542
543 if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
544 return false;
545
546 /* First split the basic block if STMT is not its last statement. */
547 if (stmt != gsi_stmt (gsi_last_bb (bb)))
548 split_block (bb, stmt);
549
550 changed |= remove_fallthru_edge (bb->succs);
551
552 /* If there is an LHS, remove it. */
553 if (gimple_call_lhs (stmt))
554 {
555 tree op = gimple_call_lhs (stmt);
556 gimple_call_set_lhs (stmt, NULL_TREE);
557
558 /* We need to remove the SSA name to avoid checking errors.
559 All uses are dominated by the noreturn call and thus will
560 be removed afterwards.
561 We proactively remove the affected non-PHI statements to keep
562 fixup_cfg from trying to update them and crashing. */
563 if (TREE_CODE (op) == SSA_NAME)
564 {
565 use_operand_p use_p;
566 imm_use_iterator iter;
567 gimple use_stmt;
568 bitmap_iterator bi;
569 unsigned int bb_index;
570
571 bitmap blocks = BITMAP_ALLOC (NULL);
572
573 FOR_EACH_IMM_USE_STMT (use_stmt, iter, op)
574 {
575 if (gimple_code (use_stmt) != GIMPLE_PHI)
576 bitmap_set_bit (blocks, gimple_bb (use_stmt)->index);
577 else
578 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
579 SET_USE (use_p, error_mark_node);
580 }
581 EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi)
582 delete_basic_block (BASIC_BLOCK_FOR_FN (cfun, bb_index));
583 BITMAP_FREE (blocks);
584 release_ssa_name (op);
585 }
586 update_stmt (stmt);
587 changed = true;
588 }
589 return changed;
590 }
591
592
593 /* Split basic blocks at calls in the middle of a block that are now
594 known not to return, and remove the unreachable code. */
595
596 static bool
597 split_bbs_on_noreturn_calls (void)
598 {
599 bool changed = false;
600 gimple stmt;
601 basic_block bb;
602
603 /* Detect cases where a mid-block call is now known not to return. */
604 if (cfun->gimple_df)
605 while (vec_safe_length (MODIFIED_NORETURN_CALLS (cfun)))
606 {
607 stmt = MODIFIED_NORETURN_CALLS (cfun)->pop ();
608 bb = gimple_bb (stmt);
609 /* BB might have been deleted at this point, so first verify
610 that it is still present in the CFG. */
611 if (bb == NULL
612 || bb->index < NUM_FIXED_BLOCKS
613 || bb->index >= last_basic_block_for_fn (cfun)
614 || BASIC_BLOCK_FOR_FN (cfun, bb->index) != bb
615 || !gimple_call_noreturn_p (stmt))
616 continue;
617
618 changed |= fixup_noreturn_call (stmt);
619 }
620
621 return changed;
622 }
623
624 /* Tries to clean up the CFG in basic block BB. Returns true if
625 anything changes. */
626
627 static bool
628 cleanup_tree_cfg_bb (basic_block bb)
629 {
630 bool retval = cleanup_control_flow_bb (bb);
631
632 if (tree_forwarder_block_p (bb, false)
633 && remove_forwarder_block (bb))
634 return true;
635
636 /* Merging the blocks may create new opportunities for folding
637 conditional branches (due to the elimination of single-valued PHI
638 nodes). */
639 if (single_succ_p (bb)
640 && can_merge_blocks_p (bb, single_succ (bb)))
641 {
642 merge_blocks (bb, single_succ (bb));
643 return true;
644 }
645
646 return retval;
647 }
648
649 /* Iterate the CFG cleanups while anything changes. */
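/* Strategy: sweep every existing basic block once, then keep draining the
   cfgcleanup_altered_bbs worklist, re-cleaning any block whose edges or
   final statement changed, until nothing is left in it.  */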
650
651 static bool
652 cleanup_tree_cfg_1 (void)
653 {
654 bool retval = false;
655 basic_block bb;
656 unsigned i, n;
657
658 retval |= split_bbs_on_noreturn_calls ();
659
660 /* Prepare the worklist of altered blocks. */
661 cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);
662
663 /* During forwarder block cleanup, we may redirect edges out of
664 SWITCH_EXPRs, which can get expensive. So we want to enable
665 recording of edge to CASE_LABEL_EXPR mappings. */
666 start_recording_case_labels ();
667
668 /* Start by iterating over all basic blocks. We cannot use FOR_EACH_BB_FN,
669 since the basic blocks may get removed. */
670 n = last_basic_block_for_fn (cfun);
671 for (i = NUM_FIXED_BLOCKS; i < n; i++)
672 {
673 bb = BASIC_BLOCK_FOR_FN (cfun, i);
674 if (bb)
675 retval |= cleanup_tree_cfg_bb (bb);
676 }
677
678 /* Now process the altered blocks, as long as any are available. */
679 while (!bitmap_empty_p (cfgcleanup_altered_bbs))
680 {
681 i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
682 bitmap_clear_bit (cfgcleanup_altered_bbs, i);
683 if (i < NUM_FIXED_BLOCKS)
684 continue;
685
686 bb = BASIC_BLOCK_FOR_FN (cfun, i);
687 if (!bb)
688 continue;
689
690 retval |= cleanup_tree_cfg_bb (bb);
691
692 /* Rerun split_bbs_on_noreturn_calls, in case we have altered any noreturn
693 calls. */
694 retval |= split_bbs_on_noreturn_calls ();
695 }
696
697 end_recording_case_labels ();
698 BITMAP_FREE (cfgcleanup_altered_bbs);
699 return retval;
700 }
701
702
703 /* Remove unreachable blocks and do other miscellaneous cleanup work.
704 Return true if the flowgraph was modified, false otherwise. */
705
706 static bool
707 cleanup_tree_cfg_noloop (void)
708 {
709 bool changed;
710
711 timevar_push (TV_TREE_CLEANUP_CFG);
712
713 /* Iterate until there are no more cleanups left to do. If any
714 iteration changed the flowgraph, set CHANGED to true.
715
716 If dominance information is available, there cannot be any unreachable
717 blocks. */
718 if (!dom_info_available_p (CDI_DOMINATORS))
719 {
720 changed = delete_unreachable_blocks ();
721 calculate_dominance_info (CDI_DOMINATORS);
722 }
723 else
724 {
725 #ifdef ENABLE_CHECKING
726 verify_dominators (CDI_DOMINATORS);
727 #endif
728 changed = false;
729 }
730
731 changed |= cleanup_tree_cfg_1 ();
732
733 gcc_assert (dom_info_available_p (CDI_DOMINATORS));
734 compact_blocks ();
735
736 #ifdef ENABLE_CHECKING
737 verify_flow_info ();
738 #endif
739
740 timevar_pop (TV_TREE_CLEANUP_CFG);
741
742 if (changed && current_loops)
743 loops_state_set (LOOPS_NEED_FIXUP);
744
745 return changed;
746 }
747
748 /* Repairs loop structures. */
749
750 static void
751 repair_loop_structures (void)
752 {
753 bitmap changed_bbs;
754 unsigned n_new_loops;
755
756 calculate_dominance_info (CDI_DOMINATORS);
757
758 timevar_push (TV_REPAIR_LOOPS);
759 changed_bbs = BITMAP_ALLOC (NULL);
760 n_new_loops = fix_loop_structure (changed_bbs);
761
762 /* This usually does nothing. But sometimes parts of the cfg that
763 originally were inside a loop get out of it due to edge removal
764 (since they become unreachable through back edges from the latch).
765 Also, a formerly irreducible loop can become reducible; in this
766 case force a full rewrite into loop-closed SSA form. */
767 if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
768 rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
769 TODO_update_ssa);
770
771 BITMAP_FREE (changed_bbs);
772
773 #ifdef ENABLE_CHECKING
774 verify_loop_structure ();
775 #endif
776 scev_reset ();
777
778 timevar_pop (TV_REPAIR_LOOPS);
779 }
780
781 /* Clean up the CFG and repair loop structures. */
782
783 bool
784 cleanup_tree_cfg (void)
785 {
786 bool changed = cleanup_tree_cfg_noloop ();
787
788 if (current_loops != NULL
789 && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
790 repair_loop_structures ();
791
792 return changed;
793 }
794
795 /* Tries to merge the PHI nodes at BB into those at BB's sole successor.
796 Returns true if successful. */
797
798 static bool
799 remove_forwarder_block_with_phi (basic_block bb)
800 {
801 edge succ = single_succ_edge (bb);
802 basic_block dest = succ->dest;
803 gimple label;
804 basic_block dombb, domdest, dom;
805
806 /* We check for infinite loops already in tree_forwarder_block_p.
807 However, it may happen that an infinite loop is created
808 afterwards due to the removal of other forwarders. */
809 if (dest == bb)
810 return false;
811
812 /* If the destination block consists of a nonlocal label, do not
813 merge it. */
814 label = first_stmt (dest);
815 if (label
816 && gimple_code (label) == GIMPLE_LABEL
817 && DECL_NONLOCAL (gimple_label_label (label)))
818 return false;
819
820 /* Record BB's single pred in case we need to update the father
821 loop's latch information later. */
822 basic_block pred = NULL;
823 if (single_pred_p (bb))
824 pred = single_pred (bb);
825
826 /* Redirect each incoming edge to BB to DEST. */
827 while (EDGE_COUNT (bb->preds) > 0)
828 {
829 edge e = EDGE_PRED (bb, 0), s;
830 gimple_stmt_iterator gsi;
831
832 s = find_edge (e->src, dest);
833 if (s)
834 {
835 /* We already have an edge S from E->src to DEST. If S and
836 E->dest's sole successor edge have the same PHI arguments
837 at DEST, redirect E to DEST as well. */
838 if (phi_alternatives_equal (dest, s, succ))
839 {
840 e = redirect_edge_and_branch (e, dest);
841 redirect_edge_var_map_clear (e);
842 continue;
843 }
844
845 /* PHI arguments are different. Create a forwarder block by
846 splitting E so that we can merge PHI arguments on E to
847 DEST. */
848 e = single_succ_edge (split_edge (e));
849 }
850
851 s = redirect_edge_and_branch (e, dest);
852
853 /* redirect_edge_and_branch must not create a new edge. */
854 gcc_assert (s == e);
855
856 /* Add to the PHI nodes at DEST each PHI argument removed at the
857 destination of E. */
858 for (gsi = gsi_start_phis (dest);
859 !gsi_end_p (gsi);
860 gsi_next (&gsi))
861 {
862 gimple phi = gsi_stmt (gsi);
863 tree def = gimple_phi_arg_def (phi, succ->dest_idx);
864 source_location locus = gimple_phi_arg_location_from_edge (phi, succ);
865
866 if (TREE_CODE (def) == SSA_NAME)
867 {
868 /* If DEF is one of the results of PHI nodes removed during
869 redirection, replace it with the PHI argument that used
870 to be on E. */
871 vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
872 size_t length = head ? head->length () : 0;
873 for (size_t i = 0; i < length; i++)
874 {
875 edge_var_map *vm = &(*head)[i];
876 tree old_arg = redirect_edge_var_map_result (vm);
877 tree new_arg = redirect_edge_var_map_def (vm);
878
879 if (def == old_arg)
880 {
881 def = new_arg;
882 locus = redirect_edge_var_map_location (vm);
883 break;
884 }
885 }
886 }
887
888 add_phi_arg (phi, def, s, locus);
889 }
890
891 redirect_edge_var_map_clear (e);
892 }
893
894 /* Update the dominators. */
895 dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
896 domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
897 if (domdest == bb)
898 {
899 /* Shortcut to avoid calling (relatively expensive)
900 nearest_common_dominator unless necessary. */
901 dom = dombb;
902 }
903 else
904 dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);
905
906 set_immediate_dominator (CDI_DOMINATORS, dest, dom);
907
908 /* Adjust latch information of BB's parent loop, as otherwise
909 the cfg hook has a hard time not killing the loop. */
910 if (current_loops && bb->loop_father->latch == bb)
911 bb->loop_father->latch = pred;
912
913 /* Remove BB since all of BB's incoming edges have been redirected
914 to DEST. */
915 delete_basic_block (bb);
916
917 return true;
918 }
919
920 /* This pass merges PHI nodes if one feeds into another. For example,
921 suppose we have the following:
922
923 goto <bb 9> (<L9>);
924
925 <L8>:;
926 tem_17 = foo ();
927
928 # tem_6 = PHI <tem_17(8), tem_23(7)>;
929 <L9>:;
930
931 # tem_3 = PHI <tem_6(9), tem_2(5)>;
932 <L10>:;
933
934 Then we merge the first PHI node into the second one like so:
935
936 goto <bb 9> (<L10>);
937
938 <L8>:;
939 tem_17 = foo ();
940
941 # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
942 <L10>:;
943 */
944
945 namespace {
946
947 const pass_data pass_data_merge_phi =
948 {
949 GIMPLE_PASS, /* type */
950 "mergephi", /* name */
951 OPTGROUP_NONE, /* optinfo_flags */
952 TV_TREE_MERGE_PHI, /* tv_id */
953 ( PROP_cfg | PROP_ssa ), /* properties_required */
954 0, /* properties_provided */
955 0, /* properties_destroyed */
956 0, /* todo_flags_start */
957 0, /* todo_flags_finish */
958 };
959
960 class pass_merge_phi : public gimple_opt_pass
961 {
962 public:
963 pass_merge_phi (gcc::context *ctxt)
964 : gimple_opt_pass (pass_data_merge_phi, ctxt)
965 {}
966
967 /* opt_pass methods: */
968 opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
969 virtual unsigned int execute (function *);
970
971 }; // class pass_merge_phi
972
973 unsigned int
974 pass_merge_phi::execute (function *fun)
975 {
976 basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
977 basic_block *current = worklist;
978 basic_block bb;
979
980 calculate_dominance_info (CDI_DOMINATORS);
981
982 /* Find all PHI nodes that we may be able to merge. */
983 FOR_EACH_BB_FN (bb, fun)
984 {
985 basic_block dest;
986
987 /* Look for a forwarder block with PHI nodes. */
988 if (!tree_forwarder_block_p (bb, true))
989 continue;
990
991 dest = single_succ (bb);
992
993 /* BB has to feed into another basic block with PHI
994 nodes. */
995 if (gimple_seq_empty_p (phi_nodes (dest))
996 /* We don't want to deal with a basic block with
997 abnormal edges. */
998 || bb_has_abnormal_pred (bb))
999 continue;
1000
1001 if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
1002 {
1003 /* If BB does not dominate DEST, then the PHI nodes at
1004 DEST must be the only users of the results of the PHI
1005 nodes at BB. */
1006 *current++ = bb;
1007 }
1008 else
1009 {
1010 gimple_stmt_iterator gsi;
1011 unsigned int dest_idx = single_succ_edge (bb)->dest_idx;
1012
1013 /* BB dominates DEST. There may be many users of the PHI
1014 nodes in BB. However, there is still a trivial case we
1015 can handle. If the result of every PHI in BB is used
1016 only by a PHI in DEST, then we can trivially merge the
1017 PHI nodes from BB into DEST. */
1018 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
1019 gsi_next (&gsi))
1020 {
1021 gimple phi = gsi_stmt (gsi);
1022 tree result = gimple_phi_result (phi);
1023 use_operand_p imm_use;
1024 gimple use_stmt;
1025
1026 /* If the PHI's result is never used, then we can just
1027 ignore it. */
1028 if (has_zero_uses (result))
1029 continue;
1030
1031 /* Get the single use of the result of this PHI node. */
1032 if (!single_imm_use (result, &imm_use, &use_stmt)
1033 || gimple_code (use_stmt) != GIMPLE_PHI
1034 || gimple_bb (use_stmt) != dest
1035 || gimple_phi_arg_def (use_stmt, dest_idx) != result)
1036 break;
1037 }
1038
1039 /* If the loop above iterated through all the PHI nodes
1040 in BB, then we can merge the PHIs from BB into DEST. */
1041 if (gsi_end_p (gsi))
1042 *current++ = bb;
1043 }
1044 }
1045
1046 /* Now let's drain WORKLIST. */
1047 bool changed = false;
1048 while (current != worklist)
1049 {
1050 bb = *--current;
1051 changed |= remove_forwarder_block_with_phi (bb);
1052 }
1053 free (worklist);
1054
1055 /* Removing forwarder blocks can cause formerly irreducible loops
1056 to become reducible if we merged two entry blocks. */
1057 if (changed
1058 && current_loops)
1059 loops_state_set (LOOPS_NEED_FIXUP);
1060
1061 return 0;
1062 }
1063
1064 } // anon namespace
1065
1066 gimple_opt_pass *
1067 make_pass_merge_phi (gcc::context *ctxt)
1068 {
1069 return new pass_merge_phi (ctxt);
1070 }
1071
1072 /* Pass: clean up the CFG just before expanding trees to RTL.
1073 This is just a round of label cleanups and case node grouping
1074 because, after the tree optimizers have run, such cleanups may
1075 be necessary. */
1076
1077 static unsigned int
1078 execute_cleanup_cfg_post_optimizing (void)
1079 {
1080 unsigned int todo = 0;
1081 if (cleanup_tree_cfg ())
1082 todo |= TODO_update_ssa;
1083 maybe_remove_unreachable_handlers ();
1084 cleanup_dead_labels ();
1085 group_case_labels ();
1086 if ((flag_compare_debug_opt || flag_compare_debug)
1087 && flag_dump_final_insns)
1088 {
1089 FILE *final_output = fopen (flag_dump_final_insns, "a");
1090
1091 if (!final_output)
1092 {
1093 error ("could not open final insn dump file %qs: %m",
1094 flag_dump_final_insns);
1095 flag_dump_final_insns = NULL;
1096 }
1097 else
1098 {
1099 int save_unnumbered = flag_dump_unnumbered;
1100 int save_noaddr = flag_dump_noaddr;
1101
1102 flag_dump_noaddr = flag_dump_unnumbered = 1;
1103 fprintf (final_output, "\n");
1104 dump_enumerated_decls (final_output, dump_flags | TDF_NOUID);
1105 flag_dump_noaddr = save_noaddr;
1106 flag_dump_unnumbered = save_unnumbered;
1107 if (fclose (final_output))
1108 {
1109 error ("could not close final insn dump file %qs: %m",
1110 flag_dump_final_insns);
1111 flag_dump_final_insns = NULL;
1112 }
1113 }
1114 }
1115 return todo;
1116 }
1117
1118 namespace {
1119
1120 const pass_data pass_data_cleanup_cfg_post_optimizing =
1121 {
1122 GIMPLE_PASS, /* type */
1123 "optimized", /* name */
1124 OPTGROUP_NONE, /* optinfo_flags */
1125 TV_TREE_CLEANUP_CFG, /* tv_id */
1126 PROP_cfg, /* properties_required */
1127 0, /* properties_provided */
1128 0, /* properties_destroyed */
1129 0, /* todo_flags_start */
1130 TODO_remove_unused_locals, /* todo_flags_finish */
1131 };
1132
1133 class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
1134 {
1135 public:
1136 pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
1137 : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
1138 {}
1139
1140 /* opt_pass methods: */
1141 virtual unsigned int execute (function *)
1142 {
1143 return execute_cleanup_cfg_post_optimizing ();
1144 }
1145
1146 }; // class pass_cleanup_cfg_post_optimizing
1147
1148 } // anon namespace
1149
1150 gimple_opt_pass *
1151 make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
1152 {
1153 return new pass_cleanup_cfg_post_optimizing (ctxt);
1154 }
1155
1156