[PR42245] Recompute topological order after redirecting an edge

Alexander Monakov <amonakov@ispras.ru>
Wed Dec 30 16:34:00 GMT 2009


Redirecting an edge may invalidate the topological order of the basic blocks
in the current region.  We cannot recompute it immediately, because the
redirection might have made a block unreachable, and such a block would not
be visited by post_order_compute.  Instead, sel_redirect_edge_and_branch now
returns a flag indicating whether the caller must recompute the topological
order after empty blocks are deleted.
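
For illustration, here is a standalone sketch (not GCC code: the toy CFG,
the DFS, and all names except the BLOCK_TO_BB/BB_TO_BLOCK analogues are
invented) of the renumbering sel_recompute_toporder performs: compute a
post order, then walk it backwards so that every block gets a smaller
index than the blocks it can reach, mirroring how the patch below consumes
the result of post_order_compute.

#include <stdio.h>

#define N_BLOCKS 5

/* Successor lists, -1-terminated.  Edges: 0->1 0->2 1->3 2->3 3->4.  */
static const int succ[N_BLOCKS][3] = {
  {1, 2, -1}, {3, -1, -1}, {3, -1, -1}, {4, -1, -1}, {-1, -1, -1}
};

static int postorder[N_BLOCKS], n_post;
static int visited[N_BLOCKS];

/* Plain depth-first search recording each block after its successors.  */
static void
dfs (int b)
{
  int i;

  visited[b] = 1;
  for (i = 0; succ[b][i] >= 0; i++)
    if (!visited[succ[b][i]])
      dfs (succ[b][i]);
  postorder[n_post++] = b;
}

int
main (void)
{
  int block_to_bb[N_BLOCKS];  /* analogue of BLOCK_TO_BB */
  int bb_to_block[N_BLOCKS];  /* analogue of BB_TO_BLOCK */
  int i, n;

  dfs (0);

  /* Walk the post order backwards, assigning consecutive positions,
     as the loop in sel_recompute_toporder does.  */
  for (n = 0, i = n_post - 1; i >= 0; i--)
    {
      block_to_bb[postorder[i]] = n;
      bb_to_block[n] = postorder[i];
      n++;
    }

  for (i = 0; i < N_BLOCKS; i++)
    printf ("position %d: block %d\n", i, bb_to_block[i]);
  return 0;
}

In sel_redirect_edge_and_branch itself, the need for this renumbering is
detected cheaply: the order is stale exactly when the redirected edge now
goes from a block with a larger BLOCK_TO_BB index to a block with a smaller
one within the same region.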

2009-12-30  Andrey Belevantsev  <abel@ispras.ru>
	    Alexander Monakov  <amonakov@ispras.ru>

	PR middle-end/42245
	* sel-sched-ir.c (sel_recompute_toporder): New.  Use it...
	(maybe_tidy_empty_bb): ... here,
	(tidy_control_flow): ... and here.  Recompute topological order
	of basic blocks in region if necessary.
	(sel_redirect_edge_and_branch): Change return type.  Return true
	if topological order might have been invalidated.
	* sel-sched-ir.h (sel_redirect_edge_and_branch): Update prototype.

	* gcc.dg/pr42245.c: New.
---
 gcc/sel-sched-ir.c             |   55 ++++++++++++++++++++++++++++++++++++---
 gcc/sel-sched-ir.h             |    2 +-
 gcc/testsuite/gcc.dg/pr42245.c |   30 +++++++++++++++++++++
 3 files changed, 81 insertions(+), 6 deletions(-)
 create mode 100644 gcc/testsuite/gcc.dg/pr42245.c

diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index e864eb4..16eacfe 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -3503,6 +3503,33 @@ verify_backedges (void)
 
 /* Functions to work with control flow.  */
 
+/* Recompute BLOCK_TO_BB and BB_TO_BLOCK for the current region so that the
+   blocks are sorted in topological order (which might have been invalidated
+   by redirecting an edge).  */
+static void
+sel_recompute_toporder (void)
+{
+  int i, n, rgn;
+  int *postorder, n_blocks;
+
+  postorder = XALLOCAVEC (int, n_basic_blocks);
+  n_blocks = post_order_compute (postorder, false, false);
+
+  rgn = CONTAINING_RGN (BB_TO_BLOCK (0));
+  for (n = 0, i = n_blocks - 1; i >= 0; i--)
+    if (CONTAINING_RGN (postorder[i]) == rgn)
+      {
+	BLOCK_TO_BB (postorder[i]) = n;
+	BB_TO_BLOCK (n) = postorder[i];
+	n++;
+      }
+
+  /* Assert that we updated info for all blocks.  We may miss some blocks
+     if this function is called after redirecting an edge made a block
+     unreachable, but before that block is deleted.  */
+  gcc_assert (n == RGN_NR_BLOCKS (rgn));
+}
+
 /* Tidy the possibly empty block BB.  */
 bool
 maybe_tidy_empty_bb (basic_block bb)
@@ -3510,7 +3537,7 @@ maybe_tidy_empty_bb (basic_block bb)
   basic_block succ_bb, pred_bb;
   edge e;
   edge_iterator ei;
-  bool rescan_p;
+  bool rescan_p, recompute_toporder_p = false;
 
   /* Keep empty bb only if this block immediately precedes EXIT and
      has incoming non-fallthrough edge.  Otherwise remove it.  */
@@ -3552,7 +3579,7 @@ maybe_tidy_empty_bb (basic_block bb)
 
           if (!(e->flags & EDGE_FALLTHRU))
             {
-              sel_redirect_edge_and_branch (e, succ_bb);
+              recompute_toporder_p |= sel_redirect_edge_and_branch (e, succ_bb);
               rescan_p = true;
               break;
             }
@@ -3572,6 +3599,9 @@ maybe_tidy_empty_bb (basic_block bb)
       remove_empty_bb (bb, true);
     }
 
+  if (recompute_toporder_p)
+    sel_recompute_toporder ();
+
 #ifdef ENABLE_CHECKING
   verify_backedges ();
 #endif
@@ -3640,10 +3670,13 @@ tidy_control_flow (basic_block xbb, bool full_tidying)
       /* Also this jump is not at the scheduling boundary.  */
       && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb)))
     {
+      bool recompute_toporder_p;
       /* Clear data structures of jump - jump itself will be removed
          by sel_redirect_edge_and_branch.  */
       clear_expr (INSN_EXPR (BB_END (xbb->prev_bb)));
-      sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb);
+      recompute_toporder_p
+        = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb);
+
       gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU);
 
       /* It can turn out that after removing unused jump, basic block
@@ -3651,6 +3684,8 @@ tidy_control_flow (basic_block xbb, bool full_tidying)
          remove it too.  */
       if (sel_bb_empty_p (xbb->prev_bb))
         changed = maybe_tidy_empty_bb (xbb->prev_bb);
+      else if (recompute_toporder_p)
+	sel_recompute_toporder ();
     }
 
   return changed;
@@ -5355,8 +5390,9 @@ sel_redirect_edge_and_branch_force (edge e, basic_block to)
     sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
 }
 
-/* A wrapper for redirect_edge_and_branch.  */
-void
+/* A wrapper for redirect_edge_and_branch.  Return TRUE if the blocks
+   connected by the redirected edge are now in reverse topological order.  */
+bool
 sel_redirect_edge_and_branch (edge e, basic_block to)
 {
   bool latch_edge_p;
@@ -5364,6 +5400,7 @@ sel_redirect_edge_and_branch (edge e, basic_block to)
   int prev_max_uid;
   rtx jump;
   edge redirected;
+  bool recompute_toporder_p = false;
 
   latch_edge_p = (pipelining_p
                   && current_loop_nest
@@ -5383,9 +5420,17 @@ sel_redirect_edge_and_branch (edge e, basic_block to)
       gcc_assert (loop_latch_edge (current_loop_nest));
     }
 
+  /* In rare situations, the topological relation between the blocks connected
+     by the redirected edge can change.  Update block_to_bb/bb_to_block.  */
+  if (CONTAINING_RGN (e->src->index) == CONTAINING_RGN (to->index)
+      && BLOCK_TO_BB (e->src->index) > BLOCK_TO_BB (to->index))
+    recompute_toporder_p = true;
+
   jump = find_new_jump (src, NULL, prev_max_uid);
   if (jump)
     sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
+
+  return recompute_toporder_p;
 }
 
 /* This variable holds the cfg hooks used by the selective scheduler.  */
diff --git a/gcc/sel-sched-ir.h b/gcc/sel-sched-ir.h
index 1950a65..8476f4e 100644
--- a/gcc/sel-sched-ir.h
+++ b/gcc/sel-sched-ir.h
@@ -1617,7 +1617,7 @@ extern bool maybe_tidy_empty_bb (basic_block bb);
 extern basic_block sel_split_edge (edge);
 extern basic_block sel_create_recovery_block (insn_t);
 extern void sel_merge_blocks (basic_block, basic_block);
-extern void sel_redirect_edge_and_branch (edge, basic_block);
+extern bool sel_redirect_edge_and_branch (edge, basic_block);
 extern void sel_redirect_edge_and_branch_force (edge, basic_block);
 extern void sel_init_pipelining (void);
 extern void sel_finish_pipelining (void);
diff --git a/gcc/testsuite/gcc.dg/pr42245.c b/gcc/testsuite/gcc.dg/pr42245.c
new file mode 100644
index 0000000..98dd1d0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr42245.c
@@ -0,0 +1,30 @@
+/* { dg-do compile { target powerpc*-*-* ia64-*-* x86_64-*-* } } */
+/* { dg-options "-O2 -fselective-scheduling -fsel-sched-pipelining" } */
+
+extern int N_words;
+typedef struct DIS_node_struct DIS_node;
+typedef struct CON_list_struct CON_list;
+
+struct DIS_node_struct
+{
+  CON_list *cl;
+};
+
+void
+build_DIS_CON_tree (void)
+{
+  int w;
+  DIS_node *dnroot, *dn;
+  CON_list *child, *xchild;
+  for (w = 0; w < N_words; w++)
+    {
+      if (dnroot == ((void *) 0))
+        {
+          dnroot = dn;
+          for (child = dn->cl; child != ((void *) 0); child = xchild)
+            {
+            }
+        }
+    }
+}
+
-- 
1.6.4.3