This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.



Make ipa-split a feedback-guided pass


Hi,
after Richard's SSA-profiling changes, we no longer run ipa-split on the
profile.  This is wrong, since splitting is obviously a profile-guided pass.

To avoid the need for an extra global pass over the callgraph after profiling,
this patch keeps the original ipa-split pass in place, disables it when
profiling is active, and adds a new pass_feedback_split_functions, run as a
subpass of the profiling pass, to handle splitting after the profile is read.
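
In condensed form (all names are taken from the ipa-split.c hunk below, where
the full definitions live), the two gates end up mutually exclusive: the early
pass is disabled as soon as any profiling is involved, and the feedback pass
only runs once a profile is actually read, so with plain -fprofile-generate
neither pass splits:

static bool
gate_split_functions (void)
{
  /* Early pass: skip splitting while generating or using a profile.  */
  return (flag_partial_inlining
	  && !profile_arc_flag && !flag_branch_probabilities);
}

static bool
gate_feedback_split_functions (void)
{
  /* Feedback pass, hooked as subpass of pass_ipa_tree_profile: split only
     after the profile has been read.  */
  return (flag_partial_inlining
	  && flag_branch_probabilities);
}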

There is a small complication with the need to rebuild cgraph edges on
functions that are split (this is not a big deal given that the function
headers are small).  One can't do that in the pass itself, since the TODOs
clean up the CFG and one then gets ICEs on stale cgraph edges.  Instead I've
added a new TODO to deal with this case.
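
Condensed from the hunks below, the interaction is: the feedback pass ORs the
new flag into its return value only when splitting actually changed something,
and execute_function_todo honours the flag after the CFG-cleanup TODOs have
already run, so the edges are never rebuilt against a stale CFG:

/* ipa-split.c: request the rebuild only when the split pass did work.  */
static unsigned int
execute_feedback_split_functions (void)
{
  unsigned int retval = execute_split_functions ();
  if (retval)
    retval |= TODO_rebuild_cgraph_edges;
  return retval;
}

/* passes.c, excerpt of execute_function_todo: perform the rebuild.  */
  if (flags & TODO_rebuild_cgraph_edges)
    rebuild_cgraph_edges ();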

lto-profiledbootstrapped/regtested x86_64-linux, OK?

Honza

	PR tree-optimization/47234 
	* tree-pass.h (TODO_rebuild_cgraph_edges): New TODO.
	(pass_feedback_split_functions): Declare.
	* passes.c (init_optimization_passes): Add pass_feedback_split_functions
	as subpass of pass_ipa_tree_profile.
	(execute_function_todo): Handle TODO_rebuild_cgraph_edges.
	* ipa-split.c (gate_split_functions): Update comments; disable
	split-functions for profile_arc_flag and branch_probabilities.
	(gate_feedback_split_functions): New function.
	(execute_feedback_split_functions): New function.
	(pass_feedback_split_functions): New global var.
Index: tree-pass.h
===================================================================
--- tree-pass.h	(revision 168596)
+++ tree-pass.h	(working copy)
@@ -312,6 +312,9 @@ struct dump_file_info
 /* Rebuild the addressable-vars bitmap and do register promotion.  */
 #define TODO_update_address_taken	(1 << 21)
 
+/* Rebuild the callgraph edges.  */
+#define TODO_rebuild_cgraph_edges       (1 << 22)
+
 /* Internally used in execute_function_todo().  */
 #define TODO_update_ssa_any		\
     (TODO_update_ssa			\
@@ -442,6 +445,7 @@ extern struct gimple_opt_pass pass_local
 extern struct gimple_opt_pass pass_tracer;
 extern struct gimple_opt_pass pass_warn_unused_result;
 extern struct gimple_opt_pass pass_split_functions;
+extern struct gimple_opt_pass pass_feedback_split_functions;
 
 /* IPA Passes */
 extern struct simple_ipa_opt_pass pass_ipa_lower_emutls;
Index: passes.c
===================================================================
--- passes.c	(revision 168596)
+++ passes.c	(working copy)
@@ -785,6 +786,10 @@ init_optimization_passes (void)
       NEXT_PASS (pass_inline_parameters);
     }
   NEXT_PASS (pass_ipa_tree_profile);
+    {
+      struct opt_pass **p = &pass_ipa_tree_profile.pass.sub;
+      NEXT_PASS (pass_feedback_split_functions);
+    }
   NEXT_PASS (pass_ipa_increase_alignment);
   NEXT_PASS (pass_ipa_matrix_reorg);
   NEXT_PASS (pass_ipa_lower_emutls);
@@ -1227,6 +1232,9 @@ execute_function_todo (void *data)
   if (flags & TODO_rebuild_frequencies)
     rebuild_frequencies ();
 
+  if (flags & TODO_rebuild_cgraph_edges)
+    rebuild_cgraph_edges ();
+
   /* If we've seen errors do not bother running any verifiers.  */
   if (seen_error ())
     return;
Index: ipa-split.c
===================================================================
--- ipa-split.c	(revision 168596)
+++ ipa-split.c	(working copy)
@@ -1265,7 +1265,9 @@ execute_split_functions (void)
   /* See if it makes sense to try to split.
      It makes sense to split if we inline, that is if we have direct calls to
      handle or direct calls are possibly going to appear as result of indirect
-     inlining or LTO.
+     inlining or LTO.  Also handle -fprofile-generate as LTO to allow non-LTO
+     training for LTO -fprofile-use build.
+
      Note that we are not completely conservative about disqualifying functions
      called once.  It is possible that the caller is called more then once and
      then inlining would still benefit.  */
@@ -1336,10 +1338,15 @@ execute_split_functions (void)
   return todo;
 }
 
+/* Gate function splitting pass.  When doing profile feedback, we want
+   to execute the pass after the profile is read.  So disable this one
+   during early optimization.  */
+
 static bool
 gate_split_functions (void)
 {
-  return flag_partial_inlining;
+  return (flag_partial_inlining
+	  && !profile_arc_flag && !flag_branch_probabilities);
 }
 
 struct gimple_opt_pass pass_split_functions =
@@ -1352,6 +1359,47 @@ struct gimple_opt_pass pass_split_functi
   NULL,					/* sub */
   NULL,					/* next */
   0,					/* static_pass_number */
+  TV_IPA_FNSPLIT,			/* tv_id */
+  PROP_cfg,				/* properties_required */
+  0,					/* properties_provided */
+  0,					/* properties_destroyed */
+  0,					/* todo_flags_start */
+  TODO_dump_func			/* todo_flags_finish */
+ }
+};
+
+/* Gate feedback-driven function splitting pass.
+   We don't need to split when profiling at all; we are producing
+   lousy code anyway.  */
+
+static bool
+gate_feedback_split_functions (void)
+{
+  return (flag_partial_inlining
+	  && flag_branch_probabilities);
+}
+
+/* Execute function splitting pass.  */
+
+static unsigned int
+execute_feedback_split_functions (void)
+{
+  unsigned int retval = execute_split_functions ();
+  if (retval)
+    retval |= TODO_rebuild_cgraph_edges;
+  return retval;
+}
+
+struct gimple_opt_pass pass_feedback_split_functions =
+{
+ {
+  GIMPLE_PASS,
+  "feedback_fnsplit",			/* name */
+  gate_feedback_split_functions,	/* gate */
+  execute_feedback_split_functions,	/* execute */
+  NULL,					/* sub */
+  NULL,					/* next */
+  0,					/* static_pass_number */
   TV_IPA_FNSPLIT,			/* tv_id */
   PROP_cfg,				/* properties_required */
   0,					/* properties_provided */

