This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.



[sel-sched]: Committed patch to fix selective scheduling on ppc64


Hi,

This patch makes selective scheduling work on ppc64.

--
Maxim
2007-06-07  Maxim Kuvyrkov  <mkuvyrkov@ispras.ru>

	Make sel-sched work on ppc64.  Clean up the ia64 scheduling hooks.
	
	* sel-sched.c (fill_ready_list): Rewrite filtering of available
	expressions to schedule.  Handle unrecognizable insns correctly.

	* config/ia64/ia64.c (ia64_reset_main_sched_context): Remove.
	(ia64_set_sched_context): Clean up.
	(ia64_free_sched_context): Add assertion.

	* config/rs6000/rs6000.c (rs6000_alloc_sched_context): New static
	function implementing hook.
	(rs6000_init_sched_context, rs6000_set_sched_context): Ditto.
	(rs6000_free_sched_context): Ditto.
	(TARGET_SCHED_ALLOC_SCHED_CONTEXT, TARGET_SCHED_INIT_SCHED_CONTEXT):
	Redefine to point to the functions implementing hooks.
	(TARGET_SCHED_SET_SCHED_CONTEXT, TARGET_SCHED_FREE_SCHED_CONTEXT):
	Ditto.
	(rs6000_sched_reorder2): Do not use load_store_pendulum heuristic
	during selective scheduling.
	(pad_groups): Fix typo.
	(rs6000_sched_finish): Do not run redefine_groups after selective
	scheduling.
	(struct _rs6000_sched_context): New type.
	(rs6000_sched_context_def, rs6000_sched_context_t): New typedefs.
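
For reference, here is a minimal, self-contained model of the context
save/restore pattern the new rs6000 hooks follow (alloc -> init -> set ->
free, with one context kept per fence by the selective scheduler).  The
struct fields mirror the patch; the globals and the main() driver are mock
stand-ins for the scheduler state, not GCC code.  The real hooks are wired
up through the TARGET_SCHED_*_SCHED_CONTEXT macros redefined in rs6000.c
in the diff below.

/* Standalone sketch of the scheduling-context hook pattern.  */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Mock of the back end's global scheduling state that the hooks snapshot
   and restore (last_scheduled_insn is an rtx in rs6000.c; it is modelled
   as a plain pointer here).  */
static short cached_can_issue_more;
static void *last_scheduled_insn;
static int load_store_pendulum;

struct sched_context
{
  short cached_can_issue_more;
  void *last_scheduled_insn;
  int load_store_pendulum;
};

/* TARGET_SCHED_ALLOC_SCHED_CONTEXT: allocate storage for a context.  */
static void *
alloc_sched_context (void)
{
  return malloc (sizeof (struct sched_context));
}

/* TARGET_SCHED_INIT_SCHED_CONTEXT: fill SC with clean data if CLEAN_P,
   otherwise snapshot the current global state.  */
static void
init_sched_context (void *_sc, int clean_p)
{
  struct sched_context *sc = (struct sched_context *) _sc;

  if (clean_p)
    memset (sc, 0, sizeof (*sc));
  else
    {
      sc->cached_can_issue_more = cached_can_issue_more;
      sc->last_scheduled_insn = last_scheduled_insn;
      sc->load_store_pendulum = load_store_pendulum;
    }
}

/* TARGET_SCHED_SET_SCHED_CONTEXT: restore the global state from SC.  */
static void
set_sched_context (void *_sc)
{
  struct sched_context *sc = (struct sched_context *) _sc;

  cached_can_issue_more = sc->cached_can_issue_more;
  last_scheduled_insn = sc->last_scheduled_insn;
  load_store_pendulum = sc->load_store_pendulum;
}

/* TARGET_SCHED_FREE_SCHED_CONTEXT: release SC.  */
static void
free_sched_context (void *_sc)
{
  free (_sc);
}

int
main (void)
{
  /* The selective scheduler saves the state reached at a fence,
     schedules elsewhere, then restores it before continuing.  */
  void *ctx = alloc_sched_context ();

  cached_can_issue_more = 3;
  load_store_pendulum = -1;
  init_sched_context (ctx, 0);  /* Snapshot the current state.  */

  cached_can_issue_more = 0;    /* Scheduling proceeds elsewhere...  */
  load_store_pendulum = 2;

  set_sched_context (ctx);      /* ...and the snapshot is restored.  */
  printf ("can_issue_more=%d pendulum=%d\n",
          cached_can_issue_more, load_store_pendulum);

  free_sched_context (ctx);
  return 0;
}
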
Index: gcc/sel-sched.c
===================================================================
--- gcc/sel-sched.c	(revision 1142)
+++ gcc/sel-sched.c	(revision 1177)
@@ -2634,75 +2634,123 @@ sel_rank_for_schedule (const void *x, co
    max_issue).  BND and FENCE are current boundary and fence, 
    respectively.  */
 static rhs_t
-fill_ready_list (av_set_t av, bnd_t bnd, fence_t fence)
+fill_ready_list (av_set_t *av_ptr, bnd_t bnd, fence_t fence)
 {
   rhs_t rhs;
   av_set_iterator si;
   int n, i, stalled, sched_next_worked = 0;
   deps_t dc = BND_DC (bnd);
-  bool try_data_p = true;
-  bool try_control_p = true;
 
-  /* Fill the vector with recognizable insns.  */
-  FOR_EACH_RHS (rhs, si, av)
+  /* Don't pipeline already pipelined code as that would increase
+     number of unnecessary register moves.  */  
+  FOR_EACH_RHS_1 (rhs, si, av_ptr)
     {
-      insn_t insn = VINSN_INSN (RHS_VINSN (rhs));
+      if (EXPR_SCHED_TIMES (rhs)
+	  >= PARAM_VALUE (PARAM_SELSCHED_MAX_SCHED_TIMES))
+	av_set_iter_remove (&si);
+    }
 
-      if (/* This will also initialize INSN_CODE for max_issue ().  */
-	  recog_memoized (insn) < 0)
-        {
-          /* Do not pipeline these insns when they were already scheduled;
-             as we emit them unconditionally, it leads to an infinite loop.  */
-          if (EXPR_SCHED_TIMES (rhs) <= 0)
-	    return rhs;
-	  else
-	    gcc_assert (pipelining_p);
-        }
-      else
+  if (spec_info != NULL)
+    {
+      bool try_data_p = true;
+      bool try_control_p = true;
+
+      /* Scan *AV_PTR to find out if we want to consider speculative
+	 instructions for scheduling.  */
+      FOR_EACH_RHS (rhs, si, *av_ptr)
 	{
-	  if (spec_info != NULL)
-	    {
-	      ds_t ds = EXPR_SPEC_DONE_DS (rhs);
+	  ds_t ds;
+
+	  ds = EXPR_SPEC_DONE_DS (rhs);
+
+	  if ((spec_info->flags & PREFER_NON_DATA_SPEC)
+	      && !(ds & BEGIN_DATA))
+	    try_data_p = false;
+
+	  if ((spec_info->flags & PREFER_NON_CONTROL_SPEC)
+	      && !(ds & BEGIN_CONTROL))
+	    try_control_p = false;
+	}
+
+      FOR_EACH_RHS_1 (rhs, si, av_ptr)
+	{
+	  ds_t ds;
+
+	  ds = EXPR_SPEC_DONE_DS (rhs);
 
-	      if ((spec_info->flags & PREFER_NON_DATA_SPEC)
-		  && !(ds & BEGIN_DATA))
-		try_data_p = false;
-
-	      if ((spec_info->flags & PREFER_NON_CONTROL_SPEC)
-		  && !(ds & BEGIN_CONTROL))
-		try_control_p = false;
+	  if (ds & SPECULATIVE)
+	    {
+	      if ((ds & BEGIN_DATA) && !try_data_p)
+		/* We don't want any data speculative instructions right
+		   now.  */
+		av_set_iter_remove (&si);
+
+	      if ((ds & BEGIN_CONTROL) && !try_control_p)
+		/* We don't want any control speculative instructions right
+		   now.  */
+		av_set_iter_remove (&si);
 	    }
 	}
     }
 
+  /* Process USEs in *AV_PTR.  */
+  {
+    bool uses_present_p = false;
+    bool try_uses_p = true;
+
+    FOR_EACH_RHS (rhs, si, *av_ptr)
+      {
+	if (/* This will also initialize INSN_CODE for max_issue ().  */
+	    recog_memoized (EXPR_INSN_RTX (rhs)) < 0)
+	  {
+	    /* If we have a USE in *AV_PTR that was not scheduled yet,
+	       do so because it will do good only.  */
+	    if (EXPR_SCHED_TIMES (rhs) <= 0)
+	      return rhs;
+	    else
+	      {
+		gcc_assert (pipelining_p);
+
+		uses_present_p = true;
+	      }
+	  }
+	else
+	  try_uses_p = false;
+      }
+
+    if (uses_present_p)
+      {
+	if (!try_uses_p)
+	  /* If we don't want to schedule any USEs right now and we have some
+	     in *AV_PTR, remove them.  */
+	  {
+	    FOR_EACH_RHS_1 (rhs, si, av_ptr)
+	      {
+		if (INSN_CODE (EXPR_INSN_RTX (rhs)) < 0)
+		  av_set_iter_remove (&si);
+	      }
+	  }
+	else
+	  /* If we do want to schedule a USE, return the first one.  */
+	  {
+	    FOR_EACH_RHS (rhs, si, *av_ptr)
+	      {
+		gcc_assert (INSN_CODE (EXPR_INSN_RTX (rhs)) < 0);
+
+		return rhs;
+	      }
+	  }
+      }
+  }
+
   /* Allocate the vector for sorting the av set.  */
   if (!vec_av_set)
     vec_av_set = VEC_alloc (rhs_t, heap, 5);
   else
     gcc_assert (VEC_empty (rhs_t, vec_av_set));
 
-  FOR_EACH_RHS (rhs, si, av)
+  FOR_EACH_RHS (rhs, si, *av_ptr)
     {
-      ds_t ds = EXPR_SPEC_DONE_DS (rhs);
-
-      if (ds & SPECULATIVE)
-	{
-	  if ((ds & BEGIN_DATA) && !try_data_p)
-	    /* We don't want any data speculative instructions right now.  */
-	    continue;
-
-	  if ((ds & BEGIN_CONTROL) && !try_control_p)
-	    /* We don't want any control speculative instructions right
-	       now.  */
-	    continue;
-	}
-
-      if (EXPR_SCHED_TIMES (rhs)
-	  >= PARAM_VALUE (PARAM_SELSCHED_MAX_SCHED_TIMES))
-	/* Don't pipeline already pipelined code as that would increase
-	   number of unnecessary register moves.  */
-	continue;
-
       VEC_safe_push (rhs_t, heap, vec_av_set, rhs);
     }
 
@@ -2887,7 +2935,7 @@ find_best_rhs_and_reg_that_fits (av_set_
   /* We have one boundary per fence.  */
   gcc_assert (BLIST_NEXT (bnds) == NULL);
 
-  res = fill_ready_list (*av_vliw_ptr, BLIST_BND (bnds), fence);
+  res = fill_ready_list (av_vliw_ptr, BLIST_BND (bnds), fence);
 
   if (res == NULL && ready.n_ready > 0)
     {
Index: gcc/config/ia64/ia64.c
===================================================================
--- gcc/config/ia64/ia64.c	(revision 1142)
+++ gcc/config/ia64/ia64.c	(revision 1177)
@@ -6794,36 +6794,19 @@ ia64_init_sched_context (void *_sc, bool
     }
 }
 
-/* Resets the global scheduling context.  */
-static void
-ia64_reset_main_sched_context (void)
-{
-  state_reset (prev_cycle_state);
-  last_scheduled_insn = NULL_RTX;
-  memset (rws_sum, 0, sizeof (rws_sum));
-  memset (rws_insn, 0, sizeof (rws_insn));
-  first_instruction = 1;
-}
-
 /* Sets the global scheduling context to the one pointed to by _SC.  */
 static void
 ia64_set_sched_context (void *_sc)
 {
-  if (_sc)
-    {
-      ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
+  ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
 
-      memcpy (prev_cycle_state, sc->prev_cycle_state, dfa_state_size);
-      last_scheduled_insn = sc->last_scheduled_insn;
-      memcpy (rws_sum, sc->rws_sum, sizeof (rws_sum));
-      memcpy (rws_insn, sc->rws_insn, sizeof (rws_insn));
-      first_instruction = sc->first_instruction;
-    }
-  else
-    {
-      gcc_unreachable ();
-      ia64_reset_main_sched_context ();
-    }
+  gcc_assert (sc != NULL);
+
+  memcpy (prev_cycle_state, sc->prev_cycle_state, dfa_state_size);
+  last_scheduled_insn = sc->last_scheduled_insn;
+  memcpy (rws_sum, sc->rws_sum, sizeof (rws_sum));
+  memcpy (rws_insn, sc->rws_insn, sizeof (rws_insn));
+  first_instruction = sc->first_instruction;
 }
 
 /* Clears the data in the _SC scheduling context.  */
@@ -6837,6 +6820,8 @@ ia64_clear_sched_context (void *_sc)
 static void
 ia64_free_sched_context (void *_sc)
 {
+  gcc_assert (_sc != NULL);
+
   free (_sc);
 }
 
Index: gcc/config/rs6000/rs6000.c
===================================================================
--- gcc/config/rs6000/rs6000.c	(revision 1142)
+++ gcc/config/rs6000/rs6000.c	(revision 1177)
@@ -714,6 +714,12 @@ static int rs6000_sched_reorder (FILE *,
 static int rs6000_sched_reorder2 (FILE *, int, rtx *, int *, int);
 static int rs6000_use_sched_lookahead (void);
 static int rs6000_use_sched_lookahead_guard (rtx);
+
+static void * rs6000_alloc_sched_context (void);
+static void rs6000_init_sched_context (void *, bool);
+static void rs6000_set_sched_context (void *);
+static void rs6000_free_sched_context (void *);
+
 static tree rs6000_builtin_mask_for_load (void);
 static tree rs6000_builtin_mul_widen_even (tree);
 static tree rs6000_builtin_mul_widen_odd (tree);
@@ -977,6 +983,15 @@ static const char alt_reg_names[][8] =
 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard
 
+#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
+#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
+#undef TARGET_SCHED_INIT_SCHED_CONTEXT
+#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
+#undef TARGET_SCHED_SET_SCHED_CONTEXT
+#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
+#undef TARGET_SCHED_FREE_SCHED_CONTEXT
+#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
+
 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
 #undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN
@@ -17726,7 +17741,8 @@ rs6000_sched_reorder2 (FILE *dump, int s
                   for (i=pos; i<*pn_ready-1; i++)
                     ready[i] = ready[i + 1];
                   ready[*pn_ready-1] = tmp;
-                  if INSN_PRIORITY_KNOWN (tmp)
+
+                  if (!SEL_SCHED_P && INSN_PRIORITY_KNOWN (tmp))
                     INSN_PRIORITY (tmp)++;
                   break;
                 }
@@ -17743,7 +17759,8 @@ rs6000_sched_reorder2 (FILE *dump, int s
           while (pos >= 0)
             {
               if (is_load_insn (ready[pos])
-                  && INSN_PRIORITY_KNOWN (ready[pos]))
+                  && !SEL_SCHED_P
+		  && INSN_PRIORITY_KNOWN (ready[pos]))
                 {
                   INSN_PRIORITY (ready[pos])++;
 
@@ -17785,8 +17802,10 @@ rs6000_sched_reorder2 (FILE *dump, int s
                       for (i=pos; i<*pn_ready-1; i++)
                         ready[i] = ready[i + 1];
                       ready[*pn_ready-1] = tmp;
-                      if INSN_PRIORITY_KNOWN (tmp)
+
+                      if (!SEL_SCHED_P && INSN_PRIORITY_KNOWN (tmp))
                         INSN_PRIORITY (tmp)++;
+
                       first_store_pos = -1;
 
                       break;
@@ -17805,7 +17824,7 @@ rs6000_sched_reorder2 (FILE *dump, int s
               for (i=first_store_pos; i<*pn_ready-1; i++)
                 ready[i] = ready[i + 1];
               ready[*pn_ready-1] = tmp;
-              if INSN_PRIORITY_KNOWN (tmp)
+              if (!SEL_SCHED_P && INSN_PRIORITY_KNOWN (tmp))
                 INSN_PRIORITY (tmp)++;
             }
         }
@@ -17819,7 +17838,8 @@ rs6000_sched_reorder2 (FILE *dump, int s
           while (pos >= 0)
             {
               if (is_store_insn (ready[pos])
-                  && INSN_PRIORITY_KNOWN (ready[pos]))
+                  && !SEL_SCHED_P
+		  && INSN_PRIORITY_KNOWN (ready[pos]))
                 {
                   INSN_PRIORITY (ready[pos])++;
 
@@ -18321,7 +18341,7 @@ pad_groups (FILE *dump, int sched_verbos
       if (group_end)
 	{
 	  /* If the scheduler had marked group termination at this location
-	     (between insn and next_indn), and neither insn nor next_insn will
+	     (between insn and next_insn), and neither insn nor next_insn will
 	     force group termination, pad the group with nops to force group
 	     termination.  */
 	  if (can_issue_more
@@ -18375,7 +18395,8 @@ rs6000_sched_finish (FILE *dump, int sch
 
   if (reload_completed && rs6000_sched_groups)
     {
-      if (rs6000_sched_insert_nops == sched_finish_none)
+      if (rs6000_sched_insert_nops == sched_finish_none
+	  || SEL_SCHED_P)
 	return;
 
       if (rs6000_sched_insert_nops == sched_finish_pad_groups)
@@ -18395,6 +18416,67 @@ rs6000_sched_finish (FILE *dump, int sch
 	}
     }
 }
+
+struct _rs6000_sched_context
+{
+  short cached_can_issue_more;
+  rtx last_scheduled_insn;
+  int load_store_pendulum;
+};
+
+typedef struct _rs6000_sched_context rs6000_sched_context_def;
+typedef rs6000_sched_context_def *rs6000_sched_context_t;
+
+/* Allocate store for new scheduling context.  */
+static void *
+rs6000_alloc_sched_context (void)
+{
+  return xmalloc (sizeof (rs6000_sched_context_def));
+}
+
+/* If CLEAN_P is true then initializes _SC with clean data,
+   and from the global context otherwise.  */
+static void
+rs6000_init_sched_context (void *_sc, bool clean_p)
+{
+  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
+
+  if (clean_p)
+    {
+      sc->cached_can_issue_more = 0;
+      sc->last_scheduled_insn = NULL_RTX;
+      sc->load_store_pendulum = 0;
+    }
+  else
+    {
+      sc->cached_can_issue_more = cached_can_issue_more;
+      sc->last_scheduled_insn = last_scheduled_insn;
+      sc->load_store_pendulum = load_store_pendulum;
+    }
+}
+
+/* Sets the global scheduling context to the one pointed to by _SC.  */
+static void
+rs6000_set_sched_context (void *_sc)
+{
+  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
+
+  gcc_assert (sc != NULL);
+
+  cached_can_issue_more = sc->cached_can_issue_more;
+  last_scheduled_insn = sc->last_scheduled_insn;
+  load_store_pendulum = sc->load_store_pendulum;
+}
+
+/* Free _SC.  */
+static void
+rs6000_free_sched_context (void *_sc)
+{
+  gcc_assert (_sc != NULL);
+
+  free (_sc);
+}
+
 
 /* Length in units of the trampoline for entering a nested function.  */
 
