This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

bugfix for sched groups


First found on the v850 and cc0. For this testcase (obviously at O2):

/* Helper used by the testcase below: emits a single newline.  */
void new_line()
{
 printf("\n");
}

/* Driver for the scheduler testcase: prints the strings, then evaluates
   a compound comparison whose cc0 set/use pairs must stay grouped by
   the scheduler (see the discussion above).  */
int main()
{
 int a,b,c,d;
 char *str[7] = { "This", "is", "a", "test", "program", "with", "-O2."
};
 int i;

 a = 1;
 b = 2;
 c = 4;
 d = 3;

 for (i = 0; i < 7; i++) {
   printf("%s ",str[i]);
 }
 new_line();

 /* The interesting part: at -O2 this generates two cc0 set/use pairs,
    and the scheduler must keep each setter adjacent to its user.
    Evaluates as (1 >= 2) != (4 >= 3), i.e. 0 != 1, which is true,
    so "OK" is the expected output.  */
 /*  1 >= 2 != 4 >= 3 */
 if (a >= b != c >= d)
   printf("OK\n");
 else printf("NG\n");

 return 0;
}

For the comparison we'd end up moving the sets to cc0 together and the
uses later, not keeping the scheduling group together. After talking
with rth offline it looks like we want to keep a scheduling group's
dependencies within the group - except for the head of the group which
can have outside dependencies. This patch sets this up.

Tested on x86-linux, mips-elf and v850-elf.

OK?

-eric

-- 
Eric Christopher <echristo@redhat.com>

2004-11-12  Eric Christopher  <echristo@redhat.com>
 
        * sched-deps.c (set_sched_group_p): Delete.
        (delete_all_dependences): New function.
        (fixup_sched_groups): New function.  Use delete_all_dependences.
        (sched_analyze_insn): Use fixup_sched_groups.
 
Index: sched-deps.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/sched-deps.c,v
retrieving revision 1.84
diff -u -p -w -r1.84 sched-deps.c
--- sched-deps.c	8 Nov 2004 19:07:48 -0000	1.84
+++ sched-deps.c	12 Nov 2004 22:53:01 -0000
@@ -94,7 +94,8 @@ static bitmap_head *forward_dependency_c
 static int deps_may_trap_p (rtx);
 static void add_dependence_list (rtx, rtx, enum reg_note);
 static void add_dependence_list_and_free (rtx, rtx *, enum reg_note);
-static void set_sched_group_p (rtx);
+static void delete_all_dependences (rtx);
+static void fixup_sched_groups (rtx);
 
 static void flush_pending_lists (struct deps *, rtx, int, int);
 static void sched_analyze_1 (struct deps *, rtx, rtx);
@@ -369,18 +370,54 @@ add_dependence_list_and_free (rtx insn, 
     }
 }
 
-/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
-   goes along with that.  */
+/* Clear all dependencies for an insn.  */
 
 static void
-set_sched_group_p (rtx insn)
+delete_all_dependences (rtx insn)
 {
-  rtx prev;
+  /* Clear caches, if they exist, as well as free the dependence.  */
 
-  SCHED_GROUP_P (insn) = 1;
+#ifdef INSN_SCHEDULING
+  if (true_dependency_cache != NULL)
+    {
+      bitmap_clear (&true_dependency_cache[INSN_LUID (insn)]);
+      bitmap_clear (&anti_dependency_cache[INSN_LUID (insn)]);
+      bitmap_clear (&output_dependency_cache[INSN_LUID (insn)]);
+    }
+#endif
 
-  prev = prev_nonnote_insn (insn);
-  add_dependence (insn, prev, REG_DEP_ANTI);
+  free_INSN_LIST_list (&LOG_LINKS (insn));
+}
+
+/* All insns in a scheduling group except the first should only have
+   dependencies on the previous insn in the group.  So we find the
+   first instruction in the scheduling group by walking the dependence
+   chains backwards. Then we add the dependencies for the group to
+   the previous nonnote insn.  */
+
+static void
+fixup_sched_groups (rtx insn)
+{
+  rtx link;
+
+  for (link = LOG_LINKS (insn); link ; link = XEXP (link, 1))
+    {
+      rtx i = insn;
+      do
+	{
+	  i = prev_nonnote_insn (i);
+
+	  if (XEXP (link, 0) == i)
+	    goto next_link;
+	} while (SCHED_GROUP_P (i));
+      add_dependence (i, XEXP (link, 0), REG_NOTE_KIND (link));
+    next_link:;
+    }
+
+  delete_all_dependences (insn);
+
+  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote_insn (insn)))
+    add_dependence (insn, prev_nonnote_insn (insn), REG_DEP_ANTI);
 }
 
 /* Process an insn's memory dependencies.  There are four kinds of
@@ -643,7 +680,7 @@ sched_analyze_2 (struct deps *deps, rtx 
 #ifdef HAVE_cc0
     case CC0:
       /* User of CC0 depends on immediately preceding insn.  */
-      set_sched_group_p (insn);
+      SCHED_GROUP_P (insn) = 1;
        /* Don't move CC0 setter to another block (it can set up the
         same flag for previous CC0 users which is safe).  */
       CANT_MOVE (prev_nonnote_insn (insn)) = 1;
@@ -1112,7 +1149,7 @@ sched_analyze_insn (struct deps *deps, r
 
   if (deps->libcall_block_tail_insn)
     {
-      set_sched_group_p (insn);
+      SCHED_GROUP_P (insn) = 1;
       CANT_MOVE (insn) = 1;
     }
 
@@ -1158,15 +1195,10 @@ sched_analyze_insn (struct deps *deps, r
       if (src_regno < FIRST_PSEUDO_REGISTER
 	  || dest_regno < FIRST_PSEUDO_REGISTER)
 	{
-	  /* If we are inside a post-call group right at the start of the
-	     scheduling region, we must not add a dependency.  */
 	  if (deps->in_post_call_group_p == post_call_initial)
-	    {
-	      SCHED_GROUP_P (insn) = 1;
 	      deps->in_post_call_group_p = post_call;
-	    }
-	  else
-	    set_sched_group_p (insn);
+
+	  SCHED_GROUP_P (insn) = 1;
 	  CANT_MOVE (insn) = 1;
 	}
       else
@@ -1175,6 +1207,11 @@ sched_analyze_insn (struct deps *deps, r
 	  deps->in_post_call_group_p = not_post_call;
 	}
     }
+
+  /* Fixup the dependencies in the sched group.  */
+  if (SCHED_GROUP_P (insn))
+    fixup_sched_groups (insn);
+
 }
 
/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS



Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]