[PATCH] scheduler register pressure excessive stack space

law@redhat.com
Wed May 29 10:30:00 GMT 2002


 In message <200204021625.LAA26702@makai.watson.ibm.com>, David Edelsohn 
writes:
 > 	The following scheduler patch has been bootstrapped on both x86
 > Linux and PowerPC AIX.  A SPEC run shows no performance regression and
 > slight performance improvement for a few testcases.
 > 
 > 	The patch prevents the scheduler from moving CLOBBERs very early
 > which increases register lifetime and can cause extreme stack usage.  The
 > patch also prevents CLOBBERs and other pseudo-instructions which do not
 > correspond to a function unit from counting against the processor issue
 > rate. 
 > 
 > 	This fixes a regression relative to GCC 2.95 and prevents kernel
 > stack overflows for some Linux kernel modules.
 > 
 > 	Okay for mainline and 3.1 branch?
 > 
 > Thanks, David
 > 
 > 
 > 2002-03-29  Dale Johannesen  <dalej@apple.com>
 > 
 > 	* haifa-sched.c (schedule_block): Do not count no-unit insns against
 > 	issue rate.
 > 
 > 2002-03-29  Michael Matz  <matz@kde.org>
 > 	    David Edelsohn  <edelsohn@gnu.org>
 > 
 > 	* sched-deps.c (sched_create_groups_for_libcalls): New function.
 > 	(sched_analyze): Use it.
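
To make the issue-rate point in the quoted message concrete, here is a toy
cycle-budget sketch.  It is not GCC code -- the types and the 4-issue machine
below are invented purely for illustration -- but it shows why charging naked
CLOBBERs against the per-cycle issue budget pushes real instructions into
later cycles for no benefit:

#include <stdio.h>

/* "Pseudo" stands for insns like naked USEs and CLOBBERs that emit no
   machine instruction; "real" insns occupy an issue slot.  */
enum kind { REAL_INSN, PSEUDO_INSN };

static int
cycles_needed (const enum kind *insns, int n, int issue_rate,
               int charge_pseudos)
{
  int cycles = 0, budget = 0, i;

  for (i = 0; i < n; i++)
    {
      if (insns[i] == PSEUDO_INSN && ! charge_pseudos)
        continue;                       /* issues "for free" */
      if (budget == 0)
        {
          cycles++;
          budget = issue_rate;
        }
      budget--;
    }
  return cycles;
}

int
main (void)
{
  /* Four real insns interleaved with four CLOBBER-like pseudo insns,
     on a hypothetical 4-issue machine.  */
  enum kind block[] = { REAL_INSN, PSEUDO_INSN, REAL_INSN, PSEUDO_INSN,
                        REAL_INSN, PSEUDO_INSN, REAL_INSN, PSEUDO_INSN };
  int n = sizeof block / sizeof block[0];

  printf ("charging pseudo insns:  %d cycle(s)\n",
          cycles_needed (block, n, 4, 1));   /* prints 2 */
  printf ("exempting pseudo insns: %d cycle(s)\n",
          cycles_needed (block, n, 4, 0));   /* prints 1 */
  return 0;
}
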
The schedule_block patch needed to be tightened up a little.  We want to 
ignore USEs and CLOBBERs -- the patch inadvertently ignored ASMs as well.

Since we know precisely what we want to ignore, we might as well test for
them directly with GET_CODE (PATTERN (insn)) rather than indirectly via
INSN_CODE (insn).
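
For the record, here is a tiny standalone sketch of that distinction.  The
types are toy stand-ins, not the real rtl.h definitions, and the earlier
version's exact test isn't shown here, so the "insn_code < 0" form below is
only my paraphrase of an indirect INSN_CODE-based check.  The point is that
an unrecognized-insn test also exempts inline asm, while the direct pattern
test exempts only naked USEs and CLOBBERs:

#include <stdio.h>

/* Toy stand-ins for GET_CODE (PATTERN (insn)) and INSN_CODE (insn).  */
enum pattern_code { SET, USE, CLOBBER, ASM_OPERANDS };

struct toy_insn
{
  enum pattern_code pattern;    /* GET_CODE (PATTERN (insn)) analogue */
  int insn_code;                /* INSN_CODE (insn) analogue; -1 = unrecognized */
};

/* Indirect test: treat every unrecognized insn as free.  Inline asm is
   also unrecognized, so it is wrongly exempted.  */
static int
counts_against_issue_rate_indirect (const struct toy_insn *insn)
{
  return insn->insn_code >= 0;
}

/* Direct test: exempt only naked USE and CLOBBER patterns.  */
static int
counts_against_issue_rate_direct (const struct toy_insn *insn)
{
  return insn->pattern != USE && insn->pattern != CLOBBER;
}

int
main (void)
{
  struct toy_insn asm_insn = { ASM_OPERANDS, -1 };
  struct toy_insn clobber_insn = { CLOBBER, -1 };

  printf ("asm:     indirect=%d  direct=%d\n",
          counts_against_issue_rate_indirect (&asm_insn),
          counts_against_issue_rate_direct (&asm_insn));
  printf ("clobber: indirect=%d  direct=%d\n",
          counts_against_issue_rate_indirect (&clobber_insn),
          counts_against_issue_rate_direct (&clobber_insn));
  return 0;
}
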


Here's the patch I'm checking into the mainline sources:

        * haifa-sched.c (schedule_block): Do not count USE and CLOBBER
        insns against the issue rate.

        * sched-deps.c (sched_create_groups_for_libcalls): New function.
        (sched_analyze): Use it.


Index: haifa-sched.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/haifa-sched.c,v
retrieving revision 1.204
diff -c -3 -p -r1.204 haifa-sched.c
*** haifa-sched.c	28 May 2002 22:03:06 -0000	1.204
--- haifa-sched.c	29 May 2002 16:51:35 -0000
*************** schedule_block (b, rgn_n_insns)
*** 2179,2185 ****
  	    can_issue_more =
  	      (*targetm.sched.variable_issue) (sched_dump, sched_verbose,
  					       insn, can_issue_more);
! 	  else
  	    can_issue_more--;
  
  	  schedule_insn (insn, &ready, clock_var);
--- 2179,2188 ----
  	    can_issue_more =
  	      (*targetm.sched.variable_issue) (sched_dump, sched_verbose,
  					       insn, can_issue_more);
! 	  /* A naked CLOBBER or USE generates no instruction, so do
! 	     not count them against the issue rate.  */
! 	  else if (GET_CODE (PATTERN (insn)) != USE
! 		   && GET_CODE (PATTERN (insn)) != CLOBBER)
  	    can_issue_more--;
  
  	  schedule_insn (insn, &ready, clock_var);
Index: sched-deps.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/sched-deps.c,v
retrieving revision 1.38
diff -c -3 -p -r1.38 sched-deps.c
*** sched-deps.c	28 May 2002 22:03:06 -0000	1.38
--- sched-deps.c	29 May 2002 16:51:35 -0000
*************** static void flush_pending_lists PARAMS (
*** 88,93 ****
--- 88,94 ----
  static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
  static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
  static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
+ static void sched_create_groups_for_libcalls PARAMS ((rtx, rtx));
  static rtx group_leader PARAMS ((rtx));
  
  static rtx get_condition PARAMS ((rtx));
*************** sched_analyze_insn (deps, x, insn, loop_
*** 1210,1215 ****
--- 1211,1267 ----
      }
  }
  
+ /* Find any libcall sequences between HEAD and TAIL inclusive; set
+    SCHED_GROUP_P appropriately for such sequences.  */
+ 
+ static void
+ sched_create_groups_for_libcalls (head, tail)
+      rtx head, tail;
+ {
+   rtx insn;
+   int tail_seen_p = 0;
+ 
+   for (insn = head;; insn = NEXT_INSN (insn))
+     {
+       rtx link, end_seq, set, r0, note;
+       if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == CLOBBER
+ 	  && (r0 = XEXP (PATTERN (insn), 0), GET_CODE (r0) == REG)
+ 	  && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
+ 	  && (end_seq = XEXP (link, 0)) != 0
+ 	  && INSN_P (end_seq)
+ 	  && (set = single_set (end_seq)) != 0
+ 	  && SET_DEST (set) == r0 && SET_SRC (set) == r0
+ 	  && (note = find_reg_note (end_seq, REG_EQUAL, NULL_RTX)) != 0)
+ 	{
+ 	  /* We found a libcall block between insn and end_seq.
+ 	     The inner insns should be scheduled in a block.  */
+ 	  rtx inner;
+ 	  /* Paranoia.  */
+ 	  if (insn == tail)
+ 	    tail_seen_p = 1;
+ 	  /* We don't want to set this flag on the initial clobber, because
+ 	     the semantic of SCHED_GROUP_P is to make insn be scheduled
+ 	     together with the previous insn.  */
+ 	  for (inner = NEXT_INSN (insn); inner; inner = NEXT_INSN (inner))
+ 	    {
+ 	      if (INSN_P (inner))
+ 		set_sched_group_p (inner);
+ 	      /* Paranoia.  */
+ 	      if (inner == tail)
+ 		tail_seen_p = 1;
+ 	      if (inner == end_seq)
+ 		break;
+ 	    }
+ 	  /* We should be able to skip the whole lib-call block.
+ 	     Remember that one NEXT_INSN is done in the loop-iteration.  */
+ 	  insn = end_seq;
+ 	}
+       if (insn == tail || tail_seen_p)
+ 	break;
+     }
+   return;
+ }
+ 
  /* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
     for every dependency.  */
  
*************** sched_analyze (deps, head, tail)
*** 1357,1362 ****
--- 1409,1418 ----
  	{
  	  if (current_sched_info->use_cselib)
  	    cselib_finish ();
+ 
+ 	  if (! reload_completed)
+ 	    sched_create_groups_for_libcalls (head, tail);
+ 
  	  return;
  	}
      }
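
As a postscript, here is a small standalone sketch of the grouping idea
behind sched_create_groups_for_libcalls.  The data structures are invented
for illustration and are not GCC's insn chain or note lists: a block start
simply records a pointer to its end insn, standing in for the REG_LIBCALL
note, and everything after the start up through the end gets the
SCHED_GROUP_P-style flag so the scheduler keeps the block contiguous:

#include <stdio.h>
#include <stddef.h>

struct toy_insn
{
  const char *name;
  struct toy_insn *next;
  struct toy_insn *libcall_end;  /* REG_LIBCALL-note analogue; NULL if not a block start */
  int sched_group_p;             /* SCHED_GROUP_P analogue */
};

static void
create_groups_for_libcalls (struct toy_insn *head, struct toy_insn *tail)
{
  struct toy_insn *insn;

  for (insn = head; insn != NULL; insn = insn->next)
    {
      if (insn->libcall_end != NULL)
        {
          /* Flag the inner insns, but not the initial marker itself:
             the group flag glues an insn to its predecessor.  */
          struct toy_insn *inner;
          for (inner = insn->next; inner != NULL; inner = inner->next)
            {
              inner->sched_group_p = 1;
              if (inner == insn->libcall_end)
                break;
            }
          insn = insn->libcall_end;   /* skip over the whole block */
        }
      if (insn == tail)
        break;
    }
}

int
main (void)
{
  struct toy_insn i4 = { "set r0,r0 (libcall end)", NULL, NULL, 0 };
  struct toy_insn i3 = { "call", &i4, NULL, 0 };
  struct toy_insn i2 = { "load arg", &i3, NULL, 0 };
  struct toy_insn i1 = { "clobber r0 (libcall begin)", &i2, &i4, 0 };
  struct toy_insn i0 = { "unrelated insn", &i1, NULL, 0 };
  struct toy_insn *p;

  create_groups_for_libcalls (&i0, &i4);
  for (p = &i0; p != NULL; p = p->next)
    printf ("%-28s SCHED_GROUP_P=%d\n", p->name, p->sched_group_p);
  return 0;
}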



