
[committed] sched-deps.c patch for unconditional dependencies


The IA-64 compiler is hitting an ICE in sched-ebb.c during the
machine-dependent reorg pass.  The cause is some missing dependencies.

We have a sequence like
   jump_insn 1
   (note ... NOTE_INSN_LOOP_BEGIN)
   (cond_exec (eq p8 0) ...)
   jump_insn 2 (ne p8 0) ...
The cond_exec is made a scheduling barrier in order to preserve the loop
begin note, which in turn is needed to preserve the integrity of the
loop-dependent reg_n_refs info.  So we end up with a dependency
from the cond_exec to jump_insn 1, and all of the dependency lists are
cleared.  We then notice that jump_insn 2 and the cond_exec are mutex,
so no dependency is created between them.  Since jump_insn 2 now has no
dependencies, the scheduler decides to move it before jump_insn 1, and
now we have a mess.  sched-ebb.c fails because the block structure is
confused.  If sched-ebb.c hadn't failed, we would have gotten incorrect
code emitted.

The flaw here is that dependencies created for structural reasons
(rather than data-flow reasons) must always be unconditional.  This was
broken a few months ago when code was added to try to add correct
dependencies for COND_EXEC instructions.
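
For reference, this is the pre-patch wrapper in sched-deps.c; every
list-based dependence, structural or not, went through the mutex filter:

    static void
    add_dependence_list (rtx insn, rtx list, enum reg_note dep_type)
    {
      for (; list; list = XEXP (list, 1))
        {
          /* For the (eq p8 0) cond_exec barrier and the (ne p8 0) jump,
             the conditions are mutually exclusive, so this test fails
             and the structural dependence is silently dropped.  */
          if (! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
            add_dependence (insn, XEXP (list, 0), dep_type);
        }
    }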

This patch adds a new UNCOND argument to some of the functions, so that
we can unconditionally add dependencies when necessary.  I went through
the sched-deps.c code, identified every place where we create a
structural dependency, and made those dependencies unconditional.
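
To make the new convention concrete, here are two call sites from the
patch below; the structural one now passes 1, the data-flow one 0:

    /* Structural: a scheduling barrier must order everything, so the
       dependence is added unconditionally.  */
    add_dependence_list (insn, deps->last_pending_memory_flush, 1,
                         REG_DEP_ANTI);

    /* Data flow: mutually exclusive predicated insns cannot conflict,
       so the mutex filter still applies.  */
    add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);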

I tested this with an IA-64 Linux bootstrap and make check for all
default languages.  There were no regressions.

I have checked in the patch.

My work here is not quite finished, though.  While looking at this, I
noticed another flaw.  The code is trying to use
last_pending_memory_flush both as a scheduling barrier and as a list of
pending jump insns.  Unfortunately, this cannot work, because
scheduling barriers require unconditional dependencies, while the
pending jump insns must not have unconditional dependencies.  The list
of jump_insns needs to be separate from last_pending_memory_flush.  I
left this broken for now.  I plan to create a new PR for it.  I think I
can create a testcase by using --param.  A failing case would look
something like
    jump_insn 1
    ....
    jump_insn n-1
    store to foo
    jump_insn n (calls flush_pending_lists)
    load from foo
and now we have no dependency to prevent the load from being moved
before the store.  For now, I suspect this bug won't trigger because we
won't get regions and/or extended basic blocks big enough to expose it.
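
A rough sketch of what such a testcase might look like (identifiers
invented; the --param is presumably one that shrinks the
MAX_PENDING_LIST_LENGTH limit seen in sched-deps.c, e.g.
max-pending-list-length):

    /* Hypothetical C shape of the failing case above (names made up).
       Each conditional branch becomes a jump_insn on the
       last_pending_memory_flush list; with a small enough pending-list
       limit, the branch after the store calls flush_pending_lists,
       which frees the store's pending entry and leaves no dependence
       ordering the final load after the store.  */
    extern int foo, a, b, c;

    void
    f (int n)
    {
      if (a) b++;       /* jump_insn 1 */
      if (b) c++;       /* ... jump_insn n-1 */
      foo = n;          /* store to foo */
      if (c) a++;       /* jump_insn n: triggers flush_pending_lists */
      b = foo;          /* load from foo, now unordered wrt the store */
    }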
-- 
Jim Wilson, GNU Tools Support, http://www.specifix.com
2005-10-11  James E Wilson  <wilson@specifix.com>

	PR target/24232
	* sched-deps.c (add_dependence_list): New arg UNCOND.  Fix all callers.
	(add_dependence_list_and_free): Likewise.
	(sched_analyze_2, case MEM): Delete sched_insns_conditions_mutex_p
	call.

Index: sched-deps.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/sched-deps.c,v
retrieving revision 1.94
diff -p -p -r1.94 sched-deps.c
*** sched-deps.c	27 Jul 2005 16:28:32 -0000	1.94
--- sched-deps.c	13 Oct 2005 18:31:13 -0000
*************** static bitmap_head *forward_dependency_c
*** 87,94 ****
  #endif
  
  static int deps_may_trap_p (rtx);
! static void add_dependence_list (rtx, rtx, enum reg_note);
! static void add_dependence_list_and_free (rtx, rtx *, enum reg_note);
  static void delete_all_dependences (rtx);
  static void fixup_sched_groups (rtx);
  
--- 87,94 ----
  #endif
  
  static int deps_may_trap_p (rtx);
! static void add_dependence_list (rtx, rtx, int, enum reg_note);
! static void add_dependence_list_and_free (rtx, rtx *, int, enum reg_note);
  static void delete_all_dependences (rtx);
  static void fixup_sched_groups (rtx);
  
*************** add_dependence (rtx insn, rtx elem, enum
*** 346,356 ****
  /* A convenience wrapper to operate on an entire list.  */
  
  static void
! add_dependence_list (rtx insn, rtx list, enum reg_note dep_type)
  {
    for (; list; list = XEXP (list, 1))
      {
!       if (! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
  	add_dependence (insn, XEXP (list, 0), dep_type);
      }
  }
--- 346,356 ----
  /* A convenience wrapper to operate on an entire list.  */
  
  static void
! add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
  {
    for (; list; list = XEXP (list, 1))
      {
!       if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
  	add_dependence (insn, XEXP (list, 0), dep_type);
      }
  }
*************** add_dependence_list (rtx insn, rtx list,
*** 358,370 ****
  /* Similar, but free *LISTP at the same time.  */
  
  static void
! add_dependence_list_and_free (rtx insn, rtx *listp, enum reg_note dep_type)
  {
    rtx list, next;
    for (list = *listp, *listp = NULL; list ; list = next)
      {
        next = XEXP (list, 1);
!       if (! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
  	add_dependence (insn, XEXP (list, 0), dep_type);
        free_INSN_LIST_node (list);
      }
--- 358,371 ----
  /* Similar, but free *LISTP at the same time.  */
  
  static void
! add_dependence_list_and_free (rtx insn, rtx *listp, int uncond,
! 			      enum reg_note dep_type)
  {
    rtx list, next;
    for (list = *listp, *listp = NULL; list ; list = next)
      {
        next = XEXP (list, 1);
!       if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
  	add_dependence (insn, XEXP (list, 0), dep_type);
        free_INSN_LIST_node (list);
      }
*************** flush_pending_lists (struct deps *deps, 
*** 468,484 ****
  {
    if (for_write)
      {
!       add_dependence_list_and_free (insn, &deps->pending_read_insns,
  				    REG_DEP_ANTI);
        free_EXPR_LIST_list (&deps->pending_read_mems);
      }
  
!   add_dependence_list_and_free (insn, &deps->pending_write_insns,
  				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
    free_EXPR_LIST_list (&deps->pending_write_mems);
    deps->pending_lists_length = 0;
  
!   add_dependence_list_and_free (insn, &deps->last_pending_memory_flush,
  				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
    deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
    deps->pending_flush_length = 1;
--- 469,485 ----
  {
    if (for_write)
      {
!       add_dependence_list_and_free (insn, &deps->pending_read_insns, 0,
  				    REG_DEP_ANTI);
        free_EXPR_LIST_list (&deps->pending_read_mems);
      }
  
!   add_dependence_list_and_free (insn, &deps->pending_write_insns, 0,
  				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
    free_EXPR_LIST_list (&deps->pending_write_mems);
    deps->pending_lists_length = 0;
  
!   add_dependence_list_and_free (insn, &deps->last_pending_memory_flush, 1,
  				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
    deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
    deps->pending_flush_length = 1;
*************** sched_analyze_1 (struct deps *deps, rtx 
*** 595,601 ****
  	  /* Don't let it cross a call after scheduling if it doesn't
  	     already cross one.  */
  	  if (REG_N_CALLS_CROSSED (regno) == 0)
! 	    add_dependence_list (insn, deps->last_function_call, REG_DEP_ANTI);
  	}
      }
    else if (MEM_P (dest))
--- 596,603 ----
  	  /* Don't let it cross a call after scheduling if it doesn't
  	     already cross one.  */
  	  if (REG_N_CALLS_CROSSED (regno) == 0)
! 	    add_dependence_list (insn, deps->last_function_call, 1,
! 				 REG_DEP_ANTI);
  	}
      }
    else if (MEM_P (dest))
*************** sched_analyze_1 (struct deps *deps, rtx 
*** 648,654 ****
  	      pending_mem = XEXP (pending_mem, 1);
  	    }
  
! 	  add_dependence_list (insn, deps->last_pending_memory_flush,
  			       REG_DEP_ANTI);
  
  	  add_insn_mem_dependence (deps, &deps->pending_write_insns,
--- 650,656 ----
  	      pending_mem = XEXP (pending_mem, 1);
  	    }
  
! 	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
  			       REG_DEP_ANTI);
  
  	  add_insn_mem_dependence (deps, &deps->pending_write_insns,
*************** sched_analyze_2 (struct deps *deps, rtx 
*** 791,798 ****
  	  }
  
  	for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
! 	  if ((! JUMP_P (XEXP (u, 0)) || deps_may_trap_p (x))
! 	      && ! sched_insns_conditions_mutex_p (insn, XEXP (u, 0)))
  	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  
  	/* Always add these dependencies to pending_reads, since
--- 793,799 ----
  	  }
  
  	for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
! 	  if (! JUMP_P (XEXP (u, 0)) || deps_may_trap_p (x))
  	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  
  	/* Always add these dependencies to pending_reads, since
*************** sched_analyze_insn (struct deps *deps, r
*** 903,909 ****
  	 and others know that a value is dead.  Depend on the last call
  	 instruction so that reg-stack won't get confused.  */
        if (code == CLOBBER)
! 	add_dependence_list (insn, deps->last_function_call, REG_DEP_OUTPUT);
      }
    else if (code == PARALLEL)
      {
--- 904,910 ----
  	 and others know that a value is dead.  Depend on the last call
  	 instruction so that reg-stack won't get confused.  */
        if (code == CLOBBER)
! 	add_dependence_list (insn, deps->last_function_call, 1, REG_DEP_OUTPUT);
      }
    else if (code == PARALLEL)
      {
*************** sched_analyze_insn (struct deps *deps, r
*** 960,967 ****
  	  EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
  	    {
  	      struct deps_reg *reg_last = &deps->reg_last[i];
! 	      add_dependence_list (insn, reg_last->sets, REG_DEP_ANTI);
! 	      add_dependence_list (insn, reg_last->clobbers, REG_DEP_ANTI);
  	      reg_last->uses_length++;
  	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
  	    }
--- 961,968 ----
  	  EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
  	    {
  	      struct deps_reg *reg_last = &deps->reg_last[i];
! 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
! 	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
  	      reg_last->uses_length++;
  	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
  	    }
*************** sched_analyze_insn (struct deps *deps, r
*** 995,1001 ****
  	      pending_mem = XEXP (pending_mem, 1);
  	    }
  
! 	  add_dependence_list (insn, deps->last_pending_memory_flush,
  			       REG_DEP_ANTI);
  	}
      }
--- 996,1002 ----
  	      pending_mem = XEXP (pending_mem, 1);
  	    }
  
! 	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
  			       REG_DEP_ANTI);
  	}
      }
*************** sched_analyze_insn (struct deps *deps, r
*** 1038,1049 ****
  	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
  	    {
  	      struct deps_reg *reg_last = &deps->reg_last[i];
! 	      add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
  	      add_dependence_list
! 		(insn, reg_last->sets,
  		 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
  	      add_dependence_list
! 		(insn, reg_last->clobbers,
  		 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
  	    }
  	}
--- 1039,1050 ----
  	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
  	    {
  	      struct deps_reg *reg_last = &deps->reg_last[i];
! 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
  	      add_dependence_list
! 		(insn, reg_last->sets, 0,
  		 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
  	      add_dependence_list
! 		(insn, reg_last->clobbers, 0,
  		 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
  	    }
  	}
*************** sched_analyze_insn (struct deps *deps, r
*** 1052,1064 ****
  	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
  	    {
  	      struct deps_reg *reg_last = &deps->reg_last[i];
! 	      add_dependence_list_and_free (insn, &reg_last->uses,
  					    REG_DEP_ANTI);
  	      add_dependence_list_and_free
! 		(insn, &reg_last->sets,
  		 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
  	      add_dependence_list_and_free
! 		(insn, &reg_last->clobbers,
  		 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
  	      reg_last->uses_length = 0;
  	      reg_last->clobbers_length = 0;
--- 1053,1065 ----
  	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
  	    {
  	      struct deps_reg *reg_last = &deps->reg_last[i];
! 	      add_dependence_list_and_free (insn, &reg_last->uses, 0,
  					    REG_DEP_ANTI);
  	      add_dependence_list_and_free
! 		(insn, &reg_last->sets, 0,
  		 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
  	      add_dependence_list_and_free
! 		(insn, &reg_last->clobbers, 0,
  		 reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
  	      reg_last->uses_length = 0;
  	      reg_last->clobbers_length = 0;
*************** sched_analyze_insn (struct deps *deps, r
*** 1085,1109 ****
  	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
  	    {
  	      struct deps_reg *reg_last = &deps->reg_last[i];
! 	      add_dependence_list (insn, reg_last->sets, REG_DEP_TRUE);
! 	      add_dependence_list (insn, reg_last->clobbers, REG_DEP_TRUE);
  	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
  	      reg_last->uses_length++;
  	    }
  	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
  	    {
  	      struct deps_reg *reg_last = &deps->reg_last[i];
! 	      add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
! 	      add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
  	      reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
  	      reg_last->clobbers_length++;
  	    }
  	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
  	    {
  	      struct deps_reg *reg_last = &deps->reg_last[i];
! 	      add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
! 	      add_dependence_list (insn, reg_last->clobbers, REG_DEP_OUTPUT);
! 	      add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
  	      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
  	      SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
  	    }
--- 1086,1110 ----
  	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
  	    {
  	      struct deps_reg *reg_last = &deps->reg_last[i];
! 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
! 	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
  	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
  	      reg_last->uses_length++;
  	    }
  	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
  	    {
  	      struct deps_reg *reg_last = &deps->reg_last[i];
! 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
! 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
  	      reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
  	      reg_last->clobbers_length++;
  	    }
  	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
  	    {
  	      struct deps_reg *reg_last = &deps->reg_last[i];
! 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
! 	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
! 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
  	      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
  	      SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
  	    }
*************** sched_analyze_insn (struct deps *deps, r
*** 1113,1120 ****
  	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
  	    {
  	      struct deps_reg *reg_last = &deps->reg_last[i];
! 	      add_dependence_list (insn, reg_last->sets, REG_DEP_TRUE);
! 	      add_dependence_list (insn, reg_last->clobbers, REG_DEP_TRUE);
  	      reg_last->uses_length++;
  	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
  	    }
--- 1114,1121 ----
  	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
  	    {
  	      struct deps_reg *reg_last = &deps->reg_last[i];
! 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
! 	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
  	      reg_last->uses_length++;
  	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
  	    }
*************** sched_analyze_insn (struct deps *deps, r
*** 1124,1134 ****
  	      if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
  		  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
  		{
! 		  add_dependence_list_and_free (insn, &reg_last->sets,
  					        REG_DEP_OUTPUT);
! 		  add_dependence_list_and_free (insn, &reg_last->uses,
  						REG_DEP_ANTI);
! 		  add_dependence_list_and_free (insn, &reg_last->clobbers,
  						REG_DEP_OUTPUT);
  		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
  		  reg_last->clobbers_length = 0;
--- 1125,1135 ----
  	      if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
  		  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
  		{
! 		  add_dependence_list_and_free (insn, &reg_last->sets, 0,
  					        REG_DEP_OUTPUT);
! 		  add_dependence_list_and_free (insn, &reg_last->uses, 0,
  						REG_DEP_ANTI);
! 		  add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
  						REG_DEP_OUTPUT);
  		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
  		  reg_last->clobbers_length = 0;
*************** sched_analyze_insn (struct deps *deps, r
*** 1136,1143 ****
  		}
  	      else
  		{
! 		  add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
! 		  add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
  		}
  	      reg_last->clobbers_length++;
  	      reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
--- 1137,1144 ----
  		}
  	      else
  		{
! 		  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
! 		  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
  		}
  	      reg_last->clobbers_length++;
  	      reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
*************** sched_analyze_insn (struct deps *deps, r
*** 1145,1155 ****
  	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
  	    {
  	      struct deps_reg *reg_last = &deps->reg_last[i];
! 	      add_dependence_list_and_free (insn, &reg_last->sets,
  					    REG_DEP_OUTPUT);
! 	      add_dependence_list_and_free (insn, &reg_last->clobbers,
  					    REG_DEP_OUTPUT);
! 	      add_dependence_list_and_free (insn, &reg_last->uses,
  					    REG_DEP_ANTI);
  	      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
  	      reg_last->uses_length = 0;
--- 1146,1156 ----
  	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
  	    {
  	      struct deps_reg *reg_last = &deps->reg_last[i];
! 	      add_dependence_list_and_free (insn, &reg_last->sets, 0,
  					    REG_DEP_OUTPUT);
! 	      add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
  					    REG_DEP_OUTPUT);
! 	      add_dependence_list_and_free (insn, &reg_last->uses, 0,
  					    REG_DEP_ANTI);
  	      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
  	      reg_last->uses_length = 0;
*************** sched_analyze (struct deps *deps, rtx he
*** 1329,1335 ****
  
  	  /* For each insn which shouldn't cross a call, add a dependence
  	     between that insn and this call insn.  */
! 	  add_dependence_list_and_free (insn, &deps->sched_before_next_call,
  					REG_DEP_ANTI);
  
  	  sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
--- 1330,1336 ----
  
  	  /* For each insn which shouldn't cross a call, add a dependence
  	     between that insn and this call insn.  */
! 	  add_dependence_list_and_free (insn, &deps->sched_before_next_call, 1,
  					REG_DEP_ANTI);
  
  	  sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
