This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]

Some haifa cleanup


I'm currently working on an optimization pass that will need to reorder
insns.  I'm thinking about reusing parts of haifa-sched.c, specifically
the code that computes dependencies.  Here's a first patch to clean this
code up a bit.  It's the usual mix of combining related variables into
structures, breaking up large functions, etc.  It also removes some
complications where there was an unnecessary distinction between cases
where there was one basic block or more than one.

Tested on alphaev6-linux.

Bernd


	* haifa-sched.c (reg_last_uses, reg_last_sets, reg_last_clobbers,
	pending_read_insns, pending_write_insns, pending_read_mems,
	pending_write_mems, pending_lists_length, last_pending_memory_flush,
	last_function_call, sched_before_next_call): Move static variables
	into a structure.
	(bb_ prefixed versions): Replace with single array bb_deps.
	(struct deps): New structure.
	(add_insn_mem_dependence, flush_pending_lists, sched_analyze_1,
	sched_analyze_2, sched_analyze_insn, sched_analyze): Accept new
	argument of type "struct deps *"; use that instead of global
	variables.  All callers changed.
	(init_rgn_data_dependences): Delete function.
	(init_rtx_vector): Delete function.
	(init_deps): New function.

	(free_pending_lists): Simplify, we always use bb_deps array even if
	only one basic block.
	(compute_block_backward_dependences): Likewise.
	(schedule_region): Likewise.

	(propagate_deps): New function, broken out of
	compute_block_backward_dependences.
	(compute_block_backward_dependences): Use it.
	
Index: haifa-sched.c
===================================================================
RCS file: /cvs/gcc/egcs/gcc/haifa-sched.c,v
retrieving revision 1.132
diff -c -p -r1.132 haifa-sched.c
*** haifa-sched.c	1999/11/15 08:12:29	1.132
--- haifa-sched.c	1999/11/27 16:14:24
*************** fix_sched_param (param, val)
*** 231,244 ****
      warning ("fix_sched_param: unknown param: %s", param);
  }
  
  
- /* Element N is the next insn that sets (hard or pseudo) register
-    N within the current basic block; or zero, if there is no
-    such insn.  Needed for new registers which may be introduced
-    by splitting insns.  */
- static rtx *reg_last_uses;
- static rtx *reg_last_sets;
- static rtx *reg_last_clobbers;
  static regset reg_pending_sets;
  static regset reg_pending_clobbers;
  static int reg_pending_sets_all;
--- 231,296 ----
      warning ("fix_sched_param: unknown param: %s", param);
  }
  
+ /* Describe state of dependencies used during sched_analyze phase.  */
+ struct deps
+ {
+   /* The *_insns and *_mems are paired lists.  Each pending memory operation
+      will have a pointer to the MEM rtx on one list and a pointer to the
+      containing insn on the other list in the same place in the list.  */
+ 
+   /* We can't use add_dependence like the old code did, because a single insn
+      may have multiple memory accesses, and hence needs to be on the list
+      once for each memory access.  Add_dependence won't let you add an insn
+      to a list more than once.  */
+ 
+   /* An INSN_LIST containing all insns with pending read operations.  */
+   rtx pending_read_insns;
+ 
+   /* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
+   rtx pending_read_mems;
+ 
+   /* An INSN_LIST containing all insns with pending write operations.  */
+   rtx pending_write_insns;
+ 
+   /* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
+   rtx pending_write_mems;
+ 
+   /* Indicates the combined length of the two pending lists.  We must prevent
+      these lists from ever growing too large since the number of dependencies
+      produced is at least O(N*N), and execution time is at least O(4*N*N), as
+      a function of the length of these pending lists.  */
+   int pending_lists_length;
+ 
+   /* The last insn upon which all memory references must depend.
+      This is an insn which flushed the pending lists, creating a dependency
+      between it and all previously pending memory references.  This creates
+      a barrier (or a checkpoint) which no memory reference is allowed to cross.
+ 
+      This includes all non constant CALL_INSNs.  When we do interprocedural
+      alias analysis, this restriction can be relaxed.
+      This may also be an INSN that writes memory if the pending lists grow
+      too large.  */
+   rtx last_pending_memory_flush;
+ 
+   /* The last function call we have seen.  All hard regs, and, of course,
+      the last function call, must depend on this.  */
+   rtx last_function_call;
+ 
+   /* The LOG_LINKS field of this is a list of insns which use a pseudo register
+      that does not already cross a call.  We create dependencies between each
+      of those insns and the next call insn, to ensure that they won't cross a
+      call after scheduling is done.  */
+   rtx sched_before_next_call;
+ 
+   /* Element N is the next insn that sets (hard or pseudo) register
+      N within the current basic block; or zero, if there is no
+      such insn.  Needed for new registers which may be introduced
+      by splitting insns.  */
+   rtx *reg_last_uses;
+   rtx *reg_last_sets;
+   rtx *reg_last_clobbers;
+ };
  
  static regset reg_pending_sets;
  static regset reg_pending_clobbers;
  static int reg_pending_sets_all;
*************** static int potential_hazard PROTO ((int,
*** 427,438 ****
  static int insn_cost PROTO ((rtx, rtx, rtx));
  static int priority PROTO ((rtx));
  static void free_pending_lists PROTO ((void));
! static void add_insn_mem_dependence PROTO ((rtx *, rtx *, rtx, rtx));
! static void flush_pending_lists PROTO ((rtx, int));
! static void sched_analyze_1 PROTO ((rtx, rtx));
! static void sched_analyze_2 PROTO ((rtx, rtx));
! static void sched_analyze_insn PROTO ((rtx, rtx, rtx));
! static void sched_analyze PROTO ((rtx, rtx));
  static int rank_for_schedule PROTO ((const PTR, const PTR));
  static void swap_sort PROTO ((rtx *, int));
  static void queue_insn PROTO ((rtx, int));
--- 479,491 ----
  static int insn_cost PROTO ((rtx, rtx, rtx));
  static int priority PROTO ((rtx));
  static void free_pending_lists PROTO ((void));
! static void add_insn_mem_dependence PROTO ((struct deps *, rtx *, rtx *, rtx,
! 					    rtx));
! static void flush_pending_lists PROTO ((struct deps *, rtx, int));
! static void sched_analyze_1 PROTO ((struct deps *, rtx, rtx));
! static void sched_analyze_2 PROTO ((struct deps *, rtx, rtx));
! static void sched_analyze_insn PROTO ((struct deps *, rtx, rtx, rtx));
! static void sched_analyze PROTO ((struct deps *, rtx, rtx));
  static int rank_for_schedule PROTO ((const PTR, const PTR));
  static void swap_sort PROTO ((rtx *, int));
  static void queue_insn PROTO ((rtx, int));
*************** static int is_exception_free PROTO ((rtx
*** 670,676 ****
  
  static char find_insn_mem_list PROTO ((rtx, rtx, rtx, rtx));
  static void compute_block_forward_dependences PROTO ((int));
- static void init_rgn_data_dependences PROTO ((int));
  static void add_branch_dependences PROTO ((rtx, rtx));
  static void compute_block_backward_dependences PROTO ((int));
  void debug_dependencies PROTO ((void));
--- 723,728 ----
*************** static rtx move_insn1 PROTO ((rtx, rtx))
*** 731,737 ****
  static rtx move_insn PROTO ((rtx, rtx));
  static rtx group_leader PROTO ((rtx));
  static int set_priorities PROTO ((int));
! static void init_rtx_vector PROTO ((rtx **, rtx *, int, int));
  static void schedule_region PROTO ((int));
  
  #endif /* INSN_SCHEDULING */
--- 783,789 ----
  static rtx move_insn PROTO ((rtx, rtx));
  static rtx group_leader PROTO ((rtx));
  static int set_priorities PROTO ((int));
! static void init_deps PROTO ((struct deps *));
  static void schedule_region PROTO ((int));
  
  #endif /* INSN_SCHEDULING */
*************** schedule_insns (dump_file)
*** 906,963 ****
  #endif
  
  /* Computation of memory dependencies.  */
- 
- /* The *_insns and *_mems are paired lists.  Each pending memory operation
-    will have a pointer to the MEM rtx on one list and a pointer to the
-    containing insn on the other list in the same place in the list.  */
- 
- /* We can't use add_dependence like the old code did, because a single insn
-    may have multiple memory accesses, and hence needs to be on the list
-    once for each memory access.  Add_dependence won't let you add an insn
-    to a list more than once.  */
- 
- /* An INSN_LIST containing all insns with pending read operations.  */
- static rtx pending_read_insns;
- 
- /* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
- static rtx pending_read_mems;
- 
- /* An INSN_LIST containing all insns with pending write operations.  */
- static rtx pending_write_insns;
- 
- /* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
- static rtx pending_write_mems;
- 
- /* Indicates the combined length of the two pending lists.  We must prevent
-    these lists from ever growing too large since the number of dependencies
-    produced is at least O(N*N), and execution time is at least O(4*N*N), as
-    a function of the length of these pending lists.  */
- 
- static int pending_lists_length;
  
! /* The last insn upon which all memory references must depend.
!    This is an insn which flushed the pending lists, creating a dependency
!    between it and all previously pending memory references.  This creates
!    a barrier (or a checkpoint) which no memory reference is allowed to cross.
! 
!    This includes all non constant CALL_INSNs.  When we do interprocedural
!    alias analysis, this restriction can be relaxed.
!    This may also be an INSN that writes memory if the pending lists grow
!    too large.  */
! 
! static rtx last_pending_memory_flush;
! 
! /* The last function call we have seen.  All hard regs, and, of course,
!    the last function call, must depend on this.  */
! 
! static rtx last_function_call;
! 
! /* The LOG_LINKS field of this is a list of insns which use a pseudo register
!    that does not already cross a call.  We create dependencies between each
!    of those insn and the next call insn, to ensure that they won't cross a call
!    after scheduling is done.  */
  
! static rtx sched_before_next_call;
  
  /* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
     so that insns independent of the last scheduled insn will be preferred
--- 958,971 ----
  #endif
  
  /* Computation of memory dependencies.  */
  
! /* Data structures for the computation of data dependences in a region.  We
!    keep one "struct deps" for every basic block.  Before analyzing the
!    data dependences for a bb, its variables are initialized as a function of
!    the variables of its predecessors.  When the analysis for a bb completes,
!    we save the contents to the corresponding bb_deps[bb] variable.  */
  
! static struct deps *bb_deps;
  
  /* Pointer to the last instruction scheduled.  Used by rank_for_schedule,
     so that insns independent of the last scheduled insn will be preferred
*************** static rtx sched_before_next_call;
*** 965,995 ****
  
  static rtx last_scheduled_insn;
  
- /* Data structures for the computation of data dependences in a regions.  We
-    keep one copy of each of the declared above variables for each bb in the
-    region.  Before analyzing the data dependences for a bb, its variables
-    are initialized as a function of the variables of its predecessors.  When
-    the analysis for a bb completes, we save the contents of each variable X
-    to a corresponding bb_X[bb] variable.  For example, pending_read_insns is
-    copied to bb_pending_read_insns[bb].  Another change is that few
-    variables are now a list of insns rather than a single insn:
-    last_pending_memory_flash, last_function_call, reg_last_sets.  The
-    manipulation of these variables was changed appropriately.  */
- 
- static rtx **bb_reg_last_uses;
- static rtx **bb_reg_last_sets;
- static rtx **bb_reg_last_clobbers;
- 
- static rtx *bb_pending_read_insns;
- static rtx *bb_pending_read_mems;
- static rtx *bb_pending_write_insns;
- static rtx *bb_pending_write_mems;
- static int *bb_pending_lists_length;
- 
- static rtx *bb_last_pending_memory_flush;
- static rtx *bb_last_function_call;
- static rtx *bb_sched_before_next_call;
- 
  /* Functions for construction of the control flow graph.  */
  
  /* Return 1 if control flow graph should not be constructed, 0 otherwise.
--- 973,978 ----
*************** priority (insn)
*** 3149,3173 ****
  static void
  free_pending_lists ()
  {
!   if (current_nr_blocks <= 1)
!     {
!       free_INSN_LIST_list (&pending_read_insns);
!       free_INSN_LIST_list (&pending_write_insns);
!       free_EXPR_LIST_list (&pending_read_mems);
!       free_EXPR_LIST_list (&pending_write_mems);
!     }
!   else
!     {
!       /* Interblock scheduling.  */
!       int bb;
  
!       for (bb = 0; bb < current_nr_blocks; bb++)
! 	{
! 	  free_INSN_LIST_list (&bb_pending_read_insns[bb]);
! 	  free_INSN_LIST_list (&bb_pending_write_insns[bb]);
! 	  free_EXPR_LIST_list (&bb_pending_read_mems[bb]);
! 	  free_EXPR_LIST_list (&bb_pending_write_mems[bb]);
! 	}
      }
  }
  
--- 3132,3145 ----
  static void
  free_pending_lists ()
  {
!   int bb;
  
!   for (bb = 0; bb < current_nr_blocks; bb++)
!     {
!       free_INSN_LIST_list (&bb_deps[bb].pending_read_insns);
!       free_INSN_LIST_list (&bb_deps[bb].pending_write_insns);
!       free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems);
!       free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems);
      }
  }
  
*************** free_pending_lists ()
*** 3176,3182 ****
     so that we can do memory aliasing on it.  */
  
  static void
! add_insn_mem_dependence (insn_list, mem_list, insn, mem)
       rtx *insn_list, *mem_list, insn, mem;
  {
    register rtx link;
--- 3148,3155 ----
     so that we can do memory aliasing on it.  */
  
  static void
! add_insn_mem_dependence (d, insn_list, mem_list, insn, mem)
!      struct deps *d;
       rtx *insn_list, *mem_list, insn, mem;
  {
    register rtx link;
*************** add_insn_mem_dependence (insn_list, mem_
*** 3187,3240 ****
    link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
    *mem_list = link;
  
!   pending_lists_length++;
  }
  
- 
  /* Make a dependency between every memory reference on the pending lists
     and INSN, thus flushing the pending lists.  If ONLY_WRITE, don't flush
     the read list.  */
  
  static void
! flush_pending_lists (insn, only_write)
       rtx insn;
       int only_write;
  {
    rtx u;
    rtx link;
  
!   while (pending_read_insns && ! only_write)
      {
!       add_dependence (insn, XEXP (pending_read_insns, 0), REG_DEP_ANTI);
  
!       link = pending_read_insns;
!       pending_read_insns = XEXP (pending_read_insns, 1);
        free_INSN_LIST_node (link);
  
!       link = pending_read_mems;
!       pending_read_mems = XEXP (pending_read_mems, 1);
        free_EXPR_LIST_node (link);
      }
!   while (pending_write_insns)
      {
!       add_dependence (insn, XEXP (pending_write_insns, 0), REG_DEP_ANTI);
  
!       link = pending_write_insns;
!       pending_write_insns = XEXP (pending_write_insns, 1);
        free_INSN_LIST_node (link);
  
!       link = pending_write_mems;
!       pending_write_mems = XEXP (pending_write_mems, 1);
        free_EXPR_LIST_node (link);
      }
!   pending_lists_length = 0;
  
    /* last_pending_memory_flush is now a list of insns.  */
!   for (u = last_pending_memory_flush; u; u = XEXP (u, 1))
      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  
!   free_INSN_LIST_list (&last_pending_memory_flush);
!   last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
  }
  
  /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
--- 3160,3213 ----
    link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
    *mem_list = link;
  
!   d->pending_lists_length++;
  }
  
  /* Make a dependency between every memory reference on the pending lists
     and INSN, thus flushing the pending lists.  If ONLY_WRITE, don't flush
     the read list.  */
  
  static void
! flush_pending_lists (d, insn, only_write)
!      struct deps *d;
       rtx insn;
       int only_write;
  {
    rtx u;
    rtx link;
  
!   while (d->pending_read_insns && ! only_write)
      {
!       add_dependence (insn, XEXP (d->pending_read_insns, 0), REG_DEP_ANTI);
  
!       link = d->pending_read_insns;
!       d->pending_read_insns = XEXP (d->pending_read_insns, 1);
        free_INSN_LIST_node (link);
  
!       link = d->pending_read_mems;
!       d->pending_read_mems = XEXP (d->pending_read_mems, 1);
        free_EXPR_LIST_node (link);
      }
!   while (d->pending_write_insns)
      {
!       add_dependence (insn, XEXP (d->pending_write_insns, 0), REG_DEP_ANTI);
  
!       link = d->pending_write_insns;
!       d->pending_write_insns = XEXP (d->pending_write_insns, 1);
        free_INSN_LIST_node (link);
  
!       link = d->pending_write_mems;
!       d->pending_write_mems = XEXP (d->pending_write_mems, 1);
        free_EXPR_LIST_node (link);
      }
!   d->pending_lists_length = 0;
  
    /* last_pending_memory_flush is now a list of insns.  */
!   for (u = d->last_pending_memory_flush; u; u = XEXP (u, 1))
      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  
!   free_INSN_LIST_list (&d->last_pending_memory_flush);
!   d->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
  }
  
  /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
*************** flush_pending_lists (insn, only_write)
*** 3242,3248 ****
     destination of X, and reads of everything mentioned.  */
  
  static void
! sched_analyze_1 (x, insn)
       rtx x;
       rtx insn;
  {
--- 3215,3222 ----
     destination of X, and reads of everything mentioned.  */
  
  static void
! sched_analyze_1 (d, x, insn)
!      struct deps *d;
       rtx x;
       rtx insn;
  {
*************** sched_analyze_1 (x, insn)
*** 3258,3266 ****
      {
        register int i;
        for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
! 	sched_analyze_1 (XVECEXP (dest, 0, i), insn);
        if (GET_CODE (x) == SET)
! 	sched_analyze_2 (SET_SRC (x), insn);
        return;
      }
  
--- 3232,3240 ----
      {
        register int i;
        for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
! 	sched_analyze_1 (d, XVECEXP (dest, 0, i), insn);
        if (GET_CODE (x) == SET)
! 	sched_analyze_2 (d, SET_SRC (x), insn);
        return;
      }
  
*************** sched_analyze_1 (x, insn)
*** 3270,3277 ****
        if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
  	{
  	  /* The second and third arguments are values read by this insn.  */
! 	  sched_analyze_2 (XEXP (dest, 1), insn);
! 	  sched_analyze_2 (XEXP (dest, 2), insn);
  	}
        dest = XEXP (dest, 0);
      }
--- 3244,3251 ----
        if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
  	{
  	  /* The second and third arguments are values read by this insn.  */
! 	  sched_analyze_2 (d, XEXP (dest, 1), insn);
! 	  sched_analyze_2 (d, XEXP (dest, 2), insn);
  	}
        dest = XEXP (dest, 0);
      }
*************** sched_analyze_1 (x, insn)
*** 3291,3300 ****
  	    {
  	      rtx u;
  
! 	      for (u = reg_last_uses[regno + i]; u; u = XEXP (u, 1))
  		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  
! 	      for (u = reg_last_sets[regno + i]; u; u = XEXP (u, 1))
  		add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
  
  	      /* Clobbers need not be ordered with respect to one
--- 3265,3274 ----
  	    {
  	      rtx u;
  
! 	      for (u = d->reg_last_uses[regno + i]; u; u = XEXP (u, 1))
  		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  
! 	      for (u = d->reg_last_sets[regno + i]; u; u = XEXP (u, 1))
  		add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
  
  	      /* Clobbers need not be ordered with respect to one
*************** sched_analyze_1 (x, insn)
*** 3302,3309 ****
  		 pending clobber.  */
  	      if (code == SET)
  		{
! 		  free_INSN_LIST_list (&reg_last_uses[regno + i]);
! 	          for (u = reg_last_clobbers[regno + i]; u; u = XEXP (u, 1))
  		    add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
  	          SET_REGNO_REG_SET (reg_pending_sets, regno + i);
  		}
--- 3276,3283 ----
  		 pending clobber.  */
  	      if (code == SET)
  		{
! 		  free_INSN_LIST_list (&d->reg_last_uses[regno + i]);
! 	          for (u = d->reg_last_clobbers[regno + i]; u; u = XEXP (u, 1))
  		    add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
  	          SET_REGNO_REG_SET (reg_pending_sets, regno + i);
  		}
*************** sched_analyze_1 (x, insn)
*** 3313,3319 ****
  	      /* Function calls clobber all call_used regs.  */
  	      if (global_regs[regno + i]
  		  || (code == SET && call_used_regs[regno + i]))
! 		for (u = last_function_call; u; u = XEXP (u, 1))
  		  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  	    }
  	}
--- 3287,3293 ----
  	      /* Function calls clobber all call_used regs.  */
  	      if (global_regs[regno + i]
  		  || (code == SET && call_used_regs[regno + i]))
! 		for (u = d->last_function_call; u; u = XEXP (u, 1))
  		  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  	    }
  	}
*************** sched_analyze_1 (x, insn)
*** 3321,3336 ****
  	{
  	  rtx u;
  
! 	  for (u = reg_last_uses[regno]; u; u = XEXP (u, 1))
  	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  
! 	  for (u = reg_last_sets[regno]; u; u = XEXP (u, 1))
  	    add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
  
  	  if (code == SET)
  	    {
! 	      free_INSN_LIST_list (&reg_last_uses[regno]);
! 	      for (u = reg_last_clobbers[regno]; u; u = XEXP (u, 1))
  		add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
  	      SET_REGNO_REG_SET (reg_pending_sets, regno);
  	    }
--- 3295,3310 ----
  	{
  	  rtx u;
  
! 	  for (u = d->reg_last_uses[regno]; u; u = XEXP (u, 1))
  	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  
! 	  for (u = d->reg_last_sets[regno]; u; u = XEXP (u, 1))
  	    add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
  
  	  if (code == SET)
  	    {
! 	      free_INSN_LIST_list (&d->reg_last_uses[regno]);
! 	      for (u = d->reg_last_clobbers[regno]; u; u = XEXP (u, 1))
  		add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
  	      SET_REGNO_REG_SET (reg_pending_sets, regno);
  	    }
*************** sched_analyze_1 (x, insn)
*** 3343,3355 ****
  	  if (!reload_completed
  	      && reg_known_equiv_p[regno]
  	      && GET_CODE (reg_known_value[regno]) == MEM)
! 	    sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);
  
  	  /* Don't let it cross a call after scheduling if it doesn't
  	     already cross one.  */
  
  	  if (REG_N_CALLS_CROSSED (regno) == 0)
! 	    for (u = last_function_call; u; u = XEXP (u, 1))
  	      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  	}
      }
--- 3317,3329 ----
  	  if (!reload_completed
  	      && reg_known_equiv_p[regno]
  	      && GET_CODE (reg_known_value[regno]) == MEM)
! 	    sched_analyze_2 (d, XEXP (reg_known_value[regno], 0), insn);
  
  	  /* Don't let it cross a call after scheduling if it doesn't
  	     already cross one.  */
  
  	  if (REG_N_CALLS_CROSSED (regno) == 0)
! 	    for (u = d->last_function_call; u; u = XEXP (u, 1))
  	      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  	}
      }
*************** sched_analyze_1 (x, insn)
*** 3357,3363 ****
      {
        /* Writing memory.  */
  
!       if (pending_lists_length > 32)
  	{
  	  /* Flush all pending reads and writes to prevent the pending lists
  	     from getting any larger.  Insn scheduling runs too slowly when
--- 3331,3337 ----
      {
        /* Writing memory.  */
  
!       if (d->pending_lists_length > 32)
  	{
  	  /* Flush all pending reads and writes to prevent the pending lists
  	     from getting any larger.  Insn scheduling runs too slowly when
*************** sched_analyze_1 (x, insn)
*** 3365,3379 ****
  	     seems like a reasonable number.  When compiling GCC with itself,
  	     this flush occurs 8 times for sparc, and 10 times for m88k using
  	     the number 32.  */
! 	  flush_pending_lists (insn, 0);
  	}
        else
  	{
  	  rtx u;
  	  rtx pending, pending_mem;
  
! 	  pending = pending_read_insns;
! 	  pending_mem = pending_read_mems;
  	  while (pending)
  	    {
  	      if (anti_dependence (XEXP (pending_mem, 0), dest))
--- 3339,3353 ----
  	     seems like a reasonable number.  When compiling GCC with itself,
  	     this flush occurs 8 times for sparc, and 10 times for m88k using
  	     the number 32.  */
! 	  flush_pending_lists (d, insn, 0);
  	}
        else
  	{
  	  rtx u;
  	  rtx pending, pending_mem;
  
! 	  pending = d->pending_read_insns;
! 	  pending_mem = d->pending_read_mems;
  	  while (pending)
  	    {
  	      if (anti_dependence (XEXP (pending_mem, 0), dest))
*************** sched_analyze_1 (x, insn)
*** 3383,3390 ****
  	      pending_mem = XEXP (pending_mem, 1);
  	    }
  
! 	  pending = pending_write_insns;
! 	  pending_mem = pending_write_mems;
  	  while (pending)
  	    {
  	      if (output_dependence (XEXP (pending_mem, 0), dest))
--- 3357,3364 ----
  	      pending_mem = XEXP (pending_mem, 1);
  	    }
  
! 	  pending = d->pending_write_insns;
! 	  pending_mem = d->pending_write_mems;
  	  while (pending)
  	    {
  	      if (output_dependence (XEXP (pending_mem, 0), dest))
*************** sched_analyze_1 (x, insn)
*** 3394,3417 ****
  	      pending_mem = XEXP (pending_mem, 1);
  	    }
  
! 	  for (u = last_pending_memory_flush; u; u = XEXP (u, 1))
  	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  
! 	  add_insn_mem_dependence (&pending_write_insns, &pending_write_mems,
! 				   insn, dest);
  	}
!       sched_analyze_2 (XEXP (dest, 0), insn);
      }
  
    /* Analyze reads.  */
    if (GET_CODE (x) == SET)
!     sched_analyze_2 (SET_SRC (x), insn);
  }
  
  /* Analyze the uses of memory and registers in rtx X in INSN.  */
  
  static void
! sched_analyze_2 (x, insn)
       rtx x;
       rtx insn;
  {
--- 3368,3392 ----
  	      pending_mem = XEXP (pending_mem, 1);
  	    }
  
! 	  for (u = d->last_pending_memory_flush; u; u = XEXP (u, 1))
  	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  
! 	  add_insn_mem_dependence (d, &d->pending_write_insns,
! 				   &d->pending_write_mems, insn, dest);
  	}
!       sched_analyze_2 (d, XEXP (dest, 0), insn);
      }
  
    /* Analyze reads.  */
    if (GET_CODE (x) == SET)
!     sched_analyze_2 (d, SET_SRC (x), insn);
  }
  
  /* Analyze the uses of memory and registers in rtx X in INSN.  */
  
  static void
! sched_analyze_2 (d, x, insn)
!      struct deps *d;
       rtx x;
       rtx insn;
  {
*************** sched_analyze_2 (x, insn)
*** 3476,3507 ****
  	    i = HARD_REGNO_NREGS (regno, GET_MODE (x));
  	    while (--i >= 0)
  	      {
! 		reg_last_uses[regno + i]
! 		  = alloc_INSN_LIST (insn, reg_last_uses[regno + i]);
  
! 		for (u = reg_last_sets[regno + i]; u; u = XEXP (u, 1))
  		  add_dependence (insn, XEXP (u, 0), 0);
  
  		/* ??? This should never happen.  */
! 		for (u = reg_last_clobbers[regno + i]; u; u = XEXP (u, 1))
  		  add_dependence (insn, XEXP (u, 0), 0);
  
  		if ((call_used_regs[regno + i] || global_regs[regno + i]))
  		  /* Function calls clobber all call_used regs.  */
! 		  for (u = last_function_call; u; u = XEXP (u, 1))
  		    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  	      }
  	  }
  	else
  	  {
! 	    reg_last_uses[regno] = alloc_INSN_LIST (insn,
! 						    reg_last_uses[regno]);
  
! 	    for (u = reg_last_sets[regno]; u; u = XEXP (u, 1))
  	      add_dependence (insn, XEXP (u, 0), 0);
  
  	    /* ??? This should never happen.  */
! 	    for (u = reg_last_clobbers[regno]; u; u = XEXP (u, 1))
  	      add_dependence (insn, XEXP (u, 0), 0);
  
  	    /* Pseudos that are REG_EQUIV to something may be replaced
--- 3451,3482 ----
  	    i = HARD_REGNO_NREGS (regno, GET_MODE (x));
  	    while (--i >= 0)
  	      {
! 		d->reg_last_uses[regno + i]
! 		  = alloc_INSN_LIST (insn, d->reg_last_uses[regno + i]);
  
! 		for (u = d->reg_last_sets[regno + i]; u; u = XEXP (u, 1))
  		  add_dependence (insn, XEXP (u, 0), 0);
  
  		/* ??? This should never happen.  */
! 		for (u = d->reg_last_clobbers[regno + i]; u; u = XEXP (u, 1))
  		  add_dependence (insn, XEXP (u, 0), 0);
  
  		if ((call_used_regs[regno + i] || global_regs[regno + i]))
  		  /* Function calls clobber all call_used regs.  */
! 		  for (u = d->last_function_call; u; u = XEXP (u, 1))
  		    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  	      }
  	  }
  	else
  	  {
! 	    d->reg_last_uses[regno]
! 	      = alloc_INSN_LIST (insn, d->reg_last_uses[regno]);
  
! 	    for (u = d->reg_last_sets[regno]; u; u = XEXP (u, 1))
  	      add_dependence (insn, XEXP (u, 0), 0);
  
  	    /* ??? This should never happen.  */
! 	    for (u = d->reg_last_clobbers[regno]; u; u = XEXP (u, 1))
  	      add_dependence (insn, XEXP (u, 0), 0);
  
  	    /* Pseudos that are REG_EQUIV to something may be replaced
*************** sched_analyze_2 (x, insn)
*** 3510,3522 ****
  	    if (!reload_completed
  		&& reg_known_equiv_p[regno]
  		&& GET_CODE (reg_known_value[regno]) == MEM)
! 	      sched_analyze_2 (XEXP (reg_known_value[regno], 0), insn);
  
  	    /* If the register does not already cross any calls, then add this
  	       insn to the sched_before_next_call list so that it will still
  	       not cross calls after scheduling.  */
  	    if (REG_N_CALLS_CROSSED (regno) == 0)
! 	      add_dependence (sched_before_next_call, insn, REG_DEP_ANTI);
  	  }
  	return;
        }
--- 3485,3497 ----
  	    if (!reload_completed
  		&& reg_known_equiv_p[regno]
  		&& GET_CODE (reg_known_value[regno]) == MEM)
! 	      sched_analyze_2 (d, XEXP (reg_known_value[regno], 0), insn);
  
  	    /* If the register does not already cross any calls, then add this
  	       insn to the sched_before_next_call list so that it will still
  	       not cross calls after scheduling.  */
  	    if (REG_N_CALLS_CROSSED (regno) == 0)
! 	      add_dependence (d->sched_before_next_call, insn, REG_DEP_ANTI);
  	  }
  	return;
        }
*************** sched_analyze_2 (x, insn)
*** 3527,3534 ****
  	rtx u;
  	rtx pending, pending_mem;
  
! 	pending = pending_read_insns;
! 	pending_mem = pending_read_mems;
  	while (pending)
  	  {
  	    if (read_dependence (XEXP (pending_mem, 0), x))
--- 3502,3509 ----
  	rtx u;
  	rtx pending, pending_mem;
  
! 	pending = d->pending_read_insns;
! 	pending_mem = d->pending_read_mems;
  	while (pending)
  	  {
  	    if (read_dependence (XEXP (pending_mem, 0), x))
*************** sched_analyze_2 (x, insn)
*** 3538,3545 ****
  	    pending_mem = XEXP (pending_mem, 1);
  	  }
  
! 	pending = pending_write_insns;
! 	pending_mem = pending_write_mems;
  	while (pending)
  	  {
  	    if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
--- 3513,3520 ----
  	    pending_mem = XEXP (pending_mem, 1);
  	  }
  
! 	pending = d->pending_write_insns;
! 	pending_mem = d->pending_write_mems;
  	while (pending)
  	  {
  	    if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
*************** sched_analyze_2 (x, insn)
*** 3550,3571 ****
  	    pending_mem = XEXP (pending_mem, 1);
  	  }
  
! 	for (u = last_pending_memory_flush; u; u = XEXP (u, 1))
  	  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  
  	/* Always add these dependencies to pending_reads, since
  	   this insn may be followed by a write.  */
! 	add_insn_mem_dependence (&pending_read_insns, &pending_read_mems,
! 				 insn, x);
  
  	/* Take advantage of tail recursion here.  */
! 	sched_analyze_2 (XEXP (x, 0), insn);
  	return;
        }
  
      /* Force pending stores to memory in case a trap handler needs them.  */
      case TRAP_IF:
!       flush_pending_lists (insn, 1);
        break;
  
      case ASM_OPERANDS:
--- 3525,3546 ----
  	    pending_mem = XEXP (pending_mem, 1);
  	  }
  
! 	for (u = d->last_pending_memory_flush; u; u = XEXP (u, 1))
  	  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  
  	/* Always add these dependencies to pending_reads, since
  	   this insn may be followed by a write.  */
! 	add_insn_mem_dependence (d, &d->pending_read_insns,
! 				 &d->pending_read_mems, insn, x);
  
  	/* Take advantage of tail recursion here.  */
! 	sched_analyze_2 (d, XEXP (x, 0), insn);
  	return;
        }
  
      /* Force pending stores to memory in case a trap handler needs them.  */
      case TRAP_IF:
!       flush_pending_lists (d, insn, 1);
        break;
  
      case ASM_OPERANDS:
*************** sched_analyze_2 (x, insn)
*** 3586,3604 ****
  	    int max_reg = max_reg_num ();
  	    for (i = 0; i < max_reg; i++)
  	      {
! 		for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
  		  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
! 		free_INSN_LIST_list (&reg_last_uses[i]);
  
! 		for (u = reg_last_sets[i]; u; u = XEXP (u, 1))
  		  add_dependence (insn, XEXP (u, 0), 0);
  
! 		for (u = reg_last_clobbers[i]; u; u = XEXP (u, 1))
  		  add_dependence (insn, XEXP (u, 0), 0);
  	      }
  	    reg_pending_sets_all = 1;
  
! 	    flush_pending_lists (insn, 0);
  	  }
  
  	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
--- 3561,3579 ----
  	    int max_reg = max_reg_num ();
  	    for (i = 0; i < max_reg; i++)
  	      {
! 		for (u = d->reg_last_uses[i]; u; u = XEXP (u, 1))
  		  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
! 		free_INSN_LIST_list (&d->reg_last_uses[i]);
  
! 		for (u = d->reg_last_sets[i]; u; u = XEXP (u, 1))
  		  add_dependence (insn, XEXP (u, 0), 0);
  
! 		for (u = d->reg_last_clobbers[i]; u; u = XEXP (u, 1))
  		  add_dependence (insn, XEXP (u, 0), 0);
  	      }
  	    reg_pending_sets_all = 1;
  
! 	    flush_pending_lists (d, insn, 0);
  	  }
  
  	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
*************** sched_analyze_2 (x, insn)
*** 3609,3615 ****
  	if (code == ASM_OPERANDS)
  	  {
  	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
! 	      sched_analyze_2 (ASM_OPERANDS_INPUT (x, j), insn);
  	    return;
  	  }
  	break;
--- 3584,3590 ----
  	if (code == ASM_OPERANDS)
  	  {
  	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
! 	      sched_analyze_2 (d, ASM_OPERANDS_INPUT (x, j), insn);
  	    return;
  	  }
  	break;
*************** sched_analyze_2 (x, insn)
*** 3625,3632 ****
           instructions.  Thus we need to pass them to both sched_analyze_1
           and sched_analyze_2.  We must call sched_analyze_2 first in order
           to get the proper antecedent for the read.  */
!       sched_analyze_2 (XEXP (x, 0), insn);
!       sched_analyze_1 (x, insn);
        return;
  
      default:
--- 3600,3607 ----
           instructions.  Thus we need to pass them to both sched_analyze_1
           and sched_analyze_2.  We must call sched_analyze_2 first in order
           to get the proper antecedent for the read.  */
!       sched_analyze_2 (d, XEXP (x, 0), insn);
!       sched_analyze_1 (d, x, insn);
        return;
  
      default:
*************** sched_analyze_2 (x, insn)
*** 3638,3654 ****
    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
! 	sched_analyze_2 (XEXP (x, i), insn);
        else if (fmt[i] == 'E')
  	for (j = 0; j < XVECLEN (x, i); j++)
! 	  sched_analyze_2 (XVECEXP (x, i, j), insn);
      }
  }
  
  /* Analyze an INSN with pattern X to find all dependencies.  */
  
  static void
! sched_analyze_insn (x, insn, loop_notes)
       rtx x, insn;
       rtx loop_notes;
  {
--- 3613,3630 ----
    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
! 	sched_analyze_2 (d, XEXP (x, i), insn);
        else if (fmt[i] == 'E')
  	for (j = 0; j < XVECLEN (x, i); j++)
! 	  sched_analyze_2 (d, XVECEXP (x, i, j), insn);
      }
  }
  
  /* Analyze an INSN with pattern X to find all dependencies.  */
  
  static void
! sched_analyze_insn (d, x, insn, loop_notes)
!      struct deps *d;
       rtx x, insn;
       rtx loop_notes;
  {
*************** sched_analyze_insn (x, insn, loop_notes)
*** 3658,3664 ****
    int i;
  
    if (code == SET || code == CLOBBER)
!     sched_analyze_1 (x, insn);
    else if (code == PARALLEL)
      {
        register int i;
--- 3634,3640 ----
    int i;
  
    if (code == SET || code == CLOBBER)
!     sched_analyze_1 (d, x, insn);
    else if (code == PARALLEL)
      {
        register int i;
*************** sched_analyze_insn (x, insn, loop_notes)
*** 3666,3687 ****
  	{
  	  code = GET_CODE (XVECEXP (x, 0, i));
  	  if (code == SET || code == CLOBBER)
! 	    sched_analyze_1 (XVECEXP (x, 0, i), insn);
  	  else
! 	    sched_analyze_2 (XVECEXP (x, 0, i), insn);
  	}
      }
    else
!     sched_analyze_2 (x, insn);
  
    /* Mark registers CLOBBERED or used by called function.  */
    if (GET_CODE (insn) == CALL_INSN)
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        {
  	if (GET_CODE (XEXP (link, 0)) == CLOBBER)
! 	  sched_analyze_1 (XEXP (link, 0), insn);
  	else
! 	  sched_analyze_2 (XEXP (link, 0), insn);
        }
  
    /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
--- 3642,3663 ----
  	{
  	  code = GET_CODE (XVECEXP (x, 0, i));
  	  if (code == SET || code == CLOBBER)
! 	    sched_analyze_1 (d, XVECEXP (x, 0, i), insn);
  	  else
! 	    sched_analyze_2 (d, XVECEXP (x, 0, i), insn);
  	}
      }
    else
!     sched_analyze_2 (d, x, insn);
  
    /* Mark registers CLOBBERED or used by called function.  */
    if (GET_CODE (insn) == CALL_INSN)
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        {
  	if (GET_CODE (XEXP (link, 0)) == CLOBBER)
! 	  sched_analyze_1 (d, XEXP (link, 0), insn);
  	else
! 	  sched_analyze_2 (d, XEXP (link, 0), insn);
        }
  
    /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
*************** sched_analyze_insn (x, insn, loop_notes)
*** 3719,3737 ****
  	  for (i = 0; i < max_reg; i++)
  	    {
  	      rtx u;
! 	      for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
  		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
! 	      free_INSN_LIST_list (&reg_last_uses[i]);
  
! 	      for (u = reg_last_sets[i]; u; u = XEXP (u, 1))
  		add_dependence (insn, XEXP (u, 0), 0);
  
! 	      for (u = reg_last_clobbers[i]; u; u = XEXP (u, 1))
  		add_dependence (insn, XEXP (u, 0), 0);
  	    }
  	  reg_pending_sets_all = 1;
  
! 	  flush_pending_lists (insn, 0);
  	}
  
      }
--- 3695,3713 ----
  	  for (i = 0; i < max_reg; i++)
  	    {
  	      rtx u;
! 	      for (u = d->reg_last_uses[i]; u; u = XEXP (u, 1))
  		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
! 	      free_INSN_LIST_list (&d->reg_last_uses[i]);
  
! 	      for (u = d->reg_last_sets[i]; u; u = XEXP (u, 1))
  		add_dependence (insn, XEXP (u, 0), 0);
  
! 	      for (u = d->reg_last_clobbers[i]; u; u = XEXP (u, 1))
  		add_dependence (insn, XEXP (u, 0), 0);
  	    }
  	  reg_pending_sets_all = 1;
  
! 	  flush_pending_lists (d, insn, 0);
  	}
  
      }
*************** sched_analyze_insn (x, insn, loop_notes)
*** 3741,3756 ****
       subsequent sets will be output dependent on it.  */
    EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
  			     {
! 			       free_INSN_LIST_list (&reg_last_sets[i]);
! 			       free_INSN_LIST_list (&reg_last_clobbers[i]);
! 			       reg_last_sets[i]
  				 = alloc_INSN_LIST (insn, NULL_RTX);
  			     });
    EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
  			     {
! 			       reg_last_clobbers[i]
  				 = alloc_INSN_LIST (insn, 
! 						    reg_last_clobbers[i]);
  			     });
    CLEAR_REG_SET (reg_pending_sets);
    CLEAR_REG_SET (reg_pending_clobbers);
--- 3717,3732 ----
       subsequent sets will be output dependent on it.  */
    EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
  			     {
! 			       free_INSN_LIST_list (&d->reg_last_sets[i]);
! 			       free_INSN_LIST_list (&d->reg_last_clobbers[i]);
! 			       d->reg_last_sets[i]
  				 = alloc_INSN_LIST (insn, NULL_RTX);
  			     });
    EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
  			     {
! 			       d->reg_last_clobbers[i]
  				 = alloc_INSN_LIST (insn, 
! 						    d->reg_last_clobbers[i]);
  			     });
    CLEAR_REG_SET (reg_pending_sets);
    CLEAR_REG_SET (reg_pending_clobbers);
*************** sched_analyze_insn (x, insn, loop_notes)
*** 3759,3767 ****
      {
        for (i = 0; i < maxreg; i++)
  	{
! 	  free_INSN_LIST_list (&reg_last_sets[i]);
! 	  free_INSN_LIST_list (&reg_last_clobbers[i]);
! 	  reg_last_sets[i] = alloc_INSN_LIST (insn, NULL_RTX);
  	}
  
        reg_pending_sets_all = 0;
--- 3735,3743 ----
      {
        for (i = 0; i < maxreg; i++)
  	{
! 	  free_INSN_LIST_list (&d->reg_last_sets[i]);
! 	  free_INSN_LIST_list (&d->reg_last_clobbers[i]);
! 	  d->reg_last_sets[i] = alloc_INSN_LIST (insn, NULL_RTX);
  	}
  
        reg_pending_sets_all = 0;
*************** sched_analyze_insn (x, insn, loop_notes)
*** 3808,3814 ****
     for every dependency.  */
  
  static void
! sched_analyze (head, tail)
       rtx head, tail;
  {
    register rtx insn;
--- 3784,3791 ----
     for every dependency.  */
  
  static void
! sched_analyze (d, head, tail)
!      struct deps *d;
       rtx head, tail;
  {
    register rtx insn;
*************** sched_analyze (head, tail)
*** 3825,3833 ****
  	  /* Make each JUMP_INSN a scheduling barrier for memory
               references.  */
  	  if (GET_CODE (insn) == JUMP_INSN)
! 	    last_pending_memory_flush
! 	      = alloc_INSN_LIST (insn, last_pending_memory_flush);
! 	  sched_analyze_insn (PATTERN (insn), insn, loop_notes);
  	  loop_notes = 0;
  	}
        else if (GET_CODE (insn) == CALL_INSN)
--- 3802,3810 ----
  	  /* Make each JUMP_INSN a scheduling barrier for memory
               references.  */
  	  if (GET_CODE (insn) == JUMP_INSN)
! 	    d->last_pending_memory_flush
! 	      = alloc_INSN_LIST (insn, d->last_pending_memory_flush);
! 	  sched_analyze_insn (d, PATTERN (insn), insn, loop_notes);
  	  loop_notes = 0;
  	}
        else if (GET_CODE (insn) == CALL_INSN)
*************** sched_analyze (head, tail)
*** 3859,3872 ****
  	      int max_reg = max_reg_num ();
  	      for (i = 0; i < max_reg; i++)
  		{
! 		  for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
  		    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
! 		  free_INSN_LIST_list (&reg_last_uses[i]);
  
! 		  for (u = reg_last_sets[i]; u; u = XEXP (u, 1))
  		    add_dependence (insn, XEXP (u, 0), 0);
  
! 		  for (u = reg_last_clobbers[i]; u; u = XEXP (u, 1))
  		    add_dependence (insn, XEXP (u, 0), 0);
  		}
  	      reg_pending_sets_all = 1;
--- 3836,3849 ----
  	      int max_reg = max_reg_num ();
  	      for (i = 0; i < max_reg; i++)
  		{
! 		  for (u = d->reg_last_uses[i]; u; u = XEXP (u, 1))
  		    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
! 		  free_INSN_LIST_list (&d->reg_last_uses[i]);
  
! 		  for (u = d->reg_last_sets[i]; u; u = XEXP (u, 1))
  		    add_dependence (insn, XEXP (u, 0), 0);
  
! 		  for (u = d->reg_last_clobbers[i]; u; u = XEXP (u, 1))
  		    add_dependence (insn, XEXP (u, 0), 0);
  		}
  	      reg_pending_sets_all = 1;
*************** sched_analyze (head, tail)
*** 3886,3895 ****
  	      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
  		if (call_used_regs[i] || global_regs[i])
  		  {
! 		    for (u = reg_last_uses[i]; u; u = XEXP (u, 1))
  		      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  
! 		    for (u = reg_last_sets[i]; u; u = XEXP (u, 1))
  		      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  
  		    SET_REGNO_REG_SET (reg_pending_clobbers, i);
--- 3863,3872 ----
  	      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
  		if (call_used_regs[i] || global_regs[i])
  		  {
! 		    for (u = d->reg_last_uses[i]; u; u = XEXP (u, 1))
  		      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  
! 		    for (u = d->reg_last_sets[i]; u; u = XEXP (u, 1))
  		      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  
  		    SET_REGNO_REG_SET (reg_pending_clobbers, i);
*************** sched_analyze (head, tail)
*** 3898,3926 ****
  
  	  /* For each insn which shouldn't cross a call, add a dependence
  	     between that insn and this call insn.  */
! 	  x = LOG_LINKS (sched_before_next_call);
  	  while (x)
  	    {
  	      add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
  	      x = XEXP (x, 1);
  	    }
! 	  free_INSN_LIST_list (&LOG_LINKS (sched_before_next_call));
  
! 	  sched_analyze_insn (PATTERN (insn), insn, loop_notes);
  	  loop_notes = 0;
  
  	  /* In the absence of interprocedural alias analysis, we must flush
  	     all pending reads and writes, and start new dependencies starting
  	     from here.  But only flush writes for constant calls (which may
  	     be passed a pointer to something we haven't written yet).  */
! 	  flush_pending_lists (insn, CONST_CALL_P (insn));
  
  	  /* Depend this function call (actually, the user of this
  	     function call) on all hard register clobberage.  */
  
  	  /* last_function_call is now a list of insns.  */
! 	  free_INSN_LIST_list(&last_function_call);
! 	  last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
  	}
  
        /* See comments on reemit_notes as to why we do this.  
--- 3875,3903 ----
  
  	  /* For each insn which shouldn't cross a call, add a dependence
  	     between that insn and this call insn.  */
! 	  x = LOG_LINKS (d->sched_before_next_call);
  	  while (x)
  	    {
  	      add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
  	      x = XEXP (x, 1);
  	    }
! 	  free_INSN_LIST_list (&LOG_LINKS (d->sched_before_next_call));
  
! 	  sched_analyze_insn (d, PATTERN (insn), insn, loop_notes);
  	  loop_notes = 0;
  
  	  /* In the absence of interprocedural alias analysis, we must flush
  	     all pending reads and writes, and start new dependencies starting
  	     from here.  But only flush writes for constant calls (which may
  	     be passed a pointer to something we haven't written yet).  */
! 	  flush_pending_lists (d, insn, CONST_CALL_P (insn));
  
  	  /* Depend this function call (actually, the user of this
  	     function call) on all hard register clobberage.  */
  
  	  /* last_function_call is now a list of insns.  */
! 	  free_INSN_LIST_list (&d->last_function_call);
! 	  d->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
  	}
  
        /* See comments on reemit_notes as to why we do this.  
*************** compute_block_forward_dependences (bb)
*** 6194,6223 ****
  /* Initialize variables for region data dependence analysis.
     n_bbs is the number of region blocks.  */
  
! __inline static void
! init_rgn_data_dependences (n_bbs)
!      int n_bbs;
  {
!   int bb;
! 
!   /* Variables for which one copy exists for each block.  */
!   bzero ((char *) bb_pending_read_insns, n_bbs * sizeof (rtx));
!   bzero ((char *) bb_pending_read_mems, n_bbs * sizeof (rtx));
!   bzero ((char *) bb_pending_write_insns, n_bbs * sizeof (rtx));
!   bzero ((char *) bb_pending_write_mems, n_bbs * sizeof (rtx));
!   bzero ((char *) bb_pending_lists_length, n_bbs * sizeof (int));
!   bzero ((char *) bb_last_pending_memory_flush, n_bbs * sizeof (rtx));
!   bzero ((char *) bb_last_function_call, n_bbs * sizeof (rtx));
!   bzero ((char *) bb_sched_before_next_call, n_bbs * sizeof (rtx));
! 
!   /* Create an insn here so that we can hang dependencies off of it later.  */
!   for (bb = 0; bb < n_bbs; bb++)
!     {
!       bb_sched_before_next_call[bb] =
! 	gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
! 		      NULL_RTX, 0, NULL_RTX, NULL_RTX);
!       LOG_LINKS (bb_sched_before_next_call[bb]) = 0;
!     }
  }
  
  /* Add dependences so that branches are scheduled to run last in their
--- 6171,6197 ----
  /* Initialize variables for region data dependence analysis.
     n_bbs is the number of region blocks.  */
  
! static void
! init_deps (d)
!      struct deps *d;
  {
!   int maxreg = max_reg_num ();
!   d->reg_last_uses = (rtx *) xcalloc (maxreg, sizeof (rtx));
!   d->reg_last_sets = (rtx *) xcalloc (maxreg, sizeof (rtx));
!   d->reg_last_clobbers = (rtx *) xcalloc (maxreg, sizeof (rtx));
! 
!   d->pending_read_insns = 0;
!   d->pending_read_mems = 0;
!   d->pending_write_insns = 0;
!   d->pending_write_mems = 0;
!   d->pending_lists_length = 0;
!   d->last_pending_memory_flush = 0;
!   d->last_function_call = 0;
! 
!   d->sched_before_next_call
!     = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
! 		    NULL_RTX, 0, NULL_RTX, NULL_RTX);
!   LOG_LINKS (d->sched_before_next_call) = 0;
  }
  
  /* Add dependences so that branches are scheduled to run last in their
*************** static void
*** 6227,6233 ****
  add_branch_dependences (head, tail)
       rtx head, tail;
  {
- 
    rtx insn, last;
  
    /* For all branches, calls, uses, clobbers, and cc0 setters, force them
--- 6201,6206 ----
*************** add_branch_dependences (head, tail)
*** 6303,6308 ****
--- 6276,6429 ----
        }
  }
  
+ /* After computing the dependencies for block BB, propagate the dependencies
+    found in TMP_DEPS to the successors of the block.  MAX_REG is the number
+    of registers.  */
+ static void
+ propagate_deps (bb, tmp_deps, max_reg)
+      int bb;
+      struct deps *tmp_deps;
+      int max_reg;
+ {
+   int b = BB_TO_BLOCK (bb);
+   int e, first_edge;
+   int reg;
+   rtx link_insn, link_mem;
+   rtx u;
+ 
+   /* These lists should point to the right place, for correct
+      freeing later.  */
+   bb_deps[bb].pending_read_insns = tmp_deps->pending_read_insns;
+   bb_deps[bb].pending_read_mems = tmp_deps->pending_read_mems;
+   bb_deps[bb].pending_write_insns = tmp_deps->pending_write_insns;
+   bb_deps[bb].pending_write_mems = tmp_deps->pending_write_mems;
+ 
+   /* bb's structures are inherited by its successors.  */
+   first_edge = e = OUT_EDGES (b);
+   if (e <= 0)
+     return;
+ 
+   do
+     {
+       rtx x;
+       int b_succ = TO_BLOCK (e);
+       int bb_succ = BLOCK_TO_BB (b_succ);
+       struct deps *succ_deps = bb_deps + bb_succ;
+ 
+       /* Only bbs "below" bb, in the same region, are interesting.  */
+       if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
+ 	  || bb_succ <= bb)
+ 	{
+ 	  e = NEXT_OUT (e);
+ 	  continue;
+ 	}
+ 
+       for (reg = 0; reg < max_reg; reg++)
+ 	{
+ 	  /* reg-last-uses lists are inherited by bb_succ.  */
+ 	  for (u = tmp_deps->reg_last_uses[reg]; u; u = XEXP (u, 1))
+ 	    {
+ 	      if (find_insn_list (XEXP (u, 0),
+ 				  succ_deps->reg_last_uses[reg]))
+ 		continue;
+ 
+ 	      succ_deps->reg_last_uses[reg]
+ 		= alloc_INSN_LIST (XEXP (u, 0),
+ 				   succ_deps->reg_last_uses[reg]);
+ 	    }
+ 
+ 	  /* reg-last-defs lists are inherited by bb_succ.  */
+ 	  for (u = tmp_deps->reg_last_sets[reg]; u; u = XEXP (u, 1))
+ 	    {
+ 	      if (find_insn_list (XEXP (u, 0),
+ 				  succ_deps->reg_last_sets[reg]))
+ 		continue;
+ 
+ 	      succ_deps->reg_last_sets[reg]
+ 		= alloc_INSN_LIST (XEXP (u, 0),
+ 				   succ_deps->reg_last_sets[reg]);
+ 	    }
+ 
+ 	  for (u = tmp_deps->reg_last_clobbers[reg]; u; u = XEXP (u, 1))
+ 	    {
+ 	      if (find_insn_list (XEXP (u, 0),
+ 				  succ_deps->reg_last_clobbers[reg]))
+ 		continue;
+ 
+ 	      succ_deps->reg_last_clobbers[reg]
+ 		= alloc_INSN_LIST (XEXP (u, 0),
+ 				   succ_deps->reg_last_clobbers[reg]);
+ 	    }
+ 	}
+ 
+       /* Mem read/write lists are inherited by bb_succ.  */
+       link_insn = tmp_deps->pending_read_insns;
+       link_mem = tmp_deps->pending_read_mems;
+       while (link_insn)
+ 	{
+ 	  if (!(find_insn_mem_list (XEXP (link_insn, 0),
+ 				    XEXP (link_mem, 0),
+ 				    succ_deps->pending_read_insns,
+ 				    succ_deps->pending_read_mems)))
+ 	    add_insn_mem_dependence (succ_deps, &succ_deps->pending_read_insns,
+ 				     &succ_deps->pending_read_mems,
+ 				     XEXP (link_insn, 0), XEXP (link_mem, 0));
+ 	  link_insn = XEXP (link_insn, 1);
+ 	  link_mem = XEXP (link_mem, 1);
+ 	}
+ 
+       link_insn = tmp_deps->pending_write_insns;
+       link_mem = tmp_deps->pending_write_mems;
+       while (link_insn)
+ 	{
+ 	  if (!(find_insn_mem_list (XEXP (link_insn, 0),
+ 				    XEXP (link_mem, 0),
+ 				    succ_deps->pending_write_insns,
+ 				    succ_deps->pending_write_mems)))
+ 	    add_insn_mem_dependence (succ_deps,
+ 				     &succ_deps->pending_write_insns,
+ 				     &succ_deps->pending_write_mems,
+ 				     XEXP (link_insn, 0), XEXP (link_mem, 0));
+ 
+ 	  link_insn = XEXP (link_insn, 1);
+ 	  link_mem = XEXP (link_mem, 1);
+ 	}
+ 
+       /* last_function_call is inherited by bb_succ.  */
+       for (u = tmp_deps->last_function_call; u; u = XEXP (u, 1))
+ 	{
+ 	  if (find_insn_list (XEXP (u, 0),
+ 			      succ_deps->last_function_call))
+ 	    continue;
+ 
+ 	  succ_deps->last_function_call
+ 	    = alloc_INSN_LIST (XEXP (u, 0),
+ 			       succ_deps->last_function_call);
+ 	}
+ 
+       /* last_pending_memory_flush is inherited by bb_succ.  */
+       for (u = tmp_deps->last_pending_memory_flush; u; u = XEXP (u, 1))
+ 	{
+ 	  if (find_insn_list (XEXP (u, 0), 
+ 			      succ_deps->last_pending_memory_flush))
+ 	    continue;
+ 
+ 	  succ_deps->last_pending_memory_flush
+ 	    = alloc_INSN_LIST (XEXP (u, 0),
+ 			       succ_deps->last_pending_memory_flush);
+ 	}
+ 
+       /* sched_before_next_call is inherited by bb_succ.  */
+       x = LOG_LINKS (tmp_deps->sched_before_next_call);
+       for (; x; x = XEXP (x, 1))
+ 	add_dependence (succ_deps->sched_before_next_call,
+ 			XEXP (x, 0), REG_DEP_ANTI);
+ 
+       e = NEXT_OUT (e);
+     }
+   while (e != first_edge);
+ }
+ 
  /* Compute backward dependences inside bb.  In a multiple blocks region:
     (1) a bb is analyzed after its predecessors, and (2) the lists in
     effect at the end of bb (after analyzing for bb) are inherited by
*************** static void
*** 6324,6513 ****
  compute_block_backward_dependences (bb)
       int bb;
  {
!   int b;
!   rtx x;
    rtx head, tail;
    int max_reg = max_reg_num ();
  
!   b = BB_TO_BLOCK (bb);
  
-   if (current_nr_blocks == 1)
-     {
-       reg_last_uses = (rtx *) xcalloc (max_reg, sizeof (rtx));
-       reg_last_sets = (rtx *) xcalloc (max_reg, sizeof (rtx));
-       reg_last_clobbers = (rtx *) xcalloc (max_reg, sizeof (rtx));
- 
-       pending_read_insns = 0;
-       pending_read_mems = 0;
-       pending_write_insns = 0;
-       pending_write_mems = 0;
-       pending_lists_length = 0;
-       last_function_call = 0;
-       last_pending_memory_flush = 0;
-       sched_before_next_call
- 	= gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
- 			NULL_RTX, 0, NULL_RTX, NULL_RTX);
-       LOG_LINKS (sched_before_next_call) = 0;
-     }
-   else
-     {
-       reg_last_uses = bb_reg_last_uses[bb];
-       reg_last_sets = bb_reg_last_sets[bb];
-       reg_last_clobbers = bb_reg_last_clobbers[bb];
- 
-       pending_read_insns = bb_pending_read_insns[bb];
-       pending_read_mems = bb_pending_read_mems[bb];
-       pending_write_insns = bb_pending_write_insns[bb];
-       pending_write_mems = bb_pending_write_mems[bb];
-       pending_lists_length = bb_pending_lists_length[bb];
-       last_function_call = bb_last_function_call[bb];
-       last_pending_memory_flush = bb_last_pending_memory_flush[bb];
- 
-       sched_before_next_call = bb_sched_before_next_call[bb];
-     }
- 
    /* Do the analysis for this block.  */
    get_bb_head_tail (bb, &head, &tail);
!   sched_analyze (head, tail);
    add_branch_dependences (head, tail);
  
    if (current_nr_blocks > 1)
!     {
!       int e, first_edge;
!       int b_succ, bb_succ;
!       int reg;
!       rtx link_insn, link_mem;
!       rtx u;
! 
!       /* These lists should point to the right place, for correct
!          freeing later.  */
!       bb_pending_read_insns[bb] = pending_read_insns;
!       bb_pending_read_mems[bb] = pending_read_mems;
!       bb_pending_write_insns[bb] = pending_write_insns;
!       bb_pending_write_mems[bb] = pending_write_mems;
! 
!       /* bb's structures are inherited by it's successors.  */
!       first_edge = e = OUT_EDGES (b);
!       if (e > 0)
! 	do
! 	  {
! 	    b_succ = TO_BLOCK (e);
! 	    bb_succ = BLOCK_TO_BB (b_succ);
! 
! 	    /* Only bbs "below" bb, in the same region, are interesting.  */
! 	    if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
! 		|| bb_succ <= bb)
! 	      {
! 		e = NEXT_OUT (e);
! 		continue;
! 	      }
! 
! 	    for (reg = 0; reg < max_reg; reg++)
! 	      {
! 
! 		/* reg-last-uses lists are inherited by bb_succ.  */
! 		for (u = reg_last_uses[reg]; u; u = XEXP (u, 1))
! 		  {
! 		    if (find_insn_list (XEXP (u, 0),
! 					(bb_reg_last_uses[bb_succ])[reg]))
! 		      continue;
! 
! 		    (bb_reg_last_uses[bb_succ])[reg]
! 		      = alloc_INSN_LIST (XEXP (u, 0),
! 					 (bb_reg_last_uses[bb_succ])[reg]);
! 		  }
! 
! 		/* reg-last-defs lists are inherited by bb_succ.  */
! 		for (u = reg_last_sets[reg]; u; u = XEXP (u, 1))
! 		  {
! 		    if (find_insn_list (XEXP (u, 0),
! 					(bb_reg_last_sets[bb_succ])[reg]))
! 		      continue;
! 
! 		    (bb_reg_last_sets[bb_succ])[reg]
! 		      = alloc_INSN_LIST (XEXP (u, 0),
! 					 (bb_reg_last_sets[bb_succ])[reg]);
! 		  }
! 
! 		for (u = reg_last_clobbers[reg]; u; u = XEXP (u, 1))
! 		  {
! 		    if (find_insn_list (XEXP (u, 0),
! 					(bb_reg_last_clobbers[bb_succ])[reg]))
! 		      continue;
! 
! 		    (bb_reg_last_clobbers[bb_succ])[reg]
! 		      = alloc_INSN_LIST (XEXP (u, 0),
! 					 (bb_reg_last_clobbers[bb_succ])[reg]);
! 		  }
! 	      }
! 
! 	    /* Mem read/write lists are inherited by bb_succ.  */
! 	    link_insn = pending_read_insns;
! 	    link_mem = pending_read_mems;
! 	    while (link_insn)
! 	      {
! 		if (!(find_insn_mem_list (XEXP (link_insn, 0),
! 					  XEXP (link_mem, 0),
! 					  bb_pending_read_insns[bb_succ],
! 					  bb_pending_read_mems[bb_succ])))
! 		  add_insn_mem_dependence (&bb_pending_read_insns[bb_succ],
! 					   &bb_pending_read_mems[bb_succ],
! 				   XEXP (link_insn, 0), XEXP (link_mem, 0));
! 		link_insn = XEXP (link_insn, 1);
! 		link_mem = XEXP (link_mem, 1);
! 	      }
  
- 	    link_insn = pending_write_insns;
- 	    link_mem = pending_write_mems;
- 	    while (link_insn)
- 	      {
- 		if (!(find_insn_mem_list (XEXP (link_insn, 0),
- 					  XEXP (link_mem, 0),
- 					  bb_pending_write_insns[bb_succ],
- 					  bb_pending_write_mems[bb_succ])))
- 		  add_insn_mem_dependence (&bb_pending_write_insns[bb_succ],
- 					   &bb_pending_write_mems[bb_succ],
- 				   XEXP (link_insn, 0), XEXP (link_mem, 0));
- 
- 		link_insn = XEXP (link_insn, 1);
- 		link_mem = XEXP (link_mem, 1);
- 	      }
- 
- 	    /* last_function_call is inherited by bb_succ.  */
- 	    for (u = last_function_call; u; u = XEXP (u, 1))
- 	      {
- 		if (find_insn_list (XEXP (u, 0),
- 				    bb_last_function_call[bb_succ]))
- 		  continue;
- 
- 		bb_last_function_call[bb_succ]
- 		  = alloc_INSN_LIST (XEXP (u, 0),
- 				     bb_last_function_call[bb_succ]);
- 	      }
- 
- 	    /* last_pending_memory_flush is inherited by bb_succ.  */
- 	    for (u = last_pending_memory_flush; u; u = XEXP (u, 1))
- 	      {
- 		if (find_insn_list (XEXP (u, 0), 
- 				    bb_last_pending_memory_flush[bb_succ]))
- 		  continue;
- 
- 		bb_last_pending_memory_flush[bb_succ]
- 		  = alloc_INSN_LIST (XEXP (u, 0),
- 				     bb_last_pending_memory_flush[bb_succ]);
- 	      }
- 
- 	    /* sched_before_next_call is inherited by bb_succ.  */
- 	    x = LOG_LINKS (sched_before_next_call);
- 	    for (; x; x = XEXP (x, 1))
- 	      add_dependence (bb_sched_before_next_call[bb_succ],
- 			      XEXP (x, 0), REG_DEP_ANTI);
- 
- 	    e = NEXT_OUT (e);
- 	  }
- 	while (e != first_edge);
-     }
- 
    /* Free up the INSN_LISTs.
  
       Note this loop is executed max_reg * nr_regions times.  It's first 
--- 6445,6465 ----
  compute_block_backward_dependences (bb)
       int bb;
  {
!   int i;
    rtx head, tail;
    int max_reg = max_reg_num ();
+   struct deps tmp_deps;
  
!   tmp_deps = bb_deps[bb];
  
    /* Do the analysis for this block.  */
    get_bb_head_tail (bb, &head, &tail);
!   sched_analyze (&tmp_deps, head, tail);
    add_branch_dependences (head, tail);
  
    if (current_nr_blocks > 1)
!     propagate_deps (bb, &tmp_deps, max_reg);
  
    /* Free up the INSN_LISTs.
  
       Note this loop is executed max_reg * nr_regions times.  It's first 
*************** compute_block_backward_dependences (bb)
*** 6515,6543 ****
       The list was empty for the vast majority of those calls.  On the PA, not 
       calling free_INSN_LIST_list in those cases improves -O2 compile times by
       3-5% on average.  */
!   for (b = 0; b < max_reg; ++b)
      {
!       if (reg_last_clobbers[b])
! 	free_INSN_LIST_list (&reg_last_clobbers[b]);
!       if (reg_last_sets[b])
! 	free_INSN_LIST_list (&reg_last_sets[b]);
!       if (reg_last_uses[b])
! 	free_INSN_LIST_list (&reg_last_uses[b]);
      }
  
    /* Assert that we won't need bb_reg_last_* for this block anymore.  */
!   if (current_nr_blocks > 1)
!     {
!       bb_reg_last_uses[bb] = (rtx *) NULL_RTX;
!       bb_reg_last_sets[bb] = (rtx *) NULL_RTX;
!       bb_reg_last_clobbers[bb] = (rtx *) NULL_RTX;
!     }
!   else if (current_nr_blocks == 1)
!     {
!       free (reg_last_uses);
!       free (reg_last_sets);
!       free (reg_last_clobbers);
!     }
  }
  
  /* Print dependences for debugging, callable from debugger.  */
--- 6467,6489 ----
       The list was empty for the vast majority of those calls.  On the PA, not 
       calling free_INSN_LIST_list in those cases improves -O2 compile times by
       3-5% on average.  */
!   for (i = 0; i < max_reg; ++i)
      {
!       if (tmp_deps.reg_last_clobbers[i])
! 	free_INSN_LIST_list (&tmp_deps.reg_last_clobbers[i]);
!       if (tmp_deps.reg_last_sets[i])
! 	free_INSN_LIST_list (&tmp_deps.reg_last_sets[i]);
!       if (tmp_deps.reg_last_uses[i])
! 	free_INSN_LIST_list (&tmp_deps.reg_last_uses[i]);
      }
  
    /* Assert that we won't need bb_reg_last_* for this block anymore.  */
!   free (bb_deps[bb].reg_last_uses);
!   free (bb_deps[bb].reg_last_sets);
!   free (bb_deps[bb].reg_last_clobbers);
!   bb_deps[bb].reg_last_uses = 0;
!   bb_deps[bb].reg_last_sets = 0;
!   bb_deps[bb].reg_last_clobbers = 0;
  }
  
  /* Print dependences for debugging, callable from debugger.  */
*************** set_priorities (bb)
*** 6649,6677 ****
    return n_insn;
  }
  
- /* Make each element of VECTOR point at an rtx-vector,
-    taking the space for all those rtx-vectors from SPACE.
-    SPACE is of type (rtx *), but it is really as long as NELTS rtx-vectors.
-    BYTES_PER_ELT is the number of bytes in one rtx-vector.
-    (this is the same as init_regset_vector () in flow.c)  */
- 
- static void
- init_rtx_vector (vector, space, nelts, bytes_per_elt)
-      rtx **vector;
-      rtx *space;
-      int nelts;
-      int bytes_per_elt;
- {
-   register int i;
-   register rtx *p = space;
- 
-   for (i = 0; i < nelts; i++)
-     {
-       vector[i] = p;
-       p += bytes_per_elt / sizeof (*p);
-     }
- }
- 
  /* Schedule a region.  A region is either an inner loop, a loop-free
     subroutine, or a single basic block.  Each bb in the region is
     scheduled after its flow predecessors.  */
--- 6595,6600 ----
*************** schedule_region (rgn)
*** 6683,6691 ****
    int bb;
    int rgn_n_insns = 0;
    int sched_rgn_n_insns = 0;
-   rtx *bb_reg_last_uses_space = NULL;
-   rtx *bb_reg_last_sets_space = NULL;
-   rtx *bb_reg_last_clobbers_space = NULL;
  
    /* Set variables for the current region.  */
    current_nr_blocks = RGN_NR_BLOCKS (rgn);
--- 6606,6611 ----
*************** schedule_region (rgn)
*** 6696,6743 ****
    reg_pending_sets_all = 0;
  
    /* Initializations for region data dependence analyisis.  */
!   if (current_nr_blocks > 1)
!     {
!       int maxreg = max_reg_num ();
! 
!       bb_reg_last_uses = (rtx **) xmalloc (current_nr_blocks * sizeof (rtx *));
!       bb_reg_last_uses_space 
! 	= (rtx *) xcalloc (current_nr_blocks * maxreg, sizeof (rtx));
!       init_rtx_vector (bb_reg_last_uses, bb_reg_last_uses_space, 
! 		       current_nr_blocks, maxreg * sizeof (rtx *));
! 
!       bb_reg_last_sets = (rtx **) xmalloc (current_nr_blocks * sizeof (rtx *));
!       bb_reg_last_sets_space 
! 	= (rtx *) xcalloc (current_nr_blocks * maxreg, sizeof (rtx));
!       init_rtx_vector (bb_reg_last_sets, bb_reg_last_sets_space, 
! 		       current_nr_blocks, maxreg * sizeof (rtx *));
! 
!       bb_reg_last_clobbers =
! 	(rtx **) xmalloc (current_nr_blocks * sizeof (rtx *));
!       bb_reg_last_clobbers_space 
! 	= (rtx *) xcalloc (current_nr_blocks * maxreg, sizeof (rtx));
!       init_rtx_vector (bb_reg_last_clobbers, bb_reg_last_clobbers_space, 
! 		       current_nr_blocks, maxreg * sizeof (rtx *));
! 
!       bb_pending_read_insns 
! 	= (rtx *) xmalloc (current_nr_blocks * sizeof (rtx));
!       bb_pending_read_mems 
! 	= (rtx *) xmalloc (current_nr_blocks * sizeof (rtx));
!       bb_pending_write_insns =
! 	(rtx *) xmalloc (current_nr_blocks * sizeof (rtx));
!       bb_pending_write_mems 
! 	= (rtx *) xmalloc (current_nr_blocks * sizeof (rtx));
!       bb_pending_lists_length =
! 	(int *) xmalloc (current_nr_blocks * sizeof (int));
!       bb_last_pending_memory_flush =
! 	(rtx *) xmalloc (current_nr_blocks * sizeof (rtx));
!       bb_last_function_call 
! 	= (rtx *) xmalloc (current_nr_blocks * sizeof (rtx));
!       bb_sched_before_next_call =
! 	(rtx *) xmalloc (current_nr_blocks * sizeof (rtx));
! 
!       init_rgn_data_dependences (current_nr_blocks);
!     }
  
    /* Compute LOG_LINKS.  */
    for (bb = 0; bb < current_nr_blocks; bb++)
--- 6616,6624 ----
    reg_pending_sets_all = 0;
  
    /* Initializations for region data dependence analysis.  */
!   bb_deps = (struct deps *) xmalloc (sizeof (struct deps) * current_nr_blocks);
!   for (bb = 0; bb < current_nr_blocks; bb++)
!     init_deps (bb_deps + bb);
  
    /* Compute LOG_LINKS.  */
    for (bb = 0; bb < current_nr_blocks; bb++)
*************** schedule_region (rgn)
*** 6823,6846 ****
    FREE_REG_SET (reg_pending_sets);
    FREE_REG_SET (reg_pending_clobbers);
  
    if (current_nr_blocks > 1)
      {
        int i;
  
-       free (bb_reg_last_uses_space);
-       free (bb_reg_last_uses);
-       free (bb_reg_last_sets_space);
-       free (bb_reg_last_sets);
-       free (bb_reg_last_clobbers_space);
-       free (bb_reg_last_clobbers);
-       free (bb_pending_read_insns);
-       free (bb_pending_read_mems);
-       free (bb_pending_write_insns);
-       free (bb_pending_write_mems);
-       free (bb_pending_lists_length);
-       free (bb_last_pending_memory_flush);
-       free (bb_last_function_call);
-       free (bb_sched_before_next_call);
        free (prob);
        for (i = 0; i < current_nr_blocks; ++i)
  	{
--- 6704,6715 ----
    FREE_REG_SET (reg_pending_sets);
    FREE_REG_SET (reg_pending_clobbers);
  
+   free (bb_deps);
+ 
    if (current_nr_blocks > 1)
      {
        int i;
  
        free (prob);
        for (i = 0; i < current_nr_blocks; ++i)
  	{


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]