Patch committed to dataflow branch.


This patch removes all uses of flow from the sched2 pass onwards.  The
majority of the patch is devoted to making all of the schedulers free
of any use of flow.
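
The mechanical pattern is the same everywhere: a function that used to
consult the global rtl_df now receives the dataflow instance to use
from its caller.  A stand-alone caricature of the pattern (the types
and names below are invented stand-ins, not the real df.h interfaces):

#include <stdio.h>

/* Pretend per-block liveness; a stand-in for the real instance.  */
struct df { int live_in[4]; };

static struct df rtl_df;        /* old style: one global instance */

/* Before: implicitly reads the global.  */
static int
live_in_old (int bb)
{
  return rtl_df.live_in[bb];
}

/* After: the caller says which instance to consult.  */
static int
live_in_new (struct df *df, int bb)
{
  return df->live_in[bb];
}

int
main (void)
{
  struct df local = { { 1, 0, 1, 0 } };
  rtl_df = local;
  printf ("%d %d\n", live_in_old (2), live_in_new (&local, 2));
  return 0;
}

Making the instance explicit is what lets each pass build, query, and
free its own dataflow information instead of depending on flow to keep
a global up to date.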

There are two fragments here that should have been part of the
previously committed patch; they will be folded into it before these
changes are submitted to mainline.

This patch was bootstrapped and regression tested on three platforms:

x86_64-unknown-linux-gnu
powerpc64-unknown-linux-gnu
i686-pc-linux-gnu

2006-04-08  Kenneth Zadeck <zadeck@naturalbridge.com>
    * sched-ebb.c (init_ready_list, can_schedule_ready_p, new_ready,
    compute_jump_reg_dependencies, schedule_ebb, schedule_ebbs,
    sched_analyze, schedule_block): Changed to pass instance of df.
    (compute_jump_reg_dependencies): Changed to use the passed-in
    instance of dataflow.
    * ddg.c (build_intra_loop_deps, create_ddg): Changed to pass
    instance of df.
    * ddg.h: Added forward declaration of struct df.
    * haifa-sched.c (schedule_insns, schedule_insn,
    schedule_block): Changed to pass instance of df.
    * modulo-sched.c (compute_jump_reg_dependencies): Ditto.
    (sms_schedule): Added DF_LR_RUN_DCE when initializing dataflow.
    * sched-deps.c (sched_analyze_insn, sched_analyze):
    Changed to pass instance of df.
    * rtl.h (schedule_insns, schedule_ebbs): Ditto.
    * sched-int.h (init_ready_list, can_schedule_ready_p, new_ready,
    compute_jump_reg_dependencies, sched_analyze, schedule_block):
    Ditto.
    * sched-rgn.c (check_live_1, update_live_1, check_live,
    update_live, init_ready_list, can_schedule_ready_p, new_ready,
    compute_jump_reg_dependencies, compute_block_backward_dependences,
    schedule_region, schedule_insns): Ditto.
    (schedule_insns): Removed call to update_life_info when finished.
    (rest_of_handle_sched, rest_of_handle_sched2): Created local
    instance of df.
    * passes.c (init_optimization_passes): Moved clear_df pass
    earlier.
    * Makefile.in (df-problems.o): Added except.h and dce.h.
    (haifa-sched.o): Added DF_H.
    * recog.c (split_all_insns): Removed old code that was used to
    update dataflow.
    * reg-stack.c (rest_of_handle_stack_regs): Removed ifdefed out code.
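
The new pass drivers at the bottom of sched-rgn.c follow the per-pass
dataflow lifecycle used on the branch: create an instance, register
the problems to solve, analyze, hand the instance to the scheduler,
and free it.  A compilable caricature of that sequence (every *_stub
below is a placeholder for illustration, not the real df.c code):

#include <stdlib.h>

struct df { int n_problems; };

static struct df *
df_init_stub (void)
{
  return calloc (1, sizeof (struct df));
}

static void
df_add_problem_stub (struct df *df)
{
  df->n_problems++;             /* lr, ur, ri in rest_of_handle_sched2 */
}

static void
df_analyze_stub (struct df *df)
{
  (void) df;                    /* solve the registered problems */
}

static void
schedule_insns_stub (struct df *df)
{
  (void) df;                    /* the scheduler queries liveness via DF */
}

static void
df_finish_stub (struct df *df)
{
  free (df);
}

int
main (void)
{
  struct df *df = df_init_stub ();
  df_add_problem_stub (df);
  df_analyze_stub (df);
  schedule_insns_stub (df);
  df_finish_stub (df);
  return 0;
}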

Index: sched-ebb.c
===================================================================
--- sched-ebb.c	(revision 112761)
+++ sched-ebb.c	(working copy)
@@ -51,17 +51,17 @@ static int target_n_insns;
 static int sched_n_insns;
 
 /* Implementations of the sched_info functions for region scheduling.  */
-static void init_ready_list (struct ready_list *);
-static int can_schedule_ready_p (rtx);
-static int new_ready (rtx);
+static void init_ready_list (struct df *, struct ready_list *);
+static int can_schedule_ready_p (struct df *, rtx);
+static int new_ready (struct df *, rtx);
 static int schedule_more_p (void);
 static const char *ebb_print_insn (rtx, int);
 static int rank (rtx, rtx);
 static int contributes_to_priority (rtx, rtx);
-static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
+static void compute_jump_reg_dependencies (struct df *, rtx, regset, regset, regset);
 static basic_block earliest_block_with_similiar_load (basic_block, rtx);
 static void add_deps_for_risky_insns (rtx, rtx);
-static basic_block schedule_ebb (rtx, rtx);
+static basic_block schedule_ebb (struct df *, rtx, rtx);
 static basic_block fix_basic_block_boundaries (basic_block, basic_block, rtx,
 					       rtx);
 static void add_missing_bbs (rtx, basic_block, basic_block);
@@ -78,7 +78,7 @@ schedule_more_p (void)
    once before scheduling a set of insns.  */
 
 static void
-init_ready_list (struct ready_list *ready)
+init_ready_list (struct df *df ATTRIBUTE_UNUSED, struct ready_list *ready)
 {
   rtx prev_head = current_sched_info->prev_head;
   rtx next_tail = current_sched_info->next_tail;
@@ -107,7 +107,7 @@ init_ready_list (struct ready_list *read
    insn can be scheduled, nonzero if we should silently discard it.  */
 
 static int
-can_schedule_ready_p (rtx insn ATTRIBUTE_UNUSED)
+can_schedule_ready_p (struct df *df ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED)
 {
   sched_n_insns++;
   return 1;
@@ -117,7 +117,7 @@ can_schedule_ready_p (rtx insn ATTRIBUTE
    if it should be moved to the ready list or the queue, or zero if we
    should silently discard it.  */
 static int
-new_ready (rtx next ATTRIBUTE_UNUSED)
+new_ready (struct df *df ATTRIBUTE_UNUSED, rtx next ATTRIBUTE_UNUSED)
 {
   return 1;
 }
@@ -172,8 +172,8 @@ contributes_to_priority (rtx next ATTRIB
     registers that must be considered as set in SET.  */
 
 static void
-compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used,
-			       regset set)
+compute_jump_reg_dependencies (struct df *df, rtx insn, regset cond_set, 
+			       regset used, regset set)
 {
   basic_block b = BLOCK_FOR_INSN (insn);
   edge e;
@@ -186,9 +186,9 @@ compute_jump_reg_dependencies (rtx insn,
 	 it may guard the fallthrough block from using a value that has
 	 conditionally overwritten that of the main codepath.  So we
 	 consider that it restores the value of the main codepath.  */
-      bitmap_and (set, DF_LIVE_IN (rtl_df, e->dest), cond_set);
+      bitmap_and (set, DF_LIVE_IN (df, e->dest), cond_set);
     else
-      bitmap_ior_into (used, DF_LIVE_IN (rtl_df, e->dest));
+      bitmap_ior_into (used, DF_LIVE_IN (df, e->dest));
 }
 
 /* Used in schedule_insns to initialize current_sched_info for scheduling
@@ -479,7 +479,7 @@ add_deps_for_risky_insns (rtx head, rtx 
    and TAIL.  */
 
 static basic_block
-schedule_ebb (rtx head, rtx tail)
+schedule_ebb (struct df *df, rtx head, rtx tail)
 {
   int n_insns;
   basic_block b;
@@ -494,7 +494,7 @@ schedule_ebb (rtx head, rtx tail)
 
   /* Compute LOG_LINKS.  */
   init_deps (&tmp_deps);
-  sched_analyze (&tmp_deps, head, tail);
+  sched_analyze (df, &tmp_deps, head, tail);
   free_deps (&tmp_deps);
 
   /* Compute INSN_DEPEND.  */
@@ -539,7 +539,7 @@ schedule_ebb (rtx head, rtx tail)
 
   current_sched_info->queue_must_finish_empty = 1;
 
-  schedule_block (-1, n_insns);
+  schedule_block (df, -1, n_insns);
 
   /* Sanity check: verify that all region insns were scheduled.  */
   gcc_assert (sched_n_insns == n_insns);
@@ -557,7 +557,7 @@ schedule_ebb (rtx head, rtx tail)
 /* The one entry point in this file.  */
 
 void
-schedule_ebbs (void)
+schedule_ebbs (struct df *df)
 {
   basic_block bb;
   int probability_cutoff;
@@ -617,7 +617,7 @@ schedule_ebbs (void)
 	    break;
 	}
 
-      bb = schedule_ebb (head, tail);
+      bb = schedule_ebb (df, head, tail);
     }
 
   /* Updating life info can be done by local propagation over the modified
Index: ddg.c
===================================================================
--- ddg.c	(revision 112761)
+++ ddg.c	(working copy)
@@ -370,7 +370,7 @@ add_inter_loop_mem_dep (ddg_ptr g, ddg_n
 /* Perform intra-block Data Dependency analysis and connect the nodes in
    the DDG.  We assume the loop has a single basic block.  */
 static void
-build_intra_loop_deps (ddg_ptr g)
+build_intra_loop_deps (struct df *df, ddg_ptr g)
 {
   int i;
   /* Hold the dependency analysis state during dependency calculations.  */
@@ -383,7 +383,7 @@ build_intra_loop_deps (ddg_ptr g)
 
   /* Do the intra-block data dependence analysis for the given block.  */
   get_block_head_tail (g->bb->index, &head, &tail);
-  sched_analyze (&tmp_deps, head, tail);
+  sched_analyze (df, &tmp_deps, head, tail);
 
   /* Build intra-loop data dependencies using the scheduler dependency
      analysis.  */
@@ -511,7 +511,7 @@ create_ddg (basic_block bb, struct df *d
   
 
   /* Build the data dependency graph.  */
-  build_intra_loop_deps (g);
+  build_intra_loop_deps (df, g);
   build_inter_loop_deps (g, df);
   return g;
 }
Index: ddg.h
===================================================================
--- ddg.h	(revision 112761)
+++ ddg.h	(working copy)
@@ -166,6 +166,7 @@ struct ddg_all_sccs
 };
 
 
+struct df;
 ddg_ptr create_ddg (basic_block, struct df *, int closing_branch_deps);
 void free_ddg (ddg_ptr);
 
Index: haifa-sched.c
===================================================================
--- haifa-sched.c	(revision 112761)
+++ haifa-sched.c	(working copy)
@@ -143,6 +143,7 @@ Software Foundation, 51 Franklin Street,
 #include "sched-int.h"
 #include "target.h"
 #include "output.h"
+#include "df.h"
 
 #ifdef INSN_SCHEDULING
 
@@ -442,7 +443,7 @@ static int priority (rtx);
 static int rank_for_schedule (const void *, const void *);
 static void swap_sort (rtx *, int);
 static void queue_insn (rtx, int);
-static int schedule_insn (rtx, struct ready_list *, int);
+static int schedule_insn (struct df *df, rtx, struct ready_list *, int);
 static int find_set_reg_weight (rtx);
 static void find_insn_reg_weight (int);
 static void adjust_priority (rtx);
@@ -502,7 +503,7 @@ struct sched_info *current_sched_info;
 
 #ifndef INSN_SCHEDULING
 void
-schedule_insns (void)
+schedule_insns (struct df *df ATTRIBUTE_UNUSED)
 {
 }
 #else
@@ -883,7 +884,7 @@ static int last_clock_var;
    zero for insns in a schedule group).  */
 
 static int
-schedule_insn (rtx insn, struct ready_list *ready, int clock)
+schedule_insn (struct df *df, rtx insn, struct ready_list *ready, int clock)
 {
   rtx link;
   int advance = 0;
@@ -922,7 +923,7 @@ schedule_insn (rtx insn, struct ready_li
 	{
 	  int effective_cost = INSN_TICK (next) - clock;
 
-	  if (! (*current_sched_info->new_ready) (next))
+	  if (! (*current_sched_info->new_ready) (df, next))
 	    continue;
 
 	  if (sched_verbose >= 2)
@@ -1834,7 +1835,7 @@ choose_ready (struct ready_list *ready)
    possibly bringing insns from subsequent blocks in the same region.  */
 
 void
-schedule_block (int b, int rgn_n_insns)
+schedule_block (struct df *df, int b, int rgn_n_insns)
 {
   struct ready_list ready;
   int i, first_cycle_insn_p;
@@ -1886,7 +1887,7 @@ schedule_block (int b, int rgn_n_insns)
   for (i = 0; i <= rgn_n_insns; i++)
     choice_stack[i].state = xmalloc (dfa_state_size);
 
-  (*current_sched_info->init_ready_list) (&ready);
+  (*current_sched_info->init_ready_list) (df, &ready);
 
   if (targetm.sched.md_init)
     targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
@@ -2047,7 +2048,7 @@ schedule_block (int b, int rgn_n_insns)
 	      continue;
 	    }
 
-	  if (! (*current_sched_info->can_schedule_ready_p) (insn))
+	  if (! (*current_sched_info->can_schedule_ready_p) (df, insn))
 	    goto next;
 
 	  last_scheduled_insn = move_insn (insn, last_scheduled_insn);
@@ -2066,7 +2067,7 @@ schedule_block (int b, int rgn_n_insns)
 		   && GET_CODE (PATTERN (insn)) != CLOBBER)
 	    can_issue_more--;
 
-	  advance = schedule_insn (insn, &ready, clock_var);
+	  advance = schedule_insn (df, insn, &ready, clock_var);
 
 	  /* After issuing an asm insn we should start a new cycle.  */
 	  if (advance == 0 && asm_p)
Index: modulo-sched.c
===================================================================
--- modulo-sched.c	(revision 112761)
+++ modulo-sched.c	(working copy)
@@ -244,7 +244,8 @@ contributes_to_priority (rtx next, rtx i
 }
 
 static void
-compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
+compute_jump_reg_dependencies (struct df *df ATTRIBUTE_UNUSED,
+			       rtx insn ATTRIBUTE_UNUSED,
 			       regset cond_exec ATTRIBUTE_UNUSED,
 			       regset used ATTRIBUTE_UNUSED,
 			       regset set ATTRIBUTE_UNUSED)
@@ -935,6 +936,7 @@ sms_schedule (void)
 
   /* Init Data Flow analysis, to be used in interloop dep calculation.  */
   df = df_init (DF_HARD_REGS | DF_EQUIV_NOTES |	DF_SUBREGS);
+  df_lr_add_problem (df, DF_LR_RUN_DCE);
   df_rd_add_problem (df, 0);
   df_ru_add_problem (df, 0);
   df_chain_add_problem (df, DF_DU_CHAIN | DF_UD_CHAIN);
Index: sched-deps.c
===================================================================
--- sched-deps.c	(revision 112761)
+++ sched-deps.c	(working copy)
@@ -96,7 +96,7 @@ static void fixup_sched_groups (rtx);
 static void flush_pending_lists (struct deps *, rtx, int, int);
 static void sched_analyze_1 (struct deps *, rtx, rtx);
 static void sched_analyze_2 (struct deps *, rtx, rtx);
-static void sched_analyze_insn (struct deps *, rtx, rtx, rtx);
+static void sched_analyze_insn (struct df *, struct deps *, rtx, rtx, rtx);
 
 static rtx sched_get_condition (rtx);
 static int conditions_mutex_p (rtx, rtx);
@@ -881,7 +881,8 @@ sched_analyze_2 (struct deps *deps, rtx 
 /* Analyze an INSN with pattern X to find all dependencies.  */
 
 static void
-sched_analyze_insn (struct deps *deps, rtx x, rtx insn, rtx loop_notes)
+sched_analyze_insn (struct df *df, struct deps *deps, 
+		    rtx x, rtx insn, rtx loop_notes)
 {
   RTX_CODE code = GET_CODE (x);
   rtx link;
@@ -957,7 +958,7 @@ sched_analyze_insn (struct deps *deps, r
 	  INIT_REG_SET (&tmp_sets);
 
 	  (*current_sched_info->compute_jump_reg_dependencies)
-	    (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
+	    (df, insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
 	  /* Make latency of jump equal to 0 by using anti-dependence.  */
 	  EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
 	    {
@@ -1242,7 +1243,7 @@ sched_analyze_insn (struct deps *deps, r
    for every dependency.  */
 
 void
-sched_analyze (struct deps *deps, rtx head, rtx tail)
+sched_analyze (struct df *df, struct deps *deps, rtx head, rtx tail)
 {
   rtx insn;
   rtx loop_notes = 0;
@@ -1279,7 +1280,7 @@ sched_analyze (struct deps *deps, rtx he
 		deps->last_pending_memory_flush
 		  = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
 	    }
-	  sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
+	  sched_analyze_insn (df, deps, PATTERN (insn), insn, loop_notes);
 	  loop_notes = 0;
 	}
       else if (CALL_P (insn))
@@ -1334,7 +1335,7 @@ sched_analyze (struct deps *deps, rtx he
 	  add_dependence_list_and_free (insn, &deps->sched_before_next_call, 1,
 					REG_DEP_ANTI);
 
-	  sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
+	  sched_analyze_insn (df, deps, PATTERN (insn), insn, loop_notes);
 	  loop_notes = 0;
 
 	  /* In the absence of interprocedural alias analysis, we must flush
Index: rtl.h
===================================================================
--- rtl.h	(revision 112775)
+++ rtl.h	(working copy)
@@ -2094,11 +2094,12 @@ extern void print_rtl_slim_with_bb (FILE
 extern void dump_insn_slim (FILE *f, rtx x);
 extern void debug_insn_slim (rtx x);
 
+struct df;
 /* In sched-rgn.c.  */
-extern void schedule_insns (void);
+extern void schedule_insns (struct df *);
 
 /* In sched-ebb.c.  */
-extern void schedule_ebbs (void);
+extern void schedule_ebbs (struct df *);
 
 /* In haifa-sched.c.  */
 extern void fix_sched_param (const char *, const char *);
Index: sched-int.h
===================================================================
--- sched-int.h	(revision 112761)
+++ sched-int.h	(working copy)
@@ -136,16 +136,16 @@ struct sched_info
 {
   /* Add all insns that are initially ready to the ready list.  Called once
      before scheduling a set of insns.  */
-  void (*init_ready_list) (struct ready_list *);
+  void (*init_ready_list) (struct df *, struct ready_list *);
   /* Called after taking an insn from the ready list.  Returns nonzero if
      this insn can be scheduled, nonzero if we should silently discard it.  */
-  int (*can_schedule_ready_p) (rtx);
+  int (*can_schedule_ready_p) (struct df *, rtx);
   /* Return nonzero if there are more insns that should be scheduled.  */
   int (*schedule_more_p) (void);
   /* Called after an insn has all its dependencies resolved.  Return nonzero
      if it should be moved to the ready list or the queue, or zero if we
      should silently discard it.  */
-  int (*new_ready) (rtx);
+  int (*new_ready) (struct df *, rtx);
   /* Compare priority of two insns.  Return a positive number if the second
      insn is to be preferred for scheduling, and a negative one if the first
      is to be preferred.  Zero if they are equally good.  */
@@ -161,7 +161,7 @@ struct sched_info
   /* Called when computing dependencies for a JUMP_INSN.  This function
      should store the set of registers that must be considered as set by
      the jump in the regset.  */
-  void (*compute_jump_reg_dependencies) (rtx, regset, regset, regset);
+  void (*compute_jump_reg_dependencies) (struct df *, rtx, regset, regset, regset);
 
   /* The boundaries of the set of insns to be scheduled.  */
   rtx prev_head, next_tail;
@@ -333,7 +333,7 @@ extern void print_insn (char *, rtx, int
 /* Functions in sched-deps.c.  */
 extern bool sched_insns_conditions_mutex_p (rtx, rtx);
 extern int add_dependence (rtx, rtx, enum reg_note);
-extern void sched_analyze (struct deps *, rtx, rtx);
+extern void sched_analyze (struct df *, struct deps *, rtx, rtx);
 extern void init_deps (struct deps *);
 extern void free_deps (struct deps *);
 extern void init_deps_global (void);
@@ -358,7 +358,7 @@ extern void rm_other_notes (rtx, rtx);
 extern int insn_cost (rtx, rtx, rtx);
 extern int set_priorities (rtx, rtx);
 
-extern void schedule_block (int, int);
+extern void schedule_block (struct df *, int, int);
 extern void sched_init (void);
 extern void sched_finish (void);
 
Index: sched-rgn.c
===================================================================
--- sched-rgn.c	(revision 112775)
+++ sched-rgn.c	(working copy)
@@ -240,10 +240,10 @@ static void compute_dom_prob_ps (int);
 #define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))
 
 /* Speculative scheduling functions.  */
-static int check_live_1 (int, rtx);
-static void update_live_1 (int, rtx);
-static int check_live (rtx, int);
-static void update_live (rtx, int);
+static int check_live_1 (struct df *, int, rtx);
+static void update_live_1 (struct df *, int, rtx);
+static int check_live (struct df *, rtx, int);
+static void update_live (struct df *, rtx, int);
 static void set_spec_fed (rtx);
 static int is_pfree (rtx, int, int);
 static int find_conditional_protection (rtx, int);
@@ -254,11 +254,11 @@ static int is_exception_free (rtx, int, 
 static bool sets_likely_spilled (rtx);
 static void sets_likely_spilled_1 (rtx, rtx, void *);
 static void add_branch_dependences (rtx, rtx);
-static void compute_block_backward_dependences (int);
+static void compute_block_backward_dependences (struct df *, int);
 void debug_dependencies (void);
 
 static void init_regions (void);
-static void schedule_region (int);
+static void schedule_region (struct df *, int);
 static rtx concat_INSN_LIST (rtx, rtx);
 static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
 static void propagate_deps (int, struct deps *);
@@ -1106,7 +1106,7 @@ debug_candidates (int trg)
    of the split-blocks of src, otherwise return 1.  */
 
 static int
-check_live_1 (int src, rtx x)
+check_live_1 (struct df *df, int src, rtx x)
 {
   int i;
   int regno;
@@ -1126,7 +1126,7 @@ check_live_1 (int src, rtx x)
 
       for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
 	if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
-	  if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)))
+	  if (check_live_1 (df, src, XEXP (XVECEXP (reg, 0, i), 0)))
 	    return 1;
 
       return 0;
@@ -1153,7 +1153,7 @@ check_live_1 (int src, rtx x)
 	      for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
 		{
 		  basic_block b = candidate_table[src].split_bbs.first_member[i];
-		  if (REGNO_REG_SET_P (DF_LIVE_IN (rtl_df, b), regno + j))
+		  if (REGNO_REG_SET_P (DF_LIVE_IN (df, b), regno + j))
 		    return 0;
 		}
 	    }
@@ -1164,7 +1164,7 @@ check_live_1 (int src, rtx x)
 	  for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
 	    {
 	      basic_block b = candidate_table[src].split_bbs.first_member[i];
-	      if (REGNO_REG_SET_P (DF_LIVE_IN (rtl_df, b), regno))
+	      if (REGNO_REG_SET_P (DF_LIVE_IN (df, b), regno))
 		return 0;
 	    }
 	}
@@ -1177,7 +1177,7 @@ check_live_1 (int src, rtx x)
    of every update-block of src.  */
 
 static void
-update_live_1 (int src, rtx x)
+update_live_1 (struct df *df, int src, rtx x)
 {
   int i;
   int regno;
@@ -1197,7 +1197,7 @@ update_live_1 (int src, rtx x)
 
       for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
 	if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
-	  update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0));
+	  update_live_1 (df, src, XEXP (XVECEXP (reg, 0, i), 0));
 
       return;
     }
@@ -1220,8 +1220,8 @@ update_live_1 (int src, rtx x)
 	      for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
 		{
 		  basic_block b = candidate_table[src].update_bbs.first_member[i];
-		  SET_REGNO_REG_SET (DF_LIVE_IN (rtl_df, b), regno + j);
-		  SET_REGNO_REG_SET (DF_LIVE_OUT (rtl_df, b), regno + j);
+		  SET_REGNO_REG_SET (DF_LIVE_IN (df, b), regno + j);
+		  SET_REGNO_REG_SET (DF_LIVE_OUT (df, b), regno + j);
 		}
 	    }
 	}
@@ -1230,8 +1230,8 @@ update_live_1 (int src, rtx x)
 	  for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
 	    {
 	      basic_block b = candidate_table[src].update_bbs.first_member[i];
-	      SET_REGNO_REG_SET (DF_LIVE_IN (rtl_df, b), regno);
-	      SET_REGNO_REG_SET (DF_LIVE_OUT (rtl_df, b), regno);
+	      SET_REGNO_REG_SET (DF_LIVE_IN (df, b), regno);
+	      SET_REGNO_REG_SET (DF_LIVE_OUT (df, b), regno);
 	    }
 	}
     }
@@ -1242,19 +1242,19 @@ update_live_1 (int src, rtx x)
    ready-list or before the scheduling.  */
 
 static int
-check_live (rtx insn, int src)
+check_live (struct df *df, rtx insn, int src)
 {
   /* Find the registers set by instruction.  */
   if (GET_CODE (PATTERN (insn)) == SET
       || GET_CODE (PATTERN (insn)) == CLOBBER)
-    return check_live_1 (src, PATTERN (insn));
+    return check_live_1 (df, src, PATTERN (insn));
   else if (GET_CODE (PATTERN (insn)) == PARALLEL)
     {
       int j;
       for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
 	if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
 	     || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
-	    && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j)))
+	    && !check_live_1 (df, src, XVECEXP (PATTERN (insn), 0, j)))
 	  return 0;
 
       return 1;
@@ -1267,19 +1267,19 @@ check_live (rtx insn, int src)
    block src to trg.  */
 
 static void
-update_live (rtx insn, int src)
+update_live (struct df *df, rtx insn, int src)
 {
   /* Find the registers set by instruction.  */
   if (GET_CODE (PATTERN (insn)) == SET
       || GET_CODE (PATTERN (insn)) == CLOBBER)
-    update_live_1 (src, PATTERN (insn));
+    update_live_1 (df, src, PATTERN (insn));
   else if (GET_CODE (PATTERN (insn)) == PARALLEL)
     {
       int j;
       for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
 	if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
 	    || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
-	  update_live_1 (src, XVECEXP (PATTERN (insn), 0, j));
+	  update_live_1 (df, src, XVECEXP (PATTERN (insn), 0, j));
     }
 }
 
@@ -1511,14 +1511,14 @@ static int sched_n_insns;
 static int last_was_jump;
 
 /* Implementations of the sched_info functions for region scheduling.  */
-static void init_ready_list (struct ready_list *);
-static int can_schedule_ready_p (rtx);
-static int new_ready (rtx);
+static void init_ready_list (struct df *, struct ready_list *);
+static int can_schedule_ready_p (struct df *, rtx);
+static int new_ready (struct df *, rtx);
 static int schedule_more_p (void);
 static const char *rgn_print_insn (rtx, int);
 static int rgn_rank (rtx, rtx);
 static int contributes_to_priority (rtx, rtx);
-static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
+static void compute_jump_reg_dependencies (struct df *, rtx, regset, regset, regset);
 
 /* Return nonzero if there are more insns that should be scheduled.  */
 
@@ -1532,7 +1532,7 @@ schedule_more_p (void)
    once before scheduling a set of insns.  */
 
 static void
-init_ready_list (struct ready_list *ready)
+init_ready_list (struct df *df, struct ready_list *ready)
 {
   rtx prev_head = current_sched_info->prev_head;
   rtx next_tail = current_sched_info->next_tail;
@@ -1606,7 +1606,7 @@ init_ready_list (struct ready_list *read
 		    || ((recog_memoized (insn) < 0
 			 || min_insn_conflict_delay (curr_state,
 						     insn, insn) <= 3)
-			&& check_live (insn, bb_src)
+			&& check_live (df, insn, bb_src)
 			&& is_exception_free (insn, bb_src, target_bb))))
 	      if (INSN_DEP_COUNT (insn) == 0)
 		{
@@ -1624,7 +1624,7 @@ init_ready_list (struct ready_list *read
    insn can be scheduled, nonzero if we should silently discard it.  */
 
 static int
-can_schedule_ready_p (rtx insn)
+can_schedule_ready_p (struct df *df, rtx insn)
 {
   if (JUMP_P (insn))
     last_was_jump = 1;
@@ -1636,9 +1636,9 @@ can_schedule_ready_p (rtx insn)
 
       if (IS_SPECULATIVE_INSN (insn))
 	{
-	  if (!check_live (insn, INSN_BB (insn)))
+	  if (!check_live (df, insn, INSN_BB (insn)))
 	    return 0;
-	  update_live (insn, INSN_BB (insn));
+	  update_live (df, insn, INSN_BB (insn));
 
 	  /* For speculative load, mark insns fed by it.  */
 	  if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn))
@@ -1688,7 +1688,7 @@ can_schedule_ready_p (rtx insn)
    if it should be moved to the ready list or the queue, or zero if we
    should silently discard it.  */
 static int
-new_ready (rtx next)
+new_ready (struct df *df, rtx next)
 {
   /* For speculative insns, before inserting to ready/queue,
      check live, exception-free, and issue-delay.  */
@@ -1698,7 +1698,7 @@ new_ready (rtx next)
 	  || (IS_SPECULATIVE_INSN (next)
 	      && ((recog_memoized (next) >= 0
 		   && min_insn_conflict_delay (curr_state, next, next) > 3)
-		  || !check_live (next, INSN_BB (next))
+		  || !check_live (df, next, INSN_BB (next))
 		  || !is_exception_free (next, INSN_BB (next), target_bb)))))
     return 0;
   return 1;
@@ -1773,7 +1773,8 @@ contributes_to_priority (rtx next, rtx i
    registers that must be considered as set in SET.  */
 
 static void
-compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
+compute_jump_reg_dependencies (struct df *df ATTRIBUTE_UNUSED,
+			       rtx insn ATTRIBUTE_UNUSED,
 			       regset cond_exec ATTRIBUTE_UNUSED,
 			       regset used ATTRIBUTE_UNUSED,
 			       regset set ATTRIBUTE_UNUSED)
@@ -2089,7 +2090,7 @@ propagate_deps (int bb, struct deps *pre
    similar, and the result is interblock dependences in the region.  */
 
 static void
-compute_block_backward_dependences (int bb)
+compute_block_backward_dependences (struct df *df, int bb)
 {
   rtx head, tail;
   struct deps tmp_deps;
@@ -2098,7 +2099,7 @@ compute_block_backward_dependences (int 
 
   /* Do the analysis for this block.  */
   get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
-  sched_analyze (&tmp_deps, head, tail);
+  sched_analyze (df, &tmp_deps, head, tail);
   add_branch_dependences (head, tail);
 
   if (current_nr_blocks > 1)
@@ -2220,7 +2221,7 @@ sched_is_disabled_for_current_region_p (
    scheduled after its flow predecessors.  */
 
 static void
-schedule_region (int rgn)
+schedule_region (struct df *df, int rgn)
 {
   basic_block block;
   edge_iterator ei;
@@ -2247,7 +2248,7 @@ schedule_region (int rgn)
 
   /* Compute LOG_LINKS.  */
   for (bb = 0; bb < current_nr_blocks; bb++)
-    compute_block_backward_dependences (bb);
+    compute_block_backward_dependences (df, bb);
 
   /* Compute INSN_DEPEND.  */
   for (bb = current_nr_blocks - 1; bb >= 0; bb--)
@@ -2355,7 +2356,7 @@ schedule_region (int rgn)
       current_sched_info->queue_must_finish_empty
 	= current_nr_blocks > 1 && !flag_schedule_interblock;
 
-      schedule_block (b, rgn_n_insns);
+      schedule_block (df, b, rgn_n_insns);
       sched_rgn_n_insns += sched_n_insns;
 
       /* Update target block boundaries.  */
@@ -2454,7 +2455,7 @@ init_regions (void)
 /* The one entry point in this file.  */
 
 void
-schedule_insns (void)
+schedule_insns (struct df *df)
 {
   int rgn;
 
@@ -2475,30 +2476,10 @@ schedule_insns (void)
   current_sched_info = &region_sched_info;
   /* Schedule every region in the subroutine.  */
   for (rgn = 0; rgn < nr_regions; rgn++)
-    schedule_region (rgn);
+    schedule_region (df, rgn);
 
-  /* Update life analysis for the subroutine.  Do single block regions
-     first so that we can verify that live_at_start didn't change.  Then
-     do all other blocks.  */
-  /* ??? There is an outside possibility that update_life_info, or more
-     to the point propagate_block, could get called with nonzero flags
-     more than once for one basic block.  This would be kinda bad if it
-     were to happen, since REG_INFO would be accumulated twice for the
-     block, and we'd have twice the REG_DEAD notes.
-
-     I'm fairly certain that this _shouldn't_ happen, since I don't think
-     that live_at_start should change at region heads.  Not sure what the
-     best way to test for this kind of thing...  */
-
-  allocate_reg_life_data ();
   compute_bb_for_insn ();
 
-  /* Don't update reg info after reload, since that affects
-     regs_ever_live, which should not change after reload.  */
-  update_life_info (NULL, UPDATE_LIFE_GLOBAL,
-		    (reload_completed ? PROP_DEATH_NOTES
-		     : PROP_DEATH_NOTES | PROP_REG_INFO));
-
   /* Reposition the prologue and epilogue notes in case we moved the
      prologue/epilogue insns.  */
   if (reload_completed)
@@ -2548,8 +2529,22 @@ rest_of_handle_sched (void)
 #ifdef INSN_SCHEDULING
   /* Do control and data sched analysis,
      and write some of the results to dump file.  */
+  struct df *df = df_init (DF_HARD_REGS);
+  df_lr_add_problem (df, DF_LR_RUN_DCE);
+  df_ur_add_problem (df, 0);
+  df_ri_add_problem (df, DF_RI_LIFE);
+  df_analyze (df);
+
+  schedule_insns (df);
+
+  /* FIXME - temporary hack to rebuild dataflow info after the first
+     round of scheduling.  This will be removed when all passes are
+     responsible for building their own info before running.  */
+  df_clear_flags (df->problems_by_index[DF_LR], DF_LR_RUN_DCE);
+  df_analyze (df);
+  /* end FIXME */
 
-  schedule_insns ();
+  df_finish (df);
 #endif
 }
 
@@ -2568,22 +2563,21 @@ static void
 rest_of_handle_sched2 (void)
 {
 #ifdef INSN_SCHEDULING
-  /* Do control and data sched analysis again,
-     and write some more of the results to dump file.  */
+  struct df *df = df_init (DF_HARD_REGS);
+
+  df_lr_add_problem (df, DF_LR_RUN_DCE);
+  df_ur_add_problem (df, 0);
+  df_ri_add_problem (df, DF_RI_LIFE);
 
   split_all_insns ();
 
+  df_analyze (df);
   if (flag_sched2_use_superblocks || flag_sched2_use_traces)
-    {
-      schedule_ebbs ();
-      /* No liveness updating code yet, but it should be easy to do.
-         reg-stack recomputes the liveness when needed for now.  */
-      count_or_remove_death_notes (NULL, 1);
-      cleanup_cfg (CLEANUP_EXPENSIVE);
-    }
+    schedule_ebbs (df);
   else
-    schedule_insns ();
+    schedule_insns (df);
 
+  df_finish (df);
 #endif
 }
 
Index: passes.c
===================================================================
--- passes.c	(revision 112761)
+++ passes.c	(working copy)
@@ -685,9 +685,9 @@ init_optimization_passes (void)
   NEXT_PASS (pass_reorder_blocks);
   NEXT_PASS (pass_branch_target_load_optimize);
   NEXT_PASS (pass_leaf_regs);
+  NEXT_PASS (pass_clear_df);
   NEXT_PASS (pass_sched2);
   NEXT_PASS (pass_split_before_regstack);
-  NEXT_PASS (pass_clear_df);
   NEXT_PASS (pass_stack_regs);
   NEXT_PASS (pass_compute_alignments);
   NEXT_PASS (pass_duplicate_computed_gotos);
Index: Makefile.in
===================================================================
--- Makefile.in	(revision 112761)
+++ Makefile.in	(working copy)
@@ -2353,7 +2353,7 @@ df-core.o : df-core.c $(CONFIG_H) $(SYST
 df-problems.o : df-problems.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
    $(RTL_H) insn-config.h $(RECOG_H) $(FUNCTION_H) $(REGS_H) alloc-pool.h \
    hard-reg-set.h $(BASIC_BLOCK_H) $(DF_H) bitmap.h sbitmap.h $(TM_P_H) \
-   $(FLAGS_H) output.h
+   $(FLAGS_H) output.h except.h dce.h
 df-scan.o : df-scan.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
    insn-config.h $(RECOG_H) $(FUNCTION_H) $(REGS_H) alloc-pool.h \
    hard-reg-set.h $(BASIC_BLOCK_H) $(DF_H) bitmap.h sbitmap.h $(TM_P_H) \
@@ -2526,7 +2526,8 @@ modulo-sched.o : modulo-sched.c $(DDG_H)
    cfghooks.h $(DF_H) $(GCOV_IO_H) hard-reg-set.h $(TM_H) timevar.h tree-pass.h
 haifa-sched.o : haifa-sched.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
    $(SCHED_INT_H) $(REGS_H) hard-reg-set.h $(FLAGS_H) insn-config.h $(FUNCTION_H) \
-   $(INSN_ATTR_H) toplev.h $(RECOG_H) except.h $(TM_P_H) $(TARGET_H) output.h
+   $(INSN_ATTR_H) toplev.h $(RECOG_H) except.h $(TM_P_H) $(TARGET_H) output.h \
+   $(DF_H)
 sched-deps.o : sched-deps.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
    $(RTL_H) $(SCHED_INT_H) $(REGS_H) hard-reg-set.h $(FLAGS_H) insn-config.h \
    $(FUNCTION_H) $(INSN_ATTR_H) toplev.h $(RECOG_H) except.h cselib.h \
Index: recog.c
===================================================================
--- recog.c	(revision 112775)
+++ recog.c	(working copy)
@@ -2755,17 +2755,7 @@ split_all_insns (void)
 		     allocation, and there are unlikely to be very many
 		     nops then anyways.  */
 		  if (reload_completed)
-		    {
-		      /* If the no-op set has a REG_UNUSED note, we need
-			 to update liveness information.  */
-		      if (find_reg_note (insn, REG_UNUSED, NULL_RTX))
-			{
-			  SET_BIT (blocks, bb->index);
-			  changed = true;
-			}
-		      /* ??? Is life info affected by deleting edges?  */
 		      delete_insn_and_edges (insn);
-		    }
 		}
 	      else
 		{
Index: reg-stack.c
===================================================================
--- reg-stack.c	(revision 112775)
+++ reg-stack.c	(working copy)
@@ -3176,21 +3176,7 @@ static void
 rest_of_handle_stack_regs (void)
 {
 #ifdef STACK_REGS
-#if 0
-  if (reg_to_stack () && optimize)
-    {
-      regstack_completed = 1;
-      if (cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_POST_REGSTACK
-                       | (flag_crossjumping ? CLEANUP_CROSSJUMP : 0))
-          && (flag_reorder_blocks || flag_reorder_blocks_and_partition))
-        {
-          reorder_basic_blocks ();
-          cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_POST_REGSTACK);
-        }
-    }
-#else
   reg_to_stack ();
-#endif
   regstack_completed = 1;
 #endif
 }
