This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.



Re: [dataflow]: Remove GLAT from the scheduler.


Maxim Kuvyrkov wrote:
> Hi!
>
> This patch removes the glat_{start, end} structures from the scheduler.
>
> The drawback of this and the previous scheduler patches on the
> dataflow branch is that they remove the sanity check performed by
> haifa-sched.c: check_reg_live ().  It can be added back later.
>
>
I assume that you did not test this, since ia64 still does not
bootstrap on this branch.  I will test it next week when I come back
from vacation.

Thanks,

Kenny

> Thanks,
> Maxim
> ------------------------------------------------------------------------
>
> 2006-08-11  Maxim Kuvyrkov  <mkuvyrkov@ispras.ru>
>
> 	* sched-ebb.c (ebb_head, ebb_tail, ebb_head_or_leaf_p): Removed.
> 	(begin_schedule_ready, schedule_ebb): Remove unnecessary argument,
> 	update all callers.
> 	(ebb_sched_info): Update initializer.
> 	(df): New static variable to keep dataflow info.
> 	(compute_jump_reg_dependencies): Use it instead of glat.
>
> 	* haifa-sched.c (glat_start, glat_end, glat_size, init_glat,
> 	init_glat1, free_glat): Removed.
> 	(generate_recovery_code, begin_speculative_block,
> 	add_to_speculative_block, init_before_recovery, create_recovery_block,
> 	create_check_block_twin, schedule_block, sched_init, add_block): Remove
> 	unnecessary argument, update all callers.
>
> 	* modulo-sched.c (sms_sched_info): Update initializer.
> 	(sms_schedule): Update call to sched_init ().
>
> 	* sched-int.h (struct sched_info): Remove unnecessary argument from
> 	field 'begin_schedule_ready' and update all callers.  Remove field
> 	'region_head_or_leaf_p'.
> 	(glat_start, glat_end): Remove prototypes.
> 	(enum SCHED_FLAGS): Remove USE_GLAT, DETACH_LIFE_INFO.  Use NEW_BBS
> 	instead.
> 	(schedule_block, sched_init, add_block, schedule_region): Update
> 	prototypes.
> 	
> 	* sched-rgn.c (df, not_in_df): New static variables.
> 	(check_live_1, update_live_1, add_block1): Use them instead of glat.
> 	(begin_schedule_ready, schedule_region): Remove unnecessary argument,
> 	update all callers.
> 	(region_head_or_leaf_p): Remove.
> 	(region_sched_info): Update initializer.
>
> 	* config/ia64/ia64.c (set_sched_flags): Use NEW_BBS instead of
> 	DETACH_LIFE_INFO.
> ------------------------------------------------------------------------
>
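
The core of the change is mechanical: every read of the detached
glat_{start, end} arrays, indexed by block number, becomes a direct
query of the dataflow instance, e.g. glat_start[bb->index] turns into
DF_LIVE_IN (df, bb).  Below is a minimal standalone sketch of that
before/after access pattern; the *_model names and the DF_LIVE_IN_MODEL
macro are illustrative stand-ins, not the real GCC declarations:

  #include <stdio.h>

  /* Illustrative stand-in; GCC's regset is a bitmap head, not a word.  */
  typedef unsigned long regset_model;

  struct df_model
  {
    regset_model *live_in;	/* per-block live-in sets */
  };

  /* Old scheme: a detached copy, indexed by block number.  */
  static regset_model *glat_start_model;

  /* New scheme: ask the dataflow instance directly (the real macro is
     keyed by the basic block, not its index).  */
  #define DF_LIVE_IN_MODEL(DF, INDEX) ((DF)->live_in[(INDEX)])

  int
  main (void)
  {
    regset_model live[2] = { 0x5, 0xa };
    struct df_model df = { live };
    int bb_index = 1;

    glat_start_model = live;	/* roughly what init_glat set up */

    /* Before: bitmap_and (set, glat_start[bb->index], cond_set);  */
    printf ("old: %#lx\n", glat_start_model[bb_index]);
    /* After:  bitmap_and (set, DF_LIVE_IN (df, bb), cond_set);  */
    printf ("new: %#lx\n", DF_LIVE_IN_MODEL (&df, bb_index));
    return 0;
  }
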
> --- sched-ebb.c	(/gcc-local/dataflow/gcc)	(revision 21091)
> +++ sched-ebb.c	(/gcc-local/dataflow-fix/gcc)	(revision 21091)
> @@ -42,7 +42,6 @@ Software Foundation, 51 Franklin Street,
>  #include "sched-int.h"
>  #include "target.h"
>  #include "output.h"
> -
>  
>  /* The number of insns scheduled so far.  */
>  static int sched_n_insns;
> @@ -52,15 +51,13 @@ static int n_insns;
>  
>  /* Set of blocks, that already have their dependencies calculated.  */
>  static bitmap_head dont_calc_deps;
> -/* Set of basic blocks, that are ebb heads or tails respectively.  */
> -static bitmap_head ebb_head, ebb_tail;
>  
>  /* Last basic block in current ebb.  */
>  static basic_block last_bb;
>  
>  /* Implementations of the sched_info functions for region scheduling.  */
>  static void init_ready_list (void);
> -static void begin_schedule_ready (struct df *, rtx, rtx);
> +static void begin_schedule_ready (rtx, rtx);
>  static int schedule_more_p (void);
>  static const char *ebb_print_insn (rtx, int);
>  static int rank (rtx, rtx);
> @@ -68,17 +65,13 @@ static int contributes_to_priority (rtx,
>  static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
>  static basic_block earliest_block_with_similiar_load (basic_block, rtx);
>  static void add_deps_for_risky_insns (rtx, rtx);
> -static basic_block schedule_ebb (struct df *, rtx, rtx);
> +static basic_block schedule_ebb (rtx, rtx);
>  
>  static void add_remove_insn (rtx, int);
>  static void add_block1 (basic_block, basic_block);
>  static basic_block advance_target_bb (basic_block, rtx);
>  static void fix_recovery_cfg (int, int, int);
>  
> -#ifdef ENABLE_CHECKING
> -static int ebb_head_or_leaf_p (basic_block, int);
> -#endif
> -
>  /* Return nonzero if there are more insns that should be scheduled.  */
>  
>  static int
> @@ -119,7 +112,7 @@ init_ready_list (void)
>  
>  /* INSN is being scheduled after LAST.  Update counters.  */
>  static void
> -begin_schedule_ready (struct df *df, rtx insn, rtx last)
> +begin_schedule_ready (rtx insn, rtx last)
>  {
>    sched_n_insns++;
>  
> @@ -176,7 +169,7 @@ begin_schedule_ready (struct df *df, rtx
>        current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
>        gcc_assert (current_sched_info->next_tail);
>  
> -      add_block (df, bb, last_bb);
> +      add_block (bb, last_bb);
>        gcc_assert (last_bb == bb);
>      }
>  }
> @@ -225,6 +218,8 @@ contributes_to_priority (rtx next ATTRIB
>    return 1;
>  }
>  
> +static struct df *df;
> +
>   /* INSN is a JUMP_INSN, COND_SET is the set of registers that are
>      conditionally set before INSN.  Store the set of registers that
>      must be considered as used by this jump in USED and that of
> @@ -245,9 +240,9 @@ compute_jump_reg_dependencies (rtx insn,
>  	 it may guard the fallthrough block from using a value that has
>  	 conditionally overwritten that of the main codepath.  So we
>  	 consider that it restores the value of the main codepath.  */
> -      bitmap_and (set, glat_start [e->dest->index], cond_set);
> +      bitmap_and (set, DF_LIVE_IN (df, e->dest), cond_set);
>      else
> -      bitmap_ior_into (used, glat_start [e->dest->index]);
> +      bitmap_ior_into (used, DF_LIVE_IN (df, e->dest));
>  }
>  
>  /* Used in schedule_insns to initialize current_sched_info for scheduling
> @@ -273,12 +268,9 @@ static struct sched_info ebb_sched_info 
>    add_block1,
>    advance_target_bb,
>    fix_recovery_cfg,
> -#ifdef ENABLE_CHECKING
> -  ebb_head_or_leaf_p,
> -#endif
> -  /* We need DETACH_LIFE_INFO to be able to create new basic blocks.
> -     See begin_schedule_ready ().  */
> -  SCHED_EBB | USE_GLAT | DETACH_LIFE_INFO
> +  SCHED_EBB
> +  /* We can create new blocks in begin_schedule_ready ().  */
> +  | NEW_BBS
>  };
>  
>  /* Returns the earliest block in EBB currently being processed where a
> @@ -434,7 +426,7 @@ add_deps_for_risky_insns (rtx head, rtx 
>     and TAIL.  */
>  
>  static basic_block
> -schedule_ebb (struct df *df, rtx head, rtx tail)
> +schedule_ebb (rtx head, rtx tail)
>  {
>    basic_block first_bb, target_bb;
>    struct deps tmp_deps;
> @@ -510,7 +502,7 @@ schedule_ebb (struct df *df, rtx head, r
>    current_sched_info->queue_must_finish_empty = 1;
>  
>    target_bb = first_bb;
> -  schedule_block (df, &target_bb, n_insns);
> +  schedule_block (&target_bb, n_insns);
>  
>    /* We might pack all instructions into fewer blocks,
>       so we may have made some of them empty.  Can't assert (b == last_bb).  */
> @@ -543,7 +535,6 @@ schedule_ebbs (void)
>    basic_block bb;
>    int probability_cutoff;
>    rtx tail;
> -  struct df *df;
>  
>    if (profile_info && flag_branch_probabilities)
>      probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
> @@ -565,17 +556,13 @@ schedule_ebbs (void)
>    df_live_add_problem (df, 0);
>    df_ri_add_problem (df, DF_RI_LIFE);
>    df_analyze (df);
> -  sched_init (df);
> +  sched_init ();
>  
>    compute_bb_for_insn ();
>  
>    /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
>    bitmap_initialize (&dont_calc_deps, 0);
>    bitmap_clear (&dont_calc_deps);
> -  bitmap_initialize (&ebb_head, 0);
> -  bitmap_clear (&ebb_head);
> -  bitmap_initialize (&ebb_tail, 0);
> -  bitmap_clear (&ebb_tail);
>  
>    /* Schedule every region in the subroutine.  */
>    FOR_EACH_BB (bb)
> @@ -614,20 +601,13 @@ schedule_ebbs (void)
>  	    break;
>  	}
>  
> -      bitmap_set_bit (&ebb_head, BLOCK_NUM (head));
> -      bb = schedule_ebb (df, head, tail);
> -      bitmap_set_bit (&ebb_tail, bb->index);
> +      bb = schedule_ebb (head, tail);
>      }
>    bitmap_clear (&dont_calc_deps);
>  
> -  gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO);
> -
>    /* Updating register live information.  */
>    allocate_reg_life_data ();
>  
> -  bitmap_clear (&ebb_head);
> -  bitmap_clear (&ebb_tail);
> -
>    /* Reposition the prologue and epilogue notes in case we moved the
>       prologue/epilogue insns.  */
>    if (reload_completed)
> @@ -699,17 +679,3 @@ fix_recovery_cfg (int bbi ATTRIBUTE_UNUS
>    if (jump_bb_nexti == last_bb->index)
>      last_bb = BASIC_BLOCK (jump_bbi);
>  }
> -
> -#ifdef ENABLE_CHECKING
> -/* Return non zero, if BB is first or last (depending of LEAF_P) block in
> -   current ebb.  For more information please refer to
> -   sched-int.h: struct sched_info: region_head_or_leaf_p.  */
> -static int
> -ebb_head_or_leaf_p (basic_block bb, int leaf_p)
> -{
> -  if (!leaf_p)    
> -    return bitmap_bit_p (&ebb_head, bb->index);
> -  else
> -    return bitmap_bit_p (&ebb_tail, bb->index);
> -}
> -#endif /* ENABLE_CHECKING  */
> --- haifa-sched.c	(/gcc-local/dataflow/gcc)	(revision 21091)
> +++ haifa-sched.c	(/gcc-local/dataflow-fix/gcc)	(revision 21091)
> @@ -220,10 +220,6 @@ static bool added_recovery_block_p;
>  /* Counters of different types of speculative instructions.  */
>  static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
>  
> -/* Pointers to GLAT data.  See init_glat for more information.  */
> -regset *glat_start, *glat_end;
> -int glat_size;
> -
>  /* Array used in {unlink, restore}_bb_notes.  */
>  static rtx *bb_header = 0;
>  
> @@ -568,15 +564,15 @@ static void extend_ready (int);
>  static void extend_global (rtx);
>  static void extend_all (rtx);
>  static void init_h_i_d (rtx);
> -static void generate_recovery_code (struct df *, rtx);
> +static void generate_recovery_code (rtx);
>  static void process_insn_depend_be_in_spec (rtx, rtx, ds_t);
> -static void begin_speculative_block (struct df *, rtx);
> -static void add_to_speculative_block (struct df *, rtx);
> +static void begin_speculative_block (rtx);
> +static void add_to_speculative_block (rtx);
>  static dw_t dep_weak (ds_t);
>  static edge find_fallthru_edge (basic_block);
> -static void init_before_recovery (struct df *);
> -static basic_block create_recovery_block (struct df *);
> -static void create_check_block_twin (struct df *, rtx, bool);
> +static void init_before_recovery (void);
> +static basic_block create_recovery_block (void);
> +static void create_check_block_twin (rtx, bool);
>  static void fix_recovery_deps (basic_block);
>  static void associate_line_notes_with_blocks (basic_block);
>  static void change_pattern (rtx, rtx);
> @@ -587,9 +583,6 @@ static void extend_bb (basic_block);
>  static void fix_jump_move (rtx);
>  static void move_block_after_check (rtx);
>  static void move_succs (VEC(edge,gc) **, basic_block);
> -static void init_glat (struct df *);
> -static void init_glat1 (struct df *, basic_block);
> -static void free_glat (void);
>  static void sched_remove_insn (rtx);
>  static void clear_priorities (rtx);
>  static void add_jump_dependencies (rtx, rtx);
> @@ -2261,7 +2254,7 @@ choose_ready (struct ready_list *ready)
>     region.  */
>  
>  void
> -schedule_block (struct df *df, basic_block *target_bb, int rgn_n_insns1)
> +schedule_block (basic_block *target_bb, int rgn_n_insns1)
>  {
>    struct ready_list ready;
>    int i, first_cycle_insn_p;
> @@ -2530,7 +2523,7 @@ schedule_block (struct df *df, basic_blo
>  	  /* DECISION is made.  */	
>    
>            if (TODO_SPEC (insn) & SPECULATIVE)
> -            generate_recovery_code (df, insn);
> +            generate_recovery_code (insn);
>  
>  	  if (control_flow_insn_p (last_scheduled_insn)	     
>  	      /* This is used to switch basic blocks by request
> @@ -2556,7 +2549,7 @@ schedule_block (struct df *df, basic_blo
>  	    }
>   
>  	  /* Update counters, etc in the scheduler's front end.  */
> -	  (*current_sched_info->begin_schedule_ready) (df, insn,
> +	  (*current_sched_info->begin_schedule_ready) (insn,
>  						       last_scheduled_insn);
>   
>  	  move_insn (insn);
> @@ -2757,7 +2750,7 @@ static int luid;
>  /* Initialize some global state for the scheduler.  */
>  
>  void
> -sched_init (struct df *df)
> +sched_init (void)
>  {
>    basic_block b;
>    rtx insn;
> @@ -2862,14 +2855,8 @@ sched_init (struct df *df)
>  
>    line_note_head = 0;
>    old_last_basic_block = 0;
> -  glat_start = NULL;  
> -  glat_end = NULL;
> -  glat_size = 0;
>    extend_bb (0);
>  
> -  if (current_sched_info->flags & USE_GLAT)
> -    init_glat (df);
> -
>    /* Compute INSN_REG_WEIGHT for all blocks.  We must do this before
>       removing death notes.  */
>    FOR_EACH_BB_REVERSE (b)
> @@ -2898,7 +2885,7 @@ sched_finish (void)
>    free_dependency_caches ();
>    end_alias_analysis ();
>    free (line_note_head);
> -  free_glat ();
> +
>    if (targetm.sched.md_finish_global)
>      targetm.sched.md_finish_global (sched_dump, sched_verbose);
>    
> @@ -3348,16 +3335,16 @@ init_h_i_d (rtx insn)
>  
>  /* Generates recovery code for INSN.  */
>  static void
> -generate_recovery_code (struct df *df, rtx insn)
> +generate_recovery_code (rtx insn)
>  {
>    if (TODO_SPEC (insn) & BEGIN_SPEC)
> -    begin_speculative_block (df, insn);
> +    begin_speculative_block (insn);
>    
>    /* Here we have insn with no dependencies to
>       instructions other than CHECK_SPEC ones.  */
>    
>    if (TODO_SPEC (insn) & BE_IN_SPEC)
> -    add_to_speculative_block (df, insn);
> +    add_to_speculative_block (insn);
>  }
>  
>  /* Helper function.
> @@ -3406,21 +3393,21 @@ process_insn_depend_be_in_spec (rtx link
>  
>  /* Generates recovery code for BEGIN speculative INSN.  */
>  static void
> -begin_speculative_block (struct df *df, rtx insn)
> +begin_speculative_block (rtx insn)
>  {
>    if (TODO_SPEC (insn) & BEGIN_DATA)
>      nr_begin_data++;      
>    if (TODO_SPEC (insn) & BEGIN_CONTROL)
>      nr_begin_control++;
>  
> -  create_check_block_twin (df, insn, false);
> +  create_check_block_twin (insn, false);
>  
>    TODO_SPEC (insn) &= ~BEGIN_SPEC;
>  }
>  
>  /* Generates recovery code for BE_IN speculative INSN.  */
>  static void
> -add_to_speculative_block (struct df *df, rtx insn)
> +add_to_speculative_block (rtx insn)
>  {
>    ds_t ts;
>    rtx link, twins = NULL;
> @@ -3447,7 +3434,7 @@ add_to_speculative_block (struct df *df,
>  
>        if (RECOVERY_BLOCK (check))
>  	{
> -	  create_check_block_twin (df, check, true);
> +	  create_check_block_twin (check, true);
>  	  link = LOG_LINKS (insn);
>  	}
>        else
> @@ -3621,7 +3608,7 @@ find_fallthru_edge (basic_block pred)
>  
>  /* Initialize BEFORE_RECOVERY variable.  */
>  static void
> -init_before_recovery (struct df *df)
> +init_before_recovery (void)
>  {
>    basic_block last;
>    edge e;
> @@ -3663,8 +3650,8 @@ init_before_recovery (struct df *df)
>            
>        emit_barrier_after (x);
>  
> -      add_block (df, empty, 0);
> -      add_block (df, single, 0);
> +      add_block (empty, 0);
> +      add_block (single, 0);
>  
>        before_recovery = single;
>  
> @@ -3679,7 +3666,7 @@ init_before_recovery (struct df *df)
>  
>  /* Returns new recovery block.  */
>  static basic_block
> -create_recovery_block (struct df *df)
> +create_recovery_block (void)
>  {
>    rtx label;
>    basic_block rec;
> @@ -3687,7 +3674,7 @@ create_recovery_block (struct df *df)
>    added_recovery_block_p = true;
>  
>    if (!before_recovery)
> -    init_before_recovery (df);
> +    init_before_recovery ();
>   
>    label = gen_label_rtx ();
>    gcc_assert (BARRIER_P (NEXT_INSN (BB_END (before_recovery))));
> @@ -3711,7 +3698,7 @@ create_recovery_block (struct df *df)
>  /* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
>     INSN is a simple check, that should be converted to branchy one.  */
>  static void
> -create_check_block_twin (struct df *df, rtx insn, bool mutate_p)
> +create_check_block_twin (rtx insn, bool mutate_p)
>  {
>    basic_block rec;
>    rtx label, check, twin, link;
> @@ -3725,7 +3712,7 @@ create_check_block_twin (struct df *df, 
>    /* Create recovery block.  */
>    if (mutate_p || targetm.sched.needs_block_p (insn))
>      {
> -      rec = create_recovery_block (df);
> +      rec = create_recovery_block ();
>        label = BB_HEAD (rec);
>      }
>    else
> @@ -3819,7 +3806,7 @@ create_check_block_twin (struct df *df, 
>        
>        e = make_edge (first_bb, rec, edge_flags);
>  
> -      add_block (df, second_bb, first_bb);
> +      add_block (second_bb, first_bb);
>        
>        gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb)));
>        label = block_label (second_bb);
> @@ -3850,7 +3837,7 @@ create_check_block_twin (struct df *df, 
>        
>        make_single_succ_edge (rec, second_bb, edge_flags);  
>        
> -      add_block (df, rec, EXIT_BLOCK_PTR);
> +      add_block (rec, EXIT_BLOCK_PTR);
>      }
>  
>    /* Move backward dependences from INSN to CHECK and 
> @@ -4245,19 +4232,6 @@ extend_bb (basic_block bb)
>    
>    old_last_basic_block = last_basic_block;
>  
> -  if (current_sched_info->flags & USE_GLAT)
> -    {
> -      glat_start = xrealloc (glat_start,
> -                             last_basic_block * sizeof (*glat_start));
> -      glat_end = xrealloc (glat_end, last_basic_block * sizeof (*glat_end));
> -
> -      memset (glat_start + glat_size, 0, 
> -	      (last_basic_block - glat_size) * sizeof (*glat_start));
> -      memset (glat_end + glat_size, 0, 
> -	      (last_basic_block - glat_size) * sizeof (*glat_end));
> -      glat_size = last_basic_block;
> -    }
> -
>    /* The following is done to keep current_sched_info->next_tail non null.  */
>  
>    insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
> @@ -4277,13 +4251,11 @@ extend_bb (basic_block bb)
>     If EBB is EXIT_BLOCK_PTR, then BB is recovery block.
>     If EBB is NULL, then BB should be a new region.  */
>  void
> -add_block (struct df *df ATTRIBUTE_UNUSED, basic_block bb, basic_block ebb)
> +add_block (basic_block bb, basic_block ebb)
>  {
> -  gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO);
> -  extend_bb (bb); 
> +  gcc_assert (current_sched_info->flags & NEW_BBS);
>  
> -  glat_start[bb->index] = 0;
> -  glat_end[bb->index] = 0;
> +  extend_bb (bb);
>  
>    if (current_sched_info->add_block)
>      /* This changes only data structures of the front-end.  */
> @@ -4373,54 +4345,6 @@ move_succs (VEC(edge,gc) **succsp, basic
>    *succsp = 0;
>  }
>  
> -/* Initialize GLAT (global_live_at_{start, end}) structures.  GLAT
> -   structures are used to substitute the df live regsets during
> -   scheduling.  */
> -static void
> -init_glat (struct df *df)
> -{
> -  basic_block bb;
> -  FOR_ALL_BB (bb)
> -    init_glat1 (df, bb);
> -}
> -
> -/* Helper function for init_glat.  */
> -static void
> -init_glat1 (struct df *df, basic_block bb)
> -{
> -  glat_start[bb->index] =  DF_LIVE_IN (df, bb);
> -  glat_end[bb->index] =  DF_LIVE_OUT (df, bb);
> -  
> -  if (current_sched_info->flags & DETACH_LIFE_INFO)
> -    {
> -      DF_LIVE_IN (df, bb) = NULL;
> -      DF_LIVE_OUT (df, bb) = NULL;
> -    }
> -}
> -
> -/* Free GLAT information.  */
> -static void
> -free_glat (void)
> -{
> -  if (glat_start)
> -    {
> -      if (current_sched_info->flags & DETACH_LIFE_INFO)
> -	{
> -	  basic_block bb;
> -	  
> -	  FOR_ALL_BB (bb)
> -	    {
> -	      if (glat_start[bb->index])
> -		FREE_REG_SET (glat_start[bb->index]);
> -	      if (glat_end[bb->index])
> -		FREE_REG_SET (glat_end[bb->index]);
> -	    }
> -	}
> -      free (glat_start);
> -      free (glat_end);
> -    }
> -}
> -
>  /* Remove INSN from the instruction stream.
>     INSN should have any dependencies.  */
>  static void
> @@ -4652,13 +4576,9 @@ check_sched_flags (void)
>      gcc_assert (!(f & DO_SPECULATION));
>    if (f & DO_SPECULATION)
>      gcc_assert (!flag_sched_stalled_insns
> -		&& (f & DETACH_LIFE_INFO)
>  		&& spec_info
>  		&& spec_info->mask);
> -  if (f & DETACH_LIFE_INFO)
> -    gcc_assert (f & USE_GLAT);
>  }
> -
>  #endif /* ENABLE_CHECKING */
>  
>  #endif /* INSN_SCHEDULING */
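
To make the haifa-sched.c deletion above easier to review: init_glat1
implemented a pointer steal under DETACH_LIFE_INFO, moving each block's
DF_LIVE_IN/DF_LIVE_OUT regset into the glat arrays and NULLing the df
fields so that CFG updates could not clobber them, and free_glat
released whatever was still owned at the end.  Here is a toy model of
that ownership transfer, with illustrative names rather than the real
GCC API:

  #include <stdlib.h>

  #define N_BLOCKS_MODEL 4

  typedef struct regset_model { unsigned long bits; } regset_model;

  /* Stand-ins for per-block DF_LIVE_IN storage and the glat array.  */
  static regset_model *live_in_model[N_BLOCKS_MODEL];
  static regset_model *glat_start_model[N_BLOCKS_MODEL];

  /* Mirrors init_glat1 under DETACH_LIFE_INFO: move the set, then
     detach it from df so later CFG updates cannot clobber it.  */
  static void
  detach_model (int bb_index)
  {
    glat_start_model[bb_index] = live_in_model[bb_index];
    live_in_model[bb_index] = NULL;
  }

  /* Mirrors free_glat: release whatever is still owned.  */
  static void
  free_glat_model (void)
  {
    int i;

    for (i = 0; i < N_BLOCKS_MODEL; i++)
      free (glat_start_model[i]);
  }

  int
  main (void)
  {
    int i;

    for (i = 0; i < N_BLOCKS_MODEL; i++)
      live_in_model[i] = calloc (1, sizeof (regset_model));
    for (i = 0; i < N_BLOCKS_MODEL; i++)
      detach_model (i);
    free_glat_model ();
    return 0;
  }
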
> --- modulo-sched.c	(/gcc-local/dataflow/gcc)	(revision 21091)
> +++ modulo-sched.c	(/gcc-local/dataflow-fix/gcc)	(revision 21091)
> @@ -259,9 +259,6 @@ static struct sched_info sms_sched_info 
>    0, 0, 0,
>  
>    NULL, NULL, NULL, NULL, NULL,
> -#ifdef ENABLE_CHECKING
> -  NULL,
> -#endif
>    0
>  };
>  
> @@ -939,7 +936,7 @@ sms_schedule (void)
>    df_ri_add_problem (df, DF_RI_LIFE);
>    df_chain_add_problem (df, DF_DU_CHAIN | DF_UD_CHAIN);
>    df_analyze (df);
> -  sched_init (df);
> +  sched_init ();
>  
>    /* Allocate memory to hold the DDG array one entry for each loop.
>       We use loop->num as index into this array.  */
> --- sched-int.h	(/gcc-local/dataflow/gcc)	(revision 21091)
> +++ sched-int.h	(/gcc-local/dataflow-fix/gcc)	(revision 21091)
> @@ -199,7 +199,7 @@ struct sched_info
>    /* Called to notify frontend that instruction is being scheduled.
>       The first parameter - instruction to scheduled, the second parameter -
>       last scheduled instruction.  */
> -  void (*begin_schedule_ready) (struct df *, rtx, rtx);
> +  void (*begin_schedule_ready) (rtx, rtx);
>  
>    /* Called to notify frontend, that new basic block is being added.
>       The first parameter - new basic block.
> @@ -222,16 +222,6 @@ struct sched_info
>       parameter.  */
>    void (*fix_recovery_cfg) (int, int, int);
>  
> -#ifdef ENABLE_CHECKING
> -  /* If the second parameter is zero, return nonzero, if block is head of the
> -     region.
> -     If the second parameter is nonzero, return nonzero, if block is leaf of
> -     the region.
> -     global_live_at_start should not change in region heads and
> -     global_live_at_end should not change in region leafs due to scheduling.  */
> -  int (*region_head_or_leaf_p) (basic_block, int);
> -#endif
> -
>    /* ??? FIXME: should use straight bitfields inside sched_info instead of
>       this flag field.  */
>    unsigned int flags;
> @@ -337,10 +327,6 @@ struct haifa_insn_data
>  };
>  
>  extern struct haifa_insn_data *h_i_d;
> -/* Used only if (current_sched_info->flags & USE_GLAT) != 0.
> -   These regsets store global_live_at_{start, end} information
> -   for each basic block.  */
> -extern regset *glat_start, *glat_end;
>  
>  /* Accessor macros for h_i_d.  There are more in haifa-sched.c and
>     sched-rgn.c.  */
> @@ -490,13 +476,8 @@ enum SCHED_FLAGS {
>    DO_SPECULATION = USE_DEPS_LIST << 1,
>    SCHED_RGN = DO_SPECULATION << 1,
>    SCHED_EBB = SCHED_RGN << 1,
> -  /* Detach register live information from basic block headers.
> -     This is necessary to invoke functions, that change CFG (e.g. split_edge).
> -     Requires USE_GLAT.  */
> -  DETACH_LIFE_INFO = SCHED_EBB << 1,
> -  /* Save register live information from basic block headers to
> -     glat_{start, end} arrays.  */
> -  USE_GLAT = DETACH_LIFE_INFO << 1
> +  /* Scheduler can possibly create new basic blocks.  Used for assertions.  */
> +  NEW_BBS = SCHED_EBB << 1
>  };
>  
>  enum SPEC_SCHED_FLAGS {
> @@ -629,13 +610,13 @@ extern void rm_other_notes (rtx, rtx);
>  extern int insn_cost (rtx, rtx, rtx);
>  extern int set_priorities (rtx, rtx);
>  
> -extern void schedule_block (struct df *, basic_block *, int);
> -extern void sched_init (struct df *);
> +extern void schedule_block (basic_block *, int);
> +extern void sched_init (void);
>  extern void sched_finish (void);
>  
>  extern int try_ready (rtx);
>  extern void * xrecalloc (void *, size_t, size_t, size_t);
>  extern void unlink_bb_notes (basic_block, basic_block);
> -extern void add_block (struct df *, basic_block, basic_block);
> +extern void add_block (basic_block, basic_block);
>  
>  #endif /* GCC_SCHED_INT_H */
> --- sched-rgn.c	(/gcc-local/dataflow/gcc)	(revision 21091)
> +++ sched-rgn.c	(/gcc-local/dataflow-fix/gcc)	(revision 21091)
> @@ -280,7 +280,7 @@ static void compute_block_backward_depen
>  void debug_dependencies (void);
>  
>  static void init_regions (void);
> -static void schedule_region (struct df *, int);
> +static void schedule_region (int);
>  static rtx concat_INSN_LIST (rtx, rtx);
>  static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
>  static void propagate_deps (int, struct deps *);
> @@ -1490,6 +1490,9 @@ debug_candidates (int trg)
>  
>  /* Functions for speculative scheduling.  */
>  
> +static struct df *df;
> +static bitmap_head not_in_df;
> +
>  /* Return 0 if x is a set of a register alive in the beginning of one
>     of the split-blocks of src, otherwise return 1.  */
>  
> @@ -1541,18 +1544,15 @@ check_live_1 (int src, rtx x)
>  	      for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
>  		{
>  		  basic_block b = candidate_table[src].split_bbs.first_member[i];
> +		  int t = bitmap_bit_p (&not_in_df, b->index);
>  
>  		  /* We can have split blocks, that were recently generated.
>  		     such blocks are always outside current region.  */
> -		  gcc_assert (glat_start[b->index]
> -			      || CONTAINING_RGN (b->index)
> -			      != CONTAINING_RGN (BB_TO_BLOCK (src)));
> -		  if (!glat_start[b->index]
> -		      || REGNO_REG_SET_P (glat_start[b->index],
> -					  regno + j))
> -		    {
> -		      return 0;
> -		    }
> +		  gcc_assert (!t || (CONTAINING_RGN (b->index)
> +				     != CONTAINING_RGN (BB_TO_BLOCK (src))));
> +
> +		  if (t || REGNO_REG_SET_P (DF_LIVE_IN (df, b), regno + j))
> +		    return 0;
>  		}
>  	    }
>  	}
> @@ -1562,15 +1562,13 @@ check_live_1 (int src, rtx x)
>  	  for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
>  	    {
>  	      basic_block b = candidate_table[src].split_bbs.first_member[i];
> +	      int t = bitmap_bit_p (&not_in_df, b->index);
>  
> -	      gcc_assert (glat_start[b->index]
> -			  || CONTAINING_RGN (b->index)
> -			  != CONTAINING_RGN (BB_TO_BLOCK (src)));
> -	      if (!glat_start[b->index]
> -		  || REGNO_REG_SET_P (glat_start[b->index], regno))
> -		{
> -		  return 0;
> -		}
> +	      gcc_assert (!t || (CONTAINING_RGN (b->index)
> +				 != CONTAINING_RGN (BB_TO_BLOCK (src))));
> +
> +	      if (t || REGNO_REG_SET_P (DF_LIVE_IN (df, b), regno))
> +		return 0;
>  	    }
>  	}
>      }
> @@ -1626,7 +1624,7 @@ update_live_1 (int src, rtx x)
>  		{
>  		  basic_block b = candidate_table[src].update_bbs.first_member[i];
>  
> -		  SET_REGNO_REG_SET (glat_start[b->index], regno + j);
> +		  SET_REGNO_REG_SET (DF_LIVE_IN (df, b), regno + j);
>  		}
>  	    }
>  	}
> @@ -1636,7 +1634,7 @@ update_live_1 (int src, rtx x)
>  	    {
>  	      basic_block b = candidate_table[src].update_bbs.first_member[i];
>  
> -	      SET_REGNO_REG_SET (glat_start[b->index], regno);
> +	      SET_REGNO_REG_SET (DF_LIVE_IN (df, b), regno);
>  	    }
>  	}
>      }
> @@ -1916,7 +1914,7 @@ static int sched_n_insns;
>  /* Implementations of the sched_info functions for region scheduling.  */
>  static void init_ready_list (void);
>  static int can_schedule_ready_p (rtx);
> -static void begin_schedule_ready (struct df *, rtx, rtx);
> +static void begin_schedule_ready (rtx, rtx);
>  static ds_t new_ready (rtx, ds_t);
>  static int schedule_more_p (void);
>  static const char *rgn_print_insn (rtx, int);
> @@ -1930,9 +1928,6 @@ static void extend_regions (void);
>  static void add_block1 (basic_block, basic_block);
>  static void fix_recovery_cfg (int, int, int);
>  static basic_block advance_target_bb (basic_block, rtx);
> -#ifdef ENABLE_CHECKING
> -static int region_head_or_leaf_p (basic_block, int);
> -#endif
>  
>  /* Return nonzero if there are more insns that should be scheduled.  */
>  
> @@ -2031,8 +2026,7 @@ can_schedule_ready_p (rtx insn)
>     can_schedule_ready_p () differs from the one passed to
>     begin_schedule_ready ().  */
>  static void
> -begin_schedule_ready (struct df *df ATTRIBUTE_UNUSED, 
> -		      rtx insn, rtx last ATTRIBUTE_UNUSED)
> +begin_schedule_ready (rtx insn, rtx last ATTRIBUTE_UNUSED)
>  {
>    /* An interblock motion?  */
>    if (INSN_BB (insn) != target_bb)
> @@ -2199,13 +2193,7 @@ static struct sched_info region_sched_in
>    add_block1,
>    advance_target_bb,
>    fix_recovery_cfg,
> -#ifdef ENABLE_CHECKING
> -  region_head_or_leaf_p,
> -#endif
> -  SCHED_RGN | USE_GLAT
> -#ifdef ENABLE_CHECKING
> -  | DETACH_LIFE_INFO
> -#endif
> +  SCHED_RGN
>  };
>  
>  /* Determine if PAT sets a CLASS_LIKELY_SPILLED_P register.  */
> @@ -2629,7 +2617,7 @@ sched_is_disabled_for_current_region_p (
>     scheduled after its flow predecessors.  */
>  
>  static void
> -schedule_region (struct df *df, int rgn)
> +schedule_region (int rgn)
>  {
>    basic_block block;
>    edge_iterator ei;
> @@ -2812,7 +2800,7 @@ schedule_region (struct df *df, int rgn)
>        current_sched_info->queue_must_finish_empty = current_nr_blocks == 1;
>  
>        curr_bb = first_bb;
> -      schedule_block (df, &curr_bb, rgn_n_insns);
> +      schedule_block (&curr_bb, rgn_n_insns);
>        gcc_assert (EBB_FIRST_BB (bb) == first_bb);
>        sched_rgn_n_insns += sched_n_insns;
>  
> @@ -2897,7 +2885,6 @@ void
>  schedule_insns (void)
>  {
>    int rgn;
> -  struct df *df;
>  
>    /* Taking care of this degenerate case makes the rest of
>       this code simpler.  */
> @@ -2916,7 +2903,11 @@ schedule_insns (void)
>    df_live_add_problem (df, 0);
>    df_ri_add_problem (df, DF_RI_LIFE);
>    df_analyze (df);
> -  sched_init (df);
> +
> +  sched_init ();
> +
> +  bitmap_initialize (&not_in_df, 0);
> +  bitmap_clear (&not_in_df);
>  
>    min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
>  		    / 100);
> @@ -2929,7 +2920,7 @@ schedule_insns (void)
>    
>    /* Schedule every region in the subroutine.  */
>    for (rgn = 0; rgn < nr_regions; rgn++)
> -    schedule_region (df, rgn);
> +    schedule_region (rgn);
>    
>    free(ebb_head);
>    /* Reposition the prologue and epilogue notes in case we moved the
> @@ -2960,6 +2951,8 @@ schedule_insns (void)
>    free (block_to_bb);
>    free (containing_rgn);
>  
> +  bitmap_clear (&not_in_df);
> +
>    sched_finish ();
>  }
>  
> @@ -2997,6 +2990,8 @@ add_block1 (basic_block bb, basic_block 
>  {
>    extend_regions ();
>  
> +  bitmap_set_bit (&not_in_df, bb->index);
> +
>    if (after == 0 || after == EXIT_BLOCK_PTR)
>      {
>        int i;
> @@ -3109,35 +3104,6 @@ advance_target_bb (basic_block bb, rtx i
>    return bb->next_bb;
>  }
>  
> -#ifdef ENABLE_CHECKING
> -/* Return non zero, if BB is head or leaf (depending of LEAF_P) block in
> -   current region.  For more information please refer to
> -   sched-int.h: struct sched_info: region_head_or_leaf_p.  */
> -static int
> -region_head_or_leaf_p (basic_block bb, int leaf_p)
> -{
> -  if (!leaf_p)    
> -    return bb->index == rgn_bb_table[RGN_BLOCKS (CONTAINING_RGN (bb->index))];
> -  else
> -    {
> -      int i;
> -      edge e;
> -      edge_iterator ei;
> -      
> -      i = CONTAINING_RGN (bb->index);
> -
> -      FOR_EACH_EDGE (e, ei, bb->succs)
> -	if (e->dest != EXIT_BLOCK_PTR
> -            && CONTAINING_RGN (e->dest->index) == i
> -	    /* except self-loop.  */
> -	    && e->dest != bb)
> -	  return 0;
> -      
> -      return 1;
> -    }
> -}
> -#endif /* ENABLE_CHECKING  */
> -
>  #endif
>  
>  static bool
> --- config/ia64/ia64.c	(/gcc-local/dataflow/gcc)	(revision 21091)
> +++ config/ia64/ia64.c	(/gcc-local/dataflow-fix/gcc)	(revision 21091)
> @@ -6733,11 +6733,12 @@ ia64_set_sched_flags (spec_info_t spec_i
>  	    mask |= BE_IN_CONTROL;
>  	}
>  
> -      gcc_assert (*flags & USE_GLAT);
> -
>        if (mask)
>  	{
> -	  *flags |= USE_DEPS_LIST | DETACH_LIFE_INFO | DO_SPECULATION;
> +	  *flags |= USE_DEPS_LIST | DO_SPECULATION;
> +
> +	  if (mask & BE_IN_SPEC)
> +	    *flags |= NEW_BBS;
>  	  
>  	  spec_info->mask = mask;
>  	  spec_info->flags = 0;
>   
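
One idiom from the sched-rgn.c hunks is worth a note: a block created
during scheduling has no dataflow information yet, so add_block1 records
it in the new not_in_df bitmap and check_live_1 treats membership as
"live info unavailable", conservatively refusing the code motion.  A
minimal sketch of that guard, again with illustrative *_model names:

  #include <stdbool.h>
  #include <stdio.h>

  #define MAX_BLOCKS_MODEL 64

  /* Stand-in for the not_in_df bitmap added to sched-rgn.c.  */
  static bool not_in_df_model[MAX_BLOCKS_MODEL];

  /* Mirrors add_block1: a block created mid-scheduling has no df info.  */
  static void
  add_block_model (int bb_index)
  {
    not_in_df_model[bb_index] = true;
  }

  /* Mirrors the reworked check_live_1 guard: with no DF_LIVE_IN for a
     freshly created block, answer "not provably safe" (0) rather than
     read missing live data.  */
  static int
  reg_safe_in_block_model (int bb_index, bool reg_is_live_in)
  {
    if (not_in_df_model[bb_index] || reg_is_live_in)
      return 0;
    return 1;
  }

  int
  main (void)
  {
    add_block_model (3);
    printf ("%d %d %d\n",
            reg_safe_in_block_model (3, false),   /* 0: no df info yet */
            reg_safe_in_block_model (1, true),    /* 0: reg is live-in */
            reg_safe_in_block_model (1, false));  /* 1: safe */
    return 0;
  }
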

