This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

[sel-sched] Fixes from the first scheduler testing and the x86-64 testing


Hello,

The two patches fix various corner cases found during testing on x86-64 (for which I want to thank the Compile Farm project, which was extremely helpful) and during first-scheduler testing. The first patch mainly contains fixes for correct operation on platforms with STACK_REGS and without DFA scheduling (i.e., where max_issue is not called). Also, I have removed the pass_sel_sched variable and related code, because it seems cleaner to make the decision to run the selective scheduler from rest_of_handle_sched*. The second patch fixes REG_N_CALLS_CROSSED behavior, disables control speculation before reload, kills some dead code, disables substitution through multi-reg references (found on ppc), and fixes cases in which an insn was incorrectly moved instead of being reemitted.

Tested on ia64, committed to sel-sched branch.
Andrey
Index: gcc/ChangeLog.sel-sched
===================================================================
*** gcc/ChangeLog.sel-sched	(revision 135815)
--- gcc/ChangeLog.sel-sched	(revision 135816)
***************
*** 1,3 ****
--- 1,38 ----
+ 2008-05-23  Andrey Belevantsev  <abel@ispras.ru>
+ 
+ 	* tree-pass.h (pass_sel_sched): Remove.
+ 	* sel-sched.c (maybe_skip_selective_scheduling): New.  Export it.
+ 	(gate_handle_sel_sched, handle_sel_sched, pass_sel_sched): Remove.
+ 	(debug_state): New.
+ 	(advance_one_cycle, advance_state_on_fence): Use it.
+ 	(extract_new_fences_from): Fix formatting.
+ 	(can_substitute_through_p): Disallow substitutions through CONST_INTs.
+ 	(estimate_insn_cost): New, factored from ...
+ 	(vinsn_dfa_cost): ... here.  Renamed to get_expr_cost.
+ 	(choose_best_insn): When max_issue is not used, try to select
+ 	the first available insn from the ready list.  Correctly set can_issue.
+ 	(stall_for_cycles): When stalling for more than one cycle, always
+ 	set FENCE_AFTER_STALL_P.
+ 	(track_scheduled_insns_and_blocks): Always add found insns to
+         current_originators.
+ 	(reset_sched_cycles_in_current_ebb): New variables real_insn,
+         after_stall.  Use estimate_insn_cost.  Handle the corner case when
+         state_transition seems to be lying about the cost.
+ 	* sel-sched-ir.c (state_create): Always start from advanced state.
+ 	(deps_init_id_note_reg_set): When stack registers are used,
+         make the instruction of type use to avoid renaming.
+ 	(maybe_downgrade_id_to_use): Likewise.  Downgrade only SETs.
+         (setup_id_reg_sets): Treat writes/reads to/from stack registers
+         as corresponding action with the first stack reg.
+ 	(has_dependence_p): Also punt for MOVE_BARRIER.
+ 	* sched-rgn.c (rest_of_handle_sched, rest_of_handle_sched2): Run
+         selective scheduling if requested.
+ 	* passes.c (init_optimization_passes): Remove mentions of pass_sel_sched.
+ 	* config/i386/i386.c (override_options): Turn on selective scheduling
+ 	with -O2 for testing purposes.
+ 	* config/ia64/ia64.c (ia64_reorg): Remove debug handling of selective
+         scheduling.
+ 
  2008-05-13  Andrey Belevantsev  <abel@ispras.ru>
  
  	* sel-sched-ir.c (vinsn_init): Call init_id_from_df when possible.
Index: gcc/tree-pass.h
===================================================================
*** gcc/tree-pass.h	(revision 135815)
--- gcc/tree-pass.h	(revision 135816)
*************** extern struct rtl_opt_pass pass_mode_swi
*** 428,434 ****
  extern struct rtl_opt_pass pass_see;
  extern struct rtl_opt_pass pass_sms;
  extern struct rtl_opt_pass pass_sched;
- extern struct rtl_opt_pass pass_sel_sched;
  extern struct rtl_opt_pass pass_local_alloc;
  extern struct rtl_opt_pass pass_global_alloc;
  extern struct rtl_opt_pass pass_postreload;
--- 428,433 ----
Index: gcc/sel-sched.c
===================================================================
*** gcc/sel-sched.c	(revision 135815)
--- gcc/sel-sched.c	(revision 135816)
*************** static int code_motion_path_driver (insn
*** 553,558 ****
--- 553,560 ----
  static void sel_sched_region_1 (void);
  static void sel_sched_region_2 (sel_sched_region_2_data_t);
  static av_set_t compute_av_set_inside_bb (insn_t, ilist_t, int, bool);
+ 
+ static void debug_state (state_t);
  
  
  /* Functions that work with fences.  */
*************** advance_one_cycle (fence_t fence)
*** 582,588 ****
        i++;
      }
    if (sched_verbose >= 2)
!     sel_print ("Finished a cycle.  Current cycle = %d\n", FENCE_CYCLE (fence));
  }
  
  /* Returns true when SUCC in a fallthru bb of INSN, possibly
--- 584,593 ----
        i++;
      }
    if (sched_verbose >= 2)
!     {
!       sel_print ("Finished a cycle.  Current cycle = %d\n", FENCE_CYCLE (fence));
!       debug_state (FENCE_STATE (fence));
!     }
  }
  
  /* Returns true when SUCC in a fallthru bb of INSN, possibly
*************** extract_new_fences_from (flist_t old_fen
*** 665,672 ****
        if (0 < seqno && seqno <= orig_max_seqno
            && (pipelining_p || INSN_SCHED_TIMES (succ) <= 0))
          {
!           bool b = in_same_ebb_p (insn, succ)
!             || in_fallthru_bb_p (insn, succ);
  
            if (sched_verbose >= 1)
              sel_print ("Fence %d continues as %d[%d] (state %s)\n", 
--- 670,677 ----
        if (0 < seqno && seqno <= orig_max_seqno
            && (pipelining_p || INSN_SCHED_TIMES (succ) <= 0))
          {
!           bool b = (in_same_ebb_p (insn, succ)
!                     || in_fallthru_bb_p (insn, succ)); 
  
            if (sched_verbose >= 1)
              sel_print ("Fence %d continues as %d[%d] (state %s)\n", 
*************** can_substitute_through_p (insn_t insn, d
*** 704,712 ****
  
    /* Now we just need to make sure the INSN_RHS consists of only one 
       simple REG rtx.  */
!   if ((REG_P (INSN_LHS (insn)) 
!        && (REG_P (INSN_RHS (insn))
!            || GET_CODE (INSN_RHS (insn)) == CONST_INT)))
      return true;             
    return false;
  }
--- 709,716 ----
  
    /* Now we just need to make sure the INSN_RHS consists of only one 
       simple REG rtx.  */
!   if (REG_P (INSN_LHS (insn)) 
!       && REG_P (INSN_RHS (insn)))
      return true;             
    return false;
  }
*************** invoke_aftermath_hooks (fence_t fence, r
*** 4058,4063 ****
--- 4062,4114 ----
    return issue_more;
  }
  
+ /* Estimate the cost of issuing INSN on DFA state STATE.  */
+ static int
+ estimate_insn_cost (rtx insn, state_t state)
+ {
+   static state_t temp = NULL;
+   int cost;
+ 
+   if (!temp)
+     temp = xmalloc (dfa_state_size);
+ 
+   memcpy (temp, state, dfa_state_size);
+   cost = state_transition (temp, insn);
+ 
+   if (cost < 0)
+     return 0;
+   else if (cost == 0)
+     return 1;
+   return cost;
+ }
+ 
+ /* Return the cost of issuing EXPR on the FENCE as estimated by DFA.  
+    This function properly handles ASMs, USEs etc.  */
+ static int
+ get_expr_cost (expr_t expr, fence_t fence)
+ {
+   rtx insn = EXPR_INSN_RTX (expr);
+ 
+   if (recog_memoized (insn) < 0)
+     {
+       if (!FENCE_STARTS_CYCLE_P (fence) 
+           /* FIXME: Is this condition necessary?  */
+           && VINSN_UNIQUE_P (EXPR_VINSN (expr))
+ 	  && INSN_ASM_P (insn))
+ 	/* This is asm insn which is tryed to be issued on the
+ 	   cycle not first.  Issue it on the next cycle.  */
+ 	return 1;
+       else
+ 	/* A USE insn, or something else we don't need to
+ 	   understand.  We can't pass these directly to
+ 	   state_transition because it will trigger a
+ 	   fatal error for unrecognizable insns.  */
+ 	return 0;
+     }
+   else
+     return estimate_insn_cost (insn, FENCE_STATE (fence));
+ }
+ 
  /* Find the best insn for scheduling, either via max_issue or just take 
     the most prioritized available.  */
  static int
*************** choose_best_insn (fence_t fence, int pri
*** 4076,4096 ****
      }
    else
      {
!       /* We can't use max_issue; just return the first element.  */
!       expr_t expr = find_expr_for_ready (0, true);
  
!       if (vinsn_dfa_cost (EXPR_VINSN (expr), fence) >= 1)
!         {
!           can_issue = 0;
!           *index = -1;
!         }
!       else
!         {
!           can_issue = 1;
!           *index = 0;
!           if (sched_verbose >= 2)
!             sel_print ("using first insn from the ready list\n");
!         }
      }
  
    return can_issue;
--- 4127,4156 ----
      }
    else
      {
!       /* We can't use max_issue; just return the first available element.  */
!       int i;
  
!       for (i = 0; i < ready.n_ready; i++)
! 	{
! 	  expr_t expr = find_expr_for_ready (i, true);
! 
! 	  if (get_expr_cost (expr, fence) < 1)
! 	    {
! 	      can_issue = can_issue_more;
! 	      *index = i;
! 
! 	      if (sched_verbose >= 2)
! 		sel_print ("using %dth insn from the ready list\n", i + 1);
! 
! 	      break;
! 	    }
! 	}
! 
!       if (i == ready.n_ready)
! 	{
! 	  can_issue = 0;
! 	  *index = -1;
! 	}
      }
  
    return can_issue;
*************** move_exprs_to_boundary (bnd_t bnd, expr_
*** 4813,4818 ****
--- 4873,4892 ----
      }
  }
  
+ 
+ /* Debug a DFA state as an array of bytes.  */
+ static void
+ debug_state (state_t state)
+ {
+   unsigned char *p;
+   unsigned int i, size = dfa_state_size;
+ 
+   sel_print ("state (%u):", size);
+   for (i = 0, p = (unsigned char *) state; i < size; i++)
+     sel_print (" %d", p[i]);
+   sel_print ("\n");
+ }
+ 
  /* Advance state on FENCE with INSN.  Return true if INSN is 
     an ASM, and we should advance state once more.  */
  static bool
*************** advance_state_on_fence (fence_t fence, i
*** 4850,4855 ****
--- 4924,4931 ----
          advance_one_cycle (fence);
      }
  
+   if (sched_verbose >= 2)
+     debug_state (FENCE_STATE (fence));
    FENCE_STARTS_CYCLE_P (fence) = 0;
    return asm_p;
  }
*************** stall_for_cycles (fence_t fence, int n)
*** 5002,5008 ****
  {
    int could_more;
                
!   could_more = FENCE_ISSUED_INSNS (fence) < issue_rate;
    while (n--)
      advance_one_cycle (fence);
    if (could_more)
--- 5078,5084 ----
  {
    int could_more;
                
!   could_more = n > 1 || FENCE_ISSUED_INSNS (fence) < issue_rate;
    while (n--)
      advance_one_cycle (fence);
    if (could_more)
*************** move_op_after_merge_succs (cmpd_local_pa
*** 5312,5325 ****
  static void
  track_scheduled_insns_and_blocks (rtx insn)
  {
!   /* This can be previously created bookkeeping copy; do not count these.  */
    if (!bitmap_bit_p (current_copies, INSN_UID (insn)))
      {
!       bitmap_set_bit (current_originators, INSN_UID (insn));
! 
        if (INSN_SCHED_TIMES (insn) > 0)
- 	/* Note that original block needs to be rescheduled, as we pulled an
- 	   instruction out of it.  */
  	bitmap_set_bit (blocks_to_reschedule, BLOCK_FOR_INSN (insn)->index);
        else if (INSN_UID (insn) < first_emitted_uid)
  	num_insns_scheduled++;
--- 5388,5402 ----
  static void
  track_scheduled_insns_and_blocks (rtx insn)
  {
!   /* Even if this insn can be a copy that will be removed during current move_op,
!      we still need to count it as an originator.  */
!   bitmap_set_bit (current_originators, INSN_UID (insn));
! 
    if (!bitmap_bit_p (current_copies, INSN_UID (insn)))
      {
!       /* Note that original block needs to be rescheduled, as we pulled an
! 	 instruction out of it.  */
        if (INSN_SCHED_TIMES (insn) > 0)
  	bitmap_set_bit (blocks_to_reschedule, BLOCK_FOR_INSN (insn)->index);
        else if (INSN_UID (insn) < first_emitted_uid)
  	num_insns_scheduled++;
*************** reset_sched_cycles_in_current_ebb (void)
*** 6426,6450 ****
  
    state_reset (curr_state);
    advance_state (curr_state);
! 
!   for (insn = current_sched_info->head; insn != current_sched_info->next_tail;
         insn = NEXT_INSN (insn))
      {
        int cost, haifa_cost;
        int sort_p;
!       bool asm_p;
        int clock;
  
        if (!INSN_P (insn))
  	continue;
  
        asm_p = false;
        clock = INSN_SCHED_CYCLE (insn);
  
        cost = clock - last_clock;
  
        /* Initialize HAIFA_COST.  */
!       if (recog_memoized (insn) < 0)
  	{
  	  asm_p = INSN_ASM_P (insn);
  
--- 6503,6529 ----
  
    state_reset (curr_state);
    advance_state (curr_state);
!   
!   for (insn = current_sched_info->head;
!        insn != current_sched_info->next_tail;
         insn = NEXT_INSN (insn))
      {
        int cost, haifa_cost;
        int sort_p;
!       bool asm_p, real_insn, after_stall;
        int clock;
  
        if (!INSN_P (insn))
  	continue;
  
        asm_p = false;
+       real_insn = recog_memoized (insn) >= 0;
        clock = INSN_SCHED_CYCLE (insn);
  
        cost = clock - last_clock;
  
        /* Initialize HAIFA_COST.  */
!       if (! real_insn)
  	{
  	  asm_p = INSN_ASM_P (insn);
  
*************** reset_sched_cycles_in_current_ebb (void)
*** 6458,6495 ****
  	    haifa_cost = 0;
  	}
        else
! 	{
! 	  state_t tmp_state = alloca (dfa_state_size);
! 
! 	  memcpy (tmp_state, curr_state, dfa_state_size);
! 	  haifa_cost = state_transition (tmp_state, insn);
! 
! 	  /* ??? We can't assert anything about cost here yet,
! 	     because sometimes our scheduler gets out of sync with
! 	     Haifa.
! 	     This is to be fixed.  */
! 	  if (haifa_cost == 0)
! 	    haifa_cost = 1;
! 	  else if (haifa_cost < 0)
! 	    haifa_cost = 0;
! 	}
  
        /* Stall for whatever cycles we've stalled before.  */
        if (INSN_AFTER_STALL_P (insn) && cost > haifa_cost)
! 	haifa_cost = cost;
  
        if (haifa_cost > 0)
  	{
! 	  int i = haifa_cost;
  
! 	  while (i--)
  	    {
  	      advance_state (curr_state);
  	      if (sched_verbose >= 2)
! 		sel_print ("advance_state (state_transition)\n");
  	    }
  
! 	  haifa_clock += haifa_cost;
  	}
        else
  	gcc_assert (haifa_cost == 0);
--- 6537,6578 ----
  	    haifa_cost = 0;
  	}
        else
!         haifa_cost = estimate_insn_cost (insn, curr_state);
  
        /* Stall for whatever cycles we've stalled before.  */
+       after_stall = 0;
        if (INSN_AFTER_STALL_P (insn) && cost > haifa_cost)
!         {
!           haifa_cost = cost;
!           after_stall = 1;
!         }
  
        if (haifa_cost > 0)
  	{
! 	  int i = 0;
  
! 	  while (haifa_cost--)
  	    {
  	      advance_state (curr_state);
+               i++;
+ 
  	      if (sched_verbose >= 2)
!                 {
!                   sel_print ("advance_state (state_transition)\n");
!                   debug_state (curr_state);
!                 }
! 
!               /* The DFA may report that e.g. insn requires 2 cycles to be 
!                  issued, but on the next cycle it says that insn is ready 
!                  to go.  Check this here.  */
!               if (!after_stall
!                   && real_insn 
!                   && haifa_cost > 0
!                   && estimate_insn_cost (insn, curr_state) == 0)
!                 break;
  	    }
  
! 	  haifa_clock += i;
  	}
        else
  	gcc_assert (haifa_cost == 0);
*************** reset_sched_cycles_in_current_ebb (void)
*** 6505,6516 ****
  	    advance_state (curr_state);
  	    haifa_clock++;
  	    if (sched_verbose >= 2)
! 	      sel_print ("advance_state (dfa_new_cycle)\n");
! 	  }
  
!       if (recog_memoized (insn) >= 0)
  	{
  	  cost = state_transition (curr_state, insn);
  	  gcc_assert (cost < 0);
  	}
  
--- 6588,6606 ----
  	    advance_state (curr_state);
  	    haifa_clock++;
  	    if (sched_verbose >= 2)
!               {
!                 sel_print ("advance_state (dfa_new_cycle)\n");
!                 debug_state (curr_state);
!               }
!           }
  
!       if (real_insn)
  	{
  	  cost = state_transition (curr_state, insn);
+ 
+           if (sched_verbose >= 2)
+             debug_state (curr_state);
+ 
  	  gcc_assert (cost < 0);
  	}
  
*************** sel_global_finish (void)
*** 7144,7152 ****
    free_dominance_info (CDI_DOMINATORS);
  }
  
  /* The entry point.  */
  void
! selective_scheduling_run (void)
  {
    int rgn;
  
--- 7234,7276 ----
    free_dominance_info (CDI_DOMINATORS);
  }
  
+ /* Return true when we need to skip selective scheduling.  Used for debugging.  */
+ bool
+ maybe_skip_selective_scheduling (void)
+ {
+   int now;
+   int start;
+   int stop;
+   bool do_p;
+   static int sel1_run = 0;
+   static int sel2_run = 0;
+ 
+   if (!reload_completed)
+     {
+       now = ++sel1_run;
+       start = PARAM_VALUE (PARAM_SEL1_START);
+       stop = PARAM_VALUE (PARAM_SEL1_STOP);
+       do_p = (PARAM_VALUE (PARAM_SEL1_P) == 1);
+     }
+   else
+     {
+       now = ++sel2_run;
+       start = PARAM_VALUE (PARAM_SEL2_START);
+       stop = PARAM_VALUE (PARAM_SEL2_STOP);
+       do_p = (PARAM_VALUE (PARAM_SEL2_P) == 1);
+     }
+ 
+   if (do_p)
+     do_p = (start <= now) && (now <= stop);
+   else
+     do_p = (start > now) || (now > stop);
+   
+   return !do_p;
+ }
+ 
  /* The entry point.  */
  void
! run_selective_scheduling (void)
  {
    int rgn;
  
*************** selective_scheduling_run (void)
*** 7185,7256 ****
  }
  
  #endif
- 
- /* A gate function for selective scheduling.  */
- static bool
- gate_handle_sel_sched (void)
- {
- #ifdef INSN_SCHEDULING
-   return (reload_completed 
-           ? flag_selective_scheduling2 && flag_schedule_insns_after_reload
-           : flag_selective_scheduling && flag_schedule_insns);
- #else
-   return false;
- #endif
- }
- 
- static int sel1_run = 0;
- 
- /* Run instruction scheduler.  */
- static unsigned int
- handle_sel_sched (void)
- {
-   if (reload_completed)
-     split_all_insns ();
- #ifdef INSN_SCHEDULING
-   {
-     int now;
-     int start;
-     int stop;
-     bool do_p;
- 
-     now = ++sel1_run;
-     start = PARAM_VALUE (PARAM_SEL1_START);
-     stop = PARAM_VALUE (PARAM_SEL1_STOP);
-     do_p = (PARAM_VALUE (PARAM_SEL1_P) == 1);
- 
-     if (do_p)
-       do_p = (start <= now) && (now <= stop);
-     else
-       do_p = (start > now) || (now > stop);
- 
-     if ((flag_selective_scheduling || flag_selective_scheduling2) && do_p)
-       selective_scheduling_run ();
-     else
-       schedule_insns ();
-   }
- #endif
-   return 0;
- }
- 
- struct rtl_opt_pass pass_sel_sched =
- {
-   {
-     RTL_PASS,
-     "sel-sched",                          /* name */
-     gate_handle_sel_sched,                /* gate */
-     handle_sel_sched,                 	/* execute */
-     NULL,                                 /* sub */
-     NULL,                                 /* next */
-     0,                                    /* static_pass_number */
-     TV_SEL_SCHED,                         /* tv_id */
-     0,                                    /* properties_required */
-     0,                                    /* properties_provided */
-     0,                                    /* properties_destroyed */
-     0,                                    /* todo_flags_start */
-     TODO_df_finish | TODO_verify_rtl_sharing |
-     TODO_dump_func |
-     TODO_verify_flow |
-     TODO_ggc_collect                      /* todo_flags_finish */
-   }
- };
--- 7309,7311 ----
Index: gcc/sel-sched.h
===================================================================
*** gcc/sel-sched.h	(revision 135815)
--- gcc/sel-sched.h	(revision 135816)
*************** along with GCC; see the file COPYING3.  
*** 21,25 ****
  #define GCC_SEL_SCHED_H
  
  /* The main entry point.  */
! extern void selective_scheduling_run (void);
  #endif /* GCC_SEL_SCHED_H */
--- 21,27 ----
  #define GCC_SEL_SCHED_H
  
  /* The main entry point.  */
! extern void run_selective_scheduling (void);
! extern bool maybe_skip_selective_scheduling (void);
! 
  #endif /* GCC_SEL_SCHED_H */
Index: gcc/sel-sched-ir.c
===================================================================
*** gcc/sel-sched-ir.c	(revision 135815)
--- gcc/sel-sched-ir.c	(revision 135816)
*************** state_create (void)
*** 510,515 ****
--- 510,516 ----
    state_t state = state_alloc ();
  
    state_reset (state);
+   advance_state (state);
    return state;
  }
  
*************** deps_init_id_note_reg_set (int regno)
*** 2406,2411 ****
--- 2407,2419 ----
  
    if (IDATA_TYPE (deps_init_id_data.id) != PC)
      SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno);
+ 
+ #ifdef STACK_REGS
+   /* Make instructions that set stack registers to be ineligible for 
+      renaming to avoid issues with find_used_regs.  */
+   if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
+     deps_init_id_data.force_use_p = true;
+ #endif
  }
  
  /* Note a clobber of REGNO.  */
*************** maybe_downgrade_id_to_use (idata_t id, i
*** 2532,2539 ****
    rtx lhs = IDATA_LHS (id);
    rtx rhs = IDATA_RHS (id);
    
!   if (IDATA_TYPE (id) == SET 
!       && (!lhs || !lhs_and_rhs_separable_p (lhs, rhs)))
      {
        IDATA_TYPE (id) = USE;
        return;
--- 2540,2550 ----
    rtx lhs = IDATA_LHS (id);
    rtx rhs = IDATA_RHS (id);
    
!   /* We downgrade only SETs.  */
!   if (IDATA_TYPE (id) != SET)
!     return;
! 
!   if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs))
      {
        IDATA_TYPE (id) = USE;
        return;
*************** maybe_downgrade_id_to_use (idata_t id, i
*** 2550,2555 ****
--- 2561,2576 ----
            must_be_use = true;
            break;
          }
+ 
+ #ifdef STACK_REGS
+       /* Make instructions that set stack registers to be ineligible for 
+ 	 renaming to avoid issues with find_used_regs.  */
+       if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG))
+ 	{
+ 	  must_be_use = true;
+ 	  break;
+ 	}
+ #endif
      }    
    
    if (must_be_use)
*************** setup_id_reg_sets (idata_t id, insn_t in
*** 2574,2581 ****
                                       | DF_REF_PRE_POST_MODIFY)))
          SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno);
        else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
!         SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
!       
        /* Mark special refs that generate read/write def pair.  */
        if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)
            || regno == STACK_POINTER_REGNUM)
--- 2595,2610 ----
                                       | DF_REF_PRE_POST_MODIFY)))
          SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno);
        else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
!         {
! 	  SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno);
! 
! #ifdef STACK_REGS
! 	  /* For stack registers, treat writes to them as writes 
! 	     to the first one to be consistent with sched-deps.c.  */
! 	  if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
! 	    SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG);
! #endif
! 	}
        /* Mark special refs that generate read/write def pair.  */
        if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)
            || regno == STACK_POINTER_REGNUM)
*************** setup_id_reg_sets (idata_t id, insn_t in
*** 2592,2598 ****
        if (bitmap_bit_p (tmp, regno))
          bitmap_clear_bit (tmp, regno);
        else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE))
!         SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
      }
  
    return_regset_to_pool (tmp);
--- 2621,2636 ----
        if (bitmap_bit_p (tmp, regno))
          bitmap_clear_bit (tmp, regno);
        else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE))
! 	{
! 	  SET_REGNO_REG_SET (IDATA_REG_USES (id), regno);
! 
! #ifdef STACK_REGS
! 	  /* For stack registers, treat reads from them as reads from 
! 	     the first one to be consistent with sched-deps.c.  */
! 	  if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG))
! 	    SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG);
! #endif
! 	}
      }
  
    return_regset_to_pool (tmp);
*************** has_dependence_p (expr_t expr, insn_t pr
*** 3186,3192 ****
    /* When a barrier was found, set DEPS_IN_INSN bits.  */
    if (dc->last_reg_pending_barrier == TRUE_BARRIER)
      has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE;
!   
    /* Do not allow stores to memory to move through checks.  Currently
       we don't move this to sched-deps.c as the check doesn't have
       obvious places to which this dependence can be attached.  
--- 3224,3232 ----
    /* When a barrier was found, set DEPS_IN_INSN bits.  */
    if (dc->last_reg_pending_barrier == TRUE_BARRIER)
      has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE;
!   else if (dc->last_reg_pending_barrier == MOVE_BARRIER)
!     has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI;
! 
    /* Do not allow stores to memory to move through checks.  Currently
       we don't move this to sched-deps.c as the check doesn't have
       obvious places to which this dependence can be attached.  
*************** sel_finish_new_insns (void)
*** 4007,4051 ****
  
    VEC_free (rtx, heap, new_insns);
  }
- 
- /* Return the cost of VINSN as estimated by DFA.  This function properly
-    handles ASMs, USEs etc.  */
- int
- vinsn_dfa_cost (vinsn_t vinsn, fence_t fence)
- {
-   rtx insn = VINSN_INSN_RTX (vinsn);
- 
-   if (recog_memoized (insn) < 0)
-     {
-       if (!FENCE_STARTS_CYCLE_P (fence) && VINSN_UNIQUE_P (vinsn)
- 	  && INSN_ASM_P (insn))
- 	/* This is asm insn which is tryed to be issued on the
- 	   cycle not first.  Issue it on the next cycle.  */
- 	return 1;
-       else
- 	/* A USE insn, or something else we don't need to
- 	   understand.  We can't pass these directly to
- 	   state_transition because it will trigger a
- 	   fatal error for unrecognizable insns.  */
- 	return 0;
-     }
-   else
-     {
-       int cost;
-       state_t temp_state = alloca (dfa_state_size);
- 
-       state_copy (temp_state, FENCE_STATE (fence));
- 
-       cost = state_transition (temp_state, insn);
- 
-       if (cost < 0)
- 	return 0;
-       else if (cost == 0)
- 	return 1;
- 
-       return cost;
-     }
- }
  
  
  /* Functions to init/finish work with lv sets.  */
--- 4047,4052 ----
Index: gcc/sel-sched-ir.h
===================================================================
*** gcc/sel-sched-ir.h	(revision 135815)
--- gcc/sel-sched-ir.h	(revision 135816)
*************** extern void sel_finish_new_insns (void);
*** 1559,1565 ****
  
  extern bool bookkeeping_can_be_created_if_moved_through_p (insn_t);
  extern bool sel_remove_insn (insn_t, bool, bool);
- extern int vinsn_dfa_cost (vinsn_t, fence_t);
  extern bool bb_header_p (insn_t);
  extern void sel_init_invalid_data_sets (insn_t);
  extern bool insn_at_boundary_p (insn_t);
--- 1559,1564 ----
Index: gcc/sched-rgn.c
===================================================================
*** gcc/sched-rgn.c	(revision 135815)
--- gcc/sched-rgn.c	(revision 135816)
*************** along with GCC; see the file COPYING3.  
*** 64,69 ****
--- 64,70 ----
  #include "cfglayout.h"
  #include "params.h"
  #include "sched-int.h"
+ #include "sel-sched.h"
  #include "cselib.h"
  #include "target.h"
  #include "timevar.h"
*************** static unsigned int
*** 3412,3418 ****
  rest_of_handle_sched (void)
  {
  #ifdef INSN_SCHEDULING
!   schedule_insns ();
  #endif
    return 0;
  }
--- 3413,3423 ----
  rest_of_handle_sched (void)
  {
  #ifdef INSN_SCHEDULING
!   if (flag_selective_scheduling
!       && ! maybe_skip_selective_scheduling ())
!     run_selective_scheduling ();
!   else
!     schedule_insns ();
  #endif
    return 0;
  }
*************** static unsigned int
*** 3433,3444 ****
  rest_of_handle_sched2 (void)
  {
  #ifdef INSN_SCHEDULING
!   /* Do control and data sched analysis again,
!      and write some more of the results to dump file.  */
!   if (flag_sched2_use_superblocks || flag_sched2_use_traces)
!     schedule_ebbs ();
    else
!     schedule_insns ();
  #endif
    return 0;
  }
--- 3438,3455 ----
  rest_of_handle_sched2 (void)
  {
  #ifdef INSN_SCHEDULING
!   if (flag_selective_scheduling2
!       && ! maybe_skip_selective_scheduling ())
!     run_selective_scheduling ();
    else
!     {
!       /* Do control and data sched analysis again,
! 	 and write some more of the results to dump file.  */
!       if (flag_sched2_use_superblocks || flag_sched2_use_traces)
! 	schedule_ebbs ();
!       else
! 	schedule_insns ();
!     }
  #endif
    return 0;
  }
Index: gcc/passes.c
===================================================================
*** gcc/passes.c	(revision 135815)
--- gcc/passes.c	(revision 135816)
*************** init_optimization_passes (void)
*** 738,744 ****
        NEXT_PASS (pass_see);
        NEXT_PASS (pass_match_asm_constraints);
        NEXT_PASS (pass_sms);
-       NEXT_PASS (pass_sel_sched);
        NEXT_PASS (pass_sched);
        NEXT_PASS (pass_subregs_of_mode_init);
        NEXT_PASS (pass_local_alloc);
--- 738,743 ----
*************** init_optimization_passes (void)
*** 764,770 ****
  	  NEXT_PASS (pass_branch_target_load_optimize2);
  	  NEXT_PASS (pass_leaf_regs);
  	  NEXT_PASS (pass_split_before_sched2);
-           NEXT_PASS (pass_sel_sched);
  	  NEXT_PASS (pass_sched2);
  	  NEXT_PASS (pass_stack_regs);
  	    {
--- 763,768 ----
Index: gcc/config/i386/i386.c
===================================================================
*** gcc/config/i386/i386.c	(revision 135815)
--- gcc/config/i386/i386.c	(revision 135816)
*************** override_options (void)
*** 2763,2768 ****
--- 2763,2775 ----
       can be optimized to ap = __builtin_next_arg (0).  */
    if (!TARGET_64BIT || TARGET_64BIT_MS_ABI)
      targetm.expand_builtin_va_start = NULL;
+ 
+   if (optimize >= 2
+       && ! sel_sched_switch_set)
+     {
+       flag_selective_scheduling2 = 1;
+       flag_sel_sched_pipelining = 1;
+     }
  }
  
  /* Return true if this goes in large data/bss.  */
Index: gcc/config/ia64/ia64.c
===================================================================
*** gcc/config/ia64/ia64.c	(revision 135815)
--- gcc/config/ia64/ia64.c	(revision 135816)
*************** emit_predicate_relation_info (void)
*** 9096,9104 ****
      }
  }
  
- /* Counts how many times selective scheduling was run.  */
- static int sel2_run = 0;
- 
  /* Perform machine dependent operations on the rtl chain INSNS.  */
  
  static void
--- 9096,9101 ----
*************** ia64_reorg (void)
*** 9187,9214 ****
  	  _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
  	  _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
  	}
!       {
! 	int now;
! 	int start;
! 	int stop;
! 	bool do_p;
! 
! 	now = ++sel2_run;
! 	start = PARAM_VALUE (PARAM_SEL2_START);
! 	stop = PARAM_VALUE (PARAM_SEL2_STOP);
! 	do_p = (PARAM_VALUE (PARAM_SEL2_P) == 1);
! 
! 	if (do_p)
! 	  do_p = (start <= now) && (now <= stop);
! 	else
! 	  do_p = (start > now) || (now > stop);
! 
! 	if (flag_selective_scheduling2 && do_p)
! 	  selective_scheduling_run ();
  	else
  	  schedule_ebbs ();
!       }
! 
        /* We cannot reuse this one because it has been corrupted by the
  	 evil glat.  */
        finish_bundle_states ();
--- 9184,9196 ----
  	  _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
  	  _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
  	}
!       
!       if (flag_selective_scheduling2 
! 	  && !maybe_skip_selective_scheduling ())
! 	  run_selective_scheduling ();
  	else
  	  schedule_ebbs ();
!       
        /* We cannot reuse this one because it has been corrupted by the
  	 evil glat.  */
        finish_bundle_states ();
Index: gcc/ChangeLog.sel-sched
===================================================================
*** gcc/ChangeLog.sel-sched	(revision 136168)
--- gcc/ChangeLog.sel-sched	(revision 136169)
***************
*** 1,3 ****
--- 1,63 ----
+ 2008-05-29  Dmitry Melnik  <dm@ispras.ru>
+ 	    Andrey Belevantsev  <abel@ispras.ru>
+ 
+ 	* sel-sched.c (struct rtx_search_arg): Kill y, max_occur, iter, and
+ 	bitmask fields.  Adjust all uses.
+ 	(struct reg_rename): Add crosses_call field. 
+ 	(struct cmpd_local_params): Kill generated_nop field.
+ 	Make removed_last_insn a bool bitfield.
+ 	(struct moveop_static_params): New field was_renamed.
+ 	(vec_blocked_vinsns): Rename to vec_bookkeeping_blocked_vinsns.
+ 	(vec_target_unavailable_vinsns): New.
+ 	(rtx_search): Rename to rtx_ok_for_substitution_p.  Adjust all uses.
+ 	(count_occurrences_1): Bail out if we find a multi-reg reference.
+ 	(choose_best_pseudo_reg): Do not let a register cross a call if 
+ 	it doesn't already cross one.  Initialize REG_N_CALLS_CROSSED for
+ 	the new pseudo register.
+ 	(verify_target_availability): Pass reg_rename instead of 
+ 	unavailable_hard_regs to it.  Fix assert to consider 
+ 	REG_N_CALLS_CROSSED.
+ 	(find_used_regs): Compute reg_rename_p->crosses_call.
+ 	(vinsn_vec_has_expr_p, vinsn_vec_clear, vinsn_vec_add, 
+ 	vinsn_vec_free): New functions instead of add_to_blocked_exprs, 
+ 	free_blocked_exprs, clear_blocked_exprs, expr_blocked_by_bookkeeping_p.
+ 	Adjust all uses.
+ 	(fill_vec_av_set): Reset target_available bit for expressions which 
+ 	were already scheduled with another register on the fences.
+ 	(emit_insn_from_expr_after): Restore the check for HARD_REGISTER_NUM_P.
+ 	(compute_av_set_on_boundaries): When rewinding a boundary, also set
+ 	FENCE_INSN correctly.
+ 	(fill_insns): Pass fence to compute_av_set_on_boundaries.
+ 	(schedule_expr_on_boundary): Compute cant_move before move_op.
+ 	Add only renamed exprs to vec_target_unavailable_vinsns.
+ 	(maybe_emit_renaming_copy): Set params->was_renamed to true.
+ 	(remove_insn_from_stream): Also generate nop when we hit another fence.
+ 	(move_op_at_first_insn): Generate bookkeeping and update sets only
+ 	on non-toplevel move_op.
+ 	(code_motion_process_successors): Also rescan when number of successors
+ 	has changed.
+ 	(move_op): Set EXPR_WAS_RENAMED to true when we renamed any of the insns
+ 	found.
+ 	(init_seqno): When not rescheduling, pass number of insns in a region
+ 	instead of computing it from sched_max_luid.
+ 	(purge_empty_blocks): New, factored out from ... 
+ 	(sel_region_init): ... here.  Even when we do not have to schedule
+ 	the region, initialize its data for bundling.
+ 	(sel_sched_region_1): Call purge_empty_blocks from here.
+ 	* opts.c: Set sel_sched_switch_set also when -fselective-scheduling.
+ 	* sel-sched-ir.c (sel_move_insn): Assert that we have disconnected
+ 	this insn properly.
+ 	(merge_expr_data): Fix thinko.
+ 	(set_unavailable_target_for_expr): Properly set EXPR_TARGET_AVAILABLE.
+ 	(maybe_tidy_empty_bb): Export.  Free data sets in here, not ... 
+ 	(tidy_control_flow): ... here.  Do not apply the optimization
+ 	to empty bbs.  Verify backedges later.
+ 	(remove_empty_bb): Use sel_redirect_edge_and_branch.
+ 	* sched-deps.c (maybe_extend_reg_info_p): New.
+ 	(sched_analyze_reg): Use it.
+ 	* config/ia64/ia64.c (ia64_set_sched_flags): Disable control speculation
+ 	before reload.
+ 
  2008-05-23  Andrey Belevantsev  <abel@ispras.ru>
  
  	* tree-pass.h (pass_sel_sched): Remove.
Index: gcc/sel-sched.c
===================================================================
*** gcc/sel-sched.c	(revision 136168)
--- gcc/sel-sched.c	(revision 136169)
*************** along with GCC; see the file COPYING3.  
*** 117,122 ****
--- 117,123 ----
  
     Computing available expressions
     ===============================
+ 
     The computation (compute_av_set) is a bottom-up traversal.  At each insn,
     we're moving the union of its successors' sets through it via 
     moveup_expr_set.  The dependent expressions are removed.  Local 
*************** struct rtx_search_arg
*** 272,292 ****
    /* What we are searching for.  */
    rtx x;
  
-   /* With what X will be replaced.  */
-   rtx y;
- 
    /* The occurence counter.  */
    int n;
- 
-   /* Stop after finding MAX_OCCUR occurences, if it is positive.  */
-   int max_occur;
- 
-   /* Iterator of the av_set to remove expr from.  */
-   av_set_iterator *iter;
- 
-   /* Bitmask according to which substitutions are performed.  */
-   unsigned long bitmask;
  };
  typedef struct rtx_search_arg *rtx_search_arg_p;
  
  /* This descibes the data given to sel_sched_region_2.  */
--- 273,282 ----
    /* What we are searching for.  */
    rtx x;
  
    /* The occurence counter.  */
    int n;
  };
+ 
  typedef struct rtx_search_arg *rtx_search_arg_p;
  
  /* This descibes the data given to sel_sched_region_2.  */
*************** struct reg_rename
*** 335,340 ****
--- 325,333 ----
  
    /* These are *available* for renaming.  */
    HARD_REG_SET available_for_renaming;
+ 
+   /* Whether this code motion path crosses a call.  */
+   bool crosses_call;
  };
  
  /* A global structure that contains the needed information about harg 
*************** struct cmpd_local_params
*** 358,366 ****
    /* C_EXPR merged from all successors and locally allocated temporary C_EXPR.  */
    expr_t c_expr_merged, c_expr_local;
  
-   /* Generated NOP insn.  */
-   insn_t generated_nop;
- 
    /* Local params used in fur_* functions.  */
    /* Copy of the ORIGINAL_INSN list, stores the original insns already
       found before entering the current level of code_motion_path_driver.  */
--- 351,356 ----
*************** struct cmpd_local_params
*** 368,375 ****
  
    /* Local params used in move_op_* functions.  */
    /* True when we have removed last insn in the block which was 
!      also a boundary.  */
!   bool removed_last_insn;
  };
  
  /* Stores the static parameters for move_op_* calls.  */
--- 358,365 ----
  
    /* Local params used in move_op_* functions.  */
    /* True when we have removed last insn in the block which was 
!      also a boundary.  Do not update anything or create bookkeeping copies.  */
!   BOOL_BITFIELD removed_last_insn : 1;
  };
  
  /* Stores the static parameters for move_op_* calls.  */
*************** struct moveop_static_params
*** 384,389 ****
--- 374,382 ----
    /* An UID of expr_vliw which is to be moved up.  If we find other exprs,
       they are to be removed.  */
    int uid;
+ 
+   /* True if we scheduled an insn with different register.  */
+   bool was_renamed;
  };
  
  /* Stores the static parameters for fur_* calls.  */
*************** static VEC(expr_t, heap) *vec_av_set = N
*** 511,517 ****
     for the detailed explanations.  */
  DEF_VEC_P(vinsn_t);
  DEF_VEC_ALLOC_P(vinsn_t,heap);
! static VEC(vinsn_t, heap) *vec_blocked_vinsns = NULL;
  
  /* Vector to store temporary nops inserted in move_op to prevent removal
     of empty bbs.  */
--- 504,512 ----
     for the detailed explanations.  */
  DEF_VEC_P(vinsn_t);
  DEF_VEC_ALLOC_P(vinsn_t,heap);
! typedef VEC(vinsn_t, heap) *vinsn_vec_t;
! static vinsn_vec_t vec_bookkeeping_blocked_vinsns = NULL;
! static vinsn_vec_t vec_target_unavailable_vinsns = NULL;
  
  /* Vector to store temporary nops inserted in move_op to prevent removal
     of empty bbs.  */
*************** static int stat_substitutions_total;
*** 539,545 ****
  
  
  /* Forward declarations of static functions.  */
! static bool rtx_search (rtx, rtx);
  static int sel_rank_for_schedule (const void *, const void *);
  static av_set_t find_sequential_best_exprs (bnd_t, expr_t, bool);
  
--- 534,540 ----
  
  
  /* Forward declarations of static functions.  */
! static bool rtx_ok_for_substitution_p (rtx, rtx);
  static int sel_rank_for_schedule (const void *, const void *);
  static av_set_t find_sequential_best_exprs (bnd_t, expr_t, bool);
  
*************** substitute_reg_in_expr (expr_t expr, ins
*** 741,747 ****
    old = undo ? INSN_RHS (insn) : INSN_LHS (insn);
  
    /* Substitute if INSN has a form of x:=y and LHS(INSN) occurs in *VI.  */
!   if (rtx_search (old, *where))
      {
        rtx new_insn;
        rtx *where_replace;
--- 736,742 ----
    old = undo ? INSN_RHS (insn) : INSN_LHS (insn);
  
    /* Substitute if INSN has a form of x:=y and LHS(INSN) occurs in *VI.  */
!   if (rtx_ok_for_substitution_p (old, *where))
      {
        rtx new_insn;
        rtx *where_replace;
*************** count_occurrences_1 (rtx *cur_rtx, void 
*** 802,813 ****
      r33.  Actually, there's no change, but it spoils debugging.  */
    if (exp_equiv_p (*cur_rtx, p->x, 0, true))
      {
!       p->n++;
  
!       /* Stop search, if we've already found the requested number of
! 	 occurences.  */
!       if (p->max_occur > 0 && p->n >= p->max_occur)
! 	return 1;
  
        /* Do not traverse subexprs.  */
        return -1;
--- 797,811 ----
      r33.  Actually, there's no change, but it spoils debugging.  */
    if (exp_equiv_p (*cur_rtx, p->x, 0, true))
      {
!       /* Bail out if we occupy more than one register.  */
!       if (REG_P (*cur_rtx)
!           && hard_regno_nregs[REGNO(*cur_rtx)][GET_MODE (*cur_rtx)] > 1)
!         {
!           p->n = 0;
!           return 1;
!         }
  
!       p->n++;
  
        /* Do not traverse subexprs.  */
        return -1;
*************** count_occurrences_1 (rtx *cur_rtx, void 
*** 821,846 ****
           simplify_subreg will be called by validate_replace_rtx, and 
           unsubstitution will fail later.  */
        p->n = 0;
!       p->max_occur = 0;
!       return -1;
      }
  
    /* Continue search.  */
    return 0;
  }
  
! /* Return the number of places WHAT appears within WHERE.  Find no more
!    than MAX_OCCUR occurences.  This function is analogous to
!    count_occurrences but it counts not only subexprs that have 
!    equal pointers, but also those for which exp_equiv_p is true.  */
  static int 
! count_occurrences_equiv (rtx what, rtx where, int max_occur)
  {
    struct rtx_search_arg arg;
  
    arg.x = what;
    arg.n = 0;
-   arg.max_occur = max_occur;
  
    for_each_rtx (&where, &count_occurrences_1, (void *) &arg);
  
--- 819,840 ----
           simplify_subreg will be called by validate_replace_rtx, and 
           unsubstitution will fail later.  */
        p->n = 0;
!       return 1;
      }
  
    /* Continue search.  */
    return 0;
  }
  
! /* Return the number of places WHAT appears within WHERE.  
!    Bail out when we found a reference occupying several hard registers.  */
  static int 
! count_occurrences_equiv (rtx what, rtx where)
  {
    struct rtx_search_arg arg;
  
    arg.x = what;
    arg.n = 0;
  
    for_each_rtx (&where, &count_occurrences_1, (void *) &arg);
  
*************** count_occurrences_equiv (rtx what, rtx w
*** 849,857 ****
  
  /* Returns TRUE if WHAT is found in WHERE rtx tree.  */
  static bool
! rtx_search (rtx what, rtx where)
  {
!   return (count_occurrences_equiv (what, where, 1) > 0);
  }
  
  
--- 843,851 ----
  
  /* Returns TRUE if WHAT is found in WHERE rtx tree.  */
  static bool
! rtx_ok_for_substitution_p (rtx what, rtx where)
  {
!   return (count_occurrences_equiv (what, where) > 0);
  }
  
  
*************** choose_best_pseudo_reg (regset used_regs
*** 1503,1509 ****
                   limitations (frame/stack registers, calls crossed).  */
                if (!TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, 
                                        orig_regno))
!                 return gen_rtx_REG (mode, orig_regno);
                
                bad_hard_regs = true;
              }
--- 1497,1510 ----
                   limitations (frame/stack registers, calls crossed).  */
                if (!TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, 
                                        orig_regno))
! 		{
! 		  /* Don't let register cross a call if it doesn't already 
! 		     cross one.  This condition is written in accordance with 
! 		     that in sched-deps.c sched_analyze_reg().  */
! 		  if (!reg_rename_p->crosses_call 
! 		      || REG_N_CALLS_CROSSED (orig_regno) > 0)
! 		    return gen_rtx_REG (mode, orig_regno);		    
! 		}
                
                bad_hard_regs = true;
              }
*************** choose_best_pseudo_reg (regset used_regs
*** 1521,1535 ****
    
    /* We haven't found a register from original operations.  Get a new one.  
       FIXME: control register pressure somehow.  */
!   gcc_assert (mode != VOIDmode);
!   return gen_reg_rtx (mode);
  }
  
  /* True when target of EXPR is available due to TARGET_AVAILABLE,
     USED_REGS and UNAVAILABLE_HARD_REGS.  */
  static void
  verify_target_availability (expr_t expr, regset used_regs, 
!                             HARD_REG_SET unavailable_hard_regs)
  {
    unsigned n, i, regno;
    enum machine_mode mode;
--- 1522,1545 ----
    
    /* We haven't found a register from original operations.  Get a new one.  
       FIXME: control register pressure somehow.  */
!   {
!     rtx new_reg = gen_reg_rtx (mode);
! 
!     gcc_assert (mode != VOIDmode);
! 
!     max_regno = max_reg_num ();
!     maybe_extend_reg_info_p ();
!     REG_N_CALLS_CROSSED (REGNO (new_reg)) = reg_rename_p->crosses_call ? 1 : 0;
! 
!     return new_reg;
!   }
  }
  
  /* True when target of EXPR is available due to TARGET_AVAILABLE,
     USED_REGS and UNAVAILABLE_HARD_REGS.  */
  static void
  verify_target_availability (expr_t expr, regset used_regs, 
! 			    struct reg_rename *reg_rename_p)
  {
    unsigned n, i, regno;
    enum machine_mode mode;
*************** verify_target_availability (expr_t expr,
*** 1548,1554 ****
      {
        if (bitmap_bit_p (used_regs, regno + i))
          live_available = false;
!       if (TEST_HARD_REG_BIT  (unavailable_hard_regs, regno + i))
          hard_available = false;
      }
  
--- 1558,1564 ----
      {
        if (bitmap_bit_p (used_regs, regno + i))
          live_available = false;
!       if (TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno + i))
          hard_available = false;
      }
  
*************** verify_target_availability (expr_t expr,
*** 1560,1568 ****
      /* Check only if we haven't scheduled something on the previous fence, 
         cause due to MAX_SOFTWARE_LOOKAHEAD_WINDOW_SIZE issues
         and having more than one fence, we may end having targ_un in a block
!        in which successors target register is actually available.  */     
      gcc_assert (scheduled_something_on_previous_fence || !live_available 
! 		|| !hard_available);
  }
  
  /* Collect unavailable registers for EXPR from BNDS into USED_REGS.  */
--- 1570,1588 ----
      /* Check only if we haven't scheduled something on the previous fence, 
         cause due to MAX_SOFTWARE_LOOKAHEAD_WINDOW_SIZE issues
         and having more than one fence, we may end having targ_un in a block
!        in which successors target register is actually available.  
! 
!        The last condition handles the case when a dependence from a call insn
!        was created in sched-deps.c for insns with destination registers that 
!        never crossed a call before, but do cross one after our code motion.  
! 
!        FIXME: in the latter case, we just uselessly called find_used_regs, 
!        because we can't move this expression with any other register 
!        as well.  */
      gcc_assert (scheduled_something_on_previous_fence || !live_available 
! 		|| !hard_available 
! 		|| (!reload_completed && reg_rename_p->crosses_call 
! 		    && REG_N_CALLS_CROSSED (regno) == 0));
  }
  
  /* Collect unavailable registers for EXPR from BNDS into USED_REGS.  */
*************** find_best_reg_for_expr (expr_t expr, bli
*** 1671,1678 ****
        rtx best_reg = NULL_RTX;
        /* Check that we have computed availability of a target register
  	 correctly.  */
!       verify_target_availability (expr, used_regs,
! 				  reg_rename_data.unavailable_hard_regs);
  
        /* Turn everything in hard regs after reload.  */
        if (reload_completed)
--- 1691,1697 ----
        rtx best_reg = NULL_RTX;
        /* Check that we have computed availability of a target register
  	 correctly.  */
!       verify_target_availability (expr, used_regs, &reg_rename_data);
  
        /* Turn everything in hard regs after reload.  */
        if (reload_completed)
*************** find_used_regs (insn_t insn, av_set_t or
*** 3146,3152 ****
    code_motion_path_driver_info = &fur_hooks;
    
    res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
!   
    gcc_assert (res == 1);
    gcc_assert (original_insns && *original_insns);
  
--- 3165,3173 ----
    code_motion_path_driver_info = &fur_hooks;
    
    res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
! 
!   reg_rename_p->crosses_call |= sparams.crosses_call;
! 
    gcc_assert (res == 1);
    gcc_assert (original_insns && *original_insns);
  
*************** process_use_exprs (av_set_t *av_ptr)
*** 3423,3436 ****
    return NULL;
  }
  
! /* Lookup EXPR in VEC_BLOCKED_VINSNS and return TRUE if found.  */
  static bool
! expr_blocked_by_bookkeeping_p (expr_t expr)
  {
    vinsn_t vinsn;
    int n;
  
!   for (n = 0; VEC_iterate (vinsn_t, vec_blocked_vinsns, n, vinsn); n++)
      if (VINSN_SEPARABLE_P (vinsn))
        {
          if (vinsn_equal_p (vinsn, EXPR_VINSN (expr)))
--- 3444,3457 ----
    return NULL;
  }
  
! /* Lookup EXPR in VINSN_VEC and return TRUE if found.  */
  static bool
! vinsn_vec_has_expr_p (vinsn_vec_t vinsn_vec, expr_t expr)
  {
    vinsn_t vinsn;
    int n;
  
!   for (n = 0; VEC_iterate (vinsn_t, vinsn_vec, n, vinsn); n++)
      if (VINSN_SEPARABLE_P (vinsn))
        {
          if (vinsn_equal_p (vinsn, EXPR_VINSN (expr)))
*************** av_set_could_be_blocked_by_bookkeeping_p
*** 3459,3503 ****
    av_set_iterator iter;
  
    FOR_EACH_EXPR (expr, iter, orig_ops)
!     if (expr_blocked_by_bookkeeping_p (expr))
        return true;
  
    return false;
  }
  
! /* Clear VEC_BLOCKED_VINSNS.  */
  static void
! clear_blocked_exprs (void)
  {
!   unsigned len = VEC_length (vinsn_t, vec_blocked_vinsns);
    if (len > 0)
      {
        vinsn_t vinsn;
        int n;
        
!       for (n = 0; VEC_iterate (vinsn_t, vec_blocked_vinsns, n, vinsn); n++)
          vinsn_detach (vinsn);
!       VEC_block_remove (vinsn_t, vec_blocked_vinsns, 0, len);
      }
  }
  
! /* Add the vinsn of EXPR to the blocked exprs vector.  */
  static void
! add_to_blocked_exprs (expr_t expr)
  {
-   /* Unfortunately, the below code could be also fired up on
-      separable insns.
-      FIXME: add an example of how this could happen.  */
    vinsn_attach (EXPR_VINSN (expr));
!   VEC_safe_push (vinsn_t, heap, vec_blocked_vinsns, EXPR_VINSN (expr));
  }
  
  /* Free the vector representing blocked expressions.  */    
  static void
! free_blocked_exprs (void)
  {
!   if (vec_blocked_vinsns)
!     VEC_free (vinsn_t, heap, vec_blocked_vinsns);
  }
  
  /* Turn AV into a vector, filter inappropriate insns and sort it.  Return 
--- 3480,3521 ----
    av_set_iterator iter;
  
    FOR_EACH_EXPR (expr, iter, orig_ops)
!     if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr))
        return true;
  
    return false;
  }
  
! /* Clear VINSN_VEC and detach vinsns.  */
  static void
! vinsn_vec_clear (vinsn_vec_t *vinsn_vec)
  {
!   unsigned len = VEC_length (vinsn_t, *vinsn_vec);
    if (len > 0)
      {
        vinsn_t vinsn;
        int n;
        
!       for (n = 0; VEC_iterate (vinsn_t, *vinsn_vec, n, vinsn); n++)
          vinsn_detach (vinsn);
!       VEC_block_remove (vinsn_t, *vinsn_vec, 0, len);
      }
  }
  
! /* Add the vinsn of EXPR to the VINSN_VEC.  */
  static void
! vinsn_vec_add (vinsn_vec_t *vinsn_vec, expr_t expr)
  {
    vinsn_attach (EXPR_VINSN (expr));
!   VEC_safe_push (vinsn_t, heap, *vinsn_vec, EXPR_VINSN (expr));
  }
  
  /* Free the vector representing blocked expressions.  */    
  static void
! vinsn_vec_free (vinsn_vec_t *vinsn_vec)
  {
!   if (*vinsn_vec)
!     VEC_free (vinsn_t, heap, *vinsn_vec);
  }
  
  /* Turn AV into a vector, filter inappropriate insns and sort it.  Return 
*************** fill_vec_av_set (av_set_t av, blist_t bn
*** 3555,3565 ****
           FIXME: try to minimize calls to this.  */
        target_available = EXPR_TARGET_AVAILABLE (expr);
  
        /* If the availability of the EXPR is invalidated by the insertion of
  	 bookkeeping earlier, make sure that we won't choose this expr for
  	 scheduling if it's not separable, and if it is separable, then
  	 we have to recompute the set of available registers for it.  */
!       if (expr_blocked_by_bookkeeping_p (expr))
  	{
            VEC_unordered_remove (expr_t, vec_av_set, n);
            if (sched_verbose >= 4)
--- 3573,3590 ----
           FIXME: try to minimize calls to this.  */
        target_available = EXPR_TARGET_AVAILABLE (expr);
  
+       /* If insn was already scheduled on the current fence,
+ 	 set TARGET_AVAILABLE to false no matter what expr's attribute says.  
+ 	 FIXME: test it with 'target_available = -1' (but probably it doesn't
+ 	 make any sense here).  */
+       if (vinsn_vec_has_expr_p (vec_target_unavailable_vinsns, expr))
+ 	target_available = -1;
+ 
        /* If the availability of the EXPR is invalidated by the insertion of
  	 bookkeeping earlier, make sure that we won't choose this expr for
  	 scheduling if it's not separable, and if it is separable, then
  	 we have to recompute the set of available registers for it.  */
!       if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr))
  	{
            VEC_unordered_remove (expr_t, vec_av_set, n);
            if (sched_verbose >= 4)
*************** emit_insn_from_expr_after (expr_t expr, 
*** 4242,4249 ****
      {
        unsigned regno = expr_dest_regno (expr);
        
!       reg_rename_tick[regno] = ++reg_rename_this_tick;
!       df_set_regs_ever_live (regno, true);
      }
    
    return sel_gen_insn_from_expr_after (expr, vinsn, seqno, 
--- 4267,4277 ----
      {
        unsigned regno = expr_dest_regno (expr);
        
!       if (HARD_REGISTER_NUM_P (regno))
! 	{
! 	  df_set_regs_ever_live (regno, true);
! 	  reg_rename_tick[regno] = ++reg_rename_this_tick;
! 	}
      }
    
    return sel_gen_insn_from_expr_after (expr, vinsn, seqno, 
*************** remove_temp_moveop_nops (void)
*** 4619,4625 ****
     distinguishing between bookkeeping copies and original insns.  */
  static int max_uid_before_move_op = 0;
  
! #ifdef ENABLE_CHECKING
  /* Records the number of fill_insns runs for debugging purposes.  */
  static int fill_insns_run = 0;
  
--- 4647,4653 ----
     distinguishing between bookkeeping copies and original insns.  */
  static int max_uid_before_move_op = 0;
  
! #ifdef ENABLE_ASSERT_CHECKING
  /* Records the number of fill_insns runs for debugging purposes.  */
  static int fill_insns_run = 0;
  
*************** remove_insns_for_debug (blist_t bnds, av
*** 4673,4679 ****
  
  /* Compute available instructions on boundaries.  */
  static void
! compute_av_set_on_boundaries (blist_t bnds, av_set_t *av_vliw_p)
  {
    if (sched_verbose >= 2)
      {
--- 4701,4707 ----
  
  /* Compute available instructions on boundaries.  */
  static void
! compute_av_set_on_boundaries (fence_t fence, blist_t bnds, av_set_t *av_vliw_p)
  {
    if (sched_verbose >= 2)
      {
*************** compute_av_set_on_boundaries (blist_t bn
*** 4700,4706 ****
              if (sel_bb_head_p (bnd_to))
                break;
            }
!       BND_TO (bnd) = bnd_to;
  
        av_set_clear (&BND_AV (bnd));
        BND_AV (bnd) = compute_av_set (BND_TO (bnd), NULL, 0, true);
--- 4728,4740 ----
              if (sel_bb_head_p (bnd_to))
                break;
            }
! 
!       if (BND_TO (bnd) != bnd_to)
! 	{
!   	  gcc_assert (FENCE_INSN (fence) == BND_TO (bnd));
! 	  FENCE_INSN (fence) = bnd_to;
! 	  BND_TO (bnd) = bnd_to;
! 	}
  
        av_set_clear (&BND_AV (bnd));
        BND_AV (bnd) = compute_av_set (BND_TO (bnd), NULL, 0, true);
*************** schedule_expr_on_boundary (bnd_t bnd, ex
*** 5032,5037 ****
--- 5066,5075 ----
          move_cond_jump (insn, bnd);
      }
  
+   /* Calculate cant_move now as EXPR_WAS_RENAMED can change after move_op 
+      meaning that there was *any* renaming somewhere.  */
+   cant_move = EXPR_WAS_CHANGED (expr_vliw) || EXPR_WAS_RENAMED (expr_vliw);
+ 
    /* Find a place for C_EXPR to schedule.  */
    place_to_insert = prepare_place_to_insert (bnd);
    move_exprs_to_boundary (bnd, expr_vliw, expr_seq, c_expr);
*************** schedule_expr_on_boundary (bnd_t bnd, ex
*** 5041,5047 ****
       the expr_seq set has more than one expr, and we chose the one that 
       is not equal to expr_vliw.  Then expr_vliw may be insn in stream, and 
       we can't use it.  Generate the new vinsn.  */
-   cant_move = EXPR_WAS_CHANGED (expr_vliw) || EXPR_WAS_RENAMED (expr_vliw);
    if (INSN_IN_STREAM_P (EXPR_INSN_RTX (expr_vliw)))
      {
        vinsn_t vinsn_new;
--- 5079,5084 ----
*************** schedule_expr_on_boundary (bnd_t bnd, ex
*** 5063,5068 ****
--- 5100,5110 ----
    remove_temp_moveop_nops ();
  
    av_set_clear (&expr_seq);
+  
+   /* Save the expression scheduled so to reset target availability if we'll 
+      meet it later on the same fence.  */
+   if (EXPR_WAS_RENAMED (expr_vliw))
+     vinsn_vec_add (&vec_target_unavailable_vinsns, INSN_EXPR (insn));
  
    /* Check that the recent movement didn't destroyed loop
       structure.  */
*************** stall_for_cycles (fence_t fence, int n)
*** 5085,5091 ****
      FENCE_AFTER_STALL_P (fence) = 1;
  }
  
- 
  /* Gather a parallel group of insns at FENCE and assign their seqno 
     to SEQNO.  All scheduled insns are gathered in SCHEDULED_INSNS_TAILPP 
     list for later recalculation of seqnos.  */
--- 5127,5132 ----
*************** fill_insns (fence_t fence, int seqno, il
*** 5115,5124 ****
        int max_insns = pipelining_p ? issue_rate : 2 * issue_rate;
        int max_stall = pipelining_p ? 1 : 3;
        
!       compute_av_set_on_boundaries (bnds, &av_vliw);
        remove_insns_that_need_bookkeeping (fence, &av_vliw);
  
! #ifdef ENABLE_CHECKING
        /* If debug parameters tell us to ignore this attempt to move an insn,
  	 obey.  */
        remove_insns_for_debug (bnds, &av_vliw);
--- 5156,5165 ----
        int max_insns = pipelining_p ? issue_rate : 2 * issue_rate;
        int max_stall = pipelining_p ? 1 : 3;
        
!       compute_av_set_on_boundaries (fence, bnds, &av_vliw);
        remove_insns_that_need_bookkeeping (fence, &av_vliw);
  
! #ifdef ENABLE_ASSERT_CHECKING
        /* If debug parameters tell us to ignore this attempt to move an insn,
  	 obey.  */
        remove_insns_for_debug (bnds, &av_vliw);
*************** update_and_record_unavailable_insns (bas
*** 5271,5283 ****
  	 CUR_EXPR is in new AV_SET.  */
        FOR_EACH_EXPR (cur_expr, i, old_av_set)
          {
!           expr_t new_expr = av_set_lookup (BB_AV_SET (book_block), EXPR_VINSN (cur_expr));
  
            if (! new_expr 
                /* In this case, we can just turn off the E_T_A bit, but we can't 
                   represent this information with the current vector.  */
!               || EXPR_TARGET_AVAILABLE (new_expr) != EXPR_TARGET_AVAILABLE (cur_expr))
!             add_to_blocked_exprs (cur_expr);
          }
  
        av_set_clear (&old_av_set);
--- 5312,5329 ----
  	 CUR_EXPR is in new AV_SET.  */
        FOR_EACH_EXPR (cur_expr, i, old_av_set)
          {
!           expr_t new_expr = av_set_lookup (BB_AV_SET (book_block), 
! 					   EXPR_VINSN (cur_expr));
  
            if (! new_expr 
                /* In this case, we can just turn off the E_T_A bit, but we can't 
                   represent this information with the current vector.  */
!               || EXPR_TARGET_AVAILABLE (new_expr) 
! 		 != EXPR_TARGET_AVAILABLE (cur_expr))
! 	    /* Unfortunately, the below code could be also fired up on
! 	       separable insns.
! 	       FIXME: add an example of how this could happen.  */
!             vinsn_vec_add (&vec_bookkeeping_blocked_vinsns, cur_expr);
          }
  
        av_set_clear (&old_av_set);
*************** maybe_emit_renaming_copy (rtx insn, 
*** 5440,5445 ****
--- 5486,5492 ----
        replace_dest_with_reg_in_expr (params->c_expr, params->dest);
        
        insn_emitted = true;
+       params->was_renamed = true;
      }
    
    return insn_emitted;
*************** handle_emitting_transformations (rtx ins
*** 5490,5497 ****
  
  /* Remove INSN from stream to schedule it later.  */
  static void
! remove_insn_from_stream (rtx insn, cmpd_local_params_p lparams, 
!                          bool only_disconnect)
  {
    insn_t nop, bb_head, bb_end;
    bool need_nop_to_preserve_bb;
--- 5537,5543 ----
  
  /* Remove INSN from stream to schedule it later.  */
  static void
! remove_insn_from_stream (rtx insn, bool only_disconnect)
  {
    insn_t nop, bb_head, bb_end;
    bool need_nop_to_preserve_bb;
*************** remove_insn_from_stream (rtx insn, cmpd_
*** 5504,5538 ****
    bb_end = sel_bb_end (bb);
    need_nop_to_preserve_bb = ((bb_head == bb_end)
                               || (NEXT_INSN (bb_head) == bb_end 
!                                  && JUMP_P (bb_end)));
  
    /* If there's only one insn in the BB, make sure that a nop is
       inserted into it, so the basic block won't disappear when we'll
       delete INSN below with sel_remove_insn. It should also survive
!      till the return to fill_insns, so if the nop was created locally
!      in move_op to retain data sets, reset GENERATED_NOP so it won't
!      be deleted at the exit of this move_op.  */	     
    if (need_nop_to_preserve_bb)
      {
-       gcc_assert (!lparams->generated_nop);
        nop = get_nop_from_pool (insn);
-       lparams->generated_nop = nop;
        gcc_assert (INSN_NOP_P (nop));
        VEC_safe_push (insn_t, heap, vec_temp_moveop_nops, nop);
      }
-   else
-     lparams->generated_nop = NULL;
  
    sel_remove_insn (insn, only_disconnect, false);
  }
  
  /* This function is called when original expr is found.
!    INSN - current insn traversed, EXPR - the corresponding expr found.
!    If nop is generated in the function or we need to call update_data_sets
!    on nop, then they are saved in GENERATED_NOP and 
!    CALL_UPDATE_DATA_SETS_ON_NOP.  */
  static void
! move_op_orig_expr_found (insn_t insn, expr_t expr, cmpd_local_params_p lparams, 
                           void *static_params)
  {
    bool only_disconnect, insn_emitted;
--- 5550,5577 ----
    bb_end = sel_bb_end (bb);
    need_nop_to_preserve_bb = ((bb_head == bb_end)
                               || (NEXT_INSN (bb_head) == bb_end 
!                                  && JUMP_P (bb_end))
!                              || IN_CURRENT_FENCE_P (NEXT_INSN (insn)));
  
    /* If there's only one insn in the BB, make sure that a nop is
       inserted into it, so the basic block won't disappear when we'll
       delete INSN below with sel_remove_insn. It should also survive
!      till the return to fill_insns.  */	     
    if (need_nop_to_preserve_bb)
      {
        nop = get_nop_from_pool (insn);
        gcc_assert (INSN_NOP_P (nop));
        VEC_safe_push (insn_t, heap, vec_temp_moveop_nops, nop);
      }
  
    sel_remove_insn (insn, only_disconnect, false);
  }
  
  /* This function is called when original expr is found.
!    INSN - current insn traversed, EXPR - the corresponding expr found.  */
  static void
! move_op_orig_expr_found (insn_t insn, expr_t expr, 
!                          cmpd_local_params_p lparams ATTRIBUTE_UNUSED, 
                           void *static_params)
  {
    bool only_disconnect, insn_emitted;
*************** move_op_orig_expr_found (insn_t insn, ex
*** 5543,5549 ****
    insn_emitted = handle_emitting_transformations (insn, expr, params);
    only_disconnect = (params->uid == INSN_UID (insn)
                       && ! insn_emitted  && ! EXPR_WAS_CHANGED (expr));
!   remove_insn_from_stream (insn, lparams, only_disconnect);
  }
  
  /* The function is called when original expr is found.
--- 5582,5588 ----
    insn_emitted = handle_emitting_transformations (insn, expr, params);
    only_disconnect = (params->uid == INSN_UID (insn)
                       && ! insn_emitted  && ! EXPR_WAS_CHANGED (expr));
!   remove_insn_from_stream (insn, only_disconnect);
  }
  
  /* The function is called when original expr is found.
*************** move_op_at_first_insn (insn_t insn, cmpd
*** 5603,5625 ****
    /* When we have removed the boundary insn for scheduling, which also 
       happened to be the end insn in its bb, we don't need to update sets.  */
    if (!lparams->removed_last_insn 
        && sel_bb_head_p (insn))
      {
        /* We should generate bookkeeping code only if we are not at the
           top level of the move_op.  */
!       if (lparams->e1 
!           && sel_num_cfg_preds_gt_1 (insn))
          book_block = generate_bookkeeping_insn (sparams->c_expr,
                                                  lparams->e1, lparams->e2);
        /* Update data sets for the current insn.  */
!       if (lparams->generated_nop)
!         update_data_sets (lparams->generated_nop);
!       else
!         /* Do not update the sets on the bb header which is also a boundary.
!            These should not be touched, and we'd make them incorrect as 
!            now the insn being scheduled is not there yet.  */
!         if (lparams->e1)
!           update_data_sets (insn);
      }
    
    /* If bookkeeping code was inserted, we need to update av sets of basic
--- 5642,5657 ----
    /* When we have removed the boundary insn for scheduling, which also 
       happened to be the end insn in its bb, we don't need to update sets.  */
    if (!lparams->removed_last_insn 
+       && lparams->e1
        && sel_bb_head_p (insn))
      {
        /* We should generate bookkeeping code only if we are not at the
           top level of the move_op.  */
!       if (sel_num_cfg_preds_gt_1 (insn))
          book_block = generate_bookkeeping_insn (sparams->c_expr,
                                                  lparams->e1, lparams->e2);
        /* Update data sets for the current insn.  */
!       update_data_sets (insn);
      }
    
    /* If bookkeeping code was inserted, we need to update av sets of basic
*************** move_op_at_first_insn (insn_t insn, cmpd
*** 5664,5678 ****
       it also may have predecessors with av_sets, containing instructions that 
       are no longer available, we save all such expressions that become
       unavailable during data sets update on the bookkeeping block in
!      VEC_BLOCKED_VINSNS.  Later we avoid selecting such expressions for
!      scheduling.  This allows us to avoid recomputation of av_sets outside 
!      the code motion path.  */
        
    if (book_block)
      update_and_record_unavailable_insns (book_block);
  
!   /* If INSN was previously marked for deletion, it's time to do it.  
!      GENERATED_NOP was set where the original expr was found.  */
    if (lparams->removed_last_insn)
      insn = PREV_INSN (insn);
    
--- 5696,5709 ----
       it also may have predecessors with av_sets, containing instructions that 
       are no longer available, we save all such expressions that become
       unavailable during data sets update on the bookkeeping block in
!      VEC_BOOKKEEPING_BLOCKED_VINSNS.  Later we avoid selecting such 
!      expressions for scheduling.  This allows us to avoid recomputation of 
!      av_sets outside the code motion path.  */
        
    if (book_block)
      update_and_record_unavailable_insns (book_block);
  
!   /* If INSN was previously marked for deletion, it's time to do it.  */
    if (lparams->removed_last_insn)
      insn = PREV_INSN (insn);
    
*************** code_motion_process_successors (insn_t i
*** 5853,5859 ****
--- 5884,5892 ----
    int res = 0;
    succ_iterator succ_i;
    rtx succ;
+   basic_block bb;
    int old_index;
+   unsigned old_succs;
  
    struct cmpd_local_params lparams;
    expr_def _x;
*************** code_motion_process_successors (insn_t i
*** 5871,5877 ****
       Rescan successors in this case.  */     
  
   rescan:
!   old_index = BLOCK_FOR_INSN (insn)->index; 
    
    FOR_EACH_SUCC_1 (succ, succ_i, insn, code_motion_path_driver_info->succ_flags)
      {
--- 5904,5912 ----
       Rescan successors in this case.  */     
  
   rescan:
!   bb = BLOCK_FOR_INSN (insn);
!   old_index = bb->index; 
!   old_succs = EDGE_COUNT (bb->succs);
    
    FOR_EACH_SUCC_1 (succ, succ_i, insn, code_motion_path_driver_info->succ_flags)
      {
*************** code_motion_process_successors (insn_t i
*** 5897,5903 ****
        else if (b == -1 && res != 1)
          res = b;
  
!       if (BLOCK_FOR_INSN (insn)->index != old_index)
          goto rescan;
      }
  
--- 5932,5941 ----
        else if (b == -1 && res != 1)
          res = b;
  
!       /* We have simplified the control flow below this point.  In this case,
!          the iterator becomes invalid.  We need to try again.  */
!       if (BLOCK_FOR_INSN (insn)->index != old_index
!           || EDGE_COUNT (bb->succs) != old_succs)
          goto rescan;
      }
  
*************** code_motion_path_driver (insn_t insn, av
*** 5993,5999 ****
    if (code_motion_path_driver_info->on_enter)
      code_motion_path_driver_info->on_enter (insn, local_params_in,
                                              static_params, false);
-   local_params_in->generated_nop = NULL;
    orig_ops = av_set_copy (orig_ops);
  
    /* Filter the orig_ops set.  */
--- 6031,6036 ----
*************** code_motion_path_driver (insn_t insn, av
*** 6063,6070 ****
  	     loop).  */
            if (insn == first_insn)
              {
-               removed_last_insn = sel_bb_end_p (last_insn);
                first_insn = NEXT_INSN (last_insn);
              }
  	  insn = last_insn;
  	  break;
--- 6100,6107 ----
  	     loop).  */
            if (insn == first_insn)
              {
                first_insn = NEXT_INSN (last_insn);
+               removed_last_insn = sel_bb_end_p (last_insn);
              }
  	  insn = last_insn;
  	  break;
*************** move_op (insn_t insn, av_set_t orig_ops,
*** 6172,6182 ****
--- 6209,6221 ----
  {
    struct moveop_static_params sparams;
    struct cmpd_local_params lparams;
+   bool res;
  
    /* Init params for code_motion_path_driver.  */ 
    sparams.dest = dest;
    sparams.c_expr = c_expr;
    sparams.uid = INSN_UID (EXPR_INSN_RTX (expr_vliw));
+   sparams.was_renamed = false;
    lparams.e1 = NULL;
  
    /* We haven't visited any blocks yet.  */
*************** move_op (insn_t insn, av_set_t orig_ops,
*** 6184,6191 ****
    
    /* Set appropriate hooks and data.  */
    code_motion_path_driver_info = &move_op_hooks;
!   
!   return code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
  }
  
  
--- 6223,6234 ----
    
    /* Set approriate hooks and data.  */
    code_motion_path_driver_info = &move_op_hooks;
!   res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
! 
!   if (sparams.was_renamed)
!     EXPR_WAS_RENAMED (expr_vliw) = true;
! 
!   return res;
  }
  
  
*************** init_seqno_1 (basic_block bb, sbitmap vi
*** 6230,6236 ****
  
  /* Initialize seqnos for the current region.  */
  static int
! init_seqno (bool rescheduling_p, bitmap blocks_to_reschedule, basic_block from)
  {
    sbitmap visited_bbs;
    bitmap_iterator bi;
--- 6273,6279 ----
  
  /* Initialize seqnos for the current region.  */
  static int
! init_seqno (int number_of_insns, bitmap blocks_to_reschedule, basic_block from)
  {
    sbitmap visited_bbs;
    bitmap_iterator bi;
*************** init_seqno (bool rescheduling_p, bitmap 
*** 6253,6266 ****
        from = EBB_FIRST_BB (0);
      }
  
!   cur_seqno = sched_max_luid - 1;
! 
    init_seqno_1 (from, visited_bbs, blocks_to_reschedule);
! 
!   gcc_assert (rescheduling_p || cur_seqno == 0);
  
    sbitmap_free (visited_bbs);
- 
    return sched_max_luid - 1;
  }
  
--- 6296,6306 ----
        from = EBB_FIRST_BB (0);
      }
  
!   cur_seqno = number_of_insns > 0 ? number_of_insns : sched_max_luid - 1;
    init_seqno_1 (from, visited_bbs, blocks_to_reschedule);
!   gcc_assert (cur_seqno == 0 || number_of_insns == 0);
  
    sbitmap_free (visited_bbs);
    return sched_max_luid - 1;
  }
  
*************** setup_current_loop_nest (int rgn)
*** 6310,6315 ****
--- 6350,6372 ----
    gcc_assert (LOOP_MARKED_FOR_PIPELINING_P (current_loop_nest));
  }
  
+ /* Purge meaningless empty blocks in the middle of a region.  */
+ static void
+ purge_empty_blocks (void)
+ {
+   int i ;
+ 
+   for (i = 1; i < current_nr_blocks; )
+     {
+       basic_block b = BASIC_BLOCK (BB_TO_BLOCK (i));
+ 
+       if (maybe_tidy_empty_bb (b))
+ 	continue;
+ 
+       i++;
+     }
+ }
+ 
  /* Compute instruction priorities for current region.  */
  static void
  sel_compute_priorities (int rgn)
*************** sel_region_init (int rgn)
*** 6336,6343 ****
  
    rgn_setup_region (rgn);
  
!   if (sched_is_disabled_for_current_region_p ()
!       || current_region_empty_p ())
      return true;
  
    if (flag_sel_sched_pipelining)
--- 6393,6402 ----
  
    rgn_setup_region (rgn);
  
!   /* Even if sched_is_disabled_for_current_region_p() is true, we still 
!      do region initialization here so the region can be bundled correctly,
!      but we'll skip the scheduling in sel_sched_region ().  */
!   if (current_region_empty_p ())
      return true;
  
    if (flag_sel_sched_pipelining)
*************** sel_region_init (int rgn)
*** 6373,6396 ****
  
    blocks_to_reschedule = BITMAP_ALLOC (NULL);
  
-   /* Purge meaningless empty blocks in the middle of a region.  */
-   for (i = 1; i < current_nr_blocks; )
-     {
-       basic_block b = BASIC_BLOCK (BB_TO_BLOCK (i));
-       
-       if (single_pred_p (b)
-           && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
-           && !LABEL_P (BB_HEAD (b))
-           && sel_bb_empty_p (b)
-           && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
-           && in_current_region_p (single_succ (b)))
-         {
-           sel_remove_empty_bb (b, false, true);
-           continue;
-         }
-       i++;
-     }
- 
    /* Init correct liveness sets on each instruction of a single-block loop.
       This is the only situation when we can't update liveness when calling
       compute_live for the first insn of the loop.  */
--- 6432,6437 ----
*************** sel_region_init (int rgn)
*** 6405,6411 ****
            (sel_bb_head (BASIC_BLOCK (BB_TO_BLOCK (header))));
      }
        
-   
    /* Set hooks so that no newly generated insn will go out unnoticed.  */
    sel_register_rtl_hooks ();
    sel_register_cfg_hooks ();
--- 6446,6451 ----
*************** find_ebb_boundaries (basic_block bb, bit
*** 6469,6474 ****
--- 6509,6515 ----
    do
      {
        bitmap_set_bit (scheduled_blocks, BLOCK_TO_BB (bb1->index));
+ 
        if (sched_verbose >= 2)
  	sel_print ("%d; ", bb1->index);
      }
*************** sel_region_finish (void)
*** 6712,6718 ****
    BITMAP_FREE (current_copies);
    BITMAP_FREE (current_originators);
    BITMAP_FREE (code_motion_visited_blocks);
!   free_blocked_exprs ();
  
    /* If LV_SET of the region head should be updated, do it now because
       there will be no other chance.  */
--- 6753,6760 ----
    BITMAP_FREE (current_copies);
    BITMAP_FREE (current_originators);
    BITMAP_FREE (code_motion_visited_blocks);
!   vinsn_vec_free (&vec_bookkeeping_blocked_vinsns);
!   vinsn_vec_free (&vec_target_unavailable_vinsns);
  
    /* If LV_SET of the region head should be updated, do it now because
       there will be no other chance.  */
*************** schedule_on_fences (flist_t fences, int 
*** 6813,6819 ****
  
    /* All av_sets are invalidated by GLOBAL_LEVEL increase, thus we
       don't need to keep bookkeeping-invalidated exprs any more.  */
!   clear_blocked_exprs ();
  }
  
  /* Calculate MIN_SEQNO and MAX_SEQNO.  */
--- 6855,6862 ----
  
    /* All av_sets are invalidated by GLOBAL_LEVEL increase, thus we
       don't need to keep bookkeeping-invalidated exprs any more.  */
!   vinsn_vec_clear (&vec_bookkeeping_blocked_vinsns);
!   vinsn_vec_clear (&vec_target_unavailable_vinsns);
  }
  
  /* Calculate MIN_SEQNO and MAX_SEQNO.  */
*************** static void
*** 6958,6971 ****
  sel_sched_region_1 (void)
  {
    struct sel_sched_region_2_data_def _data, *data = &_data;
  
!   data->orig_max_seqno = init_seqno (false, NULL, NULL);
!   gcc_assert (data->orig_max_seqno >= 1);
  
!   fences = NULL;
  
    /* When pipelining outer loops, create fences on the loop header,
       not preheader.  */
    if (current_loop_nest)
      init_fences (BB_END (EBB_FIRST_BB (0)));
    else
--- 7001,7021 ----
  sel_sched_region_1 (void)
  {
    struct sel_sched_region_2_data_def _data, *data = &_data;
+   int number_of_insns;
  
!   /* Remove empty blocks that might be in the region from the beginning.  
!      We need to save sched_max_luid before that, as it actually shows
!      the number of insns in the region, and purge_empty_blocks can 
!      alter it.  */
!   number_of_insns = sched_max_luid - 1;
!   purge_empty_blocks ();
  
!   data->orig_max_seqno = init_seqno (number_of_insns, NULL, NULL);
!   gcc_assert (data->orig_max_seqno >= 1);
  
    /* When pipelining outer loops, create fences on the loop header,
       not preheader.  */
+   fences = NULL;
    if (current_loop_nest)
      init_fences (BB_END (EBB_FIRST_BB (0)));
    else
*************** sel_sched_region_1 (void)
*** 7037,7043 ****
                      {
                        flist_tail_init (new_fences);
  
! 		      data->orig_max_seqno = init_seqno (true, blocks_to_reschedule, bb);
  
                        /* Mark BB as head of the new ebb.  */
                        bitmap_set_bit (forced_ebb_heads, bb->index);
--- 7087,7093 ----
                      {
                        flist_tail_init (new_fences);
  
! 		      data->orig_max_seqno = init_seqno (0, blocks_to_reschedule, bb);
  
                        /* Mark BB as head of the new ebb.  */
                        bitmap_set_bit (forced_ebb_heads, bb->index);
*************** sel_sched_region_1 (void)
*** 7120,7126 ****
            for (i = BLOCK_TO_BB (loop_entry->index); i < current_nr_blocks; i++)
  	    clear_outdated_rtx_info (EBB_FIRST_BB (i));
  
!           data->orig_max_seqno = init_seqno (true, NULL, NULL);
            flist_tail_init (new_fences);
  
            /* Mark BB as head of the new ebb.  */
--- 7170,7176 ----
            for (i = BLOCK_TO_BB (loop_entry->index); i < current_nr_blocks; i++)
  	    clear_outdated_rtx_info (EBB_FIRST_BB (i));
  
!           data->orig_max_seqno = init_seqno (0, NULL, NULL);
            flist_tail_init (new_fences);
  
            /* Mark BB as head of the new ebb.  */
*************** sel_sched_region (int rgn)
*** 7170,7175 ****
--- 7220,7228 ----
      else
        schedule_p = (region_start > region) || (region > region_stop);
  
+     if (sched_is_disabled_for_current_region_p ())
+       schedule_p = false;
+ 
      if (schedule_p)
        sel_sched_region_1 ();
      else
Index: gcc/opts.c
===================================================================
*** gcc/opts.c	(revision 136168)
--- gcc/opts.c	(revision 136169)
*************** common_handle_option (size_t scode, cons
*** 1799,1804 ****
--- 1799,1805 ----
        set_random_seed (arg);
        break;
  
+     case OPT_fselective_scheduling:
      case OPT_fselective_scheduling2:
        sel_sched_switch_set = true;
        break;
Index: gcc/sel-sched-ir.c
===================================================================
*** gcc/sel-sched-ir.c	(revision 136168)
--- gcc/sel-sched-ir.c	(revision 136169)
*************** sel_move_insn (expr_t expr, int seqno, i
*** 1360,1365 ****
--- 1360,1367 ----
    basic_block bb = BLOCK_FOR_INSN (after);
    insn_t next = NEXT_INSN (after);
  
+   /* Assert that in move_op we disconnected this insn properly.  */
+   gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL);
    PREV_INSN (insn) = after;
    NEXT_INSN (insn) = next;
  
*************** merge_expr_data (expr_t to, expr_t from,
*** 1779,1785 ****
                              phist->spec_ds);
  
    EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
!   EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (to);
    EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);
  
    update_target_availability (to, from, split_point);
--- 1781,1787 ----
                              phist->spec_ds);
  
    EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from);
!   EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from);
    EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from);
  
    update_target_availability (to, from, split_point);
*************** set_unavailable_target_for_expr (expr_t 
*** 1825,1831 ****
      {
        if (REG_P (EXPR_LHS (expr))
            && bitmap_bit_p (lv_set, REGNO (EXPR_LHS (expr))))
!         EXPR_TARGET_AVAILABLE (expr) = false;
      }
    else
      {
--- 1827,1858 ----
      {
        if (REG_P (EXPR_LHS (expr))
            && bitmap_bit_p (lv_set, REGNO (EXPR_LHS (expr))))
! 	{
! 	  /* If it's an insn like r1 = use (r1, ...), and it exists in 
! 	     different forms in each of the av_sets being merged, we can't say 
! 	     whether original destination register is available or not.  
! 	     However, this still works if destination register is not used 
! 	     in the original expression: if the branch at which LV_SET we're
! 	     looking here is not actually 'other branch' in sense that same
! 	     expression is available through it (but it can't be determined 
! 	     at computation stage because of transformations on one of the
! 	     branches), it still won't affect the availability.  
! 	     Liveness of a register somewhere on a code motion path means 
! 	     it's either read somewhere on a code motion path, live on 
! 	     'other' branch, live at the point immediately following
! 	     the original operation, or is read by the original operation.
! 	     The latter case is filtered out in the condition below.
! 	     It still doesn't cover the case when register is defined and used
! 	     somewhere within the code motion path, and in this case we could
! 	     miss a unifying code motion along both branches using a renamed
! 	     register, but it won't affect a code correctness since upon
! 	     an actual code motion a bookkeeping code would be generated.  */
! 	  if (bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)), 
! 			    REGNO (EXPR_LHS (expr))))
! 	    EXPR_TARGET_AVAILABLE (expr) = -1;
! 	  else
! 	    EXPR_TARGET_AVAILABLE (expr) = false;
! 	}
      }
    else
      {
*************** verify_backedges (void)
*** 3489,3495 ****
  #endif
  
  /* Tidy the possibly empty block BB.  */
! static bool
  maybe_tidy_empty_bb (basic_block bb)
  {
    basic_block succ_bb, pred_bb;
--- 3516,3522 ----
  #endif
  
  /* Tidy the possibly empty block BB.  */
! bool
  maybe_tidy_empty_bb (basic_block bb)
  {
    basic_block succ_bb, pred_bb;
*************** maybe_tidy_empty_bb (basic_block bb)
*** 3504,3509 ****
--- 3531,3538 ----
                || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU))))
      return false;
  
+   free_data_sets (bb);
+ 
    /* Do not delete BB if it has more than one successor.
       That can occur when we moving a jump.  */
    if (!single_succ_p (bb))
*************** tidy_control_flow (basic_block xbb, bool
*** 3588,3593 ****
--- 3617,3623 ----
       basic block we will not get a jump to next instruction, which 
       can be harmful.  */
    if (sel_bb_head (xbb) == sel_bb_end (xbb) 
+       && !sel_bb_empty_p (xbb)
        && INSN_NOP_P (sel_bb_end (xbb))
        /* Flow goes fallthru from current block to the next.  */
        && EDGE_COUNT (xbb->succs) == 1
*************** tidy_control_flow (basic_block xbb, bool
*** 3611,3625 ****
           remove it too.  */
        if (sel_bb_empty_p (xbb->prev_bb))
          {
-           free_data_sets (xbb->prev_bb);
            changed = maybe_tidy_empty_bb (xbb->prev_bb);
          }
      }
  
- #ifdef ENABLE_CHECKING
-   verify_backedges ();
- #endif
- 
    return changed;
  }
  
--- 3641,3650 ----
*************** sel_remove_insn (insn_t insn, bool only_
*** 3660,3668 ****
    PREV_INSN (insn) = NULL_RTX;
    NEXT_INSN (insn) = NULL_RTX;
  
-   if (sel_bb_empty_p (bb))
-     free_data_sets (bb);
- 
    return tidy_control_flow (bb, full_tidying);
  }
  
--- 3685,3690 ----
*************** remove_empty_bb (basic_block empty_bb, b
*** 5235,5241 ****
  	succ = NULL;
  
        if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL)
! 	redirect_edge_succ_nodup (EDGE_PRED (empty_bb, 0), succ);
  
        if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL)
  	{
--- 5257,5263 ----
  	succ = NULL;
  
        if (EDGE_COUNT (empty_bb->preds) > 0 && succ != NULL)
!         sel_redirect_edge_and_branch (EDGE_PRED (empty_bb, 0), succ);
  
        if (EDGE_COUNT (empty_bb->succs) > 0 && pred != NULL)
  	{
*************** sel_redirect_edge_and_branch (edge e, ba
*** 5525,5531 ****
        gcc_assert (loop_latch_edge (current_loop_nest));
      }
  
!   gcc_assert (ee == e && last_added_blocks == NULL);
  
    /* Now the CFG has been updated, and we can init data for the newly 
       created insns.  */
--- 5547,5553 ----
        gcc_assert (loop_latch_edge (current_loop_nest));
      }
  
!   gcc_assert (last_added_blocks == NULL);
  
    /* Now the CFG has been updated, and we can init data for the newly 
       created insns.  */
*************** jump_leads_only_to_bb_p (insn_t jump, ba
*** 6182,6188 ****
    /* It is not jump, jump with side-effects or jump can lead to several 
       basic blocks.  */
    if (!onlyjump_p (jump)
!       || !any_uncondjump_p(jump))
      return false;
  
    /* Several outgoing edges, abnormal edge or destination of jump is 
--- 6204,6210 ----
    /* It is not jump, jump with side-effects or jump can lead to several 
       basic blocks.  */
    if (!onlyjump_p (jump)
!       || !any_uncondjump_p (jump))
      return false;
  
    /* Several outgoing edges, abnormal edge or destination of jump is 
Index: gcc/sel-sched-ir.h
===================================================================
*** gcc/sel-sched-ir.h	(revision 136168)
--- gcc/sel-sched-ir.h	(revision 136169)
*************** extern void free_bb_note_pool (void);
*** 1592,1597 ****
--- 1592,1598 ----
  
  extern basic_block sel_create_basic_block_before (basic_block);
  extern void sel_remove_empty_bb (basic_block, bool, bool);
+ extern bool maybe_tidy_empty_bb (basic_block bb);
  extern basic_block sel_split_edge (edge);
  extern basic_block sel_create_recovery_block (insn_t);
  extern void sel_merge_blocks (basic_block, basic_block);
Index: gcc/sched-deps.c
===================================================================
*** gcc/sched-deps.c	(revision 136168)
--- gcc/sched-deps.c	(revision 136169)
*************** extend_deps_reg_info (struct deps *deps,
*** 1673,1678 ****
--- 1673,1695 ----
      }
  }
  
+ /* Extends REG_INFO_P if needed.  */
+ void
+ maybe_extend_reg_info_p (void)
+ {
+   /* Extend REG_INFO_P, if needed.  */
+   if ((unsigned int)max_regno - 1 >= reg_info_p_size)
+     {
+       size_t new_reg_info_p_size = max_regno + 128;
+ 
+       gcc_assert (!reload_completed && sel_sched_p ());
+ 
+       reg_info_p = xrecalloc (reg_info_p, new_reg_info_p_size, 
+ 			      reg_info_p_size, sizeof (*reg_info_p));
+       reg_info_p_size = new_reg_info_p_size;
+     }
+ }
+ 
  /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
     The type of the reference is specified by REF and can be SET,
     CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */
*************** sched_analyze_reg (struct deps *deps, in
*** 1686,1691 ****
--- 1703,1710 ----
        && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
      extend_deps_reg_info (deps, regno);
  
+   maybe_extend_reg_info_p ();
+ 
    /* A hard reg in a wide mode may really be multiple registers.
       If so, mark all of them just like the first.  */
    if (regno < FIRST_PSEUDO_REGISTER)
*************** sched_analyze_reg (struct deps *deps, in
*** 1737,1749 ****
  	}
  
        /* Don't let it cross a call after scheduling if it doesn't
! 	 already cross one.
! 	 If REGNO >= REG_INFO_P_SIZE, then it was introduced in selective
! 	 scheduling, and it could have happened only before reload.
! 	 Thus, we can consider INSN moveable, since reload should take care of
! 	 the all the operations renamed into new pseudos.  */
!       if ((!sel_sched_p () || regno < FIRST_PSEUDO_REGISTER)
! 	  && REG_N_CALLS_CROSSED (regno) == 0)
  	{
  	  if (!deps->readonly 
                && ref == USE)
--- 1756,1763 ----
  	}
  
        /* Don't let it cross a call after scheduling if it doesn't
! 	 already cross one.  */
!       if (REG_N_CALLS_CROSSED (regno) == 0)
  	{
  	  if (!deps->readonly 
                && ref == USE)
Index: gcc/sched-int.h
===================================================================
*** gcc/sched-int.h	(revision 136168)
--- gcc/sched-int.h	(revision 136169)
*************** extern void haifa_note_reg_set (int);
*** 1123,1128 ****
--- 1123,1130 ----
  extern void haifa_note_reg_clobber (int);
  extern void haifa_note_reg_use (int);
  
+ extern void maybe_extend_reg_info_p (void);
+ 
  extern void deps_start_bb (struct deps *, rtx);
  extern enum reg_note ds_to_dt (ds_t);
  
Index: gcc/sched-rgn.c
===================================================================
*** gcc/sched-rgn.c	(revision 136168)
--- gcc/sched-rgn.c	(revision 136169)
*************** static bool
*** 3401,3408 ****
  gate_handle_sched (void)
  {
  #ifdef INSN_SCHEDULING
!   return flag_schedule_insns && !flag_selective_scheduling 
!     && dbg_cnt (sched_func);
  #else
    return 0;
  #endif
--- 3401,3407 ----
  gate_handle_sched (void)
  {
  #ifdef INSN_SCHEDULING
!   return flag_schedule_insns && dbg_cnt (sched_func);
  #else
    return 0;
  #endif
Index: gcc/config/ia64/ia64.c
===================================================================
*** gcc/config/ia64/ia64.c	(revision 136168)
--- gcc/config/ia64/ia64.c	(revision 136169)
*************** ia64_override_options (void)
*** 5266,5272 ****
    ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
    flag_schedule_insns_after_reload = 0;
    
!   if (optimize >= 3
        && ! sel_sched_switch_set)
      {
        flag_selective_scheduling2 = 1;
--- 5266,5272 ----
    ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
    flag_schedule_insns_after_reload = 0;
    
!   if (optimize >= 3 
        && ! sel_sched_switch_set)
      {
        flag_selective_scheduling2 = 1;
*************** ia64_override_options (void)
*** 5274,5282 ****
      }
    if (flag_sel_sched_pipelining && flag_auto_inc_dec)
      {
        flag_auto_inc_dec = 0;
-       /* warning (0, "-fauto-inc-dec is turned off"
-          " when the selective pipeliner is used"); */
      }
  
    ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
--- 5274,5282 ----
      }
    if (flag_sel_sched_pipelining && flag_auto_inc_dec)
      {
+       /* FIXME: remove this when we'd implement breaking autoinsns as 
+          a transformation.  */
        flag_auto_inc_dec = 0;
      }
  
    ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
*************** ia64_set_sched_flags (spec_info_t spec_i
*** 7227,7233 ****
  	}
        
        if ((!sel_sched_p () && mflag_sched_control_spec)
! 	  || (sel_sched_p () && mflag_sel_sched_control_spec))
  	{
  	  mask |= BEGIN_CONTROL;
  	  
--- 7227,7235 ----
  	}
        
        if ((!sel_sched_p () && mflag_sched_control_spec)
! 	  || (sel_sched_p () 
!               && reload_completed
!               && mflag_sel_sched_control_spec))
  	{
  	  mask |= BEGIN_CONTROL;
  	  

Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]