This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.


[lno] Bounds analysis and other improvements


Hello,

this patch adds the following features:

-- It uses analysis of bounds on the number of iterations of a loop to
   determine whether an induction variable can be computed in
   a wider mode (see the example after this list).  This enables strength
   reduction on architectures where sizeof (void *) != sizeof (int).

-- It enables handling of hard registers in invariant motion.
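
To illustrate the first item, here is a small sketch (not part of the
patch; the function and variable names are made up) of the kind of loop
it helps on a 64-bit target:

    /* With a 32-bit "int" and 64-bit pointers, rewriting the access as a
       pointer induction variable is only safe if "i" cannot wrap.  The
       bound derived from "i < n" (at most INT_MAX iterations) is what lets
       the new can_count_iv_in_wider_type check prove that, so ivopts can
       strength-reduce "a[i]" into a pointer bumped by sizeof (double)
       each iteration.  */
    void
    scale (double *a, int n)
    {
      int i;

      for (i = 0; i < n; i++)
        a[i] = a[i] * 2.0;
    }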

Some smaller improvements and bugfixes:

-- The code to determine the number of iterations of a loop was moved
   to tree-ssa-loop-niter.c.
-- Fixed some type handling bugs in tree-chrec.c.
-- Fixed a bug that caused the newly created induction variables not to
   be coalesced into one variable.

Zdenek

Index: ChangeLog.lno
===================================================================
RCS file: /cvs/gcc/gcc/gcc/Attic/ChangeLog.lno,v
retrieving revision 1.1.2.150
diff -c -3 -p -r1.1.2.150 ChangeLog.lno
*** ChangeLog.lno	27 Apr 2004 14:10:15 -0000	1.1.2.150
--- ChangeLog.lno	30 Apr 2004 23:36:55 -0000
***************
*** 1,3 ****
--- 1,53 ----
+ 2004-04-30  Zdenek Dvorak  <rakdver@atrey.karlin.mff.cuni.cz>
+ 
+ 	* tree-ssa-loop-niter.c: New file.
+ 	* Makefile.in (tree-ssa-loop-niter.o): New.
+ 	* cfgloop.h (struct loop): Add bounds field.
+ 	* df.c (df_reg_clobber_gen): Removed.
+ 	(df_bb_rd_local_compute, df_insn_refs_record, df_rd_local_compute):
+ 	Make more effective for hard regs.
+ 	* loop-invariant.c (check_maybe_invariant, find_defs,
+ 	find_invariant_insn): Handle hard regs.
+ 	* tree-chrec.c (chrec_fold_plus_poly_poly, chrec_fold_multiply,
+ 	chrec_fold_multiply, chrec_merge): Handle types correctly.
+ 	(chrec_convert): Use count_ev_in_wider_type.
+ 	* tree-chrec.h (count_ev_in_wider_type): Declare.
+ 	* tree-flow.h (struct tree_niter_desc): Moved from
+ 	tree-ssa-loop-ivopts.c.
+ 	(number_of_iterations_cond, number_of_iterations_exit,
+ 	loop_niter_by_eval, find_loop_niter_by_eval,
+ 	estimate_numbers_of_iterations, can_count_iv_in_wider_type,
+ 	free_numbers_of_iterations_estimates): Declare.
+ 	* tree-scalar-evolution.c (count_ev_in_wider_type, scev_reset,
+ 	simple_iv): New.
+ 	(number_of_iterations_in_loop): Check that the exit condition
+ 	is tested in every iteration.
+ 	* tree-scalar-evolution.h (scev_reset, simple_iv): Declare.
+ 	* tree-ssa-loop-ivcanon.c (MAX_ITERATIONS_TO_TRACK,
+ 	chain_of_csts_start, get_base_for, get_val_for,
+ 	loop_niter_by_eval, find_loop_niter_by_eval): Moved to
+ 	tree-ssa-loop-niter.c.
+ 	* tree-ssa-loop-ivopts.c (struct tree_niter_desc): Moved to
+ 	tree-flow.h.
+ 	(force_gimple_operand): Accept a variable for the target of
+ 	the assignment.
+ 	(create_iv, rewrite_use_nonlinear_expr,
+ 	rewrite_use_address, rewrite_use_compare,
+ 	rewrite_use_outer): Changed due to this.
+ 	(tree_ssa_iv_optimize_init): Call estimate_numbers_of_iterations.
+ 	(get_var_def): Removed.
+ 	(find_givs_in_stmt_scev): Use simple_iv.
+ 	(inverse, number_of_iterations_cond): Moved to tree-ssa-loop-niter.c.
+ 	(determine_number_of_iterations): Use number_of_iterations_exit.
+ 	(idx_find_step, find_interesting_uses_address): Use
+ 	can_count_iv_in_wider_type.
+ 	(force_var_cost): Determine the costs more precisely.
+ 	(tree_ssa_iv_optimize_finalize): Call
+ 	free_numbers_of_iterations_estimates.
+ 	(tree_ssa_iv_optimize_loop): Call scev_reset.
+ 	* varasm.c (force_const_mem): Set MEM_NOTRAP_P flag.
+ 	* config/rs6000/rs6000.c (rs6000_emit_move): Set MEM_NOTRAP_P flag.
+ 
  2004-04-27  Sebastian Pop  <pop@cri.ensmp.fr>
  
  	* lambda-code.c (build_int_cst): Moved...
Index: Makefile.in
===================================================================
RCS file: /cvs/gcc/gcc/gcc/Makefile.in,v
retrieving revision 1.903.2.158.2.26
diff -c -3 -p -r1.903.2.158.2.26 Makefile.in
*** Makefile.in	25 Apr 2004 20:33:00 -0000	1.903.2.158.2.26
--- Makefile.in	30 Apr 2004 23:36:55 -0000
*************** OBJS-common = \
*** 878,884 ****
   tree-ssa-phiopt.o tree-ssa-forwprop.o tree-nested.o tree-ssa-dse.o	   \
   tree-ssa-dom.o domwalk.o tree-tailcall.o gimple-low.o tree-iterator.o	   \
   tree-phinodes.o tree-ssanames.o tree-sra.o tree-complex.o tree-ssa-loop.o \
!  tree-ssa-loop-unswitch.o						   \
   tree-vectorizer.o tree-ssa-loop-im.o tree-ssa-loop-manip.o		   \
   tree-ssa-loop-ivopts.o tree-ssa-loop-ivcanon.o tree-dg.o loop-invariant.o \
   alias.o bb-reorder.o bitmap.o builtins.o caller-save.o calls.o	  	   \
--- 878,884 ----
   tree-ssa-phiopt.o tree-ssa-forwprop.o tree-nested.o tree-ssa-dse.o	   \
   tree-ssa-dom.o domwalk.o tree-tailcall.o gimple-low.o tree-iterator.o	   \
   tree-phinodes.o tree-ssanames.o tree-sra.o tree-complex.o tree-ssa-loop.o \
!  tree-ssa-loop-unswitch.o tree-ssa-loop-niter.o				   \
   tree-vectorizer.o tree-ssa-loop-im.o tree-ssa-loop-manip.o		   \
   tree-ssa-loop-ivopts.o tree-ssa-loop-ivcanon.o tree-dg.o loop-invariant.o \
   alias.o bb-reorder.o bitmap.o builtins.o caller-save.o calls.o	  	   \
*************** tree-ssa-loop-manip.o : tree-ssa-loop-ma
*** 1669,1674 ****
--- 1669,1678 ----
     $(SYSTEM_H) $(RTL_H) $(TREE_H) $(TM_P_H) cfgloop.h \
     output.h diagnostic.h $(TIMEVAR_H) $(TM_H) coretypes.h $(TREE_DUMP_H) \
     tree-pass.h
+ tree-ssa-loop-niter.o : tree-ssa-loop-niter.c $(TREE_FLOW_H) $(CONFIG_H) \
+    $(SYSTEM_H) $(RTL_H) $(TREE_H) $(TM_P_H) cfgloop.h $(PARAMS_H) tree-inline.h \
+    output.h diagnostic.h $(TM_H) coretypes.h $(TREE_DUMP_H) flags.h \
+    tree-pass.h $(SCEV_H)		 
  tree-ssa-alias.o : tree-ssa-alias.c $(TREE_FLOW_H) $(CONFIG_H) $(SYSTEM_H) \
     $(RTL_H) $(TREE_H) $(TM_P_H) $(EXPR_H) $(GGC_H) tree-inline.h flags.h \
     function.h $(TIMEVAR_H) tree-alias-common.h convert.h $(TM_H) coretypes.h \
Index: cfgloop.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/cfgloop.h,v
retrieving revision 1.2.4.9.2.13
diff -c -3 -p -r1.2.4.9.2.13 cfgloop.h
*** cfgloop.h	21 Mar 2004 03:19:43 -0000	1.2.4.9.2.13
--- cfgloop.h	30 Apr 2004 23:36:55 -0000
*************** struct loop
*** 174,179 ****
--- 174,182 ----
       this field directly: number_of_iterations_in_loop computes and
       caches the computed information in this field.  */
    tree nb_iterations;
+ 
+   /* Upper bound on number of iterations of a loop.  */
+   struct nb_iter_bound *bounds;
  };
  
  /* Flags for state of loop structure.  */
Index: df.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/df.c,v
retrieving revision 1.32.2.12.4.6
diff -c -3 -p -r1.32.2.12.4.6 df.c
*** df.c	29 Mar 2004 17:52:31 -0000	1.32.2.12.4.6
--- df.c	30 Apr 2004 23:36:55 -0000
*************** static void df_bitmaps_free (struct df *
*** 209,215 ****
  static void df_free (struct df *);
  static void df_alloc (struct df *, int);
  
- static rtx df_reg_clobber_gen (unsigned int);
  static rtx df_reg_use_gen (unsigned int);
  
  static inline struct df_link *df_link_create (struct ref *, struct df_link *);
--- 209,214 ----
*************** static void df_bb_du_chain_create (struc
*** 244,250 ****
  static void df_du_chain_create (struct df *, bitmap);
  static void df_bb_ud_chain_create (struct df *, basic_block);
  static void df_ud_chain_create (struct df *, bitmap);
! static void df_bb_rd_local_compute (struct df *, basic_block);
  static void df_rd_local_compute (struct df *, bitmap);
  static void df_bb_ru_local_compute (struct df *, basic_block);
  static void df_ru_local_compute (struct df *, bitmap);
--- 243,249 ----
  static void df_du_chain_create (struct df *, bitmap);
  static void df_bb_ud_chain_create (struct df *, basic_block);
  static void df_ud_chain_create (struct df *, bitmap);
! static void df_bb_rd_local_compute (struct df *, basic_block, bitmap);
  static void df_rd_local_compute (struct df *, bitmap);
  static void df_bb_ru_local_compute (struct df *, basic_block);
  static void df_ru_local_compute (struct df *, bitmap);
*************** static rtx df_reg_use_gen (unsigned int 
*** 609,627 ****
    use = gen_rtx_USE (GET_MODE (reg), reg);
    return use;
  }
- 
- 
- /* Return a CLOBBER for register REGNO.  */
- static rtx df_reg_clobber_gen (unsigned int regno)
- {
-   rtx reg;
-   rtx use;
- 
-   reg = regno_reg_rtx[regno];
- 
-   use = gen_rtx_CLOBBER (GET_MODE (reg), reg);
-   return use;
- }
  
  /* Local chain manipulation routines.  */
  
--- 608,613 ----
*************** df_insn_refs_record (struct df *df, basi
*** 1226,1231 ****
--- 1212,1222 ----
  	{
  	  rtx note;
  
+ 	  /* The problem here is that there are awfully many hard registers
+ 	     clobbered by call and "defs" created through them are not
+ 	     interesting.  So we must just make sure we include them when
+ 	     computing kill bitmaps.  */
+ #if 0
  	  if (df->flags & DF_HARD_REGS)
  	    {
  	      /* Kill all registers invalidated by a call.  */
*************** df_insn_refs_record (struct df *df, basi
*** 1236,1241 ****
--- 1227,1233 ----
  		    df_defs_record (df, reg_clob, bb, insn);
  		  }
  	    }
+ #endif
  
  	  /* There may be extra registers to be clobbered.  */
  	  for (note = CALL_INSN_FUNCTION_USAGE (insn);
*************** df_lr_transfer_function (int bb ATTRIBUT
*** 1634,1645 ****
  
  /* Compute local reaching def info for basic block BB.  */
  static void
! df_bb_rd_local_compute (struct df *df, basic_block bb)
  {
    struct bb_info *bb_info = DF_BB_INFO (df, bb);
    rtx insn;
  
!   FOR_BB_INSNS (bb, insn)
      {
        unsigned int uid = INSN_UID (insn);
        struct df_link *def_link;
--- 1626,1639 ----
  
  /* Compute local reaching def info for basic block BB.  */
  static void
! df_bb_rd_local_compute (struct df *df, basic_block bb, bitmap call_killed_defs)
  {
    struct bb_info *bb_info = DF_BB_INFO (df, bb);
    rtx insn;
+   bitmap seen = BITMAP_XMALLOC ();
+   bool call_seen = false;
  
!   FOR_BB_INSNS_REVERSE (bb, insn)
      {
        unsigned int uid = INSN_UID (insn);
        struct df_link *def_link;
*************** df_bb_rd_local_compute (struct df *df, b
*** 1653,1658 ****
--- 1647,1658 ----
  	  unsigned int regno = DF_REF_REGNO (def);
  	  struct df_link *def2_link;
  
+ 	  if (bitmap_bit_p (seen, regno)
+ 	      || (call_seen
+ 		  && regno < FIRST_PSEUDO_REGISTER
+ 		  && TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)))
+ 	    continue;
+ 
  	  for (def2_link = df->regs[regno].defs; def2_link;
  	       def2_link = def2_link->next)
  	    {
*************** df_bb_rd_local_compute (struct df *df, b
*** 1663,1676 ****
  		 be killed by this BB but it keeps things a lot
  		 simpler.  */
  	      bitmap_set_bit (bb_info->rd_kill, DF_REF_ID (def2));
- 
- 	      /* Zap from the set of gens for this BB.  */
- 	      bitmap_clear_bit (bb_info->rd_gen, DF_REF_ID (def2));
  	    }
  
  	  bitmap_set_bit (bb_info->rd_gen, DF_REF_ID (def));
  	}
      }
  }
  
  
--- 1663,1683 ----
  		 be killed by this BB but it keeps things a lot
  		 simpler.  */
  	      bitmap_set_bit (bb_info->rd_kill, DF_REF_ID (def2));
  	    }
  
  	  bitmap_set_bit (bb_info->rd_gen, DF_REF_ID (def));
+ 	  bitmap_set_bit (seen, regno);
+ 	}
+ 
+       if (GET_CODE (insn) == CALL_INSN && (df->flags & DF_HARD_REGS))
+ 	{
+ 	  bitmap_operation (bb_info->rd_kill, bb_info->rd_kill,
+ 			    call_killed_defs, BITMAP_IOR);
+ 	  call_seen = 1;
  	}
      }
+ 
+   BITMAP_XFREE (seen);
  }
  
  
*************** static void
*** 1679,1689 ****
  df_rd_local_compute (struct df *df, bitmap blocks)
  {
    basic_block bb;
  
    FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
    {
!     df_bb_rd_local_compute (df, bb);
    });
  }
  
  
--- 1686,1717 ----
  df_rd_local_compute (struct df *df, bitmap blocks)
  {
    basic_block bb;
+   bitmap killed_by_call = NULL;
+   unsigned regno;
+   struct df_link *def_link;
+ 
+   if (df->flags & DF_HARD_REGS)
+     {
+       killed_by_call = BITMAP_XMALLOC ();
+       for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ 	{
+ 	  if (!TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
+ 	    continue;
+ 	  
+ 	  for (def_link = df->regs[regno].defs;
+ 	       def_link;
+ 	       def_link = def_link->next)
+ 	    bitmap_set_bit (killed_by_call, DF_REF_ID (def_link->ref));
+ 	}
+     }
  
    FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
    {
!     df_bb_rd_local_compute (df, bb, killed_by_call);
    });
+ 
+   if (df->flags & DF_HARD_REGS)
+     BITMAP_XFREE (killed_by_call);
  }
  
  
*************** df_analyze_subcfg (struct df *df, bitmap
*** 2374,2379 ****
--- 2402,2409 ----
  	    }
  	}
      }
+   
+   df->flags = flags;
  
    df_reg_def_chain_clean (df);
    df_reg_use_chain_clean (df);
Index: loop-invariant.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/Attic/loop-invariant.c,v
retrieving revision 1.1.4.7
diff -c -3 -p -r1.1.4.7 loop-invariant.c
*** loop-invariant.c	15 Apr 2004 12:59:17 -0000	1.1.4.7
--- loop-invariant.c	30 Apr 2004 23:36:55 -0000
*************** check_maybe_invariant (rtx x)
*** 145,163 ****
        return false;
  
      case REG:
-       if (HARD_REGISTER_P (x))
- 	{
- 	  /* TODO -- handling of hard regs.  It should not be hard due to usage
- 	     of df.c, but don't forget to include patches from the rtlopt branch
- 	     to speed up handling of call clobbered registers.  */
- 	  return false;
- 	}
- 
        return true;
  
      case MEM:
        /* Load/store motion is done elsewhere.  ??? Perhaps also add it here?
  	 It should not be hard, and might be faster than "elsewhere".  */
        return false;
  
      case ASM_OPERANDS:
--- 145,161 ----
        return false;
  
      case REG:
        return true;
  
      case MEM:
        /* Load/store motion is done elsewhere.  ??? Perhaps also add it here?
  	 It should not be hard, and might be faster than "elsewhere".  */
+ 
+       /* Just handle the most trivial case where we load from an unchanging
+ 	 location (most importantly, pic tables).  */
+       if (RTX_UNCHANGING_P (x))
+ 	break;
+ 
        return false;
  
      case ASM_OPERANDS:
*************** find_defs (struct loop *loop, basic_bloc
*** 305,311 ****
    for (i = 0; i < loop->num_nodes; i++)
      bitmap_set_bit (blocks, body[i]->index);
  
!   df_analyze_subcfg (df, blocks, DF_UD_CHAIN);
    BITMAP_XFREE (blocks);
  }
  
--- 303,309 ----
    for (i = 0; i < loop->num_nodes; i++)
      bitmap_set_bit (blocks, body[i]->index);
  
!   df_analyze_subcfg (df, blocks, DF_UD_CHAIN | DF_HARD_REGS);
    BITMAP_XFREE (blocks);
  }
  
*************** find_invariant_insn (rtx insn, bool alwa
*** 441,447 ****
      return;
  
    if (!always_reached
!       && may_trap_p (insn))
      return;
  
    depends_on = BITMAP_XMALLOC ();
--- 439,445 ----
      return;
  
    if (!always_reached
!       && may_trap_p (PATTERN (insn)))
      return;
  
    depends_on = BITMAP_XMALLOC ();
Index: tree-chrec.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/Attic/tree-chrec.c,v
retrieving revision 1.1.2.19
diff -c -3 -p -r1.1.2.19 tree-chrec.c
*** tree-chrec.c	27 Apr 2004 14:10:13 -0000	1.1.2.19
--- tree-chrec.c	30 Apr 2004 23:36:55 -0000
*************** Software Foundation, 59 Temple Place - S
*** 37,43 ****
  #include "tree-chrec.h"
  #include "tree-pass.h"
  
- 
  /* Extended folder for chrecs.  */
  
  /* Determines whether CST is not a constant evolution.  */
--- 37,42 ----
*************** chrec_fold_plus_poly_poly (enum tree_cod
*** 185,192 ****
  	return build_polynomial_chrec 
  	  (CHREC_VARIABLE (poly1), 
  	   chrec_fold_minus (type, poly0, CHREC_LEFT (poly1)),
! 	   chrec_fold_multiply (integer_type_node, CHREC_RIGHT (poly1), 
! 				integer_minus_one_node));
      }
    
    if (CHREC_VARIABLE (poly0) > CHREC_VARIABLE (poly1))
--- 184,191 ----
  	return build_polynomial_chrec 
  	  (CHREC_VARIABLE (poly1), 
  	   chrec_fold_minus (type, poly0, CHREC_LEFT (poly1)),
! 	   chrec_fold_multiply (type, CHREC_RIGHT (poly1), 
! 				convert (type, integer_minus_one_node)));
      }
    
    if (CHREC_VARIABLE (poly0) > CHREC_VARIABLE (poly1))
*************** chrec_fold_multiply (tree type, 
*** 1012,1018 ****
  	  if (integer_onep (op1))
  	    return op0;
  	  if (integer_zerop (op1))
! 	    return integer_zero_node;
  	  
  	  return build_polynomial_chrec 
  	    (CHREC_VARIABLE (op0), 
--- 1011,1017 ----
  	  if (integer_onep (op1))
  	    return op0;
  	  if (integer_zerop (op1))
! 	    return convert (type, integer_zero_node);
  	  
  	  return build_polynomial_chrec 
  	    (CHREC_VARIABLE (op0), 
*************** chrec_fold_multiply (tree type, 
*** 1033,1039 ****
  	  if (integer_onep (op1))
  	    return op0;
  	  if (integer_zerop (op1))
! 	    return integer_zero_node;
  	  
  	  return build_peeled_chrec 
  	    (CHREC_VARIABLE (op0),
--- 1032,1038 ----
  	  if (integer_onep (op1))
  	    return op0;
  	  if (integer_zerop (op1))
! 	    return convert (type, integer_zero_node);
  	  
  	  return build_peeled_chrec 
  	    (CHREC_VARIABLE (op0),
*************** chrec_fold_multiply (tree type, 
*** 1054,1060 ****
  	  if (integer_onep (op1))
  	    return op0;
  	  if (integer_zerop (op1))
! 	    return integer_zero_node;
  	  
  	  return build_exponential_chrec 
  	    (CHREC_VARIABLE (op0),
--- 1053,1059 ----
  	  if (integer_onep (op1))
  	    return op0;
  	  if (integer_zerop (op1))
! 	    return convert (type, integer_zero_node);
  	  
  	  return build_exponential_chrec 
  	    (CHREC_VARIABLE (op0),
*************** chrec_fold_multiply (tree type, 
*** 1090,1096 ****
  	  if (integer_onep (op1))
  	    return op0;
  	  if (integer_zerop (op1))
! 	    return integer_zero_node;
  	  return chrec_fold_multiply_ival_cst (type, op0, op1);
  	}
        
--- 1089,1095 ----
  	  if (integer_onep (op1))
  	    return op0;
  	  if (integer_zerop (op1))
! 	    return convert (type, integer_zero_node);
  	  return chrec_fold_multiply_ival_cst (type, op0, op1);
  	}
        
*************** chrec_fold_multiply (tree type, 
*** 1099,1105 ****
  	return op1;
        
        if (integer_zerop (op0))
! 	return integer_zero_node;
        
        switch (TREE_CODE (op1))
  	{
--- 1098,1104 ----
  	return op1;
        
        if (integer_zerop (op0))
! 	return convert (type, integer_zero_node);
        
        switch (TREE_CODE (op1))
  	{
*************** chrec_fold_multiply (tree type, 
*** 1128,1134 ****
  	  if (integer_onep (op1))
  	    return op0;
  	  if (integer_zerop (op1))
! 	    return integer_zero_node;
  	  return tree_fold_multiply (type, op0, op1);
  	}
      }
--- 1127,1133 ----
  	  if (integer_onep (op1))
  	    return op0;
  	  if (integer_zerop (op1))
! 	    return convert (type, integer_zero_node);
  	  return tree_fold_multiply (type, op0, op1);
  	}
      }
*************** tree
*** 1583,1588 ****
--- 1582,1589 ----
  chrec_merge (tree chrec1, 
  	     tree chrec2)
  {
+   tree type = chrec_type (chrec1);
+ 
    if (chrec1 == chrec_top
        || chrec2 == chrec_top)
      return chrec_top;
*************** chrec_merge (tree chrec1, 
*** 1613,1625 ****
  	  return build_polynomial_chrec 
  	    (CHREC_VARIABLE (chrec2),
  	     chrec_merge (chrec1, CHREC_LEFT (chrec2)),
! 	     chrec_merge (integer_zero_node, CHREC_RIGHT (chrec2)));
  	  
  	case EXPONENTIAL_CHREC:
  	  return build_exponential_chrec 
  	    (CHREC_VARIABLE (chrec2),
  	     chrec_merge (chrec1, CHREC_LEFT (chrec2)),
! 	     chrec_merge (integer_one_node, CHREC_RIGHT (chrec2)));
  
  	default:
  	  return chrec_top;
--- 1614,1628 ----
  	  return build_polynomial_chrec 
  	    (CHREC_VARIABLE (chrec2),
  	     chrec_merge (chrec1, CHREC_LEFT (chrec2)),
! 	     chrec_merge (convert (type, integer_zero_node),
! 			  CHREC_RIGHT (chrec2)));
  	  
  	case EXPONENTIAL_CHREC:
  	  return build_exponential_chrec 
  	    (CHREC_VARIABLE (chrec2),
  	     chrec_merge (chrec1, CHREC_LEFT (chrec2)),
! 	     chrec_merge (convert (type, integer_one_node),
! 			  CHREC_RIGHT (chrec2)));
  
  	default:
  	  return chrec_top;
*************** chrec_merge (tree chrec1, 
*** 1633,1639 ****
  	  return build_polynomial_chrec 
  	    (CHREC_VARIABLE (chrec1),
  	     chrec_merge (CHREC_LEFT (chrec1), chrec2),
! 	     chrec_merge (CHREC_RIGHT (chrec1), integer_zero_node));
  	  
  	case POLYNOMIAL_CHREC:
  	  if (CHREC_VARIABLE (chrec1) == CHREC_VARIABLE (chrec2))
--- 1636,1643 ----
  	  return build_polynomial_chrec 
  	    (CHREC_VARIABLE (chrec1),
  	     chrec_merge (CHREC_LEFT (chrec1), chrec2),
! 	     chrec_merge (CHREC_RIGHT (chrec1),
! 			  convert (type, integer_zero_node)));
  	  
  	case POLYNOMIAL_CHREC:
  	  if (CHREC_VARIABLE (chrec1) == CHREC_VARIABLE (chrec2))
*************** chrec_merge (tree chrec1, 
*** 1645,1656 ****
  	    return build_polynomial_chrec 
  	      (CHREC_VARIABLE (chrec2),
  	       chrec_merge (chrec1, CHREC_LEFT (chrec2)),
! 	       chrec_merge (integer_zero_node, CHREC_RIGHT (chrec2)));
  	  else
  	    return build_polynomial_chrec 
  	      (CHREC_VARIABLE (chrec1),
  	       chrec_merge (CHREC_LEFT (chrec1), chrec2),
! 	       chrec_merge (CHREC_RIGHT (chrec1), integer_zero_node));
  	  
  	case EXPONENTIAL_CHREC:
  	  return chrec_top;
--- 1649,1662 ----
  	    return build_polynomial_chrec 
  	      (CHREC_VARIABLE (chrec2),
  	       chrec_merge (chrec1, CHREC_LEFT (chrec2)),
! 	       chrec_merge (convert (type, integer_zero_node),
! 			    CHREC_RIGHT (chrec2)));
  	  else
  	    return build_polynomial_chrec 
  	      (CHREC_VARIABLE (chrec1),
  	       chrec_merge (CHREC_LEFT (chrec1), chrec2),
! 	       chrec_merge (CHREC_RIGHT (chrec1),
! 			    convert (type, integer_zero_node)));
  	  
  	case EXPONENTIAL_CHREC:
  	  return chrec_top;
*************** chrec_convert (tree type, 
*** 1965,1971 ****
      return chrec;
  
    if (TYPE_PRECISION (ct) < TYPE_PRECISION (type))
!     return convert (type, chrec);
  
    switch (TREE_CODE (chrec))
      {
--- 1971,1977 ----
      return chrec;
  
    if (TYPE_PRECISION (ct) < TYPE_PRECISION (type))
!     return count_ev_in_wider_type (type, chrec);
  
    switch (TREE_CODE (chrec))
      {
Index: tree-chrec.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/Attic/tree-chrec.h,v
retrieving revision 1.1.2.15
diff -c -3 -p -r1.1.2.15 tree-chrec.h
*** tree-chrec.h	27 Apr 2004 14:10:14 -0000	1.1.2.15
--- tree-chrec.h	30 Apr 2004 23:36:55 -0000
*************** extern tree chrec_fold_plus (tree, tree,
*** 72,77 ****
--- 72,78 ----
  extern tree chrec_fold_minus (tree, tree, tree);
  extern tree chrec_fold_multiply (tree, tree, tree);
  extern tree chrec_convert (tree, tree);
+ extern tree count_ev_in_wider_type (tree, tree);
  extern tree chrec_type (tree);
  
  /* Operations.  */
Index: tree-flow.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/Attic/tree-flow.h,v
retrieving revision 1.1.4.177.2.25
diff -c -3 -p -r1.1.4.177.2.25 tree-flow.h
*** tree-flow.h	25 Apr 2004 20:33:42 -0000	1.1.4.177.2.25
--- tree-flow.h	30 Apr 2004 23:36:55 -0000
*************** void rewrite_into_loop_closed_ssa (void)
*** 636,641 ****
--- 636,660 ----
  void verify_loop_closed_ssa (void);
  void compute_phi_arg_on_exit (edge, tree, tree);
  
+ /* Description of number of iterations of a loop.  */
+ struct tree_niter_desc
+ {
+   tree assumptions;	/* Assumptions for the number of iterations be valid.  */
+   tree may_be_zero;	/* Condition under that the loop exits in the first
+ 			   iteration.  */
+   tree niter;		/* Number of iterations.  */
+ };
+ 
+ void number_of_iterations_cond (tree, tree, tree, enum tree_code, tree, tree,
+ 				struct tree_niter_desc *);
+ bool number_of_iterations_exit (struct loop *, edge,
+ 				struct tree_niter_desc *niter);
+ tree loop_niter_by_eval (struct loop *, edge);
+ tree find_loop_niter_by_eval (struct loop *, edge *);
+ void estimate_numbers_of_iterations (struct loops *);
+ tree can_count_iv_in_wider_type (struct loop *, tree, tree, tree, tree);
+ void free_numbers_of_iterations_estimates (struct loops *);
+ 
  /* In tree-flow-inline.h  */
  static inline int phi_arg_from_edge (tree, edge);
  static inline struct phi_arg_d *phi_element_for_edge (tree, edge);
Index: tree-scalar-evolution.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/Attic/tree-scalar-evolution.c,v
retrieving revision 1.1.2.43
diff -c -3 -p -r1.1.2.43 tree-scalar-evolution.c
*** tree-scalar-evolution.c	27 Apr 2004 14:10:14 -0000	1.1.2.43
--- tree-scalar-evolution.c	30 Apr 2004 23:36:55 -0000
*************** find_var_scev_info (tree var)
*** 365,370 ****
--- 365,397 ----
    return &res->chrec;
  }
  
+ /* Tries to express CHREC in wider type TYPE.  */
+ 
+ tree
+ count_ev_in_wider_type (tree type, tree chrec)
+ {
+   tree base, step;
+   struct loop *loop;
+ 
+   if (!evolution_function_is_affine_p (chrec))
+     return convert (type, chrec);
+ 
+   base = CHREC_LEFT (chrec);
+   step = CHREC_RIGHT (chrec);
+   loop = loop_from_num (current_loops, CHREC_VARIABLE (chrec));
+ 
+   /* TODO -- if we knew the statement at that the conversion occurs,
+      we could pass it to can_count_iv_in_wider_type and get a better
+      result.  */
+   step = can_count_iv_in_wider_type (loop, type, base, step, NULL_TREE);
+   if (!step)
+     return convert (type, chrec);
+   base = chrec_convert (type, base);
+ 
+   return build_polynomial_chrec (CHREC_VARIABLE (chrec),
+ 				 base, step);
+ }
+ 
  /* Determines whether the chrec contains symbolic names defined in
     LOOP_NB.  */
  
*************** number_of_iterations_in_loop (struct loo
*** 3006,3011 ****
--- 3033,3041 ----
    
    test = TREE_OPERAND (cond, 0);
    exit = loop_exit_edge (loop, 0);
+   if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src))
+     return set_nb_iterations_in_loop (loop, chrec_top);
+ 
    if (exit->flags & EDGE_TRUE_VALUE)
      test = invert_truthvalue (test);
  
*************** scev_initialize (struct loops *loops)
*** 3385,3390 ****
--- 3415,3437 ----
        flow_loop_scan (loops->parray[i], LOOP_EXIT_EDGES);
  }
  
+ /* Cleans up the information cached by the scalar evolutions analysis.  */
+ 
+ void
+ scev_reset (void)
+ {
+   unsigned i;
+   struct loop *loop;
+ 
+   htab_empty (scalar_evolution_info);
+   for (i = 1; i < current_loops->num; i++)
+     {
+       loop = current_loops->parray[i];
+       if (loop)
+ 	loop->nb_iterations = NULL_TREE;
+     }
+ }
+ 
  /* Initialize the analysis of scalar evolutions.  */
  
  static void
*************** scev_init (void)
*** 3394,3399 ****
--- 3441,3486 ----
    if (!current_loops)
      return;
    scev_initialize (current_loops);
+ }
+ 
+ /* Checks whether OP behaves as a simple affine iv of LOOP in STMT and returns
+    its BASE and STEP if possible.  */
+ 
+ bool
+ simple_iv (struct loop *loop, tree stmt, tree op, tree *base, tree *step)
+ {
+   basic_block bb = bb_for_stmt (stmt);
+   tree type, ev;
+ 
+   *base = NULL_TREE;
+   *step = NULL_TREE;
+ 
+   type = TREE_TYPE (op);
+   if (TREE_CODE (type) != INTEGER_TYPE
+       && TREE_CODE (type) != POINTER_TYPE)
+     return false;
+ 
+   ev = analyze_scalar_evolution_in_loop (loop, bb->loop_father, op);
+   if (tree_does_not_contain_chrecs (ev)
+       && !chrec_contains_symbols_defined_in_loop (ev, loop->num))
+     {
+       *base = ev;
+       return true;
+     }
+ 
+   if (TREE_CODE (ev) != POLYNOMIAL_CHREC
+       || CHREC_VARIABLE (ev) != (unsigned) loop->num)
+     return false;
+ 
+   *step = CHREC_RIGHT (ev);
+   if (TREE_CODE (*step) != INTEGER_CST)
+     return false;
+   *base = CHREC_LEFT (ev);
+   if (tree_contains_chrecs (*base)
+       || chrec_contains_symbols_defined_in_loop (*base, loop->num))
+     return false;
+ 
+   return true;
  }
  
  /* Runs the analysis of scalar evolutions.  */
Index: tree-scalar-evolution.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/Attic/tree-scalar-evolution.h,v
retrieving revision 1.1.2.10
diff -c -3 -p -r1.1.2.10 tree-scalar-evolution.h
*** tree-scalar-evolution.h	15 Apr 2004 01:01:47 -0000	1.1.2.10
--- tree-scalar-evolution.h	30 Apr 2004 23:36:55 -0000
*************** extern tree number_of_iterations_in_loop
*** 28,33 ****
--- 28,34 ----
  extern tree get_loop_exit_condition (struct loop *);
  
  extern void scev_initialize (struct loops *loops);
+ extern void scev_reset (void);
  extern void scev_finalize (void);
  extern tree analyze_scalar_evolution (struct loop *, tree);
  extern tree analyze_scalar_evolution_in_loop (struct loop *, struct loop *,
*************** extern tree instantiate_parameters (stru
*** 36,40 ****
--- 37,42 ----
  extern void eliminate_redundant_checks (void);
  extern void gather_stats_on_scev_database (void);
  
+ bool simple_iv (struct loop *, tree, tree, tree *, tree *);
  
  #endif  /* GCC_TREE_SCALAR_EVOLUTION_H  */
Index: tree-ssa-loop-ivcanon.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/Attic/tree-ssa-loop-ivcanon.c,v
retrieving revision 1.1.2.7
diff -c -3 -p -r1.1.2.7 tree-ssa-loop-ivcanon.c
*** tree-ssa-loop-ivcanon.c	29 Mar 2004 17:52:31 -0000	1.1.2.7
--- tree-ssa-loop-ivcanon.c	30 Apr 2004 23:36:55 -0000
*************** Software Foundation, 59 Temple Place - S
*** 56,286 ****
  #include "flags.h"
  #include "tree-inline.h"
  
- /* Bound on the number of iterations we try to evaluate.  */
- 
- #define MAX_ITERATIONS_TO_TRACK 1000
- 
- /* Determines a loop phi node of LOOP such that X is derived from it
-    by a chain of operations with constants.  */
- 
- static tree
- chain_of_csts_start (struct loop *loop, tree x)
- {
-   tree stmt = SSA_NAME_DEF_STMT (x);
-   basic_block bb = bb_for_stmt (stmt);
-   use_optype uses;
- 
-   if (!bb
-       || !flow_bb_inside_loop_p (loop, bb))
-     return NULL_TREE;
-   
-   if (TREE_CODE (stmt) == PHI_NODE)
-     {
-       if (bb == loop->header)
- 	return stmt;
- 
-       return NULL_TREE;
-     }
- 
-   if (TREE_CODE (stmt) != MODIFY_EXPR)
-     return NULL_TREE;
- 
-   get_stmt_operands (stmt);
-   if (NUM_VUSES (STMT_VUSE_OPS (stmt)) > 0)
-     return NULL_TREE;
-   if (NUM_VDEFS (STMT_VDEF_OPS (stmt)) > 0)
-     return NULL_TREE;
-   if (NUM_DEFS (STMT_DEF_OPS (stmt)) > 1)
-     return NULL_TREE;
-   uses = STMT_USE_OPS (stmt);
-   if (NUM_USES (uses) != 1)
-     return NULL_TREE;
- 
-   return chain_of_csts_start (loop, USE_OP (uses, 0));
- }
- 
- /* Determines whether X is derived from a value of a phi node in LOOP
-    such that
- 
-    * this derivation consists only from operations with constants
-    * the initial value of the phi node is constant
-    * its value in the next iteration can be derived from the current one
-      by a chain of operations with constants.  */
- 
- static tree
- get_base_for (struct loop *loop, tree x)
- {
-   tree phi, init, next;
- 
-   if (is_gimple_min_invariant (x))
-     return x;
- 
-   phi = chain_of_csts_start (loop, x);
-   if (!phi)
-     return NULL_TREE;
- 
-   init = phi_element_for_edge (phi, loop_preheader_edge (loop))->def;
-   next = phi_element_for_edge (phi, loop_latch_edge (loop))->def;
- 
-   if (TREE_CODE (next) != SSA_NAME)
-     return NULL_TREE;
- 
-   if (!is_gimple_min_invariant (init))
-     return NULL_TREE;
- 
-   if (chain_of_csts_start (loop, next) != phi)
-     return NULL_TREE;
- 
-   return phi;
- }
- 
- /* Evaluates value of X, provided that the value of the variable defined
-    in the loop phi node from that X is derived by operations with constants
-    is BASE.  */
- 
- static tree
- get_val_for (tree x, tree base)
- {
-   tree stmt, *op, nx, val;
-   use_optype uses;
- 
-   if (!x)
-     return base;
- 
-   stmt = SSA_NAME_DEF_STMT (x);
-   if (TREE_CODE (stmt) == PHI_NODE)
-     return base;
- 
-   uses = STMT_USE_OPS (stmt);
-   op = USE_OP_PTR (uses, 0);
- 
-   nx = *op;
-   val = get_val_for (nx, base);
-   *op = val;
-   val = fold (TREE_OPERAND (stmt, 1));
-   *op = nx;
- 
-   return val;
- }
- 
- /* Tries to count the number of iterations of LOOP till it exits by EXIT
-    by brute force.  */
- 
- static tree
- loop_niter_by_eval (struct loop *loop, edge exit)
- {
-   tree cond, cnd, acnd;
-   tree op[2], val[2], next[2], aval[2], phi[2];
-   unsigned i, j;
-   enum tree_code cmp;
- 
-   cond = last_stmt (exit->src);
-   if (!cond || TREE_CODE (cond) != COND_EXPR)
-     return chrec_top;
- 
-   cnd = COND_EXPR_COND (cond);
-   if (exit->flags & EDGE_TRUE_VALUE)
-     cnd = invert_truthvalue (cnd);
- 
-   cmp = TREE_CODE (cnd);
-   switch (cmp)
-     {
-     case EQ_EXPR:
-     case NE_EXPR:
-     case GT_EXPR:
-     case GE_EXPR:
-     case LT_EXPR:
-     case LE_EXPR:
-       for (j = 0; j < 2; j++)
- 	op[j] = TREE_OPERAND (cnd, j);
-       break;
- 
-     default:
-       return chrec_top;
-     }
- 
-   for (j = 0; j < 2; j++)
-     {
-       phi[j] = get_base_for (loop, op[j]);
-       if (!phi[j])
- 	return chrec_top;
-     }
- 
-   for (j = 0; j < 2; j++)
-     {
-       if (TREE_CODE (phi[j]) == PHI_NODE)
- 	{
- 	  val[j] = phi_element_for_edge (phi[j],
- 					 loop_preheader_edge (loop))->def;
- 	  next[j] = phi_element_for_edge (phi[j],
- 					  loop_latch_edge (loop))->def;
- 	}
-       else
- 	{
- 	  val[j] = phi[j];
- 	  next[j] = NULL_TREE;
- 	  op[j] = NULL_TREE;
- 	}
-     }
- 
-   for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
-     {
-       for (j = 0; j < 2; j++)
- 	aval[j] = get_val_for (op[j], val[j]);
- 
-       acnd = fold (build (cmp, boolean_type_node, aval[0], aval[1]));
-       if (integer_zerop (acnd))
- 	{
- 	  if (dump_file && (dump_flags & TDF_DETAILS))
- 	    fprintf (dump_file,
- 		     "Proved that loop %d iterates %d times using brute force.\n",
- 		     loop->num, i);
- 	  return build_int_2 (i, 0);
- 	}
- 
-       for (j = 0; j < 2; j++)
- 	val[j] = get_val_for (next[j], val[j]);
-     }
- 
-   return chrec_top;
- }
- 
- /* Finds the exit of the LOOP by that the loop exits after a constant
-    number of iterations and stores it to *EXIT.  The iteration count
-    is returned.  */
- 
- static tree
- find_loop_niter_by_eval (struct loop *loop, edge *exit)
- {
-   unsigned n_exits, i;
-   edge *exits = get_loop_exit_edges (loop, &n_exits);
-   edge ex;
-   tree niter = NULL_TREE, aniter;
- 
-   *exit = NULL;
-   for (i = 0; i < n_exits; i++)
-     {
-       ex = exits[i];
-       if (!just_once_each_iteration_p (loop, ex->src))
- 	continue;
- 
-       aniter = loop_niter_by_eval (loop, ex);
-       if (TREE_CODE (aniter) != INTEGER_CST)
- 	continue;
- 
-       if (niter
- 	  && !integer_nonzerop (fold (build (LT_EXPR, boolean_type_node,
- 					     aniter, niter))))
- 	continue;
- 
-       niter = aniter;
-       *exit = ex;
-     }
-   free (exits);
- 
-   return niter ? niter : chrec_top;
- }
- 
  /* Adds a canonical induction variable to LOOP iterating NITER times.  EXIT
     is the exit edge whose condition is replaced.  */
  
--- 56,61 ----
Index: tree-ssa-loop-ivopts.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/Attic/tree-ssa-loop-ivopts.c,v
retrieving revision 1.1.2.31
diff -c -3 -p -r1.1.2.31 tree-ssa-loop-ivopts.c
*** tree-ssa-loop-ivopts.c	27 Apr 2004 14:10:14 -0000	1.1.2.31
--- tree-ssa-loop-ivopts.c	30 Apr 2004 23:36:56 -0000
*************** struct version_info
*** 121,135 ****
    bool preserve_biv;	/* For the original biv, whether to preserve it.  */
  };
  
- /* Description of number of iterations of a loop.  */
- struct tree_niter_desc
- {
-   tree assumptions;	/* Assumptions for the number of iterations be valid.  */
-   tree may_be_zero;	/* Condition under that the loop exits in the first
- 			   iteration.  */
-   tree niter;		/* Number of iterations.  */
- };
- 
  /* Information attached to loop.  */
  struct loop_data
  {
--- 121,126 ----
*************** static unsigned spill_cost;	/* The cost 
*** 252,260 ****
  
  static varray_type decl_rtl_to_reset;
  
! #define SWAP(X, Y) do { void *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
! 
! static tree force_gimple_operand (tree, tree *, bool);
  
  /* Number of uses recorded in DATA.  */
  
--- 243,249 ----
  
  static varray_type decl_rtl_to_reset;
  
! static tree force_gimple_operand (tree, tree *, bool, tree);
  
  /* Number of uses recorded in DATA.  */
  
*************** idx_force_simple (tree base ATTRIBUTE_UN
*** 606,612 ****
    struct idx_fs_data *d = data;
    tree stmts;
  
!   *idx = force_gimple_operand (*idx, &stmts, true);
  
    if (stmts)
      {
--- 595,601 ----
    struct idx_fs_data *d = data;
    tree stmts;
  
!   *idx = force_gimple_operand (*idx, &stmts, true, NULL_TREE);
  
    if (stmts)
      {
*************** update_addressable_flag (tree expr)
*** 640,649 ****
  
  /* Expands EXPR to list of gimple statements STMTS, forcing it to become
     a gimple operand that is returned.  If SIMPLE is true, force the operand
!    to be either ssa_name or integer constant.  */
  
  static tree
! force_gimple_operand (tree expr, tree *stmts, bool simple)
  {
    enum tree_code code;
    char class;
--- 629,639 ----
  
  /* Expands EXPR to list of gimple statements STMTS, forcing it to become
     a gimple operand that is returned.  If SIMPLE is true, force the operand
!    to be either ssa_name or integer constant.  If VAR is not NULL, make the
!    base variable of the final destination be VAR if possible.  */
  
  static tree
! force_gimple_operand (tree expr, tree *stmts, bool simple, tree var)
  {
    enum tree_code code;
    char class;
*************** force_gimple_operand (tree expr, tree *s
*** 676,695 ****
      {
        op0 = TREE_OPERAND (expr, 0);
        if (TREE_CODE (op0) == INDIRECT_REF)
! 	return force_gimple_operand (TREE_OPERAND (op0, 0), stmts, simple);
      }
  
!   atmp = create_tmp_var (TREE_TYPE (expr), "fgotmp");
!   add_referenced_tmp_var (atmp);
  
    switch (class)
      {
      case '1':
      case '2':
!       op0 = force_gimple_operand (TREE_OPERAND (expr, 0), &stmts0, false);
        if (class == '2')
  	{
! 	  op1 = force_gimple_operand (TREE_OPERAND (expr, 1), &stmts1, false);
  	  rhs = build (code, TREE_TYPE (expr), op0, op1);
  	}
        else
--- 666,693 ----
      {
        op0 = TREE_OPERAND (expr, 0);
        if (TREE_CODE (op0) == INDIRECT_REF)
! 	return force_gimple_operand (TREE_OPERAND (op0, 0), stmts, simple,
! 				     NULL_TREE);
      }
  
!   if (var)
!     atmp = var;
!   else
!     {
!       atmp = create_tmp_var (TREE_TYPE (expr), "fgotmp");
!       add_referenced_tmp_var (atmp);
!     }
  
    switch (class)
      {
      case '1':
      case '2':
!       op0 = force_gimple_operand (TREE_OPERAND (expr, 0), &stmts0, false,
! 				  NULL_TREE);
        if (class == '2')
  	{
! 	  op1 = force_gimple_operand (TREE_OPERAND (expr, 1), &stmts1, false,
! 				      NULL_TREE);
  	  rhs = build (code, TREE_TYPE (expr), op0, op1);
  	}
        else
*************** tree_ssa_iv_optimize_init (struct loops 
*** 938,943 ****
--- 936,943 ----
    VARRAY_GENERIC_PTR_NOGC_INIT (decl_rtl_to_reset, 20, "decl_rtl_to_reset");
  
    scev_initialize (loops);
+   estimate_numbers_of_iterations (loops);
+   scev_reset ();
  }
  
  /* Allocates an induction variable with given initial value BASE and step STEP
*************** mark_bivs (struct ivopts_data *data)
*** 1151,1183 ****
      }
  }
  
- /* Finds definition of VAR and fills in BASE and STEP accordingly.  */
- 
- static bool
- get_var_def (struct ivopts_data *data, tree var, tree *base, tree *step)
- {
-   struct iv *iv;
-   
-   if (is_gimple_min_invariant (var))
-     {
-       *base = var;
-       *step = NULL_TREE;
-       return true;
-     }
- 
-   if (TREE_CODE (var) != SSA_NAME)
-     return false;
- 
-   iv = get_iv (data, var);
-   if (!iv)
-     return false;
- 
-   *base = iv->base;
-   *step = iv->step;
- 
-   return true;
- }
- 
  /* Checks whether STMT defines a linear induction variable and stores its
     parameters to BASE and STEP.  */
  
--- 1151,1156 ----
*************** static bool
*** 1185,1193 ****
  find_givs_in_stmt_scev (struct ivopts_data *data, tree stmt,
  			tree *base, tree *step)
  {
!   tree lhs, type, ev;
    struct loop *loop = data->current_loop;
-   basic_block bb = bb_for_stmt (stmt);
  
    *base = NULL_TREE;
    *step = NULL_TREE;
--- 1158,1165 ----
  find_givs_in_stmt_scev (struct ivopts_data *data, tree stmt,
  			tree *base, tree *step)
  {
!   tree lhs;
    struct loop *loop = data->current_loop;
  
    *base = NULL_TREE;
    *step = NULL_TREE;
*************** find_givs_in_stmt_scev (struct ivopts_da
*** 1199,1227 ****
    if (TREE_CODE (lhs) != SSA_NAME)
      return false;
  
!   type = TREE_TYPE (lhs);
!   if (TREE_CODE (type) != INTEGER_TYPE
!       && TREE_CODE (type) != POINTER_TYPE)
!     return false;
! 
!   ev = analyze_scalar_evolution_in_loop (loop, bb->loop_father, lhs);
!   if (tree_does_not_contain_chrecs (ev)
!       && !chrec_contains_symbols (ev))
!     {
!       *base = ev;
!       return true;
!     }
! 
!   if (TREE_CODE (ev) != POLYNOMIAL_CHREC
!       || CHREC_VARIABLE (ev) != (unsigned) loop->num)
!     return false;
! 
!   *step = CHREC_RIGHT (ev);
!   if (TREE_CODE (*step) != INTEGER_CST)
!     return false;
!   *base = CHREC_LEFT (ev);
!   if (tree_contains_chrecs (*base)
!       || chrec_contains_symbols (*base))
      return false;
  
    if (contains_abnormal_ssa_name_p (*base))
--- 1171,1177 ----
    if (TREE_CODE (lhs) != SSA_NAME)
      return false;
  
!   if (!simple_iv (loop, stmt, TREE_OPERAND (stmt, 1), base, step))
      return false;
  
    if (contains_abnormal_ssa_name_p (*base))
*************** find_givs (struct ivopts_data *data)
*** 1268,1653 ****
    free (body);
  }
  
- /* Computes inverse of X modulo 2^s, where MASK = 2^s-1.  */
- 
- static tree
- inverse (tree x, tree mask)
- {
-   tree type = TREE_TYPE (x);
-   tree ctr = EXEC_BINARY (RSHIFT_EXPR, type, mask, integer_one_node);
-   tree rslt = convert (type, integer_one_node);
- 
-   while (integer_nonzerop (ctr))
-     {
-       rslt = EXEC_BINARY (MULT_EXPR, type, rslt, x);
-       rslt = EXEC_BINARY (BIT_AND_EXPR, type, rslt, mask);
-       x = EXEC_BINARY (MULT_EXPR, type, x, x);
-       x = EXEC_BINARY (BIT_AND_EXPR, type, x, mask);
-       ctr = EXEC_BINARY (RSHIFT_EXPR, type, ctr, integer_one_node);
-     }
- 
-   return rslt;
- }
- 
- /* Determine the number of iterations according to condition (for staying
-    inside loop) BASE0 + STEP0 * i (CODE) BASE1 + STEP1 * i, computed in TYPE.
-    Store the results to NITER.  */
- 
- static void
- number_of_iterations_cond (tree type, tree base0, tree step0,
- 			   enum tree_code code, tree base1, tree step1,
- 			   struct tree_niter_desc *niter)
- {
-   tree step, delta, mmin, mmax;
-   tree may_xform, bound, s, d, tmp;
-   bool was_sharp = false;
-   tree assumption;
-   tree assumptions = boolean_true_node;
-   tree noloop_assumptions = boolean_false_node;
-   tree unsigned_step_type;
- 
-   /* The meaning of these assumptions is this:
-      if !assumptions
-        then the rest of information does not have to be valid
-      if noloop_assumptions then the loop does not have to roll
-        (but it is only conservative approximation, i.e. it only says that
-        if !noloop_assumptions, then the loop does not end before the computed
-        number of iterations)  */
- 
-   /* Make < comparison from > ones.  */
-   if (code == GE_EXPR
-       || code == GT_EXPR)
-     {
-       SWAP (base0, base1);
-       SWAP (step0, step1);
-       code = swap_tree_comparison (code);
-     }
- 
-   /* We can take care of the case of two induction variables chasing each other
-      if the test is NE. I have never seen a loop using it, but still it is
-      cool.  */
-   if (!zero_p (step0) && !zero_p (step1))
-     {
-       if (code != NE_EXPR)
- 	return;
- 
-       step0 = EXEC_BINARY (MINUS_EXPR, type, step0, step1);
-       step1 = NULL_TREE;
-     }
- 
-   /* If the result is a constant,  the loop is weird.  More precise handling
-      would be possible, but the situation is not common enough to waste time
-      on it.  */
-   if (zero_p (step0) && zero_p (step1))
-     return;
- 
-   /* Ignore loops of while (i-- < 10) type.  */
-   if (code != NE_EXPR)
-     {
-       if (step0 && !tree_expr_nonnegative_p (step0))
- 	return;
- 
-       if (!zero_p (step1) && tree_expr_nonnegative_p (step1))
- 	return;
-     }
- 
-   /* For pointers these are NULL.  We assume pointer arithmetics never
-      overflows.  */
-   mmin = TYPE_MIN_VALUE (type);
-   mmax = TYPE_MAX_VALUE (type);
- 
-   /* Some more condition normalization.  We must record some assumptions
-      due to overflows.  */
- 
-   if (code == LT_EXPR)
-     {
-       /* We want to take care only of <=; this is easy,
- 	 as in cases the overflow would make the transformation unsafe the loop
- 	 does not roll.  Seemingly it would make more sense to want to take
- 	 care of <, as NE is more simmilar to it, but the problem is that here
- 	 the transformation would be more difficult due to possibly infinite
- 	 loops.  */
-       if (zero_p (step0))
- 	{
- 	  if (mmax)
- 	    assumption = fold (build (EQ_EXPR, boolean_type_node, base0, mmax));
- 	  else
- 	    assumption = boolean_true_node;
- 	  if (integer_nonzerop (assumption))
- 	    goto zero_iter;
- 	  base0 = fold (build (PLUS_EXPR, type, base0,
- 			       convert (type, integer_one_node)));
- 	}
-       else
- 	{
- 	  if (mmin)
- 	    assumption = fold (build (EQ_EXPR, boolean_type_node, base1, mmin));
- 	  else
- 	    assumption = boolean_true_node;
- 	  if (integer_nonzerop (assumption))
- 	    goto zero_iter;
- 	  base1 = fold (build (MINUS_EXPR, type, base1,
- 			       convert (type, integer_one_node)));
- 	}
-       noloop_assumptions = assumption;
-       code = LE_EXPR;
- 
-       /* It will be useful to be able to tell the difference once more in
- 	 <= -> != reduction.  */
-       was_sharp = true;
-     }
- 
-   /* Take care of trivially infinite loops.  */
-   if (code != NE_EXPR)
-     {
-       if (zero_p (step0)
- 	  && mmin
- 	  && operand_equal_p (base0, mmin, 0))
- 	return;
-       if (zero_p (step1)
- 	  && mmax
- 	  && operand_equal_p (base1, mmax, 0))
- 	return;
-     }
- 
-   /* If we can we want to take care of NE conditions instead of size
-      comparisons, as they are much more friendly (most importantly
-      this takes care of special handling of loops with step 1).  We can
-      do it if we first check that upper bound is greater or equal to
-      lower bound, their difference is constant c modulo step and that
-      there is not an overflow.  */
-   if (code != NE_EXPR)
-     {
-       if (zero_p (step0))
- 	step = EXEC_UNARY (NEGATE_EXPR, type, step1);
-       else
- 	step = step0;
-       delta = build (MINUS_EXPR, type, base1, base0);
-       delta = fold (build (FLOOR_MOD_EXPR, type, delta, step));
-       may_xform = boolean_false_node;
- 
-       if (TREE_CODE (delta) == INTEGER_CST)
- 	{
- 	  tmp = EXEC_BINARY (MINUS_EXPR, type, step,
- 			     convert (type, integer_one_node));
- 	  if (was_sharp
- 	      && operand_equal_p (delta, tmp, 0))
- 	    {
- 	      /* A special case.  We have transformed condition of type
- 		 for (i = 0; i < 4; i += 4)
- 		 into
- 		 for (i = 0; i <= 3; i += 4)
- 		 obviously if the test for overflow during that transformation
- 		 passed, we cannot overflow here.  Most importantly any
- 		 loop with sharp end condition and step 1 falls into this
- 		 cathegory, so handling this case specially is definitely
- 		 worth the troubles.  */
- 	      may_xform = boolean_true_node;
- 	    }
- 	  else if (zero_p (step0))
- 	    {
- 	      if (!mmin)
- 		may_xform = boolean_true_node;
- 	      else
- 		{
- 		  bound = EXEC_BINARY (PLUS_EXPR, type, mmin, step);
- 		  bound = EXEC_BINARY (MINUS_EXPR, type, bound, delta);
- 		  may_xform = fold (build (LE_EXPR, boolean_type_node,
- 					   bound, base0));
- 		}
- 	    }
- 	  else
- 	    {
- 	      if (!mmax)
- 		may_xform = boolean_true_node;
- 	      else
- 		{
- 		  bound = EXEC_BINARY (MINUS_EXPR, type, mmax, step);
- 		  bound = EXEC_BINARY (PLUS_EXPR, type, bound, delta);
- 		  may_xform = fold (build (LE_EXPR, boolean_type_node,
- 					   base1, bound));
- 		}
- 	    }
- 	}
- 
-       if (!integer_zerop (may_xform))
- 	{
- 	  /* We perform the transformation always provided that it is not
- 	     completely senseless.  This is OK, as we would need this assumption
- 	     to determine the number of iterations anyway.  */
- 	  if (!integer_nonzerop (may_xform))
- 	    assumptions = may_xform;
- 
- 	  if (zero_p (step0))
- 	    {
- 	      base0 = build (PLUS_EXPR, type, base0, delta);
- 	      base0 = fold (build (MINUS_EXPR, type, base0, step));
- 	    }
- 	  else
- 	    {
- 	      base1 = build (MINUS_EXPR, type, base1, delta);
- 	      base1 = fold (build (PLUS_EXPR, type, base1, step));
- 	    }
- 
- 	  assumption = fold (build (GT_EXPR, boolean_type_node, base0, base1));
- 	  noloop_assumptions = fold (build (TRUTH_OR_EXPR, boolean_type_node,
- 					    noloop_assumptions, assumption));
- 	  code = NE_EXPR;
- 	}
-     }
- 
-   /* Count the number of iterations.  */
-   if (code == NE_EXPR)
-     {
-       /* Everything we do here is just arithmetics modulo size of mode.  This
- 	 makes us able to do more involved computations of number of iterations
- 	 than in other cases.  First transform the condition into shape
- 	 s * i <> c, with s positive.  */
-       base1 = fold (build (MINUS_EXPR, type, base1, base0));
-       base0 = NULL_TREE;
-       if (!zero_p (step1))
-   	step0 = EXEC_UNARY (NEGATE_EXPR, type, step1);
-       step1 = NULL_TREE;
-       if (!tree_expr_nonnegative_p (step0))
- 	{
- 	  step0 = EXEC_UNARY (NEGATE_EXPR, type, step0);
- 	  base1 = fold (build1 (NEGATE_EXPR, type, base1));
- 	}
- 
-       /* Let nsd (s, size of mode) = d.  If d does not divide c, the loop
- 	 is infinite.  Otherwise, the number of iterations is
- 	 (inverse(s/d) * (c/d)) mod (size of mode/d).  */
-       s = step0;
-       d = integer_one_node;
-       unsigned_step_type = make_unsigned_type (TYPE_PRECISION (type));
-       bound = convert (unsigned_step_type, build_int_2 (~0, ~0));
-       while (1)
- 	{
- 	  tmp = EXEC_BINARY (BIT_AND_EXPR, type, s, integer_one_node);
- 	  if (integer_nonzerop (tmp))
- 	    break;
- 	  
- 	  s = EXEC_BINARY (RSHIFT_EXPR, type, s, integer_one_node);
- 	  d = EXEC_BINARY (LSHIFT_EXPR, type, d, integer_one_node);
- 	  bound = EXEC_BINARY (RSHIFT_EXPR, type, bound, integer_one_node);
- 	}
- 
-       tmp = fold (build (EXACT_DIV_EXPR, type, base1, d));
-       tmp = fold (build (MULT_EXPR, type, tmp, inverse (s, bound)));
-       niter->niter = fold (build (BIT_AND_EXPR, type, tmp, bound));
-     }
-   else
-     {
-       if (zero_p (step1))
- 	/* Condition in shape a + s * i <= b
- 	   We must know that b + s does not overflow and a <= b + s and then we
- 	   can compute number of iterations as (b + s - a) / s.  (It might
- 	   seem that we in fact could be more clever about testing the b + s
- 	   overflow condition using some information about b - a mod s,
- 	   but it was already taken into account during LE -> NE transform).  */
- 	{
- 	  if (mmax)
- 	    {
- 	      bound = EXEC_BINARY (MINUS_EXPR, type, mmax, step0);
- 	      assumption = fold (build (LE_EXPR, boolean_type_node,
- 					base1, bound));
- 	      assumptions = fold (build (TRUTH_AND_EXPR, boolean_type_node,
- 					 assumptions, assumption));
- 	    }
- 	  step = step0;
- 	  tmp = fold (build (PLUS_EXPR, type, base1, step0));
- 	  assumption = fold (build (GT_EXPR, boolean_type_node, base0, tmp));
- 	  delta = fold (build (PLUS_EXPR, type, base1, step));
- 	  delta = fold (build (MINUS_EXPR, type, delta, base0));
- 	}
-       else
- 	{
- 	  /* Condition in shape a <= b - s * i
- 	     We must know that a - s does not overflow and a - s <= b and then
- 	     we can again compute number of iterations as (b - (a - s)) / s.  */
- 	  if (mmin)
- 	    {
- 	      bound = EXEC_BINARY (MINUS_EXPR, type, mmin, step1);
- 	      assumption = fold (build (LE_EXPR, boolean_type_node,
- 					bound, base0));
- 	      assumptions = fold (build (TRUTH_AND_EXPR, boolean_type_node,
- 					 assumptions, assumption));
- 	    }
- 	  step = fold (build1 (NEGATE_EXPR, type, step1));
- 	  tmp = fold (build (PLUS_EXPR, type, base0, step1));
- 	  assumption = fold (build (GT_EXPR, boolean_type_node, tmp, base1));
- 	  delta = fold (build (MINUS_EXPR, type, base0, step));
- 	  delta = fold (build (MINUS_EXPR, type, base1, delta));
- 	}
-       noloop_assumptions = fold (build (TRUTH_OR_EXPR, boolean_type_node,
- 					noloop_assumptions, assumption));
-       delta = fold (build (FLOOR_DIV_EXPR, type, delta, step));
-       niter->niter = delta;
-     }
- 
-   niter->assumptions = assumptions;
-   niter->may_be_zero = noloop_assumptions;
-   return;
- 
- zero_iter:
-   niter->assumptions = boolean_true_node;
-   niter->may_be_zero = boolean_true_node;
-   niter->niter = convert (type, integer_zero_node);
-   return;
- }
- 
  /* Determine the number of iterations of the current loop.  */
  
  static void
  determine_number_of_iterations (struct ivopts_data *data)
  {
-   tree stmt, cond, type;
-   tree op0, base0, step0;
-   tree op1, base1, step1;
-   enum tree_code code;
    struct loop *loop = data->current_loop;
  
!   if (!loop_data (loop)->single_exit)
!     return;
! 
!   stmt = last_stmt (loop_data (loop)->single_exit->src);
!   if (!stmt || TREE_CODE (stmt) != COND_EXPR)
!     return;
! 
!   /* We want the condition for staying inside loop.  */
!   cond = COND_EXPR_COND (stmt);
!   if (loop_data (loop)->single_exit->flags & EDGE_TRUE_VALUE)
!     cond = invert_truthvalue (cond);
! 
!   code = TREE_CODE (cond);
!   switch (code)
!     {
!     case GT_EXPR:
!     case GE_EXPR:
!     case NE_EXPR:
!     case LT_EXPR:
!     case LE_EXPR:
!       break;
! 
!     default:
!       return;
!     }
!   
!   op0 = TREE_OPERAND (cond, 0);
!   op1 = TREE_OPERAND (cond, 1);
!   type = TREE_TYPE (op0);
! 
!   if (TREE_CODE (type) != INTEGER_TYPE
!     && TREE_CODE (type) != POINTER_TYPE)
!     return;
!       
!   if (!get_var_def (data, op0, &base0, &step0))
!     return;
!   if (!get_var_def (data, op1, &base1, &step1))
      return;
  
!   number_of_iterations_cond (type, base0, step0, code, base1, step1,
! 			     &loop_data (loop)->niter);
  }
  
  /* For each ssa name defined in LOOP determines whether it is an induction
--- 1218,1235 ----
    free (body);
  }
  
  /* Determine the number of iterations of the current loop.  */
  
  static void
  determine_number_of_iterations (struct ivopts_data *data)
  {
    struct loop *loop = data->current_loop;
+   edge exit = loop_data (loop)->single_exit;
  
!   if (!exit)
      return;
  
!   number_of_iterations_exit (loop, exit, &loop_data (loop)->niter);
  }
  
  /* For each ssa name defined in LOOP determines whether it is an induction
*************** find_interesting_uses_cond (struct ivopt
*** 1888,1905 ****
     initial ones.  Returns false when the value of the index cannot be determined.
     Callback for for_each_index.  */
  
! static struct ivopts_data *ifs_ivopts_data;
  static bool
  idx_find_step (tree base, tree *idx, void *data)
  {
!   tree *step_p = data;
    struct iv *iv;
!   tree step, type, iv_type;
    
    if (TREE_CODE (*idx) != SSA_NAME)
      return true;
  
!   iv = get_iv (ifs_ivopts_data, *idx);
    if (!iv)
      return false;
  
--- 1470,1493 ----
     initial ones.  Returns false when the value of the index cannot be determined.
     Callback for for_each_index.  */
  
! struct ifs_ivopts_data
! {
!   struct ivopts_data *ivopts_data;
!   tree stmt;
!   tree *step_p;
! };
! 
  static bool
  idx_find_step (tree base, tree *idx, void *data)
  {
!   struct ifs_ivopts_data *dta = data;
    struct iv *iv;
!   tree step, type, iv_type, iv_step;
    
    if (TREE_CODE (*idx) != SSA_NAME)
      return true;
  
!   iv = get_iv (dta->ivopts_data, *idx);
    if (!iv)
      return false;
  
*************** idx_find_step (tree base, tree *idx, voi
*** 1922,1957 ****
      }
  
    if (TYPE_PRECISION (iv_type) < TYPE_PRECISION (type))
      {
        /* The index might wrap.  */
- 
-       /* TODO -- this is especially bad for targets where
- 	 sizeof (int) < sizeof (void *).  We should at least:
- 
- 	 1) Use the number of iterations of the current loop to prove
- 	    that the index cannot wrap.
- 	 2) Record whether only a signed arithmetics is used during computation
- 	    of the index (behavior of overflows during signed arithmetics is
- 	    undefined, so we may assume that it does not happen). Problems:
- 	    * The optimizations may create overflowing signed arithmetics.
- 	    * And they may also remove the no-op casts used to make the
- 	      behavior of overflows defined.
- 	 3) Use array bounds when known (if the memory is accessed at each
- 	    iteration, we know the index cannot come out of them).  Better,
- 	    use this to estimate the number of iterations of the loop.
- 	 4) If all indices are of the same type, we can also rewrite the
- 	    access as &base + (extend) (step * i), and optimize the step * i
- 	    part separately.  */
        return false;
      }
  
!   step = EXEC_BINARY (MULT_EXPR, type, step,
! 		      convert (type, iv->step));
  
!   if (!*step_p)
!     *step_p = step;
    else
!     *step_p = EXEC_BINARY (PLUS_EXPR, type, *step_p, step);
  
    return true;
  }
--- 1510,1532 ----
      }
  
    if (TYPE_PRECISION (iv_type) < TYPE_PRECISION (type))
+     iv_step = can_count_iv_in_wider_type (dta->ivopts_data->current_loop,
+ 					  type, iv->base, iv->step, dta->stmt);
+   else
+     iv_step = convert (iv_type, iv->step);
+ 
+   if (!iv_step)
      {
        /* The index might wrap.  */
        return false;
      }
  
!   step = EXEC_BINARY (MULT_EXPR, type, step, iv_step);
  
!   if (!*dta->step_p)
!     *dta->step_p = step;
    else
!     *dta->step_p = EXEC_BINARY (PLUS_EXPR, type, *dta->step_p, step);
  
    return true;
  }
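
For illustration of why the wrap check above matters, here is a standalone C
sketch (not part of the patch; the names are made up): an induction variable
computed in a narrow type agrees with its widened counterpart only until it
wraps, so it may be counted in the wider type only when the loop is proved to
iterate fewer times than that.

  #include <stdio.h>

  int
  main (void)
  {
    unsigned short i = 0;   /* narrow induction variable (typically 16 bits) */
    unsigned long j = 0;    /* the same induction variable in a wider type */
    unsigned long n;

    for (n = 0; n < 70000; n++)
      {
        if ((unsigned long) i != j)
          break;            /* the two variables diverge once i wraps */
        i++;
        j++;
      }

    printf ("narrow and wide IV agree for %lu iterations\n", n);
    /* Prints 65536 when unsigned short is 16 bits wide.  */
    return 0;
  }
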
*************** find_interesting_uses_address (struct iv
*** 1974,1979 ****
--- 1549,1555 ----
  {
    tree base = unshare_expr (*op_p), step = NULL;
    struct iv *civ;
+   struct ifs_ivopts_data ifs_ivopts_data;
  
    /* Ignore bitfields for now.  Not really something terribly complicated
       to handle.  TODO.  */
*************** find_interesting_uses_address (struct iv
*** 1981,1988 ****
        && DECL_NONADDRESSABLE_P (TREE_OPERAND (base, 1)))
      goto fail;
  
!   ifs_ivopts_data = data;
!   if (!for_each_index (&base, idx_find_step, &step)
        || zero_p (step))
      goto fail;
  
--- 1557,1566 ----
        && DECL_NONADDRESSABLE_P (TREE_OPERAND (base, 1)))
      goto fail;
  
!   ifs_ivopts_data.ivopts_data = data;
!   ifs_ivopts_data.stmt = stmt;
!   ifs_ivopts_data.step_p = &step;
!   if (!for_each_index (&base, idx_find_step, &ifs_ivopts_data)
        || zero_p (step))
      goto fail;
  
*************** static unsigned
*** 3122,3137 ****
  force_var_cost (struct ivopts_data *data,
  		tree expr, bitmap *depends_on)
  {
    if (depends_on)
      {
        fd_ivopts_data = data;
        walk_tree (&expr, find_depends, depends_on, NULL);
      }
  
!   if (TREE_INVARIANT (expr)
!       || SSA_VAR_P (expr))
      return 0;
  
    return spill_cost;
  }
  
--- 2700,2770 ----
  force_var_cost (struct ivopts_data *data,
  		tree expr, bitmap *depends_on)
  {
+   static bool costs_initialized = false;
+   static unsigned integer_cost;
+   static unsigned symbol_cost;
+   static unsigned address_cost;
+ 
+   if (!costs_initialized)
+     {
+       tree var = create_tmp_var_raw (integer_type_node, "test_var");
+       rtx x = gen_rtx_SYMBOL_REF (Pmode, "test_var");
+       tree addr;
+       tree type = build_pointer_type (integer_type_node);
+ 
+       integer_cost = computation_cost (convert (integer_type_node,
+ 						build_int_2 (2000, 0)));
+ 
+       SET_DECL_RTL (var, x);
+       TREE_STATIC (var) = 1;
+       addr = build (ADDR_EXPR, type, var);
+       symbol_cost = computation_cost (addr);
+ 
+       address_cost = computation_cost (build (PLUS_EXPR, type,
+ 					      addr,
+ 					      convert (type,
+ 						       build_int_2 (2000, 0))));
+       if (dump_file && (dump_flags & TDF_DETAILS))
+ 	{
+ 	  fprintf (dump_file, "force_var_cost:\n");
+ 	  fprintf (dump_file, "  integer %d\n", (int) integer_cost);
+ 	  fprintf (dump_file, "  symbol %d\n", (int) symbol_cost);
+ 	  fprintf (dump_file, "  address %d\n", (int) address_cost);
+ 	  fprintf (dump_file, "  other %d\n", (int) spill_cost);
+ 	  fprintf (dump_file, "\n");
+ 	}
+ 
+       costs_initialized = true;
+     }
+ 
    if (depends_on)
      {
        fd_ivopts_data = data;
        walk_tree (&expr, find_depends, depends_on, NULL);
      }
  
!   if (SSA_VAR_P (expr))
      return 0;
  
+   if (TREE_INVARIANT (expr))
+     {
+       if (TREE_CODE (expr) == INTEGER_CST)
+ 	return integer_cost;
+ 
+       if (TREE_CODE (expr) == ADDR_EXPR)
+ 	{
+ 	  tree obj = TREE_OPERAND (expr, 0);
+ 
+ 	  if (TREE_CODE (obj) == VAR_DECL
+ 	      || TREE_CODE (obj) == PARM_DECL
+ 	      || TREE_CODE (obj) == RESULT_DECL)
+ 	    return symbol_cost;
+ 	}
+ 
+       return address_cost;
+     }
+ 
+   /* Just an arbitrary value, FIXME.  */
    return spill_cost;
  }
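
The three probe expressions built above correspond roughly to the following
C-level computations (a sketch for illustration only, not part of the patch;
test_arr is a made-up name).  Their measured costs replace the flat spill_cost
that was previously charged for every non-SSA invariant:

  static char test_arr[4096];

  int   cost_integer (void) { return 2000; }             /* integer_cost */
  char *cost_symbol  (void) { return test_arr; }          /* symbol_cost  */
  char *cost_address (void) { return test_arr + 2000; }   /* address_cost */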
  
*************** create_iv (tree base, tree step, tree va
*** 4329,4335 ****
    else
      bsi_insert_before (incr_pos, stmt, BSI_NEW_STMT);
  
!   initial = force_gimple_operand (base, &stmts, false);
    if (stmts)
      {
        basic_block new_bb;
--- 3962,3968 ----
    else
      bsi_insert_before (incr_pos, stmt, BSI_NEW_STMT);
  
!   initial = force_gimple_operand (base, &stmts, false, var);
    if (stmts)
      {
        basic_block new_bb;
*************** rewrite_use_nonlinear_expr (struct ivopt
*** 4461,4467 ****
        bsi = stmt_bsi (use->stmt);
      }
  
!   op = force_gimple_operand (comp, &stmts, false);
  
    if (TREE_CODE (use->stmt) == PHI_NODE)
      {
--- 4094,4100 ----
        bsi = stmt_bsi (use->stmt);
      }
  
!   op = force_gimple_operand (comp, &stmts, false, SSA_NAME_VAR (*use->op_p));
  
    if (TREE_CODE (use->stmt) == PHI_NODE)
      {
*************** rewrite_use_address (struct ivopts_data 
*** 4490,4511 ****
  					     use, cand));
    block_stmt_iterator bsi = stmt_bsi (use->stmt);
    tree stmts;
!   tree op = force_gimple_operand (comp, &stmts, false);
!   tree var, tmp_var, name;
  
    if (stmts)
      bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
  
    if (TREE_CODE (op) == SSA_NAME)
      {
-       /* We need to add a memory tag for the variable.  But we do not want
- 	 to add it to the temporary used for the computations, since this leads
- 	 to problems in redundancy elimination when there are common parts
- 	 in two computations refering to the different arrays.  So we rewrite
- 	 the base variable of the ssa name to a new temporary.  */
-       tmp_var = create_tmp_var (TREE_TYPE (op), "ruatmp");
-       add_referenced_tmp_var (tmp_var);
- 
        var = get_base_address (*use->op_p);
        if (TREE_CODE (var) == INDIRECT_REF)
  	var = TREE_OPERAND (var, 0);
--- 4123,4136 ----
  					     use, cand));
    block_stmt_iterator bsi = stmt_bsi (use->stmt);
    tree stmts;
!   tree op = force_gimple_operand (comp, &stmts, false, NULL_TREE);
!   tree var, new_var, new_name, copy, name;
  
    if (stmts)
      bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
  
    if (TREE_CODE (op) == SSA_NAME)
      {
        var = get_base_address (*use->op_p);
        if (TREE_CODE (var) == INDIRECT_REF)
  	var = TREE_OPERAND (var, 0);
*************** rewrite_use_address (struct ivopts_data 
*** 4518,4537 ****
  	name = NULL_TREE;
        if (var_ann (var)->type_mem_tag)
  	var = var_ann (var)->type_mem_tag;
-       var_ann (tmp_var)->type_mem_tag = var;
  
        if (name)
  	{
! 	  ssa_name_ann_t ann = ssa_name_ann (name), new_ann;
! 
! 	  if (ann && ann->name_mem_tag)
! 	    {
! 	      new_ann = get_ssa_name_ann (op);
! 	      new_ann->name_mem_tag = ann->name_mem_tag;
! 	    }
! 	}
! 
!       SSA_NAME_VAR (op) = tmp_var;
      }
  
    *use->op_p = build1 (INDIRECT_REF, TREE_TYPE (*use->op_p), op);
--- 4143,4167 ----
  	name = NULL_TREE;
        if (var_ann (var)->type_mem_tag)
  	var = var_ann (var)->type_mem_tag;
  
+       /* We need to add a memory tag for the variable.  But we do not want
+ 	 to add it to the temporary used for the computations, since this leads
+ 	 to problems in redundancy elimination when there are common parts
+ 	 in two computations referring to different arrays.  So we copy
+ 	 the variable to a new temporary.  */
+       copy = build (MODIFY_EXPR, void_type_node, NULL_TREE, op);
        if (name)
+ 	new_name = duplicate_ssa_name (name, copy);
+       else
  	{
! 	  new_var = create_tmp_var (TREE_TYPE (op), "ruatmp");
! 	  add_referenced_tmp_var (new_var);
! 	  var_ann (new_var)->type_mem_tag = var;
! 	  new_name = make_ssa_name (new_var, copy);
! 	}
!       TREE_OPERAND (copy, 0) = new_name;
!       bsi_insert_before (&bsi, copy, BSI_SAME_STMT);
!       op = new_name;
      }
  
    *use->op_p = build1 (INDIRECT_REF, TREE_TYPE (*use->op_p), op);
*************** rewrite_use_compare (struct ivopts_data 
*** 4552,4558 ****
    if (may_eliminate_iv (data->current_loop,
  			use, cand, &compare, &bound))
      {
!       op = force_gimple_operand (unshare_expr (bound), &stmts, false);
  
        if (stmts)
  	bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
--- 4182,4189 ----
    if (may_eliminate_iv (data->current_loop,
  			use, cand, &compare, &bound))
      {
!       op = force_gimple_operand (unshare_expr (bound), &stmts, false,
! 				 NULL_TREE);
  
        if (stmts)
  	bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
*************** rewrite_use_compare (struct ivopts_data 
*** 4574,4580 ****
        || zero_p (get_iv (data, *op_p)->step))
      op_p = &TREE_OPERAND (cond, 1);
  
!   op = force_gimple_operand (comp, &stmts, false);
    if (stmts)
      bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
  
--- 4205,4211 ----
        || zero_p (get_iv (data, *op_p)->step))
      op_p = &TREE_OPERAND (cond, 1);
  
!   op = force_gimple_operand (comp, &stmts, false, SSA_NAME_VAR (*op_p));
    if (stmts)
      bsi_insert_before (&bsi, stmts, BSI_SAME_STMT);
  
*************** rewrite_use_outer (struct ivopts_data *d
*** 4739,4745 ****
  	value = get_computation_at (data->current_loop,
  				    use, cand, last_stmt (exit->src));
  
!       op = force_gimple_operand (value, &stmts, true);
  	  
        /* If we will preserve the iv anyway and we would need to perform
  	 some computation to replace the final value, do nothing.  */
--- 4370,4376 ----
  	value = get_computation_at (data->current_loop,
  				    use, cand, last_stmt (exit->src));
  
!       op = force_gimple_operand (value, &stmts, true, SSA_NAME_VAR (tgt));
  	  
        /* If we will preserve the iv anyway and we would need to perform
  	 some computation to replace the final value, do nothing.  */
*************** tree_ssa_iv_optimize_finalize (struct lo
*** 4932,4937 ****
--- 4563,4569 ----
    VARRAY_FREE (data->iv_candidates);
  
    scev_finalize ();
+   free_numbers_of_iterations_estimates (loops);
  }
  
  /* Optimizes the LOOP.  Returns true if anything changed.  */
*************** tree_ssa_iv_optimize_loop (struct ivopts
*** 4994,4999 ****
--- 4626,4637 ----
    loop_commit_inserts ();
  
    BITMAP_XFREE (iv_set);
+ 
+   /* We have changed the structure of induction variables; it might happen
+      that definitions in the scev database refer to some of them that were
+      eliminated.  */
+   scev_reset ();
+ 
  finish:
    free_loop_data (data);
  
Index: tree-ssa-loop-niter.c
===================================================================
RCS file: tree-ssa-loop-niter.c
diff -N tree-ssa-loop-niter.c
*** /dev/null	1 Jan 1970 00:00:00 -0000
--- tree-ssa-loop-niter.c	30 Apr 2004 23:36:56 -0000
***************
*** 0 ****
--- 1,1068 ----
+ /* Functions to determine/estimate number of iterations of a loop.
+    Copyright (C) 2004 Free Software Foundation, Inc.
+    
+ This file is part of GCC.
+    
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 2, or (at your option) any
+ later version.
+    
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ for more details.
+    
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING.  If not, write to the Free
+ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+ 02111-1307, USA.  */
+ 
+ #include "config.h"
+ #include "system.h"
+ #include "coretypes.h"
+ #include "tm.h"
+ #include "tree.h"
+ #include "rtl.h"
+ #include "tm_p.h"
+ #include "hard-reg-set.h"
+ #include "basic-block.h"
+ #include "output.h"
+ #include "diagnostic.h"
+ #include "tree-flow.h"
+ #include "tree-dump.h"
+ #include "cfgloop.h"
+ #include "tree-pass.h"
+ #include "ggc.h"
+ #include "tree-fold-const.h"
+ #include "tree-chrec.h"
+ #include "tree-scalar-evolution.h"
+ #include "params.h"
+ #include "flags.h"
+ #include "tree-inline.h"
+ 
+ #define SWAP(X, Y) do { void *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
+ 
+ /* Just to shorten the ugly names.  */
+ #define EXEC_BINARY nondestructive_fold_binary_to_constant
+ #define EXEC_UNARY nondestructive_fold_unary_to_constant
+ 
+ /*
+ 
+    Analysis of number of iterations of an affine exit test.
+ 
+ */
+ 
+ /* Checks whether ARG is either NULL_TREE or constant zero.  */
+ 
+ static bool
+ zero_p (tree arg)
+ {
+   if (!arg)
+     return true;
+ 
+   return integer_zerop (arg);
+ }
+ 
+ /* Computes inverse of X modulo 2^s, where MASK = 2^s-1.  */
+ 
+ static tree
+ inverse (tree x, tree mask)
+ {
+   tree type = TREE_TYPE (x);
+   tree ctr = EXEC_BINARY (RSHIFT_EXPR, type, mask, integer_one_node);
+   tree rslt = convert (type, integer_one_node);
+ 
+   while (integer_nonzerop (ctr))
+     {
+       rslt = EXEC_BINARY (MULT_EXPR, type, rslt, x);
+       rslt = EXEC_BINARY (BIT_AND_EXPR, type, rslt, mask);
+       x = EXEC_BINARY (MULT_EXPR, type, x, x);
+       x = EXEC_BINARY (BIT_AND_EXPR, type, x, mask);
+       ctr = EXEC_BINARY (RSHIFT_EXPR, type, ctr, integer_one_node);
+     }
+ 
+   return rslt;
+ }
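+ 
The recurrence above computes x^(2^(s-1) - 1) modulo 2^s, which for odd x is
the multiplicative inverse of x.  A standalone C check of this claim for s = 8
(illustration only, not part of the patch):

  #include <stdio.h>

  int
  main (void)
  {
    unsigned mask = 0xff;              /* 2^8 - 1, i.e. s = 8 */
    unsigned x;

    for (x = 1; x <= mask; x += 2)     /* only odd x are invertible mod 2^8 */
      {
        unsigned y = x, rslt = 1, ctr;

        /* Same recurrence as inverse (): rslt <- rslt * y, y <- y * y.  */
        for (ctr = mask >> 1; ctr != 0; ctr >>= 1)
          {
            rslt = (rslt * y) & mask;
            y = (y * y) & mask;
          }

        if (((rslt * x) & mask) != 1)
          {
            printf ("no inverse found for %u\n", x);
            return 1;
          }
      }

    printf ("the recurrence inverts every odd x modulo 256\n");
    return 0;
  }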
+ 
+ /* Returns unsigned variant of TYPE.  */
+ 
+ static tree
+ unsigned_type_for (tree type)
+ {
+   return make_unsigned_type (TYPE_PRECISION (type));
+ }
+ 
+ /* Returns signed variant of TYPE.  */
+ 
+ static tree
+ signed_type_for (tree type)
+ {
+   return make_signed_type (TYPE_PRECISION (type));
+ }
+ 
+ /* Determine the number of iterations according to the condition for staying
+    inside the loop: BASE0 + STEP0 * i (CODE) BASE1 + STEP1 * i, computed in
+    TYPE.  Store the results in NITER.  */
+ 
+ void
+ number_of_iterations_cond (tree type, tree base0, tree step0,
+ 			   enum tree_code code, tree base1, tree step1,
+ 			   struct tree_niter_desc *niter)
+ {
+   tree step, delta, mmin, mmax;
+   tree may_xform, bound, s, d, tmp;
+   bool was_sharp = false;
+   tree assumption;
+   tree assumptions = boolean_true_node;
+   tree noloop_assumptions = boolean_false_node;
+   tree niter_type, signed_niter_type;
+ 
+   /* The meaning of these assumptions is this:
+      if !assumptions,
+        then the rest of the information does not have to be valid;
+      if noloop_assumptions, then the loop does not have to roll
+        (but this is only a conservative approximation, i.e. it only says that
+        if !noloop_assumptions, then the loop does not end before the computed
+        number of iterations).  */
+ 
+   /* Make < comparison from > ones.  */
+   if (code == GE_EXPR
+       || code == GT_EXPR)
+     {
+       SWAP (base0, base1);
+       SWAP (step0, step1);
+       code = swap_tree_comparison (code);
+     }
+ 
+   /* We can take care of the case of two induction variables chasing each other
+      if the test is NE. I have never seen a loop using it, but still it is
+      cool.  */
+   if (!zero_p (step0) && !zero_p (step1))
+     {
+       if (code != NE_EXPR)
+ 	return;
+ 
+       step0 = EXEC_BINARY (MINUS_EXPR, type, step0, step1);
+       step1 = NULL_TREE;
+     }
+ 
+   /* If the result is a constant,  the loop is weird.  More precise handling
+      would be possible, but the situation is not common enough to waste time
+      on it.  */
+   if (zero_p (step0) && zero_p (step1))
+     return;
+ 
+   /* Ignore loops of while (i-- < 10) type.  */
+   if (code != NE_EXPR)
+     {
+       if (step0 && !tree_expr_nonnegative_p (step0))
+ 	return;
+ 
+       if (!zero_p (step1) && tree_expr_nonnegative_p (step1))
+ 	return;
+     }
+ 
+   if (TREE_CODE (type) == POINTER_TYPE)
+     {
+       /* We assume pointer arithmetic never overflows.  */
+       mmin = mmax = NULL_TREE;
+     }
+   else
+     {
+       mmin = TYPE_MIN_VALUE (type);
+       mmax = TYPE_MAX_VALUE (type);
+     }
+ 
+   /* Some more condition normalization.  We must record some assumptions
+      due to overflows.  */
+ 
+   if (code == LT_EXPR)
+     {
+       /* We want to take care only of <=; this is easy, because in the cases
+ 	 where the overflow would make the transformation unsafe the loop does
+ 	 not roll.  Seemingly it would make more sense to take care of <, as
+ 	 NE is more similar to it, but the problem is that here the
+ 	 transformation would be more difficult due to possibly infinite
+ 	 loops.  */
+       if (zero_p (step0))
+ 	{
+ 	  if (mmax)
+ 	    assumption = fold (build (EQ_EXPR, boolean_type_node, base0, mmax));
+ 	  else
+ 	    assumption = boolean_true_node;
+ 	  if (integer_nonzerop (assumption))
+ 	    goto zero_iter;
+ 	  base0 = fold (build (PLUS_EXPR, type, base0,
+ 			       convert (type, integer_one_node)));
+ 	}
+       else
+ 	{
+ 	  if (mmin)
+ 	    assumption = fold (build (EQ_EXPR, boolean_type_node, base1, mmin));
+ 	  else
+ 	    assumption = boolean_true_node;
+ 	  if (integer_nonzerop (assumption))
+ 	    goto zero_iter;
+ 	  base1 = fold (build (MINUS_EXPR, type, base1,
+ 			       convert (type, integer_one_node)));
+ 	}
+       noloop_assumptions = assumption;
+       code = LE_EXPR;
+ 
+       /* It will be useful to be able to tell the difference once more in
+ 	 <= -> != reduction.  */
+       was_sharp = true;
+     }
+ 
+   /* Take care of trivially infinite loops.  */
+   if (code != NE_EXPR)
+     {
+       if (zero_p (step0)
+ 	  && mmin
+ 	  && operand_equal_p (base0, mmin, 0))
+ 	return;
+       if (zero_p (step1)
+ 	  && mmax
+ 	  && operand_equal_p (base1, mmax, 0))
+ 	return;
+     }
+ 
+   /* If we can, we want to take care of NE conditions instead of size
+      comparisons, as they are much more friendly (most importantly
+      this takes care of the special handling of loops with step 1).  We can
+      do it if we first check that the upper bound is greater than or equal
+      to the lower bound, that their difference modulo the step is a constant,
+      and that there is no overflow.  */
+   if (code != NE_EXPR)
+     {
+       if (zero_p (step0))
+ 	step = EXEC_UNARY (NEGATE_EXPR, type, step1);
+       else
+ 	step = step0;
+       delta = build (MINUS_EXPR, type, base1, base0);
+       delta = fold (build (FLOOR_MOD_EXPR, type, delta, step));
+       may_xform = boolean_false_node;
+ 
+       if (TREE_CODE (delta) == INTEGER_CST)
+ 	{
+ 	  tmp = EXEC_BINARY (MINUS_EXPR, type, step,
+ 			     convert (type, integer_one_node));
+ 	  if (was_sharp
+ 	      && operand_equal_p (delta, tmp, 0))
+ 	    {
+ 	      /* A special case.  We have transformed a condition of the form
+ 		 for (i = 0; i < 4; i += 4)
+ 		 into
+ 		 for (i = 0; i <= 3; i += 4);
+ 		 obviously if the test for overflow during that transformation
+ 		 passed, we cannot overflow here.  Most importantly, any
+ 		 loop with a sharp end condition and step 1 falls into this
+ 		 category, so handling this case specially is definitely
+ 		 worth the trouble.  */
+ 	      may_xform = boolean_true_node;
+ 	    }
+ 	  else if (zero_p (step0))
+ 	    {
+ 	      if (!mmin)
+ 		may_xform = boolean_true_node;
+ 	      else
+ 		{
+ 		  bound = EXEC_BINARY (PLUS_EXPR, type, mmin, step);
+ 		  bound = EXEC_BINARY (MINUS_EXPR, type, bound, delta);
+ 		  may_xform = fold (build (LE_EXPR, boolean_type_node,
+ 					   bound, base0));
+ 		}
+ 	    }
+ 	  else
+ 	    {
+ 	      if (!mmax)
+ 		may_xform = boolean_true_node;
+ 	      else
+ 		{
+ 		  bound = EXEC_BINARY (MINUS_EXPR, type, mmax, step);
+ 		  bound = EXEC_BINARY (PLUS_EXPR, type, bound, delta);
+ 		  may_xform = fold (build (LE_EXPR, boolean_type_node,
+ 					   base1, bound));
+ 		}
+ 	    }
+ 	}
+ 
+       if (!integer_zerop (may_xform))
+ 	{
+ 	  /* We always perform the transformation, provided that it is not
+ 	     completely senseless.  This is OK, as we would need this assumption
+ 	     to determine the number of iterations anyway.  */
+ 	  if (!integer_nonzerop (may_xform))
+ 	    assumptions = may_xform;
+ 
+ 	  if (zero_p (step0))
+ 	    {
+ 	      base0 = build (PLUS_EXPR, type, base0, delta);
+ 	      base0 = fold (build (MINUS_EXPR, type, base0, step));
+ 	    }
+ 	  else
+ 	    {
+ 	      base1 = build (MINUS_EXPR, type, base1, delta);
+ 	      base1 = fold (build (PLUS_EXPR, type, base1, step));
+ 	    }
+ 
+ 	  assumption = fold (build (GT_EXPR, boolean_type_node, base0, base1));
+ 	  noloop_assumptions = fold (build (TRUTH_OR_EXPR, boolean_type_node,
+ 					    noloop_assumptions, assumption));
+ 	  code = NE_EXPR;
+ 	}
+     }
+ 
+   /* Count the number of iterations.  */
+   niter_type = unsigned_type_for (type);
+   signed_niter_type = signed_type_for (type);
+ 
+   if (code == NE_EXPR)
+     {
+       /* Everything we do here is just arithmetic modulo the size of the mode.
+ 	 This enables us to do more involved computations of the number of
+ 	 iterations than in the other cases.  First transform the condition
+ 	 into the shape s * i <> c, with s positive.  */
+       base1 = fold (build (MINUS_EXPR, type, base1, base0));
+       base0 = NULL_TREE;
+       if (!zero_p (step1))
+   	step0 = EXEC_UNARY (NEGATE_EXPR, type, step1);
+       step1 = NULL_TREE;
+       if (!tree_expr_nonnegative_p (convert (signed_niter_type, step0)))
+ 	{
+ 	  step0 = EXEC_UNARY (NEGATE_EXPR, type, step0);
+ 	  base1 = fold (build1 (NEGATE_EXPR, type, base1));
+ 	}
+ 
+       base1 = convert (niter_type, base1);
+       step0 = convert (niter_type, step0);
+ 
+       /* Let d = gcd (s, size of mode).  If d does not divide c, the loop
+ 	 is infinite.  Otherwise, the number of iterations is
+ 	 (inverse (s / d) * (c / d)) mod (size of mode / d).  */
+       s = step0;
+       d = integer_one_node;
+       bound = convert (niter_type, build_int_2 (~0, ~0));
+       while (1)
+ 	{
+ 	  tmp = EXEC_BINARY (BIT_AND_EXPR, niter_type, s,
+ 			     convert (niter_type, integer_one_node));
+ 	  if (integer_nonzerop (tmp))
+ 	    break;
+ 	  
+ 	  s = EXEC_BINARY (RSHIFT_EXPR, niter_type, s,
+ 			   convert (niter_type, integer_one_node));
+ 	  d = EXEC_BINARY (LSHIFT_EXPR, niter_type, d,
+ 			   convert (niter_type, integer_one_node));
+ 	  bound = EXEC_BINARY (RSHIFT_EXPR, niter_type, bound,
+ 			       convert (niter_type, integer_one_node));
+ 	}
+ 
+       tmp = fold (build (EXACT_DIV_EXPR, niter_type, base1, d));
+       tmp = fold (build (MULT_EXPR, niter_type, tmp, inverse (s, bound)));
+       niter->niter = fold (build (BIT_AND_EXPR, niter_type, tmp, bound));
+     }
+   else
+     {
+       if (zero_p (step1))
+ 	/* Condition in the shape a + s * i <= b.
+ 	   We must know that b + s does not overflow and a <= b + s, and then
+ 	   we can compute the number of iterations as (b + s - a) / s.  (It
+ 	   might seem that we could in fact be more clever about testing the
+ 	   b + s overflow condition using some information about b - a mod s,
+ 	   but it was already taken into account during the LE -> NE
+ 	   transform).  */
+ 	{
+ 	  if (mmax)
+ 	    {
+ 	      bound = EXEC_BINARY (MINUS_EXPR, type, mmax, step0);
+ 	      assumption = fold (build (LE_EXPR, boolean_type_node,
+ 					base1, bound));
+ 	      assumptions = fold (build (TRUTH_AND_EXPR, boolean_type_node,
+ 					 assumptions, assumption));
+ 	    }
+ 
+ 	  step = step0;
+ 	  tmp = fold (build (PLUS_EXPR, type, base1, step0));
+ 	  assumption = fold (build (GT_EXPR, boolean_type_node, base0, tmp));
+ 	  delta = fold (build (PLUS_EXPR, type, base1, step));
+ 	  delta = fold (build (MINUS_EXPR, type, delta, base0));
+ 	  delta = convert (niter_type, delta);
+ 	}
+       else
+ 	{
+ 	  /* Condition in the shape a <= b - s * i.
+ 	     We must know that a - s does not overflow and a - s <= b, and then
+ 	     we can again compute the number of iterations as (b - (a - s)) / s.  */
+ 	  if (mmin)
+ 	    {
+ 	      bound = EXEC_BINARY (MINUS_EXPR, type, mmin, step1);
+ 	      assumption = fold (build (LE_EXPR, boolean_type_node,
+ 					bound, base0));
+ 	      assumptions = fold (build (TRUTH_AND_EXPR, boolean_type_node,
+ 					 assumptions, assumption));
+ 	    }
+ 	  step = fold (build1 (NEGATE_EXPR, type, step1));
+ 	  tmp = fold (build (PLUS_EXPR, type, base0, step1));
+ 	  assumption = fold (build (GT_EXPR, boolean_type_node, tmp, base1));
+ 	  delta = fold (build (MINUS_EXPR, type, base0, step));
+ 	  delta = fold (build (MINUS_EXPR, type, base1, delta));
+ 	  delta = convert (niter_type, delta);
+ 	}
+       noloop_assumptions = fold (build (TRUTH_OR_EXPR, boolean_type_node,
+ 					noloop_assumptions, assumption));
+       delta = fold (build (FLOOR_DIV_EXPR, niter_type, delta,
+ 			   convert (niter_type, step)));
+       niter->niter = delta;
+     }
+ 
+   niter->assumptions = assumptions;
+   niter->may_be_zero = noloop_assumptions;
+   return;
+ 
+ zero_iter:
+   niter->assumptions = boolean_true_node;
+   niter->may_be_zero = boolean_true_node;
+   niter->niter = convert (type, integer_zero_node);
+   return;
+ }
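+ 
To make the NE_EXPR formula concrete, here is a standalone C sketch
(illustration only, not part of the patch; the example values 6 and 34 are
arbitrary) that evaluates the closed formula for the exit test 6 * i != 34 in
8-bit arithmetic and cross-checks it by simulating the loop:

  #include <stdio.h>

  int
  main (void)
  {
    /* Exit test "6 * i != 34", with everything computed modulo 2^8.  */
    unsigned s = 6, c = 34, mod = 256;
    unsigned inv = 1, y, ctr, niter, i;

    /* Divide out d = gcd (s, 2^8); the loop is finite because d divides c.  */
    while ((s & 1) == 0)
      {
        s >>= 1;
        c >>= 1;
        mod >>= 1;
      }

    /* Inverse of s modulo mod, using the same recurrence as inverse ().  */
    y = s;
    for (ctr = (mod - 1) >> 1; ctr != 0; ctr >>= 1)
      {
        inv = (inv * y) & (mod - 1);
        y = (y * y) & (mod - 1);
      }

    /* The closed formula from the comment in the NE_EXPR case.  */
    niter = (c * inv) & (mod - 1);

    /* Cross-check by simulating the loop.  */
    for (i = 0; ((6 * i) & 0xff) != 34; i++)
      ;

    printf ("formula: %u, simulation: %u\n", niter, i);  /* both print 91 */
    return 0;
  }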
+ 
+ /* Stores the description of the number of iterations of LOOP derived from
+    the exit edge EXIT in NITER.  Returns true if the description is valid
+    unconditionally, i.e. if the recorded assumptions are trivially true.  */
+ 
+ bool
+ number_of_iterations_exit (struct loop *loop, edge exit,
+ 			   struct tree_niter_desc *niter)
+ {
+   tree stmt, cond, type;
+   tree op0, base0, step0;
+   tree op1, base1, step1;
+   enum tree_code code;
+ 
+   if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src))
+     return false;
+ 
+   niter->assumptions = convert (boolean_type_node, integer_zero_node);
+   stmt = last_stmt (exit->src);
+   if (!stmt || TREE_CODE (stmt) != COND_EXPR)
+     return false;
+ 
+   /* We want the condition for staying inside loop.  */
+   cond = COND_EXPR_COND (stmt);
+   if (exit->flags & EDGE_TRUE_VALUE)
+     cond = invert_truthvalue (cond);
+ 
+   code = TREE_CODE (cond);
+   switch (code)
+     {
+     case GT_EXPR:
+     case GE_EXPR:
+     case NE_EXPR:
+     case LT_EXPR:
+     case LE_EXPR:
+       break;
+ 
+     default:
+       return false;
+     }
+   
+   op0 = TREE_OPERAND (cond, 0);
+   op1 = TREE_OPERAND (cond, 1);
+   type = TREE_TYPE (op0);
+ 
+   if (TREE_CODE (type) != INTEGER_TYPE
+     && TREE_CODE (type) != POINTER_TYPE)
+     return false;
+      
+   if (!simple_iv (loop, stmt, op0, &base0, &step0))
+     return false;
+   if (!simple_iv (loop, stmt, op1, &base1, &step1))
+     return false;
+ 
+   number_of_iterations_cond (type, base0, step0, code, base1, step1,
+ 			     niter);
+   return integer_onep (niter->assumptions);
+ }
+ 
+ /*
+ 
+    Analysis of a number of iterations of a loop by a brute-force evaluation.
+ 
+ */
+ 
+ /* Bound on the number of iterations we try to evaluate.  */
+ 
+ #define MAX_ITERATIONS_TO_TRACK 1000
+ 
+ /* Determines a loop phi node of LOOP such that X is derived from it
+    by a chain of operations with constants.  */
+ 
+ static tree
+ chain_of_csts_start (struct loop *loop, tree x)
+ {
+   tree stmt = SSA_NAME_DEF_STMT (x);
+   basic_block bb = bb_for_stmt (stmt);
+   use_optype uses;
+ 
+   if (!bb
+       || !flow_bb_inside_loop_p (loop, bb))
+     return NULL_TREE;
+   
+   if (TREE_CODE (stmt) == PHI_NODE)
+     {
+       if (bb == loop->header)
+ 	return stmt;
+ 
+       return NULL_TREE;
+     }
+ 
+   if (TREE_CODE (stmt) != MODIFY_EXPR)
+     return NULL_TREE;
+ 
+   get_stmt_operands (stmt);
+   if (NUM_VUSES (STMT_VUSE_OPS (stmt)) > 0)
+     return NULL_TREE;
+   if (NUM_VDEFS (STMT_VDEF_OPS (stmt)) > 0)
+     return NULL_TREE;
+   if (NUM_DEFS (STMT_DEF_OPS (stmt)) > 1)
+     return NULL_TREE;
+   uses = STMT_USE_OPS (stmt);
+   if (NUM_USES (uses) != 1)
+     return NULL_TREE;
+ 
+   return chain_of_csts_start (loop, USE_OP (uses, 0));
+ }
+ 
+ /* Determines whether X is derived from a value of a phi node in LOOP
+    such that
+ 
+    * this derivation consists only of operations with constants
+    * the initial value of the phi node is constant
+    * its value in the next iteration can be derived from the current one
+      by a chain of operations with constants.  */
+ 
+ static tree
+ get_base_for (struct loop *loop, tree x)
+ {
+   tree phi, init, next;
+ 
+   if (is_gimple_min_invariant (x))
+     return x;
+ 
+   phi = chain_of_csts_start (loop, x);
+   if (!phi)
+     return NULL_TREE;
+ 
+   init = phi_element_for_edge (phi, loop_preheader_edge (loop))->def;
+   next = phi_element_for_edge (phi, loop_latch_edge (loop))->def;
+ 
+   if (TREE_CODE (next) != SSA_NAME)
+     return NULL_TREE;
+ 
+   if (!is_gimple_min_invariant (init))
+     return NULL_TREE;
+ 
+   if (chain_of_csts_start (loop, next) != phi)
+     return NULL_TREE;
+ 
+   return phi;
+ }
+ 
+ /* Evaluates the value of X, provided that BASE is the value of the variable
+    defined in the loop phi node from which X is derived by operations with
+    constants.  */
+ 
+ static tree
+ get_val_for (tree x, tree base)
+ {
+   tree stmt, *op, nx, val;
+   use_optype uses;
+ 
+   if (!x)
+     return base;
+ 
+   stmt = SSA_NAME_DEF_STMT (x);
+   if (TREE_CODE (stmt) == PHI_NODE)
+     return base;
+ 
+   uses = STMT_USE_OPS (stmt);
+   op = USE_OP_PTR (uses, 0);
+ 
+   nx = *op;
+   val = get_val_for (nx, base);
+   *op = val;
+   val = fold (TREE_OPERAND (stmt, 1));
+   *op = nx;
+ 
+   return val;
+ }
+ 
+ /* Tries to count the number of iterations of LOOP until it exits through
+    EXIT by brute force.  */
+ 
+ tree
+ loop_niter_by_eval (struct loop *loop, edge exit)
+ {
+   tree cond, cnd, acnd;
+   tree op[2], val[2], next[2], aval[2], phi[2];
+   unsigned i, j;
+   enum tree_code cmp;
+ 
+   cond = last_stmt (exit->src);
+   if (!cond || TREE_CODE (cond) != COND_EXPR)
+     return chrec_top;
+ 
+   cnd = COND_EXPR_COND (cond);
+   if (exit->flags & EDGE_TRUE_VALUE)
+     cnd = invert_truthvalue (cnd);
+ 
+   cmp = TREE_CODE (cnd);
+   switch (cmp)
+     {
+     case EQ_EXPR:
+     case NE_EXPR:
+     case GT_EXPR:
+     case GE_EXPR:
+     case LT_EXPR:
+     case LE_EXPR:
+       for (j = 0; j < 2; j++)
+ 	op[j] = TREE_OPERAND (cnd, j);
+       break;
+ 
+     default:
+       return chrec_top;
+     }
+ 
+   for (j = 0; j < 2; j++)
+     {
+       phi[j] = get_base_for (loop, op[j]);
+       if (!phi[j])
+ 	return chrec_top;
+     }
+ 
+   for (j = 0; j < 2; j++)
+     {
+       if (TREE_CODE (phi[j]) == PHI_NODE)
+ 	{
+ 	  val[j] = phi_element_for_edge (phi[j],
+ 					 loop_preheader_edge (loop))->def;
+ 	  next[j] = phi_element_for_edge (phi[j],
+ 					  loop_latch_edge (loop))->def;
+ 	}
+       else
+ 	{
+ 	  val[j] = phi[j];
+ 	  next[j] = NULL_TREE;
+ 	  op[j] = NULL_TREE;
+ 	}
+     }
+ 
+   for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
+     {
+       for (j = 0; j < 2; j++)
+ 	aval[j] = get_val_for (op[j], val[j]);
+ 
+       acnd = fold (build (cmp, boolean_type_node, aval[0], aval[1]));
+       if (integer_zerop (acnd))
+ 	{
+ 	  if (dump_file && (dump_flags & TDF_DETAILS))
+ 	    fprintf (dump_file,
+ 		     "Proved that loop %d iterates %d times using brute force.\n",
+ 		     loop->num, i);
+ 	  return build_int_2 (i, 0);
+ 	}
+ 
+       for (j = 0; j < 2; j++)
+ 	val[j] = get_val_for (next[j], val[j]);
+     }
+ 
+   return chrec_top;
+ }
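+ 
A standalone example (not part of the patch) of an exit that the affine
analysis above cannot handle, but that this brute-force evaluation resolves:

  #include <stdio.h>

  int
  main (void)
  {
    unsigned i, n = 0;

    /* The exit value of i is not an affine function of the iteration
       number, so the affine analysis gives up; simulating the first
       iterations still determines the count.  */
    for (i = 1; i != 64; i *= 2)
      n++;

    printf ("%u iterations\n", n);   /* prints 6 */
    return 0;
  }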
+ 
+ /* Finds the exit of LOOP through which the loop exits after a constant
+    number of iterations and stores it to *EXIT.  The iteration count is
+    returned (chrec_top if no such exit is found).  */
+ 
+ tree
+ find_loop_niter_by_eval (struct loop *loop, edge *exit)
+ {
+   unsigned n_exits, i;
+   edge *exits = get_loop_exit_edges (loop, &n_exits);
+   edge ex;
+   tree niter = NULL_TREE, aniter;
+ 
+   *exit = NULL;
+   for (i = 0; i < n_exits; i++)
+     {
+       ex = exits[i];
+       if (!just_once_each_iteration_p (loop, ex->src))
+ 	continue;
+ 
+       aniter = loop_niter_by_eval (loop, ex);
+       if (TREE_CODE (aniter) != INTEGER_CST)
+ 	continue;
+ 
+       if (niter
+ 	  && !integer_nonzerop (fold (build (LT_EXPR, boolean_type_node,
+ 					     aniter, niter))))
+ 	continue;
+ 
+       niter = aniter;
+       *exit = ex;
+     }
+   free (exits);
+ 
+   return niter ? niter : chrec_top;
+ }
+ 
+ /*
+ 
+    Analysis of upper bounds on number of iterations of a loop.
+ 
+ */
+ 
+ /* Bound on number of iterations of a loop.  */
+ 
+ struct nb_iter_bound
+ {
+   tree bound;		/* The bound on the number of executions of anything
+ 			   after ...  */
+   tree at_stmt;		/* ... this statement during one execution of loop.  */
+   struct nb_iter_bound *next;
+ 			/* The next bound in a list.  */
+ };
+ 
+ /* Records that the statements after AT_STMT are executed at most BOUND times
+    during one run of LOOP.  */
+ 
+ static void
+ record_estimate (struct loop *loop, tree bound, tree at_stmt)
+ {
+   struct nb_iter_bound *elt = xmalloc (sizeof (struct nb_iter_bound));
+ 
+   if (dump_file && (dump_flags & TDF_DETAILS))
+     {
+       fprintf (dump_file, "Statements after ");
+       print_generic_expr (dump_file, at_stmt, TDF_SLIM);
+       fprintf (dump_file, " are executed at most ");
+       print_generic_expr (dump_file, bound, TDF_SLIM);
+       fprintf (dump_file, " times in loop %d.\n", loop->num);
+     }
+ 
+   elt->bound = bound;
+   elt->at_stmt = at_stmt;
+   elt->next = loop->bounds;
+   loop->bounds = elt;
+ }
+ 
+ /* Records estimates on numbers of iterations of LOOP.  */
+ 
+ static void
+ estimate_numbers_of_iterations_loop (struct loop *loop)
+ {
+   edge *exits;
+   tree niter, type;
+   unsigned i, n_exits;
+   struct tree_niter_desc niter_desc;
+ 
+   /* First, use the scev information about the number of iterations.  */
+   niter = number_of_iterations_in_loop (loop);
+   if (niter != chrec_top)
+     {
+       type = TREE_TYPE (niter);
+       niter = fold (build (MINUS_EXPR, type, niter,
+ 			   convert (type, integer_one_node)));
+       record_estimate (loop, niter, last_stmt (loop_exit_edge (loop, 0)->src));
+     }
+ 
+   /* Now use number_of_iterations_exit.  */
+   exits = get_loop_exit_edges (loop, &n_exits);
+   for (i = 0; i < n_exits; i++)
+     {
+       if (!number_of_iterations_exit (loop, exits[i], &niter_desc))
+ 	continue;
+ 
+       niter = niter_desc.niter;
+       type = TREE_TYPE (niter);
+       if (!integer_zerop (niter_desc.may_be_zero)
+ 	  && !integer_nonzerop (niter_desc.may_be_zero))
+ 	niter = build (COND_EXPR, type, niter_desc.may_be_zero,
+ 		       convert (type, integer_zero_node),
+ 		       niter);
+       record_estimate (loop, niter, last_stmt (exits[i]->src));
+     }
+   free (exits);
+   
+   /* TODO Here we could use other possibilities, like bounds of arrays accessed
+      in the loop.  */
+ }
+ 
+ /* Records estimates on numbers of iterations of LOOPS.  */
+ 
+ void
+ estimate_numbers_of_iterations (struct loops *loops)
+ {
+   unsigned i;
+   struct loop *loop;
+ 
+   for (i = 1; i < loops->num; i++)
+     {
+       loop = loops->parray[i];
+       if (loop)
+ 	estimate_numbers_of_iterations_loop (loop);
+     }
+ }
+ 
+ /* If A > B, returns -1.  If A == B, returns 0.  If A < B, returns 1.
+    If neither of these relations can be proved, returns 2.  */
+ 
+ static int
+ compare_trees (tree a, tree b)
+ {
+   tree typea = TREE_TYPE (a), typeb = TREE_TYPE (b);
+   tree type;
+ 
+   if (TYPE_PRECISION (typea) > TYPE_PRECISION (typeb))
+     type = typea;
+   else
+     type = typeb;
+ 
+   a = convert (type, a);
+   b = convert (type, b);
+ 
+   if (integer_nonzerop (fold (build (EQ_EXPR, boolean_type_node, a, b))))
+     return 0;
+   if (integer_nonzerop (fold (build (LT_EXPR, boolean_type_node, a, b))))
+     return 1;
+   if (integer_nonzerop (fold (build (GT_EXPR, boolean_type_node, a, b))))
+     return -1;
+ 
+   return 2;
+ }
+ 
+ /* Returns the largest value obtainable by casting something in INNER type to
+    OUTER type.  */
+ 
+ static tree
+ upper_bound_in_type (tree outer, tree inner)
+ {
+   unsigned HOST_WIDE_INT lo, hi;
+   unsigned bits = TYPE_PRECISION (inner);
+ 
+   if (TYPE_UNSIGNED (outer))
+     {
+       if (bits <= HOST_BITS_PER_WIDE_INT)
+ 	{
+ 	  hi = 0;
+ 	  lo = (~(unsigned HOST_WIDE_INT) 0)
+ 		  >> (HOST_BITS_PER_WIDE_INT - bits);
+ 	}
+       else
+ 	{
+ 	  hi = (~(unsigned HOST_WIDE_INT) 0)
+ 		  >> (2 * HOST_BITS_PER_WIDE_INT - bits);
+ 	  lo = ~(unsigned HOST_WIDE_INT) 0;
+ 	}
+     }
+   else
+     {
+       if (bits <= HOST_BITS_PER_WIDE_INT)
+ 	{
+ 	  hi = 0;
+ 	  lo = (~(unsigned HOST_WIDE_INT) 0)
+ 		  >> (HOST_BITS_PER_WIDE_INT - bits) >> 1;
+ 	}
+       else
+ 	{
+ 	  hi = (~(unsigned HOST_WIDE_INT) 0)
+ 		  >> (2 * HOST_BITS_PER_WIDE_INT - bits) >> 1;
+ 	  lo = ~(unsigned HOST_WIDE_INT) 0;
+ 	}
+     }
+ 
+   return convert (outer,
+ 		  convert (inner,
+ 			   build_int_2 (lo, hi)));
+ }
+ 
+ /* Returns the smallest value obtainable by casting something in INNER type to
+    OUTER type.  */
+ 
+ static tree
+ lower_bound_in_type (tree outer, tree inner)
+ {
+   unsigned HOST_WIDE_INT lo, hi;
+   unsigned bits = TYPE_PRECISION (inner);
+ 
+   if (TYPE_UNSIGNED (outer))
+     lo = hi = 0;
+   else if (bits <= HOST_BITS_PER_WIDE_INT)
+     {
+       hi = ~(unsigned HOST_WIDE_INT) 0;
+       lo = (~(unsigned HOST_WIDE_INT) 0) << (bits - 1);
+     }
+   else
+     {
+       hi = (~(unsigned HOST_WIDE_INT) 0) << (bits - HOST_BITS_PER_WIDE_INT - 1);
+       lo = 0;
+     }
+ 
+   return convert (outer,
+ 		  convert (inner,
+ 			   build_int_2 (lo, hi)));
+ }
+ 
+ /* Returns true if statement S1 dominates statement S2.  */
+ 
+ static bool
+ stmt_dominates_stmt_p (tree s1, tree s2)
+ {
+   basic_block bb1 = bb_for_stmt (s1), bb2 = bb_for_stmt (s2);
+ 
+   if (!bb1
+       || s1 == s2)
+     return true;
+ 
+   if (bb1 == bb2)
+     {
+       block_stmt_iterator bsi;
+ 
+       for (bsi = bsi_start (bb1); bsi_stmt (bsi) != s2; bsi_next (&bsi))
+ 	if (bsi_stmt (bsi) == s1)
+ 	  return true;
+ 
+       return false;
+     }
+ 
+   return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
+ }
+ 
+ /* Checks whether it is correct to count the induction variable BASE + STEP * I
+    at AT_STMT in wider TYPE, using the fact that statement OF is executed at
+    most BOUND times in the loop.  If it is possible, return the value of step in
+    the TYPE, otherwise return NULL_TREE.  */
+ 
+ static tree
+ can_count_iv_in_wider_type_bound (tree type, tree base, tree step,
+ 				  tree at_stmt,
+ 				  tree bound, tree of)
+ {
+   tree inner_type = TREE_TYPE (base), b, bplusstep, new_step, new_step_abs;
+   tree valid_niter, extreme, unsigned_type, delta, bound_type;
+ 
+   b = convert (type, base);
+   bplusstep = convert (type,
+ 		       fold (build (PLUS_EXPR, inner_type, base, step)));
+   new_step = fold (build (MINUS_EXPR, type, bplusstep, b));
+   if (TREE_CODE (new_step) != INTEGER_CST)
+     return NULL_TREE;
+ 
+   switch (compare_trees (bplusstep, b))
+     {
+     case -1:
+       extreme = upper_bound_in_type (type, inner_type);
+       delta = fold (build (MINUS_EXPR, type, extreme, b));
+       new_step_abs = new_step;
+       break;
+ 
+     case 1:
+       extreme = lower_bound_in_type (type, inner_type);
+       new_step_abs = fold (build (NEGATE_EXPR, type, new_step));
+       delta = fold (build (MINUS_EXPR, type, b, extreme));
+       break;
+ 
+     case 0:
+       return new_step;
+ 
+     default:
+       return NULL_TREE;
+     }
+ 
+   unsigned_type = unsigned_type_for (type);
+   delta = convert (unsigned_type, delta);
+   new_step_abs = convert (unsigned_type, new_step_abs);
+   valid_niter = fold (build (FLOOR_DIV_EXPR, unsigned_type,
+ 			     delta, new_step_abs));
+ 
+   bound_type = TREE_TYPE (bound);
+   if (TYPE_PRECISION (type) > TYPE_PRECISION (bound_type))
+     bound = convert (unsigned_type, bound);
+   else
+     valid_niter = convert (bound_type, valid_niter);
+     
+   if (at_stmt && stmt_dominates_stmt_p (of, at_stmt))
+     {
+       /* After the statement OF we know that anything is executed at most
+ 	 BOUND times.  */
+       if (integer_nonzerop (fold (build (GE_EXPR, boolean_type_node,
+ 					 valid_niter, bound))))
+ 	return new_step;
+     }
+   else
+     {
+       /* Before the statement OF we know that anything is executed at most
+ 	 BOUND + 1 times.  */
+       if (integer_nonzerop (fold (build (GT_EXPR, boolean_type_node,
+ 					 valid_niter, bound))))
+ 	return new_step;
+     }
+ 
+   return NULL_TREE;
+ }
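+ 
With concrete numbers (illustration only, not part of the patch): for a 16-bit
induction variable with base 0 and step 4 considered in a 32-bit type, the
widened computation stays exact for (65535 - 0) / 4 = 16383 steps, so a
recorded bound of, say, 1000 executions makes the promotion valid.  The sketch
below mirrors the comparison made when OF dominates AT_STMT:

  #include <stdio.h>

  int
  main (void)
  {
    unsigned base = 0, step = 4;
    unsigned extreme = 65535;            /* largest value of the narrow type */
    unsigned valid_niter = (extreme - base) / step;
    unsigned bound = 1000;               /* assumed bound recorded for the loop */

    printf ("exact for %u steps, bound %u -> %s\n",
            valid_niter, bound,
            valid_niter >= bound ? "count in wider type" : "keep narrow");
    return 0;
  }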
+ 
+ /* Checks whether it is correct to count the induction variable BASE + STEP * I
+    at AT_STMT in wider TYPE, using the bounds on numbers of iterations of a
+    LOOP.  If it is possible, return the value of step in the TYPE, otherwise
+    return NULL_TREE.  */
+ 
+ tree
+ can_count_iv_in_wider_type (struct loop *loop, tree type, tree base, tree step,
+ 			    tree at_stmt)
+ {
+   struct nb_iter_bound *bound;
+   tree new_step;
+ 
+   for (bound = loop->bounds; bound; bound = bound->next)
+     {
+       new_step = can_count_iv_in_wider_type_bound (type, base, step,
+ 						   at_stmt,
+ 						   bound->bound,
+ 						   bound->at_stmt);
+ 
+       if (new_step)
+ 	return new_step;
+     }
+ 
+   return NULL_TREE;
+ }
+ 
+ /* Frees the information on upper bounds on numbers of iterations of LOOP.  */
+ 
+ static void
+ free_numbers_of_iterations_estimates_loop (struct loop *loop)
+ {
+   struct nb_iter_bound *bound, *next;
+   
+   for (bound = loop->bounds; bound; bound = next)
+     {
+       next = bound->next;
+       free (bound);
+     }
+ 
+   loop->bounds = NULL;
+ }
+ 
+ /* Frees the information on upper bounds on numbers of iterations of LOOPS.  */
+ 
+ void
+ free_numbers_of_iterations_estimates (struct loops *loops)
+ {
+   unsigned i;
+   struct loop *loop;
+ 
+   for (i = 1; i < loops->num; i++)
+     {
+       loop = loops->parray[i];
+       if (loop)
+ 	free_numbers_of_iterations_estimates_loop (loop);
+     }
+ }
Index: varasm.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/varasm.c,v
retrieving revision 1.295.2.40.2.4
diff -c -3 -p -r1.295.2.40.2.4 varasm.c
*** varasm.c	25 Apr 2004 20:33:44 -0000	1.295.2.40.2.4
--- varasm.c	30 Apr 2004 23:36:56 -0000
*************** force_const_mem (enum machine_mode mode,
*** 2859,2864 ****
--- 2859,2865 ----
    desc->mem = def = gen_rtx_MEM (mode, symbol);
    set_mem_attributes (def, lang_hooks.types.type_for_mode (mode, 0), 1);
    RTX_UNCHANGING_P (def) = 1;
+   MEM_NOTRAP_P (def) = 1;
  
    /* If we're dropping a label to the constant pool, make sure we
       don't delete it.  */
Index: config/rs6000/rs6000.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/config/rs6000/rs6000.c,v
retrieving revision 1.332.2.36.2.6
diff -c -3 -p -r1.332.2.36.2.6 rs6000.c
*** config/rs6000/rs6000.c	25 Apr 2004 20:34:42 -0000	1.332.2.36.2.6
--- config/rs6000/rs6000.c	30 Apr 2004 23:36:56 -0000
*************** rs6000_emit_move (rtx dest, rtx source, 
*** 3803,3808 ****
--- 3803,3809 ----
  			       create_TOC_reference (XEXP (operands[1], 0)));
  	      set_mem_alias_set (operands[1], get_TOC_alias_set ());
  	      RTX_UNCHANGING_P (operands[1]) = 1;
+ 	      MEM_NOTRAP_P (operands[1]) = 1;
  	    }
  	}
        break;

