This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.



Re: [PATCH][RFC] bitwise CCP, 2nd try


On Thu, 5 Aug 2010, Paolo Bonzini wrote:

> On 08/04/2010 04:14 PM, Richard Guenther wrote:
> > On Wed, 4 Aug 2010, Paolo Bonzini wrote:
> > 
> > > On 08/04/2010 02:53 PM, Richard Guenther wrote:
> > > >       gcc_assert (old_val->lattice_val < new_val.lattice_val
> > > >                   || (old_val->lattice_val == new_val.lattice_val
> > > >                       && ((!old_val->value && !new_val.value)
> > > > +                         /* Allow transitioning from &x to &x & ~3.  */
> > > > +                         || (TREE_CODE (old_val->value) != INTEGER_CST
> > > > +                             && TREE_CODE (new_val.value) == INTEGER_CST)
> > > > +                         || (TREE_CODE (old_val->value) == INTEGER_CST
> > > > +                             && TREE_CODE (new_val.value) == INTEGER_CST
> > > > +                             && double_int_equal_p
> > > > +                                  (double_int_and_not
> > > > +                                     (tree_to_double_int (old_val->value),
> > > > +                                      new_val.mask),
> > > > +                                   double_int_and_not
> > > > +                                     (tree_to_double_int (new_val.value),
> > > > +                                      new_val.mask)))
> > > >                       || operand_equal_p (old_val->value, new_val.value,
> > > >                                           0))));
> > > 
> > > Also, do you have a testcase for the CONSTANT/CONSTANT check?
> > 
> > The &x -> &x & ~3 transition?  It happens during bootstrap - I have
> > yet to create a set of (exhaustive) testcases.
> 
> That, and
> 
> +   /* We have to be careful to not go up the bitwise lattice
> +      represented by the mask.
> +      ???  This doesn't seem to be the best place to enforce this.  */
> 
> if they aren't the same thing.

No, they are not.  The bitwise folding routines do not treat the mask
as a lattice; only when setting the new lattice value do we do that,
making sure that bits that were already varying in the previous
lattice state stay varying.  But we of course still require and check
that the non-varying bits did not change when the overall lattice
state did not change (with the special case mentioned above: when
merging &x, &x + 4, &x + 8, ... at a loop PHI node we allow the
lattice value to change from &x to { 0, ~3 }).
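
To make the mask semantics concrete, here is a minimal stand-alone
sketch (not GCC code; the bitval type and the meet helper are invented
for illustration) of the meet the patch performs for two INTEGER_CST
lattice values in ccp_lattice_meet: set mask bits are unknown, and
meeting two constants marks every bit on which they disagree as
unknown:

  #include <stdio.h>
  #include <stdint.h>

  /* A CONSTANT lattice element as a (value, mask) pair: mask bits are
     unknown, value & ~mask are the known bits.  */
  typedef struct { uint64_t value, mask; } bitval;

  /* Meet as for INTEGER_CSTs in ccp_lattice_meet: the result is
     unknown where either input is unknown or the known values differ.
     In the patch an all-ones mask then drops to VARYING.  Value bits
     under the mask are irrelevant; we normalize them to zero here.  */
  static bitval
  meet (bitval a, bitval b)
  {
    bitval r;
    r.mask = a.mask | b.mask | (a.value ^ b.value);
    r.value = a.value & ~r.mask;
    return r;
  }

  int
  main (void)
  {
    /* &x for a 4-byte aligned x: value 0, mask ~3, i.e. only the low
       two bits are known, and they are zero.  */
    bitval px  = { 0, ~(uint64_t) 3 };
    bitval px4 = { 4, ~(uint64_t) 3 };  /* &x + 4 */
    bitval px8 = { 8, ~(uint64_t) 3 };  /* &x + 8 */
    bitval m = meet (meet (px, px4), px8);
    /* The result is value 0, mask ~3: the 4-byte alignment survives
       the loop PHI, which is the &x -> { 0, ~3 } transition above.  */
    printf ("value %#llx mask %#llx\n",
            (unsigned long long) m.value, (unsigned long long) m.mask);
    return 0;
  }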

The following is a final(?) patch; I added your suggestion for
the comparison result, fixed some latent bugs and, most importantly,
added some more testcases.  gcc.c-torture/execute/20100805-1.c
should cover the lattice transition checks, and
gcc.dg/tree-ssa/ssa-ccp-33.c tests that we can infer the alignment
of a pointer induction variable.
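
For the induction-variable case the relevant piece is the PLUS_EXPR
rule in bit_value_binop_1 below: do the addition once with all unknown
bits as zero and once with all unknown bits as one, and a result bit
is only known where both inputs are known and the two sums agree,
i.e. where the carry-in is known.  A hedged sketch in the same style
as above, again with invented helper names:

  #include <stdio.h>
  #include <stdint.h>

  typedef struct { uint64_t value, mask; } bitval;  /* as above */

  /* PLUS_EXPR as in bit_value_binop_1: lo minimises carries, hi
     maximises them; bits where lo and hi differ have unknown carry.  */
  static bitval
  add (bitval a, bitval b)
  {
    uint64_t lo = (a.value & ~a.mask) + (b.value & ~b.mask);
    uint64_t hi = (a.value | a.mask) + (b.value | b.mask);
    bitval r;
    r.mask = a.mask | b.mask | (lo ^ hi);
    r.value = lo & ~r.mask;
    return r;
  }

  int
  main (void)
  {
    /* p = &a[0], 4-byte aligned: value 0, mask ~3.  Each iteration
       adds the fully known constant sizeof (int) == 4.  */
    bitval p    = { 0, ~(uint64_t) 3 };
    bitval four = { 4, 0 };
    p = add (p, four);
    /* The mask stays ~3, so (__SIZE_TYPE__) p & (sizeof (int) - 1)
       folds to zero and the link_error () call is eliminated.  */
    printf ("value %#llx mask %#llx\n",
            (unsigned long long) p.value, (unsigned long long) p.mask);
    return 0;
  }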

Bootstrapped and tested on x86_64-unknown-linux-gnu.

Anything further?

Thanks,
Richard.

2010-08-04  Richard Guenther  <rguenther@suse.de>

	* tree-ssa-ccp.c (struct prop_value_d): Add mask member.
	(dump_lattice_value): Dump it.
	(get_default_value): Adjust.
	(get_constant_value): Likewise.
	(set_value_varying): Likewise.
	(set_lattice_value): Make sure to not go up the lattice
	with bitwise constant values.
	(get_value_for_expr): Handle ADDR_EXPRs.
	(value_to_double_int): New function.
	(get_value_from_alignment): Likewise.
	(do_dbg_cnt): Adjust.
	(ccp_lattice_meet): Handle partially constant values.
	(bit_value_unop_1): New function.
	(bit_value_binop_1): Likewise.
	(bit_value_unop): Likewise.
	(bit_value_binop): Likewise.
	(evaluate_stmt): Track partially constant values if
	flag_tree_bit_ccp is set.
	(ccp_fold_stmt): Dump if we folded a predicate.
	(ccp_visit_stmt): Adjust.
	* common.opt (ftree-bit-ccp): New flag.
	* doc/invoke.texi (ftree-bit-ccp): Document.
	* opts.c (decode_options): Enable bit-CCP at -O1.

	* gcc.dg/tree-ssa/ssa-dce-3.c: XFAIL.
	* gcc.dg/tree-ssa/pr23744.c: Disable CCP.
	* gcc.dg/tree-ssa/pr25382.c: Likewise.
	* gcc.dg/tree-ssa/ssa-ccp-30.c: New testcase.
	* gcc.dg/tree-ssa/ssa-ccp-31.c: Likewise.
	* gcc.dg/tree-ssa/ssa-ccp-32.c: Likewise.
	* gcc.dg/tree-ssa/ssa-ccp-33.c: Likewise.
	* gcc.c-torture/execute/20100805-1.c: Likewise.

Index: trunk/gcc/tree-ssa-ccp.c
===================================================================
*** trunk.orig/gcc/tree-ssa-ccp.c	2010-08-04 17:33:42.000000000 +0200
--- trunk/gcc/tree-ssa-ccp.c	2010-08-05 12:48:33.000000000 +0200
*************** struct prop_value_d {
*** 150,155 ****
--- 150,159 ----
  
      /* Propagated value.  */
      tree value;
+ 
+     /* Mask that applies to the propagated value during CCP.  For
+        X with a CONSTANT lattice value X & ~mask == value & ~mask.  */
+     double_int mask;
  };
  
  typedef struct prop_value_d prop_value_t;
*************** dump_lattice_value (FILE *outf, const ch
*** 183,189 ****
        break;
      case CONSTANT:
        fprintf (outf, "%sCONSTANT ", prefix);
!       print_generic_expr (outf, val.value, dump_flags);
        break;
      default:
        gcc_unreachable ();
--- 187,204 ----
        break;
      case CONSTANT:
        fprintf (outf, "%sCONSTANT ", prefix);
!       if (TREE_CODE (val.value) != INTEGER_CST
! 	  || double_int_zero_p (val.mask))
! 	print_generic_expr (outf, val.value, dump_flags);
!       else
! 	{
! 	  double_int cval = double_int_and_not (tree_to_double_int (val.value),
! 						val.mask);
! 	  fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX,
! 		   prefix, cval.high, cval.low);
! 	  fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")",
! 		   val.mask.high, val.mask.low);
! 	}
        break;
      default:
        gcc_unreachable ();
*************** static prop_value_t
*** 225,231 ****
  get_default_value (tree var)
  {
    tree sym = SSA_NAME_VAR (var);
!   prop_value_t val = { UNINITIALIZED, NULL_TREE };
    gimple stmt;
  
    stmt = SSA_NAME_DEF_STMT (var);
--- 240,246 ----
  get_default_value (tree var)
  {
    tree sym = SSA_NAME_VAR (var);
!   prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } };
    gimple stmt;
  
    stmt = SSA_NAME_DEF_STMT (var);
*************** get_default_value (tree var)
*** 240,246 ****
  	  && TREE_CODE (sym) == VAR_DECL)
  	val.lattice_val = UNDEFINED;
        else
! 	val.lattice_val = VARYING;
      }
    else if (is_gimple_assign (stmt)
  	   /* Value-returning GIMPLE_CALL statements assign to
--- 255,264 ----
  	  && TREE_CODE (sym) == VAR_DECL)
  	val.lattice_val = UNDEFINED;
        else
! 	{
! 	  val.lattice_val = VARYING;
! 	  val.mask = double_int_minus_one;
! 	}
      }
    else if (is_gimple_assign (stmt)
  	   /* Value-returning GIMPLE_CALL statements assign to
*************** get_default_value (tree var)
*** 266,271 ****
--- 284,290 ----
      {
        /* Otherwise, VAR will never take on a constant value.  */
        val.lattice_val = VARYING;
+       val.mask = double_int_minus_one;
      }
  
    return val;
*************** static inline tree
*** 297,303 ****
  get_constant_value (tree var)
  {
    prop_value_t *val = get_value (var);
!   if (val && val->lattice_val == CONSTANT)
      return val->value;
    return NULL_TREE;
  }
--- 316,325 ----
  get_constant_value (tree var)
  {
    prop_value_t *val = get_value (var);
!   if (val
!       && val->lattice_val == CONSTANT
!       && (TREE_CODE (val->value) != INTEGER_CST
! 	  || double_int_zero_p (val->mask)))
      return val->value;
    return NULL_TREE;
  }
*************** set_value_varying (tree var)
*** 311,316 ****
--- 333,339 ----
  
    val->lattice_val = VARYING;
    val->value = NULL_TREE;
+   val->mask = double_int_minus_one;
  }
  
  /* For float types, modify the value of VAL to make ccp work correctly
*************** canonicalize_float_value (prop_value_t *
*** 360,365 ****
--- 383,424 ----
      }
  }
  
+ /* Return whether the lattice transition is valid.  */
+ 
+ static bool
+ valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
+ {
+   /* Lattice transitions must always be monotonically increasing in
+      value.  */
+   if (old_val.lattice_val < new_val.lattice_val)
+     return true;
+ 
+   if (old_val.lattice_val != new_val.lattice_val)
+     return false;
+ 
+   if (!old_val.value && !new_val.value)
+     return true;
+ 
+   /* Now both lattice values are CONSTANT.  */
+ 
+   /* Allow transitioning from &x to &x & ~3.  */
+   if (TREE_CODE (old_val.value) != INTEGER_CST
+       && TREE_CODE (new_val.value) == INTEGER_CST)
+     return true;
+ 
+   /* Bit-lattices have to agree in the still valid bits.  */
+   if (TREE_CODE (old_val.value) == INTEGER_CST
+       && TREE_CODE (new_val.value) == INTEGER_CST)
+     return double_int_equal_p
+ 		(double_int_and_not (tree_to_double_int (old_val.value),
+ 				     new_val.mask),
+ 		 double_int_and_not (tree_to_double_int (new_val.value),
+ 				     new_val.mask));
+ 
+   /* Otherwise constant values have to agree.  */
+   return operand_equal_p (old_val.value, new_val.value, 0);
+ }
+ 
  /* Set the value for variable VAR to NEW_VAL.  Return true if the new
     value is different from VAR's previous value.  */
  
*************** set_lattice_value (tree var, prop_value_
*** 371,387 ****
  
    canonicalize_float_value (&new_val);
  
!   /* Lattice transitions must always be monotonically increasing in
!      value.  If *OLD_VAL and NEW_VAL are the same, return false to
!      inform the caller that this was a non-transition.  */
! 
!   gcc_assert (old_val->lattice_val < new_val.lattice_val
!               || (old_val->lattice_val == new_val.lattice_val
! 		  && ((!old_val->value && !new_val.value)
! 		      || operand_equal_p (old_val->value, new_val.value, 0))));
! 
!   if (old_val->lattice_val != new_val.lattice_val)
      {
        if (dump_file && (dump_flags & TDF_DETAILS))
  	{
  	  dump_lattice_value (dump_file, "Lattice value changed to ", new_val);
--- 430,463 ----
  
    canonicalize_float_value (&new_val);
  
!   /* We have to be careful to not go up the bitwise lattice
!      represented by the mask.
!      ???  This doesn't seem to be the best place to enforce this.  */
!   if (new_val.lattice_val == CONSTANT
!       && old_val->lattice_val == CONSTANT
!       && TREE_CODE (new_val.value) == INTEGER_CST
!       && TREE_CODE (old_val->value) == INTEGER_CST)
!     {
!       double_int diff;
!       diff = double_int_xor (tree_to_double_int (new_val.value),
! 			     tree_to_double_int (old_val->value));
!       new_val.mask = double_int_ior (new_val.mask,
! 				     double_int_ior (old_val->mask, diff));
!     }
! 
!   gcc_assert (valid_lattice_transition (*old_val, new_val));
! 
!   /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
!      caller that this was a non-transition.  */
!   if (old_val->lattice_val != new_val.lattice_val
!       || (new_val.lattice_val == CONSTANT
! 	  && TREE_CODE (new_val.value) == INTEGER_CST
! 	  && (TREE_CODE (old_val->value) != INTEGER_CST
! 	      || !double_int_equal_p (new_val.mask, old_val->mask))))
      {
+       /* ???  We would like to delay creation of INTEGER_CSTs from
+ 	 partially constants here.  */
+ 
        if (dump_file && (dump_flags & TDF_DETAILS))
  	{
  	  dump_lattice_value (dump_file, "Lattice value changed to ", new_val);
*************** set_lattice_value (tree var, prop_value_
*** 397,427 ****
    return false;
  }
  
! /* Return the value for the tree operand EXPR.  */
  
  static prop_value_t
! get_value_for_expr (tree expr)
  {
    prop_value_t val;
  
    if (TREE_CODE (expr) == SSA_NAME)
!     val = *(get_value (expr));
!   else if (is_gimple_min_invariant (expr))
      {
        val.lattice_val = CONSTANT;
        val.value = expr;
        canonicalize_float_value (&val);
      }
    else
      {
        val.lattice_val = VARYING;
        val.value = NULL_TREE;
      }
- 
    return val;
  }
  
- 
  /* Return the likely CCP lattice value for STMT.
  
     If STMT has no operands, then return CONSTANT.
--- 473,614 ----
    return false;
  }
  
! static prop_value_t get_value_for_expr (tree, bool);
! static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
! static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *,
! 			       tree, double_int, double_int,
! 			       tree, double_int, double_int);
! 
! /* Return a double_int that can be used for bitwise simplifications
!    from VAL.  */
! 
! static double_int
! value_to_double_int (prop_value_t val)
! {
!   if (val.value
!       && TREE_CODE (val.value) == INTEGER_CST)
!     return tree_to_double_int (val.value);
!   else
!     return double_int_zero;
! }
! 
! /* Return the value for the address expression EXPR based on alignment
!    information.  */
! 
! static prop_value_t
! get_value_from_alignment (tree expr)
! {
!   prop_value_t val;
!   HOST_WIDE_INT bitsize, bitpos;
!   tree base, offset;
!   enum machine_mode mode;
!   int align;
! 
!   gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
! 
!   base = get_inner_reference (TREE_OPERAND (expr, 0),
! 			      &bitsize, &bitpos, &offset,
! 			      &mode, &align, &align, false);
!   if (TREE_CODE (base) == MISALIGNED_INDIRECT_REF)
!     val = get_value_for_expr (TREE_OPERAND (base, 0), true);
!   else if (TREE_CODE (base) == MEM_REF)
!     val = bit_value_binop (PLUS_EXPR, TREE_TYPE (expr),
! 			   TREE_OPERAND (base, 0), TREE_OPERAND (base, 1));
!   else if (base
! 	   && ((align = get_object_alignment (base, BITS_PER_UNIT,
! 					      BIGGEST_ALIGNMENT))
! 		> BITS_PER_UNIT))
!     {
!       val.lattice_val = CONSTANT;
!       /* We assume pointers are zero-extended.  */
!       val.mask = double_int_and_not
! 	           (double_int_mask (TYPE_PRECISION (TREE_TYPE (expr))),
! 		    uhwi_to_double_int (align / BITS_PER_UNIT - 1));
!       val.value = build_int_cst (TREE_TYPE (expr), 0);
!     }
!   else
!     {
!       val.lattice_val = VARYING;
!       val.mask = double_int_minus_one;
!       val.value = NULL_TREE;
!     }
!   if (bitpos != 0)
!     {
!       double_int value, mask;
!       bit_value_binop_1 (PLUS_EXPR, TREE_TYPE (expr), &value, &mask,
! 			 TREE_TYPE (expr), value_to_double_int (val), val.mask,
! 			 TREE_TYPE (expr),
! 			 shwi_to_double_int (bitpos / BITS_PER_UNIT),
! 			 double_int_zero);
!       val.lattice_val = double_int_minus_one_p (mask) ? VARYING : CONSTANT;
!       val.mask = mask;
!       if (val.lattice_val == CONSTANT)
! 	val.value = double_int_to_tree (TREE_TYPE (expr), value);
!       else
! 	val.value = NULL_TREE;
!     }
!   /* ???  We should handle i * 4 and more complex expressions from
!      the offset, possibly by just expanding get_value_for_expr.  */
!   if (offset != NULL_TREE)
!     {
!       double_int value, mask;
!       prop_value_t oval = get_value_for_expr (offset, true);
!       bit_value_binop_1 (PLUS_EXPR, TREE_TYPE (expr), &value, &mask,
! 			 TREE_TYPE (expr), value_to_double_int (val), val.mask,
! 			 TREE_TYPE (expr), value_to_double_int (oval),
! 			 oval.mask);
!       val.mask = mask;
!       if (double_int_minus_one_p (mask))
! 	{
! 	  val.lattice_val = VARYING;
! 	  val.value = NULL_TREE;
! 	}
!       else
! 	{
! 	  val.lattice_val = CONSTANT;
! 	  val.value = double_int_to_tree (TREE_TYPE (expr), value);
! 	}
!     }
! 
!   return val;
! }
! 
! /* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
!    return constant bits extracted from alignment information for
!    invariant addresses.  */
  
  static prop_value_t
! get_value_for_expr (tree expr, bool for_bits_p)
  {
    prop_value_t val;
  
    if (TREE_CODE (expr) == SSA_NAME)
!     {
!       val = *get_value (expr);
!       if (for_bits_p
! 	  && val.lattice_val == CONSTANT
! 	  && TREE_CODE (val.value) == ADDR_EXPR)
! 	val = get_value_from_alignment (val.value);
!     }
!   else if (is_gimple_min_invariant (expr)
! 	   && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
      {
        val.lattice_val = CONSTANT;
        val.value = expr;
+       val.mask = double_int_zero;
        canonicalize_float_value (&val);
      }
+   else if (TREE_CODE (expr) == ADDR_EXPR)
+     val = get_value_from_alignment (expr);
    else
      {
        val.lattice_val = VARYING;
+       val.mask = double_int_minus_one;
        val.value = NULL_TREE;
      }
    return val;
  }
  
  /* Return the likely CCP lattice value for STMT.
  
     If STMT has no operands, then return CONSTANT.
*************** do_dbg_cnt (void)
*** 637,642 ****
--- 824,830 ----
        if (!dbg_cnt (ccp))
          {
            const_val[i].lattice_val = VARYING;
+ 	  const_val[i].mask = double_int_minus_one;
            const_val[i].value = NULL_TREE;
          }
      }
*************** ccp_lattice_meet (prop_value_t *val1, pr
*** 692,714 ****
      {
        /* any M VARYING = VARYING.  */
        val1->lattice_val = VARYING;
        val1->value = NULL_TREE;
      }
    else if (val1->lattice_val == CONSTANT
  	   && val2->lattice_val == CONSTANT
  	   && simple_cst_equal (val1->value, val2->value) == 1)
      {
        /* Ci M Cj = Ci		if (i == j)
  	 Ci M Cj = VARYING	if (i != j)
  
!          If these two values come from memory stores, make sure that
! 	 they come from the same memory reference.
!          Nothing to do.  VAL1 already contains the value we want.  */
      }
    else
      {
        /* Any other combination is VARYING.  */
        val1->lattice_val = VARYING;
        val1->value = NULL_TREE;
      }
  }
--- 880,937 ----
      {
        /* any M VARYING = VARYING.  */
        val1->lattice_val = VARYING;
+       val1->mask = double_int_minus_one;
        val1->value = NULL_TREE;
      }
    else if (val1->lattice_val == CONSTANT
  	   && val2->lattice_val == CONSTANT
+ 	   && TREE_CODE (val1->value) == INTEGER_CST
+ 	   && TREE_CODE (val2->value) == INTEGER_CST)
+     {
+       /* Ci M Cj = Ci		if (i == j)
+ 	 Ci M Cj = VARYING	if (i != j)
+ 
+          For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
+ 	 drop to varying.  */
+       val1->mask
+ 	  = double_int_ior (double_int_ior (val1->mask,
+ 					    val2->mask),
+ 			    double_int_xor (tree_to_double_int (val1->value),
+ 					    tree_to_double_int (val2->value)));
+       if (double_int_minus_one_p (val1->mask))
+ 	{
+ 	  val1->lattice_val = VARYING;
+ 	  val1->value = NULL_TREE;
+ 	}
+     }
+   else if (val1->lattice_val == CONSTANT
+ 	   && val2->lattice_val == CONSTANT
  	   && simple_cst_equal (val1->value, val2->value) == 1)
      {
        /* Ci M Cj = Ci		if (i == j)
  	 Ci M Cj = VARYING	if (i != j)
  
!          VAL1 already contains the value we want for equivalent values.  */
!     }
!   else if (val1->lattice_val == CONSTANT
! 	   && val2->lattice_val == CONSTANT
! 	   && (TREE_CODE (val1->value) == ADDR_EXPR
! 	       || TREE_CODE (val2->value) == ADDR_EXPR))
!     {
!       /* When not equal addresses are involved try meeting for
! 	 alignment.  */
!       prop_value_t tem = *val2;
!       if (TREE_CODE (val1->value) == ADDR_EXPR)
! 	*val1 = get_value_for_expr (val1->value, true);
!       if (TREE_CODE (val2->value) == ADDR_EXPR)
! 	tem = get_value_for_expr (val2->value, true);
!       ccp_lattice_meet (val1, &tem);
      }
    else
      {
        /* Any other combination is VARYING.  */
        val1->lattice_val = VARYING;
+       val1->mask = double_int_minus_one;
        val1->value = NULL_TREE;
      }
  }
*************** ccp_visit_phi_node (gimple phi)
*** 769,775 ****
        if (e->flags & EDGE_EXECUTABLE)
  	{
  	  tree arg = gimple_phi_arg (phi, i)->def;
! 	  prop_value_t arg_val = get_value_for_expr (arg);
  
  	  ccp_lattice_meet (&new_val, &arg_val);
  
--- 992,998 ----
        if (e->flags & EDGE_EXECUTABLE)
  	{
  	  tree arg = gimple_phi_arg (phi, i)->def;
! 	  prop_value_t arg_val = get_value_for_expr (arg, false);
  
  	  ccp_lattice_meet (&new_val, &arg_val);
  
*************** fold_const_aggregate_ref (tree t)
*** 1334,1339 ****
--- 1557,1903 ----
    return NULL_TREE;
  }
  
+ /* Apply the operation CODE in type TYPE to the value, mask pair
+    RVAL and RMASK representing a value of type RTYPE and set
+    the value, mask pair *VAL and *MASK to the result.  */
+ 
+ static void
+ bit_value_unop_1 (enum tree_code code, tree type,
+ 		  double_int *val, double_int *mask,
+ 		  tree rtype, double_int rval, double_int rmask)
+ {
+   switch (code)
+     {
+     case BIT_NOT_EXPR:
+       *mask = rmask;
+       *val = double_int_not (rval);
+       break;
+ 
+     case NEGATE_EXPR:
+       {
+ 	double_int temv, temm;
+ 	/* Return ~rval + 1.  */
+ 	bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
+ 	bit_value_binop_1 (PLUS_EXPR, type, val, mask,
+ 			 type, temv, temm,
+ 			 type, double_int_one, double_int_zero);
+ 	break;
+       }
+ 
+     CASE_CONVERT:
+       {
+ 	bool uns;
+ 
+ 	/* First extend mask and value according to the original type.  */
+ 	uns = (TREE_CODE (rtype) == INTEGER_TYPE && TYPE_IS_SIZETYPE (rtype)
+ 	       ? 0 : TYPE_UNSIGNED (rtype));
+ 	*mask = double_int_ext (rmask, TYPE_PRECISION (rtype), uns);
+ 	*val = double_int_ext (rval, TYPE_PRECISION (rtype), uns);
+ 
+ 	/* Then extend mask and value according to the target type.  */
+ 	uns = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)
+ 	       ? 0 : TYPE_UNSIGNED (type));
+ 	*mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
+ 	*val = double_int_ext (*val, TYPE_PRECISION (type), uns);
+ 	break;
+       }
+ 
+     default:
+       *mask = double_int_minus_one;
+       break;
+     }
+ }
+ 
+ /* Apply the operation CODE in type TYPE to the value, mask pairs
+    R1VAL, R1MASK and R2VAL, R2MASK, representing values of types R1TYPE
+    and R2TYPE, and set the value, mask pair *VAL and *MASK to the result.  */
+ 
+ static void
+ bit_value_binop_1 (enum tree_code code, tree type,
+ 		   double_int *val, double_int *mask,
+ 		   tree r1type, double_int r1val, double_int r1mask,
+ 		   tree r2type, double_int r2val, double_int r2mask)
+ {
+   bool uns = (TREE_CODE (type) == INTEGER_TYPE
+ 	      && TYPE_IS_SIZETYPE (type) ? 0 : TYPE_UNSIGNED (type));
+   /* Assume we'll get a constant result.  Use an initial varying value,
+      we fall back to varying in the end if necessary.  */
+   *mask = double_int_minus_one;
+   switch (code)
+     {
+     case BIT_AND_EXPR:
+       /* The mask is constant where there is a known not
+ 	 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
+       *mask = double_int_and (double_int_ior (r1mask, r2mask),
+ 			      double_int_and (double_int_ior (r1val, r1mask),
+ 					      double_int_ior (r2val, r2mask)));
+       *val = double_int_and (r1val, r2val);
+       break;
+ 
+     case BIT_IOR_EXPR:
+       /* The mask is constant where there is a known
+ 	 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
+       *mask = double_int_and_not
+ 	  	(double_int_ior (r1mask, r2mask),
+ 		 double_int_ior (double_int_and_not (r1val, r1mask),
+ 				 double_int_and_not (r2val, r2mask)));
+       *val = double_int_ior (r1val, r2val);
+       break;
+ 
+     case BIT_XOR_EXPR:
+       /* m1 | m2  */
+       *mask = double_int_ior (r1mask, r2mask);
+       *val = double_int_xor (r1val, r2val);
+       break;
+ 
+     case LROTATE_EXPR:
+     case RROTATE_EXPR:
+       if (double_int_zero_p (r2mask))
+ 	{
+ 	  HOST_WIDE_INT shift = r2val.low;
+ 	  if (code == RROTATE_EXPR)
+ 	    shift = -shift;
+ 	  *mask = double_int_lrotate (r1mask, shift, TYPE_PRECISION (type));
+ 	  *val = double_int_lrotate (r1val, shift, TYPE_PRECISION (type));
+ 	}
+       break;
+ 
+     case LSHIFT_EXPR:
+     case RSHIFT_EXPR:
+       /* ???  We can handle partially known shift counts if we know
+ 	 their sign.  That way we can tell that (x << (y | 8)) & 255
+ 	 is zero.  */
+       if (double_int_zero_p (r2mask))
+ 	{
+ 	  HOST_WIDE_INT shift = r2val.low;
+ 	  if (code == RSHIFT_EXPR)
+ 	    shift = -shift;
+ 	  /* We need to know if we are doing a left or a right shift
+ 	     to properly shift in zeros for left shift and unsigned
+ 	     right shifts and the sign bit for signed right shifts.
+ 	     For signed right shifts we shift in varying in case
+ 	     the sign bit was varying.  */
+ 	  if (shift > 0)
+ 	    {
+ 	      *mask = double_int_lshift (r1mask, shift,
+ 					 TYPE_PRECISION (type), false);
+ 	      *val = double_int_lshift (r1val, shift,
+ 					TYPE_PRECISION (type), false);
+ 	    }
+ 	  else if (shift < 0)
+ 	    {
+ 	      shift = -shift;
+ 	      *mask = double_int_rshift (r1mask, shift,
+ 					 TYPE_PRECISION (type), !uns);
+ 	      *val = double_int_rshift (r1val, shift,
+ 					TYPE_PRECISION (type), !uns);
+ 	    }
+ 	  else
+ 	    {
+ 	      *mask = r1mask;
+ 	      *val = r1val;
+ 	    }
+ 	}
+       break;
+ 
+     case PLUS_EXPR:
+     case POINTER_PLUS_EXPR:
+       {
+ 	double_int lo, hi;
+ 	/* Do the addition with unknown bits set to zero, to give carry-ins of
+ 	   zero wherever possible.  */
+ 	lo = double_int_add (double_int_and_not (r1val, r1mask),
+ 			     double_int_and_not (r2val, r2mask));
+ 	lo = double_int_ext (lo, TYPE_PRECISION (type), uns);
+ 	/* Do the addition with unknown bits set to one, to give carry-ins of
+ 	   one wherever possible.  */
+ 	hi = double_int_add (double_int_ior (r1val, r1mask),
+ 			     double_int_ior (r2val, r2mask));
+ 	hi = double_int_ext (hi, TYPE_PRECISION (type), uns);
+ 	/* Each bit in the result is known if (a) the corresponding bits in
+ 	   both inputs are known, and (b) the carry-in to that bit position
+ 	   is known.  We can check condition (b) by seeing if we got the same
+ 	   result with minimised carries as with maximised carries.  */
+ 	*mask = double_int_ior (double_int_ior (r1mask, r2mask),
+ 				double_int_xor (lo, hi));
+ 	*mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
+ 	/* It shouldn't matter whether we choose lo or hi here.  */
+ 	*val = lo;
+ 	break;
+       }
+ 
+     case MINUS_EXPR:
+       {
+ 	double_int temv, temm;
+ 	bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
+ 			  r2type, r2val, r2mask);
+ 	bit_value_binop_1 (PLUS_EXPR, type, val, mask,
+ 			   r1type, r1val, r1mask,
+ 			   r2type, temv, temm);
+ 	break;
+       }
+ 
+     case MULT_EXPR:
+       {
+ 	/* Just track trailing zeros in both operands and transfer
+ 	   them to the other.  */
+ 	int r1tz = double_int_ctz (double_int_ior (r1val, r1mask));
+ 	int r2tz = double_int_ctz (double_int_ior (r2val, r2mask));
+ 	if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT)
+ 	  {
+ 	    *mask = double_int_zero;
+ 	    *val = double_int_zero;
+ 	  }
+ 	else if (r1tz + r2tz > 0)
+ 	  {
+ 	    *mask = double_int_not (double_int_mask (r1tz + r2tz));
+ 	    *mask = double_int_ext (*mask, TYPE_PRECISION (type), uns);
+ 	    *val = double_int_zero;
+ 	  }
+ 	break;
+       }
+ 
+     case EQ_EXPR:
+     case NE_EXPR:
+       {
+ 	double_int m = double_int_ior (r1mask, r2mask);
+ 	if (!double_int_equal_p (double_int_and_not (r1val, m),
+ 				 double_int_and_not (r2val, m)))
+ 	  {
+ 	    *mask = double_int_zero;
+ 	    *val = ((code == EQ_EXPR) ? double_int_zero : double_int_one);
+ 	  }
+ 	else
+ 	  {
+ 	    /* We know the result of a comparison is always one or zero.  */
+ 	    *mask = double_int_one;
+ 	    *val = double_int_zero;
+ 	  }
+ 	break;
+       }
+ 
+     case GE_EXPR:
+     case GT_EXPR:
+       {
+ 	double_int tem = r1val;
+ 	r1val = r2val;
+ 	r2val = tem;
+ 	tem = r1mask;
+ 	r1mask = r2mask;
+ 	r2mask = tem;
+ 	code = swap_tree_comparison (code);
+       }
+       /* Fallthru.  */
+     case LT_EXPR:
+     case LE_EXPR:
+       {
+ 	int minmax, maxmin;
+ 	/* If the most significant bits are not known we know nothing.  */
+ 	if (double_int_negative_p (r1mask) || double_int_negative_p (r2mask))
+ 	  break;
+ 
+ 	/* If we know the most significant bits we know the value
+ 	   ranges by means of treating varying bits as zero
+ 	   or one.  Do a cross comparison of the max/min pairs.  */
+ 	maxmin = double_int_cmp (double_int_ior (r1val, r1mask),
+ 				 double_int_and_not (r2val, r2mask), uns);
+ 	minmax = double_int_cmp (double_int_and_not (r1val, r1mask),
+ 				 double_int_ior (r2val, r2mask), uns);
+ 	if (maxmin < 0)  /* r1 is less than r2.  */
+ 	  {
+ 	    *mask = double_int_zero;
+ 	    *val = double_int_one;
+ 	  }
+ 	else if (minmax > 0)  /* r1 is not less or equal to r2.  */
+ 	  {
+ 	    *mask = double_int_zero;
+ 	    *val = double_int_zero;
+ 	  }
+ 	else if (maxmin == minmax)  /* r1 and r2 are equal.  */
+ 	  {
+ 	    /* This probably should never happen as we'd have
+ 	       folded the thing during fully constant value folding.  */
+ 	    *mask = double_int_zero;
+ 	    *val = (code == LE_EXPR ? double_int_one :  double_int_zero);
+ 	  }
+ 	else
+ 	  {
+ 	    /* We know the result of a comparison is always one or zero.  */
+ 	    *mask = double_int_one;
+ 	    *val = double_int_zero;
+ 	  }
+ 	break;
+       }
+ 
+     default:;
+     }
+ }
+ 
+ /* Return the propagation value when applying the operation CODE to
+    the value RHS yielding type TYPE.  */
+ 
+ static prop_value_t
+ bit_value_unop (enum tree_code code, tree type, tree rhs)
+ {
+   prop_value_t rval = get_value_for_expr (rhs, true);
+   double_int value, mask;
+   prop_value_t val;
+   gcc_assert ((rval.lattice_val == CONSTANT
+ 	       && TREE_CODE (rval.value) == INTEGER_CST)
+ 	      || double_int_minus_one_p (rval.mask));
+   bit_value_unop_1 (code, type, &value, &mask,
+ 		    TREE_TYPE (rhs), value_to_double_int (rval), rval.mask);
+   if (!double_int_minus_one_p (mask))
+     {
+       val.lattice_val = CONSTANT;
+       val.mask = mask;
+       /* ???  Delay building trees here.  */
+       val.value = double_int_to_tree (type, value);
+     }
+   else
+     {
+       val.lattice_val = VARYING;
+       val.value = NULL_TREE;
+       val.mask = double_int_minus_one;
+     }
+   return val;
+ }
+ 
+ /* Return the propagation value when applying the operation CODE to
+    the values RHS1 and RHS2 yielding type TYPE.  */
+ 
+ static prop_value_t
+ bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
+ {
+   prop_value_t r1val = get_value_for_expr (rhs1, true);
+   prop_value_t r2val = get_value_for_expr (rhs2, true);
+   double_int value, mask;
+   prop_value_t val;
+   gcc_assert ((r1val.lattice_val == CONSTANT
+ 	       && TREE_CODE (r1val.value) == INTEGER_CST)
+ 	      || double_int_minus_one_p (r1val.mask));
+   gcc_assert ((r2val.lattice_val == CONSTANT
+ 	       && TREE_CODE (r2val.value) == INTEGER_CST)
+ 	      || double_int_minus_one_p (r2val.mask));
+   bit_value_binop_1 (code, type, &value, &mask,
+ 		     TREE_TYPE (rhs1), value_to_double_int (r1val), r1val.mask,
+ 		     TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask);
+   if (!double_int_minus_one_p (mask))
+     {
+       val.lattice_val = CONSTANT;
+       val.mask = mask;
+       /* ???  Delay building trees here.  */
+       val.value = double_int_to_tree (type, value);
+     }
+   else
+     {
+       val.lattice_val = VARYING;
+       val.value = NULL_TREE;
+       val.mask = double_int_minus_one;
+     }
+   return val;
+ }
+ 
  /* Evaluate statement STMT.
     Valid only for assignments, calls, conditionals, and switches. */
  
*************** evaluate_stmt (gimple stmt)
*** 1343,1351 ****
    prop_value_t val;
    tree simplified = NULL_TREE;
    ccp_lattice_t likelyvalue = likely_value (stmt);
!   bool is_constant;
  
!   fold_defer_overflow_warnings ();
  
    /* If the statement is likely to have a CONSTANT result, then try
       to fold the statement to determine the constant value.  */
--- 1907,1932 ----
    prop_value_t val;
    tree simplified = NULL_TREE;
    ccp_lattice_t likelyvalue = likely_value (stmt);
!   bool is_constant = false;
  
!   if (dump_file && (dump_flags & TDF_DETAILS))
!     {
!       fprintf (dump_file, "which is likely ");
!       switch (likelyvalue)
! 	{
! 	case CONSTANT:
! 	  fprintf (dump_file, "CONSTANT");
! 	  break;
! 	case UNDEFINED:
! 	  fprintf (dump_file, "UNDEFINED");
! 	  break;
! 	case VARYING:
! 	  fprintf (dump_file, "VARYING");
! 	  break;
! 	default:;
! 	}
!       fprintf (dump_file, "\n");
!     }
  
    /* If the statement is likely to have a CONSTANT result, then try
       to fold the statement to determine the constant value.  */
*************** evaluate_stmt (gimple stmt)
*** 1353,1359 ****
       Since likely_value never returns CONSTANT for calls, we will
       not attempt to fold them, including builtins that may profit.  */
    if (likelyvalue == CONSTANT)
!     simplified = ccp_fold (stmt);
    /* If the statement is likely to have a VARYING result, then do not
       bother folding the statement.  */
    else if (likelyvalue == VARYING)
--- 1934,1952 ----
       Since likely_value never returns CONSTANT for calls, we will
       not attempt to fold them, including builtins that may profit.  */
    if (likelyvalue == CONSTANT)
!     {
!       fold_defer_overflow_warnings ();
!       simplified = ccp_fold (stmt);
!       is_constant = simplified && is_gimple_min_invariant (simplified);
!       fold_undefer_overflow_warnings (is_constant, stmt, 0);
!       if (is_constant)
! 	{
! 	  /* The statement produced a constant value.  */
! 	  val.lattice_val = CONSTANT;
! 	  val.value = simplified;
! 	  val.mask = double_int_zero;
! 	}
!     }
    /* If the statement is likely to have a VARYING result, then do not
       bother folding the statement.  */
    else if (likelyvalue == VARYING)
*************** evaluate_stmt (gimple stmt)
*** 1373,1418 ****
        else
  	/* These cannot satisfy is_gimple_min_invariant without folding.  */
  	gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
      }
  
!   is_constant = simplified && is_gimple_min_invariant (simplified);
! 
!   fold_undefer_overflow_warnings (is_constant, stmt, 0);
! 
!   if (dump_file && (dump_flags & TDF_DETAILS))
      {
!       fprintf (dump_file, "which is likely ");
!       switch (likelyvalue)
  	{
! 	case CONSTANT:
! 	  fprintf (dump_file, "CONSTANT");
! 	  break;
! 	case UNDEFINED:
! 	  fprintf (dump_file, "UNDEFINED");
! 	  break;
! 	case VARYING:
! 	  fprintf (dump_file, "VARYING");
! 	  break;
! 	default:;
  	}
!       fprintf (dump_file, "\n");
      }
  
!   if (is_constant)
!     {
!       /* The statement produced a constant value.  */
!       val.lattice_val = CONSTANT;
!       val.value = simplified;
!     }
!   else
      {
        /* The statement produced a nonconstant value.  If the statement
  	 had UNDEFINED operands, then the result of the statement
  	 should be UNDEFINED.  Otherwise, the statement is VARYING.  */
        if (likelyvalue == UNDEFINED)
! 	val.lattice_val = likelyvalue;
        else
! 	val.lattice_val = VARYING;
  
        val.value = NULL_TREE;
      }
--- 1966,2050 ----
        else
  	/* These cannot satisfy is_gimple_min_invariant without folding.  */
  	gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
+       is_constant = simplified && is_gimple_min_invariant (simplified);
+       if (is_constant)
+ 	{
+ 	  /* The statement produced a constant value.  */
+ 	  val.lattice_val = CONSTANT;
+ 	  val.value = simplified;
+ 	  val.mask = double_int_zero;
+ 	}
      }
  
!   /* Resort to simplification for bitwise tracking.  */
!   if (flag_tree_bit_ccp
!       && likelyvalue == CONSTANT
!       && !is_constant)
      {
!       enum gimple_code code = gimple_code (stmt);
!       val.lattice_val = VARYING;
!       val.value = NULL_TREE;
!       val.mask = double_int_minus_one;
!       if (code == GIMPLE_ASSIGN)
  	{
! 	  enum tree_code subcode = gimple_assign_rhs_code (stmt);
! 	  tree rhs1 = gimple_assign_rhs1 (stmt);
! 	  switch (get_gimple_rhs_class (subcode))
! 	    {
! 	    case GIMPLE_SINGLE_RHS:
! 	      if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
! 		  || POINTER_TYPE_P (TREE_TYPE (rhs1)))
! 		val = get_value_for_expr (rhs1, true);
! 	      break;
! 
! 	    case GIMPLE_UNARY_RHS:
! 	      if ((INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
! 		   || POINTER_TYPE_P (TREE_TYPE (rhs1)))
! 		  && (INTEGRAL_TYPE_P (gimple_expr_type (stmt))
! 		      || POINTER_TYPE_P (gimple_expr_type (stmt))))
! 		val = bit_value_unop (subcode, gimple_expr_type (stmt), rhs1);
! 	      break;
! 
! 	    case GIMPLE_BINARY_RHS:
! 	      if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
! 		  || POINTER_TYPE_P (TREE_TYPE (rhs1)))
! 		{
! 		  tree rhs2 = gimple_assign_rhs2 (stmt);
! 		  val = bit_value_binop (subcode,
! 					 TREE_TYPE (rhs1), rhs1, rhs2);
! 		}
! 	      break;
! 
! 	    default:;
! 	    }
  	}
!       else if (code == GIMPLE_COND)
! 	{
! 	  enum tree_code code = gimple_cond_code (stmt);
! 	  tree rhs1 = gimple_cond_lhs (stmt);
! 	  tree rhs2 = gimple_cond_rhs (stmt);
! 	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
! 	      || POINTER_TYPE_P (TREE_TYPE (rhs1)))
! 	    val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
! 	}
!       is_constant = (val.lattice_val == CONSTANT);
      }
  
!   if (!is_constant)
      {
        /* The statement produced a nonconstant value.  If the statement
  	 had UNDEFINED operands, then the result of the statement
  	 should be UNDEFINED.  Otherwise, the statement is VARYING.  */
        if (likelyvalue == UNDEFINED)
! 	{
! 	  val.lattice_val = likelyvalue;
! 	  val.mask = double_int_zero;
! 	}
        else
! 	{
! 	  val.lattice_val = VARYING;
! 	  val.mask = double_int_minus_one;
! 	}
  
        val.value = NULL_TREE;
      }
*************** ccp_fold_stmt (gimple_stmt_iterator *gsi
*** 1438,1446 ****
  	   fold more conditionals here.  */
  	val = evaluate_stmt (stmt);
  	if (val.lattice_val != CONSTANT
! 	    || TREE_CODE (val.value) != INTEGER_CST)
  	  return false;
  
  	if (integer_zerop (val.value))
  	  gimple_cond_make_false (stmt);
  	else
--- 2070,2087 ----
  	   fold more conditionals here.  */
  	val = evaluate_stmt (stmt);
  	if (val.lattice_val != CONSTANT
! 	    || !double_int_zero_p (val.mask))
  	  return false;
  
+ 	if (dump_file)
+ 	  {
+ 	    fprintf (dump_file, "Folding predicate ");
+ 	    print_gimple_expr (dump_file, stmt, 0, 0);
+ 	    fprintf (dump_file, " to ");
+ 	    print_generic_expr (dump_file, val.value, 0);
+ 	    fprintf (dump_file, "\n");
+ 	  }
+ 
  	if (integer_zerop (val.value))
  	  gimple_cond_make_false (stmt);
  	else
*************** visit_cond_stmt (gimple stmt, edge *take
*** 1583,1594 ****
  
    block = gimple_bb (stmt);
    val = evaluate_stmt (stmt);
  
    /* Find which edge out of the conditional block will be taken and add it
       to the worklist.  If no single edge can be determined statically,
       return SSA_PROP_VARYING to feed all the outgoing edges to the
       propagation engine.  */
!   *taken_edge_p = val.value ? find_taken_edge (block, val.value) : 0;
    if (*taken_edge_p)
      return SSA_PROP_INTERESTING;
    else
--- 2224,2238 ----
  
    block = gimple_bb (stmt);
    val = evaluate_stmt (stmt);
+   if (val.lattice_val != CONSTANT
+       || !double_int_zero_p (val.mask))
+     return SSA_PROP_VARYING;
  
    /* Find which edge out of the conditional block will be taken and add it
       to the worklist.  If no single edge can be determined statically,
       return SSA_PROP_VARYING to feed all the outgoing edges to the
       propagation engine.  */
!   *taken_edge_p = find_taken_edge (block, val.value);
    if (*taken_edge_p)
      return SSA_PROP_INTERESTING;
    else
*************** ccp_visit_stmt (gimple stmt, edge *taken
*** 1653,1659 ****
       Mark them VARYING.  */
    FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
      {
!       prop_value_t v = { VARYING, NULL_TREE };
        set_lattice_value (def, v);
      }
  
--- 2297,2303 ----
       Mark them VARYING.  */
    FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
      {
!       prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } };
        set_lattice_value (def, v);
      }
  
Index: trunk/gcc/testsuite/gcc.dg/tree-ssa/ssa-dce-3.c
===================================================================
*** trunk.orig/gcc/testsuite/gcc.dg/tree-ssa/ssa-dce-3.c	2010-08-04 17:33:42.000000000 +0200
--- trunk/gcc/testsuite/gcc.dg/tree-ssa/ssa-dce-3.c	2010-08-05 11:38:28.000000000 +0200
*************** int main(void)
*** 21,31 ****
    return 0;
  }
  
  /* We should eliminate the inner condition, but the loop must be preserved
     as it is infinite.  Therefore there should be just one phi node (for i):  */
! /* { dg-final { scan-tree-dump-times "PHI " 1 "cddce1"} } */
  
  /* And one if (for the exit condition of the loop):  */
! /* { dg-final { scan-tree-dump-times "if " 1 "cddce1"} } */
  
  /* { dg-final { cleanup-tree-dump "cddce1" } } */
--- 21,35 ----
    return 0;
  }
  
+ /* We can now prove the infiniteness of the loop during CCP and then
+    fail to eliminate the code inside the infinite loop, because we
+    start by marking the j % 7 condition as useful.  See PR45178.  */
+ 
  /* We should eliminate the inner condition, but the loop must be preserved
     as it is infinite.  Therefore there should be just one phi node (for i):  */
! /* { dg-final { scan-tree-dump-times "PHI " 1 "cddce1" { xfail *-*-* } } } */
  
  /* And one if (for the exit condition of the loop):  */
! /* { dg-final { scan-tree-dump-times "if " 1 "cddce1" } } */
  
  /* { dg-final { cleanup-tree-dump "cddce1" } } */
Index: trunk/gcc/common.opt
===================================================================
*** trunk.orig/gcc/common.opt	2010-08-04 17:33:42.000000000 +0200
--- trunk/gcc/common.opt	2010-08-05 11:38:28.000000000 +0200
*************** ftree-ccp
*** 1281,1286 ****
--- 1281,1290 ----
  Common Report Var(flag_tree_ccp) Optimization
  Enable SSA-CCP optimization on trees
  
+ ftree-bit-ccp
+ Common Report Var(flag_tree_bit_ccp) Optimization
+ Enable SSA-BIT-CCP optimization on trees
+ 
  ftree-store-ccp
  Common
  Does nothing.  Preserved for backward compatibility.
Index: trunk/gcc/doc/invoke.texi
===================================================================
*** trunk.orig/gcc/doc/invoke.texi	2010-08-04 17:33:42.000000000 +0200
--- trunk/gcc/doc/invoke.texi	2010-08-05 11:38:28.000000000 +0200
*************** Objective-C and Objective-C++ Dialects}.
*** 380,386 ****
  -fsel-sched-pipelining -fsel-sched-pipelining-outer-loops @gol
  -fsignaling-nans -fsingle-precision-constant -fsplit-ivs-in-unroller @gol
  -fsplit-wide-types -fstack-protector -fstack-protector-all @gol
! -fstrict-aliasing -fstrict-overflow -fthread-jumps -ftracer @gol
  -ftree-builtin-call-dce -ftree-ccp -ftree-ch -ftree-copy-prop @gol
  -ftree-copyrename -ftree-dce -ftree-dominator-opts -ftree-dse @gol
  -ftree-forwprop -ftree-fre -ftree-loop-if-convert -ftree-loop-im @gol
--- 380,386 ----
  -fsel-sched-pipelining -fsel-sched-pipelining-outer-loops @gol
  -fsignaling-nans -fsingle-precision-constant -fsplit-ivs-in-unroller @gol
  -fsplit-wide-types -fstack-protector -fstack-protector-all @gol
! -fstrict-aliasing -fstrict-overflow -fthread-jumps -ftracer -ftree-bit-ccp @gol
  -ftree-builtin-call-dce -ftree-ccp -ftree-ch -ftree-copy-prop @gol
  -ftree-copyrename -ftree-dce -ftree-dominator-opts -ftree-dse @gol
  -ftree-forwprop -ftree-fre -ftree-loop-if-convert -ftree-loop-im @gol
*************** compilation time.
*** 5848,5853 ****
--- 5848,5854 ----
  -fipa-reference @gol
  -fmerge-constants
  -fsplit-wide-types @gol
+ -ftree-bit-ccp @gol
  -ftree-builtin-call-dce @gol
  -ftree-ccp @gol
  -ftree-ch @gol
*************** Transposing is enabled only if profiling
*** 6737,6742 ****
--- 6738,6750 ----
  Perform forward store motion  on trees.  This flag is
  enabled by default at @option{-O} and higher.
  
+ @item -ftree-bit-ccp
+ @opindex ftree-bit-ccp
+ Perform sparse conditional bit constant propagation on trees and propagate
+ pointer alignment information.
+ This pass only operates on local scalar variables and is enabled by default
+ at @option{-O} and higher.  It requires that @option{-ftree-ccp} is enabled.
+ 
  @item -ftree-ccp
  @opindex ftree-ccp
  Perform sparse conditional constant propagation (CCP) on trees.  This
Index: trunk/gcc/opts.c
===================================================================
*** trunk.orig/gcc/opts.c	2010-08-04 17:33:42.000000000 +0200
--- trunk/gcc/opts.c	2010-08-05 11:38:28.000000000 +0200
*************** decode_options (unsigned int argc, const
*** 767,772 ****
--- 767,773 ----
    flag_merge_constants = opt1;
    flag_split_wide_types = opt1;
    flag_tree_ccp = opt1;
+   flag_tree_bit_ccp = opt1;
    flag_tree_dce = opt1;
    flag_tree_dom = opt1;
    flag_tree_dse = opt1;
Index: trunk/gcc/testsuite/gcc.dg/tree-ssa/pr23744.c
===================================================================
*** trunk.orig/gcc/testsuite/gcc.dg/tree-ssa/pr23744.c	2010-08-04 17:33:42.000000000 +0200
--- trunk/gcc/testsuite/gcc.dg/tree-ssa/pr23744.c	2010-08-05 11:38:28.000000000 +0200
***************
*** 1,5 ****
  /* { dg-do compile } */
! /* { dg-options "-O2 -fdump-tree-vrp1" } */
  
  int g (int i, int j)
  {
--- 1,5 ----
  /* { dg-do compile } */
! /* { dg-options "-O2 -fno-tree-ccp -fdump-tree-vrp1" } */
  
  int g (int i, int j)
  {
Index: trunk/gcc/testsuite/gcc.dg/tree-ssa/pr25382.c
===================================================================
*** trunk.orig/gcc/testsuite/gcc.dg/tree-ssa/pr25382.c	2010-08-04 17:33:42.000000000 +0200
--- trunk/gcc/testsuite/gcc.dg/tree-ssa/pr25382.c	2010-08-05 11:38:28.000000000 +0200
***************
*** 3,9 ****
     Check that VRP now gets ranges from BIT_AND_EXPRs.  */
  
  /* { dg-do compile } */
! /* { dg-options "-O2 -fdump-tree-vrp1" } */
  
  int
  foo (int a)
--- 3,9 ----
     Check that VRP now gets ranges from BIT_AND_EXPRs.  */
  
  /* { dg-do compile } */
! /* { dg-options "-O2 -fno-tree-ccp -fdump-tree-vrp1" } */
  
  int
  foo (int a)
Index: trunk/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-30.c
===================================================================
*** /dev/null	1970-01-01 00:00:00.000000000 +0000
--- trunk/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-30.c	2010-08-05 11:38:28.000000000 +0200
***************
*** 0 ****
--- 1,15 ----
+ /* { dg-do compile } */
+ /* { dg-options "-O -fdump-tree-ccp1" } */
+ 
+ int
+ foo (int a)
+ {
+   int b = a & 0xff;
+   if (b > 300)
+     return 2;
+   else
+     return 1;
+ }
+ 
+ /* { dg-final { scan-tree-dump-times "Folding predicate b_.* > 300 to 0" 1 "ccp1" } } */
+ /* { dg-final { cleanup-tree-dump "ccp1" } } */
Index: trunk/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-31.c
===================================================================
*** /dev/null	1970-01-01 00:00:00.000000000 +0000
--- trunk/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-31.c	2010-08-05 11:38:28.000000000 +0200
***************
*** 0 ****
--- 1,19 ----
+ /* { dg-do compile } */
+ /* { dg-options "-O -fdump-tree-ccp1" } */
+ 
+ int g (int i, int j)
+ {
+   int t = 0;
+   int i1;
+ 
+   if (i == j)
+     t = 3;
+   for (i1 = 0; i1 < 10000; i1++) h();
+   if (t != 5)
+     return 0;
+   else
+     return 1;
+ }
+ 
+ /* { dg-final { scan-tree-dump-times "Folding predicate.*to 1" 1 "ccp1" } } */
+ /* { dg-final { cleanup-tree-dump "ccp1" } } */
Index: trunk/gcc/testsuite/gcc.c-torture/execute/20100805-1.c
===================================================================
*** /dev/null	1970-01-01 00:00:00.000000000 +0000
--- trunk/gcc/testsuite/gcc.c-torture/execute/20100805-1.c	2010-08-05 12:16:47.000000000 +0200
***************
*** 0 ****
--- 1,15 ----
+ unsigned int foo (unsigned int a, unsigned int b)
+ {
+   unsigned i;
+   a = a & 1;
+   for (i = 0; i < b; ++i)
+     a = a << 1 | a >> (sizeof (unsigned int) * 8 - 1);
+   return a;
+ }
+ extern void abort (void);
+ int main()
+ {
+   if (foo (1, sizeof (unsigned int) * 8 + 1) != 2)
+     abort ();
+   return 0;
+ }
Index: trunk/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-32.c
===================================================================
*** /dev/null	1970-01-01 00:00:00.000000000 +0000
--- trunk/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-32.c	2010-08-05 12:35:26.000000000 +0200
***************
*** 0 ****
--- 1,58 ----
+ /* { dg-do run } */
+ /* { dg-options "-O" } */
+ 
+ extern void link_error (void);
+ unsigned int __attribute__((noinline,noclone))
+ test0 (unsigned int a)
+ {
+   a = a & 1;
+   a = a << 1 | a >> (sizeof (unsigned int) * 8 - 1);
+   if (a & 1)
+     {
+       a = a | 4;
+       link_error ();
+     }
+   if (a & 4)
+     link_error ();
+   return a;
+ }
+ int __attribute__((noinline,noclone))
+ test1 (int a)
+ {
+   a |= 1;
+   a = a << (sizeof (int) * 8 - 1);
+   if (a >= 0)
+     link_error ();
+   a = a * 4;
+   if (a & ~3)
+     link_error ();
+   if (a == -1)
+     link_error ();
+   return a;
+ }
+ int __attribute__((noinline,noclone))
+ test2 (int a)
+ {
+   a = a | 0xff;
+   a = a + 1;
+   if (a & 0xff)
+     link_error ();
+   a = -a;
+   if (a & 0xff)
+     link_error ();
+   a = a - 1;
+   if ((a & 0xff) != 0xff)
+     link_error ();
+   return a;
+ }
+ extern void abort (void);
+ int main()
+ {
+   if (test0 (1) != 2)
+     abort ();
+   if (test1 (0) != 0)
+     abort ();
+   if (test2 (-1) != -1)
+     abort ();
+   return 0;
+ }
Index: trunk/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-33.c
===================================================================
*** /dev/null	1970-01-01 00:00:00.000000000 +0000
--- trunk/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-33.c	2010-08-05 12:49:30.000000000 +0200
***************
*** 0 ****
--- 1,17 ----
+ /* { dg-do link } */
+ /* { dg-options "-O" } */
+ 
+ extern void link_error ();
+ int a[256];
+ void foo(int n)
+ {
+   int *p;
+   for (p = a; n != 0; --n, ++p)
+     ;
+   if ((__SIZE_TYPE__)p & (sizeof (int) - 1))
+     link_error ();
+ }
+ int main()
+ {
+   return 0;
+ }

