This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

[PATCH][match-and-simplify] Add more binary patterns exercised by fold_stmt


The patch also allows iterating over things in the predicate
position, like

(for op (INTEGER_CST REAL_CST)
 (simplify
  (plus @0 op)
...

Bootstrapped and tested on x86_64-unknown-linux-gnu, applied to the 
branch.

Richard.

2014-11-13  Richard Biener  <rguenther@suse.de>

	* match.pd: Implement more binary patterns exercised by
	fold_stmt.
	* fold-const.c (sign_bit_p): Export.
	(exact_inverse): Likewise.
	(fold_binary_loc): Remove patterns here.
	(tree_unary_nonnegative_warnv_p): Use CASE_CONVERT.
	* fold-const.h (sign_bit_p): Declare.
	(exact_inverse): Likewise.
	* genmatch.c (add_operator): Allow CONSTRUCTOR.
	(dt_node::gen_kids): Handle CONSTRUCTOR not as GENERIC.
	(parser::parse_op): Allow to iterate over predicates.

Index: gcc/fold-const.c
===================================================================
*** gcc/fold-const.c.orig	2014-11-12 14:03:26.818390922 +0100
--- gcc/fold-const.c	2014-11-13 14:19:06.634568905 +0100
*************** static tree decode_field_reference (loca
*** 131,137 ****
  				    HOST_WIDE_INT *,
  				    machine_mode *, int *, int *,
  				    tree *, tree *);
- static tree sign_bit_p (tree, const_tree);
  static int simple_operand_p (const_tree);
  static bool simple_operand_p_2 (tree);
  static tree range_binop (enum tree_code, tree, tree, int, tree, int);
--- 131,136 ----
*************** all_ones_mask_p (const_tree mask, unsign
*** 3700,3706 ****
     The return value is the (sub)expression whose sign bit is VAL,
     or NULL_TREE otherwise.  */
  
! static tree
  sign_bit_p (tree exp, const_tree val)
  {
    int width;
--- 3699,3705 ----
     The return value is the (sub)expression whose sign bit is VAL,
     or NULL_TREE otherwise.  */
  
! tree
  sign_bit_p (tree exp, const_tree val)
  {
    int width;
*************** fold_addr_of_array_ref_difference (locat
*** 9541,9547 ****
  /* If the real or vector real constant CST of type TYPE has an exact
     inverse, return it, else return NULL.  */
  
! static tree
  exact_inverse (tree type, tree cst)
  {
    REAL_VALUE_TYPE r;
--- 9540,9546 ----
  /* If the real or vector real constant CST of type TYPE has an exact
     inverse, return it, else return NULL.  */
  
! tree
  exact_inverse (tree type, tree cst)
  {
    REAL_VALUE_TYPE r;
*************** fold_binary_loc (location_t loc,
*** 10030,10054 ****
  	}
        else
  	{
- 	  /* See if ARG1 is zero and X + ARG1 reduces to X.  */
- 	  if (fold_real_zero_addition_p (TREE_TYPE (arg0), arg1, 0))
- 	    return non_lvalue_loc (loc, fold_convert_loc (loc, type, arg0));
- 
- 	  /* Likewise if the operands are reversed.  */
- 	  if (fold_real_zero_addition_p (TREE_TYPE (arg1), arg0, 0))
- 	    return non_lvalue_loc (loc, fold_convert_loc (loc, type, arg1));
- 
- 	  /* Convert X + -C into X - C.  */
- 	  if (TREE_CODE (arg1) == REAL_CST
- 	      && REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg1)))
- 	    {
- 	      tem = fold_negate_const (arg1, type);
- 	      if (!TREE_OVERFLOW (arg1) || !flag_trapping_math)
- 		return fold_build2_loc (loc, MINUS_EXPR, type,
- 				    fold_convert_loc (loc, type, arg0),
- 				    fold_convert_loc (loc, type, tem));
- 	    }
- 
  	  /* Fold __complex__ ( x, 0 ) + __complex__ ( 0, y )
  	     to __complex__ ( x, y ).  This is not the same for SNaNs or
  	     if signed zeros are involved.  */
--- 10029,10034 ----
*************** fold_binary_loc (location_t loc,
*** 10090,10101 ****
  	      && (tem = distribute_real_division (loc, code, type, arg0, arg1)))
  	    return tem;
  
- 	  /* Convert x+x into x*2.0.  */
- 	  if (operand_equal_p (arg0, arg1, 0)
- 	      && SCALAR_FLOAT_TYPE_P (type))
- 	    return fold_build2_loc (loc, MULT_EXPR, type, arg0,
- 				build_real (type, dconst2));
- 
            /* Convert a + (b*c + d*e) into (a + b*c) + d*e.
               We associate floats only if the user has specified
               -fassociative-math.  */
--- 10070,10075 ----
*************** fold_binary_loc (location_t loc,
*** 10448,10456 ****
  
        if (! FLOAT_TYPE_P (type))
  	{
- 	  if (integer_zerop (arg0))
- 	    return negate_expr (fold_convert_loc (loc, type, arg1));
- 
  	  /* Fold A - (A & B) into ~B & A.  */
  	  if (!TREE_SIDE_EFFECTS (arg0)
  	      && TREE_CODE (arg1) == BIT_AND_EXPR)
--- 10422,10427 ----
*************** fold_binary_loc (location_t loc,
*** 10495,10510 ****
  	    }
  	}
  
-       /* See if ARG1 is zero and X - ARG1 reduces to X.  */
-       else if (fold_real_zero_addition_p (TREE_TYPE (arg0), arg1, 1))
- 	return non_lvalue_loc (loc, fold_convert_loc (loc, type, arg0));
- 
-       /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0).  So check whether
- 	 ARG0 is zero and X + ARG0 reduces to X, since that would mean
- 	 (-ARG1 + ARG0) reduces to -ARG1.  */
-       else if (fold_real_zero_addition_p (TREE_TYPE (arg1), arg0, 0))
- 	return negate_expr (fold_convert_loc (loc, type, arg1));
- 
        /* Fold __complex__ ( x, 0 ) - __complex__ ( 0, y ) to
  	 __complex__ ( x, -y ).  This is not the same for SNaNs or if
  	 signed zeros are involved.  */
--- 10466,10471 ----
*************** fold_binary_loc (location_t loc,
*** 10619,10629 ****
  
        if (! FLOAT_TYPE_P (type))
  	{
- 	  /* Transform x * -1 into -x.  Make sure to do the negation
- 	     on the original operand with conversions not stripped
- 	     because we can only strip non-sign-changing conversions.  */
- 	  if (integer_minus_onep (arg1))
- 	    return fold_convert_loc (loc, type, negate_expr (op0));
  	  /* Transform x * -C into -x * C if x is easily negatable.  */
  	  if (TREE_CODE (arg1) == INTEGER_CST
  	      && tree_int_cst_sgn (arg1) == -1
--- 10580,10585 ----
*************** fold_binary_loc (location_t loc,
*** 10687,10715 ****
  	}
        else
  	{
- 	  /* Maybe fold x * 0 to 0.  The expressions aren't the same
- 	     when x is NaN, since x * 0 is also NaN.  Nor are they the
- 	     same in modes with signed zeros, since multiplying a
- 	     negative value by 0 gives -0, not +0.  */
- 	  if (!HONOR_NANS (TYPE_MODE (TREE_TYPE (arg0)))
- 	      && !HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0)))
- 	      && real_zerop (arg1))
- 	    return omit_one_operand_loc (loc, type, arg1, arg0);
- 	  /* In IEEE floating point, x*1 is not equivalent to x for snans.
- 	     Likewise for complex arithmetic with signed zeros.  */
- 	  if (!HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg0)))
- 	      && (!HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0)))
- 		  || !COMPLEX_FLOAT_TYPE_P (TREE_TYPE (arg0)))
- 	      && real_onep (arg1))
- 	    return non_lvalue_loc (loc, fold_convert_loc (loc, type, arg0));
- 
- 	  /* Transform x * -1.0 into -x.  */
- 	  if (!HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg0)))
- 	      && (!HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0)))
- 		  || !COMPLEX_FLOAT_TYPE_P (TREE_TYPE (arg0)))
- 	      && real_minus_onep (arg1))
- 	    return fold_convert_loc (loc, type, negate_expr (arg0));
- 
  	  /* Convert (C1/X)*C2 into (C1*C2)/X.  This transformation may change
               the result for floating point types due to rounding so it is applied
               only if -fassociative-math was specify.  */
--- 10643,10648 ----
*************** fold_binary_loc (location_t loc,
*** 11588,11620 ****
  	  && real_zerop (arg1))
  	return NULL_TREE;
  
-       /* Optimize A / A to 1.0 if we don't care about
- 	 NaNs or Infinities.  Skip the transformation
- 	 for non-real operands.  */
-       if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (arg0))
- 	  && ! HONOR_NANS (TYPE_MODE (TREE_TYPE (arg0)))
- 	  && ! HONOR_INFINITIES (TYPE_MODE (TREE_TYPE (arg0)))
- 	  && operand_equal_p (arg0, arg1, 0))
- 	{
- 	  tree r = build_real (TREE_TYPE (arg0), dconst1);
- 
- 	  return omit_two_operands_loc (loc, type, r, arg0, arg1);
- 	}
- 
-       /* The complex version of the above A / A optimization.  */
-       if (COMPLEX_FLOAT_TYPE_P (TREE_TYPE (arg0))
- 	  && operand_equal_p (arg0, arg1, 0))
- 	{
- 	  tree elem_type = TREE_TYPE (TREE_TYPE (arg0));
- 	  if (! HONOR_NANS (TYPE_MODE (elem_type))
- 	      && ! HONOR_INFINITIES (TYPE_MODE (elem_type)))
- 	    {
- 	      tree r = build_real (elem_type, dconst1);
- 	      /* omit_two_operands will call fold_convert for us.  */
- 	      return omit_two_operands_loc (loc, type, r, arg0, arg1);
- 	    }
- 	}
- 
        /* (-A) / (-B) -> A / B  */
        if (TREE_CODE (arg0) == NEGATE_EXPR && negate_expr_p (arg1))
  	return fold_build2_loc (loc, RDIV_EXPR, type,
--- 11521,11526 ----
*************** fold_binary_loc (location_t loc,
*** 11625,11666 ****
  			    negate_expr (arg0),
  			    TREE_OPERAND (arg1, 0));
  
-       /* In IEEE floating point, x/1 is not equivalent to x for snans.  */
-       if (!HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg0)))
- 	  && real_onep (arg1))
- 	return non_lvalue_loc (loc, fold_convert_loc (loc, type, arg0));
- 
-       /* In IEEE floating point, x/-1 is not equivalent to -x for snans.  */
-       if (!HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg0)))
- 	  && real_minus_onep (arg1))
- 	return non_lvalue_loc (loc, fold_convert_loc (loc, type,
- 						  negate_expr (arg0)));
- 
-       /* If ARG1 is a constant, we can convert this to a multiply by the
- 	 reciprocal.  This does not have the same rounding properties,
- 	 so only do this if -freciprocal-math.  We can actually
- 	 always safely do it if ARG1 is a power of two, but it's hard to
- 	 tell if it is or not in a portable manner.  */
-       if (optimize
- 	  && (TREE_CODE (arg1) == REAL_CST
- 	      || (TREE_CODE (arg1) == COMPLEX_CST
- 		  && COMPLEX_FLOAT_TYPE_P (TREE_TYPE (arg1)))
- 	      || (TREE_CODE (arg1) == VECTOR_CST
- 		  && VECTOR_FLOAT_TYPE_P (TREE_TYPE (arg1)))))
- 	{
- 	  if (flag_reciprocal_math
- 	      && 0 != (tem = const_binop (code, build_one_cst (type), arg1)))
- 	    return fold_build2_loc (loc, MULT_EXPR, type, arg0, tem);
- 	  /* Find the reciprocal if optimizing and the result is exact.
- 	     TODO: Complex reciprocal not implemented.  */
- 	  if (TREE_CODE (arg1) != COMPLEX_CST)
- 	    {
- 	      tree inverse = exact_inverse (TREE_TYPE (arg0), arg1);
- 
- 	      if (inverse)
- 		return fold_build2_loc (loc, MULT_EXPR, type, arg0, inverse);
- 	    }
- 	}
        /* Convert A/B/C to A/(B*C).  */
        if (flag_reciprocal_math
  	  && TREE_CODE (arg0) == RDIV_EXPR)
--- 11531,11536 ----
*************** fold_binary_loc (location_t loc,
*** 11883,11895 ****
  	    }
  	}
  
-       /* For unsigned integral types, FLOOR_DIV_EXPR is the same as
- 	 TRUNC_DIV_EXPR.  Rewrite into the latter in this case.  */
-       if (INTEGRAL_TYPE_P (type)
- 	  && TYPE_UNSIGNED (type)
- 	  && code == FLOOR_DIV_EXPR)
- 	return fold_build2_loc (loc, TRUNC_DIV_EXPR, type, op0, op1);
- 
        /* Fall through */
  
      case ROUND_DIV_EXPR:
--- 11753,11758 ----
*************** fold_binary_loc (location_t loc,
*** 11897,11907 ****
      case EXACT_DIV_EXPR:
        if (integer_zerop (arg1))
  	return NULL_TREE;
-       /* X / -1 is -X.  */
-       if (!TYPE_UNSIGNED (type)
- 	  && TREE_CODE (arg1) == INTEGER_CST
- 	  && wi::eq_p (arg1, -1))
- 	return fold_convert_loc (loc, type, negate_expr (arg0));
  
        /* Convert -A / -B to A / B when the type is signed and overflow is
  	 undefined.  */
--- 11760,11765 ----
*************** fold_binary_loc (location_t loc,
*** 11964,11989 ****
      case FLOOR_MOD_EXPR:
      case ROUND_MOD_EXPR:
      case TRUNC_MOD_EXPR:
-       /* X % -1 is zero.  */
-       if (!TYPE_UNSIGNED (type)
- 	  && TREE_CODE (arg1) == INTEGER_CST
- 	  && wi::eq_p (arg1, -1))
- 	return omit_one_operand_loc (loc, type, integer_zero_node, arg0);
- 
-       /* X % -C is the same as X % C.  */
-       if (code == TRUNC_MOD_EXPR
- 	  && TYPE_SIGN (type) == SIGNED
- 	  && TREE_CODE (arg1) == INTEGER_CST
- 	  && !TREE_OVERFLOW (arg1)
- 	  && wi::neg_p (arg1)
- 	  && !TYPE_OVERFLOW_TRAPS (type)
- 	  /* Avoid this transformation if C is INT_MIN, i.e. C == -C.  */
- 	  && !sign_bit_p (arg1, arg1))
- 	return fold_build2_loc (loc, code, type,
- 			    fold_convert_loc (loc, type, arg0),
- 			    fold_convert_loc (loc, type,
- 					      negate_expr (arg1)));
- 
        /* X % -Y is the same as X % Y.  */
        if (code == TRUNC_MOD_EXPR
  	  && !TYPE_UNSIGNED (type)
--- 11822,11827 ----
*************** fold_binary_loc (location_t loc,
*** 12037,12066 ****
  
      case LROTATE_EXPR:
      case RROTATE_EXPR:
-       if (integer_all_onesp (arg0))
- 	return omit_one_operand_loc (loc, type, arg0, arg1);
-       goto shift;
- 
      case RSHIFT_EXPR:
-       /* Optimize -1 >> x for arithmetic right shifts.  */
-       if (integer_all_onesp (arg0) && !TYPE_UNSIGNED (type)
- 	  && tree_expr_nonnegative_p (arg1))
- 	return omit_one_operand_loc (loc, type, arg0, arg1);
-       /* ... fall through ...  */
- 
      case LSHIFT_EXPR:
-     shift:
-       if (integer_zerop (arg1))
- 	return non_lvalue_loc (loc, fold_convert_loc (loc, type, arg0));
-       if (integer_zerop (arg0))
- 	return omit_one_operand_loc (loc, type, arg0, arg1);
- 
-       /* Prefer vector1 << scalar to vector1 << vector2
- 	 if vector2 is uniform.  */
-       if (VECTOR_TYPE_P (TREE_TYPE (arg1))
- 	  && (tem = uniform_vector_p (arg1)) != NULL_TREE)
- 	return fold_build2_loc (loc, code, type, op0, tem);
- 
        /* Since negative shift count is not well-defined,
  	 don't try to compute it in the compiler.  */
        if (TREE_CODE (arg1) == INTEGER_CST && tree_int_cst_sgn (arg1) < 0)
--- 11875,11882 ----
*************** fold_binary_loc (location_t loc,
*** 12120,12134 ****
  	    }
  	}
  
-       /* Rewrite an LROTATE_EXPR by a constant into an
- 	 RROTATE_EXPR by a new constant.  */
-       if (code == LROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST)
- 	{
- 	  tree tem = build_int_cst (TREE_TYPE (arg1), prec);
- 	  tem = const_binop (MINUS_EXPR, tem, arg1);
- 	  return fold_build2_loc (loc, RROTATE_EXPR, type, op0, tem);
- 	}
- 
        /* If we have a rotate of a bit operation with the rotate count and
  	 the second operand of the bit operation both constant,
  	 permute the two operations.  */
--- 11936,11941 ----
*************** fold_binary_loc (location_t loc,
*** 12176,12198 ****
        return NULL_TREE;
  
      case MIN_EXPR:
-       if (operand_equal_p (arg0, arg1, 0))
- 	return omit_one_operand_loc (loc, type, arg0, arg1);
-       if (INTEGRAL_TYPE_P (type)
- 	  && operand_equal_p (arg1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
- 	return omit_one_operand_loc (loc, type, arg1, arg0);
        tem = fold_minmax (loc, MIN_EXPR, type, arg0, arg1);
        if (tem)
  	return tem;
        goto associate;
  
      case MAX_EXPR:
-       if (operand_equal_p (arg0, arg1, 0))
- 	return omit_one_operand_loc (loc, type, arg0, arg1);
-       if (INTEGRAL_TYPE_P (type)
- 	  && TYPE_MAX_VALUE (type)
- 	  && operand_equal_p (arg1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
- 	return omit_one_operand_loc (loc, type, arg1, arg0);
        tem = fold_minmax (loc, MAX_EXPR, type, arg0, arg1);
        if (tem)
  	return tem;
--- 11983,11994 ----
*************** tree_unary_nonnegative_warnv_p (enum tre
*** 14880,14886 ****
        return tree_expr_nonnegative_warnv_p (op0,
  					    strict_overflow_p);
  
!     case NOP_EXPR:
        {
  	tree inner_type = TREE_TYPE (op0);
  	tree outer_type = type;
--- 14676,14682 ----
        return tree_expr_nonnegative_warnv_p (op0,
  					    strict_overflow_p);
  
!     CASE_CONVERT:
        {
  	tree inner_type = TREE_TYPE (op0);
  	tree outer_type = type;
Index: gcc/fold-const.h
===================================================================
*** gcc/fold-const.h.orig	2014-11-13 13:24:43.534711699 +0100
--- gcc/fold-const.h	2014-11-13 13:49:58.034645424 +0100
*************** extern tree make_range_step (location_t,
*** 167,171 ****
--- 167,173 ----
  extern tree build_range_check (location_t, tree, tree, int, tree, tree);
  extern bool merge_ranges (int *, tree *, tree *, int, tree, tree, int,
  			  tree, tree);
+ extern tree sign_bit_p (tree, const_tree);
+ extern tree exact_inverse (tree, tree);
  
  #endif // GCC_FOLD_CONST_H
Index: gcc/match.pd
===================================================================
*** gcc/match.pd.orig	2014-11-13 13:24:43.535711699 +0100
--- gcc/match.pd	2014-11-13 14:00:20.621618180 +0100
*************** along with GCC; see the file COPYING3.
*** 45,63 ****
   (pointer_plus integer_zerop @1)
   (non_lvalue (convert @1)))
  
  /* Simplify x - x.
     This is unsafe for certain floats even in non-IEEE formats.
     In IEEE, it is unsafe because it does wrong for NaNs.
     Also note that operand_equal_p is always false if an operand
     is volatile.  */
  (simplify
!   (minus @0 @0)
!   (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (TYPE_MODE (type)))
!    { build_zero_cst (type); }))
  
  (simplify
!   (mult @0 integer_zerop@1)
!   @1)
  
  /* Make sure to preserve divisions by zero.  This is the reason why
     we don't simplify x / x to 1 or 0 / x to 0.  */
--- 45,103 ----
   (pointer_plus integer_zerop @1)
   (non_lvalue (convert @1)))
  
+ /* See if ARG1 is zero and X + ARG1 reduces to X.
+    Likewise if the operands are reversed.  */
+ (simplify
+  (plus:c @0 real_zerop@1)
+  (if (fold_real_zero_addition_p (type, @1, 0))
+   (non_lvalue @0)))
+ 
+ /* See if ARG1 is zero and X - ARG1 reduces to X.  */
+ (simplify
+  (minus @0 real_zerop@1)
+  (if (fold_real_zero_addition_p (type, @1, 1))
+   (non_lvalue @0)))
+ 
  /* Simplify x - x.
     This is unsafe for certain floats even in non-IEEE formats.
     In IEEE, it is unsafe because it does wrong for NaNs.
     Also note that operand_equal_p is always false if an operand
     is volatile.  */
  (simplify
!  (minus @0 @0)
!  (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (TYPE_MODE (type)))
!   { build_zero_cst (type); }))
  
  (simplify
!  (mult @0 integer_zerop@1)
!  @1)
! 
! /* Maybe fold x * 0 to 0.  The expressions aren't the same
!    when x is NaN, since x * 0 is also NaN.  Nor are they the
!    same in modes with signed zeros, since multiplying a
!    negative value by 0 gives -0, not +0.  */
! (simplify
!  (mult @0 real_zerop@1)
!  (if (!HONOR_NANS (TYPE_MODE (type))
!       && !HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
!   @1))
! 
! /* In IEEE floating point, x*1 is not equivalent to x for snans.
!    Likewise for complex arithmetic with signed zeros.  */
! (simplify
!  (mult @0 real_onep)
!  (if (!HONOR_SNANS (TYPE_MODE (type))
!       && (!HONOR_SIGNED_ZEROS (TYPE_MODE (type))
!           || !COMPLEX_FLOAT_TYPE_P (type)))
!   (non_lvalue @0)))
! 
! /* Transform x * -1.0 into -x.  */
! (simplify
!  (mult @0 real_minus_onep)
!   (if (!HONOR_SNANS (TYPE_MODE (type))
!        && (!HONOR_SIGNED_ZEROS (TYPE_MODE (type))
!            || !COMPLEX_FLOAT_TYPE_P (type)))
!    (negate @0)))
  
  /* Make sure to preserve divisions by zero.  This is the reason why
     we don't simplify x / x to 1 or 0 / x to 0.  */
*************** along with GCC; see the file COPYING3.
*** 66,84 ****
      (op @0 integer_onep)
      (non_lvalue @0)))
  
  /* Same applies to modulo operations, but fold is inconsistent here
     and simplifies 0 % x to 0, only preserving literal 0 % 0.  */
! (for op (ceil_mod floor_mod round_mod trunc_mod)
   /* 0 % X is always zero.  */
   (simplify
!   (op integer_zerop@0 @1)
    /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
    (if (!integer_zerop (@1))
     @0))
   /* X % 1 is always zero.  */
   (simplify
!   (op @0 integer_onep)
!   { build_zero_cst (type); }))
  
  /* x | ~0 -> ~0  */
  (simplify
--- 106,202 ----
      (op @0 integer_onep)
      (non_lvalue @0)))
  
+ /* X / -1 is -X.  */
+ (for div (trunc_div ceil_div floor_div round_div exact_div)
+  (simplify
+    (div @0 INTEGER_CST@1)
+    (if (!TYPE_UNSIGNED (type)
+         && wi::eq_p (@1, -1))
+     (negate @0))))
+ 
+ /* For unsigned integral types, FLOOR_DIV_EXPR is the same as
+    TRUNC_DIV_EXPR.  Rewrite into the latter in this case.  */
+ (simplify
+  (floor_div @0 @1)
+  (if (INTEGRAL_TYPE_P (type) && TYPE_UNSIGNED (type))
+   (trunc_div @0 @1)))
+ 
+ /* Optimize A / A to 1.0 if we don't care about
+    NaNs or Infinities.  Skip the transformation
+    for non-real operands.  */
+ (simplify
+  (rdiv @0 @0)
+  (if (SCALAR_FLOAT_TYPE_P (type)
+       && ! HONOR_NANS (TYPE_MODE (type))
+       && ! HONOR_INFINITIES (TYPE_MODE (type)))
+   { build_real (type, dconst1); })
+  /* The complex version of the above A / A optimization.  */
+  (if (COMPLEX_FLOAT_TYPE_P (type)
+       && ! HONOR_NANS (TYPE_MODE (TREE_TYPE (type)))
+       && ! HONOR_INFINITIES (TYPE_MODE (TREE_TYPE (type))))
+   (convert { build_real (TREE_TYPE (type), dconst1); })))
+ 
+ /* In IEEE floating point, x/1 is not equivalent to x for snans.  */
+ (simplify
+  (rdiv @0 real_onep)
+  (if (!HONOR_SNANS (TYPE_MODE (type)))
+   (non_lvalue @0)))
+ 
+ /* In IEEE floating point, x/-1 is not equivalent to -x for snans.  */
+ (simplify
+  (rdiv @0 real_minus_onep)
+  (if (!HONOR_SNANS (TYPE_MODE (type)))
+   (negate @0)))
+ 
+ /* If ARG1 is a constant, we can convert this to a multiply by the
+    reciprocal.  This does not have the same rounding properties,
+    so only do this if -freciprocal-math.  We can actually
+    always safely do it if ARG1 is a power of two, but it's hard to
+    tell if it is or not in a portable manner.  */
+ (for cst (REAL_CST COMPLEX_CST VECTOR_CST)
+  (simplify
+   (rdiv @0 cst@1)
+   (if (optimize)
+    (if (flag_reciprocal_math)
+     (with
+      { tree tem = fold_binary (RDIV_EXPR, type, build_one_cst (type), @1); }
+      (if (tem)
+       (mult @0 { tem; } ))))
+    (if (cst != COMPLEX_CST)
+     (with { tree inverse = exact_inverse (type, @1); }
+      (if (inverse)
+       (mult @0 { inverse; } )))))))
+ 
  /* Same applies to modulo operations, but fold is inconsistent here
     and simplifies 0 % x to 0, only preserving literal 0 % 0.  */
! (for mod (ceil_mod floor_mod round_mod trunc_mod)
   /* 0 % X is always zero.  */
   (simplify
!   (mod integer_zerop@0 @1)
    /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
    (if (!integer_zerop (@1))
     @0))
   /* X % 1 is always zero.  */
   (simplify
!   (mod @0 integer_onep)
!   { build_zero_cst (type); })
!  /* X % -1 is zero.  */
!  (simplify
!   (mod @0 INTEGER_CST@1)
!   (if (!TYPE_UNSIGNED (type)
!        && wi::eq_p (@1, -1))
!    { build_zero_cst (type); })))
! 
! /* X % -C is the same as X % C.  */
! (simplify
!  (trunc_mod @0 INTEGER_CST@1)
!   (if (TYPE_SIGN (type) == SIGNED
!        && !TREE_OVERFLOW (@1)
!        && wi::neg_p (@1)
!        && !TYPE_OVERFLOW_TRAPS (type)
!        /* Avoid this transformation if C is INT_MIN, i.e. C == -C.  */
!        && !sign_bit_p (@1, @1))
!    (trunc_mod @0 (negate @1))))
  
  /* x | ~0 -> ~0  */
  (simplify
*************** along with GCC; see the file COPYING3.
*** 384,389 ****
--- 502,565 ----
       (convert @1))))))
  
  
+ /* Simplifications of MIN_EXPR and MAX_EXPR.  */
+ 
+ (for minmax (min max)
+  (simplify
+   (minmax @0 @0)
+   @0))
+ (simplify
+  (min @0 @1)
+  (if (INTEGRAL_TYPE_P (type)
+       && TYPE_MIN_VALUE (type)
+       && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
+   @1))
+ (simplify
+  (max @0 @1)
+  (if (INTEGRAL_TYPE_P (type)
+       && TYPE_MAX_VALUE (type)
+       && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
+   @1))
+ 
+ 
+ /* Simplifications of shift and rotates.  */
+ 
+ (for rotate (lrotate rrotate)
+  (simplify
+   (rotate integer_all_onesp@0 @1)
+   @0))
+ 
+ /* Optimize -1 >> x for arithmetic right shifts.  */
+ (simplify
+  (rshift integer_all_onesp@0 @1)
+  (if (!TYPE_UNSIGNED (type)
+       && tree_expr_nonnegative_p (@1))
+   @0))
+ 
+ (for shiftrotate (lrotate rrotate lshift rshift)
+  (simplify
+   (shiftrotate @0 integer_zerop)
+   (non_lvalue @0))
+  (simplify
+   (shiftrotate integer_zerop@0 @1)
+   @0)
+  /* Prefer vector1 << scalar to vector1 << vector2
+     if vector2 is uniform.  */
+  (for vec (VECTOR_CST CONSTRUCTOR)
+   (simplify
+    (shiftrotate @0 vec@1)
+    (with { tree tem = uniform_vector_p (@1); }
+     (if (tem)
+      (shiftrotate @0 { tem; }))))))
+ 
+ /* Rewrite an LROTATE_EXPR by a constant into an
+    RROTATE_EXPR by a new constant.  */
+ (simplify
+  (lrotate @0 INTEGER_CST@1)
+  (rrotate @0 { fold_binary (MINUS_EXPR, TREE_TYPE (@1),
+ 			    build_int_cst (TREE_TYPE (@1),
+ 					   element_precision (type)), @1); }))
+ 
  
  /* Simplifications of conversions.  */
  
*************** along with GCC; see the file COPYING3.
*** 560,565 ****
--- 736,773 ----
     (convert @0)))
  
  
+ /* Canonicalization of binary operations.  */
+ 
+ /* Convert X + -C into X - C.  */
+ (simplify
+  (plus @0 REAL_CST@1)
+  (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
+   (with { tree tem = fold_unary (NEGATE_EXPR, type, @1); }
+    (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
+     (minus @0 { tem; })))))
+ 
+ /* Convert x+x into x*2.0.  */
+ (simplify
+  (plus @0 @0)
+  (if (SCALAR_FLOAT_TYPE_P (type))
+   (mult @0 { build_real (type, dconst2); })))
+ 
+ (simplify
+  (minus integer_zerop @1)
+  (negate @1))
+ 
+ /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0).  So check whether
+    ARG0 is zero and X + ARG0 reduces to X, since that would mean
+    (-ARG1 + ARG0) reduces to -ARG1.  */
+ (simplify
+  (minus real_zerop@0 @1)
+  (if (fold_real_zero_addition_p (type, @0, 0))
+   (negate @1)))
+ 
+ /* Transform x * -1 into -x.  */
+ (simplify
+  (mult @0 integer_minus_onep)
+  (negate @0))
  
  
  /* Simple example for a user-defined predicate - modeled after
Index: gcc/genmatch.c
===================================================================
*** gcc/genmatch.c.orig	2014-11-12 14:05:12.791386284 +0100
--- gcc/genmatch.c	2014-11-13 13:56:42.042627745 +0100
*************** add_operator (enum tree_code code, const
*** 310,316 ****
        /* For {REAL,IMAG}PART_EXPR and VIEW_CONVERT_EXPR.  */
        && strcmp (tcc, "tcc_reference") != 0
        /* To have INTEGER_CST and friends as "predicate operators".  */
!       && strcmp (tcc, "tcc_constant") != 0)
      return;
    operator_id *op = new operator_id (code, id, nargs, tcc);
    id_base **slot = operators->find_slot_with_hash (op, op->hashval, INSERT);
--- 310,318 ----
        /* For {REAL,IMAG}PART_EXPR and VIEW_CONVERT_EXPR.  */
        && strcmp (tcc, "tcc_reference") != 0
        /* To have INTEGER_CST and friends as "predicate operators".  */
!       && strcmp (tcc, "tcc_constant") != 0
!       /* And allow CONSTRUCTOR for vector initializers.  */
!       && !(code == CONSTRUCTOR))
      return;
    operator_id *op = new operator_id (code, id, nargs, tcc);
    id_base **slot = operators->find_slot_with_hash (op, op->hashval, INSERT);
*************** dt_node::gen_kids (FILE *f, bool gimple)
*** 2013,2019 ****
  	  dt_operand *op = as_a<dt_operand *> (kids[i]);
  	  if (expr *e = dyn_cast <expr *> (op->op))
  	    {
! 	      if (e->ops.length () == 0)
  		generic_exprs.safe_push (op);
  	      else if (e->operation->kind == id_base::FN)
  		{
--- 2015,2022 ----
  	  dt_operand *op = as_a<dt_operand *> (kids[i]);
  	  if (expr *e = dyn_cast <expr *> (op->op))
  	    {
! 	      if (e->ops.length () == 0
! 		  && (!gimple || !(*e->operation == CONSTRUCTOR)))
  		generic_exprs.safe_push (op);
  	      else if (e->operation->kind == id_base::FN)
  		{
*************** dt_node::gen_kids (FILE *f, bool gimple)
*** 2124,2131 ****
  		   "}\n");
  	}
  
!       fprintf (f, "break;\n"
! 	       "}\n");
      }
  
    for (unsigned i = 0; i < generic_exprs.length (); ++i)
--- 2127,2134 ----
  		   "}\n");
  	}
  
!       fprintf (f, "}\n"
! 	       "break;\n");
      }
  
    for (unsigned i = 0; i < generic_exprs.length (); ++i)
*************** parser::parse_op ()
*** 3026,3031 ****
--- 3029,3042 ----
  	    {
  	      if (code->nargs != 0)
  		fatal_at (token, "using an operator with operands as predicate");
+ 	      /* Parse the zero-operand operator "predicates" as
+ 		 expression.  */
+ 	      op = new expr (opr);
+ 	    }
+ 	  else if (user_id *code = dyn_cast <user_id *> (opr))
+ 	    {
+ 	      if (code->nargs != 0)
+ 		fatal_at (token, "using an operator with operands as predicate");
  	      /* Parse the zero-operand operator "predicates" as
  		 expression.  */
  	      op = new expr (opr);


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]