This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

Re: [PATCH] Split simplify_unary_operation and simplify_binary_operation


My apologies to Paolo for getting distracted before commenting on his
patch.

Huh, a two-day turnaround time on a patch that may not even be suitable for 4.0 does not seem much to me. :-)


The function simplify_const_relational_operation
ends up being useful in its own right, and can potentially be used
by other RTL optimizers when it's known that both operands are constant.
Sound reasonable?

Of course, and I was already planning to investigate this direction if the patch was approved. (Even if my primary reason to do so was to find my way more easily while moving combine_simplify_rtx simplifications to simplify-rtx.c, for 4.1).


Note however that simplify_unary_operation and simplify_binary_operation are not *that* monolithic. The resulting functions are very simple if one extracts simplify_const_unary_operation and simplify_const_binary_operation. This is unlike simplify_relational_operation, whose logic is more complicated mostly due to historical warts (when it was only looking for constant results, it was already looking in COMPARE rtx's). For instance, the only "strange" thing that appears in simplify_unary_operation is looking into CONSTs.

Actually most of the code readability benefit could be achieved simply by inverting a couple of ifs (though this would obviously not achieve the flexibility of having the simplify_const_* functions available outside simplify-rtx.c).

Here is a patch, bootstrapped powerpc-apple-darwin7.7.0, regtested with no regressions. Ok for mainline?

Paolo
2005-02-10 Paolo Bonzini <bonzini@gnu.org>

	* simplify-rtx.c (simplify_unary_operation_1,
	simplify_const_unary_operation): New, extracted from...
	(simplify_unary_operation): ... this one.
	(simplify_binary_operation_1,
	simplify_const_binary_operation): New, extracted from...
	(simplify_binary_operation): ... this one.
	* rtl.h (simplify_const_unary_operation,
	simplify_const_binary_operation): Add prototypes.

Index: simplify-rtx.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/simplify-rtx.c,v
retrieving revision 1.228
diff -d -u -p -b -r1.228 simplify-rtx.c
--- simplify-rtx.c	12 Feb 2005 15:17:56 -0000	1.228
+++ simplify-rtx.c	14 Feb 2005 15:35:26 -0000
@@ -60,6 +60,9 @@ static rtx simplify_associative_operatio
 					   rtx, rtx);
 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
 					    enum machine_mode, rtx, rtx);
+static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
+static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
+					rtx, rtx, rtx, rtx);
 
 /* Negate a CONST_INT rtx, truncating (because a conversion from a
    maximally negative number can overflow).  */
@@ -355,53 +358,292 @@ rtx
 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
 			  rtx op, enum machine_mode op_mode)
 {
+  rtx trueop, tem;
+
+  if (GET_CODE (op) == CONST)
+    op = XEXP (op, 0);
+
+  trueop = avoid_constant_pool_reference (op);
+
+  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
+  if (tem)
+    return tem;
+
+  return simplify_unary_operation_1 (code, mode, op);
+}
+
+/* Perform some simplifications we can do even if the operands
+   aren't constant.  */
+static rtx
+simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
+{
+  enum rtx_code reversed;
+  rtx temp;
+
+  switch (code)
+    {
+    case NOT:
+      /* (not (not X)) == X.  */
+      if (GET_CODE (op) == NOT)
+	return XEXP (op, 0);
+
+      /* (not (eq X Y)) == (ne X Y), etc.  */
+      if (COMPARISON_P (op)
+	  && (mode == BImode || STORE_FLAG_VALUE == -1)
+	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
+	return simplify_gen_relational (reversed, mode, VOIDmode,
+					XEXP (op, 0), XEXP (op, 1));
+
+      /* (not (plus X -1)) can become (neg X).  */
+      if (GET_CODE (op) == PLUS
+	  && XEXP (op, 1) == constm1_rtx)
+	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
+
+      /* Similarly, (not (neg X)) is (plus X -1).  */
+      if (GET_CODE (op) == NEG)
+	return plus_constant (XEXP (op, 0), -1);
+
+      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
+      if (GET_CODE (op) == XOR
+	  && GET_CODE (XEXP (op, 1)) == CONST_INT
+	  && (temp = simplify_unary_operation (NOT, mode,
+					       XEXP (op, 1), mode)) != 0)
+	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
+
+      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
+      if (GET_CODE (op) == PLUS
+	  && GET_CODE (XEXP (op, 1)) == CONST_INT
+	  && mode_signbit_p (mode, XEXP (op, 1))
+	  && (temp = simplify_unary_operation (NOT, mode,
+					       XEXP (op, 1), mode)) != 0)
+	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
+
+
+      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
+	 operands other than 1, but that is not valid.  We could do a
+	 similar simplification for (not (lshiftrt C X)) where C is
+	 just the sign bit, but this doesn't seem common enough to
+	 bother with.  */
+      if (GET_CODE (op) == ASHIFT
+	  && XEXP (op, 0) == const1_rtx)
+	{
+	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
+	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
+	}
+
+      /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
+	 by reversing the comparison code if valid.  */
+      if (STORE_FLAG_VALUE == -1
+	  && COMPARISON_P (op)
+	  && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
+	return simplify_gen_relational (reversed, mode, VOIDmode,
+					XEXP (op, 0), XEXP (op, 1));
+
+      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
+	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
+	 so we can perform the above simplification.  */
+      
+      if (STORE_FLAG_VALUE == -1
+	  && GET_CODE (op) == ASHIFTRT
+	  && GET_CODE (XEXP (op, 1)) == CONST_INT
+	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+	return simplify_gen_relational (GE, mode, VOIDmode,
+					XEXP (op, 0), const0_rtx);
+
+      break;
+
+    case NEG:
+      /* (neg (neg X)) == X.  */
+      if (GET_CODE (op) == NEG)
+	return XEXP (op, 0);
+
+      /* (neg (plus X 1)) can become (not X).  */
+      if (GET_CODE (op) == PLUS
+	  && XEXP (op, 1) == const1_rtx)
+	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
+      
+      /* Similarly, (neg (not X)) is (plus X 1).  */
+      if (GET_CODE (op) == NOT)
+	return plus_constant (XEXP (op, 0), 1);
+      
+      /* (neg (minus X Y)) can become (minus Y X).  This transformation
+	 isn't safe for modes with signed zeros, since if X and Y are
+	 both +0, (minus Y X) is the same as (minus X Y).  If the
+	 rounding mode is towards +infinity (or -infinity) then the two
+	 expressions will be rounded differently.  */
+      if (GET_CODE (op) == MINUS
+	  && !HONOR_SIGNED_ZEROS (mode)
+	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
+	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
+      
+      if (GET_CODE (op) == PLUS
+	  && !HONOR_SIGNED_ZEROS (mode)
+	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
+	{
+	  /* (neg (plus A C)) is simplified to (minus -C A).  */
+	  if (GET_CODE (XEXP (op, 1)) == CONST_INT
+	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
+	    {
+	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
+	      if (temp)
+		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
+	    }
+
+	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
+	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
+	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
+	}
+
+      /* (neg (mult A B)) becomes (mult (neg A) B).
+	 This works even for floating-point values.  */
+      if (GET_CODE (op) == MULT
+	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
+	{
+	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
+	  return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
+	}
+
+      /* NEG commutes with ASHIFT since it is multiplication.  Only do
+	 this if we can then eliminate the NEG (e.g., if the operand
+	 is a constant).  */
+      if (GET_CODE (op) == ASHIFT)
+	{
+	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
+	  if (temp)
+	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
+	}
+
+      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
+	 C is equal to the width of MODE minus 1.  */
+      if (GET_CODE (op) == ASHIFTRT
+	  && GET_CODE (XEXP (op, 1)) == CONST_INT
+	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+	return simplify_gen_binary (LSHIFTRT, mode,
+				    XEXP (op, 0), XEXP (op, 1));
+
+      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
+	 C is equal to the width of MODE minus 1.  */
+      if (GET_CODE (op) == LSHIFTRT
+	  && GET_CODE (XEXP (op, 1)) == CONST_INT
+	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+	return simplify_gen_binary (ASHIFTRT, mode,
+				    XEXP (op, 0), XEXP (op, 1));
+      
+      break;
+
+    case SIGN_EXTEND:
+      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
+	 becomes just the MINUS if its mode is MODE.  This allows
+	 folding switch statements on machines using casesi (such as
+	 the VAX).  */
+      if (GET_CODE (op) == TRUNCATE
+	  && GET_MODE (XEXP (op, 0)) == mode
+	  && GET_CODE (XEXP (op, 0)) == MINUS
+	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
+	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
+	return XEXP (op, 0);
+
+      /* Check for a sign extension of a subreg of a promoted
+	 variable, where the promotion is sign-extended, and the
+	 target mode is the same as the variable's promotion.  */
+      if (GET_CODE (op) == SUBREG
+	  && SUBREG_PROMOTED_VAR_P (op)
+	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
+	  && GET_MODE (XEXP (op, 0)) == mode)
+	return XEXP (op, 0);
+
+#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
+      if (! POINTERS_EXTEND_UNSIGNED
+	  && mode == Pmode && GET_MODE (op) == ptr_mode
+	  && (CONSTANT_P (op)
+	      || (GET_CODE (op) == SUBREG
+		  && REG_P (SUBREG_REG (op))
+		  && REG_POINTER (SUBREG_REG (op))
+		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
+	return convert_memory_address (Pmode, op);
+#endif
+      break;
+
+    case ZERO_EXTEND:
+      /* Check for a zero extension of a subreg of a promoted
+	 variable, where the promotion is zero-extended, and the
+	 target mode is the same as the variable's promotion.  */
+      if (GET_CODE (op) == SUBREG
+	  && SUBREG_PROMOTED_VAR_P (op)
+	  && SUBREG_PROMOTED_UNSIGNED_P (op)
+	  && GET_MODE (XEXP (op, 0)) == mode)
+	return XEXP (op, 0);
+
+#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
+      if (POINTERS_EXTEND_UNSIGNED > 0
+	  && mode == Pmode && GET_MODE (op) == ptr_mode
+	  && (CONSTANT_P (op)
+	      || (GET_CODE (op) == SUBREG
+		  && REG_P (SUBREG_REG (op))
+		  && REG_POINTER (SUBREG_REG (op))
+		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
+	return convert_memory_address (Pmode, op);
+#endif
+      break;
+
+    default:
+      break;
+    }
+  
+  return 0;
+}
+
+/* Try to compute the value of a unary operation CODE whose output mode is to
+   be MODE with input operand OP whose mode was originally OP_MODE.
+   Return zero if the value cannot be computed.  */
+rtx
+simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
+				rtx op, enum machine_mode op_mode)
+{
   unsigned int width = GET_MODE_BITSIZE (mode);
-  rtx trueop = avoid_constant_pool_reference (op);
 
   if (code == VEC_DUPLICATE)
     {
       gcc_assert (VECTOR_MODE_P (mode));
-      if (GET_MODE (trueop) != VOIDmode)
+      if (GET_MODE (op) != VOIDmode)
       {
-	if (!VECTOR_MODE_P (GET_MODE (trueop)))
-	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
+	if (!VECTOR_MODE_P (GET_MODE (op)))
+	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
 	else
 	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
-						(GET_MODE (trueop)));
+						(GET_MODE (op)));
       }
-      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
-	  || GET_CODE (trueop) == CONST_VECTOR)
+      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
+	  || GET_CODE (op) == CONST_VECTOR)
 	{
           int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
           unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
 	  rtvec v = rtvec_alloc (n_elts);
 	  unsigned int i;
 
-	  if (GET_CODE (trueop) != CONST_VECTOR)
+	  if (GET_CODE (op) != CONST_VECTOR)
 	    for (i = 0; i < n_elts; i++)
-	      RTVEC_ELT (v, i) = trueop;
+	      RTVEC_ELT (v, i) = op;
 	  else
 	    {
-	      enum machine_mode inmode = GET_MODE (trueop);
+	      enum machine_mode inmode = GET_MODE (op);
               int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
               unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
 
 	      gcc_assert (in_n_elts < n_elts);
 	      gcc_assert ((n_elts % in_n_elts) == 0);
 	      for (i = 0; i < n_elts; i++)
-	        RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
+	        RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
 	    }
 	  return gen_rtx_CONST_VECTOR (mode, v);
 	}
     }
-  else if (GET_CODE (op) == CONST)
-    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
 
-  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
+  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
     {
       int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
       unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
-      enum machine_mode opmode = GET_MODE (trueop);
+      enum machine_mode opmode = GET_MODE (op);
       int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
       unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
       rtvec v = rtvec_alloc (n_elts);
@@ -411,7 +653,7 @@ simplify_unary_operation (enum rtx_code 
       for (i = 0; i < n_elts; i++)
 	{
 	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
-					    CONST_VECTOR_ELT (trueop, i),
+					    CONST_VECTOR_ELT (op, i),
 					    GET_MODE_INNER (opmode));
 	  if (!x)
 	    return 0;
@@ -424,32 +666,32 @@ simplify_unary_operation (enum rtx_code 
      check the wrong mode (input vs. output) for a conversion operation,
      such as FIX.  At some point, this should be simplified.  */
 
-  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
-      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
+  if (code == FLOAT && GET_MODE (op) == VOIDmode
+      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
     {
       HOST_WIDE_INT hv, lv;
       REAL_VALUE_TYPE d;
 
-      if (GET_CODE (trueop) == CONST_INT)
-	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
+      if (GET_CODE (op) == CONST_INT)
+	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
       else
-	lv = CONST_DOUBLE_LOW (trueop),  hv = CONST_DOUBLE_HIGH (trueop);
+	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);
 
       REAL_VALUE_FROM_INT (d, lv, hv, mode);
       d = real_value_truncate (mode, d);
       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
     }
-  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
-	   && (GET_CODE (trueop) == CONST_DOUBLE
-	       || GET_CODE (trueop) == CONST_INT))
+  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
+	   && (GET_CODE (op) == CONST_DOUBLE
+	       || GET_CODE (op) == CONST_INT))
     {
       HOST_WIDE_INT hv, lv;
       REAL_VALUE_TYPE d;
 
-      if (GET_CODE (trueop) == CONST_INT)
-	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
+      if (GET_CODE (op) == CONST_INT)
+	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
       else
-	lv = CONST_DOUBLE_LOW (trueop),  hv = CONST_DOUBLE_HIGH (trueop);
+	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);
 
       if (op_mode == VOIDmode)
 	{
@@ -468,10 +710,10 @@ simplify_unary_operation (enum rtx_code 
       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
     }
 
-  if (GET_CODE (trueop) == CONST_INT
+  if (GET_CODE (op) == CONST_INT
       && width <= HOST_BITS_PER_WIDE_INT && width > 0)
     {
-      HOST_WIDE_INT arg0 = INTVAL (trueop);
+      HOST_WIDE_INT arg0 = INTVAL (op);
       HOST_WIDE_INT val;
 
       switch (code)
@@ -594,18 +836,18 @@ simplify_unary_operation (enum rtx_code 
 
   /* We can do some operations on integer CONST_DOUBLEs.  Also allow
      for a DImode operation on a CONST_INT.  */
-  else if (GET_MODE (trueop) == VOIDmode
+  else if (GET_MODE (op) == VOIDmode
 	   && width <= HOST_BITS_PER_WIDE_INT * 2
-	   && (GET_CODE (trueop) == CONST_DOUBLE
-	       || GET_CODE (trueop) == CONST_INT))
+	   && (GET_CODE (op) == CONST_DOUBLE
+	       || GET_CODE (op) == CONST_INT))
     {
       unsigned HOST_WIDE_INT l1, lv;
       HOST_WIDE_INT h1, hv;
 
-      if (GET_CODE (trueop) == CONST_DOUBLE)
-	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
+      if (GET_CODE (op) == CONST_DOUBLE)
+	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
       else
-	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
+	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
 
       switch (code)
 	{
@@ -719,11 +961,11 @@ simplify_unary_operation (enum rtx_code 
       return immed_double_const (lv, hv, mode);
     }
 
-  else if (GET_CODE (trueop) == CONST_DOUBLE
+  else if (GET_CODE (op) == CONST_DOUBLE
 	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
     {
       REAL_VALUE_TYPE d, t;
-      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
+      REAL_VALUE_FROM_CONST_DOUBLE (d, op);
 
       switch (code)
 	{
@@ -753,7 +995,7 @@ simplify_unary_operation (enum rtx_code 
 	    long tmp[4];
 	    int i;
 
-	    real_to_target (tmp, &d, GET_MODE (trueop));
+	    real_to_target (tmp, &d, GET_MODE (op));
 	    for (i = 0; i < 4; i++)
 	      tmp[i] = ~tmp[i];
 	    real_from_target (&d, tmp, mode);
@@ -764,8 +1006,8 @@ simplify_unary_operation (enum rtx_code 
       return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
     }
 
-  else if (GET_CODE (trueop) == CONST_DOUBLE
-	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
+  else if (GET_CODE (op) == CONST_DOUBLE
+	   && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
 	   && GET_MODE_CLASS (mode) == MODE_INT
 	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
     {
@@ -774,9 +1016,11 @@ simplify_unary_operation (enum rtx_code 
 	 by target backends), for consistency, this routine implements the
 	 same semantics for constant folding as used by the middle-end.  */
 
+      /* This was formerly used only for non-IEEE float.
+	 eggert@twinsun.com says it is safe for IEEE also.  */
       HOST_WIDE_INT xh, xl, th, tl;
       REAL_VALUE_TYPE x, t;
-      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
+      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
       switch (code)
 	{
 	case FIX:
@@ -862,237 +1106,7 @@ simplify_unary_operation (enum rtx_code 
       return immed_double_const (xl, xh, mode);
     }
 
-  /* This was formerly used only for non-IEEE float.
-     eggert@twinsun.com says it is safe for IEEE also.  */
-  else
-    {
-      enum rtx_code reversed;
-      rtx temp;
-
-      /* There are some simplifications we can do even if the operands
-	 aren't constant.  */
-      switch (code)
-	{
-	case NOT:
-	  /* (not (not X)) == X.  */
-	  if (GET_CODE (op) == NOT)
-	    return XEXP (op, 0);
-
-	  /* (not (eq X Y)) == (ne X Y), etc.  */
-	  if (COMPARISON_P (op)
-	      && (mode == BImode || STORE_FLAG_VALUE == -1)
-	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
-		  != UNKNOWN))
-	    return simplify_gen_relational (reversed, mode, VOIDmode,
-					    XEXP (op, 0), XEXP (op, 1));
-
-          /* (not (plus X -1)) can become (neg X).  */
-          if (GET_CODE (op) == PLUS
-	      && XEXP (op, 1) == constm1_rtx)
-	    return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
-
-	  /* Similarly, (not (neg X)) is (plus X -1).  */
-	  if (GET_CODE (op) == NEG)
-	    return plus_constant (XEXP (op, 0), -1);
-
-	  /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
-	  if (GET_CODE (op) == XOR
-	      && GET_CODE (XEXP (op, 1)) == CONST_INT
-	      && (temp = simplify_unary_operation (NOT, mode,
-						   XEXP (op, 1),
-						   mode)) != 0)
-	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
-
-	  /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
-	  if (GET_CODE (op) == PLUS
-	      && GET_CODE (XEXP (op, 1)) == CONST_INT
-	      && mode_signbit_p (mode, XEXP (op, 1))
-	      && (temp = simplify_unary_operation (NOT, mode,
-						   XEXP (op, 1),
-						   mode)) != 0)
-	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
-
-
-
-	  /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
-	     operands other than 1, but that is not valid.  We could do a
-	     similar simplification for (not (lshiftrt C X)) where C is
-	     just the sign bit, but this doesn't seem common enough to
-	     bother with.  */
-	  if (GET_CODE (op) == ASHIFT
-	      && XEXP (op, 0) == const1_rtx)
-	    {
-	      temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
-	      return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
-	    }
-
-	  /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
-	     by reversing the comparison code if valid.  */
-	  if (STORE_FLAG_VALUE == -1
-	      && COMPARISON_P (op)
-	      && (reversed = reversed_comparison_code (op, NULL_RTX))
-		 != UNKNOWN)
-	    return simplify_gen_relational (reversed, mode, VOIDmode,
-					    XEXP (op, 0), XEXP (op, 1));
-
-	  /* (not (ashiftrt foo C)) where C is the number of bits in FOO
-	     minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
-	     so we can perform the above simplification.  */
-
-	  if (STORE_FLAG_VALUE == -1
-	      && GET_CODE (op) == ASHIFTRT
-	      && GET_CODE (XEXP (op, 1)) == CONST_INT
-	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
-	    return simplify_gen_relational (GE, mode, VOIDmode,
-					    XEXP (op, 0), const0_rtx);
-
-	  break;
-
-	case NEG:
-	  /* (neg (neg X)) == X.  */
-	  if (GET_CODE (op) == NEG)
-	    return XEXP (op, 0);
-
-	  /* (neg (plus X 1)) can become (not X).  */
-	  if (GET_CODE (op) == PLUS
-	      && XEXP (op, 1) == const1_rtx)
-	    return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
-
-	  /* Similarly, (neg (not X)) is (plus X 1).  */
-	  if (GET_CODE (op) == NOT)
-	    return plus_constant (XEXP (op, 0), 1);
-
-	  /* (neg (minus X Y)) can become (minus Y X).  This transformation
-	     isn't safe for modes with signed zeros, since if X and Y are
-	     both +0, (minus Y X) is the same as (minus X Y).  If the
-	     rounding mode is towards +infinity (or -infinity) then the two
-	     expressions will be rounded differently.  */
-	  if (GET_CODE (op) == MINUS
-	      && !HONOR_SIGNED_ZEROS (mode)
-	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
-	    return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
-					XEXP (op, 0));
-
-	  if (GET_CODE (op) == PLUS
-	      && !HONOR_SIGNED_ZEROS (mode)
-	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
-	    {
-	      /* (neg (plus A C)) is simplified to (minus -C A).  */
-	      if (GET_CODE (XEXP (op, 1)) == CONST_INT
-		  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
-		{
-		  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
-						   mode);
-		  if (temp)
-		    return simplify_gen_binary (MINUS, mode, temp,
-						XEXP (op, 0));
-		}
-
-	      /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
-	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
-	      return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
-	    }
-
-	  /* (neg (mult A B)) becomes (mult (neg A) B).
-	     This works even for floating-point values.  */
-	  if (GET_CODE (op) == MULT
-	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
-	    {
-	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
-	      return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
-	    }
-
-	  /* NEG commutes with ASHIFT since it is multiplication.  Only do
-	     this if we can then eliminate the NEG (e.g., if the operand
-	     is a constant).  */
-	  if (GET_CODE (op) == ASHIFT)
-	    {
-	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
-					       mode);
-	      if (temp)
-		return simplify_gen_binary (ASHIFT, mode, temp,
-					    XEXP (op, 1));
-	    }
-
-	  /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
-	     C is equal to the width of MODE minus 1.  */
-	  if (GET_CODE (op) == ASHIFTRT
-	      && GET_CODE (XEXP (op, 1)) == CONST_INT
-	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
-		return simplify_gen_binary (LSHIFTRT, mode,
-					    XEXP (op, 0), XEXP (op, 1));
-
-	  /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
-	     C is equal to the width of MODE minus 1.  */
-	  if (GET_CODE (op) == LSHIFTRT
-	      && GET_CODE (XEXP (op, 1)) == CONST_INT
-	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
-		return simplify_gen_binary (ASHIFTRT, mode,
-					    XEXP (op, 0), XEXP (op, 1));
-
-	  break;
-
-	case SIGN_EXTEND:
-	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
-	     becomes just the MINUS if its mode is MODE.  This allows
-	     folding switch statements on machines using casesi (such as
-	     the VAX).  */
-	  if (GET_CODE (op) == TRUNCATE
-	      && GET_MODE (XEXP (op, 0)) == mode
-	      && GET_CODE (XEXP (op, 0)) == MINUS
-	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
-	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
-	    return XEXP (op, 0);
-
-	  /* Check for a sign extension of a subreg of a promoted
-	     variable, where the promotion is sign-extended, and the
-	     target mode is the same as the variable's promotion.  */
-	  if (GET_CODE (op) == SUBREG
-	      && SUBREG_PROMOTED_VAR_P (op)
-	      && ! SUBREG_PROMOTED_UNSIGNED_P (op)
-	      && GET_MODE (XEXP (op, 0)) == mode)
-	    return XEXP (op, 0);
-
-#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
-	  if (! POINTERS_EXTEND_UNSIGNED
-	      && mode == Pmode && GET_MODE (op) == ptr_mode
-	      && (CONSTANT_P (op)
-		  || (GET_CODE (op) == SUBREG
-		      && REG_P (SUBREG_REG (op))
-		      && REG_POINTER (SUBREG_REG (op))
-		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
-	    return convert_memory_address (Pmode, op);
-#endif
-	  break;
-
-	case ZERO_EXTEND:
-	  /* Check for a zero extension of a subreg of a promoted
-	     variable, where the promotion is zero-extended, and the
-	     target mode is the same as the variable's promotion.  */
-	  if (GET_CODE (op) == SUBREG
-	      && SUBREG_PROMOTED_VAR_P (op)
-	      && SUBREG_PROMOTED_UNSIGNED_P (op)
-	      && GET_MODE (XEXP (op, 0)) == mode)
-	    return XEXP (op, 0);
-
-#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
-	  if (POINTERS_EXTEND_UNSIGNED > 0
-	      && mode == Pmode && GET_MODE (op) == ptr_mode
-	      && (CONSTANT_P (op)
-		  || (GET_CODE (op) == SUBREG
-		      && REG_P (SUBREG_REG (op))
-		      && REG_POINTER (SUBREG_REG (op))
-		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
-	    return convert_memory_address (Pmode, op);
-#endif
-	  break;
-
-	default:
-	  break;
-	}
-
-      return 0;
-    }
+  return NULL_RTX;
 }
 
 /* Subroutine of simplify_binary_operation to simplify a commutative,
@@ -1153,6 +1167,7 @@ simplify_associative_operation (enum rtx
   return 0;
 }
 
+
 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
    and OP1.  Return 0 if no simplification is possible.
 
@@ -1162,9 +1177,6 @@ rtx
 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
 			   rtx op0, rtx op1)
 {
-  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
-  HOST_WIDE_INT val;
-  unsigned int width = GET_MODE_BITSIZE (mode);
   rtx trueop0, trueop1;
   rtx tem;
 
@@ -1185,324 +1197,20 @@ simplify_binary_operation (enum rtx_code
   trueop0 = avoid_constant_pool_reference (op0);
   trueop1 = avoid_constant_pool_reference (op1);
 
-  if (VECTOR_MODE_P (mode)
-      && code != VEC_CONCAT
-      && GET_CODE (trueop0) == CONST_VECTOR
-      && GET_CODE (trueop1) == CONST_VECTOR)
-    {
-      unsigned n_elts = GET_MODE_NUNITS (mode);
-      enum machine_mode op0mode = GET_MODE (trueop0);
-      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
-      enum machine_mode op1mode = GET_MODE (trueop1);
-      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
-      rtvec v = rtvec_alloc (n_elts);
-      unsigned int i;
-
-      gcc_assert (op0_n_elts == n_elts);
-      gcc_assert (op1_n_elts == n_elts);
-      for (i = 0; i < n_elts; i++)
-	{
-	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
-					     CONST_VECTOR_ELT (trueop0, i),
-					     CONST_VECTOR_ELT (trueop1, i));
-	  if (!x)
-	    return 0;
-	  RTVEC_ELT (v, i) = x;
-	}
-
-      return gen_rtx_CONST_VECTOR (mode, v);
-    }
-
-  if (VECTOR_MODE_P (mode)
-      && code == VEC_CONCAT
-      && CONSTANT_P (trueop0) && CONSTANT_P (trueop1))
-    {
-      unsigned n_elts = GET_MODE_NUNITS (mode);
-      rtvec v = rtvec_alloc (n_elts);
-
-      gcc_assert (n_elts >= 2);
-      if (n_elts == 2)
-	{
-	  gcc_assert (GET_CODE (trueop0) != CONST_VECTOR);
-	  gcc_assert (GET_CODE (trueop1) != CONST_VECTOR);
-
-	  RTVEC_ELT (v, 0) = trueop0;
-	  RTVEC_ELT (v, 1) = trueop1;
-	}
-      else
-	{
-	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (trueop0));
-	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (trueop1));
-	  unsigned i;
-
-	  gcc_assert (GET_CODE (trueop0) == CONST_VECTOR);
-	  gcc_assert (GET_CODE (trueop1) == CONST_VECTOR);
-	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
-
-	  for (i = 0; i < op0_n_elts; ++i)
-	    RTVEC_ELT (v, i) = XVECEXP (trueop0, 0, i);
-	  for (i = 0; i < op1_n_elts; ++i)
-	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (trueop1, 0, i);
-	}
-
-      return gen_rtx_CONST_VECTOR (mode, v);
-    }
-
-  if (GET_MODE_CLASS (mode) == MODE_FLOAT
-      && GET_CODE (trueop0) == CONST_DOUBLE
-      && GET_CODE (trueop1) == CONST_DOUBLE
-      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
-    {
-      if (code == AND
-	  || code == IOR
-	  || code == XOR)
-	{
-	  long tmp0[4];
-	  long tmp1[4];
-	  REAL_VALUE_TYPE r;
-	  int i;
-
-	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
-			  GET_MODE (op0));
-	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
-			  GET_MODE (op1));
-	  for (i = 0; i < 4; i++)
-	    {
-	      switch (code)
-	      {
-	      case AND:
-		tmp0[i] &= tmp1[i];
-		break;
-	      case IOR:
-		tmp0[i] |= tmp1[i];
-		break;
-	      case XOR:
-		tmp0[i] ^= tmp1[i];
-		break;
-	      default:
-		gcc_unreachable ();
-	      }
-	    }
-	   real_from_target (&r, tmp0, mode);
-	   return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
-	}
-      else
-	{
-	  REAL_VALUE_TYPE f0, f1, value, result;
-	  bool inexact;
-
-	  REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
-	  REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
-	  real_convert (&f0, mode, &f0);
-	  real_convert (&f1, mode, &f1);
-
-	  if (HONOR_SNANS (mode)
-	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
-	    return 0;
-
-	  if (code == DIV
-	      && REAL_VALUES_EQUAL (f1, dconst0)
-	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
-	    return 0;
-
-	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
-	      && flag_trapping_math
-	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
-	    {
-	      int s0 = REAL_VALUE_NEGATIVE (f0);
-	      int s1 = REAL_VALUE_NEGATIVE (f1);
-
-	      switch (code)
-		{
-		case PLUS:
-		  /* Inf + -Inf = NaN plus exception.  */
-		  if (s0 != s1)
-		    return 0;
-		  break;
-		case MINUS:
-		  /* Inf - Inf = NaN plus exception.  */
-		  if (s0 == s1)
-		    return 0;
-		  break;
-		case DIV:
-		  /* Inf / Inf = NaN plus exception.  */
-		  return 0;
-		default:
-		  break;
-		}
-	    }
-
-	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
-	      && flag_trapping_math
-	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
-		  || (REAL_VALUE_ISINF (f1)
-		      && REAL_VALUES_EQUAL (f0, dconst0))))
-	    /* Inf * 0 = NaN plus exception.  */
-	    return 0;
-
-	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
-				     &f0, &f1);
-	  real_convert (&result, mode, &value);
-
-	  /* Don't constant fold this floating point operation if the
-	     result may dependent upon the run-time rounding mode and
-	     flag_rounding_math is set, or if GCC's software emulation
-	     is unable to accurately represent the result.  */
-
-	  if ((flag_rounding_math
-	       || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
-		   && !flag_unsafe_math_optimizations))
-	      && (inexact || !real_identical (&result, &value)))
-	    return NULL_RTX;
-
-	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
-	}
-    }
-
-  /* We can fold some multi-word operations.  */
-  if (GET_MODE_CLASS (mode) == MODE_INT
-      && width == HOST_BITS_PER_WIDE_INT * 2
-      && (GET_CODE (trueop0) == CONST_DOUBLE
-	  || GET_CODE (trueop0) == CONST_INT)
-      && (GET_CODE (trueop1) == CONST_DOUBLE
-	  || GET_CODE (trueop1) == CONST_INT))
-    {
-      unsigned HOST_WIDE_INT l1, l2, lv, lt;
-      HOST_WIDE_INT h1, h2, hv, ht;
-
-      if (GET_CODE (trueop0) == CONST_DOUBLE)
-	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
-      else
-	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
-
-      if (GET_CODE (trueop1) == CONST_DOUBLE)
-	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
-      else
-	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
-
-      switch (code)
-	{
-	case MINUS:
-	  /* A - B == A + (-B).  */
-	  neg_double (l2, h2, &lv, &hv);
-	  l2 = lv, h2 = hv;
-
-	  /* Fall through....  */
-
-	case PLUS:
-	  add_double (l1, h1, l2, h2, &lv, &hv);
-	  break;
-
-	case MULT:
-	  mul_double (l1, h1, l2, h2, &lv, &hv);
-	  break;
-
-	case DIV:
-	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
-				    &lv, &hv, &lt, &ht))
-	    return 0;
-	  break;
-
-	case MOD:
-	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
-				    &lt, &ht, &lv, &hv))
-	    return 0;
-	  break;
-
-	case UDIV:
-	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
-				    &lv, &hv, &lt, &ht))
-	    return 0;
-	  break;
-
-	case UMOD:
-	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
-				    &lt, &ht, &lv, &hv))
-	    return 0;
-	  break;
-
-	case AND:
-	  lv = l1 & l2, hv = h1 & h2;
-	  break;
-
-	case IOR:
-	  lv = l1 | l2, hv = h1 | h2;
-	  break;
-
-	case XOR:
-	  lv = l1 ^ l2, hv = h1 ^ h2;
-	  break;
-
-	case SMIN:
-	  if (h1 < h2
-	      || (h1 == h2
-		  && ((unsigned HOST_WIDE_INT) l1
-		      < (unsigned HOST_WIDE_INT) l2)))
-	    lv = l1, hv = h1;
-	  else
-	    lv = l2, hv = h2;
-	  break;
-
-	case SMAX:
-	  if (h1 > h2
-	      || (h1 == h2
-		  && ((unsigned HOST_WIDE_INT) l1
-		      > (unsigned HOST_WIDE_INT) l2)))
-	    lv = l1, hv = h1;
-	  else
-	    lv = l2, hv = h2;
-	  break;
-
-	case UMIN:
-	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
-	      || (h1 == h2
-		  && ((unsigned HOST_WIDE_INT) l1
-		      < (unsigned HOST_WIDE_INT) l2)))
-	    lv = l1, hv = h1;
-	  else
-	    lv = l2, hv = h2;
-	  break;
-
-	case UMAX:
-	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
-	      || (h1 == h2
-		  && ((unsigned HOST_WIDE_INT) l1
-		      > (unsigned HOST_WIDE_INT) l2)))
-	    lv = l1, hv = h1;
-	  else
-	    lv = l2, hv = h2;
-	  break;
-
-	case LSHIFTRT:   case ASHIFTRT:
-	case ASHIFT:
-	case ROTATE:     case ROTATERT:
-	  if (SHIFT_COUNT_TRUNCATED)
-	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
-
-	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
-	    return 0;
-
-	  if (code == LSHIFTRT || code == ASHIFTRT)
-	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
-			   code == ASHIFTRT);
-	  else if (code == ASHIFT)
-	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
-	  else if (code == ROTATE)
-	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
-	  else /* code == ROTATERT */
-	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
-	  break;
-
-	default:
-	  return 0;
-	}
+  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
+  if (tem)
+    return tem;
+  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
+}
 
-      return immed_double_const (lv, hv, mode);
-    }
+static rtx
+simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
+			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
+{
+  rtx tem;
+  HOST_WIDE_INT val;
+  unsigned int width = GET_MODE_BITSIZE (mode);
 
-  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
-      || width > HOST_BITS_PER_WIDE_INT || width == 0)
-    {
       /* Even if we can't compute a constant result,
 	 there are some cases worth simplifying.  */
 
@@ -1557,9 +1265,7 @@ simplify_binary_operation (enum rtx_code
 		coeff0 = -1, lhs = XEXP (lhs, 0);
 	      else if (GET_CODE (lhs) == MULT
 		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
-		{
 		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
-		}
 	      else if (GET_CODE (lhs) == ASHIFT
 		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
 		       && INTVAL (XEXP (lhs, 1)) >= 0
@@ -2010,8 +1716,8 @@ simplify_binary_operation (enum rtx_code
 	    }
 	  /* Convert divide by power of two into shift.  */
 	  if (GET_CODE (trueop1) == CONST_INT
-	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
-	    return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
+	      && (val = exact_log2 (INTVAL (trueop1))) > 0)
+	    return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
 	  break;
 
 	case DIV:
@@ -2298,13 +2004,337 @@ simplify_binary_operation (enum rtx_code
 	}
 
       return 0;
+}
+
+rtx
+simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
+				 rtx op0, rtx op1)
+{
+  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
+  HOST_WIDE_INT val;
+  unsigned int width = GET_MODE_BITSIZE (mode);
+
+  if (VECTOR_MODE_P (mode)
+      && code != VEC_CONCAT
+      && GET_CODE (op0) == CONST_VECTOR
+      && GET_CODE (op1) == CONST_VECTOR)
+    {
+      unsigned n_elts = GET_MODE_NUNITS (mode);
+      enum machine_mode op0mode = GET_MODE (op0);
+      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
+      enum machine_mode op1mode = GET_MODE (op1);
+      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
+      rtvec v = rtvec_alloc (n_elts);
+      unsigned int i;
+
+      gcc_assert (op0_n_elts == n_elts);
+      gcc_assert (op1_n_elts == n_elts);
+      for (i = 0; i < n_elts; i++)
+	{
+	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
+					     CONST_VECTOR_ELT (op0, i),
+					     CONST_VECTOR_ELT (op1, i));
+	  if (!x)
+	    return 0;
+	  RTVEC_ELT (v, i) = x;
     }
 
+      return gen_rtx_CONST_VECTOR (mode, v);
+    }
+
+  if (VECTOR_MODE_P (mode)
+      && code == VEC_CONCAT
+      && CONSTANT_P (op0) && CONSTANT_P (op1))
+    {
+      unsigned n_elts = GET_MODE_NUNITS (mode);
+      rtvec v = rtvec_alloc (n_elts);
+
+      gcc_assert (n_elts >= 2);
+      if (n_elts == 2)
+	{
+	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
+	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);
+
+	  RTVEC_ELT (v, 0) = op0;
+	  RTVEC_ELT (v, 1) = op1;
+	}
+      else
+	{
+	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
+	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
+	  unsigned i;
+
+	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
+	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
+	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
+
+	  for (i = 0; i < op0_n_elts; ++i)
+	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
+	  for (i = 0; i < op1_n_elts; ++i)
+	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
+	}
+
+      return gen_rtx_CONST_VECTOR (mode, v);
+    }
+
+  if (GET_MODE_CLASS (mode) == MODE_FLOAT
+      && GET_CODE (op0) == CONST_DOUBLE
+      && GET_CODE (op1) == CONST_DOUBLE
+      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
+    {
+      if (code == AND
+	  || code == IOR
+	  || code == XOR)
+	{
+	  long tmp0[4];
+	  long tmp1[4];
+	  REAL_VALUE_TYPE r;
+	  int i;
+
+	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
+			  GET_MODE (op0));
+	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
+			  GET_MODE (op1));
+	  for (i = 0; i < 4; i++)
+	    {
+	      switch (code)
+	      {
+	      case AND:
+		tmp0[i] &= tmp1[i];
+		break;
+	      case IOR:
+		tmp0[i] |= tmp1[i];
+		break;
+	      case XOR:
+		tmp0[i] ^= tmp1[i];
+		break;
+	      default:
+		gcc_unreachable ();
+	      }
+	    }
+	   real_from_target (&r, tmp0, mode);
+	   return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
+	}
+      else
+	{
+	  REAL_VALUE_TYPE f0, f1, value, result;
+	  bool inexact;
+
+	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
+	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
+	  real_convert (&f0, mode, &f0);
+	  real_convert (&f1, mode, &f1);
+
+	  if (HONOR_SNANS (mode)
+	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
+	    return 0;
+
+	  if (code == DIV
+	      && REAL_VALUES_EQUAL (f1, dconst0)
+	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
+	    return 0;
+
+	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
+	      && flag_trapping_math
+	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
+	    {
+	      int s0 = REAL_VALUE_NEGATIVE (f0);
+	      int s1 = REAL_VALUE_NEGATIVE (f1);
+
+	      switch (code)
+		{
+		case PLUS:
+		  /* Inf + -Inf = NaN plus exception.  */
+		  if (s0 != s1)
+		    return 0;
+		  break;
+		case MINUS:
+		  /* Inf - Inf = NaN plus exception.  */
+		  if (s0 == s1)
+		    return 0;
+		  break;
+		case DIV:
+		  /* Inf / Inf = NaN plus exception.  */
+		  return 0;
+		default:
+		  break;
+		}
+	    }
+
+	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
+	      && flag_trapping_math
+	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
+		  || (REAL_VALUE_ISINF (f1)
+		      && REAL_VALUES_EQUAL (f0, dconst0))))
+	    /* Inf * 0 = NaN plus exception.  */
+	    return 0;
+
+	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
+				     &f0, &f1);
+	  real_convert (&result, mode, &value);
+
+	  /* Don't constant fold this floating point operation if the
+	     result may depend upon the run-time rounding mode and
+	     flag_rounding_math is set, or if GCC's software emulation
+	     is unable to accurately represent the result.  */
+
+	  if ((flag_rounding_math
+	       || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
+		   && !flag_unsafe_math_optimizations))
+	      && (inexact || !real_identical (&result, &value)))
+	    return NULL_RTX;
+
+	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
+	}
+    }
+
+  /* We can fold some multi-word operations.  */
+  if (GET_MODE_CLASS (mode) == MODE_INT
+      && width == HOST_BITS_PER_WIDE_INT * 2
+      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
+      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
+    {
+      unsigned HOST_WIDE_INT l1, l2, lv, lt;
+      HOST_WIDE_INT h1, h2, hv, ht;
+
+      if (GET_CODE (op0) == CONST_DOUBLE)
+	l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
+      else
+	l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
+
+      if (GET_CODE (op1) == CONST_DOUBLE)
+	l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
+      else
+	l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
+
+      switch (code)
+	{
+	case MINUS:
+	  /* A - B == A + (-B).  */
+	  neg_double (l2, h2, &lv, &hv);
+	  l2 = lv, h2 = hv;
+
+	  /* Fall through....  */
+
+	case PLUS:
+	  add_double (l1, h1, l2, h2, &lv, &hv);
+	  break;
+
+	case MULT:
+	  mul_double (l1, h1, l2, h2, &lv, &hv);
+	  break;
+
+	case DIV:
+	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
+				    &lv, &hv, &lt, &ht))
+	    return 0;
+	  break;
+
+	case MOD:
+	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
+				    &lt, &ht, &lv, &hv))
+	    return 0;
+	  break;
+
+	case UDIV:
+	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
+				    &lv, &hv, &lt, &ht))
+	    return 0;
+	  break;
+
+	case UMOD:
+	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
+				    &lt, &ht, &lv, &hv))
+	    return 0;
+	  break;
+
+	case AND:
+	  lv = l1 & l2, hv = h1 & h2;
+	  break;
+
+	case IOR:
+	  lv = l1 | l2, hv = h1 | h2;
+	  break;
+
+	case XOR:
+	  lv = l1 ^ l2, hv = h1 ^ h2;
+	  break;
+
+	case SMIN:
+	  if (h1 < h2
+	      || (h1 == h2
+		  && ((unsigned HOST_WIDE_INT) l1
+		      < (unsigned HOST_WIDE_INT) l2)))
+	    lv = l1, hv = h1;
+	  else
+	    lv = l2, hv = h2;
+	  break;
+
+	case SMAX:
+	  if (h1 > h2
+	      || (h1 == h2
+		  && ((unsigned HOST_WIDE_INT) l1
+		      > (unsigned HOST_WIDE_INT) l2)))
+	    lv = l1, hv = h1;
+	  else
+	    lv = l2, hv = h2;
+	  break;
+
+	case UMIN:
+	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
+	      || (h1 == h2
+		  && ((unsigned HOST_WIDE_INT) l1
+		      < (unsigned HOST_WIDE_INT) l2)))
+	    lv = l1, hv = h1;
+	  else
+	    lv = l2, hv = h2;
+	  break;
+
+	case UMAX:
+	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
+	      || (h1 == h2
+		  && ((unsigned HOST_WIDE_INT) l1
+		      > (unsigned HOST_WIDE_INT) l2)))
+	    lv = l1, hv = h1;
+	  else
+	    lv = l2, hv = h2;
+	  break;
+
+	case LSHIFTRT:   case ASHIFTRT:
+	case ASHIFT:
+	case ROTATE:     case ROTATERT:
+	  if (SHIFT_COUNT_TRUNCATED)
+	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
+
+	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
+	    return 0;
+
+	  if (code == LSHIFTRT || code == ASHIFTRT)
+	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
+			   code == ASHIFTRT);
+	  else if (code == ASHIFT)
+	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
+	  else if (code == ROTATE)
+	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
+	  else /* code == ROTATERT */
+	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
+	  break;
+
+	default:
+	  return 0;
+	}
+
+      return immed_double_const (lv, hv, mode);
+    }
+
+  if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
+      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
+    {
   /* Get the integer argument values in two forms:
      zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
 
-  arg0 = INTVAL (trueop0);
-  arg1 = INTVAL (trueop1);
+      arg0 = INTVAL (op0);
+      arg1 = INTVAL (op1);
 
   if (width < HOST_BITS_PER_WIDE_INT)
     {
@@ -2388,12 +2418,13 @@ simplify_binary_operation (enum rtx_code
     case LSHIFTRT:
     case ASHIFT:
     case ASHIFTRT:
-      /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
-	 value is in range.  We can't return any old value for out-of-range
-	 arguments because either the middle-end (via shift_truncation_mask)
-	 or the back-end might be relying on target-specific knowledge.
-	 Nor can we rely on shift_truncation_mask, since the shift might
-	 not be part of an ashlM3, lshrM3 or ashrM3 instruction.  */
+	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
+	     the value is in range.  We can't return any old value for
+	     out-of-range arguments because either the middle-end (via
+	     shift_truncation_mask) or the back-end might be relying on
+	     target-specific knowledge.  Nor can we rely on
+	     shift_truncation_mask, since the shift might not be part of an
+	     ashlM3, lshrM3 or ashrM3 instruction.  */
       if (SHIFT_COUNT_TRUNCATED)
 	arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
       else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
@@ -2460,10 +2491,14 @@ simplify_binary_operation (enum rtx_code
     }
 
   val = trunc_int_for_mode (val, mode);
-
   return GEN_INT (val);
+    }
+
+  return NULL_RTX;
 }
 
+
+
 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
    PLUS or MINUS.
 
Index: rtl.h
===================================================================
RCS file: /cvs/gcc/gcc/gcc/rtl.h,v
retrieving revision 1.535
diff -d -u -p -b -r1.535 rtl.h
--- rtl.h	24 Jan 2005 08:55:44 -0000	1.535
+++ rtl.h	14 Feb 2005 15:35:26 -0000
@@ -1516,8 +1516,12 @@ extern int split_branch_probability;
 extern rtx split_insns (rtx, rtx);
 
 /* In simplify-rtx.c  */
+extern rtx simplify_const_unary_operation (enum rtx_code, enum machine_mode,
+					   rtx, enum machine_mode);
 extern rtx simplify_unary_operation (enum rtx_code, enum machine_mode, rtx,
 				     enum machine_mode);
+extern rtx simplify_const_binary_operation (enum rtx_code, enum machine_mode,
+					    rtx, rtx);
 extern rtx simplify_binary_operation (enum rtx_code, enum machine_mode, rtx,
 				      rtx);
 extern rtx simplify_ternary_operation (enum rtx_code, enum machine_mode,

Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]