This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.


Re: [PATCH] Split simplify_unary_operation and simplify_binary_operation


Paolo Bonzini wrote:
This is a very simple code cleanup patch. I was not sure whether to submit this for 4.0 or wait for the branch, but then I thought that having this in 4.0 may make it a bit easier to backport fixes to 4.0.

All of the simplifications that simplify_relational_operation can do and that do not depend on the operands being constants are already in a separate function, simplify_relational_operation_1.

What this patch does is extract simplify_binary_operation_1 (a whopping 800-line function on its own!) and simplify_unary_operation_1 in the same way.
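
To make the shape of the change clearer, each entry point now keeps only the constant folding and falls through to its helper for everything else. Roughly (a simplified sketch of the resulting structure, not a verbatim excerpt of the patch):

  rtx
  simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                            rtx op, enum machine_mode op_mode)
  {
    /* ... constant folding of CONST_INT/CONST_DOUBLE operands,
       unchanged by this patch ...  */

    /* Everything that does not need a constant operand now
       lives in the helper.  */
    return simplify_unary_operation_1 (code, mode, op);
  }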

Bootstrapped/regtested powerpc-apple-darwin, ok for mainline?

Paolo

2005-02-10  Paolo Bonzini  <bonzini@gnu.org>

    * simplify-rtx.c (simplify_unary_operation_1): Extract from...
    (simplify_unary_operation): ... this one.
    (simplify_binary_operation_1): Extract from...
    (simplify_binary_operation): ... this one.
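
One note on the prototypes: unlike the unary helper, simplify_binary_operation_1 also receives trueop0/trueop1 next to op0/op1, so the non-constant simplifications can keep testing the operands with constant-pool references already looked through, without recomputing them. The call boils down to something like this (a sketch, assuming the caller still computes trueop0/trueop1 with avoid_constant_pool_reference as before; not a verbatim excerpt):

  /* Inside simplify_binary_operation (sketch).  */
  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  /* ... constant folding on trueop0/trueop1 ...  */
  return simplify_binary_operation_1 (code, mode, op0, op1,
                                      trueop0, trueop1);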

Here's the patch.


Paolo
Index: simplify-rtx.c
===================================================================
RCS file: /cvs/gcc/gcc/gcc/simplify-rtx.c,v
retrieving revision 1.227
diff -d -u -p -r1.227 simplify-rtx.c
--- simplify-rtx.c	8 Feb 2005 03:38:26 -0000	1.227
+++ simplify-rtx.c	10 Feb 2005 10:58:35 -0000
@@ -60,6 +60,9 @@ static rtx simplify_associative_operatio
 					   rtx, rtx);
 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
 					    enum machine_mode, rtx, rtx);
+static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
+static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
+					rtx, rtx, rtx, rtx);
 
 /* Negate a CONST_INT rtx, truncating (because a conversion from a
    maximally negative number can overflow).  */
@@ -348,6 +351,227 @@ simplify_replace_rtx (rtx x, rtx old_rtx
   return x;
 }
 
+/* Perform some simplifications we can do even if the operands
+   aren't constant.  */
+static rtx
+simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
+{
+  enum rtx_code reversed;
+  rtx temp;
+
+  switch (code)
+    {
+    case NOT:
+      /* (not (not X)) == X.  */
+      if (GET_CODE (op) == NOT)
+	return XEXP (op, 0);
+
+      /* (not (eq X Y)) == (ne X Y), etc.  */
+      if (COMPARISON_P (op)
+	  && (mode == BImode || STORE_FLAG_VALUE == -1)
+	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
+	return simplify_gen_relational (reversed, mode, VOIDmode,
+					XEXP (op, 0), XEXP (op, 1));
+
+      /* (not (plus X -1)) can become (neg X).  */
+      if (GET_CODE (op) == PLUS
+	  && XEXP (op, 1) == constm1_rtx)
+	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
+
+      /* Similarly, (not (neg X)) is (plus X -1).  */
+      if (GET_CODE (op) == NEG)
+	return plus_constant (XEXP (op, 0), -1);
+
+      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
+      if (GET_CODE (op) == XOR
+	  && GET_CODE (XEXP (op, 1)) == CONST_INT
+	  && (temp = simplify_unary_operation (NOT, mode,
+					       XEXP (op, 1), mode)) != 0)
+	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
+
+      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
+      if (GET_CODE (op) == PLUS
+	  && GET_CODE (XEXP (op, 1)) == CONST_INT
+	  && mode_signbit_p (mode, XEXP (op, 1))
+	  && (temp = simplify_unary_operation (NOT, mode,
+					       XEXP (op, 1), mode)) != 0)
+	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
+
+      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
+	 operands other than 1, but that is not valid.  We could do a
+	 similar simplification for (not (lshiftrt C X)) where C is
+	 just the sign bit, but this doesn't seem common enough to
+	 bother with.  */
+      if (GET_CODE (op) == ASHIFT
+	  && XEXP (op, 0) == const1_rtx)
+	{
+	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
+	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
+	}
+
+      /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
+	 by reversing the comparison code if valid.  */
+      if (STORE_FLAG_VALUE == -1
+	  && COMPARISON_P (op)
+	  && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
+	return simplify_gen_relational (reversed, mode, VOIDmode,
+					XEXP (op, 0), XEXP (op, 1));
+
+      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
+	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
+	 so we can perform the above simplification.  */
+
+      if (STORE_FLAG_VALUE == -1
+	  && GET_CODE (op) == ASHIFTRT
+	  && GET_CODE (XEXP (op, 1)) == CONST_INT
+	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+	return simplify_gen_relational (GE, mode, VOIDmode,
+					XEXP (op, 0), const0_rtx);
+
+      break;
+
+    case NEG:
+      /* (neg (neg X)) == X.  */
+      if (GET_CODE (op) == NEG)
+	return XEXP (op, 0);
+
+      /* (neg (plus X 1)) can become (not X).  */
+      if (GET_CODE (op) == PLUS
+	  && XEXP (op, 1) == const1_rtx)
+	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
+
+      /* Similarly, (neg (not X)) is (plus X 1).  */
+      if (GET_CODE (op) == NOT)
+	return plus_constant (XEXP (op, 0), 1);
+
+      /* (neg (minus X Y)) can become (minus Y X).  This transformation
+	 isn't safe for modes with signed zeros, since if X and Y are
+	 both +0, (minus Y X) is the same as (minus X Y).  If the
+	 rounding mode is towards +infinity (or -infinity) then the two
+	 expressions will be rounded differently.  */
+      if (GET_CODE (op) == MINUS
+	  && !HONOR_SIGNED_ZEROS (mode)
+	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
+	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
+
+      if (GET_CODE (op) == PLUS
+	  && !HONOR_SIGNED_ZEROS (mode)
+	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
+	{
+	  /* (neg (plus A C)) is simplified to (minus -C A).  */
+	  if (GET_CODE (XEXP (op, 1)) == CONST_INT
+	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
+	    {
+	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
+	      if (temp)
+		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
+	    }
+
+	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
+	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
+	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
+	}
+
+      /* (neg (mult A B)) becomes (mult (neg A) B).
+	 This works even for floating-point values.  */
+      if (GET_CODE (op) == MULT
+	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
+	{
+	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
+	  return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
+	}
+
+      /* NEG commutes with ASHIFT since it is multiplication.  Only do
+	 this if we can then eliminate the NEG (e.g., if the operand
+	 is a constant).  */
+      if (GET_CODE (op) == ASHIFT)
+	{
+	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
+	  if (temp)
+	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
+	}
+
+      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
+	 C is equal to the width of MODE minus 1.  */
+      if (GET_CODE (op) == ASHIFTRT
+	  && GET_CODE (XEXP (op, 1)) == CONST_INT
+	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+	return simplify_gen_binary (LSHIFTRT, mode,
+				    XEXP (op, 0), XEXP (op, 1));
+
+      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
+	 C is equal to the width of MODE minus 1.  */
+      if (GET_CODE (op) == LSHIFTRT
+	  && GET_CODE (XEXP (op, 1)) == CONST_INT
+	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+	return simplify_gen_binary (ASHIFTRT, mode,
+				    XEXP (op, 0), XEXP (op, 1));
+
+      break;
+
+    case SIGN_EXTEND:
+      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
+	 becomes just the MINUS if its mode is MODE.  This allows
+	 folding switch statements on machines using casesi (such as
+	 the VAX).  */
+      if (GET_CODE (op) == TRUNCATE
+	  && GET_MODE (XEXP (op, 0)) == mode
+	  && GET_CODE (XEXP (op, 0)) == MINUS
+	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
+	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
+	return XEXP (op, 0);
+
+      /* Check for a sign extension of a subreg of a promoted
+	 variable, where the promotion is sign-extended, and the
+	 target mode is the same as the variable's promotion.  */
+      if (GET_CODE (op) == SUBREG
+	  && SUBREG_PROMOTED_VAR_P (op)
+	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
+	  && GET_MODE (XEXP (op, 0)) == mode)
+	return XEXP (op, 0);
+
+#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
+      if (! POINTERS_EXTEND_UNSIGNED
+	  && mode == Pmode && GET_MODE (op) == ptr_mode
+	  && (CONSTANT_P (op)
+	      || (GET_CODE (op) == SUBREG
+		  && REG_P (SUBREG_REG (op))
+		  && REG_POINTER (SUBREG_REG (op))
+		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
+	return convert_memory_address (Pmode, op);
+#endif
+      break;
+
+    case ZERO_EXTEND:
+      /* Check for a zero extension of a subreg of a promoted
+	 variable, where the promotion is zero-extended, and the
+	 target mode is the same as the variable's promotion.  */
+      if (GET_CODE (op) == SUBREG
+	  && SUBREG_PROMOTED_VAR_P (op)
+	  && SUBREG_PROMOTED_UNSIGNED_P (op)
+	  && GET_MODE (XEXP (op, 0)) == mode)
+	return XEXP (op, 0);
+
+#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
+      if (POINTERS_EXTEND_UNSIGNED > 0
+	  && mode == Pmode && GET_MODE (op) == ptr_mode
+	  && (CONSTANT_P (op)
+	      || (GET_CODE (op) == SUBREG
+		  && REG_P (SUBREG_REG (op))
+		  && REG_POINTER (SUBREG_REG (op))
+		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
+	return convert_memory_address (Pmode, op);
+#endif
+      break;
+
+    default:
+      break;
+    }
+
+  return 0;
+}
+
 /* Try to simplify a unary operation CODE whose output mode is to be
    MODE with input operand OP whose mode was originally OP_MODE.
    Return zero if no simplification can be made.  */
@@ -774,6 +998,8 @@ simplify_unary_operation (enum rtx_code 
 	 by target backends), for consistency, this routine implements the
 	 same semantics for constant folding as used by the middle-end.  */
 
+      /* This was formerly used only for non-IEEE float.
+	 eggert@twinsun.com says it is safe for IEEE also.  */
       HOST_WIDE_INT xh, xl, th, tl;
       REAL_VALUE_TYPE x, t;
       REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
@@ -861,238 +1087,8 @@ simplify_unary_operation (enum rtx_code 
 	}
       return immed_double_const (xl, xh, mode);
     }
-
-  /* This was formerly used only for non-IEEE float.
-     eggert@twinsun.com says it is safe for IEEE also.  */
   else
-    {
-      enum rtx_code reversed;
-      rtx temp;
-
-      /* There are some simplifications we can do even if the operands
-	 aren't constant.  */
-      switch (code)
-	{
-	case NOT:
-	  /* (not (not X)) == X.  */
-	  if (GET_CODE (op) == NOT)
-	    return XEXP (op, 0);
-
-	  /* (not (eq X Y)) == (ne X Y), etc.  */
-	  if (COMPARISON_P (op)
-	      && (mode == BImode || STORE_FLAG_VALUE == -1)
-	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
-		  != UNKNOWN))
-	    return simplify_gen_relational (reversed, mode, VOIDmode,
-					    XEXP (op, 0), XEXP (op, 1));
-
-          /* (not (plus X -1)) can become (neg X).  */
-          if (GET_CODE (op) == PLUS
-	      && XEXP (op, 1) == constm1_rtx)
-	    return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
-
-	  /* Similarly, (not (neg X)) is (plus X -1).  */
-	  if (GET_CODE (op) == NEG)
-	    return plus_constant (XEXP (op, 0), -1);
-
-	  /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
-	  if (GET_CODE (op) == XOR
-	      && GET_CODE (XEXP (op, 1)) == CONST_INT
-	      && (temp = simplify_unary_operation (NOT, mode,
-						   XEXP (op, 1),
-						   mode)) != 0)
-	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
-
-	  /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
-	  if (GET_CODE (op) == PLUS
-	      && GET_CODE (XEXP (op, 1)) == CONST_INT
-	      && mode_signbit_p (mode, XEXP (op, 1))
-	      && (temp = simplify_unary_operation (NOT, mode,
-						   XEXP (op, 1),
-						   mode)) != 0)
-	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
-
-
-
-	  /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
-	     operands other than 1, but that is not valid.  We could do a
-	     similar simplification for (not (lshiftrt C X)) where C is
-	     just the sign bit, but this doesn't seem common enough to
-	     bother with.  */
-	  if (GET_CODE (op) == ASHIFT
-	      && XEXP (op, 0) == const1_rtx)
-	    {
-	      temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
-	      return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
-	    }
-
-	  /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
-	     by reversing the comparison code if valid.  */
-	  if (STORE_FLAG_VALUE == -1
-	      && COMPARISON_P (op)
-	      && (reversed = reversed_comparison_code (op, NULL_RTX))
-		 != UNKNOWN)
-	    return simplify_gen_relational (reversed, mode, VOIDmode,
-					    XEXP (op, 0), XEXP (op, 1));
-
-	  /* (not (ashiftrt foo C)) where C is the number of bits in FOO
-	     minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
-	     so we can perform the above simplification.  */
-
-	  if (STORE_FLAG_VALUE == -1
-	      && GET_CODE (op) == ASHIFTRT
-	      && GET_CODE (XEXP (op, 1)) == CONST_INT
-	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
-	    return simplify_gen_relational (GE, mode, VOIDmode,
-					    XEXP (op, 0), const0_rtx);
-
-	  break;
-
-	case NEG:
-	  /* (neg (neg X)) == X.  */
-	  if (GET_CODE (op) == NEG)
-	    return XEXP (op, 0);
-
-	  /* (neg (plus X 1)) can become (not X).  */
-	  if (GET_CODE (op) == PLUS
-	      && XEXP (op, 1) == const1_rtx)
-	    return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
-
-	  /* Similarly, (neg (not X)) is (plus X 1).  */
-	  if (GET_CODE (op) == NOT)
-	    return plus_constant (XEXP (op, 0), 1);
-
-	  /* (neg (minus X Y)) can become (minus Y X).  This transformation
-	     isn't safe for modes with signed zeros, since if X and Y are
-	     both +0, (minus Y X) is the same as (minus X Y).  If the
-	     rounding mode is towards +infinity (or -infinity) then the two
-	     expressions will be rounded differently.  */
-	  if (GET_CODE (op) == MINUS
-	      && !HONOR_SIGNED_ZEROS (mode)
-	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
-	    return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
-					XEXP (op, 0));
-
-	  if (GET_CODE (op) == PLUS
-	      && !HONOR_SIGNED_ZEROS (mode)
-	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
-	    {
-	      /* (neg (plus A C)) is simplified to (minus -C A).  */
-	      if (GET_CODE (XEXP (op, 1)) == CONST_INT
-		  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
-		{
-		  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
-						   mode);
-		  if (temp)
-		    return simplify_gen_binary (MINUS, mode, temp,
-						XEXP (op, 0));
-		}
-
-	      /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
-	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
-	      return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
-	    }
-
-	  /* (neg (mult A B)) becomes (mult (neg A) B).
-	     This works even for floating-point values.  */
-	  if (GET_CODE (op) == MULT
-	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
-	    {
-	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
-	      return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
-	    }
-
-	  /* NEG commutes with ASHIFT since it is multiplication.  Only do
-	     this if we can then eliminate the NEG (e.g., if the operand
-	     is a constant).  */
-	  if (GET_CODE (op) == ASHIFT)
-	    {
-	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
-					       mode);
-	      if (temp)
-		return simplify_gen_binary (ASHIFT, mode, temp,
-					    XEXP (op, 1));
-	    }
-
-	  /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
-	     C is equal to the width of MODE minus 1.  */
-	  if (GET_CODE (op) == ASHIFTRT
-	      && GET_CODE (XEXP (op, 1)) == CONST_INT
-	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
-		return simplify_gen_binary (LSHIFTRT, mode,
-					    XEXP (op, 0), XEXP (op, 1));
-
-	  /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
-	     C is equal to the width of MODE minus 1.  */
-	  if (GET_CODE (op) == LSHIFTRT
-	      && GET_CODE (XEXP (op, 1)) == CONST_INT
-	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
-		return simplify_gen_binary (ASHIFTRT, mode,
-					    XEXP (op, 0), XEXP (op, 1));
-
-	  break;
-
-	case SIGN_EXTEND:
-	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
-	     becomes just the MINUS if its mode is MODE.  This allows
-	     folding switch statements on machines using casesi (such as
-	     the VAX).  */
-	  if (GET_CODE (op) == TRUNCATE
-	      && GET_MODE (XEXP (op, 0)) == mode
-	      && GET_CODE (XEXP (op, 0)) == MINUS
-	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
-	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
-	    return XEXP (op, 0);
-
-	  /* Check for a sign extension of a subreg of a promoted
-	     variable, where the promotion is sign-extended, and the
-	     target mode is the same as the variable's promotion.  */
-	  if (GET_CODE (op) == SUBREG
-	      && SUBREG_PROMOTED_VAR_P (op)
-	      && ! SUBREG_PROMOTED_UNSIGNED_P (op)
-	      && GET_MODE (XEXP (op, 0)) == mode)
-	    return XEXP (op, 0);
-
-#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
-	  if (! POINTERS_EXTEND_UNSIGNED
-	      && mode == Pmode && GET_MODE (op) == ptr_mode
-	      && (CONSTANT_P (op)
-		  || (GET_CODE (op) == SUBREG
-		      && REG_P (SUBREG_REG (op))
-		      && REG_POINTER (SUBREG_REG (op))
-		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
-	    return convert_memory_address (Pmode, op);
-#endif
-	  break;
-
-	case ZERO_EXTEND:
-	  /* Check for a zero extension of a subreg of a promoted
-	     variable, where the promotion is zero-extended, and the
-	     target mode is the same as the variable's promotion.  */
-	  if (GET_CODE (op) == SUBREG
-	      && SUBREG_PROMOTED_VAR_P (op)
-	      && SUBREG_PROMOTED_UNSIGNED_P (op)
-	      && GET_MODE (XEXP (op, 0)) == mode)
-	    return XEXP (op, 0);
-
-#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
-	  if (POINTERS_EXTEND_UNSIGNED > 0
-	      && mode == Pmode && GET_MODE (op) == ptr_mode
-	      && (CONSTANT_P (op)
-		  || (GET_CODE (op) == SUBREG
-		      && REG_P (SUBREG_REG (op))
-		      && REG_POINTER (SUBREG_REG (op))
-		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
-	    return convert_memory_address (Pmode, op);
-#endif
-	  break;
-
-	default:
-	  break;
-	}
-
-      return 0;
-    }
+    return simplify_unary_operation_1 (code, mode, op);
 }
 
 /* Subroutine of simplify_binary_operation to simplify a commutative,
@@ -1153,6 +1149,810 @@ simplify_associative_operation (enum rtx
   return 0;
 }
 
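+/* Subroutine of simplify_binary_operation.  Simplify a binary
+   operation CODE with result mode MODE, operating on OP0 and OP1.
+   TRUEOP0 and TRUEOP1 are the operands as seen through any constant
+   pool references.  Return zero if no simplification is possible.  */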
+static rtx
+simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
+			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
+{
+  rtx tem;
+  HOST_WIDE_INT val;
+  unsigned int width = GET_MODE_BITSIZE (mode);
+
+  /* Even if we can't compute a constant result,
+     there are some cases worth simplifying.  */
+
+  switch (code)
+    {
+    case PLUS:
+      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
+	 when x is NaN, infinite, or finite and nonzero.  They aren't
+	 when x is -0 and the rounding mode is not towards -infinity,
+	 since (-0) + 0 is then 0.  */
+      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
+	return op0;
+
+      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
+	 transformations are safe even for IEEE.  */
+      if (GET_CODE (op0) == NEG)
+	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
+      else if (GET_CODE (op1) == NEG)
+	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
+
+      /* (~a) + 1 -> -a */
+      if (INTEGRAL_MODE_P (mode)
+	  && GET_CODE (op0) == NOT
+	  && trueop1 == const1_rtx)
+	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
+
+      /* Handle both-operands-constant cases.  We can only add
+	 CONST_INTs to constants since the sum of relocatable symbols
+	 can't be handled by most assemblers.  Don't add CONST_INT
+	 to CONST_INT since overflow won't be computed properly if wider
+	 than HOST_BITS_PER_WIDE_INT.  */
+
+      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
+	  && GET_CODE (op1) == CONST_INT)
+	return plus_constant (op0, INTVAL (op1));
+      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
+	       && GET_CODE (op0) == CONST_INT)
+	return plus_constant (op1, INTVAL (op0));
+
+      /* See if this is something like X * C - X or vice versa or
+	 if the multiplication is written as a shift.  If so, we can
+	 distribute and make a new multiply, shift, or maybe just
+	 have X (if C is 2 in the example above).  But don't make
+	 something more expensive than we had before.  */
+
+      if (! FLOAT_MODE_P (mode))
+	{
+	  HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
+	  rtx lhs = op0, rhs = op1;
+
+	  if (GET_CODE (lhs) == NEG)
+	    coeff0 = -1, lhs = XEXP (lhs, 0);
+	  else if (GET_CODE (lhs) == MULT
+		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
+	    coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
+	  else if (GET_CODE (lhs) == ASHIFT
+		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
+		   && INTVAL (XEXP (lhs, 1)) >= 0
+		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
+	    {
+	      coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
+	      lhs = XEXP (lhs, 0);
+	    }
+
+	  if (GET_CODE (rhs) == NEG)
+	    coeff1 = -1, rhs = XEXP (rhs, 0);
+	  else if (GET_CODE (rhs) == MULT
+		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
+	    {
+	      coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
+	    }
+	  else if (GET_CODE (rhs) == ASHIFT
+		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
+		   && INTVAL (XEXP (rhs, 1)) >= 0
+		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
+	    {
+	      coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
+	      rhs = XEXP (rhs, 0);
+	    }
+
+	  if (rtx_equal_p (lhs, rhs))
+	    {
+	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
+	      tem = simplify_gen_binary (MULT, mode, lhs,
+					 GEN_INT (coeff0 + coeff1));
+	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
+		? tem : 0;
+	    }
+	}
+
+      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
+      if ((GET_CODE (op1) == CONST_INT
+	   || GET_CODE (op1) == CONST_DOUBLE)
+	  && GET_CODE (op0) == XOR
+	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
+	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
+	  && mode_signbit_p (mode, op1))
+	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
+				    simplify_gen_binary (XOR, mode, op1,
+							 XEXP (op0, 1)));
+
+      /* If one of the operands is a PLUS or a MINUS, see if we can
+	 simplify this by the associative law.
+	 Don't use the associative law for floating point.
+	 The inaccuracy makes it nonassociative,
+	 and subtle programs can break if operations are associated.  */
+
+      if (INTEGRAL_MODE_P (mode)
+	  && (plus_minus_operand_p (op0)
+	      || plus_minus_operand_p (op1))
+	  && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
+	return tem;
+
+      /* Reassociate floating point addition only when the user
+	 specifies unsafe math optimizations.  */
+      if (FLOAT_MODE_P (mode)
+	  && flag_unsafe_math_optimizations)
+	{
+	  tem = simplify_associative_operation (code, mode, op0, op1);
+	  if (tem)
+	    return tem;
+	}
+      break;
+
+    case COMPARE:
+#ifdef HAVE_cc0
+      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
+	 using cc0, in which case we want to leave it as a COMPARE
+	 so we can distinguish it from a register-register-copy.
+
+	 In IEEE floating point, x-0 is not the same as x.  */
+
+      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+	   || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
+	  && trueop1 == CONST0_RTX (mode))
+	return op0;
+#endif
+
+      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
+      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
+	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
+	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
+	{
+	  rtx xop00 = XEXP (op0, 0);
+	  rtx xop10 = XEXP (op1, 0);
+
+#ifdef HAVE_cc0
+	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
+#else
+	  if (REG_P (xop00) && REG_P (xop10)
+	      && GET_MODE (xop00) == GET_MODE (xop10)
+	      && REGNO (xop00) == REGNO (xop10)
+	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
+	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
+#endif
+	    return xop00;
+	}
+      break;
+
+    case MINUS:
+      /* We can't assume x-x is 0 even with non-IEEE floating point,
+	 but since it is zero except in very strange circumstances, we
+	 will treat it as zero with -funsafe-math-optimizations.  */
+      if (rtx_equal_p (trueop0, trueop1)
+	  && ! side_effects_p (op0)
+	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
+	return CONST0_RTX (mode);
+
+      /* Change subtraction from zero into negation.  (0 - x) is the
+	 same as -x when x is NaN, infinite, or finite and nonzero.
+	 But if the mode has signed zeros, and does not round towards
+	 -infinity, then 0 - 0 is 0, not -0.  */
+      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
+	return simplify_gen_unary (NEG, mode, op1, mode);
+
+      /* (-1 - a) is ~a.  */
+      if (trueop0 == constm1_rtx)
+	return simplify_gen_unary (NOT, mode, op1, mode);
+
+      /* Subtracting 0 has no effect unless the mode has signed zeros
+	 and supports rounding towards -infinity.  In such a case,
+	 0 - 0 is -0.  */
+      if (!(HONOR_SIGNED_ZEROS (mode)
+	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
+	  && trueop1 == CONST0_RTX (mode))
+	return op0;
+
+      /* See if this is something like X * C - X or vice versa or
+	 if the multiplication is written as a shift.  If so, we can
+	 distribute and make a new multiply, shift, or maybe just
+	 have X (if C is 2 in the example above).  But don't make
+	 something more expensive than we had before.  */
+
+      if (! FLOAT_MODE_P (mode))
+	{
+	  HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
+	  rtx lhs = op0, rhs = op1;
+
+	  if (GET_CODE (lhs) == NEG)
+	    coeff0 = -1, lhs = XEXP (lhs, 0);
+	  else if (GET_CODE (lhs) == MULT
+		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
+	    {
+	      coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
+	    }
+	  else if (GET_CODE (lhs) == ASHIFT
+		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
+		   && INTVAL (XEXP (lhs, 1)) >= 0
+		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
+	    {
+	      coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
+	      lhs = XEXP (lhs, 0);
+	    }
+
+	  if (GET_CODE (rhs) == NEG)
+	    coeff1 = - 1, rhs = XEXP (rhs, 0);
+	  else if (GET_CODE (rhs) == MULT
+		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
+	    {
+	      coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
+	    }
+	  else if (GET_CODE (rhs) == ASHIFT
+		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
+		   && INTVAL (XEXP (rhs, 1)) >= 0
+		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
+	    {
+	      coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
+	      rhs = XEXP (rhs, 0);
+	    }
+
+	  if (rtx_equal_p (lhs, rhs))
+	    {
+	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
+	      tem = simplify_gen_binary (MULT, mode, lhs,
+					 GEN_INT (coeff0 - coeff1));
+	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
+		? tem : 0;
+	    }
+	}
+
+      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
+      if (GET_CODE (op1) == NEG)
+	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
+
+      /* (-x - c) may be simplified as (-c - x).  */
+      if (GET_CODE (op0) == NEG
+	  && (GET_CODE (op1) == CONST_INT
+	      || GET_CODE (op1) == CONST_DOUBLE))
+	{
+	  tem = simplify_unary_operation (NEG, mode, op1, mode);
+	  if (tem)
+	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
+	}
+
+      /* If one of the operands is a PLUS or a MINUS, see if we can
+	 simplify this by the associative law.
+	 Don't use the associative law for floating point.
+	 The inaccuracy makes it nonassociative,
+	 and subtle programs can break if operations are associated.  */
+
+      if (INTEGRAL_MODE_P (mode)
+	  && (plus_minus_operand_p (op0)
+	      || plus_minus_operand_p (op1))
+	  && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
+	return tem;
+
+      /* Don't let a relocatable value get a negative coeff.  */
+      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
+	return simplify_gen_binary (PLUS, mode,
+				    op0,
+				    neg_const_int (mode, op1));
+
+      /* (x - (x & y)) -> (x & ~y) */
+      if (GET_CODE (op1) == AND)
+	{
+	  if (rtx_equal_p (op0, XEXP (op1, 0)))
+	    {
+	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
+					GET_MODE (XEXP (op1, 1)));
+	      return simplify_gen_binary (AND, mode, op0, tem);
+	    }
+	  if (rtx_equal_p (op0, XEXP (op1, 1)))
+	    {
+	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
+					GET_MODE (XEXP (op1, 0)));
+	      return simplify_gen_binary (AND, mode, op0, tem);
+	    }
+	}
+      break;
+
+    case MULT:
+      if (trueop1 == constm1_rtx)
+	return simplify_gen_unary (NEG, mode, op0, mode);
+
+      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
+	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
+	 when the mode has signed zeros, since multiplying a negative
+	 number by 0 will give -0, not 0.  */
+      if (!HONOR_NANS (mode)
+	  && !HONOR_SIGNED_ZEROS (mode)
+	  && trueop1 == CONST0_RTX (mode)
+	  && ! side_effects_p (op0))
+	return op1;
+
+      /* In IEEE floating point, x*1 is not equivalent to x for
+	 signalling NaNs.  */
+      if (!HONOR_SNANS (mode)
+	  && trueop1 == CONST1_RTX (mode))
+	return op0;
+
+      /* Convert multiply by constant power of two into shift unless
+	 we are still generating RTL.  This test is a kludge.  */
+      if (GET_CODE (trueop1) == CONST_INT
+	  && (val = exact_log2 (INTVAL (trueop1))) >= 0
+	  /* If the mode is larger than the host word size, and the
+	     uppermost bit is set, then this isn't a power of two due
+	     to implicit sign extension.  */
+	  && (width <= HOST_BITS_PER_WIDE_INT
+	      || val != HOST_BITS_PER_WIDE_INT - 1))
+	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
+
+      /* x*2 is x+x and x*(-1) is -x */
+      if (GET_CODE (trueop1) == CONST_DOUBLE
+	  && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
+	  && GET_MODE (op0) == mode)
+	{
+	  REAL_VALUE_TYPE d;
+	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
+
+	  if (REAL_VALUES_EQUAL (d, dconst2))
+	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
+
+	  if (REAL_VALUES_EQUAL (d, dconstm1))
+	    return simplify_gen_unary (NEG, mode, op0, mode);
+	}
+
+      /* Reassociate multiplication, but for floating point MULTs
+	 only when the user specifies unsafe math optimizations.  */
+      if (! FLOAT_MODE_P (mode)
+	  || flag_unsafe_math_optimizations)
+	{
+	  tem = simplify_associative_operation (code, mode, op0, op1);
+	  if (tem)
+	    return tem;
+	}
+      break;
+
+    case IOR:
+      if (trueop1 == const0_rtx)
+	return op0;
+      if (GET_CODE (trueop1) == CONST_INT
+	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
+	      == GET_MODE_MASK (mode)))
+	return op1;
+      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
+	return op0;
+      /* A | (~A) -> -1 */
+      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
+	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
+	  && ! side_effects_p (op0)
+	  && GET_MODE_CLASS (mode) != MODE_CC)
+	return constm1_rtx;
+      tem = simplify_associative_operation (code, mode, op0, op1);
+      if (tem)
+	return tem;
+      break;
+
+    case XOR:
+      if (trueop1 == const0_rtx)
+	return op0;
+      if (GET_CODE (trueop1) == CONST_INT
+	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
+	      == GET_MODE_MASK (mode)))
+	return simplify_gen_unary (NOT, mode, op0, mode);
+      if (trueop0 == trueop1
+	  && ! side_effects_p (op0)
+	  && GET_MODE_CLASS (mode) != MODE_CC)
+	return const0_rtx;
+
+      /* Canonicalize XOR of the most significant bit to PLUS.  */
+      if ((GET_CODE (op1) == CONST_INT
+	   || GET_CODE (op1) == CONST_DOUBLE)
+	  && mode_signbit_p (mode, op1))
+	return simplify_gen_binary (PLUS, mode, op0, op1);
+      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
+      if ((GET_CODE (op1) == CONST_INT
+	   || GET_CODE (op1) == CONST_DOUBLE)
+	  && GET_CODE (op0) == PLUS
+	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
+	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
+	  && mode_signbit_p (mode, XEXP (op0, 1)))
+	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
+				    simplify_gen_binary (XOR, mode, op1,
+							 XEXP (op0, 1)));
+
+      tem = simplify_associative_operation (code, mode, op0, op1);
+      if (tem)
+	return tem;
+      break;
+
+    case AND:
+      if (trueop1 == const0_rtx && ! side_effects_p (op0))
+	return const0_rtx;
+      /* If we are turning off bits already known off in OP0, we need
+	 not do an AND.  */
+      if (GET_CODE (trueop1) == CONST_INT
+	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+	  && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
+	return op0;
+      if (trueop0 == trueop1 && ! side_effects_p (op0)
+	  && GET_MODE_CLASS (mode) != MODE_CC)
+	return op0;
+      /* A & (~A) -> 0 */
+      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
+	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
+	  && ! side_effects_p (op0)
+	  && GET_MODE_CLASS (mode) != MODE_CC)
+	return const0_rtx;
+
+      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
+	 there are no non-zero bits of C outside of X's mode.  */
+      if ((GET_CODE (op0) == SIGN_EXTEND
+	   || GET_CODE (op0) == ZERO_EXTEND)
+	  && GET_CODE (trueop1) == CONST_INT
+	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
+	      & INTVAL (trueop1)) == 0)
+	{
+	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
+	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
+				     gen_int_mode (INTVAL (trueop1),
+						   imode));
+	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
+	}
+
+      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
+	 ((A & N) + B) & M -> (A + B) & M
+	 Similarly if (N & M) == 0,
+	 ((A | N) + B) & M -> (A + B) & M
+	 and for - instead of + and/or ^ instead of |.  */
+      if (GET_CODE (trueop1) == CONST_INT
+	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+	  && ~INTVAL (trueop1)
+	  && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
+	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
+	{
+	  rtx pmop[2];
+	  int which;
+
+	  pmop[0] = XEXP (op0, 0);
+	  pmop[1] = XEXP (op0, 1);
+
+	  for (which = 0; which < 2; which++)
+	    {
+	      tem = pmop[which];
+	      switch (GET_CODE (tem))
+		{
+		case AND:
+		  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
+		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
+		      == INTVAL (trueop1))
+		    pmop[which] = XEXP (tem, 0);
+		  break;
+		case IOR:
+		case XOR:
+		  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
+		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
+		    pmop[which] = XEXP (tem, 0);
+		  break;
+		default:
+		  break;
+		}
+	    }
+
+	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
+	    {
+	      tem = simplify_gen_binary (GET_CODE (op0), mode,
+					 pmop[0], pmop[1]);
+	      return simplify_gen_binary (code, mode, tem, op1);
+	    }
+	}
+      tem = simplify_associative_operation (code, mode, op0, op1);
+      if (tem)
+	return tem;
+      break;
+
+    case UDIV:
+      /* 0/x is 0 (or x&0 if x has side-effects).  */
+      if (trueop0 == const0_rtx)
+	return side_effects_p (op1)
+	  ? simplify_gen_binary (AND, mode, op1, const0_rtx)
+	  : const0_rtx;
+      /* x/1 is x.  */
+      if (trueop1 == const1_rtx)
+	{
+	  /* Handle narrowing UDIV.  */
+	  rtx x = gen_lowpart_common (mode, op0);
+	  if (x)
+	    return x;
+	  if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
+	    return gen_lowpart_SUBREG (mode, op0);
+	  return op0;
+	}
+      /* Convert divide by power of two into shift.  */
+      if (GET_CODE (trueop1) == CONST_INT
+	  && (val = exact_log2 (INTVAL (trueop1))) > 0)
+	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
+      break;
+
+    case DIV:
+      /* Handle floating point and integers separately.  */
+      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+	{
+	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
+	     safe for modes with NaNs, since 0.0 / 0.0 will then be
+	     NaN rather than 0.0.  Nor is it safe for modes with signed
+	     zeros, since dividing 0 by a negative number gives -0.0  */
+	  if (trueop0 == CONST0_RTX (mode)
+	      && !HONOR_NANS (mode)
+	      && !HONOR_SIGNED_ZEROS (mode)
+	      && ! side_effects_p (op1))
+	    return op0;
+	  /* x/1.0 is x.  */
+	  if (trueop1 == CONST1_RTX (mode)
+	      && !HONOR_SNANS (mode))
+	    return op0;
+
+	  if (GET_CODE (trueop1) == CONST_DOUBLE
+	      && trueop1 != CONST0_RTX (mode))
+	    {
+	      REAL_VALUE_TYPE d;
+	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
+
+	      /* x/-1.0 is -x.  */
+	      if (REAL_VALUES_EQUAL (d, dconstm1)
+		  && !HONOR_SNANS (mode))
+		return simplify_gen_unary (NEG, mode, op0, mode);
+
+	      /* Change FP division by a constant into multiplication.
+		 Only do this with -funsafe-math-optimizations.  */
+	      if (flag_unsafe_math_optimizations
+		  && !REAL_VALUES_EQUAL (d, dconst0))
+		{
+		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
+		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
+		  return simplify_gen_binary (MULT, mode, op0, tem);
+		}
+	    }
+	}
+      else
+	{
+	  /* 0/x is 0 (or x&0 if x has side-effects).  */
+	  if (trueop0 == const0_rtx)
+	    return side_effects_p (op1)
+	      ? simplify_gen_binary (AND, mode, op1, const0_rtx)
+	      : const0_rtx;
+	  /* x/1 is x.  */
+	  if (trueop1 == const1_rtx)
+	    {
+	      /* Handle narrowing DIV.  */
+	      rtx x = gen_lowpart_common (mode, op0);
+	      if (x)
+		return x;
+	      if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
+		return gen_lowpart_SUBREG (mode, op0);
+	      return op0;
+	    }
+	  /* x/-1 is -x.  */
+	  if (trueop1 == constm1_rtx)
+	    {
+	      rtx x = gen_lowpart_common (mode, op0);
+	      if (!x)
+		x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
+		  ? gen_lowpart_SUBREG (mode, op0) : op0;
+	      return simplify_gen_unary (NEG, mode, x, mode);
+	    }
+	}
+      break;
+
+    case UMOD:
+      /* 0%x is 0 (or x&0 if x has side-effects).  */
+      if (trueop0 == const0_rtx)
+	return side_effects_p (op1)
+	  ? simplify_gen_binary (AND, mode, op1, const0_rtx)
+	  : const0_rtx;
+      /* x%1 is 0 (or x&0 if x has side-effects).  */
+      if (trueop1 == const1_rtx)
+	return side_effects_p (op0)
+	  ? simplify_gen_binary (AND, mode, op0, const0_rtx)
+	  : const0_rtx;
+      /* Implement modulus by power of two as AND.  */
+      if (GET_CODE (trueop1) == CONST_INT
+	  && exact_log2 (INTVAL (trueop1)) > 0)
+	return simplify_gen_binary (AND, mode, op0,
+				    GEN_INT (INTVAL (op1) - 1));
+      break;
+
+    case MOD:
+      /* 0%x is 0 (or x&0 if x has side-effects).  */
+      if (trueop0 == const0_rtx)
+	return side_effects_p (op1)
+	  ? simplify_gen_binary (AND, mode, op1, const0_rtx)
+	  : const0_rtx;
+      /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
+      if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
+	return side_effects_p (op0)
+	  ? simplify_gen_binary (AND, mode, op0, const0_rtx)
+	  : const0_rtx;
+      break;
+
+    case ROTATERT:
+    case ROTATE:
+    case ASHIFTRT:
+      /* Rotating ~0 always results in ~0.  */
+      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
+	  && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
+	  && ! side_effects_p (op1))
+	return op0;
+
+      /* Fall through....  */
+
+    case ASHIFT:
+    case LSHIFTRT:
+      if (trueop1 == const0_rtx)
+	return op0;
+      if (trueop0 == const0_rtx && ! side_effects_p (op1))
+	return op0;
+      break;
+
+    case SMIN:
+      if (width <= HOST_BITS_PER_WIDE_INT
+	  && GET_CODE (trueop1) == CONST_INT
+	  && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
+	  && ! side_effects_p (op0))
+	return op1;
+      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
+	return op0;
+      tem = simplify_associative_operation (code, mode, op0, op1);
+      if (tem)
+	return tem;
+      break;
+
+    case SMAX:
+      if (width <= HOST_BITS_PER_WIDE_INT
+	  && GET_CODE (trueop1) == CONST_INT
+	  && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
+	      == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
+	  && ! side_effects_p (op0))
+	return op1;
+      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
+	return op0;
+      tem = simplify_associative_operation (code, mode, op0, op1);
+      if (tem)
+	return tem;
+      break;
+
+    case UMIN:
+      if (trueop1 == const0_rtx && ! side_effects_p (op0))
+	return op1;
+      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
+	return op0;
+      tem = simplify_associative_operation (code, mode, op0, op1);
+      if (tem)
+	return tem;
+      break;
+
+    case UMAX:
+      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
+	return op1;
+      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
+	return op0;
+      tem = simplify_associative_operation (code, mode, op0, op1);
+      if (tem)
+	return tem;
+      break;
+
+    case SS_PLUS:
+    case US_PLUS:
+    case SS_MINUS:
+    case US_MINUS:
+      /* ??? There are simplifications that can be done.  */
+      return 0;
+
+    case VEC_SELECT:
+      if (!VECTOR_MODE_P (mode))
+	{
+	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
+	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
+	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
+	  gcc_assert (XVECLEN (trueop1, 0) == 1);
+	  gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
+
+	  if (GET_CODE (trueop0) == CONST_VECTOR)
+	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
+						      (trueop1, 0, 0)));
+	}
+      else
+	{
+	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
+	  gcc_assert (GET_MODE_INNER (mode)
+		      == GET_MODE_INNER (GET_MODE (trueop0)));
+	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
+
+	  if (GET_CODE (trueop0) == CONST_VECTOR)
+	    {
+	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
+	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
+	      rtvec v = rtvec_alloc (n_elts);
+	      unsigned int i;
+
+	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
+	      for (i = 0; i < n_elts; i++)
+		{
+		  rtx x = XVECEXP (trueop1, 0, i);
+
+		  gcc_assert (GET_CODE (x) == CONST_INT);
+		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
+						       INTVAL (x));
+		}
+
+	      return gen_rtx_CONST_VECTOR (mode, v);
+	    }
+	}
+      return 0;
+    case VEC_CONCAT:
+      {
+	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
+				      ? GET_MODE (trueop0)
+				      : GET_MODE_INNER (mode));
+	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
+				      ? GET_MODE (trueop1)
+				      : GET_MODE_INNER (mode));
+
+	gcc_assert (VECTOR_MODE_P (mode));
+	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
+		    == GET_MODE_SIZE (mode));
+
+	if (VECTOR_MODE_P (op0_mode))
+	  gcc_assert (GET_MODE_INNER (mode)
+		      == GET_MODE_INNER (op0_mode));
+	else
+	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);
+
+	if (VECTOR_MODE_P (op1_mode))
+	  gcc_assert (GET_MODE_INNER (mode)
+		      == GET_MODE_INNER (op1_mode));
+	else
+	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
+
+	if ((GET_CODE (trueop0) == CONST_VECTOR
+	     || GET_CODE (trueop0) == CONST_INT
+	     || GET_CODE (trueop0) == CONST_DOUBLE)
+	    && (GET_CODE (trueop1) == CONST_VECTOR
+		|| GET_CODE (trueop1) == CONST_INT
+		|| GET_CODE (trueop1) == CONST_DOUBLE))
+	  {
+	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
+	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
+	    rtvec v = rtvec_alloc (n_elts);
+	    unsigned int i;
+	    unsigned in_n_elts = 1;
+
+	    if (VECTOR_MODE_P (op0_mode))
+	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
+	    for (i = 0; i < n_elts; i++)
+	      {
+		if (i < in_n_elts)
+		  {
+		    if (!VECTOR_MODE_P (op0_mode))
+		      RTVEC_ELT (v, i) = trueop0;
+		    else
+		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
+		  }
+		else
+		  {
+		    if (!VECTOR_MODE_P (op1_mode))
+		      RTVEC_ELT (v, i) = trueop1;
+		    else
+		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
+							   i - in_n_elts);
+		  }
+	      }
+
+	    return gen_rtx_CONST_VECTOR (mode, v);
+	  }
+      }
+      return 0;
+
+    default:
+      gcc_unreachable ();
+    }
+
+  return 0;
+}
+
 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
    and OP1.  Return 0 if no simplification is possible.
 
@@ -1502,804 +2302,7 @@ simplify_binary_operation (enum rtx_code
 
   if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
       || width > HOST_BITS_PER_WIDE_INT || width == 0)
-    {
-      /* Even if we can't compute a constant result,
-	 there are some cases worth simplifying.  */
-
-      switch (code)
-	{
-	case PLUS:
-	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
-	     when x is NaN, infinite, or finite and nonzero.  They aren't
-	     when x is -0 and the rounding mode is not towards -infinity,
-	     since (-0) + 0 is then 0.  */
-	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
-	    return op0;
-
-	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
-	     transformations are safe even for IEEE.  */
-	  if (GET_CODE (op0) == NEG)
-	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
-	  else if (GET_CODE (op1) == NEG)
-	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
-
-	  /* (~a) + 1 -> -a */
-	  if (INTEGRAL_MODE_P (mode)
-	      && GET_CODE (op0) == NOT
-	      && trueop1 == const1_rtx)
-	    return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
-
-	  /* Handle both-operands-constant cases.  We can only add
-	     CONST_INTs to constants since the sum of relocatable symbols
-	     can't be handled by most assemblers.  Don't add CONST_INT
-	     to CONST_INT since overflow won't be computed properly if wider
-	     than HOST_BITS_PER_WIDE_INT.  */
-
-	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
-	      && GET_CODE (op1) == CONST_INT)
-	    return plus_constant (op0, INTVAL (op1));
-	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
-		   && GET_CODE (op0) == CONST_INT)
-	    return plus_constant (op1, INTVAL (op0));
-
-	  /* See if this is something like X * C - X or vice versa or
-	     if the multiplication is written as a shift.  If so, we can
-	     distribute and make a new multiply, shift, or maybe just
-	     have X (if C is 2 in the example above).  But don't make
-	     something more expensive than we had before.  */
-
-	  if (! FLOAT_MODE_P (mode))
-	    {
-	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
-	      rtx lhs = op0, rhs = op1;
-
-	      if (GET_CODE (lhs) == NEG)
-		coeff0 = -1, lhs = XEXP (lhs, 0);
-	      else if (GET_CODE (lhs) == MULT
-		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
-		{
-		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
-		}
-	      else if (GET_CODE (lhs) == ASHIFT
-		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
-		       && INTVAL (XEXP (lhs, 1)) >= 0
-		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
-		{
-		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
-		  lhs = XEXP (lhs, 0);
-		}
-
-	      if (GET_CODE (rhs) == NEG)
-		coeff1 = -1, rhs = XEXP (rhs, 0);
-	      else if (GET_CODE (rhs) == MULT
-		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
-		{
-		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
-		}
-	      else if (GET_CODE (rhs) == ASHIFT
-		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
-		       && INTVAL (XEXP (rhs, 1)) >= 0
-		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
-		{
-		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
-		  rhs = XEXP (rhs, 0);
-		}
-
-	      if (rtx_equal_p (lhs, rhs))
-		{
-		  rtx orig = gen_rtx_PLUS (mode, op0, op1);
-		  tem = simplify_gen_binary (MULT, mode, lhs,
-					     GEN_INT (coeff0 + coeff1));
-		  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
-			 ? tem : 0;
-		}
-	    }
-
-	  /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
-	  if ((GET_CODE (op1) == CONST_INT
-	       || GET_CODE (op1) == CONST_DOUBLE)
-	      && GET_CODE (op0) == XOR
-	      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
-		  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
-	      && mode_signbit_p (mode, op1))
-	    return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
-					simplify_gen_binary (XOR, mode, op1,
-							     XEXP (op0, 1)));
-
-	  /* If one of the operands is a PLUS or a MINUS, see if we can
-	     simplify this by the associative law.
-	     Don't use the associative law for floating point.
-	     The inaccuracy makes it nonassociative,
-	     and subtle programs can break if operations are associated.  */
-
-	  if (INTEGRAL_MODE_P (mode)
-	      && (plus_minus_operand_p (op0)
-		  || plus_minus_operand_p (op1))
-	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
-	    return tem;
-
-	  /* Reassociate floating point addition only when the user
-	     specifies unsafe math optimizations.  */
-	  if (FLOAT_MODE_P (mode)
-	      && flag_unsafe_math_optimizations)
-	    {
-	      tem = simplify_associative_operation (code, mode, op0, op1);
-	      if (tem)
-		return tem;
-	    }
-	  break;
-
-	case COMPARE:
-#ifdef HAVE_cc0
-	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
-	     using cc0, in which case we want to leave it as a COMPARE
-	     so we can distinguish it from a register-register-copy.
-
-	     In IEEE floating point, x-0 is not the same as x.  */
-
-	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
-	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
-	      && trueop1 == CONST0_RTX (mode))
-	    return op0;
-#endif
-
-	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
-	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
-	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
-	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
-	    {
-	      rtx xop00 = XEXP (op0, 0);
-	      rtx xop10 = XEXP (op1, 0);
-
-#ifdef HAVE_cc0
-	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
-#else
-	      if (REG_P (xop00) && REG_P (xop10)
-		  && GET_MODE (xop00) == GET_MODE (xop10)
-		  && REGNO (xop00) == REGNO (xop10)
-		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
-		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
-#endif
-		return xop00;
-	    }
-	  break;
-
-	case MINUS:
-	  /* We can't assume x-x is 0 even with non-IEEE floating point,
-	     but since it is zero except in very strange circumstances, we
-	     will treat it as zero with -funsafe-math-optimizations.  */
-	  if (rtx_equal_p (trueop0, trueop1)
-	      && ! side_effects_p (op0)
-	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
-	    return CONST0_RTX (mode);
-
-	  /* Change subtraction from zero into negation.  (0 - x) is the
-	     same as -x when x is NaN, infinite, or finite and nonzero.
-	     But if the mode has signed zeros, and does not round towards
-	     -infinity, then 0 - 0 is 0, not -0.  */
-	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
-	    return simplify_gen_unary (NEG, mode, op1, mode);
-
-	  /* (-1 - a) is ~a.  */
-	  if (trueop0 == constm1_rtx)
-	    return simplify_gen_unary (NOT, mode, op1, mode);
-
-	  /* Subtracting 0 has no effect unless the mode has signed zeros
-	     and supports rounding towards -infinity.  In such a case,
-	     0 - 0 is -0.  */
-	  if (!(HONOR_SIGNED_ZEROS (mode)
-		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
-	      && trueop1 == CONST0_RTX (mode))
-	    return op0;
-
-	  /* See if this is something like X * C - X or vice versa or
-	     if the multiplication is written as a shift.  If so, we can
-	     distribute and make a new multiply, shift, or maybe just
-	     have X (if C is 2 in the example above).  But don't make
-	     something more expensive than we had before.  */
-
-	  if (! FLOAT_MODE_P (mode))
-	    {
-	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
-	      rtx lhs = op0, rhs = op1;
-
-	      if (GET_CODE (lhs) == NEG)
-		coeff0 = -1, lhs = XEXP (lhs, 0);
-	      else if (GET_CODE (lhs) == MULT
-		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
-		{
-		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
-		}
-	      else if (GET_CODE (lhs) == ASHIFT
-		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
-		       && INTVAL (XEXP (lhs, 1)) >= 0
-		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
-		{
-		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
-		  lhs = XEXP (lhs, 0);
-		}
-
-	      if (GET_CODE (rhs) == NEG)
-		coeff1 = - 1, rhs = XEXP (rhs, 0);
-	      else if (GET_CODE (rhs) == MULT
-		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
-		{
-		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
-		}
-	      else if (GET_CODE (rhs) == ASHIFT
-		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
-		       && INTVAL (XEXP (rhs, 1)) >= 0
-		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
-		{
-		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
-		  rhs = XEXP (rhs, 0);
-		}
-
-	      if (rtx_equal_p (lhs, rhs))
-		{
-		  rtx orig = gen_rtx_MINUS (mode, op0, op1);
-		  tem = simplify_gen_binary (MULT, mode, lhs,
-					     GEN_INT (coeff0 - coeff1));
-		  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
-			 ? tem : 0;
-		}
-	    }
-
-	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
-	  if (GET_CODE (op1) == NEG)
-	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
-
-	  /* (-x - c) may be simplified as (-c - x).  */
-	  if (GET_CODE (op0) == NEG
-	      && (GET_CODE (op1) == CONST_INT
-		  || GET_CODE (op1) == CONST_DOUBLE))
-	    {
-	      tem = simplify_unary_operation (NEG, mode, op1, mode);
-	      if (tem)
-		return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
-	    }
-
-	  /* If one of the operands is a PLUS or a MINUS, see if we can
-	     simplify this by the associative law.
-	     Don't use the associative law for floating point.
-	     The inaccuracy makes it nonassociative,
-	     and subtle programs can break if operations are associated.  */
-
-	  if (INTEGRAL_MODE_P (mode)
-	      && (plus_minus_operand_p (op0)
-		  || plus_minus_operand_p (op1))
-	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
-	    return tem;
-
-	  /* Don't let a relocatable value get a negative coeff.  */
-	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
-	    return simplify_gen_binary (PLUS, mode,
-					op0,
-					neg_const_int (mode, op1));
-
-	  /* (x - (x & y)) -> (x & ~y) */
-	  if (GET_CODE (op1) == AND)
-	    {
-	      if (rtx_equal_p (op0, XEXP (op1, 0)))
-		{
-		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
-					    GET_MODE (XEXP (op1, 1)));
-		  return simplify_gen_binary (AND, mode, op0, tem);
-		}
-	      if (rtx_equal_p (op0, XEXP (op1, 1)))
-		{
-		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
-					    GET_MODE (XEXP (op1, 0)));
-		  return simplify_gen_binary (AND, mode, op0, tem);
-		}
-	    }
-	  break;
-
-	case MULT:
-	  if (trueop1 == constm1_rtx)
-	    return simplify_gen_unary (NEG, mode, op0, mode);
-
-	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
-	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
-	     when the mode has signed zeros, since multiplying a negative
-	     number by 0 will give -0, not 0.  */
-	  if (!HONOR_NANS (mode)
-	      && !HONOR_SIGNED_ZEROS (mode)
-	      && trueop1 == CONST0_RTX (mode)
-	      && ! side_effects_p (op0))
-	    return op1;
-
-	  /* In IEEE floating point, x*1 is not equivalent to x for
-	     signalling NaNs.  */
-	  if (!HONOR_SNANS (mode)
-	      && trueop1 == CONST1_RTX (mode))
-	    return op0;
-
-	  /* Convert multiply by constant power of two into shift unless
-	     we are still generating RTL.  This test is a kludge.  */
-	  if (GET_CODE (trueop1) == CONST_INT
-	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
-	      /* If the mode is larger than the host word size, and the
-		 uppermost bit is set, then this isn't a power of two due
-		 to implicit sign extension.  */
-	      && (width <= HOST_BITS_PER_WIDE_INT
-		  || val != HOST_BITS_PER_WIDE_INT - 1))
-	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
-
-	  /* x*2 is x+x and x*(-1) is -x */
-	  if (GET_CODE (trueop1) == CONST_DOUBLE
-	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
-	      && GET_MODE (op0) == mode)
-	    {
-	      REAL_VALUE_TYPE d;
-	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
-
-	      if (REAL_VALUES_EQUAL (d, dconst2))
-		return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
-
-	      if (REAL_VALUES_EQUAL (d, dconstm1))
-		return simplify_gen_unary (NEG, mode, op0, mode);
-	    }
-
-	  /* Reassociate multiplication, but for floating point MULTs
-	     only when the user specifies unsafe math optimizations.  */
-	  if (! FLOAT_MODE_P (mode)
-	      || flag_unsafe_math_optimizations)
-	    {
-	      tem = simplify_associative_operation (code, mode, op0, op1);
-	      if (tem)
-		return tem;
-	    }
-	  break;
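
The power-of-two test above relies on exact_log2 returning the logarithm when its argument is a power of two and a negative value otherwise. The following simplified stand-in (my_exact_log2 is my name and a rough model, not GCC's implementation) shows the shape of the transformation:

  #include <assert.h>

  /* Simplified model of exact_log2: >= 0 iff N is a power of two.  */
  static int
  my_exact_log2 (unsigned long long n)
  {
    int log = 0;

    if (n == 0 || (n & (n - 1)) != 0)
      return -1;
    while (n >>= 1)
      log++;
    return log;
  }

  int
  main (void)
  {
    unsigned x = 123u;

    assert (my_exact_log2 (8) == 3 && x * 8 == (x << 3));
    assert (my_exact_log2 (6) == -1);	/* not a power of two */
    return 0;
  }
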
-
-	case IOR:
-	  if (trueop1 == const0_rtx)
-	    return op0;
-	  if (GET_CODE (trueop1) == CONST_INT
-	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
-	          == GET_MODE_MASK (mode)))
-	    return op1;
-	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
-	    return op0;
-	  /* A | (~A) -> -1 */
-	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
-	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
-	      && ! side_effects_p (op0)
-	      && GET_MODE_CLASS (mode) != MODE_CC)
-	    return constm1_rtx;
-	  tem = simplify_associative_operation (code, mode, op0, op1);
-	  if (tem)
-	    return tem;
-	  break;
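
The A | (~A) -> -1 fold holds because every bit position is set in one operand or the other, and on a two's-complement target the all-ones pattern is -1. A quick check in plain C (sample value mine):

  #include <assert.h>

  int
  main (void)
  {
    int a = 42;
    /* Each bit is set either in a or in ~a, so the IOR is all ones.  */
    assert ((a | ~a) == -1);
    return 0;
  }
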
-
-	case XOR:
-	  if (trueop1 == const0_rtx)
-	    return op0;
-	  if (GET_CODE (trueop1) == CONST_INT
-	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
-		  == GET_MODE_MASK (mode)))
-	    return simplify_gen_unary (NOT, mode, op0, mode);
-	  if (trueop0 == trueop1
-	      && ! side_effects_p (op0)
-	      && GET_MODE_CLASS (mode) != MODE_CC)
-	    return const0_rtx;
-
-	  /* Canonicalize XOR of the most significant bit to PLUS.  */
-	  if ((GET_CODE (op1) == CONST_INT
-	       || GET_CODE (op1) == CONST_DOUBLE)
-	      && mode_signbit_p (mode, op1))
-	    return simplify_gen_binary (PLUS, mode, op0, op1);
-	  /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
-	  if ((GET_CODE (op1) == CONST_INT
-	       || GET_CODE (op1) == CONST_DOUBLE)
-	      && GET_CODE (op0) == PLUS
-	      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
-		  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
-	      && mode_signbit_p (mode, XEXP (op0, 1)))
-	    return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
-					simplify_gen_binary (XOR, mode, op1,
-							     XEXP (op0, 1)));
-	      
-	  tem = simplify_associative_operation (code, mode, op0, op1);
-	  if (tem)
-	    return tem;
-	  break;
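
Canonicalizing a sign-bit XOR to PLUS is valid because adding the sign bit touches only the top bit, and the carry out of it is discarded in wraparound arithmetic. An illustrative check for 32-bit values (constants mine, not part of the patch):

  #include <assert.h>
  #include <stdint.h>

  int
  main (void)
  {
    uint32_t x = 0x12345678u;
    uint32_t signbit = 0x80000000u;	/* what mode_signbit_p tests for */
    /* Adding the sign bit flips only bit 31; the carry is dropped.  */
    assert ((x ^ signbit) == x + signbit);
    return 0;
  }
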
-
-	case AND:
-	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
-	    return const0_rtx;
-	  /* If we are turning off bits already known off in OP0, we need
-	     not do an AND.  */
-	  if (GET_CODE (trueop1) == CONST_INT
-	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
-	      && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
-	    return op0;
-	  if (trueop0 == trueop1 && ! side_effects_p (op0)
-	      && GET_MODE_CLASS (mode) != MODE_CC)
-	    return op0;
-	  /* A & (~A) -> 0 */
-	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
-	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
-	      && ! side_effects_p (op0)
-	      && GET_MODE_CLASS (mode) != MODE_CC)
-	    return const0_rtx;
-
-	  /* Transform (and (extend X) C) into (zero_extend (and X C)) if
-	     there are no non-zero bits of C outside of X's mode.  */
-	  if ((GET_CODE (op0) == SIGN_EXTEND
-	       || GET_CODE (op0) == ZERO_EXTEND)
-	      && GET_CODE (trueop1) == CONST_INT
-	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
-	      && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
-		  & INTVAL (trueop1)) == 0)
-	    {
-	      enum machine_mode imode = GET_MODE (XEXP (op0, 0));
-	      tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
-					 gen_int_mode (INTVAL (trueop1),
-						       imode));
-	      return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
-	    }
-
-	  /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
-	     ((A & N) + B) & M -> (A + B) & M
-	     Similarly if (N & M) == 0,
-	     ((A | N) + B) & M -> (A + B) & M
-	     and for - instead of + and/or ^ instead of |.  */
-	  if (GET_CODE (trueop1) == CONST_INT
-	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
-	      && ~INTVAL (trueop1)
-	      && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
-	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
-	    {
-	      rtx pmop[2];
-	      int which;
-
-	      pmop[0] = XEXP (op0, 0);
-	      pmop[1] = XEXP (op0, 1);
-
-	      for (which = 0; which < 2; which++)
-		{
-		  tem = pmop[which];
-		  switch (GET_CODE (tem))
-		    {
-		    case AND:
-		      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
-			  && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
-			     == INTVAL (trueop1))
-			pmop[which] = XEXP (tem, 0);
-		      break;
-		    case IOR:
-		    case XOR:
-		      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
-			  && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
-			pmop[which] = XEXP (tem, 0);
-		      break;
-		    default:
-		      break;
-		    }
-		}
-
-	      if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
-		{
-		  tem = simplify_gen_binary (GET_CODE (op0), mode,
-					     pmop[0], pmop[1]);
-		  return simplify_gen_binary (code, mode, tem, op1);
-		}
-	    }
-	  tem = simplify_associative_operation (code, mode, op0, op1);
-	  if (tem)
-	    return tem;
-	  break;
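
The ((A & N) + B) & M family of folds is sound because, with M a low-bit mask, the masked sum depends only on the operands modulo M+1, and the inner AND/IOR/XOR with N leaves A unchanged in those low bits. A standalone check with sample constants of my choosing:

  #include <assert.h>
  #include <stdint.h>

  int
  main (void)
  {
    uint32_t a = 0xcafef00du, b = 0x1234u;
    uint32_t m = 0xffu;		/* (1 << 8) - 1, a low-bit mask */
    uint32_t n1 = 0xabcd00ffu;	/* (n1 & m) == m */
    uint32_t n2 = 0xffff0000u;	/* (n2 & m) == 0 */

    /* Bits above the mask cannot reach the masked low bits of a sum.  */
    assert ((((a & n1) + b) & m) == ((a + b) & m));
    assert ((((a | n2) + b) & m) == ((a + b) & m));
    return 0;
  }
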
-
-	case UDIV:
-	  /* 0/x is 0 (or x&0 if x has side-effects).  */
-	  if (trueop0 == const0_rtx)
-	    return side_effects_p (op1)
-		   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
-		   : const0_rtx;
-	  /* x/1 is x.  */
-	  if (trueop1 == const1_rtx)
-	    {
-	      /* Handle narrowing UDIV.  */
-	      rtx x = gen_lowpart_common (mode, op0);
-	      if (x)
-		return x;
-	      if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
-		return gen_lowpart_SUBREG (mode, op0);
-	      return op0;
-	    }
-	  /* Convert divide by power of two into shift.  */
-	  if (GET_CODE (trueop1) == CONST_INT
-	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
-	    return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
-	  break;
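
Unsigned division by a power of two is exactly a logical shift right, which is why the UDIV case can emit LSHIFTRT without worrying about the rounding of negative quotients. A quick check (value mine):

  #include <assert.h>
  #include <stdint.h>

  int
  main (void)
  {
    uint32_t x = 0xdeadbeefu;
    assert (x / 16 == (x >> 4));	/* exact_log2 (16) == 4 */
    return 0;
  }
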
-
-	case DIV:
-	  /* Handle floating point and integers separately.  */
-	  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
-	    {
-	      /* Maybe change 0.0 / x to 0.0.  This transformation isn't
-		 safe for modes with NaNs, since 0.0 / 0.0 will then be
-		 NaN rather than 0.0.  Nor is it safe for modes with signed
-		 zeros, since dividing 0 by a negative number gives -0.0  */
-	      if (trueop0 == CONST0_RTX (mode)
-		  && !HONOR_NANS (mode)
-		  && !HONOR_SIGNED_ZEROS (mode)
-		  && ! side_effects_p (op1))
-		return op0;
-	      /* x/1.0 is x.  */
-	      if (trueop1 == CONST1_RTX (mode)
-		  && !HONOR_SNANS (mode))
-		return op0;
-
-	      if (GET_CODE (trueop1) == CONST_DOUBLE
-		  && trueop1 != CONST0_RTX (mode))
-		{
-		  REAL_VALUE_TYPE d;
-		  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
-
-		  /* x/-1.0 is -x.  */
-		  if (REAL_VALUES_EQUAL (d, dconstm1)
-		      && !HONOR_SNANS (mode))
-		    return simplify_gen_unary (NEG, mode, op0, mode);
-
-	          /* Change FP division by a constant into multiplication.
-		     Only do this with -funsafe-math-optimizations.  */
-		  if (flag_unsafe_math_optimizations
-		      && !REAL_VALUES_EQUAL (d, dconst0))
-		    {
-		      REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
-		      tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
-		      return simplify_gen_binary (MULT, mode, op0, tem);
-		    }
-		}
-	    }
-	  else
-	    {
-	      /* 0/x is 0 (or x&0 if x has side-effects).  */
-	      if (trueop0 == const0_rtx)
-		return side_effects_p (op1)
-		       ? simplify_gen_binary (AND, mode, op1, const0_rtx)
-		       : const0_rtx;
-	      /* x/1 is x.  */
-	      if (trueop1 == const1_rtx)
-		{
-		  /* Handle narrowing DIV.  */
-		  rtx x = gen_lowpart_common (mode, op0);
-		  if (x)
-		    return x;
-		  if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
-		    return gen_lowpart_SUBREG (mode, op0);
-		  return op0;
-		}
-	      /* x/-1 is -x.  */
-	      if (trueop1 == constm1_rtx)
-		{
-		  rtx x = gen_lowpart_common (mode, op0);
-		  if (!x)
-		    x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
-			? gen_lowpart_SUBREG (mode, op0) : op0;
-		  return simplify_gen_unary (NEG, mode, x, mode);
-		}
-	    }
-	  break;
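
The reciprocal transformation in the FP branch is only an approximation in general, which is why it is gated on flag_unsafe_math_optimizations; it happens to be exact when the constant's reciprocal is representable. A small illustration under IEEE double (constants mine):

  #include <assert.h>

  int
  main (void)
  {
    double x = 3.0;
    /* 1/4.0 is exactly representable, so this pair agrees; for a
       constant like 3.0 the reciprocal rounds and the two forms can
       differ in the last bit.  */
    assert (x / 4.0 == x * (1.0 / 4.0));
    return 0;
  }
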
-
-	case UMOD:
-	  /* 0%x is 0 (or x&0 if x has side-effects).  */
-	  if (trueop0 == const0_rtx)
-	    return side_effects_p (op1)
-		   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
-		   : const0_rtx;
-	  /* x%1 is 0 (or x&0 if x has side-effects).  */
-	  if (trueop1 == const1_rtx)
-	    return side_effects_p (op0)
-		   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
-		   : const0_rtx;
-	  /* Implement modulus by power of two as AND.  */
-	  if (GET_CODE (trueop1) == CONST_INT
-	      && exact_log2 (INTVAL (trueop1)) > 0)
-	    return simplify_gen_binary (AND, mode, op0,
-					GEN_INT (INTVAL (op1) - 1));
-	  break;
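
Modulus by a power of two reduces to a bit mask for unsigned values, matching the AND emitted above. For example:

  #include <assert.h>
  #include <stdint.h>

  int
  main (void)
  {
    uint32_t x = 0xfeedfaceu;
    /* The mask is INTVAL (op1) - 1, here 8 - 1 == 7.  */
    assert (x % 8 == (x & 7));
    return 0;
  }
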
-
-	case MOD:
-	  /* 0%x is 0 (or x&0 if x has side-effects).  */
-	  if (trueop0 == const0_rtx)
-	    return side_effects_p (op1)
-		   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
-		   : const0_rtx;
-	  /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
-	  if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
-	    return side_effects_p (op0)
-		   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
-		   : const0_rtx;
-	  break;
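
The recurring "(or x&0 if x has side-effects)" pattern exists because the discarded operand may still have to be evaluated; folding straight to 0 would silently drop the evaluation. The C-level analogue (bump is a hypothetical helper of mine):

  #include <assert.h>

  static int calls;

  static int
  bump (void)
  {
    return ++calls;
  }

  int
  main (void)
  {
    /* The call must survive even though the result is known to be 0,
       which is what wrapping the operand in an AND with 0 achieves
       at the RTL level.  */
    int r = 0 % bump ();
    assert (r == 0 && calls == 1);
    return 0;
  }
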
-
-	case ROTATERT:
-	case ROTATE:
-	case ASHIFTRT:
-	  /* Rotating ~0 always results in ~0.  */
-	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
-	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
-	      && ! side_effects_p (op1))
-	    return op0;
-
-	  /* Fall through....  */
-
-	case ASHIFT:
-	case LSHIFTRT:
-	  if (trueop1 == const0_rtx)
-	    return op0;
-	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
-	    return op0;
-	  break;
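
Rotating an all-ones value just moves set bits onto set bits, so the ROTATE/ROTATERT/ASHIFTRT fold above can return the operand unchanged. A portable model (rotr32 is my helper, not an RTL primitive):

  #include <assert.h>
  #include <stdint.h>

  static uint32_t
  rotr32 (uint32_t x, unsigned n)
  {
    n &= 31;
    return n ? (x >> n) | (x << (32 - n)) : x;
  }

  int
  main (void)
  {
    assert (rotr32 (0xffffffffu, 13) == 0xffffffffu);
    return 0;
  }
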
-
-	case SMIN:
-	  if (width <= HOST_BITS_PER_WIDE_INT
-	      && GET_CODE (trueop1) == CONST_INT
-	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
-	      && ! side_effects_p (op0))
-	    return op1;
-	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
-	    return op0;
-	  tem = simplify_associative_operation (code, mode, op0, op1);
-	  if (tem)
-	    return tem;
-	  break;
-
-	case SMAX:
-	  if (width <= HOST_BITS_PER_WIDE_INT
-	      && GET_CODE (trueop1) == CONST_INT
-	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
-		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
-	      && ! side_effects_p (op0))
-	    return op1;
-	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
-	    return op0;
-	  tem = simplify_associative_operation (code, mode, op0, op1);
-	  if (tem)
-	    return tem;
-	  break;
-
-	case UMIN:
-	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
-	    return op1;
-	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
-	    return op0;
-	  tem = simplify_associative_operation (code, mode, op0, op1);
-	  if (tem)
-	    return tem;
-	  break;
-
-	case UMAX:
-	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
-	    return op1;
-	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
-	    return op0;
-	  tem = simplify_associative_operation (code, mode, op0, op1);
-	  if (tem)
-	    return tem;
-	  break;
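
The magic constants in these four min/max cases are the extreme values of the mode: the most negative value for SMIN, the most positive for SMAX, zero for UMIN and all-ones (constm1) for UMAX, so the result is known without examining the other operand. For instance, for SMIN and UMAX (helper names mine):

  #include <assert.h>
  #include <limits.h>

  static int
  smin (int a, int b)
  {
    return a < b ? a : b;
  }

  static unsigned
  umax (unsigned a, unsigned b)
  {
    return a > b ? a : b;
  }

  int
  main (void)
  {
    /* Nothing is smaller than INT_MIN; nothing is larger than UINT_MAX.  */
    assert (smin (-7, INT_MIN) == INT_MIN);
    assert (umax (1234u, UINT_MAX) == UINT_MAX);
    return 0;
  }
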
-
-	case SS_PLUS:
-	case US_PLUS:
-	case SS_MINUS:
-	case US_MINUS:
-	  /* ??? There are simplifications that can be done.  */
-	  return 0;
-
-	case VEC_SELECT:
-	  if (!VECTOR_MODE_P (mode))
-	    {
-	      gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
-	      gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
-	      gcc_assert (GET_CODE (trueop1) == PARALLEL);
-	      gcc_assert (XVECLEN (trueop1, 0) == 1);
-	      gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
-
-	      if (GET_CODE (trueop0) == CONST_VECTOR)
-		return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
-							  (trueop1, 0, 0)));
-	    }
-	  else
-	    {
-	      gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
-	      gcc_assert (GET_MODE_INNER (mode)
-			  == GET_MODE_INNER (GET_MODE (trueop0)));
-	      gcc_assert (GET_CODE (trueop1) == PARALLEL);
-
-	      if (GET_CODE (trueop0) == CONST_VECTOR)
-		{
-		  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
-		  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
-		  rtvec v = rtvec_alloc (n_elts);
-		  unsigned int i;
-
-		  gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
-		  for (i = 0; i < n_elts; i++)
-		    {
-		      rtx x = XVECEXP (trueop1, 0, i);
-
-		      gcc_assert (GET_CODE (x) == CONST_INT);
-		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
-							   INTVAL (x));
-		    }
-
-		  return gen_rtx_CONST_VECTOR (mode, v);
-		}
-	    }
-	  return 0;
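
Folding a VEC_SELECT of a CONST_VECTOR is just constant indexing: each CONST_INT in the PARALLEL picks one element of the constant operand. A scalar model in plain C, with arrays standing in for the rtx vectors:

  #include <assert.h>

  int
  main (void)
  {
    int cv[4] = { 10, 20, 30, 40 };	/* the CONST_VECTOR */
    int sel[2] = { 3, 1 };		/* the PARALLEL of CONST_INTs */
    int out[2];
    int i;

    for (i = 0; i < 2; i++)
      out[i] = cv[sel[i]];
    assert (out[0] == 40 && out[1] == 20);
    return 0;
  }
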
-	case VEC_CONCAT:
-	  {
-	    enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
-					  ? GET_MODE (trueop0)
-					  : GET_MODE_INNER (mode));
-	    enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
-					  ? GET_MODE (trueop1)
-					  : GET_MODE_INNER (mode));
-
-	    gcc_assert (VECTOR_MODE_P (mode));
-	    gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
-			== GET_MODE_SIZE (mode));
-
-	    if (VECTOR_MODE_P (op0_mode))
-	      gcc_assert (GET_MODE_INNER (mode)
-			  == GET_MODE_INNER (op0_mode));
-	    else
-	      gcc_assert (GET_MODE_INNER (mode) == op0_mode);
-
-	    if (VECTOR_MODE_P (op1_mode))
-	      gcc_assert (GET_MODE_INNER (mode)
-			  == GET_MODE_INNER (op1_mode));
-	    else
-	      gcc_assert (GET_MODE_INNER (mode) == op1_mode);
-
-	    if ((GET_CODE (trueop0) == CONST_VECTOR
-		 || GET_CODE (trueop0) == CONST_INT
-		 || GET_CODE (trueop0) == CONST_DOUBLE)
-		&& (GET_CODE (trueop1) == CONST_VECTOR
-		    || GET_CODE (trueop1) == CONST_INT
-		    || GET_CODE (trueop1) == CONST_DOUBLE))
-	      {
-		int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
-		unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
-		rtvec v = rtvec_alloc (n_elts);
-		unsigned int i;
-		unsigned in_n_elts = 1;
-
-		if (VECTOR_MODE_P (op0_mode))
-		  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
-		for (i = 0; i < n_elts; i++)
-		  {
-		    if (i < in_n_elts)
-		      {
-			if (!VECTOR_MODE_P (op0_mode))
-			  RTVEC_ELT (v, i) = trueop0;
-			else
-			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
-		      }
-		    else
-		      {
-			if (!VECTOR_MODE_P (op1_mode))
-			  RTVEC_ELT (v, i) = trueop1;
-			else
-			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
-							       i - in_n_elts);
-		      }
-		  }
-
-		return gen_rtx_CONST_VECTOR (mode, v);
-	      }
-	  }
-	  return 0;
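
The constant VEC_CONCAT fold builds the result by taking in_n_elts elements from the first operand and the remainder from the second, exactly as the loop above does. The same logic over plain arrays (sample data mine):

  #include <assert.h>

  int
  main (void)
  {
    int a[2] = { 1, 2 }, b[2] = { 3, 4 };
    int v[4];
    unsigned i, in_n_elts = 2;

    for (i = 0; i < 4; i++)
      v[i] = i < in_n_elts ? a[i] : b[i - in_n_elts];
    assert (v[0] == 1 && v[1] == 2 && v[2] == 3 && v[3] == 4);
    return 0;
  }
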
-
-	default:
-	  gcc_unreachable ();
-	}
-
-      return 0;
-    }
-
+    return simplify_binary_operation_1 (code, mode, op0, op1,
+					 trueop0, trueop1);
   /* Get the integer argument values in two forms:
      zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
 
