+2004-05-28 Paolo Bonzini <bonzini@gnu.org>
+ Roger Sayle <roger@eyesopen.com>
+
+ PR rtl-optimization/15649
+ Add LTGT_EXPR and improve pretty-printing of unordered
+ comparisons.
+ * c-common.c (c_common_truthvalue_conversion):
+ Handle LTGT_EXPR.
+ * c-typeck.c (build_binary_op): Likewise.
+ * dojump.c (do_jump): Likewise.
+ * expr.c (expand_expr_real_1, do_store_flag): Likewise.
+ * predict.c (tree_predict_by_opcode): Likewise.
+ * real.c (real_compare): Likewise.
+ * tree-cfg.c (verify_expr): Likewise.
+ * tree-inline.c (estimate_num_insns_1): Likewise.
+ * tree-pretty-print.c (dump_generic_node): Likewise.
+ Handle ORDERED_EXPR, UNORDERED_EXPR.
+ (op_symbol): Print unordered comparisons differently
+ than ordered ones.
+ * tree.def (LTGT_EXPR): New '<' tree code.
+ * doc/c-tree.texi (Expressions): Document floating-point
+ comparison nodes.
+
+ Fold comparisons between floating point values.
+ * fold-const.c (enum comparison_code): New, from
+ #define'd constants. Define compcodes for unordered
+ comparisons and for invalid transformations.
+ (invert_tree_comparison): Add "honor_nans" parameter.
+ (fold_truthop): Revamp to work on floating-point types too.
+ (comparison_to_compcode): Support unordered comparisons.
+ Use new enum comparison_code.
+ (compcode_to_comparison): Likewise.
+	(combine_comparisons): New function.
+ (invert_truthvalue): Let invert_tree_comparison decide
+ whether it is valid to fold the comparison. Fold ORDERED
+ and UNORDERED even if flag_unsafe_math_optimizations is off,
+	and the remaining comparisons even if flag_unsafe_math_optimizations
+	is off, as long as we are under -fno-trapping-math.
+ (fold_relational_const): Integer modes do not honor NaNs.
+
2004-05-28 Paul Brook <paul@codesourcery.com>
* config/arm/arm.c (arm_output_epilogue): Remove redundant code.
switch (TREE_CODE (expr))
{
- case EQ_EXPR: case NE_EXPR: case UNEQ_EXPR:
+ case EQ_EXPR: case NE_EXPR: case UNEQ_EXPR: case LTGT_EXPR:
case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR:
case UNLE_EXPR: case UNGE_EXPR: case UNLT_EXPR: case UNGT_EXPR:
case ORDERED_EXPR: case UNORDERED_EXPR:
case UNGT_EXPR:
case UNGE_EXPR:
case UNEQ_EXPR:
+ case LTGT_EXPR:
build_type = integer_type_node;
if (code0 != REAL_TYPE || code1 != REAL_TYPE)
{
@tindex GE_EXPR
@tindex EQ_EXPR
@tindex NE_EXPR
+@tindex UNLT_EXPR
+@tindex UNLE_EXPR
+@tindex UNGT_EXPR
+@tindex UNGE_EXPR
+@tindex UNEQ_EXPR
+@tindex LTGT_EXPR
@tindex INIT_EXPR
@tindex MODIFY_EXPR
@tindex COMPONENT_REF
type or both of floating type. The result type of these expressions
will always be of integral or boolean type.
+A floating-point comparison may have a fourth possible outcome besides
+less, greater and equal: @dfn{unordered}.  Two floating-point values
+are unordered if one of them is a @dfn{not-a-number} (@dfn{NaN}) value.
+In this case, all of these nodes except @code{NE_EXPR} will be false,
+and the four ordering comparisons will also raise an invalid operation
+trap.
+
+@item ORDERED_EXPR
+@itemx UNORDERED_EXPR
+@itemx UNLT_EXPR
+@itemx UNLE_EXPR
+@itemx UNGT_EXPR
+@itemx UNGE_EXPR
+@itemx UNEQ_EXPR
+@itemx LTGT_EXPR
+
+These nodes represent other relational operations that are only used
+with floating types.
+
+If the outcome of the comparison is unordered, all of these special
+comparisons except @code{ORDERED_EXPR} and @code{LTGT_EXPR} will be true.
+Only @code{LTGT_EXPR} is expected to generate an invalid floating-point
+operation trap when the outcome is unordered.
+
+@code{ORDERED_EXPR} is true if neither of its operands is a NaN,
+while its negation @code{UNORDERED_EXPR} is true if at least one of
+its operands is a NaN.
+
+For floating-point operands, inverting one of the ordering comparison
+nodes yields one of these nodes, with its name prefixed by @code{UN};
+@code{EQ_EXPR} and @code{NE_EXPR} remain each other's negation, and
+@code{LTGT_EXPR} is the negation of @code{UNEQ_EXPR}.
+
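As a quick check of the behaviour documented above, here is a small
illustrative sketch (it reuses the __builtin_nan and __builtin_isunordered
builtins that also appear in the new torture tests): with a NaN operand only
the != test is true, and only the ordering comparisons may raise the
invalid-operation exception.

/* Illustrative sketch: outcomes of the comparison nodes when one operand
   is a NaN.  */
#include <stdio.h>

int
main (void)
{
  double qnan = __builtin_nan ("");
  double one = 1.0;

  printf ("%d\n", qnan < one);    /* 0: LT_EXPR is false when unordered, may trap.  */
  printf ("%d\n", qnan >= one);   /* 0: GE_EXPR is false when unordered, may trap.  */
  printf ("%d\n", qnan == one);   /* 0: EQ_EXPR is false, quiet.  */
  printf ("%d\n", qnan != one);   /* 1: NE_EXPR is the only true one, quiet.  */
  printf ("%d\n", __builtin_isunordered (qnan, one));    /* 1: the UNORDERED test.  */
  printf ("%d\n", __builtin_islessgreater (qnan, one));  /* 0: quiet less-or-greater;
                                                             LTGT_EXPR itself may trap.  */
  return 0;
}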
@item MODIFY_EXPR
These nodes represent assignment. The left-hand side is the first
operand; the right-hand side is the second operand. The left-hand side
{
enum rtx_code rcode1;
- enum tree_code tcode2;
+ enum tree_code tcode1 = UNORDERED_EXPR, tcode2;
case UNLT_EXPR:
rcode1 = UNLT;
rcode1 = UNEQ;
tcode2 = EQ_EXPR;
goto unordered_bcc;
+ case LTGT_EXPR:
+ /* It is ok for LTGT_EXPR to trap when the result is unordered,
+ so expand to (a < b) || (a > b). */
+ rcode1 = LTGT;
+ tcode1 = LT_EXPR;
+ tcode2 = GT_EXPR;
+ goto unordered_bcc;
unordered_bcc:
mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
tree cmp0, cmp1;
/* If the target doesn't support combined unordered
- compares, decompose into UNORDERED + comparison. */
- cmp0 = fold (build (UNORDERED_EXPR, TREE_TYPE (exp), op0, op1));
+ compares, decompose into two comparisons. */
+ cmp0 = fold (build (tcode1, TREE_TYPE (exp), op0, op1));
cmp1 = fold (build (tcode2, TREE_TYPE (exp), op0, op1));
exp = build (TRUTH_ORIF_EXPR, TREE_TYPE (exp), cmp0, cmp1);
do_jump (exp, if_false_label, if_true_label);
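At the source level the fallback above amounts to rewriting the "<>" test as
two ordinary comparisons.  A brief illustrative sketch of the difference this
implies: the two-branch form may raise the invalid-operation exception on a
NaN operand, whereas __builtin_islessgreater (used by the new tests) must
stay quiet.

/* Sketch: LTGT_EXPR's fallback expansion versus the quiet builtin.  */
int
ltgt_fallback (double a, double b)
{
  /* Same truth value as the "<>" comparison; may trap if either operand
     is a NaN.  */
  return a < b || a > b;
}

int
ltgt_quiet (double a, double b)
{
  /* Quiet test: never raises the invalid-operation exception.  */
  return __builtin_islessgreater (a, b);
}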
case UNGT_EXPR:
case UNGE_EXPR:
case UNEQ_EXPR:
+ case LTGT_EXPR:
temp = do_store_flag (exp,
modifier != EXPAND_STACK_PARM ? target : NULL_RTX,
tmode != VOIDmode ? tmode : mode, 0);
case UNEQ_EXPR:
code = UNEQ;
break;
+ case LTGT_EXPR:
+ code = LTGT;
+ break;
default:
abort ();
#include "langhooks.h"
#include "md5.h"
+/* The following constants represent a bit-based encoding of GCC's
+   comparison operators: the value 1 encodes "less than", 2 "equal to",
+   4 "greater than" and 8 "unordered", and a compound predicate is the
+   bitwise OR of its components.  This encoding simplifies transformations
+   on relational comparison operators, such as AND and OR.  */
+enum comparison_code {
+ COMPCODE_FALSE = 0,
+ COMPCODE_LT = 1,
+ COMPCODE_EQ = 2,
+ COMPCODE_LE = 3,
+ COMPCODE_GT = 4,
+ COMPCODE_LTGT = 5,
+ COMPCODE_GE = 6,
+ COMPCODE_ORD = 7,
+ COMPCODE_UNORD = 8,
+ COMPCODE_UNLT = 9,
+ COMPCODE_UNEQ = 10,
+ COMPCODE_UNLE = 11,
+ COMPCODE_UNGT = 12,
+ COMPCODE_NE = 13,
+ COMPCODE_UNGE = 14,
+ COMPCODE_TRUE = 15
+};
+
static void encode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT, HOST_WIDE_INT);
static void decode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
static bool negate_mathfn_p (enum built_in_function);
static hashval_t size_htab_hash (const void *);
static int size_htab_eq (const void *, const void *);
static tree fold_convert_const (enum tree_code, tree, tree);
-static enum tree_code invert_tree_comparison (enum tree_code);
+static enum tree_code invert_tree_comparison (enum tree_code, bool);
static enum tree_code swap_tree_comparison (enum tree_code);
-static int comparison_to_compcode (enum tree_code);
-static enum tree_code compcode_to_comparison (int);
+static enum comparison_code comparison_to_compcode (enum tree_code);
+static enum tree_code compcode_to_comparison (enum comparison_code);
+static tree combine_comparisons (enum tree_code, enum tree_code,
+ enum tree_code, tree, tree, tree);
static int truth_value_p (enum tree_code);
static int operand_equal_for_comparison_p (tree, tree, tree);
static int twoval_comparison_p (tree, tree *, tree *, int *);
static tree fold_relational_const (enum tree_code, tree, tree, tree);
static tree fold_relational_hi_lo (enum tree_code *, const tree, tree *, tree *);
-/* The following constants represent a bit based encoding of GCC's
- comparison operators. This encoding simplifies transformations
- on relational comparison operators, such as AND and OR. */
-#define COMPCODE_FALSE 0
-#define COMPCODE_LT 1
-#define COMPCODE_EQ 2
-#define COMPCODE_LE 3
-#define COMPCODE_GT 4
-#define COMPCODE_NE 5
-#define COMPCODE_GE 6
-#define COMPCODE_TRUE 7
-
/* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
overflow. Suppose A, B and SUM have the same respective signs as A1, B1,
and SUM1. Then this yields nonzero if overflow occurred during the
\f
/* Given a tree comparison code, return the code that is the logical inverse
of the given code. It is not safe to do this for floating-point
- comparisons, except for NE_EXPR and EQ_EXPR. */
+   comparisons, except for NE_EXPR and EQ_EXPR, so we take a HONOR_NANS
+   flag as well: if inverting the comparison is unsafe, return ERROR_MARK.  */
static enum tree_code
-invert_tree_comparison (enum tree_code code)
+invert_tree_comparison (enum tree_code code, bool honor_nans)
{
+ if (honor_nans && flag_trapping_math)
+ return ERROR_MARK;
+
switch (code)
{
case EQ_EXPR:
case NE_EXPR:
return EQ_EXPR;
case GT_EXPR:
- return LE_EXPR;
+ return honor_nans ? UNLE_EXPR : LE_EXPR;
case GE_EXPR:
- return LT_EXPR;
+ return honor_nans ? UNLT_EXPR : LT_EXPR;
case LT_EXPR:
- return GE_EXPR;
+ return honor_nans ? UNGE_EXPR : GE_EXPR;
case LE_EXPR:
+ return honor_nans ? UNGT_EXPR : GT_EXPR;
+ case LTGT_EXPR:
+ return UNEQ_EXPR;
+ case UNEQ_EXPR:
+ return LTGT_EXPR;
+ case UNGT_EXPR:
+ return LE_EXPR;
+ case UNGE_EXPR:
+ return LT_EXPR;
+ case UNLT_EXPR:
+ return GE_EXPR;
+ case UNLE_EXPR:
return GT_EXPR;
+ case ORDERED_EXPR:
+ return UNORDERED_EXPR;
+ case UNORDERED_EXPR:
+ return ORDERED_EXPR;
default:
abort ();
}
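The honor_nans handling above reflects a simple IEEE fact that can be checked
directly in C: with a NaN operand, !(x < y) is true while (x >= y) is false,
so the only value-preserving inversion of LT_EXPR is UNGE_EXPR.  A minimal
illustrative sketch (UNGE is spelled with __builtin_isunordered here):

/* Sketch: why invert_tree_comparison returns UNGE_EXPR, not GE_EXPR, for
   LT_EXPR once NaNs are honored.  */
#include <assert.h>

int
main (void)
{
  double x = __builtin_nan ("");
  double y = 1.0;

  assert (!(x < y));                                /* The inverted LT is true...  */
  assert (!(x >= y));                               /* ...but plain GE is false,  */
  assert (__builtin_isunordered (x, y) || x >= y);  /* while UNGE is true.  */
  return 0;
}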
into a compcode bit-based encoding. This function is the inverse of
compcode_to_comparison. */
-static int
+static enum comparison_code
comparison_to_compcode (enum tree_code code)
{
switch (code)
return COMPCODE_NE;
case GE_EXPR:
return COMPCODE_GE;
+ case ORDERED_EXPR:
+ return COMPCODE_ORD;
+ case UNORDERED_EXPR:
+ return COMPCODE_UNORD;
+ case UNLT_EXPR:
+ return COMPCODE_UNLT;
+ case UNEQ_EXPR:
+ return COMPCODE_UNEQ;
+ case UNLE_EXPR:
+ return COMPCODE_UNLE;
+ case UNGT_EXPR:
+ return COMPCODE_UNGT;
+ case LTGT_EXPR:
+ return COMPCODE_LTGT;
+ case UNGE_EXPR:
+ return COMPCODE_UNGE;
default:
abort ();
}
inverse of comparison_to_compcode. */
static enum tree_code
-compcode_to_comparison (int code)
+compcode_to_comparison (enum comparison_code code)
{
switch (code)
{
return NE_EXPR;
case COMPCODE_GE:
return GE_EXPR;
+ case COMPCODE_ORD:
+ return ORDERED_EXPR;
+ case COMPCODE_UNORD:
+ return UNORDERED_EXPR;
+ case COMPCODE_UNLT:
+ return UNLT_EXPR;
+ case COMPCODE_UNEQ:
+ return UNEQ_EXPR;
+ case COMPCODE_UNLE:
+ return UNLE_EXPR;
+ case COMPCODE_UNGT:
+ return UNGT_EXPR;
+ case COMPCODE_LTGT:
+ return LTGT_EXPR;
+ case COMPCODE_UNGE:
+ return UNGE_EXPR;
default:
abort ();
}
}
+/* Return a tree for the comparison that combines, with the AND or OR
+   operation given by CODE, the two comparisons LCODE and RCODE applied
+   to the identical operands LL_ARG and LR_ARG.  Take into account the
+   possibility of trapping if the mode has NaNs, and return NULL_TREE
+   if this makes the transformation invalid.  */
+
+static tree
+combine_comparisons (enum tree_code code, enum tree_code lcode,
+ enum tree_code rcode, tree truth_type,
+ tree ll_arg, tree lr_arg)
+{
+ bool honor_nans = HONOR_NANS (TYPE_MODE (TREE_TYPE (ll_arg)));
+ enum comparison_code lcompcode = comparison_to_compcode (lcode);
+ enum comparison_code rcompcode = comparison_to_compcode (rcode);
+ enum comparison_code compcode;
+
+ switch (code)
+ {
+ case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR:
+ compcode = lcompcode & rcompcode;
+ break;
+
+ case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR:
+ compcode = lcompcode | rcompcode;
+ break;
+
+ default:
+ return NULL_TREE;
+ }
+
+ if (!honor_nans)
+ {
+ /* Eliminate unordered comparisons, as well as LTGT and ORD
+ which are not used unless the mode has NaNs. */
+ compcode &= ~COMPCODE_UNORD;
+ if (compcode == COMPCODE_LTGT)
+ compcode = COMPCODE_NE;
+ else if (compcode == COMPCODE_ORD)
+ compcode = COMPCODE_TRUE;
+ }
+ else if (flag_trapping_math)
+ {
+ /* Check that the original operation and the optimized ones will trap
+ under the same condition. */
+ bool ltrap = (lcompcode & COMPCODE_UNORD) == 0
+ && (lcompcode != COMPCODE_EQ)
+ && (lcompcode != COMPCODE_ORD);
+ bool rtrap = (rcompcode & COMPCODE_UNORD) == 0
+ && (rcompcode != COMPCODE_EQ)
+ && (rcompcode != COMPCODE_ORD);
+ bool trap = (compcode & COMPCODE_UNORD) == 0
+ && (compcode != COMPCODE_EQ)
+ && (compcode != COMPCODE_ORD);
+
+ /* In a short-circuited boolean expression the LHS might be
+ such that the RHS, if evaluated, will never trap. For
+ example, in ORD (x, y) && (x < y), we evaluate the RHS only
+ if neither x nor y is NaN. (This is a mixed blessing: for
+ example, the expression above will never trap, hence
+ optimizing it to x < y would be invalid). */
+ if ((code == TRUTH_ORIF_EXPR && (lcompcode & COMPCODE_UNORD))
+ || (code == TRUTH_ANDIF_EXPR && !(lcompcode & COMPCODE_UNORD)))
+ rtrap = false;
+
+ /* If the comparison was short-circuited, and only the RHS
+ trapped, we may now generate a spurious trap. */
+ if (rtrap && !ltrap
+ && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
+ return NULL_TREE;
+
+ /* If we changed the conditions that cause a trap, we lose. */
+ if ((ltrap || rtrap) != trap)
+ return NULL_TREE;
+ }
+
+ if (compcode == COMPCODE_TRUE)
+ return fold_convert (truth_type, integer_one_node);
+ else if (compcode == COMPCODE_FALSE)
+ return fold_convert (truth_type, integer_zero_node);
+ else
+ return fold (build2 (compcode_to_comparison (compcode),
+ truth_type, ll_arg, lr_arg));
+}
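The bitwise AND/OR at the heart of combine_comparisons can be exercised in
isolation.  The constants in the illustrative sketch below are those of the
enum comparison_code above (1 = less, 2 = equal, 4 = greater, 8 = unordered);
each check mirrors a fold that fold_truthop can now perform on floating-point
operands, trap analysis permitting.

/* Sketch of the compcode encoding: combining two predicates over the same
   operands is just combining their codes.  */
#include <stdio.h>

enum comparison_code {
  COMPCODE_FALSE = 0, COMPCODE_LT = 1, COMPCODE_EQ = 2, COMPCODE_LE = 3,
  COMPCODE_GT = 4, COMPCODE_LTGT = 5, COMPCODE_GE = 6, COMPCODE_ORD = 7,
  COMPCODE_UNORD = 8, COMPCODE_NE = 13, COMPCODE_TRUE = 15
};

int
main (void)
{
  /* (x < y) || (x == y)   ==>  x <= y.  */
  printf ("%d\n", (COMPCODE_LT | COMPCODE_EQ) == COMPCODE_LE);
  /* (x < y) && (x == y)   ==>  always false.  */
  printf ("%d\n", (COMPCODE_LT & COMPCODE_EQ) == COMPCODE_FALSE);
  /* (x < y) || (x > y)    ==>  x <> y, i.e. LTGT.  */
  printf ("%d\n", (COMPCODE_LT | COMPCODE_GT) == COMPCODE_LTGT);
  /* (x == y) || (x != y)  ==>  always true (NE includes "unordered").  */
  printf ("%d\n", (COMPCODE_EQ | COMPCODE_NE) == COMPCODE_TRUE);
  return 0;
}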
+
/* Return nonzero if CODE is a tree code that represents a truth value. */
static int
\f
/* Return a simplified tree node for the truth-negation of ARG. This
never alters ARG itself. We assume that ARG is an operation that
- returns a truth value (0 or 1). */
+ returns a truth value (0 or 1).
+ FIXME: one would think we would fold the result, but it causes
+ problems with the dominator optimizer. */
tree
invert_truthvalue (tree arg)
{
if (TREE_CODE_CLASS (code) == '<')
{
- if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
- && !flag_unsafe_math_optimizations
- && code != NE_EXPR
- && code != EQ_EXPR)
- return build1 (TRUTH_NOT_EXPR, type, arg);
- else if (code == UNORDERED_EXPR
- || code == ORDERED_EXPR
- || code == UNEQ_EXPR
- || code == UNLT_EXPR
- || code == UNLE_EXPR
- || code == UNGT_EXPR
- || code == UNGE_EXPR)
+ tree op_type = TREE_TYPE (TREE_OPERAND (arg, 0));
+ if (FLOAT_TYPE_P (op_type)
+ && flag_trapping_math
+ && code != ORDERED_EXPR && code != UNORDERED_EXPR
+ && code != NE_EXPR && code != EQ_EXPR)
return build1 (TRUTH_NOT_EXPR, type, arg);
else
- return build2 (invert_tree_comparison (code), type,
- TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
+ {
+ code = invert_tree_comparison (code,
+ HONOR_NANS (TYPE_MODE (op_type)));
+ if (code == ERROR_MARK)
+ return build1 (TRUTH_NOT_EXPR, type, arg);
+ else
+ return build2 (code, type,
+ TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
+ }
}
switch (code)
if (TREE_CODE_CLASS (lcode) != '<' || TREE_CODE_CLASS (rcode) != '<')
return 0;
- code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
- ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
-
ll_arg = TREE_OPERAND (lhs, 0);
lr_arg = TREE_OPERAND (lhs, 1);
rl_arg = TREE_OPERAND (rhs, 0);
   /* Simplify (x<y) || (x==y) into (x<=y) and related optimizations.  */
if (simple_operand_p (ll_arg)
- && simple_operand_p (lr_arg)
- && !FLOAT_TYPE_P (TREE_TYPE (ll_arg)))
+ && simple_operand_p (lr_arg))
{
- int compcode;
-
+ tree result;
if (operand_equal_p (ll_arg, rl_arg, 0)
&& operand_equal_p (lr_arg, rr_arg, 0))
- {
- int lcompcode, rcompcode;
-
- lcompcode = comparison_to_compcode (lcode);
- rcompcode = comparison_to_compcode (rcode);
- compcode = (code == TRUTH_AND_EXPR)
- ? lcompcode & rcompcode
- : lcompcode | rcompcode;
- }
+ {
+ result = combine_comparisons (code, lcode, rcode,
+ truth_type, ll_arg, lr_arg);
+ if (result)
+ return result;
+ }
else if (operand_equal_p (ll_arg, rr_arg, 0)
&& operand_equal_p (lr_arg, rl_arg, 0))
- {
- int lcompcode, rcompcode;
-
- rcode = swap_tree_comparison (rcode);
- lcompcode = comparison_to_compcode (lcode);
- rcompcode = comparison_to_compcode (rcode);
- compcode = (code == TRUTH_AND_EXPR)
- ? lcompcode & rcompcode
- : lcompcode | rcompcode;
- }
- else
- compcode = -1;
-
- if (compcode == COMPCODE_TRUE)
- return fold_convert (truth_type, integer_one_node);
- else if (compcode == COMPCODE_FALSE)
- return fold_convert (truth_type, integer_zero_node);
- else if (compcode != -1)
- return build2 (compcode_to_comparison (compcode),
- truth_type, ll_arg, lr_arg);
+ {
+ result = combine_comparisons (code, lcode,
+ swap_tree_comparison (rcode),
+ truth_type, ll_arg, lr_arg);
+ if (result)
+ return result;
+ }
}
+ code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
+ ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
+
/* If the RHS can be evaluated unconditionally and its operands are
simple, it wins to evaluate the RHS unconditionally on machines
with expensive branches. In this case, this isn't a comparison
if (code == NE_EXPR || code == GE_EXPR)
{
invert = 1;
- code = invert_tree_comparison (code);
+ code = invert_tree_comparison (code, false);
}
/* Compute a result for LT or EQ if args permit;
break;
case NE_EXPR:
+ case LTGT_EXPR:
      /* Floating point comparisons appear to behave in a very
	 unpredictable way because of the special role of = tests in
	 FP code.  */
return do_compare (op0, op1, 1) >= 0;
case UNEQ_EXPR:
return do_compare (op0, op1, 0) == 0;
+ case LTGT_EXPR:
+ return do_compare (op0, op1, 0) != 0;
default:
abort ();
+2004-05-28 Paolo Bonzini <bonzini@gnu.org>
+
+ * gcc.c-torture/compare-fp-1.c, gcc.c-torture/compare-fp-2.c,
+ gcc.c-torture/compare-fp-3.c, gcc.c-torture/compare-fp-4.c,
+ gcc.c-torture/compare-fp-3.x, gcc.c-torture/compare-fp-4.x,
+ gcc.c-torture/pr15649-1.c: New.
+
2004-05-27 Adam Nemet <anemet@lnxw.com>
PR c++/12883
--- /dev/null
+/* Copyright (C) 2004 Free Software Foundation.
+
+ Test for correctness of composite floating-point comparisons.
+
+ Written by Paolo Bonzini, 26th May 2004. */
+
+extern void abort (void);
+
+#define TEST(c) if ((c) != ok) abort ();
+#define ORD(a, b) (!__builtin_isunordered ((a), (b)))
+#define UNORD(a, b) (__builtin_isunordered ((a), (b)))
+#define UNEQ(a, b) (__builtin_isunordered ((a), (b)) || ((a) == (b)))
+#define UNLT(a, b) (__builtin_isunordered ((a), (b)) || ((a) < (b)))
+#define UNLE(a, b) (__builtin_isunordered ((a), (b)) || ((a) <= (b)))
+#define UNGT(a, b) (__builtin_isunordered ((a), (b)) || ((a) > (b)))
+#define UNGE(a, b) (__builtin_isunordered ((a), (b)) || ((a) >= (b)))
+#define LTGT(a, b) (__builtin_islessgreater ((a), (b)))
+
+float pinf;
+float ninf;
+float NaN;
+
+int iuneq (float x, float y, int ok)
+{
+ TEST (UNEQ (x, y));
+ TEST (!LTGT (x, y));
+ TEST (UNLE (x, y) && UNGE (x,y));
+}
+
+int ieq (float x, float y, int ok)
+{
+ TEST (ORD (x, y) && UNEQ (x, y));
+}
+
+int iltgt (float x, float y, int ok)
+{
+ TEST (!UNEQ (x, y)); /* Not optimizable. */
+ TEST (LTGT (x, y)); /* Same, __builtin_islessgreater does not trap. */
+ TEST (ORD (x, y) && (UNLT (x, y) || UNGT (x,y)));
+}
+
+int ine (float x, float y, int ok)
+{
+ TEST (UNLT (x, y) || UNGT (x, y));
+}
+
+int iunlt (float x, float y, int ok)
+{
+ TEST (UNLT (x, y));
+ TEST (UNORD (x, y) || (x < y));
+}
+
+int ilt (float x, float y, int ok)
+{
+ TEST (ORD (x, y) && UNLT (x, y)); /* Not optimized */
+ TEST ((x <= y) && (x != y));
+ TEST ((x <= y) && (y != x));
+ TEST ((x != y) && (x <= y)); /* Not optimized */
+ TEST ((y != x) && (x <= y)); /* Not optimized */
+}
+
+int iunle (float x, float y, int ok)
+{
+ TEST (UNLE (x, y));
+ TEST (UNORD (x, y) || (x <= y));
+}
+
+int ile (float x, float y, int ok)
+{
+ TEST (ORD (x, y) && UNLE (x, y)); /* Not optimized */
+ TEST ((x < y) || (x == y));
+ TEST ((y > x) || (x == y));
+ TEST ((x == y) || (x < y)); /* Not optimized */
+ TEST ((y == x) || (x < y)); /* Not optimized */
+}
+
+int iungt (float x, float y, int ok)
+{
+ TEST (UNGT (x, y));
+ TEST (UNORD (x, y) || (x > y));
+}
+
+int igt (float x, float y, int ok)
+{
+ TEST (ORD (x, y) && UNGT (x, y)); /* Not optimized */
+ TEST ((x >= y) && (x != y));
+ TEST ((x >= y) && (y != x));
+ TEST ((x != y) && (x >= y)); /* Not optimized */
+ TEST ((y != x) && (x >= y)); /* Not optimized */
+}
+
+int iunge (float x, float y, int ok)
+{
+ TEST (UNGE (x, y));
+ TEST (UNORD (x, y) || (x >= y));
+}
+
+int ige (float x, float y, int ok)
+{
+ TEST (ORD (x, y) && UNGE (x, y)); /* Not optimized */
+ TEST ((x > y) || (x == y));
+ TEST ((y < x) || (x == y));
+ TEST ((x == y) || (x > y)); /* Not optimized */
+ TEST ((y == x) || (x > y)); /* Not optimized */
+}
+
+int
+main ()
+{
+ pinf = __builtin_inf ();
+ ninf = -__builtin_inf ();
+ NaN = __builtin_nan ("");
+
+ iuneq (ninf, pinf, 0);
+ iuneq (NaN, NaN, 1);
+ iuneq (pinf, ninf, 0);
+ iuneq (1, 4, 0);
+ iuneq (3, 3, 1);
+ iuneq (5, 2, 0);
+
+ ieq (1, 4, 0);
+ ieq (3, 3, 1);
+ ieq (5, 2, 0);
+
+ iltgt (ninf, pinf, 1);
+ iltgt (NaN, NaN, 0);
+ iltgt (pinf, ninf, 1);
+ iltgt (1, 4, 1);
+ iltgt (3, 3, 0);
+ iltgt (5, 2, 1);
+
+ ine (1, 4, 1);
+ ine (3, 3, 0);
+ ine (5, 2, 1);
+
+ iunlt (NaN, ninf, 1);
+ iunlt (pinf, NaN, 1);
+ iunlt (pinf, ninf, 0);
+ iunlt (pinf, pinf, 0);
+ iunlt (ninf, ninf, 0);
+ iunlt (1, 4, 1);
+ iunlt (3, 3, 0);
+ iunlt (5, 2, 0);
+
+ ilt (1, 4, 1);
+ ilt (3, 3, 0);
+ ilt (5, 2, 0);
+
+ iunle (NaN, ninf, 1);
+ iunle (pinf, NaN, 1);
+ iunle (pinf, ninf, 0);
+ iunle (pinf, pinf, 1);
+ iunle (ninf, ninf, 1);
+ iunle (1, 4, 1);
+ iunle (3, 3, 1);
+ iunle (5, 2, 0);
+
+ ile (1, 4, 1);
+ ile (3, 3, 1);
+ ile (5, 2, 0);
+
+ iungt (NaN, ninf, 1);
+ iungt (pinf, NaN, 1);
+ iungt (pinf, ninf, 1);
+ iungt (pinf, pinf, 0);
+ iungt (ninf, ninf, 0);
+ iungt (1, 4, 0);
+ iungt (3, 3, 0);
+ iungt (5, 2, 1);
+
+ igt (1, 4, 0);
+ igt (3, 3, 0);
+ igt (5, 2, 1);
+
+ iunge (NaN, ninf, 1);
+ iunge (pinf, NaN, 1);
+ iunge (ninf, pinf, 0);
+ iunge (pinf, pinf, 1);
+ iunge (ninf, ninf, 1);
+ iunge (1, 4, 0);
+ iunge (3, 3, 1);
+ iunge (5, 2, 1);
+
+ ige (1, 4, 0);
+ ige (3, 3, 1);
+ ige (5, 2, 1);
+
+ return 0;
+}
--- /dev/null
+/* Copyright (C) 2004 Free Software Foundation.
+
+ Ensure that the composite comparison optimization doesn't misfire
+ and attempt to combine an integer comparison with a floating-point one.
+
+ Written by Paolo Bonzini, 26th May 2004. */
+
+extern void abort (void);
+
+int
+foo (double x, double y)
+{
+ /* If miscompiled the following may become false. */
+ return (x > y) && ((int)x == (int)y);
+}
+
+int
+main ()
+{
+ if (! foo (1.3,1.0))
+ abort ();
+ return 0;
+}
+
--- /dev/null
+/* Copyright (C) 2004 Free Software Foundation.
+
+ Test for composite comparison always true/false optimization.
+
+ Written by Paolo Bonzini, 26th May 2004. */
+
+extern void link_error0 ();
+extern void link_error1 ();
+
+void
+test1 (float x, float y)
+{
+ if ((x==y) && (x!=y))
+ link_error0();
+}
+
+void
+test2 (float x, float y)
+{
+ if ((x<y) && (x>y))
+ link_error0();
+}
+
+void
+test3 (float x, float y)
+{
+ if ((x<y) && (y<x))
+ link_error0();
+}
+
+void
+test4 (float x, float y)
+{
+ if ((x==y) || (x!=y))
+ {
+ }
+ else
+ link_error1 ();
+}
+
+void
+test5 (float x, float y)
+{
+ if (__builtin_isunordered (x, y) || (x>=y) || (x<y))
+ {
+ }
+ else
+ link_error1 ();
+}
+
+void
+test6 (float x, float y)
+{
+ if (__builtin_isunordered (y, x) || (x<=y) || (y<x))
+ {
+ }
+ else
+ link_error1 ();
+}
+
+void
+test7 (float x, float y)
+{
+ if (__builtin_isunordered (x, y) || !__builtin_isunordered (x, y))
+ {
+ }
+ else
+ link_error1 ();
+}
+
+void
+all_tests (float x, float y)
+{
+ test1 (x, y);
+ test2 (x, y);
+ test3 (x, y);
+ test4 (x, y);
+ test5 (x, y);
+ test6 (x, y);
+ test7 (x, y);
+}
+
+int
+main ()
+{
+ all_tests (0, 0);
+ all_tests (1, 2);
+ all_tests (4, 3);
+
+ return 0;
+}
+
+#ifndef __OPTIMIZE__
+void link_error0() {}
+void link_error1() {}
+#endif /* ! __OPTIMIZE__ */
+
--- /dev/null
+set options "-fno-trapping-math"
+return 0
--- /dev/null
+/* Copyright (C) 2004 Free Software Foundation.
+
+ Test for correctness of composite floating-point comparisons.
+
+ Written by Paolo Bonzini, 26th May 2004. */
+
+extern void abort (void);
+
+#define TEST(c) if ((c) != ok) abort ();
+#define ORD(a, b) (((a) < (b)) || (a) >= (b))
+#define UNORD(a, b) (!ORD ((a), (b)))
+#define UNEQ(a, b) (!LTGT ((a), (b)))
+#define UNLT(a, b) (((a) < (b)) || __builtin_isunordered ((a), (b)))
+#define UNLE(a, b) (((a) <= (b)) || __builtin_isunordered ((a), (b)))
+#define UNGT(a, b) (((a) > (b)) || __builtin_isunordered ((a), (b)))
+#define UNGE(a, b) (((a) >= (b)) || __builtin_isunordered ((a), (b)))
+#define LTGT(a, b) (((a) < (b)) || (a) > (b))
+
+float pinf;
+float ninf;
+float NaN;
+
+int iuneq (float x, float y, int ok)
+{
+ TEST (UNEQ (x, y));
+ TEST (!LTGT (x, y));
+ TEST (UNLE (x, y) && UNGE (x,y));
+}
+
+int ieq (float x, float y, int ok)
+{
+ TEST (ORD (x, y) && UNEQ (x, y));
+}
+
+int iltgt (float x, float y, int ok)
+{
+ TEST (!UNEQ (x, y));
+ TEST (LTGT (x, y));
+ TEST (ORD (x, y) && (UNLT (x, y) || UNGT (x,y)));
+}
+
+int ine (float x, float y, int ok)
+{
+ TEST (UNLT (x, y) || UNGT (x, y));
+ TEST ((x < y) || (x > y) || UNORD (x, y));
+}
+
+int iunlt (float x, float y, int ok)
+{
+ TEST (UNLT (x, y));
+ TEST (UNORD (x, y) || (x < y));
+}
+
+int ilt (float x, float y, int ok)
+{
+ TEST (ORD (x, y) && UNLT (x, y));
+ TEST ((x <= y) && (x != y));
+ TEST ((x <= y) && (y != x));
+ TEST ((x != y) && (x <= y));
+ TEST ((y != x) && (x <= y));
+}
+
+int iunle (float x, float y, int ok)
+{
+ TEST (UNLE (x, y));
+ TEST (UNORD (x, y) || (x <= y));
+}
+
+int ile (float x, float y, int ok)
+{
+ TEST (ORD (x, y) && UNLE (x, y));
+ TEST ((x < y) || (x == y));
+ TEST ((y > x) || (x == y));
+ TEST ((x == y) || (x < y));
+ TEST ((y == x) || (x < y));
+}
+
+int iungt (float x, float y, int ok)
+{
+ TEST (UNGT (x, y));
+ TEST (UNORD (x, y) || (x > y));
+}
+
+int igt (float x, float y, int ok)
+{
+ TEST (ORD (x, y) && UNGT (x, y));
+ TEST ((x >= y) && (x != y));
+ TEST ((x >= y) && (y != x));
+ TEST ((x != y) && (x >= y));
+ TEST ((y != x) && (x >= y));
+}
+
+int iunge (float x, float y, int ok)
+{
+ TEST (UNGE (x, y));
+ TEST (UNORD (x, y) || (x >= y));
+}
+
+int ige (float x, float y, int ok)
+{
+ TEST (ORD (x, y) && UNGE (x, y));
+ TEST ((x > y) || (x == y));
+ TEST ((y < x) || (x == y));
+ TEST ((x == y) || (x > y));
+ TEST ((y == x) || (x > y));
+}
+
+int
+main ()
+{
+ pinf = __builtin_inf ();
+ ninf = -__builtin_inf ();
+ NaN = __builtin_nan ("");
+
+ iuneq (ninf, pinf, 0);
+ iuneq (NaN, NaN, 1);
+ iuneq (pinf, ninf, 0);
+ iuneq (1, 4, 0);
+ iuneq (3, 3, 1);
+ iuneq (5, 2, 0);
+
+ ieq (1, 4, 0);
+ ieq (3, 3, 1);
+ ieq (5, 2, 0);
+
+ iltgt (ninf, pinf, 1);
+ iltgt (NaN, NaN, 0);
+ iltgt (pinf, ninf, 1);
+ iltgt (1, 4, 1);
+ iltgt (3, 3, 0);
+ iltgt (5, 2, 1);
+
+ ine (1, 4, 1);
+ ine (3, 3, 0);
+ ine (5, 2, 1);
+
+ iunlt (NaN, ninf, 1);
+ iunlt (pinf, NaN, 1);
+ iunlt (pinf, ninf, 0);
+ iunlt (pinf, pinf, 0);
+ iunlt (ninf, ninf, 0);
+ iunlt (1, 4, 1);
+ iunlt (3, 3, 0);
+ iunlt (5, 2, 0);
+
+ ilt (1, 4, 1);
+ ilt (3, 3, 0);
+ ilt (5, 2, 0);
+
+ iunle (NaN, ninf, 1);
+ iunle (pinf, NaN, 1);
+ iunle (pinf, ninf, 0);
+ iunle (pinf, pinf, 1);
+ iunle (ninf, ninf, 1);
+ iunle (1, 4, 1);
+ iunle (3, 3, 1);
+ iunle (5, 2, 0);
+
+ ile (1, 4, 1);
+ ile (3, 3, 1);
+ ile (5, 2, 0);
+
+ iungt (NaN, ninf, 1);
+ iungt (pinf, NaN, 1);
+ iungt (pinf, ninf, 1);
+ iungt (pinf, pinf, 0);
+ iungt (ninf, ninf, 0);
+ iungt (1, 4, 0);
+ iungt (3, 3, 0);
+ iungt (5, 2, 1);
+
+ igt (1, 4, 0);
+ igt (3, 3, 0);
+ igt (5, 2, 1);
+
+ iunge (NaN, ninf, 1);
+ iunge (pinf, NaN, 1);
+ iunge (ninf, pinf, 0);
+ iunge (pinf, pinf, 1);
+ iunge (ninf, ninf, 1);
+ iunge (1, 4, 0);
+ iunge (3, 3, 1);
+ iunge (5, 2, 1);
+
+ ige (1, 4, 0);
+ ige (3, 3, 1);
+ ige (5, 2, 1);
+
+ return 0;
+}
--- /dev/null
+set options "-fno-trapping-math"
+return 0
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-ffast-math -O2" } */
+
+double foo (double x)
+{
+ return __builtin_isgreater (x, 0.0) ? 0.0 : x;
+}
case UNGT_EXPR:
case UNGE_EXPR:
case UNEQ_EXPR:
+ case LTGT_EXPR:
case PLUS_EXPR:
case MINUS_EXPR:
case MULT_EXPR:
case UNGT_EXPR:
case UNGE_EXPR:
case UNEQ_EXPR:
+ case LTGT_EXPR:
case CONVERT_EXPR:
case UNGT_EXPR:
case UNGE_EXPR:
case UNEQ_EXPR:
+ case LTGT_EXPR:
+ case ORDERED_EXPR:
+ case UNORDERED_EXPR:
{
const char *op = op_symbol (node);
op0 = TREE_OPERAND (node, 0);
pp_character (buffer, '>');
break;
- case UNORDERED_EXPR:
- NIY;
- break;
-
- case ORDERED_EXPR:
- NIY;
- break;
-
case IN_EXPR:
NIY;
break;
case NE_EXPR:
return 9;
+ case UNLT_EXPR:
+ case UNLE_EXPR:
+ case UNGT_EXPR:
+ case UNGE_EXPR:
+ case UNEQ_EXPR:
+ case LTGT_EXPR:
+ case ORDERED_EXPR:
+ case UNORDERED_EXPR:
case LT_EXPR:
case LE_EXPR:
case GT_EXPR:
case BIT_AND_EXPR:
return "&";
+ case ORDERED_EXPR:
+ return "ord";
+ case UNORDERED_EXPR:
+ return "unord";
+
case EQ_EXPR:
- case UNEQ_EXPR:
return "==";
+ case UNEQ_EXPR:
+ return "u==";
case NE_EXPR:
return "!=";
case LT_EXPR:
- case UNLT_EXPR:
return "<";
+ case UNLT_EXPR:
+ return "u<";
case LE_EXPR:
- case UNLE_EXPR:
return "<=";
+ case UNLE_EXPR:
+ return "u<=";
case GT_EXPR:
- case UNGT_EXPR:
return ">";
+ case UNGT_EXPR:
+ return "u>";
case GE_EXPR:
- case UNGE_EXPR:
return ">=";
+ case UNGE_EXPR:
+ return "u>=";
+
+ case LTGT_EXPR:
+ return "<>";
case LSHIFT_EXPR:
return "<<";
DEFTREECODE (UNGE_EXPR, "unge_expr", '<', 2)
DEFTREECODE (UNEQ_EXPR, "uneq_expr", '<', 2)
+/* This is the negation of uneq_expr.  */
+DEFTREECODE (LTGT_EXPR, "ltgt_expr", '<', 2)
+
/* Operations for Pascal sets. Not used now. */
DEFTREECODE (IN_EXPR, "in_expr", '2', 2)
DEFTREECODE (SET_LE_EXPR, "set_le_expr", '<', 2)