[wide-int] Treat order comparisons like other binary ops
Richard Biener
rguenther@suse.de
Tue Oct 29 12:26:00 GMT 2013
On Sun, 27 Oct 2013, Richard Sandiford wrote:
> Until now, eq_p and ne_p have enforced the same argument rules as things
> like addition, while order comparisons like lts_p have treated the two
> arguments as independent and signed. Richard, I think you said on IRC
> that you thought lts_p should behave like the others. E.g. lts_p on two
> trees or two rtxes should make sure that the precisions are the same.
Yes.
> This patch does that. I think all uses of INT_CST_LT and INT_CST_LT_UNSIGNED
> are really comparing to "infinite" precision, and the UNSIGNED distinction is
> only there because double_int isn't wide enough to be effectively infinite.
> Is that right?
Yes, it's only because of the case of set MSB on the double-int.
> Since that isn't a problem with widest_int, the patch gets
> rid of INT_CST_LT_UNSIGNED and only uses INT_CST_LT.
>
>
> The c-lex.c change includes a generic change that I just posted for trunk.
>
> I rejigged the order in tree.h slightly so that it matches trunk.
>
> Tested on powerpc64-linux-gnu and x86_64-linux-gnu. OK for wide-int?
Ok.
Thanks,
Richard.
> Thanks,
> Richard
>
>
> Index: gcc/c-family/c-common.c
> ===================================================================
> --- gcc/c-family/c-common.c 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/c-family/c-common.c 2013-10-27 14:19:27.667578745 +0000
> @@ -4101,20 +4101,10 @@ shorten_compare (tree *op0_ptr, tree *op
> maxval = convert (*restype_ptr, maxval);
> }
>
> - if (unsignedp && unsignedp0)
> - {
> - min_gt = INT_CST_LT_UNSIGNED (primop1, minval);
> - max_gt = INT_CST_LT_UNSIGNED (primop1, maxval);
> - min_lt = INT_CST_LT_UNSIGNED (minval, primop1);
> - max_lt = INT_CST_LT_UNSIGNED (maxval, primop1);
> - }
> - else
> - {
> - min_gt = INT_CST_LT (primop1, minval);
> - max_gt = INT_CST_LT (primop1, maxval);
> - min_lt = INT_CST_LT (minval, primop1);
> - max_lt = INT_CST_LT (maxval, primop1);
> - }
> + min_gt = INT_CST_LT (primop1, minval);
> + max_gt = INT_CST_LT (primop1, maxval);
> + min_lt = INT_CST_LT (minval, primop1);
> + max_lt = INT_CST_LT (maxval, primop1);
>
> val = 0;
> /* This used to be a switch, but Genix compiler can't handle that. */
> Index: gcc/c-family/c-lex.c
> ===================================================================
> --- gcc/c-family/c-lex.c 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/c-family/c-lex.c 2013-10-27 14:19:27.664578718 +0000
> @@ -48,9 +48,9 @@ static tree interpret_float (const cpp_t
> enum overflow_type *);
> static tree interpret_fixed (const cpp_token *, unsigned int);
> static enum integer_type_kind narrowest_unsigned_type
> - (const wide_int &, unsigned int);
> + (const widest_int &, unsigned int);
> static enum integer_type_kind narrowest_signed_type
> - (const wide_int &, unsigned int);
> + (const widest_int &, unsigned int);
> static enum cpp_ttype lex_string (const cpp_token *, tree *, bool, bool);
> static tree lex_charconst (const cpp_token *);
> static void update_header_times (const char *);
> @@ -526,7 +526,7 @@ c_lex_with_flags (tree *value, location_
> there isn't one. */
>
> static enum integer_type_kind
> -narrowest_unsigned_type (const wide_int &val, unsigned int flags)
> +narrowest_unsigned_type (const widest_int &val, unsigned int flags)
> {
> int itk;
>
> @@ -545,7 +545,7 @@ narrowest_unsigned_type (const wide_int
> continue;
> upper = TYPE_MAX_VALUE (integer_types[itk]);
>
> - if (wi::geu_p (upper, val))
> + if (wi::geu_p (wi::to_widest (upper), val))
> return (enum integer_type_kind) itk;
> }
>
> @@ -554,7 +554,7 @@ narrowest_unsigned_type (const wide_int
>
> /* Ditto, but narrowest signed type. */
> static enum integer_type_kind
> -narrowest_signed_type (const wide_int &val, unsigned int flags)
> +narrowest_signed_type (const widest_int &val, unsigned int flags)
> {
> int itk;
>
> @@ -573,7 +573,7 @@ narrowest_signed_type (const wide_int &v
> continue;
> upper = TYPE_MAX_VALUE (integer_types[itk]);
>
> - if (wi::geu_p (upper, val))
> + if (wi::geu_p (wi::to_widest (upper), val))
> return (enum integer_type_kind) itk;
> }
>
> @@ -588,20 +588,18 @@ interpret_integer (const cpp_token *toke
> tree value, type;
> enum integer_type_kind itk;
> cpp_num integer;
> - cpp_options *options = cpp_get_options (parse_in);
> - HOST_WIDE_INT ival[2];
> - wide_int wval;
> + HOST_WIDE_INT ival[3];
>
> *overflow = OT_NONE;
>
> integer = cpp_interpret_integer (parse_in, token, flags);
> - integer = cpp_num_sign_extend (integer, options->precision);
> if (integer.overflow)
> *overflow = OT_OVERFLOW;
>
> ival[0] = integer.low;
> ival[1] = integer.high;
> - wval = wide_int::from_array (ival, 2, HOST_BITS_PER_WIDE_INT * 2);
> + ival[2] = 0;
> + widest_int wval = widest_int::from_array (ival, 3);
>
> /* The type of a constant with a U suffix is straightforward. */
> if (flags & CPP_N_UNSIGNED)
> Index: gcc/cp/call.c
> ===================================================================
> --- gcc/cp/call.c 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/cp/call.c 2013-10-27 14:19:27.673578799 +0000
> @@ -6493,8 +6493,7 @@ type_passed_as (tree type)
> else if (targetm.calls.promote_prototypes (type)
> && INTEGRAL_TYPE_P (type)
> && COMPLETE_TYPE_P (type)
> - && INT_CST_LT_UNSIGNED (TYPE_SIZE (type),
> - TYPE_SIZE (integer_type_node)))
> + && INT_CST_LT (TYPE_SIZE (type), TYPE_SIZE (integer_type_node)))
> type = integer_type_node;
>
> return type;
> @@ -6534,8 +6533,7 @@ convert_for_arg_passing (tree type, tree
> else if (targetm.calls.promote_prototypes (type)
> && INTEGRAL_TYPE_P (type)
> && COMPLETE_TYPE_P (type)
> - && INT_CST_LT_UNSIGNED (TYPE_SIZE (type),
> - TYPE_SIZE (integer_type_node)))
> + && INT_CST_LT (TYPE_SIZE (type), TYPE_SIZE (integer_type_node)))
> val = cp_perform_integral_promotions (val, complain);
> if ((complain & tf_warning)
> && warn_suggest_attribute_format)
> Index: gcc/cp/class.c
> ===================================================================
> --- gcc/cp/class.c 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/cp/class.c 2013-10-27 14:19:27.675578817 +0000
> @@ -5790,7 +5790,7 @@ end_of_class (tree t, int include_virtua
> continue;
>
> offset = end_of_base (base_binfo);
> - if (INT_CST_LT_UNSIGNED (result, offset))
> + if (INT_CST_LT (result, offset))
> result = offset;
> }
>
> @@ -5800,7 +5800,7 @@ end_of_class (tree t, int include_virtua
> vec_safe_iterate (vbases, i, &base_binfo); i++)
> {
> offset = end_of_base (base_binfo);
> - if (INT_CST_LT_UNSIGNED (result, offset))
> + if (INT_CST_LT (result, offset))
> result = offset;
> }
>
> @@ -5880,7 +5880,7 @@ include_empty_classes (record_layout_inf
> CLASSTYPE_AS_BASE (rli->t) != NULL_TREE);
> rli_size = rli_size_unit_so_far (rli);
> if (TREE_CODE (rli_size) == INTEGER_CST
> - && INT_CST_LT_UNSIGNED (rli_size, eoc))
> + && INT_CST_LT (rli_size, eoc))
> {
> if (!abi_version_at_least (2))
> /* In version 1 of the ABI, the size of a class that ends with
> Index: gcc/cp/init.c
> ===================================================================
> --- gcc/cp/init.c 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/cp/init.c 2013-10-27 14:19:27.661578691 +0000
> @@ -2412,7 +2412,7 @@ build_new_1 (vec<tree, va_gc> **placemen
> gcc_assert (TREE_CODE (size) == INTEGER_CST);
> cookie_size = targetm.cxx.get_cookie_size (elt_type);
> gcc_assert (TREE_CODE (cookie_size) == INTEGER_CST);
> - gcc_checking_assert (wi::ltu_p (cookie_size, max_size));
> + gcc_checking_assert (wi::ltu_p (wi::to_offset (cookie_size), max_size));
> /* Unconditionally subtract the cookie size. This decreases the
> maximum object size and is safe even if we choose not to use
> a cookie after all. */
> Index: gcc/fold-const.c
> ===================================================================
> --- gcc/fold-const.c 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/fold-const.c 2013-10-27 14:19:27.669578763 +0000
> @@ -16448,8 +16448,6 @@ fold_relational_const (enum tree_code co
> {
> if (code == EQ_EXPR)
> result = tree_int_cst_equal (op0, op1);
> - else if (TYPE_UNSIGNED (TREE_TYPE (op0)))
> - result = INT_CST_LT_UNSIGNED (op0, op1);
> else
> result = INT_CST_LT (op0, op1);
> }
> Index: gcc/gimple-fold.c
> ===================================================================
> --- gcc/gimple-fold.c 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/gimple-fold.c 2013-10-27 14:19:27.662578700 +0000
> @@ -2834,7 +2834,7 @@ fold_array_ctor_reference (tree type, tr
> be larger than size of array element. */
> if (!TYPE_SIZE_UNIT (type)
> || TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST
> - || wi::lts_p (elt_size, TYPE_SIZE_UNIT (type)))
> + || wi::lts_p (elt_size, wi::to_offset (TYPE_SIZE_UNIT (type))))
> return NULL_TREE;
>
> /* Compute the array index we look for. */
> Index: gcc/java/expr.c
> ===================================================================
> --- gcc/java/expr.c 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/java/expr.c 2013-10-27 14:19:27.675578817 +0000
> @@ -1716,7 +1716,7 @@ build_field_ref (tree self_value, tree s
> tree field_offset = byte_position (field_decl);
> if (! page_size)
> page_size = size_int (4096);
> - check = ! INT_CST_LT_UNSIGNED (field_offset, page_size);
> + check = ! INT_CST_LT (field_offset, page_size);
> }
>
> if (base_type != TREE_TYPE (self_value))
> Index: gcc/loop-doloop.c
> ===================================================================
> --- gcc/loop-doloop.c 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/loop-doloop.c 2013-10-27 14:19:27.663578709 +0000
> @@ -462,8 +462,8 @@ doloop_modify (struct loop *loop, struct
> Note that the maximum value loaded is iterations_max - 1. */
> if (get_max_loop_iterations (loop, &iterations)
> && wi::leu_p (iterations,
> - wi::set_bit_in_zero (GET_MODE_PRECISION (mode) - 1,
> - GET_MODE_PRECISION (mode))))
> + wi::set_bit_in_zero <widest_int>
> + (GET_MODE_PRECISION (mode) - 1)))
> nonneg = 1;
> break;
>
> Index: gcc/targhooks.c
> ===================================================================
> --- gcc/targhooks.c 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/targhooks.c 2013-10-27 14:19:27.670578772 +0000
> @@ -288,7 +288,7 @@ default_cxx_get_cookie_size (tree type)
>
> sizetype_size = size_in_bytes (sizetype);
> type_align = size_int (TYPE_ALIGN_UNIT (type));
> - if (INT_CST_LT_UNSIGNED (type_align, sizetype_size))
> + if (INT_CST_LT (type_align, sizetype_size))
> cookie_size = sizetype_size;
> else
> cookie_size = type_align;
> Index: gcc/tree-ssa-loop-ivcanon.c
> ===================================================================
> --- gcc/tree-ssa-loop-ivcanon.c 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/tree-ssa-loop-ivcanon.c 2013-10-27 14:19:27.662578700 +0000
> @@ -546,7 +546,8 @@ remove_redundant_iv_tests (struct loop *
> || !integer_zerop (niter.may_be_zero)
> || !niter.niter
> || TREE_CODE (niter.niter) != INTEGER_CST
> - || !wi::ltu_p (loop->nb_iterations_upper_bound, niter.niter))
> + || !wi::ltu_p (loop->nb_iterations_upper_bound,
> + wi::to_widest (niter.niter)))
> continue;
>
> if (dump_file && (dump_flags & TDF_DETAILS))
> Index: gcc/tree-ssa-loop-niter.c
> ===================================================================
> --- gcc/tree-ssa-loop-niter.c 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/tree-ssa-loop-niter.c 2013-10-27 14:19:27.663578709 +0000
> @@ -2520,7 +2520,7 @@ do_warn_aggressive_loop_optimizations (s
> || loop->warned_aggressive_loop_optimizations
> /* Only warn if undefined behavior gives us lower estimate than the
> known constant bound. */
> - || wi::cmpu (i_bound, loop->nb_iterations) >= 0
> + || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
> /* And undefined behavior happens unconditionally. */
> || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
> return;
> Index: gcc/tree-ssa-uninit.c
> ===================================================================
> --- gcc/tree-ssa-uninit.c 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/tree-ssa-uninit.c 2013-10-27 14:19:27.670578772 +0000
> @@ -848,12 +848,11 @@ is_value_included_in (tree val, tree bou
> if (cmpc == EQ_EXPR)
> result = tree_int_cst_equal (val, boundary);
> else if (cmpc == LT_EXPR)
> - result = INT_CST_LT_UNSIGNED (val, boundary);
> + result = INT_CST_LT (val, boundary);
> else
> {
> gcc_assert (cmpc == LE_EXPR);
> - result = (tree_int_cst_equal (val, boundary)
> - || INT_CST_LT_UNSIGNED (val, boundary));
> + result = INT_CST_LE (val, boundary);
> }
> }
> else
> Index: gcc/tree-ssa.c
> ===================================================================
> --- gcc/tree-ssa.c 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/tree-ssa.c 2013-10-27 14:19:27.663578709 +0000
> @@ -1422,7 +1422,7 @@ non_rewritable_mem_ref_base (tree ref)
> && useless_type_conversion_p (TREE_TYPE (base),
> TREE_TYPE (TREE_TYPE (decl)))
> && wi::fits_uhwi_p (mem_ref_offset (base))
> - && wi::gtu_p (TYPE_SIZE_UNIT (TREE_TYPE (decl)),
> + && wi::gtu_p (wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (decl))),
> mem_ref_offset (base))
> && multiple_of_p (sizetype, TREE_OPERAND (base, 1),
> TYPE_SIZE_UNIT (TREE_TYPE (base))))
> Index: gcc/tree-vrp.c
> ===================================================================
> --- gcc/tree-vrp.c 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/tree-vrp.c 2013-10-27 14:19:27.671578781 +0000
> @@ -1127,15 +1127,7 @@ operand_less_p (tree val, tree val2)
> {
> /* LT is folded faster than GE and others. Inline the common case. */
> if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
> - {
> - if (TYPE_UNSIGNED (TREE_TYPE (val)))
> - return INT_CST_LT_UNSIGNED (val, val2);
> - else
> - {
> - if (INT_CST_LT (val, val2))
> - return 1;
> - }
> - }
> + return INT_CST_LT (val, val2);
> else
> {
> tree tcmp;
> Index: gcc/tree.c
> ===================================================================
> --- gcc/tree.c 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/tree.c 2013-10-27 14:19:27.665578727 +0000
> @@ -6899,26 +6899,7 @@ tree_int_cst_equal (const_tree t1, const
> int
> tree_int_cst_lt (const_tree t1, const_tree t2)
> {
> - if (t1 == t2)
> - return 0;
> -
> - if (TYPE_UNSIGNED (TREE_TYPE (t1)) != TYPE_UNSIGNED (TREE_TYPE (t2)))
> - {
> - int t1_sgn = tree_int_cst_sgn (t1);
> - int t2_sgn = tree_int_cst_sgn (t2);
> -
> - if (t1_sgn < t2_sgn)
> - return 1;
> - else if (t1_sgn > t2_sgn)
> - return 0;
> - /* Otherwise, both are non-negative, so we compare them as
> - unsigned just in case one of them would overflow a signed
> - type. */
> - }
> - else if (!TYPE_UNSIGNED (TREE_TYPE (t1)))
> - return INT_CST_LT (t1, t2);
> -
> - return INT_CST_LT_UNSIGNED (t1, t2);
> + return INT_CST_LT (t1, t2);
> }
>
> /* Returns -1 if T1 < T2, 0 if T1 == T2, and 1 if T1 > T2. */
> @@ -6926,12 +6907,7 @@ tree_int_cst_lt (const_tree t1, const_tr
> int
> tree_int_cst_compare (const_tree t1, const_tree t2)
> {
> - if (tree_int_cst_lt (t1, t2))
> - return -1;
> - else if (tree_int_cst_lt (t2, t1))
> - return 1;
> - else
> - return 0;
> + return wi::cmps (wi::to_widest (t1), wi::to_widest (t2));
> }
>
> /* Return the HOST_WIDE_INT least significant bits of T, a sizetype
> @@ -8667,18 +8643,7 @@ int_fits_type_p (const_tree c, const_tre
> /* Check if c >= type_low_bound. */
> if (type_low_bound && TREE_CODE (type_low_bound) == INTEGER_CST)
> {
> - wd = type_low_bound;
> - if (sgn_c != TYPE_SIGN (TREE_TYPE (type_low_bound)))
> - {
> - int c_neg = (sgn_c == SIGNED && wi::neg_p (wc));
> - int t_neg = (sgn_c == UNSIGNED && wi::neg_p (wd));
> -
> - if (c_neg && !t_neg)
> - return false;
> - if ((c_neg || !t_neg) && wi::ltu_p (wc, wd))
> - return false;
> - }
> - else if (wi::lt_p (wc, wd, sgn_c))
> + if (INT_CST_LT (c, type_low_bound))
> return false;
> ok_for_low_bound = true;
> }
> @@ -8688,18 +8653,7 @@ int_fits_type_p (const_tree c, const_tre
> /* Check if c <= type_high_bound. */
> if (type_high_bound && TREE_CODE (type_high_bound) == INTEGER_CST)
> {
> - wd = type_high_bound;
> - if (sgn_c != TYPE_SIGN (TREE_TYPE (type_high_bound)))
> - {
> - int c_neg = (sgn_c == SIGNED && wi::neg_p (wc));
> - int t_neg = (sgn_c == UNSIGNED && wi::neg_p (wd));
> -
> - if (t_neg && !c_neg)
> - return false;
> - if ((t_neg || !c_neg) && wi::gtu_p (wc, wd))
> - return false;
> - }
> - else if (wi::gt_p (wc, wd, sgn_c))
> + if (INT_CST_LT (type_high_bound, c))
> return false;
> ok_for_high_bound = true;
> }
> Index: gcc/tree.h
> ===================================================================
> --- gcc/tree.h 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/tree.h 2013-10-27 14:19:27.672578790 +0000
> @@ -877,18 +877,15 @@ #define TREE_LANG_FLAG_6(NODE) \
>
> /* Define additional fields and accessors for nodes representing constants. */
>
> -#define INT_CST_LT(A, B) \
> - (wi::lts_p (A, B))
> -
> -#define INT_CST_LT_UNSIGNED(A, B) \
> - (wi::ltu_p (A, B))
> -
> #define TREE_INT_CST_NUNITS(NODE) \
> (INTEGER_CST_CHECK (NODE)->base.u.int_length.unextended)
> #define TREE_INT_CST_EXT_NUNITS(NODE) \
> (INTEGER_CST_CHECK (NODE)->base.u.int_length.extended)
> #define TREE_INT_CST_ELT(NODE, I) TREE_INT_CST_ELT_CHECK (NODE, I)
>
> +#define INT_CST_LT(A, B) (wi::lts_p (wi::to_widest (A), wi::to_widest (B)))
> +#define INT_CST_LE(A, B) (wi::les_p (wi::to_widest (A), wi::to_widest (B)))
> +
> #define TREE_REAL_CST_PTR(NODE) (REAL_CST_CHECK (NODE)->real_cst.real_cst_ptr)
> #define TREE_REAL_CST(NODE) (*TREE_REAL_CST_PTR (NODE))
>
> Index: gcc/wide-int.cc
> ===================================================================
> --- gcc/wide-int.cc 2013-10-27 14:11:53.006510519 +0000
> +++ gcc/wide-int.cc 2013-10-27 14:19:27.660578682 +0000
> @@ -416,22 +416,19 @@ wi::eq_p_large (const HOST_WIDE_INT *op0
> /* Return true if OP0 < OP1 using signed comparisons. */
> bool
> wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
> - unsigned int p0,
> - const HOST_WIDE_INT *op1, unsigned int op1len,
> - unsigned int p1)
> + unsigned int precision,
> + const HOST_WIDE_INT *op1, unsigned int op1len)
> {
> HOST_WIDE_INT s0, s1;
> unsigned HOST_WIDE_INT u0, u1;
> - unsigned int blocks_needed0 = BLOCKS_NEEDED (p0);
> - unsigned int blocks_needed1 = BLOCKS_NEEDED (p1);
> - unsigned int small_prec0 = p0 & (HOST_BITS_PER_WIDE_INT - 1);
> - unsigned int small_prec1 = p1 & (HOST_BITS_PER_WIDE_INT - 1);
> + unsigned int blocks_needed = BLOCKS_NEEDED (precision);
> + unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
> int l = MAX (op0len - 1, op1len - 1);
>
> /* Only the top block is compared as signed. The rest are unsigned
> comparisons. */
> - s0 = selt (op0, op0len, blocks_needed0, small_prec0, l, SIGNED);
> - s1 = selt (op1, op1len, blocks_needed1, small_prec1, l, SIGNED);
> + s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
> + s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
> if (s0 < s1)
> return true;
> if (s0 > s1)
> @@ -440,8 +437,8 @@ wi::lts_p_large (const HOST_WIDE_INT *op
> l--;
> while (l >= 0)
> {
> - u0 = selt (op0, op0len, blocks_needed0, small_prec0, l, SIGNED);
> - u1 = selt (op1, op1len, blocks_needed1, small_prec1, l, SIGNED);
> + u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
> + u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
>
> if (u0 < u1)
> return true;
> @@ -457,22 +454,19 @@ wi::lts_p_large (const HOST_WIDE_INT *op
> signed compares. */
> int
> wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
> - unsigned int p0,
> - const HOST_WIDE_INT *op1, unsigned int op1len,
> - unsigned int p1)
> + unsigned int precision,
> + const HOST_WIDE_INT *op1, unsigned int op1len)
> {
> HOST_WIDE_INT s0, s1;
> unsigned HOST_WIDE_INT u0, u1;
> - unsigned int blocks_needed0 = BLOCKS_NEEDED (p0);
> - unsigned int blocks_needed1 = BLOCKS_NEEDED (p1);
> - unsigned int small_prec0 = p0 & (HOST_BITS_PER_WIDE_INT - 1);
> - unsigned int small_prec1 = p1 & (HOST_BITS_PER_WIDE_INT - 1);
> + unsigned int blocks_needed = BLOCKS_NEEDED (precision);
> + unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
> int l = MAX (op0len - 1, op1len - 1);
>
> /* Only the top block is compared as signed. The rest are unsigned
> comparisons. */
> - s0 = selt (op0, op0len, blocks_needed0, small_prec0, l, SIGNED);
> - s1 = selt (op1, op1len, blocks_needed1, small_prec1, l, SIGNED);
> + s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
> + s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
> if (s0 < s1)
> return -1;
> if (s0 > s1)
> @@ -481,8 +475,8 @@ wi::cmps_large (const HOST_WIDE_INT *op0
> l--;
> while (l >= 0)
> {
> - u0 = selt (op0, op0len, blocks_needed0, small_prec0, l, SIGNED);
> - u1 = selt (op1, op1len, blocks_needed1, small_prec1, l, SIGNED);
> + u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
> + u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
>
> if (u0 < u1)
> return -1;
> @@ -496,21 +490,20 @@ wi::cmps_large (const HOST_WIDE_INT *op0
>
> /* Return true if OP0 < OP1 using unsigned comparisons. */
> bool
> -wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len, unsigned int p0,
> - const HOST_WIDE_INT *op1, unsigned int op1len, unsigned int p1)
> +wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
> + unsigned int precision,
> + const HOST_WIDE_INT *op1, unsigned int op1len)
> {
> unsigned HOST_WIDE_INT x0;
> unsigned HOST_WIDE_INT x1;
> - unsigned int blocks_needed0 = BLOCKS_NEEDED (p0);
> - unsigned int blocks_needed1 = BLOCKS_NEEDED (p1);
> - unsigned int small_prec0 = p0 & (HOST_BITS_PER_WIDE_INT - 1);
> - unsigned int small_prec1 = p1 & (HOST_BITS_PER_WIDE_INT - 1);
> + unsigned int blocks_needed = BLOCKS_NEEDED (precision);
> + unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
> int l = MAX (op0len - 1, op1len - 1);
>
> while (l >= 0)
> {
> - x0 = selt (op0, op0len, blocks_needed0, small_prec0, l, UNSIGNED);
> - x1 = selt (op1, op1len, blocks_needed1, small_prec1, l, UNSIGNED);
> + x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
> + x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
> if (x0 < x1)
> return true;
> if (x0 > x1)
> @@ -524,21 +517,20 @@ wi::ltu_p_large (const HOST_WIDE_INT *op
> /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
> unsigned compares. */
> int
> -wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len, unsigned int p0,
> - const HOST_WIDE_INT *op1, unsigned int op1len, unsigned int p1)
> +wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
> + unsigned int precision,
> + const HOST_WIDE_INT *op1, unsigned int op1len)
> {
> unsigned HOST_WIDE_INT x0;
> unsigned HOST_WIDE_INT x1;
> - unsigned int blocks_needed0 = BLOCKS_NEEDED (p0);
> - unsigned int blocks_needed1 = BLOCKS_NEEDED (p1);
> - unsigned int small_prec0 = p0 & (HOST_BITS_PER_WIDE_INT - 1);
> - unsigned int small_prec1 = p1 & (HOST_BITS_PER_WIDE_INT - 1);
> + unsigned int blocks_needed = BLOCKS_NEEDED (precision);
> + unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
> int l = MAX (op0len - 1, op1len - 1);
>
> while (l >= 0)
> {
> - x0 = selt (op0, op0len, blocks_needed0, small_prec0, l, UNSIGNED);
> - x1 = selt (op1, op1len, blocks_needed1, small_prec1, l, UNSIGNED);
> + x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
> + x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
> if (x0 < x1)
> return -1;
> if (x0 > x1)
> Index: gcc/wide-int.h
> ===================================================================
> --- gcc/wide-int.h 2013-10-27 14:19:26.641569564 +0000
> +++ gcc/wide-int.h 2013-10-27 14:19:27.660578682 +0000
> @@ -1355,13 +1355,13 @@ decompose (HOST_WIDE_INT *scratch, unsig
> bool eq_p_large (const HOST_WIDE_INT *, unsigned int,
> const HOST_WIDE_INT *, unsigned int, unsigned int);
> bool lts_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
> - const HOST_WIDE_INT *, unsigned int, unsigned int);
> + const HOST_WIDE_INT *, unsigned int);
> bool ltu_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
> - const HOST_WIDE_INT *, unsigned int, unsigned int);
> + const HOST_WIDE_INT *, unsigned int);
> int cmps_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
> - const HOST_WIDE_INT *, unsigned int, unsigned int);
> + const HOST_WIDE_INT *, unsigned int);
> int cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
> - const HOST_WIDE_INT *, unsigned int, unsigned int);
> + const HOST_WIDE_INT *, unsigned int);
> unsigned int sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
> unsigned int,
> unsigned int, unsigned int);
> @@ -1525,10 +1525,11 @@ wi::ne_p (const T1 &x, const T2 &y)
> inline bool
> wi::lts_p (const T1 &x, const T2 &y)
> {
> - WIDE_INT_REF_FOR (T1) xi (x);
> - WIDE_INT_REF_FOR (T2) yi (y);
> + unsigned int precision = get_binary_precision (x, y);
> + WIDE_INT_REF_FOR (T1) xi (x, precision);
> + WIDE_INT_REF_FOR (T2) yi (y, precision);
> // We optimize x < y, where y is 64 or fewer bits.
> - if (yi.precision <= HOST_BITS_PER_WIDE_INT)
> + if (wi::fits_shwi_p (yi))
> {
> // If x fits directly into a shwi, we can compare directly.
> if (wi::fits_shwi_p (xi))
> @@ -1541,8 +1542,7 @@ wi::lts_p (const T1 &x, const T2 &y)
> // and hence greater than y.
> return false;
> }
> - return lts_p_large (xi.val, xi.len, xi.precision, yi.val, yi.len,
> - yi.precision);
> + return lts_p_large (xi.val, xi.len, precision, yi.val, yi.len);
> }
>
> /* Return true if X < Y when both are treated as unsigned values. */
> @@ -1550,18 +1550,16 @@ wi::lts_p (const T1 &x, const T2 &y)
> inline bool
> wi::ltu_p (const T1 &x, const T2 &y)
> {
> - WIDE_INT_REF_FOR (T1) xi (x);
> - WIDE_INT_REF_FOR (T2) yi (y);
> - if (xi.precision <= HOST_BITS_PER_WIDE_INT
> - && yi.precision <= HOST_BITS_PER_WIDE_INT)
> + unsigned int precision = get_binary_precision (x, y);
> + WIDE_INT_REF_FOR (T1) xi (x, precision);
> + WIDE_INT_REF_FOR (T2) yi (y, precision);
> + if (precision <= HOST_BITS_PER_WIDE_INT)
> {
> unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
> unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
> return xl < yl;
> }
> - else
> - return ltu_p_large (xi.val, xi.len, xi.precision,
> - yi.val, yi.len, yi.precision);
> + return ltu_p_large (xi.val, xi.len, precision, yi.val, yi.len);
> }
>
> /* Return true if X < Y. Signedness of X and Y is indicated by SGN. */
> @@ -1662,10 +1660,10 @@ wi::ge_p (const T1 &x, const T2 &y, sign
> inline int
> wi::cmps (const T1 &x, const T2 &y)
> {
> - WIDE_INT_REF_FOR (T1) xi (x);
> - WIDE_INT_REF_FOR (T2) yi (y);
> - if (xi.precision <= HOST_BITS_PER_WIDE_INT
> - && yi.precision <= HOST_BITS_PER_WIDE_INT)
> + unsigned int precision = get_binary_precision (x, y);
> + WIDE_INT_REF_FOR (T1) xi (x, precision);
> + WIDE_INT_REF_FOR (T2) yi (y, precision);
> + if (precision <= HOST_BITS_PER_WIDE_INT)
> {
> HOST_WIDE_INT xl = xi.to_shwi ();
> HOST_WIDE_INT yl = yi.to_shwi ();
> @@ -1676,8 +1674,7 @@ wi::cmps (const T1 &x, const T2 &y)
> else
> return 0;
> }
> - return cmps_large (xi.val, xi.len, xi.precision, yi.val, yi.len,
> - yi.precision);
> + return cmps_large (xi.val, xi.len, precision, yi.val, yi.len);
> }
>
> /* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Treat both X and Y
> @@ -1686,10 +1683,10 @@ wi::cmps (const T1 &x, const T2 &y)
> inline int
> wi::cmpu (const T1 &x, const T2 &y)
> {
> - WIDE_INT_REF_FOR (T1) xi (x);
> - WIDE_INT_REF_FOR (T2) yi (y);
> - if (xi.precision <= HOST_BITS_PER_WIDE_INT
> - && yi.precision <= HOST_BITS_PER_WIDE_INT)
> + unsigned int precision = get_binary_precision (x, y);
> + WIDE_INT_REF_FOR (T1) xi (x, precision);
> + WIDE_INT_REF_FOR (T2) yi (y, precision);
> + if (precision <= HOST_BITS_PER_WIDE_INT)
> {
> unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
> unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
> @@ -1700,8 +1697,7 @@ wi::cmpu (const T1 &x, const T2 &y)
> else
> return 1;
> }
> - return cmpu_large (xi.val, xi.len, xi.precision, yi.val, yi.len,
> - yi.precision);
> + return cmpu_large (xi.val, xi.len, precision, yi.val, yi.len);
> }
>
> /* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Signedness of
>
>
--
Richard Biener <rguenther@suse.de>
SUSE / SUSE Labs
SUSE LINUX Products GmbH - Nuernberg - AG Nuernberg - HRB 16746
GF: Jeff Hawn, Jennifer Guild, Felix Imendörffer
More information about the Gcc-patches
mailing list