[wide-int] Add is_sign_extended optimisation
- From: Richard Sandiford <rdsandiford at googlemail dot com>
- To: zadeck at naturalbridge dot com, mikestump at comcast dot net, rguenther at suse dot de
- Cc: gcc-patches at gcc dot gnu dot org
- Date: Sun, 20 Oct 2013 11:57:05 +0100
- Subject: [wide-int] Add is_sign_extended optimisation
Another follow-up to yesterday's patch. This one implements Richard's
suggestion of having an is_sign_extended trait to optimise cases where
the excess upper bits are known to be copies of the sign bit rather
than undefined.
The uses so far are:
* make to_shwi () equivalent to slow ()
* turn eq_p into a simple loop
* avoid extensions in sign_mask ()
* avoid extensions in set_len if the input was already sign-extended
The first two are new (compared to wide-int svn) while the last two
partially undo the negative effects of yesterday's patch on is_sign_extended
values.
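As a rough illustration of the trait -- a standalone sketch using made-up
toy_int/sext helpers rather than the actual wide-int classes -- the point
is that when a storage class guarantees its top HWI is already
sign-extended, a sign_mask-style query can skip the excess-bit extension
entirely:

  #include <cassert>
  #include <stdint.h>

  typedef int64_t hwi;                    /* stand-in for HOST_WIDE_INT */
  static const unsigned int HWI_BITS = 64;

  /* Sign-extend the low PREC bits of VAL (simplified sext_hwi).  */
  static inline hwi
  sext (hwi val, unsigned int prec)
  {
    if (prec >= HWI_BITS)
      return val;
    return (hwi) ((uint64_t) val << (HWI_BITS - prec)) >> (HWI_BITS - prec);
  }

  /* A toy single-HWI integer with a runtime precision.  When SIGN_EXTENDED
     is true, the bits above the precision are guaranteed to be copies of
     the sign bit, so readers may use VAL directly.  */
  template <bool SIGN_EXTENDED>
  struct toy_int
  {
    static const bool is_sign_extended = SIGN_EXTENDED;
    hwi val;
    unsigned int precision;
  };

  /* sign_mask-style query: the excess-bit extension is only needed when
     the storage makes no sign-extension guarantee.  The "if" folds away
     because the trait is a compile-time constant.  */
  template <bool SE>
  hwi
  sign_mask (const toy_int <SE> &x)
  {
    hwi high = x.val;
    if (!SE)
      high = sext (high, x.precision);
    return high < 0 ? (hwi) -1 : 0;
  }

  int
  main ()
  {
    toy_int <true> a = { -1, 8 };      /* 0xff in 8 bits, already extended */
    toy_int <false> b = { 0xff, 8 };   /* same value, excess bits not extended */
    assert (sign_mask (a) == -1);
    assert (sign_mask (b) == -1);
    return 0;
  }

In the patch itself the guarantee is carried by wide_int_ref_storage <SE>
via WIDE_INT_REF_FOR, so rtx and wide_int inputs (is_sign_extended = true)
get the fast paths while trees (is_sign_extended = false) keep the old
behaviour.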
E.g.
bool
f (wide_int x, HOST_WIDE_INT y)
{
  return x == y;
}
now gives:
        xorl %eax, %eax
        cmpl $1, 264(%rsp)
        je .L27
        ret
        .p2align 4,,10
        .p2align 3
.L27:
        cmpq 8(%rsp), %rdi
        sete %al
        ret
And:
bool
f (wide_int x, wide_int y)
{
  return x == y;
}
gives:
        movl 264(%rsp), %ecx
        xorl %eax, %eax
        cmpl 528(%rsp), %ecx
        je .L42
        rep ret
        .p2align 4,,10
        .p2align 3
.L42:
        xorl %eax, %eax
        jmp .L38
        .p2align 4,,10
        .p2align 3
.L44:
        addl $1, %eax
        cmpl %eax, %ecx
        je .L43
.L38:
        movl %eax, %edx
        movq 272(%rsp,%rdx,8), %rsi
        cmpq %rsi, 8(%rsp,%rdx,8)
        je .L44
        xorl %eax, %eax
        ret
        .p2align 4,,10
        .p2align 3
.L43:
        movl $1, %eax
        ret
(which is a bit poor -- "je .L42" trivially threads to "je .L38",
although that's probably only true after RA).
The code for:
bool
f (wide_int x, unsigned HOST_WIDE_INT y)
{
  return x == y;
}
still needs some work though...
The lts_p sequences for wide_int are similar to the ones Mike posted.
Tested on x86_64-linux-gnu. OK for wide-int?
Thanks,
Richard
Index: gcc/rtl.h
===================================================================
--- gcc/rtl.h 2013-10-20 09:38:40.254493991 +0100
+++ gcc/rtl.h 2013-10-20 09:39:28.169894855 +0100
@@ -1410,6 +1410,7 @@ typedef std::pair <rtx, enum machine_mod
{
static const enum precision_type precision_type = VAR_PRECISION;
static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = true;
static unsigned int get_precision (const rtx_mode_t &);
static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
const rtx_mode_t &);
Index: gcc/tree.h
===================================================================
--- gcc/tree.h 2013-10-20 09:38:40.254493991 +0100
+++ gcc/tree.h 2013-10-20 09:39:28.170894863 +0100
@@ -5158,6 +5158,7 @@ #define ANON_AGGRNAME_FORMAT "__anon_%d"
{
static const enum precision_type precision_type = FLEXIBLE_PRECISION;
static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = false;
static unsigned int get_precision (const_tree);
static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
const_tree);
Index: gcc/wide-int.h
===================================================================
--- gcc/wide-int.h 2013-10-20 09:39:11.527755622 +0100
+++ gcc/wide-int.h 2013-10-20 09:45:17.725820291 +0100
@@ -335,8 +335,21 @@ #define WI_UNARY_RESULT_VAR(RESULT, VAL,
struct wide_int_storage;
typedef generic_wide_int <wide_int_storage> wide_int;
+template <bool SE>
struct wide_int_ref_storage;
-typedef generic_wide_int <wide_int_ref_storage> wide_int_ref;
+
+typedef generic_wide_int <wide_int_ref_storage <false> > wide_int_ref;
+
+/* This can be used instead of wide_int_ref if the referenced value is
+ known to have type T. It carries across properties of T's representation,
+ such as whether excess upper bits in a HWI are defined, and can therefore
+ help avoid redundant work.
+
+ The macro could be replaced with a template typedef, once we're able
+ to use those. */
+#define WIDE_INT_REF_FOR(T) \
+ generic_wide_int \
+ <wide_int_ref_storage <wi::int_traits <T>::is_sign_extended> >
/* Public functions for querying and operating on integers. */
namespace wi
@@ -520,18 +533,6 @@ wi::storage_ref::get_val () const
return val;
}
-namespace wi
-{
- template <>
- struct int_traits <wi::storage_ref>
- {
- static const enum precision_type precision_type = VAR_PRECISION;
- /* wi::storage_ref can be a reference to a primitive type,
- so this is the conservatively-correct setting. */
- static const bool host_dependent_precision = true;
- };
-}
-
/* This class defines an integer type using the storage provided by the
template argument. The storage class must provide the following
functions:
@@ -626,6 +627,9 @@ #define INCDEC_OPERATOR(OP, DELTA) \
#undef INCDEC_OPERATOR
char *dump (char *) const;
+
+ static const bool is_sign_extended
+ = wi::int_traits <generic_wide_int <storage> >::is_sign_extended;
};
template <typename storage>
@@ -653,7 +657,11 @@ inline generic_wide_int <storage>::gener
generic_wide_int <storage>::to_shwi (unsigned int precision) const
{
if (precision == 0)
- precision = this->get_precision ();
+ {
+ if (is_sign_extended)
+ return this->get_val ()[0];
+ precision = this->get_precision ();
+ }
if (precision < HOST_BITS_PER_WIDE_INT)
return sext_hwi (this->get_val ()[0], precision);
else
@@ -692,11 +700,14 @@ generic_wide_int <storage>::to_short_add
generic_wide_int <storage>::sign_mask () const
{
unsigned int len = this->get_len ();
- unsigned int precision = this->get_precision ();
unsigned HOST_WIDE_INT high = this->get_val ()[len - 1];
- int excess = len * HOST_BITS_PER_WIDE_INT - precision;
- if (excess > 0)
- high <<= excess;
+ if (!is_sign_extended)
+ {
+ unsigned int precision = this->get_precision ();
+ int excess = len * HOST_BITS_PER_WIDE_INT - precision;
+ if (excess > 0)
+ high <<= excess;
+ }
return HOST_WIDE_INT (high) < 0 ? -1 : 0;
}
@@ -781,6 +792,7 @@ decompose (HOST_WIDE_INT *, unsigned int
/* Provide the storage for a wide_int_ref. This acts like a read-only
wide_int, with the optimization that VAL is normally a pointer to
another integer's storage, so that no array copy is needed. */
+template <bool SE>
struct wide_int_ref_storage : public wi::storage_ref
{
private:
@@ -799,17 +811,19 @@ struct wide_int_ref_storage : public wi:
/* Create a reference to integer X in its natural precision. Note
that the natural precision is host-dependent for primitive
types. */
+template <bool SE>
template <typename T>
-inline wide_int_ref_storage::wide_int_ref_storage (const T &x)
+inline wide_int_ref_storage <SE>::wide_int_ref_storage (const T &x)
: storage_ref (wi::int_traits <T>::decompose (scratch,
wi::get_precision (x), x))
{
}
/* Create a reference to integer X in precision PRECISION. */
+template <bool SE>
template <typename T>
-inline wide_int_ref_storage::wide_int_ref_storage (const T &x,
- unsigned int precision)
+inline wide_int_ref_storage <SE>::wide_int_ref_storage (const T &x,
+ unsigned int precision)
: storage_ref (wi::int_traits <T>::decompose (scratch, precision, x))
{
}
@@ -817,9 +831,14 @@ inline wide_int_ref_storage::wide_int_re
namespace wi
{
template <>
- struct int_traits <wide_int_ref_storage>
- : public int_traits <wi::storage_ref>
+ template <bool SE>
+ struct int_traits <wide_int_ref_storage <SE> >
{
+ static const enum precision_type precision_type = VAR_PRECISION;
+ /* wi::storage_ref can be a reference to a primitive type,
+ so this is the conservatively-correct setting. */
+ static const bool host_dependent_precision = true;
+ static const bool is_sign_extended = SE;
};
}
@@ -850,7 +869,7 @@ class GTY(()) wide_int_storage
const HOST_WIDE_INT *get_val () const;
unsigned int get_len () const;
HOST_WIDE_INT *write_val ();
- void set_len (unsigned int);
+ void set_len (unsigned int, bool = false);
static wide_int from (const wide_int_ref &, unsigned int, signop);
static wide_int from_array (const HOST_WIDE_INT *, unsigned int,
@@ -871,7 +890,7 @@ inline wide_int_storage::wide_int_storag
inline wide_int_storage::wide_int_storage (const T &x)
{
STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision);
- wide_int_ref xi (x);
+ WIDE_INT_REF_FOR (T) xi (x);
precision = xi.precision;
wi::copy (*this, xi);
}
@@ -901,10 +920,10 @@ wide_int_storage::write_val ()
}
inline void
-wide_int_storage::set_len (unsigned int l)
+wide_int_storage::set_len (unsigned int l, bool is_sign_extended)
{
len = l;
- if (len * HOST_BITS_PER_WIDE_INT > precision)
+ if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > precision)
val[len - 1] = sext_hwi (val[len - 1],
precision % HOST_BITS_PER_WIDE_INT);
}
@@ -951,6 +970,7 @@ wide_int_storage::create (unsigned int p
static const enum precision_type precision_type = VAR_PRECISION;
/* Guaranteed by a static assert in the wide_int_storage constructor. */
static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = true;
template <typename T1, typename T2>
static wide_int get_binary_result (const T1 &, const T2 &);
};
@@ -981,7 +1001,7 @@ wi::copy (T1 &x, const T2 &y)
do
xval[i] = yval[i];
while (++i < len);
- x.set_len (len);
+ x.set_len (len, y.is_sign_extended);
}
/* An N-bit integer. Until we can use typedef templates, use this instead. */
@@ -1006,7 +1026,7 @@ class GTY(()) fixed_wide_int_storage
const HOST_WIDE_INT *get_val () const;
unsigned int get_len () const;
HOST_WIDE_INT *write_val ();
- void set_len (unsigned int);
+ void set_len (unsigned int, bool = false);
static FIXED_WIDE_INT (N) from (const wide_int_ref &, signop);
static FIXED_WIDE_INT (N) from_array (const HOST_WIDE_INT *, unsigned int,
@@ -1027,7 +1047,7 @@ inline fixed_wide_int_storage <N>::fixed
/* Check for type compatibility. We don't want to initialize a
fixed-width integer from something like a wide_int. */
WI_BINARY_RESULT (T, FIXED_WIDE_INT (N)) *assertion ATTRIBUTE_UNUSED;
- wi::copy (*this, wide_int_ref (x, N));
+ wi::copy (*this, WIDE_INT_REF_FOR (T) (x, N));
}
template <int N>
@@ -1060,7 +1080,7 @@ fixed_wide_int_storage <N>::write_val ()
template <int N>
inline void
-fixed_wide_int_storage <N>::set_len (unsigned int l)
+fixed_wide_int_storage <N>::set_len (unsigned int l, bool)
{
len = l;
/* There are no excess bits in val[len - 1]. */
@@ -1101,6 +1121,7 @@ fixed_wide_int_storage <N>::from_array (
{
static const enum precision_type precision_type = CONST_PRECISION;
static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = true;
static const unsigned int precision = N;
template <typename T1, typename T2>
static FIXED_WIDE_INT (N) get_binary_result (const T1 &, const T2 &);
@@ -1190,6 +1211,7 @@ get_binary_result (const T1 &, const T2
{
static const enum precision_type precision_type = FLEXIBLE_PRECISION;
static const bool host_dependent_precision = true;
+ static const bool is_sign_extended = true;
static unsigned int get_precision (T);
static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, T);
};
@@ -1316,6 +1338,7 @@ wi::two (unsigned int precision)
/* hwi_with_prec has an explicitly-given precision, rather than the
precision of HOST_WIDE_INT. */
static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = true;
static unsigned int get_precision (const wi::hwi_with_prec &);
static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
const wi::hwi_with_prec &);
@@ -1427,7 +1450,7 @@ wi::get_binary_precision (const T1 &x, c
inline bool
wi::fits_shwi_p (const T &x)
{
- wide_int_ref xi (x);
+ WIDE_INT_REF_FOR (T) xi (x);
return xi.len == 1;
}
@@ -1437,7 +1460,7 @@ wi::fits_shwi_p (const T &x)
inline bool
wi::fits_uhwi_p (const T &x)
{
- wide_int_ref xi (x);
+ WIDE_INT_REF_FOR (T) xi (x);
if (xi.precision <= HOST_BITS_PER_WIDE_INT)
return true;
if (xi.len == 1)
@@ -1451,7 +1474,7 @@ wi::fits_uhwi_p (const T &x)
inline bool
wi::neg_p (const T &x, signop sgn)
{
- wide_int_ref xi (x);
+ WIDE_INT_REF_FOR (T) xi (x);
if (sgn == UNSIGNED)
return false;
return xi.sign_mask () < 0;
@@ -1463,7 +1486,7 @@ wi::neg_p (const T &x, signop sgn)
inline HOST_WIDE_INT
wi::sign_mask (const T &x)
{
- wide_int_ref xi (x);
+ WIDE_INT_REF_FOR (T) xi (x);
return xi.sign_mask ();
}
@@ -1473,10 +1496,19 @@ wi::sign_mask (const T &x)
wi::eq_p (const T1 &x, const T2 &y)
{
unsigned int precision = get_binary_precision (x, y);
- if (precision == 0)
- return true;
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y, precision);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (xi.is_sign_extended && yi.is_sign_extended)
+ {
+ if (xi.len != yi.len)
+ return false;
+ unsigned int i = 0;
+ do
+ if (xi.val[i] != yi.val[i])
+ return false;
+ while (++i != xi.len);
+ return true;
+ }
if (precision <= HOST_BITS_PER_WIDE_INT)
{
unsigned HOST_WIDE_INT diff = xi.ulow () ^ yi.ulow ();
@@ -1502,8 +1534,8 @@ wi::lts_p (const T1 &x, const T2 &y)
// We have to be careful to not allow comparison to a large positive
// unsigned value like 0x8000000000000000, those would be encoded
// with a y.len == 2.
- wide_int_ref xi (x);
- wide_int_ref yi (y);
+ WIDE_INT_REF_FOR (T1) xi (x);
+ WIDE_INT_REF_FOR (T2) yi (y);
if (yi.precision <= HOST_BITS_PER_WIDE_INT
&& yi.len == 1)
{
@@ -1527,8 +1559,8 @@ wi::lts_p (const T1 &x, const T2 &y)
inline bool
wi::ltu_p (const T1 &x, const T2 &y)
{
- wide_int_ref xi (x);
- wide_int_ref yi (y);
+ WIDE_INT_REF_FOR (T1) xi (x);
+ WIDE_INT_REF_FOR (T2) yi (y);
if (xi.precision <= HOST_BITS_PER_WIDE_INT
&& yi.precision <= HOST_BITS_PER_WIDE_INT)
{
@@ -1639,8 +1671,8 @@ wi::ge_p (const T1 &x, const T2 &y, sign
inline int
wi::cmps (const T1 &x, const T2 &y)
{
- wide_int_ref xi (x);
- wide_int_ref yi (y);
+ WIDE_INT_REF_FOR (T1) xi (x);
+ WIDE_INT_REF_FOR (T2) yi (y);
if (xi.precision <= HOST_BITS_PER_WIDE_INT
&& yi.precision <= HOST_BITS_PER_WIDE_INT)
{
@@ -1663,8 +1695,8 @@ wi::cmps (const T1 &x, const T2 &y)
inline int
wi::cmpu (const T1 &x, const T2 &y)
{
- wide_int_ref xi (x);
- wide_int_ref yi (y);
+ WIDE_INT_REF_FOR (T1) xi (x);
+ WIDE_INT_REF_FOR (T2) yi (y);
if (xi.precision <= HOST_BITS_PER_WIDE_INT
&& yi.precision <= HOST_BITS_PER_WIDE_INT)
{
@@ -1699,7 +1731,7 @@ inline WI_UNARY_RESULT (T)
wi::bit_not (const T &x)
{
WI_UNARY_RESULT_VAR (result, val, T, x);
- wide_int_ref xi (x, get_precision (result));
+ WIDE_INT_REF_FOR (T) xi (x, get_precision (result));
for (unsigned int i = 0; i < xi.len; ++i)
val[i] = ~xi.val[i];
result.set_len (xi.len);
@@ -1738,12 +1770,12 @@ wi::sext (const T &x, unsigned int offse
{
WI_UNARY_RESULT_VAR (result, val, T, x);
unsigned int precision = get_precision (result);
- wide_int_ref xi (x, precision);
+ WIDE_INT_REF_FOR (T) xi (x, precision);
if (offset <= HOST_BITS_PER_WIDE_INT)
{
val[0] = sext_hwi (xi.ulow (), offset);
- result.set_len (1);
+ result.set_len (1, true);
}
else
result.set_len (sext_large (val, xi.val, xi.len, precision, offset));
@@ -1757,7 +1789,7 @@ wi::zext (const T &x, unsigned int offse
{
WI_UNARY_RESULT_VAR (result, val, T, x);
unsigned int precision = get_precision (result);
- wide_int_ref xi (x, precision);
+ WIDE_INT_REF_FOR (T) xi (x, precision);
/* This is not just an optimization, it is actually required to
maintain canonization. */
@@ -1793,7 +1825,7 @@ wi::set_bit (const T &x, unsigned int bi
{
WI_UNARY_RESULT_VAR (result, val, T, x);
unsigned int precision = get_precision (result);
- wide_int_ref xi (x, precision);
+ WIDE_INT_REF_FOR (T) xi (x, precision);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
val[0] = xi.ulow () | ((unsigned HOST_WIDE_INT) 1 << bit);
@@ -1813,9 +1845,9 @@ wi::min (const T1 &x, const T2 &y, signo
WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y);
unsigned int precision = get_precision (result);
if (wi::le_p (x, y, sgn))
- wi::copy (result, wide_int_ref (x, precision));
+ wi::copy (result, WIDE_INT_REF_FOR (T1) (x, precision));
else
- wi::copy (result, wide_int_ref (y, precision));
+ wi::copy (result, WIDE_INT_REF_FOR (T2) (y, precision));
return result;
}
@@ -1844,9 +1876,9 @@ wi::max (const T1 &x, const T2 &y, signo
WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y);
unsigned int precision = get_precision (result);
if (wi::ge_p (x, y, sgn))
- wi::copy (result, wide_int_ref (x, precision));
+ wi::copy (result, WIDE_INT_REF_FOR (T1) (x, precision));
else
- wi::copy (result, wide_int_ref (y, precision));
+ wi::copy (result, WIDE_INT_REF_FOR (T2) (y, precision));
return result;
}
@@ -1873,16 +1905,17 @@ wi::bit_and (const T1 &x, const T2 &y)
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y, precision);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
if (xi.len + yi.len == 2)
{
val[0] = xi.ulow () & yi.ulow ();
- result.set_len (1);
+ result.set_len (1, is_sign_extended);
}
else
result.set_len (and_large (val, xi.val, xi.len, yi.val, yi.len,
- precision));
+ precision), is_sign_extended);
return result;
}
@@ -1893,16 +1926,17 @@ wi::bit_and_not (const T1 &x, const T2 &
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y, precision);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
if (xi.len + yi.len == 2)
{
val[0] = xi.ulow () & ~yi.ulow ();
- result.set_len (1);
+ result.set_len (1, is_sign_extended);
}
else
result.set_len (and_not_large (val, xi.val, xi.len, yi.val, yi.len,
- precision));
+ precision), is_sign_extended);
return result;
}
@@ -1913,16 +1947,17 @@ wi::bit_or (const T1 &x, const T2 &y)
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y, precision);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
if (xi.len + yi.len == 2)
{
val[0] = xi.ulow () | yi.ulow ();
- result.set_len (1);
+ result.set_len (1, is_sign_extended);
}
else
result.set_len (or_large (val, xi.val, xi.len,
- yi.val, yi.len, precision));
+ yi.val, yi.len, precision), is_sign_extended);
return result;
}
@@ -1933,16 +1968,17 @@ wi::bit_or_not (const T1 &x, const T2 &y
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y, precision);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
if (xi.len + yi.len == 2)
{
val[0] = xi.ulow () | ~yi.ulow ();
- result.set_len (1);
+ result.set_len (1, is_sign_extended);
}
else
result.set_len (or_not_large (val, xi.val, xi.len, yi.val, yi.len,
- precision));
+ precision), is_sign_extended);
return result;
}
@@ -1953,16 +1989,17 @@ wi::bit_xor (const T1 &x, const T2 &y)
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y, precision);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
if (xi.len + yi.len == 2)
{
val[0] = xi.ulow () ^ yi.ulow ();
- result.set_len (1);
+ result.set_len (1, is_sign_extended);
}
else
result.set_len (xor_large (val, xi.val, xi.len,
- yi.val, yi.len, precision));
+ yi.val, yi.len, precision), is_sign_extended);
return result;
}
@@ -1973,12 +2010,12 @@ wi::add (const T1 &x, const T2 &y)
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y, precision);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
val[0] = xi.ulow () + yi.ulow ();
- result.set_len (1);
+ result.set_len (1, xi.is_sign_extended && yi.is_sign_extended);
}
else
result.set_len (add_large (val, xi.val, xi.len,
@@ -1995,8 +2032,8 @@ wi::add (const T1 &x, const T2 &y, signo
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y, precision);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
unsigned HOST_WIDE_INT xl = xi.ulow ();
@@ -2027,8 +2064,8 @@ wi::sub (const T1 &x, const T2 &y)
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y, precision);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
val[0] = xi.ulow () - yi.ulow ();
@@ -2049,8 +2086,8 @@ wi::sub (const T1 &x, const T2 &y, signo
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y, precision);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
unsigned HOST_WIDE_INT xl = xi.ulow ();
@@ -2080,8 +2117,8 @@ wi::mul (const T1 &x, const T2 &y)
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y, precision);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
val[0] = xi.ulow () * yi.ulow ();
@@ -2101,8 +2138,8 @@ wi::mul (const T1 &x, const T2 &y, signo
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y, precision);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
result.set_len (mul_internal (val, xi.val, xi.len,
yi.val, yi.len, precision,
sgn, overflow, false, false));
@@ -2135,8 +2172,8 @@ wi::mul_high (const T1 &x, const T2 &y,
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y, precision);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
result.set_len (mul_internal (val, xi.val, xi.len,
yi.val, yi.len, precision,
sgn, 0, true, false));
@@ -2152,8 +2189,8 @@ wi::div_trunc (const T1 &x, const T2 &y,
{
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
unsigned int precision = get_precision (quotient);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
quotient.set_len (divmod_internal (quotient_val, 0, 0, xi.val, xi.len,
precision,
@@ -2188,8 +2225,8 @@ wi::div_floor (const T1 &x, const T2 &y,
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
unsigned int precision = get_precision (quotient);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
quotient.set_len (divmod_internal (quotient_val,
@@ -2230,8 +2267,8 @@ wi::div_ceil (const T1 &x, const T2 &y,
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
unsigned int precision = get_precision (quotient);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
quotient.set_len (divmod_internal (quotient_val,
@@ -2255,8 +2292,8 @@ wi::div_round (const T1 &x, const T2 &y,
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
unsigned int precision = get_precision (quotient);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
quotient.set_len (divmod_internal (quotient_val,
@@ -2299,8 +2336,8 @@ wi::divmod_trunc (const T1 &x, const T2
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
unsigned int precision = get_precision (quotient);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
quotient.set_len (divmod_internal (quotient_val,
@@ -2322,8 +2359,8 @@ wi::mod_trunc (const T1 &x, const T2 &y,
{
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
unsigned int precision = get_precision (remainder);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
divmod_internal (0, &remainder_len, remainder_val,
@@ -2362,8 +2399,8 @@ wi::mod_floor (const T1 &x, const T2 &y,
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
unsigned int precision = get_precision (quotient);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
quotient.set_len (divmod_internal (quotient_val,
@@ -2398,8 +2435,8 @@ wi::mod_ceil (const T1 &x, const T2 &y,
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
unsigned int precision = get_precision (quotient);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
quotient.set_len (divmod_internal (quotient_val,
@@ -2424,8 +2461,8 @@ wi::mod_round (const T1 &x, const T2 &y,
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
unsigned int precision = get_precision (quotient);
- wide_int_ref xi (x, precision);
- wide_int_ref yi (y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
unsigned int remainder_len;
quotient.set_len (divmod_internal (quotient_val,
@@ -2499,7 +2536,7 @@ wi::lshift (const T &x, const wide_int_r
{
WI_UNARY_RESULT_VAR (result, val, T, x);
unsigned int precision = get_precision (result);
- wide_int_ref xi (x, precision);
+ WIDE_INT_REF_FOR (T) xi (x, precision);
unsigned int shift = trunc_shift (y, bitsize, precision);
/* Handle the simple cases quickly. */
if (shift >= precision)
@@ -2527,7 +2564,7 @@ wi::lrshift (const T &x, const wide_int_
WI_UNARY_RESULT_VAR (result, val, T, x);
/* Do things in the precision of the input rather than the output,
since the result can be no larger than that. */
- wide_int_ref xi (x);
+ WIDE_INT_REF_FOR (T) xi (x);
unsigned int shift = trunc_shift (y, bitsize, xi.precision);
/* Handle the simple cases quickly. */
if (shift >= xi.precision)
@@ -2555,7 +2592,7 @@ wi::arshift (const T &x, const wide_int_
WI_UNARY_RESULT_VAR (result, val, T, x);
/* Do things in the precision of the input rather than the output,
since the result can be no larger than that. */
- wide_int_ref xi (x);
+ WIDE_INT_REF_FOR (T) xi (x);
unsigned int shift = trunc_shift (y, bitsize, xi.precision);
/* Handle the simple case quickly. */
if (shift >= xi.precision)
@@ -2566,7 +2603,7 @@ wi::arshift (const T &x, const wide_int_
else if (xi.precision <= HOST_BITS_PER_WIDE_INT)
{
val[0] = sext_hwi (xi.ulow () >> shift, xi.precision - shift);
- result.set_len (1);
+ result.set_len (1, true);
}
else
result.set_len (arshift_large (val, xi.val, xi.len, xi.precision,
@@ -2641,7 +2678,7 @@ wi::extract_uhwi (const T &x, unsigned i
unsigned precision = get_precision (x);
if (precision < bitpos + width)
precision = bitpos + width;
- wide_int_ref xi (x, precision);
+ WIDE_INT_REF_FOR (T) xi (x, precision);
/* Handle this rare case after the above, so that we assert about
bogus BITPOS values. */