This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.


Re: [wide-int] Handle zero-precision INTEGER_CSTs again


Kenneth Zadeck <zadeck@naturalbridge.com> writes:
> Then with a fixed comment, this patch is fine.

OK, here's what I committed.

Richard


Index: gcc/wide-int.cc
===================================================================
--- gcc/wide-int.cc	2014-05-03 07:59:36.274750108 +0100
+++ gcc/wide-int.cc	2014-05-04 20:48:44.939829293 +0100
@@ -48,8 +48,6 @@ static const HOST_WIDE_INT zeros[WIDE_IN
 #define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
 
 #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
-#define BLOCKS_NEEDED(PREC) \
-  (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
 #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
 
 /* Return the value a VAL[I] if I < LEN, otherwise, return 0 or -1
@@ -69,20 +67,20 @@ safe_uhwi (const HOST_WIDE_INT *val, uns
 static unsigned int
 canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
 {
-  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+  unsigned int blocks_needed = wi::blocks_needed (precision);
   HOST_WIDE_INT top;
   int i;
 
   if (len > blocks_needed)
     len = blocks_needed;
 
+  if (wi::excess_bits (len, precision) > 0)
+    val[len - 1] = sext_hwi (val[len - 1], precision % HOST_BITS_PER_WIDE_INT);
   if (len == 1)
     return len;
 
   top = val[len - 1];
-  if (len * HOST_BITS_PER_WIDE_INT > precision)
-    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
-  if (top != 0 && top != (HOST_WIDE_INT)-1)
+  if (top != 0 && top != (HOST_WIDE_INT) -1)
     return len;
 
   /* At this point we know that the top is either 0 or -1.  Find the
@@ -134,7 +132,7 @@ wi::from_buffer (const unsigned char *bu
 
   /* We have to clear all the bits ourself, as we merely or in values
      below.  */
-  unsigned int len = BLOCKS_NEEDED (precision);
+  unsigned int len = wi::blocks_needed (precision);
   HOST_WIDE_INT *val = result.write_val ();
   for (unsigned int i = 0; i < len; ++i)
     val[i] = 0;
@@ -180,7 +178,7 @@ wi::to_mpz (const wide_int_ref &x, mpz_t
 {
   int len = x.get_len ();
   const HOST_WIDE_INT *v = x.get_val ();
-  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
+  unsigned int excess = wi::excess_bits (len, x.get_precision ());
 
   if (wi::neg_p (x, sgn))
     {
@@ -306,7 +304,8 @@ wi::force_to_size (HOST_WIDE_INT *val, c
 		   unsigned int xlen, unsigned int xprecision,
 		   unsigned int precision, signop sgn)
 {
-  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+  unsigned int blocks_needed = wi::blocks_needed (precision);
+  unsigned int xblocks_needed = wi::blocks_needed (xprecision);
   unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
   for (unsigned i = 0; i < len; i++)
     val[i] = xval[i];
@@ -318,11 +317,11 @@ wi::force_to_size (HOST_WIDE_INT *val, c
       /* Expanding.  */
       if (sgn == UNSIGNED)
 	{
-	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
+	  if (small_xprecision && len == xblocks_needed)
 	    val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
 	  else if (val[len - 1] < 0)
 	    {
-	      while (len < BLOCKS_NEEDED (xprecision))
+	      while (len < xblocks_needed)
 		val[len++] = -1;
 	      if (small_xprecision)
 		val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
@@ -332,7 +331,7 @@ wi::force_to_size (HOST_WIDE_INT *val, c
 	}
       else
 	{
-	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
+	  if (small_xprecision && len == xblocks_needed)
 	    val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
 	}
     }
@@ -372,10 +371,8 @@ selt (const HOST_WIDE_INT *a, unsigned i
 static inline HOST_WIDE_INT
 top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
 {
-  int excess = len * HOST_BITS_PER_WIDE_INT - prec;
   unsigned HOST_WIDE_INT val = a[len - 1];
-  if (excess > 0)
-    val <<= excess;
+  val <<= wi::excess_bits (len, prec);
   return val >> (HOST_BITS_PER_WIDE_INT - 1);
 }
 
@@ -391,28 +388,16 @@ wi::eq_p_large (const HOST_WIDE_INT *op0
 		const HOST_WIDE_INT *op1, unsigned int op1len,
 		unsigned int prec)
 {
-  int l0 = op0len - 1;
-  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
-
   if (op0len != op1len)
     return false;
 
-  if (op0len == BLOCKS_NEEDED (prec) && small_prec)
-    {
-      /* It does not matter if we zext or sext here, we just have to
-	 do both the same way.  */
-      if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
-	return false;
-      l0--;
-    }
-
-  while (l0 >= 0)
-    if (op0[l0] != op1[l0])
+  for (unsigned int i = 0; i < op0len - 1; i++)
+    if (op0[i] != op1[i])
       return false;
-    else
-      l0--;
 
-  return true;
+  unsigned HOST_WIDE_INT top0 = op0[op0len - 1];
+  unsigned HOST_WIDE_INT top1 = op1[op1len - 1];
+  return ((top0 ^ top1) << wi::excess_bits (op0len, prec)) == 0;
 }
 
 /* Return true if OP0 < OP1 using signed comparisons.  */
@@ -423,7 +408,7 @@ wi::lts_p_large (const HOST_WIDE_INT *op
 {
   HOST_WIDE_INT s0, s1;
   unsigned HOST_WIDE_INT u0, u1;
-  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+  unsigned int blocks_needed = wi::blocks_needed (precision);
   unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
   int l = MAX (op0len - 1, op1len - 1);
 
@@ -461,7 +446,7 @@ wi::cmps_large (const HOST_WIDE_INT *op0
 {
   HOST_WIDE_INT s0, s1;
   unsigned HOST_WIDE_INT u0, u1;
-  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+  unsigned int blocks_needed = wi::blocks_needed (precision);
   unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
   int l = MAX (op0len - 1, op1len - 1);
 
@@ -498,7 +483,7 @@ wi::ltu_p_large (const HOST_WIDE_INT *op
 {
   unsigned HOST_WIDE_INT x0;
   unsigned HOST_WIDE_INT x1;
-  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+  unsigned int blocks_needed = wi::blocks_needed (precision);
   unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
   int l = MAX (op0len - 1, op1len - 1);
 
@@ -525,7 +510,7 @@ wi::cmpu_large (const HOST_WIDE_INT *op0
 {
   unsigned HOST_WIDE_INT x0;
   unsigned HOST_WIDE_INT x1;
-  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+  unsigned int blocks_needed = wi::blocks_needed (precision);
   unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
   int l = MAX (op0len - 1, op1len - 1);
 
@@ -673,7 +658,7 @@ wide_int_storage::bswap () const
 {
   wide_int result = wide_int::create (precision);
   unsigned int i, s;
-  unsigned int len = BLOCKS_NEEDED (precision);
+  unsigned int len = wi::blocks_needed (precision);
   unsigned int xlen = get_len ();
   const HOST_WIDE_INT *xval = get_val ();
   HOST_WIDE_INT *val = result.write_val ();
@@ -1148,7 +1133,7 @@ wi_unpack (unsigned HOST_HALF_WIDE_INT *
   unsigned int i;
   unsigned int j = 0;
   unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
-  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
+  unsigned int blocks_needed = wi::blocks_needed (prec);
   HOST_WIDE_INT mask;
 
   if (sgn == SIGNED)
@@ -1225,7 +1210,7 @@ wi::mul_internal (HOST_WIDE_INT *val, co
   unsigned HOST_WIDE_INT o0, o1, k, t;
   unsigned int i;
   unsigned int j;
-  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
+  unsigned int blocks_needed = wi::blocks_needed (prec);
   unsigned int half_blocks_needed = blocks_needed * 2;
   /* The sizes here are scaled to support a 2x largest mode by 2x
      largest mode yielding a 4x largest mode result.  This is what is
@@ -1427,6 +1412,9 @@ wi::popcount (const wide_int_ref &x)
   unsigned int i;
   int count;
 
+  if (x.precision == 0)
+    return 0;
+
   /* The high order block is special if it is the last block and the
      precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
      have to clear out any ones above the precision before doing
@@ -1647,8 +1635,8 @@ wi::divmod_internal (HOST_WIDE_INT *quot
 		     unsigned int divisor_prec, signop sgn,
 		     bool *oflow)
 {
-  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
-  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
+  unsigned int dividend_blocks_needed = 2 * wi::blocks_needed (dividend_prec);
+  unsigned int divisor_blocks_needed = 2 * wi::blocks_needed (divisor_prec);
   unsigned HOST_HALF_WIDE_INT
     b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
   unsigned HOST_HALF_WIDE_INT
@@ -1673,7 +1661,7 @@ wi::divmod_internal (HOST_WIDE_INT *quot
   /* The smallest signed number / -1 causes overflow.  The dividend_len
      check is for speed rather than correctness.  */
   if (sgn == SIGNED
-      && dividend_len == BLOCKS_NEEDED (dividend_prec)
+      && dividend_len == wi::blocks_needed (dividend_prec)
       && divisor == -1
       && wi::only_sign_bit_p (dividend))
     overflow = true;
@@ -1831,7 +1819,7 @@ wi::lshift_large (HOST_WIDE_INT *val, co
   unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
 
   /* The whole-block shift fills with zeros.  */
-  unsigned int len = BLOCKS_NEEDED (precision);
+  unsigned int len = wi::blocks_needed (precision);
   for (unsigned int i = 0; i < skip; ++i)
     val[i] = 0;
 
@@ -1869,7 +1857,7 @@ rshift_large_common (HOST_WIDE_INT *val,
 
   /* Work out how many blocks are needed to store the significant bits
      (excluding the upper zeros or signs).  */
-  unsigned int len = BLOCKS_NEEDED (xprecision - shift);
+  unsigned int len = wi::blocks_needed (xprecision - shift);
 
   /* It's easier to handle the simple block case specially.  */
   if (small_shift == 0)
@@ -1942,6 +1930,9 @@ wi::arshift_large (HOST_WIDE_INT *val, c
 int
 wi::clz (const wide_int_ref &x)
 {
+  if (x.precision == 0)
+    return 0;
+
   /* Calculate how many bits there above the highest represented block.  */
   int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
 
@@ -1966,6 +1957,9 @@ wi::clz (const wide_int_ref &x)
 int
 wi::clrsb (const wide_int_ref &x)
 {
+  if (x.precision == 0)
+    return 0;
+
   /* Calculate how many bits there above the highest represented block.  */
   int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
 
@@ -2010,6 +2004,9 @@ wi::ctz (const wide_int_ref &x)
 int
 wi::exact_log2 (const wide_int_ref &x)
 {
+  if (x.precision == 0)
+    return -1;
+
   /* Reject cases where there are implicit -1 blocks above HIGH.  */
   if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
     return -1;
Index: gcc/wide-int.h
===================================================================
--- gcc/wide-int.h	2014-05-03 07:58:04.005175780 +0100
+++ gcc/wide-int.h	2014-05-04 20:53:28.131085933 +0100
@@ -127,13 +127,16 @@ #define WIDE_INT_H
    Since most integers used in a compiler are small values, it is
    generally profitable to use a representation of the value that is
    as small as possible.  LEN is used to indicate the number of
-   elements of the vector that are in use.  The numbers are stored as
-   sign extended numbers as a means of compression.  Leading
-   HOST_WIDE_INTs that contain strings of either -1 or 0 are removed
-   as long as they can be reconstructed from the top bit that is being
-   represented.
+   elements of the vector that are in use, which is always at least one.
+   The numbers are stored as sign extended numbers as a means of
+   compression.  Leading HOST_WIDE_INTs that contain strings of either
+   -1 or 0 are removed as long as they can be reconstructed from the top
+   bit that is being represented.
+
+   wide_ints can have a precision of 0.  This is needed to handle things
+   like void_zero_node in the C and C++ front ends.  In this case the
+   stored value is always 0 and the length is always 1.
 
-   The precision and length of a wide_int are always greater than 0.
    Any bits in a wide_int above the precision are sign-extended from the
    most significant bit.  For example, a 4-bit value 0x8 is represented as
    VAL = { 0xf...fff8 }.  However, as an optimization, we allow other integer
@@ -430,6 +433,9 @@ #define WIDE_INT_REF_FOR(T) \
 /* Public functions for querying and operating on integers.  */
 namespace wi
 {
+  unsigned int excess_bits (unsigned int, unsigned int);
+  unsigned int blocks_needed (unsigned int);
+
   template <typename T>
   unsigned int get_precision (const T &);
 
@@ -740,7 +746,7 @@ inline generic_wide_int <storage>::gener
 inline HOST_WIDE_INT
 generic_wide_int <storage>::to_shwi (unsigned int precision) const
 {
-  if (precision < HOST_BITS_PER_WIDE_INT)
+  if (precision > 0 && precision < HOST_BITS_PER_WIDE_INT)
     return sext_hwi (this->get_val ()[0], precision);
   else
     return this->get_val ()[0];
@@ -764,7 +770,7 @@ generic_wide_int <storage>::to_shwi () c
 inline unsigned HOST_WIDE_INT
 generic_wide_int <storage>::to_uhwi (unsigned int precision) const
 {
-  if (precision < HOST_BITS_PER_WIDE_INT)
+  if (precision > 0 && precision < HOST_BITS_PER_WIDE_INT)
     return zext_hwi (this->get_val ()[0], precision);
   else
     return this->get_val ()[0];
@@ -797,12 +803,7 @@ generic_wide_int <storage>::sign_mask ()
   unsigned int len = this->get_len ();
   unsigned HOST_WIDE_INT high = this->get_val ()[len - 1];
   if (!is_sign_extended)
-    {
-      unsigned int precision = this->get_precision ();
-      int excess = len * HOST_BITS_PER_WIDE_INT - precision;
-      if (excess > 0)
-	high <<= excess;
-    }
+    high <<= wi::excess_bits (len, this->get_precision ());
   return (HOST_WIDE_INT) (high) < 0 ? -1 : 0;
 }
 
@@ -1068,7 +1069,7 @@ wide_int_storage::write_val ()
 wide_int_storage::set_len (unsigned int l, bool is_sign_extended)
 {
   len = l;
-  if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > precision)
+  if (!is_sign_extended && wi::excess_bits (len, precision) > 0)
     val[len - 1] = sext_hwi (val[len - 1],
 			     precision % HOST_BITS_PER_WIDE_INT);
 }
@@ -1347,7 +1348,7 @@ trailing_wide_int_storage::write_val ()
 trailing_wide_int_storage::set_len (unsigned int len, bool is_sign_extended)
 {
   *m_len = len;
-  if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > m_precision)
+  if (!is_sign_extended && wi::excess_bits (len, m_precision) > 0)
     m_val[len - 1] = sext_hwi (m_val[len - 1],
 			       m_precision % HOST_BITS_PER_WIDE_INT);
 }
@@ -1368,8 +1369,7 @@ trailing_wide_int_storage::operator = (c
 trailing_wide_ints <N>::set_precision (unsigned int precision)
 {
   m_precision = precision;
-  m_max_len = ((precision + HOST_BITS_PER_WIDE_INT - 1)
-	       / HOST_BITS_PER_WIDE_INT);
+  m_max_len = wi::blocks_needed (precision);
 }
 
 /* Return a reference to element INDEX.  */
@@ -1387,9 +1387,7 @@ trailing_wide_ints <N>::operator [] (uns
 inline size_t
 trailing_wide_ints <N>::extra_size (unsigned int precision)
 {
-  unsigned int max_len = ((precision + HOST_BITS_PER_WIDE_INT - 1)
-			  / HOST_BITS_PER_WIDE_INT);
-  return (N * max_len - 1) * sizeof (HOST_WIDE_INT);
+  return (N * wi::blocks_needed (precision) - 1) * sizeof (HOST_WIDE_INT);
 }
 
 /* This macro is used in structures that end with a trailing_wide_ints field
@@ -1621,6 +1619,26 @@ decompose (HOST_WIDE_INT *scratch, unsig
 				signop, bool *);
 }
 
+/* If a value of length LEN blocks has more than PRECISION bits, return
+   the number of excess bits, otherwise return 0.  For the special case
+   of PRECISION being zero, the single HWI must have the value zero and
+   there are no excess bits.  Handling zero precision this way means
+   that the result is always a valid shift amount.  */
+inline unsigned int
+wi::excess_bits (unsigned int len, unsigned int precision)
+{
+  unsigned int excess = len * HOST_BITS_PER_WIDE_INT - precision;
+  return excess < HOST_BITS_PER_WIDE_INT ? excess : 0;
+}
+
+/* Return the number of blocks needed for precision PRECISION.  */
+inline unsigned int
+wi::blocks_needed (unsigned int precision)
+{
+  return precision == 0 ? 1 : ((precision + HOST_BITS_PER_WIDE_INT - 1)
+			       / HOST_BITS_PER_WIDE_INT);
+}
+
 /* Return the number of bits that integer X can hold.  */
 template <typename T>
 inline unsigned int
@@ -1729,9 +1747,7 @@ wi::eq_p (const T1 &x, const T2 &y)
 	return xi.val[0] == 0;
       /* Otherwise flush out any excess bits first.  */
       unsigned HOST_WIDE_INT diff = xi.val[0] ^ yi.val[0];
-      int excess = HOST_BITS_PER_WIDE_INT - precision;
-      if (excess > 0)
-	diff <<= excess;
+      diff <<= wi::excess_bits (1, precision);
       return diff == 0;
     }
   return eq_p_large (xi.val, xi.len, yi.val, yi.len, precision);
@@ -2323,7 +2339,9 @@ wi::add (const T1 &x, const T2 &y, signo
       unsigned HOST_WIDE_INT xl = xi.ulow ();
       unsigned HOST_WIDE_INT yl = yi.ulow ();
       unsigned HOST_WIDE_INT resultl = xl + yl;
-      if (sgn == SIGNED)
+      if (precision == 0)
+	*overflow = false;
+      else if (sgn == SIGNED)
 	*overflow = (((resultl ^ xl) & (resultl ^ yl))
 		     >> (precision - 1)) & 1;
       else
@@ -2396,7 +2414,9 @@ wi::sub (const T1 &x, const T2 &y, signo
       unsigned HOST_WIDE_INT xl = xi.ulow ();
       unsigned HOST_WIDE_INT yl = yi.ulow ();
       unsigned HOST_WIDE_INT resultl = xl - yl;
-      if (sgn == SIGNED)
+      if (precision == 0)
+	*overflow = false;
+      else if (sgn == SIGNED)
 	*overflow = (((xl ^ yl) & (resultl ^ xl)) >> (precision - 1)) & 1;
       else
 	*overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
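
For readers skimming the patch out of context, the behaviour of the two new
helpers can be checked with a small standalone sketch.  It simply mirrors the
inline definitions of wi::excess_bits and wi::blocks_needed added to
gcc/wide-int.h above, but assumes a 64-bit HOST_WIDE_INT and uses local
stand-in names; it is only an illustration, not part of the committed patch.

/* Standalone sketch of the new wi::excess_bits / wi::blocks_needed
   semantics, assuming HOST_BITS_PER_WIDE_INT == 64.  Not part of the
   patch itself.  */
#include <cstdio>

static const unsigned int HOST_BITS = 64;  /* stand-in for HOST_BITS_PER_WIDE_INT */

/* Number of bits in LEN blocks beyond PRECISION, or 0 if there are none.
   For precision 0 the raw excess would be exactly HOST_BITS; folding that
   to 0 keeps the result a valid shift amount, as the patch comments note.  */
static unsigned int
excess_bits (unsigned int len, unsigned int precision)
{
  unsigned int excess = len * HOST_BITS - precision;
  return excess < HOST_BITS ? excess : 0;
}

/* Number of blocks needed for PRECISION bits; zero precision still needs
   one block, which always holds the value 0.  */
static unsigned int
blocks_needed (unsigned int precision)
{
  return precision == 0 ? 1 : (precision + HOST_BITS - 1) / HOST_BITS;
}

int
main ()
{
  printf ("blocks_needed (0)   = %u\n", blocks_needed (0));    /* 1 */
  printf ("blocks_needed (64)  = %u\n", blocks_needed (64));   /* 1 */
  printf ("blocks_needed (65)  = %u\n", blocks_needed (65));   /* 2 */
  printf ("excess_bits (1, 0)  = %u\n", excess_bits (1, 0));   /* 0, not 64 */
  printf ("excess_bits (1, 4)  = %u\n", excess_bits (1, 4));   /* 60 */
  printf ("excess_bits (2, 70) = %u\n", excess_bits (2, 70));  /* 58 */
  printf ("excess_bits (1, 70) = %u\n", excess_bits (1, 70));  /* 0: no excess */
  return 0;
}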

