This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.


Re: Optimize sreal::normalize


On Tue, Dec 16, 2014 at 9:37 PM, Jan Hubicka <hubicka@ucw.cz> wrote:
> Hi,
> I have optimized the inliner to take advantage of the fact that the heap now
> works in sreal, and found that using small numbers leads to excessive time
> (7% of WPA) spent in sreal::normalize.  This is because normalization is
> implemented by loops that are unnecessary and can account for considerable
> time when tripped too often.  This patch implements it via floor_log2 and
> brings normalize inline (a standalone sketch contrasting the loop-based and
> floor_log2-based approaches follows the patch below).  The motivation is to
> let constructors from compile-time constants be optimized into a constant
> write.
>
> I broke normalize into normalize_up/normalize_down plus the hot path to
> further aid inlining (without a profile the inliner will most of the time
> inline everything back together, but it is still better than one big
> function, at least with profile feedback.)
>
> Bootstrapped/regtested x86_64-linux, OK?

Ok.

Thanks,
Richard.

> Honza
>
>         * sreal.h (sreal::normalize): Implement inline.
>         (sreal::normalize_up): New function.
>         (sreal::normalize_down): New function.
> Index: sreal.h
> ===================================================================
> --- sreal.h     (revision 218765)
> +++ sreal.h     (working copy)
> @@ -116,7 +116,9 @@ public:
>    }
>
>  private:
> -  void normalize ();
> +  inline void normalize ();
> +  inline void normalize_up ();
> +  inline void normalize_down ();
>    void shift_right (int amount);
>    static sreal signedless_plus (const sreal &a, const sreal &b, bool negative);
>    static sreal signedless_minus (const sreal &a, const sreal &b, bool negative);
> @@ -178,4 +180,85 @@ inline sreal operator>> (const sreal &a,
>    return a.shift (-exp);
>  }
>
> +/* Make the significand be >= SREAL_MIN_SIG.
> +
> +   Make this a separate method so the inliner can handle the hot path better.  */
> +
> +inline void
> +sreal::normalize_up ()
> +{
> +  int64_t s = m_sig < 0 ? -1 : 1;
> +  unsigned HOST_WIDE_INT sig = absu_hwi (m_sig);
> +  int shift = SREAL_PART_BITS - 2 - floor_log2 (sig);
> +
> +  gcc_checking_assert (shift > 0);
> +  sig <<= shift;
> +  m_exp -= shift;
> +  gcc_checking_assert (sig <= SREAL_MAX_SIG && sig >= SREAL_MIN_SIG);
> +
> +  /* Check underflow.  */
> +  if (m_exp < -SREAL_MAX_EXP)
> +    {
> +      m_exp = -SREAL_MAX_EXP;
> +      sig = 0;
> +    }
> +  if (s == -1)
> +    m_sig = -sig;
> +  else
> +    m_sig = sig;
> +}
> +
> +/* Make the significand be <= SREAL_MAX_SIG.
> +
> +   Make this a separate method so the inliner can handle the hot path better.  */
> +
> +inline void
> +sreal::normalize_down ()
> +{
> +  int64_t s = m_sig < 0 ? -1 : 1;
> +  int last_bit;
> +  unsigned HOST_WIDE_INT sig = absu_hwi (m_sig);
> +  int shift = floor_log2 (sig) - SREAL_PART_BITS + 2;
> +
> +  gcc_checking_assert (shift > 0);
> +  last_bit = (sig >> (shift-1)) & 1;
> +  sig >>= shift;
> +  m_exp += shift;
> +  gcc_checking_assert (sig <= SREAL_MAX_SIG && sig >= SREAL_MIN_SIG);
> +
> +  /* Round the number.  */
> +  sig += last_bit;
> +  if (sig > SREAL_MAX_SIG)
> +    {
> +      sig >>= 1;
> +      m_exp++;
> +    }
> +
> +  /* Check overflow.  */
> +  if (m_exp > SREAL_MAX_EXP)
> +    {
> +      m_exp = SREAL_MAX_EXP;
> +      sig = SREAL_MAX_SIG;
> +    }
> +  if (s == -1)
> +    m_sig = -sig;
> +  else
> +    m_sig = sig;
> +}
> +
> +/* Normalize *this; the hot path.  */
> +
> +inline void
> +sreal::normalize ()
> +{
> +  unsigned HOST_WIDE_INT sig = absu_hwi (m_sig);
> +
> +  if (sig == 0)
> +    m_exp = -SREAL_MAX_EXP;
> +  else if (sig > SREAL_MAX_SIG)
> +    normalize_down ();
> +  else if (sig < SREAL_MIN_SIG)
> +    normalize_up ();
> +}
> +
>  #endif
> Index: sreal.c
> ===================================================================
> --- sreal.c     (revision 218765)
> +++ sreal.c     (working copy)
> @@ -96,64 +96,6 @@ sreal::shift_right (int s)
>    m_sig >>= s;
>  }
>
> -/* Normalize *this.  */
> -
> -void
> -sreal::normalize ()
> -{
> -  int64_t s = m_sig < 0 ? -1 : 1;
> -  unsigned HOST_WIDE_INT sig = absu_hwi (m_sig);
> -
> -  if (sig == 0)
> -    {
> -      m_exp = -SREAL_MAX_EXP;
> -    }
> -  else if (sig < SREAL_MIN_SIG)
> -    {
> -      do
> -       {
> -         sig <<= 1;
> -         m_exp--;
> -       }
> -      while (sig < SREAL_MIN_SIG);
> -
> -      /* Check underflow.  */
> -      if (m_exp < -SREAL_MAX_EXP)
> -       {
> -         m_exp = -SREAL_MAX_EXP;
> -         sig = 0;
> -       }
> -    }
> -  else if (sig > SREAL_MAX_SIG)
> -    {
> -      int last_bit;
> -      do
> -       {
> -         last_bit = sig & 1;
> -         sig >>= 1;
> -         m_exp++;
> -       }
> -      while (sig > SREAL_MAX_SIG);
> -
> -      /* Round the number.  */
> -      sig += last_bit;
> -      if (sig > SREAL_MAX_SIG)
> -       {
> -         sig >>= 1;
> -         m_exp++;
> -       }
> -
> -      /* Check overflow.  */
> -      if (m_exp > SREAL_MAX_EXP)
> -       {
> -         m_exp = SREAL_MAX_EXP;
> -         sig = SREAL_MAX_SIG;
> -       }
> -    }
> -
> -  m_sig = s * sig;
> -}
> -
>  /* Return integer value of *this.  */
>
>  int64_t

