[PATCH 6/6] rs6000: Guard some x86 intrinsics implementations

Bill Schmidt <wschmidt@linux.ibm.com>
Wed Aug 18 19:27:26 GMT 2021


Hi Paul,

On 8/9/21 3:23 PM, Paul A. Clarke via Gcc-patches wrote:
> Some compatibility implementations of x86 intrinsics include
> Power intrinsics which require POWER8.  Guard them.
>
> emmintrin.h:
> - _mm_cmpord_pd: Remove code which was ostensibly for pre-POWER8,
>    but which actually depended on POWER8 (vec_cmpgt(v2du)/vcmpgtud).
>    The "POWER8" version works fine on pre-POWER8.
> - _mm_mul_epu32: vec_mule(v4su) uses vmuleuw.
> pmmintrin.h:
> - _mm_movehdup_ps: vec_mergeo(v4su) uses vmrgow.
> - _mm_moveldup_ps: vec_mergee(v4su) uses vmrgew.
> smmintrin.h:
> - _mm_cmpeq_epi64: vec_cmpeq(v2di) uses vcmpequd.
> - _mm_mul_epi32: vec_mule(v4si) uses vmulesw.
> - _mm_cmpgt_epi64: vec_cmpgt(v2di) uses vcmpgtsd.
> tmmintrin.h:
> - _mm_sign_epi8: vec_neg(v16qi) uses vsububm.
> - _mm_sign_epi16: vec_neg(v8hi) uses vsubuhm.
> - _mm_sign_epi32: vec_neg(v4si) uses vsubuwm.
>    Note that the above three could actually be implemented pre-POWER8,
>    but current GCC does not support vec_neg before POWER8.
> - _mm_sign_pi8: depends on _mm_sign_epi8.
> - _mm_sign_pi16: depends on _mm_sign_epi16.
> - _mm_sign_pi32: depends on _mm_sign_epi32.
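
For anyone reading along, the fix throughout is simply to wrap each
affected compatibility function in an _ARCH_PWR8 guard.  A minimal
sketch of the shape, as it would sit inside smmintrin.h (sketch only;
the header's actual definition may differ in detail):

#ifdef _ARCH_PWR8
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_epi64 (__m128i __A, __m128i __B)
{
  /* vec_cmpeq on v2di lowers to vcmpequd, a POWER8 instruction,
     so the whole function must be compiled out before POWER8.  */
  return (__m128i) vec_cmpeq ((__v2di) __A, (__v2di) __B);
}
#endif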


As with the earlier patches in this series, please note in the commit
message how this was tested and whether you'd like backports.

>
> 2021-08-09  Paul A. Clarke  <pc@us.ibm.com>
>
> gcc


I think you mentioned there's a PR open for this.  If so, please
reference it in the ChangeLog entries.

> 	* config/rs6000/emmintrin.h: Guard POWER8 intrinsics.
> 	* config/rs6000/pmmintrin.h: Same.
> 	* config/rs6000/smmintrin.h: Same.
> 	* config/rs6000/tmmintrin.h: Same.
> ---
>   gcc/config/rs6000/emmintrin.h | 12 ++----------
>   gcc/config/rs6000/pmmintrin.h |  2 ++
>   gcc/config/rs6000/smmintrin.h |  4 ++++
>   gcc/config/rs6000/tmmintrin.h |  2 ++
>   4 files changed, 10 insertions(+), 10 deletions(-)
>
> diff --git a/gcc/config/rs6000/emmintrin.h b/gcc/config/rs6000/emmintrin.h
> index ce1287edf782..32ad72b4cc35 100644
> --- a/gcc/config/rs6000/emmintrin.h
> +++ b/gcc/config/rs6000/emmintrin.h
> @@ -430,20 +430,10 @@ _mm_cmpnge_pd (__m128d __A, __m128d __B)
>   extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
>   _mm_cmpord_pd (__m128d __A, __m128d __B)
>   {
> -#if _ARCH_PWR8
>     __v2du c, d;
>     /* Compare against self will return false (0's) if NAN.  */
>     c = (__v2du)vec_cmpeq (__A, __A);
>     d = (__v2du)vec_cmpeq (__B, __B);
> -#else
> -  __v2du a, b;
> -  __v2du c, d;
> -  const __v2du double_exp_mask  = {0x7ff0000000000000, 0x7ff0000000000000};
> -  a = (__v2du)vec_abs ((__v2df)__A);
> -  b = (__v2du)vec_abs ((__v2df)__B);
> -  c = (__v2du)vec_cmpgt (double_exp_mask, a);
> -  d = (__v2du)vec_cmpgt (double_exp_mask, b);
> -#endif
>     /* A != NAN and B != NAN.  */
>     return ((__m128d)vec_and(c, d));
>   }
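
(The retained code relies on the IEEE 754 rule that a NaN compares
unequal to everything, itself included, so vec_cmpeq against self
yields all-zeros exactly in NaN lanes.  A scalar analogue of the same
trick, purely for illustration:

static inline int
is_ordered (double x)
{
  return x == x;   /* False exactly when x is NaN.  */
}

No POWER8 instruction is needed for that, which is why the #if could
simply go away, as the commit message notes.)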
> @@ -1472,6 +1462,7 @@ _mm_mul_su32 (__m64 __A, __m64 __B)
>     return ((__m64)a * (__m64)b);
>   }
>
> +#ifdef _ARCH_PWR8
>   extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
>   _mm_mul_epu32 (__m128i __A, __m128i __B)
>   {
> @@ -1498,6 +1489,7 @@ _mm_mul_epu32 (__m128i __A, __m128i __B)
>     return (__m128i) vec_mule ((__v4su)__A, (__v4su)__B);
>   #endif
>   }
> +#endif
>
>   extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
>   _mm_slli_epi16 (__m128i __A, int __B)
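
For reference on the second hunk: _mm_mul_epu32 multiplies the low 32
bits of each 64-bit lane as unsigned and keeps the full 64-bit
products, which is why the multiply-even vec_mule (vmuleuw for word
elements, hence POWER8) is the natural mapping.  A scalar sketch of
the semantics, illustration only and not the header's implementation:

#include <stdint.h>

static inline void
mul_epu32_ref (uint64_t dst[2], const uint64_t a[2], const uint64_t b[2])
{
  /* Low 32 bits of each lane, widened to a 64-bit product.  */
  dst[0] = (uint64_t) (uint32_t) a[0] * (uint32_t) b[0];
  dst[1] = (uint64_t) (uint32_t) a[1] * (uint32_t) b[1];
}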
> diff --git a/gcc/config/rs6000/pmmintrin.h b/gcc/config/rs6000/pmmintrin.h
> index eab712fdfa66..d5da1a6daa34 100644
> --- a/gcc/config/rs6000/pmmintrin.h
> +++ b/gcc/config/rs6000/pmmintrin.h
> @@ -123,6 +123,7 @@ _mm_hsub_pd (__m128d __X, __m128d __Y)
>   			    vec_mergel ((__v2df) __X, (__v2df)__Y));
>   }
>
> +#ifdef _ARCH_PWR8
>   extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
>   _mm_movehdup_ps (__m128 __X)
>   {
> @@ -134,6 +135,7 @@ _mm_moveldup_ps (__m128 __X)
>   {
>     return (__m128)vec_mergee ((__v4su)__X, (__v4su)__X);
>   }
> +#endif


Up to you and the maintainers, but I would have a slight preference for 
putting the #ifdef/#endif around each individual function, rather than 
groups of functions.  It's more maintainable that way.
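
That is, roughly:

#ifdef _ARCH_PWR8
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_movehdup_ps (__m128 __X)
{
  return (__m128)vec_mergeo ((__v4su)__X, (__v4su)__X);
}
#endif

#ifdef _ARCH_PWR8
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_moveldup_ps (__m128 __X)
{
  return (__m128)vec_mergee ((__v4su)__X, (__v4su)__X);
}
#endif

A little more verbose, but a later change that adds or moves a
function can't accidentally land inside (or outside) the wrong guard.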

Otherwise LGTM.

Thanks!
Bill

>
>   extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
>   _mm_loaddup_pd (double const *__P)
> diff --git a/gcc/config/rs6000/smmintrin.h b/gcc/config/rs6000/smmintrin.h
> index 8d6ae98c7ce3..d2ba5f11de2e 100644
> --- a/gcc/config/rs6000/smmintrin.h
> +++ b/gcc/config/rs6000/smmintrin.h
> @@ -278,6 +278,7 @@ _mm_extract_ps (__m128 __X, const int __N)
>     return ((__v4si)__X)[__N & 3];
>   }
>
> +#ifdef _ARCH_PWR8
>   extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
>   _mm_blend_epi16 (__m128i __A, __m128i __B, const int __imm8)
>   {
> @@ -289,6 +290,7 @@ _mm_blend_epi16 (__m128i __A, __m128i __B, const int __imm8)
>     #endif
>     return (__m128i) vec_sel ((__v8hu) __A, (__v8hu) __B, __shortmask);
>   }
> +#endif
>
>   extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
>   _mm_blendv_epi8 (__m128i __A, __m128i __B, __m128i __mask)
> @@ -349,6 +351,7 @@ _mm_blend_pd (__m128d __A, __m128d __B, const int __imm8)
>     return (__m128d) __r;
>   }
>
> +#ifdef _ARCH_PWR8
>   __inline __m128d
>   __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
>   _mm_blendv_pd (__m128d __A, __m128d __B, __m128d __mask)
> @@ -357,6 +360,7 @@ _mm_blendv_pd (__m128d __A, __m128d __B, __m128d __mask)
>     const __vector __bool long long __boolmask = vec_cmplt ((__v2di) __mask, __zero);
>     return (__m128d) vec_sel ((__v2du) __A, (__v2du) __B, (__v2du) __boolmask);
>   }
> +#endif
>
>   __inline int
>   __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
> diff --git a/gcc/config/rs6000/tmmintrin.h b/gcc/config/rs6000/tmmintrin.h
> index 971511260b78..4120cc1ea551 100644
> --- a/gcc/config/rs6000/tmmintrin.h
> +++ b/gcc/config/rs6000/tmmintrin.h
> @@ -350,6 +350,7 @@ _mm_shuffle_pi8 (__m64 __A, __m64 __B)
>     return (__m64) ((__v2du) (__C))[0];
>   }
>
> +#ifdef _ARCH_PWR8
>   extern __inline __m128i
>   __attribute__((__gnu_inline__, __always_inline__, __artificial__))
>   _mm_sign_epi8 (__m128i __A, __m128i __B)
> @@ -418,6 +419,7 @@ _mm_sign_pi32 (__m64 __A, __m64 __B)
>     __C = (__v4si) _mm_sign_epi32 ((__m128i) __C, (__m128i) __D);
>     return (__m64) ((__v2du) (__C))[0];
>   }
> +#endif
>
>   extern __inline __m128i
>   __attribute__((__gnu_inline__, __always_inline__, __artificial__))
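
One last note for readers: the _mm_sign_* family negates, passes
through, or zeroes each element of the first operand according to the
sign of the corresponding element of the second, and the negation is
what pulls in vec_neg and hence the POWER8 requirement noted in the
commit message.  A scalar sketch of the per-element rule, for
illustration only:

#include <stdint.h>

static inline int32_t
sign_elem (int32_t a, int32_t b)
{
  if (b < 0)
    return -a;   /* The negation that vec_neg provides.  */
  if (b == 0)
    return 0;
  return a;
}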

