[PATCH] aarch64: Use type-qualified builtins for U[R]HADD Neon intrinsics
Richard Sandiford
richard.sandiford@arm.com
Thu Nov 11 10:27:39 GMT 2021
Jonathan Wright <Jonathan.Wright@arm.com> writes:
> Hi,
>
> This patch declares unsigned type-qualified builtins and uses them to
> implement (rounding) halving-add Neon intrinsics. This removes the
> need for many casts in arm_neon.h.
>
> Bootstrapped and regression tested on aarch64-none-linux-gnu - no
> issues.
>
> Ok for master?
>
> Thanks,
> Jonathan
>
> ---
>
> gcc/ChangeLog:
>
> 2021-11-09 Jonathan Wright <jonathan.wright@arm.com>
>
> * config/aarch64/aarch64-simd-builtins.def: Use BINOPU type
> qualifiers in generator macros for u[r]hadd builtins.
> * config/aarch64/arm_neon.h (vhadd_s8): Remove unnecessary
> cast.
> (vhadd_s16): Likewise.
> (vhadd_s32): Likewise.
> (vhadd_u8): Use type-qualified builtin and remove casts.
> (vhadd_u16): Likewise.
> (vhadd_u32): Likewise.
> (vhaddq_s8): Remove unnecessary cast.
> (vhaddq_s16): Likewise.
> (vhaddq_s32): Likewise.
> (vhaddq_u8): Use type-qualified builtin and remove casts.
> (vhaddq_u16): Likewise.
> (vhaddq_u32): Likewise.
> (vrhadd_s8): Remove unnecessary cast.
> (vrhadd_s16): Likewise.
> (vrhadd_s32): Likewise.
> (vrhadd_u8): Use type-qualified builtin and remove casts.
> (vrhadd_u16): Likewise.
> (vrhadd_u32): Likewise.
> (vrhaddq_s8): Remove unnecessary cast.
> (vrhaddq_s16): Likewise.
> (vrhaddq_s32): Likewise.
> (vrhaddq_u8): Use type-qualified builtin and remove casts.
> (vrhaddq_u16): Likewise.
> (vrhaddq_u32): Likewise.
OK, thanks.
Richard
>
> diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
> index be06a80cea379b8b78c798dbec47fb95eec68db1..8f9a8d1707dfdf6111d740da53275e79500e8cde 100644
> --- a/gcc/config/aarch64/aarch64-simd-builtins.def
> +++ b/gcc/config/aarch64/aarch64-simd-builtins.def
> @@ -178,10 +178,10 @@
> /* Implemented by aarch64_<sur>h<addsub><mode>. */
> BUILTIN_VDQ_BHSI (BINOP, shadd, 0, NONE)
> BUILTIN_VDQ_BHSI (BINOP, shsub, 0, NONE)
> - BUILTIN_VDQ_BHSI (BINOP, uhadd, 0, NONE)
> + BUILTIN_VDQ_BHSI (BINOPU, uhadd, 0, NONE)
> BUILTIN_VDQ_BHSI (BINOP, uhsub, 0, NONE)
> BUILTIN_VDQ_BHSI (BINOP, srhadd, 0, NONE)
> - BUILTIN_VDQ_BHSI (BINOP, urhadd, 0, NONE)
> + BUILTIN_VDQ_BHSI (BINOPU, urhadd, 0, NONE)
>
> /* Implemented by aarch64_<su>addlp<mode>. */
> BUILTIN_VDQV_L (UNOP, saddlp, 0, NONE)
> diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
> index 58b3dddb2c4ebf856de0e9cf0399e42d322beff9..73eea7c261f49155d616a2ddf1d96d4be9bca53f 100644
> --- a/gcc/config/aarch64/arm_neon.h
> +++ b/gcc/config/aarch64/arm_neon.h
> @@ -545,180 +545,168 @@ __extension__ extern __inline int8x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vhadd_s8 (int8x8_t __a, int8x8_t __b)
> {
> - return (int8x8_t) __builtin_aarch64_shaddv8qi (__a, __b);
> + return __builtin_aarch64_shaddv8qi (__a, __b);
> }
>
> __extension__ extern __inline int16x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vhadd_s16 (int16x4_t __a, int16x4_t __b)
> {
> - return (int16x4_t) __builtin_aarch64_shaddv4hi (__a, __b);
> + return __builtin_aarch64_shaddv4hi (__a, __b);
> }
>
> __extension__ extern __inline int32x2_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vhadd_s32 (int32x2_t __a, int32x2_t __b)
> {
> - return (int32x2_t) __builtin_aarch64_shaddv2si (__a, __b);
> + return __builtin_aarch64_shaddv2si (__a, __b);
> }
>
> __extension__ extern __inline uint8x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vhadd_u8 (uint8x8_t __a, uint8x8_t __b)
> {
> - return (uint8x8_t) __builtin_aarch64_uhaddv8qi ((int8x8_t) __a,
> - (int8x8_t) __b);
> + return __builtin_aarch64_uhaddv8qi_uuu (__a, __b);
> }
>
> __extension__ extern __inline uint16x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vhadd_u16 (uint16x4_t __a, uint16x4_t __b)
> {
> - return (uint16x4_t) __builtin_aarch64_uhaddv4hi ((int16x4_t) __a,
> - (int16x4_t) __b);
> + return __builtin_aarch64_uhaddv4hi_uuu (__a, __b);
> }
>
> __extension__ extern __inline uint32x2_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vhadd_u32 (uint32x2_t __a, uint32x2_t __b)
> {
> - return (uint32x2_t) __builtin_aarch64_uhaddv2si ((int32x2_t) __a,
> - (int32x2_t) __b);
> + return __builtin_aarch64_uhaddv2si_uuu (__a, __b);
> }
>
> __extension__ extern __inline int8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vhaddq_s8 (int8x16_t __a, int8x16_t __b)
> {
> - return (int8x16_t) __builtin_aarch64_shaddv16qi (__a, __b);
> + return __builtin_aarch64_shaddv16qi (__a, __b);
> }
>
> __extension__ extern __inline int16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vhaddq_s16 (int16x8_t __a, int16x8_t __b)
> {
> - return (int16x8_t) __builtin_aarch64_shaddv8hi (__a, __b);
> + return __builtin_aarch64_shaddv8hi (__a, __b);
> }
>
> __extension__ extern __inline int32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vhaddq_s32 (int32x4_t __a, int32x4_t __b)
> {
> - return (int32x4_t) __builtin_aarch64_shaddv4si (__a, __b);
> + return __builtin_aarch64_shaddv4si (__a, __b);
> }
>
> __extension__ extern __inline uint8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
> {
> - return (uint8x16_t) __builtin_aarch64_uhaddv16qi ((int8x16_t) __a,
> - (int8x16_t) __b);
> + return __builtin_aarch64_uhaddv16qi_uuu (__a, __b);
> }
>
> __extension__ extern __inline uint16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
> {
> - return (uint16x8_t) __builtin_aarch64_uhaddv8hi ((int16x8_t) __a,
> - (int16x8_t) __b);
> + return __builtin_aarch64_uhaddv8hi_uuu (__a, __b);
> }
>
> __extension__ extern __inline uint32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
> {
> - return (uint32x4_t) __builtin_aarch64_uhaddv4si ((int32x4_t) __a,
> - (int32x4_t) __b);
> + return __builtin_aarch64_uhaddv4si_uuu (__a, __b);
> }
>
> __extension__ extern __inline int8x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vrhadd_s8 (int8x8_t __a, int8x8_t __b)
> {
> - return (int8x8_t) __builtin_aarch64_srhaddv8qi (__a, __b);
> + return __builtin_aarch64_srhaddv8qi (__a, __b);
> }
>
> __extension__ extern __inline int16x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vrhadd_s16 (int16x4_t __a, int16x4_t __b)
> {
> - return (int16x4_t) __builtin_aarch64_srhaddv4hi (__a, __b);
> + return __builtin_aarch64_srhaddv4hi (__a, __b);
> }
>
> __extension__ extern __inline int32x2_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vrhadd_s32 (int32x2_t __a, int32x2_t __b)
> {
> - return (int32x2_t) __builtin_aarch64_srhaddv2si (__a, __b);
> + return __builtin_aarch64_srhaddv2si (__a, __b);
> }
>
> __extension__ extern __inline uint8x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vrhadd_u8 (uint8x8_t __a, uint8x8_t __b)
> {
> - return (uint8x8_t) __builtin_aarch64_urhaddv8qi ((int8x8_t) __a,
> - (int8x8_t) __b);
> + return __builtin_aarch64_urhaddv8qi_uuu (__a, __b);
> }
>
> __extension__ extern __inline uint16x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vrhadd_u16 (uint16x4_t __a, uint16x4_t __b)
> {
> - return (uint16x4_t) __builtin_aarch64_urhaddv4hi ((int16x4_t) __a,
> - (int16x4_t) __b);
> + return __builtin_aarch64_urhaddv4hi_uuu (__a, __b);
> }
>
> __extension__ extern __inline uint32x2_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vrhadd_u32 (uint32x2_t __a, uint32x2_t __b)
> {
> - return (uint32x2_t) __builtin_aarch64_urhaddv2si ((int32x2_t) __a,
> - (int32x2_t) __b);
> + return __builtin_aarch64_urhaddv2si_uuu (__a, __b);
> }
>
> __extension__ extern __inline int8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
> {
> - return (int8x16_t) __builtin_aarch64_srhaddv16qi (__a, __b);
> + return __builtin_aarch64_srhaddv16qi (__a, __b);
> }
>
> __extension__ extern __inline int16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
> {
> - return (int16x8_t) __builtin_aarch64_srhaddv8hi (__a, __b);
> + return __builtin_aarch64_srhaddv8hi (__a, __b);
> }
>
> __extension__ extern __inline int32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
> {
> - return (int32x4_t) __builtin_aarch64_srhaddv4si (__a, __b);
> + return __builtin_aarch64_srhaddv4si (__a, __b);
> }
>
> __extension__ extern __inline uint8x16_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
> {
> - return (uint8x16_t) __builtin_aarch64_urhaddv16qi ((int8x16_t) __a,
> - (int8x16_t) __b);
> + return __builtin_aarch64_urhaddv16qi_uuu (__a, __b);
> }
>
> __extension__ extern __inline uint16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
> {
> - return (uint16x8_t) __builtin_aarch64_urhaddv8hi ((int16x8_t) __a,
> - (int16x8_t) __b);
> + return __builtin_aarch64_urhaddv8hi_uuu (__a, __b);
> }
>
> __extension__ extern __inline uint32x4_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
> {
> - return (uint32x4_t) __builtin_aarch64_urhaddv4si ((int32x4_t) __a,
> - (int32x4_t) __b);
> + return __builtin_aarch64_urhaddv4si_uuu (__a, __b);
> }
>
> __extension__ extern __inline int8x8_t
More information about the Gcc-patches
mailing list