[PATCH 12/20] aarch64: Use RTL builtins for FP ml[as][q]_lane intrinsics
Jonathan Wright
Jonathan.Wright@arm.com
Fri Apr 30 14:51:07 GMT 2021
Patch updated as per the suggestion (similar to patch 10/20).
Tested and bootstrapped on aarch64-none-linux-gnu - no issues.
Ok for master?
Thanks,
Jonathan
________________________________
From: Richard Sandiford <richard.sandiford@arm.com>
Sent: 28 April 2021 16:37
To: Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org>
Cc: Jonathan Wright <Jonathan.Wright@arm.com>
Subject: Re: [PATCH 12/20] aarch64: Use RTL builtins for FP ml[as][q]_lane intrinsics
Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org> writes:
> Hi,
>
> As subject, this patch rewrites the floating-point vml[as][q]_lane Neon
> intrinsics to use RTL builtins rather than relying on the GCC vector
> extensions. Using RTL builtins gives us control over the emission of
> fused fmla/fmls instructions (which we don't want here).
>
> With this commit, the code generated by these intrinsics changes from
> a fused multiply-add/subtract instruction to an fmul followed by an
> fadd/fsub instruction. If the programmer really wants fmla/fmls
> instructions, they can use the vfm[as] intrinsics.
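>
> To illustrate the difference (a sketch; the instructions in the comments
> are the expected codegen, not captured compiler output):
>
>   #include <arm_neon.h>
>
>   float32x2_t
>   mla_example (float32x2_t acc, float32x2_t x, float32x2_t v)
>   {
>     /* After this patch, expected: fmul v1.2s, v1.2s, v2.s[0]
>                                    fadd v0.2s, v0.2s, v1.2s  */
>     return vmla_lane_f32 (acc, x, v, 0);
>   }
>
>   float32x2_t
>   fma_example (float32x2_t acc, float32x2_t x, float32x2_t v)
>   {
>     /* Fused alternative, expected: fmla v0.2s, v1.2s, v2.s[0]  */
>     return vfma_lane_f32 (acc, x, v, 0);
>   }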
>
> Regression tested and bootstrapped on aarch64-none-linux-gnu - no
> issues.
>
> Ok for master?
>
> Thanks,
> Jonathan
>
> ---
>
> gcc/ChangeLog:
>
> 2021-02-16  Jonathan Wright  <jonathan.wright@arm.com>
>
> 	* config/aarch64/aarch64-simd-builtins.def: Add
> 	float_ml[as]_lane builtin generator macros.
> 	* config/aarch64/aarch64-simd.md (mul_lane<mode>3): Define.
> 	(aarch64_float_mla_lane<mode>): Define.
> 	(aarch64_float_mls_lane<mode>): Define.
> 	* config/aarch64/arm_neon.h (vmla_lane_f32): Use RTL builtin
> 	instead of GCC vector extensions.
> 	(vmlaq_lane_f32): Likewise.
> 	(vmls_lane_f32): Likewise.
> 	(vmlsq_lane_f32): Likewise.
>
> diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
> index 55a5682baeb13041053ef9e6eaa831182ea8b10c..b702493e1351478272bb7d26991a5673943d61ec 100644
> --- a/gcc/config/aarch64/aarch64-simd-builtins.def
> +++ b/gcc/config/aarch64/aarch64-simd-builtins.def
> @@ -668,6 +668,8 @@
>    BUILTIN_VDQF_DF (TERNOP, float_mls, 0, FP)
>    BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
>    BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
> +  BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
> +  BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
>
>    /* Implemented by aarch64_simd_bsl<mode>.  */
>    BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index 95363d7b5ad11f775aa03f24bbcb0b66d20abb7c..abc8b1708b86bcee2e5082cc4659a197c5821985 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -2625,6 +2625,22 @@
>    [(set_attr "type" "neon_fp_mul_<stype><q>")]
>  )
>
> +(define_insn "mul_lane<mode>3"
> +  [(set (match_operand:VDQSF 0 "register_operand" "=w")
> +     (mult:VDQSF
> +       (vec_duplicate:VDQSF
> +         (vec_select:<VEL>
> +           (match_operand:V2SF 2 "register_operand" "w")
> +           (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
> +       (match_operand:VDQSF 1 "register_operand" "w")))]
> +  "TARGET_SIMD"
> +  {
> +    operands[3] = aarch64_endian_lane_rtx (V2SFmode, INTVAL (operands[3]));
> +    return "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
> +  }
> +  [(set_attr "type" "neon_fp_mul_s_scalar<q>")]
> +)
> +
Similarly to the 10/20 patch (IIRC), we can instead reuse:
(define_insn "*aarch64_mul3_elt<mode>"
  [(set (match_operand:VMUL 0 "register_operand" "=w")
     (mult:VMUL
       (vec_duplicate:VMUL
         (vec_select:<VEL>
           (match_operand:VMUL 1 "register_operand" "<h_con>")
           (parallel [(match_operand:SI 2 "immediate_operand")])))
       (match_operand:VMUL 3 "register_operand" "w")))]
  "TARGET_SIMD"
  {
    operands[2] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[2]));
    return "<f>mul\\t%0.<Vtype>, %3.<Vtype>, %1.<Vetype>[%2]";
  }
  [(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
)
Thanks,
Richard
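
A note on the mechanics of that reuse (an assumption about the intent,
since a "*"-prefixed pattern name is match-only and has no gen_*
function): the pattern would presumably be renamed so that the new
expanders can generate it directly, e.g.:

  (define_insn "mul_lane<mode>3"  ;; renamed from "*aarch64_mul3_elt<mode>"
    ...)                          ;; body as quoted above

with the gen_mul_lane<mode>3 calls adjusted to this pattern's operand
order (lane-source vector as operand 1, lane index as operand 2, the
other multiplicand as operand 3):

  emit_insn (gen_mul_lane<mode>3 (scratch, operands[3],
                                  operands[4], operands[2]));

Note also that with the VMUL iterator the lane-source vector has the
same mode as the result, so the V4SF-result/V2SF-lane case used by
vmlaq_lane_f32 would presumably need the existing cross-width companion
pattern (*aarch64_mul3_elt_<vswap_width_name><mode>) or a separate mode
attribute for the lane operand.
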
>  (define_expand "div<mode>3"
>    [(set (match_operand:VHSDF 0 "register_operand")
>          (div:VHSDF (match_operand:VHSDF 1 "register_operand")
> @@ -2728,6 +2744,46 @@
>    }
>  )
>
> +(define_expand "aarch64_float_mla_lane<mode>"
> +  [(set (match_operand:VDQSF 0 "register_operand")
> +     (plus:VDQSF
> +       (mult:VDQSF
> +         (vec_duplicate:VDQSF
> +           (vec_select:<VEL>
> +             (match_operand:V2SF 3 "register_operand")
> +             (parallel [(match_operand:SI 4 "immediate_operand")])))
> +         (match_operand:VDQSF 2 "register_operand"))
> +       (match_operand:VDQSF 1 "register_operand")))]
> +  "TARGET_SIMD"
> +  {
> +    rtx scratch = gen_reg_rtx (<MODE>mode);
> +    emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
> +                                    operands[3], operands[4]));
> +    emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
> +    DONE;
> +  }
> +)
> +
> +(define_expand "aarch64_float_mls_lane<mode>"
> +  [(set (match_operand:VDQSF 0 "register_operand")
> +     (minus:VDQSF
> +       (match_operand:VDQSF 1 "register_operand")
> +       (mult:VDQSF
> +         (vec_duplicate:VDQSF
> +           (vec_select:<VEL>
> +             (match_operand:V2SF 3 "register_operand")
> +             (parallel [(match_operand:SI 4 "immediate_operand")])))
> +         (match_operand:VDQSF 2 "register_operand"))))]
> +  "TARGET_SIMD"
> +  {
> +    rtx scratch = gen_reg_rtx (<MODE>mode);
> +    emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
> +                                    operands[3], operands[4]));
> +    emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
> +    DONE;
> +  }
> +)
> +
>  (define_insn "fma<mode>4"
>    [(set (match_operand:VHSDF 0 "register_operand" "=w")
>          (fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
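>
> For a concrete picture of what the expanders above produce (a sketch of
> the expected instruction selection, not captured compiler output):
>
>   ;; aarch64_float_mla_lanev2sf (r, a, b, c, lane) emits:
>   ;;   scratch = b * c[lane]   ->  fmul  vS.2s, vB.2s, vC.s[lane]
>   ;;   r       = a + scratch   ->  fadd  vR.2s, vA.2s, vS.2s
>   ;; the mls variant uses gen_sub<mode>3, i.e. fsub in place of fadd.
>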
> diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
> index d4ed47249e3e39f8c88274657c809293e20bec9d..082409fe523cee6ae78f02574762b92d47885c42 100644
> --- a/gcc/config/aarch64/arm_neon.h
> +++ b/gcc/config/aarch64/arm_neon.h
> @@ -20393,7 +20393,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmla_lane_f32 (float32x2_t __a, float32x2_t __b,
>                 float32x2_t __c, const int __lane)
>  {
> -  return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
> +  return __builtin_aarch64_float_mla_lanev2sf (__a, __b, __c, __lane);
>  }
>
>  __extension__ extern __inline int16x4_t
> @@ -20477,7 +20477,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b,
>                  float32x2_t __c, const int __lane)
>  {
> -  return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
> +  return __builtin_aarch64_float_mla_lanev4sf (__a, __b, __c, __lane);
>  }
>
>  __extension__ extern __inline int16x8_t
> @@ -20591,7 +20591,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmls_lane_f32 (float32x2_t __a, float32x2_t __b,
>                 float32x2_t __c, const int __lane)
>  {
> -  return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
> +  return __builtin_aarch64_float_mls_lanev2sf (__a, __b, __c, __lane);
>  }
>
>  __extension__ extern __inline int16x4_t
> @@ -20675,7 +20675,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b,
>                  float32x2_t __c, const int __lane)
>  {
> -  return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
> +  return __builtin_aarch64_float_mls_lanev4sf (__a, __b, __c, __lane);
>  }
>
>  __extension__ extern __inline int16x8_t
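>
> As a usage sketch of a q-form after this change (illustrative only; the
> 128-bit accumulator and multiplicand combine with a 64-bit lane vector,
> so the lane index must be a constant 0 or 1):
>
>   #include <arm_neon.h>
>
>   float32x4_t
>   mlsq_example (float32x4_t acc, float32x4_t x, float32x2_t v)
>   {
>     /* Expected: fmul v1.4s, v1.4s, v2.s[1]
>                  fsub v0.4s, v0.4s, v1.4s  */
>     return vmlsq_lane_f32 (acc, x, v, 1);
>   }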