Use UZP1 instead of INS when combining the low and high halves of two
vectors.  UZP1 is a three-operand instruction, so the destination no
longer has to be tied to one of the sources, which improves register
allocation; it is also faster on some microarchitectures.
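For example, a plain vector combine now assembles to a single UZP1 on
little-endian targets (a minimal sketch; the function name is
illustrative, and the expected instructions match the f32q_1 test
updated below):

#include <arm_neon.h>

/* Before: ins  v0.d[1], v1.d[0]      (destination tied to LO)
   After:  uzp1 v0.2d, v0.2d, v1.2d   (all three operands independent)
   UZP1 with the .2d arrangement writes element 0 of each source into
   the destination, which is exactly this combine.  */
float32x4_t
combine (float32x2_t lo, float32x2_t hi)
{
  return vcombine_f32 (lo, hi);
}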
gcc:
* config/aarch64/aarch64-simd.md (aarch64_combine_internal<mode>):
Use UZP1 instead of INS.
(aarch64_combine_internal_be<mode>): Likewise.
gcc/testsuite:
* gcc.target/aarch64/ldp_stp_16.c: Update to check for UZP1.
* gcc.target/aarch64/pr109072_1.c: Likewise.
* gcc.target/aarch64/vec-init-14.c: Likewise.
* gcc.target/aarch64/vec-init-9.c: Likewise.
&& (register_operand (operands[0], <VDBL>mode)
|| register_operand (operands[2], <MODE>mode))"
{@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
- [ w , 0 , w ; neon_ins<dblq> , simd ] ins\t%0.<single_type>[1], %2.<single_type>[0]
+ [ w , w , w ; neon_permute<dblq> , simd ] uzp1\t%0.2<single_type>, %1.2<single_type>, %2.2<single_type>
[ w , 0 , ?r ; neon_from_gp<dblq> , simd ] ins\t%0.<single_type>[1], %<single_wx>2
[ w , 0 , ?r ; f_mcr , * ] fmov\t%0.d[1], %2
[ w , 0 , Utv ; neon_load1_one_lane<dblq> , simd ] ld1\t{%0.<single_type>}[1], %2
&& (register_operand (operands[0], <VDBL>mode)
|| register_operand (operands[2], <MODE>mode))"
{@ [ cons: =0 , 1 , 2 ; attrs: type , arch ]
- [ w , 0 , w ; neon_ins<dblq> , simd ] ins\t%0.<single_type>[1], %2.<single_type>[0]
+ [ w , w , w ; neon_permute<dblq> , simd ] uzp1\t%0.2<single_type>, %1.2<single_type>, %2.2<single_type>
[ w , 0 , ?r ; neon_from_gp<dblq> , simd ] ins\t%0.<single_type>[1], %<single_wx>2
[ w , 0 , ?r ; f_mcr , * ] fmov\t%0.d[1], %2
[ w , 0 , Utv ; neon_load1_one_lane<dblq> , simd ] ld1\t{%0.<single_type>}[1], %2
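The looser "w" constraint on operand 1 matters when the first input
stays live after the combine.  A sketch (illustrative only; the exact
allocation depends on the surrounding code): with the old tied-operand
INS alternative the allocator may have to copy the first source before
overwriting it, whereas the three-operand UZP1 leaves both sources
untouched.

#include <arm_neon.h>

float32x4_t
use_both (float32x2_t lo, float32x2_t hi)
{
  /* With INS, the first combine clobbers its tied source, so keeping
     `lo' around for the second combine may cost an extra MOV.  With
     UZP1, `lo' and `hi' remain available in their original
     registers.  */
  float32x4_t v = vcombine_f32 (lo, hi);
  return vaddq_f32 (v, vcombine_f32 (hi, lo));
}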
/*
** cons2_4_float: { target aarch64_little_endian }
-** ins v0.s\[1\], v1.s\[0\]
-** stp d0, d0, \[x0\]
-** stp d0, d0, \[x0, #?16\]
+** uzp1 v([0-9])\.2s, v0\.2s, v1\.2s
+** stp d\1, d\1, \[x0\]
+** stp d\1, d\1, \[x0, #?16\]
** ret
*/
/*
** cons2_4_float: { target aarch64_big_endian }
-** ins v1.s\[1\], v0.s\[0\]
-** stp d1, d1, \[x0\]
-** stp d1, d1, \[x0, #?16\]
+** uzp1 v([0-9])\.2s, v1\.2s, v0\.2s
+** stp d\1, d\1, \[x0\]
+** stp d\1, d\1, \[x0, #?16\]
** ret
*/
CONS2_FN (4, float);
/*
** cons4_4_float:
-** ins v[0-9]+\.s[^\n]+
-** ins v[0-9]+\.s[^\n]+
+** uzp1 v[0-9]+\.2s[^\n]+
+** uzp1 v[0-9]+\.2s[^\n]+
** zip1 v([0-9]+).4s, [^\n]+
** stp q\1, q\1, \[x0\]
** stp q\1, q\1, \[x0, #?32\]
/*
** f32x2_2:
-** ins v0\.s\[1\], v1.s\[0\]
+** uzp1 v0\.2s, v0\.2s, v1\.2s
** ret
*/
float32x2_t
/*
** f64x2_2:
-** ins v0\.d\[1\], v1.d\[0\]
+** uzp1 v0\.2d, v0\.2d, v1\.2d
** ret
*/
float64x2_t
/*
** f32_1:
-** ins v0\.s\[1\], v1\.s\[0\]
+** uzp1 v0\.2s, v0\.2s, v1\.2s
** ret
*/
float32x2_t f32_1(float32_t a0, float32_t a1) {
/*
** f32_3:
** ldr s0, \[x0\]
-** ins v0\.s\[1\], v1\.s\[0\]
+** uzp1 v0\.2s, v0\.2s, v1\.2s
** ret
*/
float32x2_t f32_3(float32_t a0, float32_t a1, float32_t *ptr) {
/*
** f64q_1:
-** ins v0\.d\[1\], v1\.d\[0\]
+** uzp1 v0\.2d, v0\.2d, v1\.2d
** ret
*/
float64x2_t f64q_1(float64_t a0, float64_t a1) {
/*
** f64q_3:
** ldr d0, \[x0\]
-** ins v0\.d\[1\], v1\.d\[0\]
+** uzp1 v0\.2d, v0\.2d, v1\.2d
** ret
*/
float64x2_t f64q_3(float64_t a0, float64_t a1, float64_t *ptr) {
/*
** s32q_1:
-** ins v0\.d\[1\], v1\.d\[0\]
+** uzp1 v0\.2d, v0\.2d, v1\.2d
** ret
*/
int32x4_t s32q_1(int32x2_t a0, int32x2_t a1) {
/*
** s32q_3:
** ldr d0, \[x0\]
-** ins v0\.d\[1\], v1\.d\[0\]
+** uzp1 v0\.2d, v0\.2d, v1\.2d
** ret
*/
int32x4_t s32q_3(int32x2_t a0, int32x2_t a1, int32x2_t *ptr) {
/*
** f32q_1:
-** ins v0\.d\[1\], v1\.d\[0\]
+** uzp1 v0\.2d, v0\.2d, v1\.2d
** ret
*/
float32x4_t f32q_1(float32x2_t a0, float32x2_t a1) {
/*
** f32q_3:
** ldr d0, \[x0\]
-** ins v0\.d\[1\], v1\.d\[0\]
+** uzp1 v0\.2d, v0\.2d, v1\.2d
** ret
*/
float32x4_t f32q_3(float32x2_t a0, float32x2_t a1, float32x2_t *ptr) {