From ea58eb88d9b440773e146bd6cb39abb9a9fb894f Mon Sep 17 00:00:00 2001
From: Jackson Woodruff
Date: Thu, 17 Aug 2017 12:54:10 +0000
Subject: [PATCH] [AArch64] Improve SIMD store of zero.

This patch changes patterns in aarch64-simd.md to replace

  movi    v0.4s, 0
  str     q0, [x0, 16]

With:

  stp     xzr, xzr, [x0, 16]

When we are storing zeros to vectors like this:

void f(uint32x4_t *p) {
  uint32x4_t x = { 0, 0, 0, 0};
  p[1] = x;
}

gcc/

2017-08-17  Jackson Woodruff

	* aarch64-simd.md (mov<mode>): No longer force zero immediate into
	register.
	(*aarch64_simd_mov<mode>): Add new case for stp using zero immediate.

gcc/testsuite/

2017-08-17  Jackson Woodruff

	* gcc.target/aarch64/simd/vect_str_zero.c: New testcase.

From-SVN: r251149
---
 gcc/ChangeLog                                      |  6 +++
 gcc/config/aarch64/aarch64-simd.md                 | 50 +++++++++++--------
 gcc/testsuite/ChangeLog                            |  4 ++
 .../gcc.target/aarch64/simd/vect_str_zero.c        | 22 ++++++++
 4 files changed, 60 insertions(+), 22 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/aarch64/simd/vect_str_zero.c

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index dd18d206b3cc..2df218b93daf 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,9 @@
+2017-08-17  Jackson Woodruff
+
+	* aarch64-simd.md (mov<mode>): No longer force zero immediate into
+	register.
+	(*aarch64_simd_mov<mode>): Add new case for stp using zero immediate.
+
 2017-08-17  Richard Biener
 
 	* tree-ssa-structalias.c (solve_graph): When propagating
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index f74b68775cf6..f3e084f8778d 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -23,7 +23,10 @@
 	(match_operand:VALL_F16 1 "general_operand" ""))]
   "TARGET_SIMD"
   "
-    if (GET_CODE (operands[0]) == MEM)
+    if (GET_CODE (operands[0]) == MEM
+	&& !(aarch64_simd_imm_zero (operands[1], <MODE>mode)
+	     && aarch64_legitimate_address_p (<MODE>mode, operands[0],
+					      PARALLEL, 1)))
       operands[1] = force_reg (<MODE>mode, operands[1]);
   "
 )
@@ -94,63 +97,66 @@
 
 (define_insn "*aarch64_simd_mov<mode>"
   [(set (match_operand:VD 0 "nonimmediate_operand"
-		"=w, m,  w, ?r, ?w, ?r, w")
+		"=w, m,  m,  w, ?r, ?w, ?r, w")
 	(match_operand:VD 1 "general_operand"
-		"m,  w,  w,  w,  r,  r, Dn"))]
+		"m,  Dz, w,  w,  w,  r,  r, Dn"))]
   "TARGET_SIMD
    && (register_operand (operands[0], <MODE>mode)
-       || register_operand (operands[1], <MODE>mode))"
+       || aarch64_simd_reg_or_zero (operands[1], <MODE>mode))"
 {
    switch (which_alternative)
      {
-     case 0: return "ldr\\t%d0, %1";
-     case 1: return "str\\t%d1, %0";
-     case 2: return "mov\t%0.<Vbtype>, %1.<Vbtype>";
-     case 3: return "umov\t%0, %1.d[0]";
-     case 4: return "fmov\t%d0, %1";
-     case 5: return "mov\t%0, %1";
-     case 6:
+     case 0: return "ldr\t%d0, %1";
+     case 1: return "str\txzr, %0";
+     case 2: return "str\t%d1, %0";
+     case 3: return "mov\t%0.<Vbtype>, %1.<Vbtype>";
+     case 4: return "umov\t%0, %1.d[0]";
+     case 5: return "fmov\t%d0, %1";
+     case 6: return "mov\t%0, %1";
+     case 7:
 	return aarch64_output_simd_mov_immediate (operands[1],
 						   <MODE>mode, 64);
      default: gcc_unreachable ();
      }
 }
-  [(set_attr "type" "neon_load1_1reg, neon_store1_1reg,\
+  [(set_attr "type" "neon_load1_1reg, neon_stp, neon_store1_1reg,\
 		     neon_logic, neon_to_gp, f_mcr,\
 		     mov_reg, neon_move")]
 )
 
 (define_insn "*aarch64_simd_mov<mode>"
   [(set (match_operand:VQ 0 "nonimmediate_operand"
-		"=w, m,  w, ?r, ?w, ?r, w")
+		"=w, Ump,  m,  w, ?r, ?w, ?r, w")
 	(match_operand:VQ 1 "general_operand"
-		"m,  w,  w,  w,  r,  r, Dn"))]
+		"m,  Dz, w,  w,  w,  r,  r, Dn"))]
   "TARGET_SIMD
    && (register_operand (operands[0], <MODE>mode)
-       || register_operand (operands[1], <MODE>mode))"
+       || aarch64_simd_reg_or_zero (operands[1], <MODE>mode))"
 {
   switch (which_alternative)
     {
     case 0:
-	return "ldr\\t%q0, %1";
+	return "ldr\t%q0, %1";
     case 1:
-	return "str\\t%q1, %0";
+	return "stp\txzr, xzr, %0";
     case 2:
-	return "mov\t%0.<Vbtype>, %1.<Vbtype>";
+	return "str\t%q1, %0";
     case 3:
+	return "mov\t%0.<Vbtype>, %1.<Vbtype>";
    case 4:
    case 5:
-	return "#";
    case 6:
+	return "#";
+    case 7:
	return aarch64_output_simd_mov_immediate (operands[1],
						   <MODE>mode, 128);
    default: gcc_unreachable ();
    }
 }
   [(set_attr "type" "neon_load1_1reg, neon_store1_1reg,\
-		     neon_logic, multiple, multiple, multiple,\
-		     neon_move")
-   (set_attr "length" "4,4,4,8,8,8,4")]
+		     neon_stp, neon_logic, multiple, multiple,\
+		     multiple, neon_move")
+   (set_attr "length" "4,4,4,4,8,8,8,4")]
 )
 
 ;; When storing lane zero we can use the normal STR and its more permissive
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 0b1d531b35bf..7c4270c9c98f 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,7 @@
+2017-08-17  Jackson Woodruff
+
+	* gcc.target/aarch64/simd/vect_str_zero.c: New testcase.
+
 2017-08-17  Tom de Vries
 
 	* gcc.dg/ipa/pr81696.c: Require effective target nonlocal_goto.
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vect_str_zero.c b/gcc/testsuite/gcc.target/aarch64/simd/vect_str_zero.c
new file mode 100644
index 000000000000..07198de10943
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vect_str_zero.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#include <arm_neon.h>
+
+void
+f (uint32x4_t *p)
+{
+  uint32x4_t x = { 0, 0, 0, 0};
+  p[1] = x;
+
+  /* { dg-final { scan-assembler "stp\txzr, xzr," } } */
+}
+
+void
+g (float32x2_t *p)
+{
+  float32x2_t x = {0.0, 0.0};
+  p[0] = x;
+
+  /* { dg-final { scan-assembler "str\txzr, " } } */
+}
-- 
2.43.5