[PATCH 2/3] arm: Auto-vectorization for MVE: vshl

Christophe Lyon <christophe.lyon@linaro.org>
Thu Dec 17 17:48:19 GMT 2020


This patch enables MVE vshlq instructions for auto-vectorization.

The existing mve_vshlq_n_<supf><mode> pattern is kept, as it takes a
single immediate as its second operand and is used by arm_mve.h.
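
For reference, the immediate form is the one reached via the arm_mve.h
intrinsics, e.g. (an illustration only, not part of the patch):

  #include <arm_mve.h>

  int16x8_t
  shift_left_by_3 (int16x8_t a)
  {
    /* Shift each 16-bit lane left by a literal immediate; this is the
       kind of use that keeps mve_vshlq_n_<supf><mode> necessary.  */
    return vshlq_n_s16 (a, 3);
  }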

We move the vashl<mode>3 insn from neon.md to an expander in
vec-common.md, and the mve_vshlq_<supf><mode> insn from mve.md to
vec-common.md, adding to it the second (immediate) alternative from
neon.md's vashl<mode>3.

mve_vshlq_<supf><mode> will be used by a later patch enabling
vectorization for vshr, as a unified version of
ashl<mode>3_[signed|unsigned] from neon.md.  Keeping the VSHLQ unspec
makes it possible to generate both the 's' and 'u' variants.
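
As a C-level illustration only (a sketch, not part of the patch): for a
left shift the signed and unsigned forms produce the same bits, which
is why the vashl<mode>3 expander below simply calls the 'u' generator;
the 's' variant only becomes interesting for vshr.

  #include <stdint.h>

  /* Both loops shift by a per-element count and can go through the
     same VSHLQ-based pattern; vshl.u is enough for either of them.  */
  void shl_s32 (int32_t * __restrict__ d, int32_t *a, int32_t *b)
  {
    for (int i = 0; i < 4; i++)
      d[i] = a[i] << b[i];
  }

  void shl_u32 (uint32_t * __restrict__ d, uint32_t *a, uint32_t *b)
  {
    for (int i = 0; i < 4; i++)
      d[i] = a[i] << b[i];
  }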

It is not clear whether the neon_shift_[reg|imm]<q> attributes are
still appropriate, since this insn is now also used for MVE.

I kept the mve_vshlq_<supf><mode> naming instead of renaming it to
ashl3_<supf>_<mode> as discussed, because the reference in
arm_mve_builtins.def automatically inserts the "mve_" prefix and I
didn't want to make a special case for this.

I haven't yet found why the v16qi and v8hi tests are not vectorized.
With dest[i] = a[i] << b[i] and the following GIMPLE:
  {
    int i;
    unsigned int i.24_1;
    unsigned int _2;
    int16_t * _3;
    short int _4;
    int _5;
    int16_t * _6;
    short int _7;
    int _8;
    int _9;
    int16_t * _10;
    short int _11;
    unsigned int ivtmp_42;
    unsigned int ivtmp_43;

    <bb 2> [local count: 119292720]:

    <bb 3> [local count: 954449105]:
    i.24_1 = (unsigned int) i_23;
    _2 = i.24_1 * 2;
    _3 = a_15(D) + _2;
    _4 = *_3;
    _5 = (int) _4;
    _6 = b_16(D) + _2;
    _7 = *_6;
    _8 = (int) _7;
    _9 = _5 << _8;
    _10 = dest_17(D) + _2;
    _11 = (short int) _9;
    *_10 = _11;
    i_19 = i_23 + 1;
    ivtmp_42 = ivtmp_43 - 1;
    if (ivtmp_42 != 0)
      goto <bb 5>; [87.50%]
    else
      goto <bb 4>; [12.50%]

    <bb 5> [local count: 835156386]:
    goto <bb 3>; [100.00%]

    <bb 4> [local count: 119292720]:
    return;

  }
the vectorizer says:
mve-vshl.c:37:96: note:   ==> examining statement: _5 = (int) _4;
mve-vshl.c:37:96: note:   vect_is_simple_use: operand *_3, type of def: internal
mve-vshl.c:37:96: note:   vect_is_simple_use: vectype vector(8) short int
mve-vshl.c:37:96: missed:   conversion not supported by target.
mve-vshl.c:37:96: note:   vect_is_simple_use: operand *_3, type of def: internal
mve-vshl.c:37:96: note:   vect_is_simple_use: vectype vector(8) short int
mve-vshl.c:37:96: note:   vect_is_simple_use: operand *_3, type of def: internal
mve-vshl.c:37:96: note:   vect_is_simple_use: vectype vector(8) short int
mve-vshl.c:37:117: missed:   not vectorized: relevant stmt not supported: _5 = (int) _4;
mve-vshl.c:37:96: missed:  bad operation or unsupported loop bound.
mve-vshl.c:37:96: note:  ***** Analysis failed with vector mode V8HI
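
For reference, the failing v8hi case above corresponds to this
expansion of the FUNC macro from the new test (a sketch; dg- directives
omitted); building it with -O3 and MVE enabled should reproduce the
diagnostics above:

  #include <stdint.h>

  /* Expansion of FUNC(s, int, 16, 8, <<, vshl).  The shift is done in
     int because of the usual promotions, hence the (int) conversion
     the vectorizer complains about.  */
  void
  test_vshl_s16x8 (int16_t * __restrict__ dest, int16_t *a, int16_t *b)
  {
    int i;
    for (i = 0; i < 8; i++)
      dest[i] = a[i] << b[i];
  }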

2020-12-03  Christophe Lyon  <christophe.lyon@linaro.org>

	gcc/
	* config/arm/mve.md (mve_vshlq_<supf><mode>): Move to
	vec-common.md.
	* config/arm/neon.md (vashl<mode>3): Delete.
	* config/arm/vec-common.md (mve_vshlq_<supf><mode>): New.
	(vashl<mode>3): New expander.

	gcc/testsuite/
	* gcc.target/arm/simd/mve-vshl.c: Add tests for vshl.
---
 gcc/config/arm/mve.md                        | 13 +-----
 gcc/config/arm/neon.md                       | 19 ---------
 gcc/config/arm/vec-common.md                 | 30 ++++++++++++++
 gcc/testsuite/gcc.target/arm/simd/mve-vshl.c | 62 ++++++++++++++++++++++++++++
 4 files changed, 93 insertions(+), 31 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/arm/simd/mve-vshl.c

diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md
index 673a83c..8bdb451 100644
--- a/gcc/config/arm/mve.md
+++ b/gcc/config/arm/mve.md
@@ -822,18 +822,7 @@ (define_insn "mve_vcmpneq_<supf><mode>"
 
 ;;
 ;; [vshlq_s, vshlq_u])
-;;
-(define_insn "mve_vshlq_<supf><mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-	(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
-		       (match_operand:MVE_2 2 "s_register_operand" "w")]
-	 VSHLQ))
-  ]
-  "TARGET_HAVE_MVE"
-  "vshl.<supf>%#<V_sz_elem>\t%q0, %q1, %q2"
-  [(set_attr "type" "mve_move")
-])
+;; See vec-common.md
 
 ;;
 ;; [vabdq_s, vabdq_u])
diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index 50220be..ac9bf74 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -845,25 +845,6 @@ (define_insn "*smax<mode>3_neon"
 ; generic vectorizer code.  It ends up creating a V2DI constructor with
 ; SImode elements.
 
-(define_insn "vashl<mode>3"
-  [(set (match_operand:VDQIW 0 "s_register_operand" "=w,w")
-	(ashift:VDQIW (match_operand:VDQIW 1 "s_register_operand" "w,w")
-		      (match_operand:VDQIW 2 "imm_lshift_or_reg_neon" "w,Dm")))]
-  "TARGET_NEON"
-  {
-    switch (which_alternative)
-      {
-        case 0: return "vshl.<V_s_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2";
-        case 1: return neon_output_shift_immediate ("vshl", 'i', &operands[2],
-                         			    <MODE>mode,
-						    VALID_NEON_QREG_MODE (<MODE>mode),
-						    true);
-        default: gcc_unreachable ();
-      }
-  }
-  [(set_attr "type" "neon_shift_reg<q>, neon_shift_imm<q>")]
-)
-
 (define_insn "vashr<mode>3_imm"
   [(set (match_operand:VDQIW 0 "s_register_operand" "=w")
 	(ashiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand" "w")
diff --git a/gcc/config/arm/vec-common.md b/gcc/config/arm/vec-common.md
index f6a79e2..3a282f0 100644
--- a/gcc/config/arm/vec-common.md
+++ b/gcc/config/arm/vec-common.md
@@ -229,3 +229,33 @@ (define_expand "movmisalign<mode>"
   if (!neon_vector_mem_operand (adjust_mem, 2, true))
     XEXP (adjust_mem, 0) = force_reg (Pmode, XEXP (adjust_mem, 0));
 })
+
+(define_insn "mve_vshlq_<supf><mode>"
+  [(set (match_operand:VDQIW 0 "s_register_operand" "=w,w")
+	(unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w,w")
+		       (match_operand:VDQIW 2 "imm_lshift_or_reg_neon" "w,Dm")]
+	 VSHLQ))]
+  "ARM_HAVE_<MODE>_ARITH"
+{
+  switch (which_alternative)
+    {
+      case 0: return "vshl.<supf>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2";
+      case 1: return neon_output_shift_immediate ("vshl", 'i', &operands[2],
+						  <MODE>mode,
+						  VALID_NEON_QREG_MODE (<MODE>mode),
+						  true);
+      default: gcc_unreachable ();
+    }
+}
+  [(set_attr "type" "neon_shift_reg<q>, neon_shift_imm<q>")]
+)
+
+(define_expand "vashl<mode>3"
+  [(set (match_operand:VDQIW 0 "s_register_operand" "")
+	(ashift:VDQIW (match_operand:VDQIW 1 "s_register_operand" "")
+		      (match_operand:VDQIW 2 "imm_lshift_or_reg_neon" "")))]
+  "ARM_HAVE_<MODE>_ARITH"
+{
+  emit_insn (gen_mve_vshlq_u<mode> (operands[0], operands[1], operands[2]));
+  DONE;
+})
\ No newline at end of file
diff --git a/gcc/testsuite/gcc.target/arm/simd/mve-vshl.c b/gcc/testsuite/gcc.target/arm/simd/mve-vshl.c
new file mode 100644
index 0000000..7a06449
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/mve-vshl.c
@@ -0,0 +1,62 @@
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O3" } */
+
+#include <stdint.h>
+
+#define FUNC(SIGN, TYPE, BITS, NB, OP, NAME)				\
+  void test_ ## NAME ##_ ## SIGN ## BITS ## x ## NB (TYPE##BITS##_t * __restrict__ dest, TYPE##BITS##_t *a, TYPE##BITS##_t *b) { \
+    int i;								\
+    for (i=0; i<NB; i++) {						\
+      dest[i] = a[i] OP b[i];						\
+    }									\
+}
+
+#define FUNC_IMM(SIGN, TYPE, BITS, NB, OP, NAME)				\
+  void test_ ## NAME ##_ ## SIGN ## BITS ## x ## NB (TYPE##BITS##_t * __restrict__ dest, TYPE##BITS##_t *a) { \
+    int i;								\
+    for (i=0; i<NB; i++) {						\
+      dest[i] = a[i] OP 5;						\
+    }									\
+}
+
+/* 64-bit vectors.  */
+FUNC(s, int, 32, 2, <<, vshl)
+FUNC(u, uint, 32, 2, <<, vshl)
+FUNC(s, int, 16, 4, <<, vshl)
+FUNC(u, uint, 16, 4, <<, vshl)
+FUNC(s, int, 8, 8, <<, vshl)
+FUNC(u, uint, 8, 8, <<, vshl)
+
+/* 128-bit vectors.  */
+FUNC(s, int, 32, 4, <<, vshl)
+FUNC(u, uint, 32, 4, <<, vshl)
+FUNC(s, int, 16, 8, <<, vshl)  /* FIXME: not vectorized */
+FUNC(u, uint, 16, 8, <<, vshl) /* FIXME: not vectorized */
+FUNC(s, int, 8, 16, <<, vshl)  /* FIXME: not vectorized */
+FUNC(u, uint, 8, 16, <<, vshl) /* FIXME: not vectorized */
+
+/* 64-bit vectors.  */
+FUNC_IMM(s, int, 32, 2, <<, vshlimm)
+FUNC_IMM(u, uint, 32, 2, <<, vshlimm)
+FUNC_IMM(s, int, 16, 4, <<, vshlimm)
+FUNC_IMM(u, uint, 16, 4, <<, vshlimm)
+FUNC_IMM(s, int, 8, 8, <<, vshlimm)
+FUNC_IMM(u, uint, 8, 8, <<, vshlimm)
+
+/* 128-bit vectors.  */
+FUNC_IMM(s, int, 32, 4, <<, vshlimm)
+FUNC_IMM(u, uint, 32, 4, <<, vshlimm)
+FUNC_IMM(s, int, 16, 8, <<, vshlimm)
+FUNC_IMM(u, uint, 16, 8, <<, vshlimm)
+FUNC_IMM(s, int, 8, 16, <<, vshlimm)
+FUNC_IMM(u, uint, 8, 16, <<, vshlimm)
+
+/* MVE has only 128-bit vectors, so we can vectorize only half of the
+   functions above.  */
+/* We only emit vshl.u, which is equivalent to vshl.s anyway.  */
+/* { dg-final { scan-assembler-times {vshl.u[0-9]+\tq[0-9]+, q[0-9]+} 2 } } */
+
+/* We emit vshl.i when the shift amount is an immediate.  */
+/* { dg-final { scan-assembler-times {vshl.i[0-9]+\tq[0-9]+, q[0-9]+} 6 } } */
-- 
2.7.4


