[committed] aarch64: Tweak old vect-* tests to avoid new FAILs

Richard Sandiford richard.sandiford@arm.com
Tue Jul 20 14:44:20 GMT 2021


I'm not sure what these tests were originally designed to test.
vaddv and vmaxv seem to be testing for vectorisation, with associated
scan-assembler tests.  But they use arm_neon.h functions to test
the results, which would presumably also trip many of the scans.
That was probably what the split into vect-fmax-fmin.c and
vect-fmaxv-fminv-compile.c was supposed to avoid.

Anyway, the tests started failing after the recent change to allow
staged reductions for epilogue loops.  And epilogues came into play
because the reduction loops iterate LANES-1 rather than LANES times.
(vmaxv was trying to iterate LANES times, but the gimple optimisers
outsmarted it.  The other two explicitly had a count of LANES-1.)

Just suppressing epilogues causes other issues for vaddv and vmaxv.
The easiest fix therefore seemed to be to use an asm to hide the
initial value of the vmaxv loop (so that it really does iterate
LANES times) and then make the others match that style.
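
Sketched in isolation, the idiom used in the patch is:

    float s;
    asm ("" : "=w" (s) : "0" (a[0]));  /* s = a[0], opaque to gimple */
    for (i = 0; i < 8; i++)            /* LANES iterations */
      s = (s > a[i] ? s : a[i]);

The empty asm still initialises s to a[0] (the "0" matching constraint
ties the input to the same register as the "=w" output), but the
optimisers can no longer see the copy and fold the first iteration away.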

Tested on aarch64-linux-gnu & pushed.

Richard


gcc/testsuite/
	PR testsuite/101506
	* gcc.target/aarch64/vect-vmaxv.c: Use an asm to hide the
	true initial value of the reduction from the vectorizer.
	* gcc.target/aarch64/vect-vaddv.c: Likewise.  Make the vector
	loop iterate exactly LANES (rather than LANES-1) times.
	* gcc.target/aarch64/vect-fmaxv-fminv.x: Likewise.
---
 .../gcc.target/aarch64/vect-fmaxv-fminv.x     | 20 +++++++++++--------
 gcc/testsuite/gcc.target/aarch64/vect-vaddv.c |  4 ++--
 gcc/testsuite/gcc.target/aarch64/vect-vmaxv.c |  2 +-
 3 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/gcc/testsuite/gcc.target/aarch64/vect-fmaxv-fminv.x b/gcc/testsuite/gcc.target/aarch64/vect-fmaxv-fminv.x
index 0bc6ba494cf..d3ba31c425a 100644
--- a/gcc/testsuite/gcc.target/aarch64/vect-fmaxv-fminv.x
+++ b/gcc/testsuite/gcc.target/aarch64/vect-fmaxv-fminv.x
@@ -5,8 +5,9 @@ typedef double *__restrict__ pRF64;
 float maxv_f32 (pRF32 a)
 {
   int i;
-  float s = a[0];
-  for (i=1;i<8;i++)
+  float s;
+  asm ("" : "=w" (s) : "0" (a[0]));
+  for (i=0;i<8;i++)
     s = (s > a[i] ? s :  a[i]);
 
   return s;
@@ -15,8 +16,9 @@ float maxv_f32 (pRF32 a)
 float minv_f32 (pRF32 a)
 {
   int i;
-  float s = a[0];
-  for (i=1;i<16;i++)
+  float s;
+  asm ("" : "=w" (s) : "0" (a[0]));
+  for (i=0;i<16;i++)
     s = (s < a[i] ? s :  a[i]);
 
   return s;
@@ -25,8 +27,9 @@ float minv_f32 (pRF32 a)
 double maxv_f64 (pRF64 a)
 {
   int i;
-  double s = a[0];
-  for (i=1;i<8;i++)
+  double s;
+  asm ("" : "=w" (s) : "0" (a[0]));
+  for (i=0;i<8;i++)
     s = (s > a[i] ? s :  a[i]);
 
   return s;
@@ -35,8 +38,9 @@ double maxv_f64 (pRF64 a)
 double minv_f64 (pRF64 a)
 {
   int i;
-  double s = a[0];
-  for (i=1;i<16;i++)
+  double s;
+  asm ("" : "=w" (s) : "0" (a[0]));
+  for (i=0;i<16;i++)
     s = (s < a[i] ? s :  a[i]);
 
   return s;
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-vaddv.c b/gcc/testsuite/gcc.target/aarch64/vect-vaddv.c
index 41e9157dbec..3a12ae9706a 100644
--- a/gcc/testsuite/gcc.target/aarch64/vect-vaddv.c
+++ b/gcc/testsuite/gcc.target/aarch64/vect-vaddv.c
@@ -57,8 +57,8 @@ test_vaddv##SUFFIX##_##TYPE##x##LANES##_t (void)			\
   /* Calculate linearly.  */						\
   for (i = 0; i < moves; i++)						\
     {									\
-      out_l[i] = input_##TYPE[i];					\
-      for (j = 1; j < LANES; j++)					\
+      asm ("" : "=r" (out_l[i]) : "0" (0));				\
+      for (j = 0; j < LANES; j++)					\
 	out_l[i] += input_##TYPE[i + j];				\
     }									\
 									\
diff --git a/gcc/testsuite/gcc.target/aarch64/vect-vmaxv.c b/gcc/testsuite/gcc.target/aarch64/vect-vmaxv.c
index 4280834ec4a..1bdea890d3e 100644
--- a/gcc/testsuite/gcc.target/aarch64/vect-vmaxv.c
+++ b/gcc/testsuite/gcc.target/aarch64/vect-vmaxv.c
@@ -36,7 +36,7 @@ test_v##MAXMIN##v##SUFFIX##_##TYPE##x##LANES##_t (void)			\
   /* Calculate linearly.  */						\
   for (i = 0; i < moves; i++)						\
     {									\
-      out_l[i] = input_##TYPE[i];					\
+      asm ("" : "=r" (out_l[i]) : "0" (input_##TYPE[i]));		\
       for (j = 0; j < LANES; j++)					\
 	out_l[i] = input_##TYPE[i + j] CMP_OP out_l[i]  ?		\
 	  input_##TYPE[i + j] : out_l[i];				\

