This is the mail archive of the gcc-patches@gcc.gnu.org mailing list for the GCC project.



Re: [Patch ARM/testsuite 02/22] Add unary operators: vabs and vneg.


On 27 June 2014 15:04, Christophe Lyon <christophe.lyon@linaro.org> wrote:
> On 27 June 2014 14:52, Ramana Radhakrishnan <ramana.gcc@googlemail.com> wrote:
>> On Thu, Jun 5, 2014 at 11:04 PM, Christophe Lyon
>> <christophe.lyon@linaro.org> wrote:
>>>
>>> diff --git a/gcc/testsuite/gcc.target/arm/neon-intrinsics/unary_op.inc b/gcc/testsuite/gcc.target/arm/neon-intrinsics/unary_op.inc
>>> new file mode 100644
>>> index 0000000..33f9b5f
>>> --- /dev/null
>>> +++ b/gcc/testsuite/gcc.target/arm/neon-intrinsics/unary_op.inc
>>> @@ -0,0 +1,72 @@
>>> +/* Template file for unary operator validation.
>>> +
>>> +   This file is meant to be included by the relevant test files, which
>>> +   have to define the intrinsic family to test. If a given intrinsic
>>> +   supports variants which are not supported by all the other unary
>>> +   operators, these can be tested by providing a definition for
>>> +   EXTRA_TESTS.  */
>>> +
>>> +#include <arm_neon.h>
>>> +#include "arm-neon-ref.h"
>>> +#include "compute-ref-data.h"
>>> +
>>> +#define FNNAME1(NAME) exec_ ## NAME
>>> +#define FNNAME(NAME) FNNAME1(NAME)
>>> +
>>> +void FNNAME (INSN_NAME) (void)
>>> +{
>>> +  /* Basic test: y=OP(x), then store the result.  */
>>> +#define TEST_UNARY_OP1(INSN, Q, T1, T2, W, N)                          \
>>> +  VECT_VAR(vector_res, T1, W, N) =                                     \
>>> +    INSN##Q##_##T2##W(VECT_VAR(vector, T1, W, N));                     \
>>> +  vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), VECT_VAR(vector_res, T1, W, N))
>>> +
>>> +#define TEST_UNARY_OP(INSN, Q, T1, T2, W, N)                           \
>>> +  TEST_UNARY_OP1(INSN, Q, T1, T2, W, N)                                        \
>>> +
>>> +  /* No need for 64 bits variants in the general case.  */
>>> +  DECL_VARIABLE(vector, int, 8, 8);
>>> +  DECL_VARIABLE(vector, int, 16, 4);
>>> +  DECL_VARIABLE(vector, int, 32, 2);
>>> +  DECL_VARIABLE(vector, int, 8, 16);
>>> +  DECL_VARIABLE(vector, int, 16, 8);
>>> +  DECL_VARIABLE(vector, int, 32, 4);
>>> +
>>> +  DECL_VARIABLE(vector_res, int, 8, 8);
>>> +  DECL_VARIABLE(vector_res, int, 16, 4);
>>> +  DECL_VARIABLE(vector_res, int, 32, 2);
>>> +  DECL_VARIABLE(vector_res, int, 8, 16);
>>> +  DECL_VARIABLE(vector_res, int, 16, 8);
>>> +  DECL_VARIABLE(vector_res, int, 32, 4);
>>> +
>>> +  clean_results ();
>>> +
>>> +  /* Initialize input "vector" from "buffer".  */
>>> +  VLOAD(vector, buffer, , int, s, 8, 8);
>>> +  VLOAD(vector, buffer, , int, s, 16, 4);
>>> +  VLOAD(vector, buffer, , int, s, 32, 2);
>>> +  VLOAD(vector, buffer, q, int, s, 8, 16);
>>> +  VLOAD(vector, buffer, q, int, s, 16, 8);
>>> +  VLOAD(vector, buffer, q, int, s, 32, 4);
>>> +
>>> +  /* Apply a unary operator named INSN_NAME.  */
>>> +  TEST_UNARY_OP(INSN_NAME, , int, s, 8, 8);
>>> +  TEST_UNARY_OP(INSN_NAME, , int, s, 16, 4);
>>> +  TEST_UNARY_OP(INSN_NAME, , int, s, 32, 2);
>>> +  TEST_UNARY_OP(INSN_NAME, q, int, s, 8, 16);
>>> +  TEST_UNARY_OP(INSN_NAME, q, int, s, 16, 8);
>>> +  TEST_UNARY_OP(INSN_NAME, q, int, s, 32, 4);
>>> +
>>> +  CHECK_RESULTS (TEST_MSG, "");
>>> +
>>> +#ifdef EXTRA_TESTS
>>> +  EXTRA_TESTS();
>>> +#endif
>>> +}
>>> +
>>> +int main (void)
>>> +{
>>> +  FNNAME (INSN_NAME)();
>>> +
>>> +  return 0;
>>> +}
>>> diff --git a/gcc/testsuite/gcc.target/arm/neon-intrinsics/vabs.c b/gcc/testsuite/gcc.target/arm/neon-intrinsics/vabs.c
>>> new file mode 100644
>>> index 0000000..ca3901a
>>> --- /dev/null
>>> +++ b/gcc/testsuite/gcc.target/arm/neon-intrinsics/vabs.c
>>> @@ -0,0 +1,74 @@
>>> +#define INSN_NAME vabs
>>> +#define TEST_MSG "VABS/VABSQ"
>>> +
>>> +/* Extra tests for functions requiring floating-point types.  */
>>> +void exec_vabs_f32(void);
>>> +#define EXTRA_TESTS exec_vabs_f32
>>> +
>>> +#include "unary_op.inc"
>>> +
>>> +/* Expected results.  */
>>> +VECT_VAR_DECL(expected,int,8,8) [] = { 0x10, 0xf, 0xe, 0xd,
>>> +                                      0xc, 0xb, 0xa, 0x9 };
>>> +VECT_VAR_DECL(expected,int,16,4) [] = { 0x10, 0xf, 0xe, 0xd };
>>> +VECT_VAR_DECL(expected,int,32,2) [] = { 0x10, 0xf };
>>> +VECT_VAR_DECL(expected,int,64,1) [] = { 0x3333333333333333 };
>>> +VECT_VAR_DECL(expected,uint,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
>>> +                                       0x33, 0x33, 0x33, 0x33 };
>>> +VECT_VAR_DECL(expected,uint,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
>>> +VECT_VAR_DECL(expected,uint,32,2) [] = { 0x33333333, 0x33333333 };
>>> +VECT_VAR_DECL(expected,uint,64,1) [] = { 0x3333333333333333 };
>>> +VECT_VAR_DECL(expected,poly,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
>>> +                                       0x33, 0x33, 0x33, 0x33 };
>>> +VECT_VAR_DECL(expected,poly,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
>>> +VECT_VAR_DECL(expected,hfloat,32,2) [] = { 0x33333333, 0x33333333 };
>>> +VECT_VAR_DECL(expected,int,8,16) [] = { 0x10, 0xf, 0xe, 0xd, 0xc, 0xb, 0xa, 0x9,
>>> +                                       0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1 };
>>> +VECT_VAR_DECL(expected,int,16,8) [] = { 0x10, 0xf, 0xe, 0xd,
>>> +                                       0xc, 0xb, 0xa, 0x9 };
>>> +VECT_VAR_DECL(expected,int,32,4) [] = { 0x10, 0xf, 0xe, 0xd };
>>> +VECT_VAR_DECL(expected,int,64,2) [] = { 0x3333333333333333,
>>> +                                       0x3333333333333333 };
>>> +VECT_VAR_DECL(expected,uint,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
>>> +                                        0x33, 0x33, 0x33, 0x33,
>>> +                                        0x33, 0x33, 0x33, 0x33,
>>> +                                        0x33, 0x33, 0x33, 0x33 };
>>> +VECT_VAR_DECL(expected,uint,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
>>> +                                        0x3333, 0x3333, 0x3333, 0x3333 };
>>> +VECT_VAR_DECL(expected,uint,32,4) [] = { 0x33333333, 0x33333333,
>>> +                                        0x33333333, 0x33333333 };
>>> +VECT_VAR_DECL(expected,uint,64,2) [] = { 0x3333333333333333,
>>> +                                        0x3333333333333333 };
>>> +VECT_VAR_DECL(expected,poly,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
>>> +                                        0x33, 0x33, 0x33, 0x33,
>>> +                                        0x33, 0x33, 0x33, 0x33,
>>> +                                        0x33, 0x33, 0x33, 0x33 };
>>> +VECT_VAR_DECL(expected,poly,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
>>> +                                        0x3333, 0x3333, 0x3333, 0x3333 };
>>> +VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0x33333333, 0x33333333,
>>> +                                         0x33333333, 0x33333333 };
>>> +
>>> +/* Expected results for float32 variants. Needs to be separated since
>>> +   the generic test function does not test floating-point
>>> +   versions.  */
>>> +VECT_VAR_DECL(expected_float32,hfloat,32,2) [] = { 0x40133333, 0x40133333 };
>>> +VECT_VAR_DECL(expected_float32,hfloat,32,4) [] = { 0x4059999a, 0x4059999a,
>>> +                                                  0x4059999a, 0x4059999a };
>>> +
>>> +void exec_vabs_f32(void)
>>> +{
>>> +  DECL_VARIABLE(vector, float, 32, 2);
>>> +  DECL_VARIABLE(vector, float, 32, 4);
>>> +
>>> +  DECL_VARIABLE(vector_res, float, 32, 2);
>>> +  DECL_VARIABLE(vector_res, float, 32, 4);
>>> +
>>> +  VDUP(vector, , float, f, 32, 2, -2.3f);
>>> +  VDUP(vector, q, float, f, 32, 4, 3.4f);
>>> +
>>> +  TEST_UNARY_OP(INSN_NAME, , float, f, 32, 2);
>>> +  TEST_UNARY_OP(INSN_NAME, q, float, f, 32, 4);
>>> +
>>> +  CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_float32, "");
>>> +  CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_float32, "");
>>> +}
>>> diff --git a/gcc/testsuite/gcc.target/arm/neon-intrinsics/vneg.c b/gcc/testsuite/gcc.target/arm/neon-intrinsics/vneg.c
>>> new file mode 100644
>>> index 0000000..c45492d
>>> --- /dev/null
>>> +++ b/gcc/testsuite/gcc.target/arm/neon-intrinsics/vneg.c
>>> @@ -0,0 +1,74 @@
>>> +#define INSN_NAME vneg
>>> +#define TEST_MSG "VNEG/VNEGQ"
>>> +
>>> +/* Extra tests for functions requiring floating-point types.  */
>>> +void exec_vneg_f32(void);
>>> +#define EXTRA_TESTS exec_vneg_f32
>>> +
>>> +#include "unary_op.inc"
>>> +
>>> +/* Expected results.  */
>>> +VECT_VAR_DECL(expected,int,8,8) [] = { 0x10, 0xf, 0xe, 0xd,
>>> +                                      0xc, 0xb, 0xa, 0x9 };
>>> +VECT_VAR_DECL(expected,int,16,4) [] = { 0x10, 0xf, 0xe, 0xd };
>>> +VECT_VAR_DECL(expected,int,32,2) [] = { 0x10, 0xf };
>>> +VECT_VAR_DECL(expected,int,64,1) [] = { 0x3333333333333333 };
>>> +VECT_VAR_DECL(expected,uint,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
>>> +                                       0x33, 0x33, 0x33, 0x33 };
>>> +VECT_VAR_DECL(expected,uint,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
>>> +VECT_VAR_DECL(expected,uint,32,2) [] = { 0x33333333, 0x33333333 };
>>> +VECT_VAR_DECL(expected,uint,64,1) [] = { 0x3333333333333333 };
>>> +VECT_VAR_DECL(expected,poly,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
>>> +                                       0x33, 0x33, 0x33, 0x33 };
>>> +VECT_VAR_DECL(expected,poly,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
>>> +VECT_VAR_DECL(expected,hfloat,32,2) [] = { 0x33333333, 0x33333333 };
>>> +VECT_VAR_DECL(expected,int,8,16) [] = { 0x10, 0xf, 0xe, 0xd, 0xc, 0xb, 0xa, 0x9,
>>> +                                       0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1 };
>>> +VECT_VAR_DECL(expected,int,16,8) [] = { 0x10, 0xf, 0xe, 0xd,
>>> +                                       0xc, 0xb, 0xa, 0x9 };
>>> +VECT_VAR_DECL(expected,int,32,4) [] = { 0x10, 0xf, 0xe, 0xd };
>>> +VECT_VAR_DECL(expected,int,64,2) [] = { 0x3333333333333333,
>>> +                                       0x3333333333333333 };
>>> +VECT_VAR_DECL(expected,uint,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
>>> +                                        0x33, 0x33, 0x33, 0x33,
>>> +                                        0x33, 0x33, 0x33, 0x33,
>>> +                                        0x33, 0x33, 0x33, 0x33 };
>>> +VECT_VAR_DECL(expected,uint,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
>>> +                                        0x3333, 0x3333, 0x3333, 0x3333 };
>>> +VECT_VAR_DECL(expected,uint,32,4) [] = { 0x33333333, 0x33333333,
>>> +                                        0x33333333, 0x33333333 };
>>> +VECT_VAR_DECL(expected,uint,64,2) [] = { 0x3333333333333333,
>>> +                                        0x3333333333333333 };
>>> +VECT_VAR_DECL(expected,poly,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
>>> +                                        0x33, 0x33, 0x33, 0x33,
>>> +                                        0x33, 0x33, 0x33, 0x33,
>>> +                                        0x33, 0x33, 0x33, 0x33 };
>>> +VECT_VAR_DECL(expected,poly,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
>>> +                                        0x3333, 0x3333, 0x3333, 0x3333 };
>>> +VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0x33333333, 0x33333333,
>>> +                                          0x33333333, 0x33333333 };
>>> +
>>> +/* Expected results for float32 variants. Needs to be separated since
>>> +   the generic test function does not test floating-point
>>> +   versions.  */
>>> +VECT_VAR_DECL(expected_float32,hfloat,32,2) [] = { 0xc0133333, 0xc0133333 };
>>> +VECT_VAR_DECL(expected_float32,hfloat,32,4) [] = { 0xc059999a, 0xc059999a,
>>> +                                                  0xc059999a, 0xc059999a };
>>> +
>>> +void exec_vneg_f32(void)
>>> +{
>>> +  DECL_VARIABLE(vector, float, 32, 2);
>>> +  DECL_VARIABLE(vector, float, 32, 4);
>>> +
>>> +  DECL_VARIABLE(vector_res, float, 32, 2);
>>> +  DECL_VARIABLE(vector_res, float, 32, 4);
>>> +
>>> +  VDUP(vector, , float, f, 32, 2, 2.3f);
>>> +  VDUP(vector, q, float, f, 32, 4, 3.4f);
>>> +
>>> +  TEST_UNARY_OP(INSN_NAME, , float, f, 32, 2);
>>> +  TEST_UNARY_OP(INSN_NAME, q, float, f, 32, 4);
>>> +
>>> +  CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_float32, "");
>>> +  CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_float32, "");
>>> +}
>>> --
>>> 1.8.3.2
>>>
>>
>> Both these intrinsics need F64 and {S/U}64 variants that are
>> AArch64-specific.  I'm assuming that for the initial tranche of these
>> intrinsics you intend to put in appropriate ChangeLog entries
>> indicating new files, etc.
>
> Yes for the ChangeLog, since there are only new files.
>
>> In the long term, given that we have more A64 variants than A32, I
>> wonder whether this is worth putting into gcc.target/aarch64 rather
>> than here.
>>
>> I'm happy to review the patch stack as it stands right now and finish
>> it, given that it covers the intersection set of the original AArch32
>> intrinsics, and we can rebase this to be in the gcc.target/aarch64
>> directory if the AArch64 maintainers agree.
I'm not sure I understand what you mean by "rebase" here.

Can't we leave this set in gcc.target/arm and add the A64-only ones
in gcc.target/aarch64?
When testing an aarch64 compiler, the testsuite would then run both
subdirectories.
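
For such an A64-only extra test, I imagine something along these lines
would fit the existing pattern: a rough, untested sketch of a float64
vneg variant, assuming the DECL_VARIABLE/VDUP/TEST_UNARY_OP/CHECK_FP
helpers are extended to cover float64 the same way they cover float32
(the names and expected values below are only illustrative, not part of
this patch):

#ifdef __aarch64__
/* 0xc002666666666666 is the IEEE-754 encoding of -2.3 as a double.  */
VECT_VAR_DECL(expected_float64,hfloat,64,2) [] = { 0xc002666666666666,
                                                   0xc002666666666666 };

void exec_vneg_f64(void)
{
  DECL_VARIABLE(vector, float, 64, 2);
  DECL_VARIABLE(vector_res, float, 64, 2);

  /* Fill the input vector with 2.3, then negate and check.  */
  VDUP(vector, q, float, f, 64, 2, 2.3);
  TEST_UNARY_OP(INSN_NAME, q, float, f, 64, 2);

  CHECK_FP(TEST_MSG, float, 64, 2, PRIx64, expected_float64, "");
}
#endif

This would be hooked in via EXTRA_TESTS like the f32 case, and the
vnegq_s64/vabsq_f64 variants could presumably follow the same shape.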


>>
>
> Indeed my tests currently only include the AArch32 intrinsics. They
> were written at a time when I hadn't heard about AArch64 :-)
>
> So there is definitely a need for at least a second pass to add the "new" intrinsics.
>
> I also had tests for DSP-like intrinsics which were supported by
> RVCT but not by GCC. This has changed since you introduced ACLE, so
> another update is desirable here.
>
> My plan is really to convert the existing tests in one shot, as
> quickly as possible, and then update the tests with the new
> intrinsics. I'd rather not have two moving targets.
>
> Christophe.

