Message ID | 1404209174-25364-3-git-send-email-christophe.lyon@linaro.org
---|---
State | New
On Tue, Jul 1, 2014 at 11:05 AM, Christophe Lyon <christophe.lyon@linaro.org> wrote:
>
> diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
> index 3a0f99b..44c4990 100644
> --- a/gcc/testsuite/ChangeLog
> +++ b/gcc/testsuite/ChangeLog
> @@ -1,5 +1,11 @@
>  2014-06-30  Christophe Lyon  <christophe.lyon@linaro.org>
>
> +        * gcc.target/aarch64/neon-intrinsics/unary_op.inc: New file.
> +        * gcc.target/aarch64/neon-intrinsics/vabs.c: Likewise.
> +        * gcc.target/aarch64/neon-intrinsics/vneg.c: Likewise.
> +
> +2014-06-30  Christophe Lyon  <christophe.lyon@linaro.org>
> +
>          * gcc.target/arm/README.neon-intrinsics: New file.
>          * gcc.target/aarch64/neon-intrinsics/README: Likewise.
>          * gcc.target/aarch64/neon-intrinsics/arm-neon-ref.h: Likewise.

Ok for ARM if no regressions. Wait for an ack from AArch64 maintainers.

Ramana
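For reference when reading the expected_float32 tables in the patch below: the reference values are stored as raw IEEE-754 single-precision bit patterns, and CHECK_FP is passed PRIx32, so mismatches are reported in hex. The 0x33 patterns in the other expected tables appear to be the "cleared" value left by clean_results() in lanes the generic test does not exercise. As a quick sanity check of where constants such as 0x40133333 and 0xc059999a come from, the small host-side snippet below reproduces them; it is purely illustrative and not part of the patch.

```c
#include <stdio.h>
#include <inttypes.h>
#include <string.h>
#include <math.h>

/* Return the IEEE-754 single-precision bit pattern of a float.  */
static uint32_t bits_of (float f)
{
  uint32_t u;
  memcpy (&u, &f, sizeof u);   /* well-defined type punning */
  return u;
}

int main (void)
{
  printf ("0x%" PRIx32 "\n", bits_of (fabsf (-2.3f)));  /* 0x40133333: vabs,  2-lane input -2.3f */
  printf ("0x%" PRIx32 "\n", bits_of (fabsf (3.4f)));   /* 0x4059999a: vabsq, 4-lane input  3.4f */
  printf ("0x%" PRIx32 "\n", bits_of (-2.3f));          /* 0xc0133333: vneg,  2-lane input  2.3f */
  printf ("0x%" PRIx32 "\n", bits_of (-3.4f));          /* 0xc059999a: vnegq, 4-lane input  3.4f */
  return 0;
}
```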
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 3a0f99b..44c4990 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,5 +1,11 @@
 2014-06-30  Christophe Lyon  <christophe.lyon@linaro.org>
 
+        * gcc.target/aarch64/neon-intrinsics/unary_op.inc: New file.
+        * gcc.target/aarch64/neon-intrinsics/vabs.c: Likewise.
+        * gcc.target/aarch64/neon-intrinsics/vneg.c: Likewise.
+
+2014-06-30  Christophe Lyon  <christophe.lyon@linaro.org>
+
         * gcc.target/arm/README.neon-intrinsics: New file.
         * gcc.target/aarch64/neon-intrinsics/README: Likewise.
         * gcc.target/aarch64/neon-intrinsics/arm-neon-ref.h: Likewise.
diff --git a/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/unary_op.inc b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/unary_op.inc
new file mode 100644
index 0000000..33f9b5f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/unary_op.inc
@@ -0,0 +1,72 @@
+/* Template file for unary operator validation.
+
+   This file is meant to be included by the relevant test files, which
+   have to define the intrinsic family to test. If a given intrinsic
+   supports variants which are not supported by all the other unary
+   operators, these can be tested by providing a definition for
+   EXTRA_TESTS. */
+
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#define FNNAME1(NAME) exec_ ## NAME
+#define FNNAME(NAME) FNNAME1(NAME)
+
+void FNNAME (INSN_NAME) (void)
+{
+  /* Basic test: y=OP(x), then store the result. */
+#define TEST_UNARY_OP1(INSN, Q, T1, T2, W, N)                           \
+  VECT_VAR(vector_res, T1, W, N) =                                      \
+    INSN##Q##_##T2##W(VECT_VAR(vector, T1, W, N));                      \
+  vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), VECT_VAR(vector_res, T1, W, N))
+
+#define TEST_UNARY_OP(INSN, Q, T1, T2, W, N)                            \
+  TEST_UNARY_OP1(INSN, Q, T1, T2, W, N)                                 \
+
+  /* No need for 64 bits variants in the general case. */
+  DECL_VARIABLE(vector, int, 8, 8);
+  DECL_VARIABLE(vector, int, 16, 4);
+  DECL_VARIABLE(vector, int, 32, 2);
+  DECL_VARIABLE(vector, int, 8, 16);
+  DECL_VARIABLE(vector, int, 16, 8);
+  DECL_VARIABLE(vector, int, 32, 4);
+
+  DECL_VARIABLE(vector_res, int, 8, 8);
+  DECL_VARIABLE(vector_res, int, 16, 4);
+  DECL_VARIABLE(vector_res, int, 32, 2);
+  DECL_VARIABLE(vector_res, int, 8, 16);
+  DECL_VARIABLE(vector_res, int, 16, 8);
+  DECL_VARIABLE(vector_res, int, 32, 4);
+
+  clean_results ();
+
+  /* Initialize input "vector" from "buffer". */
+  VLOAD(vector, buffer, , int, s, 8, 8);
+  VLOAD(vector, buffer, , int, s, 16, 4);
+  VLOAD(vector, buffer, , int, s, 32, 2);
+  VLOAD(vector, buffer, q, int, s, 8, 16);
+  VLOAD(vector, buffer, q, int, s, 16, 8);
+  VLOAD(vector, buffer, q, int, s, 32, 4);
+
+  /* Apply a unary operator named INSN_NAME. */
+  TEST_UNARY_OP(INSN_NAME, , int, s, 8, 8);
+  TEST_UNARY_OP(INSN_NAME, , int, s, 16, 4);
+  TEST_UNARY_OP(INSN_NAME, , int, s, 32, 2);
+  TEST_UNARY_OP(INSN_NAME, q, int, s, 8, 16);
+  TEST_UNARY_OP(INSN_NAME, q, int, s, 16, 8);
+  TEST_UNARY_OP(INSN_NAME, q, int, s, 32, 4);
+
+  CHECK_RESULTS (TEST_MSG, "");
+
+#ifdef EXTRA_TESTS
+  EXTRA_TESTS();
+#endif
+}
+
+int main (void)
+{
+  FNNAME (INSN_NAME)();
+
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vabs.c b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vabs.c
new file mode 100644
index 0000000..ca3901a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vabs.c
@@ -0,0 +1,74 @@
+#define INSN_NAME vabs
+#define TEST_MSG "VABS/VABSQ"
+
+/* Extra tests for functions requiring floating-point types. */
+void exec_vabs_f32(void);
+#define EXTRA_TESTS exec_vabs_f32
+
+#include "unary_op.inc"
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0x10, 0xf, 0xe, 0xd,
+                                       0xc, 0xb, 0xa, 0x9 };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0x10, 0xf, 0xe, 0xd };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0x10, 0xf };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0x3333333333333333 };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
+                                        0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0x3333333333333333 };
+VECT_VAR_DECL(expected,poly,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
+                                        0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,poly,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,hfloat,32,2) [] = { 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0x10, 0xf, 0xe, 0xd, 0xc, 0xb, 0xa, 0x9,
+                                        0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0x10, 0xf, 0xe, 0xd,
+                                        0xc, 0xb, 0xa, 0x9 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0x10, 0xf, 0xe, 0xd };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0x3333333333333333,
+                                        0x3333333333333333 };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
+                                         0x33, 0x33, 0x33, 0x33,
+                                         0x33, 0x33, 0x33, 0x33,
+                                         0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
+                                         0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0x33333333, 0x33333333,
+                                         0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0x3333333333333333,
+                                         0x3333333333333333 };
+VECT_VAR_DECL(expected,poly,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
+                                         0x33, 0x33, 0x33, 0x33,
+                                         0x33, 0x33, 0x33, 0x33,
+                                         0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,poly,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
+                                         0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0x33333333, 0x33333333,
+                                           0x33333333, 0x33333333 };
+
+/* Expected results for float32 variants. Needs to be separated since
+   the generic test function does not test floating-point
+   versions. */
+VECT_VAR_DECL(expected_float32,hfloat,32,2) [] = { 0x40133333, 0x40133333 };
+VECT_VAR_DECL(expected_float32,hfloat,32,4) [] = { 0x4059999a, 0x4059999a,
+                                                   0x4059999a, 0x4059999a };
+
+void exec_vabs_f32(void)
+{
+  DECL_VARIABLE(vector, float, 32, 2);
+  DECL_VARIABLE(vector, float, 32, 4);
+
+  DECL_VARIABLE(vector_res, float, 32, 2);
+  DECL_VARIABLE(vector_res, float, 32, 4);
+
+  VDUP(vector, , float, f, 32, 2, -2.3f);
+  VDUP(vector, q, float, f, 32, 4, 3.4f);
+
+  TEST_UNARY_OP(INSN_NAME, , float, f, 32, 2);
+  TEST_UNARY_OP(INSN_NAME, q, float, f, 32, 4);
+
+  CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_float32, "");
+  CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_float32, "");
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vneg.c b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vneg.c
new file mode 100644
index 0000000..c45492d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vneg.c
@@ -0,0 +1,74 @@
+#define INSN_NAME vneg
+#define TEST_MSG "VNEG/VNEGQ"
+
+/* Extra tests for functions requiring floating-point types. */
+void exec_vneg_f32(void);
+#define EXTRA_TESTS exec_vneg_f32
+
+#include "unary_op.inc"
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0x10, 0xf, 0xe, 0xd,
+                                       0xc, 0xb, 0xa, 0x9 };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0x10, 0xf, 0xe, 0xd };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0x10, 0xf };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0x3333333333333333 };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
+                                        0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0x3333333333333333 };
+VECT_VAR_DECL(expected,poly,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
+                                        0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,poly,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,hfloat,32,2) [] = { 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0x10, 0xf, 0xe, 0xd, 0xc, 0xb, 0xa, 0x9,
+                                        0x8, 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0x10, 0xf, 0xe, 0xd,
+                                        0xc, 0xb, 0xa, 0x9 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0x10, 0xf, 0xe, 0xd };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0x3333333333333333,
+                                        0x3333333333333333 };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
+                                         0x33, 0x33, 0x33, 0x33,
+                                         0x33, 0x33, 0x33, 0x33,
+                                         0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
+                                         0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0x33333333, 0x33333333,
+                                         0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0x3333333333333333,
+                                         0x3333333333333333 };
+VECT_VAR_DECL(expected,poly,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
+                                         0x33, 0x33, 0x33, 0x33,
+                                         0x33, 0x33, 0x33, 0x33,
+                                         0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,poly,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
+                                         0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0x33333333, 0x33333333,
+                                           0x33333333, 0x33333333 };
+
+/* Expected results for float32 variants. Needs to be separated since
+   the generic test function does not test floating-point
+   versions. */
+VECT_VAR_DECL(expected_float32,hfloat,32,2) [] = { 0xc0133333, 0xc0133333 };
+VECT_VAR_DECL(expected_float32,hfloat,32,4) [] = { 0xc059999a, 0xc059999a,
+                                                   0xc059999a, 0xc059999a };
+
+void exec_vneg_f32(void)
+{
+  DECL_VARIABLE(vector, float, 32, 2);
+  DECL_VARIABLE(vector, float, 32, 4);
+
+  DECL_VARIABLE(vector_res, float, 32, 2);
+  DECL_VARIABLE(vector_res, float, 32, 4);
+
+  VDUP(vector, , float, f, 32, 2, 2.3f);
+  VDUP(vector, q, float, f, 32, 4, 3.4f);
+
+  TEST_UNARY_OP(INSN_NAME, , float, f, 32, 2);
+  TEST_UNARY_OP(INSN_NAME, q, float, f, 32, 4);
+
+  CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_float32, "");
+  CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_float32, "");
+}
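A note on how the template above composes intrinsic names: TEST_UNARY_OP1 in unary_op.inc pastes its arguments together, so INSN##Q##_##T2##W with INSN_NAME=vabs, Q=q, T2=s, W=8 becomes vabsq_s8, and the matching store becomes vst1q_s8. The standalone sketch below shows the same token-pasting trick in isolation; it uses its own simplified macro and plain buffers (DO_UNARY, in, abs_d, neg_q are illustrative names) rather than the arm-neon-ref.h harness, and is not part of the patch.

```c
#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

int8_t in[16] = { -16, -15, -14, -13, -12, -11, -10, -9,
                   -8,  -7,  -6,  -5,  -4,  -3,  -2,  -1 };
int8_t abs_d[8];    /* result of the 64-bit (D-register) variant  */
int8_t neg_q[16];   /* result of the 128-bit (Q-register) variant */

/* Same token-pasting scheme as TEST_UNARY_OP, minus the harness macros:
   INSN##Q##_##T2##W forms names such as vabs_s8 or vnegq_s8.  */
#define DO_UNARY(INSN, Q, T2, W, OUT, IN) \
  vst1##Q##_##T2##W ((OUT), INSN##Q##_##T2##W (vld1##Q##_##T2##W (IN)))

int main (void)
{
  DO_UNARY (vabs, , s, 8, abs_d, in);   /* vst1_s8 (abs_d, vabs_s8 (vld1_s8 (in)))    */
  DO_UNARY (vneg, q, s, 8, neg_q, in);  /* vst1q_s8 (neg_q, vnegq_s8 (vld1q_s8 (in))) */

  for (int i = 0; i < 8; i++)
    printf ("%d ", abs_d[i]);           /* 16 15 14 13 12 11 10 9 */
  printf ("\n");
  for (int i = 0; i < 16; i++)
    printf ("%d ", neg_q[i]);           /* 16 15 ... 2 1 */
  printf ("\n");
  return 0;
}
```

Built for ARM or AArch64 with NEON/AdvSIMD enabled, this prints the absolute values of the first eight inputs followed by the negations of all sixteen, which is exactly what the per-type TEST_UNARY_OP invocations in unary_op.inc do lane-by-lane before CHECK_RESULTS compares against the expected tables.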