
[v7,6/6] tests/tcg/multiarch: add test for plugin memory access

Message ID 20240724194708.1843704-7-pierrick.bouvier@linaro.org
State New
Series plugins: access values during a memory read/write

Commit Message

Pierrick Bouvier July 24, 2024, 7:47 p.m. UTC
Add an explicit test to check that expected memory values are read/written.
8, 16 and 32 bit load/store are tested for all architectures.
64 and 128 bit load/store are tested for aarch64/x64.
Atomic operations (8, 16, 32 and 64 bit) are tested for x64 only.

By default, atomic accesses are non-atomic if a single cpu is running,
so we force the creation of a second one by spawning a new thread first.

The load/store helper code path can't be triggered easily in user mode (no
softmmu), so we can't test it here.

The output of test-plugin-mem-access.c is the list of patterns expected in
the plugin output. By reading its stdout, we can compare against the plugin
output and have a multiarch test.

Can be run with:
make -C build/tests/tcg/$ARCH-linux-user run-plugin-test-plugin-mem-access-with-libmem.so

Tested-by: Xingtao Yao <yaoxt.fnst@fujitsu.com>
Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
---
 tests/tcg/multiarch/test-plugin-mem-access.c  | 175 ++++++++++++++++++
 tests/tcg/multiarch/Makefile.target           |   7 +
 .../tcg/multiarch/check-plugin-mem-access.sh  |  30 +++
 3 files changed, 212 insertions(+)
 create mode 100644 tests/tcg/multiarch/test-plugin-mem-access.c
 create mode 100755 tests/tcg/multiarch/check-plugin-mem-access.sh

Comments

Alex Bennée Aug. 29, 2024, 9:03 a.m. UTC | #1
Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:

> Add an explicit test to check expected memory values are read/written.
> 8,16,32 load/store are tested for all arch.
> 64,128 load/store are tested for aarch64/x64.
> atomic operations (8,16,32,64) are tested for x64 only.
>
> By default, atomic accesses are non atomic if a single cpu is running,
> so we force creation of a second one by creating a new thread first.
>
> load/store helpers code path can't be triggered easily in user mode (no
> softmmu), so we can't test it here.
>
> Output of test-plugin-mem-access.c is the list of expected patterns in
> plugin output. By reading stdout, we can compare to plugins output and
> have a multiarch test.
>
> Can be run with:
> make -C build/tests/tcg/$ARCH-linux-user run-plugin-test-plugin-mem-access-with-libmem.so
>
> Tested-by: Xingtao Yao <yaoxt.fnst@fujitsu.com>
> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
> ---
>  tests/tcg/multiarch/test-plugin-mem-access.c  | 175 ++++++++++++++++++
>  tests/tcg/multiarch/Makefile.target           |   7 +
>  .../tcg/multiarch/check-plugin-mem-access.sh  |  30 +++
>  3 files changed, 212 insertions(+)
>  create mode 100644 tests/tcg/multiarch/test-plugin-mem-access.c
>  create mode 100755 tests/tcg/multiarch/check-plugin-mem-access.sh
>
> diff --git a/tests/tcg/multiarch/test-plugin-mem-access.c b/tests/tcg/multiarch/test-plugin-mem-access.c
> new file mode 100644
> index 00000000000..09d1fa22e35
> --- /dev/null
> +++ b/tests/tcg/multiarch/test-plugin-mem-access.c
> @@ -0,0 +1,175 @@
> +/*
> + * SPDX-License-Identifier: GPL-2.0-or-later
> + *
> + * Check if we detect all memory accesses expected using plugin API.
> + * Used in conjunction with ./check-plugin-mem-access.sh check script.
> + * Output of this program is the list of patterns expected in plugin output.
> + *
> + * 8,16,32 load/store are tested for all arch.
> + * 64,128 load/store are tested for aarch64/x64.
> + * atomic operations (8,16,32,64) are tested for x64 only.
> + */

It would be nice to build this for the softmmu path as well. I'm not
sure if this can be done with a single source or if we need a second test.
I shall have a play.

> +
> +#include <pthread.h>
> +#include <stdint.h>
> +#include <stdio.h>
> +#include <stdlib.h>
> +
> +#if defined(__x86_64__)
> +#include <emmintrin.h>
> +#elif defined(__aarch64__)
> +#include <arm_neon.h>
> +#endif /* __x86_64__ */
> +
> +static void *data;
> +
> +/* ,store_u8,.*,8,store,0xf1 */
> +#define PRINT_EXPECTED(function, type, value, action)                 \
> +do {                                                                  \
> +    printf(",%s,.*,%d,%s,%s\n",                                       \
> +           #function, (int) sizeof(type) * 8, action, value);         \
> +}                                                                     \
> +while (0)
> +
> +#define DEFINE_STORE(name, type, value)                  \
> +                                                         \
> +static void print_expected_store_##name(void)            \
> +{                                                        \
> +    PRINT_EXPECTED(store_##name, type, #value, "store"); \
> +}                                                        \
> +                                                         \
> +static void store_##name(void)                           \
> +{                                                        \
> +    *((type *)data) = value;                             \
> +    print_expected_store_##name();                       \
> +}
> +
> +#define DEFINE_ATOMIC_OP(name, type, value)                    \
> +                                                               \
> +static void print_expected_atomic_op_##name(void)              \
> +{                                                              \
> +    PRINT_EXPECTED(atomic_op_##name, type, "0x0*42", "load");  \
> +    PRINT_EXPECTED(atomic_op_##name, type, #value, "store");   \
> +}                                                              \
> +                                                               \
> +static void atomic_op_##name(void)                             \
> +{                                                              \
> +    *((type *)data) = 0x42;                                    \
> +    __sync_val_compare_and_swap((type *)data, 0x42, value);    \
> +    print_expected_atomic_op_##name();                         \
> +}
> +
> +#define DEFINE_LOAD(name, type, value)                  \
> +                                                        \
> +static void print_expected_load_##name(void)            \
> +{                                                       \
> +    PRINT_EXPECTED(load_##name, type, #value, "load");  \
> +}                                                       \
> +                                                        \
> +static void load_##name(void)                           \
> +{                                                       \
> +    type src = *((type *) data);                        \
> +    type dest = src;                                    \
> +    (void)src, (void)dest;                              \
> +    print_expected_load_##name();                       \
> +}
> +
> +DEFINE_STORE(u8, uint8_t, 0xf1)
> +DEFINE_LOAD(u8, uint8_t, 0xf1)
> +DEFINE_STORE(u16, uint16_t, 0xf123)
> +DEFINE_LOAD(u16, uint16_t, 0xf123)
> +DEFINE_STORE(u32, uint32_t, 0xff112233)
> +DEFINE_LOAD(u32, uint32_t, 0xff112233)
> +
> +#if defined(__x86_64__) || defined(__aarch64__)
> +DEFINE_STORE(u64, uint64_t, 0xf123456789abcdef)
> +DEFINE_LOAD(u64, uint64_t, 0xf123456789abcdef)
> +
> +static void print_expected_store_u128(void)
> +{
> +    PRINT_EXPECTED(store_u128, __int128,
> +                   "0xf122334455667788f123456789abcdef", "store");
> +}
> +
> +static void store_u128(void)
> +{
> +#ifdef __x86_64__
> +    _mm_store_si128(data, _mm_set_epi32(0xf1223344, 0x55667788,
> +                                        0xf1234567, 0x89abcdef));
> +#else
> +    const uint32_t init[4] = {0x89abcdef, 0xf1234567, 0x55667788, 0xf1223344};
> +    uint32x4_t vec = vld1q_u32(init);
> +    vst1q_u32(data, vec);
> +#endif /* __x86_64__ */
> +    print_expected_store_u128();
> +}
> +
> +static void print_expected_load_u128(void)
> +{
> +    PRINT_EXPECTED(load_u128, __int128,
> +                   "0xf122334455667788f123456789abcdef", "load");
> +}
> +
> +static void load_u128(void)
> +{
> +#ifdef __x86_64__
> +    __m128i var = _mm_load_si128(data);
> +#else
> +    uint32x4_t var = vld1q_u32(data);
> +#endif
> +    (void) var;
> +    print_expected_load_u128();
> +}
> +#endif /* __x86_64__ || __aarch64__ */
> +
> +#if defined(__x86_64__)
> +DEFINE_ATOMIC_OP(u8, uint8_t, 0xf1)
> +DEFINE_ATOMIC_OP(u16, uint16_t, 0xf123)
> +DEFINE_ATOMIC_OP(u32, uint32_t, 0xff112233)
> +DEFINE_ATOMIC_OP(u64, uint64_t, 0xf123456789abcdef)
> +#endif /* __x86_64__ */
> +
> +static void *f(void *p)
> +{
> +    return NULL;
> +}
> +
> +int main(void)
> +{
> +    /*
> +     * We force creation of a second thread to enable cpu flag CF_PARALLEL.
> +     * This will generate atomic operations when needed.
> +     */
> +    pthread_t thread;
> +    pthread_create(&thread, NULL, &f, NULL);
> +    pthread_join(thread, NULL);
> +
> +    /* allocate storage up to 128 bits */
> +    data = malloc(16);
> +
> +    store_u8();
> +    load_u8();
> +
> +    store_u16();
> +    load_u16();
> +
> +    store_u32();
> +    load_u32();
> +
> +#if defined(__x86_64__) || defined(__aarch64__)
> +    store_u64();
> +    load_u64();
> +
> +    store_u128();
> +    load_u128();
> +#endif /* __x86_64__ || __aarch64__ */
> +
> +#if defined(__x86_64__)
> +    atomic_op_u8();
> +    atomic_op_u16();
> +    atomic_op_u32();
> +    atomic_op_u64();
> +#endif /* __x86_64__ */
> +
> +    free(data);
> +}
> diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target
> index 5e3391ec9d2..d90cbd3e521 100644
> --- a/tests/tcg/multiarch/Makefile.target
> +++ b/tests/tcg/multiarch/Makefile.target
> @@ -170,5 +170,12 @@ run-plugin-semiconsole-with-%:
>  TESTS += semihosting semiconsole
>  endif
>  
> +# Test plugin memory access instrumentation
> +run-plugin-test-plugin-mem-access-with-libmem.so: \
> +	PLUGIN_ARGS=$(COMMA)print-accesses=true
> +run-plugin-test-plugin-mem-access-with-libmem.so: \
> +	CHECK_PLUGIN_OUTPUT_COMMAND= \
> +	$(SRC_PATH)/tests/tcg/multiarch/check-plugin-mem-access.sh
> +
>  # Update TESTS
>  TESTS += $(MULTIARCH_TESTS)
> diff --git a/tests/tcg/multiarch/check-plugin-mem-access.sh b/tests/tcg/multiarch/check-plugin-mem-access.sh
> new file mode 100755
> index 00000000000..909606943bb
> --- /dev/null
> +++ b/tests/tcg/multiarch/check-plugin-mem-access.sh
> @@ -0,0 +1,30 @@
> +#!/usr/bin/env bash
> +
> +set -euo pipefail
> +
> +die()
> +{
> +    echo "$@" 1>&2
> +    exit 1
> +}
> +
> +check()
> +{
> +    file=$1
> +    pattern=$2
> +    grep "$pattern" "$file" > /dev/null || die "\"$pattern\" not found in $file"
> +}
> +
> +[ $# -eq 1 ] || die "usage: plugin_out_file"
> +
> +plugin_out=$1
> +
> +expected()
> +{
> +    ./test-plugin-mem-access ||
> +        die "running test-plugin-mem-access executable failed"

I'm confused by this. We seem to be running the test again and this is
going to fail if binfmt_misc isn't set up (which we don't assume for
running the TCG tests).

> +}
> +
> +expected | while read line; do
> +    check "$plugin_out" "$line"
> +done
Pierrick Bouvier Aug. 30, 2024, 7:08 p.m. UTC | #2
On 8/29/24 02:03, Alex Bennée wrote:
> Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:
> 
>> Add an explicit test to check expected memory values are read/written.
>> 8,16,32 load/store are tested for all arch.
>> 64,128 load/store are tested for aarch64/x64.
>> atomic operations (8,16,32,64) are tested for x64 only.
>>
>> By default, atomic accesses are non atomic if a single cpu is running,
>> so we force creation of a second one by creating a new thread first.
>>
>> load/store helpers code path can't be triggered easily in user mode (no
>> softmmu), so we can't test it here.
>>
>> Output of test-plugin-mem-access.c is the list of expected patterns in
>> plugin output. By reading stdout, we can compare to plugins output and
>> have a multiarch test.
>>
>> Can be run with:
>> make -C build/tests/tcg/$ARCH-linux-user run-plugin-test-plugin-mem-access-with-libmem.so
>>
>> Tested-by: Xingtao Yao <yaoxt.fnst@fujitsu.com>
>> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
>> ---
>>   tests/tcg/multiarch/test-plugin-mem-access.c  | 175 ++++++++++++++++++
>>   tests/tcg/multiarch/Makefile.target           |   7 +
>>   .../tcg/multiarch/check-plugin-mem-access.sh  |  30 +++
>>   3 files changed, 212 insertions(+)
>>   create mode 100644 tests/tcg/multiarch/test-plugin-mem-access.c
>>   create mode 100755 tests/tcg/multiarch/check-plugin-mem-access.sh
>>
>> diff --git a/tests/tcg/multiarch/test-plugin-mem-access.c b/tests/tcg/multiarch/test-plugin-mem-access.c
>> new file mode 100644
>> index 00000000000..09d1fa22e35
>> --- /dev/null
>> +++ b/tests/tcg/multiarch/test-plugin-mem-access.c
>> @@ -0,0 +1,175 @@
>> +/*
>> + * SPDX-License-Identifier: GPL-2.0-or-later
>> + *
>> + * Check if we detect all memory accesses expected using plugin API.
>> + * Used in conjunction with ./check-plugin-mem-access.sh check script.
>> + * Output of this program is the list of patterns expected in plugin output.
>> + *
>> + * 8,16,32 load/store are tested for all arch.
>> + * 64,128 load/store are tested for aarch64/x64.
>> + * atomic operations (8,16,32,64) are tested for x64 only.
>> + */
> 
> It would be nice to build this for the softmmu path as well. I'm not
> sure if this can be done with a single source or if we need a second test.
> I shall have a play.
> 

Ok, thanks.

>> +
>> +#include <pthread.h>
>> +#include <stdint.h>
>> +#include <stdio.h>
>> +#include <stdlib.h>
>> +
>> +#if defined(__x86_64__)
>> +#include <emmintrin.h>
>> +#elif defined(__aarch64__)
>> +#include <arm_neon.h>
>> +#endif /* __x86_64__ */
>> +
>> +static void *data;
>> +
>> +/* ,store_u8,.*,8,store,0xf1 */
>> +#define PRINT_EXPECTED(function, type, value, action)                 \
>> +do {                                                                  \
>> +    printf(",%s,.*,%d,%s,%s\n",                                       \
>> +           #function, (int) sizeof(type) * 8, action, value);         \
>> +}                                                                     \
>> +while (0)
>> +
>> +#define DEFINE_STORE(name, type, value)                  \
>> +                                                         \
>> +static void print_expected_store_##name(void)            \
>> +{                                                        \
>> +    PRINT_EXPECTED(store_##name, type, #value, "store"); \
>> +}                                                        \
>> +                                                         \
>> +static void store_##name(void)                           \
>> +{                                                        \
>> +    *((type *)data) = value;                             \
>> +    print_expected_store_##name();                       \
>> +}
>> +
>> +#define DEFINE_ATOMIC_OP(name, type, value)                    \
>> +                                                               \
>> +static void print_expected_atomic_op_##name(void)              \
>> +{                                                              \
>> +    PRINT_EXPECTED(atomic_op_##name, type, "0x0*42", "load");  \
>> +    PRINT_EXPECTED(atomic_op_##name, type, #value, "store");   \
>> +}                                                              \
>> +                                                               \
>> +static void atomic_op_##name(void)                             \
>> +{                                                              \
>> +    *((type *)data) = 0x42;                                    \
>> +    __sync_val_compare_and_swap((type *)data, 0x42, value);    \
>> +    print_expected_atomic_op_##name();                         \
>> +}
>> +
>> +#define DEFINE_LOAD(name, type, value)                  \
>> +                                                        \
>> +static void print_expected_load_##name(void)            \
>> +{                                                       \
>> +    PRINT_EXPECTED(load_##name, type, #value, "load");  \
>> +}                                                       \
>> +                                                        \
>> +static void load_##name(void)                           \
>> +{                                                       \
>> +    type src = *((type *) data);                        \
>> +    type dest = src;                                    \
>> +    (void)src, (void)dest;                              \
>> +    print_expected_load_##name();                       \
>> +}
>> +
>> +DEFINE_STORE(u8, uint8_t, 0xf1)
>> +DEFINE_LOAD(u8, uint8_t, 0xf1)
>> +DEFINE_STORE(u16, uint16_t, 0xf123)
>> +DEFINE_LOAD(u16, uint16_t, 0xf123)
>> +DEFINE_STORE(u32, uint32_t, 0xff112233)
>> +DEFINE_LOAD(u32, uint32_t, 0xff112233)
>> +
>> +#if defined(__x86_64__) || defined(__aarch64__)
>> +DEFINE_STORE(u64, uint64_t, 0xf123456789abcdef)
>> +DEFINE_LOAD(u64, uint64_t, 0xf123456789abcdef)
>> +
>> +static void print_expected_store_u128(void)
>> +{
>> +    PRINT_EXPECTED(store_u128, __int128,
>> +                   "0xf122334455667788f123456789abcdef", "store");
>> +}
>> +
>> +static void store_u128(void)
>> +{
>> +#ifdef __x86_64__
>> +    _mm_store_si128(data, _mm_set_epi32(0xf1223344, 0x55667788,
>> +                                        0xf1234567, 0x89abcdef));
>> +#else
>> +    const uint32_t init[4] = {0x89abcdef, 0xf1234567, 0x55667788, 0xf1223344};
>> +    uint32x4_t vec = vld1q_u32(init);
>> +    vst1q_u32(data, vec);
>> +#endif /* __x86_64__ */
>> +    print_expected_store_u128();
>> +}
>> +
>> +static void print_expected_load_u128(void)
>> +{
>> +    PRINT_EXPECTED(load_u128, __int128,
>> +                   "0xf122334455667788f123456789abcdef", "load");
>> +}
>> +
>> +static void load_u128(void)
>> +{
>> +#ifdef __x86_64__
>> +    __m128i var = _mm_load_si128(data);
>> +#else
>> +    uint32x4_t var = vld1q_u32(data);
>> +#endif
>> +    (void) var;
>> +    print_expected_load_u128();
>> +}
>> +#endif /* __x86_64__ || __aarch64__ */
>> +
>> +#if defined(__x86_64__)
>> +DEFINE_ATOMIC_OP(u8, uint8_t, 0xf1)
>> +DEFINE_ATOMIC_OP(u16, uint16_t, 0xf123)
>> +DEFINE_ATOMIC_OP(u32, uint32_t, 0xff112233)
>> +DEFINE_ATOMIC_OP(u64, uint64_t, 0xf123456789abcdef)
>> +#endif /* __x86_64__ */
>> +
>> +static void *f(void *p)
>> +{
>> +    return NULL;
>> +}
>> +
>> +int main(void)
>> +{
>> +    /*
>> +     * We force creation of a second thread to enable cpu flag CF_PARALLEL.
>> +     * This will generate atomic operations when needed.
>> +     */
>> +    pthread_t thread;
>> +    pthread_create(&thread, NULL, &f, NULL);
>> +    pthread_join(thread, NULL);
>> +
>> +    /* allocate storage up to 128 bits */
>> +    data = malloc(16);
>> +
>> +    store_u8();
>> +    load_u8();
>> +
>> +    store_u16();
>> +    load_u16();
>> +
>> +    store_u32();
>> +    load_u32();
>> +
>> +#if defined(__x86_64__) || defined(__aarch64__)
>> +    store_u64();
>> +    load_u64();
>> +
>> +    store_u128();
>> +    load_u128();
>> +#endif /* __x86_64__ || __aarch64__ */
>> +
>> +#if defined(__x86_64__)
>> +    atomic_op_u8();
>> +    atomic_op_u16();
>> +    atomic_op_u32();
>> +    atomic_op_u64();
>> +#endif /* __x86_64__ */
>> +
>> +    free(data);
>> +}
>> diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target
>> index 5e3391ec9d2..d90cbd3e521 100644
>> --- a/tests/tcg/multiarch/Makefile.target
>> +++ b/tests/tcg/multiarch/Makefile.target
>> @@ -170,5 +170,12 @@ run-plugin-semiconsole-with-%:
>>   TESTS += semihosting semiconsole
>>   endif
>>   
>> +# Test plugin memory access instrumentation
>> +run-plugin-test-plugin-mem-access-with-libmem.so: \
>> +	PLUGIN_ARGS=$(COMMA)print-accesses=true
>> +run-plugin-test-plugin-mem-access-with-libmem.so: \
>> +	CHECK_PLUGIN_OUTPUT_COMMAND= \
>> +	$(SRC_PATH)/tests/tcg/multiarch/check-plugin-mem-access.sh
>> +
>>   # Update TESTS
>>   TESTS += $(MULTIARCH_TESTS)
>> diff --git a/tests/tcg/multiarch/check-plugin-mem-access.sh b/tests/tcg/multiarch/check-plugin-mem-access.sh
>> new file mode 100755
>> index 00000000000..909606943bb
>> --- /dev/null
>> +++ b/tests/tcg/multiarch/check-plugin-mem-access.sh
>> @@ -0,0 +1,30 @@
>> +#!/usr/bin/env bash
>> +
>> +set -euo pipefail
>> +
>> +die()
>> +{
>> +    echo "$@" 1>&2
>> +    exit 1
>> +}
>> +
>> +check()
>> +{
>> +    file=$1
>> +    pattern=$2
>> +    grep "$pattern" "$file" > /dev/null || die "\"$pattern\" not found in $file"
>> +}
>> +
>> +[ $# -eq 1 ] || die "usage: plugin_out_file"
>> +
>> +plugin_out=$1
>> +
>> +expected()
>> +{
>> +    ./test-plugin-mem-access ||
>> +        die "running test-plugin-mem-access executable failed"
> 
> I'm confused by this. We seem to be running the test again and this is
> going to fail if binfmt_misc isn't setup (which we don't assume for
> running the TCG tests).
> 

The test stdout is the expected output to grep. This is to avoid
an "expected file" and a "source file" somewhere else.
Could we use compiled qemu-user to run it instead?

I'm trying to find a solution where "expected" is not duplicated between 
several files.

>> +}
>> +
>> +expected | while read line; do
>> +    check "$plugin_out" "$line"
>> +done
>
Alex Bennée Sept. 4, 2024, 1:19 p.m. UTC | #3
Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:

> On 8/29/24 02:03, Alex Bennée wrote:
>> Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:
>> 
<snip>
>>> diff --git a/tests/tcg/multiarch/check-plugin-mem-access.sh b/tests/tcg/multiarch/check-plugin-mem-access.sh
>>> new file mode 100755
>>> index 00000000000..909606943bb
>>> --- /dev/null
>>> +++ b/tests/tcg/multiarch/check-plugin-mem-access.sh
>>> @@ -0,0 +1,30 @@
>>> +#!/usr/bin/env bash
>>> +
>>> +set -euo pipefail
>>> +
>>> +die()
>>> +{
>>> +    echo "$@" 1>&2
>>> +    exit 1
>>> +}
>>> +
>>> +check()
>>> +{
>>> +    file=$1
>>> +    pattern=$2
>>> +    grep "$pattern" "$file" > /dev/null || die "\"$pattern\" not found in $file"
>>> +}
>>> +
>>> +[ $# -eq 1 ] || die "usage: plugin_out_file"
>>> +
>>> +plugin_out=$1
>>> +
>>> +expected()
>>> +{
>>> +    ./test-plugin-mem-access ||
>>> +        die "running test-plugin-mem-access executable failed"
>> I'm confused by this. We seem to be running the test again and this
>> is
>> going to fail if binfmt_misc isn't setup (which we don't assume for
>> running the TCG tests).
>> 
>
> The test stdout is the expected output to grep. This is to avoid
> an "expected file" and a "source file" somewhere else.

Is this really such an issue? For the system mode test I just did:

  run-plugin-memory-with-libmem.so: 		\
          CHECK_PLUGIN_OUTPUT_COMMAND=$(MULTIARCH_SYSTEM_SRC)/validate-memory-counts.py $@.out

> Could we use compiled qemu-user to run it instead?

Yes - although that would be inefficient (and you need to pass that path
in somehow anyway)

> I'm trying to find a solution where "expected" is not duplicated
> between several files.

Move it all into python?
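
Something along these lines might work as an untested sketch (the file name
and the hard-coded pattern list here are purely illustrative):

  #!/usr/bin/env python3
  # Untested sketch: check the plugin output for a fixed list of expected
  # patterns, instead of re-running the test binary to generate them.
  import re
  import sys

  # Illustrative only: the real list would cover every access the test makes.
  EXPECTED = [
      r",store_u8,.*,8,store,0xf1",
      r",load_u8,.*,8,load,0xf1",
  ]

  def main():
      if len(sys.argv) != 2:
          sys.exit("usage: check-plugin-mem-access.py <plugin_out_file>")
      with open(sys.argv[1]) as f:
          out = f.read()
      for pattern in EXPECTED:
          if not re.search(pattern, out):
              sys.exit(f'"{pattern}" not found in {sys.argv[1]}')

  if __name__ == "__main__":
      main()

Though hard-coding the patterns would reintroduce the duplication with the
test source that the stdout approach is meant to avoid.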
Alex Bennée Sept. 4, 2024, 3:41 p.m. UTC | #4
Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:

> Add an explicit test to check expected memory values are read/written.
> 8,16,32 load/store are tested for all arch.
> 64,128 load/store are tested for aarch64/x64.
> atomic operations (8,16,32,64) are tested for x64 only.
>
> By default, atomic accesses are non atomic if a single cpu is running,
> so we force creation of a second one by creating a new thread first.
>
> load/store helpers code path can't be triggered easily in user mode (no
> softmmu), so we can't test it here.
>
> Output of test-plugin-mem-access.c is the list of expected patterns in
> plugin output. By reading stdout, we can compare to plugins output and
> have a multiarch test.
>
> Can be run with:
> make -C build/tests/tcg/$ARCH-linux-user run-plugin-test-plugin-mem-access-with-libmem.so
>
> Tested-by: Xingtao Yao <yaoxt.fnst@fujitsu.com>
> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
> ---
>  tests/tcg/multiarch/test-plugin-mem-access.c  | 175 ++++++++++++++++++
>  tests/tcg/multiarch/Makefile.target           |   7 +
>  .../tcg/multiarch/check-plugin-mem-access.sh  |  30 +++
>  3 files changed, 212 insertions(+)
>  create mode 100644 tests/tcg/multiarch/test-plugin-mem-access.c
>  create mode 100755 tests/tcg/multiarch/check-plugin-mem-access.sh
>
> diff --git a/tests/tcg/multiarch/test-plugin-mem-access.c b/tests/tcg/multiarch/test-plugin-mem-access.c
> new file mode 100644
> index 00000000000..09d1fa22e35
<snip>
> diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target
> index 5e3391ec9d2..d90cbd3e521 100644
> --- a/tests/tcg/multiarch/Makefile.target
> +++ b/tests/tcg/multiarch/Makefile.target
> @@ -170,5 +170,12 @@ run-plugin-semiconsole-with-%:
>  TESTS += semihosting semiconsole
>  endif
>

Also you need:

test-plugin-mem-access: CFLAGS+=-pthread
test-plugin-mem-access: LDFLAGS+=-pthread

So that less tolerant GCCs include pthread (otherwise the alpha-linux-user
build fails). With that fix I get:

   TEST    check plugin libmem.so output with test-plugin-mem-access
  ",store_u8,.*,8,store,0xf1" not found in test-plugin-mem-access-with-libmem.so.pout
  make[1]: *** [Makefile:181: run-plugin-test-plugin-mem-access-with-libmem.so] Error 1
  make: *** [/home/alex/lsrc/qemu.git/tests/Makefile.include:56: run-tcg-tests-alpha-linux-user] Error 2

> +# Test plugin memory access instrumentation
> +run-plugin-test-plugin-mem-access-with-libmem.so: \
> +	PLUGIN_ARGS=$(COMMA)print-accesses=true
> +run-plugin-test-plugin-mem-access-with-libmem.so: \
> +	CHECK_PLUGIN_OUTPUT_COMMAND= \
> +	$(SRC_PATH)/tests/tcg/multiarch/check-plugin-mem-access.sh
> +
>  # Update TESTS
>  TESTS += $(MULTIARCH_TESTS)
> diff --git a/tests/tcg/multiarch/check-plugin-mem-access.sh b/tests/tcg/multiarch/check-plugin-mem-access.sh
> new file mode 100755
> index 00000000000..909606943bb
> --- /dev/null
> +++ b/tests/tcg/multiarch/check-plugin-mem-access.sh
> @@ -0,0 +1,30 @@
> +#!/usr/bin/env bash
> +
> +set -euo pipefail
> +
> +die()
> +{
> +    echo "$@" 1>&2
> +    exit 1
> +}
> +
> +check()
> +{
> +    file=$1
> +    pattern=$2
> +    grep "$pattern" "$file" > /dev/null || die "\"$pattern\" not found in $file"
> +}
> +
> +[ $# -eq 1 ] || die "usage: plugin_out_file"
> +
> +plugin_out=$1
> +
> +expected()
> +{
> +    ./test-plugin-mem-access ||
> +        die "running test-plugin-mem-access executable failed"
> +}
> +
> +expected | while read line; do
> +    check "$plugin_out" "$line"
> +done
Alex Bennée Sept. 4, 2024, 4:28 p.m. UTC | #5
Alex Bennée <alex.bennee@linaro.org> writes:

> Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:
>
>> Add an explicit test to check expected memory values are read/written.
>> 8,16,32 load/store are tested for all arch.
>> 64,128 load/store are tested for aarch64/x64.
>> atomic operations (8,16,32,64) are tested for x64 only.
>>
>> By default, atomic accesses are non atomic if a single cpu is running,
>> so we force creation of a second one by creating a new thread first.
>>
>> load/store helpers code path can't be triggered easily in user mode (no
>> softmmu), so we can't test it here.
>>
>> Output of test-plugin-mem-access.c is the list of expected patterns in
>> plugin output. By reading stdout, we can compare to plugins output and
>> have a multiarch test.
>>
>> Can be run with:
>> make -C build/tests/tcg/$ARCH-linux-user run-plugin-test-plugin-mem-access-with-libmem.so
>>
>> Tested-by: Xingtao Yao <yaoxt.fnst@fujitsu.com>
>> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
>> ---
>>  tests/tcg/multiarch/test-plugin-mem-access.c  | 175 ++++++++++++++++++
>>  tests/tcg/multiarch/Makefile.target           |   7 +
>>  .../tcg/multiarch/check-plugin-mem-access.sh  |  30 +++
>>  3 files changed, 212 insertions(+)
>>  create mode 100644 tests/tcg/multiarch/test-plugin-mem-access.c
>>  create mode 100755 tests/tcg/multiarch/check-plugin-mem-access.sh
>>
>> diff --git a/tests/tcg/multiarch/test-plugin-mem-access.c b/tests/tcg/multiarch/test-plugin-mem-access.c
>> new file mode 100644
>> index 00000000000..09d1fa22e35
> <snip>
>> diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target
>> index 5e3391ec9d2..d90cbd3e521 100644
>> --- a/tests/tcg/multiarch/Makefile.target
>> +++ b/tests/tcg/multiarch/Makefile.target
>> @@ -170,5 +170,12 @@ run-plugin-semiconsole-with-%:
>>  TESTS += semihosting semiconsole
>>  endif
>>
>
> Also you need:
>
> test-plugin-mem-access: CFLAGS+=-pthread
> test-plugin-mem-access: LDFLAGS+=-pthread
>
> So less tolerant gcc's include pthread (otherwise the alpha-linux-user
> fails), with that fix I get:
>
>    TEST    check plugin libmem.so output with test-plugin-mem-access
>   ",store_u8,.*,8,store,0xf1" not found in test-plugin-mem-access-with-libmem.so.pout
>   make[1]: *** [Makefile:181: run-plugin-test-plugin-mem-access-with-libmem.so] Error 1
>   make: *** [/home/alex/lsrc/qemu.git/tests/Makefile.include:56: run-tcg-tests-alpha-linux-user] Error 2

And ensure we enable BWX for alpha so it emits byte stores instead of
faking them with masking:

modified   tests/tcg/alpha/Makefile.target
@@ -13,3 +13,5 @@ test-cmov: test-cond.c
 	$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< -o $@ $(LDFLAGS)
 
 run-test-cmov: test-cmov
+
+test-plugin-mem-access: CFLAGS+=-mbwx

Patch

diff --git a/tests/tcg/multiarch/test-plugin-mem-access.c b/tests/tcg/multiarch/test-plugin-mem-access.c
new file mode 100644
index 00000000000..09d1fa22e35
--- /dev/null
+++ b/tests/tcg/multiarch/test-plugin-mem-access.c
@@ -0,0 +1,175 @@ 
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Check if we detect all memory accesses expected using plugin API.
+ * Used in conjunction with ./check-plugin-mem-access.sh check script.
+ * Output of this program is the list of patterns expected in plugin output.
+ *
+ * 8,16,32 load/store are tested for all arch.
+ * 64,128 load/store are tested for aarch64/x64.
+ * atomic operations (8,16,32,64) are tested for x64 only.
+ */
+
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#if defined(__x86_64__)
+#include <emmintrin.h>
+#elif defined(__aarch64__)
+#include <arm_neon.h>
+#endif /* __x86_64__ */
+
+static void *data;
+
+/* ,store_u8,.*,8,store,0xf1 */
+#define PRINT_EXPECTED(function, type, value, action)                 \
+do {                                                                  \
+    printf(",%s,.*,%d,%s,%s\n",                                       \
+           #function, (int) sizeof(type) * 8, action, value);         \
+}                                                                     \
+while (0)
+
+#define DEFINE_STORE(name, type, value)                  \
+                                                         \
+static void print_expected_store_##name(void)            \
+{                                                        \
+    PRINT_EXPECTED(store_##name, type, #value, "store"); \
+}                                                        \
+                                                         \
+static void store_##name(void)                           \
+{                                                        \
+    *((type *)data) = value;                             \
+    print_expected_store_##name();                       \
+}
+
+#define DEFINE_ATOMIC_OP(name, type, value)                    \
+                                                               \
+static void print_expected_atomic_op_##name(void)              \
+{                                                              \
+    PRINT_EXPECTED(atomic_op_##name, type, "0x0*42", "load");  \
+    PRINT_EXPECTED(atomic_op_##name, type, #value, "store");   \
+}                                                              \
+                                                               \
+static void atomic_op_##name(void)                             \
+{                                                              \
+    *((type *)data) = 0x42;                                    \
+    __sync_val_compare_and_swap((type *)data, 0x42, value);    \
+    print_expected_atomic_op_##name();                         \
+}
+
+#define DEFINE_LOAD(name, type, value)                  \
+                                                        \
+static void print_expected_load_##name(void)            \
+{                                                       \
+    PRINT_EXPECTED(load_##name, type, #value, "load");  \
+}                                                       \
+                                                        \
+static void load_##name(void)                           \
+{                                                       \
+    type src = *((type *) data);                        \
+    type dest = src;                                    \
+    (void)src, (void)dest;                              \
+    print_expected_load_##name();                       \
+}
+
+DEFINE_STORE(u8, uint8_t, 0xf1)
+DEFINE_LOAD(u8, uint8_t, 0xf1)
+DEFINE_STORE(u16, uint16_t, 0xf123)
+DEFINE_LOAD(u16, uint16_t, 0xf123)
+DEFINE_STORE(u32, uint32_t, 0xff112233)
+DEFINE_LOAD(u32, uint32_t, 0xff112233)
+
+#if defined(__x86_64__) || defined(__aarch64__)
+DEFINE_STORE(u64, uint64_t, 0xf123456789abcdef)
+DEFINE_LOAD(u64, uint64_t, 0xf123456789abcdef)
+
+static void print_expected_store_u128(void)
+{
+    PRINT_EXPECTED(store_u128, __int128,
+                   "0xf122334455667788f123456789abcdef", "store");
+}
+
+static void store_u128(void)
+{
+#ifdef __x86_64__
+    _mm_store_si128(data, _mm_set_epi32(0xf1223344, 0x55667788,
+                                        0xf1234567, 0x89abcdef));
+#else
+    const uint32_t init[4] = {0x89abcdef, 0xf1234567, 0x55667788, 0xf1223344};
+    uint32x4_t vec = vld1q_u32(init);
+    vst1q_u32(data, vec);
+#endif /* __x86_64__ */
+    print_expected_store_u128();
+}
+
+static void print_expected_load_u128(void)
+{
+    PRINT_EXPECTED(load_u128, __int128,
+                   "0xf122334455667788f123456789abcdef", "load");
+}
+
+static void load_u128(void)
+{
+#ifdef __x86_64__
+    __m128i var = _mm_load_si128(data);
+#else
+    uint32x4_t var = vld1q_u32(data);
+#endif
+    (void) var;
+    print_expected_load_u128();
+}
+#endif /* __x86_64__ || __aarch64__ */
+
+#if defined(__x86_64__)
+DEFINE_ATOMIC_OP(u8, uint8_t, 0xf1)
+DEFINE_ATOMIC_OP(u16, uint16_t, 0xf123)
+DEFINE_ATOMIC_OP(u32, uint32_t, 0xff112233)
+DEFINE_ATOMIC_OP(u64, uint64_t, 0xf123456789abcdef)
+#endif /* __x86_64__ */
+
+static void *f(void *p)
+{
+    return NULL;
+}
+
+int main(void)
+{
+    /*
+     * We force creation of a second thread to enable cpu flag CF_PARALLEL.
+     * This will generate atomic operations when needed.
+     */
+    pthread_t thread;
+    pthread_create(&thread, NULL, &f, NULL);
+    pthread_join(thread, NULL);
+
+    /* allocate storage up to 128 bits */
+    data = malloc(16);
+
+    store_u8();
+    load_u8();
+
+    store_u16();
+    load_u16();
+
+    store_u32();
+    load_u32();
+
+#if defined(__x86_64__) || defined(__aarch64__)
+    store_u64();
+    load_u64();
+
+    store_u128();
+    load_u128();
+#endif /* __x86_64__ || __aarch64__ */
+
+#if defined(__x86_64__)
+    atomic_op_u8();
+    atomic_op_u16();
+    atomic_op_u32();
+    atomic_op_u64();
+#endif /* __x86_64__ */
+
+    free(data);
+}
diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target
index 5e3391ec9d2..d90cbd3e521 100644
--- a/tests/tcg/multiarch/Makefile.target
+++ b/tests/tcg/multiarch/Makefile.target
@@ -170,5 +170,12 @@  run-plugin-semiconsole-with-%:
 TESTS += semihosting semiconsole
 endif
 
+# Test plugin memory access instrumentation
+run-plugin-test-plugin-mem-access-with-libmem.so: \
+	PLUGIN_ARGS=$(COMMA)print-accesses=true
+run-plugin-test-plugin-mem-access-with-libmem.so: \
+	CHECK_PLUGIN_OUTPUT_COMMAND= \
+	$(SRC_PATH)/tests/tcg/multiarch/check-plugin-mem-access.sh
+
 # Update TESTS
 TESTS += $(MULTIARCH_TESTS)
diff --git a/tests/tcg/multiarch/check-plugin-mem-access.sh b/tests/tcg/multiarch/check-plugin-mem-access.sh
new file mode 100755
index 00000000000..909606943bb
--- /dev/null
+++ b/tests/tcg/multiarch/check-plugin-mem-access.sh
@@ -0,0 +1,30 @@ 
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+die()
+{
+    echo "$@" 1>&2
+    exit 1
+}
+
+check()
+{
+    file=$1
+    pattern=$2
+    grep "$pattern" "$file" > /dev/null || die "\"$pattern\" not found in $file"
+}
+
+[ $# -eq 1 ] || die "usage: plugin_out_file"
+
+plugin_out=$1
+
+expected()
+{
+    ./test-plugin-mem-access ||
+        die "running test-plugin-mem-access executable failed"
+}
+
+expected | while read line; do
+    check "$plugin_out" "$line"
+done