[v5,10/54] accel/tcg: Use have_atomic16 in ldst_atomicity.c.inc

Message ID 20230515143313.734053-11-richard.henderson@linaro.org
State New
Series tcg: Improve atomicity support

Commit Message

Richard Henderson May 15, 2023, 2:32 p.m. UTC
Hosts using Intel and AMD AVX CPUs are quite common.
Add fast paths through ldst_atomicity using this.

Only enable with CONFIG_INT128; some older clang versions do not
support __int128_t, and the inline assembly won't work on structures.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/ldst_atomicity.c.inc | 76 +++++++++++++++++++++++++++-------
 1 file changed, 60 insertions(+), 16 deletions(-)
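
(Context for the CONFIG_INT128 note above: inline asm operands must be
scalars, and QEMU's Int128 is only a native 128-bit integer when
CONFIG_INT128 is set; otherwise it is a structure. The Int128Alias
union used by this patch, from include/qemu/int128.h, is roughly the
sketch below; the asm paths read and write the integer view:)

    /* Rough sketch of Int128Alias (see include/qemu/int128.h): one
     * 16-byte object viewed either as the API type Int128 (.s) or as
     * the compiler's native 128-bit integers (.i/.u).  asm operand
     * constraints such as "x" and "r" accept the integer views but
     * not a structure. */
    typedef union {
        Int128      s;   /* QEMU API view */
        __int128_t  i;
        __uint128_t u;
    } Int128Alias;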

Comments

Peter Maydell May 16, 2023, 10:38 a.m. UTC | #1
On Mon, 15 May 2023 at 15:37, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Hosts using Intel and AMD AVX CPUs are quite common.
> Add fast paths through ldst_atomicity using this.
>
> Only enable with CONFIG_INT128; some older clang versions do not
> support __int128_t, and the inline assembly won't work on structures.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  accel/tcg/ldst_atomicity.c.inc | 76 +++++++++++++++++++++++++++-------
>  1 file changed, 60 insertions(+), 16 deletions(-)
>
> diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
> index dd387c9bdd..69c1c61997 100644
> --- a/accel/tcg/ldst_atomicity.c.inc
> +++ b/accel/tcg/ldst_atomicity.c.inc
> @@ -35,6 +35,14 @@
>
>  #if defined(CONFIG_ATOMIC128)
>  # define HAVE_al16_fast    true
> +#elif defined(CONFIG_TCG_INTERPRETER)
> +/*
> + * FIXME: host specific detection for this is in tcg/$host/,
> + * but we're using tcg/tci/ instead.
> + */
> +# define HAVE_al16_fast    false
> +#elif defined(__x86_64__) && defined(CONFIG_INT128)
> +# define HAVE_al16_fast    likely(have_atomic16)
>  #else
>  # define HAVE_al16_fast    false
>  #endif
> @@ -178,6 +186,12 @@ load_atomic16(void *pv)
>
>      r.u = qatomic_read__nocheck(p);
>      return r.s;
> +#elif defined(__x86_64__) && defined(CONFIG_INT128)
> +    Int128Alias r;
> +
> +    /* Via HAVE_al16_fast, have_atomic16 is true. */
> +    asm("vmovdqa %1, %0" : "=x" (r.u) : "m" (*(Int128 *)pv));
> +    return r.s;

This is a compile-time check, so if we can do
16-byte atomic loads, why would CONFIG_ATOMIC128 not be set?

>  #else
>      qemu_build_not_reached();
>  #endif
> @@ -399,6 +413,24 @@ load_atom_extract_al16_or_al8(void *pv, int s)
>          r = qatomic_read__nocheck(p16);
>      }
>      return r >> shr;
> +#elif defined(__x86_64__) && defined(CONFIG_INT128)
> +    uintptr_t pi = (uintptr_t)pv;
> +    int shr = (pi & 7) * 8;
> +    uint64_t a, b;
> +
> +    /* Via HAVE_al16_fast, have_atomic16 is true. */
> +    pv = (void *)(pi & ~7);
> +    if (pi & 8) {
> +        uint64_t *p8 = __builtin_assume_aligned(pv, 16, 8);
> +        a = qatomic_read__nocheck(p8);
> +        b = qatomic_read__nocheck(p8 + 1);
> +    } else {
> +        asm("vmovdqa %2, %0\n\tvpextrq $1, %0, %1"
> +            : "=x"(a), "=r"(b) : "m" (*(__uint128_t *)pv));
> +    }
> +    asm("shrd %b2, %1, %0" : "+r"(a) : "r"(b), "c"(shr));
> +
> +    return a;

Similarly this seems to just be an asm version of
the CONFIG_ATOMIC128 code.

thanks
-- PMM
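
(Aside on the asm being discussed: "shrd %b2, %1, %0" shifts the
128-bit pair b:a right by shr bits and keeps the low 64, extracting
the requested, possibly misaligned, 8 bytes.  A portable C sketch of
that step, as a hypothetical helper not taken from the patch:)

    #include <stdint.h>

    /* Shift the 128-bit pair (hi:lo) right by shr bits and return the
     * low 64 bits.  In the patch shr is (addr & 7) * 8, i.e. 0..56.
     * A C shift by 64 is undefined, so shr == 0 is special-cased;
     * x86's shrd handles a zero count natively. */
    static inline uint64_t extract_low64(uint64_t lo, uint64_t hi, int shr)
    {
        return shr ? (lo >> shr) | (hi << (64 - shr)) : lo;
    }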
Richard Henderson May 16, 2023, 1:50 p.m. UTC | #2
On 5/16/23 03:38, Peter Maydell wrote:
> On Mon, 15 May 2023 at 15:37, Richard Henderson
> <richard.henderson@linaro.org> wrote:
>>
>> Hosts using Intel and AMD AVX CPUs are quite common.
>> Add fast paths through ldst_atomicity using this.
>>
>> Only enable with CONFIG_INT128; some older clang versions do not
>> support __int128_t, and the inline assembly won't work on structures.
>>
>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>> ---
>>   accel/tcg/ldst_atomicity.c.inc | 76 +++++++++++++++++++++++++++-------
>>   1 file changed, 60 insertions(+), 16 deletions(-)
>>
>> diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
>> index dd387c9bdd..69c1c61997 100644
>> --- a/accel/tcg/ldst_atomicity.c.inc
>> +++ b/accel/tcg/ldst_atomicity.c.inc
>> @@ -35,6 +35,14 @@
>>
>>   #if defined(CONFIG_ATOMIC128)
>>   # define HAVE_al16_fast    true
>> +#elif defined(CONFIG_TCG_INTERPRETER)
>> +/*
>> + * FIXME: host specific detection for this is in tcg/$host/,
>> + * but we're using tcg/tci/ instead.
>> + */
>> +# define HAVE_al16_fast    false
>> +#elif defined(__x86_64__) && defined(CONFIG_INT128)
>> +# define HAVE_al16_fast    likely(have_atomic16)
>>   #else
>>   # define HAVE_al16_fast    false
>>   #endif
>> @@ -178,6 +186,12 @@ load_atomic16(void *pv)
>>
>>       r.u = qatomic_read__nocheck(p);
>>       return r.s;
>> +#elif defined(__x86_64__) && defined(CONFIG_INT128)
>> +    Int128Alias r;
>> +
>> +    /* Via HAVE_al16_fast, have_atomic16 is true. */
>> +    asm("vmovdqa %1, %0" : "=x" (r.u) : "m" (*(Int128 *)pv));
>> +    return r.s;
> 
> This is a compile-time check, so if we can do
> 16-byte atomic loads, why would CONFIG_ATOMIC128 not be set?

This is *not* a compile-time check: have_atomic16 is initialized in 
tcg/i386/tcg-target.c.inc in patch 9.


r~
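
(A minimal sketch of the kind of runtime detection involved --
hypothetical, not the actual patch-9 code -- which keys off Intel's
and AMD's documented guarantee that aligned 16-byte VMOVDQA loads and
stores are atomic on AVX-capable CPUs:)

    #include <cpuid.h>
    #include <stdbool.h>

    bool have_atomic16;

    static void detect_atomic16(void)
    {
        unsigned a, b, c, d;

        if (__get_cpuid(1, &a, &b, &c, &d)) {
            /* AVX usable implies atomic aligned 16-byte vector
             * accesses.  A complete check would also confirm OS
             * support for the YMM state via XGETBV/XCR0. */
            have_atomic16 = (c & bit_OSXSAVE) && (c & bit_AVX);
        }
    }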

Patch

diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
index dd387c9bdd..69c1c61997 100644
--- a/accel/tcg/ldst_atomicity.c.inc
+++ b/accel/tcg/ldst_atomicity.c.inc
@@ -35,6 +35,14 @@
 
 #if defined(CONFIG_ATOMIC128)
 # define HAVE_al16_fast    true
+#elif defined(CONFIG_TCG_INTERPRETER)
+/*
+ * FIXME: host specific detection for this is in tcg/$host/,
+ * but we're using tcg/tci/ instead.
+ */
+# define HAVE_al16_fast    false
+#elif defined(__x86_64__) && defined(CONFIG_INT128)
+# define HAVE_al16_fast    likely(have_atomic16)
 #else
 # define HAVE_al16_fast    false
 #endif
@@ -178,6 +186,12 @@ load_atomic16(void *pv)
 
     r.u = qatomic_read__nocheck(p);
     return r.s;
+#elif defined(__x86_64__) && defined(CONFIG_INT128)
+    Int128Alias r;
+
+    /* Via HAVE_al16_fast, have_atomic16 is true. */
+    asm("vmovdqa %1, %0" : "=x" (r.u) : "m" (*(Int128 *)pv));
+    return r.s;
 #else
     qemu_build_not_reached();
 #endif
@@ -399,6 +413,24 @@ load_atom_extract_al16_or_al8(void *pv, int s)
         r = qatomic_read__nocheck(p16);
     }
     return r >> shr;
+#elif defined(__x86_64__) && defined(CONFIG_INT128)
+    uintptr_t pi = (uintptr_t)pv;
+    int shr = (pi & 7) * 8;
+    uint64_t a, b;
+
+    /* Via HAVE_al16_fast, have_atomic16 is true. */
+    pv = (void *)(pi & ~7);
+    if (pi & 8) {
+        uint64_t *p8 = __builtin_assume_aligned(pv, 16, 8);
+        a = qatomic_read__nocheck(p8);
+        b = qatomic_read__nocheck(p8 + 1);
+    } else {
+        asm("vmovdqa %2, %0\n\tvpextrq $1, %0, %1"
+            : "=x"(a), "=r"(b) : "m" (*(__uint128_t *)pv));
+    }
+    asm("shrd %b2, %1, %0" : "+r"(a) : "r"(b), "c"(shr));
+
+    return a;
 #else
     qemu_build_not_reached();
 #endif
@@ -715,23 +747,35 @@ static inline void ATTRIBUTE_ATOMIC128_OPT
 store_atomic16(void *pv, Int128Alias val)
 {
 #if defined(CONFIG_ATOMIC128)
-    __uint128_t *pu = __builtin_assume_aligned(pv, 16);
-    qatomic_set__nocheck(pu, val.u);
-#elif defined(CONFIG_CMPXCHG128)
-    __uint128_t *pu = __builtin_assume_aligned(pv, 16);
-    __uint128_t o;
-
-    /*
-     * Without CONFIG_ATOMIC128, __atomic_compare_exchange_n will always
-     * defer to libatomic, so we must use __sync_*_compare_and_swap_16
-     * and accept the sequential consistency that comes with it.
-     */
-    do {
-        o = *pu;
-    } while (!__sync_bool_compare_and_swap_16(pu, o, val.u));
-#else
-    qemu_build_not_reached();
+    {
+        __uint128_t *pu = __builtin_assume_aligned(pv, 16);
+        qatomic_set__nocheck(pu, val.u);
+        return;
+    }
 #endif
+#if defined(__x86_64__) && defined(CONFIG_INT128)
+    if (HAVE_al16_fast) {
+        asm("vmovdqa %1, %0" : "=m"(*(__uint128_t *)pv) : "x" (val.u));
+        return;
+    }
+#endif
+#if defined(CONFIG_CMPXCHG128)
+    {
+        __uint128_t *pu = __builtin_assume_aligned(pv, 16);
+        __uint128_t o;
+
+        /*
+         * Without CONFIG_ATOMIC128, __atomic_compare_exchange_n will always
+         * defer to libatomic, so we must use __sync_*_compare_and_swap_16
+         * and accept the sequential consistency that comes with it.
+         */
+        do {
+            o = *pu;
+        } while (!__sync_bool_compare_and_swap_16(pu, o, val.u));
+        return;
+    }
+#endif
+    qemu_build_not_reached();
 }
 
 /**
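
(The restructured store_atomic16 thus tries three strategies in order:
a native 16-byte atomic store under CONFIG_ATOMIC128, the
runtime-detected AVX vmovdqa store, and finally a compare-and-swap
loop under CONFIG_CMPXCHG128.  A standalone sketch of that last
pattern, assuming an x86-64 host built with -mcx16:)

    #include <stdint.h>

    /* Emulate a 16-byte atomic store with a CAS loop over
     * LOCK CMPXCHG16B: re-read and retry until no other writer
     * intervenes between the read and the swap.  Sequentially
     * consistent, as all __sync_* builtins are. */
    static void store16_with_cas(__uint128_t *p, __uint128_t val)
    {
        __uint128_t old;

        do {
            old = *p;   /* plain read; the CAS below validates it */
        } while (!__sync_bool_compare_and_swap_16(p, old, val));
    }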