diff mbox series

[3/4] include/qemu/thread: Use qatomic_* functions

Message ID 20221024232435.3334600-4-richard.henderson@linaro.org
State Superseded
Headers show
Series atomic: Friendlier assertions, avoidance of __sync | expand

Commit Message

Richard Henderson Oct. 24, 2022, 11:24 p.m. UTC
Use qatomic_*, which expands to __atomic_* in preference
to the "legacy" __sync_* functions.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/thread.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

Comments

Claudio Fontana Oct. 25, 2022, 1:47 p.m. UTC | #1
On 10/25/22 01:24, Richard Henderson wrote:
> Use qatomic_*, which expands to __atomic_* in preference
> to the "legacy" __sync_* functions.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  include/qemu/thread.h | 8 ++++----
>  1 file changed, 4 insertions(+), 4 deletions(-)
> 
> diff --git a/include/qemu/thread.h b/include/qemu/thread.h
> index af19f2b3fc..976e1ab995 100644
> --- a/include/qemu/thread.h
> +++ b/include/qemu/thread.h
> @@ -227,7 +227,7 @@ struct QemuSpin {
>  
>  static inline void qemu_spin_init(QemuSpin *spin)
>  {
> -    __sync_lock_release(&spin->value);
> +    qatomic_set(&spin->value, 0);

Here an integer literal is used, which makes sense: spin->value is an int.

>  #ifdef CONFIG_TSAN
>      __tsan_mutex_create(spin, __tsan_mutex_not_static);
>  #endif
> @@ -246,7 +246,7 @@ static inline void qemu_spin_lock(QemuSpin *spin)
>  #ifdef CONFIG_TSAN
>      __tsan_mutex_pre_lock(spin, 0);
>  #endif
> -    while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
> +    while (unlikely(qatomic_xchg(&spin->value, true))) {

nit: here 'true' is used. Maybe "1" instead of "true" for consistency? 

>          while (qatomic_read(&spin->value)) {
>              cpu_relax();
>          }
> @@ -261,7 +261,7 @@ static inline bool qemu_spin_trylock(QemuSpin *spin)
>  #ifdef CONFIG_TSAN
>      __tsan_mutex_pre_lock(spin, __tsan_mutex_try_lock);
>  #endif
> -    bool busy = __sync_lock_test_and_set(&spin->value, true);
> +    bool busy = qatomic_xchg(&spin->value, true);
>  #ifdef CONFIG_TSAN
>      unsigned flags = __tsan_mutex_try_lock;
>      flags |= busy ? __tsan_mutex_try_lock_failed : 0;
> @@ -280,7 +280,7 @@ static inline void qemu_spin_unlock(QemuSpin *spin)
>  #ifdef CONFIG_TSAN
>      __tsan_mutex_pre_unlock(spin, 0);
>  #endif
> -    __sync_lock_release(&spin->value);
> +    qatomic_store_release(&spin->value, 0);
>  #ifdef CONFIG_TSAN
>      __tsan_mutex_post_unlock(spin, 0);
>  #endif

Thanks,

C
Richard Henderson Oct. 25, 2022, 10:32 p.m. UTC | #2
On 10/25/22 23:47, Claudio Fontana wrote:
> On 10/25/22 01:24, Richard Henderson wrote:
>> Use qatomic_*, which expands to __atomic_* in preference
>> to the "legacy" __sync_* functions.
>>
>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>> ---
>>   include/qemu/thread.h | 8 ++++----
>>   1 file changed, 4 insertions(+), 4 deletions(-)
>>
>> diff --git a/include/qemu/thread.h b/include/qemu/thread.h
>> index af19f2b3fc..976e1ab995 100644
>> --- a/include/qemu/thread.h
>> +++ b/include/qemu/thread.h
>> @@ -227,7 +227,7 @@ struct QemuSpin {
>>   
>>   static inline void qemu_spin_init(QemuSpin *spin)
>>   {
>> -    __sync_lock_release(&spin->value);
>> +    qatomic_set(&spin->value, 0);
> 
> Here an integer literal is used, which makes sense: spin->value is an int.
> 
>>   #ifdef CONFIG_TSAN
>>       __tsan_mutex_create(spin, __tsan_mutex_not_static);
>>   #endif
>> @@ -246,7 +246,7 @@ static inline void qemu_spin_lock(QemuSpin *spin)
>>   #ifdef CONFIG_TSAN
>>       __tsan_mutex_pre_lock(spin, 0);
>>   #endif
>> -    while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
>> +    while (unlikely(qatomic_xchg(&spin->value, true))) {
> 
> nit: here 'true' is used. Maybe "1" instead of "true" for consistency?

Fair enough.


r~
diff mbox series

Patch

diff --git a/include/qemu/thread.h b/include/qemu/thread.h
index af19f2b3fc..976e1ab995 100644
--- a/include/qemu/thread.h
+++ b/include/qemu/thread.h
@@ -227,7 +227,7 @@  struct QemuSpin {
 
 static inline void qemu_spin_init(QemuSpin *spin)
 {
-    __sync_lock_release(&spin->value);
+    qatomic_set(&spin->value, 0);
 #ifdef CONFIG_TSAN
     __tsan_mutex_create(spin, __tsan_mutex_not_static);
 #endif
@@ -246,7 +246,7 @@  static inline void qemu_spin_lock(QemuSpin *spin)
 #ifdef CONFIG_TSAN
     __tsan_mutex_pre_lock(spin, 0);
 #endif
-    while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
+    while (unlikely(qatomic_xchg(&spin->value, true))) {
         while (qatomic_read(&spin->value)) {
             cpu_relax();
         }
@@ -261,7 +261,7 @@  static inline bool qemu_spin_trylock(QemuSpin *spin)
 #ifdef CONFIG_TSAN
     __tsan_mutex_pre_lock(spin, __tsan_mutex_try_lock);
 #endif
-    bool busy = __sync_lock_test_and_set(&spin->value, true);
+    bool busy = qatomic_xchg(&spin->value, true);
 #ifdef CONFIG_TSAN
     unsigned flags = __tsan_mutex_try_lock;
     flags |= busy ? __tsan_mutex_try_lock_failed : 0;
@@ -280,7 +280,7 @@  static inline void qemu_spin_unlock(QemuSpin *spin)
 #ifdef CONFIG_TSAN
     __tsan_mutex_pre_unlock(spin, 0);
 #endif
-    __sync_lock_release(&spin->value);
+    qatomic_store_release(&spin->value, 0);
 #ifdef CONFIG_TSAN
     __tsan_mutex_post_unlock(spin, 0);
 #endif