[PATCHv2,03/11] atomics: simplify cmpxchg() instrumentation

Message ID 20180625105952.3756-4-mark.rutland@arm.com
State Superseded
Headers show
Series atomics: generate atomic headers / instrument arm64

Commit Message

Mark Rutland June 25, 2018, 10:59 a.m. UTC
Currently we define some fairly verbose wrappers for the cmpxchg()
family so that we can pass a pointer and size into kasan_check_write().

The wrappers duplicate the size-switching logic necessary in arch code,
and only work for scalar types. On some architectures, (cmp)xchg are
used on non-scalar types, and thus the instrumented wrappers need to be
able to handle this.
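
As a hypothetical flavour of the non-scalar case (the struct and helper
below are invented, and assume an architecture whose arch_cmpxchg() can
handle such a type): the old wrapper's (unsigned long)(old) cast simply
does not compile for an aggregate, while the new macros pass 'old' and
'new' through untouched.

  /* Invented 8-byte aggregate used with cmpxchg(). */
  struct pair {
  	u32 lo;
  	u32 hi;
  } __aligned(8);

  static bool pair_update(struct pair *p, struct pair old, struct pair new)
  {
  	struct pair cur = cmpxchg(p, old, new);

  	/* structs cannot be compared with ==; compare the bytes */
  	return memcmp(&cur, &old, sizeof(cur)) == 0;
  }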

We could take the type-punning logic from {READ,WRITE}_ONCE(), but this
makes the wrappers even more verbose, and requires several local
variables in the macros.
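
For reference, a punning variant in that style might look something like
the sketch below -- this is *not* what this patch does, and the handling
of any bytes beyond sizeof(*ptr) is hand-waved; the union trick and the
extra locals are exactly the verbosity being avoided:

  #define cmpxchg(ptr, old, new)					\
  ({									\
  	union { typeof(*(ptr)) __val; unsigned long __bits; }		\
  		__old = { .__val = (old) },				\
  		__new = { .__val = (new) }, __ret;			\
  	kasan_check_write((ptr), sizeof(*(ptr)));			\
  	__ret.__bits = cmpxchg_size((ptr), __old.__bits,		\
  				    __new.__bits, sizeof(*(ptr)));	\
  	__ret.__val;							\
  })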

Instead, let's simplify the wrappers into simple macros which:

 * snapshot the pointer into a single local variable, called __ai_ptr to
   avoid conflicts with variables in the scope of the caller.

 * call kasan_check_read() on __ai_ptr.

 * invoke the arch_ function, passing the original arguments, bar
   __ai_ptr being substituted for ptr.
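
With the new definition, an invocation like cmpxchg(&x, 0, 1) expands
(roughly) to:

  ({
  	typeof(&x) __ai_ptr = (&x);
  	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));
  	arch_cmpxchg(__ai_ptr, (0), (1));
  })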

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>

Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
---
 include/asm-generic/atomic-instrumented.h | 100 +++++-------------------------
 1 file changed, 15 insertions(+), 85 deletions(-)

-- 
2.11.0

Comments

Andrea Parri June 25, 2018, 11:38 a.m. UTC | #1
On Mon, Jun 25, 2018 at 11:59:44AM +0100, Mark Rutland wrote:
> Currently we define some fairly verbose wrappers for the cmpxchg()
> family so that we can pass a pointer and size into kasan_check_write().
> 
> The wrappers duplicate the size-switching logic necessary in arch code,
> and only work for scalar types. On some architectures, (cmp)xchg are
> used on non-scalar types, and thus the instrumented wrappers need to be
> able to handle this.
> 
> We could take the type-punning logic from {READ,WRITE}_ONCE(), but this
> makes the wrappers even more verbose, and requires several local
> variables in the macros.
> 
> Instead, let's simplify the wrappers into simple macros which:
> 
>  * snapshot the pointer into a single local variable, called __ai_ptr to
>    avoid conflicts with variables in the scope of the caller.
> 
>  * call kasan_check_read() on __ai_ptr.


Maybe I'm misreading the diff: aren't you calling kasan_check_write()?
(not sure if it makes a difference in this case/for KTSan, but CMPXCHG
does not necessarily perform a write...)
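
For instance, a cmpxchg() whose compare fails performs only a read:

  int x = 5;
  cmpxchg(&x, 0, 1);	/* x != 0: the compare fails, nothing is stored */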

  Andrea


> [...]
Dmitry Vyukov June 25, 2018, 11:47 a.m. UTC | #2
On Mon, Jun 25, 2018 at 1:38 PM, Andrea Parri
<andrea.parri@amarulasolutions.com> wrote:
> On Mon, Jun 25, 2018 at 11:59:44AM +0100, Mark Rutland wrote:
>> Currently we define some fairly verbose wrappers for the cmpxchg()
>> family so that we can pass a pointer and size into kasan_check_write().
>>
>> The wrappers duplicate the size-switching logic necessary in arch code,
>> and only work for scalar types. On some architectures, (cmp)xchg are
>> used on non-scalar types, and thus the instrumented wrappers need to be
>> able to handle this.
>>
>> We could take the type-punning logic from {READ,WRITE}_ONCE(), but this
>> makes the wrappers even more verbose, and requires several local
>> variables in the macros.
>>
>> Instead, let's simplify the wrappers into simple macros which:
>>
>>  * snapshot the pointer into a single local variable, called __ai_ptr to
>>    avoid conflicts with variables in the scope of the caller.
>>
>>  * call kasan_check_read() on __ai_ptr.
>
> Maybe I'm misreading the diff: aren't you calling kasan_check_write()?
> (not sure if it makes a difference in this case/for KTSan, but CMPXCHG
> does not necessarily perform a write...)



KTSan will most likely use its own instrumentation. For KTSan we need
some operations on metadata to be atomic with the atomic operation
itself, and potentially KTSan will even alter the returned value (to
produce more artifacts of relaxed memory models). But what we need from
this effort is a clear, single place to insert such instrumentation.
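
As a rough sketch of the shape this enables -- purely illustrative,
KTSAN is not upstream and the hook names below are invented:

  #define cmpxchg(ptr, old, new)					\
  ({									\
  	typeof(ptr) __ai_ptr = (ptr);					\
  	typeof(*(ptr)) __ai_ret;					\
  	/* hypothetical KTSAN hooks bracketing the atomic op */	\
  	ktsan_atomic_pre(__ai_ptr, sizeof(*__ai_ptr));			\
  	__ai_ret = arch_cmpxchg(__ai_ptr, (old), (new));		\
  	ktsan_atomic_post(__ai_ptr, sizeof(*__ai_ptr));		\
  	__ai_ret;							\
  })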


>> [...]
Mark Rutland June 25, 2018, 11:48 a.m. UTC | #3
On Mon, Jun 25, 2018 at 01:38:03PM +0200, Andrea Parri wrote:
> On Mon, Jun 25, 2018 at 11:59:44AM +0100, Mark Rutland wrote:
> > Currently we define some fairly verbose wrappers for the cmpxchg()
> > family so that we can pass a pointer and size into kasan_check_write().
> > 
> > The wrappers duplicate the size-switching logic necessary in arch code,
> > and only work for scalar types. On some architectures, (cmp)xchg are
> > used on non-scalar types, and thus the instrumented wrappers need to be
> > able to handle this.
> > 
> > We could take the type-punning logic from {READ,WRITE}_ONCE(), but this
> > makes the wrappers even more verbose, and requires several local
> > variables in the macros.
> > 
> > Instead, let's simplify the wrappers into simple macros which:
> > 
> >  * snapshot the pointer into a single local variable, called __ai_ptr to
> >    avoid conflicts with variables in the scope of the caller.
> > 
> >  * call kasan_check_read() on __ai_ptr.
> 
> Maybe I'm misreading the diff: aren't you calling kasan_check_write()?


Sorry, yes -- I'll update the commit message.

> (not sure if it makes a difference in this case/for KTSan, but CMPXCHG
> does not necessarily perform a write...)


For KASAN, it shouldn't matter -- it'll only be used to report whether
the access was a read or write, and it's fine to say that it's a
potential write. KTSAN's not yet upstream, so I'll leave that detail to
Dmitry.

Thanks,
Mark.

Patch

diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index 3c64e95d5ed0..c7c3e4cdd942 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -408,109 +408,39 @@ static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
 }
 #endif
 
-static __always_inline unsigned long
-cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
-	kasan_check_write(ptr, size);
-	switch (size) {
-	case 1:
-		return arch_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
-	case 2:
-		return arch_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
-	case 4:
-		return arch_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
-	case 8:
-		BUILD_BUG_ON(sizeof(unsigned long) != 8);
-		return arch_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
-	}
-	BUILD_BUG();
-	return 0;
-}
-
 #define cmpxchg(ptr, old, new)						\
 ({									\
-	((__typeof__(*(ptr)))cmpxchg_size((ptr), (unsigned long)(old),	\
-		(unsigned long)(new), sizeof(*(ptr))));			\
+	typeof(ptr) __ai_ptr = (ptr);					\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));			\
+	arch_cmpxchg(__ai_ptr, (old), (new));				\
 })
 
-static __always_inline unsigned long
-sync_cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new,
-		  int size)
-{
-	kasan_check_write(ptr, size);
-	switch (size) {
-	case 1:
-		return arch_sync_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
-	case 2:
-		return arch_sync_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
-	case 4:
-		return arch_sync_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
-	case 8:
-		BUILD_BUG_ON(sizeof(unsigned long) != 8);
-		return arch_sync_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
-	}
-	BUILD_BUG();
-	return 0;
-}
-
 #define sync_cmpxchg(ptr, old, new)					\
 ({									\
-	((__typeof__(*(ptr)))sync_cmpxchg_size((ptr),			\
-		(unsigned long)(old), (unsigned long)(new),		\
-		sizeof(*(ptr))));					\
+	typeof(ptr) __ai_ptr = (ptr);					\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));			\
+	arch_sync_cmpxchg(__ai_ptr, (old), (new));			\
 })
 
-static __always_inline unsigned long
-cmpxchg_local_size(volatile void *ptr, unsigned long old, unsigned long new,
-		   int size)
-{
-	kasan_check_write(ptr, size);
-	switch (size) {
-	case 1:
-		return arch_cmpxchg_local((u8 *)ptr, (u8)old, (u8)new);
-	case 2:
-		return arch_cmpxchg_local((u16 *)ptr, (u16)old, (u16)new);
-	case 4:
-		return arch_cmpxchg_local((u32 *)ptr, (u32)old, (u32)new);
-	case 8:
-		BUILD_BUG_ON(sizeof(unsigned long) != 8);
-		return arch_cmpxchg_local((u64 *)ptr, (u64)old, (u64)new);
-	}
-	BUILD_BUG();
-	return 0;
-}
-
 #define cmpxchg_local(ptr, old, new)					\
 ({									\
-	((__typeof__(*(ptr)))cmpxchg_local_size((ptr),			\
-		(unsigned long)(old), (unsigned long)(new),		\
-		sizeof(*(ptr))));					\
+	typeof(ptr) __ai_ptr = (ptr);					\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));			\
+	arch_cmpxchg_local(__ai_ptr, (old), (new));			\
 })
 
-static __always_inline u64
-cmpxchg64_size(volatile u64 *ptr, u64 old, u64 new)
-{
-	kasan_check_write(ptr, sizeof(*ptr));
-	return arch_cmpxchg64(ptr, old, new);
-}
-
 #define cmpxchg64(ptr, old, new)					\
 ({									\
-	((__typeof__(*(ptr)))cmpxchg64_size((ptr), (u64)(old),		\
-		(u64)(new)));						\
+	typeof(ptr) __ai_ptr = (ptr);					\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));			\
+	arch_cmpxchg64(__ai_ptr, (old), (new));				\
 })
 
-static __always_inline u64
-cmpxchg64_local_size(volatile u64 *ptr, u64 old, u64 new)
-{
-	kasan_check_write(ptr, sizeof(*ptr));
-	return arch_cmpxchg64_local(ptr, old, new);
-}
-
 #define cmpxchg64_local(ptr, old, new)					\
 ({									\
-	((__typeof__(*(ptr)))cmpxchg64_local_size((ptr), (u64)(old),	\
-		(u64)(new)));						\
+	typeof(ptr) __ai_ptr = (ptr);					\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));			\
+	arch_cmpxchg64_local(__ai_ptr, (old), (new));			\
 })
 
 /*