| Message ID | 20180621121321.4761-9-mark.rutland@arm.com |
|---|---|
| State | Accepted |
| Commit | 00b808ab79ead372daf1a0682d1ef271599c0b55 |
| Series | atomics: API cleanups |
On Thu, Jun 21, 2018 at 01:13:11PM +0100, Mark Rutland wrote:
> -	if (v->counter != u) {
> +	val = v->counter;
> +	if (val != u)
>  		v->counter += a;
> -		ret = true;
>  	}

Ugh, I thought I had fixed this up and removed the trailing brace.

This will break some 32-bit arches as-is.

Ingo, would you be happy to fix this up, or would you prefer that I fix
and resend?

Sorry about this.

Mark.
* Mark Rutland <mark.rutland@arm.com> wrote:

> On Thu, Jun 21, 2018 at 01:13:11PM +0100, Mark Rutland wrote:
> > -	if (v->counter != u) {
> > +	val = v->counter;
> > +	if (val != u)
> >  		v->counter += a;
> > -		ret = true;
> >  	}
>
> Ugh, I thought I had fixed this up and removed the trailing brace.
>
> This will break some 32-bit arches as-is.
>
> Ingo, would you be happy to fix this up, or would you prefer that I fix
> and resend?

I fixed it up, no need to resend unless I find other problems in testing.

> Sorry about this.

No problem!

Thanks,

	Ingo
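The breakage under discussion is the stray closing brace left in the lib/atomic64.c hunk below: once the if loses its braces, the leftover } no longer matches anything and the file fails to compile on architectures that use the generic atomic64 library. A minimal sketch of how the function body presumably reads after the fixup, assuming only that brace is dropped (the fixed-up commit itself is not quoted in this thread):

long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	/* Read the old value; only add when it differs from 'u'. */
	val = v->counter;
	if (val != u)
		v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);

	/* Return the value observed before any addition. */
	return val;
}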
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 5105275ac825..49460107b29a 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -53,7 +53,8 @@ ATOMIC64_OPS(xor)
 extern long long atomic64_dec_if_positive(atomic64_t *v);
 extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
 extern long long atomic64_xchg(atomic64_t *v, long long new);
-extern bool atomic64_add_unless(atomic64_t *v, long long a, long long u);
+extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u);
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
 #define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v)			atomic64_add(1LL, (v))
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 4230f4b8906c..16ac13113c8e 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -178,18 +178,18 @@ long long atomic64_xchg(atomic64_t *v, long long new)
 }
 EXPORT_SYMBOL(atomic64_xchg);
 
-bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
+long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	bool ret = false;
+	long long val;
 
 	raw_spin_lock_irqsave(lock, flags);
-	if (v->counter != u) {
+	val = v->counter;
+	if (val != u)
 		v->counter += a;
-		ret = true;
 	}
 	raw_spin_unlock_irqrestore(lock, flags);
-	return ret;
+	return val;
 }
-EXPORT_SYMBOL(atomic64_add_unless);
+EXPORT_SYMBOL(atomic64_fetch_add_unless);
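The point of the conversion is the changed return value: the fetch variant hands back the counter value observed before any addition, rather than a bool. Callers that want the old add-unless semantics can recover them by comparing that value against u; a rough sketch of such a wrapper (the series' actual helper in include/linux/atomic.h may differ in detail):

static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	/* The addition took place iff the value observed was not 'u'. */
	return atomic64_fetch_add_unless(v, a, u) != u;
}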