atomics: Prepare for atomic64_fetch_add_unless()
Currently all architectures must implement atomic_fetch_add_unless(), with common code providing atomic_add_unless(). Architectures must also implement atomic64_add_unless() directly, with no corresponding atomic64_fetch_add_unless().

This divergence is unfortunate, and means that the APIs for atomic_t, atomic64_t, and atomic_long_t differ.

In preparation for unifying things, with architectures providing atomic64_fetch_add_unless, this patch adds a generic atomic64_add_unless() which will use atomic64_fetch_add_unless(). The instrumented atomics are updated to take this case into account.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Albert Ou <albert@sifive.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vineet Gupta <vgupta@synopsys.com>
Link: https://lore.kernel.org/lkml/20180621121321.4761-8-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent eccc2da8c0
commit 0ae1d99402
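For context only (this snippet is not part of the patch): a minimal caller-side sketch of the API being unified, assuming a configuration where atomic64_fetch_add_unless() is provided. The get_ref()/get_ref_old() helpers and the MAX_REFS limit are made up for illustration.

/* Hypothetical illustration, not from this patch: a "take a reference
 * unless the counter already hit the limit" pattern.
 */
#include <linux/atomic.h>

#define MAX_REFS	1024LL

static bool get_ref(atomic64_t *refs)
{
	/* Atomically add 1 unless the counter is already MAX_REFS;
	 * returns true if the increment was performed.
	 */
	return atomic64_add_unless(refs, 1, MAX_REFS);
}

static s64 get_ref_old(atomic64_t *refs)
{
	/* The fetch_ variant instead returns the value the counter held
	 * before the attempted add, so the caller can inspect it.
	 */
	return atomic64_fetch_add_unless(refs, 1, MAX_REFS);
}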
@@ -93,11 +93,20 @@ static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 }
 #endif
 
+#ifdef arch_atomic64_fetch_add_unless
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_add_unless(v, a, u);
+}
+#else
 static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic64_add_unless(v, a, u);
 }
+#endif
 
 static __always_inline void atomic_inc(atomic_t *v)
 {
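Both branches of the hunk above follow the usual instrumented-atomics pattern: a thin __always_inline wrapper reports the access to KASAN and then defers to the arch_-prefixed primitive the architecture actually implements. A stripped-down sketch of that pattern, using a made-up operation name purely for illustration:

/* Illustrative only: "example_op" is not a real kernel operation. */
static __always_inline s64 atomic64_example_op(atomic64_t *v, s64 a)
{
	kasan_check_write(v, sizeof(*v));	/* tell KASAN about the write */
	return arch_atomic64_example_op(v, a);	/* uninstrumented arch op */
}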
@@ -1042,6 +1042,22 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 #define atomic64_try_cmpxchg_release	atomic64_try_cmpxchg
 #endif /* atomic64_try_cmpxchg */
 
+/**
+ * atomic64_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+#ifdef atomic64_fetch_add_unless
+static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+	return atomic64_fetch_add_unless(v, a, u) != u;
+}
+#endif
+
 /**
  * atomic64_inc_not_zero - increment unless the number is zero
  * @v: pointer of type atomic64_t
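The generic fallback above works because atomic64_fetch_add_unless() returns the value @v held before the call; the add is skipped only when that old value equals @u, so comparing the return value against @u tells the caller whether the add happened. A non-atomic model of that logic, written here only to illustrate the semantics (these model_* helpers are not kernel functions):

/* Non-atomic model of the add_unless semantics, for illustration only. */
static long long model_fetch_add_unless(long long *v, long long a, long long u)
{
	long long old = *v;

	if (old != u)		/* add only if the old value was not @u */
		*v = old + a;
	return old;		/* always return the value seen beforehand */
}

static bool model_add_unless(long long *v, long long a, long long u)
{
	/* The add was performed exactly when the old value differed from @u. */
	return model_fetch_add_unless(v, a, u) != u;
}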