Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-25 17:25:07 +07:00
commit 69d927bba3
Recent probing at the Linux Kernel Memory Model uncovered a 'surprise'.
Strongly ordered architectures where the atomic RmW primitive implies full
memory ordering and smp_mb__{before,after}_atomic() are a simple barrier()
(such as x86) fail for:

        *x = 1;
        atomic_inc(u);
        smp_mb__after_atomic();
        r0 = *y;

Because, while the atomic_inc() implies memory order, it (surprisingly) does
not provide a compiler barrier. This then allows the compiler to re-order
like so:

        atomic_inc(u);
        *x = 1;
        smp_mb__after_atomic();
        r0 = *y;

Which the CPU is then allowed to re-order (under TSO rules) like:

        atomic_inc(u);
        r0 = *y;
        *x = 1;

And this very much was not intended. Therefore strengthen the atomic RmW ops
to include a compiler barrier.

NOTE: atomic_{or,and,xor} and the bitops already had the compiler barrier.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
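For illustration only (this snippet is not part of the quoted changelog, and my_atomic64_inc() is a made-up name mirroring arch_atomic64_inc() in the header below): the LOCK-prefixed instruction already gives the CPU-level ordering, but only the "memory" clobber added by this change tells the compiler that it must not move unrelated loads and stores across the asm statement.

        /* Before: atomic and ordered for the CPU, but not a compiler barrier. */
        static inline void my_atomic64_inc(atomic64_t *v)
        {
                asm volatile(LOCK_PREFIX "incq %0"
                             : "=m" (v->counter)
                             : "m" (v->counter));
        }

        /* After: the "memory" clobber also forbids compiler re-ordering. */
        static inline void my_atomic64_inc(atomic64_t *v)
        {
                asm volatile(LOCK_PREFIX "incq %0"
                             : "=m" (v->counter)
                             : "m" (v->counter) : "memory");
        }

With the clobber in place, the compiler can no longer move the *x = 1 store past the atomic_inc() in the changelog's example.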
246 lines
5.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)	{ (i) }

/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	WRITE_ONCE(v->counter, i);
}

/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "addq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}

/**
 * arch_atomic64_sub - subtract integer from atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "subq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}

/**
 * arch_atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
}
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test

/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "incq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_inc arch_atomic64_inc

/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "decq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_dec arch_atomic64_dec

/**
 * arch_atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
}
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test

/**
 * arch_atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
}
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test

/**
 * arch_atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
}
#define arch_atomic64_add_negative arch_atomic64_add_negative

/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	return i + xadd(&v->counter, i);
}

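/* Atomically subtract i from v and return the new (post-subtraction) value. */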
static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_return(-i, v);
}

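/* Atomically add i to v and return the value v held before the addition. */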
static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return xadd(&v->counter, i);
}

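/* Atomically subtract i from v and return the value v held before the subtraction. */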
static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
	return xadd(&v->counter, -i);
}

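/* If v equals old, atomically set v to new; in either case return the value v held before the operation. */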
static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return arch_cmpxchg(&v->counter, old, new);
}

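/* Like cmpxchg, but return true on success; on failure, *old is updated with the value actually found in v. */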
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	return try_cmpxchg(&v->counter, old, new);
}

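/* Atomically replace the value of v with new and return the previous value. */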
static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
	return arch_xchg(&v->counter, new);
}

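/* Atomically AND i into v; no value is returned. */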
static inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "andq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

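/* Atomically AND i into v via a try_cmpxchg loop and return the old value. */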
static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
	return val;
}

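/* Atomically OR i into v; no value is returned. */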
static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "orq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

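/* Atomically OR i into v via a try_cmpxchg loop and return the old value. */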
static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
	return val;
}

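/* Atomically XOR i into v; no value is returned. */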
static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "xorq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

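/* Atomically XOR i into v via a try_cmpxchg loop and return the old value. */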
static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
	return val;
}

#endif /* _ASM_X86_ATOMIC64_64_H */