Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-21 17:20:56 +07:00)

Commit 7cc7eaad49
The ifdeffery for atomic*_{fetch_,}andnot() is unlike that for all the
other atomics. If atomic*_andnot() is not defined, the corresponding
atomic*_fetch_andnot() is assumed to not be defined. Additionally, the
fallbacks for the various ordering cases are written much later in
atomic.h as static inlines.

This isn't problematic today, but gets in the way of scripting the
generation of atomics. To prepare for scripting, this patch:

* Switches to separate ifdefs for atomic*_andnot() and
  atomic*_fetch_andnot(), updating implementations as appropriate.

* Moves the fallbacks into the standard ifdefs, as macro expansions
  rather than static inlines.

* Removes trivial andnot implementations from architectures, where
  these are superseded by core code.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/lkml/20180621121321.4761-19-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
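For orientation, a minimal sketch of the pattern the message describes for the generic layer: separate ifdeffery for atomic_andnot() and atomic_fetch_andnot(), with the ordering fallbacks written as macro expansions in terms of the and/fetch_and ops. This is illustrative only and is not copied from include/linux/atomic.h; the real fallback blocks also cover the acquire, release and fully-ordered cases.

#ifndef atomic_andnot
#define atomic_andnot(i, v)                     atomic_and(~(int)(i), (v))
#endif

#ifndef atomic_fetch_andnot_relaxed
#ifndef atomic_fetch_andnot
#define atomic_fetch_andnot(i, v)               atomic_fetch_and(~(int)(i), (v))
#define atomic_fetch_andnot_relaxed(i, v)       atomic_fetch_and_relaxed(~(int)(i), (v))
#else /* atomic_fetch_andnot */
#define atomic_fetch_andnot_relaxed             atomic_fetch_andnot
#endif /* atomic_fetch_andnot */
#endif /* atomic_fetch_andnot_relaxed */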
515 lines | 12 KiB | C
/*
 * arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i) { (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */
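/*
 * Illustrative sketch, added for readability and not part of the original
 * header: in plain C terms, ATOMIC_OP(add, +=, add) below expands to
 * roughly the following load/store-exclusive retry loop, where
 * load_exclusive()/store_exclusive() are hypothetical helpers standing in
 * for the ldrex/strex instructions:
 *
 *      static inline void atomic_add(int i, atomic_t *v)
 *      {
 *              int old, failed;
 *
 *              do {
 *                      old = load_exclusive(&v->counter);      // ldrex
 *                      failed = store_exclusive(&v->counter,   // strex
 *                                               old + i);
 *              } while (failed);                               // teq; bne 1b
 *      }
 */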
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
 \
        prefetchw(&v->counter); \
        __asm__ __volatile__("@ atomic_" #op "\n" \
"1:     ldrex   %0, [%3]\n" \
"       " #asm_op "     %0, %0, %4\n" \
"       strex   %1, %0, [%3]\n" \
"       teq     %1, #0\n" \
"       bne     1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "Ir" (i) \
        : "cc"); \
} \

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
 \
        prefetchw(&v->counter); \
 \
        __asm__ __volatile__("@ atomic_" #op "_return\n" \
"1:     ldrex   %0, [%3]\n" \
"       " #asm_op "     %0, %0, %4\n" \
"       strex   %1, %0, [%3]\n" \
"       teq     %1, #0\n" \
"       bne     1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "Ir" (i) \
        : "cc"); \
 \
        return result; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result, val; \
 \
        prefetchw(&v->counter); \
 \
        __asm__ __volatile__("@ atomic_fetch_" #op "\n" \
"1:     ldrex   %0, [%4]\n" \
"       " #asm_op "     %1, %0, %5\n" \
"       strex   %2, %1, [%4]\n" \
"       teq     %2, #0\n" \
"       bne     1b" \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "Ir" (i) \
        : "cc"); \
 \
        return result; \
}

#define atomic_add_return_relaxed       atomic_add_return_relaxed
#define atomic_sub_return_relaxed       atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed        atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed        atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed        atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed     atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed         atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed        atomic_fetch_xor_relaxed

static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
        int oldval;
        unsigned long res;

        prefetchw(&ptr->counter);

        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
                "ldrex  %1, [%3]\n"
                "mov    %0, #0\n"
                "teq    %1, %4\n"
                "strexeq %0, %5, [%3]\n"
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "Ir" (old), "r" (new)
                : "cc");
        } while (res);

        return oldval;
}
#define atomic_cmpxchg_relaxed          atomic_cmpxchg_relaxed

static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int oldval, newval;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__ ("@ atomic_add_unless\n"
"1:     ldrex   %0, [%4]\n"
"       teq     %0, %5\n"
"       beq     2f\n"
"       add     %1, %0, %6\n"
"       strex   %2, %1, [%4]\n"
"       teq     %2, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (oldval != u)
                smp_mb();

        return oldval;
}
#define atomic_fetch_add_unless         atomic_fetch_add_unless
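/*
 * Usage sketch, added for readability and not part of the original header:
 * the classic consumer of atomic_fetch_add_unless() is an "increment
 * unless zero" reference grab, e.g. (obj/refcnt are hypothetical):
 *
 *      if (atomic_fetch_add_unless(&obj->refcnt, 1, 0) == 0)
 *              return NULL;    // was already zero, no reference taken
 *
 * Note the barrier placement in the implementation above: a full smp_mb()
 * before the exclusive loop, and another one afterwards only when the add
 * actually happened (oldval != u).
 */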
#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long flags; \
 \
        raw_local_irq_save(flags); \
        v->counter c_op i; \
        raw_local_irq_restore(flags); \
} \

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
        unsigned long flags; \
        int val; \
 \
        raw_local_irq_save(flags); \
        v->counter c_op i; \
        val = v->counter; \
        raw_local_irq_restore(flags); \
 \
        return val; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
        unsigned long flags; \
        int val; \
 \
        raw_local_irq_save(flags); \
        val = v->counter; \
        v->counter c_op i; \
        raw_local_irq_restore(flags); \
 \
        return val; \
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        raw_local_irq_save(flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        raw_local_irq_restore(flags);

        return ret;
}

#define atomic_fetch_andnot             atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op) \
        ATOMIC_OP(op, c_op, asm_op) \
        ATOMIC_OP_RETURN(op, c_op, asm_op) \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
        ATOMIC_OP(op, c_op, asm_op) \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
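/*
 * Note, added for orientation and not part of the original header: on
 * ARMv6+ this file only defines the _relaxed forms of the value-returning
 * ops (atomic_add_return_relaxed(), atomic_fetch_andnot_relaxed(), ...).
 * The generic <linux/atomic.h> layer is expected to derive the acquire,
 * release and fully-ordered variants from them, conceptually along these
 * lines (sketch only; the real fallback macros differ in detail):
 *
 *      #define atomic_fetch_andnot(i, v)                       \
 *      ({                                                      \
 *              int __ret;                                      \
 *              smp_mb__before_atomic();                        \
 *              __ret = atomic_fetch_andnot_relaxed(i, v);      \
 *              smp_mb__after_atomic();                         \
 *              __ret;                                          \
 *      })
 */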
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
        long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
        long long result;

        __asm__ __volatile__("@ atomic64_read\n"
"       ldrd    %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        __asm__ __volatile__("@ atomic64_set\n"
"       strd    %2, %H2, [%1]"
        : "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        );
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
        long long result;

        __asm__ __volatile__("@ atomic64_read\n"
"       ldrexd  %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        long long tmp;

        prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic64_set\n"
"1:     ldrexd  %0, %H0, [%2]\n"
"       strexd  %0, %3, %H3, [%2]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}
#endif
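/*
 * Note, added for orientation and not part of the original header: in the
 * 64-bit macros below a long long lives in a register pair.  The GCC ARM
 * operand modifiers pick out the halves: %Qn is the register holding the
 * low 32 bits, %Rn the register holding the high 32 bits, and %n/%Hn name
 * the consecutive pair required by ldrexd/strexd.  For example,
 * ATOMIC64_OP(add, adds, adc) performs the 64-bit addition as "adds" on
 * the low words followed by "adc" on the high words, inside a single
 * ldrexd/strexd retry loop.
 */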
#define ATOMIC64_OP(op, op1, op2) \
static inline void atomic64_##op(long long i, atomic64_t *v) \
{ \
        long long result; \
        unsigned long tmp; \
 \
        prefetchw(&v->counter); \
        __asm__ __volatile__("@ atomic64_" #op "\n" \
"1:     ldrexd  %0, %H0, [%3]\n" \
"       " #op1 " %Q0, %Q0, %Q4\n" \
"       " #op2 " %R0, %R0, %R4\n" \
"       strexd  %1, %0, %H0, [%3]\n" \
"       teq     %1, #0\n" \
"       bne     1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "r" (i) \
        : "cc"); \
} \

#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline long long \
atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
{ \
        long long result; \
        unsigned long tmp; \
 \
        prefetchw(&v->counter); \
 \
        __asm__ __volatile__("@ atomic64_" #op "_return\n" \
"1:     ldrexd  %0, %H0, [%3]\n" \
"       " #op1 " %Q0, %Q0, %Q4\n" \
"       " #op2 " %R0, %R0, %R4\n" \
"       strexd  %1, %0, %H0, [%3]\n" \
"       teq     %1, #0\n" \
"       bne     1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "r" (i) \
        : "cc"); \
 \
        return result; \
}

#define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline long long \
atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v) \
{ \
        long long result, val; \
        unsigned long tmp; \
 \
        prefetchw(&v->counter); \
 \
        __asm__ __volatile__("@ atomic64_fetch_" #op "\n" \
"1:     ldrexd  %0, %H0, [%4]\n" \
"       " #op1 " %Q1, %Q0, %Q5\n" \
"       " #op2 " %R1, %R0, %R5\n" \
"       strexd  %2, %1, %H1, [%4]\n" \
"       teq     %2, #0\n" \
"       bne     1b" \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "r" (i) \
        : "cc"); \
 \
        return result; \
}

#define ATOMIC64_OPS(op, op1, op2) \
        ATOMIC64_OP(op, op1, op2) \
        ATOMIC64_OP_RETURN(op, op1, op2) \
        ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed     atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed     atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed      atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed      atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2) \
        ATOMIC64_OP(op, op1, op2) \
        ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed      atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed   atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed       atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed      atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
        long long oldval;
        unsigned long res;

        prefetchw(&ptr->counter);

        do {
                __asm__ __volatile__("@ atomic64_cmpxchg\n"
                "ldrexd         %1, %H1, [%3]\n"
                "mov            %0, #0\n"
                "teq            %1, %4\n"
                "teqeq          %H1, %H4\n"
                "strexdeq       %0, %5, %H5, [%3]"
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "r" (old), "r" (new)
                : "cc");
        } while (res);

        return oldval;
}
#define atomic64_cmpxchg_relaxed        atomic64_cmpxchg_relaxed

static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
        long long result;
        unsigned long tmp;

        prefetchw(&ptr->counter);

        __asm__ __volatile__("@ atomic64_xchg\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       strexd  %1, %4, %H4, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
        : "r" (&ptr->counter), "r" (new)
        : "cc");

        return result;
}
#define atomic64_xchg_relaxed           atomic64_xchg_relaxed

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
        long long result;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       subs    %Q0, %Q0, #1\n"
"       sbc     %R0, %R0, #0\n"
"       teq     %R0, #0\n"
"       bmi     2f\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter)
        : "cc");

        smp_mb();

        return result;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
                                                  long long u)
{
        long long oldval, newval;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic64_add_unless\n"
"1:     ldrexd  %0, %H0, [%4]\n"
"       teq     %0, %5\n"
"       teqeq   %H0, %H5\n"
"       beq     2f\n"
"       adds    %Q1, %Q0, %Q6\n"
"       adc     %R1, %R0, %R6\n"
"       strexd  %2, %1, %H1, [%4]\n"
"       teq     %2, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (oldval != u)
                smp_mb();

        return oldval;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif