As of ac7c3e4ff4
("compiler: enable CONFIG_OPTIMIZE_INLINING forcibly"),
inline functions are no longer annotated with '__always_inline', which
allows the compiler to decide whether inlining is really a good idea or
not. Although this is a great idea on paper, the reality is that AArch64
GCC prior to 9.1 has been shown to get confused when creating an
out-of-line copy of a function passing explicit 'register' variables
into an inline assembly block:
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=91111
It's not clear whether this is specific to arm64 or not but, for now,
ensure that all of our functions using 'register' variables are marked
as '__always_inline' so that the old behaviour is effectively preserved.
Hopefully other architectures are luckier with their compilers.
Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
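
To make the failure mode concrete, here is a minimal sketch of the
pattern in question (my_hvc_call is a made-up illustration, not code
from this patch): an inline-asm wrapper that pins an operand to a
specific register with a 'register' variable. If the compiler decides
to emit an out-of-line copy of such a function, affected GCC versions
can lose track of the register assignment, so the wrapper is forced
inline instead:

#include <linux/compiler.h>

/* Hypothetical example; not part of this patch. */
static __always_inline unsigned long my_hvc_call(unsigned long arg)
{
	/* Pin the argument/result to x0, as the HVC ABI expects. */
	register unsigned long x0 asm ("x0") = arg;

	asm volatile("hvc #0" : "+r" (x0) : : "memory");

	return x0;
}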
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

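/*
 * Non-returning atomics: each op maps onto a single ST<op> LSE
 * instruction, which updates v->counter in memory without pulling the
 * old value back into a register.
 */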
#define ATOMIC_OP(op, asm_op)						\
static inline void __lse_atomic_##op(int i, atomic_t *v)		\
{									\
	asm volatile(							\
"	" #asm_op "	%w[i], %[v]\n"					\
	: [i] "+r" (i), [v] "+Q" (v->counter)				\
	: "r" (v));							\
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

#undef ATOMIC_OP

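/*
 * Fetch variants: the LD<op> forms return the value the counter held
 * before the operation. The 'mb' suffix selects the memory ordering:
 * none (relaxed), 'a' (acquire), 'l' (release) or 'al' (full barrier),
 * with a "memory" clobber for the ordered versions.
 */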
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)	\
{									\
	asm volatile(							\
"	" #asm_op #mb "	%w[i], %w[i], %[v]"				\
	: [i] "+r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

#define ATOMIC_FETCH_OPS(op, asm_op)					\
	ATOMIC_FETCH_OP(_relaxed,   ,  op, asm_op)			\
	ATOMIC_FETCH_OP(_acquire,  a,  op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(_release,  l,  op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(        , al,  op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

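/*
 * add_return: LSE has no instruction that returns the *new* value, so
 * fetch the old value with LDADD and recompute the result with a
 * following ADD.
 */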
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
static inline int __lse_atomic_add_return##name(int i, atomic_t *v)	\
{									\
	u32 tmp;							\
									\
	asm volatile(							\
	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
	"	add	%w[i], %w[i], %w[tmp]"				\
	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC_OP_ADD_RETURN(_relaxed,   )
ATOMIC_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC_OP_ADD_RETURN

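/*
 * There is no atomic AND instruction; invert the operand with MVN and
 * use STCLR/LDCLR (bit clear) instead. Similarly, SUB is implemented
 * by negating the operand and using STADD/LDADD.
 */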
static inline void __lse_atomic_and(int i, atomic_t *v)
{
	asm volatile(
	"	mvn	%w[i], %w[i]\n"
	"	stclr	%w[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)	\
{									\
	asm volatile(							\
	"	mvn	%w[i], %w[i]\n"					\
	"	ldclr" #mb "	%w[i], %w[i], %[v]"			\
	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND

static inline void __lse_atomic_sub(int i, atomic_t *v)
{
	asm volatile(
	"	neg	%w[i], %w[i]\n"
	"	stadd	%w[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}

#define ATOMIC_OP_SUB_RETURN(name, mb, cl...)				\
static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)	\
{									\
	u32 tmp;							\
									\
	asm volatile(							\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
	"	add	%w[i], %w[i], %w[tmp]"				\
	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC_OP_SUB_RETURN(_relaxed,   )
ATOMIC_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC_OP_SUB_RETURN

#define ATOMIC_FETCH_OP_SUB(name, mb, cl...)				\
static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v)	\
{									\
	asm volatile(							\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], %w[i], %[v]"			\
	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC_FETCH_OP_SUB(_relaxed,   )
ATOMIC_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC_FETCH_OP_SUB

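/*
 * The 64-bit variants below mirror the 32-bit ones above, operating on
 * full 'x' registers and s64 counters instead of 'w' registers and ints.
 */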
#define ATOMIC64_OP(op, asm_op)						\
static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	asm volatile(							\
"	" #asm_op "	%[i], %[v]\n"					\
	: [i] "+r" (i), [v] "+Q" (v->counter)				\
	: "r" (v));							\
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
{									\
	asm volatile(							\
"	" #asm_op #mb "	%[i], %[i], %[v]"				\
	: [i] "+r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

#define ATOMIC64_FETCH_OPS(op, asm_op)					\
	ATOMIC64_FETCH_OP(_relaxed,   ,  op, asm_op)			\
	ATOMIC64_FETCH_OP(_acquire,  a,  op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(_release,  l,  op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(        , al,  op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)				\
static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"			\
	"	add	%[i], %[i], %x[tmp]"				\
	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC64_OP_ADD_RETURN(_relaxed,   )
ATOMIC64_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC64_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
	asm volatile(
	"	mvn	%[i], %[i]\n"
	"	stclr	%[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)	\
{									\
	asm volatile(							\
	"	mvn	%[i], %[i]\n"					\
	"	ldclr" #mb "	%[i], %[i], %[v]"			\
	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
	asm volatile(
	"	neg	%[i], %[i]\n"
	"	stadd	%[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)				\
static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)	\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"			\
	"	add	%[i], %[i], %x[tmp]"				\
	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC64_OP_SUB_RETURN(_relaxed,   )
ATOMIC64_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC64_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)				\
static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)	\
{									\
	asm volatile(							\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], %[i], %[v]"			\
	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC64_FETCH_OP_SUB(_relaxed,   )
ATOMIC64_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC64_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC64_FETCH_OP_SUB

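/*
 * dec_if_positive has no direct LSE equivalent, so it is built as a
 * CASAL loop: load the counter, bail out if the decrement would go
 * negative, then retry whenever the compare-and-swap observed a value
 * other than the one just loaded.
 */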
static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long tmp;

	asm volatile(
	"1:	ldr	%x[tmp], %[v]\n"
	"	subs	%[ret], %x[tmp], #1\n"
	"	b.lt	2f\n"
	"	casal	%x[tmp], %[ret], %[v]\n"
	"	sub	%x[tmp], %x[tmp], #1\n"
	"	sub	%x[tmp], %x[tmp], %[ret]\n"
	"	cbnz	%x[tmp], 1b\n"
	"2:"
	: [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
	:
	: "cc", "memory");

	return (long)v;
}

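/*
 * The cmpxchg family pins its operands to fixed registers (x0-x2 here,
 * x0-x4 for the double-word version below) with explicit 'register'
 * variables. As the commit message above explains, such functions are
 * marked __always_inline: GCC prior to 9.1 can miscompile an
 * out-of-line copy of them.
 */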
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)			\
static __always_inline u##sz						\
__lse__cmpxchg_case_##name##sz(volatile void *ptr,			\
					      u##sz old,		\
					      u##sz new)		\
{									\
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
	register u##sz x1 asm ("x1") = old;				\
	register u##sz x2 asm ("x2") = new;				\
	unsigned long tmp;						\
									\
	asm volatile(							\
	"	mov	%" #w "[tmp], %" #w "[old]\n"			\
	"	cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"	\
	"	mov	%" #w "[ret], %" #w "[tmp]"			\
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr),		\
	  [tmp] "=&r" (tmp)						\
	: [old] "r" (x1), [new] "r" (x2)				\
	: cl);								\
									\
	return x0;							\
}

__CMPXCHG_CASE(w, b,     ,  8,   )
__CMPXCHG_CASE(w, h,     , 16,   )
__CMPXCHG_CASE(w,  ,     , 32,   )
__CMPXCHG_CASE(x,  ,     , 64,   )
__CMPXCHG_CASE(w, b, acq_,  8,  a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16,  a, "memory")
__CMPXCHG_CASE(w,  , acq_, 32,  a, "memory")
__CMPXCHG_CASE(x,  , acq_, 64,  a, "memory")
__CMPXCHG_CASE(w, b, rel_,  8,  l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16,  l, "memory")
__CMPXCHG_CASE(w,  , rel_, 32,  l, "memory")
__CMPXCHG_CASE(x,  , rel_, 64,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_,  8, al, "memory")
__CMPXCHG_CASE(w, h,  mb_, 16, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_, 32, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")

#undef __CMPXCHG_CASE

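/*
 * Double-word cmpxchg via CASP: compares and swaps a pair of adjacent
 * 64-bit words. The EOR/ORR sequence folds both comparisons into x0,
 * so the function returns zero on success and non-zero on failure.
 */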
#define __CMPXCHG_DBL(name, mb, cl...)					\
static __always_inline long						\
__lse__cmpxchg_double##name(unsigned long old1,				\
					 unsigned long old2,		\
					 unsigned long new1,		\
					 unsigned long new2,		\
					 volatile void *ptr)		\
{									\
	unsigned long oldval1 = old1;					\
	unsigned long oldval2 = old2;					\
	register unsigned long x0 asm ("x0") = old1;			\
	register unsigned long x1 asm ("x1") = old2;			\
	register unsigned long x2 asm ("x2") = new1;			\
	register unsigned long x3 asm ("x3") = new2;			\
	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
									\
	asm volatile(							\
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
	"	eor	%[old1], %[old1], %[oldval1]\n"			\
	"	eor	%[old2], %[old2], %[oldval2]\n"			\
	"	orr	%[old1], %[old1], %[old2]"			\
	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
	: cl);								\
									\
	return x0;							\
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */