mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-23 13:53:54 +07:00)
bfc18e389c
While __atomic_add_unless() was originally intended as a building-block for atomic_add_unless(), it's now used in a number of places around the kernel. It's the only common atomic operation named __atomic*(), rather than atomic_*(), and for consistency it would be better named atomic_fetch_add_unless().

This lack of consistency is slightly confusing, and gets in the way of scripting atomics. Given that, let's clean things up and promote it to an official part of the atomics API, in the form of atomic_fetch_add_unless().

This patch converts definitions and invocations over to the new name, including the instrumented version, using the following script:

----
git grep -w __atomic_add_unless | while read line; do
sed -i '{s/\<__atomic_add_unless\>/atomic_fetch_add_unless/}' "${line%%:*}";
done
git grep -w __arch_atomic_add_unless | while read line; do
sed -i '{s/\<__arch_atomic_add_unless\>/arch_atomic_fetch_add_unless/}' "${line%%:*}";
done
----

Note that we do not have atomic{64,_long}_fetch_add_unless(), which will be introduced by later patches.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Palmer Dabbelt <palmer@sifive.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/lkml/20180621121321.4761-2-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
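
As a concrete illustration of the rename described above (a hypothetical call site, not code taken from this patch): the returned value is still the counter value observed before the conditional add, so converted callers change only in spelling.

#include <linux/atomic.h>

/* Hypothetical caller, for illustration only: take a reference unless
 * the count has already dropped to zero. */
static bool example_tryget(atomic_t *refs)
{
	/* Was:  return __atomic_add_unless(refs, 1, 0) != 0; */
	return atomic_fetch_add_unless(refs, 1, 0) != 0;
}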
127 lines
3.0 KiB
C
/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#ifndef __ASM_OPENRISC_ATOMIC_H
#define __ASM_OPENRISC_ATOMIC_H

#include <linux/types.h>

/* Atomically perform op with v->counter and i */
#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
	int tmp; \
	\
	__asm__ __volatile__( \
		"1: l.lwa %0,0(%1) \n" \
		" l." #op " %0,%0,%2 \n" \
		" l.swa 0(%1),%0 \n" \
		" l.bnf 1b \n" \
		" l.nop \n" \
		: "=&r"(tmp) \
		: "r"(&v->counter), "r"(i) \
		: "cc", "memory"); \
}
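
/*
 * Illustrative note (not part of the original source): ATOMIC_OP() stamps
 * out one void operation per instantiation, e.g. ATOMIC_OP(and) yields
 * atomic_and(int i, atomic_t *v).  The l.lwa/l.swa pair is OpenRISC's
 * load-linked/store-conditional: if the reservation taken by l.lwa is lost
 * before l.swa completes, the store fails, the flag stays clear, and
 * l.bnf ("branch if no flag") retries from label 1.
 */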

/* Atomically perform op with v->counter and i, return the result */
#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
	int tmp; \
	\
	__asm__ __volatile__( \
		"1: l.lwa %0,0(%1) \n" \
		" l." #op " %0,%0,%2 \n" \
		" l.swa 0(%1),%0 \n" \
		" l.bnf 1b \n" \
		" l.nop \n" \
		: "=&r"(tmp) \
		: "r"(&v->counter), "r"(i) \
		: "cc", "memory"); \
	\
	return tmp; \
}
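
/*
 * Note added for clarity: the _return variants generated here hand back the
 * new value of the counter, so atomic_add_return(1, &v) behaves like an
 * atomic increment that also reports the updated count.
 */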

/* Atomically perform op with v->counter and i, return orig v->counter */
#define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
	int tmp, old; \
	\
	__asm__ __volatile__( \
		"1: l.lwa %0,0(%2) \n" \
		" l." #op " %1,%0,%3 \n" \
		" l.swa 0(%2),%1 \n" \
		" l.bnf 1b \n" \
		" l.nop \n" \
		: "=&r"(old), "=&r"(tmp) \
		: "r"(&v->counter), "r"(i) \
		: "cc", "memory"); \
	\
	return old; \
}
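
/*
 * Note added for clarity: in contrast to the _return variants above, the
 * fetch variants return the value the counter held before the operation;
 * e.g. if v->counter is 3, atomic_fetch_add(2, &v) returns 3 and leaves
 * v->counter at 5.
 */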

ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)

ATOMIC_FETCH_OP(add)
ATOMIC_FETCH_OP(sub)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor)

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_add_return	atomic_add_return
#define atomic_sub_return	atomic_sub_return
#define atomic_fetch_add	atomic_fetch_add
#define atomic_fetch_sub	atomic_fetch_sub
#define atomic_fetch_and	atomic_fetch_and
#define atomic_fetch_or		atomic_fetch_or
#define atomic_fetch_xor	atomic_fetch_xor
#define atomic_and		atomic_and
#define atomic_or		atomic_or
#define atomic_xor		atomic_xor
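
/*
 * Note added for clarity: these self-referential defines advertise which
 * operations this architecture implements, so that <asm-generic/atomic.h>,
 * included at the end of this file, only supplies fallbacks for the ones
 * that are missing.
 */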

/*
 * Atomically add a to v->counter as long as v is not already u.
 * Returns the original value at v->counter.
 *
 * This is often used through atomic_inc_not_zero()
 */
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int old, tmp;

	__asm__ __volatile__(
		"1: l.lwa %0, 0(%2) \n"
		" l.sfeq %0, %4 \n"
		" l.bf 2f \n"
		" l.add %1, %0, %3 \n"
		" l.swa 0(%2), %1 \n"
		" l.bnf 1b \n"
		" l.nop \n"
		"2: \n"
		: "=&r"(old), "=&r" (tmp)
		: "r"(&v->counter), "r"(a), "r"(u)
		: "cc", "memory");

	return old;
}
#define atomic_fetch_add_unless	atomic_fetch_add_unless
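
/*
 * Note added for clarity: the l.sfeq/l.bf pair above bails out to label 2
 * without storing when the counter already equals @u, so the function
 * returns the observed value unchanged in that case.  A sketch of how the
 * generic wrappers of this era consume the return value (illustrative,
 * not part of this file):
 *
 *	static inline bool atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return atomic_fetch_add_unless(v, a, u) != u;
 *	}
 *	#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
 */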

#include <asm-generic/atomic.h>

#endif /* __ASM_OPENRISC_ATOMIC_H */