mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-26 19:05:23 +07:00)
04e8851af7
As a step towards making the atomic64 API use consistent types treewide, let's have the sparc atomic64 implementation use s64 as the underlying type for atomic64_t, rather than long, matching the generated headers.

As atomic64_read() depends on the generic definition of atomic64_t, this still returns long. This will be converted in a subsequent patch.

Otherwise, there should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aou@eecs.berkeley.edu
Cc: arnd@arndb.de
Cc: bp@alien8.de
Cc: catalin.marinas@arm.com
Cc: fenghua.yu@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: ink@jurassic.park.msu.ru
Cc: jhogan@kernel.org
Cc: linux@armlinux.org.uk
Cc: mattst88@gmail.com
Cc: mpe@ellerman.id.au
Cc: palmer@sifive.com
Cc: paul.burton@mips.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: rth@twiddle.net
Cc: tony.luck@intel.com
Cc: vgupta@synopsys.com
Link: https://lkml.kernel.org/r/20190522132250.26499-15-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
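As a before/after illustration of the change described above (a sketch, not the commit's diff), using atomic64_add_return() from the header below as the example:

/* Sketch only: the shape of the sparc64 declarations before and after. */

/* Before this patch: the 64-bit ops were declared in terms of long. */
long atomic64_add_return(long, atomic64_t *);

/* After this patch: they are declared in terms of s64, matching the
 * generated headers. atomic64_read() still returns long until the
 * generic atomic64_t definition is converted in a subsequent patch.
 */
s64 atomic64_add_return(s64, atomic64_t *);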
68 lines
1.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/* atomic.h: Thankfully the V9 is at least reasonable for this
 * stuff.
 *
 * Copyright (C) 1996, 1997, 2000, 2012 David S. Miller (davem@redhat.com)
 */

#ifndef __ARCH_SPARC64_ATOMIC__
#define __ARCH_SPARC64_ATOMIC__

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }

#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic64_read(v) READ_ONCE((v)->counter)

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op) \
void atomic_##op(int, atomic_t *); \
void atomic64_##op(s64, atomic64_t *);

#define ATOMIC_OP_RETURN(op) \
int atomic_##op##_return(int, atomic_t *); \
s64 atomic64_##op##_return(s64, atomic64_t *);

#define ATOMIC_FETCH_OP(op) \
int atomic_fetch_##op(int, atomic_t *); \
s64 atomic64_fetch_##op(s64, atomic64_t *);

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
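
/* For illustration (not in the original header): ATOMIC_OPS(add) above
 * expands to the following declarations, with the 64-bit variants now
 * taking and returning s64 rather than long:
 *
 *	void atomic_add(int, atomic_t *);
 *	void atomic64_add(s64, atomic64_t *);
 *	int atomic_add_return(int, atomic_t *);
 *	s64 atomic64_add_return(s64, atomic64_t *);
 *	int atomic_fetch_add(int, atomic_t *);
 *	s64 atomic64_fetch_add(s64, atomic64_t *);
 */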

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))

static inline int atomic_xchg(atomic_t *v, int new)
{
	return xchg(&v->counter, new);
}

#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

s64 atomic64_dec_if_positive(atomic64_t *v);
#define atomic64_dec_if_positive atomic64_dec_if_positive

#endif /* !(__ARCH_SPARC64_ATOMIC__) */
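As a usage illustration only (hypothetical caller code, not part of this commit or header), the declarations above are consumed like any other atomic64_t counter; the bytes_written counter and helper names here are made up:

/* Hypothetical caller sketch: exercising the atomic64 ops declared above. */
#include <linux/atomic.h>

static atomic64_t bytes_written = ATOMIC64_INIT(0);

static void account_write(s64 len)
{
	/* atomic64_add() is declared void; it returns nothing. */
	atomic64_add(len, &bytes_written);
}

static s64 total_written(void)
{
	/* atomic64_read() still returns long here until the generic
	 * atomic64_t definition is converted in a later patch.
	 */
	return atomic64_read(&bytes_written);
}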