Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-26 02:36:49 +07:00
Commit d84e28d250

As a step towards making the atomic64 API use consistent types treewide, let's have the ia64 atomic64 implementation use s64 as the underlying type for atomic64_t, rather than long or __s64, matching the generated headers.

As atomic64_read() depends on the generic definition of atomic64_t, this still returns long. This will be converted in a subsequent patch.

Otherwise, there should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aou@eecs.berkeley.edu
Cc: arnd@arndb.de
Cc: bp@alien8.de
Cc: catalin.marinas@arm.com
Cc: davem@davemloft.net
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: ink@jurassic.park.msu.ru
Cc: jhogan@kernel.org
Cc: linux@armlinux.org.uk
Cc: mattst88@gmail.com
Cc: mpe@ellerman.id.au
Cc: palmer@sifive.com
Cc: paul.burton@mips.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: rth@twiddle.net
Cc: vgupta@synopsys.com
Link: https://lkml.kernel.org/r/20190522132250.26499-9-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
225 lines · 6.1 KiB · C
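To make the type change concrete, here is a minimal sketch of the relationship the commit message describes. The struct layouts are assumptions about the generic definitions in <linux/types.h> of that era, reproduced from memory for illustration; they are not part of the file below.

/* Sketch only -- illustrative, not part of arch/ia64/include/asm/atomic.h. */

/* Assumed generic definitions at the time of this patch: */
typedef struct {
	int counter;
} atomic_t;

typedef struct {
	long counter;	/* still long; a later patch converts this to s64 */
} atomic64_t;

/* After this patch, the helpers generated by ATOMIC64_OP() below take and
 * return s64, e.g. the expansion of ATOMIC64_OP(add, +) declares:
 *
 *	static __inline__ s64 ia64_atomic64_add(s64 i, atomic64_t *v);
 *
 * but atomic64_read(v) is READ_ONCE((v)->counter) and therefore still
 * yields long until the generic atomic64_t itself is converted.
 */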
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>


#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int							\
ia64_atomic_fetch_##op (int i, atomic_t *v)				\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return old;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

#ifdef __OPTIMIZE__
#define __ia64_atomic_const(i)						\
	static const int __ia64_atomic_p = __builtin_constant_p(i) ?	\
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\
	__ia64_atomic_p
#else
#define __ia64_atomic_const(i)	0
#endif

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic_fetch_add(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
})

#define atomic_fetch_sub(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)

#define atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP

#define ATOMIC64_OP(op, c_op)						\
static __inline__ s64							\
ia64_atomic64_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64							\
ia64_atomic64_fetch_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return old;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)

#define atomic64_add_return(i,v)					\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic64_fetch_add(i,v)						\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
})

#define atomic64_fetch_sub(i,v)						\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)

#define atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))

#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))

#endif /* _ASM_IA64_ATOMIC_H */
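For reference, a small usage sketch of the API this header provides. The caller, the names nr_requests and account_requests, and the pr_info() reporting are hypothetical, added only to show how __ia64_atomic_const() steers constant increments of 1, 4, 8, or 16 onto the ia64_fetch_and_add() fast path while non-constant increments fall back to the cmpxchg loop.

#include <linux/atomic.h>	/* pulls in this header on ia64 */
#include <linux/printk.h>

/* Hypothetical caller, for illustration only. */
static atomic64_t nr_requests = ATOMIC64_INIT(0);

static void account_requests(s64 batch)
{
	/* Constant increment of 1: __ia64_atomic_const(1) is true, so
	 * atomic64_add_return() uses the ia64_fetch_and_add() fast path. */
	atomic64_add(1, &nr_requests);

	/* Non-constant increment: falls back to the cmpxchg loop in
	 * ia64_atomic64_add(). */
	atomic64_add(batch, &nr_requests);

	pr_info("requests: %lld\n", (long long)atomic64_read(&nr_requests));
}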