Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
Commit 25d8d4eeca: Merge tag 'powerpc-5.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc updates from Michael Ellerman:

- Add support for (optionally) using queued spinlocks & rwlocks.
- Support for a new faster system call ABI using the scv instruction on Power9 or later.
- Drop support for the PROT_SAO mmap/mprotect flag as it will be unsupported on Power10 and future processors, leaving us with no way to implement the functionality it requests. This risks breaking userspace, though we believe it is unused in practice.
- A bug fix for, and then the removal of, our custom stack expansion checking. We now allow stack expansion up to the rlimit, like other architectures.
- Remove the remnants of our (previously disabled) topology update code, which tried to react to NUMA layout changes on virtualised systems, but was prone to crashes and other problems.
- Add PMU support for Power10 CPUs.
- A change to our signal trampoline so that we don't unbalance the link stack (branch return predictor) in the signal delivery path.
- Lots of other cleanups, refactorings, smaller features and so on as usual.

Thanks to: Abhishek Goel, Alastair D'Silva, Alexander A. Klimov, Alexey Kardashevskiy, Alistair Popple, Andrew Donnellan, Aneesh Kumar K.V, Anju T Sudhakar, Anton Blanchard, Arnd Bergmann, Athira Rajeev, Balamuruhan S, Bharata B Rao, Bill Wendling, Bin Meng, Cédric Le Goater, Chris Packham, Christophe Leroy, Christoph Hellwig, Daniel Axtens, Dan Williams, David Lamparter, Desnes A. Nunes do Rosario, Erhard F., Finn Thain, Frederic Barrat, Ganesh Goudar, Gautham R. Shenoy, Geoff Levand, Greg Kurz, Gustavo A. R. Silva, Hari Bathini, Harish, Imre Kaloz, Joel Stanley, Joe Perches, John Crispin, Jordan Niethe, Kajol Jain, Kamalesh Babulal, Kees Cook, Laurent Dufour, Leonardo Bras, Li RongQing, Madhavan Srinivasan, Mahesh Salgaonkar, Mark Cave-Ayland, Michal Suchanek, Milton Miller, Mimi Zohar, Murilo Opsfelder Araujo, Nathan Chancellor, Nathan Lynch, Naveen N. Rao, Nayna Jain, Nicholas Piggin, Oliver O'Halloran, Palmer Dabbelt, Pedro Miraglia Franco de Carvalho, Philippe Bergheaud, Pingfan Liu, Pratik Rajesh Sampat, Qian Cai, Qinglang Miao, Randy Dunlap, Ravi Bangoria, Sachin Sant, Sam Bobroff, Sandipan Das, Santosh Sivaraj, Satheesh Rajendran, Shirisha Ganta, Sourabh Jain, Srikar Dronamraju, Stan Johnson, Stephen Rothwell, Thadeu Lima de Souza Cascardo, Thiago Jung Bauermann, Tom Lane, Vaibhav Jain, Vladis Dronov, Wei Yongjun, Wen Xiong, YueHaibing.

* tag 'powerpc-5.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (337 commits)
  selftests/powerpc: Fix pkey syscall redefinitions
  powerpc: Fix circular dependency between percpu.h and mmu.h
  powerpc/powernv/sriov: Fix use of uninitialised variable
  selftests/powerpc: Skip vmx/vsx/tar/etc tests on older CPUs
  powerpc/40x: Fix assembler warning about r0
  powerpc/papr_scm: Add support for fetching nvdimm 'fuel-gauge' metric
  powerpc/papr_scm: Fetch nvdimm performance stats from PHYP
  cpuidle: pseries: Fixup exit latency for CEDE(0)
  cpuidle: pseries: Add function to parse extended CEDE records
  cpuidle: pseries: Set the latency-hint before entering CEDE
  selftests/powerpc: Fix online CPU selection
  powerpc/perf: Consolidate perf_callchain_user_[64|32]()
  powerpc/pseries/hotplug-cpu: Remove double free in error path
  powerpc/pseries/mobility: Add pr_debug() for device tree changes
  powerpc/pseries/mobility: Set pr_fmt()
  powerpc/cacheinfo: Warn if cache object chain becomes unordered
  powerpc/cacheinfo: Improve diagnostics about malformed cache lists
  powerpc/cacheinfo: Use name@unit instead of full DT path in debug messages
  powerpc/cacheinfo: Set pr_fmt()
  powerpc: fix function annotations to avoid section mismatch warnings with gcc-10
  ...
577 lines
13 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire barrier
 * on platforms without lwsync.
 */
#define __atomic_acquire_fence()	\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()	\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
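
/*
 * Illustrative sketch (not part of this header): the generic atomic
 * fallbacks in <linux/atomic.h> are expected to combine a _relaxed
 * primitive with the fences above to build the _acquire/_release
 * variants, roughly along these lines:
 *
 *	static __always_inline int
 *	atomic_add_return_acquire(int i, atomic_t *v)
 *	{
 *		int ret = atomic_add_return_relaxed(i, v);
 *		__atomic_acquire_fence();
 *		return ret;
 *	}
 *
 * which is why an isync, ordered after the trailing "bne-" of the
 * relaxed op, is sufficient on CPUs without lwsync.
 */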

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC_OP(op, asm_op)	\
static __inline__ void atomic_##op(int a, atomic_t *v)	\
{	\
	int t;	\
	\
	__asm__ __volatile__(	\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"	\
	#asm_op " %0,%2,%0\n"	\
"	stwcx.	%0,0,%3 \n"	\
"	bne-	1b\n"	\
	: "=&r" (t), "+m" (v->counter)	\
	: "r" (a), "r" (&v->counter)	\
	: "cc");	\
}	\

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)	\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{	\
	int t;	\
	\
	__asm__ __volatile__(	\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"	\
"	stwcx.	%0,0,%3\n"	\
"	bne-	1b\n"	\
	: "=&r" (t), "+m" (v->counter)	\
	: "r" (a), "r" (&v->counter)	\
	: "cc");	\
	\
	return t;	\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)	\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{	\
	int res, t;	\
	\
	__asm__ __volatile__(	\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"	\
"	stwcx.	%1,0,%4\n"	\
"	bne-	1b\n"	\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)	\
	: "r" (a), "r" (&v->counter)	\
	: "cc");	\
	\
	return res;	\
}

#define ATOMIC_OPS(op, asm_op)	\
	ATOMIC_OP(op, asm_op)	\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)	\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
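
/*
 * For reference, a sketch of what the macros above generate (this
 * expansion is not part of the original source): ATOMIC_OPS(add, add)
 * produces, among others, a plain void helper equivalent to
 *
 *	static __inline__ void atomic_add(int a, atomic_t *v)
 *	{
 *		int t;
 *
 *		__asm__ __volatile__(
 *	"1:	lwarx	%0,0,%3		# atomic_add\n"
 *	"	add %0,%2,%0\n"
 *	"	stwcx.	%0,0,%3\n"
 *	"	bne-	1b\n"
 *		: "=&r" (t), "+m" (v->counter)
 *		: "r" (a), "r" (&v->counter)
 *		: "cc");
 *	}
 *
 * i.e. a lwarx/stwcx. retry loop with no memory barrier, which is why
 * the value-returning forms are only provided here as _relaxed building
 * blocks.
 */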

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)	\
	ATOMIC_OP(op, asm_op)	\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n"
"	addic	%0,%0,1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_inc atomic_inc

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n"
"	addic	%0,%0,-1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_dec atomic_dec

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n)	\
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n)	\
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/*
 * Don't want to override the generic atomic_try_cmpxchg_acquire, because
 * we add a lock hint to the lwarx, which may not be wanted for the
 * _acquire case (and is not used by the other _acquire variants so it
 * would be a surprise).
 */
static __always_inline bool
atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
	int r, o = *old;

	__asm__ __volatile__ (
"1:\t" PPC_LWARX(%0,0,%2,1) "	# atomic_try_cmpxchg_acquire\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
"\t" PPC_ACQUIRE_BARRIER "\n"
"2:\n"
	: "=&r" (r), "+m" (v->counter)
	: "r" (&v->counter), "r" (o), "r" (new)
	: "cr0", "memory");

	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
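
/*
 * Usage sketch (hypothetical caller, not part of this header): the lock
 * hint on the lwarx plus the trailing acquire barrier make this a good
 * fit for a lock fast path, e.g.
 *
 *	static bool take_lock(atomic_t *lock_word)
 *	{
 *		int old = 0;
 *
 *		// Succeeds, and acquires, only if the word was 0.
 *		return atomic_try_cmpxchg_lock(lock_word, &old, 1);
 *	}
 *
 * On failure *old is updated to the observed value, so a caller can loop
 * or fall back to a slow path without re-reading the word.
 */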

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n"
"	cmpw	0,%0,%3\n"
"	beq	2f\n"
"	add	%0,%2,%0\n"
"	stwcx.	%0,0,%1\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0\n"
"2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
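
/*
 * Usage sketch (hypothetical caller, not part of this header): the classic
 * use is taking a reference only while the count is still live, e.g.
 *
 *	static bool obj_get_unless_dead(atomic_t *refcount)
 *	{
 *		// An old value of 0 means the object was already dying.
 *		return atomic_fetch_add_unless(refcount, 1, 0) != 0;
 *	}
 */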

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n"
"	cmpwi	0,%0,0\n"
"	beq-	2f\n"
"	addic	%1,%0,1\n"
"	stwcx.	%1,0,%2\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n"
"2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
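
/*
 * Usage sketch (hypothetical caller and struct, not part of this header):
 * lookup paths typically use this to pin an object found under RCU or a
 * lock, refusing objects whose refcount already dropped to zero, e.g.
 *
 *	static struct my_obj *obj_lookup_get(struct my_obj *obj)
 *	{
 *		// Assumes struct my_obj has an atomic_t refcnt member.
 *		if (!atomic_inc_not_zero(&obj->refcnt))
 *			return NULL;
 *		return obj;
 *	}
 */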

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n"
"	cmpwi	%0,1\n"
"	addi	%0,%0,-1\n"
"	blt-	2f\n"
"	stwcx.	%0,0,%1\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n"
"2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
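
/*
 * Usage sketch (hypothetical caller, not part of this header): this is the
 * natural primitive for a counting-semaphore style trylock, e.g.
 *
 *	static bool try_take_slot(atomic_t *free_slots)
 *	{
 *		// A result >= 0 means a slot was consumed; a negative
 *		// result means the count was already 0 and is unchanged.
 *		return atomic_dec_if_positive(free_slots) >= 0;
 *	}
 */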

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ s64 atomic64_read(const atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)	\
static __inline__ void atomic64_##op(s64 a, atomic64_t *v)	\
{	\
	s64 t;	\
	\
	__asm__ __volatile__(	\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"	\
	#asm_op " %0,%2,%0\n"	\
"	stdcx.	%0,0,%3 \n"	\
"	bne-	1b\n"	\
	: "=&r" (t), "+m" (v->counter)	\
	: "r" (a), "r" (&v->counter)	\
	: "cc");	\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)	\
static inline s64	\
atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)	\
{	\
	s64 t;	\
	\
	__asm__ __volatile__(	\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"	\
"	stdcx.	%0,0,%3\n"	\
"	bne-	1b\n"	\
	: "=&r" (t), "+m" (v->counter)	\
	: "r" (a), "r" (&v->counter)	\
	: "cc");	\
	\
	return t;	\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)	\
static inline s64	\
atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)	\
{	\
	s64 res, t;	\
	\
	__asm__ __volatile__(	\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"	\
"	stdcx.	%1,0,%4\n"	\
"	bne-	1b\n"	\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)	\
	: "r" (a), "r" (&v->counter)	\
	: "cc");	\
	\
	return res;	\
}

#define ATOMIC64_OPS(op, asm_op)	\
	ATOMIC64_OP(op, asm_op)	\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)	\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)	\
	ATOMIC64_OP(op, asm_op)	\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void atomic64_inc(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_inc atomic64_inc

static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic64_dec(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_dec atomic64_dec

static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n"
"	addic.	%0,%0,-1\n"
"	blt-	2f\n"
"	stdcx.	%0,0,%1\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n"
"2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n)	\
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n)	\
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n"
"	cmpd	0,%0,%3\n"
"	beq	2f\n"
"	add	%0,%2,%0\n"
"	stdcx.	%0,0,%1\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0\n"
"2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/**
|
|
* atomic_inc64_not_zero - increment unless the number is zero
|
|
* @v: pointer of type atomic64_t
|
|
*
|
|
* Atomically increments @v by 1, so long as @v is non-zero.
|
|
* Returns non-zero if @v was non-zero, and zero otherwise.
|
|
*/
|
|
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
|
|
{
|
|
s64 t1, t2;
|
|
|
|
__asm__ __volatile__ (
|
|
PPC_ATOMIC_ENTRY_BARRIER
|
|
"1: ldarx %0,0,%2 # atomic64_inc_not_zero\n\
|
|
cmpdi 0,%0,0\n\
|
|
beq- 2f\n\
|
|
addic %1,%0,1\n\
|
|
stdcx. %1,0,%2\n\
|
|
bne- 1b\n"
|
|
PPC_ATOMIC_EXIT_BARRIER
|
|
"\n\
|
|
2:"
|
|
: "=&r" (t1), "=&r" (t2)
|
|
: "r" (&v->counter)
|
|
: "cc", "xer", "memory");
|
|
|
|
return t1 != 0;
|
|
}
|
|
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
|
|
|
|
#endif /* __powerpc64__ */
|
|
|
|
#endif /* __KERNEL__ */
|
|
#endif /* _ASM_POWERPC_ATOMIC_H_ */
|