2005-09-23 02:20:04 +07:00
|
|
|
#ifndef _ASM_POWERPC_ATOMIC_H_
|
|
|
|
#define _ASM_POWERPC_ATOMIC_H_
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* PowerPC atomic operations
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifdef __KERNEL__
|
2012-03-29 00:30:02 +07:00
|
|
|
#include <linux/types.h>
|
|
|
|
#include <asm/cmpxchg.h>
|
2014-03-14 01:00:35 +07:00
|
|
|
#include <asm/barrier.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2005-09-23 02:20:04 +07:00
|
|
|
#define ATOMIC_INIT(i) { (i) }
|
2005-04-17 05:20:36 +07:00
|
|
|
|
powerpc: atomic: Implement atomic{, 64}_*_return_* variants
On powerpc, acquire and release semantics can be achieved with
lightweight barriers("lwsync" and "ctrl+isync"), which can be used to
implement __atomic_op_{acquire,release}.
For release semantics, since we only need to ensure all memory accesses
that issue before must take effects before the -store- part of the
atomics, "lwsync" is what we only need. On the platform without
"lwsync", "sync" should be used. Therefore in __atomic_op_release() we
use PPC_RELEASE_BARRIER.
For acquire semantics, "lwsync" is what we only need for the similar
reason. However on the platform without "lwsync", we can use "isync"
rather than "sync" as an acquire barrier. Therefore in
__atomic_op_acquire() we use PPC_ACQUIRE_BARRIER, which is barrier() on
UP, "lwsync" if available and "isync" otherwise.
Implement atomic{,64}_{add,sub,inc,dec}_return_relaxed, and build other
variants with these helpers.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-01-06 09:08:25 +07:00
|
|
|
/*
|
|
|
|
* Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
|
|
|
|
 * a "bne-" instruction at the end, so an isync is enough as an acquire barrier
|
|
|
|
* on the platform without lwsync.
|
|
|
|
*/
|
|
|
|
/*
 * Build an _acquire variant from the _relaxed implementation of @op:
 * run the relaxed op first, then emit PPC_ACQUIRE_BARRIER (lwsync where
 * available, isync otherwise — see the comment above) so that no later
 * memory access can be reordered before the atomic.  The "memory"
 * clobber makes the barrier a compiler barrier as well.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
	__ret;								\
})
|
|
|
|
|
|
|
|
/*
 * Build a _release variant from the _relaxed implementation of @op:
 * emit PPC_RELEASE_BARRIER (lwsync where available) before the relaxed
 * op so that all earlier memory accesses take effect before the store
 * part of the atomic.  Evaluates to the relaxed op's result.
 */
#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
	op##_relaxed(args);						\
})
|
|
|
|
|
2007-08-11 07:15:30 +07:00
|
|
|
/*
 * Atomically read v->counter (relaxed: no ordering implied).
 *
 * Done with a single lwz in inline asm rather than a plain C load so
 * the compiler cannot tear, duplicate, or cache the access; the "m"
 * constraint ties it to the counter's memory location.
 */
static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}
|
|
|
|
|
|
|
|
/*
 * Atomically set v->counter to @i (relaxed: no ordering implied).
 * A single stw via inline asm, for the same anti-tearing reason as
 * atomic_read().
 */
static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-03-27 00:11:31 +07:00
|
|
|
/*
 * ATOMIC_OP(op, asm_op) - generate void atomic_##op(int a, atomic_t *v).
 *
 * Applies machine instruction @asm_op to v->counter with operand @a
 * inside an lwarx/stwcx. (load-reserve/store-conditional) loop; "bne- 1b"
 * retries until the conditional store succeeds.  No return value, no
 * ordering guarantees.  PPC405_ERR77() emits an erratum workaround on
 * affected PPC405 parts (macro defined elsewhere).
 */
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}									\

|
|
|
|
|
powerpc: atomic: Implement atomic{, 64}_*_return_* variants
On powerpc, acquire and release semantics can be achieved with
lightweight barriers("lwsync" and "ctrl+isync"), which can be used to
implement __atomic_op_{acquire,release}.
For release semantics, since we only need to ensure all memory accesses
that issue before must take effects before the -store- part of the
atomics, "lwsync" is what we only need. On the platform without
"lwsync", "sync" should be used. Therefore in __atomic_op_release() we
use PPC_RELEASE_BARRIER.
For acquire semantics, "lwsync" is what we only need for the similar
reason. However on the platform without "lwsync", we can use "isync"
rather than "sync" as an acquire barrier. Therefore in
__atomic_op_acquire() we use PPC_ACQUIRE_BARRIER, which is barrier() on
UP, "lwsync" if available and "isync" otherwise.
Implement atomic{,64}_{add,sub,inc,dec}_return_relaxed, and build other
variants with these helpers.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-01-06 09:08:25 +07:00
|
|
|
/*
 * ATOMIC_OP_RETURN_RELAXED(op, asm_op) - generate
 * int atomic_##op##_return_relaxed(int a, atomic_t *v).
 *
 * Same lwarx/stwcx. retry loop as ATOMIC_OP(), but returns the new
 * value of the counter.  Relaxed ordering: acquire/release/full
 * variants are built from this via __atomic_op_{acquire,release}.
 */
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}
|
|
|
|
|
powerpc: atomic: Implement atomic{, 64}_*_return_* variants
On powerpc, acquire and release semantics can be achieved with
lightweight barriers("lwsync" and "ctrl+isync"), which can be used to
implement __atomic_op_{acquire,release}.
For release semantics, since we only need to ensure all memory accesses
that issue before must take effects before the -store- part of the
atomics, "lwsync" is what we only need. On the platform without
"lwsync", "sync" should be used. Therefore in __atomic_op_release() we
use PPC_RELEASE_BARRIER.
For acquire semantics, "lwsync" is what we only need for the similar
reason. However on the platform without "lwsync", we can use "isync"
rather than "sync" as an acquire barrier. Therefore in
__atomic_op_acquire() we use PPC_ACQUIRE_BARRIER, which is barrier() on
UP, "lwsync" if available and "isync" otherwise.
Implement atomic{,64}_{add,sub,inc,dec}_return_relaxed, and build other
variants with these helpers.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-01-06 09:08:25 +07:00
|
|
|
/*
 * ATOMIC_OPS(op, asm_op) - instantiate both the void and the
 * value-returning (relaxed) form of an atomic operation.
 */
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-03-27 00:11:31 +07:00
|
|
|
ATOMIC_OPS(add, add)
|
|
|
|
ATOMIC_OPS(sub, subf)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-04-24 00:46:23 +07:00
|
|
|
ATOMIC_OP(and, and)
|
|
|
|
ATOMIC_OP(or, or)
|
|
|
|
ATOMIC_OP(xor, xor)
|
|
|
|
|
powerpc: atomic: Implement atomic{, 64}_*_return_* variants
On powerpc, acquire and release semantics can be achieved with
lightweight barriers("lwsync" and "ctrl+isync"), which can be used to
implement __atomic_op_{acquire,release}.
For release semantics, since we only need to ensure all memory accesses
that issue before must take effects before the -store- part of the
atomics, "lwsync" is what we only need. On the platform without
"lwsync", "sync" should be used. Therefore in __atomic_op_release() we
use PPC_RELEASE_BARRIER.
For acquire semantics, "lwsync" is what we only need for the similar
reason. However on the platform without "lwsync", we can use "isync"
rather than "sync" as an acquire barrier. Therefore in
__atomic_op_acquire() we use PPC_ACQUIRE_BARRIER, which is barrier() on
UP, "lwsync" if available and "isync" otherwise.
Implement atomic{,64}_{add,sub,inc,dec}_return_relaxed, and build other
variants with these helpers.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-01-06 09:08:25 +07:00
|
|
|
#define atomic_add_return_relaxed atomic_add_return_relaxed
|
|
|
|
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
|
|
|
|
|
2014-03-27 00:11:31 +07:00
|
|
|
#undef ATOMIC_OPS
|
powerpc: atomic: Implement atomic{, 64}_*_return_* variants
On powerpc, acquire and release semantics can be achieved with
lightweight barriers("lwsync" and "ctrl+isync"), which can be used to
implement __atomic_op_{acquire,release}.
For release semantics, since we only need to ensure all memory accesses
that issue before must take effects before the -store- part of the
atomics, "lwsync" is what we only need. On the platform without
"lwsync", "sync" should be used. Therefore in __atomic_op_release() we
use PPC_RELEASE_BARRIER.
For acquire semantics, "lwsync" is what we only need for the similar
reason. However on the platform without "lwsync", we can use "isync"
rather than "sync" as an acquire barrier. Therefore in
__atomic_op_acquire() we use PPC_ACQUIRE_BARRIER, which is barrier() on
UP, "lwsync" if available and "isync" otherwise.
Implement atomic{,64}_{add,sub,inc,dec}_return_relaxed, and build other
variants with these helpers.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-01-06 09:08:25 +07:00
|
|
|
#undef ATOMIC_OP_RETURN_RELAXED
|
2014-03-27 00:11:31 +07:00
|
|
|
#undef ATOMIC_OP
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
|
|
|
|
|
|
|
|
/*
 * Atomically increment v->counter by 1; no return value, no ordering.
 *
 * Uses addic (add immediate carrying) in the ll/sc loop, which writes
 * the carry bit — hence "xer" in the clobber list.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
|
|
|
|
|
powerpc: atomic: Implement atomic{, 64}_*_return_* variants
On powerpc, acquire and release semantics can be achieved with
lightweight barriers("lwsync" and "ctrl+isync"), which can be used to
implement __atomic_op_{acquire,release}.
For release semantics, since we only need to ensure all memory accesses
that issue before must take effects before the -store- part of the
atomics, "lwsync" is what we only need. On the platform without
"lwsync", "sync" should be used. Therefore in __atomic_op_release() we
use PPC_RELEASE_BARRIER.
For acquire semantics, "lwsync" is what we only need for the similar
reason. However on the platform without "lwsync", we can use "isync"
rather than "sync" as an acquire barrier. Therefore in
__atomic_op_acquire() we use PPC_ACQUIRE_BARRIER, which is barrier() on
UP, "lwsync" if available and "isync" otherwise.
Implement atomic{,64}_{add,sub,inc,dec}_return_relaxed, and build other
variants with these helpers.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-01-06 09:08:25 +07:00
|
|
|
/*
 * Atomically increment v->counter by 1 and return the new value.
 * Relaxed ordering; stronger variants are built via
 * __atomic_op_{acquire,release}.  addic sets carry, so "xer" is
 * clobbered.
 */
static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* atomic_inc_and_test - increment and test
|
|
|
|
* @v: pointer of type atomic_t
|
|
|
|
*
|
|
|
|
* Atomically increments @v by 1
|
|
|
|
* and returns true if the result is zero, or false for all
|
|
|
|
* other cases.
|
|
|
|
*/
|
|
|
|
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
|
|
|
|
|
|
|
|
/*
 * Atomically decrement v->counter by 1; no return value, no ordering.
 * Implemented as addic of -1 inside the ll/sc loop ("xer" clobbered
 * for the carry bit).
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)\
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
|
|
|
|
|
powerpc: atomic: Implement atomic{, 64}_*_return_* variants
On powerpc, acquire and release semantics can be achieved with
lightweight barriers("lwsync" and "ctrl+isync"), which can be used to
implement __atomic_op_{acquire,release}.
For release semantics, since we only need to ensure all memory accesses
that issue before must take effects before the -store- part of the
atomics, "lwsync" is what we only need. On the platform without
"lwsync", "sync" should be used. Therefore in __atomic_op_release() we
use PPC_RELEASE_BARRIER.
For acquire semantics, "lwsync" is what we only need for the similar
reason. However on the platform without "lwsync", we can use "isync"
rather than "sync" as an acquire barrier. Therefore in
__atomic_op_acquire() we use PPC_ACQUIRE_BARRIER, which is barrier() on
UP, "lwsync" if available and "isync" otherwise.
Implement atomic{,64}_{add,sub,inc,dec}_return_relaxed, and build other
variants with these helpers.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-01-06 09:08:25 +07:00
|
|
|
/*
 * Atomically decrement v->counter by 1 and return the new value.
 * Relaxed ordering; stronger variants are built via
 * __atomic_op_{acquire,release}.  addic sets carry, so "xer" is
 * clobbered.
 */
static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}
|
|
|
|
|
powerpc: atomic: Implement atomic{, 64}_*_return_* variants
On powerpc, acquire and release semantics can be achieved with
lightweight barriers("lwsync" and "ctrl+isync"), which can be used to
implement __atomic_op_{acquire,release}.
For release semantics, since we only need to ensure all memory accesses
that issue before must take effects before the -store- part of the
atomics, "lwsync" is what we only need. On the platform without
"lwsync", "sync" should be used. Therefore in __atomic_op_release() we
use PPC_RELEASE_BARRIER.
For acquire semantics, "lwsync" is what we only need for the similar
reason. However on the platform without "lwsync", we can use "isync"
rather than "sync" as an acquire barrier. Therefore in
__atomic_op_acquire() we use PPC_ACQUIRE_BARRIER, which is barrier() on
UP, "lwsync" if available and "isync" otherwise.
Implement atomic{,64}_{add,sub,inc,dec}_return_relaxed, and build other
variants with these helpers.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-01-06 09:08:25 +07:00
|
|
|
#define atomic_inc_return_relaxed atomic_inc_return_relaxed
|
|
|
|
#define atomic_dec_return_relaxed atomic_dec_return_relaxed
|
|
|
|
|
2007-05-08 14:34:27 +07:00
|
|
|
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
|
powerpc: atomic: Implement acquire/release/relaxed variants for cmpxchg
Implement cmpxchg{,64}_relaxed and atomic{,64}_cmpxchg_relaxed, based on
which _release variants can be built.
To avoid superfluous barriers in _acquire variants, we implement these
operations with assembly code rather use __atomic_op_acquire() to build
them automatically.
For the same reason, we keep the assembly implementation of fully
ordered cmpxchg operations.
However, we don't do the similar for _release, because that will require
putting barriers in the middle of ll/sc loops, which is probably a bad
idea.
Note cmpxchg{,64}_relaxed and atomic{,64}_cmpxchg_relaxed are not
compiler barriers.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2015-12-15 21:24:17 +07:00
|
|
|
#define atomic_cmpxchg_relaxed(v, o, n) \
|
|
|
|
cmpxchg_relaxed(&((v)->counter), (o), (n))
|
|
|
|
#define atomic_cmpxchg_acquire(v, o, n) \
|
|
|
|
cmpxchg_acquire(&((v)->counter), (o), (n))
|
|
|
|
|
2006-01-10 06:59:17 +07:00
|
|
|
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
|
2015-12-15 21:24:16 +07:00
|
|
|
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
|
2005-11-14 07:07:24 +07:00
|
|
|
|
2005-11-14 07:07:25 +07:00
|
|
|
/**
|
2011-07-27 06:09:07 +07:00
|
|
|
* __atomic_add_unless - add unless the number is a given value
|
2005-11-14 07:07:25 +07:00
|
|
|
* @v: pointer of type atomic_t
|
|
|
|
* @a: the amount to add to v...
|
|
|
|
* @u: ...unless v is equal to u.
|
|
|
|
*
|
|
|
|
* Atomically adds @a to @v, so long as it was not @u.
|
2011-07-27 06:09:07 +07:00
|
|
|
* Returns the old value of @v.
|
2005-11-14 07:07:25 +07:00
|
|
|
*/
|
2011-07-27 06:09:07 +07:00
|
|
|
/*
 * Atomically add @a to @v unless *v == @u; returns the OLD value of *v.
 *
 * The loop loads the counter, bails to label 2 if it equals @u, else
 * adds and store-conditionals.  On the success path the "subf" after
 * PPC_ATOMIC_EXIT_BARRIER undoes the add in the local register so the
 * pre-add value is returned; on the bail-out path the register still
 * holds the (unmodified) old value.  Entry/exit barriers give full
 * ordering; "memory" clobber makes it a compiler barrier.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
|
|
|
|
|
2012-03-01 04:12:16 +07:00
|
|
|
/**
|
|
|
|
* atomic_inc_not_zero - increment unless the number is zero
|
|
|
|
* @v: pointer of type atomic_t
|
|
|
|
*
|
|
|
|
* Atomically increments @v by 1, so long as @v is non-zero.
|
|
|
|
* Returns non-zero if @v was non-zero, and zero otherwise.
|
|
|
|
*/
|
|
|
|
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero (the old value, in t1) if @v was non-zero, and zero
 * otherwise.  Uses two temporaries: %0 (t1) holds the loaded old value
 * that is returned, %1 (t2) holds the incremented value that is stored,
 * so the return value is untouched by the increment.  Full ordering via
 * the entry/exit barriers; addic clobbers "xer" (carry).
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
/* Self-referential define tells generic code this arch provides it. */
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
|
2005-11-14 07:07:25 +07:00
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
|
|
|
|
#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Atomically test *v and decrement if it is greater than 0.
|
2007-01-17 23:50:20 +07:00
|
|
|
* The function returns the old value of *v minus 1, even if
|
|
|
|
* the atomic variable, v, was not decremented.
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
|
|
|
/*
 * Atomically test *v and decrement if it is greater than 0.
 * Returns the old value of *v minus 1, even if the atomic variable, v,
 * was not decremented (i.e. a negative result means no decrement took
 * place).  The compare against 1 with "blt- 2f" after the addi skips
 * the store for old values <= 0.  The "=&b" constraint forces a non-r0
 * register: addi would read a literal 0 instead of r0.  Fully ordered
 * via the entry/exit barriers.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
/* Advertise availability to <linux/atomic.h>. */
#define atomic_dec_if_positive atomic_dec_if_positive
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2005-11-10 11:51:14 +07:00
|
|
|
#ifdef __powerpc64__
|
|
|
|
|
|
|
|
#define ATOMIC64_INIT(i) { (i) }
|
|
|
|
|
2007-08-11 07:15:30 +07:00
|
|
|
/*
 * Atomically read the 64-bit v->counter (relaxed).  Single ld via
 * inline asm so the compiler cannot tear or re-fetch the load.
 */
static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}
|
|
|
|
|
|
|
|
/*
 * Atomically set the 64-bit v->counter to @i (relaxed).  Single std
 * via inline asm, mirroring atomic64_read().
 */
static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
|
2005-11-10 11:51:14 +07:00
|
|
|
|
2014-03-27 00:11:31 +07:00
|
|
|
/*
 * ATOMIC64_OP(op, asm_op) - generate void atomic64_##op(long a,
 * atomic64_t *v): 64-bit analogue of ATOMIC_OP(), using ldarx/stdcx.
 * instead of lwarx/stwcx.  No PPC405_ERR77 here — the 405 is a 32-bit
 * part, so the 64-bit path presumably never runs on it.
 */
#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}
|
|
|
|
|
powerpc: atomic: Implement atomic{, 64}_*_return_* variants
On powerpc, acquire and release semantics can be achieved with
lightweight barriers("lwsync" and "ctrl+isync"), which can be used to
implement __atomic_op_{acquire,release}.
For release semantics, since we only need to ensure all memory accesses
that issue before must take effects before the -store- part of the
atomics, "lwsync" is what we only need. On the platform without
"lwsync", "sync" should be used. Therefore in __atomic_op_release() we
use PPC_RELEASE_BARRIER.
For acquire semantics, "lwsync" is what we only need for the similar
reason. However on the platform without "lwsync", we can use "isync"
rather than "sync" as an acquire barrier. Therefore in
__atomic_op_acquire() we use PPC_ACQUIRE_BARRIER, which is barrier() on
UP, "lwsync" if available and "isync" otherwise.
Implement atomic{,64}_{add,sub,inc,dec}_return_relaxed, and build other
variants with these helpers.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-01-06 09:08:25 +07:00
|
|
|
/*
 * ATOMIC64_OP_RETURN_RELAXED(op, asm_op) - generate
 * long atomic64_##op##_return_relaxed(long a, atomic64_t *v):
 * 64-bit analogue of ATOMIC_OP_RETURN_RELAXED(), returning the new
 * counter value.  Relaxed ordering; stronger variants are built via
 * __atomic_op_{acquire,release}.
 */
#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}
|
|
|
|
|
powerpc: atomic: Implement atomic{, 64}_*_return_* variants
On powerpc, acquire and release semantics can be achieved with
lightweight barriers("lwsync" and "ctrl+isync"), which can be used to
implement __atomic_op_{acquire,release}.
For release semantics, since we only need to ensure all memory accesses
that issue before must take effects before the -store- part of the
atomics, "lwsync" is what we only need. On the platform without
"lwsync", "sync" should be used. Therefore in __atomic_op_release() we
use PPC_RELEASE_BARRIER.
For acquire semantics, "lwsync" is what we only need for the similar
reason. However on the platform without "lwsync", we can use "isync"
rather than "sync" as an acquire barrier. Therefore in
__atomic_op_acquire() we use PPC_ACQUIRE_BARRIER, which is barrier() on
UP, "lwsync" if available and "isync" otherwise.
Implement atomic{,64}_{add,sub,inc,dec}_return_relaxed, and build other
variants with these helpers.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-01-06 09:08:25 +07:00
|
|
|
/*
 * Instantiate both the void op and its _return_relaxed variant for a
 * given operation/mnemonic pair.
 */
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)
|
2005-11-10 11:51:14 +07:00
|
|
|
|
2014-03-27 00:11:31 +07:00
|
|
|
/*
 * Generate the 64-bit atomic operations.  sub uses the "subf"
 * (subtract-from) mnemonic since the macro computes asm_op %0,%2,%0.
 * and/or/xor get only the void form here.
 */
ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
ATOMIC64_OP(and, and)
ATOMIC64_OP(or, or)
ATOMIC64_OP(xor, xor)
|
2005-11-10 11:51:14 +07:00
|
|
|
|
powerpc: atomic: Implement atomic{, 64}_*_return_* variants
On powerpc, acquire and release semantics can be achieved with
lightweight barriers("lwsync" and "ctrl+isync"), which can be used to
implement __atomic_op_{acquire,release}.
For release semantics, since we only need to ensure all memory accesses
that issue before must take effects before the -store- part of the
atomics, "lwsync" is what we only need. On the platform without
"lwsync", "sync" should be used. Therefore in __atomic_op_release() we
use PPC_RELEASE_BARRIER.
For acquire semantics, "lwsync" is what we only need for the similar
reason. However on the platform without "lwsync", we can use "isync"
rather than "sync" as an acquire barrier. Therefore in
__atomic_op_acquire() we use PPC_ACQUIRE_BARRIER, which is barrier() on
UP, "lwsync" if available and "isync" otherwise.
Implement atomic{,64}_{add,sub,inc,dec}_return_relaxed, and build other
variants with these helpers.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-01-06 09:08:25 +07:00
|
|
|
/*
 * Advertise the _relaxed forms generated above; the fully ordered and
 * _acquire/_release variants are presumably built from these by the
 * generic __atomic_op_*() wrappers (see top of file) — confirm against
 * the generic atomic headers.
 */
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
|
|
|
|
|
|
|
|
/*
 * The helper macros are implementation details of this header; undefine
 * them so they do not leak into every file that includes it.
 *
 * Fix: the original read "#undef ATOPIC64_OPS" (typo), which undefined
 * nothing and left ATOMIC64_OPS visible to all includers.
 */
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP
|
2005-11-10 11:51:14 +07:00
|
|
|
|
2014-03-27 00:11:31 +07:00
|
|
|
/* True iff (@v + @a) is negative; fully ordered via atomic64_add_return(). */
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
|
2005-11-10 11:51:14 +07:00
|
|
|
|
|
|
|
/*
 * Atomically add 1 to v->counter using a ldarx/stdcx. retry loop.
 * Void form: no value is returned and no ordering barriers are emitted.
 */
static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;		/* scratch: holds the loaded, then incremented value */

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");	/* "xer" because addic updates the carry (XER[CA]) */
}
|
|
|
|
|
powerpc: atomic: Implement atomic{, 64}_*_return_* variants
On powerpc, acquire and release semantics can be achieved with
lightweight barriers("lwsync" and "ctrl+isync"), which can be used to
implement __atomic_op_{acquire,release}.
For release semantics, since we only need to ensure all memory accesses
that issue before must take effects before the -store- part of the
atomics, "lwsync" is what we only need. On the platform without
"lwsync", "sync" should be used. Therefore in __atomic_op_release() we
use PPC_RELEASE_BARRIER.
For acquire semantics, "lwsync" is what we only need for the similar
reason. However on the platform without "lwsync", we can use "isync"
rather than "sync" as an acquire barrier. Therefore in
__atomic_op_acquire() we use PPC_ACQUIRE_BARRIER, which is barrier() on
UP, "lwsync" if available and "isync" otherwise.
Implement atomic{,64}_{add,sub,inc,dec}_return_relaxed, and build other
variants with these helpers.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-01-06 09:08:25 +07:00
|
|
|
/*
 * Increment v->counter and return the new value, with relaxed ordering
 * (no barriers here; stronger orderings are layered on by the
 * __atomic_op_*() wrappers — see the note at the top of this file).
 */
static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;		/* new value produced by the ll/sc loop */

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");	/* addic updates XER[CA], hence the "xer" clobber */

	return t;
}
|
|
|
|
|
|
|
|
/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1 and returns true if the result is
 * zero, or false for all other cases.  Fully ordered, since it is
 * built on atomic64_inc_return().
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
|
|
|
|
|
|
|
|
/*
 * Atomically subtract 1 from v->counter using a ldarx/stdcx. retry
 * loop.  Void form: no value is returned and no barriers are emitted.
 */
static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;		/* scratch: holds the loaded, then decremented value */

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");	/* "xer" because addic updates the carry (XER[CA]) */
}
|
|
|
|
|
powerpc: atomic: Implement atomic{, 64}_*_return_* variants
On powerpc, acquire and release semantics can be achieved with
lightweight barriers("lwsync" and "ctrl+isync"), which can be used to
implement __atomic_op_{acquire,release}.
For release semantics, since we only need to ensure all memory accesses
that issue before must take effects before the -store- part of the
atomics, "lwsync" is what we only need. On the platform without
"lwsync", "sync" should be used. Therefore in __atomic_op_release() we
use PPC_RELEASE_BARRIER.
For acquire semantics, "lwsync" is what we only need for the similar
reason. However on the platform without "lwsync", we can use "isync"
rather than "sync" as an acquire barrier. Therefore in
__atomic_op_acquire() we use PPC_ACQUIRE_BARRIER, which is barrier() on
UP, "lwsync" if available and "isync" otherwise.
Implement atomic{,64}_{add,sub,inc,dec}_return_relaxed, and build other
variants with these helpers.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-01-06 09:08:25 +07:00
|
|
|
/*
 * Decrement v->counter and return the new value, with relaxed ordering
 * (no barriers here; stronger orderings are layered on by the
 * __atomic_op_*() wrappers — see the note at the top of this file).
 */
static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;		/* new value produced by the ll/sc loop */

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");	/* addic updates XER[CA], hence the "xer" clobber */

	return t;
}
|
|
|
|
|
powerpc: atomic: Implement atomic{, 64}_*_return_* variants
On powerpc, acquire and release semantics can be achieved with
lightweight barriers("lwsync" and "ctrl+isync"), which can be used to
implement __atomic_op_{acquire,release}.
For release semantics, since we only need to ensure all memory accesses
that issue before must take effects before the -store- part of the
atomics, "lwsync" is what we only need. On the platform without
"lwsync", "sync" should be used. Therefore in __atomic_op_release() we
use PPC_RELEASE_BARRIER.
For acquire semantics, "lwsync" is what we only need for the similar
reason. However on the platform without "lwsync", we can use "isync"
rather than "sync" as an acquire barrier. Therefore in
__atomic_op_acquire() we use PPC_ACQUIRE_BARRIER, which is barrier() on
UP, "lwsync" if available and "isync" otherwise.
Implement atomic{,64}_{add,sub,inc,dec}_return_relaxed, and build other
variants with these helpers.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-01-06 09:08:25 +07:00
|
|
|
/*
 * Advertise the relaxed inc/dec return variants defined above so the
 * generic headers pick them up instead of synthesizing their own.
 */
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
|
|
|
|
|
2005-11-10 11:51:14 +07:00
|
|
|
/* Truth-value helpers built on the fully ordered *_return forms. */
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
|
|
|
|
|
|
|
|
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1 (so a negative
 * return means no decrement took place).  Fully ordered via the
 * explicit entry/exit barriers.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:" : "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");
	/* addic. sets CR0 from the result; blt- skips the store when the
	 * decremented value went negative, leaving *v untouched. */

	return t;
}
|
|
|
|
|
2007-05-08 14:34:27 +07:00
|
|
|
/*
 * cmpxchg/xchg wrappers for atomic64_t, expressed via the generic
 * {cmp}xchg helpers on the underlying counter.  The plain forms are
 * fully ordered; _acquire adds acquire semantics only, and _relaxed
 * adds no ordering (and is not a compiler barrier).
 */
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
|
2007-01-25 23:15:52 +07:00
|
|
|
|
|
|
|
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns non-zero if the add was performed (old value != @u), zero
 * otherwise — note this is a truth value, not the old value (the code
 * returns "t != u").
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# __atomic_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");
	/* On the success path t holds old+a after the store; the trailing
	 * subf recovers the old value so the t != u test below is true.
	 * On the equal path (beq) t == u and the result is false. */

	return t != u;
}
|
|
|
|
|
2012-03-01 04:12:16 +07:00
|
|
|
/**
|
|
|
|
* atomic_inc64_not_zero - increment unless the number is zero
|
|
|
|
* @v: pointer of type atomic64_t
|
|
|
|
*
|
|
|
|
* Atomically increments @v by 1, so long as @v is non-zero.
|
|
|
|
* Returns non-zero if @v was non-zero, and zero otherwise.
|
|
|
|
*/
|
|
|
|
static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
|
|
|
|
{
|
|
|
|
long t1, t2;
|
|
|
|
|
|
|
|
__asm__ __volatile__ (
|
|
|
|
PPC_ATOMIC_ENTRY_BARRIER
|
|
|
|
"1: ldarx %0,0,%2 # atomic64_inc_not_zero\n\
|
|
|
|
cmpdi 0,%0,0\n\
|
|
|
|
beq- 2f\n\
|
|
|
|
addic %1,%0,1\n\
|
|
|
|
stdcx. %1,0,%2\n\
|
|
|
|
bne- 1b\n"
|
|
|
|
PPC_ATOMIC_EXIT_BARRIER
|
|
|
|
"\n\
|
|
|
|
2:"
|
|
|
|
: "=&r" (t1), "=&r" (t2)
|
|
|
|
: "r" (&v->counter)
|
|
|
|
: "cc", "xer", "memory");
|
|
|
|
|
|
|
|
return t1;
|
|
|
|
}
|
2007-01-25 23:15:52 +07:00
|
|
|
|
2005-11-10 11:51:14 +07:00
|
|
|
#endif /* __powerpc64__ */
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
#endif /* __KERNEL__ */
|
2005-09-23 02:20:04 +07:00
|
|
|
#endif /* _ASM_POWERPC_ATOMIC_H_ */
|