/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	READ_ONCE((v)->counter)
#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */

/*
 * ATOMIC_OP() expands to atomic_<op>(): a read-modify-write of
 * v->counter via an ldrex/strex retry loop. No return value and no
 * barriers in the asm itself.
 */
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	/* bring the line in for writing before the exclusive pair */	\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\
|
|
|
|
|
|
|
|
/*
 * ATOMIC_OP_RETURN() expands to atomic_<op>_return_relaxed(): same
 * ldrex/strex loop as ATOMIC_OP() but returns the new counter value.
 * The asm contains no barriers, hence the _relaxed suffix; stronger
 * orderings are layered on by the generic atomic code.
 */
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}
|
|
|
|
|
/*
 * ATOMIC_FETCH_OP() expands to atomic_fetch_<op>_relaxed(): like the
 * _return variant but returns the value of the counter *before* the
 * modification (the new value is computed into a scratch register,
 * %1/val, and only stored).
 */
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}
|
|
|
|
|
2015-08-06 23:54:44 +07:00
|
|
|
/*
 * Advertise the native _relaxed implementations to the generic atomic
 * layer (<linux/atomic.h>), which otherwise would synthesize them from
 * the fully-ordered variants.
 */
#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
|
2015-08-06 23:54:44 +07:00
|
|
|
|
|
|
|
/*
 * atomic_cmpxchg_relaxed() - compare-and-swap on v->counter.
 *
 * Returns the value observed in the counter: equal to @old on success,
 * the conflicting value on failure. Loops only while the strexeq fails
 * spuriously (res != 0); a genuine mismatch stores res = 0 via the
 * "mov %0, #0" and exits with the observed value. No barriers in the
 * asm, hence _relaxed.
 */
static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
|
2005-11-14 07:07:24 +07:00
|
|
|
|
atomics/treewide: Rename __atomic_add_unless() => atomic_fetch_add_unless()
While __atomic_add_unless() was originally intended as a building-block
for atomic_add_unless(), it's now used in a number of places around the
kernel. It's the only common atomic operation named __atomic*(), rather
than atomic_*(), and for consistency it would be better named
atomic_fetch_add_unless().
This lack of consistency is slightly confusing, and gets in the way of
scripting atomics. Given that, let's clean things up and promote it to
an official part of the atomics API, in the form of
atomic_fetch_add_unless().
This patch converts definitions and invocations over to the new name,
including the instrumented version, using the following script:
----
git grep -w __atomic_add_unless | while read line; do
sed -i '{s/\<__atomic_add_unless\>/atomic_fetch_add_unless/}' "${line%%:*}";
done
git grep -w __arch_atomic_add_unless | while read line; do
sed -i '{s/\<__arch_atomic_add_unless\>/arch_atomic_fetch_add_unless/}' "${line%%:*}";
done
----
Note that we do not have atomic{64,_long}_fetch_add_unless(), which will
be introduced by later patches.
There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Palmer Dabbelt <palmer@sifive.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/lkml/20180621121321.4761-2-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2018-06-21 19:13:04 +07:00
|
|
|
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
|
2014-02-21 23:01:48 +07:00
|
|
|
{
|
|
|
|
int oldval, newval;
|
|
|
|
unsigned long tmp;
|
|
|
|
|
|
|
|
smp_mb();
|
|
|
|
prefetchw(&v->counter);
|
|
|
|
|
|
|
|
__asm__ __volatile__ ("@ atomic_add_unless\n"
|
|
|
|
"1: ldrex %0, [%4]\n"
|
|
|
|
" teq %0, %5\n"
|
|
|
|
" beq 2f\n"
|
|
|
|
" add %1, %0, %6\n"
|
|
|
|
" strex %2, %1, [%4]\n"
|
|
|
|
" teq %2, #0\n"
|
|
|
|
" bne 1b\n"
|
|
|
|
"2:"
|
|
|
|
: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
|
|
|
|
: "r" (&v->counter), "r" (u), "r" (a)
|
|
|
|
: "cc");
|
|
|
|
|
|
|
|
if (oldval != u)
|
|
|
|
smp_mb();
|
|
|
|
|
|
|
|
return oldval;
|
|
|
|
}
|
2018-06-21 19:13:09 +07:00
|
|
|
#define atomic_fetch_add_unless atomic_fetch_add_unless
|
2014-02-21 23:01:48 +07:00
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * Pre-ARMv6 (UP only): no exclusive loads/stores, so atomicity is
 * obtained by disabling interrupts around a plain C read-modify-write.
 */
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\
|
|
|
|
|
|
|
|
/*
 * Pre-ARMv6 atomic_<op>_return(): IRQ-disabled RMW that returns the
 * new value (read back after the update, still under the IRQ window).
 */
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}
|
|
|
|
|
/*
 * Pre-ARMv6 atomic_fetch_<op>(): IRQ-disabled RMW that returns the
 * value *before* the modification (captured before applying c_op).
 */
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}
|
|
|
|
|
2005-11-14 07:07:24 +07:00
|
|
|
/*
 * Pre-ARMv6 atomic_cmpxchg(): compare-and-swap under an IRQ-disabled
 * window. Returns the value observed before any store.
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

#define atomic_fetch_andnot		atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */
|
|
|
|
|
2014-03-23 22:38:18 +07:00
|
|
|
/* Instantiate the void, _return and fetch_ variants of one op. */
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)
|
2014-03-23 22:38:18 +07:00
|
|
|
|
|
|
|
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

/*
 * Bitwise ops get no *_return variant, only the void and fetch_ forms,
 * so redefine ATOMIC_OPS() without ATOMIC_OP_RETURN().
 */
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or,  |=, orr)
ATOMIC_OPS(xor, ^=, eor)
|
2014-04-24 01:04:39 +07:00
|
|
|
|
2014-03-23 22:38:18 +07:00
|
|
|
/* The generator macros are file-internal; drop them after use. */
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
|
|
|
|
|
2010-01-21 01:05:07 +07:00
|
|
|
#ifndef CONFIG_GENERIC_ATOMIC64
/* Native 64-bit atomic type (when not using the generic fallback). */
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }
|
|
|
|
|
2013-03-28 17:25:03 +07:00
|
|
|
#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, plain doubleword accesses (ldrd/strd) are used for
 * atomic64_read()/atomic64_set(); no exclusive loop is required here.
 * %H0 names the high register of the 64-bit register pair.
 */
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
|
|
|
|
#else
|
2013-10-26 21:07:04 +07:00
|
|
|
static inline long long atomic64_read(const atomic64_t *v)
|
2010-01-21 01:05:07 +07:00
|
|
|
{
|
2013-10-26 21:07:04 +07:00
|
|
|
long long result;
|
2010-01-21 01:05:07 +07:00
|
|
|
|
|
|
|
__asm__ __volatile__("@ atomic64_read\n"
|
|
|
|
" ldrexd %0, %H0, [%1]"
|
|
|
|
: "=&r" (result)
|
ARM: 6212/1: atomic ops: add memory constraints to inline asm
Currently, the 32-bit and 64-bit atomic operations on ARM do not
include memory constraints in the inline assembly blocks. In the
case of barrier-less operations [for example, atomic_add], this
means that the compiler may constant fold values which have actually
been modified by a call to an atomic operation.
This issue can be observed in the atomic64_test routine in
<kernel root>/lib/atomic64_test.c:
00000000 <test_atomic64>:
0: e1a0c00d mov ip, sp
4: e92dd830 push {r4, r5, fp, ip, lr, pc}
8: e24cb004 sub fp, ip, #4
c: e24dd008 sub sp, sp, #8
10: e24b3014 sub r3, fp, #20
14: e30d000d movw r0, #53261 ; 0xd00d
18: e3011337 movw r1, #4919 ; 0x1337
1c: e34c0001 movt r0, #49153 ; 0xc001
20: e34a1aa3 movt r1, #43683 ; 0xaaa3
24: e16300f8 strd r0, [r3, #-8]!
28: e30c0afe movw r0, #51966 ; 0xcafe
2c: e30b1eef movw r1, #48879 ; 0xbeef
30: e34d0eaf movt r0, #57007 ; 0xdeaf
34: e34d1ead movt r1, #57005 ; 0xdead
38: e1b34f9f ldrexd r4, [r3]
3c: e1a34f90 strexd r4, r0, [r3]
40: e3340000 teq r4, #0
44: 1afffffb bne 38 <test_atomic64+0x38>
48: e59f0004 ldr r0, [pc, #4] ; 54 <test_atomic64+0x54>
4c: e3a0101e mov r1, #30
50: ebfffffe bl 0 <__bug>
54: 00000000 .word 0x00000000
The atomic64_set (0x38-0x44) writes to the atomic64_t, but the
compiler doesn't see this, assumes the test condition is always
false and generates an unconditional branch to __bug. The rest of the
test is optimised away.
This patch adds suitable memory constraints to the atomic operations on ARM
to ensure that the compiler is informed of the correct data hazards. We have
to use the "Qo" constraints to avoid hitting the GCC anomaly described at
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=44492 , where the compiler
makes assumptions about the writeback in the addressing mode used by the
inline assembly. These constraints forbid the use of auto{inc,dec} addressing
modes, so it doesn't matter if we don't use the operand exactly once.
Cc: stable@kernel.org
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2010-07-08 16:59:16 +07:00
|
|
|
: "r" (&v->counter), "Qo" (v->counter)
|
2010-01-21 01:05:07 +07:00
|
|
|
);
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2013-10-26 21:07:04 +07:00
|
|
|
/*
 * Non-LPAE atomic64_set(): a 64-bit store is only atomic as the store
 * half of an exclusive pair, so load-exclusive then store-exclusive,
 * retrying until the strexd succeeds (tmp == 0).
 */
static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif
|
2010-01-21 01:05:07 +07:00
|
|
|
|
2014-03-23 22:38:18 +07:00
|
|
|
/*
 * ATOMIC64_OP() expands to atomic64_<op>(): ldrexd/strexd retry loop.
 * The 64-bit operation is built from two 32-bit instructions: op1 on
 * the low words (%Q registers), op2 on the high words (%R registers) -
 * e.g. adds/adc for add. No return value, no barriers in the asm.
 */
#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\
|
|
|
|
|
|
|
|
/*
 * ATOMIC64_OP_RETURN() expands to atomic64_<op>_return_relaxed():
 * same ldrexd/strexd loop as ATOMIC64_OP() but returns the new value.
 * No barriers in the asm, hence _relaxed.
 */
#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long						\
atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}
|
|
|
|
|
locking/atomic, arch/arm: Implement atomic{,64}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
Implement FETCH-OP atomic primitives, these are very similar to the
existing OP-RETURN primitives we already have, except they return the
value of the atomic variable _before_ modification.
This is especially useful for irreversible operations -- such as
bitops (because it becomes impossible to reconstruct the state prior
to modification).
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2016-04-18 06:10:52 +07:00
|
|
|
/*
 * Generate atomic64_fetch_<op>_relaxed(): atomically apply the 64-bit
 * operation and return the OLD value (the value before modification).
 *
 * Unlike ATOMIC64_OP_RETURN, the updated value is built in a separate
 * register pair ('val', %1) while 'result' (%0) preserves the value
 * loaded by ldrexd; only 'val' is written back with strexd.  'tmp'
 * (%2) receives the strexd status and drives the retry loop.
 * No barriers: this is the _relaxed form.
 */
#define ATOMIC64_FETCH_OP(op, op1, op2)				\
static inline long long \
atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v)	\
{									\
	long long result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);					\
									\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}
|
|
|
|
|
2014-03-23 22:38:18 +07:00
|
|
|
/*
 * Full generator set for ops that have all three forms: the plain
 * void op, the op-return variant and the fetch-op variant.
 * Used below for add/sub; redefined later (without OP_RETURN) for
 * the bitwise ops.
 */
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)
|
2010-01-21 01:05:07 +07:00
|
|
|
|
2014-03-23 22:38:18 +07:00
|
|
|
/* adds/adc and subs/sbc propagate the carry/borrow from low to high word. */
ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
|
2010-01-21 01:05:07 +07:00
|
|
|
|
2015-08-06 23:54:44 +07:00
|
|
|
/*
 * Advertise the _relaxed implementations to the generic atomic
 * machinery (self-referential defines are the detection convention),
 * so the _acquire/_release/fully-ordered variants are generated from
 * them rather than falling back to defaults.
 */
#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
|
|
|
|
|
|
|
|
#undef ATOMIC64_OPS
/*
 * The bitwise ops provide only the plain and fetch-op forms; there is
 * no atomic64_<bitop>_return API, so OP_RETURN is dropped here.
 */
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)
|
2015-08-06 23:54:44 +07:00
|
|
|
|
2014-04-24 01:04:39 +07:00
|
|
|
/* Flag that this architecture supplies atomic64_andnot natively. */
#define atomic64_andnot atomic64_andnot
|
|
|
|
|
locking/atomic, arch/arm: Implement atomic{,64}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
Implement FETCH-OP atomic primitives, these are very similar to the
existing OP-RETURN primitives we already have, except they return the
value of the atomic variable _before_ modification.
This is especially useful for irreversible operations -- such as
bitops (because it becomes impossible to reconstruct the state prior
to modification).
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2016-04-18 06:10:52 +07:00
|
|
|
/* Same instruction for both words: bic = AND NOT, orr = OR, eor = XOR. */
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)
|
|
|
|
|
|
|
|
/* Advertise the bitwise fetch-op _relaxed forms to the generic layer. */
#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
|
2014-04-24 01:04:39 +07:00
|
|
|
|
2014-03-23 22:38:18 +07:00
|
|
|
/* All instantiations done; retire the generator macros. */
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
|
2010-01-21 01:05:07 +07:00
|
|
|
|
2015-08-06 23:54:44 +07:00
|
|
|
/*
 * Atomically: if *ptr == old, store new.  Returns the value actually
 * read from *ptr; callers compare it against 'old' to detect success.
 * _relaxed: no memory barriers.
 *
 * The store is made conditional inside the asm: the teq/teqeq pair
 * compares both 32-bit halves, and strexdeq only executes when both
 * matched.  'res' is pre-set to 0 by the mov, so on a compare
 * mismatch (store never attempted) the do/while exits immediately;
 * it only loops when strexdeq ran and lost the exclusive reservation
 * (res == 1).
 */
static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
	long long oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed
|
2010-01-21 01:05:07 +07:00
|
|
|
|
2015-08-06 23:54:44 +07:00
|
|
|
/*
 * Atomically replace *ptr with 'new' and return the previous value.
 * _relaxed: no memory barriers.  The ldrexd/strexd loop retries until
 * the exclusive store succeeds (tmp == 0).
 */
static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define atomic64_xchg_relaxed		atomic64_xchg_relaxed
|
2010-01-21 01:05:07 +07:00
|
|
|
|
2013-10-26 21:07:04 +07:00
|
|
|
/*
 * Atomically decrement v->counter unless the result would be negative.
 * Returns the decremented value in all cases; a negative return means
 * the store was skipped and *v is unchanged.
 *
 * subs/sbc performs the 64-bit decrement with borrow; teq on the high
 * word sets the N flag from its sign bit, so bmi skips the strexd
 * (jumping to label 2) when the decrement went below zero.  Otherwise
 * the strexd/teq/bne sequence retries until the exclusive store
 * succeeds.
 *
 * This is a fully-ordered operation: smp_mb() on both sides of the
 * asm (no _relaxed variant is provided for it).
 */
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
|
2010-01-21 01:05:07 +07:00
|
|
|
|
2018-06-21 19:13:14 +07:00
|
|
|
/*
 * Atomically add 'a' to *v unless *v == 'u'.  Returns the value of *v
 * prior to the (possible) addition; callers test oldval != u to see
 * whether the add happened.
 *
 * teq/teqeq compares both halves of the loaded value against 'u'; on
 * a match, beq jumps to label 2 and nothing is stored.  Otherwise the
 * 64-bit sum is built in 'newval' (adds/adc) and strexd retries until
 * the exclusive store succeeds.
 *
 * Ordering: a full barrier before the asm unconditionally, but the
 * trailing barrier is issued only when the counter was actually
 * modified (oldval != u) — the no-op path needs no ordering.
 */
static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
						  long long u)
{
	long long oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	beq	2f\n"
"	adds	%Q1, %Q0, %Q6\n"
"	adc	%R1, %R0, %R6\n"
"	strexd	%2, %1, %H1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
|
2010-01-21 01:05:07 +07:00
|
|
|
|
2011-07-27 06:09:08 +07:00
|
|
|
#endif /* !CONFIG_GENERIC_ATOMIC64 */
|
2005-04-17 05:20:36 +07:00
|
|
|
#endif
|
|
|
|
#endif
|