mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
commit 398aa66827:
Currently, the 32-bit and 64-bit atomic operations on ARM do not include memory constraints in the inline assembly blocks. In the case of barrier-less operations [for example, atomic_add], this means that the compiler may constant fold values which have actually been modified by a call to an atomic operation.

This issue can be observed in the atomic64_test routine in <kernel root>/lib/atomic64_test.c:

00000000 <test_atomic64>:
   0:   e1a0c00d        mov     ip, sp
   4:   e92dd830        push    {r4, r5, fp, ip, lr, pc}
   8:   e24cb004        sub     fp, ip, #4
   c:   e24dd008        sub     sp, sp, #8
  10:   e24b3014        sub     r3, fp, #20
  14:   e30d000d        movw    r0, #53261      ; 0xd00d
  18:   e3011337        movw    r1, #4919       ; 0x1337
  1c:   e34c0001        movt    r0, #49153      ; 0xc001
  20:   e34a1aa3        movt    r1, #43683      ; 0xaaa3
  24:   e16300f8        strd    r0, [r3, #-8]!
  28:   e30c0afe        movw    r0, #51966      ; 0xcafe
  2c:   e30b1eef        movw    r1, #48879      ; 0xbeef
  30:   e34d0eaf        movt    r0, #57007      ; 0xdeaf
  34:   e34d1ead        movt    r1, #57005      ; 0xdead
  38:   e1b34f9f        ldrexd  r4, [r3]
  3c:   e1a34f90        strexd  r4, r0, [r3]
  40:   e3340000        teq     r4, #0
  44:   1afffffb        bne     38 <test_atomic64+0x38>
  48:   e59f0004        ldr     r0, [pc, #4]    ; 54 <test_atomic64+0x54>
  4c:   e3a0101e        mov     r1, #30
  50:   ebfffffe        bl      0 <__bug>
  54:   00000000        .word   0x00000000

The atomic64_set (0x38-0x44) writes to the atomic64_t, but the compiler doesn't see this, assumes the test condition is always false and generates an unconditional branch to __bug. The rest of the test is optimised away.

This patch adds suitable memory constraints to the atomic operations on ARM to ensure that the compiler is informed of the correct data hazards. We have to use the "Qo" constraints to avoid hitting the GCC anomaly described at http://gcc.gnu.org/bugzilla/show_bug.cgi?id=44492 , where the compiler makes assumptions about the writeback in the addressing mode used by the inline assembly. These constraints forbid the use of auto{inc,dec} addressing modes, so it doesn't matter if we don't use the operand exactly once.

Cc: stable@kernel.org
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
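For reference, the fix amounts to listing the counter itself as an input/output memory operand of each asm block. A minimal before/after sketch of the constraint lists, simplified from the atomic_add routine in the file below (illustrative only, not quoted verbatim from the patch):

        /* before: only the address is passed in, so GCC is free to assume
         * the counter value is unchanged by the asm and to constant fold
         * reads around it */
        __asm__ __volatile__("..."
                : "=&r" (result), "=&r" (tmp)
                : "r" (&v->counter), "Ir" (i)
                : "cc");

        /* after: "+Qo" (v->counter) makes the counter a read/write memory
         * operand, so the compiler must treat the asm as both consuming
         * and clobbering it */
        __asm__ __volatile__("..."
                : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
                : "r" (&v->counter), "Ir" (i)
                : "cc");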
469 lines
9.5 KiB
C
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i) { (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
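
/*
 * If the plain store were not safe, atomic_set() would need the same
 * exclusive-monitor retry sequence as the other operations, e.g.
 * (illustrative sketch only, not code used by this header):
 *
 *      1:      ldrex   r1, [r0]        @ claim the exclusive monitor
 *              strex   r1, r2, [r0]    @ attempt to store the new value
 *              teq     r1, #0
 *              bne     1b              @ lost the monitor, retry
 *
 * The clrex/dummy strex on exception return is what makes this
 * unnecessary for atomic_set().
 */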

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */
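
/*
 * Every routine below follows the same retry pattern, roughly equivalent
 * to the following pseudocode (sketch only; load_exclusive/store_exclusive
 * are illustrative names, not real helpers):
 *
 *      do {
 *              old = load_exclusive(&v->counter);
 *              failed = store_exclusive(old + i, &v->counter);
 *      } while (failed);
 *
 * strex reports failure if the exclusive monitor was lost between the
 * ldrex and the strex, in which case the update is retried.
 */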
static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_add\n"
"1:     ldrex   %0, [%3]\n"
"       add     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        smp_mb();

        __asm__ __volatile__("@ atomic_add_return\n"
"1:     ldrex   %0, [%3]\n"
"       add     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        smp_mb();

        return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_sub\n"
"1:     ldrex   %0, [%3]\n"
"       sub     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        smp_mb();

        __asm__ __volatile__("@ atomic_sub_return\n"
"1:     ldrex   %0, [%3]\n"
"       sub     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        smp_mb();

        return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
        unsigned long oldval, res;

        smp_mb();

        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
                "ldrex  %1, [%3]\n"
                "mov    %0, #0\n"
                "teq    %1, %4\n"
                "strexeq %0, %5, [%3]\n"
                    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
                    : "cc");
        } while (res);

        smp_mb();

        return oldval;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__("@ atomic_clear_mask\n"
"1:     ldrex   %0, [%3]\n"
"       bic     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
        : "r" (addr), "Ir" (mask)
        : "cc");
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;
        int val;

        raw_local_irq_save(flags);
        val = v->counter;
        v->counter = val += i;
        raw_local_irq_restore(flags);

        return val;
}
#define atomic_add(i, v)        (void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long flags;
        int val;

        raw_local_irq_save(flags);
        val = v->counter;
        v->counter = val -= i;
        raw_local_irq_restore(flags);

        return val;
}
#define atomic_sub(i, v)        (void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        raw_local_irq_save(flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        raw_local_irq_restore(flags);

        return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long flags;

        raw_local_irq_save(flags);
        *addr &= ~mask;
        raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
                c = old;
        return c != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc(v)           atomic_add(1, v)
#define atomic_dec(v)           atomic_sub(1, v)

#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
        u64 __aligned(8) counter;
} atomic64_t;
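
/*
 * Note: ldrexd/strexd require a 64-bit aligned address; the explicit
 * __aligned(8) on the counter guarantees that alignment.
 */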

#define ATOMIC64_INIT(i) { (i) }

static inline u64 atomic64_read(atomic64_t *v)
{
        u64 result;

        __asm__ __volatile__("@ atomic64_read\n"
"       ldrexd  %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

static inline void atomic64_set(atomic64_t *v, u64 i)
{
        u64 tmp;

        __asm__ __volatile__("@ atomic64_set\n"
"1:     ldrexd  %0, %H0, [%2]\n"
"       strexd  %0, %3, %H3, [%2]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}

static inline void atomic64_add(u64 i, atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        __asm__ __volatile__("@ atomic64_add\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       adds    %0, %0, %4\n"
"       adc     %H0, %H0, %H4\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}

static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_add_return\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       adds    %0, %0, %4\n"
"       adc     %H0, %H0, %H4\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");

        smp_mb();

        return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        __asm__ __volatile__("@ atomic64_sub\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       subs    %0, %0, %4\n"
"       sbc     %H0, %H0, %H4\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}

static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_sub_return\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       subs    %0, %0, %4\n"
"       sbc     %H0, %H0, %H4\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");

        smp_mb();

        return result;
}

static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
        u64 oldval;
        unsigned long res;

        smp_mb();

        do {
                __asm__ __volatile__("@ atomic64_cmpxchg\n"
                "ldrexd         %1, %H1, [%3]\n"
                "mov            %0, #0\n"
                "teq            %1, %4\n"
                "teqeq          %H1, %H4\n"
                "strexdeq       %0, %5, %H5, [%3]"
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "r" (old), "r" (new)
                : "cc");
        } while (res);

        smp_mb();

        return oldval;
}

static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
        u64 result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_xchg\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       strexd  %1, %4, %H4, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
        : "r" (&ptr->counter), "r" (new)
        : "cc");

        smp_mb();

        return result;
}

static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       subs    %0, %0, #1\n"
"       sbc     %H0, %H0, #0\n"
"       teq     %H0, #0\n"
"       bmi     2f\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter)
        : "cc");

        smp_mb();

        return result;
}

static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
        u64 val;
        unsigned long tmp;
        int ret = 1;

        smp_mb();

        __asm__ __volatile__("@ atomic64_add_unless\n"
"1:     ldrexd  %0, %H0, [%4]\n"
"       teq     %0, %5\n"
"       teqeq   %H0, %H5\n"
"       moveq   %1, #0\n"
"       beq     2f\n"
"       adds    %0, %0, %6\n"
"       adc     %H0, %H0, %H6\n"
"       strexd  %2, %0, %H0, [%4]\n"
"       teq     %2, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (ret)
                smp_mb();

        return ret;
}

#define atomic64_add_negative(a, v)     (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)                 atomic64_add(1LL, (v))
#define atomic64_inc_return(v)          atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)        (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)     (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)                 atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)          atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)        (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1LL, 0LL)

#else /* !CONFIG_GENERIC_ATOMIC64 */
#include <asm-generic/atomic64.h>
#endif

#include <asm-generic/atomic-long.h>
#endif
#endif