linux_dsm_epyc7002/arch/alpha/include/asm/cmpxchg.h
Andrea Parri fbfcd01991 locking/xchg/alpha: Remove superfluous memory barriers from the _local() variants
The following two commits:

  79d442461d ("locking/xchg/alpha: Clean up barrier usage by using smp_mb() in place of __ASM__MB")
  472e8c55cf ("locking/xchg/alpha: Fix xchg() and cmpxchg() memory ordering bugs")

... ended up adding unnecessary barriers to the _local() variants on Alpha,
which the previous code took care to avoid.

Fix them by adding the smp_mb() into the cmpxchg() macro rather than into the
____cmpxchg() variants.
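
To illustrate the resulting shape (a sketch, not the literal diff): the
barriers now live only in the fully ordered wrappers,

  #define cmpxchg(ptr, o, n)
  ({
          ...
          smp_mb();
          __ret = ... __cmpxchg(...);
          smp_mb();
          ...
  })

while cmpxchg_local() expands to a bare __cmpxchg_local() call with no
barriers at all.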

Reported-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrea Parri <parri.andrea@gmail.com>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-alpha@vger.kernel.org
Fixes: 472e8c55cf ("locking/xchg/alpha: Fix xchg() and cmpxchg() memory ordering bugs")
Fixes: 79d442461d ("locking/xchg/alpha: Clean up barrier usage by using smp_mb() in place of __ASM__MB")
Link: http://lkml.kernel.org/r/1519704058-13430-1-git-send-email-parri.andrea@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2018-03-12 10:59:03 +01:00

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_CMPXCHG_H
#define _ALPHA_CMPXCHG_H

/*
 * Atomic exchange routines.
 */
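
/*
 * <asm/xchg.h> is used as a template: the names of the routines it
 * defines are produced by the ____xchg()/____cmpxchg() macros.  This
 * first inclusion generates the barrier-free __xchg_local() and
 * __cmpxchg_local() family.
 */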
#define ____xchg(type, args...) __xchg ## type ## _local(args)
#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
#include <asm/xchg.h>

#define xchg_local(ptr, x) \
({ \
        __typeof__(*(ptr)) _x_ = (x); \
        (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \
                                          sizeof(*(ptr))); \
})

#define cmpxchg_local(ptr, o, n) \
({ \
        __typeof__(*(ptr)) _o_ = (o); \
        __typeof__(*(ptr)) _n_ = (n); \
        (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
                                             (unsigned long)_n_, \
                                             sizeof(*(ptr))); \
})

#define cmpxchg64_local(ptr, o, n) \
({ \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
        cmpxchg_local((ptr), (o), (n)); \
})
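
/*
 * Illustrative use (a sketch, not part of the original file): the _local()
 * forms are atomic but impose no cross-CPU ordering, which suffices for
 * data touched by a single CPU, e.g. a hypothetical per-CPU counter:
 *
 *	long old;
 *	do {
 *		old = READ_ONCE(*counter);
 *	} while (cmpxchg_local(counter, old, old + 1) != old);
 */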

#undef ____xchg
#undef ____cmpxchg
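
/*
 * Second expansion of the template: this time ____xchg()/____cmpxchg()
 * map to the plain names, generating the __xchg()/__cmpxchg() routines
 * used by the fully ordered macros below.
 */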
#define ____xchg(type, args...) __xchg ##type(args)
#define ____cmpxchg(type, args...) __cmpxchg ##type(args)
#include <asm/xchg.h>

/*
 * The leading and the trailing memory barriers guarantee that these
 * operations are fully ordered.
 */
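/*
 * On Alpha the ll/sc sequence alone does not order surrounding accesses:
 * the smp_mb() before the operation orders it against earlier accesses,
 * the smp_mb() after it against later ones, giving the full ordering the
 * kernel requires of a successful xchg()/cmpxchg().
 */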
#define xchg(ptr, x) \
({ \
        __typeof__(*(ptr)) __ret; \
        __typeof__(*(ptr)) _x_ = (x); \
        smp_mb(); \
        __ret = (__typeof__(*(ptr))) \
                __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
        smp_mb(); \
        __ret; \
})

#define cmpxchg(ptr, o, n) \
({ \
        __typeof__(*(ptr)) __ret; \
        __typeof__(*(ptr)) _o_ = (o); \
        __typeof__(*(ptr)) _n_ = (n); \
        smp_mb(); \
        __ret = (__typeof__(*(ptr))) __cmpxchg((ptr), \
                (unsigned long)_o_, (unsigned long)_n_, sizeof(*(ptr))); \
        smp_mb(); \
        __ret; \
})

#define cmpxchg64(ptr, o, n) \
({ \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
        cmpxchg((ptr), (o), (n)); \
})
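
/*
 * Illustrative use (a sketch, not part of the original file): a lock-free
 * stack push that relies on the full ordering above; @head and @node are
 * hypothetical:
 *
 *	struct node *first;
 *	do {
 *		first = READ_ONCE(*head);
 *		node->next = first;
 *	} while (cmpxchg(head, first, node) != first);
 */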

#undef ____cmpxchg

#endif /* _ALPHA_CMPXCHG_H */