linux_dsm_epyc7002/arch/powerpc/include/asm/synch.h
Benjamin Herrenschmidt b97021f855 powerpc: Fix atomic_xxx_return barrier semantics
The Documentation/memory-barriers.txt document requires that atomic
operations that return a value act as a memory barrier both before
and after the actual atomic operation.
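
Concretely (illustrative snippet only, not part of this patch; x, y,
v, t and r are made-up variables):

	x = 1;				/* must be visible before the op */
	t = atomic_add_return(1, &v);	/* must act as a full barrier,
					   both before and after */
	r = y;				/* must not be reordered before
					   the op */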

Our current implementation doesn't guarantee this. More specifically,
while a load following the isync cannot be issued before stwcx. has
completed, that completion doesn't architecturally mean that the
result of stwcx. is visible to other processors (or any previous
stores, for that matter); typically, the other processors' L1 caches
can still hold the old value.
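
For reference, before this patch a value-returning atomic op expanded
to roughly the following (a simplified sketch of the old
atomic_add_return(); errata workarounds elided):

static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER		/* lwsync */
"1:	lwarx	%0,0,%2\n"		/* load-reserve v->counter */
"	add	%0,%1,%0\n"
"	stwcx.	%0,0,%2\n"		/* store-conditional */
"	bne-	1b\n"			/* lost the reservation, retry */
	PPC_ACQUIRE_BARRIER		/* isync: insufficient, see above */
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}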

This has caused an actual crash in RCU torture testing on POWER7.

This fixes it by changing those atomic ops to use new macros, called
ATOMIC_ENTRY and ATOMIC_EXIT barriers, instead of the RELEASE/ACQUIRE
barriers; these are defined as lwsync and sync respectively.
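
With the patch, the same op becomes (again a sketch; only the barrier
macros change):

static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER	/* lwsync */
"1:	lwarx	%0,0,%2\n"
"	add	%0,%1,%0\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER		/* sync: makes the stwcx. and any
					   prior stores visible to all
					   other processors */
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}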

I haven't had a chance to measure the performance impact precisely
(what I measured with kernel compiles is in the noise; I have yet to
find a more precise benchmark).

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
2011-11-17 16:26:07 +11:00

#ifndef _ASM_POWERPC_SYNCH_H
#define _ASM_POWERPC_SYNCH_H
#ifdef __KERNEL__
#include <linux/stringify.h>
#include <asm/feature-fixups.h>
#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
#define __SUBARCH_HAS_LWSYNC
#endif
#ifndef __ASSEMBLY__
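/*
 * Runtime barrier patching: locations recorded in the __lwsync_fixup
 * section are rewritten at boot by do_lwsync_fixups() according to the
 * CPU feature bits passed in 'value', so the cheaper lwsync can be
 * substituted on CPUs that support it.
 */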
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
			     void *fixup_end);
extern void do_final_fixups(void);
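/*
 * eieio (Enforce In-order Execution of I/O) orders accesses to device
 * memory and stores to cacheable memory; primarily used around MMIO.
 */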
static inline void eieio(void)
{
	__asm__ __volatile__ ("eieio" : : : "memory");
}
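/*
 * isync is context-synchronizing: no subsequent instruction executes
 * until all preceding instructions have completed.
 */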
static inline void isync(void)
{
	__asm__ __volatile__ ("isync" : : : "memory");
}
#endif /* __ASSEMBLY__ */
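/*
 * 64-bit CPUs always implement lwsync.  e500 cores don't, so emit a
 * plain sync and record it in __lwsync_fixup so it can be patched to
 * lwsync at boot on cores that do support it.  Other 32-bit CPUs fall
 * back to a full sync.
 */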
#if defined(__powerpc64__)
# define LWSYNC lwsync
#elif defined(CONFIG_E500)
# define LWSYNC \
	START_LWSYNC_SECTION(96); \
	sync; \
	MAKE_LWSYNC_SECTION_ENTRY(96, __lwsync_fixup);
#else
# define LWSYNC sync
#endif
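/*
 * On UP the "memory" clobber (a compiler barrier) is sufficient, so
 * all of these expand to nothing.  On SMP, the acquire barrier is an
 * isync recorded in __lwsync_fixup (patched to lwsync on CPUs that
 * have it), the release barrier is LWSYNC, and the atomic entry/exit
 * barriers are lwsync and a full sync so that value-returning atomics
 * act as full memory barriers.
 */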
#ifdef CONFIG_SMP
#define __PPC_ACQUIRE_BARRIER \
	START_LWSYNC_SECTION(97); \
	isync; \
	MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
#define PPC_ACQUIRE_BARRIER "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
#define PPC_RELEASE_BARRIER stringify_in_c(LWSYNC) "\n"
#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
#define PPC_ATOMIC_EXIT_BARRIER "\n" stringify_in_c(sync) "\n"
#else
#define PPC_ACQUIRE_BARRIER
#define PPC_RELEASE_BARRIER
#define PPC_ATOMIC_ENTRY_BARRIER
#define PPC_ATOMIC_EXIT_BARRIER
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYNCH_H */