Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-28 11:18:45 +07:00)
be261ffce6
AMD and Intel both have serializing lfence (X86_FEATURE_LFENCE_RDTSC). They've both had it for a long time, and AMD has had it enabled in Linux since Spectre v1 was announced.

Back then, there was a proposal to remove the serializing mfence feature bit (X86_FEATURE_MFENCE_RDTSC), since both AMD and Intel have serializing lfence. At the time, it was (ahem) speculated that some hypervisors might not yet support its removal, so it remained for the time being.

Now a year-and-a-half later, it should be safe to remove.

I asked Andrew Cooper about whether it's still needed:

  So if you're virtualised, you've got no choice in the matter. lfence is either dispatch-serialising or not on AMD, and you won't be able to change it.

  Furthermore, you can't accurately tell what state the bit is in, because the MSR might not be virtualised at all, or may not reflect the true state in hardware. Worse still, attempting to set the bit may not be successful even if there isn't a fault for doing so.

  Xen sets the DE_CFG bit unconditionally, as does Linux by the looks of things (see MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT). ISTR other hypervisor vendors saying the same, but I don't have any information to hand.

  If you are running under a hypervisor which has been updated, then lfence will almost certainly be dispatch-serialising in practice, and you'll almost certainly see the bit already set in DE_CFG.

  If you're running under a hypervisor which hasn't been patched since Spectre, you've already lost in many more ways.

  I'd argue that X86_FEATURE_MFENCE_RDTSC is not worth keeping.

So remove it. This will reduce some code rot, and also make it easier to hook barrier_nospec() up to a cmdline disable for performance raisins, without having to need an alternative_3() macro.

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/d990aa51e40063acb9888e8c1b688e41355a9588.1562255067.git.jpoimboe@redhat.com
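For context, here is a minimal sketch of what "Linux sets the DE_CFG bit" means in practice, using the MSR constants quoted above. This is illustrative rather than the literal kernel code; the real logic lives in the AMD CPU setup path, and c stands for the struct cpuinfo_x86 being initialized:

	msr_set_bit(MSR_F10H_DECFG, MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);	/* make LFENCE dispatch-serializing */
	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);				/* advertise serializing LFENCE */

Once X86_FEATURE_LFENCE_RDTSC is set, the barrier_nospec() alternative in the header below patches in an lfence.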
88 lines
2.4 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif

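/*
 * Usage sketch (illustrative, not part of this header): the "required on UP
 * too when we're talking to devices" case above.  A driver fills in a
 * descriptor in memory and must order those stores before the MMIO doorbell
 * write that tells the device to look at it; 'desc', 'doorbell' and the
 * field names are hypothetical.
 *
 *	desc->addr = dma_addr;
 *	desc->len  = len;
 *	wmb();
 *	writel(1, doorbell);
 */
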
/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 *	bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
		unsigned long size)
{
	unsigned long mask;

	asm volatile ("cmp %1,%2; sbb %0,%0;"
			:"=r" (mask)
			:"g"(size),"r" (index)
			:"cc");
	return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec

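/*
 * Usage sketch (illustrative, not part of this header): callers normally use
 * array_index_nospec() from <linux/nospec.h>, which combines the ordinary
 * bounds check with the mask generated above so that a mispredicted branch
 * still yields index 0 rather than an out-of-bounds speculative load;
 * 'table', 'nr_entries' and 'idx' are hypothetical.
 *
 *	if (idx < nr_entries) {
 *		idx = array_index_nospec(idx, nr_entries);
 *		val = table[idx];
 *	}
 */
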
/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative("", "lfence", X86_FEATURE_LFENCE_RDTSC)

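/*
 * Usage sketch (illustrative, not part of this header): place the barrier
 * between validating an untrusted value and acting on it, so the CPU cannot
 * speculate past a failed check; 'nr', 'max_nr' and do_thing() are
 * hypothetical.
 *
 *	if (nr >= max_nr)
 *		return -EINVAL;
 *	barrier_nospec();
 *	do_thing(nr);
 */
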
#define dma_rmb()	barrier()
#define dma_wmb()	barrier()

#ifdef CONFIG_X86_32
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
#else
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
#endif
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

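/*
 * Usage sketch (illustrative, not part of this header): a producer publishes
 * data behind a flag with store-release, and a consumer that observes the
 * flag with load-acquire is guaranteed to see the data written before it.
 * Callers use the smp_store_release()/smp_load_acquire() wrappers from
 * <asm-generic/barrier.h> rather than these __smp_* forms; 'data' and
 * 'ready' are hypothetical.
 *
 *	producer:				consumer:
 *	data = compute();			if (smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);			consume(data);
 */
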
/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	do { } while (0)
#define __smp_mb__after_atomic()	do { } while (0)

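/*
 * Usage sketch (illustrative, not part of this header): because x86 locked
 * RMW instructions are already full barriers, smp_mb__before_atomic() and
 * smp_mb__after_atomic() can be no-ops here while the generic pattern below
 * still gets its intended ordering; 'obj' is hypothetical.
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic();
 *	atomic_dec(&obj->refcount);
 */
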
#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */