linux_dsm_epyc7002/arch/powerpc/include/asm/security_features.h
Nicholas Piggin a048a07d7f powerpc/64s: Add support for a store forwarding barrier at kernel entry/exit
On some CPUs we can prevent a vulnerability related to store-to-load
forwarding by preventing store forwarding between privilege domains,
by inserting a barrier in kernel entry and exit paths.

This is known to be the case on at least Power7, Power8 and Power9
powerpc CPUs.

In general, barriers must be inserted before the first load after moving to
a higher privilege, and after the last store before moving to a lower
privilege. Both HV and PR privilege transitions must be protected.

Barriers are added as patch sections: all kernel/hypervisor entry points are
patched, and the exit points to lower privilege levels are patched similarly
to the RFI flush patching.

Firmware advertisement is not implemented yet, so CPU flush types
are hard coded.

Thanks to Michal Suchánek for bug fixes and review.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Michal Suchánek <msuchanek@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2018-05-21 20:45:31 -07:00

/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Security related feature bit definitions.
 *
 * Copyright 2018, Michael Ellerman, IBM Corporation.
 */
#ifndef _ASM_POWERPC_SECURITY_FEATURES_H
#define _ASM_POWERPC_SECURITY_FEATURES_H

extern unsigned long powerpc_security_features;
extern bool rfi_flush;

/* These are bit flags */
enum stf_barrier_type {
	STF_BARRIER_NONE	= 0x1,
	STF_BARRIER_FALLBACK	= 0x2,
	STF_BARRIER_EIEIO	= 0x4,
	STF_BARRIER_SYNC_ORI	= 0x8,
};
void setup_stf_barrier(void);
void do_stf_barrier_fixups(enum stf_barrier_type types);
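
/*
 * Illustrative sketch only (not part of the original header): with firmware
 * advertisement not yet implemented, a setup path could hard-code the barrier
 * type from the CPU family and patch it in only when security is favoured.
 * The cpu_has_feature()/CPU_FTR_* checks and the CPU-to-barrier mapping below
 * are assumptions for illustration; only the two declarations above are real.
 *
 *	void setup_stf_barrier(void)
 *	{
 *		enum stf_barrier_type type = STF_BARRIER_FALLBACK;
 *
 *		if (cpu_has_feature(CPU_FTR_ARCH_300))
 *			type = STF_BARRIER_EIEIO;
 *		else if (cpu_has_feature(CPU_FTR_ARCH_207S))
 *			type = STF_BARRIER_SYNC_ORI;
 *
 *		if (security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY))
 *			do_stf_barrier_fixups(type);
 *		else
 *			do_stf_barrier_fixups(STF_BARRIER_NONE);
 *	}
 */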

static inline void security_ftr_set(unsigned long feature)
{
	powerpc_security_features |= feature;
}

static inline void security_ftr_clear(unsigned long feature)
{
	powerpc_security_features &= ~feature;
}

static inline bool security_ftr_enabled(unsigned long feature)
{
	return !!(powerpc_security_features & feature);
}
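
/*
 * Usage sketch (illustrative, not part of the original header): platform
 * setup code accumulates the advertised or assumed features, and mitigation
 * code tests the mask before patching, e.g.:
 *
 *	security_ftr_set(SEC_FTR_L1D_FLUSH_PR);
 *	security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
 *
 *	if (security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY))
 *		setup_stf_barrier();
 */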

// Features indicating support for Spectre/Meltdown mitigations
// The L1-D cache can be flushed with ori r30,r30,0
#define SEC_FTR_L1D_FLUSH_ORI30 0x0000000000000001ull
// The L1-D cache can be flushed with mtspr 882,r0 (aka SPRN_TRIG2)
#define SEC_FTR_L1D_FLUSH_TRIG2 0x0000000000000002ull
// ori r31,r31,0 acts as a speculation barrier
#define SEC_FTR_SPEC_BAR_ORI31 0x0000000000000004ull
// Speculation past bctr is disabled
#define SEC_FTR_BCCTRL_SERIALISED 0x0000000000000008ull
// Entries in L1-D are private to a SMT thread
#define SEC_FTR_L1D_THREAD_PRIV 0x0000000000000010ull
// Indirect branch prediction cache disabled
#define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull

// Features indicating need for Spectre/Meltdown mitigations
// The L1-D cache should be flushed on MSR[HV] 1->0 transition (hypervisor to guest)
#define SEC_FTR_L1D_FLUSH_HV 0x0000000000000040ull
// The L1-D cache should be flushed on MSR[PR] 0->1 transition (kernel to userspace)
#define SEC_FTR_L1D_FLUSH_PR 0x0000000000000080ull
// A speculation barrier should be used for bounds checks (Spectre variant 1)
#define SEC_FTR_BNDS_CHK_SPEC_BAR 0x0000000000000100ull
// Firmware configuration indicates user favours security over performance
#define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull

// Features enabled by default
#define SEC_FTR_DEFAULT \
	(SEC_FTR_L1D_FLUSH_HV | \
	 SEC_FTR_L1D_FLUSH_PR | \
	 SEC_FTR_BNDS_CHK_SPEC_BAR | \
	 SEC_FTR_FAVOUR_SECURITY)
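
/*
 * Hypothetical definition site for the mask declared above (for example a
 * security.c under arch/powerpc/kernel), shown only as a sketch: start from
 * the conservative defaults and let platform code adjust individual bits.
 *
 *	unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
 */
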
#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */