x86/bugs: Rework spec_ctrl base and mask logic

x86_spec_ctrl_mask is intended to mask out bits from an MSR_SPEC_CTRL value
which are not to be modified. However, the mask is not really used, and the
bitmask was inverted to make a check easier; that check was removed in
"x86/bugs: Remove x86_spec_ctrl_set()".

Apart from that, the mask is missing the STIBP bit if it is supported by the
platform, so if the mask were used in x86_virt_spec_ctrl() it would prevent
a guest from setting STIBP.

Add the STIBP bit if supported and use the mask in x86_virt_spec_ctrl() to
sanitize the value which is supplied by the guest.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
commit be6fcb5478
parent 4b59bdb569
Author: Thomas Gleixner <tglx@linutronix.de>
Date:   2018-05-12 20:10:00 +02:00
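Before the diff itself, a quick illustration of what the reworked mask does: x86_spec_ctrl_mask now lists the bits a guest is allowed to modify, and x86_virt_spec_ctrl() uses it to combine the host base value with the guest-supplied value. Below is a minimal user-space sketch of that bit arithmetic; it is not part of the patch, and the helper name, the main() driver and the local SPEC_CTRL_* defines are illustrative, mirroring the kernel's MSR_IA32_SPEC_CTRL bit layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative copies of the MSR_IA32_SPEC_CTRL bit definitions. */
#define SPEC_CTRL_IBRS          (1ULL << 0)
#define SPEC_CTRL_STIBP         (1ULL << 1)
#define SPEC_CTRL_SSBD          (1ULL << 2)

/*
 * Keep the host-controlled bits from the base value and take only the
 * modifiable bits from the guest-supplied value.
 */
static uint64_t sanitize_guest_spec_ctrl(uint64_t hostval, uint64_t guest_spec_ctrl,
                                         uint64_t mask)
{
        uint64_t guestval = hostval & ~mask;

        guestval |= guest_spec_ctrl & mask;
        return guestval;
}

int main(void)
{
        /* Mask with STIBP added, as check_bugs() now does when STIBP exists. */
        uint64_t mask  = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP;
        uint64_t host  = SPEC_CTRL_IBRS;
        uint64_t guest = SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;

        /*
         * Prints 0x2: STIBP from the guest passes through, the guest's SSBD
         * bit is dropped (not modifiable in this mask), and IBRS follows the
         * guest because it is a modifiable bit the guest left clear.
         */
        printf("0x%llx\n",
               (unsigned long long)sanitize_guest_spec_ctrl(host, guest, mask));
        return 0;
}

Adding SPEC_CTRL_STIBP to the mask at boot is what lets the guest's STIBP bit survive this sanitization; without it the guest's STIBP request would be cleared.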
@@ -42,7 +42,7 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
  * The vendor and possibly platform specific bits which can be modified in
  * x86_spec_ctrl_base.
  */
-static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
+static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
 
 /*
  * AMD specific MSR info for Speculative Store Bypass control.
@@ -68,6 +68,10 @@ void __init check_bugs(void)
         if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 
+        /* Allow STIBP in MSR_SPEC_CTRL if supported */
+        if (boot_cpu_has(X86_FEATURE_STIBP))
+                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
+
         /* Select the proper spectre mitigation before patching alternatives */
         spectre_v2_select_mitigation();
 
@@ -136,18 +140,26 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
+        u64 msrval, guestval, hostval = x86_spec_ctrl_base;
         struct thread_info *ti = current_thread_info();
-        u64 msr, host = x86_spec_ctrl_base;
 
         /* Is MSR_SPEC_CTRL implemented ? */
         if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
+                /*
+                 * Restrict guest_spec_ctrl to supported values. Clear the
+                 * modifiable bits in the host base value and or the
+                 * modifiable bits from the guest value.
+                 */
+                guestval = hostval & ~x86_spec_ctrl_mask;
+                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
+
                 /* SSBD controlled in MSR_SPEC_CTRL */
                 if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
-                        host |= ssbd_tif_to_spec_ctrl(ti->flags);
+                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
-                if (host != guest_spec_ctrl) {
-                        msr = setguest ? guest_spec_ctrl : host;
-                        wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+                if (hostval != guestval) {
+                        msrval = setguest ? guestval : hostval;
+                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
                 }
         }
 }
@@ -493,7 +505,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
         switch (boot_cpu_data.x86_vendor) {
         case X86_VENDOR_INTEL:
                 x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
-                x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
+                x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
                 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                 break;
         case X86_VENDOR_AMD:
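
For completeness, a hedged sketch of how the reworked mask ends up being assembled across the hunks above: check_bugs() ORs in STIBP when the CPU supports it, and __ssb_select_mitigation() ORs in SSBD on Intel, where SSBD is controlled through MSR_SPEC_CTRL. The helper and its flag parameters below are hypothetical; the SPEC_CTRL_* defines are the illustrative ones from the sketch before the diff.

#include <stdint.h>

/* Illustrative SPEC_CTRL_* defines, as in the sketch before the diff. */
#define SPEC_CTRL_IBRS          (1ULL << 0)
#define SPEC_CTRL_STIBP         (1ULL << 1)
#define SPEC_CTRL_SSBD          (1ULL << 2)

/*
 * Illustrative only: collect the guest-modifiable MSR_SPEC_CTRL bits the
 * way the patch does, starting from IBRS and OR-ing in what is supported.
 */
static uint64_t build_spec_ctrl_mask(int has_stibp, int intel_spec_ctrl_ssbd)
{
        uint64_t mask = SPEC_CTRL_IBRS;         /* static initial value */

        if (has_stibp)                          /* X86_FEATURE_STIBP */
                mask |= SPEC_CTRL_STIBP;
        if (intel_spec_ctrl_ssbd)               /* Intel SSBD via MSR_SPEC_CTRL */
                mask |= SPEC_CTRL_SSBD;
        return mask;
}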