From bf9c912f9a649776c2d741310486a6984edaac72 Mon Sep 17 00:00:00 2001 From: Ricardo Neri Date: Thu, 6 Aug 2020 20:28:33 -0700 Subject: [PATCH 01/11] x86/cpu: Use SERIALIZE in sync_core() when available The SERIALIZE instruction gives software a way to force the processor to complete all modifications to flags, registers and memory from previous instructions and drain all buffered writes to memory before the next instruction is fetched and executed. Thus, it serves the purpose of sync_core(). Use it when available. Suggested-by: Andy Lutomirski Signed-off-by: Ricardo Neri Signed-off-by: Borislav Petkov Reviewed-by: Tony Luck Link: https://lkml.kernel.org/r/20200807032833.17484-1-ricardo.neri-calderon@linux.intel.com --- arch/x86/include/asm/special_insns.h | 6 ++++++ arch/x86/include/asm/sync_core.h | 26 ++++++++++++++++++-------- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h index 59a3e13204c3..5999b0b3dd4a 100644 --- a/arch/x86/include/asm/special_insns.h +++ b/arch/x86/include/asm/special_insns.h @@ -234,6 +234,12 @@ static inline void clwb(volatile void *__p) #define nop() asm volatile ("nop") +static inline void serialize(void) +{ + /* Instruction opcode for SERIALIZE; supported in binutils >= 2.35. */ + asm volatile(".byte 0xf, 0x1, 0xe8" ::: "memory"); +} + #endif /* __KERNEL__ */ #endif /* _ASM_X86_SPECIAL_INSNS_H */ diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h index fdb5b356e59b..4631c0f969d4 100644 --- a/arch/x86/include/asm/sync_core.h +++ b/arch/x86/include/asm/sync_core.h @@ -5,6 +5,7 @@ #include #include #include +#include #ifdef CONFIG_X86_32 static inline void iret_to_self(void) @@ -54,14 +55,23 @@ static inline void iret_to_self(void) static inline void sync_core(void) { /* - * There are quite a few ways to do this. IRET-to-self is nice - * because it works on every CPU, at any CPL (so it's compatible - * with paravirtualization), and it never exits to a hypervisor. - * The only down sides are that it's a bit slow (it seems to be - * a bit more than 2x slower than the fastest options) and that - * it unmasks NMIs. The "push %cs" is needed because, in - * paravirtual environments, __KERNEL_CS may not be a valid CS - * value when we do IRET directly. + * The SERIALIZE instruction is the most straightforward way to + * do this but it not universally available. + */ + if (static_cpu_has(X86_FEATURE_SERIALIZE)) { + serialize(); + return; + } + + /* + * For all other processors, there are quite a few ways to do this. + * IRET-to-self is nice because it works on every CPU, at any CPL + * (so it's compatible with paravirtualization), and it never exits + * to a hypervisor. The only down sides are that it's a bit slow + * (it seems to be a bit more than 2x slower than the fastest + * options) and that it unmasks NMIs. The "push %cs" is needed + * because, in paravirtual environments, __KERNEL_CS may not be a + * valid CS value when we do IRET directly. * * In case NMI unmasking or performance ever becomes a problem, * the next best option appears to be MOV-to-CR2 and an From 86109813990b5d6d6cfb8072382ee69d11ea9460 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Tue, 7 Jul 2020 19:47:22 +0200 Subject: [PATCH 02/11] x86/cpu: Use XGETBV and XSETBV mnemonics in fpu/internal.h Current minimum required version of binutils is 2.23, which supports XGETBV and XSETBV instruction mnemonics. 
Replace the byte-wise specification of XGETBV and XSETBV with these proper mnemonics. Signed-off-by: Uros Bizjak Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200707174722.58651-1-ubizjak@gmail.com --- arch/x86/include/asm/fpu/internal.h | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 0a460f2a3f90..21a8b5259477 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -602,9 +602,7 @@ static inline u64 xgetbv(u32 index) { u32 eax, edx; - asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */ - : "=a" (eax), "=d" (edx) - : "c" (index)); + asm volatile("xgetbv" : "=a" (eax), "=d" (edx) : "c" (index)); return eax + ((u64)edx << 32); } @@ -613,8 +611,7 @@ static inline void xsetbv(u32 index, u64 value) u32 eax = value; u32 edx = value >> 32; - asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */ - : : "a" (eax), "d" (edx), "c" (index)); + asm volatile("xsetbv" :: "a" (eax), "d" (edx), "c" (index)); } #endif /* _ASM_X86_FPU_INTERNAL_H */ From 40eb0cb4939e462acfedea8c8064571e886b9773 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 18 Aug 2020 07:31:30 +0200 Subject: [PATCH 03/11] x86/cpu: Fix typos and improve the comments in sync_core() - Fix typos. - Move the compiler barrier comment to the top, because it's valid for the whole function, not just the legacy branch. Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20200818053130.GA3161093@gmail.com Reviewed-by: Ricardo Neri --- arch/x86/include/asm/sync_core.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h index 4631c0f969d4..0fd4a9dfb29c 100644 --- a/arch/x86/include/asm/sync_core.h +++ b/arch/x86/include/asm/sync_core.h @@ -47,16 +47,19 @@ static inline void iret_to_self(void) * * b) Text was modified on a different CPU, may subsequently be * executed on this CPU, and you want to make sure the new version - * gets executed. This generally means you're calling this in a IPI. + * gets executed. This generally means you're calling this in an IPI. * * If you're calling this for a different reason, you're probably doing * it wrong. + * + * Like all of Linux's memory ordering operations, this is a + * compiler barrier as well. */ static inline void sync_core(void) { /* * The SERIALIZE instruction is the most straightforward way to - * do this but it not universally available. + * do this, but it is not universally available. */ if (static_cpu_has(X86_FEATURE_SERIALIZE)) { serialize(); @@ -67,10 +70,10 @@ static inline void sync_core(void) * For all other processors, there are quite a few ways to do this. * IRET-to-self is nice because it works on every CPU, at any CPL * (so it's compatible with paravirtualization), and it never exits - * to a hypervisor. The only down sides are that it's a bit slow + * to a hypervisor. The only downsides are that it's a bit slow * (it seems to be a bit more than 2x slower than the fastest - * options) and that it unmasks NMIs. The "push %cs" is needed - * because, in paravirtual environments, __KERNEL_CS may not be a + * options) and that it unmasks NMIs. The "push %cs" is needed, + * because in paravirtual environments __KERNEL_CS may not be a * valid CS value when we do IRET directly. 
* * In case NMI unmasking or performance ever becomes a problem, @@ -81,9 +84,6 @@ static inline void sync_core(void) * CPUID is the conventional way, but it's nasty: it doesn't * exist on some 486-like CPUs, and it usually exits to a * hypervisor. - * - * Like all of Linux's memory ordering operations, this is a - * compiler barrier as well. */ iret_to_self(); } From b91e7089ae70d2f7c81a4456e5b78fef498663d9 Mon Sep 17 00:00:00 2001 From: Brendan Shanks Date: Fri, 10 Jul 2020 15:45:25 -0700 Subject: [PATCH 04/11] x86/umip: Add emulation/spoofing for SLDT and STR instructions Add emulation/spoofing of SLDT and STR for both 32- and 64-bit processes. Wine users have found a small number of Windows apps using SLDT that were crashing when run on UMIP-enabled systems. Originally-by: Ricardo Neri Reported-by: Andreas Rammhold Signed-off-by: Brendan Shanks Signed-off-by: Borislav Petkov Acked-by: Andy Lutomirski Reviewed-by: Ricardo Neri Tested-by: Ricardo Neri Link: https://lkml.kernel.org/r/20200710224525.21966-1-bshanks@codeweavers.com --- arch/x86/kernel/umip.c | 40 +++++++++++++++++++++++++++------------- 1 file changed, 27 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c index 8d5cbe1bbb3b..2c304fd0bb1a 100644 --- a/arch/x86/kernel/umip.c +++ b/arch/x86/kernel/umip.c @@ -45,11 +45,12 @@ * value that, lies close to the top of the kernel memory. The limit for the GDT * and the IDT are set to zero. * - * Given that SLDT and STR are not commonly used in programs that run on WineHQ - * or DOSEMU2, they are not emulated. - * - * The instruction smsw is emulated to return the value that the register CR0 + * The instruction SMSW is emulated to return the value that the register CR0 * has at boot time as set in the head_32. + * SLDT and STR are emulated to return the values that the kernel programmatically + * assigns: + * - SLDT returns (GDT_ENTRY_LDT * 8) if an LDT has been set, 0 if not. + * - STR returns (GDT_ENTRY_TSS * 8). * * Emulation is provided for both 32-bit and 64-bit processes. * @@ -244,16 +245,34 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst, *data_size += UMIP_GDT_IDT_LIMIT_SIZE; memcpy(data, &dummy_limit, UMIP_GDT_IDT_LIMIT_SIZE); - } else if (umip_inst == UMIP_INST_SMSW) { - unsigned long dummy_value = CR0_STATE; + } else if (umip_inst == UMIP_INST_SMSW || umip_inst == UMIP_INST_SLDT || + umip_inst == UMIP_INST_STR) { + unsigned long dummy_value; + + if (umip_inst == UMIP_INST_SMSW) { + dummy_value = CR0_STATE; + } else if (umip_inst == UMIP_INST_STR) { + dummy_value = GDT_ENTRY_TSS * 8; + } else if (umip_inst == UMIP_INST_SLDT) { +#ifdef CONFIG_MODIFY_LDT_SYSCALL + down_read(¤t->mm->context.ldt_usr_sem); + if (current->mm->context.ldt) + dummy_value = GDT_ENTRY_LDT * 8; + else + dummy_value = 0; + up_read(¤t->mm->context.ldt_usr_sem); +#else + dummy_value = 0; +#endif + } /* - * Even though the CR0 register has 4 bytes, the number + * For these 3 instructions, the number * of bytes to be copied in the result buffer is determined * by whether the operand is a register or a memory location. * If operand is a register, return as many bytes as the operand * size. If operand is memory, return only the two least - * siginificant bytes of CR0. + * siginificant bytes. 
 		 */
 		if (X86_MODRM_MOD(insn->modrm.value) == 3)
 			*data_size = insn->opnd_bytes;
@@ -261,7 +280,6 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst,
 			*data_size = 2;
 
 		memcpy(data, &dummy_value, *data_size);
-		/* STR and SLDT are not emulated */
 	} else {
 		return -EINVAL;
 	}
@@ -383,10 +401,6 @@ bool fixup_umip_exception(struct pt_regs *regs)
 	umip_pr_warn(regs, "%s instruction cannot be used by applications.\n",
 			umip_insns[umip_inst]);
 
-	/* Do not emulate (spoof) SLDT or STR. */
-	if (umip_inst == UMIP_INST_STR || umip_inst == UMIP_INST_SLDT)
-		return false;
-
 	umip_pr_warn(regs, "For now, expensive software emulation returns the result.\n");
 
 	if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size,

From 18ec63faefb3fd311822556cd9b949f66b1eecee Mon Sep 17 00:00:00 2001
From: Kyung Min Park
Date: Tue, 25 Aug 2020 08:47:57 +0800
Subject: [PATCH 05/11] x86/cpufeatures: Enumerate TSX suspend load address tracking instructions

Intel TSX suspend load tracking instructions aim to give a way to choose
which memory accesses do not need to be tracked in the TSX read set. Add
the TSX suspend load tracking CPUID feature flag TSXLDTRK for enumeration.

A processor supports Intel TSX suspend load address tracking if
CPUID.0x07.0x0:EDX[16] is set. Two instructions, XSUSLDTRK and XRESLDTRK,
are available when this feature is present.

The CPU feature flag is shown as "tsxldtrk" in /proc/cpuinfo.

Signed-off-by: Kyung Min Park
Signed-off-by: Cathy Zhang
Signed-off-by: Borislav Petkov
Reviewed-by: Tony Luck
Link: https://lkml.kernel.org/r/1598316478-23337-2-git-send-email-cathy.zhang@intel.com
---
 arch/x86/include/asm/cpufeatures.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 2901d5df4366..83fc9d38eb1f 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -368,6 +368,7 @@
 #define X86_FEATURE_MD_CLEAR		(18*32+10) /* VERW clears CPU buffers */
 #define X86_FEATURE_TSX_FORCE_ABORT	(18*32+13) /* "" TSX_FORCE_ABORT */
 #define X86_FEATURE_SERIALIZE		(18*32+14) /* SERIALIZE instruction */
+#define X86_FEATURE_TSXLDTRK		(18*32+16) /* TSX Suspend Load Address Tracking */
 #define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_ARCH_LBR		(18*32+19) /* Intel ARCH LBR */
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */

From 61aa9a0a5eae2100c171698bffabde8d5e9f694d Mon Sep 17 00:00:00 2001
From: Cathy Zhang
Date: Tue, 25 Aug 2020 08:47:58 +0800
Subject: [PATCH 06/11] x86/kvm: Expose TSX Suspend Load Tracking feature

The TSX suspend load tracking instructions are supported by the Intel
uarch Sapphire Rapids. They aim to give a way to choose which memory
accesses do not need to be tracked in the TSX read set. Their
availability is indicated by CPUID.(EAX=7,ECX=0):EDX[bit 16].

Expose the TSX Suspend Load Address Tracking feature in KVM CPUID, so
that KVM can pass this information to guests and they can make use of
this feature accordingly.
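As an illustration only (not part of this patch), a guest could confirm
that the feature was exposed with a small user-space probe of the CPUID
leaf named above; the __get_cpuid_count() helper is assumed from GCC's
<cpuid.h>:

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* TSXLDTRK is enumerated in CPUID.(EAX=7,ECX=0):EDX[16]. */
		if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) &&
		    (edx & (1U << 16)))
			printf("TSX suspend load address tracking enumerated\n");
		else
			printf("TSXLDTRK not enumerated\n");

		return 0;
	}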
Signed-off-by: Cathy Zhang
Signed-off-by: Borislav Petkov
Reviewed-by: Tony Luck
Link: https://lkml.kernel.org/r/1598316478-23337-3-git-send-email-cathy.zhang@intel.com
---
 arch/x86/kvm/cpuid.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 3fd6eec202d7..7456f9ad424b 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -371,7 +371,7 @@ void kvm_set_cpu_caps(void)
 		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
 		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
 		F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
-		F(SERIALIZE)
+		F(SERIALIZE) | F(TSXLDTRK)
 	);
 
 	/* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */

From 8687bdc04128b2bd16faaae11db10128ad0da7b8 Mon Sep 17 00:00:00 2001
From: Tony W Wang-oc
Date: Tue, 8 Sep 2020 18:57:45 +0800
Subject: [PATCH 07/11] x86/cpu/centaur: Replace two-condition switch-case with an if statement

Use normal if statements instead of a two-condition switch-case.

[ bp: Massage commit message. ]

Signed-off-by: Tony W Wang-oc
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/1599562666-31351-2-git-send-email-TonyWWang-oc@zhaoxin.com
---
 arch/x86/kernel/cpu/centaur.c | 25 +++++++++----------------
 1 file changed, 9 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index c5cf336e5077..5f811586a23c 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -90,18 +90,14 @@ enum {
 
 static void early_init_centaur(struct cpuinfo_x86 *c)
 {
-	switch (c->x86) {
 #ifdef CONFIG_X86_32
-	case 5:
-		/* Emulate MTRRs using Centaur's MCR. */
+	/* Emulate MTRRs using Centaur's MCR. */
+	if (c->x86 == 5)
 		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
-		break;
 #endif
-	case 6:
-		if (c->x86_model >= 0xf)
-			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-		break;
-	}
+	if (c->x86 == 6 && c->x86_model >= 0xf)
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+
 #ifdef CONFIG_X86_64
 	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
 #endif
@@ -145,9 +141,8 @@ static void init_centaur(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
 	}
 
-	switch (c->x86) {
 #ifdef CONFIG_X86_32
-	case 5:
+	if (c->x86 == 5) {
 		switch (c->x86_model) {
 		case 4:
 			name = "C6";
@@ -207,12 +202,10 @@ static void init_centaur(struct cpuinfo_x86 *c)
 			c->x86_cache_size = (cc>>24)+(dd>>24);
 		}
 		sprintf(c->x86_model_id, "WinChip %s", name);
-		break;
-#endif
-	case 6:
-		init_c3(c);
-		break;
 	}
+#endif
+	if (c->x86 == 6)
+		init_c3(c);
 #ifdef CONFIG_X86_64
 	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 #endif

From 33b4711df4c1b3aec7c267c60fc24abccfadd40c Mon Sep 17 00:00:00 2001
From: Tony W Wang-oc
Date: Tue, 8 Sep 2020 18:57:46 +0800
Subject: [PATCH 08/11] x86/cpu/centaur: Add Centaur family >=7 CPUs initialization support

Add Centaur family >=7 CPUs specific initialization support.
Signed-off-by: Tony W Wang-oc
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/1599562666-31351-3-git-send-email-TonyWWang-oc@zhaoxin.com
---
 arch/x86/kernel/cpu/centaur.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 5f811586a23c..345f7d905db6 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -65,6 +65,9 @@ static void init_c3(struct cpuinfo_x86 *c)
 		c->x86_cache_alignment = c->x86_clflush_size * 2;
 		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 	}
+
+	if (c->x86 >= 7)
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 }
 
 enum {
@@ -95,7 +98,8 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
 	if (c->x86 == 5)
 		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
 #endif
-	if (c->x86 == 6 && c->x86_model >= 0xf)
+	if ((c->x86 == 6 && c->x86_model >= 0xf) ||
+	    (c->x86 >= 7))
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 
 #ifdef CONFIG_X86_64
@@ -204,7 +208,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
 		sprintf(c->x86_model_id, "WinChip %s", name);
 	}
 #endif
-	if (c->x86 == 6)
+	if (c->x86 == 6 || c->x86 >= 7)
 		init_c3(c);
 #ifdef CONFIG_X86_64
 	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

From 5866e9205b47a983a77ebc8654949f696342f2ab Mon Sep 17 00:00:00 2001
From: Krish Sadhukhan
Date: Thu, 17 Sep 2020 21:20:36 +0000
Subject: [PATCH 09/11] x86/cpu: Add hardware-enforced cache coherency as a CPUID feature

In some hardware implementations, coherency between the encrypted and
unencrypted mappings of the same physical page is enforced. In such a
system, it is not required for software to flush the page from all CPU
caches in the system prior to changing the value of the C-bit for a
page. This hardware-enforced cache coherency is indicated by EAX[10]
in CPUID leaf 0x8000001f.

[ bp: Use one of the free slots in word 3. ]
Suggested-by: Tom Lendacky
Signed-off-by: Krish Sadhukhan
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/20200917212038.5090-2-krish.sadhukhan@oracle.com
---
 arch/x86/include/asm/cpufeatures.h | 2 +-
 arch/x86/kernel/cpu/scattered.c    | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 83fc9d38eb1f..50b2a8d85ef0 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -96,7 +96,7 @@
 #define X86_FEATURE_SYSCALL32		( 3*32+14) /* "" syscall in IA32 userspace */
 #define X86_FEATURE_SYSENTER32		( 3*32+15) /* "" sysenter in IA32 userspace */
 #define X86_FEATURE_REP_GOOD		( 3*32+16) /* REP microcode works well */
-/* free					( 3*32+17) */
+#define X86_FEATURE_SME_COHERENT	( 3*32+17) /* "" AMD hardware-enforced cache coherency */
 #define X86_FEATURE_LFENCE_RDTSC	( 3*32+18) /* "" LFENCE synchronizes RDTSC */
 #define X86_FEATURE_ACC_POWER		( 3*32+19) /* AMD Accumulated Power Mechanism */
 #define X86_FEATURE_NOPL		( 3*32+20) /* The NOPL (0F 1F) instructions */
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 62b137c3c97a..3221b71a0df8 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -41,6 +41,7 @@ static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_MBA,		CPUID_EBX,  6, 0x80000008, 0 },
 	{ X86_FEATURE_SME,		CPUID_EAX,  0, 0x8000001f, 0 },
 	{ X86_FEATURE_SEV,		CPUID_EAX,  1, 0x8000001f, 0 },
+	{ X86_FEATURE_SME_COHERENT,	CPUID_EAX, 10, 0x8000001f, 0 },
 	{ 0, 0, 0, 0, 0 }
 };

From 75d1cc0e05af579301ce4e49cf6399be4b4e6e76 Mon Sep 17 00:00:00 2001
From: Krish Sadhukhan
Date: Thu, 17 Sep 2020 21:20:37 +0000
Subject: [PATCH 10/11] x86/mm/pat: Don't flush cache if hardware enforces cache coherency across encryption domains

In some hardware implementations, coherency between the encrypted and
unencrypted mappings of the same physical page is enforced. In such a
system, it is not required for software to flush the page from all CPU
caches in the system prior to changing the value of the C-bit for the
page. So check the X86_FEATURE_SME_COHERENT CPUID bit before flushing
the cache.

[ bp: Massage commit message. ]

Suggested-by: Tom Lendacky
Signed-off-by: Krish Sadhukhan
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/20200917212038.5090-3-krish.sadhukhan@oracle.com
---
 arch/x86/mm/pat/set_memory.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index d1b2a889f035..40baa90e74f4 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1999,7 +1999,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	/*
 	 * Before changing the encryption attribute, we need to flush caches.
 	 */
-	cpa_flush(&cpa, 1);
+	cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
 
 	ret = __change_page_attr_set_clr(&cpa, 1);

From e1ebb2b49048c4767cfa0d8466f9c701e549fa5e Mon Sep 17 00:00:00 2001
From: Krish Sadhukhan
Date: Thu, 17 Sep 2020 21:20:38 +0000
Subject: [PATCH 11/11] KVM: SVM: Don't flush cache if hardware enforces cache coherency across encryption domains

In some hardware implementations, coherency between the encrypted and
unencrypted mappings of the same physical page in a VM is enforced. In
such a system, it is not required for software to flush the VM's page
from all CPU caches in the system prior to changing the value of the
C-bit for the page. So check the X86_FEATURE_SME_COHERENT CPUID bit
before flushing the cache.
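For reference, the CPUID bit in question can be probed from user space
with a sketch along these lines (illustrative only; assumes GCC's
<cpuid.h> and its __get_cpuid() helper):

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* CPUID leaf 0x8000001f, EAX[10]: HW-enforced coherency. */
		if (__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx) &&
		    (eax & (1U << 10)))
			printf("Coherency enforced; C-bit flips need no flush\n");
		else
			printf("Caches must be flushed around C-bit changes\n");

		return 0;
	}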
Signed-off-by: Krish Sadhukhan Signed-off-by: Borislav Petkov Acked-by: Paolo Bonzini Link: https://lkml.kernel.org/r/20200917212038.5090-4-krish.sadhukhan@oracle.com --- arch/x86/kvm/svm/sev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 402dc4234e39..567792fbbc9f 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -384,7 +384,8 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages) uint8_t *page_virtual; unsigned long i; - if (npages == 0 || pages == NULL) + if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 || + pages == NULL) return; for (i = 0; i < npages; i++) {