KVM: VMX: add support for switching of PERF_GLOBAL_CTRL
Some CPUs have dedicated support for switching the PERF_GLOBAL_CTRL MSR. Add logic to detect whether such support exists and works properly, and extend the MSR switching code to use it when available. Also extend the number of generic MSR switching entries to 8.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
commit 8bf00a5299 (parent 52e16b185f)
@@ -118,7 +118,7 @@ module_param(ple_gap, int, S_IRUGO);
 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
 module_param(ple_window, int, S_IRUGO);
 
-#define NR_AUTOLOAD_MSRS 1
+#define NR_AUTOLOAD_MSRS 8
 #define VMCS02_POOL_SIZE 1
 
 struct vmcs {
@@ -622,6 +622,7 @@ static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 
 static bool cpu_has_load_ia32_efer;
+static bool cpu_has_load_perf_global_ctrl;
 
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -1191,15 +1192,34 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
         vmcs_write32(EXCEPTION_BITMAP, eb);
 }
 
+static void clear_atomic_switch_msr_special(unsigned long entry,
+                unsigned long exit)
+{
+        vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
+        vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
+}
+
 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 {
         unsigned i;
         struct msr_autoload *m = &vmx->msr_autoload;
 
-        if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
-                vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
-                vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
-                return;
+        switch (msr) {
+        case MSR_EFER:
+                if (cpu_has_load_ia32_efer) {
+                        clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+                                        VM_EXIT_LOAD_IA32_EFER);
+                        return;
+                }
+                break;
+        case MSR_CORE_PERF_GLOBAL_CTRL:
+                if (cpu_has_load_perf_global_ctrl) {
+                        clear_atomic_switch_msr_special(
+                                        VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+                                        VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+                        return;
+                }
+                break;
         }
 
         for (i = 0; i < m->nr; ++i)
@@ -1215,18 +1235,44 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
 }
 
+static void add_atomic_switch_msr_special(unsigned long entry,
+                unsigned long exit, unsigned long guest_val_vmcs,
+                unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
+{
+        vmcs_write64(guest_val_vmcs, guest_val);
+        vmcs_write64(host_val_vmcs, host_val);
+        vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
+        vmcs_set_bits(VM_EXIT_CONTROLS, exit);
+}
+
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
                                   u64 guest_val, u64 host_val)
 {
         unsigned i;
         struct msr_autoload *m = &vmx->msr_autoload;
 
-        if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
-                vmcs_write64(GUEST_IA32_EFER, guest_val);
-                vmcs_write64(HOST_IA32_EFER, host_val);
-                vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
-                vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
-                return;
+        switch (msr) {
+        case MSR_EFER:
+                if (cpu_has_load_ia32_efer) {
+                        add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+                                        VM_EXIT_LOAD_IA32_EFER,
+                                        GUEST_IA32_EFER,
+                                        HOST_IA32_EFER,
+                                        guest_val, host_val);
+                        return;
+                }
+                break;
+        case MSR_CORE_PERF_GLOBAL_CTRL:
+                if (cpu_has_load_perf_global_ctrl) {
+                        add_atomic_switch_msr_special(
+                                        VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+                                        VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
+                                        GUEST_IA32_PERF_GLOBAL_CTRL,
+                                        HOST_IA32_PERF_GLOBAL_CTRL,
+                                        guest_val, host_val);
+                        return;
+                }
+                break;
         }
 
         for (i = 0; i < m->nr; ++i)
@@ -2455,6 +2501,42 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
                 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
                                    VM_EXIT_LOAD_IA32_EFER);
 
+        cpu_has_load_perf_global_ctrl =
+                allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
+                                VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
+                && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
+                                   VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+
+        /*
+         * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
+         * but due to arrata below it can't be used. Workaround is to use
+         * msr load mechanism to switch IA32_PERF_GLOBAL_CTRL.
+         *
+         * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
+         *
+         * AAK155             (model 26)
+         * AAP115             (model 30)
+         * AAT100             (model 37)
+         * BC86,AAY89,BD102   (model 44)
+         * BA97               (model 46)
+         *
+         */
+        if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
+                switch (boot_cpu_data.x86_model) {
+                case 26:
+                case 30:
+                case 37:
+                case 44:
+                case 46:
+                        cpu_has_load_perf_global_ctrl = false;
+                        printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
+                                        "does not work properly. Using workaround\n");
+                        break;
+                default:
+                        break;
+                }
+        }
+
         return 0;
 }
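For readers following the change, below is a minimal editorial sketch of how a caller in vmx.c might hand PERF_GLOBAL_CTRL switching to the helpers this patch extends. The wrapper function name and the way guest_val/host_val are obtained are hypothetical; only add_atomic_switch_msr(), clear_atomic_switch_msr(), MSR_CORE_PERF_GLOBAL_CTRL and the NR_AUTOLOAD_MSRS limit come from the patch itself.

/*
 * Editorial sketch, not part of the commit: the wrapper name and how
 * guest_val/host_val are produced are assumptions for illustration.
 */
static void switch_perf_global_ctrl(struct vcpu_vmx *vmx,
                                    u64 guest_val, u64 host_val)
{
        if (guest_val == host_val) {
                /* Nothing to switch; drop any stale autoload entry. */
                clear_atomic_switch_msr(vmx, MSR_CORE_PERF_GLOBAL_CTRL);
                return;
        }

        /*
         * add_atomic_switch_msr() now prefers the dedicated
         * VM_{ENTRY,EXIT}_LOAD_IA32_PERF_GLOBAL_CTRL controls when
         * cpu_has_load_perf_global_ctrl is set, and otherwise falls back
         * to the generic MSR autoload area (now NR_AUTOLOAD_MSRS == 8
         * entries), e.g. on the errata-affected models listed above.
         */
        add_atomic_switch_msr(vmx, MSR_CORE_PERF_GLOBAL_CTRL,
                              guest_val, host_val);
}

Either path yields the same observable behaviour: the guest value is loaded on VM entry and the host value restored on VM exit; the dedicated controls simply avoid consuming autoload slots on CPUs where they work correctly.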