// SPDX-License-Identifier: GPL-2.0-only

/*
 * X86 specific Hyper-V initialization code.
 *
 * Copyright (C) 2016, Microsoft, Inc.
 *
 * Author : K. Y. Srinivasan <kys@microsoft.com>
 */
|
|
|
|
|
2018-09-19 05:29:50 +07:00
|
|
|
#include <linux/efi.h>
|
2017-01-19 06:45:02 +07:00
|
|
|
#include <linux/types.h>
|
2018-01-24 20:23:33 +07:00
|
|
|
#include <asm/apic.h>
|
|
|
|
#include <asm/desc.h>
|
2017-01-19 06:45:02 +07:00
|
|
|
#include <asm/hypervisor.h>
|
2018-03-20 21:02:05 +07:00
|
|
|
#include <asm/hyperv-tlfs.h>
|
2017-01-19 06:45:02 +07:00
|
|
|
#include <asm/mshyperv.h>
|
|
|
|
#include <linux/version.h>
|
|
|
|
#include <linux/vmalloc.h>
|
|
|
|
#include <linux/mm.h>
|
2017-03-05 08:27:11 +07:00
|
|
|
#include <linux/hyperv.h>
|
2017-08-02 23:09:18 +07:00
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/cpuhotplug.h>
|
2019-07-01 11:26:06 +07:00
|
|
|
#include <clocksource/hyperv_timer.h>
|
2017-01-19 06:45:02 +07:00
|
|
|
|
2017-08-02 23:09:14 +07:00
|
|
|
/* Hypercall page; NULL until hyperv_init() allocates and enables it. */
void *hv_hypercall_pg;
EXPORT_SYMBOL_GPL(hv_hypercall_pg);

/* Per-CPU VP index array, filled from the VP index MSR in hv_cpu_init(). */
u32 *hv_vp_index;
EXPORT_SYMBOL_GPL(hv_vp_index);

/* Per-CPU VP assist page pointers; pages are allocated lazily in hv_cpu_init(). */
struct hv_vp_assist_page **hv_vp_assist_page;
EXPORT_SYMBOL_GPL(hv_vp_assist_page);

/* Per-CPU pointer to the page used as hypercall input argument. */
void __percpu **hyperv_pcpu_input_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg);

/* Highest VP index observed across all CPUs brought online so far. */
u32 hv_max_vp_index;
EXPORT_SYMBOL_GPL(hv_max_vp_index);
|
2017-10-06 22:48:54 +07:00
|
|
|
|
2017-08-02 23:09:18 +07:00
|
|
|
/*
 * CPU-online (cpuhp) callback: set up the per-CPU Hyper-V state for @cpu.
 *
 * Allocates the per-CPU hypercall input page, records the CPU's VP index,
 * and enables the VP assist page. Returns 0 on success, -ENOMEM if the
 * input page cannot be allocated.
 */
static int hv_cpu_init(unsigned int cpu)
{
	u64 msr_vp_index;
	struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
	void **input_arg;
	struct page *pg;

	/* Allocate the page used as the hypercall input argument for this CPU. */
	input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
	pg = alloc_page(GFP_KERNEL);
	if (unlikely(!pg))
		return -ENOMEM;
	*input_arg = page_address(pg);

	/* Read this CPU's VP index from the hypervisor and cache it. */
	hv_get_vp_index(msr_vp_index);

	hv_vp_index[smp_processor_id()] = msr_vp_index;

	/* Track the largest VP index seen so far. */
	if (msr_vp_index > hv_max_vp_index)
		hv_max_vp_index = msr_vp_index;

	/* No VP assist page support (array allocation failed in hyperv_init()). */
	if (!hv_vp_assist_page)
		return 0;

	/*
	 * The VP ASSIST PAGE is an "overlay" page (see Hyper-V TLFS's Section
	 * 5.2.1 "GPA Overlay Pages"). Here it must be zeroed out to make sure
	 * we always write the EOI MSR in hv_apic_eoi_write() *after* the
	 * EOI optimization is disabled in hv_cpu_die(), otherwise a CPU may
	 * not be stopped in the case of CPU offlining and the VM will hang.
	 */
	if (!*hvp) {
		*hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO,
				 PAGE_KERNEL);
	}

	if (*hvp) {
		u64 val;

		/* Program the assist page's PFN and the enable bit into the MSR. */
		val = vmalloc_to_pfn(*hvp);
		val = (val << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) |
			HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;

		wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, val);
	}

	return 0;
}
|
|
|
|
|
2018-01-24 20:23:33 +07:00
|
|
|
/* Callback invoked (from a workqueue) while TSC access emulation is in progress. */
static void (*hv_reenlightenment_cb)(void);

/*
 * Deferred-work handler for the reenlightenment interrupt: read the TSC
 * emulation status and invoke the registered callback if emulation is
 * still in progress.
 */
static void hv_reenlightenment_notify(struct work_struct *dummy)
{
	struct hv_tsc_emulation_status emu_status;

	rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);

	/* Don't issue the callback if TSC accesses are not emulated */
	if (hv_reenlightenment_cb && emu_status.inprogress)
		hv_reenlightenment_cb();
}
static DECLARE_DELAYED_WORK(hv_reenlightenment_work, hv_reenlightenment_notify);
|
|
|
|
|
|
|
|
/*
 * Tell the hypervisor to stop emulating TSC accesses and refresh the
 * kernel's notion of the TSC frequency from the hypervisor-reported value.
 */
void hyperv_stop_tsc_emulation(void)
{
	u64 freq;
	struct hv_tsc_emulation_status emu_status;

	/* Clear the "emulation in progress" bit via read-modify-write. */
	rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
	emu_status.inprogress = 0;
	wrmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);

	/* The MSR reports the frequency in Hz; tsc_khz wants kHz. */
	rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);
	tsc_khz = div64_u64(freq, 1000);
}
EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation);
|
|
|
|
|
|
|
|
/*
 * Return true if the partition exposes the feature bits needed for TSC
 * frequency change (reenlightenment) notifications.
 */
static inline bool hv_reenlightenment_available(void)
{
	/*
	 * Check for required features and privileges to make TSC frequency
	 * change notifications work.
	 */
	return ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS &&
		ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE &&
		ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT;
}
|
|
|
|
|
|
|
|
/*
 * Interrupt handler for the Hyper-V reenlightenment vector. The actual
 * notification work is deferred to a workqueue; the HZ/10 delay gives
 * migration a chance to finish before the callback runs.
 */
__visible void __irq_entry hyperv_reenlightenment_intr(struct pt_regs *regs)
{
	entering_ack_irq();

	inc_irq_stat(irq_hv_reenlightenment_count);

	schedule_delayed_work(&hv_reenlightenment_work, HZ/10);

	exiting_irq();
}
|
|
|
|
|
|
|
|
void set_hv_tscchange_cb(void (*cb)(void))
|
|
|
|
{
|
|
|
|
struct hv_reenlightenment_control re_ctrl = {
|
|
|
|
.vector = HYPERV_REENLIGHTENMENT_VECTOR,
|
|
|
|
.enabled = 1,
|
|
|
|
.target_vp = hv_vp_index[smp_processor_id()]
|
|
|
|
};
|
|
|
|
struct hv_tsc_emulation_control emu_ctrl = {.enabled = 1};
|
|
|
|
|
|
|
|
if (!hv_reenlightenment_available()) {
|
|
|
|
pr_warn("Hyper-V: reenlightenment support is unavailable\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
hv_reenlightenment_cb = cb;
|
|
|
|
|
|
|
|
/* Make sure callback is registered before we write to MSRs */
|
|
|
|
wmb();
|
|
|
|
|
|
|
|
wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
|
|
|
|
wrmsrl(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl));
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(set_hv_tscchange_cb);
|
|
|
|
|
|
|
|
/*
 * Disable reenlightenment notifications and unregister the TSC-change
 * callback. Safe to call even if set_hv_tscchange_cb() never succeeded.
 */
void clear_hv_tscchange_cb(void)
{
	struct hv_reenlightenment_control re_ctrl;

	if (!hv_reenlightenment_available())
		return;

	/* Clear only the enable bit; leave vector/target fields as-is. */
	rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
	re_ctrl.enabled = 0;
	wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);

	hv_reenlightenment_cb = NULL;
}
EXPORT_SYMBOL_GPL(clear_hv_tscchange_cb);
|
|
|
|
|
2018-01-24 20:23:34 +07:00
|
|
|
/*
 * CPU-offline (cpuhp) callback: tear down the per-CPU Hyper-V state for
 * @cpu — free the hypercall input page, disable the VP assist page, and
 * retarget reenlightenment notifications if they pointed at this CPU.
 * Always returns 0.
 */
static int hv_cpu_die(unsigned int cpu)
{
	struct hv_reenlightenment_control re_ctrl;
	unsigned int new_cpu;
	unsigned long flags;
	void **input_arg;
	void *input_pg = NULL;

	/*
	 * Detach the input page with interrupts off so an interrupt on this
	 * CPU cannot observe/use the page while it is being torn down; the
	 * actual free happens after interrupts are re-enabled.
	 */
	local_irq_save(flags);
	input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
	input_pg = *input_arg;
	*input_arg = NULL;
	local_irq_restore(flags);
	free_page((unsigned long)input_pg);

	/* Disable the VP assist page (EOI optimization) for this CPU. */
	if (hv_vp_assist_page && hv_vp_assist_page[cpu])
		wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, 0);

	if (hv_reenlightenment_cb == NULL)
		return 0;

	rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	if (re_ctrl.target_vp == hv_vp_index[cpu]) {
		/*
		 * Reassign to some other online CPU.
		 * NOTE(review): cpumask_any_but() returns >= nr_cpu_ids when
		 * no other CPU is online — presumably unreachable here since
		 * the last CPU cannot be offlined, but worth confirming for
		 * shutdown/kexec paths.
		 */
		new_cpu = cpumask_any_but(cpu_online_mask, cpu);

		re_ctrl.target_vp = hv_vp_index[new_cpu];
		wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	}

	return 0;
}
|
|
|
|
|
2018-09-19 05:29:50 +07:00
|
|
|
/*
 * Hook installed as x86_init.pci.arch_init by hyperv_init().
 *
 * Returns 0 (stop) for Generation-2 VMs, 1 (continue) for Generation-1.
 */
static int __init hv_pci_init(void)
{
	/*
	 * For Generation-2 VM, we exit from pci_arch_init() by returning 0.
	 * The purpose is to suppress the harmless warning:
	 * "PCI: Fatal: No config space access function found"
	 */
	if (efi_enabled(EFI_BOOT))
		return 0;

	/* For Generation-1 VM, we'll proceed in pci_arch_init(). */
	return 1;
}
|
|
|
|
|
2017-01-19 06:45:02 +07:00
|
|
|
/*
 * This function is to be invoked early in the boot sequence after the
 * hypervisor has been detected.
 *
 * 1. Setup the hypercall page.
 * 2. Register Hyper-V specific clocksource.
 * 3. Setup Hyper-V specific APIC entry points.
 */
void __init hyperv_init(void)
{
	u64 guest_id, required_msrs;
	union hv_x64_msr_hypercall_contents hypercall_msr;
	int cpuhp, i;

	/* Bail if we are not actually running on genuine Hyper-V. */
	if (x86_hyper_type != X86_HYPER_MS_HYPERV)
		return;

	/* Absolutely required MSRs */
	required_msrs = HV_X64_MSR_HYPERCALL_AVAILABLE |
		HV_X64_MSR_VP_INDEX_AVAILABLE;

	if ((ms_hyperv.features & required_msrs) != required_msrs)
		return;

	/*
	 * Allocate the per-CPU state for the hypercall input arg.
	 * If this allocation fails, we will not be able to setup
	 * (per-CPU) hypercall input page and thus this failure is
	 * fatal on Hyper-V.
	 */
	hyperv_pcpu_input_arg = alloc_percpu(void *);

	BUG_ON(hyperv_pcpu_input_arg == NULL);

	/* Allocate percpu VP index */
	hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
				    GFP_KERNEL);
	if (!hv_vp_index)
		return;

	/* Mark all entries invalid until hv_cpu_init() fills them in. */
	for (i = 0; i < num_possible_cpus(); i++)
		hv_vp_index[i] = VP_INVAL;

	/* Without assist pages, enlightened VMCS cannot be recommended. */
	hv_vp_assist_page = kcalloc(num_possible_cpus(),
				    sizeof(*hv_vp_assist_page), GFP_KERNEL);
	if (!hv_vp_assist_page) {
		ms_hyperv.hints &= ~HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
		goto free_vp_index;
	}

	cpuhp = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv_init:online",
				  hv_cpu_init, hv_cpu_die);
	if (cpuhp < 0)
		goto free_vp_assist_page;

	/*
	 * Setup the hypercall page and enable hypercalls.
	 * 1. Register the guest ID
	 * 2. Enable the hypercall and register the hypercall page
	 */
	guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);

	hv_hypercall_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
	if (hv_hypercall_pg == NULL) {
		/* Unregister the guest ID before unwinding. */
		wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
		goto remove_cpuhp_state;
	}

	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 1;
	hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	hv_apic_init();

	x86_init.pci.arch_init = hv_pci_init;

	/* Register Hyper-V specific clocksource */
	hv_init_clocksource();
	return;

	/* Error unwind: release resources in reverse order of acquisition. */
remove_cpuhp_state:
	cpuhp_remove_state(cpuhp);
free_vp_assist_page:
	kfree(hv_vp_assist_page);
	hv_vp_assist_page = NULL;
free_vp_index:
	kfree(hv_vp_index);
	hv_vp_index = NULL;
}
|
2017-01-19 06:45:03 +07:00
|
|
|
|
2017-01-29 02:37:14 +07:00
|
|
|
/*
 * This routine is called before kexec/kdump, it does the required cleanup.
 */
void hyperv_cleanup(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/* Reset our OS id */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);

	/*
	 * Reset hypercall page reference before reset the page,
	 * let hypercall operations fail safely rather than
	 * panic the kernel for using invalid hypercall page
	 */
	hv_hypercall_pg = NULL;

	/* Reset the hypercall page */
	hypercall_msr.as_uint64 = 0;
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* Reset the TSC page */
	hypercall_msr.as_uint64 = 0;
	wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);
|
|
|
|
|
2017-10-30 01:33:41 +07:00
|
|
|
/*
 * Report a guest panic to Hyper-V through the crash MSRs.
 *
 * @regs: register state at the time of the panic (ip/ax/sp are reported)
 * @err:  error code, written to crash parameter P0
 *
 * Only the first invocation reports; subsequent calls are no-ops.
 */
void hyperv_report_panic(struct pt_regs *regs, long err)
{
	static bool panic_reported;
	u64 guest_id;

	/*
	 * We prefer to report panic on 'die' chain as we have proper
	 * registers to report, but if we miss it (e.g. on BUG()) we need
	 * to report it on 'panic'.
	 */
	if (panic_reported)
		return;
	panic_reported = true;

	rdmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);

	/* P0..P4 carry the error code, guest ID, and key registers. */
	wrmsrl(HV_X64_MSR_CRASH_P0, err);
	wrmsrl(HV_X64_MSR_CRASH_P1, guest_id);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->sp);

	/*
	 * Let Hyper-V know there is crash data available
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}
EXPORT_SYMBOL_GPL(hyperv_report_panic);
|
2017-01-20 01:51:49 +07:00
|
|
|
|
2018-07-08 09:56:51 +07:00
|
|
|
/**
 * hyperv_report_panic_msg - report panic message to Hyper-V
 * @pa: physical address of the panic page containing the message
 * @size: size of the message in the page
 */
void hyperv_report_panic_msg(phys_addr_t pa, size_t size)
{
	/*
	 * P3 to contain the physical address of the panic page & P4 to
	 * contain the size of the panic data in that page. Rest of the
	 * registers are no-op when the NOTIFY_MSG flag is set.
	 */
	wrmsrl(HV_X64_MSR_CRASH_P0, 0);
	wrmsrl(HV_X64_MSR_CRASH_P1, 0);
	wrmsrl(HV_X64_MSR_CRASH_P2, 0);
	wrmsrl(HV_X64_MSR_CRASH_P3, pa);
	wrmsrl(HV_X64_MSR_CRASH_P4, size);

	/*
	 * Let Hyper-V know there is crash data available along with
	 * the panic message.
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL,
	       (HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}
EXPORT_SYMBOL_GPL(hyperv_report_panic_msg);
|
|
|
|
|
2017-12-23 01:19:02 +07:00
|
|
|
/*
 * Return true if we are on genuine Hyper-V and hyperv_init() succeeded
 * in enabling the hypercall page.
 */
bool hv_is_hyperv_initialized(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/*
	 * Ensure that we're really on Hyper-V, and not a KVM or Xen
	 * emulation of Hyper-V
	 */
	if (x86_hyper_type != X86_HYPER_MS_HYPERV)
		return false;

	/*
	 * Verify that earlier initialization succeeded by checking
	 * that the hypercall page is setup
	 */
	hypercall_msr.as_uint64 = 0;
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	return hypercall_msr.enable;
}
EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized);
|