Merge tag 'hyperv-next-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux

Pull Hyper-V updates from Sasha Levin:

 - support for new VMBus protocols (Andrea Parri)

 - hibernation support (Dexuan Cui)

 - latency testing framework (Branden Bonaby)

 - decoupling Hyper-V page size from guest page size (Himadri Pandya)

* tag 'hyperv-next-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux: (22 commits)
  Drivers: hv: vmbus: Fix crash handler reset of Hyper-V synic
  drivers/hv: Replace binary semaphore with mutex
  drivers: iommu: hyperv: Make HYPERV_IOMMU only available on x86
  HID: hyperv: Add the support of hibernation
  hv_balloon: Add the support of hibernation
  x86/hyperv: Implement hv_is_hibernation_supported()
  Drivers: hv: balloon: Remove dependencies on guest page size
  Drivers: hv: vmbus: Remove dependencies on guest page size
  x86: hv: Add function to allocate zeroed page for Hyper-V
  Drivers: hv: util: Specify ring buffer size using Hyper-V page size
  Drivers: hv: Specify receive buffer size using Hyper-V page size
  tools: hv: add vmbus testing tool
  drivers: hv: vmbus: Introduce latency testing
  video: hyperv: hyperv_fb: Support deferred IO for Hyper-V frame buffer driver
  video: hyperv: hyperv_fb: Obtain screen resolution from Hyper-V host
  hv_netvsc: Add the support of hibernation
  hv_sock: Add the support of hibernation
  video: hyperv_fb: Add the support of hibernation
  scsi: storvsc: Add the support of hibernation
  Drivers: hv: vmbus: Add module parameter to cap the VMBus version
  ...
Committed by Linus Torvalds on 2019-11-30 14:50:51 -08:00
commit 0dd0c8f7db
27 changed files with 1386 additions and 116 deletions


@ -0,0 +1,23 @@
What: /sys/kernel/debug/hyperv/<UUID>/fuzz_test_state
Date: October 2019
KernelVersion: 5.5
Contact: Branden Bonaby <brandonbonaby94@gmail.com>
Description: Fuzz testing status of a vmbus device, whether it is in an ON
state or an OFF state
Users: Debugging tools
What: /sys/kernel/debug/hyperv/<UUID>/delay/fuzz_test_buffer_interrupt_delay
Date: October 2019
KernelVersion: 5.5
Contact: Branden Bonaby <brandonbonaby94@gmail.com>
Description: Fuzz testing buffer interrupt delay value between 0 and 1000
microseconds (inclusive).
Users: Debugging tools
What: /sys/kernel/debug/hyperv/<UUID>/delay/fuzz_test_message_delay
Date: October 2019
KernelVersion: 5.5
Contact: Branden Bonaby <brandonbonaby94@gmail.com>
Description: Fuzz testing message delay value between 0 and 1000 microseconds
(inclusive).
Users: Debugging tools
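
For illustration only, a minimal user-space helper that drives these attributes could look like the sketch below; the <UUID> in the path is a placeholder for a real device directory under /sys/kernel/debug/hyperv, the write_value() helper is made up for this example, and 500 microseconds is just an example delay.

#include <stdio.h>
#include <stdlib.h>

/* Placeholder: substitute the UUID of a real VMBus device directory */
#define DEV_DIR "/sys/kernel/debug/hyperv/<UUID>"

static int write_value(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s", val);
	return fclose(f);
}

int main(void)
{
	/* Turn fuzz testing on for the device (1 = ON, 0 = OFF). */
	if (write_value(DEV_DIR "/fuzz_test_state", "1"))
		return EXIT_FAILURE;
	/* Delay each message read by 500 microseconds (0-1000 accepted). */
	if (write_value(DEV_DIR "/delay/fuzz_test_message_delay", "500"))
		return EXIT_FAILURE;
	return EXIT_SUCCESS;
}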


@ -7654,6 +7654,7 @@ F: include/uapi/linux/hyperv.h
F: include/asm-generic/mshyperv.h
F: tools/hv/
F: Documentation/ABI/stable/sysfs-bus-vmbus
F: Documentation/ABI/testing/debugfs-hyperv
HYPERBUS SUPPORT
M: Vignesh Raghavendra <vigneshr@ti.com>


@ -7,6 +7,7 @@
* Author : K. Y. Srinivasan <kys@microsoft.com>
*/
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/types.h>
#include <asm/apic.h>
@ -45,6 +46,14 @@ void *hv_alloc_hyperv_page(void)
}
EXPORT_SYMBOL_GPL(hv_alloc_hyperv_page);
void *hv_alloc_hyperv_zeroed_page(void)
{
BUILD_BUG_ON(PAGE_SIZE != HV_HYP_PAGE_SIZE);
return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page);
void hv_free_hyperv_page(unsigned long addr)
{
free_page(addr);
@ -437,3 +446,9 @@ bool hv_is_hyperv_initialized(void)
return hypercall_msr.enable;
}
EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized);
bool hv_is_hibernation_supported(void)
{
return acpi_sleep_state_supported(ACPI_STATE_S4);
}
EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);


@ -219,6 +219,7 @@ static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void *hv_alloc_hyperv_page(void);
void *hv_alloc_hyperv_zeroed_page(void);
void hv_free_hyperv_page(unsigned long addr);
void hyperv_reenlightenment_intr(struct pt_regs *regs);
void set_hv_tscchange_cb(void (*cb)(void));


@ -192,6 +192,9 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
if (desc->bLength == 0)
goto cleanup;
/* The pointer is not NULL when we resume from hibernation */
if (input_device->hid_desc != NULL)
kfree(input_device->hid_desc);
input_device->hid_desc = kmemdup(desc, desc->bLength, GFP_ATOMIC);
if (!input_device->hid_desc)
@ -203,6 +206,9 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
goto cleanup;
}
/* The pointer is not NULL when we resume from hibernation */
if (input_device->report_desc != NULL)
kfree(input_device->report_desc);
input_device->report_desc = kzalloc(input_device->report_desc_size,
GFP_ATOMIC);
@ -342,6 +348,8 @@ static int mousevsc_connect_to_vsp(struct hv_device *device)
struct mousevsc_prt_msg *request;
struct mousevsc_prt_msg *response;
reinit_completion(&input_dev->wait_event);
request = &input_dev->protocol_req;
memset(request, 0, sizeof(struct mousevsc_prt_msg));
@ -541,6 +549,30 @@ static int mousevsc_remove(struct hv_device *dev)
return 0;
}
static int mousevsc_suspend(struct hv_device *dev)
{
vmbus_close(dev->channel);
return 0;
}
static int mousevsc_resume(struct hv_device *dev)
{
int ret;
ret = vmbus_open(dev->channel,
INPUTVSC_SEND_RING_BUFFER_SIZE,
INPUTVSC_RECV_RING_BUFFER_SIZE,
NULL, 0,
mousevsc_on_channel_callback,
dev);
if (ret)
return ret;
ret = mousevsc_connect_to_vsp(dev);
return ret;
}
static const struct hv_vmbus_device_id id_table[] = {
/* Mouse guid */
{ HV_MOUSE_GUID, },
@ -554,6 +586,8 @@ static struct hv_driver mousevsc_drv = {
.id_table = id_table,
.probe = mousevsc_probe,
.remove = mousevsc_remove,
.suspend = mousevsc_suspend,
.resume = mousevsc_resume,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
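
The mousevsc hunks above show the suspend/resume pattern that the hibernation patches add to every VMBus driver: close the channel on suspend, then reopen it and redo any protocol setup on resume. A minimal sketch of that pattern follows, using a hypothetical "foovsc" driver; its callback, VSP-connect helper and ring size are made up for illustration.

#include <linux/hyperv.h>

#define FOOVSC_RING_SIZE	(4 * 4096)			/* hypothetical */

static void foovsc_on_channel_callback(void *context);		/* hypothetical */
static int foovsc_connect_to_vsp(struct hv_device *dev);	/* hypothetical */

static int foovsc_suspend(struct hv_device *dev)
{
	vmbus_close(dev->channel);
	return 0;
}

static int foovsc_resume(struct hv_device *dev)
{
	int ret;

	ret = vmbus_open(dev->channel, FOOVSC_RING_SIZE, FOOVSC_RING_SIZE,
			 NULL, 0, foovsc_on_channel_callback, dev);
	if (ret)
		return ret;

	/* Re-negotiate with the VSP, as mousevsc_resume() does above. */
	return foovsc_connect_to_vsp(dev);
}

The two callbacks are then wired into struct hv_driver via .suspend and .resume, exactly as the mousevsc_drv hunk above shows.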


@ -9,4 +9,5 @@ CFLAGS_hv_balloon.o = -I$(src)
hv_vmbus-y := vmbus_drv.o \
hv.o connection.o channel.o \
channel_mgmt.o ring_buffer.o hv_trace.o
hv_vmbus-$(CONFIG_HYPERV_TESTING) += hv_debugfs.o
hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o hv_utils_transport.o


@ -14,6 +14,7 @@
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
@ -40,29 +41,30 @@ EXPORT_SYMBOL_GPL(vmbus_connection);
__u32 vmbus_proto_version;
EXPORT_SYMBOL_GPL(vmbus_proto_version);
static __u32 vmbus_get_next_version(__u32 current_version)
{
switch (current_version) {
case (VERSION_WIN7):
return VERSION_WS2008;
/*
* Table of VMBus versions listed from newest to oldest.
*/
static __u32 vmbus_versions[] = {
VERSION_WIN10_V5_2,
VERSION_WIN10_V5_1,
VERSION_WIN10_V5,
VERSION_WIN10_V4_1,
VERSION_WIN10,
VERSION_WIN8_1,
VERSION_WIN8,
VERSION_WIN7,
VERSION_WS2008
};
case (VERSION_WIN8):
return VERSION_WIN7;
/*
* Maximal VMBus protocol version guests can negotiate. Useful to cap the
* VMBus version for testing and debugging purpose.
*/
static uint max_version = VERSION_WIN10_V5_2;
case (VERSION_WIN8_1):
return VERSION_WIN8;
case (VERSION_WIN10):
return VERSION_WIN8_1;
case (VERSION_WIN10_V5):
return VERSION_WIN10;
case (VERSION_WS2008):
default:
return VERSION_INVAL;
}
}
module_param(max_version, uint, S_IRUGO);
MODULE_PARM_DESC(max_version,
"Maximal VMBus protocol version which can be negotiated");
int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
{
@ -80,12 +82,12 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
msg->vmbus_version_requested = version;
/*
* VMBus protocol 5.0 (VERSION_WIN10_V5) requires that we must use
* VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
* VMBus protocol 5.0 (VERSION_WIN10_V5) and higher require that we must
* use VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
* and for subsequent messages, we must use the Message Connection ID
* field in the host-returned Version Response Message. And, with
* VERSION_WIN10_V5, we don't use msg->interrupt_page, but we tell
* the host explicitly that we still use VMBUS_MESSAGE_SINT(2) for
* VERSION_WIN10_V5 and higher, we don't use msg->interrupt_page, but we
* tell the host explicitly that we still use VMBUS_MESSAGE_SINT(2) for
* compatibility.
*
* On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
@ -169,8 +171,8 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
*/
int vmbus_connect(void)
{
int ret = 0;
struct vmbus_channel_msginfo *msginfo = NULL;
int i, ret = 0;
__u32 version;
/* Initialize the vmbus connection */
@ -206,7 +208,7 @@ int vmbus_connect(void)
* abstraction stuff
*/
vmbus_connection.int_page =
(void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, 0);
(void *)hv_alloc_hyperv_zeroed_page();
if (vmbus_connection.int_page == NULL) {
ret = -ENOMEM;
goto cleanup;
@ -215,14 +217,14 @@ int vmbus_connect(void)
vmbus_connection.recv_int_page = vmbus_connection.int_page;
vmbus_connection.send_int_page =
(void *)((unsigned long)vmbus_connection.int_page +
(PAGE_SIZE >> 1));
(HV_HYP_PAGE_SIZE >> 1));
/*
* Setup the monitor notification facility. The 1st page for
* parent->child and the 2nd page for child->parent
*/
vmbus_connection.monitor_pages[0] = (void *)__get_free_pages((GFP_KERNEL|__GFP_ZERO), 0);
vmbus_connection.monitor_pages[1] = (void *)__get_free_pages((GFP_KERNEL|__GFP_ZERO), 0);
vmbus_connection.monitor_pages[0] = (void *)hv_alloc_hyperv_zeroed_page();
vmbus_connection.monitor_pages[1] = (void *)hv_alloc_hyperv_zeroed_page();
if ((vmbus_connection.monitor_pages[0] == NULL) ||
(vmbus_connection.monitor_pages[1] == NULL)) {
ret = -ENOMEM;
@ -244,21 +246,21 @@ int vmbus_connect(void)
* version.
*/
version = VERSION_CURRENT;
for (i = 0; ; i++) {
if (i == ARRAY_SIZE(vmbus_versions))
goto cleanup;
version = vmbus_versions[i];
if (version > max_version)
continue;
do {
ret = vmbus_negotiate_version(msginfo, version);
if (ret == -ETIMEDOUT)
goto cleanup;
if (vmbus_connection.conn_state == CONNECTED)
break;
version = vmbus_get_next_version(version);
} while (version != VERSION_INVAL);
if (version == VERSION_INVAL)
goto cleanup;
}
vmbus_proto_version = version;
pr_info("Vmbus version:%d.%d\n",
@ -295,12 +297,12 @@ void vmbus_disconnect(void)
destroy_workqueue(vmbus_connection.work_queue);
if (vmbus_connection.int_page) {
free_pages((unsigned long)vmbus_connection.int_page, 0);
hv_free_hyperv_page((unsigned long)vmbus_connection.int_page);
vmbus_connection.int_page = NULL;
}
free_pages((unsigned long)vmbus_connection.monitor_pages[0], 0);
free_pages((unsigned long)vmbus_connection.monitor_pages[1], 0);
hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[0]);
hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[1]);
vmbus_connection.monitor_pages[0] = NULL;
vmbus_connection.monitor_pages[1] = NULL;
}
@ -361,6 +363,7 @@ void vmbus_on_event(unsigned long data)
trace_vmbus_on_event(channel);
hv_debug_delay_test(channel, INTERRUPT_DELAY);
do {
void (*callback_fn)(void *);
@ -413,7 +416,7 @@ int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep)
case HV_STATUS_INVALID_CONNECTION_ID:
/*
* See vmbus_negotiate_version(): VMBus protocol 5.0
* requires that we must use
* and higher require that we must use
* VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate
* Contact message, but on old hosts that only
* support VMBus protocol 4.0 or lower, here we get
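
To summarize the connection.c change above: instead of a hard-coded "next version" switch, vmbus_connect() now walks a newest-to-oldest table and skips any entry above the max_version module parameter. The sketch below is a condensed illustration of that walk, not the kernel code itself; pick_version() and host_accepts() are made-up names, with host_accepts() standing in for the host's Version Response handling.

static const __u32 versions[] = {
	VERSION_WIN10_V5_2, VERSION_WIN10_V5_1, VERSION_WIN10_V5,
	VERSION_WIN10_V4_1, VERSION_WIN10, VERSION_WIN8_1,
	VERSION_WIN8, VERSION_WIN7, VERSION_WS2008,
};

static __u32 pick_version(__u32 max_version, bool (*host_accepts)(__u32))
{
	int i;

	for (i = 0; i < ARRAY_SIZE(versions); i++) {
		if (versions[i] > max_version)
			continue;		/* capped by the module parameter */
		if (host_accepts(versions[i]))
			return versions[i];	/* first mutually supported version */
	}
	return VERSION_INVAL;			/* negotiation failed */
}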


@ -23,6 +23,9 @@
#include <linux/percpu_counter.h>
#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#define CREATE_TRACE_POINTS
#include "hv_trace_balloon.h"
@ -341,8 +344,6 @@ struct dm_unballoon_response {
*
* mem_range: Memory range to hot add.
*
* On Linux we currently don't support this since we cannot hot add
* arbitrary granularity of memory.
*/
struct dm_hot_add {
@ -457,6 +458,7 @@ struct hot_add_wrk {
struct work_struct wrk;
};
static bool allow_hibernation;
static bool hot_add = true;
static bool do_hot_add;
/*
@ -477,7 +479,7 @@ module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);
static int dm_ring_size = (5 * PAGE_SIZE);
static int dm_ring_size = 20 * 1024;
/*
* Driver specific state.
@ -493,10 +495,10 @@ enum hv_dm_state {
};
static __u8 recv_buffer[PAGE_SIZE];
static __u8 balloon_up_send_buffer[PAGE_SIZE];
#define PAGES_IN_2M 512
#define HA_CHUNK (32 * 1024)
static __u8 recv_buffer[HV_HYP_PAGE_SIZE];
static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE];
#define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE)
#define HA_CHUNK (128 * 1024 * 1024 / PAGE_SIZE)
struct hv_dynmem_device {
struct hv_device *dev;
@ -1053,8 +1055,12 @@ static void hot_add_req(struct work_struct *dummy)
else
resp.result = 0;
if (!do_hot_add || (resp.page_count == 0))
pr_err("Memory hot add failed\n");
if (!do_hot_add || resp.page_count == 0) {
if (!allow_hibernation)
pr_err("Memory hot add failed\n");
else
pr_info("Ignore hot-add request!\n");
}
dm->state = DM_INITIALIZED;
resp.hdr.trans_id = atomic_inc_return(&trans_id);
@ -1076,7 +1082,7 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
__u64 *max_page_count = (__u64 *)&info_hdr[1];
pr_info("Max. dynamic memory size: %llu MB\n",
(*max_page_count) >> (20 - PAGE_SHIFT));
(*max_page_count) >> (20 - HV_HYP_PAGE_SHIFT));
}
break;
@ -1218,7 +1224,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
for (i = 0; (i * alloc_unit) < num_pages; i++) {
if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
PAGE_SIZE)
HV_HYP_PAGE_SIZE)
return i * alloc_unit;
/*
@ -1274,9 +1280,9 @@ static void balloon_up(struct work_struct *dummy)
/*
* We will attempt 2M allocations. However, if we fail to
* allocate 2M chunks, we will go back to 4k allocations.
* allocate 2M chunks, we will go back to PAGE_SIZE allocations.
*/
alloc_unit = 512;
alloc_unit = PAGES_IN_2M;
avail_pages = si_mem_available();
floor = compute_balloon_floor();
@ -1292,7 +1298,7 @@ static void balloon_up(struct work_struct *dummy)
}
while (!done) {
memset(balloon_up_send_buffer, 0, PAGE_SIZE);
memset(balloon_up_send_buffer, 0, HV_HYP_PAGE_SIZE);
bl_resp = (struct dm_balloon_response *)balloon_up_send_buffer;
bl_resp->hdr.type = DM_BALLOON_RESPONSE;
bl_resp->hdr.size = sizeof(struct dm_balloon_response);
@ -1491,7 +1497,7 @@ static void balloon_onchannelcallback(void *context)
memset(recv_buffer, 0, sizeof(recv_buffer));
vmbus_recvpacket(dev->channel, recv_buffer,
PAGE_SIZE, &recvlen, &requestid);
HV_HYP_PAGE_SIZE, &recvlen, &requestid);
if (recvlen > 0) {
dm_msg = (struct dm_message *)recv_buffer;
@ -1509,6 +1515,11 @@ static void balloon_onchannelcallback(void *context)
break;
case DM_BALLOON_REQUEST:
if (allow_hibernation) {
pr_info("Ignore balloon-up request!\n");
break;
}
if (dm->state == DM_BALLOON_UP)
pr_warn("Currently ballooning\n");
bal_msg = (struct dm_balloon *)recv_buffer;
@ -1518,6 +1529,11 @@ static void balloon_onchannelcallback(void *context)
break;
case DM_UNBALLOON_REQUEST:
if (allow_hibernation) {
pr_info("Ignore balloon-down request!\n");
break;
}
dm->state = DM_BALLOON_DOWN;
balloon_down(dm,
(struct dm_unballoon_request *)recv_buffer);
@ -1623,6 +1639,11 @@ static int balloon_connect_vsp(struct hv_device *dev)
cap_msg.hdr.size = sizeof(struct dm_capabilities);
cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
/*
* When hibernation (i.e. virtual ACPI S4 state) is enabled, the host
* currently still requires the bits to be set, so we have to add code
* to fail the host's hot-add and balloon up/down requests, if any.
*/
cap_msg.caps.cap_bits.balloon = 1;
cap_msg.caps.cap_bits.hot_add = 1;
@ -1672,6 +1693,10 @@ static int balloon_probe(struct hv_device *dev,
{
int ret;
allow_hibernation = hv_is_hibernation_supported();
if (allow_hibernation)
hot_add = false;
#ifdef CONFIG_MEMORY_HOTPLUG
do_hot_add = hot_add;
#else
@ -1711,6 +1736,8 @@ static int balloon_probe(struct hv_device *dev,
return 0;
probe_error:
dm_device.state = DM_INIT_ERROR;
dm_device.thread = NULL;
vmbus_close(dev->channel);
#ifdef CONFIG_MEMORY_HOTPLUG
unregister_memory_notifier(&hv_memory_nb);
@ -1752,6 +1779,59 @@ static int balloon_remove(struct hv_device *dev)
return 0;
}
static int balloon_suspend(struct hv_device *hv_dev)
{
struct hv_dynmem_device *dm = hv_get_drvdata(hv_dev);
tasklet_disable(&hv_dev->channel->callback_event);
cancel_work_sync(&dm->balloon_wrk.wrk);
cancel_work_sync(&dm->ha_wrk.wrk);
if (dm->thread) {
kthread_stop(dm->thread);
dm->thread = NULL;
vmbus_close(hv_dev->channel);
}
tasklet_enable(&hv_dev->channel->callback_event);
return 0;
}
static int balloon_resume(struct hv_device *dev)
{
int ret;
dm_device.state = DM_INITIALIZING;
ret = balloon_connect_vsp(dev);
if (ret != 0)
goto out;
dm_device.thread =
kthread_run(dm_thread_func, &dm_device, "hv_balloon");
if (IS_ERR(dm_device.thread)) {
ret = PTR_ERR(dm_device.thread);
dm_device.thread = NULL;
goto close_channel;
}
dm_device.state = DM_INITIALIZED;
return 0;
close_channel:
vmbus_close(dev->channel);
out:
dm_device.state = DM_INIT_ERROR;
#ifdef CONFIG_MEMORY_HOTPLUG
unregister_memory_notifier(&hv_memory_nb);
restore_online_page_callback(&hv_online_page);
#endif
return ret;
}
static const struct hv_vmbus_device_id id_table[] = {
/* Dynamic Memory Class ID */
/* 525074DC-8985-46e2-8057-A307DC18A502 */
@ -1766,6 +1846,8 @@ static struct hv_driver balloon_drv = {
.id_table = id_table,
.probe = balloon_probe,
.remove = balloon_remove,
.suspend = balloon_suspend,
.resume = balloon_resume,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
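
A worked example of the page-size decoupling in the balloon driver above: the old constants 512 and 32 * 1024 silently assumed 4 KiB guest pages, whereas the rewritten macros derive the same values from PAGE_SIZE. The 64 KiB case below is hypothetical and shown only for illustration.

/*
 * PAGE_SIZE = 4 KiB:   PAGES_IN_2M = 2 MiB / 4 KiB    = 512
 *                      HA_CHUNK    = 128 MiB / 4 KiB  = 32768 (= 32 * 1024)
 * PAGE_SIZE = 64 KiB:  PAGES_IN_2M = 2 MiB / 64 KiB   = 32
 *                      HA_CHUNK    = 128 MiB / 64 KiB = 2048
 *
 * The send/receive buffers, by contrast, switch to HV_HYP_PAGE_SIZE because
 * their layout is dictated by the host, not by the guest's page size.
 */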

drivers/hv/hv_debugfs.c (new file, 178 lines)

@ -0,0 +1,178 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Authors:
* Branden Bonaby <brandonbonaby94@gmail.com>
*/
#include <linux/hyperv.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
#include "hyperv_vmbus.h"
struct dentry *hv_debug_root;
static int hv_debugfs_delay_get(void *data, u64 *val)
{
*val = *(u32 *)data;
return 0;
}
static int hv_debugfs_delay_set(void *data, u64 val)
{
if (val > 1000)
return -EINVAL;
*(u32 *)data = val;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(hv_debugfs_delay_fops, hv_debugfs_delay_get,
hv_debugfs_delay_set, "%llu\n");
static int hv_debugfs_state_get(void *data, u64 *val)
{
*val = *(bool *)data;
return 0;
}
static int hv_debugfs_state_set(void *data, u64 val)
{
if (val == 1)
*(bool *)data = true;
else if (val == 0)
*(bool *)data = false;
else
return -EINVAL;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(hv_debugfs_state_fops, hv_debugfs_state_get,
hv_debugfs_state_set, "%llu\n");
/* Setup delay files to store test values */
static int hv_debug_delay_files(struct hv_device *dev, struct dentry *root)
{
struct vmbus_channel *channel = dev->channel;
char *buffer = "fuzz_test_buffer_interrupt_delay";
char *message = "fuzz_test_message_delay";
int *buffer_val = &channel->fuzz_testing_interrupt_delay;
int *message_val = &channel->fuzz_testing_message_delay;
struct dentry *buffer_file, *message_file;
buffer_file = debugfs_create_file(buffer, 0644, root,
buffer_val,
&hv_debugfs_delay_fops);
if (IS_ERR(buffer_file)) {
pr_debug("debugfs_hyperv: file %s not created\n", buffer);
return PTR_ERR(buffer_file);
}
message_file = debugfs_create_file(message, 0644, root,
message_val,
&hv_debugfs_delay_fops);
if (IS_ERR(message_file)) {
pr_debug("debugfs_hyperv: file %s not created\n", message);
return PTR_ERR(message_file);
}
return 0;
}
/* Setup test state value for vmbus device */
static int hv_debug_set_test_state(struct hv_device *dev, struct dentry *root)
{
struct vmbus_channel *channel = dev->channel;
bool *state = &channel->fuzz_testing_state;
char *status = "fuzz_test_state";
struct dentry *test_state;
test_state = debugfs_create_file(status, 0644, root,
state,
&hv_debugfs_state_fops);
if (IS_ERR(test_state)) {
pr_debug("debugfs_hyperv: file %s not created\n", status);
return PTR_ERR(test_state);
}
return 0;
}
/* Bind hv device to a dentry for debugfs */
static void hv_debug_set_dir_dentry(struct hv_device *dev, struct dentry *root)
{
if (hv_debug_root)
dev->debug_dir = root;
}
/* Create all test dentry's and names for fuzz testing */
int hv_debug_add_dev_dir(struct hv_device *dev)
{
const char *device = dev_name(&dev->device);
char *delay_name = "delay";
struct dentry *delay, *dev_root;
int ret;
if (!IS_ERR(hv_debug_root)) {
dev_root = debugfs_create_dir(device, hv_debug_root);
if (IS_ERR(dev_root)) {
pr_debug("debugfs_hyperv: hyperv/%s/ not created\n",
device);
return PTR_ERR(dev_root);
}
hv_debug_set_test_state(dev, dev_root);
hv_debug_set_dir_dentry(dev, dev_root);
delay = debugfs_create_dir(delay_name, dev_root);
if (IS_ERR(delay)) {
pr_debug("debugfs_hyperv: hyperv/%s/%s/ not created\n",
device, delay_name);
return PTR_ERR(delay);
}
ret = hv_debug_delay_files(dev, delay);
return ret;
}
pr_debug("debugfs_hyperv: hyperv/ not in root debugfs path\n");
return PTR_ERR(hv_debug_root);
}
/* Remove dentry associated with released hv device */
void hv_debug_rm_dev_dir(struct hv_device *dev)
{
if (!IS_ERR(hv_debug_root))
debugfs_remove_recursive(dev->debug_dir);
}
/* Remove all dentrys associated with vmbus testing */
void hv_debug_rm_all_dir(void)
{
debugfs_remove_recursive(hv_debug_root);
}
/* Delay buffer/message reads on a vmbus channel */
void hv_debug_delay_test(struct vmbus_channel *channel, enum delay delay_type)
{
struct vmbus_channel *test_channel = channel->primary_channel ?
channel->primary_channel :
channel;
bool state = test_channel->fuzz_testing_state;
if (state) {
if (delay_type == 0)
udelay(test_channel->fuzz_testing_interrupt_delay);
else
udelay(test_channel->fuzz_testing_message_delay);
}
}
/* Initialize top dentry for vmbus testing */
int hv_debug_init(void)
{
hv_debug_root = debugfs_create_dir("hyperv", NULL);
if (IS_ERR(hv_debug_root)) {
pr_debug("debugfs_hyperv: hyperv/ not created\n");
return PTR_ERR(hv_debug_root);
}
return 0;
}


@ -13,6 +13,7 @@
#include <linux/workqueue.h>
#include <linux/hyperv.h>
#include <linux/sched.h>
#include <asm/hyperv-tlfs.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
@ -234,7 +235,7 @@ void hv_fcopy_onchannelcallback(void *context)
if (fcopy_transaction.state > HVUTIL_READY)
return;
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen,
&requestid);
if (recvlen <= 0)
return;


@ -27,6 +27,7 @@
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
@ -661,7 +662,7 @@ void hv_kvp_onchannelcallback(void *context)
if (kvp_transaction.state > HVUTIL_READY)
return;
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 4, &recvlen,
&requestid);
if (recvlen > 0) {


@ -12,6 +12,7 @@
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
@ -297,7 +298,7 @@ void hv_vss_onchannelcallback(void *context)
if (vss_transaction.state > HVUTIL_READY)
return;
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen,
&requestid);
if (recvlen > 0) {


@ -136,7 +136,7 @@ static void shutdown_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
vmbus_recvpacket(channel, shut_txf_buf,
PAGE_SIZE, &recvlen, &requestid);
HV_HYP_PAGE_SIZE, &recvlen, &requestid);
if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[
@ -284,7 +284,7 @@ static void timesync_onchannelcallback(void *context)
u8 *time_txf_buf = util_timesynch.recv_buffer;
vmbus_recvpacket(channel, time_txf_buf,
PAGE_SIZE, &recvlen, &requestid);
HV_HYP_PAGE_SIZE, &recvlen, &requestid);
if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
@ -346,7 +346,7 @@ static void heartbeat_onchannelcallback(void *context)
while (1) {
vmbus_recvpacket(channel, hbeat_txf_buf,
PAGE_SIZE, &recvlen, &requestid);
HV_HYP_PAGE_SIZE, &recvlen, &requestid);
if (!recvlen)
break;
@ -390,7 +390,7 @@ static int util_probe(struct hv_device *dev,
(struct hv_util_service *)dev_id->driver_data;
int ret;
srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
srv->recv_buffer = kmalloc(HV_HYP_PAGE_SIZE * 4, GFP_KERNEL);
if (!srv->recv_buffer)
return -ENOMEM;
srv->channel = dev->channel;
@ -413,8 +413,9 @@ static int util_probe(struct hv_device *dev,
hv_set_drvdata(dev, srv);
ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
srv->util_cb, dev->channel);
ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
dev->channel);
if (ret)
goto error;


@ -385,4 +385,35 @@ enum hvutil_device_state {
HVUTIL_DEVICE_DYING, /* driver unload is in progress */
};
enum delay {
INTERRUPT_DELAY = 0,
MESSAGE_DELAY = 1,
};
#ifdef CONFIG_HYPERV_TESTING
int hv_debug_add_dev_dir(struct hv_device *dev);
void hv_debug_rm_dev_dir(struct hv_device *dev);
void hv_debug_rm_all_dir(void);
int hv_debug_init(void);
void hv_debug_delay_test(struct vmbus_channel *channel, enum delay delay_type);
#else /* CONFIG_HYPERV_TESTING */
static inline void hv_debug_rm_dev_dir(struct hv_device *dev) {};
static inline void hv_debug_rm_all_dir(void) {};
static inline void hv_debug_delay_test(struct vmbus_channel *channel,
enum delay delay_type) {};
static inline int hv_debug_init(void)
{
return -1;
}
static inline int hv_debug_add_dev_dir(struct hv_device *dev)
{
return -1;
}
#endif /* CONFIG_HYPERV_TESTING */
#endif /* _HYPERV_VMBUS_H */


@ -396,6 +396,7 @@ struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
struct hv_ring_buffer_info *rbi = &channel->inbound;
struct vmpacket_descriptor *desc;
hv_debug_delay_test(channel, MESSAGE_DELAY);
if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
return NULL;
@ -421,6 +422,7 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
u32 packetlen = desc->len8 << 3;
u32 dsize = rbi->ring_datasize;
hv_debug_delay_test(channel, MESSAGE_DELAY);
/* bump offset to next potential packet */
rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
if (rbi->priv_read_index >= dsize)


@ -79,7 +79,7 @@ static struct notifier_block hyperv_panic_block = {
static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_SEMAPHORE(hyperv_mmio_lock);
static DEFINE_MUTEX(hyperv_mmio_lock);
static int vmbus_exists(void)
{
@ -960,6 +960,8 @@ static void vmbus_device_release(struct device *device)
struct hv_device *hv_dev = device_to_hv_device(device);
struct vmbus_channel *channel = hv_dev->channel;
hv_debug_rm_dev_dir(hv_dev);
mutex_lock(&vmbus_connection.channel_mutex);
hv_process_channel_removal(channel);
mutex_unlock(&vmbus_connection.channel_mutex);
@ -1273,7 +1275,7 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
* Write dump contents to the page. No need to synchronize; panic should
* be single-threaded.
*/
kmsg_dump_get_buffer(dumper, true, hv_panic_page, PAGE_SIZE,
kmsg_dump_get_buffer(dumper, true, hv_panic_page, HV_HYP_PAGE_SIZE,
&bytes_written);
if (bytes_written)
hyperv_report_panic_msg(panic_pa, bytes_written);
@ -1373,7 +1375,7 @@ static int vmbus_bus_init(void)
*/
hv_get_crash_ctl(hyperv_crash_ctl);
if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG) {
hv_panic_page = (void *)get_zeroed_page(GFP_KERNEL);
hv_panic_page = (void *)hv_alloc_hyperv_zeroed_page();
if (hv_panic_page) {
ret = kmsg_dump_register(&hv_kmsg_dumper);
if (ret)
@ -1401,7 +1403,7 @@ static int vmbus_bus_init(void)
hv_remove_vmbus_irq();
bus_unregister(&hv_bus);
free_page((unsigned long)hv_panic_page);
hv_free_hyperv_page((unsigned long)hv_panic_page);
unregister_sysctl_table(hv_ctl_table_hdr);
hv_ctl_table_hdr = NULL;
return ret;
@ -1809,6 +1811,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
pr_err("Unable to register primary channeln");
goto err_kset_unregister;
}
hv_debug_add_dev_dir(child_device_obj);
return 0;
@ -2010,7 +2013,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
int retval;
retval = -ENXIO;
down(&hyperv_mmio_lock);
mutex_lock(&hyperv_mmio_lock);
/*
* If overlaps with frame buffers are allowed, then first attempt to
@ -2057,7 +2060,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
}
exit:
up(&hyperv_mmio_lock);
mutex_unlock(&hyperv_mmio_lock);
return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
@ -2074,7 +2077,7 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
struct resource *iter;
down(&hyperv_mmio_lock);
mutex_lock(&hyperv_mmio_lock);
for (iter = hyperv_mmio; iter; iter = iter->sibling) {
if ((iter->start >= start + size) || (iter->end <= start))
continue;
@ -2082,7 +2085,7 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size)
__release_region(iter, start, size);
}
release_mem_region(start, size);
up(&hyperv_mmio_lock);
mutex_unlock(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);
@ -2215,8 +2218,7 @@ static int vmbus_bus_resume(struct device *dev)
* We only use the 'vmbus_proto_version', which was in use before
* hibernation, to re-negotiate with the host.
*/
if (vmbus_proto_version == VERSION_INVAL ||
vmbus_proto_version == 0) {
if (!vmbus_proto_version) {
pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
return -EINVAL;
}
@ -2303,7 +2305,7 @@ static void hv_crash_handler(struct pt_regs *regs)
vmbus_connection.conn_state = DISCONNECTED;
cpu = smp_processor_id();
hv_stimer_cleanup(cpu);
hv_synic_cleanup(cpu);
hv_synic_disable_regs(cpu);
hyperv_cleanup();
};
@ -2373,6 +2375,7 @@ static int __init hv_acpi_init(void)
ret = -ETIMEDOUT;
goto cleanup;
}
hv_debug_init();
ret = vmbus_bus_init();
if (ret)
@ -2409,6 +2412,8 @@ static void __exit vmbus_exit(void)
tasklet_kill(&hv_cpu->msg_dpc);
}
hv_debug_rm_all_dir();
vmbus_free_channels();
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {


@ -467,7 +467,7 @@ config QCOM_IOMMU
config HYPERV_IOMMU
bool "Hyper-V x2APIC IRQ Handling"
depends on HYPERV
depends on HYPERV && X86
select IOMMU_API
default HYPERV
help


@ -955,6 +955,9 @@ struct net_device_context {
u32 vf_alloc;
/* Serial number of the VF to team with */
u32 vf_serial;
/* Used to temporarily save the config info across hibernation */
struct netvsc_device_info *saved_netvsc_dev_info;
};
/* Per channel data */


@ -2424,6 +2424,61 @@ static int netvsc_remove(struct hv_device *dev)
return 0;
}
static int netvsc_suspend(struct hv_device *dev)
{
struct net_device_context *ndev_ctx;
struct net_device *vf_netdev, *net;
struct netvsc_device *nvdev;
int ret;
net = hv_get_drvdata(dev);
ndev_ctx = netdev_priv(net);
cancel_delayed_work_sync(&ndev_ctx->dwork);
rtnl_lock();
nvdev = rtnl_dereference(ndev_ctx->nvdev);
if (nvdev == NULL) {
ret = -ENODEV;
goto out;
}
vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
if (vf_netdev)
netvsc_unregister_vf(vf_netdev);
/* Save the current config info */
ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
ret = netvsc_detach(net, nvdev);
out:
rtnl_unlock();
return ret;
}
static int netvsc_resume(struct hv_device *dev)
{
struct net_device *net = hv_get_drvdata(dev);
struct net_device_context *net_device_ctx;
struct netvsc_device_info *device_info;
int ret;
rtnl_lock();
net_device_ctx = netdev_priv(net);
device_info = net_device_ctx->saved_netvsc_dev_info;
ret = netvsc_attach(net, device_info);
rtnl_unlock();
kfree(device_info);
net_device_ctx->saved_netvsc_dev_info = NULL;
return ret;
}
static const struct hv_vmbus_device_id id_table[] = {
/* Network guid */
{ HV_NIC_GUID, },
@ -2438,6 +2493,8 @@ static struct hv_driver netvsc_drv = {
.id_table = id_table,
.probe = netvsc_probe,
.remove = netvsc_remove,
.suspend = netvsc_suspend,
.resume = netvsc_resume,
.driver = {
.probe_type = PROBE_FORCE_SYNCHRONOUS,
},


@ -1727,6 +1727,13 @@ static const struct hv_vmbus_device_id id_table[] = {
MODULE_DEVICE_TABLE(vmbus, id_table);
static const struct { guid_t guid; } fc_guid = { HV_SYNTHFC_GUID };
static bool hv_dev_is_fc(struct hv_device *hv_dev)
{
return guid_equal(&fc_guid.guid, &hv_dev->dev_type);
}
static int storvsc_probe(struct hv_device *device,
const struct hv_vmbus_device_id *dev_id)
{
@ -1934,11 +1941,45 @@ static int storvsc_remove(struct hv_device *dev)
return 0;
}
static int storvsc_suspend(struct hv_device *hv_dev)
{
struct storvsc_device *stor_device = hv_get_drvdata(hv_dev);
struct Scsi_Host *host = stor_device->host;
struct hv_host_device *host_dev = shost_priv(host);
storvsc_wait_to_drain(stor_device);
drain_workqueue(host_dev->handle_error_wq);
vmbus_close(hv_dev->channel);
memset(stor_device->stor_chns, 0,
num_possible_cpus() * sizeof(void *));
kfree(stor_device->stor_chns);
stor_device->stor_chns = NULL;
cpumask_clear(&stor_device->alloced_cpus);
return 0;
}
static int storvsc_resume(struct hv_device *hv_dev)
{
int ret;
ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size,
hv_dev_is_fc(hv_dev));
return ret;
}
static struct hv_driver storvsc_drv = {
.name = KBUILD_MODNAME,
.id_table = id_table,
.probe = storvsc_probe,
.remove = storvsc_remove,
.suspend = storvsc_suspend,
.resume = storvsc_resume,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},


@ -2214,6 +2214,7 @@ config FB_HYPERV
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_DEFERRED_IO
help
This framebuffer driver supports Microsoft Hyper-V Synthetic Video.


@ -23,6 +23,14 @@
*
* Portrait orientation is also supported:
* For example: video=hyperv_fb:864x1152
*
* When a Windows 10 RS5+ host is used, the virtual machine screen
* resolution is obtained from the host. The "video=hyperv_fb" option is
* not needed, but still can be used to override what the host specifies.
* The VM resolution on the host can be set by executing the PowerShell
* "set-vmvideo" command. For example:
* set-vmvideo -vmname name -horizontalresolution:1920 \
* -verticalresolution:1200 -resolutiontype single
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@ -34,6 +42,7 @@
#include <linux/fb.h>
#include <linux/pci.h>
#include <linux/efi.h>
#include <linux/console.h>
#include <linux/hyperv.h>
@ -44,6 +53,10 @@
#define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major))
#define SYNTHVID_VERSION_WIN7 SYNTHVID_VERSION(3, 0)
#define SYNTHVID_VERSION_WIN8 SYNTHVID_VERSION(3, 2)
#define SYNTHVID_VERSION_WIN10 SYNTHVID_VERSION(3, 5)
#define SYNTHVID_VER_GET_MAJOR(ver) (ver & 0x0000ffff)
#define SYNTHVID_VER_GET_MINOR(ver) ((ver & 0xffff0000) >> 16)
#define SYNTHVID_DEPTH_WIN7 16
#define SYNTHVID_DEPTH_WIN8 32
@ -82,16 +95,25 @@ enum synthvid_msg_type {
SYNTHVID_POINTER_SHAPE = 8,
SYNTHVID_FEATURE_CHANGE = 9,
SYNTHVID_DIRT = 10,
SYNTHVID_RESOLUTION_REQUEST = 13,
SYNTHVID_RESOLUTION_RESPONSE = 14,
SYNTHVID_MAX = 11
SYNTHVID_MAX = 15
};
#define SYNTHVID_EDID_BLOCK_SIZE 128
#define SYNTHVID_MAX_RESOLUTION_COUNT 64
struct hvd_screen_info {
u16 width;
u16 height;
} __packed;
struct synthvid_msg_hdr {
u32 type;
u32 size; /* size of this header + payload after this field*/
} __packed;
struct synthvid_version_req {
u32 version;
} __packed;
@ -102,6 +124,19 @@ struct synthvid_version_resp {
u8 max_video_outputs;
} __packed;
struct synthvid_supported_resolution_req {
u8 maximum_resolution_count;
} __packed;
struct synthvid_supported_resolution_resp {
u8 edid_block[SYNTHVID_EDID_BLOCK_SIZE];
u8 resolution_count;
u8 default_resolution_index;
u8 is_standard;
struct hvd_screen_info
supported_resolution[SYNTHVID_MAX_RESOLUTION_COUNT];
} __packed;
struct synthvid_vram_location {
u64 user_ctx;
u8 is_vram_gpa_specified;
@ -187,6 +222,8 @@ struct synthvid_msg {
struct synthvid_pointer_shape ptr_shape;
struct synthvid_feature_change feature_chg;
struct synthvid_dirt dirt;
struct synthvid_supported_resolution_req resolution_req;
struct synthvid_supported_resolution_resp resolution_resp;
};
} __packed;
@ -201,6 +238,7 @@ struct synthvid_msg {
#define RING_BUFSIZE (256 * 1024)
#define VSP_TIMEOUT (10 * HZ)
#define HVFB_UPDATE_DELAY (HZ / 20)
#define HVFB_ONDEMAND_THROTTLE (HZ / 20)
struct hvfb_par {
struct fb_info *info;
@ -211,6 +249,7 @@ struct hvfb_par {
struct delayed_work dwork;
bool update;
bool update_saved; /* The value of 'update' before hibernation */
u32 pseudo_palette[16];
u8 init_buf[MAX_VMBUS_PKT_SIZE];
@ -220,12 +259,25 @@ struct hvfb_par {
bool synchronous_fb;
struct notifier_block hvfb_panic_nb;
/* Memory for deferred IO and frame buffer itself */
unsigned char *dio_vp;
unsigned char *mmio_vp;
unsigned long mmio_pp;
/* Dirty rectangle, protected by delayed_refresh_lock */
int x1, y1, x2, y2;
bool delayed_refresh;
spinlock_t delayed_refresh_lock;
};
static uint screen_width = HVFB_WIDTH;
static uint screen_height = HVFB_HEIGHT;
static uint screen_width_max = HVFB_WIDTH;
static uint screen_height_max = HVFB_HEIGHT;
static uint screen_depth;
static uint screen_fb_size;
static uint dio_fb_size; /* FB size for deferred IO */
/* Send message to Hyper-V host */
static inline int synthvid_send(struct hv_device *hdev,
@ -312,28 +364,88 @@ static int synthvid_send_ptr(struct hv_device *hdev)
}
/* Send updated screen area (dirty rectangle) location to host */
static int synthvid_update(struct fb_info *info)
static int
synthvid_update(struct fb_info *info, int x1, int y1, int x2, int y2)
{
struct hv_device *hdev = device_to_hv_device(info->device);
struct synthvid_msg msg;
memset(&msg, 0, sizeof(struct synthvid_msg));
if (x2 == INT_MAX)
x2 = info->var.xres;
if (y2 == INT_MAX)
y2 = info->var.yres;
msg.vid_hdr.type = SYNTHVID_DIRT;
msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
sizeof(struct synthvid_dirt);
msg.dirt.video_output = 0;
msg.dirt.dirt_count = 1;
msg.dirt.rect[0].x1 = 0;
msg.dirt.rect[0].y1 = 0;
msg.dirt.rect[0].x2 = info->var.xres;
msg.dirt.rect[0].y2 = info->var.yres;
msg.dirt.rect[0].x1 = (x1 > x2) ? 0 : x1;
msg.dirt.rect[0].y1 = (y1 > y2) ? 0 : y1;
msg.dirt.rect[0].x2 =
(x2 < x1 || x2 > info->var.xres) ? info->var.xres : x2;
msg.dirt.rect[0].y2 =
(y2 < y1 || y2 > info->var.yres) ? info->var.yres : y2;
synthvid_send(hdev, &msg);
return 0;
}
static void hvfb_docopy(struct hvfb_par *par,
unsigned long offset,
unsigned long size)
{
if (!par || !par->mmio_vp || !par->dio_vp || !par->fb_ready ||
size == 0 || offset >= dio_fb_size)
return;
if (offset + size > dio_fb_size)
size = dio_fb_size - offset;
memcpy(par->mmio_vp + offset, par->dio_vp + offset, size);
}
/* Deferred IO callback */
static void synthvid_deferred_io(struct fb_info *p,
struct list_head *pagelist)
{
struct hvfb_par *par = p->par;
struct page *page;
unsigned long start, end;
int y1, y2, miny, maxy;
miny = INT_MAX;
maxy = 0;
/*
* Merge dirty pages. It is possible that last page cross
* over the end of frame buffer row yres. This is taken care of
* in synthvid_update function by clamping the y2
* value to yres.
*/
list_for_each_entry(page, pagelist, lru) {
start = page->index << PAGE_SHIFT;
end = start + PAGE_SIZE - 1;
y1 = start / p->fix.line_length;
y2 = end / p->fix.line_length;
miny = min_t(int, miny, y1);
maxy = max_t(int, maxy, y2);
/* Copy from dio space to mmio address */
if (par->fb_ready)
hvfb_docopy(par, start, PAGE_SIZE);
}
if (par->fb_ready && par->update)
synthvid_update(p, 0, miny, p->var.xres, maxy + 1);
}
static struct fb_deferred_io synthvid_defio = {
.delay = HZ / 20,
.deferred_io = synthvid_deferred_io,
};
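
As a worked example of the dirty-row arithmetic in synthvid_deferred_io() above, assume (purely for illustration) a 1920x1080, 32 bpp mode, so fix.line_length = 1920 * 4 = 7680 bytes, with 4 KiB pages:

/*
 * page->index = 3:  start = 3 << PAGE_SHIFT = 12288,  end = 16383
 *                   y1 = 12288 / 7680 = 1,  y2 = 16383 / 7680 = 2
 *
 * Rows 1..2 are merged into [miny, maxy] and sent as a single dirty
 * rectangle; synthvid_update() clamps y2 back to var.yres if the last
 * page of the buffer runs past the visible frame buffer.
 */
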
/*
* Actions on received messages from host:
@ -354,6 +466,7 @@ static void synthvid_recv_sub(struct hv_device *hdev)
/* Complete the wait event */
if (msg->vid_hdr.type == SYNTHVID_VERSION_RESPONSE ||
msg->vid_hdr.type == SYNTHVID_RESOLUTION_RESPONSE ||
msg->vid_hdr.type == SYNTHVID_VRAM_LOCATION_ACK) {
memcpy(par->init_buf, msg, MAX_VMBUS_PKT_SIZE);
complete(&par->wait);
@ -400,6 +513,17 @@ static void synthvid_receive(void *ctx)
} while (bytes_recvd > 0 && ret == 0);
}
/* Check if the ver1 version is equal or greater than ver2 */
static inline bool synthvid_ver_ge(u32 ver1, u32 ver2)
{
if (SYNTHVID_VER_GET_MAJOR(ver1) > SYNTHVID_VER_GET_MAJOR(ver2) ||
(SYNTHVID_VER_GET_MAJOR(ver1) == SYNTHVID_VER_GET_MAJOR(ver2) &&
SYNTHVID_VER_GET_MINOR(ver1) >= SYNTHVID_VER_GET_MINOR(ver2)))
return true;
return false;
}
/* Check synthetic video protocol version with the host */
static int synthvid_negotiate_ver(struct hv_device *hdev, u32 ver)
{
@ -428,6 +552,64 @@ static int synthvid_negotiate_ver(struct hv_device *hdev, u32 ver)
}
par->synthvid_version = ver;
pr_info("Synthvid Version major %d, minor %d\n",
SYNTHVID_VER_GET_MAJOR(ver), SYNTHVID_VER_GET_MINOR(ver));
out:
return ret;
}
/* Get current resolution from the host */
static int synthvid_get_supported_resolution(struct hv_device *hdev)
{
struct fb_info *info = hv_get_drvdata(hdev);
struct hvfb_par *par = info->par;
struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
int ret = 0;
unsigned long t;
u8 index;
int i;
memset(msg, 0, sizeof(struct synthvid_msg));
msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
sizeof(struct synthvid_supported_resolution_req);
msg->resolution_req.maximum_resolution_count =
SYNTHVID_MAX_RESOLUTION_COUNT;
synthvid_send(hdev, msg);
t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
if (!t) {
pr_err("Time out on waiting resolution response\n");
ret = -ETIMEDOUT;
goto out;
}
if (msg->resolution_resp.resolution_count == 0) {
pr_err("No supported resolutions\n");
ret = -ENODEV;
goto out;
}
index = msg->resolution_resp.default_resolution_index;
if (index >= msg->resolution_resp.resolution_count) {
pr_err("Invalid resolution index: %d\n", index);
ret = -ENODEV;
goto out;
}
for (i = 0; i < msg->resolution_resp.resolution_count; i++) {
screen_width_max = max_t(unsigned int, screen_width_max,
msg->resolution_resp.supported_resolution[i].width);
screen_height_max = max_t(unsigned int, screen_height_max,
msg->resolution_resp.supported_resolution[i].height);
}
screen_width =
msg->resolution_resp.supported_resolution[index].width;
screen_height =
msg->resolution_resp.supported_resolution[index].height;
out:
return ret;
@ -448,11 +630,27 @@ static int synthvid_connect_vsp(struct hv_device *hdev)
}
/* Negotiate the protocol version with host */
if (vmbus_proto_version == VERSION_WS2008 ||
vmbus_proto_version == VERSION_WIN7)
ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN7);
else
switch (vmbus_proto_version) {
case VERSION_WIN10:
case VERSION_WIN10_V5:
ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10);
if (!ret)
break;
/* Fallthrough */
case VERSION_WIN8:
case VERSION_WIN8_1:
ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN8);
if (!ret)
break;
/* Fallthrough */
case VERSION_WS2008:
case VERSION_WIN7:
ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN7);
break;
default:
ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10);
break;
}
if (ret) {
pr_err("Synthetic video device version not accepted\n");
@ -464,6 +662,12 @@ static int synthvid_connect_vsp(struct hv_device *hdev)
else
screen_depth = SYNTHVID_DEPTH_WIN8;
if (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10)) {
ret = synthvid_get_supported_resolution(hdev);
if (ret)
pr_info("Failed to get supported resolution from host, use default\n");
}
screen_fb_size = hdev->channel->offermsg.offer.
mmio_megabytes * 1024 * 1024;
@ -488,7 +692,7 @@ static int synthvid_send_config(struct hv_device *hdev)
msg->vid_hdr.type = SYNTHVID_VRAM_LOCATION;
msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
sizeof(struct synthvid_vram_location);
msg->vram.user_ctx = msg->vram.vram_gpa = info->fix.smem_start;
msg->vram.user_ctx = msg->vram.vram_gpa = par->mmio_pp;
msg->vram.is_vram_gpa_specified = 1;
synthvid_send(hdev, msg);
@ -498,7 +702,7 @@ static int synthvid_send_config(struct hv_device *hdev)
ret = -ETIMEDOUT;
goto out;
}
if (msg->vram_ack.user_ctx != info->fix.smem_start) {
if (msg->vram_ack.user_ctx != par->mmio_pp) {
pr_err("Unable to set VRAM location\n");
ret = -ENODEV;
goto out;
@ -515,19 +719,77 @@ static int synthvid_send_config(struct hv_device *hdev)
/*
* Delayed work callback:
* It is called at HVFB_UPDATE_DELAY or longer time interval to process
* screen updates. It is re-scheduled if further update is necessary.
* It is scheduled whenever an update request is received and it has
* not run within the last HVFB_ONDEMAND_THROTTLE interval.
*/
static void hvfb_update_work(struct work_struct *w)
{
struct hvfb_par *par = container_of(w, struct hvfb_par, dwork.work);
struct fb_info *info = par->info;
unsigned long flags;
int x1, x2, y1, y2;
int j;
if (par->fb_ready)
synthvid_update(info);
spin_lock_irqsave(&par->delayed_refresh_lock, flags);
/* Reset the request flag */
par->delayed_refresh = false;
if (par->update)
schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);
/* Store the dirty rectangle to local variables */
x1 = par->x1;
x2 = par->x2;
y1 = par->y1;
y2 = par->y2;
/* Clear dirty rectangle */
par->x1 = par->y1 = INT_MAX;
par->x2 = par->y2 = 0;
spin_unlock_irqrestore(&par->delayed_refresh_lock, flags);
if (x1 > info->var.xres || x2 > info->var.xres ||
y1 > info->var.yres || y2 > info->var.yres || x2 <= x1)
return;
/* Copy the dirty rectangle to frame buffer memory */
for (j = y1; j < y2; j++) {
hvfb_docopy(par,
j * info->fix.line_length +
(x1 * screen_depth / 8),
(x2 - x1) * screen_depth / 8);
}
/* Refresh */
if (par->fb_ready && par->update)
synthvid_update(info, x1, y1, x2, y2);
}
/*
* Control the on-demand refresh frequency. It schedules a delayed
* screen update if it has not yet.
*/
static void hvfb_ondemand_refresh_throttle(struct hvfb_par *par,
int x1, int y1, int w, int h)
{
unsigned long flags;
int x2 = x1 + w;
int y2 = y1 + h;
spin_lock_irqsave(&par->delayed_refresh_lock, flags);
/* Merge dirty rectangle */
par->x1 = min_t(int, par->x1, x1);
par->y1 = min_t(int, par->y1, y1);
par->x2 = max_t(int, par->x2, x2);
par->y2 = max_t(int, par->y2, y2);
/* Schedule a delayed screen update if not yet */
if (par->delayed_refresh == false) {
schedule_delayed_work(&par->dwork,
HVFB_ONDEMAND_THROTTLE);
par->delayed_refresh = true;
}
spin_unlock_irqrestore(&par->delayed_refresh_lock, flags);
}
static int hvfb_on_panic(struct notifier_block *nb,
@ -539,7 +801,8 @@ static int hvfb_on_panic(struct notifier_block *nb,
par = container_of(nb, struct hvfb_par, hvfb_panic_nb);
par->synchronous_fb = true;
info = par->info;
synthvid_update(info);
hvfb_docopy(par, 0, dio_fb_size);
synthvid_update(info, 0, 0, INT_MAX, INT_MAX);
return NOTIFY_DONE;
}
@ -600,7 +863,10 @@ static void hvfb_cfb_fillrect(struct fb_info *p,
cfb_fillrect(p, rect);
if (par->synchronous_fb)
synthvid_update(p);
synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
else
hvfb_ondemand_refresh_throttle(par, rect->dx, rect->dy,
rect->width, rect->height);
}
static void hvfb_cfb_copyarea(struct fb_info *p,
@ -610,7 +876,10 @@ static void hvfb_cfb_copyarea(struct fb_info *p,
cfb_copyarea(p, area);
if (par->synchronous_fb)
synthvid_update(p);
synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
else
hvfb_ondemand_refresh_throttle(par, area->dx, area->dy,
area->width, area->height);
}
static void hvfb_cfb_imageblit(struct fb_info *p,
@ -620,7 +889,10 @@ static void hvfb_cfb_imageblit(struct fb_info *p,
cfb_imageblit(p, image);
if (par->synchronous_fb)
synthvid_update(p);
synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
else
hvfb_ondemand_refresh_throttle(par, image->dx, image->dy,
image->width, image->height);
}
static struct fb_ops hvfb_ops = {
@ -653,6 +925,8 @@ static void hvfb_get_option(struct fb_info *info)
}
if (x < HVFB_WIDTH_MIN || y < HVFB_HEIGHT_MIN ||
(synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) &&
(x > screen_width_max || y > screen_height_max)) ||
(par->synthvid_version == SYNTHVID_VERSION_WIN8 &&
x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8) ||
(par->synthvid_version == SYNTHVID_VERSION_WIN7 &&
@ -677,6 +951,9 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
resource_size_t pot_start, pot_end;
int ret;
dio_fb_size =
screen_width * screen_height * screen_depth / 8;
if (gen2vm) {
pot_start = 0;
pot_end = -1;
@ -689,8 +966,12 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
pci_resource_len(pdev, 0) < screen_fb_size)
pci_resource_len(pdev, 0) < screen_fb_size) {
pr_err("Resource not available or (0x%lx < 0x%lx)\n",
(unsigned long) pci_resource_len(pdev, 0),
(unsigned long) screen_fb_size);
goto err1;
}
pot_end = pci_resource_end(pdev, 0);
pot_start = pot_end - screen_fb_size + 1;
@ -707,9 +988,14 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
if (!fb_virt)
goto err2;
/* Allocate memory for deferred IO */
par->dio_vp = vzalloc(round_up(dio_fb_size, PAGE_SIZE));
if (par->dio_vp == NULL)
goto err3;
info->apertures = alloc_apertures(1);
if (!info->apertures)
goto err3;
goto err4;
if (gen2vm) {
info->apertures->ranges[0].base = screen_info.lfb_base;
@ -721,16 +1007,23 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
}
/* Physical address of FB device */
par->mmio_pp = par->mem->start;
/* Virtual address of FB device */
par->mmio_vp = (unsigned char *) fb_virt;
info->fix.smem_start = par->mem->start;
info->fix.smem_len = screen_fb_size;
info->screen_base = fb_virt;
info->screen_size = screen_fb_size;
info->fix.smem_len = dio_fb_size;
info->screen_base = par->dio_vp;
info->screen_size = dio_fb_size;
if (!gen2vm)
pci_dev_put(pdev);
return 0;
err4:
vfree(par->dio_vp);
err3:
iounmap(fb_virt);
err2:
@ -748,6 +1041,7 @@ static void hvfb_putmem(struct fb_info *info)
{
struct hvfb_par *par = info->par;
vfree(par->dio_vp);
iounmap(info->screen_base);
vmbus_free_mmio(par->mem->start, screen_fb_size);
par->mem = NULL;
@ -771,6 +1065,11 @@ static int hvfb_probe(struct hv_device *hdev,
init_completion(&par->wait);
INIT_DELAYED_WORK(&par->dwork, hvfb_update_work);
par->delayed_refresh = false;
spin_lock_init(&par->delayed_refresh_lock);
par->x1 = par->y1 = INT_MAX;
par->x2 = par->y2 = 0;
/* Connect to VSP */
hv_set_drvdata(hdev, info);
ret = synthvid_connect_vsp(hdev);
@ -779,17 +1078,16 @@ static int hvfb_probe(struct hv_device *hdev,
goto error1;
}
hvfb_get_option(info);
pr_info("Screen resolution: %dx%d, Color depth: %d\n",
screen_width, screen_height, screen_depth);
ret = hvfb_getmem(hdev, info);
if (ret) {
pr_err("No memory for framebuffer\n");
goto error2;
}
hvfb_get_option(info);
pr_info("Screen resolution: %dx%d, Color depth: %d\n",
screen_width, screen_height, screen_depth);
/* Set up fb_info */
info->flags = FBINFO_DEFAULT;
@ -823,6 +1121,10 @@ static int hvfb_probe(struct hv_device *hdev,
info->fbops = &hvfb_ops;
info->pseudo_palette = par->pseudo_palette;
/* Initialize deferred IO */
info->fbdefio = &synthvid_defio;
fb_deferred_io_init(info);
/* Send config to host */
ret = synthvid_send_config(hdev);
if (ret)
@ -844,6 +1146,7 @@ static int hvfb_probe(struct hv_device *hdev,
return 0;
error:
fb_deferred_io_cleanup(info);
hvfb_putmem(info);
error2:
vmbus_close(hdev->channel);
@ -866,6 +1169,8 @@ static int hvfb_remove(struct hv_device *hdev)
par->update = false;
par->fb_ready = false;
fb_deferred_io_cleanup(info);
unregister_framebuffer(info);
cancel_delayed_work_sync(&par->dwork);
@ -878,6 +1183,61 @@ static int hvfb_remove(struct hv_device *hdev)
return 0;
}
static int hvfb_suspend(struct hv_device *hdev)
{
struct fb_info *info = hv_get_drvdata(hdev);
struct hvfb_par *par = info->par;
console_lock();
/* 1 means do suspend */
fb_set_suspend(info, 1);
cancel_delayed_work_sync(&par->dwork);
par->update_saved = par->update;
par->update = false;
par->fb_ready = false;
vmbus_close(hdev->channel);
console_unlock();
return 0;
}
static int hvfb_resume(struct hv_device *hdev)
{
struct fb_info *info = hv_get_drvdata(hdev);
struct hvfb_par *par = info->par;
int ret;
console_lock();
ret = synthvid_connect_vsp(hdev);
if (ret != 0)
goto out;
ret = synthvid_send_config(hdev);
if (ret != 0) {
vmbus_close(hdev->channel);
goto out;
}
par->fb_ready = true;
par->update = par->update_saved;
schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);
/* 0 means do resume */
fb_set_suspend(info, 0);
out:
console_unlock();
return ret;
}
static const struct pci_device_id pci_stub_id_table[] = {
{
@ -901,6 +1261,8 @@ static struct hv_driver hvfb_drv = {
.id_table = id_table,
.probe = hvfb_probe,
.remove = hvfb_remove,
.suspend = hvfb_suspend,
.resume = hvfb_resume,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},


@ -166,10 +166,12 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
void hyperv_report_panic(struct pt_regs *regs, long err);
void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
void hyperv_cleanup(void);
void hv_setup_sched_clock(void *sched_clock);
#else /* CONFIG_HYPERV */
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
#endif /* CONFIG_HYPERV */


@ -182,19 +182,21 @@ static inline u32 hv_get_avail_to_write_percent(
* 2 . 4 (Windows 8)
* 3 . 0 (Windows 8 R2)
* 4 . 0 (Windows 10)
* 4 . 1 (Windows 10 RS3)
* 5 . 0 (Newer Windows 10)
* 5 . 1 (Windows 10 RS4)
* 5 . 2 (Windows Server 2019, RS5)
*/
#define VERSION_WS2008 ((0 << 16) | (13))
#define VERSION_WIN7 ((1 << 16) | (1))
#define VERSION_WIN8 ((2 << 16) | (4))
#define VERSION_WIN8_1 ((3 << 16) | (0))
#define VERSION_WIN10 ((4 << 16) | (0))
#define VERSION_WIN10 ((4 << 16) | (0))
#define VERSION_WIN10_V4_1 ((4 << 16) | (1))
#define VERSION_WIN10_V5 ((5 << 16) | (0))
#define VERSION_INVAL -1
#define VERSION_CURRENT VERSION_WIN10_V5
#define VERSION_WIN10_V5_1 ((5 << 16) | (1))
#define VERSION_WIN10_V5_2 ((5 << 16) | (2))
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
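
As a quick check on the version encoding above, the protocol version packs major.minor as (major << 16) | minor:

/*
 * VERSION_WIN10_V5_2 = (5 << 16) | 2 = 0x00050002
 *
 * vmbus_connect() reports this as "Vmbus version:5.2" by printing the
 * upper and lower 16 bits separately.
 */
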
@ -932,6 +934,21 @@ struct vmbus_channel {
* full outbound ring buffer.
*/
u64 out_full_first;
/* enabling/disabling fuzz testing on the channel (default is false)*/
bool fuzz_testing_state;
/*
* Interrupt delay will delay the guest from emptying the ring buffer
* for a specific amount of time. The delay is in microseconds and will
* be between 1 and a maximum of 1000; its default is 0 (no delay).
* The message delay will delay guest reading on a per-message basis,
* in microseconds between 1 and 1000, with the default being 0
* (no delay).
*/
u32 fuzz_testing_interrupt_delay;
u32 fuzz_testing_message_delay;
};
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
@ -1180,6 +1197,10 @@ struct hv_device {
struct vmbus_channel *channel;
struct kset *channels_kset;
/* place holder to keep track of the dir for hv device in debugfs */
struct dentry *debug_dir;
};


@ -2167,4 +2167,11 @@ config IO_STRICT_DEVMEM
source "arch/$(SRCARCH)/Kconfig.debug"
config HYPERV_TESTING
bool "Microsoft Hyper-V driver testing"
default n
depends on HYPERV && DEBUG_FS
help
Select this option to enable Hyper-V vmbus testing.
endmenu # Kernel hacking


@ -922,6 +922,24 @@ static int hvs_remove(struct hv_device *hdev)
return 0;
}
/* hv_sock connections can not persist across hibernation, and all the hv_sock
* channels are forced to be rescinded before hibernation: see
* vmbus_bus_suspend(). Here the dummy hvs_suspend() and hvs_resume()
* are only needed because hibernation requires that every vmbus device's
* driver should have a .suspend and .resume callback: see vmbus_suspend().
*/
static int hvs_suspend(struct hv_device *hv_dev)
{
/* Dummy */
return 0;
}
static int hvs_resume(struct hv_device *dev)
{
/* Dummy */
return 0;
}
/* This isn't really used. See vmbus_match() and vmbus_probe() */
static const struct hv_vmbus_device_id id_table[] = {
{},
@ -933,6 +951,8 @@ static struct hv_driver hvs_drv = {
.id_table = id_table,
.probe = hvs_probe,
.remove = hvs_remove,
.suspend = hvs_suspend,
.resume = hvs_resume,
};
static int __init hvs_init(void)

tools/hv/vmbus_testing Executable file

@ -0,0 +1,376 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# Program to allow users to fuzz test Hyper-V drivers
# by interfacing with Hyper-V debugfs attributes.
# Current test methods available:
# 1. delay testing
#
# Current file/directory structure of Hyper-V debugfs:
# /sys/kernel/debug/hyperv/UUID
# /sys/kernel/debug/hyperv/UUID/<test-state filename>
# /sys/kernel/debug/hyperv/UUID/<test-method sub-directory>
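# For the "delay" test method this expands to, for example (UUID is
# device specific):
# /sys/kernel/debug/hyperv/UUID/fuzz_test_state
# /sys/kernel/debug/hyperv/UUID/delay/fuzz_test_buffer_interrupt_delay
# /sys/kernel/debug/hyperv/UUID/delay/fuzz_test_message_delay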
#
# author: Branden Bonaby <brandonbonaby94@gmail.com>
import os
import cmd
import argparse
import glob
from argparse import RawDescriptionHelpFormatter
from argparse import RawTextHelpFormatter
from enum import Enum
# Do not change these unless you also change the debugfs attributes
# in /drivers/hv/debugfs.c. All fuzz-testing
# attributes start with "fuzz_test".
# debugfs path for hyperv must exist before proceeding
debugfs_hyperv_path = "/sys/kernel/debug/hyperv"
if not os.path.isdir(debugfs_hyperv_path):
print("{} doesn't exist/check permissions".format(debugfs_hyperv_path))
exit(-1)
class dev_state(Enum):
off = 0
on = 1
# File names that correspond to the files created in
# /drivers/hv/debugfs.c
class f_names(Enum):
state_f = "fuzz_test_state"
buff_f = "fuzz_test_buffer_interrupt_delay"
mess_f = "fuzz_test_message_delay"
# Both single_actions and all_actions are used
# for error checking and to allow some subparser
# names to be abbreviated. Do not abbreviate the
# test method names, as that makes it less obvious
# what the user can do. If you do decide to
# abbreviate a test method name, make sure the main
# function reflects this change.
all_actions = [
"disable_all",
"D",
"enable_all",
"view_all",
"V"
]
single_actions = [
"disable_single",
"d",
"enable_single",
"view_single",
"v"
]
def main():
file_map = recursive_file_lookup(debugfs_hyperv_path, dict())
args = parse_args()
if (not args.action):
print ("Error, no options selected...exiting")
exit(-1)
arg_set = { k for (k,v) in vars(args).items() if v and k != "action" }
arg_set.add(args.action)
path = args.path if "path" in arg_set else None
if (path and path[-1] == "/"):
path = path[:-1]
validate_args_path(path, arg_set, file_map)
if (path and "enable_single" in arg_set):
state_path = locate_state(path, file_map)
set_test_state(state_path, dev_state.on.value, args.quiet)
# Use subparsers as the key for different actions
if ("delay" in arg_set):
validate_delay_values(args.delay_time)
if (args.enable_all):
set_delay_all_devices(file_map, args.delay_time,
args.quiet)
else:
set_delay_values(path, file_map, args.delay_time,
args.quiet)
elif ("disable_all" in arg_set or "D" in arg_set):
disable_all_testing(file_map)
elif ("disable_single" in arg_set or "d" in arg_set):
disable_testing_single_device(path, file_map)
elif ("view_all" in arg_set or "V" in arg_set):
get_all_devices_test_status(file_map)
elif ("view_single" in arg_set or "v" in arg_set):
get_device_test_values(path, file_map)
# Get the state location
def locate_state(device, file_map):
return file_map[device][f_names.state_f.value]
# Validate delay values to make sure they are acceptable to
# enable delays on a device
def validate_delay_values(delay):
if (delay[0] == -1 and delay[1] == -1):
print("\nError, At least 1 value must be greater than 0")
exit(-1)
for i in delay:
if (i < -1 or i == 0 or i > 1000):
print("\nError, Values must be equal to -1 "
"or be > 0 and <= 1000")
exit(-1)
# Validate argument path
def validate_args_path(path, arg_set, file_map):
if (not path and any(element in arg_set for element in single_actions)):
print("Error, path (-p) REQUIRED for the specified option. "
"Use (-h) to check usage.")
exit(-1)
elif (path and any(item in arg_set for item in all_actions)):
print("Error, path (-p) NOT REQUIRED for the specified option. "
"Use (-h) to check usage." )
exit(-1)
elif (path not in file_map and any(item in arg_set
for item in single_actions)):
print("Error, path '{}' not a valid vmbus device".format(path))
exit(-1)
# Display the test values of a single device
def get_device_test_values(path, file_map):
for name in file_map[path]:
file_location = file_map[path][name]
print( name + " = " + str(read_test_files(file_location)))
# Create a map of the vmbus devices and their associated files
# [key=device, value = [key = filename, value = file path]]
def recursive_file_lookup(path, file_map):
for f_path in glob.iglob(path + '**/*'):
if (os.path.isfile(f_path)):
if (f_path.rsplit("/",2)[0] == debugfs_hyperv_path):
directory = f_path.rsplit("/",1)[0]
else:
directory = f_path.rsplit("/",2)[0]
f_name = f_path.split("/")[-1]
if (file_map.get(directory)):
file_map[directory].update({f_name:f_path})
else:
file_map[directory] = {f_name:f_path}
elif (os.path.isdir(f_path)):
recursive_file_lookup(f_path,file_map)
return file_map
# Display the test state of all devices
def get_all_devices_test_status(file_map):
for device in file_map:
if (get_test_state(locate_state(device, file_map)) == 1):
print("Testing = ON for: {}"
.format(device.split("/")[5]))
else:
print("Testing = OFF for: {}"
.format(device.split("/")[5]))
# Read a vmbus device file; path must be an absolute path before calling
def read_test_files(path):
try:
with open(path,"r") as f:
file_value = f.readline().strip()
return int(file_value)
except IOError as e:
errno, strerror = e.args
print("I/O error({0}): {1} on file {2}"
.format(errno, strerror, path))
exit(-1)
except ValueError:
print ("Element to int conversion error in: \n{}".format(path))
exit(-1)
# Write to a vmbus device file; path must be an absolute path before calling
def write_test_files(path, value):
try:
with open(path,"w") as f:
f.write("{}".format(value))
except IOError as e:
errno, strerror = e.args
print("I/O error({0}): {1} on file {2}"
.format(errno, strerror, path))
exit(-1)
# set testing state of device
def set_test_state(state_path, state_value, quiet):
write_test_files(state_path, state_value)
if (get_test_state(state_path) == 1):
if (not quiet):
print("Testing = ON for device: {}"
.format(state_path.split("/")[5]))
else:
if (not quiet):
print("Testing = OFF for device: {}"
.format(state_path.split("/")[5]))
# get testing state of device
def get_test_state(state_path):
#state == 1 - test = ON
#state == 0 - test = OFF
return read_test_files(state_path)
# Write a delay of 1 - 1000 microseconds into a single device using the
# fuzz_test_buffer_interrupt_delay and fuzz_test_message_delay
# debugfs attributes
def set_delay_values(device, file_map, delay_length, quiet):
try:
interrupt = file_map[device][f_names.buff_f.value]
message = file_map[device][f_names.mess_f.value]
# delay[0]- buffer interrupt delay, delay[1]- message delay
if (delay_length[0] >= 0 and delay_length[0] <= 1000):
write_test_files(interrupt, delay_length[0])
if (delay_length[1] >= 0 and delay_length[1] <= 1000):
write_test_files(message, delay_length[1])
if (not quiet):
print("Buffer delay testing = {} for: {}"
.format(read_test_files(interrupt),
interrupt.split("/")[5]))
print("Message delay testing = {} for: {}"
.format(read_test_files(message),
message.split("/")[5]))
except IOError as e:
errno, strerror = e.args
print("I/O error({0}): {1} on files {2}{3}"
.format(errno, strerror, interrupt, message))
exit(-1)
# enabling delay testing on all devices
def set_delay_all_devices(file_map, delay, quiet):
for device in (file_map):
set_test_state(locate_state(device, file_map),
dev_state.on.value,
quiet)
set_delay_values(device, file_map, delay, quiet)
# disable all testing on a SINGLE device.
def disable_testing_single_device(device, file_map):
for name in file_map[device]:
file_location = file_map[device][name]
write_test_files(file_location, dev_state.off.value)
print("ALL testing now OFF for {}".format(device.split("/")[-1]))
# disable all testing on ALL devices
def disable_all_testing(file_map):
for device in file_map:
disable_testing_single_device(device, file_map)
def parse_args():
parser = argparse.ArgumentParser(prog = "vmbus_testing",usage ="\n"
"%(prog)s [delay] [-h] [-e|-E] -t [-p]\n"
"%(prog)s [view_all | V] [-h]\n"
"%(prog)s [disable_all | D] [-h]\n"
"%(prog)s [disable_single | d] [-h|-p]\n"
"%(prog)s [view_single | v] [-h|-p]\n"
"%(prog)s --version\n",
description = "\nUse lsvmbus to get vmbus device type "
"information.\n" "\nThe debugfs root path is "
"/sys/kernel/debug/hyperv",
formatter_class = RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(dest = "action")
parser.add_argument("--version", action = "version",
version = '%(prog)s 0.1.0')
parser.add_argument("-q","--quiet", action = "store_true",
help = "silence none important test messages."
" This will only work when enabling testing"
" on a device.")
# Use the path parser to hold the --path attribute so it can
# be shared between subparsers. Also do the same for the state
# parser, as all testing methods will use --enable_all and
# enable_single.
path_parser = argparse.ArgumentParser(add_help=False)
path_parser.add_argument("-p","--path", metavar = "",
help = "Debugfs path to a vmbus device. The path "
"must be the absolute path to the device.")
state_parser = argparse.ArgumentParser(add_help=False)
state_group = state_parser.add_mutually_exclusive_group(required = True)
state_group.add_argument("-E", "--enable_all", action = "store_const",
const = "enable_all",
help = "Enable the specified test type "
"on ALL vmbus devices.")
state_group.add_argument("-e", "--enable_single",
action = "store_const",
const = "enable_single",
help = "Enable the specified test type on a "
"SINGLE vmbus device.")
parser_delay = subparsers.add_parser("delay",
parents = [state_parser, path_parser],
help = "Delay the ring buffer interrupt or the "
"ring buffer message reads in microseconds.",
prog = "vmbus_testing",
usage = "%(prog)s [-h]\n"
"%(prog)s -E -t [value] [value]\n"
"%(prog)s -e -t [value] [value] -p",
description = "Delay the ring buffer interrupt for "
"vmbus devices, or delay the ring buffer message "
"reads for vmbus devices (both in microseconds). This "
"is only on the host to guest channel.")
parser_delay.add_argument("-t", "--delay_time", metavar = "", nargs = 2,
type = check_range, default =[0,0], required = (True),
help = "Set [buffer] & [message] delay time. "
"Value constraints: -1 == value "
"or 0 < value <= 1000.\n"
"Use -1 to keep the previous value for that delay "
"type, or a value > 0 <= 1000 to change the delay "
"time.")
parser_dis_all = subparsers.add_parser("disable_all",
aliases = ['D'], prog = "vmbus_testing",
usage = "%(prog)s [disable_all | D] -h\n"
"%(prog)s [disable_all | D]\n",
help = "Disable ALL testing on ALL vmbus devices.",
description = "Disable ALL testing on ALL vmbus "
"devices.")
parser_dis_single = subparsers.add_parser("disable_single",
aliases = ['d'],
parents = [path_parser], prog = "vmbus_testing",
usage = "%(prog)s [disable_single | d] -h\n"
"%(prog)s [disable_single | d] -p\n",
help = "Disable ALL testing on a SINGLE vmbus device.",
description = "Disable ALL testing on a SINGLE vmbus "
"device.")
parser_view_all = subparsers.add_parser("view_all", aliases = ['V'],
help = "View the test state for ALL vmbus devices.",
prog = "vmbus_testing",
usage = "%(prog)s [view_all | V] -h\n"
"%(prog)s [view_all | V]\n",
description = "This shows the test state for ALL the "
"vmbus devices.")
parser_view_single = subparsers.add_parser("view_single",
aliases = ['v'],parents = [path_parser],
help = "View the test values for a SINGLE vmbus "
"device.",
description = "This shows the test values for a SINGLE "
"vmbus device.", prog = "vmbus_testing",
usage = "%(prog)s [view_single | v] -h\n"
"%(prog)s [view_single | v] -p")
return parser.parse_args()
# Range-check the numeric input given to the parser
def check_range(arg1):
try:
val = int(arg1)
except ValueError as err:
raise argparse.ArgumentTypeError(str(err))
if val < -1 or val > 1000:
message = ("\n\nvalue must be -1 or 0 < value <= 1000. "
"Value program received: {}\n").format(val)
raise argparse.ArgumentTypeError(message)
return val
if __name__ == "__main__":
main()
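
For instance, "vmbus_testing view_all" shows the test state of every vmbus device, "vmbus_testing delay -E -t 100 200" enables testing on all devices with a 100 microsecond buffer-interrupt delay and a 200 microsecond message delay, and "vmbus_testing disable_all" turns everything back off (the delay values here are only illustrative; any value in the documented 1 - 1000 range works).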