/*
 * VMware Detection code.
 *
 * Copyright (C) 2008, VMware, Inc.
 * Author : Alok N Kataria <akataria@vmware.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
|
|
|
|
|
|
|
|
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/clocksource.h>
#include <asm/div64.h>
#include <asm/x86_init.h>
#include <asm/hypervisor.h>
#include <asm/timer.h>
#include <asm/apic.h>
|
|
|
|
|
|
|
|
#undef pr_fmt
|
|
|
|
#define pr_fmt(fmt) "vmware: " fmt
|
2008-10-28 00:41:46 +07:00
|
|
|
|
|
|
|
#define CPUID_VMWARE_INFO_LEAF 0x40000000
|
|
|
|
#define VMWARE_HYPERVISOR_MAGIC 0x564D5868
|
|
|
|
#define VMWARE_HYPERVISOR_PORT 0x5658
|
|
|
|
|
|
|
|
#define VMWARE_PORT_CMD_GETVERSION 10
|
|
|
|
#define VMWARE_PORT_CMD_GETHZ 45
|
2013-01-18 06:44:42 +07:00
|
|
|
#define VMWARE_PORT_CMD_GETVCPU_INFO 68
|
|
|
|
#define VMWARE_PORT_CMD_LEGACY_X2APIC 3
|
|
|
|
#define VMWARE_PORT_CMD_VCPU_RESERVED 31
|
2008-10-28 00:41:46 +07:00
|
|
|
|
|
|
|
#define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \
|
|
|
|
__asm__("inl (%%dx)" : \
|
|
|
|
"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \
|
|
|
|
"0"(VMWARE_HYPERVISOR_MAGIC), \
|
|
|
|
"1"(VMWARE_PORT_CMD_##cmd), \
|
2008-11-04 02:31:28 +07:00
|
|
|
"2"(VMWARE_HYPERVISOR_PORT), "3"(UINT_MAX) : \
|
2008-10-28 00:41:46 +07:00
|
|
|
"memory");
|
|
|
|
|
2016-10-20 12:02:11 +07:00
|
|
|
static unsigned long vmware_tsc_khz __ro_after_init;
|
|
|
|
|
2008-10-28 00:41:46 +07:00
|
|
|
static inline int __vmware_platform(void)
|
|
|
|
{
|
|
|
|
uint32_t eax, ebx, ecx, edx;
|
|
|
|
VMWARE_PORT(GETVERSION, eax, ebx, ecx, edx);
|
|
|
|
return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC;
|
|
|
|
}
|
|
|
|
|
2009-08-20 22:06:25 +07:00
|
|
|
static unsigned long vmware_get_tsc_khz(void)
|
2008-10-28 00:41:46 +07:00
|
|
|
{
|
2016-10-20 12:02:11 +07:00
|
|
|
return vmware_tsc_khz;
|
2008-10-28 00:41:46 +07:00
|
|
|
}
|
|
|
|
|
2016-10-28 14:54:31 +07:00
|
|
|
#ifdef CONFIG_PARAVIRT
|
2016-10-28 14:54:32 +07:00
|
|
|
static struct cyc2ns_data vmware_cyc2ns __ro_after_init;
|
|
|
|
static int vmw_sched_clock __initdata = 1;
|
2008-10-28 00:41:46 +07:00
|
|
|
|
2016-10-28 14:54:32 +07:00
|
|
|
static __init int setup_vmw_sched_clock(char *s)
|
|
|
|
{
|
|
|
|
vmw_sched_clock = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
early_param("no-vmw-sched-clock", setup_vmw_sched_clock);
|
|
|
|
|
|
|
|
static unsigned long long vmware_sched_clock(void)
|
|
|
|
{
|
|
|
|
unsigned long long ns;
|
2010-08-03 06:10:37 +07:00
|
|
|
|
2016-10-28 14:54:32 +07:00
|
|
|
ns = mul_u64_u32_shr(rdtsc(), vmware_cyc2ns.cyc2ns_mul,
|
|
|
|
vmware_cyc2ns.cyc2ns_shift);
|
|
|
|
ns -= vmware_cyc2ns.cyc2ns_offset;
|
|
|
|
return ns;
|
2008-10-28 00:41:46 +07:00
|
|
|
}
|
|
|
|
|
2016-10-28 14:54:32 +07:00
|
|
|
static void __init vmware_sched_clock_setup(void)
|
|
|
|
{
|
|
|
|
struct cyc2ns_data *d = &vmware_cyc2ns;
|
|
|
|
unsigned long long tsc_now = rdtsc();
|
|
|
|
|
|
|
|
clocks_calc_mult_shift(&d->cyc2ns_mul, &d->cyc2ns_shift,
|
|
|
|
vmware_tsc_khz, NSEC_PER_MSEC, 0);
|
|
|
|
d->cyc2ns_offset = mul_u64_u32_shr(tsc_now, d->cyc2ns_mul,
|
|
|
|
d->cyc2ns_shift);
|
|
|
|
|
|
|
|
pv_time_ops.sched_clock = vmware_sched_clock;
|
|
|
|
pr_info("using sched offset of %llu ns\n", d->cyc2ns_offset);
|
|
|
|
}
|
|
|
|
|
2016-10-28 14:54:31 +07:00
|
|
|
static void __init vmware_paravirt_ops_setup(void)
|
|
|
|
{
|
|
|
|
pv_info.name = "VMware hypervisor";
|
|
|
|
pv_cpu_ops.io_delay = paravirt_nop;
|
2016-10-28 14:54:32 +07:00
|
|
|
|
|
|
|
if (vmware_tsc_khz && vmw_sched_clock)
|
|
|
|
vmware_sched_clock_setup();
|
2016-10-28 14:54:31 +07:00
|
|
|
}
|
|
|
|
#else
|
|
|
|
#define vmware_paravirt_ops_setup() do {} while (0)
|
|
|
|
#endif
|
|
|
|
|
2010-05-08 06:57:28 +07:00
|
|
|
static void __init vmware_platform_setup(void)
|
2009-08-20 22:06:25 +07:00
|
|
|
{
|
|
|
|
uint32_t eax, ebx, ecx, edx;
|
2016-10-20 12:02:11 +07:00
|
|
|
uint64_t lpj, tsc_khz;
|
2009-08-20 22:06:25 +07:00
|
|
|
|
|
|
|
VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
|
|
|
|
|
2016-10-05 03:11:48 +07:00
|
|
|
if (ebx != UINT_MAX) {
|
2016-10-20 12:02:11 +07:00
|
|
|
lpj = tsc_khz = eax | (((uint64_t)ebx) << 32);
|
|
|
|
do_div(tsc_khz, 1000);
|
|
|
|
WARN_ON(tsc_khz >> 32);
|
|
|
|
pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n",
|
|
|
|
(unsigned long) tsc_khz / 1000,
|
|
|
|
(unsigned long) tsc_khz % 1000);
|
|
|
|
|
|
|
|
if (!preset_lpj) {
|
|
|
|
do_div(lpj, HZ);
|
|
|
|
preset_lpj = lpj;
|
|
|
|
}
|
|
|
|
|
|
|
|
vmware_tsc_khz = tsc_khz;
|
2009-08-20 22:06:25 +07:00
|
|
|
x86_platform.calibrate_tsc = vmware_get_tsc_khz;
|
2016-10-28 14:54:30 +07:00
|
|
|
x86_platform.calibrate_cpu = vmware_get_tsc_khz;
|
2016-10-20 12:02:11 +07:00
|
|
|
|
2016-10-05 03:11:48 +07:00
|
|
|
#ifdef CONFIG_X86_LOCAL_APIC
|
|
|
|
/* Skip lapic calibration since we know the bus frequency. */
|
|
|
|
lapic_timer_frequency = ecx / HZ;
|
|
|
|
pr_info("Host bus clock speed read from hypervisor : %u Hz\n",
|
|
|
|
ecx);
|
|
|
|
#endif
|
|
|
|
} else {
|
2016-02-02 10:45:02 +07:00
|
|
|
pr_warn("Failed to get TSC freq from the hypervisor\n");
|
2016-10-05 03:11:48 +07:00
|
|
|
}
|
2016-10-14 01:45:39 +07:00
|
|
|
|
2016-10-28 14:54:31 +07:00
|
|
|
vmware_paravirt_ops_setup();
|
2016-12-13 06:29:06 +07:00
|
|
|
|
2016-10-14 01:45:39 +07:00
|
|
|
#ifdef CONFIG_X86_IO_APIC
|
|
|
|
no_timer_check = 1;
|
|
|
|
#endif
|
2009-08-20 22:06:25 +07:00
|
|
|
}
|
|
|
|
|
2008-11-04 06:50:38 +07:00
|
|
|
/*
 * While checking the dmi string information, just checking the product
 * serial key should be enough, as this will always have a VMware
 * specific string when running under VMware hypervisor.
 */
|
2013-07-25 15:54:35 +07:00
|
|
|
static uint32_t __init vmware_platform(void)
|
2008-10-28 00:41:46 +07:00
|
|
|
{
|
2016-03-29 22:41:55 +07:00
|
|
|
if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
|
2010-05-08 06:57:28 +07:00
|
|
|
unsigned int eax;
|
|
|
|
unsigned int hyper_vendor_id[3];
|
|
|
|
|
|
|
|
cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0],
|
|
|
|
&hyper_vendor_id[1], &hyper_vendor_id[2]);
|
|
|
|
if (!memcmp(hyper_vendor_id, "VMwareVMware", 12))
|
2013-07-25 15:54:35 +07:00
|
|
|
return CPUID_VMWARE_INFO_LEAF;
|
2008-11-04 06:50:38 +07:00
|
|
|
} else if (dmi_available && dmi_name_in_serial("VMware") &&
|
2008-10-28 00:41:46 +07:00
|
|
|
__vmware_platform())
|
2013-07-25 15:54:35 +07:00
|
|
|
return 1;
|
2008-10-28 00:41:46 +07:00
|
|
|
|
2013-07-25 15:54:35 +07:00
|
|
|
return 0;
|
2008-10-28 00:41:46 +07:00
|
|
|
}
|
|
|
|
|
2008-11-01 02:01:58 +07:00
|
|
|
/*
 * VMware hypervisor takes care of exporting a reliable TSC to the guest.
 * Still, due to timing difference when running on virtual cpus, the TSC can
 * be marked as unstable in some cases. For example, the TSC sync check at
 * bootup can fail due to a marginal offset between vcpus' TSCs (though the
 * TSCs do not drift from each other). Also, the ACPI PM timer clocksource
 * is not suitable as a watchdog when running on a hypervisor because the
 * kernel may miss a wrap of the counter if the vcpu is descheduled for a
 * long time. To skip these checks at runtime we set these capability bits,
 * so that the kernel could just trust the hypervisor with providing a
 * reliable virtual TSC that is suitable for timekeeping.
 */
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-19 05:23:59 +07:00
|
|
|
static void vmware_set_cpu_features(struct cpuinfo_x86 *c)
|
2008-11-01 02:01:58 +07:00
|
|
|
{
|
|
|
|
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
|
|
|
|
set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
|
|
|
|
}
|
2010-05-08 06:57:28 +07:00
|
|
|
|
2013-01-18 06:44:42 +07:00
|
|
|
/* Checks if hypervisor supports x2apic without VT-D interrupt remapping. */
|
|
|
|
static bool __init vmware_legacy_x2apic_available(void)
|
|
|
|
{
|
|
|
|
uint32_t eax, ebx, ecx, edx;
|
|
|
|
VMWARE_PORT(GETVCPU_INFO, eax, ebx, ecx, edx);
|
|
|
|
return (eax & (1 << VMWARE_PORT_CMD_VCPU_RESERVED)) == 0 &&
|
|
|
|
(eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0;
|
|
|
|
}
|
|
|
|
|
2010-05-08 06:57:28 +07:00
|
|
|
const __refconst struct hypervisor_x86 x86_hyper_vmware = {
|
|
|
|
.name = "VMware",
|
|
|
|
.detect = vmware_platform,
|
|
|
|
.set_cpu_features = vmware_set_cpu_features,
|
|
|
|
.init_platform = vmware_platform_setup,
|
2013-01-18 06:44:42 +07:00
|
|
|
.x2apic_available = vmware_legacy_x2apic_available,
|
2010-05-08 06:57:28 +07:00
|
|
|
};
|
2010-05-09 15:10:34 +07:00
|
|
|
EXPORT_SYMBOL(x86_hyper_vmware);
|