commit 6a45a65888
Merge tag 'x86-urgent-2020-06-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull more x86 updates from Thomas Gleixner:
 "A set of fixes and updates for x86:

   - Unbreak paravirt VDSO clocks. While the VDSO code was moved into
     lib for sharing, a subtle check for the validity of paravirt
     clocks got replaced. The replacement works perfectly fine for bare
     metal, as the update of the VDSO clock mode is synchronous, but it
     fails for paravirt clocks because the hypervisor can invalidate
     them asynchronously. Bring it back as an optional function so it
     does not inflict this on architectures which are free of PV
     damage.

   - Fix the jiffies to jiffies64 mapping on 64bit so it does not
     trigger an ODR violation on newer compilers

   - Three fixes for the SSBD and *IB* speculation mitigation maze to
     ensure consistency, to avoid wrongly disabling some *IB* variants,
     and to prevent a rogue cross-process shutdown of SSBD. All marked
     for stable.

   - Add yet more CPU models to the split lock detection capable list

   - Bring back the pr_info() which tells that the TSC deadline timer
     is enabled

   - Reboot quirk for MacBook6,1"

* tag 'x86-urgent-2020-06-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/vdso: Unbreak paravirt VDSO clocks
  lib/vdso: Provide sanity check for cycles (again)
  clocksource: Remove obsolete ifdef
  x86_64: Fix jiffies ODR violation
  x86/speculation: PR_SPEC_FORCE_DISABLE enforcement for indirect branches.
  x86/speculation: Prevent rogue cross-process SSBD shutdown
  x86/speculation: Avoid force-disabling IBPB based on STIBP and enhanced IBRS.
  x86/cpu: Add Sapphire Rapids CPU model number
  x86/split_lock: Add Icelake microserver and Tigerlake CPU models
  x86/apic: Make TSC deadline timer detection message visible
  x86/reboot/quirks: Add MacBook6,1 reboot quirk
arch/x86/include/asm/intel-family.h
@@ -89,6 +89,8 @@
 #define INTEL_FAM6_COMETLAKE		0xA5
 #define INTEL_FAM6_COMETLAKE_L		0xA6
 
+#define INTEL_FAM6_SAPPHIRERAPIDS_X	0x8F
+
 /* "Small Core" Processors (Atom) */
 
 #define INTEL_FAM6_ATOM_BONNELL	0x1C /* Diamondville, Pineview */
arch/x86/include/asm/vdso/gettimeofday.h
@@ -271,6 +271,24 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
 	return __vdso_data;
 }
 
+static inline bool arch_vdso_clocksource_ok(const struct vdso_data *vd)
+{
+	return true;
+}
+#define vdso_clocksource_ok arch_vdso_clocksource_ok
+
+/*
+ * Clocksource read value validation to handle PV and HyperV clocksources
+ * which can be invalidated asynchronously and indicate invalidation by
+ * returning U64_MAX, which can be effectively tested by checking for a
+ * negative value after casting it to s64.
+ */
+static inline bool arch_vdso_cycles_ok(u64 cycles)
+{
+	return (s64)cycles >= 0;
+}
+#define vdso_cycles_ok arch_vdso_cycles_ok
+
 /*
  * x86 specific delta calculation.
  *
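The comment in this hunk is the core of the fix: a PV or Hyper-V clocksource that the hypervisor has invalidated reads back U64_MAX, and a single signed comparison rejects it. A standalone userspace sketch of that sentinel check (plain C with illustrative names, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Mirrors the (s64)cycles >= 0 test in arch_vdso_cycles_ok(). */
static int cycles_ok(uint64_t cycles)
{
	return (int64_t)cycles >= 0;
}

int main(void)
{
	printf("%d\n", cycles_ok(12345));           /* 1: normal reading         */
	printf("%d\n", cycles_ok(UINT64_MAX));      /* 0: invalidation sentinel  */
	printf("%d\n", cycles_ok(UINT64_MAX >> 1)); /* 1: largest accepted value */
	return 0;
}

The cost is one comparison, and it halves the usable counter range, which is harmless for real TSC readings.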
arch/x86/kernel/apic/apic.c
@@ -2060,7 +2060,7 @@ void __init init_apic_mappings(void)
 	unsigned int new_apicid;
 
 	if (apic_validate_deadline_timer())
-		pr_debug("TSC deadline timer available\n");
+		pr_info("TSC deadline timer available\n");
 
 	if (x2apic_mode) {
 		boot_cpu_physical_apicid = read_apic_id();
arch/x86/kernel/cpu/bugs.c
@@ -588,7 +588,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline);
 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
 	SPECTRE_V2_NONE;
 
-static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
+static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
+	SPECTRE_V2_USER_NONE;
+static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
 	SPECTRE_V2_USER_NONE;
 
 #ifdef CONFIG_RETPOLINE
arch/x86/kernel/cpu/bugs.c
@@ -734,15 +736,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 		break;
 	}
 
-	/*
-	 * At this point, an STIBP mode other than "off" has been set.
-	 * If STIBP support is not being forced, check if STIBP always-on
-	 * is preferred.
-	 */
-	if (mode != SPECTRE_V2_USER_STRICT &&
-	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
-		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
-
 	/* Initialize Indirect Branch Prediction Barrier */
 	if (boot_cpu_has(X86_FEATURE_IBPB)) {
 		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
arch/x86/kernel/cpu/bugs.c
@@ -765,23 +758,36 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
 			static_key_enabled(&switch_mm_always_ibpb) ?
 			"always-on" : "conditional");
+
+		spectre_v2_user_ibpb = mode;
 	}
 
-	/* If enhanced IBRS is enabled no STIBP required */
-	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+	/*
+	 * If enhanced IBRS is enabled or SMT impossible, STIBP is not
+	 * required.
+	 */
+	if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
 		return;
 
 	/*
-	 * If SMT is not possible or STIBP is not available clear the STIBP
-	 * mode.
+	 * At this point, an STIBP mode other than "off" has been set.
+	 * If STIBP support is not being forced, check if STIBP always-on
+	 * is preferred.
+	 */
+	if (mode != SPECTRE_V2_USER_STRICT &&
+	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
+		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+
+	/*
+	 * If STIBP is not available, clear the STIBP mode.
 	 */
-	if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
+	if (!boot_cpu_has(X86_FEATURE_STIBP))
 		mode = SPECTRE_V2_USER_NONE;
+
+	spectre_v2_user_stibp = mode;
+
 set_mode:
-	spectre_v2_user = mode;
-	/* Only print the STIBP mode when SMT possible */
-	if (smt_possible)
-		pr_info("%s\n", spectre_v2_user_strings[mode]);
+	pr_info("%s\n", spectre_v2_user_strings[mode]);
 }
 
 static const char * const spectre_v2_strings[] = {
arch/x86/kernel/cpu/bugs.c
@@ -1014,7 +1020,7 @@ void cpu_bugs_smt_update(void)
 {
 	mutex_lock(&spec_ctrl_mutex);
 
-	switch (spectre_v2_user) {
+	switch (spectre_v2_user_stibp) {
 	case SPECTRE_V2_USER_NONE:
 		break;
 	case SPECTRE_V2_USER_STRICT:
arch/x86/kernel/cpu/bugs.c
@@ -1257,14 +1263,19 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
 {
 	switch (ctrl) {
 	case PR_SPEC_ENABLE:
-		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
 			return 0;
 		/*
 		 * Indirect branch speculation is always disabled in strict
-		 * mode.
+		 * mode. It can neither be enabled if it was force-disabled
+		 * by a previous prctl call.
+
 		 */
-		if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
-		    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
+		    task_spec_ib_force_disable(task))
 			return -EPERM;
 		task_clear_spec_ib_disable(task);
 		task_update_spec_tif(task);
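A hedged userspace sketch of what this hunk enforces: once a task force-disables indirect-branch speculation, a later PR_SPEC_ENABLE must fail with EPERM. The constants are the documented ones from <linux/prctl.h>; the observed outcome depends on kernel version, CPU and mitigation mode:

#include <errno.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Latch the force-disable bit for this task. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		  PR_SPEC_FORCE_DISABLE, 0, 0))
		perror("PR_SPEC_FORCE_DISABLE");

	/* Re-enabling must now be rejected. */
	errno = 0;
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		  PR_SPEC_ENABLE, 0, 0) && errno == EPERM)
		puts("re-enable rejected with EPERM, as the patch intends");
	return 0;
}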
arch/x86/kernel/cpu/bugs.c
@@ -1275,10 +1286,12 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
 		 * Indirect branch speculation is always allowed when
 		 * mitigation is force disabled.
 		 */
-		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
 			return -EPERM;
-		if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
-		    spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
 			return 0;
 		task_set_spec_ib_disable(task);
 		if (ctrl == PR_SPEC_FORCE_DISABLE)
arch/x86/kernel/cpu/bugs.c
@@ -1309,7 +1322,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
 {
 	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
 		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
-	if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
+	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
 		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
 }
 #endif
arch/x86/kernel/cpu/bugs.c
@@ -1340,22 +1354,24 @@ static int ib_prctl_get(struct task_struct *task)
 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
 		return PR_SPEC_NOT_AFFECTED;
 
-	switch (spectre_v2_user) {
-	case SPECTRE_V2_USER_NONE:
+	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+	    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
 		return PR_SPEC_ENABLE;
-	case SPECTRE_V2_USER_PRCTL:
-	case SPECTRE_V2_USER_SECCOMP:
+	else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+		return PR_SPEC_DISABLE;
+	else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
+	    spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+	    spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
+	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
 		if (task_spec_ib_force_disable(task))
 			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
 		if (task_spec_ib_disable(task))
 			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
 		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
-	case SPECTRE_V2_USER_STRICT:
-	case SPECTRE_V2_USER_STRICT_PREFERRED:
-		return PR_SPEC_DISABLE;
-	default:
+	} else
 		return PR_SPEC_NOT_AFFECTED;
-	}
 }
 
 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
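For reference, the value ib_prctl_get() computes is what userspace reads back through prctl(). A minimal query sketch, assuming a kernel that exposes this interface:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	long s = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);

	if (s < 0) {
		perror("prctl");
		return 1;
	}
	if (s == PR_SPEC_NOT_AFFECTED)
		puts("not affected");
	if (s & PR_SPEC_PRCTL)
		puts("per-task control available");
	if (s & PR_SPEC_FORCE_DISABLE)
		puts("force-disabled");
	else if (s & PR_SPEC_DISABLE)
		puts("disabled");
	else if (s & PR_SPEC_ENABLE)
		puts("enabled");
	return 0;
}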
arch/x86/kernel/cpu/bugs.c
@@ -1594,7 +1610,7 @@ static char *stibp_state(void)
 	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
 		return "";
 
-	switch (spectre_v2_user) {
+	switch (spectre_v2_user_stibp) {
 	case SPECTRE_V2_USER_NONE:
 		return ", STIBP: disabled";
 	case SPECTRE_V2_USER_STRICT:
arch/x86/kernel/cpu/intel.c
@@ -1142,9 +1142,12 @@ void switch_to_sld(unsigned long tifn)
 static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		0),
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		0),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		0),
 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,	1),
 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	1),
 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,	1),
+	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		1),
+	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		1),
 	{}
 };
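For context, what the listed CPUs can detect: a locked read-modify-write whose operand straddles a cache-line boundary. The hedged userspace demo below deliberately constructs such an access; on a kernel and CPU with split_lock_detect enabled it raises #AC, and depending on the mode the task is warned about or killed with SIGBUS. The 64-byte cache-line size is an illustrative assumption:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 128 bytes, 64-byte aligned: offset 62 straddles lines 0 and 1. */
	static _Alignas(64) unsigned char buf[128];
	uint32_t *split = (uint32_t *)(buf + 62);

	/* Emits a lock-prefixed xadd on x86; the misaligned operand makes
	 * it a split lock. In "warn" mode this completes and logs; in
	 * "fatal" mode the process dies before the printf. */
	__atomic_fetch_add(split, 1, __ATOMIC_SEQ_CST);
	printf("completed: %u\n", *split);
	return 0;
}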
arch/x86/kernel/process.c
@@ -545,28 +545,20 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
 
 	lockdep_assert_irqs_disabled();
 
-	/*
-	 * If TIF_SSBD is different, select the proper mitigation
-	 * method. Note that if SSBD mitigation is disabled or permanentely
-	 * enabled this branch can't be taken because nothing can set
-	 * TIF_SSBD.
-	 */
-	if (tif_diff & _TIF_SSBD) {
-		if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+	/* Handle change of TIF_SSBD depending on the mitigation method. */
+	if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+		if (tif_diff & _TIF_SSBD)
 			amd_set_ssb_virt_state(tifn);
-		} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+	} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+		if (tif_diff & _TIF_SSBD)
 			amd_set_core_ssb_state(tifn);
-		} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
-			   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
-			msr |= ssbd_tif_to_spec_ctrl(tifn);
-			updmsr = true;
-		}
+	} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+		   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+		updmsr |= !!(tif_diff & _TIF_SSBD);
+		msr |= ssbd_tif_to_spec_ctrl(tifn);
 	}
 
-	/*
-	 * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
-	 * otherwise avoid the MSR write.
-	 */
+	/* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
 	if (IS_ENABLED(CONFIG_SMP) &&
 	    static_branch_unlikely(&switch_to_cond_stibp)) {
 		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
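The key change above is that msr and updmsr are now derived independently, so an MSR write triggered only by TIF_SPEC_IB can no longer carry a stale SSBD bit and shut down another task's SSBD. For intuition, a minimal sketch of the ssbd_tif_to_spec_ctrl() bit relocation it relies on; the bit positions (TIF_SSBD = 5, SPEC_CTRL.SSBD = bit 2) are restated here from the kernel headers as assumptions, not taken from this diff:

#include <stdio.h>
#include <stdint.h>

#define TIF_SSBD             5                  /* thread-info flag bit  */
#define _TIF_SSBD            (1UL << TIF_SSBD)
#define SPEC_CTRL_SSBD_SHIFT 2                  /* IA32_SPEC_CTRL.SSBD   */

/* Relocate the TIF_SSBD flag bit into the SPEC_CTRL.SSBD position. */
static uint64_t ssbd_tif_to_spec_ctrl(uint64_t tifn)
{
	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)ssbd_tif_to_spec_ctrl(_TIF_SSBD)); /* 0x4 */
	printf("%#llx\n", (unsigned long long)ssbd_tif_to_spec_ctrl(0));         /* 0x0 */
	return 0;
}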
arch/x86/kernel/reboot.c
@@ -197,6 +197,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
 		},
 	},
+	{	/* Handle problems with rebooting on Apple MacBook6,1 */
+		.callback = set_pci_reboot,
+		.ident = "Apple MacBook6,1",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
+		},
+	},
 	{	/* Handle problems with rebooting on Apple MacBookPro5 */
 		.callback = set_pci_reboot,
 		.ident = "Apple MacBookPro5",
arch/x86/kernel/time.c
@@ -25,10 +25,6 @@
 #include <asm/hpet.h>
 #include <asm/time.h>
 
-#ifdef CONFIG_X86_64
-__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
-#endif
-
 unsigned long profile_pc(struct pt_regs *regs)
 {
 	unsigned long pc = instruction_pointer(regs);
arch/x86/kernel/vmlinux.lds.S
@@ -40,13 +40,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)
 #ifdef CONFIG_X86_32
 OUTPUT_ARCH(i386)
 ENTRY(phys_startup_32)
-jiffies = jiffies_64;
 #else
 OUTPUT_ARCH(i386:x86-64)
 ENTRY(phys_startup_64)
-jiffies_64 = jiffies;
 #endif
 
+jiffies = jiffies_64;
+
 #if defined(CONFIG_X86_64)
 /*
  * On 64-bit, align RODATA to 2MB so we retain large page mappings for
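Together with the time.c hunk above, this leaves exactly one definition: jiffies is now a linker-script alias of jiffies_64 on both 32-bit and 64-bit (little-endian layout means the alias sees the low bits), instead of a second C-level definition that newer compilers flag as an ODR violation. A userland analogue of such a symbol alias, a sketch assuming GCC or Clang on x86-64:

#include <stdio.h>
#include <stdint.h>

/* The 64-bit counter, standing in for jiffies_64. */
uint64_t jiffies_64 = 0x123456789abcdef0ULL;

/* One definition, one alias: 'jiffies' names the same storage, much as
 * "jiffies = jiffies_64;" aliases the symbols in the linker script.
 * Defining jiffies separately in C as well is what created the ODR
 * violation the patch removes. */
extern unsigned long jiffies __attribute__((alias("jiffies_64")));

int main(void)
{
	jiffies_64++;
	printf("jiffies_64 = %#llx\n", (unsigned long long)jiffies_64);
	printf("jiffies    = %#lx\n", jiffies);
	return 0;
}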
kernel/time/clocksource.c
@@ -928,14 +928,12 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 
 	clocksource_arch_init(cs);
 
-#ifdef CONFIG_GENERIC_VDSO_CLOCK_MODE
 	if (cs->vdso_clock_mode < 0 ||
 	    cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
 		pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
 			cs->name, cs->vdso_clock_mode);
 		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
 	}
-#endif
 
 	/* Initialize mult/shift and max_idle_ns */
 	__clocksource_update_freq_scale(cs, scale, freq);
lib/vdso/gettimeofday.c
@@ -38,6 +38,13 @@ static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
 }
 #endif
 
+#ifndef vdso_cycles_ok
+static inline bool vdso_cycles_ok(u64 cycles)
+{
+	return true;
+}
+#endif
+
 #ifdef CONFIG_TIME_NS
 static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
 			  struct __kernel_timespec *ts)
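The #ifndef/#define pair above is the generic-versus-arch override idiom used throughout lib/vdso: the common code compiles a trivial default unless the architecture header already supplied its own vdso_cycles_ok. A self-contained sketch of the idiom (identifiers are illustrative, not kernel names):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* --- what an arch header would provide; x86 supplies a real check --- */
static inline bool arch_cycles_ok(uint64_t cycles)
{
	return (int64_t)cycles >= 0;
}
#define cycles_ok arch_cycles_ok
/* --- end of "arch header" --- */

/* Generic fallback: compiled only when no arch override exists. */
#ifndef cycles_ok
static inline bool cycles_ok(uint64_t cycles)
{
	return true;
}
#endif

int main(void)
{
	printf("%d\n", cycles_ok(UINT64_MAX)); /* 0 here, since the override is in effect */
	return 0;
}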
lib/vdso/gettimeofday.c
@@ -62,6 +69,8 @@ static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
 		return -1;
 
 	cycles = __arch_get_hw_counter(vd->clock_mode);
+	if (unlikely(!vdso_cycles_ok(cycles)))
+		return -1;
 	ns = vdso_ts->nsec;
 	last = vd->cycle_last;
 	ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
lib/vdso/gettimeofday.c
@@ -130,6 +139,8 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
 		return -1;
 
 	cycles = __arch_get_hw_counter(vd->clock_mode);
+	if (unlikely(!vdso_cycles_ok(cycles)))
+		return -1;
 	ns = vdso_ts->nsec;
 	last = vd->cycle_last;
 	ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);