mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-22 19:24:27 +07:00
833f32d763
Currently, leapsecond adjustments are done at tick time. As a result, the leapsecond was applied at the first timer tick *after* the leapsecond (~1-10ms late depending on HZ), rather than exactly on the second edge. This was in part historical from back when we were always tick based, but correcting it has since been avoided because it adds extra conditional checks in the gettime fastpath, which has performance overhead. However, it was recently pointed out that ABS_TIME CLOCK_REALTIME timers set for right after the leapsecond could fire a second early, since some timers may be expired before we trigger the timekeeping timer, which then applies the leapsecond. This isn't quite as bad as it sounds, since behaviorally it is similar to what is possible w/ ntpd-made leapsecond adjustments done w/o using the kernel discipline, where due to latencies, timers may fire just prior to the settimeofday call. (Also, one should note that all applications using CLOCK_REALTIME timers should always be careful, since they are prone to quirks from settimeofday() disturbances.) However, the purpose of having the kernel do the leap adjustment is to avoid such latencies, so I think this is worth fixing. So in order to properly keep those timers from firing a second early, this patch modifies the ntp and timekeeping logic so that we keep enough state so that the update_base_offsets_now accessor, which provides the hrtimer core the current time, can check and apply the leapsecond adjustment on the second edge. This prevents the hrtimer core from expiring timers too early. This patch does not modify any other time read path, so no additional overhead is incurred. However, this also means that the leap-second continues to be applied at tick time for all other read-paths. Apologies to Richard Cochran, who pushed for similar changes years ago, which I resisted due to the concerns about the performance overhead. 
While I suspect this isn't extremely critical, folks who care about strict leap-second correctness will likely want to watch this. Potentially a -stable candidate eventually. Originally-suggested-by: Richard Cochran <richardcochran@gmail.com> Reported-by: Daniel Bristot de Oliveira <bristot@redhat.com> Reported-by: Prarit Bhargava <prarit@redhat.com> Signed-off-by: John Stultz <john.stultz@linaro.org> Cc: Richard Cochran <richardcochran@gmail.com> Cc: Jan Kara <jack@suse.cz> Cc: Jiri Bohac <jbohac@suse.cz> Cc: Shuah Khan <shuahkh@osg.samsung.com> Cc: Ingo Molnar <mingo@kernel.org> Link: http://lkml.kernel.org/r/1434063297-28657-4-git-send-email-john.stultz@linaro.org Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
151 lines
5.0 KiB
C
151 lines
5.0 KiB
C
/*
|
|
* You SHOULD NOT be including this unless you're vsyscall
|
|
* handling code or timekeeping internal code!
|
|
*/
|
|
|
|
#ifndef _LINUX_TIMEKEEPER_INTERNAL_H
|
|
#define _LINUX_TIMEKEEPER_INTERNAL_H
|
|
|
|
#include <linux/clocksource.h>
|
|
#include <linux/jiffies.h>
|
|
#include <linux/time.h>
|
|
|
|
/**
 * struct tk_read_base - base structure for timekeeping readout
 * @clock:	Current clocksource used for timekeeping.
 * @read:	Read function of @clock
 * @mask:	Bitmask for two's complement subtraction of non 64bit clocks
 * @cycle_last: @clock cycle value at last update
 * @mult:	(NTP adjusted) multiplier for scaled math conversion
 * @shift:	Shift value for scaled math conversion
 * @xtime_nsec:	Shifted (fractional) nano seconds offset for readout
 * @base:	ktime_t (nanoseconds) base time for readout
 *
 * This struct has size 56 byte on 64 bit. Together with a seqcount it
 * occupies a single 64byte cache line.
 *
 * The struct is separate from struct timekeeper as it is also used
 * for the fast NMI safe accessors.
 *
 * NOTE(review): field order is deliberate — do not reorder, it is tuned
 * so the whole structure plus a seqcount fits one cache line.
 */
struct tk_read_base {
	struct clocksource	*clock;
	cycle_t			(*read)(struct clocksource *cs);
	cycle_t			mask;
	cycle_t			cycle_last;
	u32			mult;
	u32			shift;
	u64			xtime_nsec;
	ktime_t			base;
};
|
|
|
|
/**
 * struct timekeeper - Structure holding internal timekeeping values.
 * @tkr_mono:		The readout base structure for CLOCK_MONOTONIC
 * @tkr_raw:		The readout base structure for CLOCK_MONOTONIC_RAW
 * @xtime_sec:		Current CLOCK_REALTIME time in seconds
 * @ktime_sec:		Current CLOCK_MONOTONIC time in seconds
 * @wall_to_monotonic:	CLOCK_REALTIME to CLOCK_MONOTONIC offset
 * @offs_real:		Offset clock monotonic -> clock realtime
 * @offs_boot:		Offset clock monotonic -> clock boottime
 * @offs_tai:		Offset clock monotonic -> clock tai
 * @tai_offset:		The current UTC to TAI offset in seconds
 * @clock_was_set_seq:	The sequence number of clock was set events
 * @next_leap_ktime:	CLOCK_MONOTONIC time value of a pending leap-second
 * @raw_time:		Monotonic raw base time in timespec64 format
 * @cycle_interval:	Number of clock cycles in one NTP interval
 * @xtime_interval:	Number of clock shifted nano seconds in one NTP
 *			interval.
 * @xtime_remainder:	Shifted nano seconds left over when rounding
 *			@cycle_interval
 * @raw_interval:	Raw nano seconds accumulated per NTP interval.
 * @ntp_tick:		The ntp_tick_length() value currently being used.
 *			This cached copy ensures we consistently apply
 *			the tick length for an entire tick, as
 *			ntp_tick_length may change mid-tick, and we don't
 *			want to apply that new value to the tick in
 *			progress.
 * @ntp_error:		Difference between accumulated time and NTP time in ntp
 *			shifted nano seconds.
 * @ntp_error_shift:	Shift conversion between clock shifted nano seconds and
 *			ntp shifted nano seconds.
 * @ntp_err_mult:	Multiplication factor for scaled math conversion
 * @last_warning:	Warning ratelimiter (DEBUG_TIMEKEEPING)
 * @underflow_seen:	Underflow warning flag (DEBUG_TIMEKEEPING)
 * @overflow_seen:	Overflow warning flag (DEBUG_TIMEKEEPING)
 *
 * Note: For timespec(64) based interfaces wall_to_monotonic is what
 * we need to add to xtime (or xtime corrected for sub jiffie times)
 * to get to monotonic time. Monotonic is pegged at zero at system
 * boot time, so wall_to_monotonic will be negative, however, we will
 * ALWAYS keep the tv_nsec part positive so we can use the usual
 * normalization.
 *
 * wall_to_monotonic is moved after resume from suspend for the
 * monotonic time not to jump. We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot based time offset.
 *
 * wall_to_monotonic is no longer the boot time, getboottime must be
 * used instead.
 */
struct timekeeper {
	struct tk_read_base	tkr_mono;
	struct tk_read_base	tkr_raw;
	u64			xtime_sec;
	unsigned long		ktime_sec;
	struct timespec64	wall_to_monotonic;
	ktime_t			offs_real;
	ktime_t			offs_boot;
	ktime_t			offs_tai;
	s32			tai_offset;
	unsigned int		clock_was_set_seq;
	ktime_t			next_leap_ktime;
	struct timespec64	raw_time;

	/* The following members are for timekeeping internal use */
	cycle_t			cycle_interval;
	u64			xtime_interval;
	s64			xtime_remainder;
	u32			raw_interval;
	/* The ntp_tick_length() value currently being used.
	 * This cached copy ensures we consistently apply the tick
	 * length for an entire tick, as ntp_tick_length may change
	 * mid-tick, and we don't want to apply that new value to
	 * the tick in progress.
	 */
	u64			ntp_tick;
	/* Difference between accumulated time and NTP time in ntp
	 * shifted nano seconds. */
	s64			ntp_error;
	u32			ntp_error_shift;
	u32			ntp_err_mult;
#ifdef CONFIG_DEBUG_TIMEKEEPING
	long			last_warning;
	/*
	 * These simple flag variables are managed
	 * without locks, which is racy, but they are
	 * ok since we don't really care about being
	 * super precise about how many events were
	 * seen, just that a problem was observed.
	 */
	int			underflow_seen;
	int			overflow_seen;
#endif
};
|
|
|
|
#ifdef CONFIG_GENERIC_TIME_VSYSCALL

/* Arch provides the modern vsyscall update hooks, fed the full timekeeper. */
extern void update_vsyscall(struct timekeeper *tk);
extern void update_vsyscall_tz(void);

#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)

/*
 * Legacy vsyscall interface: the arch gets individual timekeeping
 * values instead of the timekeeper structure.
 */
extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
				struct clocksource *c, u32 mult,
				cycle_t cycle_last);
extern void update_vsyscall_tz(void);

#else

/* No vsyscall support: the update hooks are no-ops. */
static inline void update_vsyscall(struct timekeeper *tk)
{
}
static inline void update_vsyscall_tz(void)
{
}
#endif
|
|
|
|
#endif /* _LINUX_TIMEKEEPER_INTERNAL_H */
|