/*
 * You SHOULD NOT be including this unless you're vsyscall
 * handling code or timekeeping internal code!
 */
#ifndef _LINUX_TIMEKEEPER_INTERNAL_H
#define _LINUX_TIMEKEEPER_INTERNAL_H

#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>

/**
 * struct tk_read_base - base structure for timekeeping readout
 * @clock:	Current clocksource used for timekeeping.
 * @read:	Read function of @clock
 * @mask:	Bitmask for two's complement subtraction of non 64bit clocks
 * @cycle_last: @clock cycle value at last update
 * @mult:	NTP adjusted multiplier for scaled math conversion
 * @shift:	Shift value for scaled math conversion
 * @xtime_nsec: Shifted (fractional) nano seconds offset for readout
 * @base_mono:  ktime_t (nanoseconds) base time for readout
 *
 * This struct is 56 bytes on 64 bit. Together with a seqcount it
 * occupies a single 64 byte cache line.
 *
 * The struct is separate from struct timekeeper as it is also used
 * for a fast NMI safe accessor to clock monotonic.
 */
struct tk_read_base {
	struct clocksource	*clock;
	cycle_t			(*read)(struct clocksource *cs);
	cycle_t			mask;
	cycle_t			cycle_last;
	u32			mult;
	u32			shift;
	u64			xtime_nsec;
	ktime_t			base_mono;
};
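
/*
 * Illustrative sketch (not part of this header): how a CLOCK_MONOTONIC
 * readout can be derived from a tk_read_base snapshot. The helper name is
 * hypothetical; the arithmetic follows the mult/shift scaled math and the
 * masked two's complement delta described above.
 */
#if 0
static inline u64 tk_read_base_ns_example(struct tk_read_base *tkr)
{
	cycle_t delta;
	u64 nsec;

	/* Masked two's complement subtraction copes with non 64bit clocks */
	delta = (tkr->read(tkr->clock) - tkr->cycle_last) & tkr->mask;

	/* Scaled math: cycles -> shifted nanoseconds, plus fractional offset */
	nsec = delta * tkr->mult + tkr->xtime_nsec;

	/* Drop the fractional bits and add the ktime_t base */
	return ktime_to_ns(tkr->base_mono) + (nsec >> tkr->shift);
}
#endif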

/**
 * struct timekeeper - Structure holding internal timekeeping values.
 * @tkr:		The readout base structure
 * @xtime_sec:		Current CLOCK_REALTIME time in seconds
 * @wall_to_monotonic:	CLOCK_REALTIME to CLOCK_MONOTONIC offset
 * @offs_real:		Offset clock monotonic -> clock realtime
 * @offs_boot:		Offset clock monotonic -> clock boottime
 * @offs_tai:		Offset clock monotonic -> clock tai
 * @tai_offset:		The current UTC to TAI offset in seconds
 * @base_raw:		Monotonic raw base time in ktime_t format
 * @raw_time:		Monotonic raw base time in timespec64 format
 * @cycle_interval:	Number of clock cycles in one NTP interval
 * @xtime_interval:	Number of clock shifted nano seconds in one NTP
 *			interval.
 * @xtime_remainder:	Shifted nano seconds left over when rounding
 *			@cycle_interval
 * @raw_interval:	Raw nano seconds accumulated per NTP interval.
 * @ntp_error:		Difference between accumulated time and NTP time in ntp
 *			shifted nano seconds.
 * @ntp_error_shift:	Shift conversion between clock shifted nano seconds and
 *			ntp shifted nano seconds.
 *
 * Note: For timespec(64) based interfaces wall_to_monotonic is what
 * we need to add to xtime (or xtime corrected for sub jiffie times)
 * to get to monotonic time. Monotonic is pegged at zero at system
 * boot time, so wall_to_monotonic will be negative, however, we will
 * ALWAYS keep the tv_nsec part positive so we can use the usual
 * normalization.
 *
 * wall_to_monotonic is moved after resume from suspend for the
 * monotonic time not to jump. We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot based time offset.
 *
 * wall_to_monotonic is no longer the boot time, getboottime must be
 * used instead.
 */
struct timekeeper {
	struct tk_read_base	tkr;
	u64			xtime_sec;
	struct timespec64	wall_to_monotonic;
	ktime_t			offs_real;
	ktime_t			offs_boot;
	ktime_t			offs_tai;
	s32			tai_offset;
	ktime_t			base_raw;
	struct timespec64	raw_time;

	/* The following members are for timekeeping internal use */
	cycle_t			cycle_interval;
	u64			xtime_interval;
	s64			xtime_remainder;
	u32			raw_interval;
	/* The ntp_tick_length() value currently being used.
	 * This cached copy ensures we consistently apply the tick
	 * length for an entire tick, as ntp_tick_length may change
	 * mid-tick, and we don't want to apply that new value to
	 * the tick in progress.
	 */
	u64			ntp_tick;
	/* Difference between accumulated time and NTP time in ntp
	 * shifted nano seconds. */
	s64			ntp_error;
	u32			ntp_error_shift;
	u32			ntp_err_mult;
};
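
/*
 * Illustrative sketch (not part of this header): how the offsets above
 * relate the clock ids to each other. The ktime_t offsets are added to
 * monotonic time to reach the derived clocks, while wall_to_monotonic runs
 * the other way (it is essentially the negative of offs_real). The helper
 * name is hypothetical, and the fractional xtime_nsec part is ignored for
 * brevity.
 */
#if 0
static inline void tk_offsets_example(struct timekeeper *tk, ktime_t mono)
{
	struct timespec64 xtime = { .tv_sec = tk->xtime_sec, .tv_nsec = 0 };
	struct timespec64 mono_ts;
	ktime_t real, boot, tai;

	/* Derived clocks are the monotonic base plus a per-clock offset */
	real = ktime_add(mono, tk->offs_real);	/* CLOCK_REALTIME */
	boot = ktime_add(mono, tk->offs_boot);	/* CLOCK_BOOTTIME */
	tai  = ktime_add(mono, tk->offs_tai);	/* CLOCK_TAI: offs_real + tai_offset seconds */

	/* timespec64 path: monotonic = xtime + wall_to_monotonic */
	mono_ts = timespec64_add(xtime, tk->wall_to_monotonic);
}
#endif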

#ifdef CONFIG_GENERIC_TIME_VSYSCALL

extern void update_vsyscall(struct timekeeper *tk);
extern void update_vsyscall_tz(void);

#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)

extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
				struct clocksource *c, u32 mult,
				cycle_t cycle_last);
extern void update_vsyscall_tz(void);

#else

static inline void update_vsyscall(struct timekeeper *tk)
{
}
static inline void update_vsyscall_tz(void)
{
}
#endif
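
/*
 * Illustrative sketch (not part of this header): the timekeeping core can
 * invoke these hooks unconditionally after updating the timekeeper; when
 * neither CONFIG_GENERIC_TIME_VSYSCALL variant is set they resolve to the
 * empty stubs above. The function name below is hypothetical.
 */
#if 0
static void timekeeping_update_example(struct timekeeper *tk)
{
	/* ... recompute tk->tkr, offsets and NTP state ... */
	update_vsyscall(tk);
}
#endif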
#endif /* _LINUX_TIMEKEEPER_INTERNAL_H */