#ifndef _LINUX_TIME64_H
#define _LINUX_TIME64_H

#include <uapi/linux/time.h>
#include <linux/math64.h>

typedef __s64 time64_t;
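
/*
 * time64_t is a signed 64-bit count of seconds, so it does not suffer
 * from the year-2038 overflow of a 32-bit time_t.
 */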

/*
 * This wants to go into uapi/linux/time.h once we have agreed on the
 * userspace interfaces.
 */
#if __BITS_PER_LONG == 64
# define timespec64		timespec
# define itimerspec64		itimerspec
#else
struct timespec64 {
	time64_t	tv_sec;			/* seconds */
	long		tv_nsec;		/* nanoseconds */
};

struct itimerspec64 {
	struct timespec64 it_interval;
	struct timespec64 it_value;
};

#endif

/* Parameters used to convert the timespec values: */
#define MSEC_PER_SEC	1000L
#define USEC_PER_MSEC	1000L
#define NSEC_PER_USEC	1000L
#define NSEC_PER_MSEC	1000000L
#define USEC_PER_SEC	1000000L
#define NSEC_PER_SEC	1000000000L
#define FSEC_PER_SEC	1000000000000000LL
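
/*
 * The constants compose as expected: NSEC_PER_SEC equals
 * MSEC_PER_SEC * USEC_PER_MSEC * NSEC_PER_USEC. For example, a 5 ms
 * interval is 5 * NSEC_PER_MSEC == 5000000 ns.
 */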

/* Located here for timespec[64]_valid_strict */
#define TIME64_MAX		((s64)~((u64)1 << 63))
#define KTIME_MAX		((s64)~((u64)1 << 63))
#define KTIME_SEC_MAX		(KTIME_MAX / NSEC_PER_SEC)
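
/*
 * ((s64)~((u64)1 << 63)) is the all-ones pattern with the sign bit clear,
 * i.e. the largest positive s64 (0x7fffffffffffffff). KTIME_SEC_MAX is
 * therefore the largest number of whole seconds that still fits in a
 * nanosecond-based ktime_t.
 */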

#if __BITS_PER_LONG == 64

static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
{
	return ts64;
}

static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
{
	return ts;
}

static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
{
	return *its64;
}

static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
{
	return *its;
}

# define timespec64_equal		timespec_equal
# define timespec64_compare		timespec_compare
# define set_normalized_timespec64	set_normalized_timespec
# define timespec64_add_safe		timespec_add_safe
# define timespec64_add			timespec_add
# define timespec64_sub			timespec_sub
# define timespec64_valid		timespec_valid
# define timespec64_valid_strict	timespec_valid_strict
# define timespec64_to_ns		timespec_to_ns
# define ns_to_timespec64		ns_to_timespec
# define timespec64_add_ns		timespec_add_ns
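
/*
 * With timespec64 defined to be timespec above, the timespec64_*()
 * operations can simply alias their timespec counterparts; the conversion
 * helpers earlier in this branch are plain copies for the same reason.
 */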

#else

static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
{
	struct timespec ret;

	ret.tv_sec = (time_t)ts64.tv_sec;
	ret.tv_nsec = ts64.tv_nsec;
	return ret;
}
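
/*
 * Note that the cast narrows tv_sec to the architecture's time_t, so on
 * 32-bit systems seconds values beyond what time_t can hold (post-2038
 * dates) are truncated.
 */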

static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
{
	struct timespec64 ret;

	ret.tv_sec = ts.tv_sec;
	ret.tv_nsec = ts.tv_nsec;
	return ret;
}

static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
{
	struct itimerspec ret;

	ret.it_interval = timespec64_to_timespec(its64->it_interval);
	ret.it_value = timespec64_to_timespec(its64->it_value);
	return ret;
}

static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
{
	struct itimerspec64 ret;

	ret.it_interval = timespec_to_timespec64(its->it_interval);
	ret.it_value = timespec_to_timespec64(its->it_value);
	return ret;
}

static inline int timespec64_equal(const struct timespec64 *a,
				   const struct timespec64 *b)
{
	return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
}

/*
 * lhs < rhs:  return <0
 * lhs == rhs: return 0
 * lhs > rhs:  return >0
 */
static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs)
{
	if (lhs->tv_sec < rhs->tv_sec)
		return -1;
	if (lhs->tv_sec > rhs->tv_sec)
		return 1;
	return lhs->tv_nsec - rhs->tv_nsec;
}
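
/*
 * Only the sign of the return value is meaningful. For instance, comparing
 * { .tv_sec = 1, .tv_nsec = 500000000 } against { 1, 200000000 } yields a
 * positive value; the nanosecond difference always fits in an int because
 * both fields are below NSEC_PER_SEC.
 */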

extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec);

/*
 * timespec64_add_safe assumes both values are positive and checks for
 * overflow. It will return TIME_T_MAX if the returned value would be
 * smaller than either of the arguments.
 */
extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
					     const struct timespec64 rhs);
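
/*
 * The check above works because, for non-negative inputs, a wrapped tv_sec
 * sum compares smaller than either argument, which is exactly the condition
 * that triggers the clamp.
 */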

static inline struct timespec64 timespec64_add(struct timespec64 lhs,
					       struct timespec64 rhs)
{
	struct timespec64 ts_delta;
	set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec,
				  lhs.tv_nsec + rhs.tv_nsec);
	return ts_delta;
}
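
/*
 * Example: adding { 1, 800000000 } and { 0, 300000000 } produces the
 * normalized result { 2, 100000000 }, since the 1100000000 ns sum carries
 * over into an extra second.
 */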

/*
 * sub = lhs - rhs, in normalized form
 */
static inline struct timespec64 timespec64_sub(struct timespec64 lhs,
					       struct timespec64 rhs)
{
	struct timespec64 ts_delta;
	set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec,
				  lhs.tv_nsec - rhs.tv_nsec);
	return ts_delta;
}
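
/*
 * Example: { 3, 200000000 } minus { 1, 500000000 } normalizes the
 * intermediate { 2, -300000000 } to { 1, 700000000 }.
 */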

/*
 * Returns true if the timespec64 is normalized, false if denormalized:
 */
static inline bool timespec64_valid(const struct timespec64 *ts)
{
	/* Dates before 1970 are bogus */
	if (ts->tv_sec < 0)
		return false;
	/* Can't have more nanoseconds than a second */
	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return false;
	return true;
}

static inline bool timespec64_valid_strict(const struct timespec64 *ts)
{
	if (!timespec64_valid(ts))
		return false;
	/* Disallow values that could overflow ktime_t */
	if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
		return false;
	return true;
}
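
/*
 * The KTIME_SEC_MAX bound corresponds to the point at which a nanosecond
 * ktime_t would overflow, roughly 292 years after the 1970 epoch (around
 * the year 2262).
 */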

/**
 * timespec64_to_ns - Convert timespec64 to nanoseconds
 * @ts:		pointer to the timespec64 variable to be converted
 *
 * Returns the scalar nanosecond representation of the timespec64
 * parameter.
 */
static inline s64 timespec64_to_ns(const struct timespec64 *ts)
{
	return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}
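
/*
 * Example: { 1, 500000000 } converts to 1500000000 ns. For tv_sec values
 * at or above KTIME_SEC_MAX the multiplication would overflow s64, which
 * is the overflow that timespec64_valid_strict() guards against.
 */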

/**
 * ns_to_timespec64 - Convert nanoseconds to timespec64
 * @nsec:	the nanoseconds value to be converted
 *
 * Returns the timespec64 representation of the nsec parameter.
 */
extern struct timespec64 ns_to_timespec64(const s64 nsec);
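
/*
 * This is the inverse of timespec64_to_ns(): ns_to_timespec64(1500000000),
 * for example, yields { 1, 500000000 }.
 */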

/**
 * timespec64_add_ns - Adds nanoseconds to a timespec64
 * @a:		pointer to timespec64 to be incremented
 * @ns:		unsigned nanoseconds value to be added
 *
 * This must always be inlined because it's used from the x86-64 vDSO,
 * which cannot call other kernel functions.
 */
static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns)
{
	a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
	a->tv_nsec = ns;
}
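
/*
 * Example: with *a == { 2, 600000000 }, timespec64_add_ns(a, 1500000000)
 * leaves { 4, 100000000 }. __iter_div_u64_rem() divides by repeated
 * subtraction, which is cheap when the quotient (whole seconds in @ns) is
 * expected to be small and avoids a general 64-bit division in contexts
 * such as the vDSO.
 */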

#endif

#endif /* _LINUX_TIME64_H */