mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-27 12:45:16 +07:00)
034bda1cd5
Pull x86 vdso updates from Ingo Molnar:
 "Two main changes:
   - Cleanups, simplifications and CLOCK_TAI support (Thomas Gleixner)
   - Improve code generation (Andy Lutomirski)"

* 'x86-vdso-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/vdso: Rearrange do_hres() to improve code generation
  x86/vdso: Document vgtod_ts better
  x86/vdso: Remove "memory" clobbers in the vDSO syscall fallbacks
  x66/vdso: Add CLOCK_TAI support
  x86/vdso: Move cycle_last handling into the caller
  x86/vdso: Simplify the invalid vclock case
  x86/vdso: Replace the clockid switch case
  x86/vdso: Collapse coarse functions
  x86/vdso: Collapse high resolution functions
  x86/vdso: Introduce and use vgtod_ts
  x86/vdso: Use unsigned int consistently for vsyscall_gtod_data::seq
  x86/vdso: Enforce 64bit clocksource
  x86/time: Implement clocksource_arch_init()
  clocksource: Provide clocksource_arch_init()
94 lines · 1.9 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H

#include <linux/compiler.h>
#include <linux/clocksource.h>

#include <uapi/linux/time.h>

#ifdef BUILD_VDSO32_64
typedef u64 gtod_long_t;
#else
typedef unsigned long gtod_long_t;
#endif

/*
 * There is one of these objects in the vvar page for each
 * vDSO-accelerated clockid. For high-resolution clocks, this encodes
 * the time corresponding to vsyscall_gtod_data.cycle_last. For coarse
 * clocks, this encodes the actual time.
 *
 * To confuse the reader, for high-resolution clocks, nsec is left-shifted
 * by vsyscall_gtod_data.shift.
 */
struct vgtod_ts {
        u64 sec;
        u64 nsec;
};

#define VGTOD_BASES     (CLOCK_TAI + 1)
#define VGTOD_HRES      (BIT(CLOCK_REALTIME) | BIT(CLOCK_MONOTONIC) | BIT(CLOCK_TAI))
#define VGTOD_COARSE    (BIT(CLOCK_REALTIME_COARSE) | BIT(CLOCK_MONOTONIC_COARSE))
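
/*
 * Illustrative sketch (not part of the original header): VGTOD_HRES and
 * VGTOD_COARSE are per-clockid bitmasks, so a caller can classify a clockid
 * with a single mask test instead of a switch statement. The helper name
 * below is hypothetical and only shows the intended use of the masks.
 */
static inline bool vgtod_clock_is_hres(int clock)
{
        return (BIT(clock) & VGTOD_HRES) != 0;
}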

/*
 * vsyscall_gtod_data is accessed by both 32-bit and 64-bit code at the same
 * time, so be careful when modifying this structure.
 */
struct vsyscall_gtod_data {
        unsigned int seq;

        int vclock_mode;
        u64 cycle_last;
        u64 mask;
        u32 mult;
        u32 shift;

        struct vgtod_ts basetime[VGTOD_BASES];

        int tz_minuteswest;
        int tz_dsttime;
};
extern struct vsyscall_gtod_data vsyscall_gtod_data;
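
/*
 * Illustrative sketch (not part of the original header): given a raw cycle
 * count read from the current clocksource, a high-resolution timestamp is
 * rebuilt from the pre-shifted basetime plus the cycles elapsed since
 * cycle_last, and only shifted back down at the end. The helper name is
 * hypothetical; the real reader also normalizes nanoseconds into seconds.
 */
static inline void vgtod_hres_sketch(const struct vsyscall_gtod_data *gtod,
                                     int clock, u64 cycles,
                                     u64 *sec, u64 *nsec)
{
        const struct vgtod_ts *base = &gtod->basetime[clock];
        u64 ns = base->nsec;    /* already left-shifted by gtod->shift */

        ns += ((cycles - gtod->cycle_last) & gtod->mask) * gtod->mult;
        ns >>= gtod->shift;

        *sec = base->sec;
        *nsec = ns;             /* may exceed NSEC_PER_SEC; caller normalizes */
}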

extern int vclocks_used;
static inline bool vclock_was_used(int vclock)
{
        return READ_ONCE(vclocks_used) & (1 << vclock);
}

static inline unsigned int gtod_read_begin(const struct vsyscall_gtod_data *s)
{
        unsigned int ret;

repeat:
        ret = READ_ONCE(s->seq);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        smp_rmb();
        return ret;
}

static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
                                  unsigned int start)
{
        smp_rmb();
        return unlikely(s->seq != start);
}
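
/*
 * Illustrative sketch (not part of the original header): gtod_read_begin()
 * and gtod_read_retry() form a seqcount-style read section. A reader
 * snapshots the fields it needs and retries if the writer bumped ->seq in
 * the meantime. The helper name below is hypothetical.
 */
static inline u64 vgtod_read_cycle_last_sketch(const struct vsyscall_gtod_data *gtod)
{
        unsigned int seq;
        u64 cycle_last;

        do {
                seq = gtod_read_begin(gtod);
                cycle_last = gtod->cycle_last;  /* copy out under the sequence */
        } while (gtod_read_retry(gtod, seq));

        return cycle_last;
}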

static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
{
        ++s->seq;
        smp_wmb();
}

static inline void gtod_write_end(struct vsyscall_gtod_data *s)
{
        smp_wmb();
        ++s->seq;
}
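
/*
 * Illustrative sketch (not part of the original header): the update side is
 * expected to bracket every modification of vsyscall_gtod_data with
 * gtod_write_begin() and gtod_write_end(), which keeps ->seq odd for the
 * duration of the update so concurrent readers spin and then retry. The
 * helper name below is hypothetical.
 */
static inline void vgtod_update_cycle_last_sketch(struct vsyscall_gtod_data *gtod,
                                                  u64 cycle_last)
{
        gtod_write_begin(gtod);         /* ->seq becomes odd, readers wait */
        gtod->cycle_last = cycle_last;
        gtod_write_end(gtod);           /* ->seq becomes even again */
}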

#endif /* _ASM_X86_VGTOD_H */