commit bd902c5362
It makes me uncomfortable that even modern systems grant every process
direct read access to the HPET.

While fixing this for real without regressing anything is a mess
(unmapping the HPET is tricky because we don't adequately track all the
mappings), we can do almost as well by tracking which vclocks have ever
been used and only allowing pages associated with used vclocks to be
faulted in.

This will cause rogue programs that try to peek at the HPET to get
SIGBUS instead on most systems.

We can't restrict faults to vclock pages that are associated with the
currently selected vclock due to a race: a process could start to
access the HPET for the first time and race against a switch away from
the HPET as the current clocksource. We can't segfault the process
trying to peek at the HPET in this case, even though the process isn't
going to do anything useful with the data.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/e79d06295625c02512277737ab55085a498ac5d8.1451446564.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
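The gating the commit describes happens at fault time in the vDSO's vvar
mapping code rather than in the header below; the header only provides the
vclock_was_used() helper. A minimal sketch of the idea, assuming the usual
kernel symbols (hpet_address, VCLOCK_HPET, vm_insert_pfn_prot,
pgprot_noncached) and a made-up function name; the real handler's structure
and error handling differ:

/*
 * Illustrative sketch only, not part of the original code: a vvar fault
 * handler can refuse the HPET page for processes that never used the
 * HPET vclock, so a rogue peek gets SIGBUS instead of a mapping.
 */
static int vvar_hpet_fault_sketch(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	if (!hpet_address || !vclock_was_used(VCLOCK_HPET))
		return VM_FAULT_SIGBUS;	/* HPET vclock never used: refuse */

	if (vm_insert_pfn_prot(vma, (unsigned long)vmf->virtual_address,
			       hpet_address >> PAGE_SHIFT,
			       pgprot_noncached(PAGE_READONLY)) == 0)
		return VM_FAULT_NOPAGE;	/* mapped read-only, uncached */

	return VM_FAULT_SIGBUS;
}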
#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H

#include <linux/compiler.h>
#include <linux/clocksource.h>

#ifdef BUILD_VDSO32_64
typedef u64 gtod_long_t;
#else
typedef unsigned long gtod_long_t;
#endif
/*
 * vsyscall_gtod_data will be accessed by 32 and 64 bit code at the same time
 * so be careful when modifying this structure.
 */
struct vsyscall_gtod_data {
	unsigned seq;

	int vclock_mode;
	cycle_t	cycle_last;
	cycle_t	mask;
	u32	mult;
	u32	shift;

	/* open coded 'struct timespec' */
	u64		wall_time_snsec;
	gtod_long_t	wall_time_sec;
	gtod_long_t	monotonic_time_sec;
	u64		monotonic_time_snsec;
	gtod_long_t	wall_time_coarse_sec;
	gtod_long_t	wall_time_coarse_nsec;
	gtod_long_t	monotonic_time_coarse_sec;
	gtod_long_t	monotonic_time_coarse_nsec;

	int		tz_minuteswest;
	int		tz_dsttime;
};
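/*
 * Editorial note, not in the original header: the *_snsec fields hold
 * "shifted nanoseconds", i.e. nanoseconds scaled by 2^shift, so a vDSO
 * reader can accumulate the clocksource delta before one final shift,
 * roughly:
 *
 *	ns  = gtod->wall_time_snsec;
 *	ns += ((cycles - gtod->cycle_last) & gtod->mask) * gtod->mult;
 *	ns >>= gtod->shift;
 *
 * and then fold the result into wall_time_sec.
 */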
extern struct vsyscall_gtod_data vsyscall_gtod_data;

extern int vclocks_used;
static inline bool vclock_was_used(int vclock)
{
	return READ_ONCE(vclocks_used) & (1 << vclock);
}

static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(s->seq);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	smp_rmb();
	return ret;
}

static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
				  unsigned start)
{
	smp_rmb();
	return unlikely(s->seq != start);
}

static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
{
	++s->seq;
	smp_wmb();
}

static inline void gtod_write_end(struct vsyscall_gtod_data *s)
{
	smp_wmb();
	++s->seq;
}
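/*
 * Usage sketch (editorial, not in the original header): these helpers
 * implement an open-coded seqcount.  A writer brackets updates with
 * gtod_write_begin()/gtod_write_end(), leaving seq odd while the data
 * is inconsistent; a vDSO reader retries until it observes a stable,
 * even sequence number:
 *
 *	unsigned seq;
 *	u64 ns;
 *
 *	do {
 *		seq = gtod_read_begin(gtod);
 *		ns  = gtod->wall_time_snsec;
 *		...
 *	} while (unlikely(gtod_read_retry(gtod, seq)));
 */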

#ifdef CONFIG_X86_64

#define VGETCPU_CPU_MASK 0xfff

static inline unsigned int __getcpu(void)
{
	unsigned int p;

	/*
	 * Load per CPU data from GDT.  LSL is faster than RDTSCP and
	 * works on all CPUs.  This is volatile so that it orders
	 * correctly wrt barrier() and to keep gcc from cleverly
	 * hoisting it out of the calling function.
	 */
	asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));

	return p;
}
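/*
 * Decoding sketch (editorial, not in the original header): the segment
 * limit read by LSL packs the CPU number in the low 12 bits and the
 * NUMA node above them, so a vgetcpu()-style caller can extract both:
 *
 *	unsigned int p = __getcpu();
 *	unsigned int cpu  = p & VGETCPU_CPU_MASK;
 *	unsigned int node = p >> 12;
 */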

#endif /* CONFIG_X86_64 */

#endif /* _ASM_X86_VGTOD_H */