mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 09:40:58 +07:00)
Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull vdso timer fixes from Thomas Gleixner:
 "A series of commits to deal with the regression caused by the generic
  VDSO implementation.

  The usage of clock_gettime64() for 32bit compat fallback syscalls
  caused seccomp filters to kill innocent processes because they only
  allow clock_gettime().

  Handle the compat syscalls with clock_gettime() as before, which is
  not a functional problem for the VDSO as the legacy compat application
  interface is not y2038 safe anyway. It's just extra fallback code which
  needs to be implemented on every architecture.

  It's opt in for now so that it does not break the compile of already
  converted architectures in linux-next. Once these are fixed, the
  #ifdeffery goes away.

  So much for trying to be smart and reuse code..."

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  arm64: compat: vdso: Use legacy syscalls as fallback
  x86/vdso/32: Use 32bit syscall fallback
  lib/vdso/32: Provide legacy syscall fallbacks
  lib/vdso: Move fallback invocation to the callers
  lib/vdso/32: Remove inconsistent NULL pointer checks
commit 0432a0a066
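To make the failure mode concrete: a seccomp allowlist written before this series typically permits clock_gettime() but has never heard of clock_gettime64(), so a 32-bit process whose vDSO falls back to the new syscall is killed on the spot. The sketch below is illustrative only and is not part of this commit; it assumes libseccomp (link with -lseccomp) and a 32-bit (-m32 or compat) build, and the specific allowlist is hypothetical.

```c
/*
 * Illustrative only, not from this commit: the kind of pre-existing
 * seccomp policy the changelog describes. clock_gettime() is allowed,
 * clock_gettime64() is not, so any code path (such as a vDSO fallback)
 * that unexpectedly issues the newer syscall gets the process killed.
 */
#include <seccomp.h>		/* libseccomp, link with -lseccomp */
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct timespec ts;
	scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_KILL);	/* default: kill */

	if (!ctx)
		return 1;

	/* The allowlist an old filter would carry: the legacy syscall only. */
	seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(clock_gettime), 0);
	seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(exit_group), 0);

	if (seccomp_load(ctx) < 0)
		return 1;

	/* The legacy clock_gettime() syscall passes the filter... */
	syscall(SYS_clock_gettime, CLOCK_MONOTONIC, &ts);

	/*
	 * ...whereas a fallback that issued __NR_clock_gettime64 instead
	 * would be killed right here.
	 */
	return 0;
}
```

With this series applied, the 32-bit vDSO fallback issues clock_gettime() again, so filters like this keep working.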
arch/arm64/include/asm/vdso/compat_gettimeofday.h:

```diff
@@ -16,6 +16,8 @@
 
 #define VDSO_HAS_CLOCK_GETRES		1
 
+#define VDSO_HAS_32BIT_FALLBACK	1
+
 static __always_inline
 int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
 			  struct timezone *_tz)
@@ -51,6 +53,23 @@ long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
 	return ret;
 }
 
+static __always_inline
+long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+	register struct old_timespec32 *ts asm("r1") = _ts;
+	register clockid_t clkid asm("r0") = _clkid;
+	register long ret asm ("r0");
+	register long nr asm("r7") = __NR_compat_clock_gettime;
+
+	asm volatile(
+	"	swi #0\n"
+	: "=r" (ret)
+	: "r" (clkid), "r" (ts), "r" (nr)
+	: "memory");
+
+	return ret;
+}
+
 static __always_inline
 int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
 {
@@ -72,6 +91,27 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
 	return ret;
 }
 
+static __always_inline
+int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+	register struct old_timespec32 *ts asm("r1") = _ts;
+	register clockid_t clkid asm("r0") = _clkid;
+	register long ret asm ("r0");
+	register long nr asm("r7") = __NR_compat_clock_getres;
+
+	/* The checks below are required for ABI consistency with arm */
+	if ((_clkid >= MAX_CLOCKS) && (_ts == NULL))
+		return -EINVAL;
+
+	asm volatile(
+	"	swi #0\n"
+	: "=r" (ret)
+	: "r" (clkid), "r" (ts), "r" (nr)
+	: "memory");
+
+	return ret;
+}
+
 static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
 {
 	u64 res;
```
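The new compat fallbacks follow the 32-bit ARM EABI syscall convention: the syscall number goes in r7, the arguments in r0 and r1, and the result comes back in r0, which is why the local variables are pinned to registers before the `swi #0` trap. Below is a rough userspace rendering of the same technique; it is an illustration only (the helper name is made up) and would need a 32-bit arm-linux-gnueabi toolchain, not the kernel build.

```c
/*
 * Illustration only, not kernel code: the register-pinning pattern the
 * compat fallbacks above use, expressed as a raw legacy clock_gettime
 * syscall from 32-bit ARM userspace (EABI: number in r7, args in r0/r1,
 * result in r0).
 */
#include <asm/unistd.h>		/* __NR_clock_gettime */
#include <time.h>

static long raw_clock_gettime(clockid_t clkid, struct timespec *ts)
{
	register long r0 asm("r0") = clkid;		/* arg 0, later the result */
	register struct timespec *r1 asm("r1") = ts;	/* arg 1 */
	register long r7 asm("r7") = __NR_clock_gettime;/* syscall number */

	asm volatile("swi #0"
		     : "+r" (r0)
		     : "r" (r1), "r" (r7)
		     : "memory");
	return r0;
}
```

The kernel variant keeps separate ret and clkid variables both bound to r0, since r0 carries the first argument in and the return value out; the effect is the same.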
arch/x86/include/asm/vdso/gettimeofday.h:

```diff
@@ -96,6 +96,8 @@ long clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
 
 #else
 
+#define VDSO_HAS_32BIT_FALLBACK	1
+
 static __always_inline
 long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
 {
@@ -113,6 +115,23 @@ long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
 	return ret;
 }
 
+static __always_inline
+long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+	long ret;
+
+	asm (
+		"mov %%ebx, %%edx \n"
+		"mov %[clock], %%ebx \n"
+		"call __kernel_vsyscall \n"
+		"mov %%edx, %%ebx \n"
+		: "=a" (ret), "=m" (*_ts)
+		: "0" (__NR_clock_gettime), [clock] "g" (_clkid), "c" (_ts)
+		: "edx");
+
+	return ret;
+}
+
 static __always_inline
 long gettimeofday_fallback(struct __kernel_old_timeval *_tv,
 			   struct timezone *_tz)
@@ -148,6 +167,23 @@ clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
 	return ret;
 }
 
+static __always_inline
+long clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+	long ret;
+
+	asm (
+		"mov %%ebx, %%edx \n"
+		"mov %[clock], %%ebx \n"
+		"call __kernel_vsyscall \n"
+		"mov %%edx, %%ebx \n"
+		: "=a" (ret), "=m" (*_ts)
+		: "0" (__NR_clock_getres), [clock] "g" (_clkid), "c" (_ts)
+		: "edx");
+
+	return ret;
+}
+
 #endif
 
 #ifdef CONFIG_PARAVIRT_CLOCK
```
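On 32-bit x86 the fallback has one extra wrinkle: the syscall number goes in %eax and the arguments in %ebx and %ecx, but %ebx may hold the GOT pointer when the vDSO is built as position-independent code, so the asm parks it in %edx around the call to __kernel_vsyscall instead of declaring it clobbered. A hedged userspace rendering of the same %ebx dance follows; it uses int $0x80 because ordinary application code cannot call __kernel_vsyscall by name, and the helper is made up for illustration (build with -m32).

```c
/*
 * Illustration only, not from this commit: the %ebx save/restore trick
 * used by the fallbacks above, as a raw legacy clock_gettime syscall
 * from 32-bit x86 userspace (number in %eax, args in %ebx/%ecx).
 */
#include <asm/unistd.h>		/* __NR_clock_gettime on i386 */
#include <time.h>

static long i386_clock_gettime(clockid_t clkid, struct timespec *ts)
{
	long ret;

	asm volatile("mov %%ebx, %%edx \n"	/* park %ebx (possible GOT pointer) */
		     "mov %[clock], %%ebx \n"
		     "int $0x80 \n"
		     "mov %%edx, %%ebx \n"	/* restore %ebx */
		     : "=a" (ret), "=m" (*ts)
		     : "0" (__NR_clock_gettime), [clock] "g" (clkid), "c" (ts)
		     : "edx");
	return ret;
}
```

Inside the vDSO the call goes through __kernel_vsyscall instead, so the kernel can dispatch via sysenter, syscall, or int $0x80 as appropriate.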
lib/vdso/gettimeofday.c:

```diff
@@ -51,7 +51,7 @@ static int do_hres(const struct vdso_data *vd, clockid_t clk,
 		ns = vdso_ts->nsec;
 		last = vd->cycle_last;
 		if (unlikely((s64)cycles < 0))
-			return clock_gettime_fallback(clk, ts);
+			return -1;
 
 		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
 		ns >>= vd->shift;
@@ -82,14 +82,14 @@ static void do_coarse(const struct vdso_data *vd, clockid_t clk,
 }
 
 static __maybe_unused int
-__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+__cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
 {
 	const struct vdso_data *vd = __arch_get_vdso_data();
 	u32 msk;
 
 	/* Check for negative values or invalid clocks */
 	if (unlikely((u32) clock >= MAX_CLOCKS))
-		goto fallback;
+		return -1;
 
 	/*
 	 * Convert the clockid to a bitmask and use it to check which
@@ -104,9 +104,17 @@ __cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
 	} else if (msk & VDSO_RAW) {
 		return do_hres(&vd[CS_RAW], clock, ts);
 	}
+	return -1;
+}
 
-fallback:
-	return clock_gettime_fallback(clock, ts);
+static __maybe_unused int
+__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+	int ret = __cvdso_clock_gettime_common(clock, ts);
+
+	if (unlikely(ret))
+		return clock_gettime_fallback(clock, ts);
+	return 0;
 }
 
 static __maybe_unused int
@@ -115,20 +123,21 @@ __cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
 	struct __kernel_timespec ts;
 	int ret;
 
-	if (res == NULL)
-		goto fallback;
+	ret = __cvdso_clock_gettime_common(clock, &ts);
 
-	ret = __cvdso_clock_gettime(clock, &ts);
+#ifdef VDSO_HAS_32BIT_FALLBACK
+	if (unlikely(ret))
+		return clock_gettime32_fallback(clock, res);
+#else
+	if (unlikely(ret))
+		ret = clock_gettime_fallback(clock, &ts);
+#endif
 
-	if (ret == 0) {
+	if (likely(!ret)) {
 		res->tv_sec = ts.tv_sec;
 		res->tv_nsec = ts.tv_nsec;
 	}
 
 	return ret;
-
-fallback:
-	return clock_gettime_fallback(clock, (struct __kernel_timespec *)res);
 }
 
 static __maybe_unused int
@@ -169,17 +178,18 @@ static __maybe_unused time_t __cvdso_time(time_t *time)
 
 #ifdef VDSO_HAS_CLOCK_GETRES
 static __maybe_unused
-int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
 {
 	const struct vdso_data *vd = __arch_get_vdso_data();
-	u64 ns;
-	u64 hrtimer_res;
 	u32 msk;
+	u64 hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
+	u64 ns;
 
 	/* Check for negative values or invalid clocks */
 	if (unlikely((u32) clock >= MAX_CLOCKS))
-		goto fallback;
+		return -1;
 
-	hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
 	/*
 	 * Convert the clockid to a bitmask and use it to check which
 	 * clocks are handled in the VDSO directly.
@@ -201,18 +211,22 @@ int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
 		 */
 		ns = hrtimer_res;
 	} else {
-		goto fallback;
+		return -1;
 	}
 
-	if (res) {
-		res->tv_sec = 0;
-		res->tv_nsec = ns;
-	}
+	res->tv_sec = 0;
+	res->tv_nsec = ns;
 
 	return 0;
+}
 
-fallback:
-	return clock_getres_fallback(clock, res);
+int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+{
+	int ret = __cvdso_clock_getres_common(clock, res);
+
+	if (unlikely(ret))
+		return clock_getres_fallback(clock, res);
+	return 0;
+}
 
 static __maybe_unused int
@@ -221,19 +235,20 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
 	struct __kernel_timespec ts;
 	int ret;
 
-	if (res == NULL)
-		goto fallback;
+	ret = __cvdso_clock_getres_common(clock, &ts);
 
-	ret = __cvdso_clock_getres(clock, &ts);
+#ifdef VDSO_HAS_32BIT_FALLBACK
+	if (unlikely(ret))
+		return clock_getres32_fallback(clock, res);
+#else
+	if (unlikely(ret))
+		ret = clock_getres_fallback(clock, &ts);
+#endif
 
-	if (ret == 0) {
+	if (likely(!ret)) {
 		res->tv_sec = ts.tv_sec;
 		res->tv_nsec = ts.tv_nsec;
 	}
 
 	return ret;
-
-fallback:
-	return clock_getres_fallback(clock, (struct __kernel_timespec *)res);
 }
 #endif /* VDSO_HAS_CLOCK_GETRES */
```
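Taken together, the lib/vdso changes split each entry point into a *_common() helper that only reports success or failure and a thin wrapper that decides which fallback to issue. For the 32-bit entry point, the resulting function reads roughly as in the sketch below, assembled from the added lines above (a summary, not a verbatim copy of lib/vdso/gettimeofday.c); __cvdso_clock_getres_time32() follows the same pattern with clock_getres32_fallback().

```c
/* Summary sketch assembled from the hunks above, not a verbatim copy. */
static __maybe_unused int
__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
{
	struct __kernel_timespec ts;
	int ret;

	/* The vDSO path now reports failure instead of calling a fallback. */
	ret = __cvdso_clock_gettime_common(clock, &ts);

#ifdef VDSO_HAS_32BIT_FALLBACK
	/* Opted-in architectures issue the legacy clock_gettime() syscall,
	 * which existing seccomp filters already allow. */
	if (unlikely(ret))
		return clock_gettime32_fallback(clock, res);
#else
	/* Not yet converted architectures keep the clock_gettime64()
	 * fallback; this branch disappears once they are all converted. */
	if (unlikely(ret))
		ret = clock_gettime_fallback(clock, &ts);
#endif

	if (likely(!ret)) {
		res->tv_sec = ts.tv_sec;
		res->tv_nsec = ts.tv_nsec;
	}

	return ret;
}
```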