Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-26 17:45:12 +07:00)
a7f71a2c89
Provide the arm64 compat (AArch32) vDSO in kernel/vdso32 in a similar way to what happens in kernel/vdso.

The compat vDSO leverages an adaptation of the arm architecture code with a few changes:

 - Use of lib/vdso for gettimeofday
 - Implement a syscall based fallback
 - Introduce clock_getres() for the compat library
 - Implement trampolines
 - Implement elf note

To build the compat vDSO a 32-bit compiler is required and needs to be specified via CONFIG_CROSS_COMPILE_COMPAT_VDSO. The code is not yet enabled as other prerequisites are missing.

Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Shijith Thotton <sthotton@marvell.com>
Tested-by: Andre Przywara <andre.przywara@arm.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-mips@vger.kernel.org
Cc: linux-kselftest@vger.kernel.org
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Mark Salyzyn <salyzyn@android.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Dmitry Safonov <0x7f454c46@gmail.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Huw Davies <huw@codeweavers.com>
Link: https://lkml.kernel.org/r/20190621095252.32307-11-vincenzo.frascino@arm.com
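For context (not part of the patch): once the compat vDSO is eventually enabled, ordinary libc time calls made by 32-bit processes are expected to be served from the vDSO fast path, dropping to the swi-based fallbacks in the header below only when the fast path cannot handle the request. A minimal AArch32 userspace sketch of the affected calls, using only standard libc interfaces, might look like this:

	#include <stdio.h>
	#include <sys/time.h>
	#include <time.h>

	int main(void)
	{
		struct timeval tv;
		struct timespec ts, res;

		/* Each call below corresponds to a path provided by the compat vDSO. */
		gettimeofday(&tv, NULL);              /* gettimeofday() path */
		clock_gettime(CLOCK_MONOTONIC, &ts);  /* clock_gettime() path */
		clock_getres(CLOCK_MONOTONIC, &res);  /* clock_getres() path (VDSO_HAS_CLOCK_GETRES) */

		printf("tv=%lld.%06ld ts=%lld.%09ld res=%ld ns\n",
		       (long long)tv.tv_sec, (long)tv.tv_usec,
		       (long long)ts.tv_sec, ts.tv_nsec, res.tv_nsec);
		return 0;
	}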
109 lines
2.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 ARM Limited
 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <asm/unistd.h>
#include <uapi/linux/time.h>

#include <asm/vdso/compat_barrier.h>

#define VDSO_HAS_CLOCK_GETRES		1

static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			  struct timezone *_tz)
{
	register struct timezone *tz asm("r1") = _tz;
	register struct __kernel_old_timeval *tv asm("r0") = _tv;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_gettimeofday;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_gettime64;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_getres_time64;

	/* The checks below are required for ABI consistency with arm */
	if ((_clkid >= MAX_CLOCKS) && (_ts == NULL))
		return -EINVAL;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
{
	u64 res;

	isb();
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (res));

	return res;
}

static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
	const struct vdso_data *ret;

	/*
	 * This simply puts &_vdso_data into ret. The reason why we don't use
	 * `ret = _vdso_data` is that the compiler tends to optimise this in a
	 * very suboptimal way: instead of keeping &_vdso_data in a register,
	 * it goes through a relocation almost every time _vdso_data must be
	 * accessed (even in subfunctions). This is both time and space
	 * consuming: each relocation uses a word in the code section, and it
	 * has to be loaded at runtime.
	 *
	 * This trick hides the assignment from the compiler. Since it cannot
	 * track where the pointer comes from, it will only use one relocation
	 * where __arch_get_vdso_data() is called, and then keep the result in
	 * a register.
	 */
	asm volatile("mov %0, %1" : "=r"(ret) : "r"(_vdso_data));

	return ret;
}

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
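All three fallbacks above use the same AArch32 EABI syscall convention: the syscall number goes in r7, arguments in r0/r1, the result comes back in r0, and the trap is issued with swi #0. As an illustration only (the helper name raw_clock_gettime is hypothetical and not part of the kernel code), the same register-pinning pattern can be exercised from AArch32 userspace, bypassing the vDSO and always trapping into the kernel:

	/* Hypothetical userspace demo of the fallback pattern used above:
	 * syscall number in r7, arguments in r0/r1, result in r0, swi #0 trap.
	 * Builds and runs only on 32-bit ARM Linux. */
	#include <stdio.h>
	#include <time.h>
	#include <sys/syscall.h>

	static long raw_clock_gettime(clockid_t _clkid, struct timespec *_ts)
	{
		register struct timespec *ts asm("r1") = _ts;
		register clockid_t clkid asm("r0") = _clkid;
		register long ret asm("r0");
		register long nr asm("r7") = __NR_clock_gettime;

		asm volatile("swi #0\n"
			     : "=r" (ret)
			     : "r" (clkid), "r" (ts), "r" (nr)
			     : "memory");

		return ret;
	}

	int main(void)
	{
		struct timespec ts;

		if (raw_clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
			printf("monotonic: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}

This only demonstrates the idiom; real applications should keep calling clock_gettime() through libc so they benefit from the vDSO fast path once it is enabled.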