mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2025-02-21 12:38:09 +07:00

A bunch of good stuff in here: - Wire up support for qspinlock, replacing our trusty ticket lock code - Add an IPI to flush_icache_range() to ensure that stale instructions fetched into the pipeline are discarded along with the I-cache lines - Support for the GCC "stackleak" plugin - Support for restartable sequences, plus an arm64 port for the selftest - Kexec/kdump support on systems booting with ACPI - Rewrite of our syscall entry code in C, which allows us to zero the GPRs on entry from userspace - Support for chained PMU counters, allowing 64-bit event counters to be constructed on current CPUs - Ensure scheduler topology information is kept up-to-date with CPU hotplug events - Re-enable support for huge vmalloc/IO mappings now that the core code has the correct hooks to use break-before-make sequences - Miscellaneous, non-critical fixes and cleanups -----BEGIN PGP SIGNATURE----- Version: GnuPG v1 iQEcBAABCgAGBQJbbV41AAoJELescNyEwWM0WoEIALhrKtsIn6vqFlSs/w6aDuJL cMWmFxjTaKLmIq2+cJIdFLOJ3CH80Pu9gB+nEv/k+cZdCTfUVKfRf28HTpmYWsht bb4AhdHMC7yFW752BHk+mzJspeC8h/2Rm8wMuNVplZ3MkPrwo3vsiuJTofLhVL/y BihlU3+5sfBvCYIsWnuEZIev+/I/s/qm1ASiqIcKSrFRZP6VTt5f9TC75vFI8seW 7yc3odKb0CArexB8yBjiPNziehctQF42doxQyL45hezLfWw4qdgHOSiwyiOMxEz9 Fwwpp8Tx33SKLNJgqoqYznGW9PhYJ7n2Kslv19uchJrEV+mds82vdDNaWRULld4= =kQn6 -----END PGP SIGNATURE----- Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux Pull arm64 updates from Will Deacon: "A bunch of good stuff in here. Worth noting is that we've pulled in the x86/mm branch from -tip so that we can make use of the core ioremap changes which allow us to put down huge mappings in the vmalloc area without screwing up the TLB. Much of the positive diffstat is because of the rseq selftest for arm64. 
Summary: - Wire up support for qspinlock, replacing our trusty ticket lock code - Add an IPI to flush_icache_range() to ensure that stale instructions fetched into the pipeline are discarded along with the I-cache lines - Support for the GCC "stackleak" plugin - Support for restartable sequences, plus an arm64 port for the selftest - Kexec/kdump support on systems booting with ACPI - Rewrite of our syscall entry code in C, which allows us to zero the GPRs on entry from userspace - Support for chained PMU counters, allowing 64-bit event counters to be constructed on current CPUs - Ensure scheduler topology information is kept up-to-date with CPU hotplug events - Re-enable support for huge vmalloc/IO mappings now that the core code has the correct hooks to use break-before-make sequences - Miscellaneous, non-critical fixes and cleanups" * tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (90 commits) arm64: alternative: Use true and false for boolean values arm64: kexec: Add comment to explain use of __flush_icache_range() arm64: sdei: Mark sdei stack helper functions as static arm64, kaslr: export offset in VMCOREINFO ELF notes arm64: perf: Add cap_user_time aarch64 efi/libstub: Only disable stackleak plugin for arm64 arm64: drop unused kernel_neon_begin_partial() macro arm64: kexec: machine_kexec should call __flush_icache_range arm64: svc: Ensure hardirq tracing is updated before return arm64: mm: Export __sync_icache_dcache() for xen-privcmd drivers/perf: arm-ccn: Use devm_ioremap_resource() to map memory arm64: Add support for STACKLEAK gcc plugin arm64: Add stack information to on_accessible_stack drivers/perf: hisi: update the sccl_id/ccl_id when MT is supported arm64: fix ACPI dependencies rseq/selftests: Add support for arm64 arm64: acpi: fix alignment fault in accessing ACPI efi/arm: map UEFI memory map even w/o runtime services enabled efi/arm: preserve early mapping of UEFI memory map longer for BGRT drivers: acpi: add 
dependency of EFI for arm64 ...
164 lines
4.1 KiB
C
164 lines
4.1 KiB
C
/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
|
|
/*
|
|
* rseq.h
|
|
*
|
|
* (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
|
|
*/
|
|
|
|
#ifndef RSEQ_H
|
|
#define RSEQ_H
|
|
|
|
#include <stdint.h>
|
|
#include <stdbool.h>
|
|
#include <pthread.h>
|
|
#include <signal.h>
|
|
#include <sched.h>
|
|
#include <errno.h>
|
|
#include <stdio.h>
|
|
#include <stdlib.h>
|
|
#include <sched.h>
|
|
#include <linux/rseq.h>
|
|
|
|
/*
|
|
* Empty code injection macros, override when testing.
|
|
* It is important to consider that the ASM injection macros need to be
|
|
* fully reentrant (e.g. do not modify the stack).
|
|
*/
|
|
#ifndef RSEQ_INJECT_ASM
|
|
#define RSEQ_INJECT_ASM(n)
|
|
#endif
|
|
|
|
#ifndef RSEQ_INJECT_C
|
|
#define RSEQ_INJECT_C(n)
|
|
#endif
|
|
|
|
#ifndef RSEQ_INJECT_INPUT
|
|
#define RSEQ_INJECT_INPUT
|
|
#endif
|
|
|
|
#ifndef RSEQ_INJECT_CLOBBER
|
|
#define RSEQ_INJECT_CLOBBER
|
|
#endif
|
|
|
|
#ifndef RSEQ_INJECT_FAILED
|
|
#define RSEQ_INJECT_FAILED
|
|
#endif
|
|
|
|
extern __thread volatile struct rseq __rseq_abi;
|
|
|
|
#define rseq_likely(x) __builtin_expect(!!(x), 1)
|
|
#define rseq_unlikely(x) __builtin_expect(!!(x), 0)
|
|
#define rseq_barrier() __asm__ __volatile__("" : : : "memory")
|
|
|
|
#define RSEQ_ACCESS_ONCE(x) (*(__volatile__ __typeof__(x) *)&(x))
|
|
#define RSEQ_WRITE_ONCE(x, v) __extension__ ({ RSEQ_ACCESS_ONCE(x) = (v); })
|
|
#define RSEQ_READ_ONCE(x) RSEQ_ACCESS_ONCE(x)
|
|
|
|
#define __rseq_str_1(x) #x
|
|
#define __rseq_str(x) __rseq_str_1(x)
|
|
|
|
#define rseq_log(fmt, args...) \
|
|
fprintf(stderr, fmt "(in %s() at " __FILE__ ":" __rseq_str(__LINE__)"\n", \
|
|
## args, __func__)
|
|
|
|
#define rseq_bug(fmt, args...) \
|
|
do { \
|
|
rseq_log(fmt, ##args); \
|
|
abort(); \
|
|
} while (0)
|
|
|
|
#if defined(__x86_64__) || defined(__i386__)
|
|
#include <rseq-x86.h>
|
|
#elif defined(__ARMEL__)
|
|
#include <rseq-arm.h>
|
|
#elif defined (__AARCH64EL__)
|
|
#include <rseq-arm64.h>
|
|
#elif defined(__PPC__)
|
|
#include <rseq-ppc.h>
|
|
#elif defined(__mips__)
|
|
#include <rseq-mips.h>
|
|
#elif defined(__s390__)
|
|
#include <rseq-s390.h>
|
|
#else
|
|
#error unsupported target
|
|
#endif
|
|
|
|
/*
|
|
* Register rseq for the current thread. This needs to be called once
|
|
* by any thread which uses restartable sequences, before they start
|
|
* using restartable sequences, to ensure restartable sequences
|
|
* succeed. A restartable sequence executed from a non-registered
|
|
* thread will always fail.
|
|
*/
|
|
int rseq_register_current_thread(void);
|
|
|
|
/*
|
|
* Unregister rseq for current thread.
|
|
*/
|
|
int rseq_unregister_current_thread(void);
|
|
|
|
/*
|
|
* Restartable sequence fallback for reading the current CPU number.
|
|
*/
|
|
int32_t rseq_fallback_current_cpu(void);
|
|
|
|
/*
|
|
* Values returned can be either the current CPU number, -1 (rseq is
|
|
* uninitialized), or -2 (rseq initialization has failed).
|
|
*/
|
|
static inline int32_t rseq_current_cpu_raw(void)
|
|
{
|
|
return RSEQ_ACCESS_ONCE(__rseq_abi.cpu_id);
|
|
}
|
|
|
|
/*
|
|
* Returns a possible CPU number, which is typically the current CPU.
|
|
* The returned CPU number can be used to prepare for an rseq critical
|
|
* section, which will confirm whether the cpu number is indeed the
|
|
* current one, and whether rseq is initialized.
|
|
*
|
|
* The CPU number returned by rseq_cpu_start should always be validated
|
|
* by passing it to a rseq asm sequence, or by comparing it to the
|
|
* return value of rseq_current_cpu_raw() if the rseq asm sequence
|
|
* does not need to be invoked.
|
|
*/
|
|
static inline uint32_t rseq_cpu_start(void)
|
|
{
|
|
return RSEQ_ACCESS_ONCE(__rseq_abi.cpu_id_start);
|
|
}
|
|
|
|
/*
 * Return the current CPU number, always valid.
 *
 * Fast path: read the cpu_id registered by rseq.  If that read yields
 * a negative value (rseq uninitialized or failed), fall back to
 * rseq_fallback_current_cpu() to obtain the CPU number another way.
 */
static inline uint32_t rseq_current_cpu(void)
{
	int32_t raw = rseq_current_cpu_raw();

	if (rseq_likely(raw >= 0))
		return raw;
	/* rseq not registered for this thread: take the slow path. */
	return rseq_fallback_current_cpu();
}
|
|
|
|
/*
 * Clear this thread's rseq_cs pointer, disarming any registered rseq
 * critical section descriptor.
 *
 * The write differs by ABI because rseq_cs is a 64-bit field in the
 * shared struct rseq (from <linux/rseq.h>): LP64 targets store through
 * the full pointer member, while 32-bit targets store through the
 * ptr32 sub-member — presumably so the unused upper half of the field
 * stays zero (NOTE(review): confirm against the struct rseq layout in
 * the matching kernel headers).
 */
static inline void rseq_clear_rseq_cs(void)
{
#ifdef __LP64__
	__rseq_abi.rseq_cs.ptr = 0;
#else
	__rseq_abi.rseq_cs.ptr.ptr32 = 0;
#endif
}
|
|
|
|
/*
|
|
* rseq_prepare_unload() should be invoked by each thread executing a rseq
|
|
* critical section at least once between their last critical section and
|
|
* library unload of the library defining the rseq critical section
|
|
* (struct rseq_cs). This also applies to use of rseq in code generated by
|
|
* JIT: rseq_prepare_unload() should be invoked at least once by each
|
|
* thread executing a rseq critical section before reclaim of the memory
|
|
* holding the struct rseq_cs.
|
|
*/
|
|
/*
 * Disarm this thread's rseq critical section descriptor by clearing
 * rseq_cs (see rseq_clear_rseq_cs()).  Per the comment above, this
 * must run at least once per thread between its last critical section
 * and reclaim of the memory holding the struct rseq_cs (library
 * unload, or freeing of JIT-generated code).
 */
static inline void rseq_prepare_unload(void)
{
	rseq_clear_rseq_cs();
}
|
|
|
|
#endif /* RSEQ_H */