// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exec.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>

/*
 * This is allocated by cpu_suspend_init(), and used to store a pointer to
 * the 'struct sleep_stack_data' that contains a particular CPU's state.
 */
unsigned long *sleep_save_stash;

/*
 * This hook is provided so that cpu_suspend code can restore HW
 * breakpoints as early as possible in the resume path, before re-enabling
 * debug exceptions. Code cannot be run from a CPU PM notifier since by the
 * time the notifier runs, debug exceptions might have been enabled already,
 * with HW breakpoint register contents still in an unknown state.
 */
static int (*hw_breakpoint_restore)(unsigned int);
void __init cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int))
{
	/* Prevent multiple restore hook initializations */
	if (WARN_ON(hw_breakpoint_restore))
		return;
	hw_breakpoint_restore = hw_bp_restore;
}
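
/*
 * Illustrative registration (a sketch, not code from this file): the
 * debug code that owns the breakpoint registers would install its
 * restore callback once, early during boot. The my_* names below are
 * hypothetical and only show the expected callback signature.
 *
 *	static int my_hw_bp_reset(unsigned int cpu)
 *	{
 *		// Reprogram this CPU's breakpoint/watchpoint registers.
 *		return 0;
 *	}
 *
 *	static int __init my_register_restore_hook(void)
 *	{
 *		cpu_suspend_set_dbg_restorer(my_hw_bp_reset);
 *		return 0;
 *	}
 *	core_initcall(my_register_restore_hook);
 */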
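
/*
 * Runs on the resume path: the CPU has come back from reset and its saved
 * context has been restored, but debug exceptions are still masked. Undo
 * the boot-time MMU setup and restore state that is not covered by the
 * saved context before returning to the caller of cpu_suspend().
 */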
void notrace __cpu_suspend_exit(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * We are resuming from reset with the idmap active in TTBR0_EL1.
	 * We must uninstall the idmap and restore the expected MMU
	 * state before we can possibly return to userspace.
	 */
	cpu_uninstall_idmap();

	/*
	 * PSTATE was not saved over suspend/resume, re-enable any detected
	 * features that might not have been set correctly.
	 */
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
	uao_thread_switch(current);

	/*
	 * Restore HW breakpoint registers to sane values
	 * before debug exceptions are possibly re-enabled
	 * by cpu_suspend()'s local_daif_restore() call.
	 */
	if (hw_breakpoint_restore)
		hw_breakpoint_restore(cpu);
}

/*
 * cpu_suspend
 *
 * arg: argument to pass to the finisher function
 * fn: finisher function pointer
 *
 * Returns 0 after a successful suspend/resume cycle. If the finisher
 * returns (i.e. the suspend attempt failed), its non-zero return value
 * is propagated, or -EOPNOTSUPP if it returned 0.
 */
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	/*
	 * From this point debug exceptions are disabled to prevent
	 * updates to the mdscr register (saved and restored along with
	 * general purpose registers) from kernel debuggers.
	 */
	flags = local_daif_save();

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (aka suspend finishers), hence
	 * disable graph tracing during their execution.
	 */
	pause_graph_tracing();
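
	/*
	 * __cpu_suspend_enter() behaves like setjmp(): it saves the CPU
	 * context into 'state' and returns non-zero on the way down to
	 * suspend, and returns zero once cpu_resume() has restored that
	 * context on the way back up.
	 */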
	if (__cpu_suspend_enter(&state)) {
		/* Call the suspend finisher */
		ret = fn(arg);

		/*
		 * Never gets here, unless the suspend finisher fails.
		 * Successful cpu_suspend() should return from cpu_resume(),
		 * so returning through this code path is considered an error.
		 * If the return value is set to 0, force ret = -EOPNOTSUPP
		 * to make sure a proper error condition is propagated.
		 */
		if (!ret)
			ret = -EOPNOTSUPP;
	} else {
		__cpu_suspend_exit();
	}

	unpause_graph_tracing();

	/*
	 * Restore pstate flags. OS lock and mdscr have already been
	 * restored, so from this point onwards, debugging is fully
	 * re-enabled if it was enabled when the core started shutdown.
	 */
	local_daif_restore(flags);

	return ret;
}
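
/*
 * Illustrative use (a sketch, not code from this file): a platform
 * back-end wraps its firmware power-down request in a finisher and
 * passes it to cpu_suspend(). The my_* names are hypothetical.
 *
 *	static int my_fw_power_down(unsigned long state)
 *	{
 *		// Ask firmware to power the CPU down; only returns on
 *		// failure, otherwise the CPU re-enters via cpu_resume().
 *		return my_fw_suspend_call(state);
 *	}
 *
 *	static int my_enter_sleep(unsigned long state)
 *	{
 *		// 0 on a successful suspend/resume cycle, error otherwise.
 *		return cpu_suspend(state, my_fw_power_down);
 *	}
 */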
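
/*
 * Allocate the stash used by the resume path to locate each CPU's saved
 * context: one slot per MPIDR hash bucket, each holding the physical
 * address of that CPU's 'struct sleep_stack_data'.
 */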
static int __init cpu_suspend_init(void)
{
	/* sleep_save_stash is an array of physical addresses */
	sleep_save_stash = kcalloc(mpidr_hash_size(), sizeof(*sleep_save_stash),
				   GFP_KERNEL);

	if (WARN_ON(!sleep_save_stash))
		return -ENOMEM;

	return 0;
}
early_initcall(cpu_suspend_init);