Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
Synced 2024-12-27 01:25:35 +07:00
Commit 5388a5b821
machine_crash_nonpanic_core() does this:

        while (1)
                cpu_relax();

because the kernel has crashed, and we have no known safe way to deal with the
CPU.  So, we place the CPU into an infinite loop which we expect it to never
exit - at least not until the system as a whole is reset by some method.

In the absence of erratum 754327, this code assembles to:

        b       .

In other words, an infinite loop.  When erratum 754327 is enabled, this
becomes:

1:      dmb
        b       1b

It has been observed on some systems (eg, OMAP4) that, if a crash is
triggered, the system tries to kexec into the panic kernel but fails after
taking the secondary CPU down - placing it into one of these loops.  This
causes the system to livelock, and the most noticeable effect is that the
system stops after issuing:

        Loading crashdump kernel...

to the system console.

The tested-as-working solution I came up with was to add wfe() to these
infinite loops thusly:

        while (1) {
                cpu_relax();
                wfe();
        }

which, without 754327, builds to:

1:      wfe
        b       1b

or, with 754327 enabled:

1:      dmb
        wfe
        b       1b

Adding "wfe" does two things depending on the environment we're running under:

- where we're running on bare metal, and the processor implements "wfe", it
  stops us spinning endlessly in a loop where we're never going to do any
  useful work.

- if we're running in a VM, it allows the CPU to be given back to the
  hypervisor and rescheduled for other purposes (maybe a different VM) rather
  than wasting CPU cycles inside a crashed VM.

However, in light of erratum 794072, Will Deacon wanted to see 10 nops as
well - which is reasonable to cover the case where we have erratum 754327
enabled _and_ we have a processor that doesn't implement the wfe hint.  So, we
now end up with:

1:      wfe
        b       1b

when erratum 754327 is disabled, or:

1:      dmb
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        wfe
        b       1b

when erratum 754327 is enabled.  We also get the dmb + 10 nop sequence
elsewhere in the kernel, in terminating loops.

This is reasonable - it means we get the workaround for erratum 794072 when
erratum 754327 is enabled, but still relinquish the dead processor - either by
placing it in a lower power mode when wfe is implemented as such, or by
returning it to the hypervisor; or, in the case where wfe is a no-op, we use
the workaround specified in erratum 794072 to avoid the problem.

These are two entirely orthogonal problems - the 10 nops address erratum
794072, and the wfe is an optimisation that makes the system more efficient
when crashed, either in terms of power consumption or by allowing the
host/other VMs to make use of the CPU.

I don't see any reason not to use kexec() inside a VM - it has the potential
to provide automated recovery from a failure of the VM's kernel, with the
opportunity of saving a crashdump of the failure.  A panic() with a reboot
timeout won't do that, and reading the libvirt documentation, setting
on_reboot to "preserve" won't either (the documentation states "The preserve
action for an on_reboot event is treated as a destroy".)  Surely it has to be
a good thing to avoid having CPUs spinning inside a VM that is doing no useful
work.

Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
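For illustration, a minimal sketch of the parking loop described above, in the
shape it takes after this change.  Only the loop body (cpu_relax() plus
wfe()) comes from the commit message; the function name crash_park_cpu() and
the include locations are assumptions for the sketch, not the kernel's actual
machine_crash_nonpanic_core().

        /*
         * Sketch only: park a crashed CPU.  The loop body is the one added by
         * this change; the function name and includes are assumptions.
         */
        #include <asm/barrier.h>        /* wfe() - assumed home of the helper */
        #include <asm/processor.h>      /* cpu_relax() - the header below */

        static void crash_park_cpu(void)        /* hypothetical helper name */
        {
                while (1) {
                        cpu_relax();    /* dmb (+ 10 nops) when erratum 754327 is enabled */
                        wfe();          /* sleep, or hand the vCPU back to a hypervisor */
                }
        }

With erratum 754327 enabled this compiles to roughly the dmb + 10 nop + wfe +
branch sequence shown above; without it, just wfe and a branch back.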
151 lines
3.5 KiB
C
/*
 * arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARM_PROCESSOR_H
#define __ASM_ARM_PROCESSOR_H

#ifdef __KERNEL__

#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/unified.h>

#ifdef __KERNEL__
#define STACK_TOP       ((current->personality & ADDR_LIMIT_32BIT) ? \
                         TASK_SIZE : TASK_SIZE_26)
#define STACK_TOP_MAX   TASK_SIZE
#endif

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        struct perf_event *hbp[ARM_MAX_HBP_SLOTS];
#endif
};

struct thread_struct {
        /* fault info */
        unsigned long address;
        unsigned long trap_no;
        unsigned long error_code;
        /* debugging */
        struct debug_info debug;
};

/*
 * Everything usercopied to/from thread_struct is statically-sized, so
 * no hardened usercopy whitelist is needed.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
                                                unsigned long *size)
{
        *offset = *size = 0;
}

#define INIT_THREAD { }

#define start_thread(regs,pc,sp)                                        \
({                                                                      \
        unsigned long r7, r8, r9;                                       \
                                                                        \
        if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) {                      \
                r7 = regs->ARM_r7;                                      \
                r8 = regs->ARM_r8;                                      \
                r9 = regs->ARM_r9;                                      \
        }                                                               \
        memset(regs->uregs, 0, sizeof(regs->uregs));                    \
        if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&                      \
            current->personality & FDPIC_FUNCPTRS) {                    \
                regs->ARM_r7 = r7;                                      \
                regs->ARM_r8 = r8;                                      \
                regs->ARM_r9 = r9;                                      \
                regs->ARM_r10 = current->mm->start_data;                \
        } else if (!IS_ENABLED(CONFIG_MMU))                             \
                regs->ARM_r10 = current->mm->start_data;                \
        if (current->personality & ADDR_LIMIT_32BIT)                    \
                regs->ARM_cpsr = USR_MODE;                              \
        else                                                            \
                regs->ARM_cpsr = USR26_MODE;                            \
        if (elf_hwcap & HWCAP_THUMB && pc & 1)                          \
                regs->ARM_cpsr |= PSR_T_BIT;                            \
        regs->ARM_cpsr |= PSR_ENDSTATE;                                 \
        regs->ARM_pc = pc & ~1;         /* pc */                        \
        regs->ARM_sp = sp;              /* sp */                        \
})

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
#define cpu_relax()                                             \
        do {                                                    \
                smp_mb();                                       \
                __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
        } while (0)
#else
#define cpu_relax()             barrier()
#endif

#define task_pt_regs(p) \
        ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)   task_pt_regs(tsk)->ARM_pc
#define KSTK_ESP(tsk)   task_pt_regs(tsk)->ARM_sp

#ifdef CONFIG_SMP
#define __ALT_SMP_ASM(smp, up)                                          \
        "9998:  " smp "\n"                                              \
        "       .pushsection \".alt.smp.init\", \"a\"\n"                \
        "       .long   9998b\n"                                        \
        "       " up "\n"                                               \
        "       .popsection\n"
#else
#define __ALT_SMP_ASM(smp, up)  up
#endif

/*
 * Prefetching support - only ARMv5.
 */
#if __LINUX_ARM_ARCH__ >= 5

#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
        __asm__ __volatile__(
                "pld\t%a0"
                :: "p" (ptr));
}

#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
        __asm__ __volatile__(
                ".arch_extension mp\n"
                __ALT_SMP_ASM(
                        WASM(pldw) "\t%a0",
                        WASM(pld) "\t%a0"
                )
                :: "p" (ptr));
}
#endif
#endif

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#endif

#endif /* __ASM_ARM_PROCESSOR_H */