mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-19 05:58:42 +07:00
f5df269618
The Software Delegated Exception Interface (SDEI) is an ARM standard for registering callbacks from the platform firmware into the OS. This is typically used to implement RAS notifications.

Such notifications enter the kernel at the registered entry-point with the register values of the interrupted CPU context. Because this is not a CPU exception, it cannot reuse the existing entry code (crucially, we don't implicitly know which exception level we interrupted).

Add the entry point to entry.S to set us up for calling into C code. If the event interrupted code that had interrupts masked, we always return to that location. Otherwise we pretend this was an IRQ, and use SDEI's complete_and_resume call to return to vbar_el1 + offset. This allows the kernel to deliver signals to user space processes. For KVM this triggers the world switch, a quick spin round vcpu_run, then back into the guest, unless there are pending signals.

Add sdei_mask_local_cpu() calls to the smp_send_stop() code; this covers the panic() code-path, which doesn't invoke cpuhotplug notifiers.

Because we can interrupt entry-from/exit-to another EL, we can't trust the value in sp_el0 or x29 even if we interrupted the kernel. In this case the code in entry.S will save/restore sp_el0 and use the value in __entry_task.

When we have VMAP stacks we can interrupt the stack-overflow test, which stirs x0 into sp, meaning we have to have our own VMAP stacks. For now these are allocated when we probe the interface. Future patches will add refcounting hooks to allow the arch code to allocate them lazily.

Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
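The return-path decision described above can be sketched in C. This is an illustrative sketch only, not the kernel's implementation: sdei_handler_sketch(), struct interrupted_regs and IRQ_VECTOR_OFFSET are assumed names introduced for illustration, and the real work is done in the entry.S code referenced above.

#include <stdint.h>

#define PSR_I_BIT		(1u << 7)	/* IRQs masked in the interrupted PSTATE */
#define IRQ_VECTOR_OFFSET	0x280		/* assumed IRQ vector offset from VBAR_EL1 */

struct interrupted_regs {
	uint64_t pstate;	/* PSTATE of the interrupted context */
	uint64_t vbar_el1;	/* vector base of the interrupted exception level */
	uint64_t pc;		/* address at which the event fired */
};

/*
 * Decide where the firmware's complete-and-resume call should land:
 * straight back at the interrupted PC, or at an IRQ vector so the normal
 * exception-return path (signal delivery, KVM world switch) runs.
 */
uint64_t sdei_handler_sketch(const struct interrupted_regs *regs)
{
	/* ... run the registered RAS/notification callback here ... */

	/* Interrupts were masked: always return to the interrupted location. */
	if (regs->pstate & PSR_I_BIT)
		return regs->pc;

	/* Otherwise pretend this was an IRQ: resume at vbar_el1 + offset. */
	return regs->vbar_el1 + IRQ_VECTOR_OFFSET;
}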
96 lines
2.5 KiB
C
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>

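/*
 * One frame record from a walked stack: the saved frame pointer and the
 * return address paired with it. With the function-graph tracer enabled,
 * 'graph' tracks the position on the tracer's return-address stack so the
 * original return address can be reported instead of the trampoline.
 */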
struct stackframe {
	unsigned long fp;
	unsigned long pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned int graph;
#endif
};

extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			    int (*fn)(struct stackframe *, void *), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);

DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);

static inline bool on_irq_stack(unsigned long sp)
{
	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
	unsigned long high = low + IRQ_STACK_SIZE;

	if (!low)
		return false;

	return (low <= sp && sp < high);
}

static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp)
{
	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	return (low <= sp && sp < high);
}

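/*
 * With CONFIG_VMAP_STACK, each CPU also has a dedicated overflow stack
 * that the entry code switches to when it detects a kernel stack overflow.
 */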
#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);

static inline bool on_overflow_stack(unsigned long sp)
{
	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return (low <= sp && sp < high);
}
#else
static inline bool on_overflow_stack(unsigned long sp) { return false; }
#endif

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static inline bool on_accessible_stack(struct task_struct *tsk, unsigned long sp)
{
	if (on_task_stack(tsk, sp))
		return true;
	if (tsk != current || preemptible())
		return false;
	if (on_irq_stack(sp))
		return true;
	if (on_overflow_stack(sp))
		return true;
	if (on_sdei_stack(sp))
		return true;

	return false;
}

#endif /* __ASM_STACKTRACE_H */
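For context, here is a simplified sketch of how an unwinder can use on_accessible_stack() before dereferencing a frame record; it is an illustration under assumed simplifications, not the exact body of unwind_frame():

/*
 * Sketch: validate fp against the stacks we are allowed to touch before
 * loading the next (fp, lr) pair from it.
 */
static int unwind_frame_sketch(struct task_struct *tsk, struct stackframe *frame)
{
	unsigned long fp = frame->fp;

	/* AArch64 frame records are 16-byte aligned pairs of (fp, lr). */
	if (fp & 0xf)
		return -EINVAL;

	/* Never dereference fp unless it lies on an accessible stack. */
	if (!on_accessible_stack(tsk, fp))
		return -EINVAL;

	frame->fp = *(unsigned long *)fp;
	frame->pc = *(unsigned long *)(fp + 8);

	return 0;
}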