mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-22 15:45:09 +07:00
6727ad9e20
When doing an nmi backtrace of many cores, most of which are idle, the output is a little overwhelming and very uninformative. Suppress messages for cpus that are idling when they are interrupted and just emit one line, "NMI backtrace for N skipped: idling at pc 0xNNN". We do this by grouping all the cpuidle code together into a new .cpuidle.text section, and then checking the address of the interrupted PC to see if it lies within that section. This commit suitably tags x86 and tile idle routines, and only adds in the minimal framework for other architectures. Link: http://lkml.kernel.org/r/1472487169-14923-5-git-send-email-cmetcalf@mellanox.com Signed-off-by: Chris Metcalf <cmetcalf@mellanox.com> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Tested-by: Peter Zijlstra (Intel) <peterz@infradead.org> Tested-by: Daniel Thompson <daniel.thompson@linaro.org> [arm] Tested-by: Petr Mladek <pmladek@suse.com> Cc: Aaron Tomlin <atomlin@redhat.com> Cc: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net> Cc: Russell King <linux@arm.linux.org.uk> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@elte.hu> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
203 lines
4.0 KiB
C
203 lines
4.0 KiB
C
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
/*
 * Functions tagged __cpuidle are collected into the .cpuidle.text
 * section, so the NMI backtrace code can tell from an interrupted PC
 * that a CPU was merely idling and print a one-line "skipped" notice
 * instead of a full (uninformative) backtrace.
 */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))
|
/*
|
|
* Interrupt control:
|
|
*/
|
|
|
|
static inline unsigned long native_save_fl(void)
|
|
{
|
|
unsigned long flags;
|
|
|
|
/*
|
|
* "=rm" is safe here, because "pop" adjusts the stack before
|
|
* it evaluates its effective address -- this is part of the
|
|
* documented behavior of the "pop" instruction.
|
|
*/
|
|
asm volatile("# __raw_save_flags\n\t"
|
|
"pushf ; pop %0"
|
|
: "=rm" (flags)
|
|
: /* no input */
|
|
: "memory");
|
|
|
|
return flags;
|
|
}
|
|
|
|
/*
 * Write @flags back into the EFLAGS register via push/popf.
 * "cc" is clobbered because popf rewrites the arithmetic flags;
 * "memory" orders this against surrounding accesses since the
 * interrupt-enable state may change.
 */
static inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     :"g" (flags)
		     :"memory", "cc");
}
|
|
|
|
/* Mask maskable hardware interrupts on this CPU ("cli"). */
static inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}
|
|
|
|
/* Unmask maskable hardware interrupts on this CPU ("sti"). */
static inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}
|
|
|
|
/*
 * Enable interrupts and halt, atomically: "sti" keeps interrupts
 * inhibited for one more instruction, so no interrupt can arrive
 * between the "sti" and the "hlt" and leave us halted with the
 * wakeup already consumed.  Tagged __cpuidle (idle path).
 */
static inline __cpuidle void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}
|
|
|
|
/*
 * Halt until the next interrupt, without touching the interrupt
 * flag.  Tagged __cpuidle (idle path).
 */
static inline __cpuidle void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_PARAVIRT
|
|
#include <asm/paravirt.h>
|
|
#else
|
|
#ifndef __ASSEMBLY__
|
|
#include <linux/types.h>
|
|
|
|
/*
 * arch_* entry points for the !CONFIG_PARAVIRT case: thin wrappers
 * around the native_* primitives above.  notrace keeps ftrace from
 * instrumenting these low-level helpers.
 */
/* Return a snapshot of the current EFLAGS. */
static inline notrace unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}
|
|
|
|
/* Restore an EFLAGS snapshot previously taken by arch_local_irq_save(). */
static inline notrace void arch_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}
|
|
|
|
/* Disable interrupts on the local CPU. */
static inline notrace void arch_local_irq_disable(void)
{
	native_irq_disable();
}
|
|
|
|
/* Enable interrupts on the local CPU. */
static inline notrace void arch_local_irq_enable(void)
{
	native_irq_enable();
}
|
|
|
|
/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
/* Atomically enable interrupts and halt; __cpuidle marks the idle path. */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}
|
|
|
|
/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
/* Plain hlt; leaves the interrupt flag untouched. */
static inline __cpuidle void halt(void)
{
	native_halt();
}
|
|
|
|
/*
 * For spinlocks, etc:
 */
/*
 * Snapshot EFLAGS, then disable interrupts.  The order matters: the
 * returned value records the pre-call interrupt state, which the
 * caller later hands to arch_local_irq_restore().
 */
static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}
|
|
#else

/* Assembler-side (.S) equivalents for the !CONFIG_PARAVIRT case. */
#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#define SWAPGS	swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack). So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs

/* Native build: nothing to fix up before exception handling. */
#define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */

#define INTERRUPT_RETURN	jmp native_iret

/* Return to user space via sysret, restoring the user GS first. */
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl

#else
#define INTERRUPT_RETURN		iret
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define GET_CR0_INTO_EAX		movl %cr0, %eax
#endif


#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
|
|
|
|
#ifndef __ASSEMBLY__
|
|
static inline int arch_irqs_disabled_flags(unsigned long flags)
|
|
{
|
|
return !(flags & X86_EFLAGS_IF);
|
|
}
|
|
|
|
/* Report whether interrupts are currently disabled on this CPU. */
static inline int arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
|
|
#endif /* !__ASSEMBLY__ */
|
|
|
|
#ifdef __ASSEMBLY__
/*
 * Irq-state tracing hooks for assembly code: they expand to calls into
 * the trace_hardirqs_* thunks when CONFIG_TRACE_IRQFLAGS is set, and to
 * nothing otherwise.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON	call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk;
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_X86_64
# define LOCKDEP_SYS_EXIT	call lockdep_sys_exit_thunk
/*
 * Variant for paths that run with interrupts off: re-enable them
 * around the lockdep call, with matching TRACE_IRQS_* updates.
 */
# define LOCKDEP_SYS_EXIT_IRQ	\
	TRACE_IRQS_ON; \
	sti; \
	call lockdep_sys_exit_thunk; \
	cli; \
	TRACE_IRQS_OFF;
# else
/*
 * 32-bit: save/restore the caller-clobbered registers (eax, ecx, edx)
 * around the C call into lockdep.
 */
# define LOCKDEP_SYS_EXIT	\
	pushl %eax;		\
	pushl %ecx;		\
	pushl %edx;		\
	call lockdep_sys_exit;	\
	popl %edx;		\
	popl %ecx;		\
	popl %eax;
# define LOCKDEP_SYS_EXIT_IRQ
# endif
#else
# define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ
#endif
#endif /* __ASSEMBLY__ */

#endif /* _X86_IRQFLAGS_H_ */
|