Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-27 00:25:21 +07:00
Commit 9a01c3ed5c
Patch series "improvements to the nmi_backtrace code" v9.

This patch series modifies the trigger_xxx_backtrace() NMI-based remote backtracing code to make it more flexible, and makes a few small improvements along the way.

The motivation comes from the task isolation code, where there are scenarios where we want to be able to diagnose a case where some cpu is about to interrupt a task-isolated cpu. It can be helpful to see both where the interrupting cpu is, and also an approximation of where the cpu that is being interrupted is. The nmi_backtrace framework allows us to discover the stack of the interrupted cpu.

I've tested that the change works as desired on tile, and build-tested x86, arm, mips, and sparc64. For x86 I confirmed that the generic cpuidle stuff as well as the architecture-specific routines are in the new cpuidle section. For arm, mips, and sparc I just build-tested it and made sure the generic cpuidle routines were in the new cpuidle section, but I didn't attempt to figure out which the platform-specific idle routines might be. That might be more usefully done by someone with platform experience in follow-up patches.

This patch (of 4):

Currently you can only request a backtrace of either all cpus, or all cpus but yourself. It can also be helpful to request a remote backtrace of a single cpu, and since we want that, the logical extension is to support a cpumask as the underlying primitive.

This change modifies the existing lib/nmi_backtrace.c code to take a cpumask as its basic primitive, and modifies the linux/nmi.h code to use the new "cpumask" method instead.

The existing clients of nmi_backtrace (arm and x86) are converted to using the new cpumask approach in this change. The other users of the backtracing API (sparc64 and mips) are converted to use the cpumask approach rather than the all/allbutself approach. The mips code ignored the "include_self" boolean but with this change it will now also dump a local backtrace if requested.

Link: http://lkml.kernel.org/r/1472487169-14923-2-git-send-email-cmetcalf@mellanox.com
Signed-off-by: Chris Metcalf <cmetcalf@mellanox.com>
Tested-by: Daniel Thompson <daniel.thompson@linaro.org> [arm]
Reviewed-by: Aaron Tomlin <atomlin@redhat.com>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
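As a rough sketch of the cpumask primitive this patch describes, the generic helpers in linux/nmi.h can be expressed as thin wrappers around arch_trigger_cpumask_backtrace(). The wrapper names and exact signatures below are illustrative, based only on the description above, not a verbatim copy of the patch:

#include <linux/cpumask.h>

/* Illustrative wrappers over the arch hook; exact names and signatures
 * in the tree may differ from this sketch. */
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, false);
	return true;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
	/* exclude_self == true skips the requesting cpu */
	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
	return true;
}

static inline bool trigger_cpumask_backtrace(const struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, false);
	return true;
}

static inline bool trigger_single_cpu_backtrace(int cpu)
{
	/* the new capability: backtrace exactly one remote cpu */
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
	return true;
}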
100 lines | 3.0 KiB | C
/* irq.h: IRQ registers on the 64-bit Sparc.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#ifndef _SPARC64_IRQ_H
#define _SPARC64_IRQ_H

#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <asm/pil.h>
#include <asm/ptrace.h>

/* IMAP/ICLR register defines */
#define IMAP_VALID		0x80000000UL	/* IRQ Enabled		*/
#define IMAP_TID_UPA		0x7c000000UL	/* UPA TargetID		*/
#define IMAP_TID_JBUS		0x7c000000UL	/* JBUS TargetID	*/
#define IMAP_TID_SHIFT		26
#define IMAP_AID_SAFARI		0x7c000000UL	/* Safari AgentID	*/
#define IMAP_AID_SHIFT		26
#define IMAP_NID_SAFARI		0x03e00000UL	/* Safari NodeID	*/
#define IMAP_NID_SHIFT		21
#define IMAP_IGN		0x000007c0UL	/* IRQ Group Number	*/
#define IMAP_INO		0x0000003fUL	/* IRQ Number		*/
#define IMAP_INR		0x000007ffUL	/* Full interrupt number */

#define ICLR_IDLE		0x00000000UL	/* Idle state		*/
#define ICLR_TRANSMIT		0x00000001UL	/* Transmit state	*/
#define ICLR_PENDING		0x00000003UL	/* Pending state	*/

/* The largest number of unique interrupt sources we support.
 * If this needs to ever be larger than 255, you need to change
 * the type of ino_bucket->irq as appropriate.
 *
 * ino_bucket->irq allocation is made during {sun4v_,}build_irq().
 */
#define NR_IRQS			(2048)

void irq_install_pre_handler(int irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2);
#define irq_canonicalize(irq)	(irq)
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino);
unsigned int sun4v_build_msi(u32 devhandle, unsigned int *irq_p,
			     unsigned int msi_devino_start,
			     unsigned int msi_devino_end);
void sun4v_destroy_msi(unsigned int irq);
unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
			     unsigned int msi_devino_start,
			     unsigned int msi_devino_end,
			     unsigned long imap_base,
			     unsigned long iclr_base);
void sun4u_destroy_msi(unsigned int irq);

unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino);
void irq_free(unsigned int irq);

void __init init_IRQ(void);
void fixup_irqs(void);

static inline void set_softint(unsigned long bits)
{
	__asm__ __volatile__("wr	%0, 0x0, %%set_softint"
			     : /* No outputs */
			     : "r" (bits));
}

static inline void clear_softint(unsigned long bits)
{
	__asm__ __volatile__("wr	%0, 0x0, %%clear_softint"
			     : /* No outputs */
			     : "r" (bits));
}

static inline unsigned long get_softint(void)
{
	unsigned long retval;

	__asm__ __volatile__("rd	%%softint, %0"
			     : "=r" (retval));
	return retval;
}

void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
				    bool exclude_self);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace

extern void *hardirq_stack[NR_CPUS];
extern void *softirq_stack[NR_CPUS];
#define __ARCH_HAS_DO_SOFTIRQ

#define NO_IRQ			0xffffffff

#endif
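As a worked example of how the IMAP_* field masks in this header fit together, the helper below decodes a raw interrupt map value; the function name is hypothetical and is not part of the header itself:

/* Hypothetical helper, for illustration only: pull the group and
 * interrupt numbers out of a raw IMAP register value. */
static inline unsigned int imap_decode_example(unsigned long imap)
{
	unsigned int ign = (imap & IMAP_IGN) >> 6;	/* IRQ Group Number */
	unsigned int ino = imap & IMAP_INO;		/* IRQ Number */

	/* Recombining the two fields selects the same bits as IMAP_INR,
	 * i.e. the full interrupt number. */
	return (ign << 6) | ino;
}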