Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-25 11:30:54 +07:00)

commit 4f19b8803b

Merge branch 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 apic updates from Ingo Molnar:
 "The main changes in this cycle were:

   - introduce optimized single IPI sending methods on modern APICs
     (Linus Torvalds, Thomas Gleixner)

   - kexec/crash APIC handling fixes and enhancements (Hidehiro Kawai)

   - extend lapic vector saving/restoring to the CMCI (MCE) vector as
     well (Juergen Gross)

   - various fixes and enhancements (Jake Oshins, Len Brown)"

* 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  x86/irq: Export functions to allow MSI domains in modules
  Documentation: Document kernel.panic_on_io_nmi sysctl
  x86/nmi: Save regs in crash dump on external NMI
  x86/apic: Introduce apic_extnmi command line parameter
  kexec: Fix race between panic() and crash_kexec()
  panic, x86: Allow CPUs to save registers even if looping in NMI context
  panic, x86: Fix re-entrance problem due to panic on NMI
  x86/apic: Fix the saving and restoring of lapic vectors during suspend/resume
  x86/smpboot: Re-enable init_udelay=0 by default on modern CPUs
  x86/smp: Remove single IPI wrapper
  x86/apic: Use default send single IPI wrapper
  x86/apic: Provide default send single IPI wrapper
  x86/apic: Implement single IPI for apic_noop
  x86/apic: Wire up single IPI for apic_numachip
  x86/apic: Wire up single IPI for x2apic_uv
  x86/apic: Implement single IPI for x2apic_phys
  x86/apic: Wire up single IPI for bigsmp_apic
  x86/apic: Remove pointless indirections from bigsmp_apic
  x86/apic: Wire up single IPI for apic_physflat
  x86/apic: Remove pointless indirections from apic_physflat
  ...
@@ -472,6 +472,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Change the amount of debugging information output
 			when initialising the APIC and IO-APIC components.
 
+	apic_extnmi=	[APIC,X86] External NMI delivery setting
+			Format: { bsp (default) | all | none }
+			bsp:  External NMI is delivered only to CPU 0
+			all:  External NMIs are broadcast to all CPUs as a
+			      backup of CPU 0
+			none: External NMI is masked for all CPUs. This is
+			      useful so that a dump capture kernel won't be
+			      shot down by NMI
+
 	autoconf=	[IPV6]
 			See Documentation/networking/ipv6.txt.
 
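Note: as a rough, hedged sketch of what the three delivery modes above mean for the local APIC, the helper below mirrors the LINT1 programming logic added to setup_local_APIC() later in this merge. The function name lint1_value() is purely illustrative and does not exist in the kernel.

```c
/*
 * Sketch only: map the documented apic_extnmi modes onto the LVT LINT1
 * value a given CPU would program. The real logic lives inline in
 * setup_local_APIC(); this helper is illustrative.
 */
static unsigned int lint1_value(unsigned int cpu, int extnmi_mode)
{
	unsigned int value = APIC_DM_NMI;

	/* "bsp": only CPU 0 unmasked; "all": every CPU unmasked;
	 * "none": external NMI masked everywhere. */
	if (!((cpu == 0 && extnmi_mode != APIC_EXTNMI_NONE) ||
	      extnmi_mode == APIC_EXTNMI_ALL))
		value |= APIC_LVT_MASKED;

	return value;
}
```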
@@ -551,6 +551,21 @@ the recommended setting is 60.
 
 ==============================================================
 
+panic_on_io_nmi:
+
+Controls the kernel's behavior when a CPU receives an NMI caused by
+an IO error.
+
+0: try to continue operation (default)
+
+1: panic immediately. The IO error triggered an NMI. This indicates a
+   serious system condition which could result in IO data corruption.
+   Rather than continuing, panicking might be a better choice. Some
+   servers issue this sort of NMI when the dump button is pushed,
+   and you can use this option to take a crash dump.
+
+==============================================================
+
 panic_on_oops:
 
 Controls the kernel's behaviour when an oops or BUG is encountered.
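Note: a minimal userspace sketch of flipping the sysctl documented above at runtime; it just writes the procfs file implied by the sysctl name and is equivalent to `sysctl -w kernel.panic_on_io_nmi=1`.

```c
/* Minimal sketch: enable kernel.panic_on_io_nmi at runtime by writing
 * /proc/sys/kernel/panic_on_io_nmi (requires root). */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/panic_on_io_nmi", "w");

	if (!f) {
		perror("panic_on_io_nmi");
		return 1;
	}
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}
```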
@@ -23,6 +23,11 @@
 #define APIC_VERBOSE 1
 #define APIC_DEBUG   2
 
+/* Macros for apic_extnmi which controls external NMI masking */
+#define APIC_EXTNMI_BSP		0 /* Default */
+#define APIC_EXTNMI_ALL		1
+#define APIC_EXTNMI_NONE	2
+
 /*
  * Define the default level of output to be very little
  * This can be turned up by using apic=verbose for more
@@ -303,6 +308,7 @@ struct apic {
 					    unsigned int *apicid);
 
 	/* ipi */
+	void (*send_IPI)(int cpu, int vector);
 	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
 	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
 					 int vector);
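Note: the new send_IPI callback targets a single CPU directly instead of building a one-CPU cpumask. As a hedged sketch (not part of this series, which converts every APIC driver), a caller that had to cope with an unconverted driver could fall back to the cpumask path:

```c
/* Illustrative fallback only: prefer the dedicated single-target hook,
 * otherwise use the cpumask-based path with a one-CPU mask. */
static void send_single_ipi(int cpu, int vector)
{
	if (apic->send_IPI)
		apic->send_IPI(cpu, vector);
	else
		apic->send_IPI_mask(cpumask_of(cpu), vector);
}
```

In the merged code this fallback is unnecessary: every driver gets a send_IPI implementation, so callers such as arch/x86/kernel/smp.c (see the hunk near the end) invoke apic->send_IPI() unconditionally.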
@@ -119,6 +119,8 @@ static inline void
 	native_apic_mem_write(APIC_ICR, cfg);
 }
 
+extern void default_send_IPI_single(int cpu, int vector);
+extern void default_send_IPI_single_phys(int cpu, int vector);
 extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
 						 int vector);
 extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
@@ -1,7 +1,13 @@
 #ifndef _ASM_X86_MSI_H
 #define _ASM_X86_MSI_H
 #include <asm/hw_irq.h>
+#include <asm/irqdomain.h>
 
 typedef struct irq_alloc_info msi_alloc_info_t;
 
+int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
+		    msi_alloc_info_t *arg);
+
+void pci_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc);
+
 #endif /* _ASM_X86_MSI_H */
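Note: pci_msi_prepare() and pci_msi_set_desc() become non-static here (and are exported in the msi.c hunk below) so MSI irqdomains can be built in modules. A hedged sketch of how a modular interrupt-controller driver might reuse them as its msi_domain_ops callbacks; my_msi_domain_ops is a hypothetical name, not something defined by this patch set.

```c
/* Hypothetical modular user of the newly exported helpers: plug the
 * generic PCI/MSI prepare and set_desc callbacks into a driver-private
 * msi_domain_ops instance. */
#include <linux/msi.h>

static struct msi_domain_ops my_msi_domain_ops = {
	.msi_prepare	= pci_msi_prepare,
	.set_desc	= pci_msi_set_desc,
};
```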
@@ -25,5 +25,6 @@ void __noreturn machine_real_restart(unsigned int type);
 
 typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
+void run_crash_ipi_callback(struct pt_regs *regs);
 
 #endif /* _ASM_X86_REBOOT_H */
@@ -81,6 +81,12 @@ physid_mask_t phys_cpu_present_map;
  */
 static unsigned int disabled_cpu_apicid __read_mostly = BAD_APICID;
 
+/*
+ * This variable controls which CPUs receive external NMIs. By default,
+ * external NMIs are delivered only to the BSP.
+ */
+static int apic_extnmi = APIC_EXTNMI_BSP;
+
 /*
  * Map cpu index to physical APIC ID
  */
@@ -1161,6 +1167,8 @@ void __init init_bsp_APIC(void)
 	value = APIC_DM_NMI;
 	if (!lapic_is_integrated())		/* 82489DX */
 		value |= APIC_LVT_LEVEL_TRIGGER;
+	if (apic_extnmi == APIC_EXTNMI_NONE)
+		value |= APIC_LVT_MASKED;
 	apic_write(APIC_LVT1, value);
 }
 
@@ -1378,9 +1386,11 @@ void setup_local_APIC(void)
 	apic_write(APIC_LVT0, value);
 
 	/*
-	 * only the BP should see the LINT1 NMI signal, obviously.
+	 * Only the BSP sees the LINT1 NMI signal by default. This can be
+	 * modified by apic_extnmi= boot option.
 	 */
-	if (!cpu)
+	if ((!cpu && apic_extnmi != APIC_EXTNMI_NONE) ||
+	    apic_extnmi == APIC_EXTNMI_ALL)
 		value = APIC_DM_NMI;
 	else
 		value = APIC_DM_NMI | APIC_LVT_MASKED;
@@ -2270,6 +2280,7 @@ static struct {
 	unsigned int apic_tmict;
 	unsigned int apic_tdcr;
 	unsigned int apic_thmr;
+	unsigned int apic_cmci;
 } apic_pm_state;
 
 static int lapic_suspend(void)
@@ -2299,6 +2310,10 @@ static int lapic_suspend(void)
 	if (maxlvt >= 5)
 		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
 #endif
+#ifdef CONFIG_X86_MCE_INTEL
+	if (maxlvt >= 6)
+		apic_pm_state.apic_cmci = apic_read(APIC_LVTCMCI);
+#endif
 
 	local_irq_save(flags);
 	disable_local_APIC();
@@ -2355,9 +2370,13 @@ static void lapic_resume(void)
 	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
 	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
 	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
-#if defined(CONFIG_X86_MCE_INTEL)
+#ifdef CONFIG_X86_THERMAL_VECTOR
 	if (maxlvt >= 5)
 		apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
 #endif
+#ifdef CONFIG_X86_MCE_INTEL
+	if (maxlvt >= 6)
+		apic_write(APIC_LVTCMCI, apic_pm_state.apic_cmci);
+#endif
 	if (maxlvt >= 4)
 		apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
@@ -2548,3 +2567,23 @@ static int __init apic_set_disabled_cpu_apicid(char *arg)
 	return 0;
 }
 early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid);
+
+static int __init apic_set_extnmi(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	if (!strncmp("all", arg, 3))
+		apic_extnmi = APIC_EXTNMI_ALL;
+	else if (!strncmp("none", arg, 4))
+		apic_extnmi = APIC_EXTNMI_NONE;
+	else if (!strncmp("bsp", arg, 3))
+		apic_extnmi = APIC_EXTNMI_BSP;
+	else {
+		pr_warn("Unknown external NMI delivery mode `%s' ignored\n", arg);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+early_param("apic_extnmi", apic_set_extnmi);
@@ -185,6 +185,7 @@ static struct apic apic_flat = {
 
 	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,
 
+	.send_IPI			= default_send_IPI_single,
 	.send_IPI_mask			= flat_send_IPI_mask,
 	.send_IPI_mask_allbutself	= flat_send_IPI_mask_allbutself,
 	.send_IPI_allbutself		= flat_send_IPI_allbutself,
@@ -230,17 +231,6 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return 0;
 }
 
-static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
-{
-	default_send_IPI_mask_sequence_phys(cpumask, vector);
-}
-
-static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
-					      int vector)
-{
-	default_send_IPI_mask_allbutself_phys(cpumask, vector);
-}
-
 static void physflat_send_IPI_allbutself(int vector)
 {
 	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
@@ -248,7 +238,7 @@ static void physflat_send_IPI_allbutself(int vector)
 
 static void physflat_send_IPI_all(int vector)
 {
-	physflat_send_IPI_mask(cpu_online_mask, vector);
+	default_send_IPI_mask_sequence_phys(cpu_online_mask, vector);
 }
 
 static int physflat_probe(void)
@@ -292,8 +282,9 @@ static struct apic apic_physflat = {
 
 	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
 
-	.send_IPI_mask			= physflat_send_IPI_mask,
-	.send_IPI_mask_allbutself	= physflat_send_IPI_mask_allbutself,
+	.send_IPI			= default_send_IPI_single_phys,
+	.send_IPI_mask			= default_send_IPI_mask_sequence_phys,
+	.send_IPI_mask_allbutself	= default_send_IPI_mask_allbutself_phys,
 	.send_IPI_allbutself		= physflat_send_IPI_allbutself,
 	.send_IPI_all			= physflat_send_IPI_all,
 	.send_IPI_self			= apic_send_IPI_self,
@@ -30,6 +30,7 @@
 #include <asm/e820.h>
 
 static void noop_init_apic_ldr(void) { }
+static void noop_send_IPI(int cpu, int vector) { }
 static void noop_send_IPI_mask(const struct cpumask *cpumask, int vector) { }
 static void noop_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { }
 static void noop_send_IPI_allbutself(int vector) { }
@@ -144,6 +145,7 @@ struct apic apic_noop = {
 
 	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,
 
+	.send_IPI			= noop_send_IPI,
 	.send_IPI_mask			= noop_send_IPI_mask,
 	.send_IPI_mask_allbutself	= noop_send_IPI_mask_allbutself,
 	.send_IPI_allbutself		= noop_send_IPI_allbutself,
@@ -273,6 +273,7 @@ static const struct apic apic_numachip1 __refconst = {
 
 	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
 
+	.send_IPI			= numachip_send_IPI_one,
 	.send_IPI_mask			= numachip_send_IPI_mask,
 	.send_IPI_mask_allbutself	= numachip_send_IPI_mask_allbutself,
 	.send_IPI_allbutself		= numachip_send_IPI_allbutself,
@@ -324,6 +325,7 @@ static const struct apic apic_numachip2 __refconst = {
 
 	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
 
+	.send_IPI			= numachip_send_IPI_one,
 	.send_IPI_mask			= numachip_send_IPI_mask,
 	.send_IPI_mask_allbutself	= numachip_send_IPI_mask_allbutself,
 	.send_IPI_allbutself		= numachip_send_IPI_allbutself,
@@ -96,11 +96,6 @@ static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
 	return cpuid_apic >> index_msb;
 }
 
-static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
-{
-	default_send_IPI_mask_sequence_phys(mask, vector);
-}
-
 static void bigsmp_send_IPI_allbutself(int vector)
 {
 	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
@@ -108,7 +103,7 @@ static void bigsmp_send_IPI_allbutself(int vector)
 
 static void bigsmp_send_IPI_all(int vector)
 {
-	bigsmp_send_IPI_mask(cpu_online_mask, vector);
+	default_send_IPI_mask_sequence_phys(cpu_online_mask, vector);
 }
 
 static int dmi_bigsmp; /* can be set by dmi scanners */
@@ -180,7 +175,8 @@ static struct apic apic_bigsmp = {
 
 	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
 
-	.send_IPI_mask			= bigsmp_send_IPI_mask,
+	.send_IPI			= default_send_IPI_single_phys,
+	.send_IPI_mask			= default_send_IPI_mask_sequence_phys,
 	.send_IPI_mask_allbutself	= NULL,
 	.send_IPI_allbutself		= bigsmp_send_IPI_allbutself,
 	.send_IPI_all			= bigsmp_send_IPI_all,
@@ -18,6 +18,16 @@
 #include <asm/proto.h>
 #include <asm/ipi.h>
 
+void default_send_IPI_single_phys(int cpu, int vector)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
+				      vector, APIC_DEST_PHYSICAL);
+	local_irq_restore(flags);
+}
+
 void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
 {
 	unsigned long query_cpu;
@@ -55,6 +65,14 @@ void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
 	local_irq_restore(flags);
 }
 
+/*
+ * Helper function for APICs which insist on cpumasks
+ */
+void default_send_IPI_single(int cpu, int vector)
+{
+	apic->send_IPI_mask(cpumask_of(cpu), vector);
+}
+
 #ifdef CONFIG_X86_32
 
 void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
@@ -96,8 +96,8 @@ static irq_hw_number_t pci_msi_get_hwirq(struct msi_domain_info *info,
 	return arg->msi_hwirq;
 }
 
-static int pci_msi_prepare(struct irq_domain *domain, struct device *dev,
-			   int nvec, msi_alloc_info_t *arg)
+int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
+		    msi_alloc_info_t *arg)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct msi_desc *desc = first_pci_msi_entry(pdev);
@@ -113,11 +113,13 @@ static int pci_msi_prepare(struct irq_domain *domain, struct device *dev,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(pci_msi_prepare);
 
-static void pci_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+void pci_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
 {
 	arg->msi_hwirq = pci_msi_domain_calc_hwirq(arg->msi_dev, desc);
 }
+EXPORT_SYMBOL_GPL(pci_msi_set_desc);
 
 static struct msi_domain_ops pci_msi_domain_ops = {
 	.get_hwirq	= pci_msi_get_hwirq,
@@ -105,6 +105,7 @@ static struct apic apic_default = {
 
 	.cpu_mask_to_apicid_and		= flat_cpu_mask_to_apicid_and,
 
+	.send_IPI			= default_send_IPI_single,
 	.send_IPI_mask			= default_send_IPI_mask_logical,
 	.send_IPI_mask_allbutself	= default_send_IPI_mask_allbutself_logical,
 	.send_IPI_allbutself		= default_send_IPI_allbutself,
@@ -29,6 +29,7 @@ struct apic_chip_data {
 };
 
 struct irq_domain *x86_vector_domain;
+EXPORT_SYMBOL_GPL(x86_vector_domain);
 static DEFINE_RAW_SPINLOCK(vector_lock);
 static cpumask_var_t vector_cpumask;
 static struct irq_chip lapic_controller;
@@ -66,6 +67,7 @@ struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
 
 	return data ? &data->cfg : NULL;
 }
+EXPORT_SYMBOL_GPL(irqd_cfg);
 
 struct irq_cfg *irq_cfg(unsigned int irq)
 {
@@ -23,6 +23,14 @@ static inline u32 x2apic_cluster(int cpu)
 	return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16;
 }
 
+static void x2apic_send_IPI(int cpu, int vector)
+{
+	u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
+
+	x2apic_wrmsr_fence();
+	__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
+}
+
 static void
 __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
 {
@@ -266,6 +274,7 @@ static struct apic apic_x2apic_cluster = {
 
 	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,
 
+	.send_IPI			= x2apic_send_IPI,
 	.send_IPI_mask			= x2apic_send_IPI_mask,
 	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
 	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
@@ -36,6 +36,14 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys());
 }
 
+static void x2apic_send_IPI(int cpu, int vector)
+{
+	u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
+
+	x2apic_wrmsr_fence();
+	__x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL);
+}
+
 static void
 __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
 {
@@ -122,6 +130,7 @@ static struct apic apic_x2apic_phys = {
 
 	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
 
+	.send_IPI			= x2apic_send_IPI,
 	.send_IPI_mask			= x2apic_send_IPI_mask,
 	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
 	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
@@ -406,6 +406,7 @@ static struct apic __refdata apic_x2apic_uv_x = {
 
 	.cpu_mask_to_apicid_and		= uv_cpu_mask_to_apicid_and,
 
+	.send_IPI			= uv_send_IPI_one,
 	.send_IPI_mask			= uv_send_IPI_mask,
 	.send_IPI_mask_allbutself	= uv_send_IPI_mask_allbutself,
 	.send_IPI_allbutself		= uv_send_IPI_allbutself,
@@ -29,6 +29,7 @@
 #include <asm/mach_traps.h>
 #include <asm/nmi.h>
 #include <asm/x86_init.h>
+#include <asm/reboot.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/nmi.h>
@@ -231,7 +232,7 @@ pci_serr_error(unsigned char reason, struct pt_regs *regs)
 #endif
 
 	if (panic_on_unrecovered_nmi)
-		panic("NMI: Not continuing");
+		nmi_panic(regs, "NMI: Not continuing");
 
 	pr_emerg("Dazed and confused, but trying to continue\n");
 
@@ -255,8 +256,16 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
 		 reason, smp_processor_id());
 	show_regs(regs);
 
-	if (panic_on_io_nmi)
-		panic("NMI IOCK error: Not continuing");
+	if (panic_on_io_nmi) {
+		nmi_panic(regs, "NMI IOCK error: Not continuing");
+
+		/*
+		 * If we end up here, it means we have received an NMI while
+		 * processing panic(). Simply return without delaying and
+		 * re-enabling NMIs.
+		 */
+		return;
+	}
 
 	/* Re-enable the IOCK line, wait for a few seconds */
 	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
@@ -297,7 +306,7 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
 
 	pr_emerg("Do you have a strange power saving mode enabled?\n");
 	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
-		panic("NMI: Not continuing");
+		nmi_panic(regs, "NMI: Not continuing");
 
 	pr_emerg("Dazed and confused, but trying to continue\n");
 }
@@ -348,8 +357,19 @@ static void default_do_nmi(struct pt_regs *regs)
 		return;
 	}
 
-	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
-	raw_spin_lock(&nmi_reason_lock);
+	/*
+	 * Non-CPU-specific NMI: NMI sources can be processed on any CPU.
+	 *
+	 * Another CPU may be processing panic routines while holding
+	 * nmi_reason_lock. Check if the CPU issued the IPI for crash dumping,
+	 * and if so, call its callback directly. If there is no CPU preparing
+	 * crash dump, we simply loop here.
+	 */
+	while (!raw_spin_trylock(&nmi_reason_lock)) {
+		run_crash_ipi_callback(regs);
+		cpu_relax();
+	}
+
 	reason = x86_platform.get_nmi_reason();
 
 	if (reason & NMI_REASON_MASK) {
@@ -718,6 +718,7 @@ static int crashing_cpu;
 static nmi_shootdown_cb shootdown_callback;
 
 static atomic_t waiting_for_crash_ipi;
+static int crash_ipi_issued;
 
 static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
 {
@@ -780,6 +781,9 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
 
 	smp_send_nmi_allbutself();
 
+	/* Kick CPUs looping in NMI context. */
+	WRITE_ONCE(crash_ipi_issued, 1);
+
 	msecs = 1000; /* Wait at most a second for the other cpus to stop */
 	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
 		mdelay(1);
@@ -788,9 +792,35 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
 
 	/* Leave the nmi callback set */
 }
+
+/*
+ * Check if the crash dumping IPI got issued and if so, call its callback
+ * directly. This function is used when we have already been in NMI handler.
+ * It doesn't return.
+ */
+void run_crash_ipi_callback(struct pt_regs *regs)
+{
+	if (crash_ipi_issued)
+		crash_nmi_callback(0, regs);
+}
+
+/* Override the weak function in kernel/panic.c */
+void nmi_panic_self_stop(struct pt_regs *regs)
+{
+	while (1) {
+		/* If no CPU is preparing crash dump, we simply loop here. */
+		run_crash_ipi_callback(regs);
+		cpu_relax();
+	}
+}
+
 #else /* !CONFIG_SMP */
 void nmi_shootdown_cpus(nmi_shootdown_cb callback)
 {
 	/* No other CPUs to shoot down */
 }
+
+void run_crash_ipi_callback(struct pt_regs *regs)
+{
+}
 #endif
@@ -125,12 +125,12 @@ static void native_smp_send_reschedule(int cpu)
 		WARN_ON(1);
 		return;
 	}
-	apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
+	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
 }
 
 void native_send_call_func_single_ipi(int cpu)
 {
-	apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
+	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
 }
 
 void native_send_call_func_ipi(const struct cpumask *mask)
@@ -255,6 +255,7 @@ extern long (*panic_blink)(int state);
 __printf(1, 2)
 void panic(const char *fmt, ...)
 	__noreturn __cold;
+void nmi_panic_self_stop(struct pt_regs *);
 extern void oops_enter(void);
 extern void oops_exit(void);
 void print_oops_end_marker(void);
@@ -445,6 +446,33 @@ extern int sysctl_panic_on_stackoverflow;
 
 extern bool crash_kexec_post_notifiers;
 
+/*
+ * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It
+ * holds a CPU number which is executing panic() currently. A value of
+ * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec().
+ */
+extern atomic_t panic_cpu;
+#define PANIC_CPU_INVALID	-1
+
+/*
+ * A variant of panic() called from NMI context. We return if we've already
+ * panicked on this CPU. If another CPU already panicked, loop in
+ * nmi_panic_self_stop() which can provide architecture dependent code such
+ * as saving register state for crash dump.
+ */
+#define nmi_panic(regs, fmt, ...)					\
+do {									\
+	int old_cpu, cpu;						\
+									\
+	cpu = raw_smp_processor_id();					\
+	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);	\
+									\
+	if (old_cpu == PANIC_CPU_INVALID)				\
+		panic(fmt, ##__VA_ARGS__);				\
+	else if (old_cpu != cpu)					\
+		nmi_panic_self_stop(regs);				\
+} while (0)
+
 /*
  * Only to be used by arch init code. If the user over-wrote the default
  * CONFIG_PANIC_TIMEOUT, honor it.
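Note: nmi_panic() is intended to replace direct panic() calls on NMI paths, as the nmi.c and watchdog hunks in this merge do. A hedged sketch of a hypothetical handler follows; fatal_error_detected() is a placeholder for whatever condition such a handler would check, not a real kernel function.

```c
/* Hypothetical NMI handler sketch. If this CPU wins the panic_cpu
 * cmpxchg inside nmi_panic() it panics; if another CPU is already
 * panicking it spins in nmi_panic_self_stop() instead of deadlocking
 * or re-entering panic() from NMI context. */
static int example_nmi_handler(unsigned int type, struct pt_regs *regs)
{
	if (fatal_error_detected())		/* placeholder predicate */
		nmi_panic(regs, "example: fatal NMI condition");

	return NMI_HANDLED;
}
```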
@@ -237,6 +237,7 @@ extern int kexec_purgatory_get_set_symbol(struct kimage *image,
 					   unsigned int size, bool get_value);
 extern void *kexec_purgatory_get_symbol_addr(struct kimage *image,
 					     const char *name);
+extern void __crash_kexec(struct pt_regs *);
 extern void crash_kexec(struct pt_regs *);
 int kexec_should_crash(struct task_struct *);
 void crash_save_cpu(struct pt_regs *regs, int cpu);
@@ -332,6 +333,7 @@ int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 #else /* !CONFIG_KEXEC_CORE */
 struct pt_regs;
 struct task_struct;
+static inline void __crash_kexec(struct pt_regs *regs) { }
 static inline void crash_kexec(struct pt_regs *regs) { }
 static inline int kexec_should_crash(struct task_struct *p) { return 0; }
 #define kexec_in_progress false
@@ -853,7 +853,12 @@ struct kimage *kexec_image;
 struct kimage *kexec_crash_image;
 int kexec_load_disabled;
 
-void crash_kexec(struct pt_regs *regs)
+/*
+ * No panic_cpu check version of crash_kexec(). This function is called
+ * only when panic_cpu holds the current CPU number; this is the only CPU
+ * which processes crash_kexec routines.
+ */
+void __crash_kexec(struct pt_regs *regs)
 {
 	/* Take the kexec_mutex here to prevent sys_kexec_load
 	 * running on one cpu from replacing the crash kernel
|
||||
}
|
||||
}
|
||||
|
||||
void crash_kexec(struct pt_regs *regs)
|
||||
{
|
||||
int old_cpu, this_cpu;
|
||||
|
||||
/*
|
||||
* Only one CPU is allowed to execute the crash_kexec() code as with
|
||||
* panic(). Otherwise parallel calls of panic() and crash_kexec()
|
||||
* may stop each other. To exclude them, we use panic_cpu here too.
|
||||
*/
|
||||
this_cpu = raw_smp_processor_id();
|
||||
old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
|
||||
if (old_cpu == PANIC_CPU_INVALID) {
|
||||
/* This is the 1st CPU which comes here, so go ahead. */
|
||||
__crash_kexec(regs);
|
||||
|
||||
/*
|
||||
* Reset panic_cpu to allow another panic()/crash_kexec()
|
||||
* call.
|
||||
*/
|
||||
atomic_set(&panic_cpu, PANIC_CPU_INVALID);
|
||||
}
|
||||
}
|
||||
|
||||
size_t crash_get_memory_size(void)
|
||||
{
|
||||
size_t size = 0;
|
||||
|
@@ -61,6 +61,17 @@ void __weak panic_smp_self_stop(void)
 		cpu_relax();
 }
 
+/*
+ * Stop ourselves in NMI context if another CPU has already panicked. Arch code
+ * may override this to prepare for crash dumping, e.g. save regs info.
+ */
+void __weak nmi_panic_self_stop(struct pt_regs *regs)
+{
+	panic_smp_self_stop();
+}
+
+atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);
+
 /**
  *	panic - halt the system
  *	@fmt: The text string to print
@@ -71,17 +82,17 @@ void __weak panic_smp_self_stop(void)
  */
 void panic(const char *fmt, ...)
 {
-	static DEFINE_SPINLOCK(panic_lock);
 	static char buf[1024];
 	va_list args;
 	long i, i_next = 0;
 	int state = 0;
+	int old_cpu, this_cpu;
 
 	/*
 	 * Disable local interrupts. This will prevent panic_smp_self_stop
 	 * from deadlocking the first cpu that invokes the panic, since
 	 * there is nothing to prevent an interrupt handler (that runs
-	 * after the panic_lock is acquired) from invoking panic again.
+	 * after setting panic_cpu) from invoking panic() again.
 	 */
 	local_irq_disable();
 
@@ -94,8 +105,16 @@ void panic(const char *fmt, ...)
 	 * multiple parallel invocations of panic, all other CPUs either
 	 * stop themself or will wait until they are stopped by the 1st CPU
 	 * with smp_send_stop().
+	 *
+	 * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
+	 * comes here, so go ahead.
+	 * `old_cpu == this_cpu' means we came from nmi_panic() which sets
+	 * panic_cpu to this CPU. In this case, this is also the 1st CPU.
 	 */
-	if (!spin_trylock(&panic_lock))
+	this_cpu = raw_smp_processor_id();
+	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
+
+	if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
 		panic_smp_self_stop();
 
 	console_verbose();
@@ -117,9 +136,11 @@ void panic(const char *fmt, ...)
 	 * everything else.
 	 * If we want to run this after calling panic_notifiers, pass
 	 * the "crash_kexec_post_notifiers" option to the kernel.
+	 *
+	 * Bypass the panic_cpu check and call __crash_kexec directly.
 	 */
 	if (!crash_kexec_post_notifiers)
-		crash_kexec(NULL);
+		__crash_kexec(NULL);
 
 	/*
 	 * Note smp_send_stop is the usual smp shutdown function, which
|
||||
* panic_notifiers and dumping kmsg before kdump.
|
||||
* Note: since some panic_notifiers can make crashed kernel
|
||||
* more unstable, it can increase risks of the kdump failure too.
|
||||
*
|
||||
* Bypass the panic_cpu check and call __crash_kexec directly.
|
||||
*/
|
||||
if (crash_kexec_post_notifiers)
|
||||
crash_kexec(NULL);
|
||||
__crash_kexec(NULL);
|
||||
|
||||
bust_spinlocks(0);
|
||||
|
||||
|
@@ -351,7 +351,7 @@ static void watchdog_overflow_callback(struct perf_event *event,
 			trigger_allbutself_cpu_backtrace();
 
 		if (hardlockup_panic)
-			panic("Hard LOCKUP");
+			nmi_panic(regs, "Hard LOCKUP");
 
 		__this_cpu_write(hard_watchdog_warn, true);
 		return;