Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 10:20:49 +07:00), commit 044d0d6de9.
Problem:
raw_local_irq_save(); // software state on
local_irq_save(); // software state off
...
local_irq_restore(); // software state still off, because we don't enable IRQs
raw_local_irq_restore(); // software state still off, *whoopsie*
existing instances:
- lock_acquire()
raw_local_irq_save()
__lock_acquire()
arch_spin_lock(&graph_lock)
pv_wait() := kvm_wait() (same or worse for Xen/HyperV)
local_irq_save()
- trace_clock_global()
raw_local_irq_save()
arch_spin_lock()
pv_wait() := kvm_wait()
local_irq_save()
- apic_retrigger_irq()
raw_local_irq_save()
apic->send_IPI() := default_send_IPI_single_phys()
local_irq_save()
Possible solutions:
A) make it work by enabling the tracing inside raw_*()
B) make it work by keeping tracing disabled inside raw_*()
C) call it broken and clean it up now
Now, given that the only reason to use the raw_* variant is because you don't
want tracing. Therefore A) seems like a weird option (although it can be done).
C) is tempting, but OTOH it ends up converting a _lot_ of code to raw just
because there is one raw user, this strips the validation/tracing off for all
the other users.
So we pick B) and declare any code that ends up doing:
raw_local_irq_save()
local_irq_save()
lockdep_assert_irqs_disabled();
broken. AFAICT this problem has existed forever; the only reason it came
up is because commit 859d069ee1 ("lockdep: Prepare for NMI IRQ state
tracking") changed IRQ tracing vs lockdep recursion. The first instance
is fairly common; the other cases hardly ever happen.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[rewrote changelog]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Marco Elver <elver@google.com>
Link: https://lkml.kernel.org/r/20200723105615.1268126-1-npiggin@gmail.com
252 lines · 7.4 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/irqflags.h
 *
 * IRQ flags tracing: follow the state of the hardirq and softirq flags and
 * provide callbacks for transitions between ON and OFF states.
 *
 * This file gets included from lowlevel asm headers too, to provide
 * wrapped versions of the local_irq_*() APIs, based on the
 * raw_local_irq_*() macros from the lowlevel headers.
 */
#ifndef _LINUX_TRACE_IRQFLAGS_H
#define _LINUX_TRACE_IRQFLAGS_H

#include <linux/typecheck.h>
#include <asm/irqflags.h>
#include <asm/percpu.h>

/*
 * Lockdep IRQ-state hooks. Currently lockdep_softirqs_on/off is used
 * only by lockdep itself; without CONFIG_PROVE_LOCKING they all compile
 * away to empty inline stubs so callers need no #ifdefs.
 */
#ifdef CONFIG_PROVE_LOCKING
  extern void lockdep_softirqs_on(unsigned long ip);
  extern void lockdep_softirqs_off(unsigned long ip);
  extern void lockdep_hardirqs_on_prepare(unsigned long ip);
  extern void lockdep_hardirqs_on(unsigned long ip);
  extern void lockdep_hardirqs_off(unsigned long ip);
#else
  static inline void lockdep_softirqs_on(unsigned long ip) { }
  static inline void lockdep_softirqs_off(unsigned long ip) { }
  static inline void lockdep_hardirqs_on_prepare(unsigned long ip) { }
  static inline void lockdep_hardirqs_on(unsigned long ip) { }
  static inline void lockdep_hardirqs_off(unsigned long ip) { }
#endif
#ifdef CONFIG_TRACE_IRQFLAGS

/*
 * Per-task IRQ trace events information: records the instruction pointer
 * and event counter of the most recent hardirq/softirq enable/disable
 * transitions (consumed by lockdep reporting).
 */
struct irqtrace_events {
	unsigned int	irq_events;
	unsigned long	hardirq_enable_ip;
	unsigned long	hardirq_disable_ip;
	unsigned int	hardirq_enable_event;
	unsigned int	hardirq_disable_event;
	unsigned long	softirq_disable_ip;
	unsigned long	softirq_enable_ip;
	unsigned int	softirq_disable_event;
	unsigned int	softirq_enable_event;
};

/* Per-CPU software copies of the hardirq enable/context state. */
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);

extern void trace_hardirqs_on_prepare(void);
extern void trace_hardirqs_off_finish(void);
extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void);

# define lockdep_hardirq_context()	(raw_cpu_read(hardirq_context))
# define lockdep_softirq_context(p)	((p)->softirq_context)
# define lockdep_hardirqs_enabled()	(this_cpu_read(hardirqs_enabled))
# define lockdep_softirqs_enabled(p)	((p)->softirqs_enabled)
/*
 * Entering outermost hardirq context (nesting count 0 -> 1) resets
 * the "handled by a threaded handler" marker for this interrupt.
 */
# define lockdep_hardirq_enter()			\
do {							\
	if (__this_cpu_inc_return(hardirq_context) == 1)\
		current->hardirq_threaded = 0;		\
} while (0)
# define lockdep_hardirq_threaded()		\
do {						\
	current->hardirq_threaded = 1;		\
} while (0)
# define lockdep_hardirq_exit()			\
do {						\
	__this_cpu_dec(hardirq_context);	\
} while (0)
# define lockdep_softirq_enter()		\
do {						\
	current->softirq_context++;		\
} while (0)
# define lockdep_softirq_exit()			\
do {						\
	current->softirq_context--;		\
} while (0)

/*
 * Returns true when the hrtimer expires in hard IRQ context; for
 * soft-expiring timers it also flags current->irq_config so lockdep
 * knows the IRQ configuration is being changed.
 */
# define lockdep_hrtimer_enter(__hrtimer)		\
({							\
	bool __expires_hardirq = true;			\
							\
	if (!__hrtimer->is_hard) {			\
		current->irq_config = 1;		\
		__expires_hardirq = false;		\
	}						\
	__expires_hardirq;				\
})

# define lockdep_hrtimer_exit(__expires_hardirq)	\
	do {						\
		if (!__expires_hardirq)			\
			current->irq_config = 0;	\
	} while (0)

# define lockdep_posixtimer_enter()				\
	  do {							\
		  current->irq_config = 1;			\
	  } while (0)

# define lockdep_posixtimer_exit()				\
	  do {							\
		  current->irq_config = 0;			\
	  } while (0)

/* irq_work that is not marked IRQ_WORK_HARD_IRQ may run threaded. */
# define lockdep_irq_work_enter(__work)					\
	  do {								\
		  if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
			current->irq_config = 1;			\
	  } while (0)
# define lockdep_irq_work_exit(__work)					\
	  do {								\
		  if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
			current->irq_config = 0;			\
	  } while (0)

#else
/* !CONFIG_TRACE_IRQFLAGS: everything compiles away / evaluates to 0. */
# define trace_hardirqs_on_prepare()		do { } while (0)
# define trace_hardirqs_off_finish()		do { } while (0)
# define trace_hardirqs_on()			do { } while (0)
# define trace_hardirqs_off()			do { } while (0)
# define lockdep_hardirq_context()		0
# define lockdep_softirq_context(p)		0
# define lockdep_hardirqs_enabled()		0
# define lockdep_softirqs_enabled(p)		0
# define lockdep_hardirq_enter()		do { } while (0)
# define lockdep_hardirq_threaded()		do { } while (0)
# define lockdep_hardirq_exit()			do { } while (0)
# define lockdep_softirq_enter()		do { } while (0)
# define lockdep_softirq_exit()			do { } while (0)
# define lockdep_hrtimer_enter(__hrtimer)	false
# define lockdep_hrtimer_exit(__context)	do { } while (0)
# define lockdep_posixtimer_enter()		do { } while (0)
# define lockdep_posixtimer_exit()		do { } while (0)
# define lockdep_irq_work_enter(__work)		do { } while (0)
# define lockdep_irq_work_exit(__work)		do { } while (0)
#endif
/*
 * Hooks for the irqsoff/preempt latency tracers; no-ops when neither
 * tracer is configured.
 */
#if defined(CONFIG_IRQSOFF_TRACER) || \
	defined(CONFIG_PREEMPT_TRACER)
 extern void stop_critical_timings(void);
 extern void start_critical_timings(void);
#else
# define stop_critical_timings() do { } while (0)
# define start_critical_timings() do { } while (0)
#endif
/*
 * Wrap the arch provided IRQ routines to provide appropriate checks.
 *
 * The raw_* variants only add a typecheck() on the flags argument; they
 * deliberately do NOT call into the IRQ state tracing above, so code
 * that must avoid tracing recursion can use them directly.
 */
#define raw_local_irq_disable()		arch_local_irq_disable()
#define raw_local_irq_enable()		arch_local_irq_enable()
#define raw_local_irq_save(flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = arch_local_irq_save();		\
	} while (0)
#define raw_local_irq_restore(flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		arch_local_irq_restore(flags);		\
	} while (0)
#define raw_local_save_flags(flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = arch_local_save_flags();	\
	} while (0)
#define raw_irqs_disabled_flags(flags)			\
	({						\
		typecheck(unsigned long, flags);	\
		arch_irqs_disabled_flags(flags);	\
	})
#define raw_irqs_disabled()		(arch_irqs_disabled())
#define raw_safe_halt()			arch_safe_halt()
/*
 * The local_irq_*() APIs are equal to the raw_local_irq*()
 * if !TRACE_IRQFLAGS.
 *
 * With TRACE_IRQFLAGS, disabling only calls trace_hardirqs_off() when
 * IRQs were actually enabled, and restoring only calls
 * trace_hardirqs_on() when IRQs are actually being re-enabled.  This
 * keeps the software IRQ state untouched across nested
 * raw_local_irq_save()/local_irq_save() sections (see changelog above:
 * tracing stays disabled inside raw_*() regions).
 */
#ifdef CONFIG_TRACE_IRQFLAGS

#define local_irq_enable()				\
	do {						\
		trace_hardirqs_on();			\
		raw_local_irq_enable();			\
	} while (0)

#define local_irq_disable()				\
	do {						\
		bool was_disabled = raw_irqs_disabled();\
		raw_local_irq_disable();		\
		if (!was_disabled)			\
			trace_hardirqs_off();		\
	} while (0)

#define local_irq_save(flags)				\
	do {						\
		raw_local_irq_save(flags);		\
		if (!raw_irqs_disabled_flags(flags))	\
			trace_hardirqs_off();		\
	} while (0)

#define local_irq_restore(flags)			\
	do {						\
		if (!raw_irqs_disabled_flags(flags))	\
			trace_hardirqs_on();		\
		raw_local_irq_restore(flags);		\
	} while (0)

#define safe_halt()				\
	do {					\
		trace_hardirqs_on();		\
		raw_safe_halt();		\
	} while (0)


#else /* !CONFIG_TRACE_IRQFLAGS */

#define local_irq_enable()	do { raw_local_irq_enable(); } while (0)
#define local_irq_disable()	do { raw_local_irq_disable(); } while (0)
#define local_irq_save(flags)	do { raw_local_irq_save(flags); } while (0)
#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
#define safe_halt()		do { raw_safe_halt(); } while (0)

#endif /* CONFIG_TRACE_IRQFLAGS */
#define local_save_flags(flags)	raw_local_save_flags(flags)

/*
 * Some architectures don't define arch_irqs_disabled(), so even if either
 * definition would be fine we need to use different ones for the time being
 * to avoid build issues.
 */
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
#define irqs_disabled()					\
	({						\
		unsigned long _flags;			\
		raw_local_save_flags(_flags);		\
		raw_irqs_disabled_flags(_flags);	\
	})
#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
#define irqs_disabled()	raw_irqs_disabled()
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */

#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)

#endif /* _LINUX_TRACE_IRQFLAGS_H */