Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-09 06:56:39 +07:00

Commit 18258f7239:
synchronize_irq() waits for hard irq and threaded handlers to complete
before returning. For some special cases we only need to make sure that
the hard interrupt part of the irq line is not in progress when we
disable the - possibly shared - interrupt at the device level.

A proper use case for this was provided by Russell. The sdhci driver
requires some irq-triggered functions to be run in thread context. The
current implementation of the thread context is an sdio-private kthread
construct, which has quite some shortcomings. These can be avoided when
the thread is directly associated with the device interrupt via the
generic threaded irq infrastructure.

There is a corner case related to runtime power management, however,
where one side disables the device interrupts at the device level and
needs to make sure that an already running hard interrupt handler has
completed before proceeding further. That hard interrupt handler might
wake the associated thread, which in turn can request runtime PM to
reenable the device. Using synchronize_irq() leads to an immediate
deadlock: the irq thread waits for the PM lock, while synchronize_irq()
waits for the irq thread to complete.

Since it is sufficient for this case to ensure that no hard irq handler
is in progress, a new function is required which avoids the check for
the thread.

Add a function which just monitors the hard irq parts and ignores the
threaded handlers.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Russell King <linux@arm.linux.org.uk>
Cc: Chris Ball <chris@printf.net>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20140215003823.653236081@linutronix.de
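To make the deadlock scenario concrete, here is a minimal sketch of a
runtime-suspend path that benefits from the new call. Everything driver-side
is hypothetical (struct mydev, mydev_mask_irqs(), mydev_runtime_suspend());
only synchronize_irq()/synchronize_hardirq() and dev_get_drvdata() are real
kernel APIs:

	#include <linux/device.h>
	#include <linux/hardirq.h>

	/* Hypothetical per-device state; only ->irq matters here. */
	struct mydev {
		unsigned int irq;
	};

	static void mydev_mask_irqs(struct mydev *md)
	{
		/* Hypothetical: write the device's interrupt-mask register. */
	}

	static int mydev_runtime_suspend(struct device *dev)
	{
		struct mydev *md = dev_get_drvdata(dev);

		/* Stop the device from raising interrupts; the line may be shared. */
		mydev_mask_irqs(md);

		/*
		 * synchronize_irq(md->irq) would also wait for the threaded
		 * handler. If that thread is currently blocked waiting for
		 * runtime PM to resume the device, both sides wait on each
		 * other: deadlock. synchronize_hardirq() waits only for the
		 * hard irq part, which cannot take the PM lock, so it is
		 * safe here.
		 */
		synchronize_hardirq(md->irq);

		return 0;
	}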
83 lines | 1.7 KiB | C
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt_mask.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>


extern void synchronize_irq(unsigned int irq);
extern void synchronize_hardirq(unsigned int irq);

#if defined(CONFIG_TINY_RCU)

static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

#define nmi_enter()						\
	do {							\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		trace_hardirq_enter();			\
	} while (0)

#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
		lockdep_on();					\
	} while (0)

#endif /* LINUX_HARDIRQ_H */
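For orientation, the nmi_enter()/nmi_exit() pair above is meant to bracket an
architecture's NMI entry point, so that in_nmi() is true and the preempt
count, RCU and tracing all know about the context while the handler runs. A
minimal sketch, assuming a hypothetical entry point arch_do_nmi() and payload
handle_my_nmi() (the bracketing pattern itself is how arch NMI handlers such
as x86's do_nmi() use these macros):

	#include <linux/hardirq.h>
	#include <asm/ptrace.h>

	static void handle_my_nmi(struct pt_regs *regs)
	{
		/* Hypothetical NMI work; runs with in_nmi() true. */
	}

	void arch_do_nmi(struct pt_regs *regs)
	{
		nmi_enter();		/* lockdep off, NMI preempt count, RCU, tracing */
		handle_my_nmi(regs);
		nmi_exit();		/* unwinds everything in reverse order */
	}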