Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 14:00:58 +07:00)
Merge branch 'core-printk-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-printk-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  lockdep: Fix trace_[soft,hard]irqs_[on,off]() recursion
  printk: Fix console_sem vs logbuf_lock unlock race
  printk: Release console_sem after logbuf_lock
This commit is contained in: commit 391d6276db
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2481,15 +2481,10 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on_caller(unsigned long ip)
+static void __trace_hardirqs_on_caller(unsigned long ip)
 {
         struct task_struct *curr = current;
 
-        time_hardirqs_on(CALLER_ADDR0, ip);
-
-        if (unlikely(!debug_locks || current->lockdep_recursion))
-                return;
-
         if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
                 return;
 
@@ -2505,8 +2500,6 @@ void trace_hardirqs_on_caller(unsigned long ip)
         /* we'll do an OFF -> ON transition: */
         curr->hardirqs_enabled = 1;
 
-        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-                return;
         if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
                 return;
         /*
@@ -2528,6 +2521,21 @@ void trace_hardirqs_on_caller(unsigned long ip)
         curr->hardirq_enable_event = ++curr->irq_events;
         debug_atomic_inc(hardirqs_on_events);
 }
+
+void trace_hardirqs_on_caller(unsigned long ip)
+{
+        time_hardirqs_on(CALLER_ADDR0, ip);
+
+        if (unlikely(!debug_locks || current->lockdep_recursion))
+                return;
+
+        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+                return;
+
+        current->lockdep_recursion = 1;
+        __trace_hardirqs_on_caller(ip);
+        current->lockdep_recursion = 0;
+}
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
 void trace_hardirqs_on(void)
@@ -2577,7 +2585,7 @@ void trace_softirqs_on(unsigned long ip)
 {
         struct task_struct *curr = current;
 
-        if (unlikely(!debug_locks))
+        if (unlikely(!debug_locks || current->lockdep_recursion))
                 return;
 
         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
@@ -2588,6 +2596,7 @@ void trace_softirqs_on(unsigned long ip)
                 return;
         }
 
+        current->lockdep_recursion = 1;
         /*
          * We'll do an OFF -> ON transition:
          */
@@ -2602,6 +2611,7 @@ void trace_softirqs_on(unsigned long ip)
          */
         if (curr->hardirqs_enabled)
                 mark_held_locks(curr, SOFTIRQ);
+        current->lockdep_recursion = 0;
 }
 
 /*
@@ -2611,7 +2621,7 @@ void trace_softirqs_off(unsigned long ip)
 {
         struct task_struct *curr = current;
 
-        if (unlikely(!debug_locks))
+        if (unlikely(!debug_locks || current->lockdep_recursion))
                 return;
 
         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
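The lockdep hunks apply a standard re-entrancy guard: the exported hook now only checks debug_locks and current->lockdep_recursion up front, sets the recursion flag, calls a static __trace_hardirqs_on_caller() helper that does the real work, and clears the flag on the way out, so the hook cannot recurse into itself through the IRQ tracing it performs; trace_softirqs_on()/trace_softirqs_off() gain the same flag handling. A minimal userspace sketch of that guard pattern (illustrative names, not kernel code):

/*
 * Minimal sketch of the recursion guard used above: an outer entry point
 * sets a per-thread flag before calling the worker, so any re-entry from
 * inside the worker bails out early instead of recursing.
 * The names on_caller/__on_caller_worker are hypothetical.
 */
#include <stdio.h>

static __thread int recursion;          /* stand-in for current->lockdep_recursion */

static void __on_caller_worker(unsigned long ip)
{
        /* pretend this does work that may re-enter on_caller() */
        printf("tracing event from ip=%lx\n", ip);
}

static void on_caller(unsigned long ip)
{
        if (recursion)                  /* already inside the hook: bail out */
                return;

        recursion = 1;                  /* close the door behind us */
        __on_caller_worker(ip);
        recursion = 0;
}

int main(void)
{
        on_caller(0x1234);
        return 0;
}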
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -782,7 +782,7 @@ static inline int can_use_console(unsigned int cpu)
 static int console_trylock_for_printk(unsigned int cpu)
         __releases(&logbuf_lock)
 {
-        int retval = 0;
+        int retval = 0, wake = 0;
 
         if (console_trylock()) {
                 retval = 1;
@@ -795,12 +795,14 @@ static int console_trylock_for_printk(unsigned int cpu)
                  */
                 if (!can_use_console(cpu)) {
                         console_locked = 0;
-                        up(&console_sem);
+                        wake = 1;
                         retval = 0;
                 }
         }
         printk_cpu = UINT_MAX;
         spin_unlock(&logbuf_lock);
+        if (wake)
+                up(&console_sem);
         return retval;
 }
 static const char recursion_bug_msg [] =
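The console_trylock_for_printk() hunks record the need to release the console semaphore in a local wake flag while logbuf_lock is still held, and only issue the actual up(&console_sem) once the spinlock has been dropped, which is what fixes the console_sem vs logbuf_lock unlock race. A rough userspace sketch of that shape, using pthread and POSIX semaphore stand-ins (assumed names, not the kernel implementation):

/*
 * Sketch: decide *under* the lock that a wakeup is needed, perform it only
 * *after* the lock is released. buf_lock plays logbuf_lock, console_sem
 * plays the kernel's console_sem, console_usable plays can_use_console().
 */
#include <pthread.h>
#include <semaphore.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static sem_t console_sem;
static int console_usable;

static int trylock_for_print(void)
{
        int retval = 0, wake = 0;

        pthread_mutex_lock(&buf_lock);
        if (sem_trywait(&console_sem) == 0) {
                retval = 1;
                if (!console_usable) {
                        wake = 1;       /* remember to release, do it later */
                        retval = 0;
                }
        }
        pthread_mutex_unlock(&buf_lock);

        if (wake)                       /* the wakeup happens outside buf_lock */
                sem_post(&console_sem);
        return retval;
}

int main(void)
{
        sem_init(&console_sem, 0, 1);
        console_usable = 1;
        return !trylock_for_print();
}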
@@ -1242,7 +1244,7 @@ void console_unlock(void)
 {
         unsigned long flags;
         unsigned _con_start, _log_end;
-        unsigned wake_klogd = 0;
+        unsigned wake_klogd = 0, retry = 0;
 
         if (console_suspended) {
                 up(&console_sem);
@@ -1251,6 +1253,7 @@ void console_unlock(void)
 
         console_may_schedule = 0;
 
+again:
         for ( ; ; ) {
                 spin_lock_irqsave(&logbuf_lock, flags);
                 wake_klogd |= log_start - log_end;
@@ -1271,8 +1274,23 @@ void console_unlock(void)
         if (unlikely(exclusive_console))
                 exclusive_console = NULL;
 
+        spin_unlock(&logbuf_lock);
+
         up(&console_sem);
+
+        /*
+         * Someone could have filled up the buffer again, so re-check if there's
+         * something to flush. In case we cannot trylock the console_sem again,
+         * there's a new owner and the console_unlock() from them will do the
+         * flush, no worries.
+         */
+        spin_lock(&logbuf_lock);
+        if (con_start != log_end)
+                retry = 1;
         spin_unlock_irqrestore(&logbuf_lock, flags);
+        if (retry && console_trylock())
+                goto again;
+
         if (wake_klogd)
                 wake_up_klogd();
 }
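The console_unlock() hunks release logbuf_lock before console_sem and then re-take logbuf_lock to check whether new messages were queued while the buffer was being flushed; if so, and console_trylock() succeeds, the function jumps back to again: and flushes once more, otherwise the new console_sem owner will do it. A compact userspace sketch of that unlock/re-check/retry shape (stand-in names, not the kernel code):

/*
 * Sketch: after dropping the locks, re-check whether more data arrived and,
 * if the "console" can be re-acquired with a trylock, loop back and flush
 * again so nothing is left stranded. buf_lock plays logbuf_lock,
 * console_lock plays console_sem, pending plays con_start != log_end.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending;

static void flush_pending(void)         /* plays the call_console_drivers() loop */
{
        pthread_mutex_lock(&buf_lock);
        pending = 0;
        pthread_mutex_unlock(&buf_lock);
}

static void console_release(void)       /* called with console_lock held */
{
        bool retry;
again:
        flush_pending();

        pthread_mutex_unlock(&console_lock);   /* let others take over */

        /* Someone may have queued more data while we were flushing. */
        pthread_mutex_lock(&buf_lock);
        retry = pending != 0;
        pthread_mutex_unlock(&buf_lock);

        /* More work and nobody else owns the console: do it ourselves. */
        if (retry && pthread_mutex_trylock(&console_lock) == 0)
                goto again;
}

int main(void)
{
        pthread_mutex_lock(&console_lock);
        pending = 1;
        console_release();
        return 0;
}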