mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2025-01-18 15:06:23 +07:00
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core/locking changes from Ingo Molnar:
 "Main changes:

   - another mutex optimization, from Davidlohr Bueso

   - improved lglock lockdep tracking, from Michel Lespinasse

   - [ assorted smaller updates, improvements, cleanups. ]"

* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  generic-ipi/locking: Fix misleading smp_call_function_any() description
  hung_task debugging: Print more info when reporting the problem
  mutex: Avoid label warning when !CONFIG_MUTEX_SPIN_ON_OWNER
  mutex: Do not unnecessarily deal with waiters
  mutex: Fix/document access-once assumption in mutex_can_spin_on_owner()
  lglock: Update lockdep annotations to report recursive local locks
  lockdep: Introduce lock_acquire_exclusive()/shared() helper macros
This commit is contained in:
commit 4689550bb2
@@ -365,7 +365,7 @@ extern void lockdep_trace_alloc(gfp_t mask);

#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)

#else /* !LOCKDEP */
#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
@@ -479,82 +479,36 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#ifdef CONFIG_PROVE_LOCKING
# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 2, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 2, n, i)
#else
# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i)
#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)
#endif

#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i) lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i) do { } while (0)
# define spin_release(l, n, i) do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i)
# else
# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i) lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i) do { } while (0)
# define rwlock_acquire_read(l, s, t, i) do { } while (0)
# define rwlock_release(l, n, i) do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
# else
# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
# endif
#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i) lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i) do { } while (0)
# define mutex_acquire_nest(l, s, t, n, i) do { } while (0)
# define mutex_release(l, n, i) do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i)
# else
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
# define rwsem_release(l, n, i) lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i) do { } while (0)
# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0)
# define rwsem_acquire_read(l, s, t, i) do { } while (0)
# define rwsem_release(l, n, i) do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
# else
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
# endif
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
# define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l) do { } while (0)
# define lock_map_acquire_read(l) do { } while (0)
# define lock_map_release(l) do { } while (0)
#endif

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) \
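The lockdep.h hunk above collapses the repeated per-primitive lock_acquire() argument lists into three helpers that name the acquisition mode. As a rough stand-alone illustration only (none of this code is from the commit; lock_acquire() is stubbed out here to print its read/check arguments), the per-primitive wrappers reduce to exclusive (read=0), shared (read=1) and shared-recursive (read=2) acquisitions:

#include <stdio.h>

/* Stand-in for the kernel's lockdep hook: just print the read/check mode. */
struct lockdep_map { const char *name; };

static void lock_acquire(struct lockdep_map *l, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest, unsigned long ip)
{
	printf("%s: read=%d check=%d\n", l->name, read, check);
}

/* The new helpers, mirroring the CONFIG_PROVE_LOCKING branch of the hunk. */
#define lock_acquire_exclusive(l, s, t, n, i)        lock_acquire(l, s, t, 0, 2, n, i)
#define lock_acquire_shared(l, s, t, n, i)           lock_acquire(l, s, t, 1, 2, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 2, n, i)

/* Per-primitive wrappers now pick a mode instead of repeating magic numbers. */
#define spin_acquire(l, s, t, i)        lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_read(l, s, t, i)  lock_acquire_shared(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)

int main(void)
{
	struct lockdep_map spin = { "spinlock" }, sem = { "rwsem" }, rwl = { "rwlock" };

	spin_acquire(&spin, 0, 0, 0);        /* exclusive: read=0 */
	rwsem_acquire_read(&sem, 0, 0, 0);   /* shared: read=1 */
	rwlock_acquire_read(&rwl, 0, 0, 0);  /* shared recursive: read=2 */
	return 0;
}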
@@ -15,6 +15,7 @@
#include <linux/lockdep.h>
#include <linux/export.h>
#include <linux/sysctl.h>
#include <linux/utsname.h>

/*
 * The number of tasks checked:
@@ -99,9 +100,13 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 * Ok, the task did not get scheduled for more than 2 minutes,
 * complain:
 */
printk(KERN_ERR "INFO: task %s:%d blocked for more than "
"%ld seconds.\n", t->comm, t->pid, timeout);
printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
t->comm, t->pid, timeout);
pr_err(" %s %s %.*s\n",
print_tainted(), init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
" disables this message.\n");
sched_show_task(t);
debug_show_held_locks(t);
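The new pr_err() lines add the taint flags, the kernel release, and the first word of the version string to the hung-task report; %.*s combined with (int)strcspn(version, " ") is what truncates at the first space. A minimal user-space sketch of that idiom (the version string below is made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Stand-in for init_utsname()->version; only the first word is wanted. */
	const char *version = "#1 SMP Thu Aug 1 12:00:00 UTC 2013";

	/* %.*s takes a maximum field width as an int argument, so printing
	 * stops right before the first space found by strcspn(). */
	printf("%.*s\n", (int)strcspn(version, " "), version);
	return 0;
}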
@@ -21,7 +21,7 @@ void lg_local_lock(struct lglock *lg)
arch_spinlock_t *lock;

preempt_disable();
rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
lock = this_cpu_ptr(lg->lock);
arch_spin_lock(lock);
}
@@ -31,7 +31,7 @@ void lg_local_unlock(struct lglock *lg)
{
arch_spinlock_t *lock;

rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
lock_release(&lg->lock_dep_map, 1, _RET_IP_);
lock = this_cpu_ptr(lg->lock);
arch_spin_unlock(lock);
preempt_enable();
@@ -43,7 +43,7 @@ void lg_local_lock_cpu(struct lglock *lg, int cpu)
arch_spinlock_t *lock;

preempt_disable();
rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
lock = per_cpu_ptr(lg->lock, cpu);
arch_spin_lock(lock);
}
@@ -53,7 +53,7 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu)
{
arch_spinlock_t *lock;

rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
lock_release(&lg->lock_dep_map, 1, _RET_IP_);
lock = per_cpu_ptr(lg->lock, cpu);
arch_spin_unlock(lock);
preempt_enable();
@@ -65,7 +65,7 @@ void lg_global_lock(struct lglock *lg)
int i;

preempt_disable();
rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_);
lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
for_each_possible_cpu(i) {
arch_spinlock_t *lock;
lock = per_cpu_ptr(lg->lock, i);
@@ -78,7 +78,7 @@ void lg_global_unlock(struct lglock *lg)
{
int i;

rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
lock_release(&lg->lock_dep_map, 1, _RET_IP_);
for_each_possible_cpu(i) {
arch_spinlock_t *lock;
lock = per_cpu_ptr(lg->lock, i);
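With the hunks above, lg_local_lock() is annotated as a plain shared acquisition (read=1) instead of a recursive read (read=2), so lockdep no longer stays quiet when the same lglock is taken twice on one CPU. The toy model below captures only that one rule; it is a loose user-space simplification of lockdep's check_deadlock() behaviour, not kernel code:

#include <stdio.h>

enum lock_mode { EXCLUSIVE = 0, SHARED = 1, SHARED_RECURSIVE = 2 };

/*
 * Simplified rule: re-acquiring a class you already hold is only tolerated
 * when the new acquisition is a recursive read stacked on another read.
 */
static int reacquire_is_reported(enum lock_mode held, enum lock_mode next)
{
	if (next == SHARED_RECURSIVE && held != EXCLUSIVE)
		return 0;	/* recursive read: not reported */
	return 1;		/* everything else: possible deadlock, report it */
}

int main(void)
{
	/* Old annotation: rwlock_acquire_read() == recursive read, stays quiet. */
	printf("old lg_local_lock taken twice: %s\n",
	       reacquire_is_reported(SHARED_RECURSIVE, SHARED_RECURSIVE) ?
	       "reported" : "silent");

	/* New annotation: lock_acquire_shared() == plain read, gets reported. */
	printf("new lg_local_lock taken twice: %s\n",
	       reacquire_is_reported(SHARED, SHARED) ? "reported" : "silent");
	return 0;
}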
@@ -209,11 +209,13 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
struct task_struct *owner;
int retval = 1;

rcu_read_lock();
if (lock->owner)
retval = lock->owner->on_cpu;
owner = ACCESS_ONCE(lock->owner);
if (owner)
retval = owner->on_cpu;
rcu_read_unlock();
/*
 * if lock->owner is not set, the mutex owner may have just acquired
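The mutex_can_spin_on_owner() fix above reads lock->owner exactly once into a local before testing and dereferencing it; with two separate reads, the owner could become NULL between the check and the ->on_cpu access. A stand-alone sketch of the pattern, using the kernel's ACCESS_ONCE() definition (GNU C typeof) on made-up lock/task structures:

#include <stddef.h>

/* The kernel's ACCESS_ONCE(): force a single volatile read of the lvalue. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

struct task { int on_cpu; };
struct lock { struct task *owner; };	/* owner may be cleared concurrently */

static int can_spin_on_owner(struct lock *lock)
{
	struct task *owner;
	int retval = 1;

	/*
	 * One read, then test and dereference the snapshot; re-reading
	 * lock->owner after the NULL check could observe a different value.
	 */
	owner = ACCESS_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	return retval;
}

int main(void)
{
	struct task t = { .on_cpu = 1 };
	struct lock l = { .owner = &t };

	return !can_spin_on_owner(&l);	/* exits 0 when the owner is on a CPU */
}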
@@ -461,7 +463,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 * performed the optimistic spinning cannot be done.
 */
if (ACCESS_ONCE(ww->ctx))
break;
goto slowpath;
}

/*
@@ -472,7 +474,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
owner = ACCESS_ONCE(lock->owner);
if (owner && !mutex_spin_on_owner(lock, owner)) {
mspin_unlock(MLOCK(lock), &node);
break;
goto slowpath;
}

if ((atomic_read(&lock->count) == 1) &&
@@ -499,7 +501,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 * the owner complete.
 */
if (!owner && (need_resched() || rt_task(task)))
break;
goto slowpath;

/*
 * The cpu_relax() call is a compiler barrier which forces
@@ -513,6 +515,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
#endif
spin_lock_mutex(&lock->wait_lock, flags);

/* once more, can we acquire the lock? */
if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
goto skip_wait;

debug_mutex_lock_common(lock, &waiter);
debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

@@ -520,9 +526,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
list_add_tail(&waiter.list, &lock->wait_list);
waiter.task = task;

if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
goto done;

lock_contended(&lock->dep_map, ip);

for (;;) {
@@ -561,24 +564,25 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
schedule_preempt_disabled();
spin_lock_mutex(&lock->wait_lock, flags);
}

done:
lock_acquired(&lock->dep_map, ip);
/* got the lock - rejoice! */
mutex_remove_waiter(lock, &waiter, current_thread_info());
/* set it to 0 if there are no waiters left: */
if (likely(list_empty(&lock->wait_list)))
atomic_set(&lock->count, 0);
debug_mutex_free_waiter(&waiter);

skip_wait:
/* got the lock - cleanup and rejoice! */
lock_acquired(&lock->dep_map, ip);
mutex_set_owner(lock);

if (!__builtin_constant_p(ww_ctx == NULL)) {
struct ww_mutex *ww = container_of(lock,
struct ww_mutex,
base);
struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
struct mutex_waiter *cur;

/*
 * This branch gets optimized out for the common case,
 * and is only important for ww_mutex_lock.
 */

ww_mutex_lock_acquired(ww, ww_ctx);
ww->ctx = ww_ctx;

@@ -592,15 +596,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
}
}

/* set it to 0 if there are no waiters left: */
if (likely(list_empty(&lock->wait_list)))
atomic_set(&lock->count, 0);

spin_unlock_mutex(&lock->wait_lock, flags);

debug_mutex_free_waiter(&waiter);
preempt_enable();

return 0;

err:
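The reworked slow path above retries the lock with one atomic exchange right after taking wait_lock and jumps to skip_wait when it wins, so the waiter setup and teardown are skipped entirely. Below is a loose user-space model of the count convention involved (1 = unlocked, 0 = locked without waiters, -1 = locked with possible waiters), using C11 atomics rather than the kernel's primitives; the MUTEX_SHOW_NO_WAITER guard and the wait-list handling are deliberately left out:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Count convention modeled on the old-style mutex code:
 *  1  unlocked
 *  0  locked, no waiters
 * -1  locked, there may be waiters
 */
static bool try_fastpath(atomic_int *count)
{
	/* "once more, can we acquire the lock?": grab it with one exchange and
	 * skip all waiter bookkeeping if the previous value was 1 (unlocked). */
	return atomic_exchange(count, 0) == 1;
}

int main(void)
{
	atomic_int count;

	atomic_init(&count, 1);
	if (try_fastpath(&count))
		printf("lock taken, wait-list setup skipped (skip_wait)\n");
	else
		printf("lock busy, must be queued as a waiter\n");
	return 0;
}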
@@ -278,8 +278,6 @@ EXPORT_SYMBOL(smp_call_function_single);
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 * Note that @wait will be implicitly turned on in case of allocation failures,
 * since we fall back to on-stack allocation.
 *
 * Selection preference:
 * 1) current cpu if in @mask