mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-05 07:26:48 +07:00)
c86ad14d30
Pull locking updates from Ingo Molnar:
 "The locking tree was busier in this cycle than the usual pattern - a
  couple of major projects happened to coincide.

  The main changes are:

   - implement the atomic_fetch_{add,sub,and,or,xor}() API natively
     across all SMP architectures (Peter Zijlstra)

   - add atomic_fetch_{inc/dec}() as well, using the generic primitives
     (Davidlohr Bueso)

   - optimize various aspects of rwsems (Jason Low, Davidlohr Bueso,
     Waiman Long)

   - optimize smp_cond_load_acquire() on arm64 and implement LSE based
     atomic{,64}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
     on arm64 (Will Deacon)

   - introduce smp_acquire__after_ctrl_dep() and fix various barrier
     mis-uses and bugs (Peter Zijlstra)

   - after discovering ancient spin_unlock_wait() barrier bugs in its
     implementation and usage, strengthen its semantics and update/fix
     usage sites (Peter Zijlstra)

   - optimize mutex_trylock() fastpath (Peter Zijlstra)

   - ... misc fixes and cleanups"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (67 commits)
  locking/atomic: Introduce inc/dec variants for the atomic_fetch_$op() API
  locking/barriers, arch/arm64: Implement LDXR+WFE based smp_cond_load_acquire()
  locking/static_keys: Fix non static symbol Sparse warning
  locking/qspinlock: Use __this_cpu_dec() instead of full-blown this_cpu_dec()
  locking/atomic, arch/tile: Fix tilepro build
  locking/atomic, arch/m68k: Remove comment
  locking/atomic, arch/arc: Fix build
  locking/Documentation: Clarify limited control-dependency scope
  locking/atomic, arch/rwsem: Employ atomic_long_fetch_add()
  locking/atomic, arch/qrwlock: Employ atomic_fetch_add_acquire()
  locking/atomic, arch/mips: Convert to _relaxed atomics
  locking/atomic, arch/alpha: Convert to _relaxed atomics
  locking/atomic: Remove the deprecated atomic_{set,clear}_mask() functions
  locking/atomic: Remove linux/atomic.h:atomic_fetch_or()
  locking/atomic: Implement atomic{,64,_long}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
  locking/atomic: Fix atomic64_relaxed() bits
  locking/atomic, arch/xtensa: Implement atomic_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/x86: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/tile: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/sparc: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  ...
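
The headline change, the atomic_fetch_$op() API, applies an operation atomically and returns the value the variable held *before* the operation, in contrast to the older atomic_$op_return() helpers, which return the new value. A minimal userspace sketch of those semantics, using C11 <stdatomic.h> as a stand-in for the in-kernel atomic_t API (note the in-kernel calls take the operand first, e.g. atomic_fetch_add(i, v)):

	#include <stdatomic.h>
	#include <stdio.h>

	int main(void)
	{
		atomic_int v = 10;

		/* Atomically add 5 and get the OLD value back. */
		int old = atomic_fetch_add(&v, 5);

		/* An atomic_add_return()-style helper would return the NEW value. */
		printf("old=%d new=%d\n", old, atomic_load(&v)); /* old=10 new=15 */
		return 0;
	}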
56 lines
1.7 KiB
C
/*
 * Mutexes: blocking mutual exclusion locks
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * This file contains mutex debugging related internal declarations,
 * prototypes and inline functions, for the CONFIG_DEBUG_MUTEXES case.
 * More details are in kernel/mutex-debug.c.
 */

/*
 * This must be called with lock->wait_lock held.
 */
extern void debug_mutex_lock_common(struct mutex *lock,
				    struct mutex_waiter *waiter);
extern void debug_mutex_wake_waiter(struct mutex *lock,
				    struct mutex_waiter *waiter);
extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
extern void debug_mutex_add_waiter(struct mutex *lock,
				   struct mutex_waiter *waiter,
				   struct task_struct *task);
extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
				struct task_struct *task);
extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name,
			     struct lock_class_key *key);

static inline void mutex_set_owner(struct mutex *lock)
{
	WRITE_ONCE(lock->owner, current);
}

static inline void mutex_clear_owner(struct mutex *lock)
{
	WRITE_ONCE(lock->owner, NULL);
}
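
/*
 * Annotation (not part of the original file): WRITE_ONCE() forces a
 * single store that the compiler may not tear, fuse or re-issue.
 * Presumably this is defensive against code reading ->owner without
 * serialization observing a half-written pointer; a plain assignment
 * would leave the compiler free to split the store.
 */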

#define spin_lock_mutex(lock, flags)			\
	do {						\
		struct mutex *l = container_of(lock, struct mutex, wait_lock); \
							\
		DEBUG_LOCKS_WARN_ON(in_interrupt());	\
		local_irq_save(flags);			\
		arch_spin_lock(&(lock)->rlock.raw_lock);\
		DEBUG_LOCKS_WARN_ON(l->magic != l);	\
	} while (0)

#define spin_unlock_mutex(lock, flags)				\
	do {							\
		arch_spin_unlock(&(lock)->rlock.raw_lock);	\
		local_irq_restore(flags);			\
		preempt_check_resched();			\
	} while (0)
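
Taken together: the mutex slowpath in kernel/locking/mutex.c takes the mutex's internal wait_lock through the wrappers above, so the debug hooks run with lock->wait_lock held, as the comment on debug_mutex_lock_common() requires. A hedged sketch of the typical call pattern (the hypothetical debug_lock_sketch() below compresses the real, much larger __mutex_lock_common() down to the debug-relevant calls):

	/* Sketch only: spinning, scheduling and error paths elided. */
	static void debug_lock_sketch(struct mutex *lock)
	{
		struct mutex_waiter waiter;
		unsigned long flags;

		spin_lock_mutex(&lock->wait_lock, flags);

		debug_mutex_lock_common(lock, &waiter);	/* wait_lock is held */
		debug_mutex_add_waiter(lock, &waiter, current);

		/* ... sleep until the mutex is handed to this task ... */

		mutex_remove_waiter(lock, &waiter, current);
		mutex_set_owner(lock);
		spin_unlock_mutex(&lock->wait_lock, flags);
	}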