mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-14 13:06:45 +07:00
3552a07a9c
As of 654672d4ba (locking/atomics: Add _{acquire|release|relaxed}() variants of some atomic operations)
and 6d79ef2d30 (locking, asm-generic: Add _{relaxed|acquire|release}() variants for 'atomic_long_t'),
weakly ordered archs can benefit from a more relaxed use of barriers when locking and unlocking, instead
of regular full-barrier semantics. While currently only arm64 supports such optimizations, updating the
corresponding locking primitives lets other archs benefit immediately as well, once the necessary
machinery is implemented.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1443643395-17016-5-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
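For context, what the patch does in this file: the fully ordered xchg() and cmpxchg() in the lock and unlock fast paths are replaced by their acquire/release forms, so a weakly ordered architecture only pays for the ordering the lock semantics actually require. A minimal before/after sketch (illustrative only, not part of the file):

	/* before: fully ordered, implies a full barrier on both sides of the op */
	prev = xchg(lock, node);			/* lock fast path */
	if (cmpxchg(lock, node, NULL) == node)		/* unlock fast path */
		return;

	/* after: only ACQUIRE on lock and RELEASE on unlock */
	prev = xchg_acquire(lock, node);
	if (cmpxchg_release(lock, node, NULL) == node)
		return;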
113 lines
3.3 KiB
C
/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair, and with each cpu trying
 * to acquire the lock spinning on a local variable.
 * It avoids the expensive cache-line bouncing that common test-and-set
 * spin-lock implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

#include <asm/mcs_spinlock.h>

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked; /* 1 if lock acquired */
	int count;  /* nesting count, see qspinlock.c */
};

#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_load_acquire() provides a memory barrier that ensures
 * subsequent operations happen after the lock is acquired.
 */
#define arch_mcs_spin_lock_contended(l)					\
do {									\
	while (!(smp_load_acquire(l)))					\
		cpu_relax_lowlatency();					\
} while (0)
#endif

#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure all
 * operations in the critical section have been completed before
 * unlocking.
 */
#define arch_mcs_spin_unlock_contended(l)				\
	smp_store_release((l), 1)
#endif

/*
 * Note: the smp_load_acquire/smp_store_release pair is not
 * sufficient to form a full memory barrier across
 * cpus for many architectures (except x86) for mcs_unlock and mcs_lock.
 * For applications that need a full barrier across multiple cpus
 * with mcs_unlock and mcs_lock pair, smp_mb__after_unlock_lock() should be
 * used after mcs_lock.
 */

/*
 * In order to acquire the lock, the caller should declare a local node and
 * pass a reference of the node to this function in addition to the lock.
 * If the lock has already been acquired, then this will proceed to spin
 * on this node->locked until the previous lock holder sets the node->locked
 * in mcs_spin_unlock().
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg_acquire(lock, node);
	if (likely(prev == NULL)) {
		/*
		 * Lock acquired, don't need to set node->locked to 1. Threads
		 * only spin on their own node->locked value for lock acquisition.
		 * However, since this thread can immediately acquire the lock
		 * and does not proceed to spin on its own node->locked, this
		 * value won't be used. If a debug mode is needed to
		 * audit lock status, then set node->locked value here.
		 */
		return;
	}
	WRITE_ONCE(prev->next, node);

	/* Wait until the lock holder passes the lock down. */
	arch_mcs_spin_lock_contended(&node->locked);
}

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = READ_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (likely(cmpxchg_release(lock, node, NULL) == node))
			return;
		/* Wait until the next pointer is set */
		while (!(next = READ_ONCE(node->next)))
			cpu_relax_lowlatency();
	}

	/* Pass lock to next waiter. */
	arch_mcs_spin_unlock_contended(&next->locked);
}

#endif /* __LINUX_MCS_SPINLOCK_H */
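Usage note (not part of the header): a caller must provide its own queue node, typically on the stack, and must pass that same node to the unlock. A hedged sketch, where the lock variable and function name (my_mcs_lock, do_critical_work) are illustrative rather than taken from this file:

	static struct mcs_spinlock *my_mcs_lock;	/* NULL means unlocked */

	static void do_critical_work(void)
	{
		struct mcs_spinlock node;		/* this CPU's queue entry */

		mcs_spin_lock(&my_mcs_lock, &node);	/* queue behind any previous holder */
		/* ... critical section, ordered after the acquire above ... */
		mcs_spin_unlock(&my_mcs_lock, &node);	/* hand the lock to the next waiter, if any */
	}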