mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-29 20:06:38 +07:00
60063497a9
This allows us to move duplicated code in <asm/atomic.h> (atomic_inc_not_zero() for now) to <linux/atomic.h> Signed-off-by: Arun Sharma <asharma@fb.com> Reviewed-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: David Miller <davem@davemloft.net> Cc: Eric Dumazet <eric.dumazet@gmail.com> Acked-by: Mike Frysinger <vapier@gentoo.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
100 lines
2.2 KiB
C
100 lines
2.2 KiB
C
#ifndef __LINUX_BIT_SPINLOCK_H
#define __LINUX_BIT_SPINLOCK_H

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/atomic.h>
/*
 * bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 *
 * Acquires the lock implemented by bit @bitnum of the word at @addr,
 * spinning until it becomes free. Preemption is disabled while the lock
 * is held; on UP builds without debug the bit itself is never touched
 * (disabling preemption alone is sufficient for exclusion).
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	/*
	 * Assuming the lock is uncontended, this never enters
	 * the body of the outer loop. If it is contended, then
	 * within the inner loop a non-atomic test is used to
	 * busywait with less bus contention for a good time to
	 * attempt to acquire the lock bit.
	 */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		/*
		 * Lost the race: re-enable preemption so other tasks can
		 * run (the holder may need this CPU), then busy-wait with
		 * plain reads until the bit looks clear before retrying
		 * the atomic acquire with preemption off again.
		 */
		preempt_enable();
		do {
			cpu_relax();
		} while (test_bit(bitnum, addr));
		preempt_disable();
	}
#endif
	__acquire(bitlock);	/* sparse annotation: lock now held */
}
|
|
|
|
/*
 * bit-based spin_trylock()
 *
 * Single non-blocking acquisition attempt on bit @bitnum of the word at
 * @addr. Return true (1) if it was acquired; on failure preemption is
 * restored and 0 is returned without spinning.
 */
static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		/* Bit already set: someone else holds the lock. */
		preempt_enable();
		return 0;
	}
#endif
	__acquire(bitlock);	/* sparse annotation: lock now held */
	return 1;
}
|
|
|
|
/*
 * bit-based spin_unlock()
 *
 * Releases the lock taken by bit_spin_lock()/bit_spin_trylock() on bit
 * @bitnum of the word at @addr, then re-enables preemption.
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	/* Catch unlock of a lock that is not actually held. */
	BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	/*
	 * Pairs with test_and_set_bit_lock() in the lock path; must
	 * happen before preemption is re-enabled below.
	 */
	clear_bit_unlock(bitnum, addr);
#endif
	preempt_enable();
	__release(bitlock);	/* sparse annotation: lock released */
}
|
|
|
|
/*
 * bit-based spin_unlock()
 * non-atomic version, which can be used eg. if the bit lock itself is
 * protecting the rest of the flags in the word.
 *
 * Identical to bit_spin_unlock() except the bit is cleared with the
 * non-atomic __clear_bit_unlock(); only safe when no other modification
 * of the word can race with this store.
 */
static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	/* Catch unlock of a lock that is not actually held. */
	BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	/* Non-atomic clear; pairs with test_and_set_bit_lock(). */
	__clear_bit_unlock(bitnum, addr);
#endif
	preempt_enable();
	__release(bitlock);	/* sparse annotation: lock released */
}
|
|
|
|
/*
 * Return true if the lock is held.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	/* SMP/debug: the lock bit itself records the held state. */
	return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT_COUNT
	/*
	 * UP with preempt accounting: the bit is never set (see the
	 * lock path), but holders run with preemption disabled, so a
	 * non-zero preempt count approximates "held".
	 */
	return preempt_count();
#else
	/* UP without preempt accounting: cannot tell; claim held. */
	return 1;
#endif
}
|
|
|
|
#endif /* __LINUX_BIT_SPINLOCK_H */