mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-22 10:24:15 +07:00
d157bd860f
The rwsem-xadd count has been converted to an atomic variable and the rwsem code now directly uses atomic_long_add() and atomic_long_add_return(), so we can remove the arch implementations of rwsem_atomic_add() and rwsem_atomic_update(). Signed-off-by: Jason Low <jason.low2@hpe.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Christoph Lameter <cl@linux.com> Cc: Davidlohr Bueso <dave@stgolabs.net> Cc: Fenghua Yu <fenghua.yu@intel.com> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru> Cc: Jason Low <jason.low2@hp.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Matt Turner <mattst88@gmail.com> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Peter Hurley <peter@hurleysoftware.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Richard Henderson <rth@twiddle.net> Cc: Terry Rudd <terry.rudd@hpe.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tim Chen <tim.c.chen@linux.intel.com> Cc: Tony Luck <tony.luck@intel.com> Cc: Waiman Long <Waiman.Long@hpe.com> Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
196 lines
4.3 KiB
C
#ifndef _ALPHA_RWSEM_H
#define _ALPHA_RWSEM_H

/*
 * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
 * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/compiler.h>

/*
 * Layout of the 64-bit count: the low 32 bits track active lockers,
 * the high 32 bits go negative while tasks are queued waiting.
 * A reader adds ACTIVE_BIAS; a writer adds ACTIVE_BIAS + WAITING_BIAS,
 * which drives the whole count negative for the duration of the write
 * hold so contending lockers notice immediately.
 */
#define RWSEM_UNLOCKED_VALUE		0x0000000000000000L
#define RWSEM_ACTIVE_BIAS		0x0000000000000001L
#define RWSEM_ACTIVE_MASK		0x00000000ffffffffL
#define RWSEM_WAITING_BIAS		(-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
/*
 * Lock for reading: atomically add the reader bias to the count.
 * If the old count was negative a writer holds (or is queued on)
 * the semaphore, so fall into the generic slow path.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef CONFIG_SMP
	/* UP: no cross-CPU race, a plain read-modify-write is enough. */
	oldcount = sem->count.counter;
	sem->count.counter += RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"	/* load-locked the count */
	" addq %0,%3,%2\n"	/* add the reader bias */
	" stq_c %2,%1\n"	/* store-conditional it back */
	" beq %2,2f\n"		/* lost the reservation -> retry */
	" mb\n"			/* acquire barrier after taking the lock */
	".subsection 2\n"
	"2: br 1b\n"		/* retry path kept out of line */
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount < 0))
		rwsem_down_read_failed(sem);
}
|
|
|
|
/*
|
|
* trylock for reading -- returns 1 if successful, 0 if contention
|
|
*/
|
|
static inline int __down_read_trylock(struct rw_semaphore *sem)
|
|
{
|
|
long old, new, res;
|
|
|
|
res = atomic_long_read(&sem->count);
|
|
do {
|
|
new = res + RWSEM_ACTIVE_READ_BIAS;
|
|
if (new <= 0)
|
|
break;
|
|
old = res;
|
|
res = atomic_long_cmpxchg(&sem->count, old, new);
|
|
} while (res != old);
|
|
return res >= 0 ? 1 : 0;
|
|
}
|
|
|
|
/*
 * Add the writer bias to the count and return the previous value.
 * A zero return means the semaphore was completely free and the write
 * lock is now held; any non-zero return sends the caller to the slow
 * path.
 */
static inline long ___down_write(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef CONFIG_SMP
	/* UP: no cross-CPU race, a plain read-modify-write is enough. */
	oldcount = sem->count.counter;
	sem->count.counter += RWSEM_ACTIVE_WRITE_BIAS;
#else
	long temp;
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"	/* load-locked the count */
	" addq %0,%3,%2\n"	/* add the writer bias */
	" stq_c %2,%1\n"	/* store-conditional it back */
	" beq %2,2f\n"		/* lost the reservation -> retry */
	" mb\n"			/* acquire barrier after taking the lock */
	".subsection 2\n"
	"2: br 1b\n"		/* retry path kept out of line */
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	return oldcount;
}
|
|
|
|
/*
 * Lock for writing, sleeping in the generic slow path when the
 * semaphore was not free.
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long old = ___down_write(sem);

	if (unlikely(old != 0))
		rwsem_down_write_failed(sem);
}
|
|
|
|
static inline int __down_write_killable(struct rw_semaphore *sem)
|
|
{
|
|
if (unlikely(___down_write(sem)))
|
|
if (IS_ERR(rwsem_down_write_failed_killable(sem)))
|
|
return -EINTR;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* trylock for writing -- returns 1 if successful, 0 if contention
|
|
*/
|
|
static inline int __down_write_trylock(struct rw_semaphore *sem)
|
|
{
|
|
long ret = atomic_long_cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
|
|
RWSEM_ACTIVE_WRITE_BIAS);
|
|
if (ret == RWSEM_UNLOCKED_VALUE)
|
|
return 1;
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Unlock after reading: atomically drop the reader bias.  When the
 * count was negative (waiters queued) and we were the last active
 * locker, wake the waiters.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef CONFIG_SMP
	/* UP: no cross-CPU race, a plain read-modify-write is enough. */
	oldcount = sem->count.counter;
	sem->count.counter -= RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	__asm__ __volatile__(
	" mb\n"			/* release barrier before dropping the lock */
	"1: ldq_l %0,%1\n"	/* load-locked the count */
	" subq %0,%3,%2\n"	/* subtract the reader bias */
	" stq_c %2,%1\n"	/* store-conditional it back */
	" beq %2,2f\n"		/* lost the reservation -> retry */
	".subsection 2\n"
	"2: br 1b\n"		/* retry path kept out of line */
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount < 0))
		/*
		 * oldcount is the pre-decrement value; if its active
		 * (low 32-bit) part is about to reach zero, the waiters
		 * can run now.
		 */
		if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
			rwsem_wake(sem);
}
|
|
|
|
/*
 * Unlock after writing: atomically drop the writer bias.  When the
 * resulting count is non-zero but its active part is zero, tasks are
 * still queued and must be woken.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long count;
#ifndef CONFIG_SMP
	/* UP: no cross-CPU race, a plain read-modify-write is enough. */
	sem->count.counter -= RWSEM_ACTIVE_WRITE_BIAS;
	count = sem->count.counter;
#else
	long temp;
	__asm__ __volatile__(
	" mb\n"			/* release barrier before dropping the lock */
	"1: ldq_l %0,%1\n"	/* load-locked the count */
	" subq %0,%3,%2\n"	/* subtract the writer bias */
	" stq_c %2,%1\n"	/* store-conditional it back */
	" beq %2,2f\n"		/* lost the reservation -> retry */
	" subq %0,%3,%0\n"	/* recompute: %0 becomes the NEW count */
	".subsection 2\n"
	"2: br 1b\n"		/* retry path kept out of line */
	".previous"
	:"=&r" (count), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(count))
		/*
		 * New count non-zero with a zero active (low 32-bit)
		 * part means only waiting biases remain -> wake them.
		 */
		if ((int)count == 0)
			rwsem_wake(sem);
}
|
|
|
|
/*
 * downgrade write lock to read lock
 *
 * Remove the waiting-bias component of the write hold, leaving a
 * single reader bias in the count, then let queued readers run.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef CONFIG_SMP
	/* UP: no cross-CPU race, a plain read-modify-write is enough. */
	oldcount = sem->count.counter;
	sem->count.counter -= RWSEM_WAITING_BIAS;
#else
	long temp;
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"	/* load-locked the count */
	" addq %0,%3,%2\n"	/* add -WAITING_BIAS (drop the write hold's high part) */
	" stq_c %2,%1\n"	/* store-conditional it back */
	" beq %2,2f\n"		/* lost the reservation -> retry */
	" mb\n"
	".subsection 2\n"
	"2: br 1b\n"		/* retry path kept out of line */
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
#endif
	/*
	 * NOTE(review): oldcount is the PRE-update value, which looks
	 * like it is always negative while the write lock is held
	 * (RWSEM_ACTIVE_WRITE_BIAS is negative), so this wake appears
	 * unconditional; rwsem_downgrade_wake() presumably tolerates an
	 * empty wait queue -- confirm against the generic rwsem code.
	 */
	if (unlikely(oldcount < 0))
		rwsem_downgrade_wake(sem);
}
|
|
|
|
#endif /* __KERNEL__ */
|
|
#endif /* _ALPHA_RWSEM_H */
|