mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-24 19:40:12 +07:00

commit a4c1887d4c

The arch_{read,spin,write}_lock_flags() macros are simply mapped to the
non-flags versions by the majority of architectures, so do this in core
code and remove the dummy implementations. Also remove the implementation
in spinlock_up.h, since all callers of do_raw_spin_lock_flags() call
local_irq_save(flags) anyway.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1507055129-12300-4-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
82 lines
1.9 KiB
C
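For context on the commit above, a minimal sketch of the core-code fallback it describes: mapping the _flags() lock variants onto the plain versions whenever an architecture does not define its own. The guard-macro style follows the convention used in include/linux/spinlock.h; treat this as an illustration, not the verbatim upstream hunk.

/*
 * Sketch of the core-code fallback (assumption: guard-macro style as
 * in include/linux/spinlock.h). If the architecture does not provide
 * a _flags() variant, fall back to the plain lock operation and
 * ignore the saved flags word.
 */
#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#endif

#ifndef arch_read_lock_flags
#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#endif

#ifndef arch_write_lock_flags
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
#endif

With this in core code, architectures such as Blackfin (below) only need the plain arch_*_lock() operations.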
/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef __BFIN_SPINLOCK_H
#define __BFIN_SPINLOCK_H

#ifndef CONFIG_SMP
# include <asm-generic/spinlock.h>
#else

#include <linux/atomic.h>
#include <asm/processor.h>
#include <asm/barrier.h>

/* Lock primitives implemented in Blackfin assembly. */
asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
asmlinkage void __raw_read_lock_asm(volatile int *ptr);
asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
asmlinkage void __raw_write_lock_asm(volatile int *ptr);
asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
asmlinkage void __raw_write_unlock_asm(volatile int *ptr);

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __raw_spin_is_locked_asm(&lock->lock);
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__raw_spin_lock_asm(&lock->lock);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __raw_spin_trylock_asm(&lock->lock);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__raw_spin_unlock_asm(&lock->lock);
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	__raw_read_lock_asm(&rw->lock);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __raw_read_trylock_asm(&rw->lock);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__raw_read_unlock_asm(&rw->lock);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	__raw_write_lock_asm(&rw->lock);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __raw_write_trylock_asm(&rw->lock);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__raw_write_unlock_asm(&rw->lock);
}

#endif /* CONFIG_SMP */

#endif /* !__BFIN_SPINLOCK_H */
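For illustration, a minimal sketch of how kernel code reaches these primitives through the generic spinlock layer; my_lock and my_critical_section are hypothetical names. On an SMP Blackfin build, spin_lock_irqsave() ultimately lands in arch_spin_lock() above; after the commit referenced at the top, no arch-level _flags() variant is needed.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);	/* hypothetical lock for illustration */

static void my_critical_section(void)
{
	unsigned long flags;

	/*
	 * Disables local interrupts, saves the flags word, then takes
	 * the lock; the architecture only supplies arch_spin_lock().
	 */
	spin_lock_irqsave(&my_lock, flags);
	/* ... protected work ... */
	spin_unlock_irqrestore(&my_lock, flags);
}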