mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-23 17:22:01 +07:00
a4c1887d4c
The arch_{read,spin,write}_lock_flags() macros are simply mapped to the non-flags versions by the majority of architectures, so do this in core code and remove the dummy implementations. Also remove the implementation in spinlock_up.h, since all callers of do_raw_spin_lock_flags() call local_irq_save(flags) anyway. Signed-off-by: Will Deacon <will.deacon@arm.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: paulmck@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1507055129-12300-4-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
139 lines
3.9 KiB
C
139 lines
3.9 KiB
C
/*
|
|
* Copyright 2011 Tilera Corporation. All Rights Reserved.
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* as published by the Free Software Foundation, version 2.
|
|
*
|
|
* This program is distributed in the hope that it will be useful, but
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
|
* NON INFRINGEMENT. See the GNU General Public License for
|
|
* more details.
|
|
*
|
|
* 64-bit SMP ticket spinlocks, allowing only a single CPU anywhere
|
|
* (the type definitions are in asm/spinlock_types.h)
|
|
*/
|
|
|
|
#ifndef _ASM_TILE_SPINLOCK_64_H
|
|
#define _ASM_TILE_SPINLOCK_64_H
|
|
|
|
#include <linux/compiler.h>
|
|
|
|
/* Shifts and masks for the various fields in "lock". */
|
|
#define __ARCH_SPIN_CURRENT_SHIFT 17
|
|
#define __ARCH_SPIN_NEXT_MASK 0x7fff
|
|
#define __ARCH_SPIN_NEXT_OVERFLOW 0x8000
|
|
|
|
/*
|
|
* Return the "current" portion of a ticket lock value,
|
|
* i.e. the number that currently owns the lock.
|
|
*/
|
|
static inline u32 arch_spin_current(u32 val)
|
|
{
|
|
return val >> __ARCH_SPIN_CURRENT_SHIFT;
|
|
}
|
|
|
|
/*
|
|
* Return the "next" portion of a ticket lock value,
|
|
* i.e. the number that the next task to try to acquire the lock will get.
|
|
*/
|
|
static inline u32 arch_spin_next(u32 val)
|
|
{
|
|
return val & __ARCH_SPIN_NEXT_MASK;
|
|
}
|
|
|
|
/* The lock is locked if a task would have to wait to get it. */
|
|
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
|
|
{
|
|
/* Use READ_ONCE() to ensure that calling this in a loop is OK. */
|
|
u32 val = READ_ONCE(lock->lock);
|
|
return arch_spin_current(val) != arch_spin_next(val);
|
|
}
|
|
|
|
/* Bump the current ticket so the next task owns the lock. */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	wmb(); /* guarantee anything modified under the lock is visible */
	/* Atomically advance the "current" field so the next waiter wins. */
	__insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
}
|
|
|
|
void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);
|
|
|
|
/* Grab the "next" ticket number and bump it atomically.
|
|
* If the current ticket is not ours, go to the slow path.
|
|
* We also take the slow path if the "next" value overflows.
|
|
*/
|
|
static inline void arch_spin_lock(arch_spinlock_t *lock)
|
|
{
|
|
u32 val = __insn_fetchadd4(&lock->lock, 1);
|
|
u32 ticket = val & (__ARCH_SPIN_NEXT_MASK | __ARCH_SPIN_NEXT_OVERFLOW);
|
|
if (unlikely(arch_spin_current(val) != ticket))
|
|
arch_spin_lock_slow(lock, ticket);
|
|
}
|
|
|
|
/* Try to get the lock, and return whether we succeeded. */
|
|
int arch_spin_trylock(arch_spinlock_t *lock);
|
|
|
|
/*
|
|
* Read-write spinlocks, allowing multiple readers
|
|
* but only one writer.
|
|
*
|
|
* We use fetchadd() for readers, and fetchor() with the sign bit
|
|
* for writers.
|
|
*/
|
|
|
|
#define __WRITE_LOCK_BIT (1 << 31)
|
|
|
|
static inline int arch_write_val_locked(int val)
{
	/*
	 * __WRITE_LOCK_BIT is the sign bit of the lock word, so a
	 * negative value means a writer holds (or is acquiring) it.
	 */
	return val < 0; /* Optimize "val & __WRITE_LOCK_BIT". */
}
|
|
|
|
extern void __read_lock_failed(arch_rwlock_t *rw);
|
|
|
|
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	/*
	 * Bump the reader count; the sign bit is the writer bit (see
	 * arch_write_val_locked()), so a negative result means a writer
	 * beat us and we must take the slow path.
	 */
	u32 old = __insn_fetchaddgez4(&rw->lock, 1);

	if (likely(!arch_write_val_locked(old)))
		return;
	__read_lock_failed(rw);
}
|
|
|
|
extern void __write_lock_failed(arch_rwlock_t *rw, u32 val);
|
|
|
|
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	/* Atomically set the writer (sign) bit and fetch the old value. */
	u32 old = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);

	/* Zero means no readers and no writer held the lock: we own it. */
	if (likely(old == 0))
		return;
	__write_lock_failed(rw, old);
}
|
|
|
|
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__insn_mf();	/* fence: make critical-section accesses visible first */
	__insn_fetchadd4(&rw->lock, -1);	/* drop our reader count */
}
|
|
|
|
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__insn_mf();	/* fence: make critical-section accesses visible first */
	/* Clear the whole word (writer bit included) in one atomic op. */
	__insn_exch4(&rw->lock, 0); /* Avoid waiting in the write buffer. */
}
|
|
|
|
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	/* Same atomic as arch_read_lock(), but fail instead of spinning. */
	u32 old = __insn_fetchaddgez4(&rw->lock, 1);

	return arch_write_val_locked(old) ? 0 : 1;
}
|
|
|
|
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	/* Speculatively set the writer bit, fetching the old value. */
	u32 old = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);

	if (likely(old == 0))
		return 1;	/* lock was free; it is now ours */

	/*
	 * Contended.  If readers (but no writer) held the lock, our
	 * fetchor set the writer bit spuriously: clear it again before
	 * reporting failure.  If a writer already held it, the bit was
	 * already set and must be left alone.
	 */
	if (!arch_write_val_locked(old))
		__insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
	return 0;
}
|
|
|
|
#endif /* _ASM_TILE_SPINLOCK_64_H */
|