Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-11-25 14:40:53 +07:00
59aabfc7e9
In up_write()/up_read(), rwsem_wake() will be called whenever it detects that some writers/readers are waiting. The rwsem_wake() function will take the wait_lock and call __rwsem_do_wake() to do the real wakeup. For a heavily contended rwsem, doing a spin_lock() on wait_lock will cause further contention on the heavily contended rwsem cacheline resulting in delay in the completion of the up_read/up_write operations.

This patch makes the wait_lock taking and the call to __rwsem_do_wake() optional if at least one spinning writer is present. The spinning writer will be able to take the rwsem and call rwsem_wake() later when it calls up_write(). With the presence of a spinning writer, rwsem_wake() will now try to acquire the lock using trylock. If that fails, it will just quit.

Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Acked-by: Jason Low <jason.low2@hp.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1430428337-16802-2-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
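The wakeup fast path described above can be sketched as follows. This is a reconstruction from the commit message, not the verbatim patch: the rwsem_has_spinner() helper and the rwsem internals it touches (sem->osq, sem->wait_lock, sem->wait_list, __rwsem_do_wake()) are assumed from the rwsem implementation of that era.

/*
 * Hedged sketch of the described optimization: when an optimistic spinner
 * is queued on the rwsem's OSQ, rwsem_wake() only attempts the wakeup if
 * the wait_lock can be taken without spinning; otherwise it bails out and
 * lets the spinner perform the wakeup from its own up_write() later.
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return osq_is_locked(&sem->osq);
}

struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	if (rwsem_has_spinner(sem)) {
		/* Order the spinner check against the wait_lock access. */
		smp_rmb();
		if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
			return sem;	/* spinner will do the wakeup */
		goto locked;
	}
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	return sem;
}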
41 lines
1.0 KiB
C
#ifndef __LINUX_OSQ_LOCK_H
#define __LINUX_OSQ_LOCK_H

/*
 * An MCS like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc).
 */
struct optimistic_spin_node {
	struct optimistic_spin_node *next, *prev;
	int locked; /* 1 if lock acquired */
	int cpu; /* encoded CPU # + 1 value */
};

struct optimistic_spin_queue {
	/*
	 * Stores an encoded value of the CPU # of the tail node in the queue.
	 * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
	 */
	atomic_t tail;
};

#define OSQ_UNLOCKED_VAL (0)

/* Init macro and function. */
#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }

static inline void osq_lock_init(struct optimistic_spin_queue *lock)
{
	atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
}

extern bool osq_lock(struct optimistic_spin_queue *lock);
extern void osq_unlock(struct optimistic_spin_queue *lock);

static inline bool osq_is_locked(struct optimistic_spin_queue *lock)
{
	return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL;
}

#endif
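For context, a short usage sketch of the OSQ API declared above: a sleeping-lock slow path takes the per-lock OSQ before polling the lock owner, so that only the queue head spins on the lock word at any time. The my_sleeping_lock type, its owner field, and the spin helper are illustrative assumptions, not kernel code.

/*
 * Illustrative only: how a sleeping lock might serialize its optimistic
 * spinners through the OSQ. Everything named my_* is hypothetical.
 */
struct my_sleeping_lock {
	atomic_long_t			owner;	/* 0 when unlocked (assumed) */
	struct optimistic_spin_queue	osq;	/* gates optimistic spinners */
};

static bool my_lock_optimistic_spin(struct my_sleeping_lock *lock)
{
	bool taken = false;

	/* Queue up MCS-style; osq_lock() returns false if we must not spin. */
	if (!osq_lock(&lock->osq))
		return false;

	/*
	 * Only the OSQ head reaches this loop, so the owner cacheline is
	 * hammered by one CPU instead of every waiter at once.
	 */
	while (!need_resched()) {
		if (atomic_long_cmpxchg(&lock->owner, 0L,
					(long)current) == 0L) {
			taken = true;
			break;
		}
		cpu_relax();
	}

	osq_unlock(&lock->osq);
	return taken;
}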