mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-21 21:11:47 +07:00
cfd8983f03
We've unconditionally used the queued spinlock for many releases now. Its time to remove the old ticket lock code. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Waiman Long <waiman.long@hpe.com> Cc: Waiman.Long@hpe.com Cc: david.vrabel@citrix.com Cc: dhowells@redhat.com Cc: pbonzini@redhat.com Cc: xen-devel@lists.xenproject.org Link: http://lkml.kernel.org/r/20160518184302.GO3193@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar <mingo@kernel.org>
36 lines
925 B
C
/*
|
|
* Split spinlock implementation out into its own file, so it can be
|
|
* compiled in a FTRACE-compatible way.
|
|
*/
|
|
#include <linux/spinlock.h>
|
|
#include <linux/export.h>
|
|
#include <linux/jump_label.h>
|
|
|
|
#include <asm/paravirt.h>
|
|
|
|
/*
 * Out-of-line wrapper around native_queued_spin_unlock() so the native
 * unlock path can be installed as the .queued_spin_unlock pv op below.
 * __visible keeps the symbol available to the assembly thunk generated
 * by PV_CALLEE_SAVE_REGS_THUNK().
 */
__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}

/*
 * Generates __raw_callee_save___native_queued_spin_unlock (the symbol
 * compared against in pv_is_native_spin_unlock()) — a thunk variant of
 * the wrapper that follows the paravirt callee-save calling convention.
 */
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
|
bool pv_is_native_spin_unlock(void)
|
|
{
|
|
return pv_lock_ops.queued_spin_unlock.func ==
|
|
__raw_callee_save___native_queued_spin_unlock;
|
|
}
|
|
|
|
/*
 * Default paravirt spinlock ops: plain native qspinlock behaviour.
 * NOTE(review): presumably a hypervisor guest (e.g. Xen/KVM, per the
 * commit Cc list) overrides these at boot — confirm against the
 * pv_lock_ops users; nothing in this file patches them.
 */
struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
	/* wait/kick have no work to do natively; stubbed with paravirt_nop */
	.wait = paravirt_nop,
	.kick = paravirt_nop,
#endif /* SMP */
};
EXPORT_SYMBOL(pv_lock_ops);
|
/*
 * Static key, defaulting to false (disabled).  Exported for other
 * translation units; nothing in this file flips it — presumably set by
 * paravirt lock setup code elsewhere (TODO confirm at its users).
 */
struct static_key paravirt_ticketlocks_enabled = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL(paravirt_ticketlocks_enabled);