/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_QSPINLOCK_PARAVIRT_H
#define __ASM_QSPINLOCK_PARAVIRT_H

/*
 * For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
 * registers. For i386, however, only 1 32-bit register needs to be saved
 * and restored. So an optimized version of __pv_queued_spin_unlock() is
 * hand-coded for 64-bit, but it isn't worthwhile to do it for 32-bit.
 */
#ifdef CONFIG_64BIT

PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath);
#define __pv_queued_spin_unlock	__pv_queued_spin_unlock
#define PV_UNLOCK		"__raw_callee_save___pv_queued_spin_unlock"
#define PV_UNLOCK_SLOWPATH	"__raw_callee_save___pv_queued_spin_unlock_slowpath"

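/*
 * Note: PV_CALLEE_SAVE_REGS_THUNK() (from the x86 paravirt headers)
 * emits a wrapper named __raw_callee_save_<func> that preserves all
 * caller-clobbered registers around the call, which is why the
 * PV_UNLOCK* strings above carry the __raw_callee_save_ prefix. The
 * hand-written assembly below provides that wrapper directly for
 * __pv_queued_spin_unlock rather than generating it from the macro.
 */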
/*
 * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
 * which combines the register-saving thunk and the body of the following
 * C code:
 *
 * void __pv_queued_spin_unlock(struct qspinlock *lock)
 * {
 *	u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
 *
 *	if (likely(lockval == _Q_LOCKED_VAL))
 *		return;
 *	pv_queued_spin_unlock_slowpath(lock, lockval);
 * }
 *
 * For x86-64,
 *	rdi = lock              (first argument)
 *	rsi = lockval           (second argument)
 *	rdx = internal variable (set to 0)
 */
asm    (".pushsection .text;"
	".globl " PV_UNLOCK ";"
	".type " PV_UNLOCK ", @function;"
	".align 4,0x90;"
	PV_UNLOCK ": "
	FRAME_BEGIN
	"push  %rdx;"
	"mov   $0x1,%eax;"
	"xor   %edx,%edx;"
	"lock cmpxchg %dl,(%rdi);"
	"cmp   $0x1,%al;"
	"jne   .slowpath;"
	"pop   %rdx;"
	FRAME_END
	"ret;"
	".slowpath: "
	"push   %rsi;"
	"movzbl %al,%esi;"
	"call " PV_UNLOCK_SLOWPATH ";"
	"pop    %rsi;"
	"pop    %rdx;"
	FRAME_END
	"ret;"
	".size " PV_UNLOCK ", .-" PV_UNLOCK ";"
	".popsection");

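/*
 * Note on the register handling above: callers reach this code through
 * the callee-save PV_UNLOCK entry point, so every register the body
 * clobbers (other than %rax, the return register) must be preserved by
 * hand. The fast path clobbers only %eax (expected value / cmpxchg
 * result) and %edx (the zero source for cmpxchg), so only %rdx is
 * pushed up front; %rsi is saved and restored only on the slow path,
 * where it carries the lockval argument for the slowpath call.
 */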
#else /* CONFIG_64BIT */

extern void __pv_queued_spin_unlock(struct qspinlock *lock);
PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock);

#endif /* CONFIG_64BIT */
#endif
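/*
 * For reference, a minimal sketch of what the generic C unlock used by
 * the 32-bit (#else) build roughly looks like. It simply mirrors the C
 * body quoted in the comment above; the real implementation lives in
 * the kernel's paravirt qspinlock code and may differ in detail (e.g.
 * the memory-ordering flavour of the cmpxchg). Fast path: release an
 * uncontended lock with a single cmpxchg. Slow path: the lock byte was
 * changed by a halted waiter, so hand off to the slowpath to kick it.
 */
void __pv_queued_spin_unlock(struct qspinlock *lock)
{
	u8 lockval;

	/* Try to clear the lock byte iff it still holds _Q_LOCKED_VAL. */
	lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
	if (likely(lockval == _Q_LOCKED_VAL))
		return;

	/* Not the uncontended value: let the slowpath wake the waiter. */
	__pv_queued_spin_unlock_slowpath(lock, lockval);
}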