commit b445e26cbf
In particular, avoid membar instructions in the delay slot of a jmpl instruction.

UltraSPARC-I, II, IIi, and IIe have a bug, documented in the UltraSPARC-IIi User's Manual, Appendix K, Erratum 51.

The long and short of it is that if the IMU unit misses on a branch or jmpl, and there is a store buffer synchronizing membar in the delay slot, the chip can stop fetching instructions. If interrupts are enabled or some other trap is enabled, the chip will unwedge itself, but performance will suffer.

We already had a workaround for this bug in a few spots, but it's better to have the entire tree sanitized for this rule.

Signed-off-by: David S. Miller <davem@davemloft.net>
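Concretely, the shape being avoided puts a store-buffer-synchronizing membar in the delay slot of a jmpl (retl assembles to jmpl %o7+8, %g0). The erratum-prone variant below is reconstructed for illustration only; the safe shape is the one the out: sequence in the file now uses:

	/* Erratum-prone: membar in the delay slot of the jmpl. */
	retl
	 membar	#StoreLoad | #StoreStore

	/* Safe: membar issued before the branch, harmless
	 * instruction in the delay slot. */
	membar	#StoreLoad | #StoreStore
	retl
	 mov	%g1, %o0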
/* $Id: dec_and_lock.S,v 1.5 2001/11/18 00:12:56 davem Exp $
 * dec_and_lock.S: Sparc64 version of "atomic_dec_and_lock()"
 *		   using cas and ldstub instructions.
 *
 * Copyright (C) 2000 David S. Miller (davem@redhat.com)
 */
#include <linux/config.h>
#include <asm/thread_info.h>

	.text
	.align	64

	/* CAS basically works like this:
	 *
	 * void CAS(MEM, REG1, REG2)
	 * {
	 *	START_ATOMIC();
	 *	if (*(MEM) == REG1) {
	 *		TMP = *(MEM);
	 *		*(MEM) = REG2;
	 *		REG2 = TMP;
	 *	} else
	 *		REG2 = *(MEM);
	 *	END_ATOMIC();
	 * }
	 */

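	/* In this routine cas acts as a conditional store: %g2 holds
	 * the counter value we read, %g7 the decremented value; after
	 * "cas [%o0], %g2, %g7", %g7 holds what was actually in memory,
	 * so %g7 != %g2 means another cpu changed the counter and the
	 * decrement must be retried.
	 */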
	.globl	_atomic_dec_and_lock
_atomic_dec_and_lock:	/* %o0 = counter, %o1 = lock */
loop1:	lduw	[%o0], %g2
	subcc	%g2, 1, %g7
	be,pn	%icc, start_to_zero	/* decrement would hit zero */
	 nop
nzero:	cas	[%o0], %g2, %g7
	cmp	%g2, %g7
	bne,pn	%icc, loop1		/* lost a race, reload and retry */
	 mov	0, %g1			/* delay slot: return value 0 */

out:
	/* Per the erratum note above: membar issued before the retl,
	 * never in its delay slot. */
	membar	#StoreLoad | #StoreStore
	retl
	 mov	%g1, %o0
start_to_zero:
#ifdef CONFIG_PREEMPT
	/* Bump the preempt count (%g6 is current_thread_info)
	 * before taking the spinlock. */
	ldsw	[%g6 + TI_PRE_COUNT], %g3
	add	%g3, 1, %g3
	stw	%g3, [%g6 + TI_PRE_COUNT]
#endif
to_zero:
	/* ldstub sets the lock byte to 0xff and returns the old value;
	 * non-zero means somebody else already holds the lock. */
	ldstub	[%o1], %g3
	membar	#StoreLoad | #StoreStore
	brnz,pn	%g3, spin_on_lock
	 nop
loop2:	cas	[%o0], %g2, %g7	/* ASSERT(g7 == 0) */
	cmp	%g2, %g7

	be,pt	%icc, out
	 mov	1, %g1			/* delay slot: return value 1 */
	/* The cas lost a race: re-check under the lock whether the
	 * counter still drops to zero. */
	lduw	[%o0], %g2
	subcc	%g2, 1, %g7
	be,pn	%icc, loop2
	 nop
	/* It no longer hits zero: release the lock and go back to
	 * the ordinary decrement path. */
	membar	#StoreStore | #LoadStore
	stb	%g0, [%o1]
#ifdef CONFIG_PREEMPT
	/* Lock released: undo the preempt count bump. */
	ldsw	[%g6 + TI_PRE_COUNT], %g3
	sub	%g3, 1, %g3
	stw	%g3, [%g6 + TI_PRE_COUNT]
#endif

	b,pt	%xcc, nzero
	 nop
spin_on_lock:
	/* Read-only spin until the lock byte clears, then retry the
	 * ldstub; the membar stays out of the branch's delay slot. */
	ldub	[%o1], %g3
	membar	#LoadLoad
	brnz,pt	%g3, spin_on_lock
	 nop
	ba,pt	%xcc, to_zero
	 nop
	nop
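For orientation, what the routine implements is roughly the following C. This is a sketch, not kernel code: byte_spinlock_t, byte_spin_lock, and atomic_dec_and_lock_sketch are illustrative names, the CONFIG_PREEMPT bookkeeping is omitted, and C11 seq_cst atomics stand in for the explicit membar ordering above.

	#include <stdatomic.h>

	/* Illustrative byte spinlock in the style of the ldstub lock above. */
	typedef struct { atomic_uchar locked; } byte_spinlock_t;

	static void byte_spin_lock(byte_spinlock_t *lock)
	{
		/* ldstub: atomically store 0xff and fetch the old value;
		 * non-zero means the lock was already held. */
		while (atomic_exchange(&lock->locked, 0xff) != 0)
			while (atomic_load(&lock->locked) != 0)
				;	/* spin_on_lock: read-only wait */
	}

	static void byte_spin_unlock(byte_spinlock_t *lock)
	{
		atomic_store(&lock->locked, 0);	/* stb %g0, [%o1] */
	}

	/* Decrement *counter; return 1 with the lock held iff it hit zero. */
	int atomic_dec_and_lock_sketch(atomic_uint *counter, byte_spinlock_t *lock)
	{
		unsigned int old = atomic_load(counter);

		for (;;) {
			if (old != 1) {
				/* nzero: common case, cas in the decrement
				 * without touching the lock. */
				if (atomic_compare_exchange_strong(counter, &old, old - 1))
					return 0;
				continue;	/* cas refreshed old; retry */
			}
			/* start_to_zero/to_zero: about to hit zero, take
			 * the lock before completing the decrement. */
			byte_spin_lock(lock);
			for (;;) {
				/* loop2: decrement under the lock. */
				if (atomic_compare_exchange_strong(counter, &old, old - 1))
					return 1;
				if (old != 1)	/* cas refreshed old */
					break;	/* no longer hits zero */
			}
			byte_spin_unlock(lock);
		}
	}

The asymmetry is the point of the routine: the common case decrements with a lone cas and never touches the lock; the lock is taken only when the counter is about to reach zero, so the expensive path runs at most once per object.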