mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-19 01:48:11 +07:00
b0d8003ef4
Mostly complete rewrite of the FRV atomic implementation, instead of using assembly files, use inline assembler. The out-of-line CONFIG option makes a bit of a mess of things, but a little CPP trickery gets that done too. FRV already had the atomic logic ops but under a non standard name, the reimplementation provides the generic names and provides the intermediate form required for the bitops implementation. The slightly inconsistent __atomic32_fetch_##op naming is because __atomic_fetch_##op conlicts with GCC builtin functions. The 64bit atomic ops use the inline assembly %Ln construct to access the low word register (r+1), afaik this construct was not previously used in the kernel and is completely undocumented, but I found it in the FRV GCC code and it seems to work. FRV had a non-standard definition of atomic_{clear,set}_mask() which would work types other than atomic_t, the one user relying on that (arch/frv/kernel/dma.c) got converted to use the new intermediate form. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
69 lines
1.9 KiB
ArmAsm
/* kernel atomic64 operations
 *
 * For an explanation of how atomic ops work in this arch, see:
 * Documentation/frv/atomic-ops.txt
 *
 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
|
#include <asm/spr-regs.h>

	.text
	.balign		4
|
###############################################################################
#
# uint64_t __xchg_64(uint64_t i, uint64_t *v)
#
# In:  gr8/gr9 = i (64-bit register pair: gr8 = high word, gr9 = low word)
#      gr10    = v (pointer to the 64-bit location)
# Out: gr8/gr9 = previous value of *v
#
# Atomically exchange *v with i and return the old contents of *v.
# Uses the FRV reservation idiom: the LDD.P/ORCR pair must execute
# atomically; if another store intervenes, the conditional CSTD does not
# retire, ICC3.Z stays set, and the sequence is retried.
#
###############################################################################
	.globl		__xchg_64
	.type		__xchg_64,@function
__xchg_64:
	or.p		gr8,gr8,gr4			/* save new value: gr4/gr5 = i */
	or		gr9,gr9,gr5
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3			/* set CC3 to true */
	cstd.p		gr4,@(gr10,gr0)		,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b			/* store did not happen: retry */
	bralr

	.size		__xchg_64, .-__xchg_64
|
###############################################################################
#
# uint64_t __cmpxchg_64(uint64_t test, uint64_t new, uint64_t *v)
#
# In:  gr8/gr9   = test, the expected value (gr8 = high word, gr9 = low word)
#      gr10/gr11 = new, the replacement value
#      gr12      = v (pointer to the 64-bit location)
# Out: gr8/gr9   = value of *v before the operation
#
# Atomically: if *v == test then store new into *v; in either case return
# the prior contents of *v (callers detect success by comparing the return
# value against test).  Uses the same LDD.P/ORCR reservation idiom as
# __xchg_64: a competing store prevents the conditional CSTD from retiring
# and the sequence is retried.
#
###############################################################################
	.globl		__cmpxchg_64
	.type		__cmpxchg_64,@function
__cmpxchg_64:
	or.p		gr8,gr8,gr4			/* save expected value: gr4/gr5 = test */
	or		gr9,gr9,gr5
0:
	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
	ckeq		icc3,cc7
	ldd.p		@(gr12,gr0),gr8			/* LDD.P/ORCR must be atomic */
	orcr		cc7,cc7,cc3
	subcc		gr8,gr4,gr0,icc0		/* compare high words */
	subcc.p		gr9,gr5,gr0,icc1		/* compare low words */
	bnelr		icc0,#0				/* mismatch: return current *v */
	bnelr		icc1,#0
	cstd.p		gr10,@(gr12,gr0)	,cc3,#1
	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
	beq		icc3,#0,0b			/* store did not happen: retry */
	bralr

	.size		__cmpxchg_64, .-__cmpxchg_64