commit 8478132a87

This reverts commit 4dd1837d75.
Moving the exports for assembly code into the assembly files breaks
KSYM trimming, but also breaks modversions.

While fixing the KSYM trimming is trivial, fixing modversions brings
us to a technically worse position than we had prior to the above
change:
- We end up with the prototype definitions divorced from everything
  else, which means that adding or removing assembly-level ksyms
  becomes more fragile (see the sketch of both approaches below):
  * when adding a new assembly ksyms export, a missed prototype in
    asm-prototypes.h results in a successful build if no module in
    the selected configuration makes use of the symbol.
  * when removing a ksyms export, asm-prototypes.h is easily
    forgotten; with armksyms.c, you get a build error if you forget
    to touch the file.
- We end up with the same number of include files and prototypes,
  they're just in a header file instead of a .c file with their
  exports.
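
The difference is easiest to see side by side. A minimal sketch of the
two approaches, using a hypothetical symbol name rather than any actual
exported routine:

/* armksyms.c style (before 4dd1837d75): prototype and export live
 * together in one C file, so removing the asm routine without
 * touching this file is an immediate build error. */
extern void __example_asm_helper(void);
EXPORT_SYMBOL(__example_asm_helper);

/* asm-prototypes.h style (after 4dd1837d75): the EXPORT_SYMBOL moves
 * into the .S file next to the code, and this header exists only so
 * genksyms can compute a modversions CRC.  A missed or stale
 * prototype here is silent until some module in the selected
 * configuration actually uses the symbol. */
extern void __example_asm_helper(void);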
As for lines of code, we don't get much of a size reduction:
  (original commit)
   47 files changed, 131 insertions(+), 208 deletions(-)
  (fix for ksyms trimming)
   7 files changed, 18 insertions(+), 5 deletions(-)
  (two fixes for modversions)
   1 file changed, 34 insertions(+)
   3 files changed, 7 insertions(+), 2 deletions(-)
which results in a net total of only 25 lines deleted: the original
commit removes 77 lines net, while the three follow-up fixes add back
13 + 34 + 5 = 52.
As there does not seem to be much benefit from this change of approach,
revert the change.
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
107 lines · 2.1 KiB · C
#include <asm/assembler.h>
#include <asm/unwind.h>

#if __LINUX_ARM_ARCH__ >= 6
        .macro  bitop, name, instr
ENTRY(  \name           )
UNWIND( .fnstart        )
        ands    ip, r1, #3
        strneb  r1, [ip]                @ assert word-aligned
        mov     r2, #1
        and     r3, r0, #31             @ Get bit offset
        mov     r0, r0, lsr #5
        add     r1, r1, r0, lsl #2      @ Get word offset
#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
        .arch_extension mp
        ALT_SMP(W(pldw) [r1])
        ALT_UP(W(nop))
#endif
        mov     r3, r2, lsl r3
1:      ldrex   r2, [r1]
        \instr  r2, r2, r3
        strex   r0, r2, [r1]
        cmp     r0, #0
        bne     1b
        bx      lr
UNWIND( .fnend          )
ENDPROC(\name           )
        .endm

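On ARMv6 and later, bitop is a load-exclusive/store-exclusive retry
loop: ldrex reads the word, \instr applies the caller's operation, and
strex fails, forcing a retry, if another agent touched the word in
between. A rough C11 analogue of what an instantiation such as set_bit
computes (a sketch for illustration, not the kernel implementation;
sketch_set_bit is an invented name):

#include <stdatomic.h>
#include <stdint.h>

static void sketch_set_bit(unsigned int nr, _Atomic uint32_t *addr)
{
        uint32_t mask = 1u << (nr & 31);        /* and r3, r0, #31 */

        addr += nr >> 5;                        /* add r1, r1, r0, lsl #2 */
        /* relaxed ordering: the bitop macro has no barriers */
        atomic_fetch_or_explicit(addr, mask, memory_order_relaxed);
}
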
        .macro  testop, name, instr, store
ENTRY(  \name           )
UNWIND( .fnstart        )
        ands    ip, r1, #3
        strneb  r1, [ip]                @ assert word-aligned
        mov     r2, #1
        and     r3, r0, #31             @ Get bit offset
        mov     r0, r0, lsr #5
        add     r1, r1, r0, lsl #2      @ Get word offset
        mov     r3, r2, lsl r3          @ create mask
        smp_dmb
#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
        .arch_extension mp
        ALT_SMP(W(pldw) [r1])
        ALT_UP(W(nop))
#endif
1:      ldrex   r2, [r1]
        ands    r0, r2, r3              @ save old value of bit
        \instr  r2, r2, r3              @ toggle bit
        strex   ip, r2, [r1]
        cmp     ip, #0
        bne     1b
        smp_dmb
        cmp     r0, #0
        movne   r0, #1
2:      bx      lr
UNWIND( .fnend          )
ENDPROC(\name           )
        .endm
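
The testop variant additionally returns the old state of the bit in r0
and wraps the exclusive loop in smp_dmb barriers, giving the
test_and_*_bit operations full-barrier semantics. A rough C11 analogue
for the test-and-set case (again a sketch with invented names, with
seq_cst standing in for the two barriers):

#include <stdatomic.h>
#include <stdint.h>

static int sketch_test_and_set_bit(unsigned int nr, _Atomic uint32_t *addr)
{
        uint32_t mask = 1u << (nr & 31);
        uint32_t old;

        addr += nr >> 5;
        old = atomic_fetch_or_explicit(addr, mask, memory_order_seq_cst);
        return (old & mask) != 0;               /* ands r0, r2, r3 */
}
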
#else
        .macro  bitop, name, instr
ENTRY(  \name           )
UNWIND( .fnstart        )
        ands    ip, r1, #3
        strneb  r1, [ip]                @ assert word-aligned
        and     r2, r0, #31
        mov     r0, r0, lsr #5
        mov     r3, #1
        mov     r3, r3, lsl r2
        save_and_disable_irqs ip
        ldr     r2, [r1, r0, lsl #2]
        \instr  r2, r2, r3
        str     r2, [r1, r0, lsl #2]
        restore_irqs ip
        ret     lr
UNWIND( .fnend          )
ENDPROC(\name           )
        .endm

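Pre-ARMv6 cores have no exclusive load/store, so this fallback bitop
makes the plain ldr/\instr/str sequence atomic by disabling interrupts
around it, which suffices on a uniprocessor. A kernel-context sketch of
the same idea (assumes <linux/irqflags.h>; the function name is
invented):

static void sketch_up_set_bit(unsigned int nr, unsigned long *addr)
{
        unsigned long flags;

        addr += nr >> 5;
        local_irq_save(flags);          /* save_and_disable_irqs ip */
        *addr |= 1UL << (nr & 31);      /* ldr / orr / str */
        local_irq_restore(flags);       /* restore_irqs ip */
}
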
/**
 * testop - implement a test_and_xxx_bit operation.
 * @instr: operational instruction
 * @store: store instruction
 *
 * Note: we can trivially conditionalise the store instruction
 * to avoid dirtying the data cache.
 */
        .macro  testop, name, instr, store
ENTRY(  \name           )
UNWIND( .fnstart        )
        ands    ip, r1, #3
        strneb  r1, [ip]                @ assert word-aligned
        and     r3, r0, #31
        mov     r0, r0, lsr #5
        save_and_disable_irqs ip
        ldr     r2, [r1, r0, lsl #2]!
        mov     r0, #1
        tst     r2, r0, lsl r3
        \instr  r2, r2, r0, lsl r3
        \store  r2, [r1]
        moveq   r0, #0
        restore_irqs ip
        ret     lr
UNWIND( .fnend          )
ENDPROC(\name           )
        .endm
#endif
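
These macros are not called directly; each bit operation lives in a
tiny .S file that instantiates them once. As an illustration (from
memory of the tree around this commit, so treat the exact contents as
approximate), arch/arm/lib/setbit.S amounts to:

#include <linux/linkage.h>
#include <asm/assembler.h>
#include "bitops.h"
                .text

bitop   _set_bit, orr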