mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
commit 4716e488ab
Commit 350779a29f ("powerpc: Handle most loads and stores in
instruction emulation code", 2017-08-30) changed the register usage
in get_vr and put_vr with the aim of leaving the register number in
r3 untouched on return. Unfortunately, r6 was not a good choice, as
the callers as of 350779a29f store an MSR value in r6. Then, in commit
c22435a5f3 ("powerpc: Emulate FP/vector/VSX loads/stores correctly
when regs not live", 2017-08-30), the saving and restoring of the MSR
got moved into get_vr and put_vr. Either way, the effect is that we
put a value in MSR that only has the 0x3f8 bits non-zero, meaning that
we are switching to 32-bit mode. That leads to a crash like this:

Unable to handle kernel paging request for instruction fetch
Faulting instruction address: 0x0007bea0
Oops: Kernel access of bad area, sig: 11 [#12]
LE SMP NR_CPUS=2048 NUMA PowerNV
Modules linked in: vmx_crypto binfmt_misc ip_tables x_tables autofs4 crc32c_vpmsum
CPU: 6 PID: 32659 Comm: trashy_testcase Tainted: G D 4.13.0-rc2-00313-gf3026f57e6ed-dirty #23
task: c000000f1bb9e780 task.stack: c000000f1ba98000
NIP: 000000000007bea0 LR: c00000000007b054 CTR: c00000000007be70
REGS: c000000f1ba9b960 TRAP: 0400 Tainted: G D (4.13.0-rc2-00313-gf3026f57e6ed-dirty)
MSR: 10000000400010a1 <HV,ME,IR,LE> CR: 48000228 XER: 00000000
CFAR: c00000000007be74 SOFTE: 1
GPR00: c00000000007b054 c000000f1ba9bbe0 c000000000e6e000 000000000000001d
GPR04: c000000f1ba9bc00 c00000000007be70 00000000000000e8 9000000002009033
GPR08: 0000000002000000 100000000282f033 000000000b0a0900 0000000000001009
GPR12: 0000000000000000 c00000000fd42100 0706050303020100 a5a5a5a5a5a5a5a5
GPR16: 2e2e2e2e2e2de70c 2e2e2e2e2e2e2e2d 0000000000ff00ff 0606040202020000
GPR20: 000000000000005b ffffffffffffffff 0000000003020100 0000000000000000
GPR24: c000000f1ab90020 c000000f1ba9bc00 0000000000000001 0000000000000001
GPR28: c000000f1ba9bc90 c000000f1ba9bea0 000000000b0a0908 0000000000000001
NIP [000000000007bea0] 0x7bea0
LR [c00000000007b054] emulate_loadstore+0x1044/0x1280
Call Trace:
[c000000f1ba9bbe0] [c000000000076b80] analyse_instr+0x60/0x34f0 (unreliable)
[c000000f1ba9bc70] [c00000000007b7ec] emulate_step+0x23c/0x544
[c000000f1ba9bce0] [c000000000053424] arch_uprobe_skip_sstep+0x24/0x40
[c000000f1ba9bd00] [c00000000024b2f8] uprobe_notify_resume+0x598/0xba0
[c000000f1ba9be00] [c00000000001c284] do_notify_resume+0xd4/0xf0
[c000000f1ba9be30] [c00000000000bd44] ret_from_except_lite+0x70/0x74
Instruction dump:
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
---[ end trace a7ae7a7f3e0256b5 ]---

To fix this, we just revert to using r3 as before, since the callers
don't rely on r3 being left unmodified.

Fortunately, this can't be triggered by a misaligned load or store,
because vector loads and stores truncate misaligned addresses rather
than taking an alignment interrupt. It can be triggered using uprobes.

Fixes: 350779a29f ("powerpc: Handle most loads and stores in instruction emulation code")
Reported-by: Anton Blanchard <anton@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Tested-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
245 lines · 3.9 KiB · ArmAsm
/*
 * Floating-point, VMX/Altivec and VSX loads and stores
 * for use in instruction emulation.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <linux/errno.h>

#ifdef CONFIG_PPC_FPU

/* Minimum stack frame plus a 16-byte scratch slot for saving vs0. */
#define STKFRM	(PPC_MIN_STKFRM + 16)

/* Get the contents of frN into *p; N is in r3 and p is in r4. */
_GLOBAL(get_fpr)
	mflr	r0
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)		/* enable FP so frN can be accessed */
	isync
	rlwinm	r3,r3,3,0xf8	/* r3 = N * 8, byte offset into the table */
	bcl	20,31,1f	/* branch to 1f; LR := start of the table */
reg = 0
	.rept	32
	stfd	reg, 0(r4)	/* 8-byte entry for frN: store it ... */
	b	2f		/* ... then skip out of the table */
reg = reg + 1
	.endr
1:	mflr	r5		/* r5 = table start (set by bcl above) */
	add	r5,r3,r5	/* add frN's byte offset */
	mtctr	r5
	mtlr	r0		/* restore original return address */
	bctr			/* dispatch to frN's entry */
2:	MTMSRD(r6)		/* restore original MSR */
	isync
	blr

/* Put the contents of *p into frN; N is in r3 and p is in r4. */
_GLOBAL(put_fpr)
	mflr	r0
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
reg = 0
	.rept	32
	lfd	reg, 0(r4)
	b	2f
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr

#ifdef CONFIG_ALTIVEC
/* Get the contents of vrN into *p; N is in r3 and p is in r4. */
_GLOBAL(get_vr)
	mflr	r0
	mfmsr	r6		/* MSR saved in r6; the table offset is
				 * computed in r3 below, so r6 survives
				 * (the subject of the fix above) */
	oris	r7, r6, MSR_VEC@h
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
reg = 0
	.rept	32
	stvx	reg, 0, r4
	b	2f
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr

/* Put the contents of *p into vrN; N is in r3 and p is in r4. */
_GLOBAL(put_vr)
	mflr	r0
	mfmsr	r6
	oris	r7, r6, MSR_VEC@h
	MTMSRD(r7)
	isync
	rlwinm	r3,r3,3,0xf8
	bcl	20,31,1f
reg = 0
	.rept	32
	lvx	reg, 0, r4
	b	2f
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr
2:	MTMSRD(r6)
	isync
	blr
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/* Get the contents of vsN into vs0; N is in r3. */
_GLOBAL(get_vsr)
	mflr	r0
	rlwinm	r3,r3,3,0x1f8	/* N * 8; VSX has 64 regs, hence 0x1f8 */
	bcl	20,31,1f
	blr			/* vs0 is already in vs0 */
	nop
reg = 1
	.rept	63
	XXLOR(0,reg,reg)	/* copy vsN into vs0 */
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Put the contents of vs0 into vsN; N is in r3. */
_GLOBAL(put_vsr)
	mflr	r0
	rlwinm	r3,r3,3,0x1f8
	bcl	20,31,1f
	blr			/* vs0 is already in vs0 */
	nop
reg = 1
	.rept	63
	XXLOR(reg,0,0)		/* copy vs0 into vsN */
	blr
reg = reg + 1
	.endr
1:	mflr	r5
	add	r5,r3,r5
	mtctr	r5
	mtlr	r0
	bctr

/* Load VSX reg N from vector doubleword *p. N is in r3, p in r4. */
_GLOBAL(load_vsrn)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VSX@h
	cmpwi	cr7,r3,0
	li	r8,STKFRM-16
	MTMSRD(r7)
	isync
	beq	cr7,1f
	STXVD2X(0,R1,R8)	/* N != 0: save vs0 in the scratch slot */
1:	LXVD2X(0,R0,R4)		/* load *p into vs0 */
#ifdef __LITTLE_ENDIAN__
	XXSWAPD(0,0)		/* fix doubleword order after LXVD2X on LE */
#endif
	beq	cr7,4f
	bl	put_vsr		/* copy vs0 into vsN */
	LXVD2X(0,R1,R8)		/* restore the saved vs0 */
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	MTMSRD(r6)
	isync
	addi	r1,r1,STKFRM
	blr

/* Store VSX reg N to vector doubleword *p. N is in r3, p in r4. */
_GLOBAL(store_vsrn)
	PPC_STLU r1,-STKFRM(r1)
	mflr	r0
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mfmsr	r6
	oris	r7,r6,MSR_VSX@h
	li	r8,STKFRM-16
	MTMSRD(r7)
	isync
	STXVD2X(0,R1,R8)	/* save vs0 in the scratch slot */
	bl	get_vsr		/* copy vsN into vs0 */
#ifdef __LITTLE_ENDIAN__
	XXSWAPD(0,0)		/* fix doubleword order before STXVD2X on LE */
#endif
	STXVD2X(0,R0,R4)	/* store vs0 to *p */
	LXVD2X(0,R1,R8)		/* restore the saved vs0 */
	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
	mtlr	r0
	MTMSRD(r6)
	isync
	mr	r3,r9
	addi	r1,r1,STKFRM
	blr
#endif /* CONFIG_VSX */

/* Convert single-precision to double, without disturbing FPRs. */
/* conv_sp_to_dp(float *sp, double *dp) */
_GLOBAL(conv_sp_to_dp)
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	stfd	fr0, -16(r1)	/* save fr0 */
	lfs	fr0, 0(r3)	/* lfs widens to double on load */
	stfd	fr0, 0(r4)
	lfd	fr0, -16(r1)	/* restore fr0 */
	MTMSRD(r6)
	isync
	blr

/* Convert double-precision to single, without disturbing FPRs. */
/* conv_dp_to_sp(double *dp, float *sp) */
_GLOBAL(conv_dp_to_sp)
	mfmsr	r6
	ori	r7, r6, MSR_FP
	MTMSRD(r7)
	isync
	stfd	fr0, -16(r1)	/* save fr0 */
	lfd	fr0, 0(r3)
	stfs	fr0, 0(r4)	/* stfs rounds to single on store */
	lfd	fr0, -16(r1)	/* restore fr0 */
	MTMSRD(r6)
	isync
	blr

#endif	/* CONFIG_PPC_FPU */