commit 8b3c569a39
Commit 1400eb6
(MIPS: r4k,octeon,r2300: stack protector: change canary
per task) was merged in v3.11 and introduced assembly in the MIPS resume
functions to update the value of the current canary in
__stack_chk_guard. However, it used PTR_L, which loads the canary's
value, instead of PTR_LA, which constructs its address. The value is
intended to be random, but is then treated as an address by the
subsequent LONG_S (store).
This was observed to cause a fault and panic:
CPU 0 Unable to handle kernel paging request at virtual address 139fea20, epc == 8000cc0c, ra == 8034f2a4
Oops[#1]:
...
$24 : 139fea20 1e1f7cb6
...
Call Trace:
[<8000cc0c>] resume+0xac/0x118
[<8034f2a4>] __schedule+0x5f8/0x78c
[<8034f4e0>] schedule_preempt_disabled+0x20/0x2c
[<80348eec>] rest_init+0x74/0x84
[<804dc990>] start_kernel+0x43c/0x454
Code: 3c18804b 8f184030 8cb901f8 <af190000> 00c0e021 8cb002f0 8cb102f4 8cb202f8 8cb302fc
This can also be forced by modifying
arch/mips/include/asm/stackprotector.h so that the default
__stack_chk_guard value is more likely to be a bad (or unaligned)
pointer.
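For illustration, a debug-only tweak along the following lines makes the
fault deterministic. This is only a sketch: it assumes boot_init_stack_canary()
follows the usual pattern copied from other architectures, and the
"canary |= 1UL" line is the hypothetical modification that guarantees an
unaligned value:

    /* arch/mips/include/asm/stackprotector.h, debug sketch only */
    static __always_inline void boot_init_stack_canary(void)
    {
            unsigned long canary;

            /* Try to get a semi random initial value. */
            get_random_bytes(&canary, sizeof(canary));
            canary ^= LINUX_VERSION_CODE;

            /* Hypothetical debug tweak: force bit 0 so the buggy LONG_S,
             * which treats this value as a store address, always takes an
             * address error exception. */
            canary |= 1UL;

            current->stack_canary = canary;
            __stack_chk_guard = current->stack_canary;
    }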
Fix it to use PTR_LA instead, which loads the address of the canary, so
that the LONG_S can store the new value through it.
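The change as applied to r2300_switch.S (the r4k and octeon resume
implementations named in 1400eb6 get the same one-instruction fix):

     #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
    -	PTR_L	t8, __stack_chk_guard
    +	PTR_LA	t8, __stack_chk_guard
     	LONG_L	t9, TASK_STACK_CANARY(a1)
     	LONG_S	t9, 0(t8)
     #endif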
Reported-by: bobjones (via #mipslinux on IRC)
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Gregory Fong <gregory.0xf0@gmail.com>
Cc: linux-mips@linux-mips.org
Cc: stable@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/6026/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
arch/mips/kernel/r2300_switch.S (168 lines, 3.3 KiB, ArmAsm)
/*
 * r2300_switch.S: R2300 specific task switching code.
 *
 * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 by Andreas Busse
 *
 * Multi-cpu abstraction and macros for easier reading:
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 *
 * Further modifications to make this work:
 * Copyright (c) 1998-2000 Harald Koerfgen
 */
#include <asm/asm.h>
#include <asm/cachectl.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>

#include <asm/asmmacro.h>

	.set	mips1
	.align	5

/*
 * Offset to the current process status flags, the first 32 bytes of the
 * stack are not used.
 */
#define ST_OFF	(_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
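/*
 * i.e. the saved CP0 Status slot of the pt_regs frame that sits at the
 * top of the kernel stack, just below the 32 unused bytes.
 */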

/*
 * FPU context is saved iff the process has used its FPU in the current
 * time slice as indicated by TIF_USEDFPU. In any case, the CU1 bit of the
 * user space STATUS register should be 0, so that a process *always* starts
 * its userland with the FPU disabled after each context switch.
 *
 * FPU will be enabled as soon as the process accesses the FPU again,
 * through the do_cpu() trap.
 */

/*
 * task_struct *resume(task_struct *prev, task_struct *next,
 *		       struct thread_info *next_ti, int usedfpu)
 */
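/* o32 argument registers: a0 = prev, a1 = next, a2 = next_ti, a3 = usedfpu */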
LEAF(resume)
	mfc0	t1, CP0_STATUS
	sw	t1, THREAD_STATUS(a0)
	cpu_save_nonscratch a0
	sw	ra, THREAD_REG31(a0)

	beqz	a3, 1f

	PTR_L	t3, TASK_THREAD_INFO(a0)

	/*
	 * clear saved user stack CU1 bit
	 */
	lw	t0, ST_OFF(t3)
	li	t1, ~ST0_CU1
	and	t0, t0, t1
	sw	t0, ST_OFF(t3)

	fpu_save_single a0, t0			# clobbers t0

1:

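	/*
	 * Switch the stack-protector canary: PTR_LA yields the address of
	 * __stack_chk_guard (the bug fixed by this commit used PTR_L and
	 * fetched its value instead), LONG_L picks up the incoming task's
	 * canary from its task_struct, and LONG_S stores it through t8.
	 */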
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	PTR_LA	t8, __stack_chk_guard
	LONG_L	t9, TASK_STACK_CANARY(a1)
	LONG_S	t9, 0(t8)
#endif

	/*
	 * The order in which the registers are restored takes care of
	 * the race: $28, $29 and kernelsp are updated without disabling
	 * interrupts.
	 */
	move	$28, a2
	cpu_restore_nonscratch a1

	addiu	t1, $28, _THREAD_SIZE - 32
	sw	t1, kernelsp

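	/*
	 * Merge CP0 Status: keep the live interrupt bits (IM7..IM0 and IE,
	 * mask 0xff01) of the current value, and take all other bits from
	 * the new thread's saved Status.
	 */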
	mfc0	t1, CP0_STATUS		/* Do we really need this? */
	li	a3, 0xff01
	and	t1, a3
	lw	a2, THREAD_STATUS(a1)
	nor	a3, $0, a3
	and	a2, a3
	or	a2, t1
	mtc0	a2, CP0_STATUS
	move	v0, a0
	jr	ra
	END(resume)

/*
 * Save a thread's fp context.
 */
LEAF(_save_fp)
	fpu_save_single a0, t1			# clobbers t1
	jr	ra
	END(_save_fp)

/*
 * Restore a thread's fp context.
 */
LEAF(_restore_fp)
	fpu_restore_single a0, t1		# clobbers t1
	jr	ra
	END(_restore_fp)

/*
 * Load the FPU with signalling NaNs.  The bit pattern we use has the
 * property that it represents a signalling NaN whether interpreted as
 * single or as double precision.
 *
 * We initialize fcr31 to rounding to nearest, no exceptions.
 */

#define FPU_DEFAULT  0x00000000

LEAF(_init_fpu)
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU1
	or	t0, t1
	mtc0	t0, CP0_STATUS

	li	t1, FPU_DEFAULT
	ctc1	t1, fcr31

	li	t0, -1
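	/*
	 * All-ones is that bit pattern: with the legacy MIPS NaN encoding,
	 * a set most-significant fraction bit marks a signalling NaN, and
	 * that bit is set here whether the register contents are read as
	 * single or as double precision.
	 */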

	mtc1	t0, $f0
	mtc1	t0, $f1
	mtc1	t0, $f2
	mtc1	t0, $f3
	mtc1	t0, $f4
	mtc1	t0, $f5
	mtc1	t0, $f6
	mtc1	t0, $f7
	mtc1	t0, $f8
	mtc1	t0, $f9
	mtc1	t0, $f10
	mtc1	t0, $f11
	mtc1	t0, $f12
	mtc1	t0, $f13
	mtc1	t0, $f14
	mtc1	t0, $f15
	mtc1	t0, $f16
	mtc1	t0, $f17
	mtc1	t0, $f18
	mtc1	t0, $f19
	mtc1	t0, $f20
	mtc1	t0, $f21
	mtc1	t0, $f22
	mtc1	t0, $f23
	mtc1	t0, $f24
	mtc1	t0, $f25
	mtc1	t0, $f26
	mtc1	t0, $f27
	mtc1	t0, $f28
	mtc1	t0, $f29
	mtc1	t0, $f30
	mtc1	t0, $f31
	jr	ra
	END(_init_fpu)