/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __HEAD_32_H__
#define __HEAD_32_H__

#include <asm/ptrace.h>	/* for STACK_FRAME_REGS_MARKER */

/*
 * Exception entry code. This code runs with address translation
 * turned off, i.e. using physical addresses.
 * We assume sprg3 has the physical address of the current
 * task's thread_struct.
 */
.macro EXCEPTION_PROLOG handle_dar_dsisr=0
	EXCEPTION_PROLOG_0	handle_dar_dsisr=\handle_dar_dsisr
	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2	handle_dar_dsisr=\handle_dar_dsisr
.endm

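/*
 * Note on CONFIG_VMAP_STACK (added summary): with a virtually mapped
 * stack, storing to the stack can itself fault, and such a fault
 * clobbers SRR0/SRR1.  EXCEPTION_PROLOG_0 therefore first stashes
 * SRR0/SRR1 (and DAR/DSISR when handle_dar_dsisr is set) in the
 * thread_struct via SPRN_SPRG_THREAD; EXCEPTION_PROLOG_2 copies them
 * into the exception frame later.
 */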
.macro EXCEPTION_PROLOG_0 handle_dar_dsisr=0
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
#ifdef CONFIG_VMAP_STACK
	mfspr	r10, SPRN_SPRG_THREAD
	.if	\handle_dar_dsisr
	mfspr	r11, SPRN_DAR
	stw	r11, DAR(r10)
	mfspr	r11, SPRN_DSISR
	stw	r11, DSISR(r10)
	.endif
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
#endif
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
#ifdef CONFIG_VMAP_STACK
	stw	r11, SRR1(r10)
#endif
	mfcr	r10
	andi.	r11, r11, MSR_PR
.endm

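/*
 * EXCEPTION_PROLOG_1 selects the stack to use: the current stack when
 * the exception came from kernel mode (cr0.eq set by the andi. above),
 * or the task's kernel stack, found via the thread_struct, when it
 * came from user mode.
 */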
.macro EXCEPTION_PROLOG_1 for_rtas=0
#ifdef CONFIG_VMAP_STACK
	mr	r11, r1
	subi	r1, r1, INT_FRAME_SIZE		/* use r1 if kernel */
	beq	1f
	mfspr	r1,SPRN_SPRG_THREAD
	lwz	r1,TASK_STACK-THREAD(r1)
	addi	r1, r1, THREAD_SIZE - INT_FRAME_SIZE
#else
	subi	r11, r1, INT_FRAME_SIZE		/* use r1 if kernel */
	beq	1f
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,TASK_STACK-THREAD(r11)
	addi	r11, r11, THREAD_SIZE - INT_FRAME_SIZE
#endif
1:
	tophys_novmstack r11, r11
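	/*
	 * Early stack overflow detection (see the VMAP_STACK stack
	 * overflow changes): check the stack pointer before anything is
	 * written to the stack.  If r1 has strayed out of its
	 * THREAD_ALIGN-aligned area it points into an adjacent page,
	 * i.e. the stack has overflowed, so branch to the dedicated
	 * stack_overflow path instead of faulting recursively.
	 */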
#ifdef CONFIG_VMAP_STACK
	mtcrf	0x7f, r1
	bt	32 - THREAD_ALIGN_SHIFT, stack_overflow
#endif
.endm

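/*
 * EXCEPTION_PROLOG_2 completes the exception frame: it saves the
 * remaining volatile registers, retrieves SRR0/SRR1 (and DAR/DSISR
 * when handle_dar_dsisr is set) from the thread_struct under
 * CONFIG_VMAP_STACK or from the SPRs otherwise, and switches the MSR
 * so that further exceptions can be taken.
 */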
.macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0
#ifdef CONFIG_VMAP_STACK
	mtcr	r10
	li	r10, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
	mtmsr	r10
	isync
#else
	stw	r10,_CCR(r11)		/* save registers */
#endif
	mfspr	r10, SPRN_SPRG_SCRATCH0
#ifdef CONFIG_VMAP_STACK
	stw	r11,GPR1(r1)
	stw	r11,0(r1)
	mr	r11, r1
#else
	stw	r1,GPR1(r11)
	stw	r1,0(r11)
	tovirt(r1, r11)		/* set new kernel sp */
#endif
	stw	r12,GPR12(r11)
	stw	r9,GPR9(r11)
	stw	r10,GPR10(r11)
#ifdef CONFIG_VMAP_STACK
	mfcr	r10
	stw	r10, _CCR(r11)
#endif
	mfspr	r12,SPRN_SPRG_SCRATCH1
	stw	r12,GPR11(r11)
	mflr	r10
	stw	r10,_LINK(r11)
#ifdef CONFIG_VMAP_STACK
	mfspr	r12, SPRN_SPRG_THREAD
	tovirt(r12, r12)
	.if	\handle_dar_dsisr
	lwz	r10, DAR(r12)
	stw	r10, _DAR(r11)
	lwz	r10, DSISR(r12)
	stw	r10, _DSISR(r11)
	.endif
	lwz	r9, SRR1(r12)
	andi.	r10, r9, MSR_PR
	lwz	r12, SRR0(r12)
#else
	mfspr	r12,SPRN_SRR0
	mfspr	r9,SPRN_SRR1
#endif
#ifdef CONFIG_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#else
#ifdef CONFIG_VMAP_STACK
	li	r10, MSR_KERNEL & ~MSR_IR /* can take exceptions */
#else
	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR) /* can take exceptions */
#endif
	mtmsr	r10			/* (except for mach check in rtas) */
#endif
	stw	r0,GPR0(r11)
	lis	r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	addi	r10,r10,STACK_FRAME_REGS_MARKER@l
	stw	r10,8(r11)
	SAVE_4GPRS(3, r11)
	SAVE_2GPRS(7, r11)
.endm

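/*
 * System call entry.  Builds a pt_regs frame on the task's kernel
 * stack and transfers to transfer_to_syscall with the MMU re-enabled
 * by the final RFI.  Syscall-style traps coming from kernel mode
 * (MSR_PR clear) are redirected to ret_from_kernel_syscall via the
 * 99: label.
 */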
.macro SYSCALL_ENTRY trapno
	mfspr	r12,SPRN_SPRG_THREAD
	mfspr	r9, SPRN_SRR1
#ifdef CONFIG_VMAP_STACK
	mfspr	r11, SPRN_SRR0
	mtctr	r11
#endif
	andi.	r11, r9, MSR_PR
	lwz	r11,TASK_STACK-THREAD(r12)
	beq-	99f
	addi	r11, r11, THREAD_SIZE - INT_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	li	r10, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
	mtmsr	r10
	isync
#endif
	tovirt_vmstack r12, r12
	tophys_novmstack r11, r11
	mflr	r10
	stw	r10, _LINK(r11)
#ifdef CONFIG_VMAP_STACK
	mfctr	r10
#else
	mfspr	r10,SPRN_SRR0
#endif
	stw	r1,GPR1(r11)
	stw	r1,0(r11)
	tovirt_novmstack r1, r11	/* set new kernel sp */
	stw	r10,_NIP(r11)
	mfcr	r10
	rlwinm	r10,r10,0,4,2	/* Clear SO bit in CR */
	stw	r10,_CCR(r11)		/* save registers */
#ifdef CONFIG_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#else
#ifdef CONFIG_VMAP_STACK
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~MSR_IR) /* can take exceptions */
#else
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */
#endif
	mtmsr	r10			/* (except for mach check in rtas) */
#endif
	lis	r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	stw	r2,GPR2(r11)
	addi	r10,r10,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r11)
	li	r2, \trapno + 1
	stw	r10,8(r11)
	stw	r2,_TRAP(r11)
	SAVE_GPR(0, r11)
	SAVE_4GPRS(3, r11)
	SAVE_2GPRS(7, r11)
	addi	r11,r1,STACK_FRAME_OVERHEAD
	addi	r2,r12,-THREAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
#endif
	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
#if defined(CONFIG_40x)
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif

3:
	tovirt_novmstack r2, r2		/* set r2 to current */
	lis	r11, transfer_to_syscall@h
	ori	r11, r11, transfer_to_syscall@l
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * If MSR is changing we need to keep interrupts disabled at this point
	 * otherwise we might risk taking an interrupt before we tell lockdep
	 * they are enabled.
	 */
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL)
	rlwimi	r10, r9, 0, MSR_EE
#else
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
#endif
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	SYNC
	RFI				/* jump to handler, enable MMU */
99:	b	ret_from_kernel_syscall
.endm

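/*
 * Helpers for DAR/DSISR.  With CONFIG_VMAP_STACK the prolog has
 * already copied DAR/DSISR into the exception frame, so they are read
 * back from the stack; otherwise they are read from the SPRs and
 * saved to the stack here.
 */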
.macro save_dar_dsisr_on_stack reg1, reg2, sp
#ifndef CONFIG_VMAP_STACK
	mfspr	\reg1, SPRN_DAR
	mfspr	\reg2, SPRN_DSISR
	stw	\reg1, _DAR(\sp)
	stw	\reg2, _DSISR(\sp)
#endif
.endm

.macro get_and_save_dar_dsisr_on_stack reg1, reg2, sp
#ifdef CONFIG_VMAP_STACK
	lwz	\reg1, _DAR(\sp)
	lwz	\reg2, _DSISR(\sp)
#else
	save_dar_dsisr_on_stack \reg1, \reg2, \sp
#endif
.endm

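/*
 * Conditional address-space conversion helpers: tovirt_vmstack
 * converts only when CONFIG_VMAP_STACK is set, tovirt_novmstack and
 * tophys_novmstack only when it is not.  In the other configuration
 * they reduce to a plain register move (or nothing if dst == src).
 */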
.macro tovirt_vmstack dst, src
#ifdef CONFIG_VMAP_STACK
	tovirt(\dst, \src)
#else
	.ifnc	\dst, \src
	mr	\dst, \src
	.endif
#endif
.endm

.macro tovirt_novmstack dst, src
#ifndef CONFIG_VMAP_STACK
	tovirt(\dst, \src)
#else
	.ifnc	\dst, \src
	mr	\dst, \src
	.endif
#endif
.endm

.macro tophys_novmstack dst, src
#ifndef CONFIG_VMAP_STACK
	tophys(\dst, \src)
#else
	.ifnc	\dst, \src
	mr	\dst, \src
	.endif
#endif
.endm

/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r11, r12 (SRR0), and r9 (SRR1).
 *
 * Note2: once we have set r1 we are in a position to take exceptions
 * again, and we could thus set MSR:RI at that point.
 */

/*
 * Exception vectors.
 */
#ifdef CONFIG_PPC_BOOK3S
#define	START_EXCEPTION(n, label)		\
	. = n;					\
	DO_KVM n;				\
label:

#else
#define	START_EXCEPTION(n, label)		\
	. = n;					\
label:

#endif

#define EXCEPTION(n, label, hdlr, xfer)		\
	START_EXCEPTION(n, label)		\
	EXCEPTION_PROLOG;			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
	xfer(n, hdlr)

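/*
 * The EXC_XFER_* macros set _TRAP, load the MSR value the handler will
 * run with and branch to the transfer routine, which picks up the
 * handler and return addresses from the two .long words following the
 * bl.  EXC_XFER_STD goes through transfer_to_handler_full and
 * ret_from_except_full, EXC_XFER_LITE through the lighter
 * transfer_to_handler/ret_from_except pair.
 */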
#define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret)		\
	li	r10,trap;					\
	stw	r10,_TRAP(r11);					\
	LOAD_REG_IMMEDIATE(r10, msr);				\
	bl	tfer;						\
	.long	hdlr;						\
	.long	ret

#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full,	\
			  ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
			  ret_from_except)

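/*
 * Stack overflow handling for CONFIG_VMAP_STACK: switch to a dedicated
 * per-CPU emergency stack (emergency_ctx), falling back to
 * init_thread_union while emergency_ctx is not yet set up, then build
 * a frame and call stack_overflow_exception.
 */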
.macro vmap_stack_overflow_exception
#ifdef CONFIG_VMAP_STACK
#ifdef CONFIG_SMP
	mfspr	r1, SPRN_SPRG_THREAD
	lwz	r1, TASK_CPU - THREAD(r1)
	slwi	r1, r1, 3
	addis	r1, r1, emergency_ctx@ha
#else
	lis	r1, emergency_ctx@ha
#endif
	lwz	r1, emergency_ctx@l(r1)
	cmpwi	cr1, r1, 0
	bne	cr1, 1f
	lis	r1, init_thread_union@ha
	addi	r1, r1, init_thread_union@l
1:	addi	r1, r1, THREAD_SIZE - INT_FRAME_SIZE
	EXCEPTION_PROLOG_2
	SAVE_NVGPRS(r11)
	addi	r3, r1, STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0, stack_overflow_exception)
#endif
.endm

#endif /* __HEAD_32_H__ */