mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-24 11:20:49 +07:00
feee1b8c49
The kernel code instrumentation in stackleak gcc plugin works in two stages. At first, stack tracking is added to GIMPLE representation of every function (except some special cases). And later, when stack frame size info is available, stack tracking is removed from the RTL representation of the functions with small stack frame. There is an unwanted side-effect for these functions: some of them do useless work with caller-saved registers. As an example of such case, proc_sys_write() without instrumentation: 55 push %rbp 41 b8 01 00 00 00 mov $0x1,%r8d 48 89 e5 mov %rsp,%rbp e8 11 ff ff ff callq ffffffff81284610 <proc_sys_call_handler> 5d pop %rbp c3 retq 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1) 66 2e 0f 1f 84 00 00 nopw %cs:0x0(%rax,%rax,1) 00 00 00 proc_sys_write() with instrumentation: 55 push %rbp 48 89 e5 mov %rsp,%rbp 41 56 push %r14 41 55 push %r13 41 54 push %r12 53 push %rbx 49 89 f4 mov %rsi,%r12 48 89 fb mov %rdi,%rbx 49 89 d5 mov %rdx,%r13 49 89 ce mov %rcx,%r14 4c 89 f1 mov %r14,%rcx 4c 89 ea mov %r13,%rdx 4c 89 e6 mov %r12,%rsi 48 89 df mov %rbx,%rdi 41 b8 01 00 00 00 mov $0x1,%r8d e8 f2 fe ff ff callq ffffffff81298e80 <proc_sys_call_handler> 5b pop %rbx 41 5c pop %r12 41 5d pop %r13 41 5e pop %r14 5d pop %rbp c3 retq 66 0f 1f 84 00 00 00 nopw 0x0(%rax,%rax,1) 00 00 Let's improve the instrumentation to avoid this: 1. Make stackleak_track_stack() save all registers that it works with. Use no_caller_saved_registers attribute for that function. This attribute is available for x86_64 and i386 starting from gcc-7. 2. Insert calling stackleak_track_stack() in asm: asm volatile("call stackleak_track_stack" :: "r" (current_stack_pointer)) Here we use ASM_CALL_CONSTRAINT trick from arch/x86/include/asm/asm.h. The input constraint is taken into account during gcc shrink-wrapping optimization. It is needed to be sure that stackleak_track_stack() call is inserted after the prologue of the containing function, when the stack frame is prepared. 
This work is a deep reengineering of the idea described on grsecurity blog https://grsecurity.net/resolving_an_unfortunate_stackleak_interaction Signed-off-by: Alexander Popov <alex.popov@linux.com> Acked-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com> Link: https://lore.kernel.org/r/20200624123330.83226-5-alex.popov@linux.com Signed-off-by: Kees Cook <keescook@chromium.org>
127 lines
3.6 KiB
C
127 lines
3.6 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/*
|
|
* This code fills the used part of the kernel stack with a poison value
|
|
* before returning to userspace. It's part of the STACKLEAK feature
|
|
* ported from grsecurity/PaX.
|
|
*
|
|
* Author: Alexander Popov <alex.popov@linux.com>
|
|
*
|
|
* STACKLEAK reduces the information which kernel stack leak bugs can
|
|
* reveal and blocks some uninitialized stack variable attacks.
|
|
*/
|
|
|
|
#include <linux/stackleak.h>
|
|
#include <linux/kprobes.h>
|
|
|
|
#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
|
|
#include <linux/jump_label.h>
|
|
#include <linux/sysctl.h>
|
|
|
|
static DEFINE_STATIC_KEY_FALSE(stack_erasing_bypass);
|
|
|
|
int stack_erasing_sysctl(struct ctl_table *table, int write,
|
|
void __user *buffer, size_t *lenp, loff_t *ppos)
|
|
{
|
|
int ret = 0;
|
|
int state = !static_branch_unlikely(&stack_erasing_bypass);
|
|
int prev_state = state;
|
|
|
|
table->data = &state;
|
|
table->maxlen = sizeof(int);
|
|
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
|
|
state = !!state;
|
|
if (ret || !write || state == prev_state)
|
|
return ret;
|
|
|
|
if (state)
|
|
static_branch_disable(&stack_erasing_bypass);
|
|
else
|
|
static_branch_enable(&stack_erasing_bypass);
|
|
|
|
pr_warn("stackleak: kernel stack erasing is %s\n",
|
|
state ? "enabled" : "disabled");
|
|
return ret;
|
|
}
|
|
|
|
/* With runtime disable support, erasing is skipped while the bypass key is set. */
#define skip_erasing()	static_branch_unlikely(&stack_erasing_bypass)
#else
/* Without runtime disable support, erasing is unconditional. */
#define skip_erasing()	false
#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
|
|
|
|
/*
 * Overwrite the used part of the kernel thread stack with the poison value
 * on return to userspace, so that stale data from earlier syscalls cannot
 * leak through uninitialized-stack bugs.
 *
 * Called from architecture entry code (hence asmlinkage). It is notrace and
 * NOKPROBE because it runs on the exit-to-user path where instrumentation
 * would be unsafe.
 */
asmlinkage void notrace stackleak_erase(void)
{
	/* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
	unsigned long kstack_ptr = current->lowest_stack;
	unsigned long boundary = (unsigned long)end_of_stack(current);
	unsigned int poison_count = 0;
	const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);

	/* Runtime bypass (CONFIG_STACKLEAK_RUNTIME_DISABLE); no-op otherwise */
	if (skip_erasing())
		return;

	/* Check that 'lowest_stack' value is sane */
	if (unlikely(kstack_ptr - boundary >= THREAD_SIZE))
		kstack_ptr = boundary;

	/*
	 * Search for the poison value in the kernel stack: scan downward from
	 * 'lowest_stack' until 'depth' consecutive poison words are seen, to
	 * avoid stopping early on a stack value that merely equals the poison.
	 */
	while (kstack_ptr > boundary && poison_count <= depth) {
		if (*(unsigned long *)kstack_ptr == STACKLEAK_POISON)
			poison_count++;
		else
			poison_count = 0;

		kstack_ptr -= sizeof(unsigned long);
	}

	/*
	 * One 'long int' at the bottom of the thread stack is reserved and
	 * should not be poisoned (see CONFIG_SCHED_STACK_END_CHECK=y).
	 */
	if (kstack_ptr == boundary)
		kstack_ptr += sizeof(unsigned long);

#ifdef CONFIG_STACKLEAK_METRICS
	/* Record how deep the stack got, for the stack_depth metric */
	current->prev_lowest_stack = kstack_ptr;
#endif

	/*
	 * Now write the poison value to the kernel stack. Start from
	 * 'kstack_ptr' and move up till the new 'boundary'. We assume that
	 * the stack pointer doesn't change when we write poison.
	 */
	if (on_thread_stack())
		boundary = current_stack_pointer;
	else
		boundary = current_top_of_stack();

	while (kstack_ptr < boundary) {
		*(unsigned long *)kstack_ptr = STACKLEAK_POISON;
		kstack_ptr += sizeof(unsigned long);
	}

	/* Reset the 'lowest_stack' value for the next syscall */
	current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64;
}
NOKPROBE_SYMBOL(stackleak_erase);
|
|
|
|
/*
 * Called by the code inserted by the stackleak gcc plugin at the start of
 * instrumented functions: records the deepest stack pointer seen so far in
 * current->lowest_stack, so stackleak_erase() knows how much to poison.
 *
 * __no_caller_saved_registers keeps the call cheap for the caller, and
 * __used prevents the compiler from dropping this seemingly-unreferenced
 * function (it is only called from plugin-generated asm).
 */
void __used __no_caller_saved_registers notrace stackleak_track_stack(void)
{
	unsigned long stack_p = current_stack_pointer;
	unsigned long lower_limit = (unsigned long)task_stack_page(current) +
							sizeof(unsigned long);

	/*
	 * Having CONFIG_STACKLEAK_TRACK_MIN_SIZE larger than
	 * STACKLEAK_SEARCH_DEPTH makes the poison search in
	 * stackleak_erase() unreliable. Let's prevent that.
	 */
	BUILD_BUG_ON(CONFIG_STACKLEAK_TRACK_MIN_SIZE > STACKLEAK_SEARCH_DEPTH);

	/* 'lowest_stack' should be aligned on the register width boundary */
	stack_p = ALIGN(stack_p, sizeof(unsigned long));

	/* Only record a new low point that stays above the reserved bottom word */
	if (stack_p >= lower_limit && stack_p < current->lowest_stack)
		current->lowest_stack = stack_p;
}
EXPORT_SYMBOL(stackleak_track_stack);
|