commit 75045f77f7

Currently, most fixups for attempting to access userspace memory are
handled using _ASM_EXTABLE, which is also used for various other types
of fixups (e.g. safe MSR access, IRET failures, and a bunch of other
things). In order to make it possible to add special safety checks to
uaccess fixups (in particular, checking whether the fault address is
actually in userspace), introduce a new exception table handler
ex_handler_uaccess() and wire it up to all the user access fixups
(excluding ones that already use _ASM_EXTABLE_EX).

Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Kees Cook <keescook@chromium.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: kernel-hardening@lists.openwall.com
Cc: dvyukov@google.com
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.vnet.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: linux-fsdevel@vger.kernel.org
Cc: Borislav Petkov <bp@alien8.de>
Link: https://lkml.kernel.org/r/20180828201421.157735-5-jannh@google.com
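For context, a minimal sketch of the handler and macro this commit introduces.
The names ex_handler_uaccess() and _ASM_EXTABLE_UA come from the commit message
itself; the bodies below are an assumption modelled on the kernel's existing
ex_handler_default()/_ASM_EXTABLE_HANDLE pattern, not a quote of the patch:

    /* arch/x86/mm/extable.c (sketch) */
    __visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
                                      struct pt_regs *regs, int trapnr)
    {
            /*
             * Recover exactly like ex_handler_default(): resume at the fixup
             * label. A dedicated handler gives uaccess faults their own hook
             * where checks such as "is the fault address actually in
             * userspace?" can later be added.
             */
            regs->ip = ex_fixup_addr(fixup);
            return true;
    }

    /* arch/x86/include/asm/asm.h (sketch) */
    #define _ASM_EXTABLE_UA(from, to) \
            _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)

Every _ASM_EXTABLE_UA site (such as the ones at the bottom of this file) is
then routed through ex_handler_uaccess() instead of the generic handler.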
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * __get_user functions.
 *
 * (C) Copyright 1998 Linus Torvalds
 * (C) Copyright 2005 Andi Kleen
 * (C) Copyright 2008 Glauber Costa
 *
 * These functions have a non-standard call interface
 * to make them more efficient, especially as they
 * return an error value in addition to the "real"
 * return value.
 */

/*
 * __get_user_X
 *
 * Inputs:	%[r|e]ax contains the address.
 *
 * Outputs:	%[r|e]ax is error code (0 or -EFAULT)
 *		%[r|e]dx contains zero-extended value
 *		%ecx contains the high half for 32-bit __get_user_8
 *
 *
 * These functions should not modify any other registers,
 * as they get called from within inline assembly.
 */
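
/*
 * Illustration only: the C side reaches these routines roughly as in the
 * simplified sketch below (the real get_user() macro in
 * arch/x86/include/asm/uaccess.h selects __get_user_N by operand size and
 * uses tighter asm constraints):
 *
 *	int err;
 *	unsigned char c;
 *
 *	asm volatile("call __get_user_1"
 *		     : "=a" (err), "=d" (c)	// error in [r|e]ax, value in [r|e]dx
 *		     : "0" (ptr));		// address passed in [r|e]ax
 *
 * This is why the register usage documented above must stay fixed.
 */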

#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>

	.text
ENTRY(__get_user_1)
	mov PER_CPU_VAR(current_task), %_ASM_DX
	cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX	/* address below addr_limit? */
	jae bad_get_user
	/*
	 * The cmp above left CF = (addr < limit); sbb turns that into an
	 * all-ones or all-zero mask, so a mispredicted jae cannot
	 * speculatively dereference an out-of-range address (Spectre v1).
	 */
	sbb %_ASM_DX, %_ASM_DX		/* array_index_mask_nospec() */
	and %_ASM_DX, %_ASM_AX
	ASM_STAC			/* open the SMAP window for user access */
1:	movzbl (%_ASM_AX),%edx		/* may fault; fixed up via extable below */
	xor %eax,%eax			/* success: error code 0 */
	ASM_CLAC
	ret
ENDPROC(__get_user_1)
EXPORT_SYMBOL(__get_user_1)

ENTRY(__get_user_2)
	add $1,%_ASM_AX			/* address of the last byte ... */
	jc bad_get_user			/* ... jc rejects wraparound */
	mov PER_CPU_VAR(current_task), %_ASM_DX
	cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
	jae bad_get_user
	sbb %_ASM_DX, %_ASM_DX		/* array_index_mask_nospec() */
	and %_ASM_DX, %_ASM_AX
	ASM_STAC
2:	movzwl -1(%_ASM_AX),%edx	/* -1: load from the original address */
	xor %eax,%eax
	ASM_CLAC
	ret
ENDPROC(__get_user_2)
EXPORT_SYMBOL(__get_user_2)

ENTRY(__get_user_4)
	add $3,%_ASM_AX
	jc bad_get_user
	mov PER_CPU_VAR(current_task), %_ASM_DX
	cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
	jae bad_get_user
	sbb %_ASM_DX, %_ASM_DX		/* array_index_mask_nospec() */
	and %_ASM_DX, %_ASM_AX
	ASM_STAC
3:	movl -3(%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
	ret
ENDPROC(__get_user_4)
EXPORT_SYMBOL(__get_user_4)

ENTRY(__get_user_8)
#ifdef CONFIG_X86_64
	add $7,%_ASM_AX
	jc bad_get_user
	mov PER_CPU_VAR(current_task), %_ASM_DX
	cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
	jae bad_get_user
	sbb %_ASM_DX, %_ASM_DX		/* array_index_mask_nospec() */
	and %_ASM_DX, %_ASM_AX
	ASM_STAC
4:	movq -7(%_ASM_AX),%rdx
	xor %eax,%eax
	ASM_CLAC
	ret
#else
	add $7,%_ASM_AX
	jc bad_get_user_8
	mov PER_CPU_VAR(current_task), %_ASM_DX
	cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
	jae bad_get_user_8
	sbb %_ASM_DX, %_ASM_DX		/* array_index_mask_nospec() */
	and %_ASM_DX, %_ASM_AX
	ASM_STAC
4:	movl -7(%_ASM_AX),%edx		/* 32-bit: low half in %edx ... */
5:	movl -3(%_ASM_AX),%ecx		/* ... high half in %ecx */
	xor %eax,%eax
	ASM_CLAC
	ret
#endif
ENDPROC(__get_user_8)
EXPORT_SYMBOL(__get_user_8)

bad_get_user:
	xor %edx,%edx			/* clear the value register */
	mov $(-EFAULT),%_ASM_AX		/* report failure */
	ASM_CLAC
	ret
END(bad_get_user)

#ifdef CONFIG_X86_32
bad_get_user_8:
	xor %edx,%edx			/* clear both halves of the value */
	xor %ecx,%ecx
	mov $(-EFAULT),%_ASM_AX
	ASM_CLAC
	ret
END(bad_get_user_8)
#endif
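
/*
 * Exception table entries: each potentially faulting user-space load
 * above (labels 1-5) is paired with its fixup here. Per the commit
 * message, _ASM_EXTABLE_UA routes these through the uaccess-specific
 * handler ex_handler_uaccess() rather than the generic _ASM_EXTABLE path.
 */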
	_ASM_EXTABLE_UA(1b, bad_get_user)
	_ASM_EXTABLE_UA(2b, bad_get_user)
	_ASM_EXTABLE_UA(3b, bad_get_user)
#ifdef CONFIG_X86_64
	_ASM_EXTABLE_UA(4b, bad_get_user)
#else
	_ASM_EXTABLE_UA(4b, bad_get_user_8)
	_ASM_EXTABLE_UA(5b, bad_get_user_8)
#endif