6dcc5627f6

These are all functions which are invoked from elsewhere, so annotate
them as global using the new SYM_FUNC_START and their ENDPROC's by
SYM_FUNC_END.

Make sure ENTRY/ENDPROC is not defined on X86_64, given these were the
last users.

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> [hibernate]
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> [xen bits]
Acked-by: Herbert Xu <herbert@gondor.apana.org.au> [crypto]
Cc: Allison Randal <allison@lohutok.net>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Andy Shevchenko <andy@infradead.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Armijn Hemel <armijn@tjaldur.nl>
Cc: Cao jin <caoj.fnst@cn.fujitsu.com>
Cc: Darren Hart <dvhart@infradead.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Enrico Weigelt <info@metux.net>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jim Mattson <jmattson@google.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kate Stewart <kstewart@linuxfoundation.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: kvm ML <kvm@vger.kernel.org>
Cc: Len Brown <len.brown@intel.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-crypto@vger.kernel.org
Cc: linux-efi <linux-efi@vger.kernel.org>
Cc: linux-efi@vger.kernel.org
Cc: linux-pm@vger.kernel.org
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: platform-driver-x86@vger.kernel.org
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Wanpeng Li <wanpengli@tencent.com>
Cc: Wei Huang <wei@redhat.com>
Cc: x86-ml <x86@kernel.org>
Cc: xen-devel@lists.xenproject.org
Cc: Xiaoyao Li <xiaoyao.li@linux.intel.com>
Link: https://lkml.kernel.org/r/20191011115108.12392-25-jslaby@suse.cz
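
The conversion pattern, sketched on this file's first helper (the
"before" side is inferred from the message above, not shown by the
page):

    -ENTRY(__get_user_1)
    +SYM_FUNC_START(__get_user_1)
         ...
    -ENDPROC(__get_user_1)
    +SYM_FUNC_END(__get_user_1)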
146 lines · 3.2 KiB · ArmAsm
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * __get_user functions.
 *
 * (C) Copyright 1998 Linus Torvalds
 * (C) Copyright 2005 Andi Kleen
 * (C) Copyright 2008 Glauber Costa
 *
 * These functions have a non-standard call interface
 * to make them more efficient, especially as they
 * return an error value in addition to the "real"
 * return value.
 */

/*
 * __get_user_X
 *
 * Inputs:	%[r|e]ax contains the address.
 *
 * Outputs:	%[r|e]ax is error code (0 or -EFAULT)
 *		%[r|e]dx contains zero-extended value
 *		%ecx contains the high half for 32-bit __get_user_8
 *
 * These functions should not modify any other registers,
 * as they get called from within inline assembly.
 */
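
/*
 * Caller sketch (illustrative only -- the real callers are the
 * get_user()/__get_user() macros in <asm/uaccess.h>, which emit an
 * equivalent "call __get_user_N" from inline assembly; user_ptr and
 * .Lfault below are hypothetical names):
 *
 *	mov	user_ptr, %_ASM_AX	# address to fetch from
 *	call	__get_user_4
 *	test	%eax, %eax		# 0 on success, -EFAULT on fault
 *	jnz	.Lfault
 *					# value is zero-extended in %edx
 */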

#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>

	.text
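
/*
 * __get_user_1: load one byte from user space.
 *
 * The user pointer in %[r|e]ax is checked against the task's
 * addr_limit; the following sbb/and pair then forces the pointer to
 * zero if the check failed, so a mispredicted branch cannot leak
 * kernel data through a speculative load (the
 * array_index_mask_nospec() pattern).  STAC/CLAC bracket the access
 * for SMAP.
 */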
SYM_FUNC_START(__get_user_1)
	mov PER_CPU_VAR(current_task), %_ASM_DX
	cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
	jae bad_get_user
	sbb %_ASM_DX, %_ASM_DX		/* array_index_mask_nospec() */
	and %_ASM_DX, %_ASM_AX
	ASM_STAC
1:	movzbl (%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
	ret
SYM_FUNC_END(__get_user_1)
EXPORT_SYMBOL(__get_user_1)
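
/*
 * __get_user_2: load two bytes from user space.
 *
 * "add $1" computes the address of the last byte so the limit check
 * covers the whole access; "jc" catches address wrap-around.  The
 * load then uses a -1 displacement to read from the original address.
 */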
SYM_FUNC_START(__get_user_2)
	add $1,%_ASM_AX
	jc bad_get_user
	mov PER_CPU_VAR(current_task), %_ASM_DX
	cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
	jae bad_get_user
	sbb %_ASM_DX, %_ASM_DX		/* array_index_mask_nospec() */
	and %_ASM_DX, %_ASM_AX
	ASM_STAC
2:	movzwl -1(%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
	ret
SYM_FUNC_END(__get_user_2)
EXPORT_SYMBOL(__get_user_2)
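
/*
 * __get_user_4: load four bytes from user space, same pattern as
 * above with the range check widened to cover address..address+3.
 */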
SYM_FUNC_START(__get_user_4)
	add $3,%_ASM_AX
	jc bad_get_user
	mov PER_CPU_VAR(current_task), %_ASM_DX
	cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
	jae bad_get_user
	sbb %_ASM_DX, %_ASM_DX		/* array_index_mask_nospec() */
	and %_ASM_DX, %_ASM_AX
	ASM_STAC
3:	movl -3(%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
	ret
SYM_FUNC_END(__get_user_4)
EXPORT_SYMBOL(__get_user_4)
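
/*
 * __get_user_8: load eight bytes from user space.  On 64-bit this is
 * a single movq into %rdx; on 32-bit the value is returned as two
 * 32-bit halves, low in %edx and high in %ecx, and faults go through
 * bad_get_user_8 so that %ecx is cleared as well.
 */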
SYM_FUNC_START(__get_user_8)
#ifdef CONFIG_X86_64
	add $7,%_ASM_AX
	jc bad_get_user
	mov PER_CPU_VAR(current_task), %_ASM_DX
	cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
	jae bad_get_user
	sbb %_ASM_DX, %_ASM_DX		/* array_index_mask_nospec() */
	and %_ASM_DX, %_ASM_AX
	ASM_STAC
4:	movq -7(%_ASM_AX),%rdx
	xor %eax,%eax
	ASM_CLAC
	ret
#else
	add $7,%_ASM_AX
	jc bad_get_user_8
	mov PER_CPU_VAR(current_task), %_ASM_DX
	cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
	jae bad_get_user_8
	sbb %_ASM_DX, %_ASM_DX		/* array_index_mask_nospec() */
	and %_ASM_DX, %_ASM_AX
	ASM_STAC
4:	movl -7(%_ASM_AX),%edx
5:	movl -3(%_ASM_AX),%ecx
	xor %eax,%eax
	ASM_CLAC
	ret
#endif
SYM_FUNC_END(__get_user_8)
EXPORT_SYMBOL(__get_user_8)
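
/*
 * Fault handlers: the exception table entries below redirect a
 * faulting user access here.  CLAC is issued first, then the value
 * register(s) are zeroed and -EFAULT is returned in %[r|e]ax.
 */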
SYM_CODE_START_LOCAL(.Lbad_get_user_clac)
	ASM_CLAC
bad_get_user:
	xor %edx,%edx
	mov $(-EFAULT),%_ASM_AX
	ret
SYM_CODE_END(.Lbad_get_user_clac)

#ifdef CONFIG_X86_32
SYM_CODE_START_LOCAL(.Lbad_get_user_8_clac)
	ASM_CLAC
bad_get_user_8:
	xor %edx,%edx
	xor %ecx,%ecx
	mov $(-EFAULT),%_ASM_AX
	ret
SYM_CODE_END(.Lbad_get_user_8_clac)
#endif
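
/*
 * Exception table: each numeric label above marks a user-space load
 * that may fault; _ASM_EXTABLE_UA points the fixup for that load at
 * the matching .Lbad_get_user*_clac handler.
 */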
	_ASM_EXTABLE_UA(1b, .Lbad_get_user_clac)
	_ASM_EXTABLE_UA(2b, .Lbad_get_user_clac)
	_ASM_EXTABLE_UA(3b, .Lbad_get_user_clac)
#ifdef CONFIG_X86_64
	_ASM_EXTABLE_UA(4b, .Lbad_get_user_clac)
#else
	_ASM_EXTABLE_UA(4b, .Lbad_get_user_8_clac)
	_ASM_EXTABLE_UA(5b, .Lbad_get_user_8_clac)
#endif