mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-27 07:35:12 +07:00
6d685e5318
These are all functions which are invoked from elsewhere, so annotate them as global using the new SYM_FUNC_START and their ENDPROC's by SYM_FUNC_END. Now, ENTRY/ENDPROC can be forced to be undefined on X86, so do so. Signed-off-by: Jiri Slaby <jslaby@suse.cz> Signed-off-by: Borislav Petkov <bp@suse.de> Cc: Allison Randal <allison@lohutok.net> Cc: Andrey Ryabinin <aryabinin@virtuozzo.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Andy Shevchenko <andy@infradead.org> Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org> Cc: Bill Metzenthen <billm@melbpc.org.au> Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com> Cc: Darren Hart <dvhart@infradead.org> Cc: "David S. Miller" <davem@davemloft.net> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Herbert Xu <herbert@gondor.apana.org.au> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: linux-arch@vger.kernel.org Cc: linux-crypto@vger.kernel.org Cc: linux-efi <linux-efi@vger.kernel.org> Cc: linux-efi@vger.kernel.org Cc: linux-pm@vger.kernel.org Cc: Mark Rutland <mark.rutland@arm.com> Cc: Matt Fleming <matt@codeblueprint.co.uk> Cc: Pavel Machek <pavel@ucw.cz> Cc: platform-driver-x86@vger.kernel.org Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Will Deacon <will@kernel.org> Cc: x86-ml <x86@kernel.org> Link: https://lkml.kernel.org/r/20191011115108.12392-28-jslaby@suse.cz
143 lines
3.3 KiB
x86 assembly (GAS, AT&T syntax)
/* SPDX-License-Identifier: GPL-2.0 */
/*---------------------------------------------------------------------------+
 |  round_Xsig.S                                                             |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1995                                         |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail   billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Normalize and round a 12 byte quantity.                                   |
 | Call from C as:                                                           |
 |   int round_Xsig(Xsig *n)                                                 |
 |                                                                           |
 | Normalize a 12 byte quantity.                                             |
 | Call from C as:                                                           |
 |   int norm_Xsig(Xsig *n)                                                  |
 |                                                                           |
 | Each function returns the size of the shift (nr of bits).                 |
 |                                                                           |
 +---------------------------------------------------------------------------*/
	.file	"round_Xsig.S"

#include "fpu_emu.h"


.text
/*
 * int round_Xsig(Xsig *n)
 *
 * Normalize the 96-bit (12 byte) quantity at *n so that the most
 * significant bit of its top 32-bit word (offset 8) is set, then round:
 * if the top bit of the low word is set after normalizing, increment
 * the upper 64 bits by one.  The result is stored back to *n.
 *
 * Returns (in %eax) the counter kept at -4(%ebp): decremented by the
 * number of bits shifted left, incremented by one when rounding carries
 * out of the top word (value renormalized to 0x80000000:0:0) — the
 * "size of the shift" per the file header.
 *
 * ABI: i386 cdecl; PARAM1 (from fpu_emu.h) is the first stack argument.
 * Clobbers %eax, %ecx, %edx and flags; %ebx/%esi/%ebp are saved.
 */
SYM_FUNC_START(round_Xsig)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ebx			/* Reserve some space: -4(%ebp) is the shift counter */
	pushl	%ebx			/* save callee-saved %ebx */
	pushl	%esi			/* save callee-saved %esi */

	movl	PARAM1,%esi		/* %esi = n */

	/* Load the 96-bit value, most significant word first:
	   %edx:%ebx:%eax = words at offsets 8:4:0 of *n */
	movl	8(%esi),%edx
	movl	4(%esi),%ebx
	movl	(%esi),%eax

	movl	$0,-4(%ebp)		/* shift counter = 0 */

	orl	%edx,%edx		/* ms bits */
	js	L_round			/* Already normalized */
	jnz	L_shift_1		/* Shift left 1 - 31 bits */

	/* Top word is zero: shift left by one whole 32-bit word */
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	movl	$-32,-4(%ebp)

/* We need to shift left by 1 - 31 bits */
L_shift_1:
	bsrl	%edx,%ecx		/* get the required shift in %ecx */
	subl	$31,%ecx
	negl	%ecx			/* %ecx = 31 - (index of highest set bit) */
	subl	%ecx,-4(%ebp)		/* counter -= shift size */
	shld	%cl,%ebx,%edx		/* 96-bit left shift across the three words */
	shld	%cl,%eax,%ebx
	shl	%cl,%eax

L_round:
	testl	$0x80000000,%eax	/* round bit = ms bit of the low word */
	jz	L_exit

	addl	$1,%ebx			/* round up the high 64 bits */
	adcl	$0,%edx
	jnz	L_exit			/* %edx still non-zero: no carry out of the top */

	/* Carry propagated out of the top word (value wrapped to zero):
	   renormalize to 1.0 and bump the shift counter by one. */
	movl	$0x80000000,%edx
	incl	-4(%ebp)

L_exit:
	/* Store the normalized, rounded value back into *n */
	movl	%edx,8(%esi)
	movl	%ebx,4(%esi)
	movl	%eax,(%esi)

	movl	-4(%ebp),%eax		/* return the shift counter */

	popl	%esi
	popl	%ebx
	leave
	ret
SYM_FUNC_END(round_Xsig)


/*
 * int norm_Xsig(Xsig *n)
 *
 * Normalize the 96-bit quantity at *n (no rounding): shift it left
 * until the ms bit of the top 32-bit word is set, giving up after two
 * whole-word (64 bit) shifts if the value is still not normalized
 * (e.g. *n == 0).  The result is stored back to *n.
 *
 * Returns (in %eax) the counter at -4(%ebp), decremented by the number
 * of bits shifted left — the "size of the shift" per the file header.
 *
 * ABI: i386 cdecl; PARAM1 (from fpu_emu.h) is the first stack argument.
 * Clobbers %eax, %ecx, %edx and flags; %ebx/%esi/%ebp are saved.
 */
SYM_FUNC_START(norm_Xsig)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ebx			/* Reserve some space: -4(%ebp) is the shift counter */
	pushl	%ebx			/* save callee-saved %ebx */
	pushl	%esi			/* save callee-saved %esi */

	movl	PARAM1,%esi		/* %esi = n */

	/* %edx:%ebx:%eax = words at offsets 8:4:0 of *n (ms word first) */
	movl	8(%esi),%edx
	movl	4(%esi),%ebx
	movl	(%esi),%eax

	movl	$0,-4(%ebp)		/* shift counter = 0 */

	orl	%edx,%edx		/* ms bits */
	js	L_n_exit		/* Already normalized */
	jnz	L_n_shift_1		/* Shift left 1 - 31 bits */

	/* Top word is zero: shift left by one whole 32-bit word */
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	movl	$-32,-4(%ebp)

	orl	%edx,%edx		/* ms bits */
	js	L_n_exit		/* Normalized now */
	jnz	L_n_shift_1		/* Shift left 1 - 31 bits */

	/* Top two words were zero: shift a second whole word and stop */
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	addl	$-32,-4(%ebp)
	jmp	L_n_exit		/* Might not be normalized,
					   but shift no more. */

/* We need to shift left by 1 - 31 bits */
L_n_shift_1:
	bsrl	%edx,%ecx		/* get the required shift in %ecx */
	subl	$31,%ecx
	negl	%ecx			/* %ecx = 31 - (index of highest set bit) */
	subl	%ecx,-4(%ebp)		/* counter -= shift size */
	shld	%cl,%ebx,%edx		/* 96-bit left shift across the three words */
	shld	%cl,%eax,%ebx
	shl	%cl,%eax

L_n_exit:
	/* Store the normalized value back into *n */
	movl	%edx,8(%esi)
	movl	%ebx,4(%esi)
	movl	%eax,(%esi)

	movl	-4(%ebp),%eax		/* return the shift counter */

	popl	%esi
	popl	%ebx
	leave
	ret
SYM_FUNC_END(norm_Xsig)
|