mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-23 09:48:39 +07:00
30d6e0a419
There is code duplicated over all architectures' headers for futex_atomic_op_inuser. Namely op decoding, access_ok check for uaddr, and comparison of the result. Remove this duplication and leave up to the arches only the needed assembly which is now in arch_futex_atomic_op_inuser. This effectively distributes the Will Deacon's arm64 fix for undefined behaviour reported by UBSAN to all architectures. The fix was done in commit 5f16a046f8
(arm64: futex: Fix undefined behaviour with FUTEX_OP_OPARG_SHIFT usage). Look there for an example dump. And as suggested by Thomas, check for negative oparg too, because it was also reported to cause an undefined behaviour report. Note that s390 removed the access_ok check in commit d12a29703
("s390/uaccess: remove pointless access_ok() checks") as access_ok there returns true. We introduce it back to the helper for the sake of simplicity (it gets optimized away anyway). Signed-off-by: Jiri Slaby <jslaby@suse.cz> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Acked-by: Russell King <rmk+kernel@armlinux.org.uk> Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc) Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com> [s390] Acked-by: Chris Metcalf <cmetcalf@mellanox.com> [for tile] Reviewed-by: Darren Hart (VMware) <dvhart@infradead.org> Reviewed-by: Will Deacon <will.deacon@arm.com> [core/arm64] Cc: linux-mips@linux-mips.org Cc: Rich Felker <dalias@libc.org> Cc: linux-ia64@vger.kernel.org Cc: linux-sh@vger.kernel.org Cc: peterz@infradead.org Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Paul Mackerras <paulus@samba.org> Cc: sparclinux@vger.kernel.org Cc: Jonas Bonn <jonas@southpole.se> Cc: linux-s390@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Cc: linux-hexagon@vger.kernel.org Cc: Helge Deller <deller@gmx.de> Cc: "James E.J. 
Bottomley" <jejb@parisc-linux.org> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Matt Turner <mattst88@gmail.com> Cc: linux-snps-arc@lists.infradead.org Cc: Fenghua Yu <fenghua.yu@intel.com> Cc: Arnd Bergmann <arnd@arndb.de> Cc: linux-xtensa@linux-xtensa.org Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi> Cc: openrisc@lists.librecores.org Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru> Cc: Stafford Horne <shorne@gmail.com> Cc: linux-arm-kernel@lists.infradead.org Cc: Richard Henderson <rth@twiddle.net> Cc: Chris Zankel <chris@zankel.net> Cc: Michal Simek <monstr@monstr.eu> Cc: Tony Luck <tony.luck@intel.com> Cc: linux-parisc@vger.kernel.org Cc: Vineet Gupta <vgupta@synopsys.com> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Richard Kuo <rkuo@codeaurora.org> Cc: linux-alpha@vger.kernel.org Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: linuxppc-dev@lists.ozlabs.org Cc: "David S. Miller" <davem@davemloft.net> Link: http://lkml.kernel.org/r/20170824073105.3901-1-jslaby@suse.cz
202 lines
4.9 KiB
C
202 lines
4.9 KiB
C
/*
|
|
* This file is subject to the terms and conditions of the GNU General Public
|
|
* License. See the file "COPYING" in the main directory of this archive
|
|
* for more details.
|
|
*
|
|
* Copyright (c) 2006 Ralf Baechle (ralf@linux-mips.org)
|
|
*/
|
|
#ifndef _ASM_FUTEX_H
|
|
#define _ASM_FUTEX_H
|
|
|
|
#ifdef __KERNEL__
|
|
|
|
#include <linux/futex.h>
|
|
#include <linux/uaccess.h>
|
|
#include <asm/asm-eva.h>
|
|
#include <asm/barrier.h>
|
|
#include <asm/compiler.h>
|
|
#include <asm/errno.h>
|
|
#include <asm/war.h>
|
|
|
|
/*
 * __futex_atomic_op() - atomically read-modify-write a user futex word.
 *
 * Expands an LL/SC loop around @insn, a single assembly instruction that
 * computes the new value into $1 from the old value (%1) and @oparg (%4/%z5
 * as wired up by the callers in arch_futex_atomic_op_inuser()).
 *
 * @insn:   asm string computing the new value into $1
 * @ret:    lvalue receiving 0 on success or -EFAULT/-ENOSYS on failure
 * @oldval: lvalue receiving the previous value of *@uaddr
 * @uaddr:  user-space address of the futex word
 * @oparg:  operand consumed by @insn
 *
 * Three variants are selected at run time:
 *  - R10000_LLSC_WAR: uses the branch-likely form (beqzl) required by the
 *    R10000 LL/SC errata workaround.
 *  - plain cpu_has_llsc: uses user_ll/user_sc so that, on EVA kernels, the
 *    load/store target the user address space (see <asm/asm-eva.h>).
 *  - no LL/SC at all: ret = -ENOSYS.
 *
 * Faults on the user access are handled via the __ex_table entries for
 * labels 1 and 2, which redirect to the .fixup stub at label 4 that loads
 * -EFAULT (%6) into @ret and resumes at label 3.  The ".insn" directive
 * marks label 3 as code so microMIPS branch calculations stay correct.
 * __WEAK_LLSC_MB provides the ordering required after a successful SC.
 */
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
{									\
	if (cpu_has_llsc && R10000_LLSC_WAR) {				\
		__asm__ __volatile__(					\
		"	.set	push				\n"	\
		"	.set	noat				\n"	\
		"	.set	arch=r4000			\n"	\
		"1:	ll	%1, %4	# __futex_atomic_op	\n"	\
		"	.set	mips0				\n"	\
		"	" insn	"				\n"	\
		"	.set	arch=r4000			\n"	\
		"2:	sc	$1, %2				\n"	\
		"	beqzl	$1, 1b				\n"	\
		__WEAK_LLSC_MB						\
		"3:						\n"	\
		"	.insn					\n"	\
		"	.set	pop				\n"	\
		"	.set	mips0				\n"	\
		"	.section .fixup,\"ax\"			\n"	\
		"4:	li	%0, %6				\n"	\
		"	j	3b				\n"	\
		"	.previous				\n"	\
		"	.section __ex_table,\"a\"		\n"	\
		"	"__UA_ADDR "\t1b, 4b			\n"	\
		"	"__UA_ADDR "\t2b, 4b			\n"	\
		"	.previous				\n"	\
		: "=r" (ret), "=&r" (oldval),				\
		  "=" GCC_OFF_SMALL_ASM() (*uaddr)			\
		: "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg),	\
		  "i" (-EFAULT)						\
		: "memory");						\
	} else if (cpu_has_llsc) {					\
		__asm__ __volatile__(					\
		"	.set	push				\n"	\
		"	.set	noat				\n"	\
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
		"1:	"user_ll("%1", "%4")" # __futex_atomic_op\n"	\
		"	.set	mips0				\n"	\
		"	" insn	"				\n"	\
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
		"2:	"user_sc("$1", "%2")"			\n"	\
		"	beqz	$1, 1b				\n"	\
		__WEAK_LLSC_MB						\
		"3:						\n"	\
		"	.insn					\n"	\
		"	.set	pop				\n"	\
		"	.set	mips0				\n"	\
		"	.section .fixup,\"ax\"			\n"	\
		"4:	li	%0, %6				\n"	\
		"	j	3b				\n"	\
		"	.previous				\n"	\
		"	.section __ex_table,\"a\"		\n"	\
		"	"__UA_ADDR "\t1b, 4b			\n"	\
		"	"__UA_ADDR "\t2b, 4b			\n"	\
		"	.previous				\n"	\
		: "=r" (ret), "=&r" (oldval),				\
		  "=" GCC_OFF_SMALL_ASM() (*uaddr)			\
		: "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg),	\
		  "i" (-EFAULT)						\
		: "memory");						\
	} else								\
		ret = -ENOSYS;						\
}
|
|
|
|
static inline int
|
|
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
|
|
{
|
|
int oldval = 0, ret;
|
|
|
|
pagefault_disable();
|
|
|
|
switch (op) {
|
|
case FUTEX_OP_SET:
|
|
__futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg);
|
|
break;
|
|
|
|
case FUTEX_OP_ADD:
|
|
__futex_atomic_op("addu $1, %1, %z5",
|
|
ret, oldval, uaddr, oparg);
|
|
break;
|
|
case FUTEX_OP_OR:
|
|
__futex_atomic_op("or $1, %1, %z5",
|
|
ret, oldval, uaddr, oparg);
|
|
break;
|
|
case FUTEX_OP_ANDN:
|
|
__futex_atomic_op("and $1, %1, %z5",
|
|
ret, oldval, uaddr, ~oparg);
|
|
break;
|
|
case FUTEX_OP_XOR:
|
|
__futex_atomic_op("xor $1, %1, %z5",
|
|
ret, oldval, uaddr, oparg);
|
|
break;
|
|
default:
|
|
ret = -ENOSYS;
|
|
}
|
|
|
|
pagefault_enable();
|
|
|
|
if (!ret)
|
|
*oval = oldval;
|
|
|
|
return ret;
|
|
}
|
|
|
|
/*
 * futex_atomic_cmpxchg_inatomic() - compare-and-exchange a user futex word.
 * @uval:   out parameter; receives the value read from *@uaddr
 * @uaddr:  user-space address of the futex word
 * @oldval: expected current value
 * @newval: value to store if *@uaddr == @oldval
 *
 * Returns 0 on success (whether or not the exchange happened - the caller
 * compares *@uval against @oldval to find out), -EFAULT if @uaddr is not a
 * valid writable user address or the access faults, and -ENOSYS on CPUs
 * without LL/SC.
 *
 * As in __futex_atomic_op(): the R10000_LLSC_WAR path uses the
 * branch-likely beqzl form for the R10000 errata, the plain LL/SC path uses
 * user_ll/user_sc for EVA, faults at labels 1/2 are fixed up at label 4
 * (loading -EFAULT into ret), and __WEAK_LLSC_MB orders the successful
 * store.  The "bne ... 3f" exits early, leaving ret == 0 and val holding
 * the mismatching current value.
 */
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
			      u32 oldval, u32 newval)
{
	int ret = 0;
	u32 val;

	/* Reject obviously bad user pointers before touching them. */
	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"# futex_atomic_cmpxchg_inatomic			\n"
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	arch=r4000				\n"
		"1:	ll	%1, %3					\n"
		"	bne	%1, %z4, 3f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z5					\n"
		"	.set	arch=r4000				\n"
		"2:	sc	$1, %2					\n"
		"	beqzl	$1, 1b					\n"
		__WEAK_LLSC_MB
		"3:							\n"
		"	.insn						\n"
		"	.set	pop					\n"
		"	.section .fixup,\"ax\"				\n"
		"4:	li	%0, %6					\n"
		"	j	3b					\n"
		"	.previous					\n"
		"	.section __ex_table,\"a\"			\n"
		"	"__UA_ADDR "\t1b, 4b				\n"
		"	"__UA_ADDR "\t2b, 4b				\n"
		"	.previous					\n"
		: "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
		: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
		  "i" (-EFAULT)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"# futex_atomic_cmpxchg_inatomic			\n"
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
		"1:	"user_ll("%1", "%3")"				\n"
		"	bne	%1, %z4, 3f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z5					\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
		"2:	"user_sc("$1", "%2")"				\n"
		"	beqz	$1, 1b					\n"
		__WEAK_LLSC_MB
		"3:							\n"
		"	.insn						\n"
		"	.set	pop					\n"
		"	.section .fixup,\"ax\"				\n"
		"4:	li	%0, %6					\n"
		"	j	3b					\n"
		"	.previous					\n"
		"	.section __ex_table,\"a\"			\n"
		"	"__UA_ADDR "\t1b, 4b				\n"
		"	"__UA_ADDR "\t2b, 4b				\n"
		"	.previous					\n"
		: "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
		: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
		  "i" (-EFAULT)
		: "memory");
	} else
		return -ENOSYS;

	*uval = val;
	return ret;
}
|
|
|
|
#endif
|
|
#endif /* _ASM_FUTEX_H */
|