mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-23 17:34:09 +07:00
30d6e0a419
There is code duplicated over all architectures' headers for futex_atomic_op_inuser: namely op decoding, the access_ok() check for uaddr, and the comparison of the result. Remove this duplication and leave to the arches only the needed assembly, which is now in arch_futex_atomic_op_inuser.

This effectively distributes Will Deacon's arm64 fix for undefined behaviour reported by UBSAN to all architectures. The fix was done in commit 5f16a046f8 ("arm64: futex: Fix undefined behaviour with FUTEX_OP_OPARG_SHIFT usage"). Look there for an example dump. And as suggested by Thomas, check for negative oparg too, because it was also reported to cause an undefined behaviour report.

Note that s390 removed the access_ok() check in d12a29703 ("s390/uaccess: remove pointless access_ok() checks") as access_ok() there returns true. We introduce it back to the helper for the sake of simplicity (it gets optimized away anyway).

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Russell King <rmk+kernel@armlinux.org.uk>
Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com> [s390]
Acked-by: Chris Metcalf <cmetcalf@mellanox.com> [for tile]
Reviewed-by: Darren Hart (VMware) <dvhart@infradead.org>
Reviewed-by: Will Deacon <will.deacon@arm.com> [core/arm64]
Cc: linux-mips@linux-mips.org
Cc: Rich Felker <dalias@libc.org>
Cc: linux-ia64@vger.kernel.org
Cc: linux-sh@vger.kernel.org
Cc: peterz@infradead.org
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: sparclinux@vger.kernel.org
Cc: Jonas Bonn <jonas@southpole.se>
Cc: linux-s390@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: linux-hexagon@vger.kernel.org
Cc: Helge Deller <deller@gmx.de>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: linux-snps-arc@lists.infradead.org
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: linux-xtensa@linux-xtensa.org
Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
Cc: openrisc@lists.librecores.org
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Stafford Horne <shorne@gmail.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: Richard Henderson <rth@twiddle.net>
Cc: Chris Zankel <chris@zankel.net>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Tony Luck <tony.luck@intel.com>
Cc: linux-parisc@vger.kernel.org
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: linux-alpha@vger.kernel.org
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: "David S. Miller" <davem@davemloft.net>
Link: http://lkml.kernel.org/r/20170824073105.3901-1-jslaby@suse.cz
167 lines
4.2 KiB
C
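For reference, the common helper this change introduces (the caller of the per-arch arch_futex_atomic_op_inuser() defined below) looks roughly like the following sketch. It is reconstructed from the commit message's description (op decoding, the access_ok() check, the negative-oparg check, and the comparison of the result), so treat the exact masks and layout as illustrative rather than the verbatim diff:

/* Sketch of the factored-out common helper; paraphrased, not the exact diff. */
static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
        unsigned int op  = (encoded_op & 0x70000000) >> 28;
        unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
        int oparg  = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
        int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
        int oldval, ret;

        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
                /* Reject negative/oversized shifts instead of hitting UB in 1 << oparg. */
                if (oparg < 0 || oparg > 31)
                        return -EINVAL;
                oparg = 1 << oparg;
        }

        /* The access_ok() check lives in the helper again, once for all arches. */
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;

        /* Only the atomic operation itself is left to the architecture. */
        ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
        if (ret)
                return ret;

        /* The comparison of the result is shared as well. */
        switch (cmp) {
        case FUTEX_OP_CMP_EQ: return oldval == cmparg;
        case FUTEX_OP_CMP_NE: return oldval != cmparg;
        case FUTEX_OP_CMP_LT: return oldval <  cmparg;
        case FUTEX_OP_CMP_GE: return oldval >= cmparg;
        case FUTEX_OP_CMP_LE: return oldval <= cmparg;
        case FUTEX_OP_CMP_GT: return oldval >  cmparg;
        default:              return -ENOSYS;
        }
}

With this shape, an architecture only supplies the pagefault-safe atomic op; everything else, including the oparg shift that previously triggered UBSAN, is handled once in common code.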
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * These routines make two important assumptions:
 *
 * 1. atomic_t is really an int and can be freely cast back and forth
 *    (validated in __init_atomic_per_cpu).
 *
 * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using
 *    the same locking convention that all the kernel atomic routines use.
 */

#ifndef _ASM_TILE_FUTEX_H
#define _ASM_TILE_FUTEX_H

#ifndef __ASSEMBLY__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <asm/atomic.h>

/*
 * Support macros for futex operations.  Do not use these macros directly.
 * They assume "ret", "val", "oparg", and "uaddr" in the lexical context.
 * __futex_cmpxchg() additionally assumes "oldval".
 */

#ifdef __tilegx__

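/*
 * Each __futex_asm() op stores 0 in "ret" on success; if the user
 * access faults, the __ex_table entry redirects to the fixup code,
 * which stores -EFAULT in "ret" instead.
 */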
#define __futex_asm(OP) \
        asm("1: {" #OP " %1, %3, %4; movei %0, 0 }\n"           \
            ".pushsection .fixup,\"ax\"\n"                      \
            "0: { movei %0, %5; j 9f }\n"                       \
            ".section __ex_table,\"a\"\n"                       \
            ".align 8\n"                                        \
            ".quad 1b, 0b\n"                                    \
            ".popsection\n"                                     \
            "9:"                                                \
            : "=r" (ret), "=r" (val), "+m" (*(uaddr))           \
            : "r" (uaddr), "r" (oparg), "i" (-EFAULT))

#define __futex_set() __futex_asm(exch4)
#define __futex_add() __futex_asm(fetchadd4)
#define __futex_or() __futex_asm(fetchor4)
#define __futex_andn() ({ oparg = ~oparg; __futex_asm(fetchand4); })
#define __futex_cmpxchg() \
        ({ __insn_mtspr(SPR_CMPEXCH_VALUE, oldval); __futex_asm(cmpexch4); })

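/*
 * Unlike the ops above, XOR is emulated with a __futex_cmpxchg()
 * retry loop rather than a single fetch-style instruction.
 */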
#define __futex_xor()                                           \
        ({                                                      \
                u32 oldval, n = oparg;                          \
                if ((ret = __get_user(oldval, uaddr)) == 0) {   \
                        do {                                    \
                                oparg = oldval ^ n;             \
                                __futex_cmpxchg();              \
                        } while (ret == 0 && oldval != val);    \
                }                                               \
        })

/* No need to prefetch, since the atomic ops go to the home cache anyway. */
#define __futex_prolog()

#else

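/*
 * On the 32-bit chips these ops call out to the out-of-line
 * __atomic32_* helpers, serialized by the hashed lock that
 * __futex_prolog() looks up below.
 */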
#define __futex_call(FN)                                                \
        {                                                               \
                struct __get_user gu = FN((u32 __force *)uaddr, lock, oparg); \
                val = gu.val;                                           \
                ret = gu.err;                                           \
        }

#define __futex_set() __futex_call(__atomic32_xchg)
#define __futex_add() __futex_call(__atomic32_xchg_add)
#define __futex_or() __futex_call(__atomic32_fetch_or)
#define __futex_andn() __futex_call(__atomic32_fetch_andn)
#define __futex_xor() __futex_call(__atomic32_fetch_xor)

#define __futex_cmpxchg()                                               \
        {                                                               \
                struct __get_user gu = __atomic32_cmpxchg((u32 __force *)uaddr, \
                                                          lock, oldval, oparg); \
                val = gu.val;                                           \
                ret = gu.err;                                           \
        }

/*
 * Find the lock pointer for the atomic calls to use, and issue a
 * prefetch to the user address to bring it into cache.  Similar to
 * __atomic_setup(), but we can't do a read into the L1 since it might
 * fault; instead we do a prefetch into the L2.
 */
#define __futex_prolog()                                        \
        int *lock;                                              \
        __insn_prefetch(uaddr);                                 \
        lock = __atomic_hashed_lock((int __force *)uaddr)
#endif

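/*
 * The generic futex_atomic_op_inuser() has already decoded op/oparg
 * and checked uaddr with access_ok() before calling this arch hook;
 * only the atomic operation itself is done here.
 */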
static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
                u32 __user *uaddr)
{
        int uninitialized_var(val), ret;

        __futex_prolog();

        /* The 32-bit futex code makes this assumption, so validate it here. */
        BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));

        pagefault_disable();
        switch (op) {
        case FUTEX_OP_SET:
                __futex_set();
                break;
        case FUTEX_OP_ADD:
                __futex_add();
                break;
        case FUTEX_OP_OR:
                __futex_or();
                break;
        case FUTEX_OP_ANDN:
                __futex_andn();
                break;
        case FUTEX_OP_XOR:
                __futex_xor();
                break;
        default:
                ret = -ENOSYS;
                break;
        }
        pagefault_enable();

        if (!ret)
                *oval = val;

        return ret;
}

static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                                                u32 oldval, u32 oparg)
{
        int ret, val;

        __futex_prolog();

        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;

        __futex_cmpxchg();

        *uval = val;
        return ret;
}

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_FUTEX_H */