96d4f267e4
Nobody has actually used the type (VERIFY_READ vs VERIFY_WRITE) argument of the user address range verification function since we got rid of the old racy i386-only code to walk page tables by hand.

It existed because the original 80386 would not honor the write protect bit when in kernel mode, so you had to do COW by hand before doing any user access. But we haven't supported that in a long time, and these days the 'type' argument is a purely historical artifact.

A discussion about extending 'user_access_begin()' to do the range checking resulted in this patch, because there is no way we're going to move the old VERIFY_xyz interface to that model. And it's best done at the end of the merge window when I've done most of my merges, so let's just get this done once and for all.

This patch was mostly done with a sed-script, with manual fix-ups for the cases that weren't of the trivial 'access_ok(VERIFY_xyz' form.

There were a couple of notable cases:

 - csky still had the old "verify_area()" name as an alias.

 - the iter_iov code had magical hardcoded knowledge of the actual values of VERIFY_{READ,WRITE} (not that they mattered, since nothing really used it)

 - microblaze used the type argument for a debug printout

but other than those oddities this should be a total no-op patch.

I tried to fix up all architectures, did fairly extensive grepping for access_ok() uses, and the changes are trivial, but I may have missed something. Any missed conversion should be trivially fixable, though.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
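A minimal sketch of the conversion the sed-script applied (the call site shown here is hypothetical; the two access_ok() signatures are the before and after of this commit, and futex_atomic_cmpxchg_inatomic() in the file below uses the new form):

	/* Before this commit: access_ok() took a 'type' argument that nothing used. */
	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	/* After this commit: only the user pointer and the size are passed. */
	if (!access_ok(uaddr, sizeof(u32)))
		return -EFAULT;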
130 lines
3.0 KiB
C
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_FUTEX_H
#define __ASM_FUTEX_H

#ifdef __KERNEL__

#include <linux/futex.h>
#include <linux/uaccess.h>

#include <asm/errno.h>

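/*
 * Expand to one LL/SC read-modify-write on the user futex word: label 1
 * load-exclusives the old value into %w1, 'insn' computes the new value
 * in %w0, and label 2 store-exclusives it back, retrying on contention.
 * Faults at 1 or 2 are redirected by the exception table to label 4,
 * which writes -EFAULT into 'ret'. User access is only open around the
 * sequence, between uaccess_enable() and uaccess_disable().
 */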
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)	\
do {									\
	uaccess_enable();						\
	asm volatile(							\
"	prfm	pstl1strm, %2\n"					\
"1:	ldxr	%w1, %2\n"						\
	insn "\n"							\
"2:	stlxr	%w3, %w0, %2\n"						\
"	cbnz	%w3, 1b\n"						\
"	dmb	ish\n"							\
"3:\n"									\
"	.pushsection .fixup,\"ax\"\n"					\
"	.align	2\n"							\
"4:	mov	%w0, %w5\n"						\
"	b	3b\n"							\
"	.popsection\n"							\
	_ASM_EXTABLE(1b, 4b)						\
	_ASM_EXTABLE(2b, 4b)						\
	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
	: "r" (oparg), "Ir" (-EFAULT)					\
	: "memory");							\
	uaccess_disable();						\
} while (0)

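/*
 * Run one futex operation on *_uaddr with page faults disabled; on
 * success the old value of the futex word is passed back through *oval.
 * The user pointer is sanitised with __uaccess_mask_ptr() before use.
 */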
static inline int
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
{
	int oldval = 0, ret, tmp;
	u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);

	pagefault_disable();

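	/*
	 * The instruction string handed to __futex_atomic_op() computes the
	 * new value into %w0; %w1 holds the loaded old value and %w4 holds
	 * oparg. FUTEX_OP_ANDN passes ~oparg so a plain 'and' implements
	 * and-not.
	 */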
	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("mov	%w0, %w4",
				  ret, oldval, uaddr, tmp, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("add	%w0, %w1, %w4",
				  ret, oldval, uaddr, tmp, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("orr	%w0, %w1, %w4",
				  ret, oldval, uaddr, tmp, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("and	%w0, %w1, %w4",
				  ret, oldval, uaddr, tmp, ~oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("eor	%w0, %w1, %w4",
				  ret, oldval, uaddr, tmp, oparg);
		break;
	default:
		ret = -ENOSYS;
	}

	pagefault_enable();

	if (!ret)
		*oval = oldval;

	return ret;
}

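/*
 * Atomically replace the futex word with newval only if it currently
 * holds oldval, using the same LL/SC pattern as above: the comparison
 * is done with sub/cbnz to label 3 rather than a separate compare.
 * Note the two-argument access_ok() introduced by this commit.
 */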
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
			      u32 oldval, u32 newval)
{
	int ret = 0;
	u32 val, tmp;
	u32 __user *uaddr;

	if (!access_ok(_uaddr, sizeof(u32)))
		return -EFAULT;

	uaddr = __uaccess_mask_ptr(_uaddr);
	uaccess_enable();
	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
"	prfm	pstl1strm, %2\n"
"1:	ldxr	%w1, %2\n"
"	sub	%w3, %w1, %w4\n"
"	cbnz	%w3, 3f\n"
"2:	stlxr	%w3, %w5, %2\n"
"	cbnz	%w3, 1b\n"
"	dmb	ish\n"
"3:\n"
"	.pushsection .fixup,\"ax\"\n"
"4:	mov	%w0, %w6\n"
"	b	3b\n"
"	.popsection\n"
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 4b)
	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
	: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
	: "memory");
	uaccess_disable();

	*uval = val;
	return ret;
}

#endif /* __KERNEL__ */
#endif /* __ASM_FUTEX_H */