commit 9063c61fd5
The discussion about using "access_ok()" in get_user_pages_fast() (see
commit 7f81890687: "x86: don't use 'access_ok()' as a range check in
get_user_pages_fast()" for details and end result) made us notice that
x86-64 was really being very sloppy about virtual address checking.
So be way more careful and straightforward about masking x86-64 virtual
addresses:
 - All the VIRTUAL_MASK* variants now cover half of the address
   space; it's not like we can use the full mask on a signed
   integer, and the larger mask just invites mistakes when
   applying it to either half of the 48-bit address space.

 - /proc/kcore's kc_offset_to_vaddr() becomes a lot more
   obvious when it transforms a file offset into a
   (kernel-half) virtual address.

 - Unify/simplify the 32-bit and 64-bit USER_DS definition to
   be based on TASK_SIZE_MAX (see the sketch after this list).
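
Roughly, the resulting definitions look like this (a simplified sketch
of the post-change headers, not the literal diff; the exact homes and
spellings live in the x86 tree):

	/* mask only the low, user half of the 48-bit space */
	#define __VIRTUAL_MASK_SHIFT	47
	#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)

	/* kernel-half virtual address: just set all the high bits */
	#define kc_offset_to_vaddr(o)	((o) | ~__VIRTUAL_MASK)

	/* user space ends one guard page below the canonical boundary */
	#define TASK_SIZE_MAX	((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE)
	#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)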
This cleanup and more careful/obvious user virtual address checking also
uncovered a buglet in the x86-64 implementation of strnlen_user(): it
would do an "access_ok()" check on the whole potential area, even if the
string itself was much shorter, and thus return an error even for valid
strings. Our sloppy checking had hidden this.
So this fixes 'strnlen_user()' to do this properly, the same way we
already handled user strings in 'strncpy_from_user()'. Namely by just
checking the first byte, and then relying on fault handling for the
rest. That always works, since we impose a guard page that cannot be
mapped at the end of the user space address space (and even if we
didn't, we'd have the address space hole).
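
In shape, the strnlen_user() fix looks like this (a sketch of the idea,
not the literal diff):

	/* before: checked the whole potential area, so a short, valid
	   string near the top of user space could still fail */
	if (!access_ok(VERIFY_READ, s, n))
		return 0;

	/* after: probe only the first byte; the guard page and fault
	   handling catch everything past it */
	if (!access_ok(VERIFY_READ, s, 1))
		return 0;
	return __strnlen_user(s, n);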
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
184 lines | 4.0 KiB | C
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/module.h>
#include <asm/uaccess.h>

/*
 * Copy a null terminated string from userspace.
 *
 * Returns the length of the copied string (excluding the terminating
 * NUL, which is copied as well), "count" if no NUL was found within
 * "count" bytes, or -EFAULT if the access faulted.
 */

#define __do_strncpy_from_user(dst,src,count,res)			   \
do {									   \
	long __d0, __d1, __d2;						   \
	might_fault();							   \
	__asm__ __volatile__(						   \
		"	testq %1,%1\n"					   \
		"	jz 2f\n"					   \
		"0:	lodsb\n"					   \
		"	stosb\n"					   \
		"	testb %%al,%%al\n"				   \
		"	jz 1f\n"					   \
		"	decq %1\n"					   \
		"	jnz 0b\n"					   \
		"1:	subq %1,%0\n"					   \
		"2:\n"							   \
		".section .fixup,\"ax\"\n"				   \
		"3:	movq %5,%0\n"					   \
		"	jmp 2b\n"					   \
		".previous\n"						   \
		_ASM_EXTABLE(0b,3b)					   \
		: "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),   \
		  "=&D" (__d2)						   \
		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
		: "memory");						   \
} while (0)

long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res;
	__do_strncpy_from_user(dst, src, count, res);
	return res;
}
EXPORT_SYMBOL(__strncpy_from_user);

long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res = -EFAULT;
	if (access_ok(VERIFY_READ, src, 1))
		return __strncpy_from_user(dst, src, count);
	return res;
}
EXPORT_SYMBOL(strncpy_from_user);

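/*
 * Usage sketch (illustration only, not part of this file): the typical
 * caller pattern for strncpy_from_user(). "kbuf" and "ubuf" are
 * hypothetical names.
 *
 *	char kbuf[64];
 *	long n = strncpy_from_user(kbuf, ubuf, sizeof(kbuf));
 *	if (n < 0)
 *		return n;		// a fault: -EFAULT
 *	if (n == sizeof(kbuf))
 *		return -EINVAL;		// no NUL within the buffer
 *	// kbuf now holds a NUL-terminated string of length n
 */
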
/*
 * Zero Userspace
 *
 * Returns the number of bytes that could *not* be cleared;
 * 0 means the whole range was zeroed.
 */

unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_fault();
	/* no memory constraint because it doesn't change any memory gcc
	   knows about */
	asm volatile(
		"	testq  %[size8],%[size8]\n"
		"	jz     4f\n"
		"0:	movq %[zero],(%[dst])\n"
		"	addq   %[eight],%[dst]\n"
		"	decl %%ecx ; jnz   0b\n"
		"4:	movq  %[size1],%%rcx\n"
		"	testl %%ecx,%%ecx\n"
		"	jz 2f\n"
		"1:	movb %b[zero],(%[dst])\n"
		"	incq %[dst]\n"
		"	decl %%ecx ; jnz  1b\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	lea 0(%[size1],%[size8],8),%[size8]\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(0b,3b)
		_ASM_EXTABLE(1b,2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
		  [zero] "r" (0UL), [eight] "r" (8UL));
	return size;
}
EXPORT_SYMBOL(__clear_user);

unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);

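/*
 * Usage sketch (illustration only, not part of this file): pad the
 * unwritten tail of a user buffer with zeroes, as a read() path might.
 * "ubuf", "copied" and "len" are hypothetical names.
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;		// nonzero return = bytes left unzeroed
 */
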
/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than N if too long
 */

long __strnlen_user(const char __user *s, long n)
{
	long res = 0;
	char c;

	while (1) {
		if (res > n)
			return n + 1;
		if (__get_user(c, s))
			return 0;
		if (!c)
			return res + 1;
		res++;
		s++;
	}
}
EXPORT_SYMBOL(__strnlen_user);

long strnlen_user(const char __user *s, long n)
{
	if (!access_ok(VERIFY_READ, s, 1))
		return 0;
	return __strnlen_user(s, n);
}
EXPORT_SYMBOL(strnlen_user);

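/*
 * Usage sketch (illustration only, not part of this file): bound a
 * user string before copying it. "uname" is a hypothetical user
 * pointer; PATH_MAX is just an example limit.
 *
 *	long len = strnlen_user(uname, PATH_MAX);
 *	if (!len)
 *		return -EFAULT;		// 0: the first byte already faulted
 *	if (len > PATH_MAX)
 *		return -ENAMETOOLONG;	// no NUL within PATH_MAX bytes
 *	// len includes the trailing NUL, so len bytes are safe to copy
 */
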
long strlen_user(const char __user *s)
{
	long res = 0;
	char c;

	for (;;) {
		if (get_user(c, s))
			return 0;
		if (!c)
			return res + 1;
		res++;
		s++;
	}
}
EXPORT_SYMBOL(strlen_user);

unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
{
	if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
		return copy_user_generic((__force void *)to, (__force void *)from, len);
	}
	return len;
}
EXPORT_SYMBOL(copy_in_user);

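/*
 * Usage sketch (illustration only, not part of this file): move bytes
 * between two user buffers without a kernel bounce buffer, as compat
 * paths sometimes do. "udst" and "usrc" are hypothetical names.
 *
 *	if (copy_in_user(udst, usrc, len))
 *		return -EFAULT;		// nonzero = bytes not copied
 */
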
/*
 * Try to copy the last bytes and clear the rest if needed.
 * Since a protection fault in copy_from/to_user is not a normal
 * situation, it is not necessary to optimize tail handling.
 *
 * Returns the number of bytes that could not be copied; when
 * "zerorest" is set, the remaining destination bytes are zeroed.
 */
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
{
	char c;
	unsigned zero_len;

	/* copy byte by byte until one side faults */
	for (; len; --len) {
		if (__get_user_nocheck(c, from++, sizeof(char)))
			break;
		if (__put_user_nocheck(c, to++, sizeof(char)))
			break;
	}

	/* zero whatever could not be copied, if requested */
	for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
		if (__put_user_nocheck(c, to++, sizeof(char)))
			break;
	return len;
}