mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
commit f180053694
Impact: standardize IO on cached ops
On modern CPUs it is almost always a bad idea to use non-temporal stores,
as the regression fixed in this commit has shown:

  30d697f: x86: fix performance regression in write() syscall
The kernel simply has no good information about whether using non-temporal
stores is a good idea or not - and trying to add heuristics only increases
complexity and introduces fragility.
The regression on cached write()s took a very long time to find - over two
years. So don't take any chances and let the hardware decide how it makes
use of its caches.
The only exception is drivers/gpu/drm/i915/i915_gem.c: there we are
absolutely sure that another entity (the GPU) will pick up the dirty
data immediately and that the CPU will not touch that data before the
GPU does.
Also, keep the _nocache() primitives to make it easier for people to
experiment with these details. There may be more clear-cut cases where
non-cached copies can be used, outside of filemap.c.
Cc: Salman Qazi <sqazi@google.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
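
To make the distinction concrete, here is a minimal, hypothetical sketch
(not code from this tree; example_write(), kbuf and the buffer size are
made up) of how a driver would normally stick to the cached copy and only
reach for the _nocache variant in the rare i915-style case:

#include <linux/fs.h>
#include <linux/uaccess.h>

/* Hypothetical write handler -- illustrative only. */
static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t len, loff_t *ppos)
{
	char kbuf[128];

	if (len > sizeof(kbuf))
		len = sizeof(kbuf);

	/*
	 * Default case: the cached copy.  The kernel will very likely
	 * touch kbuf again, so let the hardware manage its caches.
	 */
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;

	/*
	 * Only when the CPU is known not to touch the data again before
	 * another agent (e.g. the GPU in i915) consumes it would the
	 * non-temporal variant be considered:
	 *
	 *	__copy_from_user_nocache(dst, ubuf, len);
	 */
	return len;
}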
#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/prefetch.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
unsigned long __must_check __copy_to_user_ll
		(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);
/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure he pins the user space address
 * so that we don't result in a page fault and sleep.
 *
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the x86 will not write
 * anything, so this is accurate.
 */
static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to,
					1, ret, 1);
			return ret;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to,
					2, ret, 2);
			return ret;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to,
					4, ret, 4);
			return ret;
		}
	}
	return __copy_to_user_ll(to, from, n);
}
/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	/* Avoid zeroing the tail if the copy fails..
	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
	 * but as the zeroing behaviour is only significant when n is not
	 * constant, that shouldn't be a problem.
	 */
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll_nozero(to, from, n);
}
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep.  In this case the
 * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
 * for explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll(to, from, n);
}
static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll_nocache(to, from, n);
}
static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}
unsigned long __must_check copy_to_user(void __user *to,
					const void *from, unsigned long n);
unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n);
long __must_check strncpy_from_user(char *dst, const char __user *src,
				    long count);
long __must_check __strncpy_from_user(char *dst,
				      const char __user *src, long count);
/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, LONG_MAX)
long strnlen_user(const char __user *str, long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

#endif /* _ASM_X86_UACCESS_32_H */
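
Since __copy_to_user() above is __always_inline and special-cases constant
sizes of 1, 2 and 4 bytes, a fixed-size copy collapses to a single
__put_user_size() store instead of a call to __copy_to_user_ll(). A hedged
sketch (report_status() and its arguments are hypothetical, and the caller
is assumed to have validated the pointer with access_ok()):

#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical helper -- shows the constant-size fast path. */
static int report_status(u32 __user *uptr, u32 status)
{
	/*
	 * sizeof(status) == 4 is a compile-time constant, so the switch
	 * in __copy_to_user_inatomic() reduces this to a single
	 * __put_user_size(..., 4, ...) rather than __copy_to_user_ll().
	 */
	if (__copy_to_user(uptr, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}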