Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-28 11:18:45 +07:00)

commit f180053694
Impact: standardize IO on cached ops
On modern CPUs it is almost always a bad idea to use non-temporal stores,
as the regression fixed in this commit has shown:

  30d697f: x86: fix performance regression in write() syscall

The kernel simply has no good information about whether using non-temporal
stores is a good idea or not - and trying to add heuristics only increases
complexity and inserts fragility.
The regression on cached write()s took a very long time to find - over two
years. So don't take any chances and let the hardware decide how it makes
use of its caches.
The only exception is drivers/gpu/drm/i915/i915_gem.c: there we are
absolutely sure that another entity (the GPU) will pick up the dirty
data immediately and that the CPU will not touch that data before the
GPU does.
Also, keep the _nocache() primitives to make it easier for people to
experiment with these details. There may be more clear-cut cases where
non-cached copies can be used, outside of filemap.c.
Cc: Salman Qazi <sqazi@google.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
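
To make the cached-vs-non-temporal trade-off concrete, here is a minimal
userspace sketch (illustrative only - it is not part of this commit and does
not use the kernel's primitives) that copies a buffer once with ordinary
cached stores and once with SSE2 non-temporal stores. The non-temporal
variant only pays off when the CPU will not read the destination again soon,
which is exactly the judgment call the commit message says the kernel cannot
make reliably:

/*
 * Illustrative sketch, not kernel code: nt_copy() uses MOVNTDQ
 * (non-temporal) stores that bypass the cache, while memcpy() leaves
 * the destination cache-hot.
 */
#include <emmintrin.h>          /* SSE2: _mm_stream_si128, _mm_loadu_si128 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* dst must be 16-byte aligned and n a multiple of 16 (MOVNTDQ requirement). */
static void nt_copy(void *dst, const void *src, size_t n)
{
        __m128i *d = dst;
        const __m128i *s = src;
        size_t i;

        for (i = 0; i < n / 16; i++)
                _mm_stream_si128(&d[i], _mm_loadu_si128(&s[i]));
        _mm_sfence();           /* order the weakly-ordered NT stores */
}

int main(void)
{
        size_t n = 1 << 20;
        char *src = aligned_alloc(16, n);
        char *dst = aligned_alloc(16, n);

        if (!src || !dst)
                return 1;
        memset(src, 0xab, n);

        memcpy(dst, src, n);    /* cached stores: dst ends up in the cache */
        nt_copy(dst, src, n);   /* non-temporal stores: cache is bypassed  */

        printf("dst[0] = 0x%02x\n", (unsigned char)dst[0]);
        free(src);
        free(dst);
        return 0;
}

Something like "gcc -O2 nt_copy.c" suffices on x86-64, where SSE2 is part of
the baseline instruction set.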
209 lines · 5.7 KiB · C
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <linux/lockdep.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
        int ret = 0;

        might_fault();
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
        case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
                              ret, "b", "b", "=q", 1);
                return ret;
        case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
                              ret, "w", "w", "=r", 2);
                return ret;
        case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
                              ret, "l", "k", "=r", 4);
                return ret;
        case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
                              ret, "q", "", "=r", 8);
                return ret;
        case 10:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 10);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u16 *)(8 + (char *)dst),
                               (u16 __user *)(8 + (char __user *)src),
                               ret, "w", "w", "=r", 2);
                return ret;
        case 16:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u64 *)(8 + (char *)dst),
                               (u64 __user *)(8 + (char __user *)src),
                               ret, "q", "", "=r", 8);
                return ret;
        default:
                return copy_user_generic(dst, (__force void *)src, size);
        }
}

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
        int ret = 0;

        might_fault();
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
        case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
                              ret, "b", "b", "iq", 1);
                return ret;
        case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
                              ret, "w", "w", "ir", 2);
                return ret;
        case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
                              ret, "l", "k", "ir", 4);
                return ret;
        case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
                              ret, "q", "", "ir", 8);
                return ret;
        case 10:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 10);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
                __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 16:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 16);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
                __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
                               ret, "q", "", "ir", 8);
                return ret;
        default:
                return copy_user_generic((__force void *)dst, src, size);
        }
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
        int ret = 0;

        might_fault();
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        switch (size) {
        case 1: {
                u8 tmp;
                __get_user_asm(tmp, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                if (likely(!ret))
                        __put_user_asm(tmp, (u8 __user *)dst,
                                       ret, "b", "b", "iq", 1);
                return ret;
        }
        case 2: {
                u16 tmp;
                __get_user_asm(tmp, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                if (likely(!ret))
                        __put_user_asm(tmp, (u16 __user *)dst,
                                       ret, "w", "w", "ir", 2);
                return ret;
        }

        case 4: {
                u32 tmp;
                __get_user_asm(tmp, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                if (likely(!ret))
                        __put_user_asm(tmp, (u32 __user *)dst,
                                       ret, "l", "k", "ir", 4);
                return ret;
        }
        case 8: {
                u64 tmp;
                __get_user_asm(tmp, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                if (likely(!ret))
                        __put_user_asm(tmp, (u64 __user *)dst,
                                       ret, "q", "", "ir", 8);
                return ret;
        }
        default:
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        }
}

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
                                            unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
        return copy_user_generic((__force void *)dst, src, size);
}

extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
        might_sleep();
        return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
                                  unsigned size)
{
        return __copy_user_nocache(dst, src, size, 0);
}

unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);

#endif /* _ASM_X86_UACCESS_64_H */
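
For reference, here is a hypothetical sketch of how a caller might choose
between the cached and _nocache() copy variants declared above. The function
names are made up for illustration; the real users at the time were filemap.c
(cached) and i915_gem.c (non-temporal):

/* Hypothetical callers, for illustration only. */
#include <linux/errno.h>
#include <linux/uaccess.h>

/* Common case: the kernel will touch the copied data again soon, so the
 * plain cached copy_from_user() path is the right choice. */
static int fill_pagecache_buffer(void *kbuf, const void __user *ubuf,
                                 unsigned len)
{
        if (copy_from_user(kbuf, ubuf, len))
                return -EFAULT;
        return 0;
}

/* i915-style case: another agent (the GPU) consumes the data and the CPU
 * will not read it back, so the non-temporal _nocache variant avoids
 * polluting the CPU caches.  The caller is assumed to have validated the
 * user range with access_ok() already. */
static int fill_gpu_buffer(void *kbuf, const void __user *ubuf, unsigned len)
{
        if (__copy_from_user_nocache(kbuf, ubuf, len))
                return -EFAULT;
        return 0;
}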