mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:18:45 +07:00
ec6347bb43
In reaction to a proposal to introduce a memcpy_mcsafe_fast() implementation Linus points out that memcpy_mcsafe() is poorly named relative to communicating the scope of the interface. Specifically what addresses are valid to pass as source, destination, and what faults / exceptions are handled. Of particular concern is that even though x86 might be able to handle the semantics of copy_mc_to_user() with its common copy_user_generic() implementation other archs likely need / want an explicit path for this case: On Fri, May 1, 2020 at 11:28 AM Linus Torvalds <torvalds@linux-foundation.org> wrote: > > On Thu, Apr 30, 2020 at 6:21 PM Dan Williams <dan.j.williams@intel.com> wrote: > > > > However now I see that copy_user_generic() works for the wrong reason. > > It works because the exception on the source address due to poison > > looks no different than a write fault on the user address to the > > caller, it's still just a short copy. So it makes copy_to_user() work > > for the wrong reason relative to the name. > > Right. > > And it won't work that way on other architectures. On x86, we have a > generic function that can take faults on either side, and we use it > for both cases (and for the "in_user" case too), but that's an > artifact of the architecture oddity. > > In fact, it's probably wrong even on x86 - because it can hide bugs - > but writing those things is painful enough that everybody prefers > having just one function. Replace a single top-level memcpy_mcsafe() with either copy_mc_to_user(), or copy_mc_to_kernel(). Introduce an x86 copy_mc_fragile() name as the rename for the low-level x86 implementation formerly named memcpy_mcsafe(). It is used as the slow / careful backend that is supplanted by a fast copy_mc_generic() in a follow-on patch. One side-effect of this reorganization is that separating copy_mc_64.S to its own file means that perf no longer needs to track dependencies for its memcpy_64.S benchmarks. [ bp: Massage a bit. 
] Signed-off-by: Dan Williams <dan.j.williams@intel.com> Signed-off-by: Borislav Petkov <bp@suse.de> Reviewed-by: Tony Luck <tony.luck@intel.com> Acked-by: Michael Ellerman <mpe@ellerman.id.au> Cc: <stable@vger.kernel.org> Link: http://lore.kernel.org/r/CAHk-=wjSqtXAqfUJxFtWNwmguFASTgB0dz1dT3V-78Quiezqbg@mail.gmail.com Link: https://lkml.kernel.org/r/160195561680.2163339.11574962055305783722.stgit@dwillia2-desk3.amr.corp.intel.com
86 lines
2.5 KiB
C
86 lines
2.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _ASM_POWERPC_STRING_H
|
|
#define _ASM_POWERPC_STRING_H
|
|
|
|
#ifdef __KERNEL__
|
|
|
|
#ifndef CONFIG_KASAN
|
|
#define __HAVE_ARCH_STRNCPY
|
|
#define __HAVE_ARCH_STRNCMP
|
|
#define __HAVE_ARCH_MEMCHR
|
|
#define __HAVE_ARCH_MEMCMP
|
|
#define __HAVE_ARCH_MEMSET16
|
|
#endif
|
|
|
|
#define __HAVE_ARCH_MEMSET
|
|
#define __HAVE_ARCH_MEMCPY
|
|
#define __HAVE_ARCH_MEMMOVE
|
|
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE
|
|
|
|
extern char * strcpy(char *,const char *);
|
|
extern char * strncpy(char *,const char *, __kernel_size_t);
|
|
extern __kernel_size_t strlen(const char *);
|
|
extern int strcmp(const char *,const char *);
|
|
extern int strncmp(const char *, const char *, __kernel_size_t);
|
|
extern char * strcat(char *, const char *);
|
|
extern void * memset(void *,int,__kernel_size_t);
|
|
extern void * memcpy(void *,const void *,__kernel_size_t);
|
|
extern void * memmove(void *,const void *,__kernel_size_t);
|
|
extern int memcmp(const void *,const void *,__kernel_size_t);
|
|
extern void * memchr(const void *,int,__kernel_size_t);
|
|
void memcpy_flushcache(void *dest, const void *src, size_t size);
|
|
|
|
void *__memset(void *s, int c, __kernel_size_t count);
|
|
void *__memcpy(void *to, const void *from, __kernel_size_t n);
|
|
void *__memmove(void *to, const void *from, __kernel_size_t n);
|
|
|
|
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
|
|
/*
|
|
* For files that are not instrumented (e.g. mm/slub.c) we
|
|
* should use not instrumented version of mem* functions.
|
|
*/
|
|
#define memcpy(dst, src, len) __memcpy(dst, src, len)
|
|
#define memmove(dst, src, len) __memmove(dst, src, len)
|
|
#define memset(s, c, n) __memset(s, c, n)
|
|
|
|
#ifndef __NO_FORTIFY
|
|
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
|
|
#endif
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_PPC64
|
|
#ifndef CONFIG_KASAN
|
|
#define __HAVE_ARCH_MEMSET32
|
|
#define __HAVE_ARCH_MEMSET64
|
|
|
|
extern void *__memset16(uint16_t *, uint16_t v, __kernel_size_t);
|
|
extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
|
|
extern void *__memset64(uint64_t *, uint64_t v, __kernel_size_t);
|
|
|
|
/*
 * Fill @n 16-bit elements at @p with the value @v.
 *
 * The out-of-line backend __memset16() takes a length in bytes, so the
 * element count is scaled by the element size before the call.
 * Returns @p, like memset().
 */
static inline void *memset16(uint16_t *p, uint16_t v, __kernel_size_t n)
{
	return __memset16(p, v, n * sizeof(uint16_t));
}
|
|
|
|
/*
 * Fill @n 32-bit elements at @p with the value @v.
 *
 * __memset32() expects a byte count, so convert the element count by
 * scaling with the element size. Returns @p, like memset().
 */
static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n)
{
	return __memset32(p, v, n * sizeof(uint32_t));
}
|
|
|
|
/*
 * Fill @n 64-bit elements at @p with the value @v.
 *
 * __memset64() expects a byte count, so convert the element count by
 * scaling with the element size. Returns @p, like memset().
 */
static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
{
	return __memset64(p, v, n * sizeof(uint64_t));
}
|
|
#endif
|
|
#else
|
|
#ifndef CONFIG_KASAN
|
|
#define __HAVE_ARCH_STRLEN
|
|
#endif
|
|
|
|
extern void *memset16(uint16_t *, uint16_t, __kernel_size_t);
|
|
#endif
|
|
#endif /* __KERNEL__ */
|
|
|
|
#endif /* _ASM_POWERPC_STRING_H */
|