mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 07:05:12 +07:00
26deb04342
CONFIG_KASAN implements wrappers for memcpy() memmove() and memset()
Those wrappers perform the verification and then call __memcpy(),
__memmove() and __memset() respectively. Architectures are therefore
expected to rename their optimised functions that way.
For files on which KASAN is inhibited, #defines are used to allow
them to directly call optimised versions of the functions without
going through the KASAN wrappers.
See commit 393f203f5f
("x86_64: kasan: add interceptors for
memset/memmove/memcpy functions") for details.
Other string / mem functions do not (yet) have kasan wrappers,
we therefore have to fall back to the generic versions when
KASAN is active, otherwise KASAN checks would be skipped.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
[mpe: Fixups to keep selftests working]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
86 lines
2.5 KiB
C
86 lines
2.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _ASM_POWERPC_STRING_H
|
|
#define _ASM_POWERPC_STRING_H
|
|
|
|
#ifdef __KERNEL__
|
|
|
|
#ifndef CONFIG_KASAN
|
|
#define __HAVE_ARCH_STRNCPY
|
|
#define __HAVE_ARCH_STRNCMP
|
|
#define __HAVE_ARCH_MEMCHR
|
|
#define __HAVE_ARCH_MEMCMP
|
|
#define __HAVE_ARCH_MEMSET16
|
|
#endif
|
|
|
|
#define __HAVE_ARCH_MEMSET
|
|
#define __HAVE_ARCH_MEMCPY
|
|
#define __HAVE_ARCH_MEMMOVE
|
|
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE
|
|
|
|
extern char * strcpy(char *,const char *);
|
|
extern char * strncpy(char *,const char *, __kernel_size_t);
|
|
extern __kernel_size_t strlen(const char *);
|
|
extern int strcmp(const char *,const char *);
|
|
extern int strncmp(const char *, const char *, __kernel_size_t);
|
|
extern char * strcat(char *, const char *);
|
|
extern void * memset(void *,int,__kernel_size_t);
|
|
extern void * memcpy(void *,const void *,__kernel_size_t);
|
|
extern void * memmove(void *,const void *,__kernel_size_t);
|
|
extern int memcmp(const void *,const void *,__kernel_size_t);
|
|
extern void * memchr(const void *,int,__kernel_size_t);
|
|
extern void * memcpy_flushcache(void *,const void *,__kernel_size_t);
|
|
|
|
void *__memset(void *s, int c, __kernel_size_t count);
|
|
void *__memcpy(void *to, const void *from, __kernel_size_t n);
|
|
void *__memmove(void *to, const void *from, __kernel_size_t n);
|
|
|
|
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
|
|
/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the non-instrumented versions of the mem* functions.
 */
|
|
#define memcpy(dst, src, len) __memcpy(dst, src, len)
|
|
#define memmove(dst, src, len) __memmove(dst, src, len)
|
|
#define memset(s, c, n) __memset(s, c, n)
|
|
|
|
#ifndef __NO_FORTIFY
|
|
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
|
|
#endif
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_PPC64
|
|
#ifndef CONFIG_KASAN
|
|
#define __HAVE_ARCH_MEMSET32
|
|
#define __HAVE_ARCH_MEMSET64
|
|
|
|
extern void *__memset16(uint16_t *, uint16_t v, __kernel_size_t);
|
|
extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
|
|
extern void *__memset64(uint64_t *, uint64_t v, __kernel_size_t);
|
|
|
|
static inline void *memset16(uint16_t *p, uint16_t v, __kernel_size_t n)
|
|
{
|
|
return __memset16(p, v, n * 2);
|
|
}
|
|
|
|
static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n)
|
|
{
|
|
return __memset32(p, v, n * 4);
|
|
}
|
|
|
|
static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
|
|
{
|
|
return __memset64(p, v, n * 8);
|
|
}
|
|
#endif
|
|
#else
|
|
#ifndef CONFIG_KASAN
|
|
#define __HAVE_ARCH_STRLEN
|
|
#endif
|
|
|
|
extern void *memset16(uint16_t *, uint16_t, __kernel_size_t);
|
|
#endif
|
|
#endif /* __KERNEL__ */
|
|
|
|
#endif /* _ASM_POWERPC_STRING_H */
|