mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-15 12:07:12 +07:00
f85612967c
The hooks that we modify are:

- Page fault handler (to handle kmemcheck faults)
- Debug exception handler (to hide pages after single-stepping the
  instruction that caused the page fault)

Also redefine memset() to use the optimized version if kmemcheck is
enabled.

(Thanks to Pekka Enberg for minimizing the impact on the page fault
handler.)

As kmemcheck doesn't handle MMX/SSE instructions (yet), we also disable
the optimized xor code, and rely instead on the generic C implementation
in order to avoid false-positive warnings.

Signed-off-by: Vegard Nossum <vegardno@ifi.uio.no>
[whitespace fixlet]
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegardno@ifi.uio.no>
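Since kmemcheck cannot yet decode MMX/SSE operands, the commit routes xor through the generic C path, whose loads and stores are plain integer accesses that the page-fault and debug hooks can trap. A minimal sketch of such a word-at-a-time C xor (the function name and signature here are illustrative, not the kernel's exact xor template):

#include <stddef.h>

/*
 * Illustrative sketch only: xor 'bytes' of p2 into p1 one unsigned long
 * at a time. Every access is an ordinary integer load/store, which is
 * exactly what kmemcheck can single-step and check, unlike MMX/SSE moves.
 */
static void xor_generic_2(size_t bytes, unsigned long *p1,
                          const unsigned long *p2)
{
        size_t lines = bytes / sizeof(unsigned long);
        size_t i;

        for (i = 0; i < lines; i++)
                p1[i] ^= p2[i];
}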
69 lines
1.8 KiB
C
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__

/* Written 2002 by Andi Kleen */

/* Only used for special circumstances. Stolen from i386/string.h */
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
        unsigned long d0, d1, d2;
        asm volatile("rep ; movsl\n\t"
                     "testb $2,%b4\n\t"
                     "je 1f\n\t"
                     "movsw\n"
                     "1:\ttestb $1,%b4\n\t"
                     "je 2f\n\t"
                     "movsb\n"
                     "2:"
                     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
                     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
                     : "memory");
        return to;
}
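/*
 * Worked example (comment added for exposition, not in the original
 * header): for n = 7, "rep movsl" runs with ecx = n/4 = 1 and copies
 * 4 bytes; bit 1 of n is set, so "movsw" copies 2 more; bit 0 is set,
 * so "movsb" copies the last byte, for a total of 4 + 2 + 1 = 7 bytes.
 * The string instructions advance rsi/rdi as they go, so the three
 * steps are contiguous.
 */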

/* Even with __builtin_ the compiler may decide to use the out of line
   function. */

#define __HAVE_ARCH_MEMCPY 1
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
extern void *memcpy(void *to, const void *from, size_t len);
#else
extern void *__memcpy(void *to, const void *from, size_t len);
#define memcpy(dst, src, len)                                  \
({                                                             \
        size_t __len = (len);                                  \
        void *__ret;                                           \
        if (__builtin_constant_p(len) && __len >= 64)          \
                __ret = __memcpy((dst), (src), __len);         \
        else                                                   \
                __ret = __builtin_memcpy((dst), (src), __len); \
        __ret;                                                 \
})
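/*
 * Worked example (comment added for exposition, not in the original
 * header): memcpy(dst, src, 128) has a compile-time-constant length
 * >= 64 and resolves to the out-of-line __memcpy(), while
 * memcpy(dst, src, 8) falls through to __builtin_memcpy(), which the
 * compiler can still expand inline for small constant sizes.
 */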
#endif
#else
/*
 * kmemcheck becomes very happy if we use the REP instructions unconditionally,
 * because it means that we know both memory operands in advance.
 */
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */
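The semantics of __inline_memcpy() are easy to check outside the kernel by lifting the inline asm into a small user-space harness. A hedged sketch (x86-64 only, not part of the kernel tree) that cross-checks it against libc memcpy() for every length up to 32:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define __always_inline inline __attribute__((always_inline))

/* Same rep-movs copy as the header above, compiled in user space. */
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
        unsigned long d0, d1, d2;
        asm volatile("rep ; movsl\n\t"
                     "testb $2,%b4\n\t"
                     "je 1f\n\t"
                     "movsw\n"
                     "1:\ttestb $1,%b4\n\t"
                     "je 2f\n\t"
                     "movsb\n"
                     "2:"
                     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
                     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
                     : "memory");
        return to;
}

int main(void)
{
        char src[32], a[32], b[32];
        size_t i, n;

        for (i = 0; i < sizeof(src); i++)
                src[i] = (char)i;

        /* Compare against libc memcpy for every length 0..32. */
        for (n = 0; n <= sizeof(src); n++) {
                memset(a, 0, sizeof(a));
                memset(b, 0, sizeof(b));
                __inline_memcpy(a, src, n);
                memcpy(b, src, n);
                assert(memcmp(a, b, sizeof(a)) == 0);
        }
        puts("ok: __inline_memcpy matches memcpy for n = 0..32");
        return 0;
}

Build with gcc -O2 on an x86-64 machine; the "=&D"/"=&S" constraints pin the destination and source to rdi/rsi, which is what the string instructions require.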