Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
0316bec22e
With this patch KASAN is able to catch bugs in memory allocated by slub. Initially, all objects in a newly allocated slab page are marked as redzone. Later, when a slub object is allocated, the number of bytes requested by the caller is marked as accessible, and the rest of the object (including slub's metadata) is marked as redzone (inaccessible).

We also mark an object as accessible when ksize() is called for it. There are places in the kernel where ksize() is called to find out the size of the actually allocated area. Such callers may validly access the whole allocated memory, so it has to be marked as accessible.

Code in slub.c and slab_common.c may validly access an object's metadata, so instrumentation is disabled for these files.

Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Signed-off-by: Dmitry Chernenkov <dmitryc@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Konstantin Serebryany <kcc@google.com>
Signed-off-by: Andrey Konovalov <adech.fo@gmail.com>
Cc: Yuri Gribov <tetra2005@gmail.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
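For illustration, here is a minimal, hypothetical sketch in the style of the KASAN test module; the function name kmalloc_oob_sketch() and the 17-byte size are invented for this example and are not part of the commit. Only the 17 requested bytes are unpoisoned at allocation time, so the one-past-the-end write lands in the area poisoned with KASAN_KMALLOC_REDZONE and is reported.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

static noinline void kmalloc_oob_sketch(void)
{
        size_t size = 17;
        char *ptr = kmalloc(size, GFP_KERNEL);

        if (!ptr)
                return;

        /*
         * Bytes 0..16 were marked accessible by the allocation; the rest
         * of the slub object still carries KASAN_KMALLOC_REDZONE, so this
         * write triggers a slab-out-of-bounds report.
         */
        ptr[size] = 'x';

        /*
         * ksize() returns the size actually allocated and, as the
         * changelog above notes, re-marks the whole object as accessible,
         * so touching all of it afterwards is considered valid.
         */
        memset(ptr, 0, ksize(ptr));

        kfree(ptr);
}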
42 lines
1.1 KiB
C
#ifndef __MM_KASAN_KASAN_H
#define __MM_KASAN_KASAN_H

#include <linux/kasan.h>

#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
#define KASAN_SHADOW_MASK       (KASAN_SHADOW_SCALE_SIZE - 1)

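/*
 * Each shadow byte describes one KASAN_SHADOW_SCALE_SIZE-byte granule of
 * memory (8 bytes with the arch-defined KASAN_SHADOW_SCALE_SHIFT of 3):
 * 0 means the whole granule is accessible, 1..7 mean only the first N
 * bytes are, and the special values below poison the whole granule while
 * recording why.
 */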
#define KASAN_FREE_PAGE         0xFF  /* page was freed */
#define KASAN_PAGE_REDZONE      0xFE  /* redzone for kmalloc_large allocations */
#define KASAN_KMALLOC_REDZONE   0xFC  /* redzone inside slub object */
#define KASAN_KMALLOC_FREE      0xFB  /* object was freed (kmem_cache_free/kfree) */

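/*
 * Filled in for a bad access and handed to the reporting functions below:
 * the address and size of the access, the first address whose shadow byte
 * flagged it as bad, whether it was a write, and the instruction pointer
 * of the offending access.
 */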
struct kasan_access_info {
        const void *access_addr;
        const void *first_bad_addr;
        size_t access_size;
        bool is_write;
        unsigned long ip;
};

void kasan_report_error(struct kasan_access_info *info);
void kasan_report_user_access(struct kasan_access_info *info);

static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
        return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
                << KASAN_SHADOW_SCALE_SHIFT);
}

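/*
 * kasan_shadow_to_mem() above inverts the mem-to-shadow mapping used
 * throughout KASAN (address >> KASAN_SHADOW_SCALE_SHIFT, plus
 * KASAN_SHADOW_OFFSET); the report path uses it to turn the first bad
 * shadow byte back into the first bad memory address.
 */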
static inline bool kasan_enabled(void)
{
        return !current->kasan_depth;
}

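/*
 * kasan_depth is a per-task counter added to task_struct by the KASAN
 * patches; while it is non-zero, kasan_enabled() above returns false,
 * which lets KASAN skip checking and reporting for that task (for
 * example, to avoid recursing into itself).
 */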
void kasan_report(unsigned long addr, size_t size,
                bool is_write, unsigned long ip);

#endif