mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:18:45 +07:00
bebf56a1b1
This feature lets us detect out-of-bounds accesses to global variables. It works both for globals in the kernel image and for globals in modules. Currently this won't work for symbols in user-specified sections (e.g. __init, __read_mostly, ...) The idea of this is simple. The compiler increases each global variable by the redzone size and adds constructors invoking the __asan_register_globals() function. Information about each global variable (address, size, size with redzone ...) is passed to __asan_register_globals() so we can poison the variable's redzone. This patch also forces module_alloc() to return an 8*PAGE_SIZE aligned address, making shadow memory handling ( kasan_module_alloc()/kasan_module_free() ) simpler. Such alignment guarantees that each shadow page backing the modules' address space corresponds to exactly one module_alloc() allocation. Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Konstantin Serebryany <kcc@google.com> Cc: Dmitry Chernenkov <dmitryc@google.com> Signed-off-by: Andrey Konovalov <adech.fo@gmail.com> Cc: Yuri Gribov <tetra2005@gmail.com> Cc: Konstantin Khlebnikov <koct9i@gmail.com> Cc: Sasha Levin <sasha.levin@oracle.com> Cc: Christoph Lameter <cl@linux.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Andi Kleen <andi@firstfloor.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
90 lines
2.8 KiB
C
90 lines
2.8 KiB
C
#ifndef _LINUX_KASAN_H
|
|
#define _LINUX_KASAN_H
|
|
|
|
#include <linux/types.h>
|
|
|
|
struct kmem_cache;
|
|
struct page;
|
|
|
|
#ifdef CONFIG_KASAN
|
|
|
|
#define KASAN_SHADOW_SCALE_SHIFT 3
|
|
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
|
|
|
|
#include <asm/kasan.h>
|
|
#include <linux/sched.h>
|
|
|
|
static inline void *kasan_mem_to_shadow(const void *addr)
|
|
{
|
|
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
|
|
+ KASAN_SHADOW_OFFSET;
|
|
}
|
|
|
|
/*
 * Enable reporting bugs after kasan_disable_current().
 * NOTE(review): enable *increments* kasan_depth while disable decrements —
 * confirm the counter polarity against the report path in mm/kasan.
 */
static inline void kasan_enable_current(void)
{
	current->kasan_depth++;
}
|
|
|
|
/*
 * Disable reporting bugs for current task.
 * Must be paired with a later kasan_enable_current(); the per-task
 * kasan_depth field acts as a nesting counter.
 */
static inline void kasan_disable_current(void)
{
	current->kasan_depth--;
}
|
|
|
|
/* Mark [address, address + size) as addressable (clear shadow poison). */
void kasan_unpoison_shadow(const void *address, size_t size);

/* Page allocator hooks (implemented in mm/kasan). */
void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);

/* Slab hooks: poison whole slab pages and individual objects' redzones. */
void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
void kasan_poison_object_data(struct kmem_cache *cache, void *object);

/* kmalloc/krealloc hooks for tracking object sizes and redzones. */
void kasan_kmalloc_large(const void *ptr, size_t size);
void kasan_kfree_large(const void *ptr);
void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
void kasan_krealloc(const void *object, size_t new_size);

void kasan_slab_alloc(struct kmem_cache *s, void *object);
void kasan_slab_free(struct kmem_cache *s, void *object);

/*
 * module_alloc() returns MODULE_ALIGN-aligned (8 * PAGE_SIZE) memory so that
 * each shadow page backing the modules' address space corresponds to exactly
 * one module_alloc() allocation (see the commit description).
 */
#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)

int kasan_module_alloc(void *addr, size_t size);
void kasan_module_free(void *addr);
|
|
|
|
#else /* CONFIG_KASAN */
|
|
|
|
/*
 * CONFIG_KASAN disabled: all hooks compile away to empty static inline
 * stubs. Signatures must mirror the CONFIG_KASAN prototypes exactly so
 * callers build identically in both configurations.
 */
#define MODULE_ALIGN 1

static inline void kasan_unpoison_shadow(const void *address, size_t size) {}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}

static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					void *object) {}

/*
 * Fix: parameter is const-qualified to match the CONFIG_KASAN prototype
 * (was plain "void *ptr", which made const-pointer callers warn only in
 * !CONFIG_KASAN builds).
 */
static inline void kasan_kmalloc_large(const void *ptr, size_t size) {}
static inline void kasan_kfree_large(const void *ptr) {}
static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size) {}
static inline void kasan_krealloc(const void *object, size_t new_size) {}

static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}

/* No shadow bookkeeping needed; report success unconditionally. */
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_module_free(void *addr) {}
|
|
|
|
#endif /* CONFIG_KASAN */
|
|
|
|
#endif /* _LINUX_KASAN_H */
|