commit 8c87df457c

gcc permitting variable length arrays makes the current construct used for
BUILD_BUG_ON() useless, as that doesn't produce any diagnostic if the
controlling expression isn't really constant.  Instead, this patch makes
it so that a bit field gets used here.  Consequently, those uses where the
condition isn't really constant now also need fixing.

Note that in the gfp.h, kmemcheck.h, and virtio_config.h cases
MAYBE_BUILD_BUG_ON() really just serves documentation purposes - even if
the expression is compile time constant (__builtin_constant_p() yields
true), the array is still deemed of variable length by gcc, and hence the
whole expression doesn't have the intended effect.

[akpm@linux-foundation.org: make arch/sparc/include/asm/vio.h compile]
[akpm@linux-foundation.org: more nonsensical assertions in tpm.c..]
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Rajiv Andrade <srajiv@linux.vnet.ibm.com>
Cc: Mimi Zohar <zohar@us.ibm.com>
Cc: James Morris <jmorris@namei.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
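For illustration, a minimal sketch of the bit-field trick the message
describes (the macro and function names here are made up for the example,
not the kernel's actual definitions): a negative bit-field width is a hard
compile error, and a bit-field width must be an integer constant
expression, so a non-constant condition is rejected as well - unlike a
variable length array, which gcc accepts silently.

	/* Fails to compile iff cond is true: the width becomes -1. It also
	 * fails if cond is not a compile-time constant, because bit-field
	 * widths must be integer constant expressions. */
	#define BUILD_BUG_ON_SKETCH(cond) \
		((void)sizeof(struct { int:-!!(cond); }))

	void example(void)
	{
		BUILD_BUG_ON_SKETCH(sizeof(int) > 8);	/* false on common ABIs: compiles */
		/* BUILD_BUG_ON_SKETCH(1); would fail: struct { int:-1; } */
	}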
166 lines
4.3 KiB
C
#ifndef LINUX_KMEMCHECK_H
#define LINUX_KMEMCHECK_H

#include <linux/mm_types.h>
#include <linux/types.h>

#ifdef CONFIG_KMEMCHECK
extern int kmemcheck_enabled;

/* The slab-related functions. */
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
void kmemcheck_free_shadow(struct page *page, int order);
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size);
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);

void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
			       gfp_t gfpflags);

void kmemcheck_show_pages(struct page *p, unsigned int n);
void kmemcheck_hide_pages(struct page *p, unsigned int n);

bool kmemcheck_page_is_tracked(struct page *p);

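/*
 * Shadow-state markers: going by the names, each tracked byte is in one
 * of four states - unallocated, uninitialized, initialized, or freed.
 * The state machine itself is implemented outside this header.
 */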
void kmemcheck_mark_unallocated(void *address, unsigned int n);
void kmemcheck_mark_uninitialized(void *address, unsigned int n);
void kmemcheck_mark_initialized(void *address, unsigned int n);
void kmemcheck_mark_freed(void *address, unsigned int n);

void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);

int kmemcheck_show_addr(unsigned long address);
int kmemcheck_hide_addr(unsigned long address);

bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);

#else
#define kmemcheck_enabled 0

static inline void
kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
}

static inline void
kmemcheck_free_shadow(struct page *page, int order)
{
}

static inline void
kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
		     size_t size)
{
}

static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
				       size_t size)
{
}

static inline void kmemcheck_pagealloc_alloc(struct page *p,
					     unsigned int order, gfp_t gfpflags)
{
}

static inline bool kmemcheck_page_is_tracked(struct page *p)
{
	return false;
}

static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_freed(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_unallocated_pages(struct page *p,
						    unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
						      unsigned int n)
{
}

static inline void kmemcheck_mark_initialized_pages(struct page *p,
						    unsigned int n)
{
}

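/*
 * Note the stub return values: with kmemcheck disabled, no page is
 * tracked (false above) and every object is reported as initialized
 * (true below), so callers can use these checks unconditionally.
 */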
static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
{
	return true;
}

#endif /* CONFIG_KMEMCHECK */

/*
 * Bitfield annotations
 *
 * How to use: If you have a struct using bitfields, for example
 *
 *	struct a {
 *		int x:8, y:8;
 *	};
 *
 * then this should be rewritten as
 *
 *	struct a {
 *		kmemcheck_bitfield_begin(flags);
 *		int x:8, y:8;
 *		kmemcheck_bitfield_end(flags);
 *	};
 *
 * Now the "flags_begin" and "flags_end" members may be used to refer to the
 * beginning and end, respectively, of the bitfield (and things like
 * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
 * fields should be annotated:
 *
 *	struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
 *	kmemcheck_annotate_bitfield(a, flags);
 *
 * Note: We provide the same definitions for both kmemcheck and non-
 * kmemcheck kernels. This makes it harder to introduce accidental errors. It
 * is also allowed to pass NULL pointers to kmemcheck_annotate_bitfield().
 */
#define kmemcheck_bitfield_begin(name)	\
	int name##_begin[0];

#define kmemcheck_bitfield_end(name)	\
	int name##_end[0];

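/*
 * As the commit message above explains, the MAYBE_BUILD_BUG_ON() inside
 * this macro serves documentation purposes only: it does not produce a
 * hard build failure even when _n is a compile-time constant.
 */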
#define kmemcheck_annotate_bitfield(ptr, name)				\
	do {								\
		int _n;							\
									\
		if (!ptr)						\
			break;						\
									\
		_n = (long) &((ptr)->name##_end)			\
			- (long) &((ptr)->name##_begin);		\
		MAYBE_BUILD_BUG_ON(_n < 0);				\
									\
		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
	} while (0)

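/*
 * kmemcheck_annotate_variable() marks a single variable (for example an
 * on-stack object whose bytes are filled in by hardware rather than by
 * the CPU) as initialized. Illustrative use, with a made-up type:
 *
 *	struct hw_status s;
 *	kmemcheck_annotate_variable(s);
 */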
#define kmemcheck_annotate_variable(var)			\
	do {							\
		kmemcheck_mark_initialized(&(var), sizeof(var));\
	} while (0)

#endif /* LINUX_KMEMCHECK_H */