kmemcheck: add mm functions

With kmemcheck enabled, the slab allocator needs to do this:

1. Tell kmemcheck to allocate the shadow memory which stores the status of
   each byte in the allocation proper, e.g. whether it is initialized or
   uninitialized.
2. Tell kmemcheck which parts of memory should be marked uninitialized.
   There are actually a few more states, such as "not yet allocated" and
   "recently freed".

If a slab cache is set up using the SLAB_NOTRACK flag, it will never return
memory that can take page faults because of kmemcheck.

If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still
request memory with the __GFP_NOTRACK flag. This does not prevent the page
faults from occurring; it merely marks the object in question as initialized,
so that no warnings will ever be produced for this object.

In addition to (and in contrast to) __GFP_NOTRACK, the
__GFP_NOTRACK_FALSE_POSITIVE flag indicates that the allocation should not be
tracked _because_ it would produce a false positive. Their values are
identical, but need not remain so in the future (for example, keeping them
separate would let false positives be enabled or disabled with a config
option).

Parts of this patch were contributed by Pekka Enberg, but are merged here for
atomicity.

Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
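
As a rough illustration of the two opt-out mechanisms described above, a
caller might do something like the following. This is a sketch only, not part
of the patch: the cache names, object size and init routine are invented, and
error handling is omitted. It assumes <linux/init.h>, <linux/slab.h> and
<linux/gfp.h>.

static struct kmem_cache *quiet_cache, *normal_cache;

static int __init kmemcheck_notrack_example(void)
{
        void *obj;

        /* A whole cache opts out: kmemcheck never hides its pages. */
        quiet_cache = kmem_cache_create("quiet_cache", 256, 0,
                                        SLAB_NOTRACK, NULL);

        /*
         * A tracked cache, but this one allocation opts out of warnings
         * only: the object is marked initialized, yet accesses to it still
         * take kmemcheck page faults.
         */
        normal_cache = kmem_cache_create("normal_cache", 256, 0, 0, NULL);
        obj = kmem_cache_alloc(normal_cache, GFP_KERNEL | __GFP_NOTRACK);

        kmem_cache_free(normal_cache, obj);
        return 0;
}
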
#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/kmemcheck.h>

void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
        struct page *shadow;
        int pages;
        int i;

        pages = 1 << order;

        /*
         * With kmemcheck enabled, we need to allocate a memory area for the
         * shadow bits as well.
         */
        shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
        if (!shadow) {
                if (printk_ratelimit())
                        pr_err("kmemcheck: failed to allocate shadow bitmap\n");
                return;
        }

        for (i = 0; i < pages; ++i)
                page[i].shadow = page_address(&shadow[i]);

        /*
         * Mark it as non-present for the MMU so that our accesses to
         * this memory will trigger a page fault and let us analyze
         * the memory accesses.
         */
        kmemcheck_hide_pages(page, pages);
}

void kmemcheck_free_shadow(struct page *page, int order)
{
        struct page *shadow;
        int pages;
        int i;

        if (!kmemcheck_page_is_tracked(page))
                return;

        pages = 1 << order;

        kmemcheck_show_pages(page, pages);

        shadow = virt_to_page(page[0].shadow);

        for (i = 0; i < pages; ++i)
                page[i].shadow = NULL;

        __free_pages(shadow, order);
}

void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
                          size_t size)
{
        if (unlikely(!object)) /* Skip object if allocation failed */
                return;

        /*
         * Has already been memset(), which initializes the shadow for us
         * as well.
         */
        if (gfpflags & __GFP_ZERO)
                return;

        /* No need to initialize the shadow of a non-tracked slab. */
        if (s->flags & SLAB_NOTRACK)
                return;

        if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
                /*
                 * Allow notracked objects to be allocated from
                 * tracked caches. Note however that these objects
                 * will still get page faults on access, they just
                 * won't ever be flagged as uninitialized. If page
                 * faults are not acceptable, the slab cache itself
                 * should be marked NOTRACK.
                 */
                kmemcheck_mark_initialized(object, size);
        } else if (!s->ctor) {
                /*
                 * New objects should be marked uninitialized before
                 * they're returned to the caller.
                 */
                kmemcheck_mark_uninitialized(object, size);
        }
}

void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
{
        /* TODO: RCU freeing is unsupported for now; hide false positives. */
        if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
                kmemcheck_mark_freed(object, size);
}

void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
                               gfp_t gfpflags)
{
        int pages;

        if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
                return;

        pages = 1 << order;

        /*
         * NOTE: We choose to track GFP_ZERO pages too; in fact, they
         * can become uninitialized by copying uninitialized memory
         * into them.
         */

        /* XXX: Can use zone->node for node? */
        kmemcheck_alloc_shadow(page, order, gfpflags, -1);

        if (gfpflags & __GFP_ZERO)
                kmemcheck_mark_initialized_pages(page, pages);
        else
                kmemcheck_mark_uninitialized_pages(page, pages);
}
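
For orientation, here is a hedged sketch of how the page hooks above bracket
a page allocation's lifetime. It is not part of this file, and the helper
name is hypothetical; in the kernel the real call sites live in the page
allocator and in the slab allocators.

static void kmemcheck_page_roundtrip_example(unsigned int order)
{
        struct page *page = alloc_pages(GFP_KERNEL, order);

        if (!page)
                return;

        /* Allocate the shadow pages and hide the data pages from the MMU. */
        kmemcheck_pagealloc_alloc(page, order, GFP_KERNEL);

        /*
         * Tracked accesses through page_address(page) now fault into
         * kmemcheck, which updates the shadow and can flag reads of
         * uninitialized bytes.
         */

        /* Make the data pages present again and release the shadow. */
        kmemcheck_free_shadow(page, order);
        __free_pages(page, order);
}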